blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
db91137380447cd0534d38b1c0605190d5a09bf1 | Python | rohankk2/hackerrank | /Cracking the coding Interview/Pairs.py | UTF-8 | 235 | 2.796875 | 3 | [] | no_license | def pairs(k, arr):
ma={}
count=0
for ele in arr:
ma[ele]=1
for ele in arr:
i=ele+k
try:
if(ma[i]==1):
count=count+1
except:
continue;
return count;
| true |
76a49249dd809195747c6846a35773ca1a0e1d26 | Python | kchun014/ML-LpNorm | /Correlation.py | UTF-8 | 2,433 | 3.21875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def covariance(x, y):
    """Population covariance of two equal-length sequences.

    Computes mean((x - mean(x)) * (y - mean(y))), i.e. the biased
    (divide-by-N) covariance — the same quantity the original
    loop-based implementation produced, but vectorized with numpy.
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    return float(np.mean((x - x.mean()) * (y - y.mean())))
def std_dev(x):
    """Population standard deviation (divide-by-N), matching the
    original hand-rolled sqrt(mean(squared deviations))."""
    return float(np.std(np.asarray(x, dtype=float)))
def correlation(x, y):
    """Pearson correlation coefficient of x and y
    (covariance normalized by both standard deviations)."""
    return covariance(x, y) / (std_dev(x) * std_dev(y))
def _plot_heatmap(data, labels, title):
    """Compute the pairwise-correlation matrix for the given column
    vectors and render/save it as a heatmap named *title*."""
    size = len(data)
    matrix = np.zeros(shape=(size, size))
    for i in range(size):
        for j in range(size):
            matrix[i, j] = correlation(data[i], data[j])
    plt.matshow(matrix)
    plt.colorbar()
    axes = plt.gca()
    # Leading '' label: matshow's first tick sits before the first cell.
    axes.set_xticklabels(labels)
    axes.set_yticklabels(labels)
    plt.title(title)
    plt.savefig(title, bbox_inches='tight', dpi=100)
    plt.show()

def main():
    """Load the iris and wine datasets and plot a pairwise-correlation
    heatmap for each (the original repeated the plotting code twice)."""
    a = np.loadtxt('iris.data', usecols=(0, 1, 2, 3), delimiter=',')
    b = np.loadtxt('wine.data', usecols=(0, 1, 2, 3), delimiter=',')
    plt.style.use("classic")
    # Iris: four feature columns over the first 150 rows.
    iris_data = [a[0:150, 0], a[0:150, 1], a[0:150, 2], a[0:150, 3]]
    iris_labels = ['', 'Sepal_L', 'Sepal_W', 'Petal_L', 'Petal_W']
    _plot_heatmap(iris_data, iris_labels, 'Iris_Heatmap')
    # Wine: columns 1-3 over the first 178 rows (column 0 is the class).
    wine_data = [b[0:178, 1], b[0:178, 2], b[0:178, 3]]
    wine_labels = ['', 'Alcohol', 'Malus_A', 'Ash']
    _plot_heatmap(wine_data, wine_labels, 'Wine_Heatmap')

main()
| true |
b3eb745ce9e4f0019b87668dd164b4abbcb2c687 | Python | BenPoser/python-projects | /Matplot/main.py | UTF-8 | 534 | 3.453125 | 3 | [] | no_license | import matplotlib.pyplot as plt
# World population and cumulative-death data points, 1950-2015, in billions.
years = [1950, 1955, 1960, 1965,
         1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015]
pops = [2.5, 2.7, 3.0, 3.3, 3.6, 4.0, 4.4,
        4.8, 5.3, 5.7, 6.1, 6.5, 6.9, 7.3]
deaths = [1.2, 1.7, 1.8, 2.2, 2.5,
          2.7, 2.9, 3, 3.1, 3.3, 3.5, 3.8, 4.0, 4.3]
# Population in light red (RGB given as 0-255 scaled to 0-1),
# deaths as a dashed light-blue line.
plt.plot(years, pops, color=(255/255, 100/255, 100/255))
plt.plot(years, deaths, '--', color=(.6, .6, 1))
plt.ylabel("Population in billions")
plt.xlabel("Year")
plt.title("Population Growth")
plt.show()
6195f83a4a95119ad8056eb2410305b7f3771e88 | Python | thales-mro/python-basic | /loops.py | UTF-8 | 132 | 3.78125 | 4 | [] | no_license | #pretty straightforward
# Two basic loop forms: iterate a list's values, and repeat a fixed count.
items = ['one', 'two', 'three']
for item in items:
    print(item)

for _ in range(4):
    print("Four times!")
| true |
62a3ca2fe58422f0481cf19512042382a40f40e6 | Python | vaishnavinambiar/Function-abstraction-exercises | /tc_factorial.py | UTF-8 | 355 | 3.453125 | 3 | [] | no_license | import unittest
def recur_factorial(n):
    """Return n! computed recursively.

    The original base case was `n == 1` only, so recur_factorial(0)
    (or any n < 1) recursed forever; `n <= 1` handles 0! == 1 as well.
    """
    if n <= 1:
        return 1
    return n * recur_factorial(n - 1)
class MyTest(unittest.TestCase):
    """Unit tests for recur_factorial."""

    def test1_fact(self):
        self.assertEqual(recur_factorial(5), 120)

    def test2_fact(self):
        # 2! == 2; the original expected 3, so this test always failed.
        self.assertEqual(recur_factorial(2), 2)

if __name__ == '__main__':
    unittest.main()
| true |
94030b19c05290ca8bee03de8cc6d186da034ffb | Python | basin87/python08062021 | /lesson_10/task_3.py | UTF-8 | 253 | 3.609375 | 4 | [] | no_license | def the_longest(sequence_of_words):
length_of_words = []
for word in sequence_of_words.split():
length_of_words.append((len(word), word))
length_of_words.sort()
print(length_of_words[-1][1])
the_longest("What makes a good man") | true |
b4099e6f6ddfa8dcbb849d4ddc3947ee6b18ef2c | Python | hidasri/Python | /Building_Machine_Learning_Systems_Python_Book/numpy.py | UTF-8 | 593 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 5 06:35:40 2017
@author: Sridharan Kamalakannan
"""
import numpy as np
import timeit as ti
list_time=ti.timeit('sum((x*x) for x in range(1000))',number=10000)
print('list time %f'%list_time)
bad_np_time=ti.timeit('sum(na*na)',
setup="import numpy as np;na=np.arange(1000);",
number=10000)
print('bad numpy time %f'%bad_np_time)
good_np_time=ti.timeit('na.dot(na)',
setup="import numpy as np;na=np.arange(1000);",
number=10000)
print('good numpy time %f'%good_np_time) | true |
febc0103013391d7a86643c743bb1987a6b4a8e3 | Python | RPGupta30/Activation-Functions | /Tanh.py | UTF-8 | 691 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
def tanH(x):
    """Plot and return s = (1 - e^-x) / (1 + e^-x) over the input array.

    NOTE(review): algebraically (1-e^-x)/(1+e^-x) equals tanh(x/2), not
    tanh(x) = (e^x - e^-x)/(e^x + e^-x) — confirm which was intended.
    """
    s = (1-np.exp(-x))/(1+np.exp(-x))
    plt.plot(x,s)
    plt.xlabel('valus of x')
    plt.ylabel('tanH values')
    plt.title('tanH Function')
    plt.legend(["Tanh"])
    plt.show()
    return s
def tanH_Derivative(x):
    """Plot and return 1 - s**2 where s = (1 - e^-x) / (1 + e^-x).

    1 - tanh(u)**2 is the tanh derivative formula; NOTE(review): since s
    here is tanh(x/2) (see tanH above), 1 - s**2 is not d/dx tanh(x) —
    confirm the intended curve.
    """
    s = (1-np.exp(-x))/(1+np.exp(-x))
    sd = 1 - s**2
    plt.plot(x,sd)
    plt.xlabel('valus of x')
    plt.legend(["Tanh Derivative"])
    plt.ylabel('TanH_Derivative values')
    plt.title('TanH_Derivative Function')
    plt.show()
    return sd
# Evaluate both curves on the integers -10..10 and print the values
# (each call also draws its plot as a side effect).  The original bound
# s/sd to print(...)'s return value, which is always None; the extra
# np.array() wrapper around np.arange() was also redundant.
x = np.arange(-10, 11, 1)
print(tanH(x))
print(tanH_Derivative(x))
# In[ ]:
| true |
ecfd393a47e553a2a293a902ad1312fa67b6bdf6 | Python | HenryFreeman1/covid_stats | /datasets.py | UTF-8 | 3,917 | 2.875 | 3 | [] | no_license | import pandas as pd
import json
from datetime import date
import scraper
import urllib
def get_csv_from_url(url, outfile):
    """Download *url* and save it as _data/<outfile>.

    (The original also derived a `filename` from the URL's last path
    segment but never used it — removed.)
    """
    filepath = '_data/' + outfile
    urllib.request.urlretrieve(url, filepath)
def get_wb_data(file_path,data_name):
    """Load one World Bank indicator CSV and return its 2018 column as a
    one-column frame named *data_name*, indexed by (remapped) country name.
    """
    #http://api.worldbank.org/v2/en/indicator/SP.POP.TOTL?downloadformat=csv
    # skiprows=4: World Bank CSVs carry a 4-line preamble before the header.
    df = pd.read_csv(file_path,skiprows=4)
    series = df.set_index('Country Name')['2018']
    series.name = data_name
    #fix the mismatched names in the world bank data
    # name_mapping.json maps World Bank country names onto the names used
    # by the scraped COVID data so the later join lines up.
    with open('name_mapping.json') as json_file:
        name_mapping = json.load(json_file)
    data = series.reset_index()
    data['Country Name'].replace(name_mapping,inplace=True)
    data.set_index('Country Name',inplace=True)
    return data
def join_wb_data():
    """Return a frame of World Bank population density + total population,
    one row per country (2018 values), from the two local CSV snapshots."""
    #get pop density
    #http://api.worldbank.org/v2/en/indicator/EN.POP.DNST?downloadformat=csv
    pop_density = get_wb_data('API_EN.POP.DNST_DS2_en_csv_v2_936296.csv','pop_density')
    #get population
    #http://api.worldbank.org/v2/en/indicator/SP.POP.TOTL?downloadformat=csv
    pop = get_wb_data('API_SP.POP.TOTL_DS2_en_csv_v2_936048.csv','pop')
    wb_data = pd.concat([pop_density,pop],axis=1)
    return wb_data
def join_data_sources():
    """Join the latest scraped COVID figures with the World Bank columns."""
    return scraper.get_latest_data().join(join_wb_data())
def generate_features(df):
    """Derive per-capita and fatality-rate columns on *df* (in place).

    Adds 'deaths_per_million', 'cases_per_million' and 'death_rate_%',
    then returns the same DataFrame for chaining.
    """
    per_million = 1000000
    df['deaths_per_million'] = df['deaths'] / df['pop'] * per_million
    df['cases_per_million'] = df['cases'] / df['pop'] * per_million
    df['death_rate_%'] = df['deaths'] / df['cases'] * 100
    return df
def import_static_data():
    """Build the full static dataset: scraped + World Bank data,
    with the derived per-capita/rate features added."""
    return generate_features(join_data_sources())
def get_csse_time_series_deaths():
    """Fetch the JHU CSSE global deaths time series and return it as a
    long-format frame with columns ['date', 'country', 'deaths']."""
    df = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv")
    meta_cols = ['Province/State','Country/Region','Lat','Long']
    time_series = df.drop(meta_cols,axis=1)
    #get a unique country name to use as id
    # Combine country + province (blank when missing) so rows like
    # country-level vs province-level entries get distinct ids.
    df['Province/State'] = df['Province/State'].fillna("")
    df['country'] = df['Country/Region'] + " " + df['Province/State']
    df['country'] = df['country'].str.rstrip()
    #pivot the dataframe
    # Transpose so rows become dates and columns become countries.
    time_series = time_series.T
    time_series.columns = df['country']
    #melt countries and deaths into single column
    df_melted = pd.melt(time_series.reset_index(), id_vars=["index"])
    df_melted.columns = ['date','country','deaths']
    df_melted['date'] = pd.to_datetime(df_melted['date'])
    return df_melted
def get_apple_movement_indices(movement_type='walking'):
    """
    get latest time series data from apple on population movement by country
    - movement_type <str> enum "walking"|"driving"|"transit"

    Returns a long-format frame with columns
    ['date', 'country', 'movement_index'].
    """
    try:
        today = date.today().strftime("%Y-%m-%d")
        url = f'https://covid19-static.cdn-apple.com/covid19-mobility-data/2006HotfixDev16/v1/en-us/applemobilitytrends-{today}.csv'
        df = pd.read_csv(url)
    except Exception:
        # Fall back to a known-good snapshot when today's file is not yet
        # published.  (The original used a bare `except:`, which would also
        # swallow KeyboardInterrupt/SystemExit.)
        df = pd.read_csv('https://covid19-static.cdn-apple.com/covid19-mobility-data/2006HotfixDev16/v1/en-us/applemobilitytrends-2020-04-24.csv')
    meta_cols = ['geo_type','region','transportation_type']
    #filter by movement type
    df_walk = df.loc[df['transportation_type'] == movement_type]
    assert df_walk['region'].is_unique
    time_series = df_walk.drop(meta_cols,axis=1)
    #pivot the dataframe: rows become dates, columns become regions
    time_series = time_series.T
    time_series.columns = df_walk['region']
    #melt countries and y vals into single column
    df_melted = pd.melt(time_series.iloc[:,:].reset_index(),id_vars=['index'])
    df_melted.columns = ['date','country','movement_index']
    df_melted['date'] = pd.to_datetime(df_melted['date'])
    return df_melted

if __name__ == "__main__":
    pass
86b3f7fb3f8a56729863dbd27a6cab251d1ebd03 | Python | zhudingsuifeng/python | /2018/repeat.py | UTF-8 | 163 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python3
#coding = utf-8
def times(s, num):
    """Return the string *s* repeated int(num) times."""
    return int(num) * s

if __name__ == "__main__":
    # Read "<string> <count>" from stdin and echo the repetition.
    s, num = input().split()
    print(times(s, num))
| true |
5d979d96d935f98027861c2039dc056a52ee9b8e | Python | OneCalledSyn/project-euler | /Python Problems/47.py | UTF-8 | 682 | 3.515625 | 4 | [] | no_license | def factor_finder(n):
divisor = 2
factors = set()
while divisor < n ** 0.5 or n == 1:
#print(divisor)
if n % divisor == 0:
n /= divisor
factors.add(divisor)
divisor -= 1
divisor += 1
return len(factors) + 1
# Project Euler 47: find the first four consecutive integers that each
# have exactly four distinct prime factors.  Start at 2*3*5*7 = 210,
# the smallest number with four distinct prime factors.
current = 2 * 3 * 5 * 7
while current < 200000 :
    #print(current)
    if factor_finder(current) == 4:
        current += 1
        if factor_finder(current) == 4:
            current += 1
            if factor_finder(current) == 4:
                current += 1
                if factor_finder(current) == 4:
                    # current has advanced 3 past the run's start,
                    # so the answer is current - 3.
                    print (current - 3)
                    break
    current += 1
| true |
92199bb5e4f44db70f6e02b1ce7d9081d2f2f52a | Python | slimanej4c/amira | /tf_idf2.py | UTF-8 | 3,081 | 2.765625 | 3 | [] | no_license |
import pandas as pd
import matplotlib.pyplot as plt
from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askopenfilename
import pandas as pd
from pandastable import Table, TableModel
from nettoyage import *
class tf_idf():
    """Compute TF-IDF scores for a nested list of document strings and
    display the resulting matrix in a pandastable Table inside *root*.

    NOTE(review): __init__ builds a Frame it never reuses (afficher_idf
    builds its own), and count_idf still contains debug print() calls.
    """
    def __init__(self,args,root,h,w):
        # args: iterable of iterables of document strings;
        # h/w: table height/width passed through to pandastable.
        centre_frame1 = Frame(root, bg='white', width=630, height=700)
        centre_frame1.grid(row=3, column=0, sticky=N)
        centre_frame1.grid_propagate(0)
        self.f = centre_frame1
        self.h=h
        self.w=w
        # list_golbal: per-document word sets; bow: per-document token lists.
        list_golbal=[]
        bow=[]
        for i in args:
            for ii in i :
                a=str(ii).split(" ")
                list_golbal.append(set(a))
                bow.append(a)
        # Vocabulary = union of every document's word set.
        wordset=list(frozenset().union(*list_golbal))
        #wordict=dict.fromkeys(wordset,0)
        # Raw term counts per document over the shared vocabulary.
        list_global_dic=[]
        for i in bow:
            wordict = dict.fromkeys(wordset, 0)
            for word in i :
                wordict[word]+=1
            list_global_dic.append(wordict)
        # NOTE(review): `data` is built from the first two documents only
        # and never used afterwards.
        data=pd.DataFrame([list_global_dic[0],list_global_dic[1]])
        self.tfbow = self.count_tf(list_global_dic, bow)
        self.idfs = self.count_idf(list_global_dic)
        self.tf_idf2 = self.count_tf_idf(self.tfbow, self.idfs)
        #tf_idf2 = self.count_tf_idf(tfbow2, idfs)
        self.afficher_idf(root)
    def afficher_idf(self,root):
        """Show the TF-IDF matrix as a pandastable Table inside *root*."""
        centre_frame1 = Frame(root, bg='white', width=630, height=700)
        centre_frame1.grid(row=3, column=0, sticky=N)
        centre_frame1.grid_propagate(0)
        self.data = pd.DataFrame(self.tf_idf2, columns=list(self.tf_idf2[0].keys()))
        pt = Table(centre_frame1, dataframe=self.data, height=self.h, width=self.w)
        pt.show()
    def count_tf(self,wordict,bow):
        """Term frequency: per-document counts divided by document length."""
        tf_dict={}
        list_tf=[]
        for i ,j in zip(wordict,bow):
            bow_count = len(j )
            tf_dict = {}
            for word ,count in i.items():
                tf_dict[word]=count/float(bow_count)
            list_tf.append(tf_dict)
        return list_tf
    def count_idf(self,doclist):
        """Inverse document frequency: log(N / document-count) per word."""
        import math
        idf_dict={}
        n=len(doclist)
        idf_dict=dict.fromkeys(doclist[0].keys(),0)
        print(idf_dict)
        # Count, for each word, in how many documents it appears.
        for doc in doclist:
            for word , val in doc.items():
                if val> 0:
                    idf_dict[word]+=1
        print(idf_dict)
        for word ,val in idf_dict.items():
            idf_dict[word]=math.log(n/float(val))
        return idf_dict
    def count_tf_idf(self,tfblow,idfs):
        """Multiply each document's TF values by the shared IDF weights."""
        tf_idf={}
        lts_tf_idf=[]
        for i in tfblow:
            tf_idf = {}
            for word , val in i.items():
                tf_idf[word]=val*idfs[word]
            lts_tf_idf.append(tf_idf)
        return lts_tf_idf
# Sample corpus for manual testing (punctuation/digits left in on purpose).
list_text=[["the cat sat in my 1 2 ... @face"],["the dog sat in my bed..... 1 . ;;;;"],["the dog sat in my bed"],["the dog sat in my bed"]]
#tf_idf(list_text)
418a709f31cba2bd154cbd8a3bc43119fc067ed6 | Python | stefanspasov/blockchain | /backend/app/__init__.py | UTF-8 | 1,175 | 2.609375 | 3 | [] | no_license | import sys
import requests
import random
from flask import Flask, jsonify
from backend.blockchain.blockchain import Blockchain
from backend.pubsub import PubSub
# Flask app, shared chain, and the pub/sub channel that broadcasts blocks.
app = Flask(__name__)
blockchain = Blockchain()
pubsub = PubSub(blockchain)
# Seed the chain with three stub blocks (data 0, 1, 2).
for i in range(3):
    blockchain.add_block(i)
@app.route('/')
def default():
    """Landing route: plain-text greeting."""
    return 'Welcome to the blockchain'
@app.route('/blockchain')
def route_blockchain():
    """Return the whole chain serialized as JSON."""
    return jsonify(blockchain.to_json())
@app.route('/blockchain/mine')
def route_blockchain_mine():
    """Mine a block with stub transaction data, broadcast it to peers,
    and return the new block as JSON."""
    transaction_data = 'stubbed_transaction_data'
    blockchain.add_block(transaction_data)
    block = blockchain.chain[-1]
    pubsub.broadcast_block(block)
    return jsonify(block.to_json())
# Peer bootstrap: when started as `python ... peer`, pick a random port
# and sync the chain from the root node before serving.
ROOT_PORT = 5000
PORT = ROOT_PORT
try:
    print('Peer')
    # Missing argv[1] raises IndexError, which the broad except below
    # absorbs — that is how "not a peer" is detected.
    if sys.argv[1] == 'peer':
        PORT = random.randint(5001, 6000)
        result = requests.get(f'http://localhost:{ROOT_PORT}/blockchain')
        result_blockchain = Blockchain.from_json(result.json())
        blockchain.replace_chain(result_blockchain.chain)
        print(f'result.json: {result.json()}')
except Exception as e:
    print(f'Not a peer or error: {e}')
app.run(port=PORT)
5efe3dc979a3c9046dc9cbff2e1304d9158cf464 | Python | fcu-d0449763/PythonPractice | /D0449763_hw01_4.py | UTF-8 | 317 | 3.703125 | 4 | [] | no_license | # HW4
# HW4: a valid password must contain at least one uppercase letter,
# one digit, and one lowercase letter.
a = input()
has_upper = any(ch.isupper() for ch in a)
has_digit = any(ch.isnumeric() for ch in a)
has_lower = any(ch.islower() for ch in a)
if has_upper and has_digit and has_lower:
    print("密碼設定完成")
else:
    print("密碼設定不符合規定")
e454842155561068ae4b63cdefdae2fabd3db4bd | Python | war-and-peace/SearchEngine | /src/engine/functions.py | UTF-8 | 4,489 | 2.875 | 3 | [
"MIT"
] | permissive | from bs4 import BeautifulSoup
import re
import codecs
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from os import path
import json
# Helper functions: normalization, lemmatization
def normalize(text):
    """
    Converts text to lower string and removes numbers, punctuations,
    and other non-alphabetic characters, but leaving space and '*'
    """
    pieces = re.findall(r"(\w*)", text.lower())
    without_digits = re.sub(r"[0-9]", "", " ".join(pieces))
    return re.sub(r"( +)|(_+)", " ", without_digits)
def normalize_query(text):
    """Like normalize(), but also preserves '*' (wildcard queries)."""
    pieces = re.findall(r"([\w\*]*)", text.lower())
    without_digits = re.sub(r"[0-9]", "", " ".join(pieces))
    return re.sub(r"( +)|(_+)", " ", without_digits)
def tokenize(text):
    """
    Breaks the text down into tokens
    (delegates to NLTK's word_tokenize; requires the punkt data — TODO confirm it is downloaded).
    """
    return word_tokenize(text)
def lemmatize(tokens):
    """Lemmatize each token with WordNet (default noun POS)."""
    # NOTE(review): a fresh WordNetLemmatizer is built on every call;
    # hoist/cache it if this becomes hot.
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(x) for x in tokens]
def remove_stop_word(tokens):
    """Drop English stop words from *tokens*, preserving order."""
    stop_words = set(stopwords.words('english'))
    return [token for token in tokens if token not in stop_words]
def get_collection(dataset_path, collection_path):
    """Parse Reuters SGML shard(s) into {newid: [title, body]}, persist the
    collection as JSON at *collection_path*, and return (ids, {id: title}).

    NOTE(review): range(1) reads only reut2-000.sgm — presumably a
    deliberate subset for development; confirm before relying on full data.
    """
    collection = {}
    ids = []
    info = {}
    for i in range(1):
        file_path = path.join(dataset_path, f'reut2-{str(i).zfill(3)}.sgm')
        with codecs.open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
            doc = f.read()
            soup = BeautifulSoup(doc)
            reuters = soup.find_all('reuters')
            for x in reuters:
                id = x['newid']
                # Slices strip the surrounding tag markup from str(tag);
                # assumes fixed-length <title>/<body> wrappers — TODO confirm offsets.
                title = str(x.find('title'))[7:-8]
                body = str(x.find('body'))[6:-9]
                ids.append(id)
                info[id] = title
                collection[id] = [title, body]
    with open(collection_path, 'w') as fout:
        json.dump(collection, fout)
    return ids, info
def regular_inverted_index(collection_path, index_path):
    """Build an inverted index (word -> set of doc ids) over the saved
    collection and write one JSON posting-list file per word under
    *index_path*.  Returns the list of indexed words.
    """
    with open(collection_path) as file:
        collection = json.load(file)

    inverted_index = {}
    for key in collection:
        doc_id = int(key)
        # Index body (collection[key][1]) then title (collection[key][0])
        # through the same normalize/tokenize/lemmatize pipeline.
        for field in (collection[key][1], collection[key][0]):
            for word in lemmatize(tokenize(normalize(field))):
                inverted_index.setdefault(word, set()).add(doc_id)

    words = []
    for word, postings in inverted_index.items():
        words.append(word)
        # Sets are not JSON-serializable, so dump the postings as a list.
        with open(path.join(index_path, word), 'w') as out:
            json.dump(list(postings), out)
    return words
def soundex(token):
    """Return the 4-character Soundex code of *token* (lowercase letters).

    First letter is kept, the rest are mapped to digit classes, adjacent
    duplicate digits are collapsed, zeros (vowels/h/w/y) dropped, and the
    result padded/truncated to 4 characters.
    """
    mapping = {'a': 0, 'e': 0, 'i': 0, 'o': 0, 'u': 0, 'h': 0, 'w': 0, 'y': 0,
               'b': 1, 'f': 1, 'p': 1, 'v': 1,
               'c': 2, 'g': 2, 'j': 2, 'k': 2, 'q': 2, 's': 2, 'x': 2, 'z': 2,
               'd': 3, 't': 3,
               'l': 4,
               'm': 5, 'n': 5,
               'r': 6}
    if len(token) < 1:
        raise ValueError
    encoded = "".join(str(mapping[ch]) for ch in token[1:])
    encoded = re.sub(r"(\d)\1+", r"\1", encoded)  # collapse adjacent repeats
    encoded = encoded.replace("0", "")            # drop ignored-letter class
    return (token[0] + encoded + "000")[:4]
def soundex_inverted_index(collection):
    """Build an inverted index keyed by Soundex code: code -> set of the
    positions (in *collection*) of documents containing a matching word."""
    inverted_index = {}
    for index, document in enumerate(collection):
        for word in tokenize(normalize(document)):
            inverted_index.setdefault(soundex(word), set()).add(index)
    return inverted_index
def edit_distance(str1, str2, m, n):
if m == 0:
return n
if n == 0:
return m
if str1[m - 1] == str2[n - 1]:
return edit_distance(str1, str2, m - 1, n - 1)
return 1 + min(edit_distance(str1, str2, m, n - 1), edit_distance(str1, str2, m - 1, n),
edit_distance(str1, str2, m - 1, n - 1))
def levenshtein(a, b):
    """Levenshtein (edit) distance between the full strings a and b."""
    return edit_distance(a, b, len(a), len(b))
def parse_config(config_path):
    """Load and return the JSON configuration at *config_path*."""
    with open(config_path) as handle:
        return json.load(handle)
def isValidConfig(config):
    """Return True iff 'dataset', 'collection' and 'index' are all
    present and non-None in *config*.

    Unlike the original (which raised KeyError on a missing key and used
    an if/else returning literal booleans), a missing key now simply
    makes the config invalid.
    """
    required = ("dataset", "collection", "index")
    return all(config.get(key) is not None for key in required)
| true |
3c2c4b735a74eb77513464fc5865d0681d87dbc9 | Python | loganwastlund/cs1410 | /isbn_assignment/isbnTests/test_createIndex.py | UTF-8 | 704 | 2.5625 | 3 | [] | no_license | """
Do Not Edit this file. You may and are encouraged to look at it for reference.
"""
import unittest
from isbnTests import isbn_index
class test_createIndex(unittest.TestCase):
    """Tests that isbn_index.createIndex exists and returns an empty dict."""
    def setUp(self):
        # Expected result of createIndex(): an empty index.
        self.mExpectedResults = { }
        return
    def tearDown(self):
        return
    def test001_createIndexExists(self):
        """createIndex must be defined in the isbn_index module."""
        self.assertTrue('createIndex' in dir(isbn_index),
                        'Function "createIndex" is not defined, check your spelling')
        return
    def test002_createIndexReturnsEmptyDictionary(self):
        """A fresh index must be an empty dictionary."""
        self.assertDictEqual(isbn_index.createIndex(), self.mExpectedResults)
        return
if __name__ == '__main__':
    unittest.main()
| true |
be209119f19c7a3aae18d8892f64d39655813824 | Python | algas/shrinkwrap2yarn | /shrinkwrap2yarn/yarn.py | UTF-8 | 2,171 | 2.609375 | 3 | [
"MIT"
] | permissive | from collections import namedtuple
import logging
## Definition of YarnElement: one npm-shrinkwrap entry
## (from_ mirrors npm's "from" field; trailing underscore avoids the keyword).
YarnElement = namedtuple('YarnElement', 'name from_ version resolved dependencies')
## Definition of YarnDependency: a (name, from_, version) dependency reference.
YarnDependency = namedtuple('YarnDependency', 'name from_ version')
class YarnLock:
    """One yarn.lock entry: a set of "name@range" keys that resolve to a
    single version/tarball, plus that package's dependencies."""

    def __init__(self, names, version, resolved, dependencies=None):
        self.names = names
        self.version = version
        # Mirror npm registry tarball URLs onto the yarn registry host.
        self.resolved = resolved.replace('npmjs.org', 'yarnpkg.com') if resolved else resolved
        # The original used `dependencies=[]`, the classic shared
        # mutable-default pitfall; use a None sentinel instead.
        self.dependencies = [] if dependencies is None else dependencies

    def __repr__(self):
        def _dep_version(d):
            # Git dependencies are pinned by their git URL, not a semver.
            if d.from_ and d.from_.startswith('git:'):
                return d.from_
            return d.version
        strs = [
            ', '.join(self.names) + ':',
            '  version "{0}"'.format(self.version),
            '  resolved "{0}"'.format(self.resolved)
        ]
        if len(self.dependencies) > 0:
            strs.append('  dependencies:')
            strs.extend(['    "{0}" "{1}"'.format(d.name, _dep_version(d)) for d in self.dependencies])
        strs.append('')
        return '\n'.join(strs)

    # Backward-compatible alias: the original (mis)named this __repl__,
    # which meant repr() never used it.
    __repl__ = __repr__

    def __str__(self):
        return self.__repr__()
class YarnLockFactory:
    ## Yarn Lock Factory: merges YarnElements that resolve to the same
    ## tarball into single YarnLock entries.
    def _merge(self, yarn_elements):
        """Group elements by their `resolved` URL, unioning their
        "name@range" keys."""
        def _get_name(yarn):
            # Prefer the "from" spec; fall back to name@version, and pin
            # git dependencies by their git URL.
            if yarn.from_ is None:
                return '"{0}@{1}"'.format(yarn.name, yarn.version)
            elif yarn.from_.startswith('git:'):
                return '"{0}@{1}"'.format(yarn.name, yarn.from_)
            return '"{0}"'.format(yarn.from_)
        yarns = {}
        for yarn in yarn_elements:
            if yarn.resolved not in yarns:
                yarns[yarn.resolved] = {'names':set([_get_name(yarn)]), 'version':yarn.version, 'dependencies':yarn.dependencies}
            else:
                yarns[yarn.resolved]['names'].add(_get_name(yarn))
        return yarns
    def _locks(self, yarns):
        """Turn the merged mapping into YarnLock objects."""
        return [YarnLock(yarn['names'], yarn['version'], resolved, yarn['dependencies']) for resolved, yarn in yarns.items()]
    def run(self, yarn_elements):
        """Return the merged YarnLock entries, sorted by their name sets.

        NOTE(review): sorting by repr() of a *set* depends on set
        iteration order and is therefore not stable across interpreter
        runs (hash randomization) — confirm whether deterministic output
        is required.
        """
        return sorted(self._locks(self._merge(yarn_elements)), key=lambda x:repr(x.names))
| true |
998dfd83c3b59cc3fc3956cbc4630832911f0a5c | Python | klgentle/lc_python | /base_practice/create_foler_and_file_from_list.py | UTF-8 | 1,350 | 3.15625 | 3 | [] | no_license | import os
from pprint import pprint
# Base folder where per-difficulty subfolders and problem files are created
# (WSL path; the commented line is the Windows-native equivalent).
PATH = "/mnt/d/code_test/lc_python/leet_code/top_interview_questions"
# PATH = "D\\code_test\\lc_python\\leet_code\\top_interview_questions"
def open_file(file_name: str) -> list:
    """Parse a tab-separated "id<TAB>name<TAB>difficulty" file into a list
    of [zero-padded id, stripped name, "<difficulty>_Case"] records."""
    records = []
    with open(file_name, "r") as handle:
        for raw_line in handle:
            problem_id, problem_name, difficulty = raw_line.strip().split("\t")
            records.append([problem_id.zfill(4),
                            problem_name.strip(),
                            difficulty + "_Case"])
    return records
def make_path_of_all():
    """Create the Easy/Medium/Hard case folders under PATH (idempotent).

    exist_ok=True replaces the original exists()-then-makedirs() pair,
    which had a create-between-check race.
    """
    for folder_name in ("Easy_Case", "Medium_Case", "Hard_Case"):
        os.makedirs(os.path.join(PATH, folder_name), exist_ok=True)
def create_folder_and_file_from_list(file_name_list):
    """Create an empty "<id>_<name>.py" file under PATH/<difficulty>/ for
    each [id, name, difficulty] record."""
    for problem_id, problem_name, folder_name in file_name_list:
        file_name = problem_id + "_" + problem_name + ".py"
        new_file = os.path.join(PATH, folder_name, file_name)
        # `with` closes the handle deterministically (the original used a
        # bare open()/close() pair, leaking on any exception in between).
        with open(new_file, "w"):
            pass
if __name__ == "__main__":
    # Create the folder skeleton, then one empty file per problem listed
    # in PATH/list.txt.
    make_path_of_all()
    file_name = os.path.join(PATH, "list.txt")
    file_list = open_file(file_name)
    create_folder_and_file_from_list(file_list)
    print("Done!")
| true |
1979ff42b924950aaddb181e0ec5e35a2dc0438d | Python | augustonz/Color-Theories | /src/ColorTheories/constants.py | UTF-8 | 6,097 | 2.5625 | 3 | [
"MIT"
] | permissive | import pygame
from ColorTheories.classes.Level import Level
from ColorTheories.classes.Entity import Entity
from ColorTheories.tools import load_font, load_img
# Font must be initialized before any roboto.render() calls below.
pygame.font.init()
roboto = load_font('Roboto-Thin.ttf',25)
roboto.bold = True
# Game entities: each Entity gets a draw callback that blits either a
# fruit sprite or a rendered letter at (x, y).
_E_MACA = Entity(1, 'Maçã', lambda surface, x, y: surface.blit(load_img('maca.png'),(x,y)))
_E_MACA2 = Entity(1, 'Maçã', lambda surface, x, y: surface.blit(load_img('maca2.png'),(x,y)))
_E_LARANJA = Entity(1, 'Laranja', lambda surface, x, y: surface.blit(load_img('laranja.png'),(x,y)))
_E_CEREJA = Entity(1, 'Cereja', lambda surface, x, y: surface.blit(load_img('cereja.png'),(x+2,y)))
_E_BANANA = Entity(1, 'Banana', lambda surface, x, y: surface.blit(load_img('banana.png'),(x,y)))
_E_UVA = Entity(1, 'Uva', lambda surface, x, y: surface.blit(load_img('uva.png'),(x,y)))
_E_MORANGO = Entity(1, 'Morango', lambda surface, x, y: surface.blit(load_img('morango.png'),(x,y)))
_E_MELANCIA = Entity(1, 'Melancia', lambda surface, x, y: surface.blit(load_img('melancia.png'),(x,y)))
_E_PERA = Entity(1, 'Pera', lambda surface, x, y: surface.blit(load_img('pera.png'),(x,y)))
_E_LIMAO = Entity(1, 'Limão', lambda surface, x, y: surface.blit(load_img('limao.png'),(x,y)))
_E_LETRA_E = Entity(1, 'letra e', lambda surface, x, y: surface.blit(roboto.render(' e ', True, 'white', 'black'), (x+15,y+18)))
_E__LETRA_A = Entity(1, 'letra a', lambda surface, x, y: surface.blit(roboto.render(' a ', True, 'white', 'black'), (x+15,y+18)))
_E_LETRA_B = Entity(1, 'letra b', lambda surface, x, y: surface.blit(roboto.render(' b ', True, 'white', 'black'), (x+15,y+18)))
# Names of every entity defined above (dir() at module level lists them).
entities = [prop for prop in dir() if '_E_' in prop]
# Level definitions.  Per the literals below, the Level arguments are:
# (name, entity list, entity list, board matrix, int, list of (row, col)).
# NOTE(review): the exact semantics of each argument come from
# ColorTheories.classes.Level — confirm before reordering.
level_objects = [
    Level('1', [_E_LETRA_E, _E__LETRA_A], [_E_LETRA_E, _E__LETRA_A],
          [[_E_LETRA_E, _E__LETRA_A],
           [_E__LETRA_A, _E_LETRA_E]], 1, [(0, 0)]),
    Level('2', [_E_LETRA_E, _E__LETRA_A, _E_LETRA_B], [_E_LETRA_E, _E__LETRA_A, _E_LETRA_B],
          [[_E_LETRA_E, _E__LETRA_A, _E_LETRA_B],
           [_E__LETRA_A, _E_LETRA_B, _E_LETRA_E],
           [_E_LETRA_B, _E_LETRA_E, _E__LETRA_A]], 1, [(0, 0)]),
    Level('3', [_E_BANANA, _E_CEREJA], [_E_BANANA, _E_CEREJA],
          [[_E_BANANA, _E_CEREJA],
           [_E_CEREJA, _E_BANANA]], 1, [(0, 0)]),
    Level('4', [_E_MACA, _E_LARANJA, _E_LIMAO], [_E_MACA, _E_LARANJA, _E_LIMAO],
          [[_E_MACA, _E_LARANJA, _E_LIMAO],
           [_E_LARANJA, _E_LIMAO, _E_MACA],
           [_E_LIMAO, _E_MACA, _E_LARANJA]], 1, [(0, 1), (0, 2)]),
    Level('5', [_E_UVA, _E_LARANJA, _E_BANANA], [_E_BANANA, _E_UVA, _E_LARANJA],
          [[_E_BANANA, _E_UVA, _E_LARANJA],
           [_E_UVA, _E_LARANJA, _E_BANANA],
           [_E_LARANJA, _E_BANANA, _E_UVA]], 1, [(1, 1)]),
    Level('6', [_E_MACA2, _E_PERA, _E_MACA], [_E_MACA, _E_PERA, _E_MACA2],
          [[_E_MACA, _E_MACA2, _E_PERA],
           [_E_PERA, _E_MACA, _E_MACA2],
           [_E_MACA2, _E_PERA, _E_MACA]], 1, [(1, 2)]),
    Level('7', [_E_CEREJA, _E_MELANCIA, _E_MORANGO], [_E_MELANCIA, _E_CEREJA, _E_MORANGO],
          [[_E_MORANGO, _E_CEREJA, _E_MELANCIA],
           [_E_MELANCIA, _E_MORANGO, _E_CEREJA],
           [_E_CEREJA, _E_MELANCIA, _E_MORANGO]], 1, [(1, 0)]),
    Level('8', [_E_LIMAO, _E_LARANJA, _E_UVA, _E_PERA], [_E_LARANJA, _E_UVA, _E_PERA, _E_LIMAO],
          [[_E_PERA, _E_UVA, _E_LARANJA, _E_LIMAO],
           [_E_LIMAO, _E_LARANJA, _E_UVA, _E_PERA],
           [_E_LARANJA, _E_LIMAO, _E_PERA, _E_UVA],
           [_E_UVA, _E_PERA, _E_LIMAO, _E_LARANJA]], 1, [(0, 0), (2, 0), (1, 1), (1, 2), (2, 3)]),
    Level('9', [_E_LIMAO, _E_MACA2, _E_MELANCIA, _E_CEREJA], [_E_MACA2, _E_MELANCIA, _E_CEREJA, _E_LIMAO],
          [[_E_MACA2, _E_CEREJA, _E_LIMAO, _E_MELANCIA],
           [_E_MELANCIA, _E_LIMAO, _E_CEREJA, _E_MACA2],
           [_E_CEREJA, _E_MELANCIA, _E_MACA2, _E_LIMAO],
           [_E_LIMAO, _E_MACA2, _E_MELANCIA, _E_CEREJA]], 1, [(0, 0), (1, 2), (2, 3)]),
    Level('10', [None, _E_BANANA, None], [_E_MORANGO, _E_MACA2, _E_BANANA],
          [[_E_MORANGO, _E_BANANA, _E_MACA2],
           [_E_MACA2, _E_MORANGO, _E_BANANA],
           [_E_BANANA, _E_MACA2, _E_MORANGO]], 1, [(0, 1), (0, 2)]),
    Level('11', [_E_CEREJA, None, _E_PERA], [None, _E_LARANJA, None],
          [[_E_CEREJA, _E_LARANJA, _E_PERA],
           [_E_LARANJA, _E_PERA, _E_CEREJA],
           [_E_PERA, _E_CEREJA, _E_LARANJA]], 1, [(2, 0), (0, 2)]),
    Level('12', [_E_LIMAO, None, None, _E_CEREJA], [_E_MACA2, _E_MELANCIA, _E_CEREJA, _E_LIMAO],
          [[_E_MACA2, _E_CEREJA, _E_LIMAO, _E_MELANCIA],
           [_E_MELANCIA, _E_LIMAO, _E_CEREJA, _E_MACA2],
           [_E_CEREJA, _E_MELANCIA, _E_MACA2, _E_LIMAO],
           [_E_LIMAO, _E_MACA2, _E_MELANCIA, _E_CEREJA]], 1, [(3, 1), (2, 0), (1, 1), (2, 3)]),
]

# Levels 13-20 were eight identical copy-pasted placeholder levels;
# generate them in a loop instead.  Fresh lists are built on every
# iteration so no level shares a mutable board with another, exactly as
# with the original literals.
for _n in range(13, 21):
    level_objects.append(
        Level(str(_n), [_E_MACA, _E_LARANJA, _E_LIMAO],
              [_E_MACA, _E_LARANJA, _E_LIMAO],
              [[_E_MACA, _E_LARANJA, _E_LIMAO],
               [_E_LARANJA, _E_LIMAO, _E_MACA],
               [_E_LIMAO, _E_MACA, _E_LARANJA]], 1, [(0, 1), (0, 2)]))

# Plain-dict view of every level (Level is presumably dict()-convertible
# — this mirrors the original module's final line).
levels = [dict(level) for level in level_objects]
| true |
f3ee6bc20c29069f57ef03329c53d513b7d7b4b1 | Python | Tuanlase02874/Machine-Learning-Kaggle | /Africa Soil Property Prediction Challenge/africa-baseline.py | UTF-8 | 3,328 | 2.65625 | 3 | [
"MIT"
] | permissive | import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import ElasticNet
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn import metrics
import numpy as np
def get_multioutput_regressor(base_estimator):
    """Wrap *base_estimator* so it predicts all target columns at once."""
    multi = MultiOutputRegressor(base_estimator)
    return multi
def get_gaussian_process_regressor():
    """Return ([model], [display name]) for a default GP regressor."""
    gp = GaussianProcessRegressor()
    return [gp],['Gaussian Process']
def get_mlp_regressor(num_hidden_units=51):
    """Return ([model], [display name]) for an MLP with one hidden layer."""
    mlp = MLPRegressor(hidden_layer_sizes=num_hidden_units)
    return [mlp],['Multi-Layer Perceptron']
def get_ensemble_models():
    """Return (models, display names) for five fixed-seed ensemble
    regressors used in the evaluation loop below."""
    rf = RandomForestRegressor(n_estimators=51,min_samples_leaf=5,min_samples_split=3,random_state=42)
    bag = BaggingRegressor(n_estimators=51,random_state=42)
    extra = ExtraTreesRegressor(n_estimators=71,random_state=42)
    ada = AdaBoostRegressor(random_state=42)
    grad = GradientBoostingRegressor(n_estimators=101,random_state=42)
    classifier_list = [rf,bag,extra,ada,grad]
    classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
    return classifier_list, classifier_name_list
def get_linear_model():
    """Return ([model], [display name]) for a default ElasticNet."""
    elastic_net = ElasticNet()
    return [elastic_net],['Elastic Net']
# NOTE(review): Python 2 print-statement syntax — this file will not
# parse under Python 3; confirm the target interpreter.
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
    # Print the model's R^2 score on the held-out split.
    print '--------- For Model : ', trained_model_name ,' ---------\n'
    print "R2 Score : ", trained_model.score(X_test,y_test)
    print "---------------------------------------\n"
def label_encode_frame(dataframe):
    """Label-encode every string-valued column of *dataframe* in place.

    Columns whose first value is a string are encoded directly; columns
    that start with missing values are scanned (first ~1000 rows) for a
    string before encoding.  Returns the same DataFrame.
    """
    encoder = LabelEncoder()
    for column in dataframe.columns:
        first = dataframe[column][0]
        # BUG FIX: the original tested `type(first) is np.nan`, which is
        # always False (np.nan is a float *value*, not a type), so the
        # NaN-leading scan branch could never run.
        if isinstance(first, float) and pd.isna(first):
            for i in range(len(dataframe)):
                if i > 1000:
                    break
                if type(dataframe[column][i]) is str:
                    dataframe[column] = encoder.fit_transform(dataframe[column].values)
                    break
        elif type(first) is str:
            dataframe[column] = encoder.fit_transform(dataframe[column].values)
    return dataframe
# Load the soil-property training set, split targets from features,
# and report R^2 for each multi-output ensemble model.
filename = 'training.csv'
train_frame = pd.read_csv(filename)
# The five soil properties being predicted (multi-output targets).
output_columns = [ 'Ca','P','pH','SOC','Sand']
output_labels = train_frame[output_columns].values
train_frame.drop(output_columns,axis=1,inplace=True)
train_frame = label_encode_frame(train_frame)
# PIDN is the sample identifier, not a feature.
del train_frame['PIDN']
X_train,X_test,y_train,y_test = train_test_split(train_frame.values,output_labels,test_size=0.1,random_state=42)
classifier_list, classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
    multi_regressor = get_multioutput_regressor(classifier)
    multi_regressor.fit(X_train,y_train)
    print_evaluation_metrics(multi_regressor,classifier_name,X_test,y_test)
| true |
ca07fa3f723b1491301eac703e78641ee543fc0a | Python | mfkiwl/hpcgra | /generator_asm/src/route/module/preprocessing.py | UTF-8 | 406 | 2.703125 | 3 | [
"MIT"
] | permissive | import networkx as nx
def preprocessing(g):
    """Return a copy of *g* with all "const" nodes and self-loop edges removed.

    The input graph is left untouched; nodes are matched on their 'opcode'
    attribute (nodes without that attribute are kept).
    """
    new_g = g.copy()
    opcodes = nx.get_node_attributes(g, 'opcode')
    # Drop constant nodes.  ``.get`` avoids a KeyError for nodes that have
    # no 'opcode' attribute (get_node_attributes only lists nodes that do).
    for node in g.nodes():
        if opcodes.get(node) == "const":
            new_g.remove_node(node)
    # Drop self-loops.  Guard with has_edge: removing a const node above also
    # removed its incident edges, so blindly calling remove_edge could raise.
    for u, v in g.edges():
        if u == v and new_g.has_edge(u, v):
            new_g.remove_edge(u, v)
    return new_g
21428b6b65c9c410c8564d38f27b837f527ab73a | Python | rupali23-singh/task_question | /mathamatic.py | UTF-8 | 114 | 3.53125 | 4 | [] | no_license | a=int(input("enter the number"))
b=int(input("enter the number"))
if a!=b:
print(a+b)
else:
print("right") | true |
8512ae09066217904867a2e5b9847263354d943b | Python | yanchidezhang/leetcode | /huawei/huawei.py | UTF-8 | 276 | 3.609375 | 4 | [] | no_license | '''
1. 字符串去重
输入:12ere2
输出:12er
'''
def remove_dup(mystr):
    """Return *mystr* with duplicate characters removed, keeping first occurrences."""
    # dict preserves insertion order (Python 3.7+), so fromkeys keeps exactly
    # the first appearance of every character, in order.
    return ''.join(dict.fromkeys(mystr))
print(remove_dup('12ere2'))
| true |
5a51ce0c709fb69e5b8946098f8065f93b947919 | Python | EunbyeolCho/study_VTK | /render/rendering.py | UTF-8 | 4,876 | 2.75 | 3 | [] | no_license | """
https://vtk.org/doc/nightly/html/classvtkSelectPolyData.html
"""
import vtk
import random
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
renWin.SetSize(1500, 1500)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
#Make Control Point
landmark = vtk.vtkPoints()
landmark.InsertNextPoint([random.randrange(0, 10)/10, random.randrange(0, 10)/10, 0.0])
landmark.InsertNextPoint([random.randrange(0, 10)/10, random.randrange(0, 10)/10, 0.0])
landmark.InsertNextPoint([random.randrange(0, 10)/10, random.randrange(0, 10)/10, 0.0])
# landmark.InsertNextPoint([random.randrange(0, 10)/10, random.randrange(0, 10)/10, 0.0])
# landmark.InsertNextPoint([random.randrange(0, 10)/10, random.randrange(0, 10)/10, 0.0])
# landmark.InsertNextPoint([random.randrange(0, 10)/10, random.randrange(0, 10)/10, 0.0])
landmarkActor = vtk.vtkActor()
pickedPoint = -1
target = vtk.vtkPolyData()
targetActor = vtk.vtkActor()
scalarBar = vtk.vtkScalarBarActor()
def LeftButtonPressed(obj, ev):
    """Interactor callback: remember which landmark sphere (if any) was clicked.

    Stores the picked point id in the module-global ``pickedPoint``;
    -1 means the click did not hit a landmark.
    """
    pos = obj.GetEventPosition()
    # Restrict picking to the landmark actor so the target mesh is ignored.
    picker = vtk.vtkPointPicker()
    picker.PickFromListOn()
    picker.AddPickList(landmarkActor)
    picker.Pick(pos[0], pos[1], 0, renderer)
    global pickedPoint
    pickedPoint = picker.GetPointId()
def MouseMove(obj, ev):
    """Interactor callback: drag the currently picked landmark with the mouse.

    No-op unless ``pickedPoint`` holds a valid landmark id (set on press).
    """
    if pickedPoint == -1 : return
    pos = obj.GetEventPosition()
    # Pick against the scene to obtain a 3D position under the cursor.
    picker = vtk.vtkCellPicker()
    picker.Pick(pos[0], pos[1], 0, renderer)
    landmark.SetPoint(pickedPoint, picker.GetPickPosition())
    # Mark the point container modified so VTK re-renders the moved landmark.
    landmark.Modified()
def LeftButtonRelease(obj, ev):
    """Interactor callback: drop the current landmark selection."""
    global pickedPoint
    pickedPoint = -1
#Select Polydata
loop = vtk.vtkSelectPolyData()
loop.SetInputData(target)
loop.SetLoop(landmark)
loop.GenerateSelectionScalarsOn()
# loop.SetSelectionModeToLargestRegion()
loop.SetSelectionModeToSmallestRegion()
loop.Update()
outputScalar = loop.GetOutput().GetPointData().GetScalars()
#Add
if outputScalar == None:
for i in range(landmark.GetNumberOfPoints()):
print(landmark.GetPoint(i), end=" ")
print()
# return
target.GetPointData().SetScalars(outputScalar)
target.GetPointData().Modified()
targetActor.GetMapper().SetScalarRange(outputScalar.GetRange())
##### error!
# AttributeError: 'NoneType' object has no attribute 'GetRange'
if __name__ == "__main__":
planeSource = vtk.vtkPlaneSource()
planeSource.SetResolution(10, 10)
planeSource.Update()
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName("./sample1.vtp")
reader.Update()
polyData = planeSource.GetOutput()
polyData.GetPointData().RemoveArray("Normals")
triangleFilter = vtk.vtkTriangleFilter()
triangleFilter.SetInputData(polyData)
triangleFilter.Update()
target = reader.GetOutput()
# target = triangleFilter.GetOutput()
numPoints = target.GetNumberOfPoints()
array = vtk.vtkFloatArray()
array.SetNumberOfTuples(numPoints)
for idx in range(numPoints):
array.SetTuple(idx, [0])
target.GetPointData().SetScalars(array)
mapper = vtk.vtkPolyDataMapper()
mapper.SetScalarRange(target.GetPointData().GetScalars().GetRange())
mapper.SetInputData(target)
targetActor = vtk.vtkActor()
targetActor.SetMapper(mapper)
renderer.AddActor(targetActor)
#Show Landmark
numLandmarks = landmark.GetNumberOfPoints()
#Add Closed Lines
lines = vtk.vtkCellArray()
lines.InsertNextCell(numLandmarks+1)
for idx in range(numLandmarks):
lines.InsertCellPoint(idx)
lines.InsertCellPoint(0)
#Make Landmark Polydata
landmarkPoly = vtk.vtkPolyData()
landmarkPoly.SetPoints(landmark)
landmarkPoly.SetLines(lines)
landmarkMapper = vtk.vtkOpenGLSphereMapper()
landmarkMapper.SetInputData(landmarkPoly)
landmarkMapper.SetRadius(.1)
# landmarkActor = vtk.vtkActor()
landmarkActor.SetMapper(landmarkMapper)
lineMapper = vtk.vtkPolyDataMapper()
lineMapper.SetInputData(landmarkPoly)
lineActor = vtk.vtkActor()
lineActor.SetMapper(lineMapper)
renderer.AddActor(lineActor)
renderer.AddActor(landmarkActor)
#Add ScalarBar Actor
scalarBar = vtk.vtkScalarBarActor()
scalarBar.SetLookupTable(targetActor.GetMapper().GetLookupTable())
renderer.AddActor2D(scalarBar)
renderer.SetBackground(0.0, 0.0, 0.0)
renderer.SetBackground2(0.5, 0.5, 0.5)
renderer.SetGradientBackground(True)
renWin.Render()
#Add Interactor
iren.AddObserver("LeftButtonPressEvent", LeftButtonPressed)
iren.AddObserver("InteractionEvent", MouseMove)
iren.AddObserver("EndInteractionEvent", LeftButtonRelease)
iren.Initialize()
iren.Start()
| true |
613ddd10573332898a4449c7b661a7833418d091 | Python | weeksghost/python-qanda | /treehouse/timestamp.py | UTF-8 | 552 | 3.09375 | 3 | [] | no_license | import datetime
# Sample POSIX timestamps (seconds since the epoch).
TS = [1427769570.266099, 1427769191.268007, 1427774014.309288]

def timestamp_oldest():
    """Print and return the earliest timestamp in ``TS`` as a local datetime.

    The original converted every timestamp, sorted the list *and* took its
    min, then recomputed the same value a second way — a single ``min`` over
    the raw floats is sufficient because ``fromtimestamp`` is monotonic.
    """
    oldest = datetime.datetime.fromtimestamp(min(TS))
    print(oldest)
    return oldest

timestamp_oldest()
# Treehouse Answer
"""
def timestamp_oldest(*args):
pos = []
for arg in args:
frts = datetime.datetime.fromtimestamp(arg)
pos.append(frts)
pos.sort()
oldest = min(pos)
return oldest
"""
| true |
a75d1eab3fc917649e1478b9a7cf23106a1be436 | Python | Skill-Zozo/Project-Euler | /palindromeproducts.py | UTF-8 | 963 | 3.84375 | 4 | [] | no_license | import math
def is_palindrome(num):
    """Return True when the decimal representation of *num* is a palindrome."""
    text = '%d' % num
    return text[::-1] == text
def three_digit_factors(N):
    """Return [a, b] with a*b == N and both factors three digits long, else [].

    Only divisors up to sqrt(N) need checking: in any factor pair, one
    member is at or below the square root.
    """
    limit = int(math.sqrt(N)) + 1
    # An odd number has only odd divisors, so skip the evens in that case.
    candidates = range(2, limit) if N % 2 == 0 else range(3, limit, 2)
    for divisor in candidates:
        if N % divisor != 0:
            continue
        cofactor = N // divisor
        if 100 <= divisor <= 999 and 100 <= cofactor <= 999:
            return [divisor, cofactor]
    return []
# Search downward from the largest 6-digit product for the first palindrome
# that factors into two 3-digit numbers (Project Euler #4).
largest_product = 999*999
smallest_product = 100*100
palindromic_candidates = [n for n in range(largest_product, smallest_product - 1, -1) if is_palindrome(n)]
for palindrome in palindromic_candidates:
    factors = three_digit_factors(palindrome)
    if factors:
        print("largest palindrome is: %d which is a product of %d and %d" % (palindrome, factors[0], factors[1]))
        break
else:
    # BUG FIX: the original printed this unconditionally — even after a
    # successful find — because the print sat after the loop instead of in
    # a for/else clause.
    print("Ain't shxt")
b04abafc66ebe23e04af08d91102af2b588b5bfe | Python | miholeus/hadoop-mapreduce | /hadoop-streaming/case-union/reducer.py | UTF-8 | 188 | 2.78125 | 3 | [] | no_license | #!/usr/local/bin/python
import sys
# Emit each distinct key exactly once — a streaming "set union" reducer.
# Assumes reducer input is grouped/sorted by key, as Hadoop streaming
# guarantees, so tracking only the previous key suffices.
lastKey = None
for line in sys.stdin:
    # The key is the first tab-separated field of the mapper output.
    words = line.strip().split("\t")
    if words[0] != lastKey:
        print(words[0])
        lastKey = words[0]
| true |
b532cedd465e09d8bcf198eeb995d5948b1c0888 | Python | rafaelponte89/secure_python_dio | /class_03_02_ping.py | UTF-8 | 341 | 2.84375 | 3 | [] | no_license | import os
from colors import Colors
import time
color = Colors()
# Read the target hosts, one IP/hostname per line.  Avoid shadowing the
# built-in name ``file``; the ``with`` block already closes the handle, so
# the original's explicit close() inside it was redundant.
with open('hosts.txt') as hosts_file:
    hosts = hosts_file.read().splitlines()

for ip in hosts:
    print(color.colorIO('-'*50 ,'yellow'))
    # SECURITY NOTE: the address is interpolated into a shell command line.
    # If hosts.txt can contain untrusted content, prefer
    # subprocess.run(['ping', '-c', '2', ip]) to avoid shell injection.
    os.system(f'ping -c 2 {ip}')
    print(color.colorIO('-' * 50, 'yellow'))
    time.sleep(5)
85363728f5b92f7aff2e22814cd16ed434ea0df2 | Python | bitcocom/python | /PART1/실습1.py | UTF-8 | 514 | 4.28125 | 4 | [] | no_license | # 파이썬 자료형과 출력문의 이해, 서식지정자의 이해
a = 10 # integer (int)
print("%d" %a)
print("{0}".format(a))
b = 15.6 # floating-point number (float)
print("%f" %b)
c = 'A' # single character (str)
print("%c" %c)
d = "Hello Python" # string (str)
print("%s" %d)
print("{0}".format(d))
# Exercise 1: store your own details in variables, one per line, and print them.
# Print name (str), age (int), phone number tel (str), weight (float), and blood type abo (char).
| true |
1fac4c4ecff8b35c4290f3080edd14464cf6140a | Python | kaloyansabchev/Programming-Basics-with-Python | /05 While Loop Lab/09. Moving.py | UTF-8 | 381 | 3.375 | 3 | [] | no_license | width = int(input())
# Free storage volume of the room, in cubic meters (width is read just above).
lenght = int(input())
hight = int(input())
volume = width * lenght * hight
# Each command is either a number of 1 m^3 boxes to store, or 'Done'.
command = input()
while command != 'Done':
    boxes = int(command)
    volume -= boxes
    if volume <= 0:
        # Over (or exactly at) capacity: report the shortfall.
        print(f'No more free space! You need {abs(volume)} Cubic meters more.')
        break
    command = input()
# Report any remaining capacity once storing is finished.
# (Also strips the "| true |" extraction junk that had been fused onto the
# final line, which made it a syntax error.)
if volume > 0:
    print(f"{volume} Cubic meters left.")
86e017327206af60c11fe149ee90ea7b198588fb | Python | seanwader/sean | /Lazy Man SMA.py | UTF-8 | 8,799 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 16:07:31 2020
@author: seanw
"""
# Buy when price < 150ma - green
# Hedge when price < 250ma - red
# Carry when price > 150ma - blue
#
#
# Can switch ticker(lines 26, 41, 42, 49) and dates (lines 28 and 29)
#
#
#
#
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
tickers = ['SPY'] ### ONE ###
start_date = '2015 - 01 - 01'
end_date = '2020 - 06 - 19'
panel_data = data.DataReader(tickers, 'yahoo', start_date,end_date)
close = panel_data['Close']
opens = panel_data['Open']
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
close = close.reindex(all_weekdays)
close = close.fillna(method='ffill')
opens = opens.reindex(all_weekdays)
opens = opens.fillna(method='ffill')
spyw = close.loc[all_weekdays, 'SPY']
spywo = opens.loc[all_weekdays, 'SPY'] ### TWO ###
mid_rolling_spyw = spyw.rolling(window=300).mean() ### THREE ###
fig, ax = plt.subplots(figsize=(16,9))
ax.plot(spyw.index, spyw, label='SPY', color = 'black') ### FOUR ###
ax.plot(spyw.index, spywo, color = 'black')
ax.plot(mid_rolling_spyw.index, mid_rolling_spyw, label='300 days rolling') ### FIVE ###
ax.set_xlabel('Date')
ax.set_ylabel('Adjusted closing price ($)')
ax.legend()
ax.grid(True)
# MAs
# Classify every trading day (after a 250-day warm-up) against its trailing
# simple moving averages:
#   buy   (green): close below the 150-day SMA
#   hedge (red)  : close below the 250-day SMA
#   carry (blue) : close above the 150-day SMA
buy, buyindex, trashbuy = [], [], []
hedge, hedgeindex, trashhedge = [], [], []
carry, carryindex, trashcarry = [], [], []

for i in range(250, spyw.size):
    # Trailing SMAs over the last 250 / 150 closes *including* day i.
    # This replaces the original hand-written 250-term and 150-term sums.
    # skipna=False keeps the original semantics: a window containing NaN
    # (e.g. leading holidays produced by the reindex/ffill) yields NaN, so
    # every comparison below is False and the day lands in the trash lists.
    spywtwofifty = spyw.iloc[i - 249:i + 1].mean(skipna=False)
    spywonefifty = spyw.iloc[i - 149:i + 1].mean(skipna=False)

    # Buy when price < 150-day SMA
    if spyw.iloc[i] < spywonefifty:
        buy.append(spyw.iloc[i])
        buyindex.append(all_weekdays[i])
    else:
        trashbuy.append(spyw.iloc[i])

    # Hedge when price < 250-day SMA
    if spyw.iloc[i] < spywtwofifty:
        hedge.append(spyw.iloc[i])
        hedgeindex.append(all_weekdays[i])
    else:
        trashhedge.append(spyw.iloc[i])

    # Carry when price > 150-day SMA
    if spyw.iloc[i] > spywonefifty:
        carry.append(spyw.iloc[i])
        carryindex.append(all_weekdays[i])
    else:
        trashcarry.append(spyw.iloc[i])

# plotting colored dots
plt.scatter(buyindex, buy, color='green', s=50, alpha=1)
plt.scatter(hedgeindex, hedge, color='red', s=20, alpha=1)
plt.scatter(carryindex, carry, color='blue', s=15, alpha=1)
| true |
63b90695309b5e86140b60228c33899839b546ec | Python | 1oglop1/MSexcel | /correct.py | UTF-8 | 5,099 | 3 | 3 | [] | no_license | """!!!WARNING!!! Check if there are any duplicates in sheet before running this.
Purpose of this script is to read the file('report_') produced by validate.py.
Then change the file names of files to represent the value from the spreadsheet.
How to use this script"""
import os
import sys
import argparse
from ast import literal_eval as make_tuple
try:
import xlrd
from xlutils.copy import copy as xlcopy
import xlwt
except ImportError as e:
print(e)
print('Maybe "pip install xlrd xlwt xlutils", would help.')
sys.exit(1)
def _cli():
    """Parse command-line arguments and return them as a dict.

    Exits with a usage message (status 1) when the positional ``excel_file``
    is missing: argument_default=SUPPRESS means absent arguments simply do
    not appear in the namespace, hence the KeyError probe below.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        argument_default=argparse.SUPPRESS)
    parser.add_argument('excel_file', help="Input file - Excel sheet containing the records", nargs='?')
    parser.add_argument('-wdir', '--working_directory', default=os.getcwd(), help="Set working directory")
    parser.add_argument('-of', '--out_file', default='output.xls', help="Output file, empty will create output.xls")
    parser.add_argument('-o','--overwrite', action='store_true', default=False, help='Overwrite input excel file')
    args = parser.parse_args()
    arguments = vars(args)
    try:
        # excel_file is optional to argparse (nargs='?') but required by us.
        a = arguments['excel_file']
    except KeyError:
        parser.print_usage()
        sys.exit(1)
    return vars(args)
def rename_file(original_name, new_name, working_directory):
    """Rename *original_name* to *new_name*, both relative to *working_directory*."""
    source = os.path.abspath(os.path.join(working_directory, original_name))
    destination = os.path.abspath(os.path.join(working_directory, new_name))
    os.rename(source, destination)
def parse_position(string):
    """Extract a coordinate tuple like ``(13, 45)`` from a line containing ``^(13, 45)``.

    Returns the parsed tuple, or ``None`` when no ``^(`` marker is present.
    """
    text = string.strip()
    if '^(' not in text:
        return None
    start = text.find('^(') + 1
    stop = text.index(')') + 1
    return make_tuple(text[start:stop])
def values_to_correct(input_file):
    """Read a ``report_`` file and split its records into two dicts.

    Returns a pair ``(manual, corrected)`` where
      * ``corrected`` maps a spreadsheet cell position ``(row, col)`` to the
        corrected file name, and
      * ``manual`` maps a running counter to ``(file_in_sheet, file_in_folder)``
        pairs whose position line could not be parsed and so must be fixed
        by hand.
    """
    to_be_corrected = {}
    correct_manually = {}
    pos_error_cnt = 0
    with open(input_file, 'r') as inf:
        inf.readline()  # skip the header line
        for line in inf:
            try:
                file_in_sheet, file_in_folder, corrected_value = line.strip().split('|')
                # A '*' means "use the name found on disk".
                if corrected_value == '*':
                    corrected_value = file_in_folder
            except ValueError:
                # Not a 3-field record (separator / continuation line).
                corrected_value = None
            if corrected_value:
                # A corrected record is followed by a position line "^(row, col)".
                pos = parse_position(inf.readline())
                if pos:
                    to_be_corrected[pos] = corrected_value
                else:
                    print(pos, file_in_folder, file_in_sheet)
                    correct_manually[pos_error_cnt] = (file_in_sheet, file_in_folder)
                    # BUG FIX: the counter was never incremented, so every
                    # manual entry overwrote the previous one at key 0.
                    pos_error_cnt += 1
    return correct_manually, to_be_corrected
def main(working_directory, excel_file, out_file, overwrite, *args, **kwargs):
    """Apply corrected names from the companion ``report_`` file to the workbook.

    Reads ``excel_file``, looks up its ``report_<name>.txt`` in
    ``working_directory``, writes corrected values into the 'Documents'
    sheet, and lists records that must be fixed manually.
    """
    WORKING_FOLDER = os.path.abspath(working_directory)
    WB_FILE = os.path.abspath(os.path.join(WORKING_FOLDER, excel_file)) # excel file
    # The report produced by validate.py is named report_<xls-basename>.txt.
    report_file_name = os.path.basename(WB_FILE)
    report_file_name = "report_{xls_file}.txt".format(xls_file=report_file_name[:report_file_name.rfind('.')])
    REPORT_FILE = os.path.join(WORKING_FOLDER, report_file_name)
    if overwrite:
        OUTPUT_FILE = WB_FILE
    else:
        OUTPUT_FILE = os.path.abspath(out_file)
    # open work_book to read (formatting_info keeps cell styles through the copy)
    read_book = xlrd.open_workbook(WB_FILE, formatting_info=True)
    # copy read only to write only
    print('Copying excel sheet in memory.')
    write_book = xlcopy(read_book)
    write_sheet = write_book.get_sheet('Documents')
    print("Trying to read report file:")
    print(REPORT_FILE)
    # get correct values from report_ file
    manually_correct, correct_values = values_to_correct(REPORT_FILE)
    print('Writing changes')
    for (row, col), value in correct_values.items():
        # Report positions are 1-based rows; xlwt writes 0-based, hence row - 1.
        print('corrected:',row - 1, col, value)
        write_sheet.write(row - 1, col, value)
    # NOTE(review): the save call below is commented out, so the corrections
    # are never written to OUTPUT_FILE even though "Saved to:" is printed —
    # confirm whether this was left disabled intentionally.
    # write_book.save(OUTPUT_FILE)
    print("Saved to:")
    print(OUTPUT_FILE)
    # write manual corrections
    if manually_correct:
        print("Please fix these values manually.")
        print('Sheet name | File name')
        for corr_id, corr_val in manually_correct.items():
            msg = '{cid}: {sh} | {fi}'.format(cid=corr_id,
                                              sh=corr_val[0],
                                              fi=corr_val[1])
            print(msg)
if __name__ == '__main__':
main(**_cli()) | true |
135e91d83da622ca837cbe1973b1fb3a299d4028 | Python | Forrest-HuYang/Python-Exercise | /gift advanced.py | UTF-8 | 896 | 3.046875 | 3 | [] | no_license | """
ID: tony_hu1
PROG: gift1
LANG: PYTHON3
"""
a = []
money = {}
list_friends = []
for line in open('gift1.in'):
a.append(line.rstrip())
num_friends = int(a[0])
del a[0]
for i in range(num_friends):
money[a[0]] = 0
list_friends.append(a[0])
del a[0]
while len(a) > 0:
give_data = a[1].split(' ')
amount = int(give_data[0])
number = int(give_data[1])
if int(give_data[0]) != 0:
give_residue = amount % number
give_money = amount // number
money[a[0]] = money[a[0]] - amount + give_residue
del a[0]
del a[0]
for i in range(number):
money[a[0]] += give_money
del a[0]
else:
for i in range(number+2):
del a[0]
print (money)
fout = open ('gift1.out', 'w')
for i in range(num_friends):
name = list_friends[i]
fout.write(name + ' ' + str(money[name]) +'\n')
| true |
f050c905bdbca990b20d670fc066ec5c0197119a | Python | pobed2/sudocubes | /sudocube/sudocube_data_extractor.py | UTF-8 | 1,574 | 2.75 | 3 | [] | no_license | #coding: utf-8
from face_extraction.front_face_extractor import FrontFaceExtractor
from face_extraction.side_face_extractor import SideFaceExtractor
from face_extraction.top_face_extractor import TopFaceExtractor
from adjustable_parameters import front_face, side_face, top_face, red_square
class SudocubeDataExtractor(object):
    """Extracts per-face data and the red-square position from a sudocube image."""

    def __init__(self):
        self._init_dependencies()

    def _init_dependencies(self):
        # One specialised extractor per visible face of the cube.
        self._front_extractor = FrontFaceExtractor()
        self._side_extractor = SideFaceExtractor()
        self._top_extractor = TopFaceExtractor()

    def extract_data_from_image_with_corner_info(self, image, corners):
        """Extract all three faces' data plus the red square's location.

        ``corners`` maps each face key to that face's corner coordinates.
        Returns a dict keyed by face, plus the red-square (face, position).
        """
        front_data, front_red_square = self._front_extractor.extract_data(image, corners[front_face])
        side_data, side_red_square = self._side_extractor.extract_data(image, corners[side_face])
        top_data, top_red_square = self._top_extractor.extract_data(image, corners[top_face])
        red_square_position = self._get_red_square_position(front_red_square, side_red_square, top_red_square)
        # Python 2/3-compatible print() calls (the originals were Py2-only
        # print statements, a syntax error on Python 3).
        print(top_data)
        print(front_data)
        print(side_data)
        print(red_square_position)
        return {front_face: front_data, side_face: side_data, top_face: top_data, red_square: red_square_position}

    def _get_red_square_position(self, front, side, top):
        """Return (face_key, position) for whichever face reported the red square.

        BUG FIX: the original raised UnboundLocalError when no face reported
        a red square; return None explicitly in that case instead.
        """
        if front is not None:
            return (front_face, front)
        if side is not None:
            return (side_face, side)
        if top is not None:
            return (top_face, top)
        return None
de06ae1576e371e48fc3f1309805d928b141f595 | Python | raghavendra-musubi/linear_algebra | /test-mat-sub.py | UTF-8 | 258 | 3.625 | 4 | [] | no_license | from matrix import Matrix
# Exercise Matrix.__sub__ on 4x1 column vectors.
list_11 = [[1],[2],[3],[4]]
list_12 = [[5],[6],[7],[8]]
print(Matrix(list_12) - Matrix(list_11))

# Exercise Matrix.__sub__ on 2x3 matrices.
# (Also strips the "| true |" extraction junk fused onto the final line,
# which made it a syntax error.)
list_21 = [[1,2,3],[4,5,6]]
list_22 = [[7,8,9],[10,11,12]]
print(Matrix(list_22) - Matrix(list_21))
10b7be8623204d38697bcc86ed5cb28078113539 | Python | JCGit2018/bagua | /bagua/torch_api/algorithms/base.py | UTF-8 | 6,436 | 2.65625 | 3 | [
"MIT"
] | permissive | from bagua.torch_api.data_parallel.bagua_distributed import BaguaDistributedDataParallel
from bagua.torch_api.bucket import BaguaBucket
from bagua.torch_api.tensor import BaguaTensor
from bagua.torch_api.communication import BaguaProcessGroup
from typing import List
import torch
class Algorithm:
    """
    This is the base class that all Bagua algorithms inherit.
    """
    def reify(self, process_group: BaguaProcessGroup):
        """
        Create an algorithm instance.
        Args:
            process_group: The process group to work on.
        """
        # Intentionally a no-op in the base class: concrete algorithms
        # override reify() to build their implementation object bound to
        # the given process group (see AlgorithmImpl below).
        pass
class AlgorithmImpl:
"""
This is the base class that all Bagua algorithm implementations inherit.
It provides methods that can be override to implement different kinds of
distributed algorithms.
Args:
process_group: The process group to work on.
"""
def __init__(self, process_group: BaguaProcessGroup):
self.process_group = process_group
def need_reset(self) -> bool:
"""
Returns:
``True`` if all initialization methods of the current algorithms should be called again. \
This is useful for algorithms that have multiple stages where each stage needs different initializations.
"""
return False
def init_tensors(self, bagua_ddp: BaguaDistributedDataParallel) -> List[BaguaTensor]:
"""
Given a :class:`~bagua.torch_api.data_parallel.BaguaDistributedDataParallel`, return Bagua tensors to be used in Bagua for later
operations.
Args:
bagua_ddp: :class:`bagua.torch_api.data_parallel.BaguaDistributedDataParallel`.
Returns:
A list of Bagua tensors for communication.
"""
parameters = bagua_ddp.bagua_build_params()
tensors = []
for name, param in parameters.__reversed__():
param = param.bagua_ensure_grad().ensure_bagua_tensor(
name,
bagua_ddp.bagua_module_name,
getter_closure=lambda param: param.grad,
setter_closure=lambda param, t: setattr(param, "grad", t),
)
tensors.append(param)
self._communication_tensor_names = set(name for name, _ in parameters)
assert len(self._communication_tensor_names) == len(
tensors
), "tensor names should be unique"
return tensors
def tensors_to_buckets(
self, tensors: List[List[BaguaTensor]], do_flatten: bool
) -> List[BaguaBucket]:
"""
Given the bucketing suggestion from Bagua, return the actual Bagua buckets.
The default implementation follows the suggestion to do the bucketing.
Args:
tensors: Bagua tensors grouped in different
lists, representing Bagua's suggestion on how to bucketing the
tensors.
do_flatten: Whether to flatten the Bagua buckets.
Returns:
A list of Bagua buckets.
"""
bagua_buckets = []
for idx, bucket in enumerate(tensors):
bagua_bucket = BaguaBucket(
bucket, flatten=do_flatten, name=str(idx)
) # TODO: check duplicated names
bagua_buckets.append(bagua_bucket)
return bagua_buckets
def init_forward_pre_hook(self, bagua_ddp: BaguaDistributedDataParallel):
"""Given a :class:`~bagua.torch_api.data_parallel.BaguaDistributedDataParallel`, return a hook function that will be executed before the
forward process.
Args:
bagua_ddp: :class:`bagua.torch_api.data_parallel.BaguaDistributedDataParallel`.
Returns:
A function that takes the model's input.
"""
def hook(input):
pass
return hook
def init_backward_hook(self, bagua_ddp: BaguaDistributedDataParallel):
"""Given a :class:`~bagua.torch_api.data_parallel.BaguaDistributedDataParallel`, return a hook function that will be executed on every
parameter's gradient computation completion.
Args:
bagua_ddp: :class:`bagua.torch_api.data_parallel.BaguaDistributedDataParallel`.
Returns:
A function that takes the name of a parameter (as in ``torch.nn.Module.named_parameters``) and the parameter itself.
"""
def hook(parameter_name, parameter):
if parameter_name in self._communication_tensor_names:
assert (
parameter.bagua_backend_tensor().data_ptr()
== parameter.grad.data_ptr()
), "bagua backend tensor data_ptr should match parameter grad"
parameter.bagua_mark_communication_ready()
return hook
def init_post_backward_hook(self, bagua_ddp: BaguaDistributedDataParallel):
"""Given a :class:`~bagua.torch_api.data_parallel.BaguaDistributedDataParallel`, return a hook function that will be executed when the
backward pass is done.
Args:
bagua_ddp: :class:`bagua.torch_api.data_parallel.BaguaDistributedDataParallel`.
Returns:
A function that takes no argument.
"""
def hook():
bagua_ddp._bagua_backend.wait_pending_comm_ops()
return hook
def init_post_optimizer_step_hook(self, bagua_ddp: BaguaDistributedDataParallel):
"""Given a :class:`~bagua.torch_api.data_parallel.BaguaDistributedDataParallel`, return a hook function that will be executed when the
``optimizer.step()`` is done.
Args:
bagua_ddp: :class:`bagua.torch_api.data_parallel.BaguaDistributedDataParallel`.
Returns:
A function that gets called after an optimizer's ``step()`` method is called. The function takes the optimizer as its argument.
"""
def hook(optimizer: torch.optim.Optimizer):
pass
return hook
def init_operations(
self,
bagua_ddp: BaguaDistributedDataParallel,
bucket: BaguaBucket,
):
"""Given a :class:`~bagua.torch_api.data_parallel.BaguaDistributedDataParallel`, and a :class:`~bagua.torch_api.bucket.BaguaBucket`,
register operations to be executed on the bucket.
Args:
bagua_ddp: :class:`bagua.torch_api.data_parallel.BaguaDistributedDataParallel`.
bucket: A single bucket to register operations.
"""
| true |
8f92f40f032e4e33ff3d75dba0bad6423c57bab5 | Python | sdss/lvmagp | /python/lvmagp/images/processors/background/daophot.py | UTF-8 | 6,771 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | import asyncio
from typing import Tuple, TYPE_CHECKING, Any, Optional
from photutils.background import Background2D, MedianBackground
from lvmagp.images import Image
from .background import Background
class DaophotBackground(Background):
    """
    Daophot-style background estimator.

    Thin wrapper around :class:`photutils.background.Background2D` that
    measures the background on a low-resolution mesh with a median
    statistic.  See
    https://photutils.readthedocs.io/en/stable/background.html
    """

    __module__ = "lvmagp.images.processors.background"

    def __init__(self, *, box_size=(50, 50), **kwargs):
        """Store the mesh geometry and extra ``Background2D`` options.

        Parameters:
            box_size:
                Mesh box size along each axis, ``(ny, nx)`` order, or a
                scalar for a square box.
            **kwargs:
                Any further keyword arguments accepted by
                :class:`photutils.background.Background2D` (mask,
                sigma_clip, interpolator, ...).  Note that
                ``bkg_estimator`` and ``filter_size`` are fixed below and
                override any caller-supplied values.
        """
        self.box_size = box_size
        self.kwargs = kwargs
        # Fixed choices: median statistic per mesh box, 3x3 median filter
        # smoothing of the low-resolution background map.
        self.kwargs["bkg_estimator"] = MedianBackground()
        self.kwargs["filter_size"] = (3, 3)

    def __call__(self, image: Image, *, box_size=None, **kwargs):
        """Estimate and return the 2-D background of *image*.

        Args:
            image: Image whose ``data`` array is analysed.
            box_size: Optional per-call override of the mesh box size.
            **kwargs: Per-call options merged over the constructor options.

        Returns:
            The interpolated, full-resolution background array (float).
        """
        effective_box = box_size if box_size else self.box_size
        options = {**self.kwargs, **kwargs}
        return Background2D(image.data, effective_box, **options).background
__all__ = ["DaophotBackground"]
| true |
c0d91dc374e292d57d7c99dc5ac836abaf94de06 | Python | chiranjeevee5/demoPyGIT | /whileValdt.py | UTF-8 | 141 | 3.515625 | 4 | [] | no_license | x = 1
# Print five lines, each "Chiran" followed by two " Deeks".
while x <= 5:
    print("Chiran", end="")
    for _ in range(2):
        print(" Deeks", end="")
    x += 1
    print()
c3397a7896afe00577097cec3399e414fd2661b9 | Python | progaymanalaiwah/PyColor | /PyColor.py | UTF-8 | 3,733 | 3.03125 | 3 | [] | no_license | # Library : PyColor
# Language : Python Version 2 & 3
# Version : 1.0.0
# Author : Ayman Mahmoud Alaiwah
# Facebook : https://www.fb.com/ProgAymanAlaiwah
# GitHub : https://github.com/ProgAymanAlaiwah
#---------------------------------------------------#
# Used ctypes to access the C-API of the interpreter
import ctypes as c
# Edit Api Interpreter
# Edit Api Interpreter
# _PyObject_GetDictPtr returns a pointer to an object's __dict__ slot;
# declaring restype/argtypes lets ctypes marshal Python objects directly.
_get_dict = c.pythonapi._PyObject_GetDictPtr
_get_dict.restype = c.POINTER(c.py_object)
_get_dict.argtypes = [c.py_object]
# Dereference that pointer to get the real (writable) dict of any object,
# bypassing the mappingproxy wrapper normally exposed for built-in types.
def get_dict(object):return _get_dict(object).contents.value
# Function Main
# Core helpers: wrap text in ANSI escape sequences.
def set_code_color(self, code_color):
    """Return *self* wrapped in a bold foreground-color escape (black bg)."""
    return f"\x1b[1;{code_color};40m{self}\x1b[0m"
def set_code_bg(self, code_color):
    """Return *self* wrapped in a background-color escape sequence."""
    return f"\033[{code_color}m{self}\033[00m"
# Font-style helpers (bold / underline), written as unbound methods for str.
def text_bold(self) :return "\033[1m{0}\033[21m".format(self)
def text_underline(self) :return "\033[4m{0}\033[24m".format(self)
# Inject the style helpers into str's real dict via the ctypes hack above.
get_dict(str)['text_bold'] = text_bold
get_dict(str)['text_underline'] = text_underline
# Foreground-color helpers; the numbers are standard ANSI color codes.
def white(self) :return set_code_color(self,97)
def black(self) :return set_code_color(self,30)
def red(self) :return set_code_color(self,31)
def green(self) :return set_code_color(self,32)
def yellow(self) :return set_code_color(self,33)
def blue(self) :return set_code_color(self,34)
def magenta(self) :return set_code_color(self,35)
def cyan(self) :return set_code_color(self,36)
def gray(self) :return set_code_color(self,90)
# Register the color functions as methods on the built-in str class, using
# the ctypes get_dict() hack to write into the interpreter's type dict.
get_dict(str)['white'] = white
get_dict(str)['black'] = black
get_dict(str)['red'] = red
get_dict(str)['green'] = green
get_dict(str)['yellow'] = yellow
get_dict(str)['blue'] = blue
get_dict(str)['magenta'] = magenta
get_dict(str)['cyan'] = cyan
get_dict(str)['gray'] = gray
# Function Edit Background Colors
# Background-color helpers; the numbers are standard ANSI background codes.
def bg_white(self) :return set_code_bg(self,107)
def bg_black(self) :return set_code_bg(self,40)
def bg_red(self) :return set_code_bg(self,41)
def bg_green(self) :return set_code_bg(self,42)
def bg_yellow(self) :return set_code_bg(self,43)
def bg_blue(self) :return set_code_bg(self,44)
def bg_magenta(self) :return set_code_bg(self,45)
def bg_cyan(self) :return set_code_bg(self,46)
def bg_gray(self) :return set_code_bg(self,100)
# Register the background-color functions as methods on the built-in str
# class, again writing through get_dict() into the interpreter's type dict.
get_dict(str)['bg_white'] = bg_white
get_dict(str)['bg_black'] = bg_black
get_dict(str)['bg_red'] = bg_red
get_dict(str)['bg_green'] = bg_green
get_dict(str)['bg_yellow'] = bg_yellow
get_dict(str)['bg_blue'] = bg_blue
get_dict(str)['bg_magenta'] = bg_magenta
get_dict(str)['bg_cyan'] = bg_cyan
get_dict(str)['bg_gray'] = bg_gray
# Show All Function Text Color And Background Color
# Demo: print the name of every color/background function in its own style.
# Relies on the str methods injected above (e.g. 'white'.white()).
def showColor():
    # Map of category -> list of already-colorized sample strings.
    colors ={
        'Text':[
            'white'.white(),
            'black'.black(),
            'red'.red(),
            'green'.green(),
            'yellow'.yellow(),
            'blue'.blue(),
            'magenta'.magenta(),
            'cyan'.cyan(),
            'gray'.gray()
        ],
        'Bg' :[
            'bg_white'.bg_white(),
            'bg_black'.bg_black(),
            'bg_red'.bg_red(),
            'bg_green'.bg_green(),
            'bg_yellow'.bg_yellow(),
            'bg_blue'.bg_blue(),
            'bg_magenta'.bg_magenta(),
            'bg_cyan'.bg_cyan(),
            'bg_gray'.bg_gray()
        ]
    }
    print("------------[ Name Function Color ]-------------")
    for color in colors['Text']:
        print(color)
    print("------------[ Name Function Bg Color ]----------")
    for color in colors['Bg']:
        print(color)
30026b145f5d4b8e7aa5d63f0571a0f23dda3e40 | Python | Talysa7/Ex | /PythonEx/ml/language.py | UTF-8 | 1,549 | 2.78125 | 3 | [] | no_license | from sklearn import svm, metrics
import os.path, glob
import matplotlib.pyplot as plt
import pandas as pd
def load_txt( path ) :
files = glob.glob( path )
data = []
label = []
for filename in files :
file = os.path.basename( filename )
lang = file.split( "-" )
langfile = open( filename, "r", encoding="utf-8" ).read()
langtext = langfile.lower()
code_a = ord("a")
code_z = ord("z")
count = [ n for n in range(0, 26) ]
for char in langtext :
charcode = ord( char )
if code_a <= charcode <= code_z :
count[charcode - code_a] += 1
total = sum( count )
count = list( map( lambda n : n/total, count ) )
data.append( count )
label.append( lang[0] )
return data, label
train_data, train_label = load_txt( "./lang/train/*.txt" )
test_data, test_label = load_txt( "./lang/test/*.txt" )
clf = svm.SVC()
clf.fit( train_data, train_label )
predict = clf.predict( test_data )
score = metrics.accuracy_score( test_label, predict )
print( "score : ", score )
graph_dict = {}
for i in range( 0, len( train_label ) ) :
label = train_label[i]
data = train_data[i]
if not label in graph_dict :
graph_dict[label] = data
asclist = [ [chr(n) for n in range(97, 97+26)] ]
df = pd.DataFrame( graph_dict, index=asclist )
plt.style.use( "ggplot" )
df.plot( kind="bar", subplots=True, ylim=(0, 0.15) )
plt.show()
plt.savefig( "lang-plt.png" )
| true |
be14d168702f9c489a3ada39f6eead60c2aa0a01 | Python | RevansChen/online-judge | /Codewars/8kyu/short-long-short/Python/solution1.py | UTF-8 | 119 | 2.578125 | 3 | [
"MIT"
] | permissive | # Python - 3.6.0
def solution(*strings):
    """Return shortest + longest + shortest of the given strings.

    Sorts by length once instead of three times; ``sorted`` is stable, so
    length ties keep the original argument order — identical behavior to
    the previous lambda (PEP 8 discourages binding a lambda to a name).
    """
    ordered = sorted(strings, key=len)
    return f"{ordered[0]}{ordered[1]}{ordered[0]}"
| true |
47d9cc7008d0967bb5a7ed724d4a8d75456548df | Python | Gopporovoyatillo/uyga-vazifalar | /7- dars LIST (to'yhat).py | UTF-8 | 3,763 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 2 20:35:08 2021
@author: Admin
"""
#Quyidagi mashqlarni bajaring:
#ismlar degan ro'yxat yarating va kamida 3 ta yaqin do'stingizning ismini kiriting
#Ro'yxatdagi har bir do'stingizga qisqa xabar yozib konsolga chiqaring:
#sonlar deb nomlangan ro'yxat yarating va ichiga turli sonlarni yuklang (musbat, manfiy,
# butun, o'nlik).
#Yuqoridagi ro'yxatdagi sonlar ustida turli arifmetik amallar bajarib ko'ring.
#Ro'yxatdagi ba'zi sonlarning qiymatini o'zgartiring, ba'zilarini esa almashtiring.
#t_shaxslarva z_shaxslar degan 2 ta ro'yxat yarating va biriga o'zingiz eng ko'p
# hurmat qilgan tarixiy shaxslarning, ikkinchisiga esa zamonamizdagi tirik
# bo'lgan shaxslarning ismini kiriting.
#Yuqoridagi ro'yxatlarning har biridan bittadan qiymatni sug'urib olib (.pop()),
#quyidagi ko'rinishda chiqaring:
#friendsnomli bo'sh ro'yxat tuzing va unga .append() yordamida 5-6 ta mehmonga chaqirmoqchi bo'lgan do'stlaringizni kiriting.
#Yuqoridagi ro'yxatdan mehmonga kela olmaydigan odamlarni .remove() metodi yordamida o'chrib tashlang.
#Ro'yxatning oxiriga, boshiga va o'rtasiga yangi ismlar qo'shing.
#Yangi mehmonlardeb nomlangan bo'sh ro'yxat yarating. .pop() va .append() metodlari yordamida mehmonga kelgan do'stlaringizning ismini friends ro'yxatidan sug'urib olib, mehmonlar ro'yxatiga qo'shing
#ismlar degan ro'yxat yarating va kamida 3 ta yaqin do'stingizning ismini kiriting
ismlar = ['Ali', 'Vali', 'Hasan', 'Husan', "G'ani"]
#Ro'yxatdagi har bir do'stingizga qisqa xabar yozib konsolga chiqaring:
print("Salom " + ismlar[0] + " ishlaring yaxshimi?")
print(f"{ismlar[2]} va {ismlar[3]} egizaklar")
print(ismlar[-1] + " g'ildirakni g'izillatib g'ildratti")
# sonlar deb nomlangan ro'yxat yarating va ichiga turli sonlarni yuklang (musbat, manfiy, butun, o'nlik).
sonlar = [22, -58.2, 34.0, 67, 1983, 123_456_678_000, 112.4]
print(sonlar)
# Yuqoridagi ro'yxatdagi sonlar ustida turli arifmetik amallar bajarib ko'ring. Ro'yxatdagi ba'zi sonlarning qiymatini o'zgartiring, ba'zilarini esa almashtiring.
sonlar[0] = sonlar[0]+sonlar[-1]
sonlar[1] = -67.8
sonlar[4] = sonlar[4] + 37
del sonlar[5]
print(sonlar)
#t_shaxslarva z_shaxslar degan 2 ta ro'yxat yarating va biriga o'zingiz eng ko'p hurmat qilgan tarixiy shaxslarning, ikkinchisiga esa zamonamizdagi tirik bo'lgan shaxslarning ismini kiriting.
t_shaxslar = ['Amir Temur','Imom Buxoriy', 'Napoleon']
z_shaxslar = ['Bill Gates', 'Elon Musk', 'Doasnald Trump']
#Yuqoridagi ro'yxatlarning har biridan bittadan qiymatni sug'urib olib (.pop()), quyidagi ko'rinishda chiqaring:
print(f"\nMen tarixiy shaxslardan {t_shaxslar.pop(1)} bilan,\n\
zamonaviy shaxslardan esa {z_shaxslar.pop(0)} bilan\n\
suhbat qilishni istar edim\n")
#friends nomli bo'sh ro'yxat tuzing va unga .append() yordamida 5-6 ta mehmonga chaqirmoqchi bo'lgan do'stlaringizni kiriting.
friends = []
friends.append('John')
friends.append('Alex')
friends.append('Danny')
friends.append('Sobirjon')
friends.append('Vanya')
print(friends)
#Yuqoridagi ro'yxatdan mehmonga kela olmaydigan odamlarni .remove() metodi yordamida o'chrib tashlang.
friends.remove('John')
friends.remove('Alex')
print(friends)
#Ro'yxatning oxiriga, boshiga va o'rtasiga yangi ismlar qo'shing.
friends.append('Hasan')
friends.insert(0, 'Husan')
friends.insert(2, 'Ivan')
print(friends)
#Yangi mehmonlar deb nomlangan bo'sh ro'yxat yarating. .pop() va .append() metodlari yordamida mehmonga kelgan do'stlaringizning ismini friends ro'yxatidan sug'urib olib, mehmonlar ro'yxatiga qo'shing.
mehmonlar = []
mehmonlar.append(friends.pop(3))
mehmonlar.append(friends.pop(-1))
mehmonlar.append(friends.pop(0))
print("\nKelgan mehmonlar: ", mehmonlar) | true |
3ef17ae9608edd016dee84902bc55f8e83201806 | Python | esimionato/visdial-diversity | /eval_utils/dialog_generate.py | UTF-8 | 24,316 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | import os
import json
import numpy as np
from nltk.translate.bleu_score import sentence_bleu
import nltk
from nltk.util import ngrams
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
import math
from six.moves import range
import matplotlib.pyplot as plt
from eval_utils.rank_questioner import rankQBot
from collections import Counter
from scipy.stats import entropy
def dialogDump(params,
               dataset,
               split,
               aBot,
               qBot=None,
               beamSize=1,
               saveFolder="dialog_output"):
    '''
    Generates dialog and saves it to a json for later visualization.
    If only A-Bot is given, A-Bot answers are generated given GT image,
    caption and questions. If both agents are given, dialog is generated
    by both agents conversing (A-Bot is shown the GT image and both
    agents have access to a caption generated by a pre-trained captioning
    model).

    Arguments:
        params : Parameter dict for all options
        dataset : VisDialDataset instance
        split : Dataset split, can be 'val' or 'test'
        aBot : A-Bot
        qBot : Q-Bot (Optional)
        beamSize : Beam search width for generating utterrances
        saveFolder : Folder path for saving dialog related files

    Side effects: writes results.json, metrics.txt, latex.txt and two PNG
    plots into `saveFolder`.
    '''
    # Generate the dialogs and collect all diversity/relevance metrics.
    text, dialog_metrics = run_dialog(params,
                                      dataset,
                                      split,
                                      aBot,
                                      qBot,
                                      beamSize=beamSize)

    # Unpack distinct n-gram ratios and their 95% confidence intervals.
    dist_1 = dialog_metrics["dist_1"]
    dist_2 = dialog_metrics["dist_2"]
    dist_1_CI = dialog_metrics["dist_1_CI"]
    dist_2_CI = dialog_metrics["dist_2_CI"]

    average_precision_CI = dialog_metrics["average_precision_CI"]
    ent_1_CI = dialog_metrics["ent_1_CI"]
    ent_2_CI = dialog_metrics["ent_2_CI"]
    unique_questions_CI = dialog_metrics["unique_questions_CI"]
    mutual_overlap_CI = dialog_metrics["mutual_overlap_CI"]

    # Unique-question counts over the generated dialogs.
    unique_questions = dialog_metrics["tot_unique_questions"]
    tot_examples = dialog_metrics["tot_examples"]
    mean_unique_questions = dialog_metrics["mean_unique_questions"]
    std_unique_questions = dialog_metrics["std_unique_questions"]

    # Per-round dialog-state statistics (cosine similarity, norms, Huber).
    similarity_scores_mean = dialog_metrics["similarity_scores_mean"]
    norm_difference_scores_mean = dialog_metrics["norm_difference_scores_mean"]
    norm_scores_mean = dialog_metrics["norm_scores_mean"]
    huber_scores_mean = dialog_metrics["huber_scores_mean"]
    average_precision = dialog_metrics["average_precision"]
    per_round_precision = dialog_metrics["per_round_precision"]

    bleu_metric = dialog_metrics["mutual_overlap_score"]
    novel_questions = dialog_metrics["tot_novel_questions"]
    avg_novel_questions = dialog_metrics["avg_novel_questions"]
    tot_questions = dialog_metrics["tot_questions"]
    nll = dialog_metrics['NLL']
    ent_1 = dialog_metrics["ent_1"]
    ent_2 = dialog_metrics["ent_2"]

    savePathJson = os.path.join(saveFolder,"results.json")
    saveMetricsFile = os.path.join(saveFolder,"metrics.txt")
    saveLatexFile = os.path.join(saveFolder,"latex.txt")

    with open(saveMetricsFile, "w") as fp:
        fp.write("Metrics: \n")

    # NOTE(review): reopening with mode "w" truncates the "Metrics:" header
    # just written above — "a" was probably intended; confirm.
    with open(saveMetricsFile, "w") as fp:
        print("Writing dialog metrics data to file: {}".format(saveMetricsFile))
        fp.write("tot unique questions: %d"%unique_questions + "\n")
        fp.write("tot examples: %d"%tot_examples + "\n")
        fp.write("avg unique questions per example: %f"%mean_unique_questions + "\n")
        fp.write("std unique questions per example: %f"%std_unique_questions + "\n")
        fp.write("Mutual Overlap: %f"%bleu_metric + "\n")
        fp.write("Ent-1: %f"%ent_1 + "\n")
        fp.write("Ent-2: %f"%ent_2 + "\n")
        fp.write("Dist-1: %f"%dist_1 + "\n")
        fp.write("Dist-2: %f"%dist_2 + "\n")
        fp.write("novel questions: %d" % novel_questions + "\n")
        fp.write("avg novel questions: %f" % avg_novel_questions + "\n")
        fp.write("tot_questions: %d" % tot_questions + "\n")
        fp.write("average precision for questions: %f"%average_precision + "\n")
        fp.write("nll of GT questions: %f"%nll + "\n")
        fp.write("Mutual Overlap CI: %f"% mutual_overlap_CI +"\n")
        fp.write("Average Precision CI: %f"% average_precision_CI + "\n")
        fp.write("Unique Question CI: %f"% unique_questions_CI + "\n")
        fp.write("Ent-1-CI: %f"% ent_1_CI + "\n")
        fp.write("Ent-2-CI: %f"% ent_2_CI + "\n")
        fp.write("Dist-1-CI: %f"% dist_1_CI + "\n")
        fp.write("Dist-2-CI: %f"% dist_2_CI + "\n")
        fp.write("cos similarity between consecutive rounds \n")
        fp.write(",".join(map(str,similarity_scores_mean)) + "\n")
        fp.write("difference of norms between consecutive rounds \n")
        fp.write(",".join(map(str,norm_difference_scores_mean)) + "\n")
        fp.write("mean norm at each round \n")
        fp.write(",".join(map(str,norm_scores_mean)) + "\n")
        fp.write("huber loss between consecutive rounds \n")
        fp.write(",".join(map(str,huber_scores_mean)) + "\n")
        fp.write("round to round precision for questions \n")
        fp.write(",".join(map(str,per_round_precision)) + "\n")

    with open(savePathJson, "w") as fp:
        print("Writing dialog text data to file: {}".format(savePathJson))
        json.dump(text, fp)

    # with open(saveMetricsJson, "w") as fp:
    #     print("Writing dialog metrics to file: {}".format(saveMetricsJson))
    #     json.dump(dialog_metrics, fp)

    # write latex string (single table row of the headline metrics)
    latex_code = " $ " + str(round(novel_questions,2)) + " $ " + " & " + " $ " + str(round(mean_unique_questions,2)) + " $ " + " \pm " + str(round(unique_questions_CI,2)) \
                 + " $ \pm $ " + " $ " + str(round(bleu_metric,2)) + " $ " + " $ \pm $ " + " $ " + str(round(mutual_overlap_CI,2)) + " $ " + " & " + " $ " +str(round(ent_1,2)) \
                 + " $ " + " $\pm$ " + " $ " + str(round(ent_1_CI,2)) + " $ " + " & " + " $ " + str(round(ent_2,2)) \
                 + " $ " + " $\pm$ " + " $ " + str(round(ent_2_CI,2)) + " $ " +\
                 "& $" + str(round(dist_1,2)) + " $ &" + "$ \pm $" + "& $" + str(round(dist_1_CI,2)) + " $ " + \
                 "& $" + str(round(dist_2, 2)) + " $ &" + "$ \pm $" + "& $" + str(round(dist_2_CI, 2)) + " $ " + \
                 " && " + " $ " + str(round(nll,2)) + " $ " + " & " + " $ " + str(round(average_precision,2)) \
                 + " $ " + " $ \pm$ " + " $ " + str(round(average_precision_CI,2)) + " $ "

    with open(saveLatexFile, "w") as fp:
        print("Writing latex code to file: {}".format(saveLatexFile))
        fp.write(latex_code)

    print("Done!")

    # Plot the round-by-round dialog-state similarity diagnostics.
    fig = plt.figure()
    plt.plot(similarity_scores_mean, label='Cos')
    plt.plot(norm_difference_scores_mean, label='Norm Penalty')
    plt.plot(huber_scores_mean, label='Huber')
    plt.title('Similarity of consecutive embeddings')
    plt.ylabel('Similarity')
    plt.xlabel("Round")
    plt.legend()
    fig.savefig(os.path.join(saveFolder,'Similarity_Metrics_Plot.png'))

    # Plot the mean dialog-state norm per round.
    fig = plt.figure()
    plt.plot(norm_scores_mean)
    plt.title('Norm vs Round')
    plt.ylabel('Norm')
    plt.xlabel("Round")
    plt.legend()
    fig.savefig(os.path.join(saveFolder,'norms.png'))
def run_dialog(params,
               dataset,
               split,
               aBot,
               qBot=None,
               beamSize=1):
    """Run Q-Bot/A-Bot dialog over `split` and compute diversity metrics.

    Returns a tuple ``(text, ret_metrics)`` where `text` holds the dialog
    transcripts (JSON-serializable) and `ret_metrics` a dict of metrics
    (unique/novel question counts, mutual overlap, entropy, dist-n, NLL,
    precision, and per-round dialog-state statistics).
    """
    assert aBot is not None or (qBot is not None and aBot is not None),\
        "Must provide either an A-Bot alone or both \
        Q-Bot and A-Bot when generating dialog"
    # NLL of ground-truth questions under the Q-Bot (for the 'NLL' metric).
    rankMetrics, _ = rankQBot(qBot, dataset, 'val')

    old_split = dataset.split
    batchSize = dataset.batchSize
    numRounds = dataset.numRounds
    train_questions = set()

    # First pass over the train split: collect every GT question string so
    # that generated questions can be checked for novelty later.
    dataset.split = 'train'
    dataloader = DataLoader(
        dataset,
        batch_size=batchSize,
        shuffle=False,
        num_workers=0,
        collate_fn=dataset.collate_fn)

    ind2word = dataset.ind2word
    # Decoders from token-index tensors to strings; the trailing [8:] slices
    # used below strip the leading "<START> " prefix.
    to_str_gt = lambda w: str(" ".join([ind2word[x] for x in filter(lambda x:\
                                    x>0,w.data.cpu().numpy())])) #.encode('utf-8','ignore')
    to_str_pred = lambda w, l: str(" ".join([ind2word[x] for x in list( filter(
        lambda x:x>0,w.data.cpu().numpy()))][:l.data.cpu()[0]])) #.encode('utf-8','ignore')

    for idx, batch in enumerate(dataloader):
        # append all questions in train in a set to calculate downstream metrics
        gtQuestions = Variable(batch['ques'], requires_grad=False)
        gtQuesLens = Variable(batch['ques_len'], requires_grad=False)
        if gtQuesLens.shape[0] < batchSize:
            break

        # iterate through the batch and add to dictionary
        for j in range(batchSize):
            for rnd in range(numRounds):
                question_str = to_str_pred(gtQuestions[j,rnd,:], gtQuesLens[j,rnd])
                train_questions.add(question_str[8:])

    print("train questions len:", len(train_questions))

    # Second pass: generate dialog on the requested split.
    dataset.split = split

    dataloader = DataLoader(
        dataset,
        batch_size=batchSize,
        shuffle=False,
        num_workers=0,
        collate_fn=dataset.collate_fn)

    text = {'data': []}
    if '%s_img_fnames' % split not in dataset.data.keys():
        print("[Error] Need coco directory and info as input " \
              "to -cocoDir and -cocoInfo arguments for locating "\
              "coco image files.")
        print("Exiting dialogDump without saving files.")
        return None

    getImgFileName = lambda x: dataset.data['%s_img_fnames' % split][x]
    # COCO image id = last 12 characters of the file name (minus extension).
    getImgId = lambda x: int(getImgFileName(x)[:-4][-12:])

    # Per-round accumulators for dialog-state statistics (averaged at end).
    similarity_scores_mean = Variable(torch.zeros(numRounds))
    norm_difference_scores_mean = Variable(torch.zeros(numRounds))
    norm_scores_mean = Variable(torch.zeros(numRounds))
    huber_scores_mean = Variable(torch.zeros(numRounds))

    if params["useGPU"]:
        similarity_scores_mean = similarity_scores_mean.cuda()
        norm_difference_scores_mean = norm_difference_scores_mean.cuda()
        norm_scores_mean = norm_scores_mean.cuda()
        huber_scores_mean = huber_scores_mean.cuda()

    tot_idx = 0
    # Transcripts are only saved for the first few batches.
    output_dialog = True
    tot_examples = 0
    unique_questions = 0
    unique_questions_list = []
    mutual_overlap_list = []
    ent_1_list = []
    ent_2_list = []
    dist_1_list = []
    dist_2_list = []
    avg_precision_list = []
    bleu_metric = 0
    novel_questions = 0
    oscillating_questions_cnt = 0
    per_round_bleu = np.zeros(numRounds)
    ent_1 = 0
    ent_2 = 0

    for idx, batch in enumerate(dataloader):
        print("current batch:",idx)
        if idx > 3:
            output_dialog = False
        tot_idx = tot_idx + 1
        imgIds = [getImgId(x) for x in batch['index']]
        dialog = [{'dialog': [], 'image_id': imgId} for imgId in imgIds]

        if dataset.useGPU:
            batch = {key: v.cuda() if hasattr(v, 'cuda')\
                else v for key, v in batch.items()}
        image = Variable(batch['img_feat'], volatile=True)
        caption = Variable(batch['cap'], volatile=True)

        # ignoring the last batch
        if caption.size()[0] < batchSize:
            break
        captionLens = Variable(batch['cap_len'], volatile=True)
        if qBot is None:  # A-Bot alone needs ground truth dialog
            gtQuestions = Variable(batch['ques'], volatile=True)
            gtQuesLens = Variable(batch['ques_len'], volatile=True)
            gtAnswers = Variable(batch['ans'], volatile=True)
            gtAnsLens = Variable(batch['ans_len'], volatile=True)
        if aBot:
            aBot.eval(), aBot.reset()
            aBot.observe(
                -1, image=image, caption=caption, captionLens=captionLens)
        if qBot:
            qBot.eval(), qBot.reset()
            qBot.observe(-1, caption=caption, captionLens=captionLens)
        questions = []

        for j in range(batchSize):
            caption_str = to_str_gt(caption[j])[8:-6]
            dialog[j]['caption'] = caption_str
        past_dialog_hidden = None
        cur_dialog_hidden = None
        question_str_list = [[] for _ in range(batchSize)]
        gt_questions_str = [[] for _ in range(batchSize)]

        gtQuestions = Variable(batch['ques'], volatile=True)
        gtQuesLens = Variable(batch['ques_len'], volatile=True)
        gtAnswers = Variable(batch['ans'], volatile=True)
        gtAnsLens = Variable(batch['ans_len'], volatile=True)

        for round in range(numRounds):
            # Either feed GT questions to the A-Bot alone, or let both bots
            # converse (Q-Bot asks, A-Bot answers, both observe each other).
            if aBot is not None and qBot is None:
                aBot.observe(
                    round,
                    ques=gtQuestions[:, round],
                    quesLens=gtQuesLens[:, round])
                aBot.observe(
                    round,
                    ans=gtAnswers[:, round],
                    ansLens=gtAnsLens[:, round])
                _ = aBot.forward()
                answers, ansLens = aBot.forwardDecode(
                    inference='greedy', beamSize=beamSize)
            elif aBot is not None and qBot is not None:
                questions, quesLens = qBot.forwardDecode(
                    beamSize=beamSize, inference='greedy')
                qBot.observe(round, ques=questions, quesLens=quesLens)
                aBot.observe(round, ques=questions, quesLens=quesLens)
                answers, ansLens = aBot.forwardDecode(
                    beamSize=beamSize, inference='greedy')
                aBot.observe(round, ans=answers, ansLens=ansLens)
                qBot.observe(round, ans=answers, ansLens=ansLens)
                qBot.encoder()

            # Dialog-state drift diagnostics between consecutive rounds.
            cur_dialog_hidden = qBot.encoder.dialogHiddens[-1][0]
            if round == 0:
                past_dialog_hidden = qBot.encoder.dialogHiddens[-1][0]
            cos = nn.CosineSimilarity(dim=1, eps=1e-6)
            similarity_scores = cos(cur_dialog_hidden, past_dialog_hidden)
            norm_difference_scores = torch.abs(torch.norm(cur_dialog_hidden, p=2, dim=1) - \
                torch.norm(past_dialog_hidden,p=2,dim=1))
            # calculate norm
            norm_scores = torch.norm(cur_dialog_hidden, p=2, dim=1)
            # calculate Huber Loss/ Difference at consecutive rounds with Huber Threshold = 0.1
            threshold = 0.1
            norm_differences = torch.abs(cur_dialog_hidden - past_dialog_hidden)
            # Quadratic branch below threshold, linear branch above it.
            l2_mask = norm_differences <= threshold
            norm_differences_new = 0.5 * norm_differences * norm_differences * (l2_mask == 1).float()
            l1_mask = norm_differences > threshold
            norm_differences_new = norm_differences_new + (((l1_mask == 1).float()) * (threshold *
                                                           (norm_differences - (0.5 * threshold))))
            huber_scores = torch.sum(norm_differences_new, dim=1)
            past_dialog_hidden = cur_dialog_hidden
            similarity_scores_mean[round] = similarity_scores_mean[round] + torch.mean(similarity_scores)
            norm_difference_scores_mean[round] = norm_difference_scores_mean[round] + torch.mean(norm_difference_scores)
            norm_scores_mean[round] = norm_scores_mean[round] + torch.mean(norm_scores)
            huber_scores_mean[round] = huber_scores_mean[round] + torch.mean(huber_scores)

            # Decode this round's strings and append to the transcript.
            for j in range(batchSize):
                question_str = to_str_pred(questions[j], quesLens[j]) \
                    if qBot is not None else to_str_gt(gtQuestions[j])
                gt_question_str = to_str_pred(gtQuestions[j,round,:], gtQuesLens[j,round])
                gt_questions_str[j].append(gt_question_str[8:])
                question_str_list[j].append(question_str[8:])
                answer_str = to_str_pred(answers[j], ansLens[j])
                if output_dialog:
                    if round == 0:
                        norm_score = float(norm_scores[j])
                        dialog[j]['dialog'].append({
                            "answer": answer_str[8:],
                            "question": question_str[8:] + ":" + "N:%.2f" % norm_score + " "
                        })  # "8:" for indexing out initial <START>
                    else:
                        similarity_score = float(similarity_scores[j])
                        norm_difference_score = float(norm_difference_scores[j])
                        norm_score = float(norm_scores[j])
                        huber_score = float(huber_scores[j])
                        dialog[j]['dialog'].append({
                            "answer": answer_str[8:],
                            "question": question_str[8:] + ":" + "C:%.2f" % similarity_score + ";" +
                                        "NP:%.2f" % norm_difference_score + "H:%.2f" % huber_score + ";" +
                                        "N:%.2f" % norm_score + " "
                        })  # "8:" for indexing out initial <START>

        per_round_bleu_batch = np.zeros((numRounds, batchSize))
        for j in range(batchSize):
            # calculate bleu scores for each question str, with other questions as references to calculate
            # mutual overlap
            # also calculate round by round bleu score
            unigrams = []
            bigrams = []
            avg_bleu_score = 0
            for rnd in range(numRounds):
                # Novel sentences metric
                cur_ques = question_str_list[j][rnd]
                gt_ques = gt_questions_str[j][rnd]
                if cur_ques not in train_questions:
                    novel_questions += 1
                # question oscillation metrics
                if rnd >= 2:
                    if cur_ques == question_str_list[j][rnd-2]:
                        oscillating_questions_cnt += 1
                # bleu/mutual overlap metric
                references = []
                for k in range(numRounds):
                    if rnd != k:
                        references.append(nltk.word_tokenize(question_str_list[j][k]))
                avg_bleu_score += sentence_bleu(references,nltk.word_tokenize(cur_ques))
                per_round_bleu_batch[rnd][j] = sentence_bleu([nltk.word_tokenize(gt_ques)],
                                                             nltk.word_tokenize(cur_ques))
                unigrams.extend(list(ngrams(nltk.word_tokenize(cur_ques),1)))
                bigrams.extend(list(ngrams(nltk.word_tokenize(cur_ques),2)))
            avg_bleu_score /= float(numRounds)
            mutual_overlap_list.append(avg_bleu_score)
            bleu_metric += avg_bleu_score
            tot_tokens = len(unigrams)

            # Ent-n: entropy of the n-gram distribution over this dialog.
            unigram_ctr = Counter(unigrams)
            bigram_ctr = Counter(bigrams)
            cur_ent_1 = get_entropy_ctr(unigram_ctr)
            ent_1 += cur_ent_1
            ent_1_list.append(cur_ent_1)

            cur_ent_2 = get_entropy_ctr(bigram_ctr)
            ent_2 += cur_ent_2
            ent_2_list.append(cur_ent_2)

            # Dist-n: fraction of distinct n-grams among all tokens.
            dist_1 = len(unigram_ctr.keys())/float(tot_tokens)
            dist_2 = len(bigram_ctr.keys())/float(tot_tokens)
            dist_1_list.append(dist_1)
            dist_2_list.append(dist_2)

            cur_unique_ques = len(set(question_str_list[j]))
            unique_questions += cur_unique_ques
            unique_questions_list.append(cur_unique_ques)
            # dialog[j]['caption'] += ':' + str(cur_unique_ques)
        tot_examples += batchSize

        if output_dialog:
            text['data'].extend(dialog)
        per_round_bleu += np.sum(per_round_bleu_batch,axis=1)
        avg_precision_list.extend(np.mean(per_round_bleu_batch,axis=0).tolist())

    # Convert running sums into per-batch means.
    similarity_scores_mean = similarity_scores_mean * (1.0/tot_idx)
    norm_difference_scores_mean = norm_difference_scores_mean * (1.0/tot_idx)
    norm_scores_mean = norm_scores_mean *(1.0/tot_idx)
    huber_scores_mean = huber_scores_mean *(1.0/tot_idx)

    print("Mean Cos Similarity Scores:", similarity_scores_mean)
    print("Mean Difference of Norms Scores:", norm_difference_scores_mean)
    print("Mean Norm of Dialog State:", norm_scores_mean)
    print("Mean Huber Loss(Norm of differences):", huber_scores_mean)

    # Options block consumed by the HTML dialog visualizer.
    text['opts'] = {
        'qbot': params['qstartFrom'],
        'abot': params['startFrom'],
        'backend': 'cudnn',
        'beamLen': 20,
        'beamSize': beamSize,
        'decoder': params['decoder'],
        'encoder': params['encoder'],
        'gpuid': 0,
        'imgNorm': params['imgNorm'],
        'inputImg': params['inputImg'],
        'inputJson': params['inputJson'],
        'inputQues': params['inputQues'],
        'loadPath': 'checkpoints/',
        'maxThreads': 1,
        'resultPath': 'dialog_output/results',
        'sampleWords': 0,
        'temperature': 1,
        'useHistory': True,
        'useIm': True,
    }
    unique_questions_arr = np.array(unique_questions_list)
    # converting metrics to numpy arrays
    similarity_scores_mean = similarity_scores_mean.cpu().data.numpy().tolist()
    norm_difference_scores_mean = norm_difference_scores_mean.cpu().data.numpy().tolist()
    norm_scores_mean = norm_scores_mean.cpu().data.numpy().tolist()
    huber_scores_mean = huber_scores_mean.cpu().data.numpy().tolist()

    bleu_metric /= float(tot_examples)
    ent_1 /= float(tot_examples)
    ent_2 /= float(tot_examples)
    per_round_bleu = per_round_bleu / float(tot_examples)

    print("tot unique questions: ", unique_questions)
    print("tot examples: ", tot_examples)
    print("avg unique questions per example: ", float(unique_questions) / tot_examples)
    print("std unique questions per example: ", float(np.std(unique_questions_arr)))
    print("Mutual Overlap (Bleu Metric): ", bleu_metric)
    print("tot novel questions: ", novel_questions)
    tot_questions = tot_examples * numRounds
    print("tot questions: ", tot_questions)
    print("avg novel questions: ", float(novel_questions)/float(tot_questions))

    print("avg oscillating questions count", float(oscillating_questions_cnt)/tot_questions)
    print("osciallation questions count", oscillating_questions_cnt)

    # Restore the dataset split for the caller.
    dataset.split = old_split

    # Final metric dictionary; CIs are 95% normal-approximation intervals.
    ret_metrics = {}
    ret_metrics["tot_unique_questions"] = unique_questions
    ret_metrics["tot_examples"] = tot_examples
    ret_metrics["mean_unique_questions"] = int((float(unique_questions) / tot_examples) * 100)/100.0
    ret_metrics["std_unique_questions"] = int(float(np.std(unique_questions_arr)) * 100)/100.0
    ret_metrics["similarity_scores_mean"] = similarity_scores_mean
    ret_metrics["norm_difference_scores_mean"] = norm_difference_scores_mean
    ret_metrics["norm_scores_mean"] = norm_scores_mean
    ret_metrics["huber_scores_mean"] = huber_scores_mean
    ret_metrics["mutual_overlap_score"] = bleu_metric
    ret_metrics["tot_novel_questions"] = novel_questions
    ret_metrics["avg_novel_questions"] = float(novel_questions)/float(tot_questions)
    ret_metrics["tot_questions"] = tot_questions
    ret_metrics['NLL'] = rankMetrics['logProbsMean']
    ret_metrics["average_precision"] = np.mean(per_round_bleu)
    ret_metrics["per_round_precision"] = per_round_bleu.tolist()
    ret_metrics["ent_1"] = ent_1
    ret_metrics["ent_2"] = ent_2
    ret_metrics["dist_1"] = np.mean(dist_1_list)
    ret_metrics["dist_2"] = np.mean(dist_2_list)

    ret_metrics["average_precision_CI"] = (1.96 * np.std(avg_precision_list))/math.sqrt(len(avg_precision_list))
    ret_metrics["ent_1_CI"] = (1.96 * np.std(ent_1_list))/math.sqrt(len(ent_1_list))
    ret_metrics["ent_2_CI"] = (1.96 * np.std(ent_2_list))/math.sqrt(len(ent_2_list))
    ret_metrics["unique_questions_CI"] = (1.96 * np.std(unique_questions_list))/math.sqrt(len(unique_questions_list))
    ret_metrics["mutual_overlap_CI"] = (1.96 * np.std(mutual_overlap_list))/math.sqrt(len(mutual_overlap_list))
    ret_metrics["dist_1_CI"] = (1.96 * np.std(dist_1_list))/math.sqrt(len(dist_1_list))
    ret_metrics["dist_2_CI"] = (1.96 * np.std(dist_2_list))/math.sqrt(len(dist_2_list))
    return text,ret_metrics
def get_entropy_ctr(ctr):
    """Normalize a counter's counts into probabilities and return the
    Shannon entropy (in nats) of that distribution."""
    counts = list(ctr.values())
    total = float(sum(counts))
    return entropy([count / total for count in counts])
1d7b416fb6052cb1f535e4e3dade038bb5d58572 | Python | rodykings/FEUP-FPRO | /PE/PE2/exactly.py | UTF-8 | 344 | 3.640625 | 4 | [] | no_license |
def exactly(s):
    """Return a list of [index, character] pairs for every digit in *s*.

    The index is the character's position in the original string.
    """
    counter = []
    # enumerate(s) iterates the string directly; the original wrapped it in a
    # pointless [i for i in s] copy and shadowed the builtin name `id`.
    for idx, ch in enumerate(s):
        if ch.isdigit():
            counter.append([idx, ch])
    return counter
# Demo: prints the [index, digit] pairs found in the sample string.
print(exactly("acc?7??sss?3rr1??????5???5"))
# NOTE(review): the note below describes a formatted sentence with digit
# pairs, but exactly() actually returns a list of [index, digit] pairs --
# the text looks like the exercise's target output, not this code's output.
'''
returns the string:
The sequence acc?7??sss?3rr1??????5???5 is OK with the pairs:
('73', '55')
'''
9e348fa5b619741f28f0a6d56ffcc2c8cfd9a4dc | Python | adityadu-18/IRC2020-Rover | /meshgrrid.py | UTF-8 | 238 | 2.78125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt

# Integer coordinates 0..4 along each axis; meshgrid expands them into a
# full 5x5 lattice of (x, y) grid points.
ticks = np.array([0, 1, 2, 3, 4])
grid_x, grid_y = np.meshgrid(ticks, ticks)
# Draw every lattice point as a black dot with no connecting lines.
plt.plot(grid_x, grid_y, marker='.', color='k', linestyle='none')
plt.show()
dc1a5a7fd4a6e1613c787e03a6386cf73c8fee05 | Python | Nautilus1993/NetworkSecurity | /playground/network/common/PlaygroundAddress.py | UTF-8 | 3,801 | 3.171875 | 3 | [] | no_license | '''
Created on Nov 25, 2013
@author: sethjn
'''
from Error import InvalidPlaygroundAddressString, InvalidPlaygroundFormat
from twisted.internet.interfaces import IAddress
class PlaygroundAddress(object):
    """A four-part Playground network address: semester.group.individual.index.

    Each part is a non-negative integer; the canonical representation is the
    dotted string, which also drives hashing and string equality.
    """
    @staticmethod
    def FromString(addressString):
        """Parse an 'a.b.c.d' string into a PlaygroundAddress.

        Raises InvalidPlaygroundAddressString when the input is not a string,
        does not have exactly four dot-separated parts, or a part is not an
        integer.
        """
        if type(addressString) != str:
            raise InvalidPlaygroundAddressString("Address string not of type string")
        parts = addressString.split(".")
        if len(parts) != 4:
            raise InvalidPlaygroundAddressString("Address string not of form a.b.c.d")
        try:
            # BUG FIX: build a real list; on Python 3 map() returns a lazy,
            # non-subscriptable iterator, so parts[0] below used to fail.
            parts = [int(part) for part in parts]
        except ValueError:
            raise InvalidPlaygroundAddressString("Address parts must be integers")
        return PlaygroundAddress(parts[0], parts[1], parts[2], parts[3])
    def __init__(self, semester, group, individual, index):
        self.__validateAddressPart(semester, group, individual, index)
        self.__semester = semester
        self.__group = group
        self.__individual = individual
        self.__index = index
        # Cache the canonical dotted-string form once.
        self.__addressString = ".".join(map(str, [semester, group, individual, index]))
    def __validateAddressPart(self, *parts):
        # Each part must be a plain non-negative int (bool is deliberately
        # rejected by the exact-type check, matching the original behavior).
        for part in parts:
            if not type(part) == int or part < 0:
                raise InvalidPlaygroundFormat("Address parts must be positive integers")
    def __eq__(self, other):
        """Equal to another PlaygroundAddress with the same four parts, or to
        the equivalent dotted string."""
        if isinstance(other, PlaygroundAddress):
            return (self.__semester == other.__semester and
                    self.__group == other.__group and
                    self.__individual == other.__individual and
                    self.__index == other.__index)
        elif isinstance(other, str):
            # BUG FIX: the original called isinstance(str) with a single
            # argument, which raises TypeError instead of comparing.
            return self.__addressString == other
        return False
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Hash like the dotted string so address and string hash consistently.
        return self.__addressString.__hash__()
    def __getitem__(self, i):
        # Only indices 0..3 are valid; negative indexing is not supported.
        if i < 0 or i > 3:
            raise IndexError("Playground Addresses have 4 parts")
        if i == 0: return self.__semester
        if i == 1: return self.__group
        if i == 2: return self.__individual
        if i == 3: return self.__index
    def semester(self): return self.__semester
    def group(self): return self.__group
    def individual(self): return self.__individual
    def index(self): return self.__index
    def getSemesterPair(self):
        """Split the semester field into (year, semester-code) digits."""
        year = int(self.__semester/10)
        semesterCode = int(self.__semester%10)
        return (year, semesterCode)
    def toString(self):
        return self.__addressString
    def __repr__(self):
        return self.toString()
    def __str__(self):
        return self.toString()
# figure out how IAddress works...
class PlaygroundAddressPair(object):
    """Couples a PlaygroundAddress with a port number (host:port analogue)."""
    def __init__(self, playgroundAddress, port):
        if not isinstance(playgroundAddress, PlaygroundAddress):
            raise Exception("Argument 1 to PlaygroundAddressPair must be PlaygroundAddress")
        if port < 0:
            raise Exception("Port must be positive")
        self.host = playgroundAddress
        self.port = port
        # Precompute the string form and its hash once.
        self.__stringValue = "{0}: {1}".format(self.host.toString(), self.port)
        self.__hashValue = self.__stringValue.__hash__()
    def __eq__(self, other):
        return (isinstance(other, PlaygroundAddressPair)
                and self.host == other.host
                and self.port == other.port)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        return self.__hashValue
    def toString(self):
        return self.__stringValue
    def __repr__(self):
        return self.toString()
    def __str__(self):
        return self.toString()
4e660984bd1eba5448d9322f285554d4461ce038 | Python | samhita101/Python-Practice | /rectangle of rows and columns.py | UTF-8 | 270 | 4 | 4 | [] | no_license | print("user, we will create a rectangle with this: *")
print("user, give a number for the row count.")
row_count = int(input())
print("User give a number for the column count.")
col_count = int(input())

# Top edge of the rectangle.
print("* " * col_count)
# Interior rows: total rows minus the top and bottom edges.
# NOTE(review): print() inserts a space between its arguments, so the
# interior rows render wider than the edges -- preserved as-is.
for _ in range(row_count - 2):
    print("*", " " * col_count, "*")
# Bottom edge.
print("* " * col_count)
| true |
e7c8de4b74a921d303194b8adac57570feea3cc9 | Python | billyio/atcoder | /ABC151-200/ABC190/abc190_c.py | UTF-8 | 368 | 2.671875 | 3 | [
"MIT"
] | permissive | import itertools
# Input: N and M, then M required pairs (A, B), then K choices each
# offering two candidate balls.
N, M = map(int, input().split())
cond = [tuple(map(int, input().split())) for i in range(M)]
K = int(input())
choice = [tuple(map(int, input().split())) for i in range(K)]

# Brute force: take one ball from each of the K choices (2**K selections)
# and keep the best count of satisfied (A, B) conditions.
ans = 0
for picked in itertools.product(*choice):
    chosen = set(picked)
    satisfied = sum(A in chosen and B in chosen for A, B in cond)
    ans = max(ans, satisfied)
print(ans)
| true |
411aaa6922370c95e2b5089fd6d734369cc1210f | Python | jsw0402/python-practice | /py5-Q9.py | UTF-8 | 75 | 2.71875 | 3 | [] | no_license | import sys
# Sum every command-line argument as an integer and print the total.
# A non-numeric argument raises ValueError, exactly like the original loop.
result = sum(int(arg) for arg in sys.argv[1:])
print(result)
fdf7a06ff59822f3d8c6a1c32a1a95dacab81006 | Python | pierreablin/picard | /examples/plot_faces_decomposition.py | UTF-8 | 3,405 | 3.140625 | 3 | [
"BSD-3-Clause"
] | permissive | """
================================================
Comparison of Picard-O and FastICA on faces data
================================================
This example compares FastICA and Picard-O:
Pierre Ablin, Jean-François Cardoso, Alexandre Gramfort
"Faster ICA under orthogonal constraint"
ICASSP, 2018
https://arxiv.org/abs/1711.10873
On the figure, the number above each bar corresponds to the final gradient
norm.
""" # noqa
# Author: Pierre Ablin <pierre.ablin@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.decomposition import fastica
from picard import picard
print(__doc__)
# Olivetti face images are 64x64 pixels.
image_shape = (64, 64)
# Fixed-seed RNG so dataset shuffling and both ICA solvers are reproducible.
rng = np.random.RandomState(0)
def gradient_norm(Y):
    """Norm of the sign-adjusted, antisymmetrized relative gradient for
    sources Y (components x samples) under a tanh score function."""
    n_samples = Y.shape[1]
    psiY = np.tanh(Y)
    psidY_mean = 1 - np.mean(psiY ** 2, axis=1)
    g = psiY.dot(Y.T) / n_samples
    # Flip rows so the diagonal comparison has a consistent sign convention.
    signs = np.sign(psidY_mean - np.diag(g))
    g = g * signs[:, None]
    # Keep only the antisymmetric part; its norm measures non-convergence.
    g = (g - g.T) / 2
    return np.linalg.norm(g)
###############################################################################
# Load faces data (400 Olivetti face images, shuffled with the fixed RNG).
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering: subtract the per-pixel mean over the whole dataset
faces_centered = faces - faces.mean(axis=0)
# local centering: subtract each image's own mean intensity
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
# Run Picard-O and FastICA for different number of sources, and store the
# elapsed time.
dimensions = [5, 10, 40, 60]
algorithms = [picard, fastica]
names = ['PicardO', 'FastICA']
colors = ['b', 'orange']
running_times = dict(FastICA=[], PicardO=[])
gradients = dict(FastICA=[], PicardO=[])
for n_components in dimensions:
    for name, algorithm in zip(names, algorithms):
        # NOTE(review): fastica receives the data transposed and a much
        # tighter tolerance (1e-10) than picard (1e-5) -- confirm this is
        # the intended comparison setup.
        if name == 'FastICA':
            kwargs = dict(X=faces_centered.T, n_components=n_components,
                          random_state=rng, max_iter=500, tol=1e-10)
        else:
            kwargs = dict(X=faces_centered, n_components=n_components,
                          max_iter=500, tol=1e-5, random_state=rng)
        # Time only the solver call itself.
        t0 = time()
        K, W, Y = algorithm(**kwargs)
        running_times[name].append(time() - t0)
        # The final gradient norm measures how well each solver converged.
        gradients[name].append(gradient_norm(Y))
###############################################################################
# Plot the results
def autolabel(rects, gradient_list):
    """
    Attach a text label above each bar displaying its gradient norm.
    """
    # The loop variable was renamed from 'gradient_norm', which shadowed the
    # module-level gradient_norm() function, and the dead g_string
    # computation (built but never used) was removed.
    for rect, g_norm in zip(rects, gradient_list):
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2., height,
                'G=%.1e' % g_norm, fontsize=8,
                ha='center', va='bottom')
fig, ax = plt.subplots()
ind = np.arange(len(dimensions))
width = 0.8
# One bar group per source count; the two algorithms' bars sit side by side.
for i, (name, color) in enumerate(zip(names, colors)):
    rect = ax.bar(2 * ind - 0.05 + i * (width + 0.1), running_times[name],
                  width=width, color=color, label=name)
    # Annotate each bar with its final gradient norm.
    autolabel(rect, gradients[name])
ax.set_xticks(2 * ind + width / 2)
ax.set_xticklabels((str(n_components) for n_components in dimensions))
plt.legend()
plt.xlabel('Number of sources')
plt.ylabel('Time (sec.)')
plt.show()
| true |
e5e27713e0c9855012729bdddae467b5b14541ed | Python | ChenJin1997/data | /算法/8) 最大公约数.py | UTF-8 | 976 | 3.4375 | 3 | [] | no_license | # 获取最大公约数
# Greatest common divisor computed three different ways.
class getMaxCommenDivisor:
    # Brute force: after the quick divisibility check, no common divisor
    # other than small itself can exceed small // 2.
    def solution1(self, a, b):
        big = max(a, b)
        small = min(a, b)
        if big % small == 0:
            return small
        for i in range(small // 2, 1, -1):
            if big % i == 0 and small % i == 0:
                return i
        # BUG FIX: the original fell off the loop and returned None for
        # coprime inputs (e.g. 3 and 5); the gcd is 1 in that case.
        return 1

    # Euclidean algorithm: gcd(a, b) == gcd(b, a % b).
    def solution2(self, a, b):
        big = max(a, b)
        small = min(a, b)
        if big % small == 0:
            return small
        # BUG FIX: the recursive result was computed but never returned.
        return self.solution2(big % small, small)

    # Subtraction method: gcd(a, b) == gcd(a - b, b) for a > b.
    def solution3(self, a, b):
        big = max(a, b)
        small = min(a, b)
        if big % small == 0:
            return small
        # BUG FIX: recurse with this method (the original delegated to
        # solution2) and return the result.
        return self.solution3(big - small, small)
# Demo: gcd(8, 4) via the subtraction-based method; prints 4.
a = getMaxCommenDivisor()
c = a.solution3(8,4)
print(c)
| true |
85af92fcd9696be51eba24b888aeed11b8c15acf | Python | tkrollins/Quantum_Algorithms | /PyQuil/DJ-BV/dj_experiment.py | UTF-8 | 8,181 | 2.890625 | 3 | [] | no_license | from pyquil import Program, get_qc
from pyquil.gates import *
from pyquil.api import local_qvm
class Deutsch_Jozsa():
    """Builds the Deutsch-Jozsa circuit for an oracle given as a truth table.

    Qubit layout (fixed by this class): oracle inputs on qubits 0..n-1,
    helper/work qubits on 5..7, and the |1>-initialised output qubit b on 8.
    NOTE(review): positions 5..8 are hard-coded and exactly three helper
    qubits are prepared, so this presumably assumes n <= 5 input bits --
    confirm before using larger oracles.
    """
    def __init__(self, f):
        """
        Initialize the class with a particular n
        :param f: the oracle function, represented as a truth table. First n-1 elements are the input bits,
        element n is the output bit
        """
        self.f = f
        self.p = Program()
        # self.uf accumulates the U_f oracle gates separately; it is appended
        # to self.p once, at the end of build_Uf().
        self.uf = Program()
        # Number of oracle input bits (each truth-table row is inputs + output).
        self.n = len(f[0]) - 1
        self.ro = self.p.declare('ro', 'BIT', self.n)
    def build_circuit(self):
        """
        Return a Program that consists of the entire Deutsch-Jozsa experiment
        :param Uf: U_f encoded with oracle function f
        :return:
        """
        self.initialize_experiment()
        self.left_hadamards()
        self.build_Uf()
        self.right_hadamards()
        self.measure_ro()
        return self.p
    def initialize_experiment(self):
        """
        Initialize the first qubits to 0 and the last (helper) qubit to 1
        :return: program state after this operation
        """
        self.p += Program([I(i) for i in range(self.n)] + [X(8)])
        return self.p
    def left_hadamards(self):
        """
        Add a Hadamard gate to every qubit (corresponds to the left-hand-side hadamards in DJ)
        :return: program state after this operation
        """
        self.p += Program([H(i) for i in range(self.n)] + [H(8)])
        return self.p
    def build_Uf(self):
        """
        Builds a U_f gate by chaining CCNOT gates. Idea is that any input, x,
        that results in f(x) = 1 will flip qubit b.
        If n > 2, then helper bits are used
        to implement the CCNOT gates correctly.
        :return: program state after this operation
        """
        h = 5 # position of first helper bit in circuit
        q = 0 # position of first qubit bit in circuit
        b = 8 # position of b in circuit
        def add_CCNOT(x1, x2, target):
            """
            Adds a CCNOT gate to self.p, with bits x1, x2, controlling target. If bits x1 or x2 == 0,
            then a X gate is temporarily used to flip them
            :param x1: Position of first control qubit
            :param x2: Position of second control qubit
            :param target: Position of controlled qubit
            """
            # X gates before/after make the control fire on the 0 state of
            # input qubits (positions < self.n) whose bit in the current
            # truth-table row is 0; helper qubits are left untouched.
            if x1 < self.n and self.bitstring[x1] == 0:
                self.uf += X(x1)
            if x2 < self.n and self.bitstring[x2] == 0:
                self.uf += X(x2)
            self.uf += CCNOT(x1, x2, target)
            if x1 < self.n and self.bitstring[x1] == 0:
                self.uf += X(x1)
            if x2 < self.n and self.bitstring[x2] == 0:
                self.uf += X(x2)
        def add_gates(q, h):
            """
            Recursively adds CCNOT gates to the circuit until it reaches the last input qubit (q == self.n-1).
            Then adds all the CCNOT gates a second time on the way up to reverse previous X-gates.
            :param q: Input qubit position
            :param h: Helper qubit position
            """
            # if q == n-1, this is the final CCNOT gate that will flip b
            if q == self.n - 1:
                add_CCNOT(q, h, b)
            else:
                # These CCNOT gates flip helper bits
                add_CCNOT(q, h, h + 1)
                add_gates(q + 1, h + 1)
                add_CCNOT(q, h, h + 1)
        # flip all helper bits to |1>
        # NOTE(review): exactly three helper qubits (5..7) are prepared here,
        # independent of n.
        self.uf += Program(X(h), X(h + 1), X(h + 2))
        count = 0
        for bitstring in self.f:
            # add_CCNOT reads the current row through self.bitstring.
            self.bitstring = bitstring
            # Only create gates for inputs that result in f(x) = 1
            if bitstring[-1] == 1:
                count += 1
                # if more than half of x lead to f(x)=1, f is constant
                if count > (2**(self.n-1)):
                    # U_f(b) = b XOR f(x) = NOT(b)
                    self.p += X(b)
                    return self.p
                # if n == 1, then only one CCNOT gate is needed to flip b
                if self.n == 1:
                    add_CCNOT(q, h, b)
                # if n == 2, then only one CCNOT gate is needed to flip b
                elif self.n == 2:
                    add_CCNOT(q, q + 1, b)
                else:
                    add_CCNOT(q, q + 1, h)
                    add_gates(q + 2, h)
                    add_CCNOT(q, q + 1, h)
        self.p += self.uf
        return self.p
    def right_hadamards(self):
        """
        Add a Hadamard gate to every qubit but the helper (corresponds to the right-hand-side hadamards in DJ)
        :return: program state after this operation
        """
        self.p += Program([H(i) for i in range(self.n)])
        return self.p
    def measure_ro(self):
        """
        Measure every qubit but the last one
        :return: program state after this operation
        """
        self.p += Program([MEASURE(i, self.ro[i]) for i in range(self.n)])
        return self.p
def run_DJ(f):
    """Build the Deutsch-Jozsa circuit for oracle table f, simulate it on a
    local QVM, and print the measured register."""
    experiment = Deutsch_Jozsa(f)
    program = experiment.build_circuit()

    # Repeat the measurement several times: the circuit is deterministic,
    # so all shots should agree.
    program.wrap_in_numshots_loop(5)

    qvm = get_qc('9q-square-qvm')
    qvm.compiler.client.timeout = 600  # seconds
    with local_qvm():
        compiled = qvm.compile(program)
        result = qvm.run(compiled)
        print('Results:')
        print(result)
        print()
# Balanced f's, represented by truth table. First n-1 elements are the input bits, element n is the output bit
# Naming: f_bal_<n> has n input bits; f_const_<n>_<v> is constant with output v.
f_bal_1 = [[0,0],[1,1]]
f_bal_2 = [[0,0,0], [0,1,0], [1,0,1], [1,1,1]]
f_bal_3 = [[0,0,0,1], [0,0,1,0], [0,1,0,0], [0,1,1,1], [1,0,0,1], [1,0,1,0], [1,1,0,0], [1,1,1,1]]
f_bal_4 = [[0,0,0,0,1],[0,0,0,1,0],[0,0,1,0,1],[0,0,1,1,0],[0,1,0,0,1],[0,1,0,1,0],[0,1,1,0,1],[0,1,1,1,0],
           [1,0,0,0,1],[1,0,0,1,0],[1,0,1,0,1],[1,0,1,1,0],[1,1,0,0,1],[1,1,0,1,0],[1,1,1,0,1],[1,1,1,1,0]]
f_bal_5 = [[0,0,0,0,0,1],[0,0,0,0,1,1],[0,0,0,1,0,1],[0,0,0,1,1,1],[0,0,1,0,0,1],[0,0,1,0,1,1],[0,0,1,1,0,1],[0,0,1,1,1,1],
           [0,1,0,0,0,0],[0,1,0,0,1,0],[0,1,0,1,0,0],[0,1,0,1,1,0],[0,1,1,0,0,0],[0,1,1,0,1,0],[0,1,1,1,0,0],[0,1,1,1,1,0],
           [1,0,0,0,0,0],[1,0,0,0,1,0],[1,0,0,1,0,0],[1,0,0,1,1,0],[1,0,1,0,0,0],[1,0,1,0,1,0],[1,0,1,1,0,0],[1,0,1,1,1,0],
           [1,1,0,0,0,1],[1,1,0,0,1,1],[1,1,0,1,0,1],[1,1,0,1,1,1],[1,1,1,0,0,1],[1,1,1,0,1,1],[1,1,1,1,0,1],[1,1,1,1,1,1]]
# Constant f's, represented by truth table. First n-1 elements are the input bits, element n is the output bit
# Constant f with output of 0
f_const_1_0 = [[0,0],[1,0]]
f_const_2_0 = [[0,0,0], [0,1,0], [1,0,0], [1,1,0]]
f_const_3_0 = [[0,0,0,0], [0,0,1,0], [0,1,0,0], [0,1,1,0], [1,0,0,0], [1,0,1,0], [1,1,0,0], [1,1,1,0]]
f_const_4_0 = [[0,0,0,0,0],[0,0,0,1,0],[0,0,1,0,0],[0,0,1,1,0],[0,1,0,0,0],[0,1,0,1,0],[0,1,1,0,0],[0,1,1,1,0],
               [1,0,0,0,0],[1,0,0,1,0],[1,0,1,0,0],[1,0,1,1,0],[1,1,0,0,0],[1,1,0,1,0],[1,1,1,0,0],[1,1,1,1,0]]
f_const_5_0 = [[0,0,0,0,0,0],[0,0,0,0,1,0],[0,0,0,1,0,0],[0,0,0,1,1,0],[0,0,1,0,0,0],[0,0,1,0,1,0],[0,0,1,1,0,0],[0,0,1,1,1,0],
               [0,1,0,0,0,0],[0,1,0,0,1,0],[0,1,0,1,0,0],[0,1,0,1,1,0],[0,1,1,0,0,0],[0,1,1,0,1,0],[0,1,1,1,0,0],[0,1,1,1,1,0],
               [1,0,0,0,0,0],[1,0,0,0,1,0],[1,0,0,1,0,0],[1,0,0,1,1,0],[1,0,1,0,0,0],[1,0,1,0,1,0],[1,0,1,1,0,0],[1,0,1,1,1,0],
               [1,1,0,0,0,0],[1,1,0,0,1,0],[1,1,0,1,0,0],[1,1,0,1,1,0],[1,1,1,0,0,0],[1,1,1,0,1,0],[1,1,1,1,0,0],[1,1,1,1,1,0]]
# Constant f with output of 1
f_const_1_1 = [[0,1],[1,1]]
f_const_2_1 = [[0,0,1], [0,1,1], [1,0,1], [1,1,1]]
f_const_3_1 = [[0,0,0,1], [0,0,1,1], [0,1,0,1], [0,1,1,1], [1,0,0,1], [1,0,1,1], [1,1,0,1], [1,1,1,1]]
f_const_4_1 = [[0,0,0,0,1],[0,0,0,1,1],[0,0,1,0,1],[0,0,1,1,1],[0,1,0,0,1],[0,1,0,1,1],[0,1,1,0,1],[0,1,1,1,1],
               [1,0,0,0,1],[1,0,0,1,1],[1,0,1,0,1],[1,0,1,1,1],[1,1,0,0,1],[1,1,0,1,1],[1,1,1,0,1],[1,1,1,1,1]]
# A balanced oracle yields a non-zero measured register:
# Will return [0 0 0 1]
run_DJ(f_bal_4)
# Constant oracles yield the all-zero register:
# Will return [0 0 0 0]
run_DJ(f_const_4_0)
# Will return [0 0 0 0]
run_DJ(f_const_4_1)
| true |
f6aae0fe16d414820ca3640db6b3441c989c5100 | Python | padfoot999/Logan | /IO_databaseOperations.py | UTF-8 | 8,956 | 3.046875 | 3 | [] | no_license | #!/usr/bin/python -tt
__description__ = 'Handle all database operations'
#For database operations
import psycopg2
import sys
#For ordered dictionary
import collections
from config import CONFIG
import logging
logger = logging.getLogger('root')
#NAME: databaseInitiate
#INPUT: NONE (connection settings are read from CONFIG['DATABASE'])
#OUTPUT: NONE
#DESCRIPTION: Setup database
def databaseInitiate():
    DATABASE = CONFIG['DATABASE']
    databaseHandle = databaseConnect(DATABASE['HOST'], DATABASE['DATABASENAME'], DATABASE['USER'], DATABASE['PASSWORD'])
    print str(databaseHandle)
    databaseCursor = databaseHandle.cursor()
    try:
        databaseCursor.execute("CREATE SCHEMA usb_id;")
        databaseCursor.execute("CREATE TABLE usb_id.vendor_details(vendorid text, vendorname text);")
        databaseCursor.execute("CREATE TABLE usb_id.product_details(prodid text, prodname text, vendorid text);")
        #Save changes
        databaseHandle.commit()
    except:
        # Best-effort setup: the schema/tables most likely exist already.
        # NOTE(review): the bare except also hides every other error.
        pass
#NAME: databaseConnect
#INPUT: string databaseHost, string databaseName, string databaseUser, string databasePassword
#OUTPUT: Returns database connection handle if successful
#DESCRIPTION: Connects to database as specified by function parameters
def databaseConnect(databaseHost, databaseName, databaseUser, databasePassword):
    connectionString = "host=%s dbname=%s user=%s password=%s" % (
        databaseHost, databaseName, databaseUser, databasePassword)
    logger.info("databaseConnectionString is " + connectionString + "\n")
    try:
        handle = psycopg2.connect(connectionString)
    except psycopg2.OperationalError as e:
        # A failed connection is fatal for this tool.
        logger.error(('Unable to connect!\n{0}').format(e))
        sys.exit(1)
    else:
        return handle
#NAME: cleanStrings
#INPUT: dictionary dictValues
#OUTPUT: dictionary dictValues
#DESCRIPTION: Initialize all string values within input dict to None datatype for new queries
def cleanStrings(dictValues):
    # Mutates dictValues in place (and also returns it):
    #   - '' becomes None (maps to SQL NULL)
    #   - other string values have single/double quotes stripped, a naive
    #     guard for the string-built SQL in databaseExistInsert.
    # NOTE: basestring exists only on Python 2; this module is Python 2 code.
    for key in dictValues.keys():
        if dictValues[key] == '':
            dictValues[key] = None
        else:
            if isinstance(dictValues[key], basestring):
                dictValues[key] = dictValues[key].replace("'", "")
                dictValues[key] = dictValues[key].replace('"', "")
    return dictValues
def cleanBlankStrings(dictValues):
    """Replace every empty-string value in dictValues with None, in place,
    and return the same dictionary."""
    for key, value in dictValues.items():
        if value == '':
            dictValues[key] = None
    return dictValues
#NAME: databaseInsert
#INPUT: psycopg2-db-handle databaseConnectionHandle, string databaseSchema, string databaseTable, collections-ordered dictionary dictValues
#OUTPUT: NONE
#DESCRIPTION: Insert dictValues keys AND values into database specified
def databaseInsert(databaseConnectionHandle, databaseSchema, databaseTable, dictValues):
    # Builds "INSERT INTO schema.table (k1, k2, ...) VALUES (%s, %s, ...)".
    # Values are bound as parameters (safe), but schema/table/column names
    # are concatenated into the SQL text -- they must come from trusted code.
    # NOTE: dict.iterkeys() is Python 2 only; dictValues must be an
    # OrderedDict so keys and values() line up.
    cur = databaseConnectionHandle.cursor()
    query = "INSERT INTO " + databaseSchema + "." + databaseTable + " ("
    #Creating SQL query statement
    for key in dictValues.iterkeys():
        query += key
        query+=", "
    #Strip the trailing ", "
    query = query[:-2]
    query += ") VALUES ("
    for i in range(0,len(dictValues)):
        query += "%s, "
    query = query[:-2]
    query += ");"
    dictValues = cleanBlankStrings(dictValues)
    try:
        logger.info("query is " + query + "\n")
        logger.info("dictValues.values() is " + str(dictValues.values()) + "\n")
        cur.execute(query, dictValues.values())
        logger.info("%s row(s) inserted!" % cur.rowcount)
        databaseConnectionHandle.commit()
    except psycopg2.OperationalError as e:
        logger.error(('Unable to INSERT!\n{0}').format(e))
        sys.exit(1)
#NAME: databaseExistInsert
#INPUT: psycopg2-db-handle databaseConnectionHandle, string databaseSchema, string databaseTable, dictionary dictValues
#OUTPUT: integer rowsInserted (0 when an identical row already exists)
#DESCRIPTION: Insert the row described by dictValues only if no identical row exists yet (INSERT ... SELECT ... WHERE NOT EXISTS).
def databaseExistInsert(databaseConnectionHandle, databaseSchema, databaseTable, dictValues):
    # NOTE(security): unlike databaseInsert, the VALUES here are interpolated
    # directly into the SQL string; cleanStrings only strips quote characters,
    # which is not a safe substitute for parameterized queries.
    rowsInserted = 0
    value = None
    cur = databaseConnectionHandle.cursor()
    query = "INSERT INTO " + databaseSchema + "." + databaseTable + " ("
    query2 = ""
    query3 = ""
    dictValues = cleanStrings(dictValues)
    #Creating SQL query statement
    # query  collects the column list, query2 the quoted values,
    # query3 the "key='value' AND ..." existence predicate.
    for key, value in dictValues.items():
        if value is not None:
            query += key
            query +=", "
            query2 +="'" + value + "'"
            query2 +=", "
            query3 += key + "='" + value + "'"
            query3 +=" AND "
    #Strip the trailing ", ", ", " and " AND " respectively
    query = query[:-2]
    query2 = query2[:-2]
    query3 = query3[:-5]
    query += ") SELECT " + query2 + " WHERE NOT EXISTS (SELECT * FROM " + databaseSchema + "." + databaseTable + " WHERE " + query3 + ");"
    try:
        logger.info("query is " + query + "\n")
        logger.info("dictValues.values() is " + str(dictValues.values()) + "\n")
        cur.execute(query)
        logger.info("%s row(s) inserted!" % cur.rowcount)
        rowsInserted = cur.rowcount
        databaseConnectionHandle.commit()
    except psycopg2.OperationalError as e:
        logger.error(('Unable to INSERT!\n{0}').format(e))
        sys.exit(1)
    return rowsInserted
#NAME: databaseUpdate
#INPUT: psycopg2-db-handle databaseConnectionHandle, string databaseSchema, 
#       string databaseTable, collections-ordered dictionary dictSetValues, 
#       collections-ordered dictionary dictWhereValues
#OUTPUT: NONE
#DESCRIPTION: Update dictSetValues keys AND values into database specified where row fits the criteria defined in dictWhereValues
def databaseUpdate(databaseConnectionHandle, databaseSchema, databaseTable, dictSetValues, dictWhereValues):
    # Builds "UPDATE schema.table SET k=%s, ... WHERE k=%s AND ...".
    # Values are parameterized; schema/table/column names are concatenated
    # and must come from trusted code.
    # NOTE: iterkeys() and list-concatenating dict.values() are Python 2
    # only; both dicts must be OrderedDicts so keys and values line up.
    cur = databaseConnectionHandle.cursor()
    query = "UPDATE " + databaseSchema + "." + databaseTable + " SET "
    #Creating SQL query statement
    for key in dictSetValues.iterkeys():
        query += key
        query +="=%s, "
    #Remove the trailing ", "
    query = query[:-2]
    query += " WHERE "
    for key in dictWhereValues.iterkeys():
        query+= key
        query +="=%s AND "
    #Remove the trailing "AND " (one space is left before execution)
    query = query[:-4]
    dictSetValues = cleanStrings(dictSetValues)
    dictWhereValues = cleanStrings(dictWhereValues)
    updateExecutionList = dictSetValues.values() + dictWhereValues.values()
    logger.info("dictSetValues.values() is " + str(dictSetValues.values()) + "\n")
    logger.info("dictWhereValues.values() is " + str(dictWhereValues.values()) + "\n")
    logger.info("updateExecutionList is " + str(updateExecutionList) + "\n")
    try:
        logger.info("query is " + query + "\n")
        cur.execute(query, updateExecutionList)
        logger.info("%s row(s) inserted!" % cur.rowcount)
        databaseConnectionHandle.commit()
    except psycopg2.OperationalError as e:
        logger.error(('Unable to UPDATE!\n{0}').format(e))
        sys.exit(1)
#NAME: databaseWhitelist
#INPUT: psycopg2-db-handle databaseConnectionHandle, string databaseSchema, string databaseTable, string groupTransaction, string columnCounted, integer orderRow
#OUTPUT: Returns result list if successful
#DESCRIPTION: Count a specific column uniquely and sorts results by ascending or descending count
#DECRIPTION: example of a query=>
#SELECT DISTINCT col1, COUNT(DISTINCT col2)
#FROM schema.table GROUP BY col1
#ORDER BY count DESC;
def databaseWhitelist(databaseConnectionHandle, project, databaseSchema, databaseTable, groupTransaction, columnCounted, orderRow=0):
    # NOTE(security): project, groupTransaction, columnCounted and the
    # schema/table names are interpolated straight into the SQL text --
    # callers must only pass trusted values (no parameterization here).
    logger.info("PROJECT IS " + project)
    try:
        cur = databaseConnectionHandle.cursor()
    except psycopg2.OperationalError as e:
        logger.error(('Unable to connect!\n{0}').format(e))
        sys.exit(1)
    query = "SELECT DISTINCT "
    query += groupTransaction + ", COUNT (DISTINCT "
    query += columnCounted + ") AS Count, "
    query += "string_agg(DISTINCT " + columnCounted + ", ', ') AS IncidentFolder_List "
    query += "FROM " + databaseSchema + "." + databaseTable
    query += " WHERE imagename IN (SELECT DISTINCT imagename FROM project.project_image_mapping WHERE projectname='" + project + "')"
    query += " GROUP BY " + groupTransaction
    query += " ORDER BY count "
    # orderRow == 0 (default) sorts by descending count, anything else ascending.
    if orderRow == 0:
        query += "DESC;"
    else:
        query += "ASC;"
    try:
        logger.info("query is " + query + "\n")
        cur.execute(query)
    except psycopg2.OperationalError as e:
        logger.error(('Unable to SELECT!\n{0}').format(e))
        sys.exit(1)
    rows = cur.fetchall()
    databaseConnectionHandle.commit()
    return rows
#NAME: main
#INPUT: NONE
#OUTPUT: NONE
#DESCRIPTION: Provide sample code to show how the functions are called.
def main():
    #database configuration for project MAGNETO
    #These are sort of like constant, hence the CAPITALS.
    #Variables should NOT be in caps.
    #Sample test code
    #Note that all dictValues needs to be an ordered dictionary!!!
    # NOTE(review): DATABASE is not defined in this scope (it is a local of
    # databaseInitiate), so running main() as written raises a NameError.
    dbhandle = databaseConnect(DATABASE['HOST'], DATABASE['DATABASENAME'], DATABASE['USER'], DATABASE['PASSWORD'])
    print "dbhandle is " + str(dbhandle) + "\n"
if __name__ == '__main__':
    main()
| true |
b9aa934b0db63350d40d1f0fcbe8ec15ad966173 | Python | msesemov/shelk | /advpyneng/02_oop_special_methods/task_2_1.py | UTF-8 | 2,948 | 3.8125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
'''
Задание 2.1
Скопировать класс IPv4Network из задания 1.1 и добавить ему все методы,
которые необходимы для реализации протокола последовательности (sequence):
* __getitem__, __len__, __contains__, __iter__
* index, count - должны работать аналогично методам в списках и кортежах
И оба метода, которые отвечают за строковое представление экземпляров
класса IPv4Network.
Существующие методы и атрибуты (из задания 1.1) можно менять, при необходимости.
Пример создания экземпляра класса:
In [2]: net1 = IPv4Network('8.8.4.0/29')
Проверка методов:
In [3]: for ip in net1:
...: print(ip)
...:
8.8.4.1
8.8.4.2
8.8.4.3
8.8.4.4
8.8.4.5
8.8.4.6
In [4]: net1[2]
Out[4]: '8.8.4.3'
In [5]: net1[-1]
Out[5]: '8.8.4.6'
In [6]: net1[1:4]
Out[6]: ('8.8.4.2', '8.8.4.3', '8.8.4.4')
In [7]: '8.8.4.4' in net1
Out[7]: True
In [8]: net1.index('8.8.4.4')
Out[8]: 3
In [9]: net1.count('8.8.4.4')
Out[9]: 1
In [10]: len(net1)
Out[10]: 6
Строковое представление:
In [13]: net1
Out[13]: IPv4Network(8.8.4.0/29)
In [14]: str(net1)
Out[14]: 'IPv4Network 8.8.4.0/29'
'''
import ipaddress
class IPv4Network:
    """An IPv4 subnet that behaves like a read-only sequence of its usable
    host addresses (as strings) and can track allocated addresses."""
    def __init__(self, addr):
        # addr is CIDR notation, e.g. '8.8.4.0/29'.
        subnet = ipaddress.ip_network(addr)
        self.subnet = subnet
        self.address = addr.split('/')[0]
        self.mask = subnet.prefixlen
        self.broadcast = subnet.broadcast_address
        # Addresses handed out via allocate().
        self.allocated = ()
    def hosts(self):
        """Return all usable host addresses as a tuple of strings."""
        # tuple(generator) replaces the original repeated tuple
        # concatenation, which was quadratic in the number of hosts.
        return tuple(str(ip) for ip in self.subnet.hosts())
    def allocate(self, address):
        """Mark *address* (a string) as allocated."""
        self.allocated += (address,)
    def unassigned(self):
        """Hosts not yet allocated, in subnet order.

        More robust than the original list.remove() loop: unknown or
        duplicate entries in self.allocated no longer raise ValueError.
        """
        return tuple(host for host in self.hosts() if host not in self.allocated)
    def __str__(self):
        return f'IPv4Network {self.subnet}'
    def __repr__(self):
        return f'IPv4Network({self.subnet})'
    def __iter__(self):
        return iter(self.hosts())
    def __getitem__(self, index):
        # Supports ints (incl. negative) and slices, delegated to the tuple.
        return self.hosts()[index]
    def index(self, item):
        return self.hosts().index(item)
    def count(self, item):
        return self.hosts().count(item)
    def __len__(self):
        return len(self.hosts())
    def __contains__(self, item):
        return item in self.hosts()
if __name__ == '__main__':
    # Smoke test mirroring the /29 examples from the task description above.
    net1 = IPv4Network('8.8.4.0/29')
    for ip in net1:
        print(ip)
    print(net1[2])
    print(net1[1:4])
    print('8.8.4.4' in net1)
    print(net1.index('8.8.4.4'))
    print(net1.count('8.8.4.4'))
    print(len(net1))
    print(net1)
    print(str(net1))
dd3f42afc2d910155e2459a5a1242111b3042b66 | Python | philippkueng/pingini | /spreadsheetapitest.py | UTF-8 | 1,802 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python
#### Example used as starting point : http://gdata-python-client.googlecode.com/hg/samples/spreadsheets/spreadsheetExample.py
import gdata.spreadsheet.service
import gdata.service
import atom.service
import gdata.spreadsheet
# Authenticate with Google's (long-deprecated) ClientLogin mechanism.
# NOTE(security): the account credentials are hard-coded in source; move
# them to a config file or environment variables and never commit real ones.
gd_client = gdata.spreadsheet.service.SpreadsheetsService()
gd_client.email = 'pingini1@gmail.com'
gd_client.password = 'xxxx'
gd_client.ProgrammaticLogin()
# Utility function to print some feed information:
def _PrintFeed(feed):
    # Dispatches on the concrete feed type. Output uses Python 2 print
    # statements (this script is Python 2 code).
    for i, entry in enumerate(feed.entry):
        if isinstance(feed, gdata.spreadsheet.SpreadsheetsCellsFeed):
            # Cells feed: "<cell name> <cell value>".
            print '%s %s\n' % (entry.title.text, entry.content.text)
        elif isinstance(feed, gdata.spreadsheet.SpreadsheetsListFeed):
            # List (row) feed: index, title and content, then every column.
            print '%s %s %s' % (i, entry.title.text, entry.content.text)
            # Print this row's value for each column (the custom dictionary is
            # built using the gsx: elements in the entry.)
            print 'Contents:'
            for key in entry.custom:
                print '  %s: %s' % (key, entry.custom[key].text)
            print '\n',
        else:
            # Any other feed type: just index and title.
            print '%s %s\n' % (i, entry.title.text)
# Utility to get the last part of a URL, usually useful as some sort of id:
def GetLastIdPart(entryWithId):
    # rsplit from the right so only the final path segment is produced;
    # equivalent to the original split + [len - 1] indexing, including the
    # no-slash case (returns the whole string).
    return entryWithId.id.text.rsplit('/', 1)[-1]
# Finds the first spreadsheets in the list of all spreadsheets:
sfeed = gd_client.GetSpreadsheetsFeed()
#_PrintFeed(sfeed)
curr_key = GetLastIdPart(sfeed.entry[0])
# Finds the first spreadsheet tab in the list of all tabs of the spreadsheet:
wsfeed = gd_client.GetWorksheetsFeed(curr_key)
ws_key = GetLastIdPart(wsfeed.entry[0])
# Returns a list feed of all non-title rows in the spreadsheet and prints out the count of the entries:
listfeed = gd_client.GetListFeed(curr_key, ws_key)
# Python 2 print statement.
print len(listfeed.entry)
| true |
ff648611b26f79bcefaad9e7094ff24a6431523c | Python | amz049/IS-Programming-samples | /My original work/Test-1-program-paramiter-to-area.py | UTF-8 | 963 | 4.40625 | 4 | [] | no_license | # Setup and input
import math

# Read the shared perimeter; negative values are rejected up front.
perimInital = float(input("Please type in the perimeter: "))
if perimInital < 0:
    print("Sorry, you can only use positive numbers in this program")
    quit()

# Square: side = P / 4, area = side ** 2.
side = perimInital / 4
areaSquare = round(side ** 2, 2)
print("A square with that perimeter would have an area of", areaSquare)

# Circle: radius = P / (2 * pi), area = pi * r ** 2.
radius = perimInital / (2 * math.pi)
areaCircle = round(math.pi * radius ** 2, 2)
print("A circle with that perimeter would have an area of", areaCircle)

# Equilateral triangle: side = P / 3, area = (sqrt(3) / 4) * side ** 2.
edge = perimInital / 3
areaTriangle = round((math.sqrt(3) / 4) * (edge ** 2), 2)
print("A triangle with that perimeter would have an area of", areaTriangle)
cbd512879886770c8f4c58d64bfa5dc68c72d8ec | Python | EmanueleC/Appunti-intelligenza-artificiale | /res/code/neuralNets/scratchNN.py | UTF-8 | 1,692 | 3.390625 | 3 | [] | no_license | from matplotlib import pyplot as plt
import numpy as np
# Training set: rows of [x1, x2, label]; label 1 is drawn red and 0 blue
# (see scatter() below).
data = [[3, 1.5, 1], [2, 1, 0], [4, 1.5, 1], [3, 1, 0], [3.5, 0.5, 1], [2, 0.5, 0], [5.5, 1, 1], [1, 1, 0]]
# Unlabelled point to classify after training.
mystery = [4.5, 1]
# weights (randomly initialised for a single two-input neuron)
w1 = np.random.randn()
w2 = np.random.randn()
# bias
b = np.random.randn()
def sigmoid(x):
    """Logistic function: squashes x (scalar or array) into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
def dSigmoid(x):
    """Derivative of the sigmoid: s(x) * (1 - s(x))."""
    s = sigmoid(x)
    return s * (1 - s)
def graph(formula, x_range):
    """Plot formula evaluated over x_range. (Never called in this script.)"""
    xs = np.array(x_range)
    plt.plot(xs, formula(xs))
# scatter data
def scatter():
    """Draw each training point: blue when its label is 0, red otherwise."""
    plt.axis([0, 6, 0, 6])
    plt.grid()
    for x1, x2, label in data:
        plt.scatter(x1, x2, c='b' if label == 0 else 'r')
# training loop: stochastic gradient descent on a single sigmoid neuron.
learningRate = 0.2
costs = []
for i in range(50000):
    # Pick a random training point each step.
    ri = np.random.randint(len(data))
    point = data[ri]

    # Forward pass.
    z = point[0]*w1 + point[1]*w2 + b
    pred = sigmoid(z)
    target = point[2]
    cost = np.square(pred - target)
    if (i % 100 == 0):
        print(cost)
        costs.append(cost)

    # Backward pass (chain rule): dCost/dw = dCost/dpred * dpred/dz * dz/dw.
    dCost = 2 * (pred - target)
    dPred = dSigmoid(z)
    dz_dw1 = point[0]
    dz_dw2 = point[1]
    dz_db = 1
    dCost_dz = dCost * dPred
    # BUG FIX: the original multiplied dCost (not dCost_dz) by dz_dw*,
    # silently dropping the sigmoid-derivative factor from the chain rule.
    dCost_dw1 = dCost_dz * dz_dw1
    dCost_dw2 = dCost_dz * dz_dw2
    dCost_db = dCost_dz * dz_db

    # update weights and bias
    w1 = w1 - learningRate * dCost_dw1
    w2 = w2 - learningRate * dCost_dw2
    b = b - learningRate * dCost_db

# Plot the (sub-sampled) training cost curve.
plt.clf()
plt.plot(costs)
plt.savefig('costs.png')
plt.clf()
scatter()

# Classify the mystery point with the trained neuron.
zMistery = mystery[0]*w1 + mystery[1]*w2 + b
# BUG FIX: threshold the sigmoid output at 0.5; the original compared the
# raw pre-activation z against 0.5, which is not the 0.5-probability cutoff.
if sigmoid(zMistery) < 0.5:
    color = 'b'
else:
    color = 'r'
plt.scatter(mystery[0], mystery[1], c=color)
print(w1,w2,b)
plt.savefig("scatter.png")
| true |
28283a5b5099f6a2ea3bced4f674e179c2cd6c06 | Python | keiffster/program-y | /test/programytest/storage/stores/sql/dao/test_error.py | UTF-8 | 604 | 2.546875 | 3 | [
"MIT"
] | permissive |
import unittest
from programy.storage.stores.sql.dao.error import Error
class ErrorTests(unittest.TestCase):

    def test_init(self):
        # Without an explicit id the repr reports 'n/a'.
        without_id = Error(error='error', file='file', start='100', end='200')
        self.assertIsNotNone(without_id)
        self.assertEqual(
            "<Error(id='n/a', error='error', file='file', start='100', end='200')>",
            str(without_id))

        # An explicit id is echoed back in the repr.
        with_id = Error(id='1', error='error', file='file', start='100', end='200')
        self.assertIsNotNone(with_id)
        self.assertEqual(
            "<Error(id='1', error='error', file='file', start='100', end='200')>",
            str(with_id))
| true |
b2b51a9c23cc4932db3de2ab2b7ba35209840cd5 | Python | NHopewell/coding-practice-problems | /Algorithms/leetcode/strings/letterCombinationsOfAPhoneNumber.py | UTF-8 | 1,518 | 3.84375 | 4 | [] | no_license | """
return all possible letter combinations when given string of digits
Example
-------
Input: "23"
Output: ["ad","ae","af","bd","be","bf","cd","ce","cf"]
Input: digits = "2"
Output: ["a","b","c"]
"""
import pytest
from typing import List
import math
def get_letter_combinations(digits: str) -> List[str]:
    """Return every phone-keypad letter combination *digits* can spell.

    Combinations are produced in keypad order (first digit varies slowest),
    matching the LeetCode expected ordering. An empty input yields [].

    BUG FIX: the original only handled inputs of length 1 or 2 (extra digits
    were silently ignored) and raised IndexError on "". This version handles
    any number of digits by extending each partial combination per digit.
    """
    numbers = '2 3 4 5 6 7 8 9'.split()
    letters = [list('abc'), list('def'), list('ghi'),
               list('jkl'), list('mno'), list('pqrs'),
               list('tuv'), list('wxyz')]
    phone_map = {k: v for (k, v) in zip(numbers, letters)}

    if not digits:
        return []

    combinations = ['']
    for digit in digits:
        # cross every partial prefix with this digit's letters
        combinations = [prefix + letter
                        for prefix in combinations
                        for letter in phone_map[digit]]
    return combinations
############################
def test_get_letter_combinations_one():
    """Two digits must produce all 3x3 pairings in keypad order."""
    expected = ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]
    assert get_letter_combinations("23") == expected
def test_get_letter_combinations_two():
    """A single digit maps straight to its three letters."""
    assert get_letter_combinations("2") == ["a", "b", "c"]
# run this file's tests directly via pytest when executed as a script
if __name__ == "__main__":
    pytest.main()
    #get_letter_combinations("23")
| true |
7074b44433d7fd027c7c978c1868434f1c64be7d | Python | lihaoliu-cambridge/Shuusui | /parse_log/test/test_parse_log.py | UTF-8 | 1,826 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from first_parse_log import find_all_files
from first_parse_log import first_parse_log
import unittest
import re
import os
__author__ = 'LH Liu'
class TestParseLog(unittest.TestCase):
    """Unit tests for first_parse_log.find_all_files and first_parse_log."""

    def _list_equal(self, list_one, list_another):
        """Fail unless the two iterables contain the same elements (order-insensitive)."""
        set_one = set(list_one)
        set_another = set(list_another)
        msg = '{} != {}'.format(list_one, list_another)
        if len(set_one) != len(set_another):
            raise self.failureException(msg)
        for element in set_one:
            if element not in set_another:
                raise self.failureException(msg)

    def test_find_all_files(self):
        """find_all_files maps matching log paths to their YYYYMMDD date."""
        ori_path = os.path.join(os.path.dirname(__file__), 'data/input')
        log_file_regular_expression = re.compile(r'web\.analytics\.access_(\d{6})(\d{2})\d{4}\.log')
        # NOTE(review): this expected key is an absolute Windows path from the
        # original author's machine; the test is not portable as written.
        result = {
            'E:/PythonProject/Work/work_2/alter_code/data/input\\web.analytics.access_201606250000.log': '20160625'
        }
        self._list_equal(find_all_files(ori_path, log_file_regular_expression), result)
        # an empty directory must produce an empty mapping
        ori_path = os.path.join(os.path.dirname(__file__), 'data/output')
        log_file_regular_expression = re.compile(r'web\.analytics\.access_(\d{6})(\d{2})\d{4}\.log')
        result = {}
        self._list_equal(find_all_files(ori_path, log_file_regular_expression), result)

    def test_parse_log(self):
        """first_parse_log extracts the client IP from the first log line."""
        ori_path = os.path.join(os.path.dirname(__file__), 'data/input', 'web.analytics.access_201606250000.log')
        log_parse_method = {'client_ip': re.compile(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s\-\s\-\s\[')}
        result = {'client_ip': '183.69.225.138'}
        # FIX: use assertEqual -- assertEquals is a deprecated alias (removed
        # in Python 3.12). The .next() call is Python 2 generator syntax,
        # consistent with this file's Python 2 style.
        self.assertEqual(first_parse_log(ori_path, log_parse_method).next(), result)
# allow running this test module directly
if __name__ == '__main__':
    unittest.main()
| true |
class Stack(object):
    """Fixed-capacity LIFO stack backed by a preallocated list.

    `top` indexes the current top element; -1 means the stack is empty.
    Overflow and underflow both raise IndexError.
    """

    def __init__(self, length):
        self.S = [None] * length
        self.top = -1

    def is_empty(self):
        """Return True when nothing is on the stack."""
        return self.top == -1

    def push(self, element):
        """Place *element* on top; raise IndexError when the stack is full."""
        if self.top + 1 == len(self.S):
            raise IndexError('stack overflow')
        self.top += 1
        self.S[self.top] = element

    def pop(self):
        """Remove and return the top element; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('stack underflow')
        value = self.S[self.top]
        # clear the slot so popped objects are not kept alive
        self.S[self.top] = None
        self.top -= 1
        return value
| true |
27e5a030da22eb12a019263ace89e8b53fe0122b | Python | scl2589/Algorithm_problem_solving | /SWEA/3752_가능한 시험 점수/3752_가능한 시험 점수7.py | UTF-8 | 522 | 3.078125 | 3 | [] | no_license | #Fail (Runtime error)
'''
Original verdict: Fail (runtime error / time limit).

The original crashed on `list(int, ...)` and, even when fixed, appended
duplicate sums to a list on every step so the list grew without bound.
Keeping the reachable totals in a set removes duplicates as we go.
'''
T = int(input())
for tc in range(1, T+1):
    N = int(input())  # number of problems (read but otherwise unused)
    # BUG FIX: the original called list(int, input().split()), which raises
    # TypeError; map(int, ...) is the intended element-wise conversion.
    p = list(map(int, input().split()))  # per-problem scores
    scores = {0}  # 0 points: answering nothing is always possible
    for x in p:
        # every previously reachable total can also gain this problem's score
        scores |= {x + y for y in scores}
    print('#{} {}'.format(tc, len(scores)))
54c4e6255daf07af72fd2519815b937a73f09439 | Python | CornellCAC/arxiv-fulltext | /fulltext/services/metrics.py | UTF-8 | 4,000 | 2.59375 | 3 | [] | no_license | """Metric reporting for extractions."""
import os
import boto3
from flask import _app_ctx_stack as stack
from typing import Callable
from fulltext.context import get_application_config, get_application_global
from fulltext import logging
from fulltext.services import credentials
logger = logging.getLogger(__name__)
class MetricsSession(object):
    """Publishes processing metrics to CloudWatch under arXiv/FullText."""

    namespace = 'arXiv/FullText'

    def __init__(self, endpoint_url: str=None, aws_access_key: str=None,
                 aws_secret_key: str=None, aws_session_token: str=None,
                 region_name: str=None, verify: bool=True) -> None:
        """Store the AWS settings and build the boto3 CloudWatch client."""
        self.aws_access_key = aws_access_key
        self.aws_secret_key = aws_secret_key
        self.aws_session_token = aws_session_token
        self.verify = verify
        self.endpoint_url = endpoint_url
        self.cloudwatch = boto3.client(
            'cloudwatch',
            region_name=region_name,
            endpoint_url=endpoint_url,
            aws_access_key_id=aws_access_key,
            aws_secret_access_key=aws_secret_key,
            aws_session_token=aws_session_token,
            verify=verify)

    def report(self, metric: str, value: object, units: str=None,
               dimensions: dict=None) -> None:
        """Put a single datum for *metric* to CloudWatch.

        Parameters
        ----------
        metric : str
        value : str, int, float
        units : str or None
        dimensions : dict or None
            Optional name -> value dimension pairs attached to the datum.
        """
        datum = {'MetricName': metric, 'Value': value}
        if units is not None:
            datum['Unit'] = units
        if dimensions is not None:
            datum['Dimensions'] = [
                {'Name': dim_name, 'Value': dim_value}
                for dim_name, dim_value in dimensions.items()
            ]
        self.cloudwatch.put_metric_data(Namespace=self.namespace,
                                        MetricData=[datum])
def init_app(app: object = None) -> None:
    """Seed the application config with default CloudWatch settings."""
    config = get_application_config(app)
    defaults = {
        'CLOUDWATCH_ENDPOINT': 'https://monitoring.us-east-1.amazonaws.com',
        'AWS_REGION': 'us-east-1',
        'CLOUDWATCH_VERIFY': 'true',
    }
    for key, default in defaults.items():
        config.setdefault(key, default)
def get_session(app: object = None) -> MetricsSession:
    """Create a new :class:`.MetricsSession`.

    Credentials come from the on-disk instance credentials when available,
    falling back to the application configuration.
    """
    config = get_application_config(app)
    try:
        access_key, secret_key, token = credentials.get_credentials()
    except IOError as e:
        access_key, secret_key, token = None, None, None
        logger.debug('failed to load instance credentials: %s', str(e))
    if access_key is None or secret_key is None:
        access_key = config.get('AWS_ACCESS_KEY_ID', None)
        secret_key = config.get('AWS_SECRET_ACCESS_KEY', None)
        token = config.get('AWS_SESSION_TOKEN', None)
    endpoint_url = config.get('CLOUDWATCH_ENDPOINT', None)
    region_name = config.get('AWS_REGION', 'us-east-1')
    # BUG FIX: the original used bool(config.get(...)), but bool('false') is
    # True -- any non-empty string enabled verification, so it could never be
    # turned off. Treat (only) the string 'false', case-insensitive, as False.
    verify = str(config.get('CLOUDWATCH_VERIFY', 'true')).lower() != 'false'
    return MetricsSession(endpoint_url, access_key, secret_key, token,
                          region_name, verify=verify)
def current_session():
    """Get/create :class:`.MetricsSession` for this context.

    Outside an application context (g is None) a fresh, uncached session is
    returned; inside one, the session is memoized on ``g.metrics``.
    """
    g = get_application_global()
    if g is None:
        return get_session()
    if 'metrics' not in g:
        g.metrics = get_session()
    return g.metrics
def report(metric: str, value: object, units: str = None,
           dimensions: dict = None) -> None:
    """Put data for a metric to CloudWatch via the context's session.

    See :meth:`MetricsSession.report`.
    """
    session = current_session()
    return session.report(metric, value, units=units, dimensions=dimensions)
| true |
e0fc2776e2896cd8ed5a9068e857da512364a0e8 | Python | luxuguang-leo/everyday_leetcode | /00_leetcode/25.reverse-nodes-in-k-group.py | UTF-8 | 1,472 | 3.421875 | 3 | [] | no_license | #
# @lc app=leetcode id=25 lang=python
#
# [25] Reverse Nodes in k-Group
#
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reverseKGroup(self, head, k):
        """
        Reverse the nodes of a linked list k at a time; a final group shorter
        than k is left in its original order.
        :type head: ListNode
        :type k: int
        :rtype: ListNode
        """
        if not head:
            return None
        dummy = nextHead = ListNode(-1)
        l = r = dummy.next = head
        # Sliding-window idea: advance the right boundary r by k steps, then
        # reverse the sublist in [l, r); extend the left boundary to r and
        # repeat until fewer than k nodes remain (we reached the tail).
        while True:
            cnt = 0
            while r and cnt < k:
                cnt +=1
                r = r.next
            if cnt == k:
                # reverse the k nodes starting at l; after the loop `pre` is
                # the new head of this group and l is its new tail (pointing
                # at r, the start of the next group).
                pre, cur = r, l
                for _ in range(k):
                    nxt = cur.next
                    cur.next = pre
                    pre, cur = cur, nxt
                # nextHead is the tail of the previous group. Here we must:
                # 1. link the previous group's tail to this group's new head `pre`
                # 2. update the previous-tail pointer: the old l is now the
                #    tail of the current group, so nextHead becomes l
                # 3. move the left boundary l forward to r
                nextHead.next = pre
                nextHead = l
                l = r
            else:
                return dummy.next
| true |
def all_same(seq):
    """Return True when every element of *seq* equals the first one.

    An empty sequence counts as all-same (vacuously True).
    """
    reference = next(iter(seq), None)
    for element in seq:
        if not (element == reference):
            return False
    return True
| true |
2cd4c95d1092b370f3808f5dc62038d59c773cd8 | Python | USFDataVisualization/AffectiveTDA | /misc.py | UTF-8 | 2,004 | 2.84375 | 3 | [] | no_license | import os
# Named groups of facial-landmark subsections, from full face down to
# smaller combinations; each tuple lists keys understood by min_max below.
subsections = [
    ('leftEye', 'rightEye', 'leftEyebrow', 'rightEyebrow', 'nose', 'mouth', 'jawline'),
    ('leftEye', 'rightEye', 'leftEyebrow', 'rightEyebrow', 'nose', 'mouth'),
    ('leftEyebrow', 'rightEyebrow', 'nose'),
    ('leftEye', 'rightEye', 'nose'),
    ('nose', 'mouth')
]
def min_max(subsection, metric_or_nonmetric):
    """Return landmark data for *subsection*.

    'metric' selects the (start, end) index range of the subsection within
    the landmark array; any other value selects the list of (index, index)
    edge pairs used for the non-metric representation. Unknown subsection
    names yield None.
    """
    if metric_or_nonmetric == 'metric':
        table = {
            "leftEye": (0, 8),
            "rightEye": (8, 16),
            "leftEyebrow": (16, 26),
            "rightEyebrow": (26, 36),
            "nose": (36, 48),
            "mouth": (48, 68),
            "jawline": (68, 83),
        }
    else:
        table = {
            "leftEye": [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 0)],
            "rightEye": [(8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (15, 0)],
            "leftEyebrow": [(16, 17), (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23), (23, 24), (24, 25), (25, 0)],
            "rightEyebrow": [(26, 27), (27, 28), (28, 29), (29, 30), (30, 31), (31, 32), (32, 33), (33, 34), (34, 35), (35, 0)],
            "nose": [(36, 37), (37, 38), (38, 39), (39, 40), (40, 41), (41, 42), (42, 43), (43, 44), (44, 45), (45, 46), (46, 47)],
            "mouth": [(48, 49), (49, 50), (50, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 0), (60, 61), (61, 62), (62, 63), (63, 64), (64, 65), (65, 66), (66, 67), (67, 0)],
            "jawline": [(67, 68), (69, 70), (70, 71), (71, 72), (72, 73), (73, 74), (74, 75), (75, 76), (76, 77), (77, 78), (78, 79), (79, 80), (80, 81), (81, 82)],
        }
    return table.get(subsection)
def getFileNames(d, extension):
    """Recursively collect paths under *d* whose suffix (from the FIRST dot
    in the file name) equals *extension*, e.g. '.txt'."""
    matches = []
    for entry in os.listdir(d):
        full_path = d + '/' + entry
        if os.path.isdir(full_path):
            matches.extend(getFileNames(full_path, extension))
        elif entry[entry.find('.'):] == extension:
            matches.append(full_path)
    return matches
bff78adf512791ac0485333a4291e1b4e90da2f4 | Python | lukaszzalewski0001/hex_wars | /hex_wars/graphics.py | UTF-8 | 14,697 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | '''
Copyright 2019 Łukasz Zalewski.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import ctypes
import pygame
import random
import controls
import game
class NewGameOptions:
    """Holds the slider-chosen settings used when generating a fresh map."""

    def __init__(self, map_size, hex_number, players_number, die_sides_number,
                 max_dice_on_single_hex):
        self.hex_number = hex_number
        self.players_number = players_number
        self.die_sides_number = die_sides_number
        self.max_dice_on_single_hex = max_dice_on_single_hex
        # copy so later slider edits never mutate the caller's sequence
        self.map_size = [*map_size]
class Graphics:
    '''
    Graphics object. This class contains rendering functionality, controls
    etc.

    Layout is expressed in "right bar units": 1/32 of the window width and
    height; the right-hand control bar is 8 units wide.
    '''
    def __init__(self, map_, gameplay, window_size):
        # make pygame DPI-aware on Windows so fullscreen uses real pixels
        ctypes.windll.user32.SetProcessDPIAware()
        self.map_ = map_
        self.gameplay = gameplay
        self.window_size = window_size
        self.surface = pygame.display.set_mode(self.window_size,
                                               pygame.FULLSCREEN)
        # seed the new-game options from the current map/gameplay state
        self.new_game_options = NewGameOptions(
            self.map_.size,
            self.map_.hex_number,
            len(self.map_.players),
            self.gameplay.die_sides_number,
            self.gameplay.max_dice_on_single_hex)
        self.__init_fonts()
        self.__init_right_bar()
    def __init_fonts(self):
        '''Initializes fonts'''
        self.font_bar_size = 32
        self.font_bar = pygame.font.SysFont(
            'arial',
            self.font_bar_size,
            bold=1)
        self.font_sliders = pygame.font.SysFont('arial', 20, bold=1)
    def __init_right_bar(self):
        '''Initializes right bar and controls on it'''
        # one unit = 1/32 of the window in each dimension
        self.right_bar_units = (self.window_size[0] / 32,
                                self.window_size[1] / 32)
        # (x, y, width, height) of the 8-unit-wide bar on the right edge
        self.right_bar_rect = (
            self.window_size[0] - (self.right_bar_units[0] * 8),
            0, self.right_bar_units[0] * 8, self.window_size[1])
        self.__init_hex_right_bar_representation()
        self.__init_controls()
    def __init_hex_right_bar_representation(self):
        '''Initializes choosen hex visual representation'''
        self.attacking_hex_representation_middle = (
            self.window_size[0] - self.right_bar_rect[2] +
            self.right_bar_units[0], self.right_bar_units[1] * 2)
        self.attacking_hex_representation_polygon = \
            self.map_.calculate_hex_polygon(
                self.attacking_hex_representation_middle)
        self.defending_hex_representation_middle = (
            self.window_size[0] - self.right_bar_units[0],
            self.right_bar_units[1] * 2)
        self.defending_hex_representation_polygon = \
            self.map_.calculate_hex_polygon(
                self.defending_hex_representation_middle)
        # the power labels sit 4 units below the hex representations
        self.attacking_hex_power_representation_middle = \
            list(self.attacking_hex_representation_middle)
        self.attacking_hex_power_representation_middle[1] += \
            self.right_bar_units[1] * 4
        self.defending_hex_power_representation_middle = \
            list(self.defending_hex_representation_middle)
        self.defending_hex_power_representation_middle[1] += \
            self.right_bar_units[1] * 4
    def __init_controls(self):
        '''Initializes controls'''
        self.__init_sliders()
        self.__init_button()
    def __init_button(self):
        '''Initializes new map button'''
        # derived from the last slider's rect, shrunk and shifted downward
        self.button_new_map_rect = self.slider_max_dice_on_single_hex_rect[:]
        self.button_new_map_rect[0] += self.right_bar_units[0] * 1
        self.button_new_map_rect[1] += self.right_bar_units[1] * 2
        self.button_new_map_rect[2] -= self.right_bar_units[0] * 2
        self.button_new_map_rect[3] += self.right_bar_units[1] * 2
        self.button_new_map = controls.Button(
            self.button_new_map_rect, 'new map', self.font_sliders,
            (50, 50, 50))
    def __init_sliders(self):
        '''Initializes sliders'''
        self.sliders = []
        self.__init_slider_fight_time()
        self.__init_slider_map_size_x()
        self.__init_slider_map_size_y()
        self.__init_slider_hex_number()
        self.__init_slider_players_number()
        self.__init_slider_die_sides_number()
        self.__init_slider_max_dice_on_single_hex()
    def __init_slider_fight_time(self):
        '''Initializes slider responsible for fight time'''
        self.slider_fight_time_rect = \
            [self.right_bar_rect[0] + self.right_bar_units[0],
             self.right_bar_rect[1] + self.right_bar_units[1] * 10,
             self.right_bar_rect[2] - self.right_bar_units[0] * 2,
             self.right_bar_units[1] * 0.8]
        self.slider_fight_time = controls.Slider(
            self.slider_fight_time_rect, [0, -self.right_bar_units[1]],
            'fight time', self.font_sliders, 0, 5000,
            1000, 100, (0, 0, 0), (255, 150, 0))
        self.sliders.append(self.slider_fight_time)
    def __init_slider_map_size_x(self):
        '''Initializes slider responsible for map size x'''
        self.slider_map_size_x_rect = self.slider_fight_time_rect[:]
        self.slider_map_size_x_rect[1] += self.right_bar_units[1] * 6
        self.slider_map_size_x = controls.Slider(
            self.slider_map_size_x_rect,
            [0, -self.right_bar_units[1]], 'map size x',
            self.font_sliders, 5, 100,
            10, 1, (0, 0, 0), (255, 150, 0))
        self.sliders.append(self.slider_map_size_x)
    def __init_slider_map_size_y(self):
        '''Initializes slider responsible for map size y'''
        self.slider_map_size_y_rect = self.slider_map_size_x_rect[:]
        self.slider_map_size_y_rect[1] += self.right_bar_units[1] * 2
        self.slider_map_size_y = controls.Slider(
            self.slider_map_size_y_rect,
            [0, -self.right_bar_units[1]], 'map size y',
            self.font_sliders, 5, 100,
            10, 1, (0, 0, 0), (255, 150, 0))
        self.sliders.append(self.slider_map_size_y)
    def __init_slider_hex_number(self):
        '''Initializes slider responsible for hex number'''
        self.slider_hex_number_rect = self.slider_map_size_y_rect[:]
        self.slider_hex_number_rect[1] += self.right_bar_units[1] * 2
        self.slider_hex_number = controls.Slider(
            self.slider_hex_number_rect,
            [0, -self.right_bar_units[1]], 'hex number',
            self.font_sliders, 10, 100,
            25, 1, (0, 0, 0), (255, 150, 0))
        self.sliders.append(self.slider_hex_number)
    def __init_slider_players_number(self):
        '''Initializes slider responsible for players number'''
        self.slider_players_number_rect = self.slider_hex_number_rect[:]
        self.slider_players_number_rect[1] += self.right_bar_units[1] * 2
        self.slider_players_number = controls.Slider(
            self.slider_players_number_rect,
            [0, -self.right_bar_units[1]], 'players number',
            self.font_sliders, 2, 20,
            2, 1, (0, 0, 0), (255, 150, 0))
        self.sliders.append(self.slider_players_number)
    def __init_slider_die_sides_number(self):
        '''Initializes slider responsible for die sides number'''
        self.slider_die_sides_number_rect = self.slider_players_number_rect[:]
        self.slider_die_sides_number_rect[1] += self.right_bar_units[1] * 2
        self.slider_die_sides_number = controls.Slider(
            self.slider_die_sides_number_rect,
            [0, -self.right_bar_units[1]], 'die sides number',
            self.font_sliders, 2, 50,
            6, 1, (0, 0, 0), (255, 150, 0))
        self.sliders.append(self.slider_die_sides_number)
    def __init_slider_max_dice_on_single_hex(self):
        '''Initializes slider responsible for max dice on single hex'''
        self.slider_max_dice_on_single_hex_rect = \
            self.slider_die_sides_number_rect[:]
        self.slider_max_dice_on_single_hex_rect[1] += \
            self.right_bar_units[1] * 2
        self.slider_max_dice_on_single_hex = controls.Slider(
            self.slider_max_dice_on_single_hex_rect,
            [0, -self.right_bar_units[1]], 'max dice on single hex',
            self.font_sliders, 2, 100,
            8, 1, (0, 0, 0), (255, 150, 0))
        self.sliders.append(self.slider_max_dice_on_single_hex)
    def set_options_for_new_map(self):
        '''Reads values from new game options and sets options for new map'''
        self.map_.size = self.new_game_options.map_size
        map_area = self.map_.size[0] * self.map_.size[1]
        # hex_number option is a percentage of the map area
        self.map_.hex_number = int(
            self.new_game_options.hex_number / 100 * map_area)
        if self.new_game_options.players_number > self.map_.hex_number:
            self.new_game_options.players_number = self.map_.hex_number
        # rebuild the player list: first player is always red, the rest
        # get random mid-range colors
        self.map_.players = []
        self.map_.players.append(game.Player((255, 0, 0)))
        for player in range(self.new_game_options.players_number - 1):
            self.map_.players.append(game.Player((
                random.randrange(40, 215),
                random.randrange(40, 215),
                random.randrange(40, 215))))
        self.gameplay.die_sides_number = self.new_game_options.die_sides_number
        # NOTE(review): this writes gameplay.max_dice, while __init__ reads
        # gameplay.max_dice_on_single_hex -- confirm which attribute name
        # the gameplay object actually uses.
        self.gameplay.max_dice = self.new_game_options.max_dice_on_single_hex
    def read_sliders_values(self):
        '''Reads sliders values and saves them to new game options object'''
        self.gameplay.fight_time = self.slider_fight_time.value
        self.new_game_options.map_size[0] = self.slider_map_size_x.value
        self.new_game_options.map_size[1] = self.slider_map_size_y.value
        self.new_game_options.hex_number = self.slider_hex_number.value
        self.new_game_options.players_number = self.slider_players_number.value
        self.new_game_options.die_sides_number = \
            self.slider_die_sides_number.value
        self.new_game_options.max_dice_on_single_hex = \
            self.slider_max_dice_on_single_hex.value
    def render(self):
        '''Rendering'''
        self.surface.fill((0, 0, 0))
        self.__draw_visible_hexes()
        self.__draw_right_bar_hexes()
        self.__draw_right_bar_hexes_power()
        self.__draw_controls()
        pygame.display.flip()
    def __draw_visible_hexes(self):
        '''Draws all visible hexes'''
        font_dice_number_text_size = int(self.map_.side_length)
        font_dice_number_text = pygame.font.SysFont(
            'timesnewroman', font_dice_number_text_size, 1)
        for hex_ in self.map_.get_visibile_hex_list(self.right_bar_rect):
            # the currently attacking hex is drawn as an outline only
            if hex_ == self.gameplay.attacking_hex:
                pygame.draw.lines(self.surface, hex_.player.color, True,
                                  hex_.polygon, 1)
            else:
                pygame.draw.polygon(self.surface, hex_.player.color,
                                    hex_.polygon)
            dice_number_text = font_dice_number_text.render(
                str(hex_.dice_number), True, (255, 255, 255))
            self.surface.blit(dice_number_text, (
                hex_.middle[0] - font_dice_number_text_size / 4,
                hex_.middle[1] - font_dice_number_text_size / 2))
        # opaque background for the right bar, drawn over any hexes beneath it
        pygame.draw.rect(self.surface, (40, 40, 40), self.right_bar_rect)
    def __draw_right_bar_hexes(self):
        '''Draws choosen hexes representation on right bar'''
        if self.gameplay.attacking_hex:
            pygame.draw.polygon(
                self.surface,
                self.gameplay.attacking_hex.player.color,
                self.attacking_hex_representation_polygon)
            attacking_hex_text = self.font_bar.render(
                str(self.gameplay.attacking_hex.dice_number),
                True, (255, 255, 255))
            self.surface.blit(attacking_hex_text, (
                self.attacking_hex_representation_middle[0] -
                self.font_bar_size / 4,
                self.attacking_hex_representation_middle[1] -
                self.font_bar_size / 2))
        if self.gameplay.defending_hex:
            pygame.draw.polygon(
                self.surface,
                self.gameplay.defending_hex.player.color,
                self.defending_hex_representation_polygon)
            defending_hex_text = self.font_bar.render(
                str(self.gameplay.defending_hex.dice_number),
                True, (255, 255, 255))
            self.surface.blit(defending_hex_text, (
                self.defending_hex_representation_middle[0] -
                self.font_bar_size / 4,
                self.defending_hex_representation_middle[1] -
                self.font_bar_size / 2))
    def __draw_right_bar_hexes_power(self):
        '''Draws choosen hexes power representation on right bar'''
        if self.gameplay.attacking_hex_power:
            attacking_hex_power_text = self.font_bar.render(
                str(self.gameplay.attacking_hex_power), True,
                self.gameplay.attacking_hex.player.color)
            self.surface.blit(attacking_hex_power_text, (
                self.attacking_hex_power_representation_middle[0] -
                self.font_bar_size / 4,
                self.attacking_hex_power_representation_middle[1] -
                self.font_bar_size / 2))
        if self.gameplay.defending_hex_power:
            defending_hex_power_text = self.font_bar.render(
                str(self.gameplay.defending_hex_power), True,
                self.gameplay.defending_hex.player.color)
            self.surface.blit(defending_hex_power_text, (
                self.defending_hex_power_representation_middle[0] -
                self.font_bar_size / 4,
                self.defending_hex_power_representation_middle[1] -
                self.font_bar_size / 2))
    def __draw_controls(self):
        '''Draws controls'''
        for slider in self.sliders:
            slider.render(self.surface)
        self.button_new_map.render(self.surface)
| true |
448f9134dc762884ee8efad6dd4189ca32751a42 | Python | rmcolq/DPhil_analysis | /scripts/filter_bad_sam.py | UTF-8 | 560 | 2.765625 | 3 | [] | no_license | import pysam
import argparse
def filter_sam(sam_file):
    """Copy alignments from *sam_file* into filtered.bam, dropping any record
    that has no CIGAR string."""
    reader = pysam.AlignmentFile(sam_file, "r", check_sq=False)
    writer = pysam.AlignmentFile("filtered.bam", "wb", template=reader)
    for record in reader.fetch(until_eof=True):
        if record.cigartuples is not None:
            writer.write(record)
# command-line entry point: --sam points at the input SAM file
parser = argparse.ArgumentParser(description='Filters out any lines in SAM which have no cigar, and writes as BAM')
parser.add_argument('--sam', type=str,
                    help='Input SAM')
args = parser.parse_args()
filter_sam(args.sam)
| true |
59de3294dba29675aa2590534a20da7ef0db0187 | Python | shadownater/CMPUT404lab2 | /client.py | UTF-8 | 734 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python
# Python 2 script: issue a raw HTTP/1.0 GET to www.google.com and print the
# full response (status line, headers and body).
import socket
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
# AF_INET means we want an IPv4 socket
# SOCK_STREAM means we want a TCP socket
clientSocket.connect( ("www.google.com", 80) ) #address, port
#accepts tuples, which is why there's double (())
#has to do with C programming and structs lol
request = "GET / HTTP/1.0\r\n\r\n" #first / means location on server,
#HTTP... is protocol number, then DOS-style new lines
clientSocket.sendall(request) #send request to the server
#get response back from Google, reading 1024-byte chunks until the server
#closes the connection (recv returns an empty string)
response = bytearray()
while True:
    part = clientSocket.recv(1024)
    if (part):
        response.extend(part)
    else:
        break
print response
| true |
478472dabc5919a85c8e03142ce3966784154052 | Python | frestea09/latihan-python-november | /src/belajarTuple.py | UTF-8 | 136 | 2.578125 | 3 | [] | no_license | from __future__ import print_function
def main():
    """Demonstrate tuple indexing: print the second element of a tuple."""
    sample = (1, 2, 3, 4, 5)
    print(sample[1])


if __name__ == '__main__':
    main()
f852538a19d75d9a05f2484b3cf8f5d4e68a102f | Python | scotttct/tamuk | /Python/Python_Abs_Begin/Mod2_Functions/test.py | UTF-8 | 212 | 3.859375 | 4 | [] | no_license | # def add_numbers(num_1, num_2 = 10):
# return num_1 + num_2
# print(add_numbers(100))
def low_case(words_in):
    """Return *words_in* with every character converted to lowercase."""
    lowered = words_in.lower()
    return lowered
# demo: lowercase a sample sentence and print it
words_lower = low_case("Return THIS lower")
print(words_lower)
| true |
e5b96a9086458d2ffd9af65fd0a8ddce050c07ff | Python | kgerasimov1/ForTheHorde | /model/application.py | UTF-8 | 2,915 | 2.546875 | 3 | [] | no_license | from model.user import User
from model.film import Film
from pages.internal_page import InternalPage
from pages.login_page import LoginPage
from pages.user_management_page import UserManagementPage
from pages.user_profile_page import UserProfilePage
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import *
from selenium.webdriver.common.by import By
class Application(object):
    """Page-object facade over the application under test (a film library
    with user management), driving it through a Selenium WebDriver."""
    def __init__(self, driver, base_url):
        driver.get(base_url)
        self.wait = WebDriverWait(driver, 10)
        self.login_page = LoginPage(driver, base_url)
        self.internal_page = InternalPage(driver, base_url)
        self.user_profile_page = UserProfilePage(driver, base_url)
        self.user_management_page = UserManagementPage(driver, base_url)
    def login(self, user):
        """Fill the login form with *user*'s credentials and submit."""
        lp = self.login_page
        # NOTE(review): `is_this_page` is accessed, not called -- presumably a
        # property that waits/asserts the page is shown; confirm in LoginPage.
        lp.is_this_page
        lp.username_field.send_keys(user.username)
        lp.password_field.send_keys(user.password)
        lp.submit_button.click()
    def ensure_login_as(self, user):
        """Make sure *user* is logged in, switching accounts if needed."""
        element = self.wait.until(presence_of_element_located((By.CSS_SELECTOR, "nav, #loginform")))
        if element.tag_name == "nav":
            # we are on internal page
            if self.is_logged_in_as(user):
                return
            else:
                self.logout()
        self.login(user)
    def logout(self):
        """Log out the current user, accepting the confirmation alert."""
        self.internal_page.logout_button.click()
        self.wait.until(alert_is_present()).accept()
    def ensure_logout(self):
        """Log out only if someone is currently logged in."""
        element = self.wait.until(presence_of_element_located((By.CSS_SELECTOR, "nav, #loginform")))
        if element.tag_name == "nav":
            self.logout()
    def add(self, film):
        """Create a new movie entry from *film*'s name and year."""
        ip = self.internal_page
        ip.add_movie_button.click()
        ip.title_field_is_visible
        ip.title_field.clear()
        ip.title_field.send_keys(film.name)
        ip.year_field.clear()
        ip.year_field.send_keys(film.year)
        ip.save_button.click()
    def remove(self):
        """Delete the currently opened movie, accepting the confirmation alert."""
        self.internal_page.remove_button.click()
        self.wait.until(alert_is_present()).accept()
    def is_logged_in(self):
        """True when the internal (post-login) page is shown."""
        return self.internal_page.is_this_page
    def is_logged_in_as(self, user):
        """True when the internal page is shown AND the profile matches *user*."""
        return self.is_logged_in() \
               and self.get_logged_user().username == user.username
    def is_not_logged_in(self):
        """True when the login page is shown."""
        return self.login_page.is_this_page
    def get_logged_user(self):
        """Open the profile page and read back the logged-in user's data."""
        self.internal_page.user_profile_link.click()
        upp = self.user_profile_page
        upp.is_this_page
        upp.username_field_is_visible
        return User(username=upp.user_form.username_field.get_attribute("value"),
                    email=upp.user_form.email_field.get_attribute("value"))
    def add_user(self, user):
        """Create a new account via the user-management form."""
        self.internal_page.user_management_link.click()
        ump = self.user_management_page
        ump.is_this_page
        ump.username_field_is_visible
        ump.user_form.username_field.send_keys(user.username)
        ump.user_form.email_field.send_keys(user.email)
        ump.user_form.password_field.send_keys(user.password)
        ump.user_form.password1_field.send_keys(user.password)
        #ump.user_form.role_select.select_by_visible_text(user.role)
        ump.user_form.submit_button.click()
| true |
9635af916d8b3d6e6c5a8f361e593f17c50566d2 | Python | AstridSlet/CDS_Language_Analytics | /assignment_W1/word_counts.py | UTF-8 | 2,315 | 3.875 | 4 | [] | no_license | # ! /usr/bin/python
##### LANGUAGE ANALYTICS ASSIGNMENT 1 #####
# Count total and unique words for every novel in the corpus and save the
# result to output/outputfile.csv (columns: filename, word_count, unique_words).

# load packages
import os
from pathlib import Path  # for filepaths
import string  # for removing punctuations
import pandas as pd  # for creating df

# create pathname to use in the loop
filepath = os.path.join("data", "100_english_novels", "corpus")

# sanity check: print the files the glob pattern will pick up
for filename in Path(filepath).glob("*.txt"):
    print(filename)

text_list = []      # raw text of each novel
filename_list = []  # file name (stem) of each novel

# load every novel and remember its file name
for each_file in Path(filepath).glob("*.txt"):
    with open(each_file, "r", encoding="utf-8") as file:
        filename_list.append(Path(each_file).stem)
        text_list.append(file.read())

word_count_list = []    # total word count per novel
unique_count_list = []  # unique word count per novel

# translation table that deletes all punctuation characters (built once,
# not once per novel)
table = str.maketrans('', '', string.punctuation)

for text in text_list:
    words = text.split()
    # BUG FIX: the original did `words[0] = ''`, which only blanked the first
    # token -- the empty string still inflated both counts by one. Actually
    # drop the first token (guarding against empty files).
    if words:
        del words[0]
    # remove punctuations
    stripped = [w.translate(table) for w in words]
    # convert to lower case
    words_clean = [word.lower() for word in stripped]
    word_count_list.append(len(words_clean))
    unique_count_list.append(len(set(words_clean)))

# gather the three lists in a dataframe and save it in the output folder
df = pd.DataFrame(zip(filename_list, word_count_list, unique_count_list),
                  columns=["filename", "word_count", "unique_words"])
outpath = os.path.join("output", "outputfile.csv")
df.to_csv(outpath, index=False)
| true |
70bb5ff4d10f4d659da56bae322ea85388080c5b | Python | elliottlin/python | /blackjace-multiprocess/blackjack.py | UTF-8 | 3,338 | 3.03125 | 3 | [] | no_license | import random
import multiprocessing
import math
import time
SIMLULATIONS = 100 * 1000
NUM_DECKS = 4
SHUFFLE_PERC = 75
WIN = 1
DRAW = 0
LOSE = -1
def simulate(queue, batch_size):
    """Play *batch_size* blackjack hands against the dealer and put the
    resulting [win, draw, lose] tally onto *queue*.

    The shoe is reshuffled whenever fewer than SHUFFLE_PERC percent of the
    cards remain. Aces are valued 11 (converted to 1 when needed).
    """
    deck = []
    def new_deck():
        # one standard 52-card deck by value (aces as 11), times NUM_DECKS
        std_deck = [
            2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11,
            2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11,
            2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11,
            2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11,
        ]
        std_deck = std_deck * NUM_DECKS
        random.shuffle(std_deck)
        return std_deck[:]
    def play_hand():
        """Play one hand from the shared deck; return WIN/DRAW/LOSE."""
        dealer_cards = []
        player_cards = []
        # init cards
        player_cards.append(deck.pop(0))
        dealer_cards.append(deck.pop(0))
        player_cards.append(deck.pop(0))
        dealer_cards.append(deck.pop(0))
        # deal player to 12 or higher
        while sum(player_cards) < 12:
            player_cards.append(deck.pop(0))
        p_sum = sum(player_cards)
        # NOTE(review): the player never converts an ace from 11 to 1, so a
        # hand like 11+11 busts immediately -- confirm this is intended.
        if p_sum > 21:
            return LOSE
        # deal dealer on soft 17: the dealer draws below 18, but stands on a
        # hard 17; a soft 17 has its ace demoted to 1 and drawing continues
        while sum(dealer_cards) < 18:
            leave = False
            # check for soft 17
            if sum(dealer_cards) == 17:
                leave = True
                for i, card in enumerate(dealer_cards):
                    if card == 11:
                        leave = False
                        dealer_cards[i] = 1
                        break
            if leave:
                break
            dealer_cards.append(deck.pop(0))
        d_sum = sum(dealer_cards)
        if d_sum > 21:
            return WIN
        if d_sum == p_sum:
            return DRAW
        if d_sum > p_sum:
            return LOSE
        if d_sum < p_sum:
            return WIN
    deck = new_deck()
    win, draw, lose = 0, 0, 0
    for _ in range(0, batch_size):
        # reshuffle when the remaining shoe drops below the threshold
        if (float(len(deck))/(52*NUM_DECKS)) * 100 < SHUFFLE_PERC:
            deck = new_deck()
        result = play_hand()
        if result == WIN:
            win += 1
        elif result == DRAW:
            draw += 1
        elif result == LOSE:
            lose += 1
    queue.put([win, draw, lose])
if __name__ == '__main__':
    start_time = time.time()
    # simulate: fan the total simulation count out across all CPU cores,
    # one worker process per core
    cpus = multiprocessing.cpu_count()
    batch_size = int(math.ceil(SIMLULATIONS/ float(cpus)))
    queue = multiprocessing.Queue()
    processes = []
    for i in range(0, cpus):
        process = multiprocessing.Process(
            target=simulate,
            args=(queue, batch_size)
        )
        processes.append(process)
        process.start()
    for proc in processes:
        proc.join()
    finish_time = time.time() - start_time
    # aggregate one [win, draw, lose] tally per worker from the queue
    win, draw, lose = 0, 0, 0
    for i in range(0, cpus):
        results = queue.get()
        win += results[0]
        draw += results[1]
        lose += results[2]
    # report throughput and outcome percentages
    print("{0: >20} {1}".format("cores", cpus))
    print("{0: >20} {1}".format("total simulations", SIMLULATIONS))
    print("{0: >20} {1}".format("sim/s", (float(SIMLULATIONS)/finish_time)))
    print("{0: >20} {1}".format("time", finish_time))
    print("{0: >20} {1}".format("win", (win/float(SIMLULATIONS) * 100)))
    print("{0: >20} {1}".format("draw", (draw/float(SIMLULATIONS) * 100)))
    print("{0: >20} {1}".format("lose", (lose/float(SIMLULATIONS) * 100)))
| true |
fbf5d949c0e8c2519e435818d5341699faddc300 | Python | CheezyMac/Travis-Discordbot | /main.py | UTF-8 | 1,704 | 2.578125 | 3 | [] | no_license | import configuration as cfg # Import configuration settings
import discord # Import discord API
import features
import sys
if len(sys.argv) != 2:
print("Argument error! Please enter only the bot token as argument")
client = discord.Client()
print(discord.__version__)
active_games = []
@client.event
async def on_message(message):
# Ignore messages sent by ourselves
if message.author == client.user:
return
# Check if message should be routed to any active games
for game in active_games:
if game.game_channel == message.channel:
ret_code = await game.handlemessage(message)
if ret_code == 2: # if game complete, remove from active games
active_games.remove(game)
return
elif ret_code == 0: # If game did not handle message, continue processing
continue
if message.content.upper().startswith(cfg.COMMANDS["play"]):
name = message.content.upper()[len(cfg.COMMANDS["play"]):]
if name in cfg.KNOWN_GAMES:
game, channel_name = await cfg.KNOWN_GAMES[name](client, message)
if game != -1:
active_games.append(game)
await client.send_message(message.channel, features.get_game_ready().format(channel_name))
else:
await features.list_games(client, message.channel)
if cfg.BOT_SEARCH_STRING.upper() in message.content.upper():
print("{} mentioned something I'm interested in!".format(message.author))
@client.event
async def on_ready():
print("Bot ready!")
print("Logged in as {} ({})".format(client.user.name, client.user.id))
client.run(sys.argv[1])
| true |
155768c6357c2d72ec89ab6088c539032deb71de | Python | pyranja/challenges | /euler/e3.py | UTF-8 | 805 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env python3
import sys, math
def solve(n):
    """Return the largest prime factor of n (Project Euler problem 3)."""
    return max(factorize(n))


def factorize(it):
    """Yield 1 followed by the prime factors of ``it`` in nondecreasing order.

    Trial division: once the candidate divisor exceeds sqrt(remaining), the
    remaining value must itself be prime, so it is emitted as the last factor.
    """
    assert it > 0, "cannot factorize %s" % it
    yield 1
    candidate = 2
    bound = int(math.sqrt(it))  # no divisor <= sqrt(it) means it is prime
    while it > 1:
        if it % candidate == 0:
            yield candidate
            it = it // candidate
            bound = int(math.sqrt(it))
        elif candidate > bound:
            # remaining value is prime: use it as the final candidate so the
            # next iteration yields it and terminates
            candidate = it
        else:
            candidate += 1
# ========================================= boilerplate ========================================== #
def main(cases):
return '\n'.join([str(solve(int(case))) for case in cases])
if __name__ == '__main__':
print(main(sys.stdin.readlines()[1:]))
| true |
71116471998a0c9765820b76505810da2c39302b | Python | dcrogers127/bball_spreads | /Id_SEGABABA_v2.py | UTF-8 | 2,182 | 2.5625 | 3 | [] | no_license | #############################################
# Program: Id_SEGABABA.py - Id SEcond GAmes on a BAck to BAck
#
# Notes: v2 - standardize input
#
#
#
#
# Date:
#
#############################################
import numpy as np
from pandas import Series, DataFrame
import pandas as pd
import os as os
from datetime import datetime
def Id_SEGABABA(year):
games = pd.read_csv('Data/Box_Scores/' + year + '/Game_List_' + year + '.csv', dtype={'Away_PTS': np.object, 'Home_PTS': np.object})
games['Date'] = pd.to_datetime(games['Date'])
teams = list(games['Home'].value_counts().keys())
teams.sort()
segababa_df = []
for (i, team) in enumerate(teams):
segababa_df.append( games[['Date', 'Home']].loc[games['Home']==team] )
segababa_df[i] = segababa_df[i].rename(columns={'Home':'Team'})
segababa_df[i]['Home_Game'] = True
away_games = games[['Date', 'Away']].loc[games['Away']==team]
away_games = away_games.rename(columns={'Away':'Team'})
away_games['Home_Game'] = False
segababa_df[i] = segababa_df[i].append(away_games)
segababa_df[i] = segababa_df[i].sort_values(by='Date', ascending=False)
gb2b = np.zeros(segababa_df[i].shape[0], dtype=bool)
g3i4 = np.zeros(segababa_df[i].shape[0], dtype=bool)
g4i5 = np.zeros(segababa_df[i].shape[0], dtype=bool)
for l in range( segababa_df[i].shape[0] ):
if (l+1)<segababa_df[i].shape[0]:
gb2b[l] = (segababa_df[i].iloc[l]['Date'] - segababa_df[i].iloc[l+1]['Date']).days==1
if (l+3)<segababa_df[i].shape[0]:
g3i4[l] = (segababa_df[i].iloc[l]['Date'] - segababa_df[i].iloc[l+2]['Date']).days==3
if (l+4)<segababa_df[i].shape[0]:
g4i5[l] = (segababa_df[i].iloc[l]['Date'] - segababa_df[i].iloc[l+3]['Date']).days==4
segababa_df[i]['gb2b'] = gb2b
segababa_df[i]['g3i4'] = g3i4
segababa_df[i]['g4i5'] = g4i5
all_segababa_df = pd.concat(segababa_df)
all_segababa_df.to_csv('Data/Input_Files/SEGABABA_' + year + '.csv', index=False)
Id_SEGABABA('2016')
| true |
6b52c1905e2f680279214c7b01ea0c2275ad6b7c | Python | t0mbs/symbolicpy | /symbolicpy/Property.py | UTF-8 | 3,398 | 3.296875 | 3 | [
"MIT"
] | permissive | import ast
import logging
from SymbolicVariable import *
# TODO: Comment
# TODO: Split into Binary and Conditional subtypes
class Property:
# l and r Can be symbolic variable, or constant value
def __init__(self, l, op, r, is_true = True):
self.l = l
self.r = r
self.is_true = is_true
self.r_isvar = type(r) == SymbolicVariable
self.l_isvar = type(l) == SymbolicVariable
conditional_operators = {
ast.Eq: "==",
ast.NotEq: "!=",
ast.Lt: "<",
ast.LtE: "<=",
ast.Gt: ">",
ast.GtE: ">=",
ast.Or: "or",
ast.And: "and"
}
binary_operators = {
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.Mod: "%",
ast.Pow: "**",
ast.LShift: "<<",
ast.RShift: ">>"
}
if type(op) in conditional_operators:
self.type = "conditional"
self.op = conditional_operators[type(op)]
elif type(op) in binary_operators:
self.type = "binary"
self.op = binary_operators[type(op)]
else:
logging.error("The operator %s is not supported", op)
def getSymbolicVariables(self):
v = []
if self.r_isvar:
v.append(self.r)
if self.l_isvar:
v.append(self.l)
if isinstance(self.r, Property):
v.extend(self.r.getSymbolicVariables())
if isinstance(self.l, Property):
v.extend(self.l.getSymbolicVariables())
return v
def getExprName(self, v):
if type(v) == SymbolicVariable:
return v.name
return v
def getExpressions(self):
expressions = []
if type(self.r) == Property:
# Need to unwind recursive properties into simple expressions (e.g. x==y, y==z)
# This is done because Z3 does not accept complex expressions contianing conditional operators (e.g. x==y==z)
# 1. Recursive unwind if conditional
if self.r.type == "conditional":
expressions.append("%s %s %s" % (self.getExprName(self.l), self.op, self.getExprName(self.r.l)))
# If conditional, should be recursive
elif self.r.type == "binary":
expressions.append("%s %s %s" % (self.getExprName(self.l), self.op, self.r.getRecursiveExpression()))
return expressions
# Merge expressions lists
expressions[len(expressions):] = self.r.getExpressions()
else:
expressions.append(self.getUnwoundExpression())
return expressions
def getRecursiveExpression(self):
if type(self.r) == Property:
return "%s %s %s" % (self.getExprName(self.l), self.op, self.r.getRecursiveExpression())
else:
return self.getUnwoundExpression()
def getUnwoundExpression(self):
return "%s %s %s" % (self.getExprName(self.l), self.op, self.getExprName(self.r))
def z3String(self):
expr = self.getRecursiveExpression()
if self.is_true:
return expr
return "Not(%s)" % expr
def __str__(self):
expr = self.getRecursiveExpression()
if self.is_true:
return expr
return "not %s" % expr
| true |
76e25e3a55046be096079d6337911a3825ffcab9 | Python | SteDavis20/My_Python_Files | /14) List Remove Duplicates.py | UTF-8 | 530 | 3.421875 | 3 | [] | no_license | a = [1, 2, 1, 3, 1, 4, 1, 4, 2, 3]
def main():
#solution = remove_duplicates_sets(a)
solution = remove_duplicates_loop(a)
print(solution)
def remove_duplicates_loop(list):
    """Return a new list with duplicates removed, preserving first-occurrence order.

    Replaces the original O(n^3) triple nested scan with a single pass plus an
    O(1)-membership set; output is identical for hashable elements (the module's
    data is ints).
    """
    seen = set()
    result = []
    for item in list:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def remove_duplicates_sets(list):
    """Return the unique elements of *list* as a set (order is not preserved)."""
    return set(list)
if __name__ == "__main__": main()
| true |
e184e876b64945cd2b3eead036f9ad2e91c6b8e9 | Python | ConnorFoody/apCompsiProject | /vision/CannyHough.py | UTF-8 | 1,794 | 2.78125 | 3 | [] | no_license | # Basic circle detection using OpenCV
import math
import sys
import time
import cv2
#import cv2.cv as cv
import numpy as np
class CircleFind(object):
targetColor = (0, 0, 255)
missedColor = (0, 255,0)
# constants that need to be tuned
kHoleClosingIterations = 9
def __init__(self):
self.morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3), anchor=(1,1))
self.size = None
def processImage(self, img):
heading = 0
if self.size is None or self.size[0] != img.shape[0] or self.size[1] != img.shape[1]:
h, w = img.shape[:2]
self.size = (h, w)
self.bin = np.empty((h, w, 1), dtype=np.uint8)
temp = img
targets = []
HIGH = 80
LOW = 10
cv2.blur(img, (7,7), dst=img)
img = cv2.Canny(img, LOW, HIGH)
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
#return img
circles = cv2.HoughCircles(img, cv2.cv.CV_HOUGH_GRADIENT, 1, 10, param1=HIGH,param2=5,minRadius=1,maxRadius=500)
if(circles == None):
print "O nose!!!, nothing in circles!"
return img
print circles[0][0][0]
x = circles[0][0][0]
y = circles[0][0][1]
radius = circles[0][0][2]
cv2.circle(cimg, (x,y), 7, self.targetColor, thickness=radius)
return cimg
if __name__ == '__main__':
prg = CircleFind()
tmp = cv2.imread("images/img5.jpg")
start_time = time.time()
end_time = 0.0
img = tmp
img = prg.processImage(img)
end_time = time.time()
print end_time - start_time
cv2.imshow('Processed', img)
print "Hit ESC to exit"
while True:
key = 0xff & cv2.waitKey(1)
if key == 27:
break
| true |
0707f80ec790db3745c0bff16df7db7d7c5d4220 | Python | sug5806/TIL | /Python/algorithm/sort/insert_sort/prac.py | UTF-8 | 455 | 4.15625 | 4 | [
"MIT"
] | permissive | # 이미 정렬된 리스트에 적절한 곳에 삽입시키므로 삽입정렬이다.
def insertion_sort(arr):
    """Sort ``arr`` in place with insertion sort (stable, O(n^2)); returns None.

    Each element is inserted into its place within the already-sorted prefix,
    shifting larger elements one slot to the right.
    """
    for pos in range(1, len(arr)):
        key = arr[pos]
        slot = pos
        while slot > 0 and key < arr[slot - 1]:
            arr[slot] = arr[slot - 1]
            slot -= 1
        arr[slot] = key
if __name__ == "__main__":
arr = [1, 5, 2, 74, 25, 22, 17, 9, 3, 67, 98, 33, 21, 6]
insertion_sort(arr)
print(arr)
| true |
307f042e2f5d4f657d6f0f4acfaa9fb12c4202fc | Python | youzi-YJY/Maching-Learning | /Big data and Map Reduce/mrMeanReducer.py | UTF-8 | 688 | 2.640625 | 3 | [] | no_license | import sys
from numpy import mat,mean,power
def read_input(file):
    """Yield each line of *file* with trailing whitespace/newline stripped.

    Bug fix: the original called ``line.rsrtip()`` (a typo), which raised
    AttributeError on the first line; the intended method is ``rstrip()``.
    """
    for line in file:
        yield line.rstrip()
input=read_input(sys.stdin)
mapperOut=[line.split('\t') for line in input]
cumVal=0.0
cumSumSq=0.0
cumN=0.0
for instance in mapperOut:
nj=float(instance[0])
cumN+=nj
cumVal+=nj*float(instance[1])
cumSumSq+=nj*float(instance[2])
mean=cumVal/cumN
varSum=(cumSumSq-2*mean*cumVal+cumN*mean*mean)/cumN
print("%d\t%d\t%f" %(cumN,mean,varSum))
print >> sys.stderr,"report:still alive"
# cat inputFile.txt | python mrMeanMapper.py | python mrMeanReducer.py
#In Dos,enter the following command:
#%python mrMeanMapper.py < inputFile.txt | python mrMeanReducer.py | true |
e2ba877686eaa81cc7011743f7e839c55b505c42 | Python | xysun/Beta | /test/nested_test/f3.py | UTF-8 | 118 | 2.578125 | 3 | [] | no_license | from beta import Beta
@Beta(3, 4)
def nested_f1(v):
return v + 1
@Beta(4, 6)
def nested_f2(v):
return v + 2
| true |
90e91e8b3ba475e77b6a658be1dbdaad8bf0d0b0 | Python | hherbol/ase_orca | /io/orca_reader.py | UTF-8 | 4,172 | 2.765625 | 3 | [] | no_license | from __future__ import print_function
import sys
# Number of lines to print from end of orca output file in case of error
ORCA_ERROR_OUT = 15
class OrcaReader:
def auto_type(self, data):
""" tries to determine type"""
try:
return float(data)
except ValueError:
pass
try:
ds = data.split(",")
array = []
for d in ds:
array.append(float(d))
return array
except ValueError:
pass
return data
def __init__(self, filename):
"""filename is optional; if not set, use parse to set the content"""
if isinstance(filename, str):
fileobj = open(filename + ".log", 'r')
else:
fileobj = filename
fileobj.seek(0) # Re-wind fileobj
content = fileobj.read()
content = content.replace("\r\n", "\n")
self.parse(content)
def parse(self, content):
from ase.data import atomic_numbers
self.data = []
seq_count = 0
try:
route = [line[5:] for line in content.split('\n')
if line.startswith('| 1>')][0]
except IndexError:
raise IOError('Could not find route line: job likely crashed.')
if "ABORTING THE RUN" in content:
print("Orca simulation crashed - Unable to read in output.")
print("Final lines:")
for s in content.split('\n')[-ORCA_ERROR_OUT:]:
print("\t%s" % s)
sys.exit()
version = content.split("Program Version")[1].split("\n")[0]
method = route.split("!")[1].split()[0]
charge = content.split("Total Charge")[1].split('\n')[0].split()[-1]
charge = int(charge)
multiplicity = content.split("Multiplicity")[1].split('\n')[0].split()
multiplicity = int(multiplicity[-1])
section, s_position = content, "CARTESIAN COORDINATES (ANGSTROEM)"
s_energy = "FINAL SINGLE POINT ENERGY"
s_gradient = "CARTESIAN GRADIENT"
s_gradient_2 = "The final MP2 gradient"
while s_position in section:
section = section[section.find(s_position) + len(s_position):]
atom_block = section[:section.find('\n\n')].split('\n')[2:]
atoms, positions = [], []
for line in atom_block:
a = line.split()
atoms.append(atomic_numbers[a[0]])
positions.append([float(a[1]), float(a[2]), float(a[3])])
energy = section[section.find(s_energy):].split("\n")[0].split()
energy = float(energy[-1])
# TODO - Read from .engrad file instead
if s_gradient in section:
grad_block = section[section.find(s_gradient):].split("\n\n")
grad_block = grad_block[1].split("\n")
gradient = []
for line in grad_block:
a = line.split()
gradient.append([float(b) for b in a[3:]])
elif s_gradient_2 in section:
grad_block = section[section.find(s_gradient_2):].split("\n\n")
grad_block = grad_block[0].split("\n")[1:]
gradient = []
for line in grad_block:
a = line.split()
gradient.append([float(b) for b in a[1:]])
else:
gradient = None
new_dict = {}
self.data.append(new_dict)
new_dict["Method"] = method
new_dict["Sequence number"] = seq_count
new_dict["Charge"] = charge
new_dict["Multiplicity"] = multiplicity
new_dict["Atomic_numbers"] = atoms
new_dict["Positions"] = positions
new_dict["Energy"] = energy
new_dict["Gradient"] = gradient
new_dict["Version"] = version
def __iter__(self):
"""returns an iterator that iterates over all keywords"""
return self.data.__iter__()
def __len__(self):
return len(self.data)
def __getitem__(self, pos):
return self.data[pos]
| true |
9f4e5f542fe23018c5eca808e7a400335fb1d99c | Python | AIoT-Lab-BKAI/Fuzzy-Q-learning-based-Opportunistic-Communication | /carSimulator_method.py | UTF-8 | 4,553 | 2.875 | 3 | [] | no_license | import math
import random
import numpy as np
from config import Config
def getNearCar(car, currentTime, network):
    """Return a random one of the nearest eligible neighbour cars, or None.

    A neighbour is eligible when it is not *car* itself, has already started
    (startTime appears to be in seconds and currentTime in minutes -- TODO
    confirm against the simulator clock), still has message capacity, and is
    within Config.carCoverRadius. Ties at the minimum distance are broken
    uniformly at random.
    """
    minDis = float('inf')
    listRes = []
    for car_ in network.carList:
        # remove itself
        if car_.id == car.id:
            continue
        # skip cars that have not entered the simulation yet
        if (car_.startTime - Config.simStartTime) / 60 > currentTime:
            continue
        # skip cars whose message buffer is full
        if car_.currentNumMessage >= car_.carMaxCapacity:
            continue
        distance = car.distanceToCar(car_, currentTime)
        if distance > Config.carCoverRadius:
            continue
        if distance < minDis:
            minDis = distance
            listRes = [car_]
        elif distance == minDis:
            listRes.append(car_)
    # print(listRes)
    if listRes:
        return listRes[random.randint(0, len(listRes) - 1)]
    else:
        return None
def getNearRsu(car, currentTime, network):
    """Return a random one of the nearest RSUs within Config.rsuCoverRadius.

    Unlike getNearCar, no capacity check is applied to RSUs. Returns None when
    no RSU is in range; ties at the minimum distance are broken at random.
    """
    minDis = float('inf')
    listRes = []
    for rsu in network.rsuList:
        distance = car.distanceToRsu(rsu, currentTime)
        if distance > Config.rsuCoverRadius:
            continue
        if distance < minDis:
            minDis = distance
            listRes = [rsu]
        elif distance == minDis:
            listRes.append(rsu)
    if listRes:
        return listRes[random.randint(0, len(listRes) - 1)]
    else:
        return None
def getPosition(car, currentTime):
    """Return the position of *car* at *currentTime*.

    car: Car object
    currentTime: number in [0, simTime], apparently minutes since the
        simulation start (the lookup key is startTime + 60 * currentTime --
        TODO confirm units)
    return: the car's position; when no record exists for the exact
        timestamp, the last cached ``car.currentLocation`` is returned
        unchanged (the cache is only updated on a hit).
    """
    currentTimeCar = car.startTime + 60 * currentTime
    if currentTimeCar in car.timeLocation:
        car.currentLocation = car.timeLocation[currentTimeCar][0]
        return car.currentLocation
    return car.currentLocation
def distanceToCar(car1, car2, currentTime):
    """Return the Euclidean distance between two cars' positions at *currentTime*."""
    p1 = np.array(car1.getPosition(currentTime))
    p2 = np.array(car2.getPosition(currentTime))
    return np.linalg.norm(p1 - p2)
def distanceToRsu(car, rsu, currentTime):
    """Return the 3-D distance from the car's ground position to the RSU.

    The car position contributes x/y only; the RSU's mounting height
    (rsu.zcord) supplies the vertical component.
    """
    pos = car.getPosition(currentTime)
    dx = pos[0] - rsu.xcord
    dy = pos[1] - rsu.ycord
    return math.sqrt(dx * dx + dy * dy + rsu.zcord * rsu.zcord)
def getAction2(car, message, currentTime, network, optimizer=None):
    """Baseline random forwarding policy (no learning).

    Picks a strategy with fixed probabilities: 0.4 try car-to-car, 0.4 try
    car-to-RSU, 0.1 send to gNB, remaining 0.1 keep the message. When the
    chosen neighbour type is unavailable and the car's buffer is full, the
    message is offloaded to the gNB instead of being kept.

    Args:
        car: CarSimulator holding the message
        message: Message to forward (unused by this random policy)
        currentTime: simulation time (minutes -- TODO confirm)
        network: Network giving access to cars, RSUs and the gNB
        optimizer: unused here; kept for signature parity with getAction

    Returns:
        (action, nextLocation) where action is 0:sendToCar, 1:sendToRsu,
        2:sendToGnb or 3:noChange, and nextLocation is the chosen target
        (None for noChange).
    """
    pCarToCar = 0.4
    pCarToRsu = 0.4
    pCarToGnb = 0.1
    rand = random.random()
    if rand < pCarToCar:
        nearCar = car.getNearCar(currentTime, network)
        if nearCar:
            return (0, nearCar)
        else:
            # no neighbour car: offload to gNB only if the buffer is full
            if car.currentNumMessage == car.carMaxCapacity:
                return (2, network.gnb)
            return (3, None)
    elif rand < pCarToCar + pCarToRsu:
        nearRsu = car.getNearRsu(currentTime, network)
        if nearRsu:
            return (1, nearRsu)
        else:
            # no RSU in range: same full-buffer fallback as above
            if car.currentNumMessage == car.carMaxCapacity:
                return (2, network.gnb)
            return (3, None)
    elif rand < pCarToCar + pCarToRsu + pCarToGnb:
        return (2, network.gnb)
    else:
        return (3, None)
def getAction(car, message, currentTime, network, optimizer=None):
    """Learned forwarding policy driven by the car's attached optimizer.

    Builds the state from the message, stores it on the optimizer, maps it to
    per-action values, masks out actions whose target is missing (no neighbour
    car -> exclude 0, no neighbour RSU -> exclude 1), and lets the optimizer's
    policy pick the action. Presumably stateInfo is (state, neighborCar,
    neighborRsu, ...) -- TODO confirm against the optimizer's getState.

    :param car: CarSimulator whose ``optimizer`` attribute is consulted
    :param message: message whose state representation is computed
    :param currentTime: unused here (state already encodes time context?)
    :param network: provides the gNB target for action 2
    :param optimizer: unused; the car's own optimizer is used instead
    :return: (action, target) with action 0:car, 1:rsu, 2:gnb, 3:no change
    """
    # 0: car, 1:rsu, 2:gnb, 3:no change
    # action = epsilon_greedy_policy(Q, State) #
    stateInfo = car.optimizer.getState(message)
    neighborCar = stateInfo[1]
    neighborRsu = stateInfo[2]
    car.optimizer.currentState = stateInfo
    allActionValues = car.optimizer.mappingStateToValue(stateInfo)
    exclude_actions = []
    if neighborCar is None:
        exclude_actions.append(0)
    if neighborRsu is None:
        exclude_actions.append(1)
    actionByPolicy = car.optimizer.policy(allActionValues, exclude_actions)
    car.optimizer.policyAction = actionByPolicy
    if actionByPolicy == 0:
        res = (0, stateInfo[1])
    elif actionByPolicy == 1:
        res = (1, stateInfo[2])
    elif actionByPolicy == 2:
        res = (2, network.gnb)
    else:
        res = (3, None)
    return res
| true |
bffcc0f26c97266e2dac0c024f8430d859db5429 | Python | baitongda/python- | /基础知识/test_replace.py | UTF-8 | 247 | 3.6875 | 4 | [] | no_license | str = "this is string example....wow!!! this is really string"
print(str.replace("is", "was"))
print(str.replace("is", "was", 3))
print(str)
import re
str = "this is string example....wow!!! this is really string"
print(re.sub("is","was",str))
| true |
6315de11dd36dca67b7d91f2184ed783e2963b1d | Python | Incubaid/arakoon | /tools/low_cpu_check.py | UTF-8 | 3,066 | 2.640625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import bz2
import struct
import binascii
import os
import time
import sys
def sn_from(buf, offset):
    """Read a native-endian signed 64-bit int at *offset*; return (value, next_offset)."""
    (value,) = struct.unpack_from("q", buf, offset)
    return value, offset + 8


def int32_from(buf, offset):
    """Read a native-endian unsigned 32-bit int at *offset*; return (value, next_offset)."""
    (value,) = struct.unpack_from("I", buf, offset)
    return value, offset + 4


def string_from(buf, offset):
    """Read a uint32-length-prefixed byte string; return (bytes, next_offset)."""
    length, start = int32_from(buf, offset)
    end = start + length
    return buf[start:end], end
def test(sn,prev):
    # Sanity check on tlog continuity: each entry's sequence number must be a
    # repeat of the previous one or exactly one greater; anything else means
    # the log has a gap or is out of order.
    if sn == prev + 1 or sn == prev:
        pass
    else:
        raise Exception("%i <-> %i" % (sn,prev))
def do_entry(inflated, offset):
    """Parse one tlog entry at *offset*: int64 sequence number, uint32 crc,
    then a length-prefixed command blob. Returns ((sn, crc, cmd), next_offset).

    The sleep deliberately mirrors the parse duration, throttling the checker
    to roughly half CPU (this file is the "low cpu" variant of the check).
    """
    t0 = time.time()
    sn,o2 = sn_from(inflated, offset)
    crc,o3 = int32_from(inflated,o2)
    cmd,o4 = string_from(inflated,o3)
    t1 = time.time()
    delay = t1 - t0
    time.sleep(delay)
    return (sn,crc,cmd), o4
def do_chunk(prev_i, chunk):
    """Decompress one bz2 chunk of a .tlf file and walk its entries, checking
    sequence-number continuity starting from *prev_i*. Returns the last
    sequence number seen. Sleeps for as long as decompression took (CPU
    throttling, see do_entry).
    """
    t0 = time.time()
    inflated = bz2.decompress(chunk)
    t1 = time.time()
    delay = t1 - t0
    time.sleep(delay)
    too = len(inflated)
    offset = 0
    prev = prev_i
    while offset < too:
        #print "\t",binascii.hexlify(inflated[offset: offset+16])
        (sn,crc,cmd),o2 = do_entry(inflated, offset)
        test(sn,prev)
        #print sn
        offset = o2
        prev = sn
    #print prev_i,prev
    return prev
def do_tlc_chunk(prev, chunk):
    """Decompress one bz2 chunk of a .tlc file and verify entry continuity
    starting from *prev*; returns the last sequence number seen. Same
    throttling sleep as do_chunk.
    """
    t0 = time.time()
    inflated = bz2.decompress(chunk)
    t1 = time.time()
    delay = t1 - t0
    time.sleep(delay)
    offset = 0
    too = len(inflated)
    while offset < too:
        (sn,crc,cmd), o2 = do_entry(inflated, offset)
        test(sn,prev)
        offset = o2
        prev = sn
    return prev
def do_tlf(first, canonical) :
    """Check a .tlf file: repeated records of (int64 last-sn header,
    length-prefixed bz2 chunk). Each chunk's last entry must match the header
    (the assert below). Returns the final sequence number.
    """
    f = open(canonical,'rb')
    all = f.read()
    f.close()
    offset = 0
    too = len(all)
    while offset < too:
        last_i,o2 = sn_from(all,offset)
        chunk, o3 = string_from(all, o2)
        new_f = do_chunk(first, chunk)
        # header must agree with the last sn actually found in the chunk
        assert last_i == new_f
        offset = o3
        first = new_f
    return first
def do_tlc(first, canonical):
    """Check a .tlc file: repeated records of (uint32 entry count,
    length-prefixed bz2 chunk). The count header is read but not verified.
    Returns the final sequence number.
    """
    f = open(canonical,'rb')
    all = f.read()
    f.close()
    offset = 0
    too = len(all)
    while offset < too:
        n_entries,o2 = int32_from(all,offset)
        chunk,o3 = string_from(all,o2)
        new_f = do_tlc_chunk(first, chunk)
        offset = o3
        first = new_f
    return first
def do_dir(dn):
    """Check every .tlf/.tlc file in directory *dn*, in numeric filename order.

    Python 2 code (list.sort(cmp), print statement). Each file named N.tlf/.tlc
    is assumed to cover exactly 100000 entries starting at N*100000; the assert
    enforces that its last sequence number is N*100000 + 99999.
    """
    fns = filter(lambda f: f.endswith(".tlf") or f.endswith(".tlc"),
                 os.listdir(dn))
    def n_from(e): return int(e[:e.index('.')])
    def cmp(a,b): return n_from(a) - n_from(b)
    fns.sort(cmp)
    for fn in fns:
        canonical = "%s/%s" % (dn,fn)
        first = int(fn[:fn.index('.')]) * 100000
        if fn.endswith(".tlf"):
            last = do_tlf(first, canonical)
        else:
            last = do_tlc(first, canonical)
        assert first + 99999 == last
        print fn, "ok"
#do_tlc(500000,'/tmp/010.tlc')
if __name__ == '__main__':
if len(sys.argv) <2:
print "python",sys.argv[0], "<path_to_tlog_dir>"
sys.exit(-1)
else:
do_dir(sys.argv[1])
| true |
0b511c9f77566bda844b3627a62c94425bb8755a | Python | justNeto/VisualCodeHaven | /Projects/MIT_CLASSES/BWSI_Students/Jorge Gabriel Barragán Jiménez/Palindromo | UTF-8 | 229 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 03:00:30 2020
@author: jorge
"""
palabra= str(input(""))
palindromo= palabra[::-1]
if palabra==palindromo:
print("Verdadero")
else:
print("Falso") | true |
a8fa027685fa4ecd0d757129515338ccd738335d | Python | saurabhsingh13no/Analyzing_financial_data | /Machine learning Financial Data_My_version.py | UTF-8 | 5,309 | 2.984375 | 3 | [] | no_license | """Code to analyse the Stock Market Index data by QuantUniversity
Credits : GOOGLE INC for providing the link to dataset and stubs of code
to get started
Date Created: June 8 2017
Author: QuantUniversity : SS
"""
# Libraries used in the code
import pandas as pd
import numpy as np
from pandas.plotting import autocorrelation_plot,scatter_matrix
import matplotlib
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
#from sklearn.cross_validation import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
# Reading the dataset to for Data Anlaysis
matplotlib.style.use('ggplot')
cd=pd.read_pickle("./closing_data_pickle")
cd=cd.sort_index()
# Plotting the stock market closing index
# pd.concat([cd['snp_close'],
# cd['nyse_close'],
# cd['djia_close'],
# cd['nikkei_close'],
# cd['hangseng_close'],
# cd['ftse_close'],
# cd['dax_close'],
# cd['aord_close']], axis=1).plot(figsize=(20, 15))
# plt.show()
# scaling the closing value of features
cd['snp_close_scaled'] = cd['snp_close'] / max(cd['snp_close'])
cd['nyse_close_scaled'] = cd['nyse_close'] / max(cd['nyse_close'])
cd['djia_close_scaled'] = cd['djia_close'] / max(cd['djia_close'])
cd['nikkei_close_scaled'] = cd['nikkei_close'] / max(cd['nikkei_close'])
cd['hangseng_close_scaled'] = cd['hangseng_close'] / max(cd['hangseng_close'])
cd['ftse_close_scaled'] = cd['ftse_close'] / max(cd['ftse_close'])
cd['dax_close_scaled'] = cd['dax_close'] / max(cd['dax_close'])
cd['aord_close_scaled'] = cd['aord_close'] / max(cd['aord_close'])
# transerfing cd dataframe into external file for analysis into R
cd.to_csv('closing_date_scaled')
# Plotting the sclaed value of stock market closing index
# pd.concat([cd['snp_close_scaled'],
# cd['nyse_close_scaled'],
# cd['djia_close_scaled'],
# cd['nikkei_close_scaled'],
# cd['hangseng_close_scaled'],
# cd['ftse_close_scaled'],
# cd['dax_close_scaled'],
# cd['aord_close_scaled']], axis=1).plot(figsize=(20, 15))
#plt.show()
# Plotting autocorrelation
# fig = plt.figure()
# fig.set_figwidth(20)
# fig.set_figheight(15)
# autocorrelation_plot(cd['snp_close'], label='snp_close')
# autocorrelation_plot(cd['nyse_close'], label='nyse_close')
# autocorrelation_plot(cd['djia_close'], label='djia_close')
# autocorrelation_plot(cd['nikkei_close'], label='nikkei_close')
# autocorrelation_plot(cd['hangseng_close'], label='hangseng_close')
# autocorrelation_plot(cd['ftse_close'], label='ftse_close')
# autocorrelation_plot(cd['dax_close'], label='dax_close')
# autocorrelation_plot(cd['aord_close'], label='aord_close')
# plt.legend(loc='upper right')
# plt.show()
# scatter_matrix(pd.concat([cd['snp_close_scaled'],
# cd['nyse_close_scaled'],
# cd['djia_close_scaled'],
# cd['nikkei_close_scaled'],
# cd['hangseng_close_scaled'],
# cd['ftse_close_scaled'],
# cd['dax_close_scaled'],
# cd['aord_close_scaled']], axis=1), figsize=(20, 20), diagonal='kde')
# plt.show()
# creating a dataframe with log returns of closed indexes
log_return_data = pd.DataFrame()
log_return_data['snp_log_return'] = np.log(cd['snp_close']/cd['snp_close'].shift())
log_return_data['nyse_log_return'] = np.log(cd['nyse_close']/cd['nyse_close'].shift())
log_return_data['djia_log_return'] = np.log(cd['djia_close']/cd['djia_close'].shift())
log_return_data['nikkei_log_return'] = np.log(cd['nikkei_close']/cd['nikkei_close'].shift())
log_return_data['hangseng_log_return'] = np.log(cd['hangseng_close']/cd['hangseng_close'].shift())
log_return_data['ftse_log_return'] = np.log(cd['ftse_close']/cd['ftse_close'].shift())
log_return_data['dax_log_return'] = np.log(cd['dax_close']/cd['dax_close'].shift())
log_return_data['aord_log_return'] = np.log(cd['aord_close']/cd['aord_close'].shift())
print (log_return_data.describe())
pd.concat([log_return_data['snp_log_return'],
log_return_data['nyse_log_return'],
log_return_data['djia_log_return'],
log_return_data['nikkei_log_return'],
log_return_data['hangseng_log_return'],
log_return_data['ftse_log_return'],
log_return_data['dax_log_return'],
log_return_data['aord_log_return']], axis=1).plot(figsize=(20, 15))
plt.show()
# fig = plt.figure()
# fig.set_figwidth(20)
# fig.set_figheight(15)
# Plotting the autocorrelation plot of log returns
autocorrelation_plot(log_return_data['snp_log_return'], label='snp_log_return')
autocorrelation_plot(log_return_data['nyse_log_return'], label='nyse_log_return')
autocorrelation_plot(log_return_data['djia_log_return'], label='djia_log_return')
autocorrelation_plot(log_return_data['nikkei_log_return'], label='nikkei_log_return')
autocorrelation_plot(log_return_data['hangseng_log_return'], label='hangseng_log_return')
autocorrelation_plot(log_return_data['ftse_log_return'], label='ftse_log_return')
autocorrelation_plot(log_return_data['dax_log_return'], label='dax_log_return')
autocorrelation_plot(log_return_data['aord_log_return'], label='aord_log_return')
plt.legend(loc='upper right')
plt.show()
# Plotting scatter matrix for log_return_data
scatter_matrix(log_return_data, figsize=(20, 20), diagonal='kde')
plt.title("scatter matrix for log_return_data")
plt.show()
| true |
8a255c30d04768eb4e172110391da7433f62be42 | Python | liuhuanxg/untitled | /数据分析/day1/2、创建数组.py | UTF-8 | 1,618 | 3.75 | 4 | [] | no_license | import numpy as np
'''
1、使用array函数来创建
格式: np.array(object,dtype=None,copy=True,oreder='K',subok=False,ndmin=0)
objece:接受array。表示想要创建的数据,无默认值
dtype: 接受data-type.表示数组所需的数据类型。如果未给定,则选择保存对象所需的最小类型。默认为None
ndmin:接收int。指定生成数据应该具有的最小维数,默认为None
'''
a1=np.array([1,2,3,4])
print(a1,a1.dtype) #[1 2 3 4] int32
a2=np.array([1,2,3.14,4])
print(a2,a2.dtype) #[1. 2. 3.14 4. ] float64
a2=np.array([1,2,3.14,4],dtype=int)
print(a2,a2.dtype) #[1 2 3 4] int32
a4=np.array([(1,2),(3,4)])
print(a4,a4.ndim)
'''
[[1 2]
[3 4]] 2'''
a5=np.array([[1,2],[3,4]])
print(a5,a5.ndim)
'''
[[1 2]
[3 4]] 2'''
#2、arange()
#格式:arange(开始值,终止值,步长) [开始值,终止值]
arr6=np.arange(1,9,1)
print(arr6)
arr7=np.arange(0,1,0.22)
print(arr7) #缺点是元素个数预估有难度
#3、linspace
#格式:linspace(开始值,终止值,元素个数)
arr8=np.linspace(0.1,1,7) #float型
print(arr8) #[ 0.1 0.25 0.4 0.55 0.7 0.85 1. ]
#使用logspace()函数
#生成10~1 到10~3之间的等比例数
arr9=np.logspace(1,3,3) #float型
print('arr9',arr9)
#其他函数
a12=np.zeros((2,3)) #生成2行3列的0
print(a12)
#empty()函数
a13=np.empty((2,3))
print(a13)
#eye(N)函数
#生成N阶矩阵,并且对角线元素为1
a14=np.eye(3)
print(a14)
#使用diag()函数
a15=np.diag([1,2,3,4]) #对角线为1,2,3,4.其他为0
print(a15) | true |
a2239e6ddea6ac1c35f709721975042b4854de58 | Python | drvpereira/hackerrank | /algorithms/greedy/sherlock and the beast/solution.py | UTF-8 | 232 | 2.96875 | 3 | [] | no_license | for _ in range(int(input())):
n = int(input())
n5 = (n // 3) * 3
n3 = 0
r3 = n % 3
if r3 == 1:
n5 -= 9
n3 = 10
elif r3 == 2:
n5 -= 3
n3 = 5
if n5 < 0 or n3 < 0:
print('-1')
else:
print('5' * n5 + '3' * n3)
| true |
6a38262ab186b332b5698ed0f2aa78a81e20e7a2 | Python | freefly518/hlp | /hlp/mt/model/network.py | UTF-8 | 13,844 | 2.875 | 3 | [] | no_license | """
transformer中网络层的部分
包括:
- 多头注意力(Multi-head attention)
- 点式前馈网络(Point wise feed forward network)
- 编码器层(Encoder layer)
- 解码器层(Decoder layer)
- 编码器(Encoder)
- 解码器(Decoder)
- Transformer
- 优化器(Optimizer)
- 损失函数与指标(Loss and metrics)
在此模块空间中完成数据集及字典的加载
"""
import sys
sys.path.append('..')
import tensorflow as tf
from common import self_attention
from config import get_config as _config
# 多头注意力层
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention layer.

    Projects q/k/v to d_model, splits them into num_heads parallel heads of
    size depth = d_model // num_heads, applies scaled dot-product attention
    per head, then concatenates the heads and applies a final Dense projection.
    """
    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        # d_model must divide evenly across the heads
        assert d_model % self.num_heads == 0
        self.depth = d_model // self.num_heads
        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)
        self.dense = tf.keras.layers.Dense(d_model)
    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, depth) and transpose so
        the result has shape (batch_size, num_heads, seq_len, depth).
        """
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])
    def call(self, v, k, q, mask):
        """Run attention over (v, k, q) with *mask*; returns
        (output of shape (batch_size, seq_len_q, d_model), attention_weights).
        """
        batch_size = tf.shape(q)[0]
        q = self.wq(q)  # (batch_size, seq_len, d_model)
        k = self.wk(k)  # (batch_size, seq_len, d_model)
        v = self.wv(v)  # (batch_size, seq_len, d_model)
        q = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
        k = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
        v = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)
        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = self_attention.scaled_dot_product_attention(
            q, k, v, mask)
        # undo the head split: back to (batch_size, seq_len_q, num_heads, depth)
        scaled_attention = tf.transpose(scaled_attention,
                                        perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)
        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)
        output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)
        return output, attention_weights
# 点式前馈网络(Point wise feed forward network)
def point_wise_feed_forward_network(d_model, dff):
    """
    Simple two-layer fully connected network, applied to each position
    independently.

    Args:
        d_model: output dimension of the second Dense layer
        dff: dimension of the first (inner, ReLU) Dense layer

    Returns: a Sequential containing the two Dense layers
    """
    return tf.keras.Sequential([
        tf.keras.layers.Dense(dff, activation='relu'),  # (batch_size, seq_len, dff)
        tf.keras.layers.Dense(d_model)  # (batch_size, seq_len, d_model)
    ])
# 编码器层(Encoder layer)
class EncoderLayer(tf.keras.layers.Layer):
    """One transformer encoder layer: multi-head self-attention followed by a
    point-wise feed-forward network, each sublayer wrapped in dropout, a
    residual connection and LayerNormalization (post-norm).
    """
    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(EncoderLayer, self).__init__()
        self.mha = MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
    def call(self, x, training, mask):
        """x: (batch_size, input_seq_len, d_model); returns the same shape."""
        # self-attention: q = k = v = x
        attn_output, _ = self.mha(x, x, x, mask)  # (batch_size, input_seq_len, d_model)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(x + attn_output)  # (batch_size, input_seq_len, d_model)
        ffn_output = self.ffn(out1)  # (batch_size, input_seq_len, d_model)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = self.layernorm2(out1 + ffn_output)  # (batch_size, input_seq_len, d_model)
        return out2
# 解码器层
class DecoderLayer(tf.keras.layers.Layer):
    """One Transformer decoder block.

    Masked self-attention over the target, encoder-decoder (cross)
    attention, then a position-wise feed-forward network; every
    sub-layer uses dropout plus a residual connection and layer norm.
    """
    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(DecoderLayer, self).__init__()
        self.mha1 = MultiHeadAttention(d_model, num_heads)
        self.mha2 = MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        # enc_output.shape == (batch_size, input_seq_len, d_model)
        # Masked self-attention over the target sequence.
        self_attn, self_attn_weights = self.mha1(x, x, x, look_ahead_mask)  # (batch_size, target_seq_len, d_model)
        self_attn = self.dropout1(self_attn, training=training)
        out1 = self.layernorm1(self_attn + x)
        # Cross-attention: queries from the decoder, keys/values from the encoder.
        cross_attn, cross_attn_weights = self.mha2(
            enc_output, enc_output, out1, padding_mask)  # (batch_size, target_seq_len, d_model)
        cross_attn = self.dropout2(cross_attn, training=training)
        out2 = self.layernorm2(cross_attn + out1)  # (batch_size, target_seq_len, d_model)
        # Position-wise feed-forward sub-layer.
        ffn_out = self.ffn(out2)  # (batch_size, target_seq_len, d_model)
        ffn_out = self.dropout3(ffn_out, training=training)
        out3 = self.layernorm3(ffn_out + out2)  # (batch_size, target_seq_len, d_model)
        return out3, self_attn_weights, cross_attn_weights
# 编码器
class Encoder(tf.keras.layers.Layer):
    """Transformer encoder stack.

    Pipeline: token embedding -> scale by sqrt(d_model) -> add positional
    encoding -> dropout -> ``num_layers`` EncoderLayer blocks. The output
    of the stack is the input to the decoder's cross-attention.
    """
    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 maximum_position_encoding, rate=0.1):
        super(Encoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
        self.pos_encoding = self_attention.positional_encoding(maximum_position_encoding,
                                                               self.d_model)
        self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        seq_len = tf.shape(x)[1]
        # Embed the tokens, rescale, then add the positional encoding.
        x = self.embedding(x)  # (batch_size, input_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        # Run the stack of encoder blocks in order.
        for layer in self.enc_layers:
            x = layer(x, training, mask)
        return x  # (batch_size, input_seq_len, d_model)
# 解码器(Decoder)
class Decoder(tf.keras.layers.Layer):
    """Transformer decoder stack.

    Pipeline: target embedding -> scale by sqrt(d_model) -> add positional
    encoding -> dropout -> ``num_layers`` DecoderLayer blocks. Besides the
    decoded representation it returns every layer's attention weights
    (self-attention as block1, cross-attention as block2) for inspection.
    """
    def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
                 maximum_position_encoding, rate=0.1):
        super(Decoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
        self.pos_encoding = self_attention.positional_encoding(maximum_position_encoding, d_model)
        self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        seq_len = tf.shape(x)[1]
        attention_weights = {}
        # Embed the target tokens, rescale, then add positional encoding.
        x = self.embedding(x)  # (batch_size, target_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        for depth, layer in enumerate(self.dec_layers, start=1):
            x, self_attn, cross_attn = layer(x, enc_output, training,
                                             look_ahead_mask, padding_mask)
            attention_weights['decoder_layer{}_block1'.format(depth)] = self_attn
            attention_weights['decoder_layer{}_block2'.format(depth)] = cross_attn
        # x.shape == (batch_size, target_seq_len, d_model)
        return x, attention_weights
# Transformer模型
class Transformer(tf.keras.Model):
    """Full Transformer: encoder + decoder + a final linear projection
    onto the target vocabulary (unnormalized logits)."""
    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 target_vocab_size, pe_input, pe_target, rate=0.1):
        super(Transformer, self).__init__()
        self.encoder = Encoder(num_layers, d_model, num_heads, dff,
                               input_vocab_size, pe_input, rate)
        self.decoder = Decoder(num_layers, d_model, num_heads, dff,
                               target_vocab_size, pe_target, rate)
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)

    def call(self, inp, tar, training, enc_padding_mask,
             look_ahead_mask, dec_padding_mask):
        # Encode the source once; the decoder attends over it.
        encoded = self.encoder(inp, training, enc_padding_mask)  # (batch_size, inp_seq_len, d_model)
        decoded, attention_weights = self.decoder(
            tar, encoded, training, look_ahead_mask, dec_padding_mask)  # (batch_size, tar_seq_len, d_model)
        logits = self.final_layer(decoded)  # (batch_size, tar_seq_len, target_vocab_size)
        return logits, attention_weights
# 自定义优化器(Optimizer)
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Learning-rate schedule from "Attention Is All You Need":
    linear warm-up for ``warmup_steps`` steps, then inverse-sqrt decay,
    all scaled by 1/sqrt(d_model)."""
    def __init__(self, d_model, warmup_steps=4000):
        super(CustomSchedule, self).__init__()
        # Cast once so __call__ works in float32 arithmetic.
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        decay = tf.math.rsqrt(step)
        warmup = step * (self.warmup_steps ** -1.5)
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(decay, warmup)
def loss_function(real, pred):
    """Masked sparse categorical cross-entropy.

    Positions where ``real`` equals 0 (padding id) are excluded from
    the loss.

    Args:
        real: integer target ids; 0 marks padding.
        pred: unnormalized logits over the target vocabulary.

    Returns:
        Scalar mean loss over the non-padding positions only.
    """
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    # Bug fix: reduce_mean divided by the total number of positions
    # (padding included), shrinking the loss for heavily padded batches.
    # Average over the real (non-padding) tokens instead.
    return tf.reduce_sum(loss_) / tf.reduce_sum(mask)
def get_model(input_vocab_size, target_vocab_size):
    """Create the optimizer, training metrics and Transformer instance.

    Returns:
        Tuple ``(optimizer, train_loss, train_accuracy, transformer)``.
    """
    schedule = CustomSchedule(_config.d_model)
    optimizer = tf.keras.optimizers.Adam(schedule, beta_1=0.9, beta_2=0.98,
                                         epsilon=1e-9)
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        name='train_accuracy')
    # Vocabulary sizes are bumped by one to leave room for the extra id.
    transformer = Transformer(_config.num_layers, _config.d_model, _config.num_heads, _config.dff,
                              input_vocab_size + 1, target_vocab_size + 1,
                              pe_input=input_vocab_size + 1,
                              pe_target=target_vocab_size + 1,
                              rate=_config.dropout_rate)
    return optimizer, train_loss, train_accuracy, transformer
def load_checkpoint(transformer, optimizer):
    """Restore the newest checkpoint (if one exists) into the given
    model and optimizer; no-op when no checkpoint has been written."""
    ckpt = tf.train.Checkpoint(transformer=transformer,
                               optimizer=optimizer)
    manager = tf.train.CheckpointManager(ckpt, _config.checkpoint_path, max_to_keep=5)
    if manager.latest_checkpoint:
        ckpt.restore(manager.latest_checkpoint)
        print('已恢复至最新的检查点!')
def main():
    """
    Ignore for production use.
    Smoke-test section, intended as scaffolding for a scheduled-sampling
    experiment. Still to be done:
    1. Embedding Mix - blended word-embedding algorithm
    2. Add a second decoder so training passes through two decoders
       sharing the same parameters
    3. Weights update - back-propagate through the second decoder only
    """
    # Fake input/output batches: 64 source sequences of length 30 and
    # 64 target sequences of length 20, all ones.
    inp = tf.ones([64, 30])
    tar = tf.ones([64, 20])
    tar_inp = tar[:, :-1]
    tar_real = tar[:, 1:]  # NOTE(review): unused here; kept for the planned training step
    enc_padding_mask, combined_mask, dec_padding_mask = self_attention.create_masks(inp, tar_inp)
    # Build a throwaway model (vocab sizes hard-coded to 666) and run one
    # forward pass so summary() can report the built shapes.
    transformer = Transformer(_config.num_layers, _config.d_model, _config.num_heads, _config.dff,
                              666, 666,
                              pe_input=666,
                              pe_target=666,
                              rate=_config.dropout_rate)
    transformer(inp, tar_inp,
                True,
                enc_padding_mask,
                combined_mask,
                dec_padding_mask)
    transformer.summary()
    pass
# Run the shape/summary smoke test only when executed as a script.
if __name__ == '__main__':
    main()
| true |
045e85d1ff5f777589a40817f3b7c081b4efa746 | Python | IshanManchanda/competitive-python | /KickStart 2019/Round B/b.py | UTF-8 | 712 | 2.8125 | 3 | [
"MIT"
] | permissive | def main():
    from sys import stdin, stdout
    from collections import defaultdict
    # Local aliases for builtins and I/O methods: a CPython
    # micro-optimization (local lookups are faster than globals)
    # common in competitive-programming code.
    rl = stdin.readline
    wl = stdout.write
    int1 = int
    range1 = range
    sum1 = sum
    max1 = max
    map1 = map
    list1 = list
    d = defaultdict(int)
    t = int1(rl())  # number of test cases
    for tn in range1(1, t + 1):
        # n values follow; s caps how many copies of one value may count.
        # The range1(1001) below assumes every value fits in 0..1000.
        n, s = map1(int1, rl().split())
        a = list1(map1(int1, rl().split()))
        ans = -1
        # arr[i] maps value -> count of that value among the first i
        # elements (prefix-count dicts; arr[0] stays empty).
        arr = [d.copy(), d.copy()]
        arr[1][a[0]] += 1
        for i in range1(1, n):
            arr.append(arr[i].copy())
            arr[i + 1][a[i]] += 1
        # For every subarray a[j:i], take each value's occurrence count,
        # but contribute 0 for any value occurring more than s times;
        # keep the best total. O(n^2 * 1001) overall.
        for i in range1(1, n + 1):
            for j in range1(i):
                nums = [arr[i][x] - arr[j][x] for x in range1(1001)]
                total = sum1(x if x <= s else 0 for x in nums)
                ans = max1(ans, total)
        wl('Case #%d: %d\n' % (tn, ans))
main()
| true |
22c4923a1bec6c6183097a28eedcc9abf60ad198 | Python | real-limoges/Comp135 | /Assignment8/knn.py~ | UTF-8 | 8,657 | 3.265625 | 3 | [] | no_license | #!/usr/bin/python
import copy
import arff
from math import exp
from math import ceil
import sys
from random import randrange
# Opens the ARFF data file and reads into memory
def get_data(data_file):
    """Read an ARFF file into a list of point records.

    Each record is a dict with the integer 'class' (last ARFF column),
    the remaining feature values under 'point', and a 'cert' certainty
    initialized to 0.0.
    """
    records = []
    for raw in arff.load(data_file):
        features = list(raw)
        label = int(features.pop())  # last column is the class label
        records.append({
            'class': label,
            'point': features,
            'cert': 0.0
        })
    return records
# Finds the kNN; calls voting_unlabeled to have the certainty calculated
def kNN(training, unlabeled, k, sigma, classes):
    """Score every unlabeled point with weighted k-nearest-neighbours.

    For each point, find the k closest training examples and store the
    resulting classification certainty back on the point's 'cert' key.
    """
    for candidate in unlabeled:
        # Pair each training example's distance with its index.
        dists = [[distance(candidate, training[t]), t]
                 for t in range(len(training))]
        # Extract the k smallest (distance, index) pairs.
        nearest = []
        for _ in range(k):
            nearest.append(dists.pop(dists.index(min(dists))))
        candidate['cert'] = voting_unlabeled(nearest, training, candidate, sigma, classes)
# for the kNN it calculates the votes for each. returns the difference
# between the first and second place vote (the certainty)
def voting_unlabeled(kNN, training, point, sigma, classes):
    """Gaussian-weighted vote of the k nearest neighbours.

    Each neighbour contributes exp(-d^2 / (2*sigma^2)) to its class's
    tally. Returns the margin between the best and second-best tallies,
    i.e. how certain the classifier is about this point. (``point``
    itself is unused; the distances are already carried in ``kNN``.)
    """
    tallies = [0] * classes
    for dist, idx in kNN:
        weight = exp(float(-1 * (dist ** 2) / (2 * (sigma ** 2))))
        tallies[training[idx]['class']] += weight
    best = tallies.pop(tallies.index(max(tallies)))
    runner_up = tallies.pop(tallies.index(max(tallies)))
    return best - runner_up
# Finds the Euclidean distance between the unlabeled and labeled points
def distance(point, train_point):
    """Euclidean distance between the feature vectors of two points."""
    squared = sum((point['point'][i] - train_point['point'][i]) ** 2
                  for i in range(len(point['point'])))
    return squared ** 0.5
# Requests m additional labels from the dataset. If there are less than
# m unlabeled points remaining, it labels the remainder
def request(unlabeled, training, m):
    """Move the m least-certain unlabeled points into the training set.

    If fewer than m unlabeled points remain, all of them are moved.
    Both lists are modified in place.
    """
    m = min(m, len(unlabeled))
    for _ in range(m):
        certainties = [p['cert'] for p in unlabeled]
        least = certainties.index(min(certainties))
        training.append(unlabeled.pop(least))
# Randomly pops off V instances from the data read in to serve as the
# validation set
def create_validation(unlabeled, V):
    """Randomly remove V points from ``unlabeled`` (in place) and return
    them as the validation set."""
    held_out = []
    for _ in range(V):
        held_out.append(unlabeled.pop(randrange(0, len(unlabeled))))
    return held_out
# Randomly pops off k/classes points from each class, after the validation
# data is created
def create_training(unlabeled, k, classes):
    """Randomly draw ceil(k / classes) points of each class from
    ``unlabeled`` (removing them in place) to seed the labeled set.

    NOTE(review): loops forever if some class has no remaining points,
    exactly like the original rejection-sampling loop.
    """
    per_class = int(ceil(float(k) / float(classes)))
    seed = []
    for label in range(classes):
        picked = 0
        while picked < per_class:
            idx = randrange(0, len(unlabeled))
            if unlabeled[idx]['class'] == label:
                seed.append(unlabeled.pop(idx))
                picked += 1
    return seed
# finds the kNN of the validation test set. calls function to write to
# file the accuracy of the validation set on the training set.
def kNN_validation(validation, train_set, output_file, classes, sigma, k):
    """Classify every validation point with weighted kNN against
    ``train_set`` and append the accuracy (percent, one decimal place)
    to ``output_file``."""
    correct = 0
    for point in validation:
        dists = [[distance(point, train_set[t]), t]
                 for t in range(len(train_set))]
        nearest = []
        for _ in range(k):
            nearest.append(dists.pop(dists.index(min(dists))))
        correct = correct + voting_validation(point, nearest, classes, sigma, train_set)
    accuracy = round(float(correct) / float(len(validation)) * 100, 1)
    output_file.write(str(accuracy))
# finds the voting of the kNN for the validation dataset. Returns 1 if
# the vote has the same class as the true class. Returns 0 otherwise
def voting_validation(vld_pt, kNN, classes, sigma, train_set):
    """Gaussian-weighted vote over the k nearest neighbours.

    Returns 1 when the winning class equals the validation point's true
    class, 0 otherwise.
    """
    tallies = [0] * int(classes)
    for dist, idx in kNN:
        weight = exp(float(-1 * (dist ** 2) / (2 * (sigma ** 2))))
        tallies[train_set[idx]['class']] += weight
    winner = tallies.index(max(tallies))
    return 1 if vld_pt['class'] == winner else 0
# Preprocesses the data to to have the classes be next to each other
def preprocess(filename, unlabeled):
    """Remap class labels to the contiguous range 0..n-1, in place.

    Ionosphere uses 1-based labels (shift down by one); every other
    dataset is treated as ecoli, whose sparse labels 1/3/5/7/8 map to
    0..4. Labels outside the ecoli map are left untouched, matching the
    original elif chain.
    """
    if filename == 'Ionosphere.arff':
        for entry in unlabeled:
            entry['class'] = entry['class'] - 1
    else:
        ecoli_map = {1: 0, 3: 1, 5: 2, 7: 3, 8: 4}
        for entry in unlabeled:
            entry['class'] = ecoli_map.get(entry['class'], entry['class'])
# Writes the header for the output files
def initial_write(x, y, output_u, output_r, k):
    """Write the shared report header to both output files.

    The header states k, then the labeled-set size after each batch of
    (up to 5) label requests. ``x`` is the number of still-unlabeled
    points, ``y`` the current labeled count.
    """
    header = "K = " + str(k) + '\n\n' + "Number of Labeled Instances: " + '\n'
    while x > 0:
        # The final chunk (strictly fewer than 5 left) gets no trailing
        # comma; a chunk of exactly 5 does, matching the original.
        final_chunk = x < 5
        step = x if final_chunk else 5
        y += step
        x -= step
        header += str(y) if final_chunk else str(y) + ", "
    header += '\n\n'
    output_u.write(header)
    output_r.write(header)
# Runs the uncertainty sampling routine
def uncertainty_sampling(training, validation, unlabeled, sigma, classes, output_u, k, m , item):
    # Active-learning loop: score every unlabeled point with kNN, move
    # the m least-certain ones into the training set, and log validation
    # accuracy after each batch until the pool is empty.
    # NOTE: the bare `print` statement below makes this file Python 2.
    s = "Trial " + str(item + 1) + ": "
    output_u.write(s)
    #Loop through unlabeled and add to training until empty
    while len(unlabeled) >0:
        print "Uncertainty Sampling"
        kNN(training, unlabeled, k, sigma, classes)
        request(unlabeled, training, m)
        kNN_validation(validation, training, output_u, classes, sigma, k)
        if len(unlabeled) != 0:
            output_u.write(", ")
    output_u.write('\n')
# Runs the random sampling routine
def random_sampling(training_random, validation, unlabeled_random, sigma, classes, output_r, k, m, item):
    """Baseline loop: repeatedly move m points from the pool into the
    training set via request() and log validation accuracy after each
    batch until the pool is empty."""
    output_r.write("Trial " + str(item + 1) + ": ")
    print("Random Sampling")
    while len(unlabeled_random) > 0:
        request(unlabeled_random, training_random, m)
        kNN_validation(validation, training_random, output_r, classes, sigma, k)
        if len(unlabeled_random) != 0:
            output_r.write(", ")
    output_r.write('\n')
# Main Function.
def main(args):
    # args mirrors sys.argv:
    #   args[2] = k, args[3] = dataset file, args[4]/args[5] = output
    #   files for uncertainty/random sampling.
    # NOTE(review): args[1] is never read -- confirm the intended CLI.
    k = int(args[2])
    m = 5  # labels requested per active-learning batch
    output_u = open(args[4], 'w')
    output_r = open(args[5], 'w')
    # Data Preprocessing: per-dataset class count, Gaussian kernel width
    # (sigma) and validation-set size (V). An unknown filename leaves
    # these unset and raises NameError further down.
    if args[3] == 'Ionosphere.arff':
        classes = 2
        sigma = 3.0
        V = 40
    elif args[3] == 'ecoli.arff':
        classes = 5
        sigma = 0.75
        V = 70
    unlabeled = get_data(args[3])
    preprocess(args[3], unlabeled)
    validation = create_validation(unlabeled, V)
    training = create_training(unlabeled, k, classes)
    # Deep copies so the random-sampling baseline starts from the exact
    # same pools without sharing mutable point dicts.
    unlabeled_random = copy.deepcopy(list(unlabeled))
    training_random = copy.deepcopy(list(training))
    initial_write(len(unlabeled), len(training), output_u, output_r, k)
    # Ten trials; after the first, return every point to the pool
    # (resetting certainties) and redraw fresh validation/training splits.
    for item in range(0,10):
        if item != 0:
            for y in range(0, len(validation)):
                x = validation.pop()
                x['cert'] = 0.0
                unlabeled.append(x)
            for y in range(0,len(training)):
                x = training.pop()
                x['cert'] = 0.0
                unlabeled.append(x)
            unlabeled_random = []
            training_random = []
            validation = create_validation(unlabeled, V)
            training = create_training(unlabeled, k, classes)
            unlabeled_random = copy.deepcopy(list(unlabeled))
            training_random = copy.deepcopy(list(training))
        uncertainty_sampling(training, validation, unlabeled, sigma, classes, output_u, k, m, item)
        random_sampling(training_random, validation, unlabeled_random, sigma, classes, output_r, k, m, item)
# Entry point: run immediately with the command-line arguments.
main(sys.argv)
| true |
3a7abc12833adde772f871a1ad6f9f894ab56b85 | Python | wupai/myFirst | /lesson27.py | UTF-8 | 405 | 4.0625 | 4 | [] | no_license | '''
Problem: use recursive function calls to print the 5 input characters
in reverse order.
Analysis: none.
'''
def out(s, l):
    """Recursively print the first ``l`` characters of ``s`` in reverse
    order, one per line; returns 0 when the recursion bottoms out."""
    if l != 0:
        print(s[l - 1])
        return out(s, l - 1)
    return 0
def out1(s):
    """Recursively print ``s`` in reverse order, one character per line."""
    if not s:
        return None
    print(s[-1])
    return out1(s[:-1])
if __name__ == '__main__':
    # Read a word and print it reversed via both recursive helpers.
    s = input('please input a word:')  # bug fix: prompt typo "plese"
    out(s, len(s))
    out1(s)
| true |
59b034ec6f1f0eb228a38142c6142594598df9e9 | Python | Avedati/VisionSystemWorkshopCode | /week2/main.py | UTF-8 | 1,228 | 2.734375 | 3 | [] | no_license | import cv2
import numpy as np
cap = cv2.VideoCapture(0)
def coerceIntoTape(polygon):
	# TODO: this
	# Placeholder: should decide whether `polygon` (an approxPolyDP
	# contour from the loop below) matches a strip of vision tape;
	# always False until implemented.
	return False
def coerceIntoHexagon(polygon):
	# TODO: this
	# Placeholder: should decide whether `polygon` matches the hexagonal
	# target; always False until implemented.
	return False
while True:
	_, picture = cap.read()
	# Grayscale -> blur -> Canny produces thin edge fragments.
	gray = cv2.cvtColor(picture, cv2.COLOR_BGR2GRAY)
	gaussian = cv2.GaussianBlur(gray, ksize=(5,5), sigmaX=0)
	canny = cv2.Canny(gaussian, 80, 160)
	# Dilate then erode with the same kernel (morphological close)
	# to join broken edge segments into solid contours.
	kernel = np.ones((15, 15), 'uint8')
	dilated = cv2.dilate(canny, kernel, iterations=1)
	eroded = cv2.erode(dilated, kernel, iterations=1)
	contours, hierarchy = cv2.findContours(eroded, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
	# Approximate each contour to a polygon, then keep only reasonably
	# simple (< 20 vertices) and large (perimeter > 400 px) shapes.
	polygons = [cv2.approxPolyDP(c, 0.005 * cv2.arcLength(c, True), True) for c in contours]
	polygons = [p for p in polygons if len(p) < 20]
	polygons = [p for p in polygons if cv2.arcLength(p, False) > 400]
	for p in polygons:
		object_type = None
		if coerceIntoTape(p):
			object_type = 'tape'
		elif coerceIntoHexagon(p):
			object_type = 'hexagon'
		if object_type is not None:
			# TODO: do something with our object
			pass
	# optional stuff
	cv2.drawContours(picture, contours, -1, (0, 255, 0), 2)
	cv2.imshow("contours", picture) # display the picture (optional)
	if cv2.waitKey(1) & 0xFF == 27: # escape key (optional)
		break
| true |
3cebee47d645b331b75903cd53da2605b1e7a280 | Python | akankshanb/Neural-Nets | /Project3a/feature_extraction.py | UTF-8 | 831 | 2.875 | 3 | [] | no_license | import numpy as np
from scipy.stats import kurtosis, skew
def generate_features(X):
    """Extract a 21-element feature vector from a (samples, 3) signal.

    Features, concatenated in order: per-column means (3), the upper
    triangle of the 3x3 covariance matrix (6), skewness (3), Pearson
    kurtosis (3), and for each column the dominant FFT magnitude (3)
    and its frequency in Hz assuming a 50 Hz sampling rate (3).

    Args:
        X: array-like of shape (n_samples, 3) -- assumes exactly three
           columns (the covariance slicing and range(3) depend on it).

    Returns:
        numpy.ndarray of length 21.
    """
    X = np.array(X)
    f1 = np.mean(X, axis=0)
    c = np.cov(np.transpose(X))
    # Upper triangle of the covariance matrix: 3 + 2 + 1 = 6 values.
    f2 = np.concatenate((c[0, 0:3], c[1, 1:3], c[2, 2:3]), axis=0)
    f3 = skew(X)
    f4 = kurtosis(X, axis=0, fisher=False)
    f5 = np.zeros(3)
    f6 = np.zeros(3)
    for i in range(3):
        g = np.abs(np.fft.fft(X[:, i]))
        g = g[0:int(np.ceil(len(g) / 2))]  # keep the non-negative half
        g[0] = 0  # drop the DC component
        w = 50 * np.arange(len(g)) / (2 * len(g))
        # Bug fix: int(np.where(g == v)[0]) raised when the maximum was
        # tied (np.where returns an array); argmax picks the first max.
        idx = int(np.argmax(g))
        f5[i] = g[idx]
        f6[i] = w[idx]
    return np.concatenate((f1, f2, f3, f4, f5, f6), axis=0)
# xftrain = generate_features([[1,2,3],[4,5,6],[7,8,9],[1,5,8]])
| true |
96f44e5c79d40b08e12b3dbaf1431abee831b8b0 | Python | Sarah-Marion/Password_Locker-ultimate- | /user_data.py | UTF-8 | 2,146 | 3.109375 | 3 | [
"MIT"
] | permissive | class User:
"""
class that generates new instances of users
"""
    def __init__(self, username: str, password: str) -> None:
        # Credentials for this account; stored in plain text (no hashing).
        self.username = username
        self.password = password
user_list = []
    def save_user(self) -> None:
        """
        save_user method saves user object into user_list
        (appends this instance to the shared class-level registry)
        """
        User.user_list.append(self)
@classmethod
def user_exists(cls, userName):
"""
Method that checks if a user exists in the user list.
Args:
username: username to search if the user exists
Returns :
Boolean: True or false depending if the user exists
"""
for user in cls.user_list:
if user.username == userName:
return True
else:
return False
@classmethod
def find_user(cls, userName, passwrd):
"""
find_user method that checks if a username already exists
"""
for user in cls.user_list:
if user.username == userName:
return True
else:
return False
@classmethod
def confirm_user(cls, userName, passwrd):
"""
confirm_user method that checks if a password matches a username
"""
for User in cls.user_list:
if cls.find_user(userName, passwrd):
password_match = User.password
if password_match == passwrd:
return True
else:
return False
else:
return False
@classmethod
def change_userpass(cls, userName, new_pass):
"""
change_userpass method changes a user's password
"""
for user in cls.user_list:
if cls.find_user(userName, new_pass):
user.password = new_pass
return user
else:
return False
    def user_delete_account(self) -> None:
        """
        user_delete_account method that deletes a particular acount
        (removes this instance from the shared user_list registry;
        raises ValueError if the instance was never saved)
        """
        User.user_list.remove(self)
| true |
e66ca01fa0f90e2c1d2c7b41ad2312267e3f3f15 | Python | jayshreevashistha/forskml | /Coding Challenge/Day_9/iccsqlite.py | UTF-8 | 982 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu May 16 23:55:19 2019
@author: computer
"""
from bs4 import BeautifulSoup
import requests
wiki = "https://www.icc-cricket.com/rankings/mens/team-rankings/odi"
source = requests.get(wiki).text
soup = BeautifulSoup(source,"lxml")
right_table=soup.find('table', class_='table')
A=[]
B=[]
C=[]
D=[]
E=[]
for bdy in right_table.find_all("tbody"):
for row in bdy.find_all("tr"):
cells = row.find_all('td')
A.append(cells[0].text.strip())
B.append(cells[1].text.strip())
C.append(cells[2].text.strip())
D.append(cells[3].text.strip())
E.append(cells[4].text.strip())
import pandas as pd
from collections import OrderedDict
col_name = ["Position","Team","Matches","Points","Rating"]
col_data = OrderedDict(zip(col_name,[A,B,C,D]))
df = pd.DataFrame(col_data)
import os
import sqlite3
from pandas import DataFrame
# Open (or create) the local SQLite database and get a cursor for writes.
conn = sqlite3.connect ( 'icc.db' )
c = conn.cursor()
c.execute() | true |
13fdc2a273ba47f0949d952a709367cf37c0be2f | Python | husensofteng/scripts | /trypsindigestion.py | UTF-8 | 1,692 | 2.71875 | 3 | [] | no_license |
def trypsin_digestion(proseq_incl_stop, miss_cleavage):
"""digest peptides using the tryptic rule, allowing for miss cleavages
@params:
proseq_incl_stop (str): full protein sequence to be digested
miss_cleavage (int): number of allowed missed cleavages
@return:
list: tryptic peptides
"""
all_peptides = []
for protseq in proseq_incl_stop.split('*'):
if len(protseq)<=0:
continue
peptides = []
peptide = ''
"remove the first K/R if in the begining of a reading frame"
protseq_updated = protseq[0::]
if protseq[0]=='K' or protseq[0]=='R' and len(protseq)>1:
protseq_updated = protseq[1::]
for c, aa in enumerate(protseq_updated):
peptide += aa
next_aa = ''
try:
next_aa = protseq_updated[c + 1]
except IndexError:
pass
if aa in ['K', 'R'] and next_aa != 'P': # for trypsin peptides
if len(peptide) > 0:
peptides.append(peptide)
peptide = ''
continue
if len(peptide) > 0:
peptides.append(peptide)
peptides_with_miss_cleavage = []
for i in range(1, miss_cleavage + 1):
for j, pep in enumerate(peptides):
if j + i < len(peptides):
peptide = ''.join([x for x in (peptides[j:j + i + 1])])
peptides_with_miss_cleavage.append(peptide)
peptides.extend(peptides_with_miss_cleavage)
all_peptides.extend(peptides)
return all_peptides
| true |