blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
51f0c7102c60f310e716a81a0cad72f8b0bd5ffb | Python | alfa-netology/hw-work-with-files | /main.py | UTF-8 | 427 | 2.78125 | 3 | [] | no_license | import modules.own_fucntions as functions
# Task 1: build the recipe cook book dict from the source file.
file = 'source/recipes.txt'
cook_book = functions.create_dict_from_file(file)

# Task 2: print the shopping list for the chosen dishes and head count.
dishes = ('фахитос', 'омлет', 'хинкали')
person_count = 2
print(functions.get_shop_list_by_dishes(dishes, person_count, cook_book))

# Task 3: sort/merge the numbered source text files.
path = 'source/'
files = ['1.txt', '2.txt', '3.txt']
functions.sort_source_files(path, files)
| true |
d512b50e0a6c9a222900e5c3a236611a35e692aa | Python | eth-csem/csemlib | /csemlib/models/crust.py | UTF-8 | 9,749 | 2.796875 | 3 | [] | no_license | import io
import os
import numpy as np
import scipy.interpolate as interp
import xarray
from .topography import Topography
from csemlib.tools.helpers import load_lib
lib = load_lib()
class Crust(object):
    """
    Class handling crustal models.

    Loads the gridded crustal depth and vs files from
    ``data/base_model/crust`` into an ``xarray.Dataset`` and evaluates them
    (via spherical spline interpolation) at arbitrary points.
    """
    def __init__(self):
        # Setup directories, data structure and smoothing factors.
        directory = os.path.split(os.path.split(__file__)[0])[0]
        self.directory = os.path.join(directory, 'data','base_model','crust')
        self._data = xarray.Dataset()
        self.crust_dep_smooth_fac = 0.0
        self.crust_vs_smooth_fac = 0.0

        # Read colatitude values.
        with io.open(os.path.join(self.directory, 'crust_x'), 'rt') as fh:
            col = np.asarray(fh.readlines(), dtype=float)

        # Read longitude values.
        with io.open(os.path.join(self.directory, 'crust_y'), 'rt') as fh:
            lon = np.asarray(fh.readlines(), dtype=float)

        # Read crustal depth and vs; one value per line, reshaped onto the
        # (colatitude, longitude) grid.
        for p in ['crust_dep', 'crust_vs']:
            with io.open(os.path.join(self.directory, p), 'rt') as fh:
                val = np.asarray(fh.readlines(), dtype=float)
            val = val.reshape(len(col), len(lon))
            self._data[p] = (('col', 'lon'), val)
            if p == 'crust_dep':
                self._data[p].attrs['units'] = 'km'
            elif p == 'crust_vs':
                self._data[p].attrs['units'] = 'km/s'

        # Add coordinates, converted to radians.
        self._data.coords['col'] = np.radians(col)
        # Scipy's RectSphereBivariateSpline requires the grid to start at lon 0
        self.min_lon = np.min(np.radians(lon))
        self._data.coords['lon'] = np.radians(lon) - self.min_lon

        # Add units.
        self._data.coords['col'].attrs['units'] = 'radians'
        self._data.coords['lon'].attrs['units'] = 'radians'

    def interpolate(self, colat, lon, param=None, smooth_fac=0.0):
        """
        Evaluate crustal depth and vs given on a regular spherical grid on the points of an arbitrary grid, using
        spline interpolation.

        :param colat: colatitude array (radians).
        :param lon: longitude array (radians).
        :param param: either 'crust_dep' or 'crust_vs'.
        :param smooth_fac: smoothing factor passed to the spline fit.
        :return: Interpolated crustal depth or crustal vs at the input colatitudes and longitudes.
        """
        # Create smoother object.
        lut = interp.RectSphereBivariateSpline(self._data.coords['col'][::-1],
                                               self._data.coords['lon'],
                                               self._data[param], s=smooth_fac)

        # Because the colatitude array is reversed, we must also reverse the request.
        colat_reverse = np.pi - colat

        # Convert longitudes to coordinate system of the crustal model
        # (shifted so the grid starts at longitude 0, wrapped into [0, 2*pi)).
        lon_reverse = np.copy(lon) - self.min_lon
        lon_reverse[lon_reverse < 0.0] = \
            2.0 * np.pi + lon_reverse[lon_reverse < 0.0]
        return lut.ev(colat_reverse, lon_reverse)

    def eval_point_cloud_grid_data(self, GridData):
        """
        Get crustal depth and velocities at the grid points.

        :param GridData: GridData structure
        :return: Updated GridData.
        """
        print('Evaluating Crust')
        # Split into crustal and non crustal zone (top ~100.5 km of radius).
        cst_zone = GridData.df[GridData.df['r'] >= (6371.0 - 100.5)]

        # Compute crustal depths and vs for crustal zone coordinates at the relevant grid points through interpolation.
        crust_dep = self.interpolate(cst_zone['c'], cst_zone['l'], param='crust_dep', smooth_fac=self.crust_dep_smooth_fac)
        crust_vs = self.interpolate(cst_zone['c'], cst_zone['l'], param='crust_vs', smooth_fac=self.crust_vs_smooth_fac)

        # Get Topography.
        top = Topography()
        top.read()
        topo = top.eval(cst_zone['c'], cst_zone['l'])

        # Convert crust_depth to thickness, below mountains increase with (positive) topography. In oceans add (negative) topography.
        crust_dep += topo

        # Add crust and apply a 25 percent taper.
        #cst_zone = add_crust_all_params_topo_griddata_with_taper(cst_zone, crust_dep, crust_vs, topo, taper_percentage=0.25)
        # NOTE(review): lib.add_crust appears to update the passed value
        # arrays in place — confirm against tools.helpers.load_lib.
        lib.add_crust(len(cst_zone), crust_dep, crust_vs, topo, cst_zone['vsv'].values, cst_zone['vsh'].values,
                      cst_zone['vpv'].values, cst_zone['vph'].values, cst_zone['rho'].values, cst_zone['r'].values)

        # Append crustal and non crustal zone back together.
        GridData.df.update(cst_zone)
        return GridData
def add_crust_all_params_topo_griddata_with_taper(cst_zone, crust_dep, crust_vs, topo, taper_percentage=0.25):
    """
    Scale crustal vs to the other parameters.

    Pure-Python reference for the compiled ``lib.add_crust`` routine: above
    the taper zone the crustal values overwrite the mantle values; inside the
    taper zone they are blended linearly; below it the mantle is untouched.

    :param cst_zone: Part of GridData that are actually within the crust.
    :param crust_dep: Array of crustal depth (km).
    :param crust_vs: Array of crustal velocity (km/s).
    :param topo: Array of topography, needed to discriminate oceanic and continental scaling.
    :param taper_percentage: Vertical taper percentage of the crustal thickness.
    :return: Updated GridData.
    """
    r_earth = 6371.0
    r_ani = 6191.0
    s_ani = 0.0011
    for i in range(len(cst_zone['r'])):
        # Half-width of the blend region, proportional to crustal thickness.
        taper_hwidth = crust_dep[i] * taper_percentage

        # If above taper overwrite with crust.
        if cst_zone['r'].values[i] > (r_earth - crust_dep[i] + taper_hwidth):
            # Ascribe crustal vsh and vsv based on the averaged vs by Meier et al. (2007).
            if 'vsv' in cst_zone.columns:
                cst_zone['vsv'].values[i] = crust_vs[i] - 0.5 * s_ani * (cst_zone['r'].values[i] - r_ani)
            if 'vsh' in cst_zone.columns:
                cst_zone['vsh'].values[i] = crust_vs[i] + 0.5 * s_ani * (cst_zone['r'].values[i] - r_ani)

            # Scaling to P velocities and density for continental crust.
            if topo[i] >= 0:
                if 'vpv' in cst_zone.columns:
                    cst_zone['vpv'].values[i] = 1.5399 * crust_vs[i] + 0.840
                if 'vph' in cst_zone.columns:
                    cst_zone['vph'].values[i] = 1.5399 * crust_vs[i] + 0.840
                if 'vp' in cst_zone.columns:
                    cst_zone['vp'].values[i] = 1.5399 * crust_vs[i] + 0.840
                if 'rho' in cst_zone.columns:
                    cst_zone['rho'].values[i] = 0.2277 * crust_vs[i] + 2.016

            # Scaling to P velocities and density for oceanic crust.
            if topo[i] < 0:
                if 'vpv' in cst_zone.columns:
                    cst_zone['vpv'].values[i] = 1.5865 * crust_vs[i] + 0.844
                if 'vph' in cst_zone.columns:
                    cst_zone['vph'].values[i] = 1.5865 * crust_vs[i] + 0.844
                if 'vp' in cst_zone.columns:
                    cst_zone['vp'].values[i] = 1.5865 * crust_vs[i] + 0.844
                if 'rho' in cst_zone.columns:
                    cst_zone['rho'].values[i] = 0.2547 * crust_vs[i] + 1.979

        # If below taper region in mantle, do nothing and continue.
        elif cst_zone['r'].values[i] < (r_earth - crust_dep[i] - taper_hwidth):
            continue

        # In taper region, taper linearly to mantle properties.
        else:
            dist_from_mantle = cst_zone['r'].values[i] - (r_earth - crust_dep[i] - taper_hwidth)
            taper_width = 2.0 * taper_hwidth
            frac_crust = dist_from_mantle / taper_width
            frac_mantle = 1.0 - frac_crust

            # Ascribe crustal vsh and vsv based on the averaged vs by Meier et al. (2007).
            if 'vsv' in cst_zone.columns:
                vsv_crust = crust_vs[i] - 0.5 * s_ani * (cst_zone['r'].values[i] - r_ani)
                cst_zone['vsv'].values[i] = (vsv_crust * frac_crust) + (cst_zone['vsv'].values[i] * frac_mantle)
            if 'vsh' in cst_zone.columns:
                vsh_crust = crust_vs[i] + 0.5 * s_ani * (cst_zone['r'].values[i] - r_ani)
                cst_zone['vsh'].values[i] = (vsh_crust * frac_crust) + (cst_zone['vsh'].values[i] * frac_mantle)

            # Scaling to P velocities and density for continental crust.
            if topo[i] >= 0:
                if 'vpv' in cst_zone.columns:
                    cst_zone['vpv'].values[i] = (1.5399 * crust_vs[i] + 0.840) * frac_crust + (cst_zone['vpv'].values[i] * frac_mantle)
                if 'vph' in cst_zone.columns:
                    cst_zone['vph'].values[i] = (1.5399 * crust_vs[i] + 0.840) * frac_crust + (cst_zone['vph'].values[i] * frac_mantle)
                if 'vp' in cst_zone.columns:
                    cst_zone['vp'].values[i] = (1.5399 * crust_vs[i] + 0.840) * frac_crust + (cst_zone['vp'].values[i] * frac_mantle)
                if 'rho' in cst_zone.columns:
                    cst_zone['rho'].values[i] = (0.2277 * crust_vs[i] + 2.016) * frac_crust + (cst_zone['rho'].values[i] * frac_mantle)

            # Scaling to P velocity and density for oceanic crust.
            if topo[i] < 0:
                if 'vpv' in cst_zone.columns:
                    cst_zone['vpv'].values[i] = (1.5865 * crust_vs[i] + 0.844) * frac_crust + (cst_zone['vpv'].values[i] * frac_mantle)
                if 'vph' in cst_zone.columns:
                    cst_zone['vph'].values[i] = (1.5865 * crust_vs[i] + 0.844) * frac_crust + (cst_zone['vph'].values[i] * frac_mantle)
                if 'vp' in cst_zone.columns:
                    cst_zone['vp'].values[i] = (1.5865 * crust_vs[i] + 0.844) * frac_crust + (cst_zone['vp'].values[i] * frac_mantle)
                if 'rho' in cst_zone.columns:
                    cst_zone['rho'].values[i] = (0.2547 * crust_vs[i] + 1.979) * frac_crust + (cst_zone['rho'].values[i] * frac_mantle)

    return cst_zone
| true |
eff5a43c88da45c3d7779f054f6eace178f76cbc | Python | synbiomine/synbiomine-tools | /tools/extractModel.py | UTF-8 | 1,238 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
import argparse

import psycopg2

############
### MAIN ###
############

# Extract the serialized InterMine model XML from the intermine_metadata
# table and write it to a file.
parser = argparse.ArgumentParser('Extract an InterMine model from an existing database.')
parser.add_argument('dbName', help='name of the database.')
parser.add_argument('modelPath', help='path to save the InterMine model xml')
parser.add_argument('--dbUser', help='db user if this is different from the current')
parser.add_argument('--dbHost', help='db host if this is not localhost')
parser.add_argument('--dbPort', help='db port if this is not localhost')
parser.add_argument('--dbPass', help='db password if this is required')
args = parser.parse_args()

modelPath = args.modelPath
dbName = args.dbName

# Build the libpq connection string from the supplied arguments.
connString = "dbname=%s" % dbName
if args.dbUser:
    connString += " user=%s" % args.dbUser

dbHost = 'localhost'
if args.dbHost:
    dbHost = args.dbHost
connString += " host=%s" % dbHost

if args.dbPort:
    # BUG FIX: the original used ``+`` instead of ``+=`` here, so the port
    # fragment was built and discarded and --dbPort was silently ignored.
    connString += " port=%s" % args.dbPort

if args.dbPass:
    connString += " password=%s" % args.dbPass

# Fetch the stored model XML and write it out verbatim.
with psycopg2.connect(connString) as conn, conn.cursor() as cur, open(modelPath, 'w') as f:
    cur.execute("SELECT value FROM intermine_metadata WHERE key='model'")
    model = cur.fetchone()[0]
    f.write(model)
| true |
1716913efec372d8c06b69d1f8391e8f105b00d1 | Python | MrSpadala/COVID-19 | /scripts/stats_province.py | UTF-8 | 3,417 | 3.09375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] | permissive |
import sys
import json
import argparse
from math import ceil
from pprint import pprint
from matplotlib import pyplot as plt
# Path of the .json dataset by provincia
DATA_FPATH = "../dati-json/dpc-covid19-ita-province.json"
def main():
    """Plot total positive cases and their daily increment per province."""
    # Parses arguments from cmd line
    args, provinces_input = get_args()

    # Load .json province data
    with open(DATA_FPATH, encoding="utf-8-sig") as f:
        data = json.load(f)

    # Create a set of available provinces
    provinces = set([x["sigla_provincia"].upper() for x in data])

    # Check if the requested provinces are in the data, otherwise throw an error
    for prov in provinces_input:
        if not prov in provinces:
            print(f"Error: province {prov} not found")
            exit(1)

    # Loop over requested provinces and plot positive cases and increment
    for prov in provinces_input:
        # Filter only requested area
        data_prov = list(
            filter(lambda x: x["sigla_provincia"]==prov, data)
        )

        # Extract the positive cases
        positives = [x["totale_casi"] for x in data_prov]
        # Remove null elements if present
        #positives = [x if x != None else 0 for x in positives]

        # Calculate the increment of positive cases day by day
        # (first entry is the raw first-day count).
        increments = [positives[0]]
        for i in range(1, len(positives)):
            increments.append(positives[i] - positives[i-1])

        # Transform dates from ISO format YYYY-MM-DDTHH:MM:SS to MM-DD
        dates = [x["data"][5:10] for x in data_prov]

        # Now for the plots, we have too much datapoints to have all of them on the x-axis
        # So we calculate how many ticks on x-axis we have to put
        N_XTICKS = 9 #ticks on x-axis to plot
        date_step = ceil(len(dates) / N_XTICKS) #on x-axis plot only one date every date_step
        offset = (len(dates)-1) % date_step #offset of the x-axis ticks so that the last data point has its tick on x-axis
        xticks = dates[offset::date_step]

        # Plot positive cases
        positives_plot = plt.figure(1)
        plt.title("Casi Positivi")
        plt.plot(dates, positives, label=prov)
        plt.xticks(xticks)
        plt.legend()
        # horizontal line on the highest value if there are different provinces
        if len(provinces_input) > 1:
            plt.hlines(max(positives), 0, len(dates)-1, "darkred", "--", linewidth=1)

        # Plot increments
        increments_plot = plt.figure(2)
        plt.title("Incremento Giornaliero Positivi")
        plt.plot(dates, increments, label=prov)
        plt.xticks(xticks)
        plt.legend()

    # Finally show the data, or save it if it was passed --save argument
    if not args.save:
        plt.show()
    else:
        fname_ending = "_".join(provinces_input) + ".png"
        plt.figure(1).savefig("tot_" + fname_ending, dpi=150)
        plt.figure(2).savefig("inc_" + fname_ending, dpi=150)
def get_args():
    """Parse command-line input; return (parsed args, upper-cased province codes)."""
    usage = f"""
    [ITA] Grafica l'andamento e l'incremento dei casi positivi per le provincie specificate
    [ENG] Plots the number and the increment of positive cases for the requested provices
    \tUsage: {sys.argv[0]} provincia1 provincia2 ...
    \texample: {sys.argv[0]} MI RM VE
    """
    parser = argparse.ArgumentParser(description="Covid-19, statistiche per province")
    parser.usage = usage
    parser.add_argument('--save', help='Saves the image as png image', action='store_true', default=False)
    cli_args, requested = parser.parse_known_args()
    # At least one province code is mandatory; everything is normalized to
    # upper case to match the dataset's "sigla_provincia" field.
    if not requested:
        parser.error("Need at least one provincia")
    return cli_args, [code.upper() for code in requested]
if __name__ == '__main__':
    # CLI entry point.
    main()
852d5ea14964c6225f55df029a43bdfa84bc3f4a | Python | LKY769215561/PythonCar | /PythonCar/venv/src/基础知识/IO编程/StringIO和BytesIO.py | UTF-8 | 881 | 4.3125 | 4 | [
"Apache-2.0"
] | permissive | # -*- coding: UTF-8 -*-
from io import StringIO
from io import BytesIO

'''
StringIO
Data does not have to be read from or written to a real file; it can also be
read and written in memory.  StringIO, as the name suggests, reads and
writes str in memory.  To write a str into a StringIO, first create one and
then write to it just like a file:
'''
f = StringIO()
f.write('hello')
f.write(' ')
f.write('carey')
# getvalue() returns everything written so far.
# NOTE(review): this rebinds the builtin ``str`` — works here but is bad practice.
str = f.getvalue()
print(str)

# A StringIO can also be initialised with content and read line by line;
# readline() returns '' at end of stream.
f2 = StringIO('Hello!\nHi!\nGoodbye!')
while True:
    s = f2.readline()
    if s == '':
        break
    print(s.strip())

'''
BytesIO
StringIO only handles str; to operate on binary data use BytesIO instead.
BytesIO reads and writes bytes in memory; create one and write some bytes:
'''
f3 = BytesIO()
f3.write('中国'.encode('utf-8'))
print(f3.getvalue())

# BytesIO can likewise be initialised with bytes and read back.
f4 = BytesIO(b'\xe4\xb8\xad\xe5\x9b\xbd')
print(f4.read())
| true |
ae1c6ce1c2769edb07dd32798c733e664e3fd06f | Python | Prookies/Python-Crash-Course | /unit7/practice7_1.py | UTF-8 | 88 | 3.09375 | 3 | [] | no_license | car=input("what car do you want to lend?")
# Echo the requested car back to the user (``car`` comes from the input() above).
print("Let me see if I can find you a "+car)
| true |
6867f82d038955e26e4356321de86ec135a18705 | Python | angappanmuthu/PythonTasks | /decorator.py | UTF-8 | 1,053 | 3.203125 | 3 | [] | no_license |
import json
# function to add to JSON
def write_json(data, filename='data.json'):
    """Serialize *data* to *filename* as pretty-printed (indent=4) JSON."""
    with open(filename, 'w') as handle:
        json.dump(data, handle, indent=4)
# Load the existing id/name records.
# NOTE(review): the file handles opened here and below are never closed.
json_file = open('data.json')
json_data = json.load(json_file)

# get names from the txt file using file handler
names = [name.strip('\n') for name in open('name.txt','r')]
# Working copy: isNameExist() whittles this down to the missing names.
not_exist_names = names.copy()
def isNameExist(data,names):
    # Remove every name that already appears as a 'name' value in ``data``
    # (a list of {'id', 'name'} dicts) from the module-level
    # ``not_exist_names`` list, and return that list.
    # NOTE(review): depends on the global ``not_exist_names`` instead of a
    # parameter, and remove() inside the double loop is O(n^3) worst case.
    for n in names:
        for ind,d in enumerate(data):
            if n == d['name']:
                try:
                    # May fire when the same name occurs twice in data.json.
                    not_exist_names.remove(d['name'])
                except ValueError:
                    print("ValueError")
    return not_exist_names
def create(data):
    # Append each missing name to the module-level ``json_data`` list with a
    # fresh sequential id, then return the combined list.
    # NOTE(review): ids continue from len(json_data), which assumes existing
    # ids are contiguous 1..len — TODO confirm.
    id = len(json_data)
    for nen in data:
        #print(nen)
        id = id + 1
        not_existing_names_data = {"id": id, "name": nen}
        #print(not_existing_names_data)
        json_data.append(not_existing_names_data)
    return json_data
# Determine which names are missing, append them with fresh ids, and persist.
name_not_found = isNameExist(json_data,names)
data = create(name_not_found)
write_json(data)
| true |
4b24e50f53dd3bfe818fe5d46737367552bb4a23 | Python | evgenblaghodir/pyproject | /Ln7_ifelse.py | UTF-8 | 671 | 3.890625 | 4 | [] | no_license | """x = 25
num = input("enter num ")
print(num)
if (x==num):
print ("correct")
else:
print("wrong\n")
age = input("enter age\t")
print(age)
if (int(age) <= 4 ):
print("baby")
elif(int(age) > 4 and int(age) <= 12):
print("child")
elif(int(age) >12 and int(age) <=21):
print("tenager")
else:
print("you are big")
"""
# Demo: membership tests against two car lists.
german_cars = ["bmw", "opel", "audi", "vw"]
all_cars = ["ford", "land rover", "jeep", "vw", "seat", "bmw", "opel", "audi", "bugatti"]

# Is 'lada' a known car at all?
print("yes" if 'lada' in all_cars else "no")

# Tag every known car as german or not.
for car in all_cars:
    tag = " german car" if car in german_cars else " not german car"
    print(car + tag)
ca2997f093f79bfc0d02c55294dc081ecc8796cb | Python | Best1s/python_re | /python_web_spider/web_spider/data_Spider/use_mysql.py | UTF-8 | 1,432 | 2.96875 | 3 | [] | no_license | #coding: utf-8
'''
Quick MySQL ALTER TABLE reference (translated from the original Chinese notes):
alter table table_name add <column> <type> [after <position>]
alter table table_name change <column> <new_column> <new_type>
alter table table_name drop <column>
alter table table_name rename <new_table_name>
'''
# Create a new user. Command format:
#   grant priv1, priv2, ... privN on dbname.tablename
#   to username@host identified by 'password';
#   e.g. grant select,insert,update,delete,create,drop on company.employee
#        to username@x.x.x.x identified by '123';
# Python needs the MySQLdb driver: pip install mysql-python
import MySQLdb

# BUG FIX: MySQLdb expects the MySQL charset name 'utf8'; passing 'utf-8'
# raises an OperationalError at connect time.
con = MySQLdb.connect(host='localhost', user='root', passwd='123456', port=3306, charset='utf8')
'''
cursor()    create a cursor object
commit()    commit the transaction
rollback()  roll the transaction back
close()     close the database connection
'''
cur = con.cursor()
# Cursor usage:
#   execute()      run one SQL statement
#   executemany()  run many SQL statements
#   close()        close the cursor
#   fetchone()     take one record from the result, advancing the cursor
#   fetchmany()    take several records from the result
#   fetchall()     take all records from the result
#   scroll()       move the cursor

# BUG FIX: the original line was missing the opening quote of the SQL
# string, which made the whole module a SyntaxError.
cur.execute('CREATE TABLE person (id int primary key,name varchar(20),age int)')
# BUG FIX: MySQLdb's paramstyle is 'format' and accepts only %s
# placeholders; %d raises a TypeError at run time.
cur.execute('INSERT INTO PERSON VALUES (%s,%s,%s)', (1, 'qiye1', 20))
con.commit()
# Queries, updates and deletes work the same way as with sqlite3.
bf3d10738f5293153eb0aa13e7cc63d74a4931d2 | Python | lyk4411/untitled | /beginPython/leetcode/ReorderedPowerof2.py | UTF-8 | 1,557 | 4.125 | 4 | [] | no_license | import itertools
import collections
class ReorderedPowerof2(object):
    """LeetCode 869: can N's decimal digits be permuted into a power of two?"""

    def reorderedPowerOf2(self, N):
        """
        Return True if some permutation of N's digits (without a leading
        zero) equals a power of two.

        Two numbers are digit-permutations of one another exactly when their
        digit multisets match, so compare N's digit Counter against the
        Counter of every power of two with a comparable digit count
        (2**0 .. 2**31 covers the 10**9 LeetCode input range).
        """
        # BUG FIX: removed a leftover debug print of the Counter.
        digits = collections.Counter(str(N))
        return any(digits == collections.Counter(str(1 << i)) for i in range(32))
if __name__ == '__main__':
    # Ad-hoc manual checks (expected: True, True, True, False, True).
    a = ReorderedPowerof2()
    print(a.reorderedPowerOf2(1))
    print(a.reorderedPowerOf2(16))
    print(a.reorderedPowerOf2(8))
    print(a.reorderedPowerOf2(22))
    print(a.reorderedPowerOf2(61))
    print("===================================")
    # Counter comparison demo: 61 and 16 share the same digit multiset.
    print(collections.Counter(str(61)))
    print(collections.Counter(str(16)))
    print(collections.Counter(str(61)) == collections.Counter(str(16)))
0fd3eb83159f533073f43b83de8052f3b306bea0 | Python | MRestrepo08/Pandas | /pandas1.py | UTF-8 | 663 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 10:20:26 2021

@author: mauriciorestrepo
"""

import pandas as pd
import numpy as np  # NOTE(review): imported but unused in this script

#Create dataframes from two Series
s1 = pd.Series([1, 2, 3, 4], index =['a','b','c','d'])
s2 = pd.Series([10, 20, 30, 40], index =['a','b','c','d'])
s = pd.DataFrame({'A1':s1,'A2':s2})
print(s.describe())
s.info()
print(s)
#print(ventas.index)
#print(ventas.values)
#print(ventas.head())

#Method loc: label-based selection (slice ends are inclusive)
print('loc[b] = ',s.loc['b'])
print('loc[[b],[c]] = ',s.loc[['b','c']])
print('loc[b:d] = ',s.loc['b':'d'])

#Method iloc: integer-position selection (slice ends are exclusive)
print('iloc[1:] = ',s.iloc[1:])
print('iloc[:1] = ',s.iloc[:1])
427def43ee7a1998041861653196848263d30c04 | Python | koenvb/microscopium | /microscopium/metrics.py | UTF-8 | 4,925 | 3.4375 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | from __future__ import absolute_import, division
from itertools import combinations
import numpy as np
from scipy.spatial.distance import pdist
from six.moves import map
def sq_to_dist(i, j, n):
    """Map squareform coordinates to a condensed distance-matrix index.

    The condensed form of an n*n pairwise distance matrix linearises the
    upper-triangular, off-diagonal entries.  This returns the position of
    entry [i, j] in that 1D array; (i, j) and (j, i) map to the same index.

    Parameters
    ----------
    i, j : int
        Coordinates in the square matrix.
    n : int
        Dimension n of the n*n distance matrix.

    Returns
    -------
    index : int
        Position of pairwise distance [i, j] in the condensed matrix.

    Reference
    ---------
    scipy.spatial.distance.squareform documents the condensed index as
    {n choose 2} - {(n - i) choose 2} + (j - i - 1); expanding the binomials
    gives the closed form used below.

    Examples
    --------
    >>> sq_to_dist(0, 1, 4)
    0
    >>> sq_to_dist(0, 3, 4)
    2
    >>> sq_to_dist(1, 2, 4)
    3
    """
    # Normalise to the upper triangle.
    if i > j:
        i, j = j, i
    return int(i * n + j - i * (i + 1) / 2 - i - 1)
def mongo_group_by(collection, group_by):
    """
    Group MongoDB collection according to specified field.

    Sends aggregate query to MongoDB collection to group
    all documents by a given field and returns dictionary
    mapping the field to the corresponding (plate, well)
    co-ordinate(s).

    Parameters
    ----------
    collection : pymongo collection
        Pymongo object directing to collection.
    group_by : string
        Field to group collection by.

    Returns
    -------
    query_dict : dict { string : list of tuple }
        Query dictionary mapping the specified group_by field to a list of
        (plate, well) co-ordinates.
    """
    # NOTE(review): indexing aggregate(...)['result'] only works on
    # pymongo < 3 / MongoDB's old inline result form; modern pymongo
    # returns a CommandCursor — TODO confirm the pinned pymongo version.
    mongo_query = collection.aggregate([{
        '$group' : {
            # groups all documents according to specified field
            '_id': '$' + group_by,
            'coords': {
                '$addToSet': {
                    # add plate and well for each document
                    # belonging to the group
                    'plate': '$plate',
                    'well': '$well'
                }
            }
        }
    }])['result']

    query_dict = {}
    for doc in mongo_query:
        query_dict[doc['_id']] = []
        for coord in doc['coords']:
            try:
                # Normalise to (int plate, str well); documents missing
                # either key are skipped.
                new_coord = (int(coord['plate']), str(coord['well']))
                query_dict[doc['_id']].append(new_coord)
            except KeyError:
                pass
    return query_dict
def gene_distance_score(X, collection, metric='euclidean'):
    """Find intra/inter gene distance scores between samples.

    Parameters
    ----------
    X : Data frame, shape (n_samples, n_features)
        Feature data frame, indexed by (plate, well) co-ordinates.
    collection : pymongo collection
        Collection used to group samples by 'gene_name'.
    metric : string, optional
        Which distance measure to use when calculating distances.
        Must be one of the options allowable in
        scipy.spatial.distance.pdist. Default is euclidean distance.

    Returns
    -------
    all_intragene_data : array
        An 1D array with intra-gene distances (i.e. distances
        between samples with the same gene knocked down).
    all_intergene_data : array
        An 1D array with inter-gene distances (i.e. distances
        between samples with different gene knocked down).
    """
    gene_dict = mongo_group_by(collection, 'gene_name')

    nsamples = X.shape[0]
    npairs = int(nsamples * (nsamples - 1) / 2)
    all_intragene_index = []
    # Collect condensed-matrix indices for every same-gene sample pair.
    for key in gene_dict:
        if len(gene_dict[key]) > 1:
            indices = (X.index.get_loc(coord) for coord in gene_dict[key]
                       if coord in X.index)
            for i, j in combinations(indices, 2):
                all_intragene_index.append(sq_to_dist(i, j, X.shape[0]))

    all_intragene_index.sort()
    # Everything that is not an intra-gene pair is an inter-gene pair.
    all_intergene_index = np.setdiff1d(np.arange(npairs), all_intragene_index,
                                       assume_unique=True)
    distance = pdist(X, metric)
    all_intragene_data = distance[all_intragene_index]
    all_intergene_data = distance[all_intergene_index]
    return all_intragene_data, all_intergene_data
| true |
31c2302476ba9fe571bb3fec9e5bddbaa8160b84 | Python | CamposRodrigo/Python | /EX./ex073.py | UTF-8 | 439 | 3.640625 | 4 | [
"MIT"
] | permissive | time = ('Atletico', 'Botafogo', 'Corinthians', 'Gremio',
'Santos', 'Sao Paulo', 'Bahia', 'Avai')
# Formatted report over the ``time`` tuple defined above.
# NOTE(review): the tuple name shadows the stdlib ``time`` module.
print('-=-'*15)
print(f'Lista de Times do Brasileirao: {time}')
print('-=-'*15)
print(f'Os 5 primeiros sao: {time[0:5]}')
print('-=-'*15)
print(f'Os 4 ultimos sao:{time[-4:]}')
print('-=-'*15)
print(f'Times em ordem alfabetica: {sorted(time)}')
print('-=-'*15)
print(f' O Avai esta na {time.index("Avai")+1} posicao.')
print('-=-'*15)
612db27581f2a3da356fab7075492392b3656bdb | Python | Robert1991/algorithms | /grokking/recursion.py | UTF-8 | 1,222 | 3.78125 | 4 | [] | no_license | def countDown(numbers):
    # Recursively print "numbers...", "numbers-1..." down to 1, then "Boom".
    if (numbers == 0):
        print("Boom")
        return
    else:
        print(str(numbers) + '...')
        countDown(numbers-1)
def fact(input):
    """Return input! (factorial) of a positive integer, computed recursively."""
    # Base case 1! == 1; otherwise multiply down the chain.
    return 1 if input == 1 else input * fact(input - 1)
def sumRec(inputArray, currentTotal=0):
    """Recursively sum *inputArray*, returning the total as a string.

    :param inputArray: list of numbers to sum (left intact — the original
        version destructively pop(0)'d the caller's list).
    :param currentTotal: running total threaded through the recursion.
    :return: str(sum of the elements).
    """
    if not inputArray:
        return str(currentTotal)
    # Recurse on a slice instead of pop(0): no caller-visible mutation, and
    # pop(0) also made every step O(n).
    return sumRec(inputArray[1:], currentTotal + inputArray[0])
def totalItemNumRec(inputArray, currentCount=0):
    """Recursively count the elements of *inputArray*, returned as a string.

    :param inputArray: list to count (left intact — the original version
        destructively pop(0)'d the caller's list).
    :param currentCount: running count threaded through the recursion.
    :return: str(number of elements).
    """
    if not inputArray:
        return str(currentCount)
    # Recurse on a slice so the caller's list is not mutated.
    return totalItemNumRec(inputArray[1:], currentCount + 1)
def maxNumberInListRec(inputArray, currentMax=None):
    """Recursively find the largest element of *inputArray*, as a string.

    BUG FIX: the original seeded the maximum with 0, so lists containing
    only negative numbers wrongly reported '0'.  The default is now None,
    meaning "no maximum seen yet"; the first element always wins against it.
    It also no longer pop(0)'s the caller's list.

    :param inputArray: list of numbers (an empty initial call returns 'None').
    :param currentMax: running maximum threaded through the recursion.
    :return: str(largest element).
    """
    if not inputArray:
        return str(currentMax)
    head, tail = inputArray[0], inputArray[1:]
    if currentMax is None or head >= currentMax:
        currentMax = head
    return maxNumberInListRec(tail, currentMax)
#countDown(10)
#print(fact(5))
# Demo calls; note the helpers already return strings, so the extra str()
# wrapping below is redundant but harmless.
print('sum ' + str(sumRec([1, 2, 3, 4])))
print('totals ' + str(totalItemNumRec([1, 2, 3, 4])))
print('max ' + str(maxNumberInListRec([1, 22, 3, 4])))
#print()
e1cafc24d5b1572288257eb95ed4c70700c746d2 | Python | ModellingWebLab/chaste-project-fitting-pints | /modeling/algorithm.py | UTF-8 | 2,386 | 3 | 3 | [] | no_license | import numpy as np
class ParameterFittingTask(object):
    """
    Wrapper class for the essential parameter fitting arguments:
        - parameters: ordered list of parameter names.
        - prior: ParameterDistribution prior.
        - experiment: Experiment object exposing ``simulate(params)``.
        - expData: dict of experimental data.
    And the following optional arguments:
        - outputMapping: dict mapping names of simulated output to keys in
          expData.
        - objArgs: dict of arguments to the objective function.

    ``objFun`` (the Objective callable) must be assigned after construction.
    Used as simplified input for the ParameterFittingAlgorithm class.
    """

    def __init__(
            self, parameters, prior, experiment, expData, outputMapping=None,
            objArgs=None):
        self.parameters = parameters
        self.prior = prior
        self.experiment = experiment
        self.expData = expData
        self.outputMapping = outputMapping
        # BUG FIX: the original default was a mutable ``objArgs={}`` which
        # calculateObjective mutates, so all instances silently shared (and
        # polluted) one dict.  Use a fresh dict per instance instead.
        self.objArgs = {} if objArgs is None else objArgs
        self.objFun = None

    def calculateObjective(self, parameters):
        """
        Handles interaction between components to produce Objective output
        from parameter values.

        Parameters in the reserved ``obj:`` namespace are routed into
        ``objArgs`` (overwriting same-named entries); everything else goes
        to ``experiment.simulate``.  Returns -inf if the simulation raises.
        Primary method utilized by ParameterFittingAlgorithm.
        """
        data1, data2 = {}, {}
        simParams = {}
        # BUG FIX: dict.iteritems() is Python 2 only; .items() is portable.
        for key, val in parameters.items():
            tokens = key.split(':')
            if len(tokens) > 1 and tokens[0] == 'obj':
                if self.objArgs is None:
                    self.objArgs = {}
                self.objArgs[tokens[1]] = val
            else:
                simParams[key] = val
        try:
            simData = self.experiment.simulate(simParams)
        except Exception:
            # Any simulation failure scores as an impossibly bad objective.
            # (Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit still propagate.)
            return -np.inf
        # Match experimental/simulated data for input to objective function.
        if self.outputMapping is not None:
            for simName, expName in self.outputMapping.items():
                data1[simName] = simData[simName]
                data2[simName] = self.expData[expName]
        else:
            data1 = simData
            data2 = self.expData
        return self.objFun(data1, data2, self.objArgs)
| true |
38c78f9fe70c1ad5d3ddd38b87da0f667e335f17 | Python | sknaht/algorithm | /python1/leetcode/044-wildcard-matching.py | UTF-8 | 1,049 | 3.078125 | 3 | [] | no_license | class Solution(object):
def isMatch(self, s, p):
    """
    Greedy wildcard matching: '?' matches any single character, '*' any
    (possibly empty) sequence.  Instead of full backtracking, remember the
    position just after the most recent '*' (lastp) and the string position
    it was tried at (lasts); on a mismatch, let that '*' absorb one more
    character and retry from there.

    :type s: str
    :type p: str
    :rtype: bool
    """
    indexp = 0
    indexs = 0
    # Resume points for the most recent '*' (-1 means none seen yet).
    lastp = -1
    lasts = -1
    while indexs < len(s):
        # Literal or '?' match: advance both cursors.
        if indexp < len(p) and (p[indexp] == s[indexs] or p[indexp] == '?'):
            indexp += 1
            indexs += 1
        # A run of '*' collapses to one; record the resume point after it.
        elif indexp < len(p) and p[indexp] == '*':
            while indexp < len(p) and p[indexp] == '*':
                indexp += 1
            if indexp == len(p):
                # Pattern ends in '*': it swallows the rest of s.
                return True
            lastp = indexp
            lasts = indexs
        # Mismatch with an earlier '*': let it absorb one more character.
        elif lastp >= 0 and lasts >= 0:
            indexp, indexs = lastp, lasts + 1
            lasts += 1
            if lasts >= len(s):
                return False
        else:
            return False
    # s is consumed; only trailing '*' may remain in the pattern.
    while indexp < len(p) and p[indexp] == '*':
        indexp += 1
    return indexp == len(p)
# Python 2 print statement — quick manual check ('aa' vs 'a*a' -> True).
print Solution().isMatch('aa', 'a*a')
| true |
6a484b9cbbcab7a4fcaea27925cd6cb2c4181568 | Python | steevc/steemkeybaseimage | /program.py | UTF-8 | 1,906 | 2.875 | 3 | [] | no_license | import os, shutil, sys
from datetime import date
from PIL import Image
import pyperclip
#from tkinter import Tk
maximage = 1000
keybase_local_folder = "/keybase/public/steevc/steemit"
keybase_web_folder = "https://steevc.keybase.pub/steemit/"
def get_filename(filename, description):
    """Build a dated name: <today ISO date><description without spaces><original extension>."""
    stem, extension = os.path.splitext(filename)
    compact_description = description.replace(" ", "")
    return date.today().isoformat() + compact_description + extension
def move_file(sourcefile, target):
    # Move to target location.
    # NOTE(review): shutil.copy keeps the source in place, so despite the
    # name this copies rather than moves.
    shutil.copy(sourcefile, target)
def resize_image(image, target):
    # Shrink to below max size: repeatedly halve the scale until the longest
    # edge fits within ``maximage``, then save the downscaled copy to target.
    scale = 1.0
    if image.size[0] > image.size[1]:
        dim = image.size[0]
    else:
        dim = image.size[1]
    while dim > maximage:
        scale /= 2
        dim /= 2
    smimage = image.resize((int(image.size[0]*scale), int(image.size[1]*scale)))
    smimage.save(target)
# Issues with pip to install packages
def text_to_clipboard(text):
    # Copy ``text`` to the system clipboard via pyperclip.
    # Need to install xclip or xsel for Linux
    # https://pyperclip.readthedocs.io/en/latest/introduction.html#not-implemented-error
    pyperclip.copy(text)
def process(filename, description):
    # Open the image, publish it (resized if needed) into the local keybase
    # folder under a dated name, print the public URL, and copy the steemit
    # markdown snippet to the clipboard.
    im = Image.open(filename)
    #print(im.size)
    newname = get_filename(filename, description)
    target = os.path.join(keybase_local_folder, newname)
    if im.size[0] > maximage or im.size[1] > maximage:
        resize_image(im, target)
    else:
        move_file(filename, target) # Just move and rename
    web_file = keybase_web_folder + newname
    print("Web file", web_file)
    # NOTE(review): always empty — the markdown snippet (presumably
    # something like "![desc](url)") was never built; TODO confirm.
    steemit_md = ""
    print(steemit_md)
    text_to_clipboard(steemit_md)
if __name__ == "__main__":
    # With no CLI arguments fall back to a hard-coded local test image;
    # otherwise: program.py <image path> <description>.
    if len(sys.argv) < 2:
        process("/home/steve/Downloads/20160821picks.jpg", "Test picture")
    else:
        process(sys.argv[1], sys.argv[2])
00ea6dba8009fd65d38ddebc6c2d2f444553a2f5 | Python | borissova-e/web_scraping | /main.py | UTF-8 | 888 | 3.328125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
# Posts are reported when their body text contains any of these keywords.
KEYWORDS = ['дизайн', 'фото', 'web', 'python']
def find_article_with_keywords(url):
    # Fetch the page and print "time - title - link" for every post whose
    # body contains at least one keyword from KEYWORDS.
    # NOTE(review): tied to habr.com's markup (article.post, div.post__text,
    # span.post__time, a.post__title_link) — breaks if the site changes.
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    for article in soup.find_all('article', class_='post'):
        post = article.find('div', 'post__text')
        post_text = post.text
        for kw in KEYWORDS:
            if kw in post_text:
                time_element = article.find('span', class_='post__time')
                time = time_element.text
                title_element = article.find('a', 'post__title_link')
                title = title_element.text
                link = title_element.attrs.get('href')
                print(f'{time} - {title} - {link}')
                # One report per article, even if several keywords match.
                break
if __name__ == '__main__':
    # Scan the "all posts" feed on habr.com.
    url = 'https://habr.com/ru/all/'
    find_article_with_keywords(url)
| true |
908572bfe485e5d6fa27ed5f23831c0c8eaed192 | Python | ethanj6072/Python | /Assignment 3 Binary Search.py | UTF-8 | 2,410 | 3.90625 | 4 | [] | no_license | '''
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
'''
def partition(arrToSort, low, high):
    """Lomuto partition of arrToSort[low:high+1] around the last element.

    Elements are compared as ints (the caller feeds int-convertible
    values); the pivot lands at its final sorted position, whose index
    is returned.  The list is rearranged in place.
    """
    pivot = int(arrToSort[high])
    boundary = low - 1  # last index of the "<= pivot" region so far
    for scan in range(low, high):
        if int(arrToSort[scan]) <= pivot:
            boundary += 1
            arrToSort[boundary], arrToSort[scan] = arrToSort[scan], arrToSort[boundary]
    pivot_pos = boundary + 1
    arrToSort[pivot_pos], arrToSort[high] = arrToSort[high], arrToSort[pivot_pos]
    return pivot_pos
# arrToSort[] is array to be sorted,
# low is starting index,
# high is ending index
# Function to do Quick sort
def quickSort(arrToSort, low, high):
    """Sort arrToSort[low:high+1] in place using quicksort.

    Same algorithm as the classic recursive version, written with an
    explicit worklist of (low, high) sub-ranges.  A single-element list
    is returned unchanged (quirk preserved from the original); in every
    other case the function returns None.
    """
    if len(arrToSort) == 1:
        return arrToSort
    pending = [(low, high)]
    while pending:
        lo, hi = pending.pop()
        if lo < hi:
            # Place the pivot; the two disjoint sides are sorted later.
            pivot_pos = partition(arrToSort, lo, hi)
            pending.append((lo, pivot_pos - 1))
            pending.append((pivot_pos + 1, hi))
def binSearch(arr, x):
    """Return an index of x in the sorted sequence arr, or -1 if absent."""
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        probe = arr[mid]
        if probe == x:
            return mid
        if probe < x:
            lo = mid + 1   # x lies in the upper half
        else:
            hi = mid - 1   # x lies in the lower half
    return -1
# Driver: reads from stdin --
#   line 1: count of values (read but unused),
#   line 2: the space-separated values,
#   line 3: number of queries, then one "a b" pair per query line.
# For each query, a+b is appended to a copy of the values, the copy is
# sorted, and the index of a+b in the sorted copy is printed (i.e. how
# many values are strictly below it, modulo duplicate placement).
numLines = input()
lines = input().split(" ")
lines = list(map(int, lines))
#sort in order
queriesNum = input()
for i in range(int(queriesNum)):
    currentInput = input().split(" ") #important! direct index-based solutions don't work on 2-digit nums
    pointValue = int(currentInput[0]) + int(currentInput[1])
    currentSet = lines.copy()
    currentSet.append(pointValue)
    quickSort(currentSet, 0, len(currentSet) - 1)
    # and now, because I'm supposed to demonstrate understanding of binary search
    numBetween = binSearch(currentSet, pointValue)
    print(numBetween)
def selection_sort(lista):
    """Sort *lista* in place (ascending) by selection sort and return it.

    Each pass finds the largest element of the unsorted prefix
    lista[0:end+1] and swaps it to position *end*.
    """
    for end in range(len(lista) - 1, 0, -1):
        # First occurrence of the maximum, matching the original's
        # strict ">" comparison on ties.
        largest = max(range(end + 1), key=lista.__getitem__)
        lista[end], lista[largest] = lista[largest], lista[end]
    return lista
| true |
117b5d7d82b5a35f31afda00307a756e1f6af7ea | Python | hyenee/2021_Openlab_Demo | /app.py | UTF-8 | 5,520 | 2.78125 | 3 | [] | no_license | import streamlit as st
import pandas as pd
import os
# set current SCRIPT file directory as a working directory
os.chdir( os.path.dirname( os.path.abspath(__file__) ) )
st.set_page_config(layout="wide")
pd.set_option('display.max_rows', None)
def main():
st.title('[Demo] 의미·구조적 유사성을 가진 한국어 문자열 생성 기술 연구')
st.subheader('Controllable Text Generator')
# Applying styles to the buttons
st.markdown("""<style>
.st-eb {
background-color:#f63366
} </style>""", unsafe_allow_html=True)
# Select Box for the model
st.sidebar.image('./fig/logo.png', width=150)
model_name = st.sidebar.selectbox("Model", ("t5", "bart", "gpt2"))
num_return_sequences = st.sidebar.slider("Number of return sentences", 0, 100)
st.sidebar.text('')
st.sidebar.text('')
st.sidebar.markdown('### 📃 Template')
st.sidebar.markdown('1. weather.general(day.p=\*,location=\*)')
st.sidebar.markdown('2. weather.humidity(day.p=\*,location=서울,ti_range.p=점심)')
st.sidebar.markdown('3. weather.humidity(day.p=\*,location=\*)')
st.sidebar.markdown('4. weather.rainfall(day.p=내일,location=\*)')
st.sidebar.markdown('5. weather.sunset(day.p=\*,location=논산)')
st.sidebar.markdown('6. weather.temperature(day.p=내일,location=울산)')
st.sidebar.markdown('7. weather.temperature(day.p=\*,location=\*,time=5시)')
st.sidebar.markdown('8. weather.uv(day.p=\*,ti_range.p=\*)')
st.sidebar.markdown('9. weather.uv(day.p=\*,location=\*)')
st.sidebar.markdown('10. weather.windchill(location=*)')
# vocab
intent_label_vocab = load_vocab(os.path.join('./data', "intent.label.vocab"))
slot_tag_label_vocab = load_vocab(os.path.join('./data', "slot_tag.label.vocab"))
slot_value_label_vocab = load_slot_value_vocab(os.path.join('./data', "slot_value.label.vocab"))
row03_spacer1, row03_1, row03_spacer2 = st.beta_columns((.2, 7.1, .2))
with row03_1:
st.markdown("")
see_image = st.beta_expander('You can click here to see the overall architecture 👉')
with see_image:
st.image('./fig/overall_1.PNG', width=700)
st.image('./fig/overall_2.PNG', width=700)
row3_spacer1, row3_1, row3_spacer2 = st.beta_columns((.2, 7.1, .2))
with row3_1:
st.markdown("")
see_data = st.beta_expander('You can click here to see the slot tags and slot values lists 👉')
with see_data:
df = pd.DataFrame( columns = ['Slot tag', 'Slot value'])
for key, value in slot_value_label_vocab.items():
value_str = ', '.join(value)
df=df.append({'Slot tag' : key, 'Slot value' : value_str} , ignore_index=True)
st.table(df)
st.text('')
row13_spacer1, row13_1, row13_spacer2, row13_2, row13_spacer3 = st.beta_columns((.2, 2.3, .2, 2.3, .2))
with row13_1:
intent_option = st.selectbox('Select a intent', intent_label_vocab)
st.write('Selected intent : ', intent_option)
with row13_2:
slot_option = st.text_area("Enter the slot tags and values", "day.p=*,location=*")
_slot_option = ''
if '*' in slot_option:
_slot_option = slot_option.replace('*', '\*')
st.write('slot tags and values : ', _slot_option)
semantic_control_grammar = intent_option + '(' + slot_option + ')'
st.info('semantic control grammar : '+ intent_option + '(' + _slot_option + ')')
# Generate button
if st.button("Generate"):
# Checking for exceptions
if not check_exceptions(num_return_sequences):
# Calling the forward method on click of Generate
with st.spinner('Progress your text .... '):
df = pd.read_csv(os.path.join('./data', model_name, "reranking_100.csv"), delimiter='\t')
is_exist = df['query'] == semantic_control_grammar
filtered = df[is_exist]
filtered.rename(columns = {'generated_texts' : '생성 문장'}, inplace = True)
filtered.reset_index(inplace = True)
filtered = filtered[:num_return_sequences]
html_table = filtered[['생성 문장', 'train_texts']].to_html(col_space='100px', justify='center')
st.table(data=filtered[['생성 문장', 'train_texts']])
def check_exceptions(num_return_sequences):
    """Validate the requested number of return sequences.

    Returns False when the count is acceptable; shows a Streamlit error
    banner and returns True when it is zero.
    """
    if num_return_sequences != 0:
        return False
    st.error("Please set the number of return sequences to more than one")
    return True
def load_vocab(fn):
    """Read a tab-separated ``symbol<TAB>id`` vocabulary file.

    Returns the symbols in file order, minus the first entry (assumed to
    be a special/padding token -- TODO confirm against the data files).
    Raises ValueError if a line does not have exactly two tab fields.
    """
    symbols = []
    with open(fn, 'r', encoding='utf-8') as f:
        for raw in f:
            token, _id = raw.rstrip().split('\t')
            symbols.append(token)
    return symbols[1:]
def load_slot_value_vocab(fn):
    """Read ``tag<TAB>{'v1', 'v2', ...}`` lines into {tag: sorted value list}.

    The value field is a Python-set-style literal; braces and single
    quotes are stripped, the remainder split on commas, and each value
    whitespace-trimmed and sorted.  A repeated tag overwrites the
    earlier entry, as in the original.
    """
    vocab = {}
    with open(fn, 'r', encoding='utf-8') as f:
        for raw in f:
            tag, values = raw.rstrip().split('\t')
            cleaned = values.replace('{', '').replace('}', '').replace("'", '')
            vocab[tag] = sorted(part.strip() for part in cleaned.split(','))
    return vocab
if __name__ == '__main__':
main() | true |
2d78038826b708fd89819ff0794e28144adf725c | Python | shweta4377/GNEAPY19 | /venv/Session4J.py | UTF-8 | 418 | 3.75 | 4 | [] | no_license | # Built In Functions on String
# Strings are IMMUTABLE. They Cannot be changed
# Whenever we perform modification on String a new String is created in memory
"""
name = "Fionna Flynn"
newName = name.upper()
print("name is: ",name, hex(id(name)))
print("newName is: ",newName, hex(id(newName)))
"""
name = "Fionna Flynn"
print("name before is: ",name, hex(id(name)))
name = name.upper()
print("name after is: ",name, hex(id(name))) | true |
5eebbda08df3e41b17377b37668d53d24995eef6 | Python | Th3Lourde/l33tcode | /509_Fibonacci_Number.py | UTF-8 | 622 | 3.484375 | 3 | [] | no_license |
class Solution:
    """Fibonacci via naive recursion (fib_1) and memoized recursion (fib)."""

    def fib_1(self, N):
        """Return the N-th Fibonacci number by plain double recursion.

        Bug fix: the original recursed into the memoized ``fib`` without
        its required ``mem`` argument, raising TypeError for any N > 2.
        It now recurses into itself.
        """
        if N == 0:
            return 0
        if N == 1 or N == 2:
            return 1
        # N >= 3: classic exponential-time recurrence.
        return self.fib_1(N - 1) + self.fib_1(N - 2)

    def fib(self, N, mem):
        """Return the N-th Fibonacci number, caching results in ``mem``.

        ``mem`` is a caller-supplied list of length >= N + 1 pre-filled
        with None.  Bug fix: the original left ``result`` unbound for
        N == 0 (UnboundLocalError); also uses ``is not None`` instead of
        the ``!= None`` comparison.
        """
        if mem[N] is not None:
            return mem[N]
        if N == 0:
            result = 0
        elif N == 1 or N == 2:
            result = 1
        else:
            result = self.fib(N - 1, mem) + self.fib(N - 2, mem)
        mem[N] = result
        return result
if __name__ == '__main__':
s = Solution()
n = 4
mem = [None] * (n+1)
# print(mem)
# print(s.fib(4, mem))
# print(mem)
| true |
c7fe22dc71f74fe05d70367c74b299d9de6928b1 | Python | hoonihu1/Python_Class_TA | /Codes/정규표현식ex2.py | UTF-8 | 427 | 3.09375 | 3 | [] | no_license | import csv
import re
# Concatenate every CSV cell into one big string, then extract phone
# numbers in three different formats from it.
a = ''
with open('../Input_Folder/python_input.csv', newline='') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    for row in spamreader:
        a += ''.join(row)
# NOTE(review): these patterns should be raw strings (r'...') to avoid
# invalid-escape DeprecationWarnings -- left unchanged here.
p = re.compile('\d{3}-\d{4}-\d{4}')                    # e.g. 010-1234-5678
l = re.compile('\d{3}-\d{4}-\d{4}|\d{3}-\d{3}-\d{4}')  # also the 3-3-4 digit form
n = re.compile('\d{3}\.\d{4}\.\d{4}')                  # dot-separated variant
m = p.findall(a)
k = l.findall(a)
g = n.findall(a)
print(m)
print(k)
print(g)
c47019861ea699e56a2d8d89d95635831ef28237 | Python | zihuaweng/leetcode-solutions | /leetcode_python/939.Minimum_Area_Rectangle.py | UTF-8 | 1,468 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env python3
# coding: utf-8
# https://leetcode.com/problems/minimum-area-rectangle/
# Time complexity: O()
# Space complexity: O()
class Solution:
    def minAreaRect(self, points: List[List[int]]) -> int:
        """Return the smallest area of an axis-aligned rectangle whose
        four corners all belong to *points*, or 0 if none exists.

        For each point (x, y): candidate opposite corners (i, j) are
        drawn from the x-coords i that share row y (so (i, y) exists)
        and the y-coords j that share column x (so (x, j) exists); the
        rectangle is complete iff the fourth corner (i, j) also exists.
        Each processed point is removed from the indices afterwards so
        every rectangle is considered only once.
        """
        min_area = float('inf')
        cor_x = collections.defaultdict(set)   # y -> set of x-coords at that y
        cor_y = collections.defaultdict(set)   # x -> set of y-coords at that x
        for x, y in points:
            cor_x[y].add(x)
            cor_y[x].add(y)
        for x, y in points:
            x_list = cor_x[y]
            y_list = cor_y[x]
            for i in x_list:
                for j in y_list:
                    if i == x or j == y:
                        continue  # degenerate: zero width or height
                    # (i, j) existing completes the rectangle; both checks
                    # test that same corner via the two indices.
                    if i in cor_x[j] and j in cor_y[i] and abs(i - x) * abs(j - y) > 0:
                        min_area = min(min_area, abs(i - x) * abs(j - y))
            cor_x[y].remove(x)   # never pair this point again
            cor_y[x].remove(y)
        return min_area if min_area < float('inf') else 0
class Solution:
    def minAreaRect(self, points: List[List[int]]) -> int:
        """Smallest axis-aligned rectangle area over *points* (0 if none).

        Brute force: try every pair of points as opposite diagonal
        corners and check that the other two corners exist via a
        column -> y-coordinate index.
        """
        ys_at = collections.defaultdict(set)
        for x, y in points:
            ys_at[x].add(y)
        best = float('inf')
        for ax, ay in points:
            for bx, by in points:
                if ax == bx or ay == by:
                    continue  # need a true diagonal, not a shared row/column
                if ay in ys_at[bx] and by in ys_at[ax]:
                    area = abs(bx - ax) * abs(by - ay)
                    if area < best:
                        best = area
        return 0 if best == float('inf') else best
cff33aada556b6d330aa8039fb691824556fcf9e | Python | ayman-tareq/webscraping | /scrapy_framework/MahmudAhsan/webscrap/wlog.py | UTF-8 | 168 | 2.578125 | 3 | [
"MIT"
] | permissive | import logging
def set_custom_log_info(filename):
    """Route root-logger output (INFO and above) to *filename*.

    logging.basicConfig is a no-op if the root logger was already
    configured, so call this before any other logging setup.
    """
    logging.basicConfig(filename=filename,level=logging.INFO)
def report(e:Exception):
    """Log *e* with its traceback; intended to be called inside an ``except`` block."""
    logging.exception(str(e))
613894182cf0110733d56917118f6d637c428d34 | Python | PAmerikanos/Spark-Streaming-Ships | /OpenShapeFile.py | UTF-8 | 374 | 2.875 | 3 | [] | no_license | import fiona
# Bounding box (degrees): latitudes 45..51 N, longitudes 10 W..0.
latS = 45.0
latN = 51.0
lonW = -10.0
lonE = 0.0
# Collect the (lon, lat) coordinates of every fishing port that falls
# inside the bounding box.  Shapefile coordinates are (lon, lat) order.
portShp = fiona.open("input/Fishing Ports.shp")
portArray = []
for portCoo in portShp:
    portLon = portCoo['geometry']['coordinates'][0]
    portLat = portCoo['geometry']['coordinates'][1]
    if lonW<=portLon<=lonE and latS<=portLat<=latN:
        portArray.append(portCoo['geometry']['coordinates'])
acb12d701c2636e254eaf5aa0c1a9e544c70d8cb | Python | TianaQ/python-scripts | /dir_to_df_and_csv.py | UTF-8 | 1,871 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 13:02:03 2018
@author: tatianakurilo
"""
import sys, os, errno, pandas as pd
# SET PATH to the directory with csv files
try:
dir_path = sys.argv[1]
except:
try:
dir_path = input('Enter path to the data directory: ')
except:
raise Exception('No path provided')
if not os.path.exists(dir_path):
raise FileNotFoundError
# if you need only specific columns, list their names within the brackets: ['column_1', 'column_2', 'etc']
# if you want to keep the order of columns, list all of them in brackets
col_names = [] # empty list equals to False;
def read_csv_data(data_dir, col_names=False):
    """Concatenate every ``*.csv`` file in *data_dir* into one DataFrame.

    When *col_names* is a non-empty list, only those columns are read;
    a falsy value (the default) loads all columns.  pd.concat raises
    ValueError if the directory contains no .csv files, as before.
    """
    frames = []
    for entry in os.listdir(data_dir):
        if not entry.endswith(".csv"):
            continue  # skip non-CSV files
        full_path = os.path.join(data_dir, entry)
        if col_names:
            frames.append(pd.read_csv(full_path, usecols=col_names))
        else:
            frames.append(pd.read_csv(full_path))
    return pd.concat(frames, ignore_index=True)
# read all files from dir_path folder to a dataframe according to col_names
df_from_dir = read_csv_data(dir_path, col_names)
# write the dafaframe to csv file
csv_file = dir_path + '.csv'
try:
df_from_dir.to_csv(csv_file, sep=',', encoding='utf-8', index=False)
print(csv_file)
except:
print('Failed to create .csv file')
| true |
e6a08ae176e75450b41f46289ac4823a85ebef2c | Python | bobgautier/rjgtoys-xc | /examples/enforcement.py | UTF-8 | 724 | 3.125 | 3 | [
"MIT"
] | permissive |
from rjgtoys.xc import Bug
from rjgtoys.xc.raises import raises
class Allowed(Exception):
    """This exception may be raised by do_operation."""
    def __str__(self):
        return "I am allowed"
class NotAllowed(Exception):
    """This exception may not be raised by do_operation."""
    def __str__(self):
        return "I am not allowed"
# @raises declares the only exception type do_operation may raise;
# presumably rjgtoys.xc turns any undeclared exception into a Bug --
# the second try-block below relies on that. TODO confirm against
# the rjgtoys-xc documentation.
@raises(Allowed)
def do_operation(ok: bool):
    """Raise Allowed when *ok* is True, the undeclared NotAllowed otherwise."""
    if ok:
        raise Allowed()
    else:
        raise NotAllowed()
# Demo 1: the declared exception passes through unchanged.
try:
    do_operation(True)
    assert False, "Should not be reached"
except Allowed:
    print("Expected exception raised")
# Demo 2: the undeclared NotAllowed surfaces as a Bug.
try:
    do_operation(False)
    assert False, "Should not be reached"
except Bug:
    print("There is a bug in do_operation")
| true |
c6e0111da66e205aa1b48d1e81409786eef9a134 | Python | Bradels/IoT-Application-Building-a-Smart-Library-master | /mp/mp_socket.py | UTF-8 | 3,084 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python3
# Reference: https://realpython.com/python-sockets/
# Documentation: https://docs.python.org/3/library/socket.html
import socket
from library_menu import library_menu
import logging
logging.basicConfig(filename="library.log", level = logging.ERROR)
class mp_socket:
def connection(self, test = None):
"""
Server connetion is to start a TCP socket on the Master Pi (server) so that all reception PIs can connect to the server.
On setting up the connection, server listens to the connection requests and the coming messages.
This message will be the logged in user name which will be used for book borrowing purpose.
When the user selects logout from the library menu, the server will send "logout" message to RP
and RP is required to logout the user. The server stands by until another user logs in
Parameters:
Returns:
"""
testPassed = False
HOST = "" # Empty string means to listen on all IP's on the machine, also works with IPv6.
# Note "0.0.0.0" also works but only with IPv4.
PORT = 65000 # Port to listen on (non-privileged ports are > 1023).
ADDRESS = (HOST, PORT)
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(ADDRESS)
s.listen()
print("Listening on {}...".format(ADDRESS))
# Test server connection:
if(test == "test_setupServer"):
return True
while True:
print("The Smart Library is Ready be Accessed! Waiting for a User to Login . . .")
conn, addr = s.accept()
with conn:
print("Connected to {}".format(addr))
# test client connection
if(test == "test_ConnectToServer"):
testPassed = True
else:
data = conn.recv(4096)
if(data):
#Received message from RP
user = data.decode()
# print("Message from rp: " + user)
# Call library menu to search books/borrow books/return books
logout_req = library_menu().runMenu(user)
if(logout_req == "logout"):
conn.sendall(logout_req.encode())
print("Disconnecting from client.")
print("Closing listening socket.")
print("Done.")
except Exception as e:
logging.error("MP Socket error: {}".format(str(e)))
print(str(e))
#finally:
# Clean up the connection
# conn.close()
return testPassed
if __name__ == "__main__":
mp_socket().connection()
| true |
14771cfd720fd809cb16bf0027eff27441e028d1 | Python | Levara/pygame-hamster | /radionica-subota/igra.py | UTF-8 | 2,595 | 2.96875 | 3 | [
"Unlicense"
] | permissive | import pygame
import random
try:
highscore_file = open("highscore", "r")
highscore = highscore_file.readline()
highscore = int(highscore)
except:
highscore = 0
print highscore
pygame.init()
WIDTH = 800
HEIGHT = 600
screen = pygame.display.set_mode( (WIDTH,HEIGHT) )
pygame.display.set_caption("Nasa igra")
pygame.font.init()
myfont = pygame.font.SysFont('Arial', 30)
clock = pygame.time.Clock()
WHITE = ( 255, 255, 255)
BLACK = ( 0, 0, 0)
BLUE = ( 0, 0, 255)
welcome = myfont.render("Pozdrav! pritisnite space za start!", False, BLUE)
welcome_size = welcome.get_rect()
frog = pygame.image.load("frog.jpg")
hamster = pygame.image.load("hamster.jpg")
hamster = pygame.transform.scale(hamster, (60, 60))
hamster_x = random.randint(20, WIDTH-20)
hamster_y = random.randint(20, HEIGHT-20)
game_state = "welcome"
hit = False
done = False
hamster_time = 2000
score = 0
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
game_state = "game"
elif event.type == pygame.MOUSEBUTTONDOWN:
if game_state == "game":
pos = event.pos
if hamster_pos.collidepoint(pos):
hit = True
if game_state == "welcome":
screen.fill(WHITE)
screen.blit(frog, (0,0) )
screen.blit(welcome,
(WIDTH/2 - welcome_size.width/2,
HEIGHT/2 - welcome_size.height/2) )
elif game_state == "game":
hamster_time -= clock.get_time()
if hit:
hit = False
hamster_time = 2000
hamster_x = random.randint(20, WIDTH-20)
hamster_y = random.randint(20, HEIGHT-20)
score += 1
if hamster_time <= 0:
game_state = "game_over"
screen.fill(BLUE)
hamster_pos = screen.blit(hamster,
(hamster_x, hamster_y))
score_text = myfont.render("Score: %d"%score,
False, WHITE)
hscore_text = myfont.render("Highscore: %d"%highscore, False, WHITE)
screen.blit(score_text, (0,0))
screen.blit(hscore_text, (WIDTH/2,0))
elif game_state == "game_over":
screen.fill(WHITE)
if score > highscore:
highscore = score
highscore_file = open("highscore", "w+")
highscore_file.write("%d"%highscore)
highscore_file.close()
pygame.display.flip()
clock.tick(60)
pygame.quit()
| true |
f80b05df42e35938978a98dd8936ae6bf908ad5c | Python | AngelBeats013/ID3 | /main.py | UTF-8 | 3,348 | 2.828125 | 3 | [] | no_license | import sys
import id3
import utils
# Read args from cmd line
if len(sys.argv) != 5:
print('Four arguments needed! Found: %s' % (len(sys.argv)-1))
exit(1)
training_data_file = sys.argv[1]
validation_data_file = sys.argv[2]
test_data_file = sys.argv[3]
prune_factor = float(sys.argv[4])
print('Use training data from %s' % training_data_file)
print('Use validation data from %s' % validation_data_file)
print('Use training data from %s' % test_data_file)
print('Use prune factor: %s' % prune_factor)
print('')
training_data = utils.read_data(training_data_file)
validation_data = utils.read_data(validation_data_file)
test_data = utils.read_data(test_data_file)
root, node_num, leaf_num = id3.train(training_data)
utils.print_tree(root, training_data)
print('')
print('Pre-Pruned Accuracy')
print('- - - - - - - - - - - - -')
train_accuracy = id3.test(root, training_data) * 100
print('Number of training instances = %s' % len(training_data))
print('Number of training attributes = %s' % len(training_data[0].feature_map))
print('Total number of nodes in the tree = %s' % node_num)
print('Number of leaf nodes in the tree = %s' % leaf_num)
print('Accuracy of the model on the training dataset = %.1f%%' % train_accuracy)
validation_accuracy = id3.test(root, validation_data) * 100
print('')
print('Number of validation instances = %s' % len(validation_data))
print('Number of validation attributes = %s' % len(validation_data[0].feature_map))
print('Accuracy of the model on the validation dataset before pruning = %.1f%%' % validation_accuracy)
test_accuracy = id3.test(root, test_data) * 100
print('')
print('Number of validation instances = %s' % len(test_data))
print('Number of validation attributes = %s' % len(test_data[0].feature_map))
print('Accuracy of the model on the validation dataset before pruning = %.1f%%' % test_accuracy)
print('')
print('Post-Pruned Accuracy')
print('- - - - - - - - - - - - -')
prune_num = int(float(node_num) * prune_factor)
pruned_validation_accuracy, stop_indexes = id3.prune_and_test(root, node_num, prune_num, validation_accuracy / 100.0, validation_data, validation_accuracy)
node_num, leaf_num = id3.count_nodes_after_prune(root, stop_indexes)
pruned_training_accuracy = id3.test(root, training_data, stop_indexes) * 100.0
print('Number of training instances = %s' % len(training_data))
print('Number of training attributes = %s' % len(training_data[0].feature_map))
print('Total number of nodes in the tree = %s' % node_num)
print('Number of leaf nodes in the tree = %s' % leaf_num)
print('Accuracy of the model on the training dataset = %.1f%%' % pruned_training_accuracy)
pruned_validation_accuracy *= 100.0
print('')
print('Number of validation instances = %s' % len(validation_data))
print('Number of validation attributes = %s' % len(validation_data[0].feature_map))
print('Accuracy of the model on the validation dataset after pruning = %.1f%%' % pruned_validation_accuracy)
if len(stop_indexes) == 0:
print('Pruning did not improve validation accuracy!')
pruned_test_accuracy = id3.test(root, test_data, stop_indexes) * 100.0
print('')
print('Number of validation instances = %s' % len(test_data))
print('Number of validation attributes = %s' % len(test_data[0].feature_map))
print('Accuracy of the model on the test dataset after pruning = %.1f%%' % pruned_test_accuracy) | true |
4e38008b30305353490c34aa1a3aaee87cf31b0f | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/16_0_4/RandomNickName/fractileSmall.py | UTF-8 | 975 | 3.015625 | 3 | [] | no_license | def solve(k,c,s) :
ret = -1
return ret
def main():
# raw_input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
t = int(raw_input()) # read a line with a single integer
#l, d, n = [int(s) for s in raw_input().split(" ")] # read a list of integers, 2 in this case
#print l,d,n
global all_primes
for j in xrange(1, t+1):
k,c,s = [int(s) for s in raw_input().split(" ")] # read a list of integers, 2 in
#k tiles,
#c complexity (number of levels = c + 1)
#s grad students
#print k,c,s
answer = "HUH?"
answer = ""
if k > s :
answer = "IMPOSSIBLE"
else :
while s != 0:
answer = answer + " " + str(s)
s = s - 1
print "Case #{}: ".format(j) + str(answer)
if __name__ == "__main__" :
main()
| true |
1d5a058babaf1c4ed74b07a737bac83a5510da50 | Python | aliliang/tricircle | /juno-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/migrate_repo/versions/255_add_mapping_uuid_column_to_instance.py | UTF-8 | 542 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | from sqlalchemy import Column, String, MetaData, Table
def upgrade(migrate_engine):
    """Add the nullable varchar(36) ``mapping_uuid`` column to instances."""
    metadata = MetaData(bind=migrate_engine)
    instances_table = Table('instances', metadata, autoload=True)
    instances_table.create_column(Column('mapping_uuid', String(length=36)))
def downgrade(migrate_engine):
    """Drop the ``mapping_uuid`` column added by upgrade()."""
    metadata = MetaData(bind=migrate_engine)
    instances_table = Table('instances', metadata, autoload=True)
    instances_table.columns.mapping_uuid.drop()
| true |
34abb23023f32fd1ff18d19494e714760d4adc7b | Python | tomasbm07/IPRP---FCTUC | /9/9_2.py | UTF-8 | 169 | 3.390625 | 3 | [] | no_license |
def produto_escalar(v, w):
    """Return the dot (scalar) product of vectors *v* and *w*.

    Iterative rewrite of the original recursion: avoids the O(n^2)
    list slicing and Python's recursion-depth limit on long vectors.
    Returns 0 for empty input; extra trailing elements of the longer
    vector are ignored (zip truncates), generalizing the original's
    ``len(v) == 0`` base case.
    """
    return sum(a * b for a, b in zip(v, w))
print(produto_escalar([1, 2, 3], [5, 6, 7]))
| true |
87c3b24a5b2d852d997d1946d1ddc42144306eda | Python | studnetze/stunetlympics | /random-keyboard-layout/test.py | UTF-8 | 1,546 | 3.046875 | 3 | [] | no_license | #!/usr/bin/python3
# Starts the game (incl. xkb-generation). The game is type a string called
# "jingle" on a random keyboard layout as fast as you can.
# HF
#
# Original Author Tobias Birnbaum, SNT 2015
from subprocess import Popen, PIPE
import time
try:
Popen(['python3', 'create-xkb-symbols.py'])
Popen(['setxkbmap', 'random_layout'])
with Popen(['fortune', '-s', '-n', '50'], stdout=PIPE) as proc:
jingle = proc.stdout.read().decode().replace('\n', '').replace(' ', '')
print("Welcome to Stunetlympics!")
print("-------------------------------------------------------------")
print("Please type in text, displayed in the following as fast as")
print("you can.")
print("Now it's your turn to give kbd-mnemonic a try. Be the best...")
print('Escape hatch into outer space: "setxkbmap de "')
print("-------------------------------------------------------------")
input("Press Enter to continue...")
print("Ready...")
time.sleep(0.5)
print("Steady...")
time.sleep(0.5)
print("Go!")
print("-------------------------------------------------------------\n")
t0 = time.time()
while True:
print(jingle + '\n\n')
var = input('Your guess:\n')
if var == jingle:
break
wallTime = (time.time() - t0)
print("You did it in {0:8.5f}s.\n".format(wallTime))
Popen(['setxkbmap', 'de'])
#name = input("Please enter your name for the hall of fame: ")
# TODO write simple text file
finally:
Popen(['setxkbmap', 'de'])
| true |
ee9ce609c21cae0cd382f8d6c96cddc47a478848 | Python | caleblevy/collections-extended | /tests/test_bags.py | UTF-8 | 5,093 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive |
import pytest
from collections_extended.bags import _basebag, bag, frozenbag, _compat
def test_init():
b = _basebag('abracadabra')
assert b.count('a') == 5
assert b.count('b') == 2
assert b.count('r') == 2
assert b.count('c') == 1
assert b.count('d') == 1
b2 = bag(b)
assert b2 == b
def test_repr():
ms = _basebag()
assert ms == eval(ms.__repr__())
ms = _basebag('abracadabra')
assert ms == eval(ms.__repr__())
def compare_bag_string(b):
s = str(b)
return set(s.lstrip('{').rstrip('}').split(', '))
def test_str():
assert str(_basebag()) == '_basebag()'
assert "'a'^5" in str(_basebag('abracadabra'))
assert "'b'^2" in str(_basebag('abracadabra'))
assert "'c'" in str(_basebag('abracadabra'))
abra_elems = set(("'a'^5", "'b'^2", "'r'^2", "'c'", "'d'"))
assert compare_bag_string(bag('abracadabra')) == abra_elems
if not _compat.is_py2:
assert compare_bag_string(bag('abc')) == compare_bag_string(set('abc'))
def test_count():
ms = _basebag('abracadabra')
assert ms.count('a') == 5
assert ms.count('x') == 0
def test_nlargest():
abra = _basebag('abracadabra')
sort_key = lambda e: (-e[1], e[0])
abra_counts = [('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)]
assert (sorted(abra.nlargest(), key=sort_key) == abra_counts)
assert sorted(abra.nlargest(3), key=sort_key) == abra_counts[:3]
assert _basebag('abcaba').nlargest(3) == [('a', 3), ('b', 2), ('c', 1)]
def test_from_map():
assert _basebag._from_map({'a': 1, 'b': 2}) == _basebag('abb')
def test_copy():
b = _basebag()
assert b.copy() == b
assert b.copy() is not b
b = _basebag('abc')
assert b.copy() == b
assert b.copy() is not b
def test_len():
assert len(_basebag()) == 0
assert len(_basebag('abc')) == 3
assert len(_basebag('aaba')) == 4
def test_contains():
assert 'a' in _basebag('bbac')
assert 'a' not in _basebag()
assert 'a' not in _basebag('missing letter')
def test_le():
assert _basebag() <= _basebag()
assert _basebag() <= _basebag('a')
assert _basebag('abc') <= _basebag('aabbbc')
assert not _basebag('abbc') <= _basebag('abc')
with pytest.raises(TypeError):
bag('abc') < set('abc')
assert not bag('aabc') < bag('abc')
def test_and():
assert bag('aabc') & bag('aacd') == bag('aac')
assert bag() & bag('safgsd') == bag()
assert bag('abcc') & bag() == bag()
assert bag('abcc') & bag('aabd') == bag('ab')
assert bag('aabc') & set('abdd') == bag('ab')
def test_isdisjoint():
assert bag().isdisjoint(bag())
assert bag().isdisjoint(bag('abc'))
assert not bag('ab').isdisjoint(bag('ac'))
assert bag('ab').isdisjoint(bag('cd'))
def test_or():
assert bag('abcc') | bag() == bag('abcc')
assert bag('abcc') | bag('aabd') == bag('aabccd')
assert bag('aabc') | set('abdd') == bag('aabcd')
def test_add_op():
b1 = bag('abc')
result = b1 + bag('ab')
assert result == bag('aabbc')
assert b1 == bag('abc')
assert result is not b1
def test_add():
b = bag('abc')
b.add('a')
assert b == bag('aabc')
def test_clear():
b = bag('abc')
b.clear()
assert b == bag()
def test_discard():
b = bag('abc')
b.discard('a')
assert b == bag('bc')
b.discard('a')
assert b == bag('bc')
def test_sub():
assert bag('abc') - bag() == bag('abc')
assert bag('abbc') - bag('bd') == bag('abc')
def test_mul():
ms = _basebag('aab')
assert ms * set('a') == _basebag(('aa', 'aa', 'ba'))
assert ms * set() == _basebag()
def test_xor():
assert bag('abc') ^ bag() == bag('abc')
assert bag('aabc') ^ bag('ab') == bag('ac')
assert bag('aabcc') ^ bag('abcde') == bag('acde')
def test_ior():
b = bag()
b |= bag()
assert b == bag()
b = bag('aab')
b |= bag()
assert b == bag('aab')
b = bag('aab')
b |= bag('ac')
assert b == bag('aabc')
b = bag('aab')
b |= set('ac')
assert b == bag('aabc')
def test_iand():
b = bag()
b &= bag()
assert b == bag()
b = bag('aab')
b &= bag()
assert b == bag()
b = bag('aab')
b &= bag('ac')
assert b == bag('a')
b = bag('aab')
b &= set('ac')
assert b == bag('a')
def test_ixor():
b = bag('abbc')
b ^= bag('bg')
assert b == bag('abcg')
b = bag('abbc')
b ^= set('bg')
assert b == bag('abcg')
def test_isub():
b = bag('aabbc')
b -= bag('bd')
assert b == bag('aabc')
b = bag('aabbc')
b -= set('bd')
assert b == bag('aabc')
def test_iadd():
b = bag('abc')
b += bag('cde')
assert b == bag('abccde')
b = bag('abc')
b += 'cde'
assert b == bag('abccde')
def test_hash():
bag_with_empty_tuple = frozenbag([()])
assert not hash(frozenbag()) == hash(bag_with_empty_tuple)
assert not hash(frozenbag()) == hash(frozenbag((0,)))
assert not hash(frozenbag('a')) == hash(frozenbag(('aa')))
assert not hash(frozenbag('a')) == hash(frozenbag(('aaa')))
assert not hash(frozenbag('a')) == hash(frozenbag(('aaaa')))
assert not hash(frozenbag('a')) == hash(frozenbag(('aaaaa')))
assert hash(frozenbag('ba')) == hash(frozenbag(('ab')))
assert hash(frozenbag('badce')) == hash(frozenbag(('dbeac')))
def test_num_unique_elems():
assert bag('abracadabra').num_unique_elements() == 5
def test_pop():
b = bag('a')
assert b.pop() == 'a'
with pytest.raises(KeyError):
b.pop()
| true |
9f4263702b108c3b12ead46fc1fdb63767206c7d | Python | iafjayoza/Python | /Grid_Path/grid_path.py | UTF-8 | 758 | 2.921875 | 3 | [] | no_license | def findpath(l):
grid = [[0 for i in range(6)] for j in range(11)]
for j in range(11):
for i in range(6):
for x in l:
if j == x[0] and i == x[1]:
grid[j][i] = 0
break
else:
if j == 0:
grid[j][i] = 1
#grid[i][j] = grid[i - 1][j]
elif i == 0:
grid[j][i] = 1
#grid[i][j] = grid[i][j - 1]
else:
grid[j][i] = grid[j - 1][i] + grid[j][i - 1]
for i in grid:
print(i)
print(grid[10][5])
return(grid[10][5])
l = [(4,1),(4,2),(4,3),(4,4),(4,5)]
findpath(l)
k = [(4,2)]
findpath(k)
m = []
findpath(m) | true |
90f1a5864f55bd21a75bb4532a23937add253431 | Python | hschoi1/TIL | /misc/unittest_example.py | UTF-8 | 1,897 | 3.640625 | 4 | [] | no_license | # examples from https://docs.python.org/3/library/unittest.html
import unittest
"""
TestCase class provides assert methods to check for and report failures
some include:
assertEqual(a,b)
assertNotEqual(a,b)
assertTrue(x)
assertFalse(x)
assertIs(a,b)
assertIsNot(a,b)
assertIsNone(x)
assertIsNotNone(x)
assertIn(a,b)
assertNotIn(a,b)
assertIsInstance(a,b)
assertNotIsInstance(a,b)
assertRaises(exc, fun, *args, **kwds)
assertRaisesRegex(exc, r, fun, *args, **kwds)
assertWarns(warn, fun, *args, **kwds)
assertWarnsRegex(warn, r, fun, *args, **kwds)
assertLogs(logger, level)
"""
class TestStringMethods(unittest.TestCase):
    """Examples from the unittest docs.

    ``test_split`` fails ON PURPOSE -- the expected failure output is
    reproduced in the docstring at the bottom of this file.  Do not
    "fix" the assertion.
    """
    def test_upper(self):
        self.assertEqual('foo'.upper(), 'FOO') # checks for an expected result
    def test_isupper(self):
        self.assertTrue('FOO'.isupper()) #verify a condition
        self.assertFalse('Foo'.isupper())
    def test_split(self):
        s = 'hello world'
        # Deliberately wrong expectation (extra leading 1) to demonstrate
        # a failing assertion.
        self.assertEqual(s.split(), [1, 'hello', 'world'])
        # check that s.split fails when the separator is not a string
        with self.assertRaises(TypeError): # verify that a specific exception gets raised
            s.split(2)
# The setUp()/tearDown() methods let you define instructions that run before/after each test method (not used in this example).
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
"""
.F.
======================================================================
FAIL: test_split (__main__.TestStringMethods)
----------------------------------------------------------------------
Traceback (most recent call last):
File "unittest_example.py", line 15, in test_split
self.assertEqual(s.split(), [1, 'hello', 'world'])
AssertionError: Lists differ: ['hello', 'world'] != [1, 'hello', 'world']
First differing element 0:
'hello'
1
Second list contains 1 additional elements.
First extra element 2:
'world'
- ['hello', 'world']
+ [1, 'hello', 'world']
? +++
""" | true |
1ddd772a9bb26b3c20f4c80e5e81c8bd3e575131 | Python | MBras/AOC2020 | /19/part1.py | UTF-8 | 1,328 | 3.375 | 3 | [] | no_license | import re
lines = open("input.1").read()
# NOTE(review): the read above is dead — it is immediately overwritten by the
# next line (and neither file handle is closed explicitly).
lines = open("input.2").read()
lines = lines.splitlines()
readingrules = 1
rules = {}
puzzles = []
for line in lines:
    if line == "": # rules and puzzles are separated by an empty line
        readingrules = 0
    elif readingrules: # reading rules: "<id>: <body>", with quotes stripped
        m = re.search('(\d*): (.*)', line)
        rules[m.group(1)] = m.group(2).replace("\"","")
    else: # reading puzzle pieces
        puzzles.append(line)
# NOTE: this script uses Python 2 print statements.
print "Rules:"
print rules
print "Puzzles:"
print puzzles
def genregexp(rule):
    """Recursively expand rule references into a single regular expression."""
    tokens = re.findall("(\S+|\|)(?: )*", rule)
    pieces = []
    for token in tokens:
        if token in rules:
            # Token names another rule: expand it recursively.
            pieces.append(genregexp(rules[token]))
        else:
            # Terminal symbol: a literal character or the "|" alternator.
            pieces.append(token)
    body = "".join(pieces)
    # Alternations get a non-capturing group so precedence is preserved.
    if "|" in tokens:
        return "(?:" + body + ")"
    return body
# Rule 0, anchored so the whole puzzle line must match.
regexp = "^(" + genregexp(rules["0"]) + ")$"
print "Regexp: " + regexp
# counted with https://regex101.com/r/vcNmUG/2
# but ok, here's the python code to count valid strings
count = 0
for puzzle in puzzles:
    match = re.search(regexp, puzzle)
    if match:
        count += 1
print "Part 1: " + str(count)
| true |
843ee7d4858f7a51ef3bf6bf473865fe3baefeec | Python | sergloko/geekbrains-python | /Lesson4/task3.py | UTF-8 | 356 | 3.96875 | 4 | [] | no_license | # Для чисел в пределах от 20 до 240 найти числа, кратные 20 или 21. Необходимо решить задание в одну строку.
# Hint: use the range() function and a comprehension/generator.
# NOTE(review): range(20,240) excludes 240 itself — confirm whether the task's
# upper bound ("within 20 to 240") is meant to be inclusive.
my_list = [i for i in range(20,240) if i%20 == 0 or i%21 == 0]
print(my_list) | true |
a9a562c32177904e0be86485fa5e7ee3a06ccd3c | Python | emanah15/Machine-Learning-Project | /datasetCreator.py | UTF-8 | 1,988 | 3.09375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 19:38:22 2020
@author: ghulam
"""
import cv2
def generate_dataset(img, user_id, img_id):
    """Write one face sample to dataSet/user.<user_id>.<img_id>.jpg."""
    filename = "dataSet/user.{0}.{1}.jpg".format(user_id, img_id)
    cv2.imwrite(filename, img)
def draw_square(img, classifier, scaleFactor, minNeighbours, color, text):
    """Draw a labelled rectangle around each feature found by `classifier`.

    Returns the [x, y, w, h] of the LAST detection (empty list if none).
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect features in the gray-scale image; each hit is (x, y, w, h).
    detections = classifier.detectMultiScale(grayscale, scaleFactor, minNeighbours)
    coords = []
    for (x, y, w, h) in detections:
        # Box the feature and label it just above the box.
        cv2.rectangle(img, (x,y), (x+w, y+h), color, 2)
        cv2.putText(img, text, (x, y-4), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 1, cv2.LINE_AA)
        coords = [x, y, w, h]  # keeps only the most recent detection
    return coords
# Method to detect the features
def detect(img, faceCascade, img_id, user_id=3):
    """Detect a face in `img`, box it, and save the cropped face as a sample.

    img: BGR frame; faceCascade: cv2 cascade classifier; img_id: sample index.
    user_id: dataset label for the person being collected. It was previously
    hard-coded inside the function; the default of 3 preserves old behaviour
    while letting callers collect samples for a new person.
    Returns the (annotated) frame.
    """
    color = {"blue":(255,0,0), "red":(0,0,255), "green":(0,255,0)}
    coords = draw_square(img, faceCascade, 1.15, 10, color['red'], "Face")
    if len(coords) == 4:
        # Crop the detected region (rows = y..y+h, columns = x..x+w).
        roi_img = img[coords[1]:coords[1]+coords[3], coords[0]:coords[0]+coords[2]]
        generate_dataset(roi_img, user_id, img_id)
    return img
# Loading classifier
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#read video stream from webcam
video_capture = cv2.VideoCapture(0)
img_id=0
while True:
    _, img = video_capture.read()
    # Call method we defined above
    img = detect(img, faceCascade, img_id)
    cv2.imshow("face detection", img)
    img_id += 1
    # Stop on 'q' or once 100 samples have been collected.
    if cv2.waitKey(1) & 0xFF == ord('q') or img_id==100:
        break
video_capture.release()
cv2.destroyAllWindows()
print('collecting samples complete!!!!') | true |
2160ef402514254d92b973cced23ea723e36fb4d | Python | itarabanovska/hw_itarabanovska | /python_projects/test_project/Homework/hw_4/hw4_3.py | UTF-8 | 799 | 2.953125 | 3 | [] | no_license | import random
# Demo account store: login -> [password, secret number].
user_accounts = {
    'cat': ['dfdss', 78493487],
    'dog': ['qwesfd', 89638492],
    'rat': ['fgyjgf', 90906545],
    'pig': ['fgrtgh', 81234234],
    'cow': ['gtwxz', 69785948],
    'elephant': ['lkol', 55889087],
    'cammel': ['vfdd', 10293847],
    'tiger': ['gfgeer', 20390004],
    'leon': ['gftynnn', 45678034],
    'zebra': ['grtyt', 20906540],
}
login = input('Please, enter your login:')
if login in list(user_accounts.keys()):
    user_record = user_accounts[login]
    password = input('Please, enter your password:')
    if password == user_record[0]:
        # Correct password: reveal the account's secret number.
        print(user_record[1])
    # NOTE(review): a wrong password silently prints nothing.
else:
    # Unknown login: register it with the entered password and a random secret.
    password = (input('Enter your password:'))
    secret = random.randint(1000, 1000000)
    print(user_accounts.setdefault(login, [password, secret]))
    print(user_accounts)
| true |
3e193ace9a08de51750ecb6988aefa9250478286 | Python | djdt/shiz2plot | /util/kvparser.py | UTF-8 | 2,174 | 3.359375 | 3 | [] | no_license | import re
from util.valueparse import convert_string_values
class KeyValParser(object):
    """Parses a string of comma separated <key>=<value> pairs into attributes."""

    def __init__(self, *args, **kwargs):
        """args -> Optional string that is passed to .parse.
        kwargs -> Stored as attributes (parsed values have priority)."""
        if len(args) > 1:
            raise TypeError(
                "File: Takes 0 or 1 argument, {} given.".format(len(args)))
        elif len(args) == 1 and args[0] is not None:
            self.parse(args[0])
        # Parsed values have priority, so kwargs never overwrite them.
        self.update(kwargs, overwrite=False)

    def _convert_value(self, value: str):
        """Best-effort conversion of a string to int/float.

        Currently unused: parse() delegates to convert_string_values.
        """
        try:
            if '.' in value:
                value = float(value)
            else:
                value = int(value)
        except ValueError:
            pass
        return value

    def _key_is_valid(self, key):
        # Hook for subclasses to restrict the accepted key set.
        return True

    def _lookup_key(self, key, vals):
        # Hook for subclasses to post-process parsed values.
        return vals

    def parse(self, string: str, overwrite=True):
        """Parse a string and add values.
        string -> (string) comma separated list of <key>=<val>.
        overwrite -> (bool) overwrite existing attributes.
        Raises KeyError for keys rejected by _key_is_valid."""
        if len(string) == 0:
            return
        string = string.strip()
        # Split only on commas that start a new `key=` pair, so commas inside
        # values survive. Raw string avoids the invalid '\,'/'\=' escapes.
        tokens = re.split(r',(?=\w+=)', string)
        for token in tokens:
            # Split once: values may themselves contain '='.
            key, vals = token.split('=', 1)
            key = key.lower()
            if not self._key_is_valid(key):
                raise KeyError(key, self.__class__)
            vals = convert_string_values(vals)
            vals = self._lookup_key(key, vals)
            if overwrite or not hasattr(self, key):
                setattr(self, key, vals)

    def update(self, kwargs: dict, overwrite=False):
        """Adds keys, values to the instance.
        kwargs -> (dict) key/values to add.
        overwrite -> (bool) overwrite existing attributes."""
        for key, val in kwargs.items():
            if not self._key_is_valid(key):
                raise KeyError(key, self.__class__)
            if overwrite or not hasattr(self, key):
                setattr(self, key, val)

    def get(self):
        """Return the instance's attribute dict."""
        return self.__dict__
| true |
54cb5cd98deb21ff35d87d941bf60901fba249c9 | Python | FedericoNardi/MachineLearning | /Project2/Logistic.py | UTF-8 | 12,274 | 2.6875 | 3 | [] | no_license | import numpy as np
import pickle
import scipy as scl
from sklearn.model_selection import train_test_split
import warnings
from scipy.special import expit
import matplotlib.pyplot as plt
def PickRandom(x, y, dim):
    """Sample `dim` (feature, label) row pairs uniformly with replacement.

    x : (n_samples, n_features) array; y : (n_samples, n_labels) array.
    Returns (X_train, y_train) of shapes (dim, n_features) / (dim, n_labels);
    row i of both comes from the same random index, preserving pairing.

    Fixes over the original: the second buffer overwrote X_train instead of
    creating y_train, the label rows were never copied, and nothing was
    returned.
    """
    X_train = np.zeros([dim, x.shape[1]])
    y_train = np.zeros([dim, y.shape[1]])
    for i in range(dim):
        index = np.random.randint(x.shape[0])
        X_train[i] = x[index]
        y_train[i] = y[index]
    return X_train, y_train
def Logistic_Newton(X, y, max_iters, tolerance):
    """Fit logistic-regression weights by Newton-Raphson iterations.

    X : (n_samples, n_features) design matrix (not modified).
    y : (n_samples,) binary labels in {0, 1}.
    max_iters : iteration cap; tolerance : stop once the Newton step's
        norm falls below it.
    Returns the fitted coefficient vector.

    Fixes over the original: the first update multiplied by the Hessian
    instead of its pseudo-inverse, and scaling `X.T` element-wise in place
    mutated the caller's X (numpy's .T is a view). The weighting is now
    applied to a fresh product each iteration.
    """
    beta = np.zeros(X.shape[1])
    for k in range(max_iters):
        p = expit(np.dot(X, beta))
        # Gradient of the negative log-likelihood: -X^T (y - p).
        gradient = -np.dot(X.T, y - p)
        # Hessian X^T diag(p(1-p)) X, via broadcasting over X.T's columns.
        weights = p * (1.0 - p)
        Hessian = np.dot(X.T * weights, X)
        # pinv keeps the update defined even for a singular Hessian.
        step = np.dot(np.linalg.pinv(Hessian), gradient)
        beta -= step
        if np.linalg.norm(step) < tolerance:
            print("Newton method converged to given precision in %d iterations" % k)
            break
    return beta
def Logistic_GradDesc(X, y, eta, max_iters, tolerance):
    """Fit logistic-regression weights by fixed-step gradient descent.

    X : (n_samples, n_features) design matrix.
    y : (n_samples,) binary labels in {0, 1}.
    eta : learning rate; max_iters : iteration cap; tolerance : stop once
        the gradient norm falls below it.
    Returns the fitted coefficient vector.

    The original duplicated the first iteration before the loop; the unified
    loop performs the same updates (and additionally allows an immediate
    stop if the very first step already satisfies the tolerance).
    """
    beta = np.zeros(X.shape[1])
    for k in range(max_iters):
        p = expit(np.dot(X, beta))
        # Gradient of the negative log-likelihood.
        gradient = -np.dot(X.T, y - p)
        beta -= eta * gradient
        if np.linalg.norm(gradient) < tolerance:
            print("Gradient Descent converged to given precision in %d iterations" % k)
            break
    return beta
def Logistic_SteepGradDesc(X,y,eta,max_iters,tolerance):
    """Gradient descent with an adaptive (Barzilai-Borwein-style) step size.

    X : design matrix, y : binary labels, eta : step for the first update
    only, max_iters : iteration cap, tolerance : gradient-norm stopping
    threshold. Returns the coefficient vector.
    """
    #Initialize beta-parameters
    beta = np.zeros(X.shape[1])
    beta_prev = beta.copy()
    norm = 100
    eta_k = 0
    #first step (plain gradient step with the supplied eta)
    z = np.dot(X, beta)
    p = expit(z)
    gradient = -np.dot(X.T,y-p)
    beta -= eta*gradient
    norm = np.linalg.norm(gradient)
    gradient_prev = gradient.copy()
    for k in range(1,max_iters):
        z = np.dot(X, beta)
        p = expit(z)
        # Keep the previous gradient before recomputing: the adaptive step
        # below needs both the parameter and gradient differences.
        gradient_prev = gradient.copy()
        gradient = -np.dot(X.T,y-p)
        # Step length s.y / ||y||^2 with s = beta - beta_prev,
        # y = gradient - gradient_prev (note: divides by zero if the two
        # successive gradients coincide).
        eta_k = np.dot((beta - beta_prev),gradient-gradient_prev) / np.linalg.norm(gradient-gradient_prev)**2
        # Order matters: snapshot beta before applying the new step.
        beta_prev = beta.copy()
        beta -= eta_k*gradient
        norm = np.linalg.norm(gradient)
        if(norm < tolerance):
            print("Gradient Descent converged to given precision in %d iterations" % k)
            break
    return beta
def Logistic_StocGradDesc(X, y, eta, max_iters, tolerance):
    """Fit logistic-regression weights by minibatch stochastic gradient descent.

    X : (n_samples, n_features) design matrix (not modified).
    y : (n_samples,) binary labels in {0, 1}.
    eta : learning rate; max_iters : number of epochs; tolerance : stop once
        the full-data gradient norm falls below it.
    Returns the fitted coefficient vector.

    Fixes over the original, which could not run: `del` on numpy array slices
    raises ValueError, the per-batch size was a float (invalid slice index),
    X_copy/y_copy aliased the inputs instead of copying, and the convergence
    check mixed a minibatch `p` with the full-length `y`.
    """
    # Number of minibatches (never more than the number of samples).
    m = min(100, X.shape[0])
    # Samples per minibatch (integer division).
    n = X.shape[0] // m
    # Shuffle once into minibatch order without touching the caller's arrays.
    order = np.random.permutation(X.shape[0])
    X_batch = X[order]
    y_batch = y[order]
    beta = np.zeros(X.shape[1])
    for k in range(max_iters):
        for i in range(m):
            # Pick a minibatch at random (with replacement), as before.
            s = np.random.randint(m)
            Xs = X_batch[s * n:(s + 1) * n]
            ys = y_batch[s * n:(s + 1) * n]
            p = expit(np.dot(Xs, beta))
            beta -= eta * (-np.dot(Xs.T, ys - p))
        # Convergence test on the full-data gradient.
        gradient = -np.dot(X.T, y - expit(np.dot(X, beta)))
        if np.linalg.norm(gradient) < tolerance:
            print("Gradient Descent converged to given precision in %d iterations" % k)
            break
    return beta
def LogisticFit(x, y, Beta):
    """Return hard 0/1 class predictions as a (len(y), 1) column vector.

    x : design matrix for the rows being scored; y is only used for its
    length; Beta : fitted coefficients.

    Bug fix: the original evaluated the module-level `X` instead of the
    `x` argument, so every call scored the global data set.
    """
    ymodel = np.zeros([len(y), 1])
    P = expit(np.dot(x, Beta))
    for i in range(len(y)):
        # Threshold the predicted probability at 0.5.
        if P[i] < 0.5:
            ymodel[i] = 0
        else:
            ymodel[i] = 1
    return ymodel
def AccuracyTest(y1, y2):
    """Return the fraction of positions where y1 and y2 agree."""
    hits = sum(1 for i in range(len(y1)) if y1[i] == y2[i])
    return hits / len(y1)
#Comment this to turn on warnings
warnings.filterwarnings('ignore')
np.random.seed() # shuffle random seed generator
# Ising model parameters
L=40 # linear system size
J=-1.0 # Ising interaction
T=np.linspace(0.25,4.0,16) # set of temperatures
T_c=2.26 # Onsager critical temperature in the TD limit
###### define ML parameters
num_classes=2
train_to_test_ratio=0.5 # training samples
# path to data directory
#path_to_data="C:\Anaconda\Programes/IsingData/"
# load data
file_name = "IsingData/Ising2DFM_reSample_L40_T=All.pkl" # this file contains 16*10000 samples taken in T=np.arange(0.25,4.0001,0.25)
#data = pickle.load(open(path_to_data+file_name,'rb'))
data = pickle.load(open(file_name,'rb')) # pickle reads the file and returns the Python object (1D array, compressed bits)
data = np.unpackbits(data).reshape(-1, 1600) # Decompress array and reshape for convenience
data=data.astype('int')
data[np.where(data==0)]=-1 # map 0 state to -1 (Ising variable can take values +/-1)
file_name = "IsingData/Ising2DFM_reSample_L40_T=All_labels.pkl" # this file contains 16*10000 samples taken in T=np.arange(0.25,4.0001,0.25)
#slabels = pickle.load(open(path_to_data+file_name,'rb')) # pickle reads the file and returns the Python object (here just a 1D array with the binary labels)
labels = pickle.load(open(file_name,'rb'))
# divide data into ordered, critical and disordered
X_ordered=data[:70000,:]
Y_ordered=labels[:70000]
X_critical=data[70000:100000,:]
Y_critical=labels[70000:100000]
X_disordered=data[100000:,:]
Y_disordered=labels[100000:]
# free the full arrays; only the slices above are needed from here on
del data,labels
# define training and test data sets
X=np.concatenate((X_ordered,X_disordered))
Y=np.concatenate((Y_ordered,Y_disordered))
# pick random data points from ordered and disordered states
# to create the training and test sets
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,train_size=train_to_test_ratio)
# full data set
X=np.concatenate((X_critical,X))
Y=np.concatenate((Y_critical,Y))
print('X_train shape:', X_train.shape)
print('Y_train shape:', Y_train.shape)
print()
print(X_train.shape[0], 'train samples')
print(X_critical.shape[0], 'critical samples')
print(X_test.shape[0], 'test samples')
###### apply logistic regression
from sklearn import linear_model
# define regularisation parameter
lmbdas=np.logspace(-5,5,11)
# preallocate data
train_accuracy=np.zeros(lmbdas.shape,np.float64)
test_accuracy=np.zeros(lmbdas.shape,np.float64)
critical_accuracy=np.zeros(lmbdas.shape,np.float64)
train_accuracy_SGD=np.zeros(lmbdas.shape,np.float64)
test_accuracy_SGD=np.zeros(lmbdas.shape,np.float64)
critical_accuracy_SGD=np.zeros(lmbdas.shape,np.float64)
print('Logistic Stuff')
# # Logistic Newton (kept commented out by the author)
# # fit training data
# X_train_copy = X_train
# Y_train_copy = Y_train
# beta_N = Logistic_Newton(X_train_copy,Y_train_copy, 1000, 1e-4)
# # check accuracy
# Ymodel_train_N = LogisticFit(X_train,Y_train,beta_N)
# Ymodel_test_N = LogisticFit(X_test,Y_test,beta_N)
# Ymodel_critical_N = LogisticFit(X_critical,Y_critical,beta_N)
# train_accuracy_N = AccuracyTest(Y_train,Ymodel_train_N)
# test_accuracy_N = AccuracyTest(Y_test,Ymodel_test_N)
# critical_accuracy_N = AccuracyTest(Y_critical,Ymodel_critical_N)
# print('Newton')
# print('accuracy: train, test, critical')
# print('liblin: %0.4f, %0.4f, %0.4f' %(train_accuracy_N,test_accuracy_N,critical_accuracy_N) )
# Logistic Gradient descent
# fit training data
beta_GD = Logistic_GradDesc(X_train, Y_train, 0.01, 1000, 1e-4)
# check accuracy
Ymodel_train_GD = LogisticFit(X_train,Y_train,beta_GD)
Ymodel_test_GD = LogisticFit(X_test,Y_test,beta_GD)
Ymodel_critical_GD = LogisticFit(X_critical,Y_critical,beta_GD)
train_accuracy_GD = AccuracyTest(Y_train,Ymodel_train_GD)
test_accuracy_GD = AccuracyTest(Y_test,Ymodel_test_GD)
critical_accuracy_GD = AccuracyTest(Y_critical,Ymodel_critical_GD)
print('Gradient descent')
print('GD: %0.4f, %0.4f, %0.4f' %(train_accuracy_GD,test_accuracy_GD,critical_accuracy_GD) )
# Logistic Steepest Gradient descent
# fit training data
beta_SG = Logistic_SteepGradDesc(X_train, Y_train, 0.1, 1000, 1e-4)
# check accuracy
Ymodel_train_SG = LogisticFit(X_train,Y_train,beta_SG)
Ymodel_test_SG = LogisticFit(X_test,Y_test,beta_SG)
Ymodel_critical_SG = LogisticFit(X_critical,Y_critical,beta_SG)
train_accuracy_SG = AccuracyTest(Y_train,Ymodel_train_SG)
test_accuracy_SG = AccuracyTest(Y_test,Ymodel_test_SG)
critical_accuracy_SG = AccuracyTest(Y_critical,Ymodel_critical_SG)
print('Steepest gradient descent')
print('SG: %0.4f, %0.4f, %0.4f' %(train_accuracy_SG,test_accuracy_SG,critical_accuracy_SG) )
# NOTE(review): the block below is labelled "Stochastic" (SCG) but calls
# Logistic_SteepGradDesc again — presumably Logistic_StocGradDesc was
# intended; confirm before relying on these numbers.
# Logistic Steepest Gradient descent
# fit training data
beta_SCG = Logistic_SteepGradDesc(X_train,Y_train, 0.1, 1000, 1e-4)
# check accuracy
Ymodel_train_SCG = LogisticFit(X_train,Y_train,beta_SCG)
Ymodel_test_SCG = LogisticFit(X_test,Y_test,beta_SCG)
Ymodel_critical_SCG = LogisticFit(X_critical,Y_critical,beta_SCG)
train_accuracy_SCG = AccuracyTest(Y_train,Ymodel_train_SCG)
test_accuracy_SCG = AccuracyTest(Y_test,Ymodel_test_SCG)
critical_accuracy_SCG = AccuracyTest(Y_critical,Ymodel_critical_SCG)
print('Stochastic gradient descent')
print('SCG: %0.4f, %0.4f, %0.4f' %(train_accuracy_SCG,test_accuracy_SCG,critical_accuracy_SCG) )
print('Sklearn')
# loop over regularisation strength
for i,lmbda in enumerate(lmbdas):
    # define logistic regressor
    logreg=linear_model.LogisticRegression(C=1.0/lmbda,random_state=1,verbose=0,max_iter=1E3,tol=1E-5)
    # fit training data
    logreg.fit(X_train, Y_train)
    # check accuracy
    train_accuracy[i]=logreg.score(X_train,Y_train)
    test_accuracy[i]=logreg.score(X_test,Y_test)
    critical_accuracy[i]=logreg.score(X_critical,Y_critical)
    print('accuracy: train, test, critical')
    print('liblin: %0.4f, %0.4f, %0.4f' %(train_accuracy[i],test_accuracy[i],critical_accuracy[i]) )
    # define SGD-based logistic regression
logreg_SGD = linear_model.SGDClassifier(loss='log', penalty='l2', alpha=lmbda, max_iter=100,
shuffle=True, random_state=1, learning_rate='optimal')
# fit training data
logreg_SGD.fit(X_train,Y_train)
# check accuracy
train_accuracy_SGD[i]=logreg_SGD.score(X_train,Y_train)
test_accuracy_SGD[i]=logreg_SGD.score(X_test,Y_test)
critical_accuracy_SGD[i]=logreg_SGD.score(X_critical,Y_critical)
print('SGD: %0.4f, %0.4f, %0.4f' %(train_accuracy_SGD[i],test_accuracy_SGD[i],critical_accuracy_SGD[i]) )
print('finished computing %i/11 iterations' %(i+1))
# plot accuracy against regularisation strength
plt.semilogx(lmbdas,train_accuracy,'*-b',label='liblinear train')
plt.semilogx(lmbdas,test_accuracy,'*-r',label='liblinear test')
plt.semilogx(lmbdas,critical_accuracy,'*-g',label='liblinear critical')
plt.semilogx(lmbdas,train_accuracy_SGD,'*--b',label='SGD train')
plt.semilogx(lmbdas,test_accuracy_SGD,'*--r',label='SGD test')
plt.semilogx(lmbdas,critical_accuracy_SGD,'*--g',label='SGD critical')
plt.xlabel('$\\lambda$')
plt.ylabel('$\\mathrm{accuracy}$')
plt.grid()
plt.legend()
plt.show() | true |
1098a64b47da93a4a9eb835a387f2ddb5cbaf217 | Python | Tencent-Ti/ti-sign-python | /example/main.py | UTF-8 | 1,685 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sys
sys.path.append("..")
from tisign.sign import *
def build_http_header_with_signature():
    """Build and print the signed HTTP headers for a sample Ti gateway call."""
    # Example: the Ti platform's "check whether the user has Admin rights"
    # API. Basic information of the interface:
    # action: DescribeIsAdmin
    # service: ti-auth
    # version: 2020-10-10
    # content-type: application/json
    # HTTP method: POST
    # Gateway address: 127.0.0.1
    # Host used to reach the gateway
    host = "127.0.0.1"
    # Service action (API name)
    action = 'DescribeIsAdmin'
    # API version
    version = '2020-10-10'
    # Service the API belongs to
    service = 'ti-auth'
    # Content-type of the HTTP request; the gateway currently only supports:
    # application/json and multipart/form-data
    conten_type = 'application/json'
    # HTTP method; the gateway currently only supports: POST and GET
    http_method = 'POST'
    # Credentials generated by the Ti platform (Management Center ->
    # Personal Center -> Key Management)
    secret_id = 'test-secret-id'
    secret_key = 'test-secret-key'
    # Create the TiSign object
ts = TiSign(host, action, version, service, conten_type,
http_method, secret_id, secret_key)
    # Produce the HTTP header dict and signature needed to reach the
    # backend service through the gateway
    http_header_dict, authorization = ts.build_header_with_signature()
    # Print the signature string
    print("============= 签名字符串 Authorization =============")
    print("authorization: " + authorization)
    # Print the HTTP headers
    print("============ 通过网关访问后端服务Http请求头 ============")
    for key, value in http_header_dict.items():
        print(key + ": " + value)
# Run the demo when executed directly.
if __name__ == "__main__":
    build_http_header_with_signature()
| true |
710a6defca2e6f79e177d8c16edf1c6c60893076 | Python | CousinoMath/py-calculator | /token.py | UTF-8 | 2,344 | 3.453125 | 3 | [
"MIT"
] | permissive | from abc import ABC, abstractmethod
import math
class Token(ABC):
    """Abstract base for all calculator lexer tokens."""
    pass
class TokenPlus(Token):
    """Token for the '+' operator. The original's no-op __init__ is dropped;
    object's default constructor is equivalent."""

    def __str__(self):
        return '+'
class TokenMinus(Token):
    """Token for the '-' operator (no-op __init__ removed)."""

    def __str__(self):
        return '-'
class TokenStar(Token):
    """Token for the '*' operator (no-op __init__ removed)."""

    def __str__(self):
        return '*'
class TokenSlash(Token):
    """Token for the '/' operator (no-op __init__ removed)."""

    def __str__(self):
        return '/'
class TokenCaret(Token):
    """Token for the '^' operator (no-op __init__ removed)."""

    def __str__(self):
        return '^'
class TokenLParen(Token):
    """Token for '(' (no-op __init__ removed)."""

    def __str__(self):
        return '('
class TokenRParen(Token):
    """Token for ')' (no-op __init__ removed)."""

    def __str__(self):
        return ')'
class TokenEquals(Token):
    """Token for '=' (no-op __init__ removed)."""

    def __str__(self):
        return '='
class TokenNumber(Token):
    """Numeric literal token; `value` holds the parsed number."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return str(self.value)
class TokenIdentifier(Token):
    """Base for named tokens (variables, functions, constants)."""
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return self.name
class TokenVariable(TokenIdentifier):
    """Identifier token naming a user variable.

    The original re-implemented __init__/__str__ with bodies identical to
    TokenIdentifier's; inheriting them is equivalent.
    """
    pass
class TokenFunction(TokenIdentifier):
    """Identifier token for a math function; `.func` holds the callable."""

    # Name -> implementation. Unknown names map to None, matching the
    # original if/elif chain's fall-through branch.
    _FUNCTIONS = {
        'acos': math.acos,
        'asin': math.asin,
        'atan': math.atan,
        'cos': math.cos,
        'sin': math.sin,
        'tan': math.tan,
    }

    def __init__(self, name):
        super().__init__(name)
        self.func = self._FUNCTIONS.get(name)
class TokenConstant(TokenIdentifier):
    """Identifier token for a named constant ('pi', 'e').

    The original re-implemented __init__/__str__ with bodies identical to
    TokenIdentifier's; inheriting them is equivalent.
    """
    pass
class TokenEOF(Token):
    """End-of-input sentinel token (prints as the club suit glyph)."""
    def __init__(self):
        pass
    def __str__(self):
        return '♣'
def classifyIdentifier(name):
    """Map an identifier string to its most specific token type."""
    constants = ('pi', 'e')
    functions = ('acos', 'asin', 'atan', 'cos', 'sin', 'tan')
    if name in constants:
        return TokenConstant(name)
    if name in functions:
        return TokenFunction(name)
    # Anything else is treated as a user variable.
    return TokenVariable(name)
| true |
012c40a2f3241c316d6e5bb939ffc38439608022 | Python | iAdityaEmpire/20200408BNPAdvanced | /comprehensions.py | UTF-8 | 1,879 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python
# Sample data for the comprehension demos below.
fruits = ["pomegranate", "cherry", "apricot", "apple",
"lemon", "kiwi", "orange", "lime", "watermelon", "guava",
"papaya", "fig", "pear", "banana", "tamarind", "persimmon",
"elderberry", "peach", "blueberry", "lychee", "grape", "date" ]
# [EXPR for VAR in ITERABLE ... if CONDITION ...]
f1 = tuple([f.upper() for f in fruits])
print("f1: {}\n".format(f1))
ranks = '2 3 4 5 6 7 8 9 10 J Q K A'.split()
suits = 'Clubs Diamonds Hearts Spades'.split()
# Nested loops in a comprehension: cartesian product of suits x ranks.
cards = [f"{r}-{s}" for s in suits for r in ranks]
print("cards: {}\n".format(cards))
short_fruits = [f for f in fruits if len(f) < 6]
print("short_fruits: {}\n".format(short_fruits))
short_fruits = [f.title() for f in fruits if len(f) < 6]
print("short_fruits: {}\n".format(short_fruits))
# Tuples of (first, last, affiliation, date-of-birth).
people = [
    ('Melinda', 'Gates', 'Gates Foundation', '1964-08-15'),
    ('Steve', 'Jobs', 'Apple', '1955-02-24'),
    ('Larry', 'Wall', 'Perl', '1954-09-27'),
    ('Paul', 'Allen', 'Microsoft', '1953-01-21'),
    ('Larry', 'Ellison', 'Oracle', '1944-08-17'),
    ('Bill', 'Gates', 'Microsoft', '1955-10-28'),
    ('Mark', 'Zuckerberg', 'Facebook', '1984-05-14'),
    ('Sergey','Brin', 'Google', '1973-08-21'),
    ('Larry', 'Page', 'Google', '1973-03-26'),
    ('Linus', 'Torvalds', 'Linux', '1969-12-28'),
]
dobs = [p[3] for p in people]
print("dobs: {}\n".format(dobs))
colors = ["Blue", "red", "RED", 'green', 'RED', 'Red', 'blue', 'GREEN', 'orange', 'ORANGE']
c1 = set(colors)
print("c1: {}\n".format(c1))
# {EXPR for VAR in ITERABLE if CONDITION}
c2 = {c.lower() for c in colors if c.lower() != 'orange'}
print("c2: {}\n".format(c2))
# {KEY:VALUE for VAR in ITERABLE if CONDITION}
fruit_lengths = {f:len(f) for f in fruits}
print("fruit_lengths: {}\n".format(fruit_lengths))
person_info = {f"{p[0]} {p[1]}":f"{p[3]}" for p in people}
print("person_info: {}\n".format(person_info))
| true |
622906bac2019a2d760158e5c23150ea2adf5ce9 | Python | singular-labs/sqlno | /sqlno/common/structures.py | UTF-8 | 2,038 | 2.796875 | 3 | [
"MIT"
] | permissive | from sqlno.common.expressions import Expression
class TableColumn(Expression):
    """Expression for a (possibly table/database-qualified) column reference."""

    def __init__(self, name, table_name=None, database_name=None):
        # Join the non-empty qualifiers: [database.][table.]column
        qualified = '.'.join(filter(None, [database_name, table_name, name]))
        super(TableColumn, self).__init__(qualified)
class Table(object):
    """A (possibly database-qualified) table; attribute access yields columns."""

    def __init__(self, name, database_name=None):
        self.name = name
        self.database_name = database_name
        # Columns handed out so far, keyed by column name.
        self.columns = {}

    def c(self, column_name):
        """Create (and cache) a TableColumn qualified by this table."""
        col = TableColumn(column_name, table_name=self.name, database_name=self.database_name)
        self.columns[column_name] = col
        return col

    def __getattr__(self, attribute_name):
        # Any unknown attribute is treated as a column reference.
        return self.c(attribute_name)

    def as_(self, alias):
        """Return an aliased view of this table ("<table> as <alias>")."""
        return AliasedTable(self.name, alias, database_name=self.database_name)

    def __str__(self):
        parts = [part for part in [self.database_name, self.name] if part]
        return '.'.join(parts)
class AliasedTable(Table):
    """Table carrying an SQL alias; its columns are qualified by the alias."""

    def __init__(self, name, alias, database_name=None):
        super(AliasedTable, self).__init__(name, database_name=database_name)
        self.alias = alias

    def c(self, column_name):
        """Columns of an aliased table are addressed through the alias only."""
        col = TableColumn(column_name, table_name=self.alias)
        self.columns[column_name] = col
        return col

    def __str__(self):
        base = super(AliasedTable, self).__str__()
        return '{} as {}'.format(base, self.alias)
class Database(object):
    """Named database; attribute access yields Table objects."""

    def __init__(self, name):
        super(Database, self).__init__()
        self.name = name
        # Tables handed out so far, keyed by table name.
        self.tables = {}

    def t(self, table_name):
        """Create (and cache) a Table belonging to this database."""
        table = Table(table_name, database_name=self.name)
        self.tables[table_name] = table
        return table

    def __getattr__(self, table_name):
        # Any unknown attribute is treated as a table reference.
        return self.t(table_name)

    def __str__(self):
        return self.name
# Module-level anonymous singletons backing the t()/c() shortcuts below.
_global_database = Database(None)
_global_table = Table(None)
def db(database_name):
    # Convenience constructor for a named database.
    return Database(database_name)
def t(table_name):
    # Table shortcut: attribute access on the anonymous global database.
    return getattr(_global_database, table_name)
def c(column_name):
    # Column shortcut: attribute access on the anonymous global table.
    return getattr(_global_table, column_name)
| true |
893c204c7b140ea08bb003d312f2346aa3a22520 | Python | faisalnsour/three_digit_cases | /case_count_study.py | UTF-8 | 1,312 | 3.21875 | 3 | [] | no_license | import random
import sets
import numpy
# Expected number of steps for all possible outcomes to happen at least once.
# Dividing by number of reporters to allow for more than one outcome per step.
def expected_steps_per_period(n, r):
    """Expected steps until all n outcomes occur at least once (harmonic sum
    n * H_n), divided by r to account for r outcomes happening per step."""
    return sum(float(n) / k for k in range(1, n + 1)) / r
def covers_all_positive_ints(ints, max):
    """True iff every integer in 1..max (inclusive) is present in `ints`."""
    return all(value in ints for value in range(1, max + 1))
def calculate_coverage_time(reporters):
    """Simulate how many steps `reporters` parallel random draws need to
    cover 1..RANGE_UPPER, averaged over EXPERIMENTAL_RUNS runs.

    Returns (mean simulated steps, analytic coupon-collector expectation).

    Fixes over the original: uses the builtin `set` instead of the
    Python-2-only `sets.Set`, and replaces the side-effect list
    comprehension with a plain loop.
    """
    run_history = []
    for run in range(0, EXPERIMENTAL_RUNS):
        seen = set()
        steps = 0
        # Keep adding `reporters` random integers per step until every
        # value in 1..RANGE_UPPER has appeared at least once.
        while not covers_all_positive_ints(seen, RANGE_UPPER):
            for r in range(0, reporters):
                seen.add(random.randint(1, RANGE_UPPER))
            steps += 1
        run_history.append(steps)
    return numpy.mean(run_history), expected_steps_per_period(RANGE_UPPER, reporters)
# Outcome space is 1..999; average each configuration over 10 runs.
RANGE_UPPER = 999
EXPERIMENTAL_RUNS = 10
experiments_stats = []
# Sweep the number of parallel reporters and record simulated vs analytic steps.
for reporter_count in [1, 2, 5, 10, 20, 50, 100, 500, 1000, 2000, 5000]:
    avg_sim_steps, expected_steps = calculate_coverage_time(reporter_count)
    experiments_stats.append([reporter_count, avg_sim_steps, expected_steps])
print(experiments_stats)
| true |
0d0fd23df8a073d1761fe9e01bc3f9ed101fb5ec | Python | KoSangWon/food-picker | /venv/pick_menu.py | UTF-8 | 904 | 3.09375 | 3 | [] | no_license | # coding=utf-8
import random
def pick_menu():
    """Print a randomly chosen lunch menu item (Korean: "How about X today!?")."""
    menu_list = ["탕수육", "깐풍기", "칠리새우", "짬뽕", "짜장면",
"유린기", "소바", "초밥", "돈부리", "참치회",
"오코노미야끼", "라멘", "우동", "항정살", "목살",
"삼겹살", "갈비", "갈매기살", "안심", "등심",
"가브리살", "라면", "햄버거", "스테이크", "파스타",
"훈제오리", "베트남 쌀국수", "카레", "만두", "피자",
"냉면", "설렁탕", "닭도리탕", "간장게장", "김치찌개",
"낙지", "불고기백반", "곱창", "치킨", "떡볶이",
"순대", "보쌈", "족발", "찜닭", "감자탕"]
choice = random.choice(menu_list)
print("오늘은 " + choice +" 어떠세요!?")
if __name__ == '__main__':
pick_menu() | true |
738a16f6744a1e0e0eabe8e8666a537faae4a147 | Python | hmp36/Lectures | /Python/orm_models_relationships/apps/app_main/models.py | UTF-8 | 1,274 | 2.5625 | 3 | [] | no_license | from __future__ import unicode_literals
from django.db import models
from django.contrib import messages
class UserManager(models.Manager):
    """Manager adding request-driven validation when creating User rows."""

    def add(self, request):
        """Validate POSTed firstName/lastName, create the User, and flash
        Django messages. On any validation error no user is created and
        each error is flashed instead.

        Bug fix: the original created the user when `len(errs) <= 1`,
        i.e. even when exactly one validation error was present.
        """
        errs = []
        if len(request.POST["firstName"]) < 1:
            errs.append("Your first name cannot be blank!")
        if len(request.POST["lastName"]) < 1:
            errs.append("Your last name cannot be blank!")
        if not errs:
            self.create(
                firstName=request.POST["firstName"],
                lastName=request.POST["lastName"]
            )
            messages.add_message(request, messages.SUCCESS, "User created successfully!")
        else:
            for err in errs:
                messages.add_message(request, messages.ERROR, err)
class User(models.Model):
    firstName = models.CharField(max_length=255)
    lastName = models.CharField(max_length=255)
    # Timestamps maintained automatically by Django on insert/update.
    createdAt = models.DateTimeField(auto_now_add=True)
    updatedAt = models.DateTimeField(auto_now=True)
    # Custom manager. NOTE(review): because a manager is declared here under
    # the name `manager`, confirm callers do not rely on a default `objects`.
    manager = UserManager()
    def __repr__(self):
        return "{}".format(self.firstName)
class Hobby(models.Model):
    title = models.CharField(max_length=255)
    # Timestamps maintained automatically by Django on insert/update.
    createdAt = models.DateTimeField(auto_now_add=True)
    updatedAt = models.DateTimeField(auto_now=True)
    # Many-to-many to User; reverse accessor is user.users_hobbies.
    practitioners = models.ManyToManyField(User,related_name="users_hobbies")
    def __repr__(self):
return "{}".format(self.title) | true |
b4a6cde982f58ad4faa7829fd2aaabfdefaab39c | Python | Sirius5272/APIKnowledgeMining | /component/model/factory/api_knowledge_mutator/mutate_manager.py | UTF-8 | 1,116 | 2.640625 | 3 | [] | no_license | from component.model.factory.api_knowledge_mutator.delete_object_mutator import DeleteObjectAPIKnowledgeMutator
from component.model.factory.api_knowledge_mutator.synonym_mutator import APIKnowledgeSynonymMutator
from util.log_util import LogUtil
from component.model.api_knowledge import APIKnowledge
from typing import Set, Iterable
class APIKnowledgeMutateManager:
    """Applies every registered mutator to APIKnowledge items and unions
    the results."""

    def __init__(self):
        # Mutators applied in order; each returns a set of new knowledge.
        self.mutators = [
            DeleteObjectAPIKnowledgeMutator(),
            APIKnowledgeSynonymMutator()
        ]

    def mutate(self, api_knowledge: APIKnowledge) -> Set[APIKnowledge]:
        """Return the union of every mutator's output for a single item
        (empty set for None input)."""
        if api_knowledge is None:
            return set()
        results = set()
        for mutator in self.mutators:
            results.update(mutator.mutate(api_knowledge))
        return results

    def mutate_knowledge_list(self, knowledge_set: Iterable[APIKnowledge]) -> Set[APIKnowledge]:
        """Union of mutate() applied to every item in `knowledge_set`."""
        results = set()
        for item in knowledge_set:
            results.update(self.mutate(item))
        return results
| true |
570e88f475829d552f1fd3e3ef200767feda13cd | Python | kpgoing/Library | /ORM/UserORM.py | UTF-8 | 743 | 2.5625 | 3 | [] | no_license | # coding=utf-8
__author__ = 'xbw'
from sqlalchemy import Column, String, create_engine, Integer
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from BookORM import Book
from BorrowListORM import BorrowList
# Create the declarative base class for the ORM models:
Base = declarative_base()
# Define the User model:
class User(Base):
    """SQLAlchemy declarative model for a library user."""
    # Table name:
    __tablename__ = 'user'
    def __init__(self,username,password):
        # NOTE(review): the password is stored exactly as given (plain
        # text) — consider hashing before persisting.
        self.username = username
        self.password = password
    # Table structure:
    id = Column(Integer, primary_key=True,autoincrement=True)
    username = Column(String(20))
    password = Column(String(20))
    books = relationship(Book)
| true |
6408607bd3d97a0c687dc7c85ce6f8d36b5c3c7b | Python | p-mayank/Crawler | /crawler.py | UTF-8 | 1,837 | 2.796875 | 3 | [] | no_license | from bs4 import BeautifulSoup
import os
import requests
from urllib.request import Request, urlopen
def copysol(link, quest):
    """Fetch a solution page and append its <ol> list items to <quest>.txt.

    A network failure is retried by recursing; the early `return` after the
    retry is the bug fix — the original fell through and then used the
    undefined `text`, raising NameError after every failed fetch.
    """
    print("copysol: " + quest)
    try:
        source_code = Request(link, headers={'User-Agent': 'Mozilla/5.0'})
        text = urlopen(source_code).read()
    except Exception:
        print("505Error! Trying Again!")
        copysol(link, quest)
        return  # the recursive call handled the page; nothing left to do
    soup = BeautifulSoup(text, 'html.parser')
    tag = soup.ol
    filename = quest + ".txt"
    # `with` guarantees the file is closed even if a write fails.
    with open(filename, 'a') as f:
        f.write('\n')
        if tag is not None:
            for item in tag.findAll('li'):
                f.write(item.text)
                f.write('\n')
        else:
            f.write("Issue with the submission: Skipping!")
            print("Issue with the submission: Skipping!")
def ext_solution(link, quest):
    """Find the first 'viewsolution' link on a status page and copy it.

    The early `return` after the retry is the bug fix — the original fell
    through and then used the undefined `source_code`, raising NameError
    after every failed fetch.
    """
    url = link
    try:
        source_code = requests.get(url)
    except Exception:
        print("505Error! Trying Again!")
        ext_solution(link, quest)
        return  # the recursive call handled the page; nothing left to do
    text = source_code.text
    soup = BeautifulSoup(text, 'html.parser')
    for link in soup.findAll('a', {'class': None}):
        href = link.get('href')
        if href.count('viewsolution') == 1:
            nlink = "https://www.codechef.com" + href
            copysol(nlink, quest)
            break
def crawler(username):
try:
os.mkdir(username)
os.chdir(username)
except(Exception):
os.chdir(username)
url="https://www.codechef.com/users/"+username
source_code = requests.get(url)
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
for link in soup.findAll('a', {'class':None}):
href=link.get('href')
quest = link.text
if(href.count('status')==1):
link = ("https://www.codechef.com"+(href))
ext_solution(link, quest)
username=input("Enter your CodeChef's username: ")
crawler(username)
| true |
1c62150f9ccaeb2bf72f0e2ad09b51463397c2d2 | Python | soelves/portfolio | /IN1000/IN1000/Uke 5/regnefunksjoner.py | UTF-8 | 1,122 | 4.4375 | 4 | [] | no_license | def addisjon(a,b):
return a + b
def subtraksjon(a,b):
return a - b
def divisjon(a,b):
return a / b
def tommerTilCm(antallTommer):
assert antallTommer > 0
return antallTommer * 2.54
def skrivBeregninger():
a = float(input("Skriv inn et tall: "))
b = float(input("Skriv inn et tall til: "))
print("Resultat av summering:", addisjon(a,b))
print("Resultat av subtraksjon:", subtraksjon(a,b))
print("Resultat av divisjon:", divisjon(a,b))
antallTommer = float(input(("Konvertering til tommer fra cm:\nSkriv inn en lengde: ")))
print("Resultat:", tommerTilCm(antallTommer))
def hovedprogram():
#Under er en test av funksjonen addisjon, som skal addere to parametre.
print(addisjon(8,7))
#Test av funksjonen subtraksjon.
assert subtraksjon(8,7) == 1
assert subtraksjon(-8,-7) == -1
assert subtraksjon (-8,7) == -15
#Test av funksjonen divisjon.
assert divisjon(4,2) == 2
assert divisjon(-4,-2) == 2
assert divisjon(-4,2) == -2
#Test av funskjonen tommerTilCm.
print(tommerTilCm(10))
skrivBeregninger()
hovedprogram()
| true |
7813d744c3b2b36eccdeb4f4b66c3a94bac03add | Python | aaroncosmith/Week-1-Digital-Crafts | /pythonDay4/matrix_addition.py | UTF-8 | 411 | 4.125 | 4 | [] | no_license | # give two-dimensional lists of numbers of the size 2 x 2
x1 = [3, -3]
y1 = [6, 4]
x2 = [4, 8]
y2 = [2, 6]
x3 = []
y3 = []
# lets add those together!
#im going to use the backwards way of doing this
# i need more python know-how to get this better
x3.append(x1[0] + x2[0])
x3.append(x1[1] + x2[1])
y3.append(y1[0] + y2[0])
y3.append(y1[1] + y2[1])
print(x3)
print(y3)
# need to work on my loops more!!!
| true |
a77cf1b6f499d3e3ada09d1f42637a48ed1c0f69 | Python | hardshah/YTBulkDownloaderApp | /DownloaderApp/env/Lib/site-packages/pop_config/comps/subcommands.py | UTF-8 | 348 | 2.5625 | 3 | [] | no_license | """
Gather subcommands for a parser
"""
from typing import Any
from typing import Dict
def get(hub, raw_cli: Dict[str, Any], arg: str) -> Dict[str, Any]:
comps = raw_cli[arg]
subcommands = comps.get("subcommands", [])
if not isinstance(subcommands, list):
subcommands = [subcommands]
return {"subcommands": subcommands}
| true |
6577b5250c5d321405d775b80c23b5f5d73533b8 | Python | NatanielOtero18/pythonAdvanceExamples | /dictionary.py | UTF-8 | 1,062 | 3.90625 | 4 | [] | no_license | # Estructura de datos clave valor. Nos permite manejar Json
# nombre_dictionary= {"clave": valor, "clave":valor, ...}
carBmw = {"brand": "bmw", "potencia": 158, "precio": 34000}
carPorsche = {"brand": "porsche", "potencia": 215, "precio": 48000}
# es posible encadenar diccionarios.
carsToBuy = {"coches": [carBmw, carPorsche]}
# Acceso al dato
# print (carsToBuy["coches"])
# print (carBmw["brand"])
print("..................")
# accede a los valores
for value in carBmw.values():
print(value)
print("..................")
# accede a las claves
for key in carBmw.keys():
print(key)
print("..................")
# acceder a la clave valor indistintamente
for itemKey, itemValue in carBmw.items():
print(itemKey, itemValue)
print("..................")
print(carBmw)
# modificación del dato
carBmw["brand"]= "ferrari"
# Añadir nuevo elemento
carBmw["nuevo"]= True
print(carBmw)
# borrar key, se puede utilizar del o pop de la misma forma
del carBmw["brand"]
carBmw.pop("potencia")
print(carBmw)
# limpiar diccionario
carBmw.clear()
print(carBmw) | true |
e91d17cc3a3bd7e80ec822ae96d244c1f129551c | Python | ryan-foo/babybel | /src/eval.py | UTF-8 | 55,172 | 2.953125 | 3 | [
"MIT"
] | permissive | import functools
from enum import Enum
import random
import sys
import string
import types
import re
import traceback
import math
import operator as op
'''
BABYBEL
'''
# Beyond my advisor, Professor Olivier Danvy, who has provided me guidance beyond compare, I give my thanks to Lucas Vieira, who implemented Believe (C), and William Annis, who implemented PyLisp, for fortifying my understand of interpreters for Lisp languages.
'''
Useful Constants
'''
debug = True
SYMBOLCANDIDATES = string.digits + string.ascii_letters + '-+*/:&$?='
CHARCANDIDATES = ['\\' + char for char in SYMBOLCANDIDATES]
WS = string.whitespace
SPECIAL = "()`',@"
SYNTAX = WS + SPECIAL
INT = re.compile(r'^[+-]?\d+$')
FLOAT = re.compile(r'^[+-]?(\d+\.\d*$|\d*\.\d+$)')
_nummap = {
INT: int,
FLOAT: float
}
'''
Axioms
'''
symbol_table = {}
g_env = symbol_table
'''
A list of all characters. Its elements are of the form (c . b), where
c is a character and b is its binary representation in the form of a
string of \1 and \0 characters.
'''
chars_char = SYMBOLCANDIDATES
chars_ord = list(map(ord, str(chars_char)))
chars_bin = [format(char, '08b') for char in chars_ord]
chars = dict(zip(chars_char, chars_bin))
# This will be included in the global env later.
'''
Defining Bel Types as Python Classes
'''
'''
Every type is implemented in Python as a BelType object.
They inherit from BelType, and are either a Char, Pair, Symbol, Stream or Number.
Numbers are non-standard but implementing them makes our job easier.
'''
class BelType():
'''
Abstract class for others to inherit from. The base class of every type in Bel.
'''
def __init__(self):
pass
def isbeltype(self):
return True
def beltype(self):
if isinstance(self, Symbol):
return "symbol"
if isinstance(self, Char):
return "char"
if isinstance(self, String):
return "pair"
if isinstance(self, Pair):
return "pair"
if isinstance(self, List):
return "pair"
else:
raise TypeError("This is not a Bel type.")
def __repr__(self):
'''
This will be extended by the other types. (Pairs, for example.)
'''
if nilp(self):
return "()"
elif atomp(self):
return ("%s") % (self)
elif pairp(self):
return "(%s)" % print_pair_aux(self.a, self.d)
else:
raise TypeError("Not a Bel Type.")
'''
PRINTING AND READING
'''
def print_val(v):
if nilp(v):
return "()"
elif atomp(v):
return print_atom(v)
elif pairp(v):
return print_pair(v)
else:
raise TypeError("Not a Bel Type.")
class Atom(BelType):
'''
Abstract class to inherit from. Atoms are always true, unless if they're nil.
Everything in Bel is an Atom, except Pairs.
'''
def __init__(self):
pass
def istrue(self, name):
if self.n == "nil":
return False
else:
return True
class Symbol(Atom):
'''
There is the fact that it is a symbol,
then it's the name.
Symbol, by default you can initialize with nil.
Alternative: special value, you can look at which
it's bound to undefined.
'''
def __init__(self, name, value = None):
if [char in SYMBOLCANDIDATES for char in name]:
# When we instantiate a symbol, every char within that symbol should be a valid symbol.
self.n = name # variable
self._val = value # value, ideally we'd initialize to the Bel nil symbol, but we don't have recursive typing in Python.
# you look up using the python string value of the symbol, not the symbol itself. key difference.
if name not in symbol_table: #Update symbol table if its not there.
symbol_table[self.n] = self.v
@property
def v(self):
if self.n == "nil":
return self
elif self._val is None:
return Symbol("nil")
return self._v
def __repr__(self):
return self.n
def __len__(self):
return 1
def cons(self, item):
'''
Stick yourself on the back of an item, and that becomes a pair.
'''
return Pair(item, self)
def get(self, property):
try:
return symbol_table[self.n][property]
except:
return "nil"
def istrue(self, name):
if self.n == "nil":
return False
else:
return True
def eq(self, other):
return self.n == other.n
class Char(Atom):
'''
A character is a 8-bit integer. We care about their bit representation.
They look like this, and we eventually want a table that looks like this.
The nice thing is we already have them in that representation, up top.
'''
def __init__(self, name):
# if __name__ in CHARCANDIDATES:
self.n = name
def __repr__(self):
return self.n
def cons(self, item):
'''
Stick yourself on the back of an item, and that becomes a pair.
'''
return Pair(item, self)
bel_nil = Symbol("nil")
bel_t = Symbol("t")
bel_o = Symbol("o")
bel_lit = Symbol("lit")
bel_prim = Symbol("prim")
bel_clo = Symbol("clo")
symbol_table["nil"] = Symbol("nil")
symbol_table["t"] = Symbol("t")
symbol_table["o"] = Symbol("o")
symbol_table["chars"] = chars
# Numbers are not a fundamental Bel type, but implementing them as a Bel type makes our job a lot easier. // Believe
class Number(Atom):
def __init__(self, value):
if isinstance(value, int) or isinstance(value, float):
self.v = value
else:
raise TypeError("You need to use a float or int to construct a number.")
def __len__(self):
return 1
def length(self):
return 1
def cons(self, item):
return Pair(item, self)
def __repr__(self):
return str(self.v)
def __gt__(self, other):
if type(self) == (type(other)):
if self.v > other.v:
return 1
else:
return 0
else:
return -1
def __cmp__(self, other):
if type(self) == type(other):
if self.v == other.v:
return 0
elif self.v > other.v:
return 1
else:
return -1
def __add__(self, other):
if type(self) == type(other):
return Number(self.v + other.v)
else:
return self.v + other
__radd__ = __add__
def __sub__(self, other):
if type(self) == type(other):
return Number(self.v - other.v)
else:
return self.v - other
__rsub__ = __sub__
def __mul__(self, other):
if type(self) == type(other):
return Number(self.v * other.v)
else:
return self.v * other
__rmul__ = __mul__
def __truediv__(self, other):
if type(self) == type(other):
return Number(self.v / other.v)
else:
return self.v / other.v
__rtruediv__ = __truediv__
# We don't ask too many questions about what you put in a pair, so long as its a Bel Type.
class Pair(BelType):
def __init__(self, a = bel_nil, d = bel_nil):
'''
TODO: We temporarily get rid of the type checking for Bel types because we create environment with Python functions in the variables.
'''
# if (a.isbeltype()) and (d.isbeltype()):
self.a = a
self.d = d
# else:
# raise TypeError("Type not assigned, please instantiate pair with a BelType")
def __repr__(self):
'''
Print the car of the pair.
Then look at the cdr of the pair.
If the cdr of the pair is nil, print ')' and stop.
If the cdr is an atom, print a dot, then print that atom.
Otherwise, you write a space and parantheses, print the car.
And then recursively call repr again.
'''
return "(%s)" % print_pair_aux(self.a, self.d)
def __rawrepr__(self):
'''
Dot notation
'''
if self.d == bel_nil:
return "(%s)" % (self.a, self.d)
else:
if numberp(self.a) and numberp(self.d):
return "(%s . %s)" % (self.a.v, self.d.v)
elif numberp(self.a):
return "(%s . %s)" % (self.a.v, self.d)
elif numberp(self.d):
return "(%s . %s)" % (self.a, self.d)
else:
return "(%s . %s)" % (self.a, self.d)
def replacea(self, val):
if val.isbeltype():
self.a = val
return val
else:
raise TypeError("%s is not a Beltype" % (val))
def replaced(self, val):
if val.isbeltype():
self.d = val
return val
else:
raise TypeError("%s is not a Beltype" % (val))
def car(self):
return self.a
def cdr(self):
return self.d
def cons(self, item):
return Pair(item, self)
def length(self):
'''
Return length.
Traverse the list.
If it's not a proper list, break.
'''
if not proper_listp(self):
raise TypeError("Not a proper list")
leng = 0
itr = self
while not nilp(itr):
leng += 1
itr = itr.d
return leng
def print_pair_aux(a, d):
if nilp(d):
return a
elif atomp(d):
return ("%s . %s") % (a, d)
else:
return ("%s %s") % (a, print_pair_aux(d.a, d.d))
class String(BelType):
'''
A string is a list, where all the args are chars.
But since we don't have a notion of chars at the moment,
we will just implement string as its own type.
'''
def __init__(self, string):
self.str = make_string(string)
'''
Ryan will not fuss with characters that much, Bel String is fine and its a great time saver.
We can have a print_string to get it in our desired representation to the external user if we want to.
'''
'''
A proper list of characters is called a string, and has a special notation,
zero or more characters within double quotes.
"hello world" is a string.
"hello" is a string.
"" is a string.
(\a \b \c) is a string, which can be represented as "abc".
(Question: And the challenge of \backslash comes in!)
(Thought (QUESTION): Since \ or backslash is already a reserved keyword in Python, we can do as well to use # instead?)
Answer: it's up to the language implementer what the # character is.
Strings evaluate to themselves.
'''
# Streams are non-critical for our exploration purposes.
class Stream(BelType):
def __init__(self, status):
status = status
raise NameError("Unimplemented")
# Pointer to a raw stream
# The cache of the stream
# The amount of the stream that is full.
class StreamStatus(Enum):
CLOSED = 1
READ = 2
WRITE = 3
'''
Predicates
Thanks to luksamuk's Believe
'''
def symbolp(x: BelType):
return isinstance(x, Symbol)
def nilp(x: BelType):
return symbolp(x) and x.n == "nil"
def pairp(x: BelType):
return isinstance(x, Pair)
def atomp(x: BelType):
return not (pairp(x))
def charp(x: BelType):
return isinstance(x, Char)
def streamp(x: BelType):
return isinstance(x, Stream)
def numberp(x: BelType):
return isinstance(x, Number)
def idp(x: BelType, y: BelType):
'''
If they are different types,
then return false.
'''
if (x.isbeltype() and y.isbeltype()) and (type(x) == type(y)):
if symbolp(x):
return x.n == y.n
elif charp(x):
return x.n[chars] == y.n[chars]
elif numberp(x):
return x.v == y.v
else:
return (x is y)
else:
if (x.isbeltype() and y.isbeltype()):
return False
else:
raise TypeError("Identity can only be called on Bel types")
def errorp(x: BelType):
# Tests if an object is a list in the format (lit err . rest)
if not pairp(x):
return 0
if not idp(x.a, Symbol("lit")):
return 0
cdr = x.d
if not idp(cdr.a, Symbol("err")):
return 0
return 1
def proper_listp(x: BelType):
'''
(1 2 3) is a proper list
(1 . (2 . (3 . nil))) is a proper list
(1 2 3 . 4) is not a proper list
nil is a proper list
() is a proper list
We traverse the list, pairwise. If the cdr is nil, it is proper.
If it is a pair, it continues. If the cdr is anything else, it is not a list!
'''
if not pairp(x) and not nilp(x):
return bel_nil
itr = x
while not nilp(itr):
if not pairp(itr):
return bel_nil
itr = itr.d
'''
Maybe termination error?
Ages ago, Prof Danvy talked to JMC about the first implementation of Lisp. 0, False, or something else.
Anything that can be used will be abused.
You face these kind of choices.
'''
return 1
def number_listp(x: BelType):
'''
Both a proper list,
and a list of numbers.
'''
if not pairp(x) and not nilp(x):
return False
itr = x
while not nilp(itr):
car = itr.a
if not pairp(itr):
return False
if not numberp(car):
return False
itr = itr.d
return True
def stringp(x: BelType):
'''
An object is a string if it is a proper list of characters.
'''
if not pairp(x) and not nilp(x):
return False
itr = x
while not nilp(itr):
car = itr.a
if not pairp(itr):
return False
if not charp(car):
return False
itr = itr.d
return True
def literalp(x: BelType):
'''
Takes a proper list. Is the list a literal -- is the first element the symbol "lit".
'''
if not proper_listp(x):
return False
return idp(x.a, Symbol("lit"))
def primitivep(x: BelType):
'''
Takes a literal.
Is it a primitive?
- Primitive: second element of the list is the symbol "prim".
'''
return (literalp(x) and idp(x.d.a, Symbol("prim")))
def closurep(x: BelType):
'''
Takes a literal.
Is it a closure?
- Closure: second element of the list is the symbol "clo".
'''
return literalp(x) and idp(x.d.a, Symbol("clo"))
def quotep(x: BelType):
'''
Tests if a list is a quoted form.
'''
if not proper_listp(x):
return 0
return idp(x.a, Symbol("quote"))
# Takes multiple args and constructs a Bel List. But these multiple args are NOT a list.
def make_list(*args):
'''
* is the unpacking operator, and will construct
a tuple out of those args, to be passed one by one to the make_list function.
'''
if (len(args) <= 0):
return bel_nil
base_pair = Pair(args[-1], bel_nil)
result_list = base_pair
for i in range(len(args)-2,-1,-1):
result_list = Pair(args[i], result_list)
return result_list
# make_string takes a python string and transforms it into a Bel one. Utility for making string.
def make_string(string):
n = len(string)
if (n == 0):
return bel_nil;
# Strings are represented internally as a Bel List of characters. We will make a list of chars.
# We use the list comprehension to transform string into a Python list of Bel chars.
string = [Char(char) for char in string]
# Then, we unpack the args and make a Bel list of those Bel chars.
bel_string = make_list(*string)
return bel_string
def bel_to_python_string(string):
'''
Traverses a Bel string,
takes out the character
values individually and
adds them to a python string.
'''
res = ''
if not stringp(string):
raise TypeError("Not a Bel String")
'''
Since it is a string,
all of its elements are chars,
and it is a proper list.
'''
else:
itr = string
while not nilp(itr):
res = res + itr.a.n
itr = itr.d
return res
# TODO: Streams
# Error has no formal specification in Bel, other than "there might be an err function which throws an error in the system".
# TODO: Specify and implement errors.
class Env():
'''
An environment is a list of pairs, and each pair (var . val) is the binding of a symbol var to the value val.
'''
def __init__(self, env = bel_nil):
'''
The environment begins as an empty list.
'''
self.e = env
def __repr__(self):
'''
The environment will always be a list of pairs.
So printing an environment is either the same
as printing out a list, or printing out an empty
list, aka bel_nil.
TODO
'''
if self.e == bel_nil:
'''
The empty list.
'''
return "%s" % (bel_nil)
else:
'''
Otherwise,
we can treat it as any other list.
'''
return "(%s)" % print_pair_aux(self.e.a, self.e.d)
def push(self, var, val):
'''
Add a var.val pair to the environment.
'''
if (var.isbeltype()) and (val.isbeltype()):
new_pair = Pair(var, val)
self.e = Pair(new_pair, self.e)
else:
raise TypeError("Type not assigned, please instantiate var val pair with a BelType")
l_env = Env()
d_env = Env()
def env_lookup(env, symbol):
'''
Traverses an environment given a symbol. It returns the associated value or returns nil.
'''
if nilp(env):
return (bel_nil, False)
if nilp(env.e):
return (bel_nil, False)
'''
Calling nilp on env isn't enough,
we have to check inside it, and
ask it to tell us what env.e is.
'''
elif atomp(env.e):
return (bel_nil, False)
elif not symbolp(symbol):
raise TypeError("We cannot look up %s, as it is not a symbol" % (symbol))
else:
itr = env.e
while (not nilp(itr)):
# While we have not hit the end of the env.
cand = itr.a
# The candidate pair is the first var /val pair of the iterator. If var is the symbol we are looking for, then return it's associated val. Otherwise, continue down the list.
if (symbolp(cand.a) and idp(symbol, cand.a)):
return (cand.d, True)
else:
itr = itr.d
if isinstance(itr, Env):
itr = itr.e
return (bel_nil, False)
def lookup(l_env, symbol):
'''
A lookup function that traverses the dynamic scope, then the lexical scope, then the global scope.
Any variable is either bound in dynamic, lexical or global environments.
What
It will serve useful to have a very fast lookup for symbols used often. The most often used symbols will be in the global environment.
"First do it right, then do it fast."
"Early optimization is the source of a lot of evil." - Donald Knuth
How
- lookup in the lexical scope and dynamic scope, we can use an association list because they tend to be small and you don't have to hold them
- the assoc list is a time-honored tradition to represent environments (Danvy)
- not the most efficient space-wise. In practice, variables should be compiled into their lexical offset (or how far they are in the lexical environment.)
- Global environment
- Direct access in a table (the symbol table is implemented as a hash table), so we can access those variables in constant time.
'''
# Dynamic scope
value, found = env_lookup(d_env, symbol)
if found:
return value
# Lexical scope
value, found = env_lookup(l_env, symbol)
if found:
# print("%s is in the lexical environment!" % symbol)
# print("Here is it's value: %s" % value)
return value
# Global scope
value = symbol_table[symbol.n]
return value
'''
Clears out the old val associated with symbol in a given environment and makes way for the new_val.
'''
def replace_val_in_env(env, symbol, new_val):
if nilp(env):
return bel_nil
itr = env.e
while not nilp(itr):
cand = itr.a
if idp(symbol, (cand.a)):
cand.d = new_val
return symbol
itr = itr.d
return bel_nil
'''
Assigns a symbol to a value. It tries to do so in the dynamic, then lexical, then global envs.
'''
def assign_val(l_env, symbol, new_val):
# Dynamic assignment
ret = replace_val_in_env(d_env, symbol.n, new_val)
if not nilp(ret):
return symbol
# Lexical assignment
ret = replace_val_in_env(l_env, symbol.n, new_val)
if not nilp(ret):
return symbol
# When no assignment is made, we push a global value
symbol_table[symbol.n] = new_val
'''
Literals
are Bel objects that evaluate to themselves.
They are seen in the form (lit . rest), where lit is a symbol, and rest is a proper list of things.
Primitives and functions are literals.
Generating literals: it creates a pair where the car is the symbol lit, and the cdr is anything that should be treated as a literal (i.e, it should evaluate to itself.)
'''
def make_literal(rest):
if not proper_listp(rest):
raise TypeError("%s is not a proper list, it cannot be turned into a literal." % rest)
return Pair(Symbol("lit"), rest)
'''
Primitives are literals.
'''
# LIST OF PRIMITIVES (LOP)
def make_primitive(symbol):
return make_literal(Pair(bel_prim, Pair(symbol, bel_nil)))
def register_primitive_in_env(x):
'''
x is a Python string representation of the primitive.
This function will take our global env (symbol table) and register the primitives on it.
'''
symbol = Symbol(x)
symbol_table[x] = make_primitive(symbol)
primitives = ["id", "join", "car", "cdr", "type", "xar", "xdr", "sym", "nom", "wrb", "rdb", "ops", "cls", "stat", "coin", "sys", "+", "-", "*", "/", "<", "<=", ">", ">=", "=", "err", "pair?", "cons", "g_env", "apply"]
def generate_primitives(primitives):
for primitive in primitives:
register_primitive_in_env(primitive)
generate_primitives(primitives)
def make_closure(l_env, rest):
'''
Creating a closure.
A list must have two elements:
one, a lambda list, and the second should be the body of the function.
'''
return make_literal(Pair(bel_clo, Pair(l_env, rest)))
'''
EVALUATION
Metacircular evaluator
- we have eval and apply,
and they call themselves mutually,
they will have auxillary functions
and special forms to produce
a working interpreter for a Lisp.
'''
'''
eval, or the evaluation function, takes an expression, identifies what it is, and executes it accordingly.
When a simple application is performed, we take a list and consider the first element the symbol that the function is bound to.
We evaluate every element of the list, including the function, before applying the closure to the rest of the evaluated elements, which then will be passed as arguments to the function.
The closure captures the lexical env of when it is evaluated.
'''
def eval(exp, l_env):
# if not (isinstance(exp, BelType)):
# raise TypeError("%s is Not a Bel Type" % exp)
# print("Expression being evaluated: %s" % exp)
# print("Lexical environment: %s" % l_env)
if numberp(exp):
return exp
elif symbolp(exp):
# If they are an axiom symbol, evaluate to themselves.
if (idp(exp, bel_nil) or idp(exp, bel_t) or idp(exp, bel_o)):
return exp
else:
# Otherwise, look them up in the environment, from dynamic to lexical to global.
return lookup(l_env, exp)
elif quotep(exp):
return special_quote(exp, l_env)
elif literalp(exp):
# Literals evaluate to themselves.
return exp
elif stringp(exp):
# Strings evaluate to themselves.
return exp
elif pairp(exp):
if idp(exp.a, Symbol("fn")):
# Construct lit, clo, lexical environment, and the body of the function, and return the (lit clo l_env (formal parameters) (body) etc.)
return make_closure(l_env, exp.d)
elif idp(exp.a, Symbol("if")):
# print("Special If")
# We will call special_if on the cdr of the expr, since we already know that it's a pair, and that the first half of the pair is if..
# We will eval_tp on the same expression,
# trusting eval_tp to handle it with an iterative
# approach.
return eval_tp(exp, l_env)
elif idp(exp.a, Symbol("set")):
return special_set(exp.d, l_env)
elif idp(exp.a, Symbol("define")):
return special_set(exp.d, l_env)
elif idp(exp.a, Symbol("def")):
print("yes, special def")
return special_def(exp.d, l_env)
# Otherwise it is an application of a function.
else:
'''
Anything that calls eval recursively
will not be done in a straight recursive
manner.
Instead, we call eval_tp, which evaluates
the body of a closure. This instantiates
a while loop that...
'''
return function_apply(eval(exp.a, l_env),
eval_list(exp.d, l_env))
raise Error("%d is not a proper list, you cannot apply a function." % exp)
'''
eval_tp is a new function,
that is called to evaluate the body of a closure.
A Bel expression is ordinarily evaluated through recursive calls to eval,
while tail-recursive sub-expressions are evaluated through the iteration
of the while loop in eval_tp.
'''
def eval_tp(exp, l_env):
while(True):
# print("Eval TP")
# print("Expression being evaluated: %s" % exp)
# print("Lexical environment: %s" % l_env)
if numberp(exp):
return exp
elif symbolp(exp):
# If they are an axiom symbol, evaluate to themselves.
if (idp(exp, bel_nil) or idp(exp, bel_t) or idp(exp, bel_o)):
return exp
else:
# Otherwise, look them up in the environment, from dynamic to lexical to global.
return lookup(l_env, exp)
elif quotep(exp):
return special_quote(exp, l_env)
elif literalp(exp):
# Literals evaluate to themselves.
return exp
elif stringp(exp):
# Strings evaluate to themselves.
return exp
elif pairp(exp):
if idp(exp.a, Symbol("fn")):
# Construct lit, clo, lexical environment, and the body of the function, and return the (lit clo l_env (formal parameters) (body) etc.)
return make_closure(l_env, exp.d)
elif idp(exp.a, Symbol("if")):
'''
Test
'''
body = exp.d
if nilp(body.d):
# Then it's car is the final else branch of the if expression.
exp = body.a
continue
if not pairp(body.d):
raise SyntaxError("The cdr of this pair has to be a pair.")
else:
# If the cdr of the body is a pair, then
# The car of that pair is the test, and the cadr is the consequent branch.
test = body.a
conseq = body.d.a
# print("IF: test: %s" % test)
# print("IF: conseq: %s" % conseq)
# We evaluate the test, with a non-tail call to eval.
if not nilp(eval(test, l_env)):
# print("Eval the consequent. We have passed the test.")
# If the test evaluates to a truthy value (not nil), then return evaluation of the consequent.
exp = conseq
continue
# Otherwise, we tail call eval on the cadr on the pair, or the then branch.
else:
# print("Tail call on the else* branch.")
# Else* branch.
# print("Else branch: %s" % body.d.d)
if nilp(body.d.d.d):
exp = body.d.d.a # Else branch: exp
continue
else:
# The if-then-else is not finished, so construct an if expression.
exp = Pair(Symbol("if"), body.d.d)
continue
'''
It iteratively treats the if.
'''
elif idp(exp.a, Symbol("set")):
return special_set(exp.d, l_env)
elif idp(exp.a, Symbol("define")):
return special_set(exp.d, l_env)
elif idp(exp.a, Symbol("def")):
return special_def(exp.d, l_env)
'''
def: function definition
(def n p e)
is an abbreviation for
(set n (lit clo nil p e))
'''
# Otherwise it is an application of a function.
else:
'''
Anything that calls eval recursively
will not be done in a straight recursive
manner.
Instead, we call eval_tp, which evaluates
the body of a closure. This instantiates
a while loop.
'''
fun = eval(exp.a, l_env)
args = eval_list(exp.d, l_env)
a_fun = fun
a_args = args
while (primitivep(a_fun) and (idp(a_fun.d.d.a, Symbol("apply")))):
arity_check(a_args, 2)
a_fun = a_args.a
a_args = a_args.d.a
if closurep(a_fun):
l_env = a_fun.d.d.a # current lexical environment
params = a_fun.d.d.d.a # actual parameters
body = a_fun.d.d.d.d.a # function body (value that the fn returns)
new_env = Env(bind(params, a_args, l_env))
exp = body
l_env = new_env
elif primitivep(a_fun): # guaranteed not to be apply
return apply_primitive(a_fun.d.d.a, a_args)
else:
raise TypeError("Not a function")
else:
raise TypeError("%d is an unidentified function object (UFO)." % exp)
'''
APPLY
"is the application function. It takes a function and applies that to the list of evaluated arguments.
A function can be a primitive but also a literal closure.
We bind arguments to the formal parameters, create an extended environment, and evaluate under the new environment."
'''
def bind(params, args, l_env):
# print("Lexical Env: %s" % l_env)
'''
Args is a list of values.
If param is a symbol,
then that is a variadic expression,
then you extend, or cons params with args
on top of l_env.
Creates a new environment.
'''
if symbolp(params):
return Pair(Pair(params, args), l_env)
elif pairp(params):
if (symbolp(params.a)):
if pairp(args):
'''
Then we are in the case where params and args are a pair.
So we can then take the car of both, and cons them.
'''
return Pair(Pair(params.a, args.a), bind(params.d, args.d, l_env))
else:
raise IndexError("Arity mismatch -- we have too little args")
else:
raise SyntaxError("Illegal formal -- formal needs to be a symbol")
'''
DECISION POINT: (potential point for heated discussion)
Arity mismatch: not enough args.
Or we could pad them / bind them with nil.
Or if there are too many args, we can ignore them.
"Worse is Better vs The Right Thing"
'''
elif nilp(params):
if nilp(args):
return l_env
else:
raise IndexError("Arity mismatch -- we have too many args")
else:
raise SyntaxError("Illegal formal -- formal needs to be a pair or a symbol")
def function_apply(fun, args):
if primitivep(fun):
'''
Function could be predefined (Primitives),
in which case:
they are applied to lists of Bel values
- the arity is checked
- each argument is fetched from args [a Bel List]
types are tested
the actual values are extracted from each value
the operation at hand is carried out on the values
we formulate a new result
We create a new function class that performs
the arity check.
'''
return apply_primitive(
fun.d.d.a, args)
elif closurep(fun):
l_env = fun.d.d.a # current lexical environment
params = fun.d.d.d.a # actual parameters
body = fun.d.d.d.d.a # function body (value that the fn returns)
new_env = Env(bind(params, args, l_env))
if errorp(new_env):
return new_env
return eval_tp(body, new_env) # evaluate the body given the new extended environment
else:
raise TypeError("Not a function")
@functools.lru_cache(maxsize=None)
def eval_list(bel_list, l_env):
if nilp(bel_list):
return bel_nil
eval_head = eval(bel_list.a, l_env)
eval_rest = eval_list(bel_list.d, l_env)
return Pair(eval_head, eval_rest)
'''
when do we stop evaluating? - we stop when we hit nil.
if we hit nil, we add nil to our list, break.
otherwise, we continue evaluating the head of to_eval,
and adding that to our existing construction (eval_result).
(we construct the evaluated list on the fly).
'''
@functools.lru_cache(maxsize=None)
def eval_list_tp(bel_list, l_env):
eval_result = eval(bel_list.a, l_env)
to_eval = bel_list.d
while(True):
if nilp(to_eval):
eval_result = Pair(eval_result, bel_nil)
break
else:
print("to eval: %s" % to_eval)
eval_result = Pair(eval_result, eval(to_eval.a, l_env))
to_eval = to_eval.d # proceed down the list
print("to eval after .d: %s" % to_eval)
continue
return eval_result
'''
PRIMITIVE FUNCTIONS
When you look up these symbols (id, join, '+' -- you are returned with "lit prim +" in the symbol table). The environment has been updated by generate_primitives, which registers these primitives. This leads us to the primitive branch in eval, which then tells us to call these primitives accordingly.
'''
def lookup_prim(sym, lit):
    """Report whether *sym* denotes the primitive named by the string *lit*."""
    # Build the Symbol for the literal name and compare identities.
    target = Symbol(lit)
    return idp(sym, target)
def arity_check(args, num):
    """Raise IndexError unless the Bel list *args* has exactly *num* elements."""
    length = args.length()
    if length == num:
        return
    if length > num:
        raise IndexError("Arity mismatch. Too many args.")
    raise IndexError("Arity mismatch. Too little args.")
'''
If they are equal,
then it passes
and the rest of the primitive function
continues to execute.
If they are not,
it raises an arity mismatch error and execution stops.
Possible extension: error-handling could stop at the REPL level,
not at the meta-level. Right now we lean on Python's error handling,
but in an ideal world we would have implemented native error handling
for Bel.
Hence, we declare the Bel interpreter as not to be used in anger, or a prototype -- (an expression that tells us that it is not to be used for production level code).
'''
def apply_primitive(sym, args):
    '''
    Apply the primitive named by *sym* (e.g. "+", from "(lit prim +)")
    to *args*, a Bel list of already-evaluated arguments (possibly
    empty).  Each prim_* helper performs its own arity check, extracts
    its operands from the list, carries out the operation, and returns
    the result as a Bel value.
    Raises ValueError if *sym* names no known primitive.
    '''
    # Dispatch by comparing *sym* against each known primitive name.
    '''
    FUNCTIONS
    '''
    if lookup_prim(sym, "id"):
        return prim_id(args)
    elif lookup_prim(sym, "join"):
        return prim_join(args)
    elif lookup_prim(sym, "car"):
        return prim_car(args)
    elif lookup_prim(sym, "cdr"):
        return prim_cdr(args)
    elif lookup_prim(sym, "type"):
        return prim_type(args)
    elif lookup_prim(sym, "xar"):
        return prim_xar(args)
    elif lookup_prim(sym, "xdr"):
        return prim_xdr(args)
    # The branches below are disabled until their prim_* helpers are
    # implemented (streams, string<->symbol conversion, ...).
    # elif lookup_prim(sym, "sym"):
    #     return prim_sym(args)
    # elif lookup_prim(sym, "nom"):
    #     return prim_nom(args)
    # elif lookup_prim(sym, "wrb"):
    #     return prim_wrb(args)
    # elif lookup_prim(sym, "rdb"):
    #     return prim_rdb(args)
    # elif lookup_prim(sym, "ops"):
    #     return prim_ops(args)
    # elif lookup_prim(sym, "cls"):
    #     return prim_cls(args)
    # elif lookup_prim(sym, "stat"):
    #     return prim_stat(args)
    elif lookup_prim(sym, "coin"):
        return prim_coin(args)
    elif lookup_prim(sym, "sys"):
        return prim_sys(args)
    elif lookup_prim(sym, "apply"):
        return prim_apply(args)
    # Babybel Primitives
    # elif lookup_prim(sym, "list"):
    #     pass
    elif lookup_prim(sym, "pair?"):
        # Inline predicate: t when the first argument is a pair.
        if pairp(args.a):
            return bel_t
        else:
            return bel_nil
    elif lookup_prim(sym, "cons"):
        return prim_cons(args)
    # OPERATORS
    elif lookup_prim(sym, "+"):
        return prim_add(args)
    elif lookup_prim(sym, "-"):
        return prim_sub(args)
    elif lookup_prim(sym, "*"):
        return prim_mul(args)
    elif lookup_prim(sym, "/"):
        return prim_div(args)
    elif lookup_prim(sym, "<"):
        return prim_lt(args)
    elif lookup_prim(sym, "<="):
        return prim_leq(args)
    elif lookup_prim(sym, ">"):
        return prim_gt(args)
    elif lookup_prim(sym, ">="):
        return prim_geq(args)
    elif lookup_prim(sym, "="):
        return prim_eq(args)
    elif lookup_prim(sym, "let"):
        # NOTE(review): prim_let is dispatched here but no prim_let
        # definition is visible in this file -- confirm it exists.
        return prim_let(args)
    # OTHER PRIMITIVES
    elif lookup_prim(sym, "err"):
        return prim_err(args)
    elif lookup_prim(sym, "g_env"):
        return prim_genv(args)
    # Otherwise it's not a primitive.
    else:
        print(args)
        raise ValueError("Unknown Bel primitive.")
def prim_id(args):
    """Identity predicate: t when both arguments are the very same object."""
    arity_check(args, 2)
    # Translate the Python truth value into a Bel boolean.
    return bel_t if idp(args.a, args.d.a) else bel_nil
# Bel truth is not the same as Python truth...
def prim_join(args):
    """Construct a pair whose car is the first argument and cdr the second."""
    arity_check(args, 2)
    car, cdr = args.a, args.d.a
    return Pair(car, cdr)
def prim_car(args):
    """Return the head (car) of the pair given as the sole argument."""
    arity_check(args, 1)
    pair = args.a
    return pair.a


def prim_cdr(args):
    """Return the tail (cdr) of the pair given as the sole argument."""
    arity_check(args, 1)
    pair = args.a
    return pair.d


def prim_cons(args):
    """Build a pair from two arguments, exactly like join.

    Could be extended to be variadic later.
    """
    arity_check(args, 2)
    head, tail = args.a, args.d.a
    return Pair(head, tail)
def prim_type(args):
    """Return a Symbol naming the Bel type of the single argument."""
    arity_check(args, 1)
    # Unwrap the argument from its enclosing argument list, if any.
    if isinstance(args, Pair):
        args = args.a
    # Probe each concrete Bel representation in the original order.
    for cls, name in ((Symbol, "symbol"), (Char, "char"), (Pair, "pair"),
                      (Stream, "stream"), (Number, "number")):
        if isinstance(args, cls):
            return Symbol(name)
    raise TypeError("Not a Bel type")
def prim_xar(args):
    """Destructively replace the car of a pair; returns the new value."""
    arity_check(args, 2)
    pair, val = args.a, args.d.a
    if not pairp(pair):
        raise TypeError("%s is not a pair" % pair)
    pair.a = val
    return val


def prim_xdr(args):
    """Destructively replace the cdr of a pair; returns the new value."""
    arity_check(args, 2)
    pair, val = args.a, args.d.a
    if not pairp(pair):
        raise TypeError("%s is not a pair" % pair)
    pair.d = val
    return val
def prim_sym(args):
    '''
    Takes a bel string x and gives us a symbol.
    '''
    arity_check(args, 1)
    string = args.a
    if not stringp(string):
        raise TypeError("%s is not a string." % string)
    # NOTE(review): bel_to_python_string is not implemented yet (see the
    # note below), so this primitive cannot currently succeed -- confirm
    # before enabling "sym" in the dispatch table.
    python_string = bel_to_python_string(string)
    return Symbol(python_string)
'''
Unimplemented: Bel to Python String.
'''
def prim_nom(args):
    '''
    Takes a symbol x and returns a list of characters that correspond to the name of x.
    (Stub: not implemented yet.)
    '''
    raise ValueError("Unimplemented")
def prim_apply(args):
    '''
    Bounded form of apply taking exactly two arguments:
      1. something applicable (a primitive or a closure);
      2. a Bel list of arguments (it may be improper).
    We only check the arity of apply itself; whatever is being applied
    is trusted to check its own arguments.  Applying a closure extends
    its stored lexical environment with the new bindings and tail-calls
    eval_tp on the body, so apply preserves proper tail recursion.
    Raises TypeError when the first argument is not applicable.
    '''
    arity_check(args, 2)
    a_fun = args.a
    a_args = args.d.a
    if primitivep(a_fun):
        # (lit prim NAME): the name lives at fun.d.d.a.
        return apply_primitive(
            a_fun.d.d.a, a_args)
    elif closurep(a_fun):
        # print("We are in a closure.")
        l_env = a_fun.d.d.a  # current lexical environment
        params = a_fun.d.d.d.a  # actual parameters
        body = a_fun.d.d.d.d.a  # function body (value that the fn returns)
        # print("Current lexical environment: %s" % l_env)
        new_env = Env(bind(params, a_args, l_env))
        # print("We are evaluating a closure with the body: %s" % body)
        # print("We are evaluating a closure, with the lexical environment of %s" % new_env)
        return eval_tp(body, new_env)  # evaluate the body given the new extended environment
    else:
        raise TypeError("application of %s in apply: Not a function" % a_fun)
'''
Functions related to streams.
'''
# Stream primitives: all unimplemented placeholders for now.
def prim_wrb(args):
    '''
    Tries to write a bit x to the stream y.
    '''
    arity_check(args, 2)
    raise ValueError("Unimplemented")
def prim_rdb(args):
    '''
    Tries to read a bit from the stream x.
    '''
    arity_check(args, 1)
    raise ValueError("Unimplemented")
def prim_ops(args):
    '''
    Opens a stream that writes to or reads from the place whose name is the string x,
    depending on whether y is out or in respectively.
    '''
    arity_check(args, 2)
    raise ValueError("Unimplemented")
def prim_cls(args):
    '''
    Closes stream args.a.
    '''
    arity_check(args, 1)
    raise ValueError("Unimplemented")
def prim_stat(args):
    '''
    Returns either closed, in or out depending on stream state.
    '''
    # NOTE(review): unlike the other stream stubs this one performs no
    # arity_check -- presumably arity_check(args, 1) was intended.
    raise ValueError("Unimplemented")
'''
Others.
'''
def prim_coin(args):
    """Flip a fair coin: return t or nil with equal probability."""
    arity_check(args, 0)
    # random.randint(0, 1) picks one of the two outcomes uniformly.
    if random.randint(0, 1) == 0:
        return bel_nil
    return bel_t


def prim_sys(args):
    """Send x (presumably a string) as a command to the os.  (Stub.)"""
    raise ValueError("Unimplemented")
'''
OPERATORS
A note. Nice thing here is that since we have already done the work
of defining operators prematurely upon Numbers, shadowing the Python functions
__add__, __sub__ and the like, then we can just call them using the necessary operators, having specified exactly how they should behave.
'''
def prim_add(args):
    """Variadic addition over a Bel list of Numbers.

    (+ 1 2 3 4) evaluates to 10; the empty list gives 0; a singleton
    returns itself.
    """
    if not number_listp(args):
        raise TypeError("Cannot add non-numbers.")
    count = args.length()
    if count == 0:
        return Number(0)
    if count == 1:
        return args.a
    # Fold the remaining elements onto the first one.
    total = args.a
    rest = args.d
    while not nilp(rest):
        total = total + rest.a
        rest = rest.d
    return total
def prim_sub(args):
    '''
    Variadic subtraction.
    Takes a list of numbers.
    Subtracts all values in the list. (- 1 2 3 4) would
    evaluate to -10.
    If it is a singleton, then invert the value of the single number.
    '''
    if not number_listp(args):
        raise TypeError("Cannot subtract non-numbers.")
    length = args.length()
    if length == 0:
        return Number(0)
    elif length == 1:
        # Unary minus: negate by multiplying the single element by -1.
        # Bug fixed: the previous code called prim_mul(Number(-1), args.a),
        # i.e. with two arguments, but prim_mul takes a single Bel list --
        # that call could never succeed.  Build the proper argument list.
        return prim_mul(Pair(Number(-1), Pair(args.a, bel_nil)))
    # Fold the remaining elements off the first one.
    res = args.a
    itr = args.d
    while not nilp(itr):
        res = res - itr.a
        itr = itr.d
    return res
def prim_mul(args):
    """Variadic multiplication over a Bel list of Numbers.

    (* 1 2 3 4) evaluates to 24; the empty list gives 1; a singleton
    returns itself.
    """
    if not number_listp(args):
        raise TypeError("Cannot multiply non-numbers.")
    count = args.length()
    if count == 0:
        return Number(1)
    if count == 1:
        return args.a
    product = args.a
    rest = args.d
    while not nilp(rest):
        product = product * rest.a
        rest = rest.d
    return product


def prim_div(args):
    """Variadic division over a Bel list of Numbers.

    (/ 16 2 2 2) evaluates to 2; the empty list gives 1; a singleton
    returns itself.  Division by zero is left for Python to report.
    """
    if not number_listp(args):
        raise TypeError("Cannot divide non-numbers.")
    count = args.length()
    if count == 0:
        return Number(1)
    if count == 1:
        return args.a
    quotient = args.a
    rest = args.d
    while not nilp(rest):
        quotient = quotient / rest.a
        rest = rest.d
    return quotient
def _prim_compare(args, relation):
    """Shared body for the binary numeric comparison primitives."""
    arity_check(args, 2)
    if not number_listp(args):
        raise TypeError("Cannot compare non-numbers.")
    return bel_t if relation(args.a, args.d.a) else bel_nil


def prim_lt(args):
    """t when the first number is less than the second, else nil."""
    return _prim_compare(args, lambda a, b: a < b)


def prim_leq(args):
    """t when the first number is less than or equal to the second, else nil."""
    return _prim_compare(args, lambda a, b: a <= b)


def prim_gt(args):
    """t when the first number is greater than the second, else nil."""
    return _prim_compare(args, lambda a, b: a > b)


def prim_geq(args):
    """t when the first number is greater than or equal to the second, else nil."""
    return _prim_compare(args, lambda a, b: a >= b)
def prim_eq(args):
    '''
    Structural equality over two Bel values (less strict than id).
    Values of different types are never equal.  Numbers compare by
    their .v value; symbols and chars by their .n name.  Streams and
    pairs are not supported yet and raise ValueError.  A future
    extension would crawl pairs/lists and compare elements pairwise.
    '''
    arity_check(args, 2)
    # Different types can never be equal.
    # NOTE(review): the print below is debug output on stdout.
    if prim_type(args.a).n != prim_type(args.d.a).n:
        print("Different Types")
        return bel_nil
    # Numbers compare by stored value.
    if prim_type(args.a).n == "number":
        if args.a.v == args.d.a.v:
            return bel_t
        else:
            return bel_nil
    # Symbols compare by name.
    if prim_type(args.a).n == "symbol":
        if args.a.n == args.d.a.n:
            return bel_t
        else:
            return bel_nil
    # Chars compare by name as well.
    if prim_type(args.a).n == "char":
        if args.a.n == args.d.a.n:
            return bel_t
        else:
            return bel_nil
    if prim_type(args.a).n == "stream":
        raise ValueError("Streams are unimplemented")
    '''
    A pair is equal to another pair
    if and only if every element in pair A is equal
    pairwise to every element in pair B.
    If it is a pair,
    then compare the cars and cdrs.
    If it is a proper list,
    then traverse the list and compare elements pairwise.
    '''
    # NOTE(review): this branch compares Symbol objects with == (relying
    # on Symbol.__eq__), unlike the .n comparisons above -- confirm both
    # styles agree.  The inner if/pass is dead scaffolding for the
    # planned pairwise traversal.
    if prim_type(args.a) == Symbol("pair"):
        print("We are comparing a pair")
        if not nilp(args.a.d) and not nilp(args.d.a.d):
            pass
        raise ValueError("Equality of pairs is unimplemented")
    else:
        raise TypeError("Cannot call equality on non-Bel types")
'''
OTHER PRIMITIVES
'''
# With thanks to Luke Vieira for the error format.
def prim_err(args):
    """Construct a Bel error: first arg is the message string, rest the payload."""
    string = args.a
    if not stringp(string):
        return make_error(String("First argument of error must be a string"), bel_nil)
    return make_error(string, args.d)
def prim_genv(args):
    """Debugging helper: dump the global environment to stdout, return nil."""
    print(g_env)
    return bel_nil
def prim_define(args):
    '''
    Takes a name (Symbol) and an expression.
    It evaluates the expression and puts it
    on the global environment.
    Unless it exists, at which point
    you override it.
    Returns the symbol that was defined.
    '''
    arity_check(args, 2)
    if not symbolp(args.a):
        raise TypeError("Cannot call define on non-symbol")
    symbol = args.a
    expr = args.d.a
    # Evaluate exactly once.  Bug fixed: the previous version called
    # eval(expr, bel_nil) a second time just for the log line below,
    # duplicating any side effects of the expression.
    value = eval(expr, bel_nil)
    symbol_table[symbol.n] = value
    print("Symbol defined: %s" % symbol)
    print("Result of eval on expr: %s" % value)
    return symbol
'''
SPECIAL FORMS
'''
def special_quote(exp, l_env):
    """Implement (quote a): return the quoted object unevaluated.

    Exactly one object may be quoted (the object itself may be a pair).
    Evaluating a quoted object strips the quote -- the key to treating
    code as data.
    """
    if exp.length() != 2:
        raise ValueError("We can only quote one object.")
    return exp.d.a
def special_if(exp, l_env):
    # Evaluate a Bel (if ...) form: alternating test/consequent clauses
    # with an optional trailing else expression; recurses down the
    # remaining clauses when a test is nil.
    body = exp
    if nilp(body.d):
        # Then it's car is the final else branch of the if expression.
        return eval(body.a, l_env)
    if not pairp(body.d):
        raise SyntaxError("The cdr of this pair has to be a pair.")
    else:
        # If the cdr of the body is a pair, then
        # The car of that pair is the test, and the cadr is the consequent branch.
        test = body.a
        conseq = body.d.a
        # We evaluate the test, with a non-tail call to eval.
        if not nilp(eval(test, l_env)):
            # If the test evaluates to a truthy value (not nil), then return evaluation of the consequent.
            return eval(conseq, l_env)
        # Otherwise, we tail call eval on the cadr on the pair, or the then branch.
        else:
            # print("Tail call on the then branch.")
            # Then branch.
            # print("Then branch: %s" % body.d.d)
            return special_if(body.d.d, l_env)
def special_if_itr(exp, l_env):
    '''
    Stub: an iterative (properly tail-recursive) variant of special_if.
    The while loop evaluates the expression denoted by tp.
    At the same level of the control stack,
    we are moving through the expression.
    The simplest way to make a tail recursive function
    is to write it iteratively.
    You are only extending the lexical environment,
    which is temporary until the next call.
    Afterwards, you extend the original lexical environment...
    '''
    pass
def special_dyn(rest, l_env):
    # Stub: dynamic binding special form, not implemented yet.
    pass
def special_set(clauses, l_env):
    '''
    Set = bounding each vi to the value of ei.
    (set v1 e1 ... vn en)
    Returns nil.
    '''
    # NOTE(review): debug output on stdout.
    print(clauses)
    # First pass: walk the clauses two at a time, evaluating every
    # expression and stacking symbols/values (both stacks end up in
    # reverse clause order, which is consistent for the pairwise
    # assignment below).
    syms = bel_nil
    vals = bel_nil
    itr = clauses
    while not nilp(itr):
        sym = itr.a
        if not symbolp(sym) or nilp(sym):
            raise TypeError("You can only bind global bindings with symbols.")
        val = eval(itr.d.a, l_env)
        syms = Pair(sym, syms)
        vals = Pair(val, vals)
        itr = itr.d.d
    # Second pass: assign only after every expression evaluated
    # successfully.
    while not nilp(syms):
        assign_val(l_env, syms.a, vals.a)
        syms = syms.d
        vals = vals.d
    return bel_nil
def special_def(args, l_env):
    '''
    (def n p e)
    is an abbreviation for
    (set n (lit clo nil p e))
    '''
    sym = args.a
    p = args.d.a
    e = args.d.d.a
    rest = make_list(p, e)
    # NOTE(review): Pair is constructed here with a single argument; if
    # Pair requires both a car and a cdr this call cannot succeed --
    # presumably Pair(make_closure(bel_nil, rest), bel_nil) was
    # intended.  Confirm against the Pair constructor.
    lit_clo = Pair(sym, Pair(make_closure(bel_nil, rest)))
    return special_set(lit_clo, l_env)
def let(bindings, body):
    '''
    Stub: not implemented yet.
    bindings = ((name . expr) (name . expr) (name . expr))
    body =
    (let ((x 1)) (f x)) = ((fn (x) (f x)) 1)
    (let ((x 1) (y 2)) (+ x y)) = ((fn (x y) (+ x y)) 1 2)
    The formal (x y) becomes the formals, (x y)
    the definiens (1 and 2) become the actuals (1 2), or the args
    The body becomes this body.. etc
    Landin's Correspondence
    A block structure and function applications mean the same.
    '''
    pass
def map_quote(args):
    """Wrap every element of a Bel list in (quote ...).

    (1 2 3) -> ('1 '2 '3).  Assumes *args* is a proper list, as it is
    always produced by eval_list.
    """
    if nilp(args):
        return bel_nil
    quoted_head = Pair(Symbol("quote"), Pair(args.a, bel_nil))
    return Pair(quoted_head, map_quote(args.d))
# EOF | true |
c6a162553c923745c755ee7ff220c6bfcd346045 | Python | Mehedee-Hassan/mla-z | /lesson7.py | UTF-8 | 500 | 2.609375 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the training data from a hard-coded local Windows path.
dataset = pd.read_csv('C:\\Users\\mehedee\\Documents\\Python Scripts\\tutorial\\Artificial_Neural_Networks\\ML_DS\\Data.csv')
# X: every column except the last (features); Y: the fourth column (labels).
X = dataset.iloc[:,:-1].values
Y = dataset.iloc[:,3].values
# handling missing data
# NOTE(review): sklearn.preprocessing.Imputer only exists in old
# scikit-learn releases (later replaced by sklearn.impute.SimpleImputer)
# -- confirm the pinned scikit-learn version before running.
from sklearn.preprocessing import Imputer
# Replace NaNs in columns 1-2 with the per-column mean.
imputer = Imputer(missing_values="NaN" ,strategy="mean", axis = 0)
# NOTE(review): 'impiter' is never used; fit() already mutates 'imputer',
# which is what transform() relies on below.
impiter = imputer.fit(X[:,1:3])
X[:, 1:3] = imputer.transform(X[:,1:3])
| true |
ac7d0996779654add8098e7aad24a646f1121b9c | Python | Blaxzter/UM_ARS_G8 | /01_PSO/src/main.py | UTF-8 | 1,455 | 2.671875 | 3 | [] | no_license | """
Author Guillaume Franzoni Darnois
"""
from ParticleSwarmOptimization import PSO
from OptimizationFunction import OptimizationFunction
from src.Visualizer import Visualizer
import Constants as Const
from src.VizTest import VizTest
if __name__ == "__main__":
    # Optimize the Rastrigin function over the square [0, 100].
    opti = OptimizationFunction(a=0, b=100)
    selected_function = opti.rastrigin
    # ---Create PSO object to be used in the animation frames
    pso = PSO(selected_function)
    pso.optimize()
    #
    # for swarm in pso.swarms:
    #     for i in range(Const.N_ITERATIONS):
    #         print(str(pso.swarms.index(swarm)) + " " + str(i) + " " + str(list(filter(lambda data: data.get("best") and data.get('swarm') == pso.swarms.index(swarm), pso.history.get(i)))[0].get('id')))
    print("Optimization Done")
    # Build the plot title and output filename from experiment parameters.
    # NOTE(review): func_name says "Reduced Ackley" but selected_function
    # is opti.rastrigin above -- the label looks stale; confirm.
    test_name = "sim"
    func_name = "Reduced Ackley"
    parameter = "One sided initialization "
    title = f"PSO Simulation - {parameter} - {func_name}"
    write_title = f"{test_name.replace(' ', '_')}_{func_name}_{parameter.replace(' ', '_')}_{Const.N_SWARMS}_{Const.N_PARTICLES}_{Const.C1}_{Const.C2}"
    # Visualize the recorded run: average velocity/altitude and the best
    # altitude per iteration.
    viz = VizTest(selected_function, pso.history, title,
                  dict(
                      avg_vel=pso.average_velocity_history,
                      avg_alt=pso.average_altitude_history,
                      best_alt=pso.best_altitude_history,
                  ))
    print("Viz Done")
    viz.show_fig()
    viz.write_fig(write_title.lower())
| true |
8bc945dc12265c4ee275691160015f4dc9dd92cc | Python | ErwinKomen/RU-passim | /stemmap/File.py | UTF-8 | 1,588 | 3.390625 | 3 | [] | no_license | import os
import contextlib
@contextlib.contextmanager
def as_handle(handleish, mode="r", **kwargs):
    r"""Context manager to ensure we are using a handle.
    Context manager for arguments that can be passed to SeqIO and AlignIO read, write,
    and parse methods: either file objects or path-like objects (strings, pathlib.Path
    instances, or more generally, anything that can be handled by the builtin 'open'
    function).
    When given a path-like object, returns an open file handle to that path, with provided
    mode, which will be closed when the manager exits.
    All other inputs are returned, and are *not* closed.
    Arguments:
     - handleish - Either a file handle or path-like object (anything which can be
                   passed to the builtin 'open' function, such as str, bytes,
                   pathlib.Path, and os.DirEntry objects)
     - mode - Mode to open handleish (used only if handleish is a string)
     - kwargs - Further arguments to pass to open(...)
    Examples
    --------
    >>> from Bio import File
    >>> import os
    >>> with File.as_handle('seqs.fasta', 'w') as fp:
    ...     fp.write('>test\nACGT')
    ...
    10
    >>> fp.closed
    True
    >>> handle = open('seqs.fasta', 'w')
    >>> with File.as_handle(handle) as fp:
    ...     fp.write('>test\nACGT')
    ...
    10
    >>> fp.closed
    False
    >>> fp.close()
    >>> os.remove("seqs.fasta")  # tidy up
    """
    try:
        # Path-like input: open it ourselves and close it on exit.
        with open(handleish, mode, **kwargs) as fp:
            yield fp
    except TypeError:
        # open() raised TypeError, so handleish is already a file-like
        # object; hand it back unchanged and do not close it.
        # NOTE(review): a TypeError raised by the caller's block inside
        # the 'with' above would also land here -- confirm this matches
        # the intended behaviour before changing anything.
        yield handleish
| true |
15f5af8a82d8fc8a4a493bc60d4ecf6f7acd66cd | Python | AllenInstitute/bmtk | /bmtk/tests/utils/reports/compartment/test_compartment_reader.py | UTF-8 | 5,190 | 2.640625 | 3 | [
"BSD-3-Clause"
] | permissive | import os
import numpy as np
from bmtk.utils.reports import CompartmentReport
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nhosts = comm.Get_size()
barrier = comm.Barrier
except Exception as exc:
rank = 0
nhosts = 1
barrier = lambda: None
cpath = os.path.dirname(os.path.realpath(__file__))
output_file = os.path.join(cpath, 'compartment_files/multi_population_report.h5')
def build_file():
    # Cells as (node_id, n_elements, population); two populations, v1 and v2.
    rank_cells = [(0, 10, 'v1'), (1, 50, 'v1'), (2, 100, 'v1'), (3, 1, 'v1'), (4, 200, 'v1'), (0, 100, 'v2'), (1, 50, 'v2')]
    cr = CompartmentReport(output_file, mode='w', tstart=0.0, tstop=100.0, dt=0.1, variable='Vm', units='mV')
    for node_id, n_elements, pop in rank_cells:
        cr.add_cell(node_id=node_id, population=pop, element_ids=np.arange(n_elements),
                    element_pos=np.zeros(n_elements))
    # Each cell records its own node_id at every element for 1000 timesteps;
    # the reader tests in this file assert against exactly those values.
    for i in range(1000):
        for node_id, n_elements, pop in rank_cells:
            cr.record_cell(node_id, population=pop, vals=[node_id]*n_elements, tstep=i)
    cr.close()
def test_compartment_reader():
report = CompartmentReport(output_file, 'r')
assert(len(report.populations) == 2)
# Check v1 population
assert('v1' in report.populations)
v1_grp = report['v1']
assert(np.all(np.sort(v1_grp.node_ids()) == np.arange(5)))
assert(v1_grp.tstart() == 0.0)
assert(v1_grp.tstop() == 100.0)
assert(v1_grp.dt() == 0.1)
assert(v1_grp.units() == 'mV')
assert(v1_grp.n_elements() == 361)
assert(v1_grp.element_pos().size == 361)
assert(v1_grp.element_ids().size == 361)
assert(v1_grp.data().shape == (1000, 361))
assert(v1_grp.data(0).shape == (1000, 10))
assert(v1_grp.data(0, time_window=(0.0, 50.0)).shape == (500, 10))
assert(np.all(np.unique(v1_grp.data(0)) == [0.0]))
assert(v1_grp.data(1).shape == (1000, 50))
assert(np.all(np.unique(v1_grp.data(1)) == [1.0]))
assert(v1_grp.data(2).shape == (1000, 100))
assert(np.all(np.unique(v1_grp.data(2)) == [2.0]))
assert(v1_grp.data(3).shape == (1000, 1))
assert(np.all(np.unique(v1_grp.data(3)) == [3.0]))
assert(v1_grp.data(4).shape == (1000, 200))
assert(np.all(np.unique(v1_grp.data(4)) == [4.0]))
# Check v2 population
assert('v2' in report.populations)
v1_grp = report['v2']
assert(np.all(np.sort(v1_grp.node_ids()) == np.arange(2)))
assert(v1_grp.tstart() == 0.0)
assert(v1_grp.tstop() == 100.0)
assert(v1_grp.dt() == 0.1)
assert(v1_grp.units() == 'mV')
assert(v1_grp.n_elements() == 150)
assert(v1_grp.element_pos().size == 150)
assert(v1_grp.element_ids().size == 150)
assert(v1_grp.data().shape == (1000, 150))
assert(v1_grp.data(0).shape == (1000, 100))
assert(v1_grp.data(0, time_window=(0.0, 50.0)).shape == (500, 100))
assert(np.all(np.unique(v1_grp.data(0)) == [0.0]))
assert(v1_grp.data(1).shape == (1000, 50))
assert(np.all(np.unique(v1_grp.data(1)) == [1.0]))
def test_compartment_reader2():
report = CompartmentReport(output_file, 'r', default_population='v1')
assert(len(report.populations) == 2)
assert('v1' in report.populations)
assert(np.all(np.sort(report.node_ids()) == np.arange(5)))
assert(report.tstart() == 0.0)
assert(report.tstop() == 100.0)
assert(report.dt() == 0.1)
assert(report.units() == 'mV')
assert(report.n_elements() == 361)
assert(report.element_pos().size == 361)
assert(report.element_ids().size == 361)
assert(report.data().shape == (1000, 361))
assert(report.data(0).shape == (1000, 10))
assert(report.data(0, time_window=(0.0, 50.0)).shape == (500, 10))
assert(np.all(np.unique(report.data(0)) == [0.0]))
assert(report.data(1).shape == (1000, 50))
assert(np.all(np.unique(report.data(1)) == [1.0]))
assert(report.data(2).shape == (1000, 100))
assert(np.all(np.unique(report.data(2)) == [2.0]))
assert(report.data(3).shape == (1000, 1))
assert(np.all(np.unique(report.data(3)) == [3.0]))
assert(report.data(4).shape == (1000, 200))
assert(np.all(np.unique(report.data(4)) == [4.0]))
# Check v2 population
assert('v2' in report.populations)
assert(np.all(np.sort(report.node_ids(population='v2')) == np.arange(2)))
assert(report.tstart(population='v2') == 0.0)
assert(report.tstop(population='v2') == 100.0)
assert(report.dt(population='v2') == 0.1)
assert(report.units(population='v2') == 'mV')
assert(report.n_elements(population='v2') == 150)
assert(report.element_pos(population='v2').size == 150)
assert(report.element_ids(population='v2').size == 150)
assert(report.data(population='v2').shape == (1000, 150))
assert(report.data(0, population='v2').shape == (1000, 100))
assert(report.data(0, population='v2', time_window=(0.0, 50.0)).shape == (500, 100))
assert(np.all(np.unique(report.data(0, population='v2')) == [0.0]))
assert(report.data(1, population='v2').shape == (1000, 50))
assert(np.all(np.unique(report.data(1, population='v2')) == [1.0]))
if __name__ == '__main__':
#build_file()
#test_compartment_reader()
test_compartment_reader2()
| true |
324aa29c6184f3cfe30925797f8a90146e8fbb6e | Python | Wesleyliao/QWOP-RL | /game/env.py | UTF-8 | 5,525 | 2.625 | 3 | [] | no_license | import time
import gym
import numpy as np
from gym import spaces
from pynput.keyboard import Controller
from pynput.keyboard import Key
from selenium import webdriver
from stable_baselines.common.env_checker import check_env
# Port the local QWOP server is expected to listen on.
PORT = 8000
# Seconds each key combination is held before the next action.
PRESS_DURATION = 0.1
# Episodes are truncated once the in-game scoreTime exceeds this.
MAX_EPISODE_DURATION_SECS = 120
# Length of the observation vector declared for the Gym Box space.
STATE_SPACE_N = 71
# Discrete action id -> key combination to hold ('' = release everything).
ACTIONS = {
    0: 'qw',
    1: 'qo',
    2: 'qp',
    3: 'q',
    4: 'wo',
    5: 'wp',
    6: 'w',
    7: 'op',
    8: 'o',
    9: 'p',
    10: '',
}
class QWOPEnv(gym.Env):
    """Gym environment driving the QWOP browser game via Selenium.

    Actions are discrete key combinations (see ACTIONS) pressed with
    pynput; observations are built from the game's JS globals.
    """
    meta_data = {'render.modes': ['human']}
    # Keys currently held down (class-level, shared across instances).
    pressed_keys = set()
    def __init__(self):
        # Open AI gym specifications
        super(QWOPEnv, self).__init__()
        self.action_space = spaces.Discrete(len(ACTIONS))
        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf, shape=[STATE_SPACE_N], dtype=np.float32
        )
        self.num_envs = 1
        # QWOP specific stuff
        self.gameover = False
        self.previous_score = 0
        self.previous_time = 0
        self.previous_torso_x = 0
        self.previous_torso_y = 0
        # When False, step() just waits instead of pressing keys.
        self.evoke_actions = True
        # Open browser and go to QWOP page
        self.driver = webdriver.Chrome()
        self.driver.get(f'http://localhost:{PORT}/Athletics.html')
        # Wait a bit and then start game
        time.sleep(2)
        self.driver.find_element_by_xpath("//body").click()
        self.keyboard = Controller()
        self.last_press_time = time.time()
    def _get_variable_(self, var_name):
        """Read a JS global from the game page."""
        return self.driver.execute_script(f'return {var_name};')
    def _get_state_(self):
        """Build the (state, reward, done, info) tuple from the game's JS globals."""
        game_state = self._get_variable_('globalgamestate')
        body_state = self._get_variable_('globalbodystate')
        # Get done
        if (
            (game_state['gameEnded'] > 0)
            or (game_state['gameOver'] > 0)
            or (game_state['scoreTime'] > MAX_EPISODE_DURATION_SECS)
        ):
            self.gameover = done = True
        else:
            self.gameover = done = False
        # Get reward
        torso_x = body_state['torso']['position_x']
        torso_y = body_state['torso']['position_y']
        # Reward for moving forward
        reward1 = max(torso_x - self.previous_torso_x, 0)
        # # Penalize for low torso
        # if torso_y > 0:
        #     reward2 = -torso_y / 5
        # else:
        #     reward2 = 0
        # # Penalize for torso vertical velocity
        # reward3 = -abs(torso_y - self.previous_torso_y) / 4
        # # Penalize for bending knees too much
        # if (
        #     body_state['joints']['leftKnee'] < -0.9
        #     or body_state['joints']['rightKnee'] < -0.9
        # ):
        #     reward4 = (
        #         min(body_state['joints']['leftKnee'], body_state['joints']['rightKnee'])
        #         / 6
        #     )
        # else:
        #     reward4 = 0
        # Combine rewards (only forward progress is currently active).
        reward = reward1 * 2  # + reward2 + reward3 + reward4
        # print(
        #     'Rewards: {:3.1f}, {:3.1f}, {:3.1f}, {:3.1f}, {:3.1f}'.format(
        #         reward1, reward2, reward3, reward4, reward
        #     )
        # )
        # Update previous scores
        self.previous_torso_x = torso_x
        self.previous_torso_y = torso_y
        self.previous_score = game_state['score']
        self.previous_time = game_state['scoreTime']
        # Normalize torso_x: make all x positions relative to the torso.
        for part, values in body_state.items():
            if 'position_x' in values:
                values['position_x'] -= torso_x
        # print('Positions: {:3.1f}, {:3.1f}, {:3.1f}'.format(
        #     body_state['torso']['position_x'],
        #     body_state['leftThigh']['position_x'],
        #     body_state['rightCalf']['position_x']
        # ))
        # print('Knee angles: {:3.2f}, {:3.2f}'.format(
        #     body_state['joints']['leftKnee'],
        #     body_state['joints']['rightKnee']
        # ))
        # Convert body state: flatten all body-part values into one vector.
        state = []
        for part in body_state.values():
            state = state + list(part.values())
        state = np.array(state)
        return state, reward, done, {}
    def _release_all_keys_(self):
        """Release every currently-held key and clear the tracking set."""
        for char in self.pressed_keys:
            self.keyboard.release(char)
        self.pressed_keys.clear()
    def send_keys(self, keys):
        """Hold the given key combination for PRESS_DURATION seconds."""
        # Release all keys
        self._release_all_keys_()
        # Hold down current key
        for char in keys:
            self.keyboard.press(char)
            self.pressed_keys.add(char)
        # print('pressed for', time.time() - self.last_press_time)
        # self.last_press_time = time.time()
        time.sleep(PRESS_DURATION)
    def reset(self):
        """Restart the game ('R' + space) and return the initial observation."""
        # Send 'R' key press to restart game
        self.send_keys(['r', Key.space])
        self.gameover = False
        self.previous_score = 0
        self.previous_time = 0
        self.previous_torso_x = 0
        self.previous_torso_y = 0
        self._release_all_keys_()
        return self._get_state_()[0]
    def step(self, action_id):
        """Apply the keys for *action_id* and return (state, reward, done, info)."""
        # send action
        keys = ACTIONS[action_id]
        if self.evoke_actions:
            self.send_keys(keys)
        else:
            time.sleep(PRESS_DURATION)
        return self._get_state_()
    def render(self, mode='human'):
        # Rendering happens in the browser window itself; nothing to do.
        pass
    def close(self):
        # No explicit cleanup (browser/driver left to the OS).
        pass
if __name__ == '__main__':
    # Smoke test: validate the env against the Gym API, then run random
    # actions forever, restarting whenever an episode ends.
    env = QWOPEnv()
    check_env(env)
    while True:
        if env.gameover:
            env.reset()
        else:
            env.step(env.action_space.sample())
| true |
72346f51627eb790939e853f118fba9fb0c62b43 | Python | jacquelynchow/MLHLocalHackDay2019 | /graph.py | UTF-8 | 2,601 | 3.09375 | 3 | [
"MIT"
] | permissive |
# importing the required module
# NOTE(review): this is a Python 2 script (print statements, raw_input,
# and the 0-prefixed literal in datetime.date(2019, 12, 03) are all
# Python-2-only syntax).  Also note 150/55 etc. is *integer* division in
# Python 2, so diningDols is truncated to a whole-dollar daily budget.
import matplotlib.pyplot as plt
from matplotlib.dates import (DAILY, DateFormatter, rrulewrapper, RRuleLocator, drange)
import numpy as np
import datetime
print "\n Welcome to the Swarthmore Meal Plan Visual Budget Tool"
print "========================================================="
mealPlan = raw_input("Enter your meal plan: ")
# Per-day dining-dollar budget: plan total divided by 55 days.
if mealPlan == "SWAT":
    # print "SWAT"
    diningDols = 150/55
elif mealPlan == "GARNET":
    # print "GARNET"
    diningDols = 300/55
elif mealPlan == "PHOENIX":
    # print "PHOENIX"
    diningDols = 400/55
elif mealPlan == "PARRISH":
    # print "PARRISH"
    diningDols = 500/55
elif mealPlan == "PPR":
    # print "PPR"
    diningDols = 200/55
elif mealPlan == "COMMUTER":
    # print "COMMUTER"
    diningDols = 100/55
else:
    # NOTE(review): diningDols is left undefined on this path, so the
    # plotting below would raise NameError.
    print "Not a valid meal plan."
graphType = 0
# while graphType != 3:
graphType = raw_input("Please enter choose a graph type, 1 or 2: ")
# y: actual spending series; y2: the budgeted series to compare against.
y = []
y2 = []
if int(graphType) == 1:
    # Type 1: per-day spending vs the flat daily budget.
    sum = 0.0
    spendingFile = open("spendingDataForGraph.txt", 'r')
    for amt in spendingFile:
        # print amt.strip()
        y.append(float(amt.strip()))
        sum += float(amt.strip())
    avg = sum/55
    for i in range(55):
        y2.append(diningDols)
else:
    # Type 2: cumulative spending vs the cumulative budget line.
    sum = 0.0
    spendingFile = open("spendingDataForGraph.txt", 'r')
    for amt in spendingFile:
        y.append(sum)
        sum += float(amt.strip())
    avg = sum/55
    for i in range(55):
        y2.append(diningDols*(i+1))
# y3 = []
# for i in range(55):
#     y3.append(avg)
# if graphType != 3:
#8/27 - 12/3
np.random.seed(19680801)
# tick every 5th easter
# rule = rrulewrapper(DAILY, interval=70)
# loc = RRuleLocator(rule)
formatter = DateFormatter('%m/%d/%y')
date1 = datetime.date(2019, 8, 27)
date2 = datetime.date(2019, 12, 03)
date3 = datetime.date(2019, 8, 27)
# x axis is just the day index 1..len(y); the date machinery above is
# currently unused (see the commented axis-formatting lines below).
x = []
for i in range(1, len(y)+1):
    x.append(i)
# x.append(date2)
# date3 = datetime.date(2000, 1, 30)
delta = datetime.timedelta(days=100)
# x axis values
# corresponding y axis values
# fig, ax = plt.subplots()
# plotting the points
plt.plot(x, y, label = "Current Spending")
plt.plot(x, y2, label = "Budgeted Spending")
# plt.plot(x, y3, label = "Projected Spending")
# ax.xaxis.set_major_locator(loc)
# ax.xaxis.set_major_formatter(formatter)
# ax.xaxis.set_tick_params(rotation=30, labelsize=10)
# naming the x axis
plt.xlabel('Date')
# naming the y axis
plt.ylabel('Amount Spent ($)')
plt.legend()
# giving a title to my graph
plt.title('Meal Plan Budgeting: ' + mealPlan)
# print x
# print y
# function to show the plot
plt.show()
| true |
784d819314c0cb19d797fcd9cfe2eac4f91bf896 | Python | evanwill/poemRemix | /PiPyPoems/GoldenTreasuryPiPoems.py | UTF-8 | 1,030 | 3.875 | 4 | [] | no_license | # import stuff
import pandas
import random

# Load the corpus: one poem line per row, with its word count in 'words'.
poem_df = pandas.read_csv('GoldenTreasuryLines.csv')

# First 61 digits of Pi: each digit fixes the word count of one poem line.
pi_digits = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4, 3, 3, 8, 3, 2, 7, 9, 5, 0, 2, 8, 8, 4, 1, 9, 7, 1, 6, 9, 3, 9, 9, 3, 7, 5, 1, 0, 5, 8, 2, 0, 9, 7, 4, 9, 4, 4]

# Random poem length, matching the approximate range of the original corpus.
poem_length = random.randrange(5, 60)

# Authors of the sampled lines are appended to this byline as we go.
author = 'Pi Py Poetry'

# A digit of 0 becomes a stanza break; any other digit selects a random
# corpus line with exactly that many words.
for idx in range(poem_length):
    digit = pi_digits[idx]
    if digit == 0:
        print("\n")
    else:
        pick = poem_df[poem_df['words'] == digit].sample()
        author += ', ' + pick.values[0][0]
        print(pick.values[0][1])

print('\nBy ' + author)
6a86dea9e6c693ae2a6c721ce53682eade18fa7b | Python | antonkhmv/merkle-tree | /Hashing.py | UTF-8 | 804 | 3.1875 | 3 | [] | no_license | import hashlib
def reformat(item):
    """Coerce *item* to str; bytes payloads are deliberately blanked."""
    # isinstance() is the idiomatic type check (was `type(item) == bytes`).
    return '' if isinstance(item, bytes) else str(item)
class TestStringHashing:
    """Fake hasher used for testing: digest() just echoes the input text."""

    def __init__(self, item):
        # Normalise the payload the same way the real hashers would see it.
        self.input = reformat(item)

    def digest(self):
        return self.input
def get_hash_func(string):
    """Map an algorithm name to its hashlib constructor.

    "test" maps to the fake TestStringHashing hasher; an unknown name
    raises ValueError.
    """
    if string == "sha1":
        return hashlib.sha1
    elif string == "sha224":
        return hashlib.sha224
    elif string == "sha256":
        return hashlib.sha256
    elif string == "sha384":
        # Fixed: this branch previously returned hashlib.sha224.
        return hashlib.sha384
    elif string == "sha512":
        return hashlib.sha512
    elif string == "blake2b":
        return hashlib.blake2b
    elif string == "blake2s":
        return hashlib.blake2s
    elif string == "test":
        return TestStringHashing
    raise ValueError("Hashing algorithm \"" + string + "\" not found.")
| true |
def PrimeFactor(n):
    """Return the prime factorization of n (n >= 1) as {prime: exponent}.

    PrimeFactor(1) returns {}.  n = 0 is not supported (would loop forever).
    """
    power = {}
    # Strip factors of 2 first so the main loop can step over odd candidates.
    while not n % 2:
        # dict.get replaces the original try/except KeyError in a loop.
        power[2] = power.get(2, 0) + 1
        n //= 2
    i = 3
    while i * i <= n:
        while not n % i:
            power[i] = power.get(i, 0) + 1
            n //= i
        i += 2
    if n != 1:
        # Whatever remains is a single prime larger than sqrt(original n).
        power[n] = 1
    return power
def IsPrime(n):
    """Primality test using the 6k +/- 1 trial-division wheel."""
    if n < 5:
        # Only 2 and 3 are prime below 5.
        return n in (2, 3)
    if n % 6 not in (1, 5):
        # Every prime above 3 is congruent to 1 or 5 modulo 6.
        return False
    for m in range(5, int(n ** 0.5) + 1, 6):
        # Check both candidates of the 6k +/- 1 pair.
        if n % m == 0 or n % (m + 2) == 0:
            return False
    return True
# q is a "prime power" exactly when it factors as p**k for one prime p.
q = int(input())
powers = PrimeFactor(q)
single_base = len(powers) == 1 and IsPrime(next(iter(powers)))
print('yes' if single_base else 'no')
| true |
f52574a57aa5061b0100825869fd8e055919a751 | Python | chico-state-acm/SpringComp2019 | /problems/TidyNumbers/gen_tests.py | UTF-8 | 413 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
from sys import argv, stderr
from random import randint

# Usage: gen_tests.py <num digits> <num tests>
if len(argv) != 3:
    print('USAGE: gen_tests.py <num digits> <num tests>', file=stderr)
    exit(1)

num_tests = int(argv[2])
num_digits = int(argv[1])

# First output line is the number of test cases, then one number per line.
print(num_tests)
for _ in range(num_tests):
    digits = []
    for pos in range(num_digits):
        # No leading zero: the first digit is drawn from 1-9.
        low = 1 if pos == 0 else 0
        digits.append('0123456789'[randint(low, 9)])
    print(''.join(digits))
| true |
16e648083f1eeb744b4c93e8061e8a1669c42ce7 | Python | bglossner/prereq-class-builder | /Web_Scrape/setup_db.py | UTF-8 | 5,924 | 2.9375 | 3 | [] | no_license | import pprint
from pymongo import MongoClient
def insert_majors_collection(db):
    """Seed the majors collection with its initial documents."""
    def blank_major(mid, name):
        # Every major starts with the same empty requirement fields.
        return {
            "mid": mid,
            "Name": name,
            "Needed": [],
            "Support": [],
            "GEs": [],
            "Units": 0,
            "Concentrations": [],
        }

    majors = [
        blank_major("CSC", "Computer Science"),
        blank_major("MATH", "Mathematics"),
    ]
    # insert_many() writes all documents (rows) in one call; a single dict
    # would go through insert_one() instead.
    db.majors_collection.insert_many(majors)
def check_insertion_majors(db):
    """Print every major document, then look up CSC and MATH individually."""
    # find() with no filter yields every document in the collection.
    print("\nPRINTING ALL MAJORS")
    for doc in db.majors_collection.find():
        pprint.pprint(doc)
    # find_one() returns the first matching document (or None).
    for mid in ("CSC", "MATH"):
        print("\nQUERYING " + mid)
        pprint.pprint(db['majors_collection'].find_one({"mid": mid}))
def insert_minors_collection(db):
    """Seed the minors collection with its initial documents."""
    def blank_minor(mid, name):
        # Minors share the same empty requirement fields as majors.
        return {
            "mid": mid,
            "Name": name,
            "Needed": [],
            "Support": [],
            "GEs": [],
            "Units": 0,
            "Concentrations": [],
        }

    minors = [
        blank_minor("DATA", "Data Science"),
        blank_minor("MATH", "Mathematics"),
    ]
    # One insert_many() call for the whole batch.
    db.minors_collection.insert_many(minors)
def check_insertion_minors(db):
    """Print every minor document, then look up the DATA minor."""
    print("\nPRINTING ALL MINORS")
    for doc in db.minors_collection.find():
        pprint.pprint(doc)
    print("\nQUERYING DATA")
    pprint.pprint(db['minors_collection'].find_one({"mid": "DATA"}))
def insert_classes_collection(db):
    """Seed the classes collection with its initial documents."""
    def blank_class(cid, name):
        return {
            "cid": cid,
            "Name": name,
            "Prereqs": [],
            "Majors": [],  # should hold mid values
            "Terms_offered": [],
            "Units": 0,
            "USCP": False,
            "GWR": False,
        }

    classes = [
        blank_class("CSC_202", "Data Structures"),
        blank_class("MATH_143", "Calculus III"),
    ]
    # One insert_many() call for the whole batch.
    db.classes_collection.insert_many(classes)
def check_insertion_classes(db):
    """Print every class document, then look up CSC_202 and MATH_143."""
    print("\nPRINTING ALL CLASSES")
    for doc in db.classes_collection.find():
        pprint.pprint(doc)
    for cid in ("CSC_202", "MATH_143"):
        print("\nQUERYING " + cid)
        pprint.pprint(db['classes_collection'].find_one({"cid": cid}))
def insert_general_ed_collection(db):
    """Seed the general-education collection with its initial documents."""
    def blank_area(gid):
        return {"gid": gid, "classes": [], "majors": []}

    general_ed = [blank_area("A1"), blank_area("A2")]
    # One insert_many() call for the whole batch.
    db.general_ed_collection.insert_many(general_ed)
def check_insertion_general_ed(db):
    """Print every general-ed document, then look up area A1."""
    print("\nPRINTING ALL GENERAL ED")
    for doc in db.general_ed_collection.find():
        pprint.pprint(doc)
    print("\nQUERYING A1")
    pprint.pprint(db['general_ed_collection'].find_one({"gid": "A1"}))
# Connect to the local MongoDB instance.
client = MongoClient()

# Rebuild the database from scratch on every run.
existing = client.list_database_names()
dbname = "prereq"
if dbname in existing:
    print("Dropping database", dbname)
    client.drop_database(dbname)
prereq_db = client[dbname]

# Seed every collection.
insert_majors_collection(prereq_db)
insert_minors_collection(prereq_db)
insert_classes_collection(prereq_db)
insert_general_ed_collection(prereq_db)

# Sanity checks -- caution: very long output.
check_insertion_majors(prereq_db)
check_insertion_minors(prereq_db)
check_insertion_classes(prereq_db)
check_insertion_general_ed(prereq_db)

print("\nAll Collections:")
pprint.pprint(prereq_db.list_collection_names())
| true |
8fa3de55149a2d2af8fe52f1388af9a4c8e95a17 | Python | alexvogel/algorithms-and-data-structures | /algorithmic_toolbox/4-2_majority_element.py | UTF-8 | 9,215 | 3.328125 | 3 | [] | no_license | # Uses python3
import argparse
import datetime
import random
import sys
# def get_majority_element_fast(nrcall, a, left, right):
# # if array is only 2 elements long, the base case is reached
# # and a majority element is determined
# if left == right:
# print('call ' + str(nrcall) + ': ' + str(left) + ' ' + str(right) )
# print('return ' + str(nrcall) + ': ' + str(a[left]))
# print('-----')
# return a[left]
# if left + 1 == right:
# if a[left] == a[right]:
# print('call ' + str(nrcall) + ': ' + str(left) + ' ' + str(right) )
# print('return A2 ' + str(nrcall) + ': ' + str(a[left]))
# print('-----')
# return a[left]
# else:
# print('call ' + str(nrcall) + ': ' + str(left) + ' ' + str(right) )
# print('return ' + str(nrcall) + ': -1')
# print('-----')
# return -1
# # in every other case
# # split array in half and determine majority on each halves
# mid = int(left + (right - left) / 2)
# length_left_halve = mid - left + 1
# length_right_halve = right - (mid + 1) + 1
# majority_left_halve = get_majority_element_fast(nrcall+1, a, left, mid)
# majority_right_halve = get_majority_element_fast(nrcall+1, a, mid+1, right)
# print('left_length is ' + str(length_left_halve))
# print('right_length is ' + str(length_right_halve))
# if length_left_halve > length_right_halve:
# print('call ' + str(nrcall) + ': ' + str(left) + ' ' + str(right) )
# print('return X: ' + str(nrcall) + ': ' + str(majority_left_halve))
# print('-----')
# return majority_left_halve
# elif length_right_halve > length_left_halve:
# print('call ' + str(nrcall) + ': ' + str(left) + ' ' + str(right) )
# print('return Y: ' + str(nrcall) + ': ' + str(majority_right_halve))
# print('-----')
# return majority_right_halve
# if majority_left_halve == -1 and majority_right_halve >= 0:
# return majority_right_halve
# elif majority_right_halve == -1 and majority_left_halve >= 0:
# return majority_left_halve
# if majority_left_halve == majority_right_halve:
# print('call ' + str(nrcall) + ': ' + str(left) + ' ' + str(right) )
# print('return B: ' + str(nrcall) + ': ' + str(majority_left_halve))
# print('-----')
# return majority_left_halve
# else:
# print('call ' + str(nrcall) + ': ' + str(left) + ' ' + str(right) )
# print('return C: ' + str(nrcall) + ': -1')
# print('-----')
# return -1
def get_majority_element_naive(a, left, right):
    """Return 1 if a[left..right] contains a strict majority element, else -1.

    A majority element occurs strictly more than half the slice length.
    Fixed: the original ignored `left`/`right` (always scanning the whole
    list) and crashed with ValueError on empty input.
    """
    size = right - left + 1
    if size <= 0:
        return -1
    counts = {}
    for value in a[left:right + 1]:
        counts[value] = counts.get(value, 0) + 1
    return 1 if max(counts.values()) > size // 2 else -1
if __name__ == '__main__':

    version = '0.1'
    date = '2018-03-18'

    parser = argparse.ArgumentParser(description='majority element',
        epilog='author: alexander.vogel@prozesskraft.de | version: ' + version + ' | date: ' + date)
    # (The --stresstest/--fast/--naive options and the divide-and-conquer
    # implementation were fully commented out; the dead code was removed.)
    args = parser.parse_args()

    # Input format: the first token is n, followed by the n sequence elements.
    raw = sys.stdin.read()
    n, *a = map(int, raw.split())
    # Print 1 when a strict majority element exists, 0 otherwise.
    if get_majority_element_naive(a, 0, n - 1) != -1:
        print(1)
    else:
        print(0)
# original programming assignment
'''
### 4.2 Majority Element
#### Problem Introduction
Majority rule is a decision rule that selects the alternative which has a majority, that is, more than half the votes. Given a sequence of elements a_1 , a_2 , . . . , a_n , you would like to check whether it contains an element that appears more than n/2 times. A naive way to do this is the following.
```
MajorityElement(a_1 , a_2 , . . . , a_n ):
for i from 1 to n:
currentElement ← a_i
count ← 0
for j from 1 to n:
if a j = currentElement:
count ← count + 1
if count > n/2:
return a_i
return “no majority element”
```
The running time of this algorithm is quadratic. Your goal is to use the divide-and-conquer technique to
design an O(n log n) algorithm.
#### Problem Description
**Task:** The goal in this code problem is to check whether an input sequence contains a majority element.
**Input Format:** The first line contains an integer n, the next one contains a sequence of n non-negative
integers a_0, a_1, . . . , a_n−1 .
**Constraints:** 1 ≤ n ≤ 10^5 ; 0 ≤ a_i ≤ 10^9 for all 0 ≤ i < n.
**Output Format:** Output 1 if the sequence contains an element that appears strictly more than n/2 times,
and 0 otherwise.
#### Sample 1
*Input:*
5
2 3 9 2 2
*Output:*
1
2 is the majority element.
#### Sample 2
*Input:*
4
1 2 3 4
*Output:*
0
There is no majority element in this sequence.
#### Sample 3
*Input:*
4
1 2 3 1
*Output:*
0
This sequence also does not have a majority element (note that the element 1 appears twice and hence
is not a majority element).
#### What To Do
This problem can be solved by the divide-and-conquer algorithm in time O(n log n). Indeed, if a sequence of length n contains a majority element, then the same element is also a majority element for one of its halves. Thus, to solve this problem you first split a given sequence into halves and make two recursive calls. Do you see how to combine the results of two recursive calls?
It is interesting to note that this problem can also be solved in O(n) time by a more advanced (non-divide-and-conquer) algorithm that just scans the given sequence twice.
#### Implementation in Python
'''
| true |
451d9a110d16313f4fc22b224cd395a3c3022655 | Python | mtlam/ASTP-720_F2020 | /HW1/interpolation.py | UTF-8 | 971 | 3.65625 | 4 | [
"BSD-3-Clause"
] | permissive | '''
Michael Lam
ASTP-720, Spring 2020
Set of functions to perform interpolation
Should only do linear interpolation for now
as we haven't discussed matrices yet, sorry
'''
def linear_interpolator(xs, ys):
    """
    Linear interpolation of points (x, y)

    Parameters
    ----------
    xs : list, np.ndarray
        List of x values (assumed monotonically increasing)
    ys : list, np.ndarray
        List of y values

    Returns
    -------
    return_function : callable
        Function mapping a scalar xp to its linearly interpolated value,
        or None when xp lies outside the range covered by xs
    """
    def return_function(xp):
        """ Return the interpolated value at xp (None if out of range) """
        for i in range(len(xs) - 1):
            # Locate the bracketing interval [x0, x1] for this iteration
            x0 = xs[i]
            x1 = xs[i + 1]
            if not (x0 <= xp <= x1):
                continue
            y0 = ys[i]
            y1 = ys[i + 1]
            # Fractional distance of xp across the interval
            xd = (xp - x0) / float(x1 - x0)
            return y0 * (1 - xd) + y1 * xd
        return None
    return return_function
| true |
21192e85e0ebe4e2dcb7c63612438ede5a322c50 | Python | zefciu/szkolenie-vavatech-6-2018 | /blackjack/blackjack/tests.py | UTF-8 | 695 | 2.75 | 3 | [] | no_license | from nose.tools import assert_equal, assert_true, assert_false
from blackjack.hand import Hand, Card
def test_hard_hand():
    # 7 + 2 + 10 with no ace: a hard 19, and not a blackjack.
    cards = [
        Card('7', 'H'),
        Card('2', 'D'),
        Card('K', 'S'),
    ]
    result = Hand(cards)
    assert_equal(result.value, 19)
    assert_false(result.soft)
    assert_false(result.blackjack)
def test_soft_hand():
    # Ace counted high: a soft 16, and not a blackjack.
    cards = [
        Card('A', 'H'),
        Card('5', 'D'),
    ]
    result = Hand(cards)
    assert_equal(result.value, 16)
    assert_true(result.soft)
    assert_false(result.blackjack)
def test_blackjack():
    # Ace + ten-valued card in two cards: a natural blackjack (soft 21).
    cards = [
        Card('A', 'H'),
        Card('Q', 'D'),
    ]
    result = Hand(cards)
    assert_equal(result.value, 21)
    assert_true(result.soft)
    assert_true(result.blackjack)
| true |
7abb3307ecb3707e3e92534f6e40b9f63bb08ea4 | Python | noatgnu/dotDNA | /dot_dna/primer.py | UTF-8 | 2,141 | 3.03125 | 3 | [
"MIT"
] | permissive | from xml.etree.ElementTree import Element
class Primer:
    """A PCR primer: name, sequence, binding site and melting temperature.

    Instances are populated either from an XML <Primer> element
    (from_element) or from a plain sequence string (from_string).
    """

    def __init__(self):
        self.strand = "0"      # "0" = forward strand, anything else = reverse (see to_dict)
        self.start = 0         # binding-site start coordinate
        self.stop = 0          # binding-site end coordinate
        self.seq = ""          # primer sequence text
        self.temperature = 0   # melting temperature
        self.name = ""
        self.composition = {}  # per-base counts for A/T/G/C, maintained by set_seq()
        self.length = 0        # len(seq), maintained by set_seq()

    def from_element(self, element: Element):
        """Populate this primer from a <Primer> XML element.

        Reads the ``name``/``sequence`` attributes and, from each child
        binding-site element that is NOT marked ``simplified``, the
        ``location`` ("start-stop"), ``meltingTemperature`` and
        ``boundStrand`` attributes.
        NOTE(review): assumes a SnapGene-style XML layout -- TODO confirm
        against the files this is actually fed.
        """
        if element.tag == "Primer":
            for k in element.attrib:
                if k == "name":
                    self.name = element.attrib[k]
                elif k == "sequence":
                    self.set_seq(element.attrib[k])
            for bindingSite in element:
                if "simplified" not in bindingSite.attrib:
                    # Location is encoded as "start-stop".
                    location = bindingSite.attrib["location"].split("-")
                    self.start = int(location[0])
                    self.stop = int(location[1])
                    self.temperature = int(bindingSite.attrib["meltingTemperature"])
                    self.strand = bindingSite.attrib["boundStrand"]

    def from_string(self, seq):
        """Populate this primer from a bare sequence; Tm is computed."""
        self.set_seq(seq)
        self.temperature = self.calculate_melting_temp()

    def calculate_melting_temp(self):
        """Estimate the melting temperature from base composition.

        Sequences longer than 13 nt use the basic GC-content formula
        64.9 + 41 * (G + C - 16.4) / (A + T + G + C); shorter ones use
        2*(A+T) + 4*(G+C) (the Wallace rule).  A long sequence with no
        upper-case A/T/G/C characters would divide by zero here.
        """
        if len(self.seq) > 13:
            return 64.9 + 41*(self.composition["G"]+self.composition["C"]-16.4)/(self.composition["G"]+self.composition["C"]+self.composition["A"]+self.composition["T"])
        else:
            return 2*(self.composition["A"]+self.composition["T"]) + 4*(self.composition["G"]+self.composition["C"])

    def to_dict(self):
        """Return a plain-dict summary of this primer for tabular display."""
        return {"Position": "{}-{}".format(self.start, self.stop), "Name": self.name, "Direction": "Forward" if self.strand == "0" else "Reverse", "Sequence": self.seq, "Temperature": self.temperature}

    def __repr__(self):
        # NOTE(review): divides by self.length, so repr() of a primer with an
        # empty sequence raises ZeroDivisionError.
        l = "{} {} {}-{} T:{:.2f}".format(self.strand, self.name, self.start, self.stop, self.temperature)
        for i in "ATGC":
            l += " {}:{:.2f}%".format(i, self.composition[i]/self.length*100)
        return l

    def set_seq(self, seq):
        """Store the sequence and refresh length and base composition.

        Only upper-case A/T/G/C are counted -- lower-case input yields
        all-zero composition (presumably sequences arrive upper-case;
        confirm with callers).
        """
        self.seq = seq
        self.composition = {}
        self.length = len(seq)
        for i in "ATGC":
            self.composition[i] = self.seq.count(i)
ee417fba0903f33e3acbc5d15290e22f5e350cb5 | Python | biagioboi/socio-technical-congruence-optimizer | /ExtractDevelopersComunicationInfo.py | UTF-8 | 5,841 | 2.765625 | 3 | [] | no_license | import requests
import matplotlib.pyplot as plt
import networkx as nx
# One shared, authenticated session reused for every GitHub API request below.
session = requests.Session()
# NOTE(review): placeholder credentials -- replace with a real GitHub username
# and personal-access token, and keep them out of source control.
session.auth = ("username", "token")
class ExtractDevelopersCommunicationInfo:
    """Builds a developer-communication graph for one GitHub repository.

    Communication is inferred from issue comments: two contributors are
    linked (with a weighted edge) when they commented on the same issue.
    """

    def __init__(self, repository_name):
        # "owner/repo", as used in GitHub REST API paths.
        self._repository_name = repository_name
        self._graph = nx.Graph()
        # {author_name: {other_author_name: interaction_count}}
        self._contributors = dict()
        # {login: display_name}; replaced by a dict in get_developers().
        self._developers_list = []

    def get_developers(self):
        """Fetch all repository contributors (paged, 100 per page) and cache
        a {login: display_name} mapping on the instance."""
        cont = 1
        to_return = dict()
        while True:
            devs = session.get(
                'https://api.github.com/repos/' + self._repository_name + '/contributors',
                headers={'content-type': 'application/vnd.github.v3+json'},
                params={'page': cont, 'per_page': 100})
            if devs.status_code != 200:
                raise ApiError(devs.status_code)
            else:
                # An empty page means we have walked past the last one.
                if len(devs.json()) == 0:
                    break
                for x in devs.json():
                    # One extra request per contributor to resolve the
                    # human-readable name from the login.
                    dev_detail = session.get(
                        'https://api.github.com/users/' + x['login'],
                        headers={'content-type': 'application/vnd.github.v3+json'})
                    if dev_detail.status_code != 200:
                        raise ApiError(dev_detail.status_code)
                    else:
                        y = dev_detail.json()
                        to_return[y['login']] = y['name']
                cont += 1
        self._developers_list = to_return

    def get_communications_between_contributors(self):
        """Accumulate pairwise interaction counts across all issues, draw the
        graph, and return the {name: {name: count}} mapping."""
        contributors_for_issue = self.get_contributors_for_issue(self.get_issues())
        contributors = dict()
        # for each item we have an issue with related devs that took part of issue with related interactions
        for issue_id, devs in contributors_for_issue.items():
            if len(devs) > 1:  # if empty or equal to 1, we don't consider it
                for k, v in devs.items():  # for each dev
                    if k not in contributors:  # if we haven't seen this dev, we initialize him
                        contributors[k] = dict()
                    for k1, v1 in devs.items():
                        if k1 == k:
                            continue
                        # k is credited with k1's comment count on this issue.
                        if k1 not in contributors[k]:
                            contributors[k][k1] = 0
                        contributors[k][k1] += v1
        self._contributors = contributors
        return self.construct_graph()

    def construct_graph(self):
        """Render the weighted communication graph with matplotlib and return
        the underlying contributors mapping."""
        y = self._graph
        for contributor, devs in self._contributors.items():
            for dev, val in devs.items():
                y.add_edge(contributor, dev, weight=val)
        pos = nx.spring_layout(y)
        nx.draw_networkx_nodes(y, pos, node_size=70)
        nx.draw_networkx_edges(y, pos, edgelist=y.edges, edge_color="b", style="solid")
        nx.draw_networkx_labels(y, pos, font_size=5, font_family="sans-serif")
        plt.axis("off")
        plt.show()
        # create the adjacent matrix
        # NOTE(review): `matrix` is built but never used or returned -- dead
        # code, kept as-is.
        matrix = []
        for contributor in self._contributors:
            matrix_contributor = []
            for contributor2, devs in self._contributors.items():
                if contributor in self._contributors[contributor2]:
                    matrix_contributor.append(self._contributors[contributor2][contributor])
                else:
                    matrix_contributor.append(0)
            matrix.append(matrix_contributor)
        return self._contributors

    def get_issues(self):
        """Fetch every issue of the repository (open and closed) and return
        {issue_number: comments_url}."""
        # It's possible to catch only one page for time, and for each page at most 100 issues
        # so we should consider at least 10 pages to have a good dataset
        cont = 1
        to_return = dict()
        while True:
            issues = session.get(
                'https://api.github.com/repos/' + self._repository_name + '/issues',
                headers={'content-type': 'application/vnd.github.v3+json'},
                params={'page': cont, 'per_page': 100, 'state': 'all'})
            if issues.status_code != 200:
                raise ApiError(issues.status_code)
            else:
                if len(issues.json()) == 0:
                    break
                for x in issues.json():
                    to_return[x['number']] = x['comments_url']
                cont += 1
        return to_return

    def get_contributors_for_issue(self, comments_urls):
        """Count, per issue, how many comments each known contributor left.

        Returns {issue_number: {display_name: comment_count}}, considering
        only comments whose author_association is "CONTRIBUTOR" and whose
        login appears in the cached developers mapping.
        """
        to_return = dict()
        self.get_developers()
        # NOTE(review): `cont` only counts processed issues and is never read.
        cont = 0
        for k, v in comments_urls.items():
            cont += 1
            to_return[k] = dict()
            comments = session.get(v,
                                   headers={'content-type': 'application/vnd.github.v3+json'})
            if comments.status_code != 200:
                raise ApiError(comments.status_code)
            else:
                for item in comments.json():
                    if item['author_association'] == "CONTRIBUTOR":
                        # Check if the developer is included into the contributors list
                        if item['user']['login'] not in self._developers_list:
                            continue
                        if item['user']['login'] in to_return[k]:
                            to_return[k][item['user']['login']] = to_return[k][item['user']['login']] + 1
                        else:
                            to_return[k][item['user']['login']] = 1
        # Re-key the per-issue counts from login to display name.
        # (sic: "retrun" typo kept to avoid touching code in a doc-only pass)
        real_to_retrun = dict()
        for k, v in to_return.items():
            real_to_retrun[k] = dict()
            for keydev, number in v.items():
                real_to_retrun[k][self._developers_list[keydev]] = number
        return real_to_retrun
class ApiError(Exception):
    """Raised when a GitHub API call returns a non-200 status code."""

    def __init__(self, status):
        # HTTP status code of the failed request.
        self.status = status

    def __str__(self):
        return "APIError: status={}".format(self.status)
| true |
d57b0955336a07a87bbb4a41e038289a642f81d8 | Python | fabriciosilmeida/machineLearning | /naive_bayes.py | UTF-8 | 669 | 3.640625 | 4 | [] | no_license | #Codigo de machine learning
from sklearn.naive_bayes import GaussianNB
#Medidor de precisão
from sklearn.metrics import accuracy_score
def naiveBayes(X_train, Y_train, X_test, Y_test, predict):
    """Train a Gaussian Naive Bayes classifier, report its test accuracy and
    print the prediction for one user-supplied sample."""
    # Build and fit the classifier.
    model = GaussianNB()
    model.fit(X_train, Y_train)
    # Accuracy on the held-out split, truncated to a whole percentage.
    test_predictions = model.predict(X_test)
    acuracy = int(accuracy_score(Y_test, test_predictions) * 100)
    # Classify the single sample passed in via `predict`.
    predict = model.predict([predict])
    # Report (messages intentionally kept in Portuguese).
    print("")
    print("Algoritimo: Naive Bayes")
    print("Precisão do treinamento: {}%".format(acuracy))
    print("Previsão: {} \n".format(predict))
d46bbdc44e85b05ea952daf9ffdf8541b4d12f93 | Python | muhammads97/CNN_codelab_tutorial_mnist | /neural_network/init.py | UTF-8 | 961 | 2.5625 | 3 | [] | no_license | import tensorflow as tf
import math
def get_placeholders(input_shape, output_shape, lr_decay=False, dropout=False):
    """Create input/label placeholders plus optional step and dropout-keep
    placeholders.

    Returns (x, Y), optionally extended with `step` (when lr_decay) and
    `keep` (when dropout).
    """
    x = tf.placeholder(tf.float32, input_shape)
    Y = tf.placeholder(tf.float32, output_shape)
    if lr_decay and dropout:
        step = tf.placeholder(tf.float32)
        keep = tf.placeholder(tf.float32)
        return x, Y, step, keep
    elif lr_decay:
        step = tf.placeholder(tf.float32)
        return x, Y, step
    elif dropout:
        keep = tf.placeholder(tf.float32)
        return x, Y, keep
    # Fixed: this branch previously returned the undefined names `step` and
    # `keep`, raising NameError whenever both flags were False.
    return x, Y
def get_Wb(shape, random_type="normal"):
    """Create a (weights, biases) variable pair for a layer of `shape`.

    Weights are drawn from a truncated normal (stddev 0.1); biases start
    at 0.1 (ones / 10).
    """
    if random_type == "normal":
        w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
        b = tf.Variable(tf.ones([shape[-1]]) / 10)
        return w, b
    # Fixed: unsupported initializers previously fell through and silently
    # returned None.
    raise ValueError("unsupported random_type: {}".format(random_type))
def get_lr(start, step=False):
    """Return a constant learning rate when no step tensor is supplied,
    otherwise an exponentially decaying schedule offset by 1e-4."""
    if not step:
        return start
    return 0.0001 + tf.train.exponential_decay(start, step, 2000, 1 / math.e)
f1e6cb4424e4409b4cfe274ba82cc2cb2200379c | Python | clockworkengineer/Constrictor | /FPE/plugins/fileannouncer_handler.py | UTF-8 | 1,121 | 2.75 | 3 | [
"MIT"
] | permissive | """Simple file announcer example plugin handler.
"""
import pathlib
import logging
from core.constants import CONFIG_SOURCE, CONFIG_NAME
from core.interface.ihandler import IHandler
from core.config import ConfigDict
from core.factory import Factory
from core.handler import Handler
class FileAnnouncerHandler(IHandler):
    """File Announcer: logs the name of any file copied into the watch folder.
    """

    def __init__(self, handler_config: ConfigDict) -> None:
        """Copy handler config and setup source directory.
        """
        self.handler_config = handler_config.copy()
        Handler.setup_path(self.handler_config, CONFIG_SOURCE)

    def process(self, source_path: pathlib.Path) -> bool:
        """Print out name of any file copied into watch folder.

        Returns True on success, False when an OSError occurred.
        """
        try:
            logging.info("File %s.", source_path)
            return True
        except OSError as error:
            logging.error("Error in handler %s : %s",
                          self.handler_config[CONFIG_NAME], error)
            # Fixed: previously fell through and returned None despite the
            # declared bool return type.
            return False
def register() -> None:
    """Register plugin as a watcher handler.
    """
    # Expose this handler to the plugin factory under the "FileAnnouncer" key.
    Factory.register("FileAnnouncer", FileAnnouncerHandler)
| true |
013ef1e923e910b1edf6d4a32822f61278ee6a42 | Python | huli/disaster-response-pipeline | /models/test_train_classifier.py | UTF-8 | 363 | 2.796875 | 3 | [
"MIT"
] | permissive | import pytest
from train_classifier import tokenize
def test_tokenize_sentence():
    # Stopwords and punctuation are stripped; only content words remain.
    result = tokenize('Hi, this is my first tweet')
    assert result == ['first', 'tweet']
def test_tokenize_remove_special_chars():
    # Non-alphanumeric garbage is dropped entirely.
    result = tokenize('This is absolute %&ç*%"*%ç')
    assert result == ['this', 'absolute']
def test_tokenize_remove_stopwords():
    # A sentence made only of stopwords tokenizes to nothing.
    result = tokenize('those am is')
    assert result == []
ca582ed733e609dae7a45cb927cc78240aad7de3 | Python | bennettgberg/jet_finding_integerSW | /coe_to_hex.py | UTF-8 | 1,429 | 2.921875 | 3 | [] | no_license |

import math

# This script will convert a coe file (in binary format) to a usable
# hexadecimal file for input to C++ emulation.
# NOTE: Python 2 script (print statement near the end).

nfibers = 12  # how many fibers have data in the coe file
nphi = 27  # how many total phi sectors there are (zero out unused ones)
wordlength = 100  # bits per fiber word
tpe = 24  # tracks per event (for each fiber)
nevents = 161  # how many events to make inputs for

coe = open("vcu118_input_patterns.coe", "r")
# read info lines at top of file
for i in range(10):
    coe.readline()

for event in range(nevents):
    for track in range(tpe):
        # One .coe line holds the concatenated binary words of all fibers.
        all_tracks = coe.readline()
        for phi in range(nphi):
            if phi < nfibers:
                # Convert this fiber's wordlength-bit slice to hex, one
                # nibble (4 bits) at a time; str(hexnum)[2] drops "0x".
                data = ""
                for i in range(phi*wordlength, (phi+1)*wordlength, 4):
                    hexnum = hex(int(all_tracks[i:i+4], 2))
                    # print "hexnum: " + str(hexnum)
                    data = data + str(hexnum)[2]
            else:
                # Unused phi sectors are zero-filled.
                # NOTE(review): this zero word is wordlength (100) chars long
                # while the hex words above are wordlength/4 (25) chars --
                # possibly intended to be wordlength/4; confirm downstream.
                zlist = ['0' for i in range(wordlength)]
                # print str(zlist)
                data = "".join(zlist)
            fname = "phi" + str(phi) + ".dat"
            if event == 0 and track == 0:
                let = 'w'
            else:
                let = 'a'  # if not first time opening file, append to it (don't overwrite)
            phifile = open(fname, let)
            phifile.write("0x" + data + "\n")
            phifile.close()
    # now write 0s to signify end of event.
    for phi in range(nphi):
        fname = "phi" + str(phi) + ".dat"
        phifile = open(fname, 'a')
        phifile.write("0x" + "".join(['0' for i in range(int(math.ceil(wordlength/4)))]) + "\n")

print "Data written successfully"
coe.close()
cfe22926250eb27761226d7a41de196f8380d633 | Python | syfantid/CC-Labs-Solutions | /Lab-2/WordCountTensorFlow_2.py | UTF-8 | 464 | 3.09375 | 3 | [] | no_license | import nltk
# Fetch the "punkt" tokenizer models required by nltk.word_tokenize below.
nltk.download('punkt')
import re
from collections import Counter
def get_tokens_no_punct():
    """Read the book text, lowercase it, strip punctuation and tokenize."""
    with open('FirstContactWithTensorFlow.txt', 'r') as source:
        raw_text = source.read()
    lowered = raw_text.lower()
    # Drop every character that is not a word character or whitespace.
    cleaned = re.sub(r'[^\w\s]', '', lowered)
    return nltk.word_tokenize(cleaned)
word_list = get_tokens_no_punct()
frequencies = Counter(word_list)
# Ten most frequent tokens, then the total token count.
print(frequencies.most_common(10))
print(sum(frequencies.values()))
01565a24557cc9732211d08f210c641b8eeb861b | Python | nansencenter/DAPPER | /examples/basic_1.py | UTF-8 | 3,465 | 3 | 3 | [
"MIT"
] | permissive | # ## Illustrate usage of DAPPER to (interactively) run a synthetic ("twin") experiment.
# #### Imports
# <b>NB:</b> On <mark><b>Google Colab</b></mark>,
# *replace* `%matplotlib notebook` (right below) by\
# `!python -m pip install git+https://github.com/nansencenter/DAPPER.git`

# %matplotlib notebook
from mpl_tools import is_notebook_or_qt as nb

import dapper as dpr
import dapper.da_methods as da

# #### Load experiment setup: the hidden Markov model (HMM)
from dapper.mods.Lorenz63.sakov2012 import HMM

# #### Generate the same random numbers each time this script is run
dpr.set_seed(3000)

# #### Simulate synthetic truth (xx) and noisy obs (yy)
HMM.tseq.T = 30  # shorten experiment
xx, yy = HMM.simulate()

# #### Specify a DA method configuration ("xp" is short for "experiment")
# xp = da.OptInterp()
# xp = da.Var3D()
# xp = da.ExtKF(infl=90)
xp = da.EnKF('Sqrt', N=10, infl=1.02, rot=True)
# xp = da.PartFilt(N=100, reg=2.4, NER=0.3)

# #### Assimilate yy, knowing the HMM; xx is used to assess the performance
xp.assimilate(HMM, xx, yy, liveplots=not nb)

# #### Average the time series of various statistics
# print(xp.stats)  # => long printout
xp.stats.average_in_time()

# #### Print some of these time-averages
# print(xp.avrgs)  # => long printout
print(xp.avrgs.tabulate(['rmse.a', 'rmv.a']))

# #### Replay liveplotters
xp.stats.replay(
    # speed=.6  # `speed` does not work in notebooks
)

# #### Further diagnostic plots
# (only when running interactively in a notebook/Qt session)
if nb:
    import dapper.tools.viz as viz
    viz.plot_rank_histogram(xp.stats)
    viz.plot_err_components(xp.stats)
    viz.plot_hovmoller(xx)

# #### Explore objects
if nb:
    print(xp)

if nb:
    print(HMM)

# #### Exercise: Why are the replay plots not as smooth as the liveplot?
# *Hint*: provide the keyword `store_u=True` to `assimilate()` to avoid this.

# #### Exercise: Why does the replay only contain the blue lines?

# #### Exercise: Try out each of the above DA methods (currently commented out).
# Next, remove the call to `replay`, and set `liveplots=False` above.
# Now, use the iterative EnKS (`iEnKS`), and try to find a parameter combination
# for it so that you achieve a lower `rmse.a` than with the `PartFilt`.
#
# *Hint*: In general, there is no free lunch. Similarly, not all methods work
# for all problems; additionally, methods often have parameters that require
# tuning. Luckily, in DAPPER, you should be able to find suitably tuned
# configuration settings for various DA methods *in the files that define the
# HMM*. If you do not find a suggested configuration for a given method, you
# will have to tune it yourself. The example script `basic_2` shows how DAPPER
# facilitates the tuning process, and `basic_3` takes this further.

# #### Exercise: Run an experiment for each of these models
# - LotkaVolterra
# - Lorenz96
# - LA
# - QG

# #### Exercise: Printing other diagnostics.
# - Create a new code cell, and copy-paste the above `print(...tabulate)`
#   command into it. Then, replace `rmse` by `err.rms`. This should yield
#   the same printout, as is merely an abbreviation of the latter.
# - Next, figure out how to print the time average *forecast (i.e. prior)* error
#   (and `rmv`) instead. Explain (in broad terms) why the values are larger than
#   for the *analysis* values.
# - Finally, instead of the `rms` spatial/field averages,
#   print the regular mean (`.m`) averages. Explain why `err.m` is nearly zero,
#   in contrast to `err.rms`.
| true |
ae12a77f13d323d60483dbe48e2b1b6e03fec925 | Python | philpot/chester | /chessboard.py | UTF-8 | 2,415 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python
from __future__ import print_function
from collections import MutableSequence
def fval(f):
    """Map a chess file letter 'a'-'h' (any case) to its 0-based column index.

    Raises KeyError for anything outside 'a'-'h'.
    """
    columns = dict(zip('abcdefgh', range(8)))
    return columns[f.lower()]
def canon_indices(indices):
    """Canonicalize (file, rank) into 0-based (x, y) board indices.

    *file* may be a 1-based integer or a letter 'a'-'h'; *rank* is a
    1-based integer. Raises ValueError when the square is off the board.

    Bug fix: the original validated the already 0-based indices with
    ``1 <= x <= 8`` / ``1 <= y <= 8``, which rejected file 'a'/1 and
    rank 1 entirely; valid 0-based indices are 0..7.
    """
    (f, r) = indices
    try:
        x = f - 1
    except TypeError:
        # Non-numeric file: interpret it as a letter.
        x = fval(f)
    y = r - 1
    if 0 <= x <= 7 and 0 <= y <= 7:
        return (x, y)
    else:
        raise ValueError
def impl_y(r):
    """Translate a 1-based rank into a 0-based row index.

    Raises ValueError when the rank is off the board.
    """
    row = r - 1
    if not 0 <= row <= 7:
        raise ValueError
    return row
def impl_x(f):
    """Translate a file (1-based int or letter 'a'-'h') into a 0-based column.

    Raises ValueError when the file is off the board.
    """
    try:
        col = f - 1
    except TypeError:
        # Non-numeric file: interpret it as a letter.
        col = fval(f)
    if not 0 <= col <= 7:
        raise ValueError
    return col
class Chessboard(MutableSequence):
    """8x8 chess board holding one encoded piece byte per square.

    Piece byte layout (see table below): bit 7 = colour (1 = black),
    bits 6-4 = piece type (1 pawn .. 6 king), bits 3-1 = piece ordinal,
    bit 0 unused.

    Indexing accepts (file, rank) pairs where *file* is a letter or a
    1-based integer and *rank* is 1-based (see canon_indices).

    Bug fix: the starting position held 82 (a *second white queen* per
    the encoding table) on e1 instead of 96 (the white king); black's
    e8 correctly held 224 (black king), so white's square is corrected
    to 96 for symmetry.
    """
    def __init__(self):
        # Starting position, white's back rank first (rank 1 = row 0).
        self.b = [[ 64, 32, 48, 80, 96, 50, 34, 66],
                  [ 16, 18, 20, 22, 24, 26, 28, 30],
                  [ 0, 0, 0, 0, 0, 0, 0, 0],
                  [ 0, 0, 0, 0, 0, 0, 0, 0],
                  [ 0, 0, 0, 0, 0, 0, 0, 0],
                  [ 0, 0, 0, 0, 0, 0, 0, 0],
                  [144, 146, 148, 150, 152, 154, 156, 158],
                  [192, 160, 176, 208, 224, 178, 162, 194]]
        # pw 0 001 000 0 .. 0 001 111 0   16, 18, 20, 22, 24, 26, 28, 30
        # pb 1 001 000 0 .. 1 001 111 0   144, 146, 148, 150, 152, 154, 156, 158
        # Nw 0 010 000 0 .. 0 010 001 0   32, 34
        # Nb 1 010 000 0 .. 1 010 001 0   160, 162
        # Bw 0 011 000 0 .. 0 011 001 0   48, 50
        # Bb 1 011 000 0 .. 1 011 001 0   176, 178
        # Rw 0 100 000 0 .. 0 100 001 0   64, 66
        # Rb 1 100 000 0 .. 1 100 001 0   192, 194
        # Qw 0 101 000 0                  80
        # Qb 1 101 000 0                  208
        # Kw 0 110 000 0                  96
        # Kb 1 110 000 0                  224
        super(Chessboard, self).__init__()

    def __getitem__(self, indices):
        """Return the piece byte at (file, rank)."""
        (x, y) = canon_indices(indices)
        return self.b[y][x]

    def __delitem__(self, i):
        # Required by MutableSequence; squares are never removed.
        pass

    def __setitem__(self, indices, val):
        """Store piece byte *val* at (file, rank)."""
        (x, y) = canon_indices(indices)
        self.b[y][x] = val

    def insert(self, i):
        # Required by MutableSequence; squares are never inserted.
        pass

    def __len__(self):
        """Number of ranks (always 8)."""
        return len(self.b)

    def rank(self, r):
        """Return the list of piece bytes on 1-based rank *r*."""
        return self.b[impl_y(r)]

    def file(self, f):
        """Return the list of piece bytes on file *f* (letter or 1-based int)."""
        x = impl_x(f)
        return [row[x] for row in self.b]
| true |
5b97d7692746b1d226213e68ccb90a2bcddc7525 | Python | sweetMegan/ItemCFDemo | /python版/用户协同过滤改进.py | UTF-8 | 3,978 | 2.75 | 3 | [] | no_license | # encoding: utf-8
import math
import operator
def UserSimilarity(train):
    """Build a user-user cosine-style similarity matrix W from implicit feedback.

    *train* maps each user to the list of items they interacted with.
    Popular items are down-weighted by 1/log(1 + #users_of_item)
    (the "UserCF-IIF" improvement).  Python 2 code: uses print
    statements and dict.has_key.
    """
    item_Users = dict()
    for u, items in train.items():
        for i in items:
            # Build the inverted index: item -> set of users who touched it.
            if i not in item_Users.keys():
                item_Users[i] = set()
            item_Users[i].add(u)
    print item_Users #{'a': set(['A', 'B']), 'c': set(['B', 'D']), 'b': set(['A', 'C']), 'e': set(['C', 'D']), 'd': set(['A', 'D'])}
    # Build the co-occurrence matrix C and the per-user item counts N.
    C = dict()
    N = dict()
    for i, users in item_Users.items():
        for user in users:
            # N[user] counts how many items this user has interacted with.
            if N. has_key(user) == False:
                N[user] = 0
            N[user] += 1 #N["A"] = 3, N["B"] = 2, N["C"] = 2, N["D"] = 3
            # Accumulate co-occurrence with every other user of item i.
            for v in users:
                #
                # 1.C["A"] = {
                #
                # }
                #
                if C. has_key(user) == False:
                    C[user] = dict()
                # 2.C["A"]["A"] = 0
                if C[user]. has_key(v) == False:
                    C[user][v] = 0
                if user == v:
                    # Skip self-similarity: C["A"]["A"] stays 0.
                    continue
                # C["A"]["B"] = 1
                # Penalize popular items shared by user and v: the more users
                # an item has, the less it says about their similarity.
                C[user][v] += 1 / math.log(1+len(users))
    print C
    # Similarity matrix W[u][v] = C[u][v] / sqrt(N[u] * N[v]):
    #      u
    # v        "A"    "B"   "C"    "D"
    # "A"     value
    # "B"
    # "C"
    # "D"
    W = dict()
    for u, related_users in C.items():
        for v, value in related_users.items():
            if W. has_key(u) == False:
                W[u] = dict()
            if W[u]. has_key(v) == False:
                W[u][v] = 0
            # Normalize co-occurrence by the geometric mean of activity.
            W[u][v] = value/math.sqrt(N[u]*N[v])
    return W
# Toy data set: each user maps to the list of items they interacted with.
train = {"A": ["a", "b", "d"], "B": ["a", "c"], "C": ["b", "e"], "D": ["c", "d", "e"]}
w = UserSimilarity(train)
# The (up to) ten users most similar to "A", most similar first.
a = sorted(w["A"].items(), key=operator.itemgetter(1), reverse=True)[0:10]
print a
print "==========Recommend============="
# Recommend items to a user from the K most similar users.
def Recommend(user,train,W,K):
    """Score unseen items for *user* from their K nearest neighbours.

    NOTE(review): the parameter W is never used -- the body reads the
    module-level `w` built above; pass-through works only because they
    are the same object in this script. Verify/rename.
    """
    rank = dict()
    interacted_items = train[user]
    for v, wuv in sorted(w[user].items(), key=operator.itemgetter(1), reverse=True)[0:K]:
        print v, wuv
        for i in train[v]:
            if i in interacted_items:
                # Never re-recommend something the user already has.
                continue
            # rvi is user v's interest in item i; with single-behaviour
            # implicit feedback all rvi == 1.  With explicit feedback
            # (e.g. likes) fetch the real rvi and multiply by wuv.
            # NOTE(review): rvi is assigned but the accumulation below
            # hard-codes `* 1` instead of `* rvi`.
            rvi = 1
            if rank. has_key(i) == False:
                rank[i] = 0
            rank[i] += wuv * 1
    # for i, rvi in train[v].items():
    #     if i in interacted_items:
    #         rank[i] += wuv * rvi
    # Sort candidate items by accumulated score, best first.
    res = sorted(rank.items(), key=operator.itemgetter(1), reverse=True)[0:10]
    return res
# Print recommendations for users "A" and "B" using the similarity matrix.
r = Recommend("A", train, w, 10)
print r
r = Recommend("B", train, w, 10)
print r
# /usr/local/Cellar/python@2/2.7.15_1/Frameworks/Python.framework/Versions/2.7/bin/python2.7 /Users/zhqmac/PycharmProjects/untitled3/用户协同过滤改进.py
# {'a': set(['A', 'B']), 'c': set(['B', 'D']), 'b': set(['A', 'C']), 'e': set(['C', 'D']), 'd': set(['A', 'D'])}
# {'A': {'A': 0, 'C': 0.9102392266268373, 'B': 0.9102392266268373, 'D': 0.9102392266268373}, 'C': {'A': 0.9102392266268373, 'C': 0, 'D': 0.9102392266268373}, 'B': {'A': 0.9102392266268373, 'B': 0, 'D': 0.9102392266268373}, 'D': {'A': 0.9102392266268373, 'C': 0.9102392266268373, 'B': 0.9102392266268373, 'D': 0}}
# ==========Recommend=============
# C 0.371603608184
# B 0.371603608184
# D 0.303413075542
# A 0.0
# [('c', 0.6750166837258342), ('e', 0.6750166837258342)]
# A 0.371603608184
# D 0.371603608184
# B 0.0
# [('d', 0.7432072163671103), ('b', 0.37160360818355515), ('e', 0.37160360818355515)]
| true |
64d4a261f685e75b699825dea4c373a471a62998 | Python | stchinn/astr-119 | /astr-119-hw-2/data_types.py | UTF-8 | 252 | 3.328125 | 3 | [
"MIT"
] | permissive | import numpy as np
i = 10
print(type(i))
a_i = np.zeros(i, dtype=int)
print(type(a_i))
print(type(a_i[0]))
#floats
x = 119.0
print(type(x))
y = 1.19e2
print(type(y))
z = np.zeros(i,dtype=float)
print(type(z))
print(type(z[0])) | true |
3b689be695f64c918996a9f6f3e783ad8b466076 | Python | sdss/autoscheduler | /python/web/lib/helpers.py | UTF-8 | 7,852 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""Helpers used in platewebapp2."""
from flask import current_app
import datetime
from petunia.lib import app_globals
import numpy as np
from sdss.utilities import convert
#from webhelpers import date, feedgenerator, html, number, misc, text
def filterPlates(plateList, evening_twilight, morning_twilight, nowDatetime, maxNumber=30, plugged=False):
    """Select up to *maxNumber* plate pointings observable tonight.

    Walks every pointing of every plate, dropping pointings that fall
    outside the 0-13 UTC display window, belong to completed tiles, or
    (for unplugged plates) are visible for less than ~1 hour at either
    twilight. Twilight arguments are decimal UTC hours.

    Returns a list of PlatePointing objects, at most *maxNumber* long.
    """
    # Accumulator for the PlatePointing objects that survive the cuts.
    platePointings = list()
    for plate in plateList:
        for platePointing in plate.plate_pointings:
            # Stop as soon as the quota is filled.
            if len(platePointings) >= maxNumber: return platePointings
            # PlatePointing.times returns a dict of UTC datetimes:
            #   'nominal' - nominal observing time for the plate
            #   'min'     - minimum acceptable observing time
            #   'max'     - maximum acceptable observing time
            # See platedb ModelClasses for details.
            platePointing_times = platePointing.times(nowDatetime)
            platePointing_times['begintime'] = convert.datetime2decHour(platePointing_times['min'])
            try:
                plateTileStatus = plate.tile.calculatedTileStatus()
            except: # Plate has no associated tile, or MARVELS / APOGEE
                plateTileStatus = "???"
            # Display window is 0 to 13 UTC: if both min and max fall
            # outside it, skip this pointing. (min > max, e.g. a plate
            # starting before the APOJD rollover, is handled below.)
            mintime = convert.datetime2decHour(platePointing_times['min'])
            maxtime = convert.datetime2decHour(platePointing_times['max'])
            current_app.logger.debug("ID, min, max | %d | %.3f | %.3f | %.3f | %.3f" % (plate.plate_id, mintime, maxtime, evening_twilight, morning_twilight))
            # Outside the display window: try the plate's next pointing.
            if mintime > 13 and maxtime > 13:
                continue
            # Tile complete: this whole plate is done.
            elif "Complete" in plateTileStatus:
                break
            if not plugged:
                # Drop plates that are not visible for very long.
                if maxtime - evening_twilight < 1.0: #- units: hours
                    current_app.logger.debug("WARNING: Drop plate %d with short evening visibility: %.f min" % (plate.plate_id, 60*(maxtime-evening_twilight)))
                    if maxtime - evening_twilight > 0.5: #- units: hours
                        current_app.logger.debug("plate %d observable for %.f minutes" % (plate.plate_id, 60*(maxtime-evening_twilight)))
                    break
                elif maxtime > morning_twilight and morning_twilight - mintime < 1.0:
                    current_app.logger.debug("WARNING: Drop plate %d with short morning visibility: %.f min" % (plate.plate_id, 60*(morning_twilight-mintime)))
                    break
            platePointings.append(platePointing)
    return platePointings
def platePointingObservabilityColors(platePointing, evening_twilight, morning_twilight, nowDatetime, hourTicker=False):
    """Compute the visibility-plot cell colour for each time bin of tonight.

    For each decimal-hour bin in [0, 14) UTC the colour is chosen as:
      - red when the pointing is within 3 degrees of zenith,
      - yellow when between 5 and 3 degrees of zenith,
      - white when the plate is not observable in that bin,
      - otherwise the colour of the plate's survey;
    twilight bins and (optionally) the current-time bin are overlaid last.

    Returns a dict mapping bin start (decimal hour) -> colour string.
    """
    plate = platePointing.plate
    platePointing_times = platePointing.times(nowDatetime)
    bgcolors = dict()
    for dec_hr in np.arange(0.0,14.0,app_globals.app_globals.bin_size):
        # Split the decimal hour into hour/minute for datetime.combine.
        hour = np.floor(dec_hr)
        mins = (dec_hr - hour) * 60.0
        #- Fill in default background colors
        if "Complete" in plate.calculatedCompletionStatus():
            bgcolors[dec_hr] = app_globals.app_globals.page_colors["completed_plugged_bg"]
        else:
            bgcolors[dec_hr] = "#FFFFFF"
        hrDatetime = datetime.datetime.combine(nowDatetime.date(),datetime.time(int(hour),int(mins),0))
        # Altitude of the pointing at this bin (APO site coordinates).
        pntg_alt, pntg_az = convert.raDec2AltAz(float(platePointing.pointing.center_ra), float(platePointing.pointing.center_dec), app_globals.app_globals.apo_lat, app_globals.app_globals.apo_lon, hrDatetime)
        # When min > max the observing window wraps around the day
        # boundary, so the in-window test must be split in two.
        if convert.datetime2decHour(platePointing_times['min']) > convert.datetime2decHour(platePointing_times['max']):
            if ((24.0 - convert.datetime2decHour(platePointing_times['nominal'])) < convert.datetime2decHour(platePointing_times['nominal']) and \
                dec_hr > convert.datetime2decHour(platePointing_times['min'])) or\
                dec_hr <= convert.datetime2decHour(platePointing_times['max']):
                bgcolors[dec_hr] = app_globals.app_globals.survey_colors[plate.surveys[0].label]
                if pntg_alt > 85.0 and pntg_alt < 87.0:
                    bgcolors[dec_hr] = app_globals.app_globals.page_colors["zenith_watch"]
                elif pntg_alt > 87.0:
                    bgcolors[dec_hr] = app_globals.app_globals.page_colors["zenith_warning"]
        # Otherwise colour the bin when min or max falls inside it, or
        # when the bin lies strictly between min and max.
        elif convert.datetime2decHour(platePointing_times['min']) >= dec_hr and convert.datetime2decHour(platePointing_times['min']) < dec_hr+app_globals.app_globals.bin_size or \
            dec_hr > convert.datetime2decHour(platePointing_times['min']) and dec_hr < convert.datetime2decHour(platePointing_times['max']) or \
            convert.datetime2decHour(platePointing_times['max']) >= dec_hr and convert.datetime2decHour(platePointing_times['max']) < dec_hr+app_globals.app_globals.bin_size :
            bgcolors[dec_hr] = app_globals.app_globals.survey_colors[plate.surveys[0].label]
            # Near-zenith override: yellow at 85-87 deg, red above 87 deg.
            if pntg_alt > 85.0 and pntg_alt < 87.0:
                bgcolors[dec_hr] = app_globals.app_globals.page_colors["zenith_watch"]
            elif pntg_alt > 87.0:
                bgcolors[dec_hr] = app_globals.app_globals.page_colors["zenith_warning"]
        # Grey out bins before evening / after morning twilight, but only
        # over plain white or "plugged" backgrounds.
        if evening_twilight > dec_hr+app_globals.app_globals.bin_size and (bgcolors[dec_hr] == "#FFFFFF" or bgcolors[dec_hr] == app_globals.app_globals.page_colors["plugged"]):
            bgcolors[dec_hr] = app_globals.app_globals.page_colors["twilight"]
        if morning_twilight < dec_hr+app_globals.app_globals.bin_size and (bgcolors[dec_hr] == "#FFFFFF" or bgcolors[dec_hr] == app_globals.app_globals.page_colors["plugged"]):
            bgcolors[dec_hr] = app_globals.app_globals.page_colors["twilight"]
        if hourTicker:
            # Mark the bin containing "now", unless it already carries a
            # survey colour.
            current_dec_hr = nowDatetime.hour + nowDatetime.minute/60.0
            if current_dec_hr >= dec_hr and current_dec_hr < dec_hr+app_globals.app_globals.bin_size and bgcolors[dec_hr] not in app_globals.app_globals.survey_colors.values():
                bgcolors[dec_hr] = app_globals.app_globals.page_colors["current_time"]
    return bgcolors
| true |
2aeb911ab3ead5a510db5ae3cf38445742617487 | Python | jaytlang/networkedlasers | /app/laser.py | UTF-8 | 8,304 | 2.84375 | 3 | [
"BSD-3-Clause-Clear",
"BSD-2-Clause"
] | permissive | # Laser Projector Preprocessor
#
# Used to turn arbitary images and video into laser trajectories, that are either sent over the network to a
# FPGA or stored locally as a CSV, PNG, or COE file.
#
# fischerm@mit.edu, Fall 2020
from sys import argv
import cv2
import numpy as np
import trajectory_planner as tp
import os
import socket
cv2.setUseOptimized(True) # no idea if this does anything but hahaa openCV go brrrrr
# Parse command-line options by hand (see -h text for the flag meanings).
if len(argv) == 1:
    raise SystemExit("No options specified. Try 'laser.py -h' for more information.")
if '-h' in argv:
    print("""Usage: python3 laser.py [OPTION]
    -i  input source (required). path to file if source is a file, or number if webcam.
    -o  output destination. path to directory, but use -n for streaming to projector.
    -t  output file type. options include any combination of 'png' or 'csv' or 'coe'.
    -n  network address of the laser, if it's output is desired.
    -q  quiet mode. will not show the rendered frame.
    examples:
    python3 app/laser.py -i input.jpg -o output/ -n f0:0d:be:ef:ba:be
    python3 app/laser.py -i 0 -n f0:0d:be:ef:ba:be""")
    exit()
if '-i' not in argv:
    raise SystemExit("No input file specified, specify one with -i or run 'laser.py -h' for more information.")
if '-t' in argv and '-o' not in argv:
    raise SystemExit("Output type specified, but no output directory specified. Specify one with -o or run 'laser.py -h' for more information.")
# Input source from -i: an integer selects a webcam, anything else is a
# file path (cv2.VideoCapture accepts both).
input_filename = argv[argv.index('-i') + 1]
try:
    source = int(input_filename)
except:
    source = input_filename
cap = cv2.VideoCapture(source)
if (cap.isOpened() == False):
    raise SystemExit(f"Error opening input file {input_filename}")
# Output directory from -o (None when streaming-only).
output_directory = argv[argv.index('-o') + 1] if '-o' in argv else None
# Output types from -t: collect following args until the next flag.
output_types = []
if '-t' in argv:
    for arg in argv[argv.index('-t')+1:]:
        if len(arg) != 2 and '-' not in arg:
            output_types.append(arg)
        else:
            break
# Create the UDP socket when network output (-n) is requested.
if '-n' in argv:
    fd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Resize a frame to fit within the desired resolution while keeping the
# aspect ratio; this also reduces the number of points in the image.
def prep_frame(frame, desired_x_resolution, desired_y_resolution):
    """Uniformly scale *frame* so it fits inside the desired resolution.

    The smaller of the two per-axis scale factors is applied to both
    axes, preserving the aspect ratio.
    """
    scale_x = desired_x_resolution / frame.shape[0]
    scale_y = desired_y_resolution / frame.shape[1]
    scale = min(scale_x, scale_y)
    return cv2.resize(frame, None, fx=scale, fy=scale)
# Export image to directory path as n.png, where n counts existing PNGs.
def save_png(path, image):
    """Write *image* into *path* as <n>.png, n = number of existing PNGs."""
    count = sum(1 for name in os.listdir(path) if '.png' in name)
    cv2.imwrite(f'{path}/{count}.png', image)
# Export trajectory to directory path as n.csv, where n counts existing CSVs.
def save_csv(path, trajectory):
    """Write *trajectory* (numeric array) into *path* as <n>.csv.

    Values are truncated to int; rows are written with their index and
    no header, matching pandas' default index column.
    """
    import pandas as pd
    existing = [name for name in os.listdir(path) if '.csv' in name]
    target = f'{path}/{len(existing)}.csv'
    pd.DataFrame(trajectory.astype(int)).to_csv(target, header=False)
# Export trajectory to directory path as n.coe, where n counts existing COEs.
def save_coe(path, trajectory):
    """Write *trajectory* as a Xilinx .coe memory file named <n>.coe.

    Each row (x, y, r, g, b) becomes one hex word x(4)y(4)r(2)g(2)b(2);
    rows are comma-terminated except the last, which ends with ';'.
    """
    existing = [name for name in os.listdir(path) if '.coe' in name]
    target = f'{path}/{len(existing)}.coe'
    rows = trajectory.tolist()
    lines = ['memory_initialization_radix=16;\n', 'memory_initialization_vector=\n']
    last = len(rows) - 1
    for idx, row in enumerate(rows):
        x, y, r, g, b = [format(int(v), 'x') for v in row]
        word = x.zfill(4) + y.zfill(4) + r.zfill(2) + g.zfill(2) + b.zfill(2)
        lines.append(word + (';' if idx == last else ',\n'))
    with open(target, 'w') as fh:
        fh.writelines(lines)
def save_traj(path, trajectory):
    """Write *trajectory* as <n>.traj, one 8-byte hex word per line.

    Word layout matches the UDP packet format: control(1) x(2) y(2)
    r(1) g(1) b(1). Control is 0x02 on the last point (swap
    framebuffers) and 0x01 otherwise; x and y are mirrored because the
    galvos are oriented weirdly.

    Bug fix: the original referenced zero_pad, which is defined only
    locally inside save_coe, so calling this function raised NameError.
    """
    def zero_pad(input_str, length):
        # Left-pad a hex string with zeros to the given width.
        return '0' * (length - len(input_str)) + input_str

    files = [i for i in os.listdir(path) if '.traj' in i]
    filename = f'{path}/{len(files)}.traj'
    output_lines = []
    input_lines = trajectory.tolist()
    for input_line_number, input_line in enumerate(input_lines):
        x, y, r, g, b = [int(i) for i in input_line]
        control = '02' if input_line_number == len(input_lines) - 1 else '01'
        x = format(65535 - (x * 128), 'x')
        y = format(65535 - (y * 128), 'x')  # mirror y because galvos are oriented weirdly
        r = format(r, 'x')
        g = format(g, 'x')
        b = format(b, 'x')
        output_lines.append(control + zero_pad(x, 4) + zero_pad(y, 4)
                            + zero_pad(r, 2) + zero_pad(g, 2) + zero_pad(b, 2) + '\n')
    with open(filename, 'w') as output_file:
        output_file.writelines(output_lines)
def send_trajectory_udp(fd, trajectory, destination_ip='142.79.194.65', port_number=42069):
    """Stream *trajectory* to the laser FPGA as one UDP packet per point.

    fd is an already-created UDP socket. NOTE(review): packet_list is
    never used; x and y are mirrored (65535 - v*128) for the galvo
    orientation, matching save_traj.
    """
    input_lines = trajectory.tolist()
    packet_list = []
    # Packet format is as follows:
    # Control (1 byte) - either 0x01 for adding to framebuffer, or 0x02 to swap framebuffers. Other values are invalid.
    # x (2 bytes)
    # y (2 bytes)
    # r (1 byte)
    # g (1 byte)
    # b (1 byte)
    # Total: 8 bytes
    for input_line_number, input_line in enumerate(input_lines):
        x, y, r, g, b = [int(i) for i in input_line]
        # 0x02 terminates the frame (swap framebuffers) on the last point.
        control = '02' if input_line_number == len(input_lines) - 1 else '01'
        x = format(65535 - (x*128), 'x')
        y = format(65535 - (y*128), 'x') # mirror y because galvos are oriented wierdly
        r = format(r, 'x')
        g = format(g, 'x')
        b = format(b, 'x')
        zero_pad = lambda input_str, length: '0'*(length - len(input_str)) + input_str
        data = control + zero_pad(x, 4) + zero_pad(y, 4) + zero_pad(r, 2) + zero_pad(g,2) + zero_pad(b,2)
        fd.sendto(bytes.fromhex(data), (destination_ip, port_number))
# Main loop: read frames, extract edges, plan a laser trajectory, then
# save/stream/display according to the command-line flags.
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret == True:
        # Rescale frame to fit within 512x512 (aspect ratio preserved).
        frame = prep_frame(frame, 512, 512)
        # Canny edge detection on a bilateral-filtered grayscale image
        # (smooths noise without destroying edges).
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert image to grayscale
        gray_filtered = cv2.bilateralFilter(gray, 7, 50, 50) # Smooth without removing edges
        edges = cv2.Canny(gray, 30, 120) # Apply canny filter (NOTE(review): unused below)
        edges_filtered = cv2.Canny(gray_filtered, 60, 120)
        # Trajectory planning: turn the edge map into an ordered point
        # path and colour it from the blurred source frame.
        blur = cv2.blur(frame,(5,5))
        trajectory, degeneracies = tp.calculate_trajectory(edges_filtered)
        colorized_trajectory = tp.colorize_trajectory(blur, trajectory)
        rendered_trajectory = tp.draw_trajectory(np.zeros_like(frame), colorized_trajectory)
        # Stack source / edges / rendered trajectory side by side.
        edges_filtered_colored = cv2.cvtColor(edges_filtered, cv2.COLOR_GRAY2BGR)
        images = np.hstack((frame, edges_filtered_colored, rendered_trajectory))
        # Save the frame in each format requested via -t.
        # NOTE(review): 'traj' is accepted here but not listed in the -h text.
        if 'png' in output_types:
            save_png(output_directory, rendered_trajectory)
        if 'csv' in output_types:
            save_csv(output_directory, colorized_trajectory)
        if 'coe' in output_types:
            save_coe(output_directory, colorized_trajectory)
        if 'traj' in output_types:
            save_traj(output_directory, colorized_trajectory)
        # Stream the frame over the network when -n was given.
        if '-n' in argv:
            send_trajectory_udp(fd, colorized_trajectory)
        # Display the resulting frame unless quiet mode (-q) is on.
        if '-q' not in argv:
            cv2.imshow('Frame', images)
            if cv2.waitKey(25) & 0xFF == ord('q'): # Press Q to exit
                break
    else:
        break
cap.release()
cv2.destroyAllWindows()
6d8b7a4d22dd6d32cc80f7943dfb6c7530924c2f | Python | kartiks22/Minesweeper | /easy.py | UTF-8 | 10,159 | 2.765625 | 3 | [] | no_license | from Tkinter import *
import tkMessageBox
import random
#This is the main class
class Easy:
    '''A 9x9 "easy" minesweeper board rendered into a Tkinter frame.

    Each tile index 0..80 maps (in self.buttons) to a list:
      [0] Button widget
      [1] mine flag (1 = mine, 0 = clear)
      [2] state (0 = unclicked, 1 = clicked, 2 = flagged)
      [3] tile index
      [4] [row, column] grid position (row 1..9, column 0..8)
      [5] number of adjacent mines

    NOTE(review): Res1, Quit1, l_click1 and r_click1 call Restart1,
    Quit, end, win and update_flag, none of which are defined in this
    class -- presumably provided elsewhere; verify before shipping.
    '''

    # (row, column) offsets of the eight surrounding tiles. Using one
    # offset table replaces the original's hand-written per-corner and
    # per-edge branches, which duplicated the same neighbour arithmetic
    # nine times.
    NEIGHBOR_OFFSETS = ((-1, -1), (-1, 0), (-1, 1),
                        (0, -1), (0, 1),
                        (1, -1), (1, 0), (1, 1))

    def __init__(self, master):
        '''Build the board UI inside *master* and lay mines randomly.'''
        # Tile images loaded from the working directory.
        self.tile_plain = PhotoImage(file="tile_plain.gif")
        self.tile_clicked = PhotoImage(file="tile_clicked.gif")
        self.tile_mine = PhotoImage(file="tile_mine.gif")
        self.tile_flag = PhotoImage(file="tile_flag.gif")
        self.tile_wrong = PhotoImage(file="tile_wrong.gif")
        self.tile_no = []
        for x in range(1, 9):
            self.tile_no.append(PhotoImage(file="tile_" + str(x) + ".gif"))
        # Frame plus the back / restart / quit controls on row 0.
        self.frame = Frame(master)
        self.frame.grid()
        self.Back_button = Button(self.frame, text="< back", command=self.Back)
        self.Back_button.grid(row=0, column=0, columnspan=3)
        self.Restart_button = Button(self.frame, text="restart", command=self.Res1)
        self.Restart_button.grid(row=0, column=3, columnspan=3)
        self.Quit_button = Button(self.frame, text="Quit >", command=self.Quit1)
        self.Quit_button.grid(row=0, column=6, columnspan=3)
        # Game state counters.
        self.flags = 0
        self.correct_flags = 0
        self.clicked = 0
        self.buttons = {}
        self.mines = 0
        # Create the 81 tile buttons; each tile independently has a 15%
        # chance of hiding a mine.
        x_cord = 1
        y_cord = 0
        for x in range(81):
            mine = 0
            image_p = self.tile_plain
            if random.uniform(0, 1) < 0.15:
                mine = 1
                self.mines = self.mines + 1
            # [button, mine?, state, index, [row, col], nearby mines]
            self.buttons[x] = [Button(self.frame, image=image_p), mine, 0, x, [x_cord, y_cord], 0]
            self.buttons[x][0].bind("<Button-1>", self.lclick1(x))
            self.buttons[x][0].bind("<Button-3>", self.rclick1(x))
            y_cord = y_cord + 1
            if y_cord == 9:
                y_cord = 0
                x_cord = x_cord + 1
        for keys in self.buttons:
            self.buttons[keys][0].grid(row=self.buttons[keys][4][0], column=self.buttons[keys][4][1])
        # Count the mines surrounding every tile via the offset table.
        for keys in self.buttons:
            near_mines = 0
            for neighbor in self.neighbor_keys(keys):
                if self.check_mines(neighbor):
                    near_mines = near_mines + 1
            self.buttons[keys][5] = near_mines
        # Status labels: total mines and flags planted so far.
        self.label2 = Label(self.frame, text="Mines: " + str(self.mines))
        self.label2.grid(row=10, column=0, columnspan=5)
        self.label3 = Label(self.frame, text="Flags: " + str(self.flags))
        self.label3.grid(row=10, column=5, columnspan=5)

    def neighbor_keys(self, keys):
        '''Return indices of the up-to-eight tiles bordering tile *keys*.'''
        row = self.buttons[keys][4][0]
        col = self.buttons[keys][4][1]
        result = []
        for d_row, d_col in self.NEIGHBOR_OFFSETS:
            n_row = row + d_row
            n_col = col + d_col
            # Rows run 1..9 and columns 0..8; anything else is off-board.
            if 1 <= n_row <= 9 and 0 <= n_col <= 8:
                result.append((n_row - 1) * 9 + n_col)
        return result

    def Res1(self):
        '''Restart-button callback.'''
        # NOTE(review): Restart1 is not defined in this class.
        self.Restart1()

    def Quit1(self):
        '''Quit-button callback.'''
        # NOTE(review): Quit is not defined in this class.
        self.Quit()

    def Back(self):
        '''Back-button callback: discard this board and rebuild.'''
        self.frame.grid_forget()
        # NOTE(review): passes self (not the original master) as the new
        # parent widget -- looks wrong; verify.
        self.__init__(self)

    def forgetframe(self):
        '''Hide this board's frame (used when switching games).'''
        self.frame.grid_forget()

    def check_mines(self, keys):
        '''Return True when tile *keys* exists and holds a mine.'''
        try:
            if self.buttons[keys][1] == 1:
                return True
            else:
                return False
        except KeyError:
            pass

    def lclick1(self, keys):
        '''Return a left-click handler bound to tile *keys*.'''
        return lambda Button: self.l_click1(self.buttons[keys])

    def rclick1(self, keys):
        '''Return a right-click handler bound to tile *keys*.'''
        return lambda Button: self.r_click1(self.buttons[keys])

    def l_click1(self, button):
        '''Reveal a tile; hitting a mine exposes the board and ends the game.'''
        if button[1] == 1:
            # Mine hit: show every unflagged mine and every wrong flag.
            for keys in self.buttons:
                if self.buttons[keys][1] != 1 and self.buttons[keys][2] == 2:
                    self.buttons[keys][0].configure(image=self.tile_wrong)
                if self.buttons[keys][1] == 1 and self.buttons[keys][2] != 2:
                    self.buttons[keys][0].configure(image=self.tile_mine)
            # NOTE(review): end is not defined in this class.
            self.end(1)
        else:
            if button[5] == 0:
                # Empty tile: flood-fill the connected empty region.
                button[0].configure(image=self.tile_clicked)
                self.cln_emp_tile1(button[3])
            else:
                # Numbered tile: show its adjacent-mine count.
                button[0].configure(image=self.tile_no[button[5] - 1])
            if button[2] != 1:
                button[2] = 1
                self.clicked = self.clicked + 1
                # All safe tiles revealed -> the game is won.
                if self.clicked == 81 - self.mines:
                    # NOTE(review): win is not defined in this class.
                    self.win(1)

    def r_click1(self, button):
        '''Toggle a flag: first right-click plants it, the next removes it.'''
        if button[2] == 0:
            button[0].configure(image=self.tile_flag)
            button[2] = 2
            # Flagged tiles must not respond to left clicks.
            button[0].unbind("<Button-1>")
            self.flags = self.flags + 1
            # NOTE(review): update_flag is not defined in this class.
            self.update_flag()
        elif button[2] == 2:
            button[0].configure(image=self.tile_plain)
            button[2] = 0
            button[0].bind("<Button-1>", self.lclick1(button[3]))
            self.flags = self.flags - 1
            self.update_flag()

    def cln_emp_tile1(self, button_num):
        '''Flood-fill reveal of the empty region connected to *button_num*.'''
        pending = [button_num]
        while len(pending) != 0:
            keys = pending.pop()
            for neighbor in self.neighbor_keys(keys):
                self.check(neighbor, pending)

    def check(self, keys, button_list):
        '''Reveal tile *keys* if unclicked; queue it for expansion when empty.'''
        try:
            if self.buttons[keys][2] == 0:
                if self.buttons[keys][5] == 0:
                    self.buttons[keys][0].configure(image=self.tile_clicked)
                    button_list.append(keys)
                else:
                    self.buttons[keys][0].configure(image=self.tile_no[self.buttons[keys][5] - 1])
                self.buttons[keys][2] = 1
                # NOTE(review): unlike l_click1, this increments clicked
                # without checking the win condition -- verify.
                self.clicked = self.clicked + 1
        except KeyError:
            pass
| true |
8000cc1e8bd8d420ed25f76c095930c53c0de93b | Python | AlexanderBackis/mg_analysis_notebook | /scripts/he3/read.py | UTF-8 | 4,414 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ImportHe3.py: Imports He3 data taken using the MCA4 Multichannel Analyzer
"""
import os
import struct
import shutil
import zipfile
import re
import numpy as np
import pandas as pd
# =============================================================================
# EXTRACT DATA
# =============================================================================
def extract_events(file_path):
""" Imports MCA4 data. This is ascii encoded hex and is in 64 bit "words".
Hex 1->4: Charge amplitude
Hex 5->15: Time
Hex 16: Channel and pile up
Args:
file_path (str): Path to '.mesytec'-file that contains the data
Returns: he3_df (DataFrame): DataFrame containing data
"""
# Masks
CHANNEL_MASK = 0x0000000000000003
PILE_UP_MASK = 0x000000000000000C
TIME_MASK = 0x0000FFFFFFFFFFF0
ADC_MASK = 0xFFFF000000000000
BREAK_MASK = 0xFFF0000000000000
# Bit shifts
CHANNEL_MASK = 0
TIME_SHIFT = 4
ADC_SHIFT = 48
PILE_UP_SHIFT = 2
# Import data
data = np.loadtxt(file_path, dtype='str', delimiter='\n')
start_idx = np.where(data == '[DATA]')[0][0]
size = len(data)
# Declare dictionary to store data
he3_dict = {'ch': np.empty([size], dtype=int),
'tof': np.empty([size], dtype=float),
'adc': np.empty([size], dtype=int),
'pile_up': np.empty([size], dtype=int)}
count = 0
# Extracts information from data
for i, row in enumerate(data[start_idx+1:]):
# Convert ASCII encoded HEX to int (shouldn't it be uint?)
word = int(row, 16)
# Check if we should save data
if (word & BREAK_MASK) != 0:
# Extract values using masks
he3_dict['ch'][count] = (word & CHANNEL_MASK)
he3_dict['tof'][count] = ((word & TIME_MASK) >> TIME_SHIFT) * 8e-9
he3_dict['adc'][count] = (word & ADC_MASK) >> ADC_SHIFT
he3_dict['pile_up'][count] = (word & PILE_UP_MASK) >> PILE_UP_SHIFT
count += 1
# Print progress of clustering process
if i % 1000 == 1:
percentage_finished = int(round((i/len(data))*100))
print('Percentage: %d' % percentage_finished)
# Only save the events, cut unused rows
for key in he3_dict:
he3_dict[key] = he3_dict[key][0:count]
he3_df = pd.DataFrame(he3_dict)
return he3_df
# =============================================================================
# SAVE DATA
# =============================================================================
def save_data(df, path):
    """
    Persist *df* to an HDF5 file.

    Args:
        path (str): Destination HDF5 path; data is stored under key 'df'
        df (DataFrame): Events to save

    Yields:
        The DataFrame is written with maximum compression (complevel 9).
    """
    df.to_hdf(path, key='df', complevel=9)
# =============================================================================
# LOAD DATA
# =============================================================================
def load_data(path):
    """
    Load previously saved event data from an HDF5 file.

    Args:
        path (str): HDF5 path written by save_data (key 'df')

    Returns:
        df (DataFrame): Events
    """
    return pd.read_hdf(path, 'df')
# =============================================================================
# FILTER DATA
# =============================================================================
def filter_data(df, parameters):
    """
    Filters events based on preferences set on GUI.
    Args:
        df (DataFrame): Events to filter.
        parameters (dict): Maps a column name in *df* to a tuple
            (min_val, max_val, filter_on). Rows are kept only when the
            column value lies within [min_val, max_val], for every
            parameter whose filter_on flag is True.
    Returns:
        df_red (DataFrame): The rows of *df* that satisfy every active
            filter (the original docstring referred to a nonexistent
            'ce' parameter; it is the *df* argument that is filtered).
    """
    df_red = df
    for parameter, (min_val, max_val, filter_on) in parameters.items():
        if filter_on:
            # Successive boolean masks narrow the selection one column at a time
            df_red = df_red[(df_red[parameter] >= min_val) &
                            (df_red[parameter] <= max_val)]
    return df_red
| true |
219096268d3a88342848dd1b850c078dfba8ac50 | Python | yuanguLeo/yuanguPython | /CodeDemo/TogetherNavigating/eight/greeter.py | UTF-8 | 4,907 | 4.28125 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/6/16 14:42
'''
需求8.1例题
'''
def greet_user():
    '''Display a greeting message.'''
    print('hello, Python! ')
greet_user()
'''
需求8-1:display_message()的函数,打印一句话
'''
# Exercise 8-1: display_message() prints one greeting line for the given user.
def display_message(username): # the names inside the parentheses are formal parameters
    '''Practice defining a function with def.'''
    print('\nhello, '+ username)
display_message('tom') # the value passed at the call site is the actual argument
'''
需求8-2:喜欢的图书
'''
# Exercise 8-2: print a favourite book title.
def favorite_book(title):
    print('\nOne of my favorite books is ' + title)
favorite_book('Alice in Wonderland')
'''
需求8-3:编写一个名为:make_shirt()的函数,接受一个尺码和T恤上要印的字样
'''
# Exercise 8-3: make_shirt() takes a size and the text to print on the shirt.
def make_shirt(size,pattern):
    print('\n我需要'+size+'码的T恤,要印'+pattern+'图案')
make_shirt('ss','篮球')
make_shirt(size = 'xl',pattern = 'jondan')
'''
需求8-4:大号T恤,修改函数make_shirt(),在默认值情况下,印“I love python”
'''
# Exercise 8-4: make_shirt() with a default pattern (note the 'lova' typo is in the original data).
def make_shirt(size,pattern='I lova python!'):
    print('\n我需要'+size+'的T恤,要印'+pattern+'图案')
make_shirt('大号')
make_shirt('中号')
make_shirt('小号','start python')
'''
需求8-5:编写一个名为describe_city()的函数,接受一座城市的名字和该城市所以属国家
'''
# Exercise 8-5: describe_city() takes a city name and its country (default 'china').
def describe_city(city,countries='china'):
    print('\n'+city+' is in '+countries)
describe_city('beijing')
describe_city('shanghai')
describe_city('yanqin','beijing')
'''
需求8-6:编写city_country()的函数,并返回
'''
# Exercise 8-6: city_country() returns a formatted "city , country" string.
def city_country(city,country):
    return ('\n'+city+' , '+country)
    #return {'city': city,'country':country}
massage = city_country('北京','中国')
print(massage)
'''
需求8-7:make_album()函数,创建一个描述音乐专辑的字典,接受歌手和专辑名,打印返回值
'''
def make_album(singer_name, album_name, singer_number=''):
    """Return a dict describing a music album.

    Args:
        singer_name: artist name.
        album_name: album title.
        singer_number: optional track-count description; when omitted the
            key is left out of the returned dict entirely.
    """
    # BUG FIX: the default used to be ' ' (a single space), which is truthy,
    # so the "no track count" branch below was unreachable for two-argument
    # calls and the result always carried a bogus 'singer_number': ' ' entry.
    if singer_number:
        return {'singer_name': singer_name,
                'album_name': album_name,
                'singer_number': singer_number}
    return {'singer_name': singer_name, 'album_name': album_name}
a = make_album('许巍','无尽光芒')
print(a)
b = make_album('赵雷','无法长大','10首歌曲')
print(b)
'''
需求8-9:魔术师
'''
# Exercise 8-9: print a greeting for every magician in the list.
def show_magician(name_lists):
    for name_list in name_lists:
        print('\n需求8-9: '+ name_list +'hello!')
name_lists = ['西里尔·高山','刘谦','杰森·拉蒂']
show_magician(name_lists)
'''
需求8-10:编写一个make_great()的函数,在每个魔术师的名字中都加入字样“the Great”,
调用show_magician,确实列表变了
'''
# Exercise 8-10: redefine show_magician, then make_great() prefixes every
# name in place (mutating the caller's list) and shows the modified list.
def show_magician(name_lists):
    for name_list in name_lists:
        print('\n需求8-10: ' + name_list + 'hello')
def make_great(name_lists):
    n = 0
    while n < len(name_lists):
        name_lists[n] = '\n需求8-10: the Great'+ name_lists[n]
        n += 1
    show_magician(name_lists)
name_lists = ['西里尔·高山','刘谦','杰森·拉蒂']
make_great(name_lists)
'''
需求8-11:
'''
# Exercise 8-11: drain one list into another while greeting; the caller keeps
# the original intact by passing a slice copy (name_lists[:]).
def show_magician(name_lists,new_name_lists):
    '''Exercise 8-11'''
    while name_lists:
        name_list = name_lists.pop()
        print('\n需求8-11: ' + name_list + ',hello!')
        new_name_lists.append(name_list)
def make_greats(name_lists):
    # NOTE(review): relies on the module-level global new_name_lists defined below
    n = 0
    while n < len(name_lists):
        name_lists[n] = 'the Great'+ name_lists[n]
        n += 1
    show_magician(name_lists,new_name_lists)
name_lists = ['西里尔·高山','刘谦','杰森·拉蒂']
new_name_lists = []
show_magician(name_lists[:],new_name_lists)
make_greats(name_lists)
'''
需求8-12:编写一个函数,只有一个形参,收集所以食材,
'''
# Exercise 8-12: sandwich() collects any number of ingredients via *args.
def sandwich(*foods):
    for food in foods:
        print('\n您的' + food +'三明治')
sandwich('Panini','Banh Mi','Montreal')
'''
需求8-13:将user_profile.py改为自己的信息
'''
# copied from user_profile.py
# Exercise 8-13: build_property() stores required last/first names plus
# arbitrary keyword attributes in one profile dict.
def build_property(last,first,**user_info):
    '''Exercise 8-13'''
    profile = {}
    profile['last_name'] = last
    profile['first_name'] = first
    print(profile)
    for key,value in user_info.items():
        profile[key] = value
    return profile
user_profile = build_property('张','三',age='30',occ='软件测试')
print(user_profile)
'''
需求8-14:编写一个函数,将汽车信息储存到字典中,这个函数总是接受制造商和型号,还接受任意数量的
关键字实参,这样调用函数,提供必不可少的信息,名称-值对,颜色和选装等
'''
# Exercise 8-14: car_profiles() stores required maker/model plus any number
# of keyword options (colour, extras, ...).
def car_profiles(manufacturers,model,**profiles):
    '''Exercise 8-15'''
    profile_dicts = {}
    profile_dicts['manufacturer_name'] = manufacturers
    profile_dicts['model'] = model
    for profile_key,profile_value in profiles.items():
        profile_dicts[profile_key] = profile_value
    return profile_dicts
car_profile = car_profiles('领克','05',color='黑色',optional='360度监控')
print(car_profile)
| true |
#!/usr/bin/python
import os
import time
import hashlib
import argparse


def getMD5(text):
    """Return the hex MD5 digest of *text*.

    BUG FIX: hashlib's update() requires bytes in Python 3; the original
    passed the str directly (and shadowed the builtin name 'str'), which
    raised TypeError on every call.

    NOTE(review): MD5 is not a secure password-derivation function; consider
    hashlib.pbkdf2_hmac or another KDF if these passwords matter.
    """
    m = hashlib.md5()
    m.update(text.encode('utf-8'))
    return m.hexdigest()


def main():
    """Parse the CLI arguments and print the derived password."""
    parser = argparse.ArgumentParser(description='Calculate passwd according to username and applications or website')
    parser.add_argument('-u', metavar="string", help="username")
    parser.add_argument('-a', metavar="string", help="application name")
    parser.add_argument('-l', metavar="int", type=int, help="password length", default=10)
    # Parse once instead of four separate parse_args() calls
    args = parser.parse_args()
    # Deterministic pseudo-password: title-cased MD5 of "<user>-<app>", truncated to -l chars
    print(getMD5(args.u + "-" + args.a).title()[:args.l])


if __name__ == '__main__':
    # Guard so importing this module no longer triggers argument parsing
    main()
| true |
5bba1b5f084bc1e4bac015e9f86c507f0a43b8db | Python | Satily/leetcode_python_solution | /solutions/solution869.py | UTF-8 | 1,014 | 3.71875 | 4 | [
"MIT"
class NumberExploder(object):
    """Multiset of decimal digits of a non-negative integer.

    Two exploders compare equal iff their numbers use exactly the same
    digits with the same multiplicities, i.e. one number is a digit
    reordering of the other.
    """

    def __init__(self, num):
        # num_count[d] = number of occurrences of digit d (num == 0 gives all zeros)
        self.num_count = [0] * 10
        while num > 0:
            num, digit = divmod(num, 10)
            self.num_count[digit] += 1

    def __eq__(self, other):
        # Idiom fix: compare the count lists directly instead of looping
        # index by index; also avoid crashing on non-NumberExploder operands.
        if not isinstance(other, NumberExploder):
            return NotImplemented
        return self.num_count == other.num_count
class Solution:
    def reorderedPowerOf2(self, N):
        """
        :type N: int
        :rtype: bool
        """
        # Two numbers are digit-reorderings of each other exactly when their
        # sorted digit strings match, so compare N's signature against every
        # power of two that can fit the same digit count (2**0 .. 2**31,
        # matching the original's explicit N == 1 special case plus its
        # 31-step doubling loop).
        signature = sorted(str(N))
        return any(sorted(str(1 << exponent)) == signature for exponent in range(32))
if __name__ == "__main__":
    # Smoke tests; expected output: True, False, True, False, True
    print(Solution().reorderedPowerOf2(1))
    print(Solution().reorderedPowerOf2(10))
    print(Solution().reorderedPowerOf2(16))
    print(Solution().reorderedPowerOf2(24))
    print(Solution().reorderedPowerOf2(46))
| true |
1d94586d4067faa9c367556d22aa1d5dac15fbb7 | Python | avinashKondur/Machine-Learning-Projects | /NLP/Classifier.py | UTF-8 | 9,892 | 2.9375 | 3 | [] | no_license |
import copy
import pickle
import random
import time
from statistics import mode

import matplotlib.animation as animation
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd
from matplotlib import style
from nltk.classify import ClassifierI
from nltk.classify.scikitlearn import SklearnClassifier
from nltk.corpus import movie_reviews
from nltk.tokenize import word_tokenize
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.svm import SVC, LinearSVC, NuSVC
class Classifier:
def __init__(self, classifierName, posFile, negFile):
self._name = classifierName
pos = pd.read_table(posFile,delimiter='\n',header=None, names=["text"] )
pos['sentiment'] = 1 #1 for positive
neg = pd.read_table(negFile,delimiter='\n',header=None, names=["text"] )
neg['sentiment'] = 2 #2 for negative
pos_words=[]
for s in pos['text']:
short_p_words.extend(word_tokenize(str(s)))
neg_words=[]
for s in neg['text']:
neg_words.extend(word_tokenize(str(s)))
all_words=[]
for w in pos_words:
all_words.append(w.lower())
for w in neg_words:
all_words.append(w.lower())
all_words = nltk.FreqDist(all_words)
self.word_features = list(all_words.keys())[:int(len(all_words)*0.8)]
documents = pos.get_values()
documents = np.concatenate((documents,neg.get_values()),axis=0)
#shuffle the documents
random.shuffler(documents)
#prepare X and T, classification
self.X = document[:,0:1]
self.T = documents[:,1:2]
if classifierName == 'NaiveBayesClassifier':
self.classifier = nltk.NaiveBayesClassifier
elif classifierName == 'MaxEntropy':
classifier = nltk.MaxentClassifier
elif classifierName == 'MultinomialNB':
self.classifier = SklearnClassifier(MultinomialNB())
elif classifierName == 'BernoulliNB':
self.classifier = SklearnClassifier(BernoulliNB())
elif classifierName == 'LogisticRegression':
self.classifier = SklearnClassifier(LogisticRegression())
elif classifierName == 'SGDClassifier':
self.classifier = SklearnClassifier(SGDClassifier())
elif classifierName == 'LinearSVC':
self.classifier = SklearnClassifier(SGDClassifier())
elif classifierName == 'NuSVC':
self.classifier = SklearnClassifier(SGDClassifier())
else:
raise ValueError('Not a valid classifier name')
def find_features(self,document):
words = word_tokenize(document)
features = {}
for w in self.word_features:
features[w] = (w in words)
return features
def train(self,Xtrain,numIterations = 100, algorithm = nltk.classify.MaxentClassifier.ALGORITHMS[0]):
print('Training the dataset')
featuresets = [(self.find_features(rev), category) for (rev, category) in Xtrain]
if self._name = 'NaiveBayesClassifier':
self.classifer.train(featuresets)
self.classifer.show_most_informative_features(15)
elif self._name = 'MaxEntropy':
classifier = nltk.MaxentClassifier.train(featuresets, algorithm, max_iter=numIterations)
classifier.show_most_informative_features(10)
else:
self.classifer.train(featuresets)
print('Training the dataset is done')
def evaluate(self,X,T):
testing_set = [(self.find_features(rev), category) for (rev, category) in X]
results = np.array([[1]])
i=0
for review in testing_set:
label = review[1]
text = review[0]
determined_label = classifier.classify(text)
if i ==0:
results [0] = 1 if determined_label=='pos' else 2
i= i+1
else:
results = np.concatenate((results,np.array([[1 if determined_label=='pos' else 2]])),axis=0)
print("Original {0} accuracy percent:{1}".format(self._name, (nltk.classify.accuracy(self.classifier, testing_set))*100))
#plot the results
self.Plot(results,T)
return (nltk.classify.accuracy(self.classifier, testing_set))*100, results
    def Plot(self,results, T):
        """Plot cumulative sentiment 'walks' for the expected labels T and
        the predicted labels on one axis: each label 1 steps the curve up,
        each label 2 steps it down."""
        style.use("ggplot")
        fig = plt.figure()
        ax1 = fig.add_subplot(1,1,1)
        #start plotting expected results
        # NOTE(review): when T is a column vector, l is a length-1 array and
        # the == comparisons rely on single-element array truthiness.
        xar = []
        yar = []
        x = 0
        y = 0
        for l in T:
            x += 1
            if l==1:
                y += 1
            elif l==2:
                y -= 1
            xar.append(x)
            yar.append(y)
        ax1.plot(xar,yar)
        #start plotting determined results
        xar = []
        yar = []
        x = 0
        y = 0
        for l in results:
            x += 1
            if l==1:
                y += 1
            elif l==2:
                y -= 1
            xar.append(x)
            yar.append(y)
        ax1.plot(xar,yar)
        plt.show()
    def trainValidateTestKFoldsClassification(self,parameterSets,nFolds,shuffle=False,verbose=False):
        """Stratified k-fold train/validate/test model selection.

        For every test fold, each candidate in parameterSets is scored by
        training on the remaining folds and averaging accuracy over the
        validation folds; the best candidate is then retrained on
        train+validate and scored on the held-out test fold.

        Args:
            parameterSets: iterable of candidate parameter values.
            nFolds (int): number of folds; must be >= 3.
            shuffle (bool): shuffle row indices within each class first.
            verbose (bool): print per-class counts and per-fold results.

        Returns:
            list of [bestParms, trainAccuracy, bestValidationAccuracy,
            testAccuracy], one entry per test fold.

        NOTE(review): self.evaluate() returns an (accuracy, results) tuple,
        so 'validateAccuracySum += validateAccuracy' and the accuracy slots
        of the result rows accumulate tuples rather than floats; the return
        values need unpacking before this method can work.
        NOTE(review): self.train(newXtrain, newTtrain) passes the labels as
        train()'s numIterations argument — confirm the intended signature.
        NOTE(review): 'parms' is never actually applied to the classifier
        inside the inner loop; every candidate trains identically.
        """
        if nFolds < 3:
            raise ValueError('ERROR: trainValidateTestKFoldsClassification requires nFolds >= 3.')
        # Collect row indices for each class
        classes = np.unique(self.T)
        K = len(classes)
        rowIndicesByClass = []
        for c in classes:
            rowsThisClass = np.where(self.T == c)[0]
            if shuffle:
                np.random.shuffle(rowsThisClass)
            rowIndicesByClass.append(rowsThisClass)
        # Collect start and stop indices for the folds, within each class
        startsStops = []
        if verbose:
            print(' In each of',nFolds,'folds, Class-Counts ',"")
        for k,rowIndicesThisClass in enumerate(rowIndicesByClass):
            nSamples = len(rowIndicesThisClass)
            nEach = int(nSamples / nFolds)
            if verbose:
                print('{}-{},'.format(classes[k],nEach), " ") #'samples in each of',nFolds,'folds.')
            if nEach == 0:
                raise ValueError("trainValidateTestKFoldsClassification: Number of samples in each fold for class {} is 0.".format(k))
            startsThisClass = np.arange(0,nEach*nFolds,nEach)
            if k < K-1: #last class
                stopsThisClass = startsThisClass + nEach
            else:
                # the last class absorbs the leftover samples in its final fold
                stopsThisClass = startsThisClass + nSamples #Each
            startsStops.append(list(zip(startsThisClass,stopsThisClass)))
        print()
        results = []
        for testFold in range(nFolds):
            # Leaving the testFold out, for each validate fold, train on remaining
            # folds and evaluate on validate fold.
            bestParms = None
            bestValidationAccuracy = 0
            for parms in parameterSets:
                validateAccuracySum = 0
                for validateFold in range(nFolds):
                    if testFold == validateFold:
                        continue
                    trainFolds = np.setdiff1d(range(nFolds), [testFold,validateFold])
                    rows = []
                    for tf in trainFolds:
                        for k in range(K):
                            a,b = startsStops[k][tf]
                            rows += rowIndicesByClass[k][a:b].tolist()
                    Xtrain = self.X[rows,:]
                    Ttrain = self.T[rows,:]
                    # Construct Xvalidate and Tvalidate
                    rows = []
                    for k in range(K):
                        a,b = startsStops[k][validateFold]
                        rows += rowIndicesByClass[k][a:b].tolist()
                    Xvalidate = self.X[rows,:]
                    Tvalidate = self.T[rows,:]
                    self.train(Xtrain)
                    validateAccuracy = self.evaluate(Xvalidate,Tvalidate)
                    validateAccuracySum += validateAccuracy
                validateAccuracy = validateAccuracySum / (nFolds-1)
                if bestParms is None or validateAccuracy > bestValidationAccuracy:
                    bestParms = parms
                    bestValidationAccuracy = validateAccuracy
            rows = []
            for k in range(K):
                a,b = startsStops[k][testFold]
                rows += rowIndicesByClass[k][a:b].tolist()
            Xtest = self.X[rows,:]
            Ttest = self.T[rows,:]
            newXtrain = np.vstack((Xtrain,Xvalidate))
            newTtrain = np.vstack((Ttrain,Tvalidate))
            self.train(newXtrain,newTtrain)
            trainAccuracy = self.evaluate(newXtrain,newTtrain)
            testAccuracy= self.evaluate(Xtest,Ttest)
            resultThisTestFold = [bestParms, trainAccuracy,
                                  bestValidationAccuracy, testAccuracy]
            results.append(resultThisTestFold)
            if verbose:
                print(resultThisTestFold)
        return results
def printResults(self,results):
print('{:4s} {:>20s}{:>8s}{:>8s}{:>8s}'.format('Algo','Parameters','TrnAcc','ValAcc','TesAcc'))
print('-------------------------------------------------')
for row in results:
# 20 is expected maximum number of characters in printed parameter value list
print('{:>4s} {:>20s} {:7.2f} {:7.2f} {:7.2f}'.format(self._name,str(row[0]),*row[1:]))
| true |
b8b5e4e098cbf4c98782dda85538293e6ab88156 | Python | cephasosayi/PersonalBlog-with-Django | /blog/models.py | UTF-8 | 1,548 | 2.5625 | 3 | [] | no_license | from django.db import models
from django.urls import reverse
from django.utils.timezone import now
# Create your models here.
class Category(models.Model):
    """Model representing a post category (e.g. travel)."""
    name = models.CharField(max_length=200, help_text='Enter a post category (e.g. travel)')
    def __str__(self):
        """String for representing the Model object."""
        return self.name
class Article(models.Model):
    """A blog post with an author, categories, a photo and a publication date."""
    title = models.CharField(max_length=200, null=False)
    description = models.TextField(max_length=1000, help_text='Enter your post content here')
    # Nullable on purpose: deleting an Author keeps the article (SET_NULL)
    author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
    category = models.ManyToManyField(Category)
    photo = models.ImageField(upload_to='article_photo', null=True)
    post_date = models.DateTimeField(default=now, editable=False)

    def display_genre(self):
        """Return up to three category names, comma-separated (for the admin list)."""
        return ', '.join(category.name for category in self.category.all()[:3])
    display_genre.short_description = 'Category'

    class Meta:
        ordering = ['post_date']

    def __str__(self):
        # BUG FIX: author can be NULL (on_delete=SET_NULL, null=True), and the
        # original self.author.first_name raised AttributeError in that case.
        author_name = self.author.first_name if self.author else 'unknown'
        return f'{self.title} ({author_name})'

    def get_absolute_url(self):
        """Return the URL of the detail page for this article."""
        return reverse('blog-detail', args=[str(self.id)])
class Author(models.Model):
    """A blog post author, ordered by last then first name."""
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    bio = models.TextField(max_length=1000)
    class Meta:
        ordering = ['last_name', 'first_name']
    def __str__(self):
        # Renders as "<first>,<last>" (the space inside the braces is
        # expression whitespace and is not emitted)
        return f'{self.first_name},{ self.last_name}'
    def get_absolute_url(self):
        """Return the URL of the detail page for this author."""
        return reverse('author-detail', args=[str(self.id)])
| true |
26e840b51fe7d0c37c58b5f01d766cb751d99747 | Python | curiousTauseef/algorithm-design | /mincut-scc/mincut.py | UTF-8 | 824 | 2.984375 | 3 | [] | no_license | import random
import copy
import itertools
def subroutine(graph):
    """Run one pass of Karger's random contraction on *graph* (an
    adjacency-list dict mapping vertex -> list of neighbours, mutated in
    place) and return the size of the resulting cut."""
    while len(graph) > 2:
        # pick a random edge (u, v)
        u = random.choice(list(graph.keys()))
        v = random.choice(graph[u])
        # merge v into u, dropping the self-loops the merge creates
        merged = graph[u] + graph[v]
        del graph[v]
        graph[u] = [w for w in merged if w != u and w != v]
        # redirect every remaining reference to v toward u
        for node in graph:
            graph[node] = [u if w == v else w for w in graph[node]]
    # two supernodes remain; either adjacency list is the crossing-edge count
    remaining = list(graph.values())
    return len(remaining[0])
def readgraph(file):
    """Parse an adjacency-list file where each line reads
    'vertex neighbour neighbour ...' and return it as a dict."""
    graph = {}
    with open(file) as fh:
        for raw in fh:
            vertex, *neighbours = [int(token) for token in raw.split()]
            graph[vertex] = neighbours
    return graph
def mincut(n, graph):
    """Repeat the randomized contraction *n* times (each on a deep copy of
    *graph*) and return the smallest cut observed; inf when n == 0."""
    best = float("inf")
    for _ in range(n):
        trial = subroutine(copy.deepcopy(graph))
        if trial < best:
            best = trial
    return best
| true |
02560ddc957b0952e0d2679aaccc3a63f1c4fd6e | Python | SergeyTyurin/Web_Storage | /StorageS/templatetags/filters.py | UTF-8 | 596 | 2.578125 | 3 | [] | no_license | from django import template
register = template.Library()
@register.filter
def get_value(d, key):
    """Map a count *key* onto the matching message in *d*: 'many' for 7+,
    'middle' for 1-6, 'absence' for exactly 0 (None for negative counts)."""
    if key >= 7:
        return d['many']
    if key > 0:
        return d['middle']
    if key == 0:
        return d['absence']
@register.filter
def products_plural(count):
    """Return *count* followed by the correctly declined Russian plural
    of the word for 'product'."""
    # 10-19 (and the teens of every hundred handled below via % 10)
    # always take the genitive plural
    if 10 <= count < 20:
        return str(count) + ' товаров'
    last_digit = count % 10
    if last_digit == 1:
        return str(count) + ' товар'
    if last_digit in (2, 3, 4):
        return str(count) + ' товара'
    return str(count) + ' товаров'
| true |
91a36022a3cc9a7807029c9153aa9d54e099dfa8 | Python | riyazkittur/python-practice | /if-statement.py | UTF-8 | 383 | 3.078125 | 3 | [] | no_license | is_available = False
is_discount = False
is_coming = True
# Demo of an if / elif / else chain over boolean flags
if is_available and is_discount:
    print("Product is available and discount is 10%")
elif not (is_available) or not (is_discount):
    print("Product is either not available or no discount")
elif is_available or is_coming:
    # NOTE(review): unreachable — the branch above is the logical negation of
    # the first condition, so one of the first two branches always fires.
    print("Product is either available or coming soon")
else:
    # NOTE(review): also unreachable, for the same reason.
    print("Product is not available")
| true |
3d66a575ee130583689436e2180ebfdd059ec6ff | Python | jefferyansah/Easing-Animations-with-Python | /easing/easing.py | UTF-8 | 14,624 | 2.953125 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import animation, rc
rc('animation', html='html5')
from IPython.display import HTML, Image
from itertools import groupby
class Eased:
""" This class takes the original time vector and raw data (as a m*n matrix or dataframe) along with an output vector and interpolation function
For the input data, the rows are the different variables and the columns correspond to the time points"""
def __init__(self, data,data_y=None, in_t=None):
if isinstance(data, pd.DataFrame):
self.labels=np.append(data.index.values,np.array([data.index.values[0],data.index.values[0]]))
self.int_t = np.arange(len(self.labels)-1)
self.data = np.vstack((data.values,data.values[0,:]))
self.n_dims = data.shape[1]
self.columns=data.columns
elif isinstance(data, np.ndarray):
if in_t is None:
in_t=np.arange(np.shape(data)[0])
print("No time vector included - defaulting to number of rows")
self.int_t = in_t
self.data = data
self.n_dims = len(np.shape(data))
else:
print('\033[91m' + "Data is unrecognized type : must be either a numpy array or pandas dataframe")
def No_interp(self,smoothness=10):
out_t=np.linspace(min(self.int_t),max(self.int_t),len(self.int_t)*smoothness)
self.n_steps = int(np.ceil(len(out_t) / len(self.int_t)))
self.out_t = out_t
#This Function maps the input vecotor over the outuput time vector without interoplation
if self.n_dims == 1: # if the input is only one row
self.eased = np.zeros((len(self.out_t), 1))
for i, t in enumerate(self.out_t):
self.eased[i] = self.data[int(np.floor(i / self.n_steps))]
else: #if the input is a multidimensional row
self.eased = np.zeros((np.shape(self.data)[0], len(self.out_t)))
for z in range(np.shape(self.data)[0]):
for i, t in enumerate(self.out_t):
self.eased[z, i] = self.data[z, int(np.floor(i / self.n_steps))]
return self.eased
    def power_ease(self, n,smoothness=10):
        """Ease between consecutive keyframes with a power curve of exponent *n*.

        For each pair of neighbouring input samples, the first half of the
        segment follows t**n (ease-in); the second half applies the mirrored
        formula, with `sign` (0 for even n, 2 for odd n) selecting the
        symmetric variant.

        Args:
            n (int): exponent of the easing polynomial.
            smoothness (int): output frames generated per input sample.

        Returns:
            ndarray: the eased data, shape (len(out_t), 1) for 1-D input,
            otherwise (len(out_t), n_vars); also stored on self.eased.

        NOTE(review): the 2-D branch reads self.data[i, z] (time along the
        first axis), while No_interp reads self.data[z, i] (time along the
        second) — confirm which orientation plain ndarray input should have.
        """
        out_t=np.linspace(min(self.int_t),max(self.int_t),len(self.int_t)*smoothness)
        self.n_steps = int(np.ceil(len(out_t) / len(self.int_t)))
        self.out_t = out_t
        # 0 for even exponents, 2 for odd: toggles the mirrored second half
        sign = n % 2 * 2
        if self.n_dims == 1:
            self.eased = np.zeros((len(self.out_t), 1))
            j = 0
            for i in range(len(self.int_t) - 1):
                start = self.data[i]
                end = self.data[i + 1]
                # t sweeps 0..2 over the segment; t < 1 is the ease-in half
                for t in np.linspace(0, 2, self.n_steps):
                    if (t < 1):
                        val = (end - start) / 2 * t ** n + start
                    else:
                        t -= 2
                        val = (1 - sign) * (-(end - start) / 2) * (t ** n - 2 * (1 - sign)) + start
                    self.eased[j] = val
                    j += 1
            # hold the last keyframe for any remaining output frames
            self.eased[j:] = self.data[i + 1]
        else:
            self.eased = np.zeros(( len(self.out_t),np.shape(self.data)[1]))
            for z in range(np.shape(self.data)[1]):
                j = 0
                for i in range(len(self.int_t) - 1):
                    start = self.data[ i,z]
                    end = self.data[ i + 1,z]
                    for t in np.linspace(0, 2, self.n_steps):
                        if (t < 1):
                            val = (end - start) / 2 * t ** n + start
                        else:
                            t -= 2
                            val = (1 - sign) * (-(end - start) / 2) * (t ** n - 2 * (1 - sign)) + start
                        self.eased[ j,z] = val
                        j += 1
                self.eased[ j:,z] = self.data[ i + 1,z]
        return self.eased
    def scatter_animation2d(self,n=3,smoothness=30,speed=1.0,gif=False,destination=None,plot_kws=None,label=False):
        """
        Flexibly create a 2d scatter plot animation.
        This function creates a matplotlib animation from a pandas Dataframe or a MxN numpy array. The Columns are paired
        with x and y coordinates while the rows are the individual time points.
        This takes a number of parameters for the animation, as well as
        Parameters
        ----------
        n: Exponent of the power smoothing
        smoothness: how smooth the frames of the animation are
        speed: playback speed multiplier (must be > 0)
        gif: when True, return an IPython Image pointing at 'animation.gif'
        destination: output file path; the extension ('.mp4' / '.gif') selects the writer
        plot_kws: dict of matplotlib overrides (s, color, xlim, ylim, xlabel, ylabel, alpha, figsize)
        label: when True, overlay the pandas row label of the current keyframe
        :return: matplotlib FuncAnimation (or IPython Image when gif=True)
        """
        #Running checks on data for mishappen arrays.
        if np.shape(self.data)[1]%2!=0:
            # NOTE(review): exit() kills the host process; library code
            # should raise ValueError here instead.
            print('\033[91m' + "Failed: Data must have an even number of columns")
            exit()
        if np.shape(self.data)[0]<np.shape(self.data)[1]:
            print('\033[91m' + "Warning : Data has more columns (xys) than rows (time)")
        if plot_kws is None:
            plot_kws = dict()
        it_data=self.power_ease(n,smoothness)
        # filling out missing keys
        vanilla_params={'s':10,'color':'black','xlim':[np.min(it_data)-1, np.max(it_data)+1],'ylim':[np.min(it_data)-1,np.max(it_data)+1],'xlabel':'','ylabel':'','alpha':1.0,'figsize':(6,6)}
        for key in vanilla_params.keys():
            if key not in plot_kws.keys():
                plot_kws[key] = vanilla_params[key]
        fig, ax = plt.subplots(figsize=plot_kws['figsize'])
        ax.set_xlim(plot_kws['xlim'])
        ax.set_ylim(plot_kws['ylim'])
        ax.set_xlabel(plot_kws['xlabel'])
        ax.set_ylabel(plot_kws['ylabel'])
        if label==True:
            label_text = ax.text(plot_kws['xlim'][1]*0.75, plot_kws['ylim'][1]*.9, '',fontsize=18)
        # one scatter artist per (x, y) column pair
        n_dots=int(np.shape(self.data)[1]/2)
        dots=[]
        for i in range(n_dots):
            dots.append(ax.plot([], [], linestyle='none', marker='o', markersize=plot_kws['s'], color=plot_kws['color'], alpha=plot_kws['alpha']))
        def animate(z):
            # frame z: push the eased coordinates into each artist
            for i in range(n_dots):
                dots[i][0].set_data(it_data[z,i*2],it_data[z,i*2+1])
            if label==True:
                label_text.set_text(self.labels[int(np.floor((z+smoothness/2)/smoothness))])
                return dots,label_text
            else:
                return dots
        anim = animation.FuncAnimation(fig, animate, frames=len(self.out_t),interval=400/smoothness/speed, blit=False)
        if destination is not None:
            if destination.split('.')[-1]=='mp4':
                writer = animation.writers['ffmpeg'](fps=60)
                anim.save(destination, writer=writer, dpi=100)
            if destination.split('.')[-1]=='gif':
                anim.save(destination, writer='imagemagick', fps=smoothness)
        if gif==True:
            # NOTE(review): hard-codes 'animation.gif' regardless of destination
            return Image(url='animation.gif')
        else:
            return anim
    def barchart_animation(self,n=3,smoothness=30,speed=1.0,gif=False,destination=None,plot_kws=None,label=False,zero_edges=True,loop=True):
        '''
        This barchart animation create line barcharts that morph over time using the eased data class
        It takes the following additional arguments
        :param n: this is the power curve modifier to passed to power_ease
        :param smoothness: this is a rendering parameter that determines the relative framerate over the animation
        :param speed: How quickly does the animation unfold // a value of 1 indicates the default [R>0]
        :param destination: This is the output file (if none it will be displayed inline for jupyter notebooks) - extension determines filetype
        :param plot_kws: These are the matplotlib key work arghuments that can be passed in the event the defaults don't work great
        :param label: This is an optional paramter that will display labels of the pandas rows as the animation cycles through
        :return: rendered animation
        '''
        it_data = self.power_ease(n, smoothness)
        # NOTE(review): self.columns only exists for DataFrame input; ndarray
        # input would raise AttributeError here.
        x_vect=np.arange(len(self.columns))
        ### running checks on the paramters
        #Runing checks on parameters
        # NOTE(review): assert is stripped under python -O; raise instead.
        assert speed>0, "Speed value must be greater than zero"
        # filling out missing keys
        vanilla_params = {'s': 10, 'color': 'black', 'xlim': [min(x_vect) - 1, max(x_vect) + 1],
                          'ylim': [np.min(it_data) - 1, np.max(it_data) + 1], 'xlabel': '', 'ylabel': '','title': '',
                          'alpha': 1.0, 'figsize': (6, 6)}
        # NOTE(review): unlike scatter_animation2d there is no plot_kws-is-None
        # guard, so the default plot_kws=None crashes on .keys() below.
        for key in vanilla_params.keys():
            if key not in plot_kws.keys():
                plot_kws[key] = vanilla_params[key]
        fig, ax = plt.subplots(figsize=plot_kws['figsize'])
        ax.set_xlim(plot_kws['xlim'])
        ax.set_ylim(plot_kws['ylim'])
        ax.set_title(plot_kws['title'])
        ax.set_xlabel(plot_kws['xlabel'])
        ax.set_ylabel(plot_kws['ylabel'])
        ax.set_xticks(x_vect-np.mean(np.diff(x_vect))/2)
        ax.set_xticklabels(list(self.columns),rotation=90)
        plt.tight_layout()
        if label == True:
            label_text = ax.text(plot_kws['xlim'][1] * 0.25, plot_kws['ylim'][1] * .9, '', fontsize=18)
        lines=[]
        lines.append(ax.plot([], [], linewidth=3, drawstyle='steps-pre', color=plot_kws['color'], alpha=plot_kws['alpha']))
        # add zero padding to the data // makes for prettier histogram presentation
        if zero_edges==True:
            zero_pad=np.zeros((it_data.shape[0],1))
            it_data=np.hstack((zero_pad,it_data,zero_pad))
            x_vect=[min(x_vect)-1]+list(x_vect)+[max(x_vect)+1]
        def animate(z):
            # frame z: step-plot the z-th eased row as the bar outline
            lines[0][0].set_data(x_vect, it_data[z, :])
            if label==True:
                label_text.set_text(self.labels[int(np.floor((z+smoothness/2)/smoothness))])
                return lines,label_text
            else:
                return lines
        anim = animation.FuncAnimation(fig, animate, frames=it_data.shape[0],interval=400/smoothness/speed, blit=False)
        if destination is not None:
            if destination.split('.')[-1]=='mp4':
                writer = animation.writers['ffmpeg'](fps=60)
                anim.save(destination, writer=writer, dpi=100)
            if destination.split('.')[-1]=='gif':
                anim.save(destination, writer='imagemagick', fps=smoothness)
        if gif==True:
            return Image(url='animation.gif')
        else:
            return anim
    def timeseries_animation(self,n=1,speed=1.0,interp_freq=0,starting_pos = 25,gif=False,destination=None,plot_kws=None,final_dist=False):
        '''
        This method creates a timeseiers animation of ergodic processes:
        a scrolling 1-D trace on the left and the running histogram of the
        values that have scrolled past on the right.
        :param n: power-curve exponent handed to power_ease
        :param speed: playback speed multiplier
        :param interp_freq: This is the number of steps between each given datapoint interp_freq=1 // no additional steps
        :param starting_pos: width (in samples) of the scrolling window
        :param gif: when True, return an IPython Image pointing at 'animation.gif'
        :param destination: output file path; extension selects the writer
        :param plot_kws: dict of matplotlib overrides (including 'bins')
        :param final_dist: when True, pre-draw the full final distribution in grey
        :return: matplotlib FuncAnimation (or IPython Image when gif=True)
        '''
        interp_freq+=1
        data = self.power_ease(n=n, smoothness=interp_freq)
        # NOTE(review): assert is stripped under python -O; raise instead.
        assert min(data.shape)==1, "timeseries animation only take 1 dimensional arrays"
        # collapse consecutive duplicate values (groupby on the raw sequence)
        data=[k for k, g in groupby(list(data))]
        fig, ax = plt.subplots(1, 2, figsize=(12, 4),gridspec_kw={'width_ratios': [3, 1]},sharey=True)
        max_steps=len(data)
        vanilla_params = {'s': 10, 'color': 'black', 'xlim': [0, starting_pos],
                          'ylim': [np.min(data) - 1, np.max(data) + 1], 'xlabel': '', 'ylabel': '','title': '',
                          'alpha': 1.0, 'figsize': (12, 3),'linestyle':'none','marker':'o'}
        # NOTE(review): prefer 'plot_kws is None' over '==' for None checks.
        if plot_kws==None:
            plot_kws={}
        x_vect=np.linspace(1,starting_pos,starting_pos*interp_freq)
        # Creating NaN padding at the end for time series plot
        data = np.append(data, x_vect * np.nan)
        # fill out parameters
        for key in vanilla_params.keys():
            if key not in plot_kws.keys():
                plot_kws[key] = vanilla_params[key]
        ax[0].set_ylim(plot_kws['ylim'])
        ax[1].set_ylim(plot_kws['ylim'])
        ax[0].set_xlim(plot_kws['xlim'])
        lines=[]
        lines.append(ax[0].plot([], [], linewidth=3, color=plot_kws['color'], alpha=plot_kws['alpha'],linestyle=plot_kws['linestyle'], marker=plot_kws['marker']))
        if 'bins' not in plot_kws.keys():
            plot_kws['bins']=np.linspace(plot_kws['ylim'][0],plot_kws['ylim'][1],20)
        #plotting light grey final dist:
        if final_dist==True:
            bins, x = np.histogram(data,bins=plot_kws['bins'])
            ax[1].plot(bins, x[1:], linewidth=3, drawstyle='steps-pre', color='#d3d3d3')
        else:
            # same artist drawn fully transparent so the axes limits match
            bins, x = np.histogram(data,bins=plot_kws['bins'])
            ax[1].plot(bins, x[1:], linewidth=3, drawstyle='steps-pre', color='#d3d3d3',alpha=0)
        histlines=[]
        histlines.append(ax[1].plot([], [], linewidth=3, drawstyle='steps-pre',color=plot_kws['color'], alpha=plot_kws['alpha']))
        # This function plots the distribution of flowing information // so we start at the beining and plot forward
        # reverse the orientation of data
        trace_data=data[::-1]
        def animate(z):
            # frame z: show a starting_pos-wide window sliding through the trace
            lines[0][0].set_data(x_vect, trace_data[-(starting_pos*interp_freq+1)-z:-1-z])
            # compute the histogram of what what has passed
            if z>0:
                bins, x = np.histogram(trace_data[-(z):-1],bins=plot_kws['bins'])
                histlines[0][0].set_data(bins,x[1:])
                lines.append(ax[1].plot([], [], linewidth=3, color=plot_kws['color'], alpha=plot_kws['alpha']))
            return lines
        anim = animation.FuncAnimation(fig, animate, frames=max_steps,interval=400/speed, blit=False)
        if destination is not None:
            if destination.split('.')[-1]=='mp4':
                writer = animation.writers['ffmpeg'](fps=60)
                anim.save(destination, writer=writer, dpi=100)
            if destination.split('.')[-1]=='gif':
                anim.save(destination, writer='imagemagick', fps=30)
        if gif==True:
            return Image(url='animation.gif')
        else:
            return anim
if __name__ == "__main__":
    # Library banner when run directly; see the commented example for usage.
    print('EASING : A library for smooth animations in python : version 0.1.0')
    # simple example : one point moving over time
    # data = np.random.random((10, 2))
    # Eased(data).scatter_animation2d(n=3, speed=0.5, destination='media/singlepoint.gif')
| true |
def adiciona_pontos(texto):
    """Return *texto* with a '.' inserted between every pair of adjacent
    characters; return None when the argument is not a string."""
    if not isinstance(texto, str):
        return None
    return ".".join(texto)
def remove_pontos(texto):
    """Return *texto* with every '.' removed."""
    return texto.replace(".", "")
| true |