blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3e4a7c8a736f0885cbeaf38f38f5879866fb0046 | Python | dubeamit/Daily_Coding | /is_arr_rotated.py | UTF-8 | 533 | 3.96875 | 4 | [] | no_license | # GIVEN TWO ARRAY CHECK IF THEY ARE ROTATION OF EACH OTHER
def is_arr_rotated(arr1, arr2):
    """Return True if arr2 is a rotation of arr1 (same elements, same cyclic order).

    The original anchored on the FIRST occurrence of arr1[0] inside arr2,
    which gives wrong answers when that value occurs more than once
    (e.g. [1,2,1,3] vs [1,3,1,2]).  Trying every candidate offset fixes that.
    """
    if len(arr1) != len(arr2):
        return False
    n = len(arr1)
    if n == 0:
        # two empty sequences are trivially rotations of each other
        return True
    # Every rotation of arr2 appears as a length-n window of arr2 + arr2.
    doubled = arr2 + arr2
    for start in range(n):
        if all(arr1[i] == doubled[start + i] for i in range(n)):
            return True
    return False
# Expected: True (second list is the first rotated left by three positions)
print(is_arr_rotated([1,2,3,4,5,6,7],[4,5,6,7,1,2,3]))
print(is_arr_rotated([1,2,3,4,5,6,7],[4,5,6,7,1,2,0])) | true |
21071fe6fb89612db0d649e2a05369a41937f1c3 | Python | akrherz/pals | /cgi-bin/archivewx/iowawx/year_graph.py | UTF-8 | 4,919 | 2.765625 | 3 | [] | no_license | #!/usr/local/bin/python
# This program will generate graphs of daily temperature data
# Daryl Herzmann 4-10-99
import gd, sys, functs, tempfile, style, os, pg
from cgi import *
# Module-level CGI configuration: database handle, image geometry, and the
# filesystem/URL roots where generated gifs are written and served from.
mydb = pg.connect('coop', 'meteor.geol.iastate.edu', 5432)
xwidth = 800    # overall gif width in pixels
yheight = 500   # overall gif height in pixels
base_fref = '/home/httpd/html/archivewx/iowawx/graphs/'  # filesystem path for output
base_href = '/archivewx/iowawx/graphs/'                  # matching URL prefix
def content():
    # Parse the CGI form: required "city" and "year" fields plus an optional
    # "loop" count (defaults to 1 when absent).  Year must fall in 1893-1998.
    form = FormContent()
    if form.has_key("city"):
        city = form["city"][0]
    else:
        style.SendError("Please Enter a City")
    if form.has_key("year"):
        year = str(form["year"][0])
    else:
        style.SendError("Please Enter a Valid Year")
    if form.has_key("loop"):
        loop = str(form["loop"][0])
    else:
        loop = 1
    if int(year) < 1893 or int(year) > 1998:
        style.SendError("Please Enter a Valid Year")
    # NOTE(review): if style.SendError() does not terminate the request,
    # `city`/`year` can be unbound at this return -- presumably it exits;
    # confirm in the `style` module.  Also note `loop` is a str from the form
    # but an int (1) when defaulted.
    return city, year, loop
def query_station(city, year):
    # Fetch (high, low) temperature rows for every day of `year` from the
    # per-city table.
    # WARNING: `city` and `year` are interpolated straight into the SQL string
    # and originate from the CGI form -- a classic SQL-injection risk.
    results = mydb.query("SELECT high,low from "+city+" WHERE date_part('year', day) = "+year+" ")
    results = results.getresult()
    return results
def image(city_name, year):
    # Build the base graph image: black background, titles, and reference
    # temperature lines.  The y axis is flipped -- a temperature T is plotted
    # at pixel row 120 - T, so 100 F sits near the top and 0 F at the bottom.
    im = gd.image((xwidth,yheight))
    # Allocate Colors
    red = im.colorAllocate((255,0,0))
    green = im.colorAllocate((0,255,0))
    blue = im.colorAllocate((0,0,255))
    black = im.colorAllocate((0,0,0))
    white = im.colorAllocate((255,255,255))
    lgreen = im.colorAllocate((127,125,85))
    label = gd.gdFontMediumBold
    title = gd.gdFontGiant
    im.fill((10,10), black)	# Sets up backround of image
    im.string(title, (10, 5), "Temperature Mins / Maxs for "+city_name+" during "+year , white)
    im.string(title, (xwidth - 450, yheight - 100), "Consecutive Days", white)
    im.stringUp(title, (0, yheight - 250), "Temperature ", white)
    # Shift the drawing origin and scale before drawing the reference grid
    im.origin((20,0),2,3)
    im.line((0,20),(380,20), lgreen)	# 100 degree line
    im.line((0,88),(380,88), lgreen)	# 32 degree line
    im.line((0,120),(380,120), lgreen)	# 0 degree line
    im.string(label, (0, 16), "100 F", lgreen)
    im.string(label, (0, 84), "32 F", lgreen)
    im.string(label, (0, 116), "0 F", lgreen)
    im.origin((50,0),2,3)
    # Season tick marks along the 32 F line
    im.line((90,83),(90,93), white)	# April degree line
    im.line((181,83),(181,93), white)	# July degree line
    im.line((273,83),(273,93), white)	# October degree line
    return im
def parse_data(results):
    # Convert (high, low) DB rows into two point tuples for plotting.
    # Each point is (day_number, 120 - temperature): the image's y axis grows
    # downward with y=120 pinned at 0 F, so temperatures are flipped about 120.
    highs = tuple((day + 1, 120 - int(row[0])) for day, row in enumerate(results))
    lows = tuple((day + 1, 120 - int(row[1])) for day, row in enumerate(results))
    return highs, lows
def html_gif(filename):
    # Emit the HTML fragment that embeds the finished graph plus follow-up
    # links.  (Python 2 print statements -- this whole script predates Py3.)
    print '<HTML>'
    print '<img src="'+filename+'">'
    print '<H3>Options:</H3>'
    print '<P><a href="'+filename+'">Shift-Click to download this graph</a>'
    print '<P><a href="index.py?opt=graph_yearly">Try another query</a>'
def make_animate(dirname):
    # Stitch all per-year gifs in the session directory into anim.gif using
    # gifsicle, and return the URL of the animation.  `dirname` is generated
    # internally (digits only), so the shell command is not form-controlled.
    os.chdir('/home/httpd/html/archivewx/iowawx/graphs/'+dirname+'/')
    os.popen('/usr/bin/gifsicle --delay=100 --loopcount=3 *.gif > anim.gif')
    return '/archivewx/iowawx/graphs/'+dirname+'/anim.gif'
def Main():
    # CGI entry point: read form values, render one gif per requested year,
    # optionally animate them, and emit the HTML page.
    city, year, loop = content()	# Returns forms values
    # City => 3 or 4 digit station code
    # year => 4 digit string for year
    # loop => an interger count variable
    style.header("Historical Iowa WX Data", "white")	# Set up HTML page for apache
    city_name = functs.convert_station(city)	# Convert code into string name of city
    im = []		# Needs an array to set up all the picts
    # NOTE(review): tempfile.mktemp() is race-prone (deprecated in favour of
    # mkdtemp()); tolerable for this 1999-era CGI but worth replacing.
    dirname = tempfile.mktemp()	# Create a directory name via tempfile module
    dirname = dirname[-5:-2] + dirname[-1:]	# I only want intergers in the name
    while (os.path.isdir(base_fref+dirname) ):
        dirname = tempfile.mktemp()	# Create a directory name via tempfile module
        dirname = dirname[-5:-2] + dirname[-1:]
    os.mkdir(base_fref+dirname, 0777)	# Create a directory over in HTML side
    for i in range(int(loop)):	# Create int(loop) number of gif images
        im.append(image(city_name, year))	# Create an image instance to be modified
        results = query_station(city, year)	# Query database system for results
        if len(results) == 0:	# If no results are found, quit the loop
            break
        highs, lows = parse_data(results)	# Parse out the results into two tuples
        red = im[i].colorAllocate((255,0,0))	# Allocate needed colors for lines on graph
        blue = im[i].colorAllocate((0,0,255))
        im[i].lines(highs, red)	# High values put on graph in red
        im[i].lines(lows, blue)	# Low values put on graph in blue
        im[i].writeGif(base_fref+dirname+"/const"+str(i)+".gif")	# Create gif graph
        year = str(int(year)+ 1)	# increment year value by one
    # NOTE(review): when "loop" comes from the form it is a *string*; in
    # Python 2 any str compares > int, so form-supplied requests always take
    # the animate branch even for a single frame.  Confirm intended behaviour.
    if loop > 0:	# If a loop was needed, then we need to animate it
        gif_file = make_animate(dirname)	# based on the assumption that all gifs are in one dir
    else:
        gif_file = base_href+dirname+"/const0.gif"	# Otherwise only one gif exists so that is where
						# It is at
    html_gif(gif_file)	# Create the HTML neccessary to view the finished product
    style.std_bot()	# Finish and exit...
Main()
| true |
a335b735d2b6fd2245e8ff36eb3c2768306f846e | Python | kaviraj333/programme.py | /swapping.py | UTF-8 | 52 | 2.59375 | 3 | [] | no_license | n,m=map(int,input().split())
# Tuple unpacking performs the swap without a temporary variable.
n, m = m, n
print(n, m)
| true |
eae5b37c86576a5015b87cfdb77976e4d3148cac | Python | socolofs/tamoc | /bin/ambient/np_profile_from_ctd.py | UTF-8 | 5,397 | 2.890625 | 3 | [
"MIT"
] | permissive | """
Create a profile from an ASCII CTD datafile
===========================================
Use the TAMOC ambient module to create profiles in netCDF format for use by
TAMOC from data in text files downloaded from a CTD. This file demonstrates
working with the data from the R/V Brooks McCall at Station BM 54 on May 30,
2010, stored in the file /Raw_Data/ctd_BM54.cnv.
This script demonstrates the new version of the `ambient.Profile` object, which uses `xarray`. For the older version, which used netCDF datasets, see the script with the same file name but prepended by 'nc'.
Notes
-----
Much of the input data in the script (e.g., columns to extract, column names,
lat and lon location data, date and time, etc.) is read by the user manually
from the header file of the CTD text file. These data are then hand-coded in
the script text. While it would be straightforward to automate this process
for a given format of CTD files, this step is left to the user to customize to
their own data sets.
Requires
--------
This script read data from the text file::
./Profiles/Raw_Data/ctd_BM54.dat
Returns
-------
This script generates a `ambient.Profile` object, whose netCDF file is written
to the file::
./Profiles/Profiles/BM54.nc
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from tamoc import ambient
from tamoc import seawater
from netCDF4 import date2num, num2date
from datetime import datetime
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import os
if __name__ == '__main__':
    # Get the path to the input file
    __location__ = os.path.realpath(os.path.join(os.getcwd(),
                                    os.path.dirname(__file__),
                                    '../../tamoc/data'))
    dat_file = os.path.join(__location__,'ctd_BM54.cnv')
    # Load in the data using numpy.loadtxt
    raw = np.loadtxt(dat_file, comments = '#', skiprows = 175,
                     usecols = (0, 1, 3, 8, 9, 10, 12))
    # Remove reversals in the CTD data and get only the down-cast
    raw_data = ambient.extract_profile(raw, z_col=3, z_start=50.0)
    # Reorganize this data into the correct order
    data = np.zeros(raw_data.shape)
    ztsp = ['z', 'temperature', 'salinity', 'pressure']
    ztsp_units = ['m', 'deg C', 'psu', 'db']
    chem_names = ['oxygen', 'wetlab_fluorescence', 'density']
    chem_units = ['mg/l', 'mg/m^3', 'kg/m^3']
    # Column remap: raw usecols order -> (z, T, S, P, oxygen, fluor, density)
    data[:,0] = raw_data[:,3]
    data[:,1] = raw_data[:,0]
    data[:,2] = raw_data[:,4]
    data[:,3] = raw_data[:,1]
    data[:,4] = raw_data[:,6]
    data[:,5] = raw_data[:,2]
    data[:,6] = raw_data[:,5]
    # Create an ambient.Profile object for this dataset
    # (this chem_names assignment duplicates the identical one above)
    chem_names = ['oxygen', 'wetlab_fluorescence', 'density']
    bm54 = ambient.Profile(data, ztsp=ztsp, ztsp_units=ztsp_units,
           chem_names=chem_names, chem_units=chem_units, err=0.00001)
    # Plot the density profile using the interpolation function
    z = np.linspace(bm54.z_min, bm54.z_max, 250)
    rho = np.zeros(z.shape)
    T = np.zeros(z.shape)
    S = np.zeros(z.shape)
    C = np.zeros(z.shape)
    O2 = np.zeros(z.shape)
    tsp = bm54.get_values(z, ['temperature', 'salinity', 'pressure'])
    for i in range(len(z)):
        rho[i] = seawater.density(tsp[i,0], tsp[i,1], tsp[i,2])
        T[i], S[i], C[i], O2[i] = bm54.get_values(z[i], ['temperature',
            'salinity', 'wetlab_fluorescence', 'oxygen'])
    # Extract data for comparison
    z_m = bm54.ds.coords['z'].values
    rho_m = bm54.ds['density'].values
    # Figure 1: interpolated/computed density next to the measured profile
    plt.figure(1)
    plt.clf()
    plt.show()
    ax1 = plt.subplot(121)
    ax1.plot(rho, z)
    ax1.set_xlabel('Density (kg/m^3)')
    ax1.set_ylabel('Depth (m)')
    ax1.invert_yaxis()
    ax1.set_title('Computed data')
    # Compare to the measured profile
    ax2 = plt.subplot(1,2,2)
    ax2.plot(rho_m, z_m)
    ax2.set_xlabel('Density (kg/m^3)')
    ax2.invert_yaxis()
    ax2.set_title('Measured data')
    plt.draw()
    # Figure 2: fluorescence, temperature/oxygen, and salinity over 800-1500 m
    plt.figure(2)
    plt.clf()
    plt.show()
    ax1 = plt.subplot(131)
    ax1.plot(C*1.e6, z, '-', label='Fluorescence (g/m^3)')
    ax1.set_xlabel('CTD component values')
    ax1.set_ylabel('Depth (m)')
    ax1.set_ylim([800, 1500])
    ax1.set_xlim([0, 40])
    ax1.invert_yaxis()
    ax1.locator_params(tight=True, nbins=6)
    ax1.legend(loc='upper right', prop={'size':10})
    ax1.grid(True)
    ax2 = plt.subplot(132)
    ax2.plot(T - 273.15, z, '-', label='Temperature (deg C)')
    ax2.plot(O2*1.e3, z, '--', label='Oxygen (g/m^3)')
    ax2.set_xlabel('CTD component values')
    ax2.set_ylabel('Depth (m)')
    ax2.set_ylim([800, 1500])
    ax2.set_xlim([0, 8])
    ax2.invert_yaxis()
    ax2.locator_params(tight=True, nbins=6)
    ax2.legend(loc='upper right', prop={'size':10})
    ax2.grid(True)
    ax3 = plt.subplot(133)
    ax3.plot(S, z, '-', label='Salinity (psu)')
    ax3.set_xlabel('CTD component values')
    ax3.set_ylabel('Depth (m)')
    ax3.set_ylim([800, 1500])
    ax3.set_xlim([34.5, 35])
    ax3.invert_yaxis()
    ax3.locator_params(tight=True, nbins=6)
    ax3.legend(loc='upper right', prop={'size':10})
    ax3.grid(True)
    plt.draw()
    # Close the netCDF dataset
    bm54.close_nc()
| true |
60901985d99fde32913a8383b647c0401b0b3577 | Python | LolsMeow/Data-Science | /legislators.py | UTF-8 | 577 | 3.625 | 4 | [] | no_license | """
CSci 39542: Introduction to Data Science
Program 2: Senator's Name
Jiaming Zheng
jiaming.zheng745@myhunter.cuny.edu
Resources: https://www.geeksforgeeks.org/how-to-select-multiple-columns-in-a-pandas-dataframe/
"""
import pandas as pd
input_File = input("Enter input file name: ")
output_File = input("Enter output file name: ")
legis = pd.read_csv(input_File)
# Senators are the rows with a non-null senate_class.  A single vectorized
# selection replaces the old iterrows() loop (same columns, same values,
# same CSV output -- just far faster for large files).
df = legis.loc[legis['senate_class'].notnull(), ['first_name', 'last_name']]
df.to_csv(output_File, index = False) | true |
886b09b50688f0c66b120f85fb55eecd807f8ef9 | Python | soheilred/notes | /opt_project/mip.py | UTF-8 | 1,174 | 3.28125 | 3 | [] | no_license | #!/usr/bin/python
# Copyright 2019, Gurobi Optimization, LLC
# This example formulates and solves the following simple MIP model:
# maximize
# x + y + 2 z
# subject to
# x + 2 y + 3 z <= 4
# x + y >= 1
# x, y, z binary
from gurobipy import *
try:
    # Create a new model
    m = Model("mip1")
    # Create variables -- despite the "mip1" name, all four are continuous,
    # so this instance is actually a linear program.
    x1 = m.addVar(vtype=GRB.CONTINUOUS, name="x1")
    x2 = m.addVar(vtype=GRB.CONTINUOUS, name="x2")
    x3 = m.addVar(vtype=GRB.CONTINUOUS, name="x3")
    x4 = m.addVar(vtype=GRB.CONTINUOUS, name="x4")
    # Set objective
    m.setObjective(-4 * x1 - 2 * x2, GRB.MINIMIZE)
    # Set constraints; x3 and x4 act as slack variables for the equalities.
    m.addConstr(x1 + x2 + x3 == 5.0, "c0")
    m.addConstr(2 * x1 + 0.5 * x2 + x4 == 8.0, "c1")
    # NOTE: the four lower-bound constraints below are redundant -- Gurobi
    # variables default to a lower bound of 0 -- but are kept for clarity.
    m.addConstr(x1 >= 0, "c2")
    m.addConstr(x2 >= 0, "c3")
    m.addConstr(x3 >= 0, "c4")
    m.addConstr(x4 >= 0, "c5")
    # Optimize model
    m.optimize()
    for v in m.getVars():
        print('%s %g' % (v.varName, v.x))
    print('Obj: %g' % m.objVal)
except GurobiError as e:
    print('Error code ' + str(e.errno) + ": " + str(e))
except AttributeError:
print('Encountered an attribute error') | true |
898ceb63ac8b3bcbbbfab5ad7b5cc3349f43ffe9 | Python | 9592/codegram | /codegram/images/views.py | UTF-8 | 6,591 | 3.109375 | 3 | [
"MIT"
] | permissive | #장고 템플릿 사용 안함
#from django.shortcuts import render
#APIView: 엘리멘트를 가져오고 보여주고 method 관리
#reponse : http response
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models, serializers
# Create your views here.
"""
requeset 에 대하여
httpRequest 의 속성으로는 다음과 같이 사용할수 있음
request.scheme, body, path, path_info, method 등
print (request.'attb') 로 확인가능
request.user 는 현재 사용자의 대한 정보를 확인 할수 있음
print (request.user.website)
Post 는 업로드 및 데이터 수정
테스트 했던 코드는 다 주석 처리!
class ListAllImages(APIView):
#get http method (self : 이미 정의된 variable, reqeust : Object 요청, format : json,xml 등 None(기본)= json
def get(self, request, format=None):
#httpRequest 속성
#print (request.scheme)
#print (request.path)
#objects 는 많은 Objects 을 가지고 있음 (all, set, get 등등)
#전체 이미지 오브젝트를 넣음 (단 이건 파이썬 Object)
all_images = models.Image.objects.all()
#위 파이썬 오브젝트를 serializer 해서 json 으로 변경
#시리얼 라이저는 단수(1개 값만 받음) 라서 여러개의 경우 따로 옵션을 정해줘야함 (many=True)
serializer = serializers.imageSerializer (all_images, many=True)
#response(data 는 불러올 데이터 )
return Response(data=serializer.data)
# 시리얼 라이즈에 맞는 URL 을 맞춰서 입력 해야함
class ListAllComments(APIView):
def get(self,request,format=None):
#전부 다 확인하고 싶을때 all()
#all_comment = models.Comment.objects.all()
#조건을 통해 확인 하고 싶을때 filter ()
#id
#all_comment = models.Comment.objects.filter(id=2)
#creator
#all_comment = models.Comment.objects.filter(creator=1)
#아래 출력으로 User ID 확인 가능 (현재 사용중인)
#print (request.user.id)
#유저 변수로 작업
user_id = request.user.id
all_comment = models.Comment.objects.filter(creator=user_id)
serializer = serializers.CommentSerializer(all_comment, many=True)
return Response(data=serializer.data)
class ListAllLikes(APIView):
def get(self, request, format=None):
all_likes = models.Like.objects.all()
serializer = serializers.LikeSerializer(all_likes, many=True)
return Response(data=serializer.data)
"""
class Feed(APIView):
    # Feed endpoint: return the newest images from every user the requester
    # follows, most recent first.
    def get(self,request,format=None):
        user = request.user
        following_users = user.following.all()
        image_list = []
        for following_user in following_users :
            # take up to the 10 most recent images per followed user
            user_images = following_user.images.all()[:10]
            for image in user_images:
                # collect each fetched image into one flat list
                image_list.append(image)
        # Sort newest-first by creation time; the inline lambda replaces the
        # separate get_key() helper an earlier version passed as sorted()'s key.
        sorted_list = sorted(image_list,key=lambda image: image.created_at,reverse =True)
        # debug output left in by the author
        print (sorted_list)
        serializer = serializers.imageSerializer(sorted_list, many=True)
        return Response(serializer.data)
"""
#image 불러와서 image 안에 생성 날짜 리턴 .. lamda 사용해서 펑션 만들지 않고 바로 사용 가능
def get_key(image):
return image.created_at
"""
# Like handling for an image (first request likes it, a second one unlikes it)
class LikeImage(APIView):
    # Toggle the requesting user's like on an image: the first GET creates a
    # like (201), a second GET on the same image removes it (204).
    def get(self, request, image_id, format=None):
        user = request.user
        try:
            found_image = models.Image.objects.get(id=image_id)
        except models.Image.DoesNotExist:
            # BUG FIX: the Django model exception is `DoesNotExist` (no
            # trailing "s"); the old `DoesNotExists` raised AttributeError
            # on the except clause instead of returning this 404.
            return Response(status=status.HTTP_404_NOT_FOUND)
        try:
            preexisting_like = models.Like.objects.get(
                creator=user,
                image=found_image
            )
            # already liked -> remove the like (toggle off)
            preexisting_like.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        except models.Like.DoesNotExist:
            # not liked yet -> create the like (toggle on)
            new_like = models.Like.objects.create(
                creator=user,
                image=found_image
            )
            new_like.save()
            return Response(status=status.HTTP_201_CREATED)
# Posting a comment on an image
class CommentOnImage(APIView):
    # Create a comment on an image on behalf of the requesting user.
    def post(self,request,image_id,format=None):
        user = request.user
        try :
            # fetch the target image
            found_image = models.Image.objects.get(id=image_id)
            # (falls through to the 404 below when it does not exist)
        except models.Image.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # deserialize the POSTed comment payload
        serializer = serializers.CommentSerializer (data=request.data)
        # validate before saving
        if serializer.is_valid():
            # attach the author and target image server-side, then persist
            serializer.save(creator=user, image=found_image)
            return Response(data=serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Comment (deleting a comment)
class Comment(APIView):
    # Delete one of the requesting user's own comments.
    def delete(self,request,comment_id,format=None):
        user = request.user
        try:
            # restricting the lookup to creator=user means users can only
            # delete comments they authored; others 404 rather than 403
            comment = models.Comment.objects.get(id=comment_id, creator=user)
            comment.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        except models.Comment.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
| true |
3a517da8bb3ba356a8a5abb12f1de77004f192e9 | Python | lizetheP/PensamientoC | /programas/LabLiz/ciclo2.py | UTF-8 | 219 | 2.96875 | 3 | [] | no_license | def ciclo2():
d = 0
r = 13
s = r / 2
while s > 2 or r % 2 == 0 :
d = d + 1
r = r - 2
s = s - 2
print(str(d) + " " + str(r) + " " + str(s))
def main():
    # Entry point: run the single exercise.
    ciclo2()
main()
| true |
198c013d888100ac571e0858936d56958db357f8 | Python | idoSinai/uni-coursework | /Defensive programming - 20937/mm13/question 2/bank.py | UTF-8 | 259 | 3.75 | 4 | [] | no_license |
class BankAccount:
def __init__(self, name, amt):
self.name = name
self.amt = amt
def __str__(self):
return "Your account, [%s], has [%d] dollars." % (self.name, self.amt)
# Demo: create one account and show its summary line.
demo_account = BankAccount("Bob", 100)
print(demo_account)
| true |
887908ae261a5d40db29d2334e1d75edbc8dc750 | Python | Kwab3na/Revisions-Old | /Assignment Operator.py | UTF-8 | 114 | 2.9375 | 3 | [] | no_license | first_number = 2
second_number = 5
# Augmented assignments rewritten as explicit arithmetic re-binding.
first_number = first_number + 2
second_number = second_number - 3
print(first_number)
print(second_number)
df89548a2fca5de61f9595342146c96707ac54b6 | Python | SergeiG81/python | /Lesson-4/Num_7.py | UTF-8 | 772 | 4 | 4 | [] | no_license | '''
Реализовать генератор с помощью функции с ключевым словом yield, создающим очередное значение.
При вызове функции должен создаваться объект-генератор. Функция должна вызываться следующим
образом: for el in fact(n). Функция отвечает за получение факториала числа, а в цикле необходимо выводить
только первые n чисел, начиная с 1! и до n!.
'''
def for_el_in_fact(n=None):
    """Yield the integers 1..n, one at a time.

    Args:
        n: inclusive upper bound.  When omitted (None) it is read from
           stdin, preserving the original interactive behaviour while
           making the generator testable without console input.
    """
    if n is None:
        n = int(input())
    for el in range(1, n + 1):
        yield el
# Drive the generator: multiply the yielded values 1..n together so each
# iteration prints the running factorial (1!, 2!, ..., n!).
g = for_el_in_fact()
fact = 1
for el in g:
    fact = fact * el
print(fact) | true |
7df566f6ed9b317985f4ae8c8254342fa4856b99 | Python | Therodin/diminui-oeaumentoAporcentagem | /ex013i.py | UTF-8 | 612 | 4 | 4 | [] | no_license | i = str(input('Deseja caucular um aumento ou um abaixo salarial?: '))
# Branch on the user's choice: 'aumento' computes a raise, anything else a cut.
if i == 'aumento':
    am = float(input('Qual o salário do funcionario? R$: '))
    pc = float(input('Qual a porcentagem do reajuste?: '))
    ea = am + (am * pc / 100)
    print(f'O aumento em {pc:.0f}% salarial do funcionario ficará {ea:.2f}')
else:
    # The old `i = 'abaixo'` reassignment here was dead code (`i` is never
    # read again) and has been removed.
    ab = float(input('Qual o salário do funcionario?: '))
    pct = float(input('Qual a porcentagem do reajuste?: '))
    ae = ab - (ab * pct / 100)
    print(f'O desconto salarial avaliado em {pct:.0f}% do funcionário será {ae:.2f}')
| true |
5e8e9bf5b650e2283e54202ee86b3ceb9d7cff03 | Python | nneonneo/doublethink | /bitstr.py | UTF-8 | 685 | 3.28125 | 3 | [] | no_license | def bytes_to_bits(data):
return BitString(''.join('{:08b}'.format(ord(c)) for c in data))
class BitString(list):
    """A list of bit characters ('0'/'1'), with None marking unknown bits."""

    def write_safe(self, pos, size, value):
        """Write `value` at `pos` without clobbering conflicting known bits.

        `value` may be an int (rendered as `size` binary digits, masked to
        the low `size` bits) or a '0'/'1' sequence (whose length is used
        when `size` is falsy).  Raises ValueError if any already-set bit in
        the target range disagrees with the new value.
        """
        if isinstance(value, int):
            # `long` merged into `int` in Python 3; the original
            # isinstance(value, (int, long)) raised NameError there.
            value = '{:0{width}b}'.format(value & ((1 << size) - 1), width=size)
        elif not size:
            size = len(value)
        if not all(v is None or v == value[i] for i, v in enumerate(self[pos:pos + size])):
            raise ValueError("attempted write to position %d of %s already has value %s" % (pos, value, self[pos:pos + size]))
        self[pos:pos + size] = value

    @staticmethod
    def repr(bs):
        """Render a bit sequence as a string, showing unknown bits as 'z'."""
        return ''.join('z' if v is None else v for v in bs)
| true |
8014d183a818295f990ea92f03085fd5a541e95c | Python | mhee4321/python_algorithm | /thisIsCodingTest/Sorting/24.antenna.py | UTF-8 | 123 | 2.953125 | 3 | [] | no_license | n = int(input())
data = list(map(int, input().split()))
data.sort()
# The antenna position minimizing total distance is the median house.
# The old expression `data[0]+data[n-1] //2` was an operator-precedence bug
# (`//` binds before `+`); the intended answer is the median of the sorted
# coordinates, as the author's own commented-out line showed.
print(data[(n - 1) // 2])
ae9453b78d268a20bdb984ac5916c4e016335b92 | Python | UWPCE-PythonCert-ClassRepos/220-Advanced-Summer-2019 | /students/Sally_Shen/lesson04/assignment/src/basic_operations.py | UTF-8 | 3,835 | 3.1875 | 3 | [] | no_license | import peewee
import logging
# Module-level logging: DEBUG-level messages go to db.log.
logger = logging.getLogger("basic_operations")
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("db.log")
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
database = peewee.SqliteDatabase("customers.db")
database.connect()
# BUG FIX: the pragma name is `foreign_keys` (plural).  SQLite silently
# ignores unknown pragmas, so the old 'PRAGMA foreign_key = ON' never
# actually enabled foreign-key enforcement.
database.execute_sql('PRAGMA foreign_keys = ON;')
# Start from a clean slate (NOTE: this runs as an import-time side effect).
database.execute_sql('drop table if exists customer;')
class BaseModel(peewee.Model):
    # Shared base class binding every model to the module-level SQLite database.
    class Meta:
        database = database
class Customer(BaseModel):
    """
    A customer record in the database.

    Fields: customer ID (primary key), name, last name, home address,
    phone number, email address, status (active/inactive), credit limit.
    """
    customer_id = peewee.CharField(primary_key=True, max_length=30)
    name = peewee.CharField(max_length=50)
    last_name = peewee.CharField(max_length=50, null=True)  # optional
    address = peewee.CharField(max_length=75)
    phone_number = peewee.CharField(max_length=15)
    email = peewee.CharField(max_length=320)  # based on max email address length search
    status = peewee.CharField(max_length=10)  # "Inactive" or "Active"
    credit_limit = peewee.FloatField()
def create_tables():
    # Create the Customer table (no-op if it already exists) inside a transaction.
    with database.transaction():
        logger.debug("Creating table Customer")
        database.create_tables([Customer])
def drop_tables():
    # Drop the Customer table inside a transaction.
    with database.transaction():
        logger.debug("Dropping table Customer")
        database.drop_tables([Customer])
def add_customer(customer_id, name, last_name, address, phone_number, email, status, credit_limit):
    # Insert one customer row.  Best-effort: any failure (e.g. duplicate
    # primary key) is logged as a warning rather than raised to the caller.
    try:
        with database.transaction():
            customer = Customer.create(
                customer_id=customer_id,
                name=name,
                last_name=last_name,
                address=address,
                phone_number=phone_number,
                email=email,
                status=status,
                credit_limit=credit_limit
            )
            logger.debug(f"Customer saved {customer_id}")
    except Exception as e:
        logger.warning(f"error creating {customer_id}")
        logger.warning(e)
def search_customer(customer_id):
    # Return a dict with name/last_name/phone_number/email for the customer;
    # on any failure (including "not found") log a warning and return {}.
    try:
        customer = Customer.get(Customer.customer_id == customer_id)
        logger.debug(f"Found customer {customer_id}")
        return {
            "name": customer.name,
            "last_name": customer.last_name,
            "phone_number": customer.phone_number,
            "email": customer.email,
        }
    except Exception as e:
        logger.warning(f"error reading customer {customer_id}")
        logger.warning(e)
        return {}
def delete_customer(customer_id):
    # Delete the customer row; return True on success, False on any failure
    # (the exception is logged, not propagated).
    try:
        customer = Customer.get(Customer.customer_id == customer_id)
        with database.transaction():
            customer.delete_instance()
            logger.debug(f"Deleted customer id {customer_id}")
            return True
    except Exception as e:
        logger.warning(e)
        return False
def update_customer_credit(customer_id, credit_limit):
    """Set `credit_limit` for the given customer.

    Raises:
        ValueError: "NoCustomer <id>" when the customer does not exist.
    """
    try:
        with database.transaction():
            customer = Customer.get(Customer.customer_id == customer_id)
            customer.credit_limit = credit_limit
            customer.save()
    except Customer.DoesNotExist:
        # Only "customer not found" maps to ValueError.  The old broad
        # `except Exception` also converted unrelated DB failures into
        # "NoCustomer", masking real errors; those now propagate.
        raise ValueError("NoCustomer {}".format(customer_id))
def list_active_customers():
    # Print and count customers whose status is exactly 'active'.
    # NOTE(review): the comparison is case-sensitive, but the Customer
    # docstring suggests 'Active'/'Inactive' -- confirm which casing is
    # actually stored; a mismatch would make this always return 0.
    try:
        active_customers = Customer.select().where(Customer.status == 'active')
        for customer in active_customers:
            print(f"{customer.name} {customer.last_name} status is {customer.status}")
        return len(active_customers)
    except Exception as e:
        print(e)
        return 0
if __name__ == '__main__':
    # Smoke test: insert a single (lower-case "active") customer.
    add_customer(1, "Sally", "Shen", "5859 20th pl., Seattle, WA, 98115", "917-888-9999",
                 "shenyingyy@gmail.com", "active", 2000)
| true |
e513d7814d3f926546b96b7f1222f6fb3d971bf4 | Python | aiborra11/crypto-scraper | /source/utils.py | UTF-8 | 2,320 | 3.265625 | 3 | [] | no_license | import pandas as pd
from tqdm import tqdm
from datetime import datetime, timedelta
def interval_to_scrape(day1='20141122', max_date=''):
    """
    Build the list of YYYYMMDD date strings from day1 through max_date,
    inclusive, capped at 2500 days.

    Arguments:
    ----------
    day1 {[str]} -- date from which we want to start to collect data.
    max_date {[str]} -- last day we want to have data collected.  When empty,
        today's date is used.

    Returns:
    --------
    {[list]}
        list of dates we will use to collect data.
    """
    dates = []
    start = datetime.strptime(str(day1), '%Y%m%d')
    # Resolve the inclusive upper bound once, as an int for cheap comparison.
    limit = int(max_date) if max_date else int(datetime.today().strftime('%Y%m%d'))
    # 2500 days (~6.8 years) is the hard cap the original scraper used.
    for offset in range(2500):
        stamp = (start + timedelta(days=offset)).strftime('%Y%m%d')
        if int(stamp) > limit:
            # Dates are generated in order, so nothing later can qualify;
            # breaking avoids the original's wasted iterations, which kept
            # running to the 2500-day cap even after passing max_date.
            break
        dates.append(stamp)
    return dates
def data_scraper(interval_to_update, crypto=''):
    """
    Iterates through a list of dates scraping the data for the specified
    cryptocurrency.

    Arguments:
    ----------
    interval_to_update {[list]} -- Interval of dates we are willing to collect.
    crypto {[str]} -- Cryptocurrency name we are willing to collect
        (may contain several candidates separated by '_').

    Returns:
    --------
    (crypto_data, warnings, crypto) -- collected DataFrame, list of dates
        that failed to download/parse, and the resolved symbol name.
    """
    cryptos_info = crypto.split('_')
    crypto_data = pd.DataFrame()
    warnings = []
    for date in tqdm(interval_to_update):
        try:
            # Scrape one daily gzipped CSV of trades.
            dataset = pd.read_csv(
                f'https://s3-eu-west-1.amazonaws.com/public-testnet.bitmex.com/data/trade/{date}.csv.gz')
            # Resolve which candidate symbol actually appears in this file.
            crypto = [crypt for crypt in cryptos_info if crypt in dataset['symbol'].unique()][0]
            crypto_data = pd.concat([crypto_data, dataset[dataset['symbol'] == crypto]])
        except Exception:
            # The old bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit, making long scrapes impossible to interrupt;
            # Exception still covers download failures and missing symbols.
            warnings.append(date)
    return crypto_data, warnings, crypto
| true |
87a512cb537f3d69534431afeac5d1ef26ccc6e3 | Python | JKapple/11.2 | /Employee.py | UTF-8 | 350 | 3.109375 | 3 | [] | no_license | import ProductionWorker
# Collect employee details from the console.
usrFName = input("Enter your first name: ")
usrLName = input("Enter your last name: ")
usrShift = input("Enter your shift number: ")
usrWage = input("Enter your hourly pay rate: ")
# Translate the shift code via a lookup table; any unknown code is flagged.
_shift_names = {"1": "Day", "2": "Night"}
usrShift = _shift_names.get(usrShift, "Incorrect Value")
| true |
95fa3876623cb07c5c539e579f1e608bd568ea63 | Python | BelenAleman/spacy-sentence-bert | /main.py | UTF-8 | 1,015 | 2.84375 | 3 | [
"MIT"
] | permissive | import spacy
import numpy as np
from sentence_transformers import SentenceTransformer
def vectorise(sent):
    # Encode the object's .text with the module-level sentence-transformer
    # and return the single resulting vector.  `model` is resolved at call
    # time, so the assignment made later in the module is the one used.
    return model.encode([sent.text])[0]
def overwrite_vectors(doc):
    # Pipeline component: route .vector for the Doc, its Spans and its Tokens
    # through the sentence-transformer instead of spaCy's built-in vectors.
    doc.user_hooks['vector'] = vectorise
    doc.user_span_hooks['vector'] = vectorise
    doc.user_token_hooks['vector'] = vectorise
    return doc
nlp = spacy.blank('en')
nlp.add_pipe(overwrite_vectors)
# https://github.com/UKPLab/sentence-transformers
# NOTE: the previous 'bert-base-nli-mean-tokens' (768-dim) load was dead code
# -- immediately overwritten below -- and cost a needless model download.
model = SentenceTransformer('roberta-large-nli-stsb-mean-tokens') # 1024
sentences = ['This framework generates embeddings for each input sentence',
             'Sentences are passed as a list of string.',
             'The quick brown fox jumps over the lazy dog.',
             'Sentences are given as a list of strings']
docs = [nlp(s) for s in sentences]
print(docs[0].vector.shape)
# Pairwise similarity matrix over all sentence embeddings.
m = np.zeros((len(docs), len(docs)))
for i, d_i in enumerate(docs):
    for j, d_j in enumerate(docs):
        m[i,j] = d_i.similarity(d_j)
print(m) | true |
9ec01ddf4312d7cdbfee30513f0ae1696cda147a | Python | z-ashley/change-in-shares | /convert_csv.py | UTF-8 | 225 | 2.53125 | 3 | [] | no_license | import pandas as pd
import numpy as np
# Read the holdings CSV into a DataFrame
df_new = pd.read_csv('arkg_holdings.csv')
# Write it back out as an Excel workbook.  ExcelWriter.save() was deprecated
# and removed in pandas 2.0; the context manager closes (and saves) the
# workbook reliably, even if to_excel raises.
with pd.ExcelWriter('arkg_holdings.xlsx') as writer:
    df_new.to_excel(writer, index=False)
| true |
af20818cc6d2b9aea31726d1fb66eaa1a7076d30 | Python | Aasthaengg/IBMdataset | /Python_codes/p02594/s662503125.py | UTF-8 | 64 | 3.21875 | 3 | [] | no_license | i = int(input())
# Conditional expression replaces the four-line if/else.
print("Yes" if i >= 30 else "No")
0d9e37547ee0984ae2455f7a89a1036309a3b208 | Python | jakubdabek/prolog-uni | /logic-programming/list2/5.py | UTF-8 | 1,695 | 2.671875 | 3 | [] | no_license | txt = """
?- between(1, 12, N), writeln(N), time((lista(N, _), fail)).
1
% 18 inferences, 0.000 CPU in 0.001 seconds (0% CPU, Infinite Lips)
2
% 31 inferences, 0.000 CPU in 0.000 seconds (?% CPU, Infinite Lips)
3
% 78 inferences, 0.000 CPU in 0.000 seconds (?% CPU, Infinite Lips)
4
% 281 inferences, 0.000 CPU in 0.000 seconds (?% CPU, Infinite Lips)
5
% 1,355 inferences, 0.000 CPU in 0.000 seconds (?% CPU, Infinite Lips)
6
% 8,069 inferences, 0.000 CPU in 0.000 seconds (?% CPU, Infinite Lips)
7
% 56,516 inferences, 0.016 CPU in 0.004 seconds (391% CPU, 3617024 Lips)
8
% 453,163 inferences, 0.031 CPU in 0.033 seconds (95% CPU, 14501216 Lips)
9
% 4,088,361 inferences, 0.297 CPU in 0.301 seconds (99% CPU, 13771321 Lips)
10
% 40,974,839 inferences, 3.109 CPU in 3.179 seconds (98% CPU, 13177838 Lips)
11
% 451,618,970 inferences, 33.422 CPU in 34.325 seconds (97% CPU, 13512676 Lips)
12
% 5,428,949,729 inferences, 403.297 CPU in 413.507 seconds (98% CPU, 13461423 Lips)
false.
"""
from math import factorial

# Every second report line starting at index 2 is a '% N inferences ...'
# timing line for list length i = 1..12.  (The original iterated txt[1::2]
# -- the CHARACTERS of the string, not its lines -- and `factorial` was
# never imported, so it could not run.)
for i, info in enumerate(txt.strip().splitlines()[2::2], start=1):
    fact = factorial(i)
    # inference count is field 1, with thousands separators stripped
    inferences = int(''.join(info.split()[1].split(',')))
    print(f"{i:2} | {fact:12} | {inferences:12} | {(inferences/fact):4.2f}")
"""
1 | 1 | 18 | 18.00
2 | 2 | 31 | 15.50
3 | 6 | 78 | 13.00
4 | 24 | 281 | 11.71
5 | 120 | 1355 | 11.29
6 | 720 | 8069 | 11.21
7 | 5040 | 56516 | 11.21
8 | 40320 | 453163 | 11.24
9 | 362880 | 4088361 | 11.27
10 | 3628800 | 40974839 | 11.29
11 | 39916800 | 451618970 | 11.31
12 | 479001600 | 5428949729 | 11.33
"""
| true |
eaf3880adef6a0f113ced2a618fe90f02260d04c | Python | Jooakim/adventofcode | /2016/day05/day05.py | UTF-8 | 799 | 3.375 | 3 | [] | no_license | import md5
import re
def crack_pass(num_of_chars, key):
    """Advent-of-Code-style password crack, part 1.

    For increasing i, whenever hex(MD5(key + str(i))) starts with five
    zeros, take its 6th hex digit; return the first `num_of_chars` such
    digits concatenated in order of i.
    """
    import hashlib  # local import: the Py2-only `md5` module no longer exists
    found = 0
    i = 0
    password = ''
    while found < num_of_chars:
        digest = hashlib.md5((key + str(i)).encode()).hexdigest()
        if digest.startswith('00000'):
            password += digest[5]
            found += 1
        i += 1
    return password
def crack_pass_2(num_of_chars, key):
    """Advent-of-Code-style password crack, part 2 (positional).

    For qualifying digests (five leading zeros), hex digit 6 names the
    position (only '0'..'7' are valid) and digit 7 is the character; only
    the first hit per position counts.  Returns {position: char}.
    """
    import hashlib  # local import: the Py2-only `md5` module no longer exists
    found = 0
    i = 0
    password = {}
    while found < num_of_chars:
        digest = hashlib.md5((key + str(i)).encode()).hexdigest()
        if digest.startswith('00000'):
            slot = digest[5]
            # same filter as the old re.match('[0-7]', ...) plus has_key check
            if slot in '01234567' and int(slot) not in password:
                password[int(slot)] = digest[6]
                found += 1
        i += 1
    return password
# Part 2 answer: a dict of {position: char}.  Brute-forcing eight qualifying
# MD5 digests means hashing millions of candidates, so this takes a while.
password = crack_pass_2(8, 'wtnhxymk')
print(str(password))
| true |
c65f00d98b58ef95f2fb28d0b2f3eb1ca5960d89 | Python | atuanpham/wacv-journal-version | /main.py | UTF-8 | 3,340 | 2.5625 | 3 | [
"MIT"
] | permissive | import os
import errno
import click
import numpy as np
from configs.ibsr import IBSRConfig
from src.data.preprocessor import Preprocessor
from src.models.unet import Unet
from src.data.utils import DataUtils
# Use IBSRConfig
# You can create another config in 'configs' directory and change _config variable
_config = IBSRConfig
# Model instance used for training, predicting, evaluating
unet = Unet(_config.IMG_SIZE, _config.IMG_SIZE, _config.WEIGHTS_PATH)
@click.group()
def cli():
    # Ensure the processed-data directories exist before any subcommand runs.
    # os.makedirs(..., exist_ok=True) replaces the manual errno.EEXIST dance,
    # which is both more verbose and easier to get wrong.
    os.makedirs(_config.PROCESSED_TRAIN_DATA_DIR, exist_ok=True)
    os.makedirs(_config.PROCESSED_TEST_DATA_DIR, exist_ok=True)
@click.command('preprocess')
@click.option('--train-dir', 'raw_train_data_dir',
              type=click.Path(), default=_config.RAW_TRAIN_DIR)
@click.option('--test-dir', 'raw_test_data_dir',
              type=click.Path(), default=_config.RAW_TEST_DIR)
@click.option('--processed-train-dir', 'processed_train_dir',
              type=click.Path(), default=_config.PROCESSED_TRAIN_DATA_DIR)
@click.option('--processed-test-dir', 'processed_test_dir',
              type=click.Path(), default=_config.PROCESSED_TEST_DATA_DIR)
def process_data(raw_train_data_dir, raw_test_data_dir, processed_train_dir, processed_test_dir):
    # Run the raw -> processed preprocessing pipeline for both the train and
    # test sets.  (Comments rather than a docstring: click would surface a
    # docstring as the command's --help text.)
    preprocessor = Preprocessor(raw_train_data_dir=raw_train_data_dir,
                                raw_test_data_dir=raw_test_data_dir,
                                processed_train_data_dir=processed_train_dir,
                                processed_test_data_dir=processed_test_dir,
                                postfix_data_file=_config.POSTFIX_DATA_FILE,
                                postfix_mask_data_file=_config.POSTFIX_MASK_DATA_FILE, transpose=[1, 2, 0, 3])
    click.echo('Start processing data.')
    preprocessor.do_preprocess()
    click.echo('Data have been processed.')
@click.command()
def train():
    """Train the U-Net on the processed training set."""
    loader = DataUtils(_config.PROCESSED_TRAIN_DATA_DIR, _config.PROCESSED_TEST_DATA_DIR)
    images, labels = loader.get_train_data()
    unet.train(images, labels, _config.EPOCHS)
@click.command()
@click.option('--data-path', 'data_path', type=click.Path())
@click.option('--predictions-path', 'predictions_path', type=click.Path())
def predict(data_path, predictions_path):
    """Run inference on a saved numpy array and store the predictions."""
    samples = np.load(data_path)
    np.save(predictions_path, unet.predict(samples))
@click.command()
def evaluate():
    """Evaluate the trained model on the processed test set."""
    loader = DataUtils(_config.PROCESSED_TRAIN_DATA_DIR, _config.PROCESSED_TEST_DATA_DIR)
    samples, truth = loader.get_test_data()
    predicted = unet.predict(samples)
    # Class indices: 0 = CSF, 1 = grey matter, 2 = white matter.
    csf_score = unet.evaluate(predicted, truth, 0)
    gm_score = unet.evaluate(predicted, truth, 1)
    wm_score = unet.evaluate(predicted, truth, 2)
    mean_score = unet.evaluate_average(predicted, truth)
    click.echo('\n')
    click.echo('Accuracy of CSF: {}'.format(csf_score))
    click.echo('Accuracy of GM: {}'.format(gm_score))
    click.echo('Accuracy of WM: {}'.format(wm_score))
    click.echo('Average: {}'.format(mean_score))
# Register the sub-commands on the root group.
cli.add_command(process_data)
cli.add_command(train)
cli.add_command(predict)
cli.add_command(evaluate)
# Entry point: dispatch to the selected sub-command.
if __name__ == '__main__':
    cli()
| true |
92dfca95d9deb0686367394726fc4419af6832c3 | Python | yonathansantosa/dcgan | /model.py | UTF-8 | 3,825 | 2.84375 | 3 | [] | no_license | import torch
import torch.nn as nn
# Generator Model
class G_layer(nn.Module):
    """DCGAN generator: maps a latent vector (nz x 1 x 1) to an image.

    The first transposed convolution expands the 1x1 latent input to the
    initial feature map; each following stage doubles the spatial size,
    ending with a Tanh over `nc` output channels (64x64 with kernels of 4).
    """
    def __init__(self, nz, ngf, kernel_g, nc):
        super(G_layer, self).__init__()
        stages = [
            # latent vector -> (ngf*8) feature maps
            nn.ConvTranspose2d(nz, ngf * 8, kernel_g[0], 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 8, ngf * 4, kernel_g[1], 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, kernel_g[2], 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf, kernel_g[3], 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf, nc, kernel_g[4], 2, 1, bias=False),
            nn.Tanh(),
        ]
        # Same attribute name and layer order as before, so saved
        # state_dict keys ("main.0.weight", ...) still match.
        self.main = nn.Sequential(*stages)
    def forward(self, input):
        return self.main(input)
# Discriminator
class D_layer(nn.Module):
    """DCGAN discriminator.

    Five convolutions shrink an (nc x 64 x 64) image down to a single
    sigmoid "real/fake" score per sample.  generate_feature exposes pooled
    intermediate activations (e.g. for feature matching).

    Attribute names conv1..conv5 are unchanged so saved state_dicts load.
    """
    def __init__(self, nc, ndf, kernel_d):
        super(D_layer, self).__init__()
        # input is (nc) x 64 x 64
        self.conv1 = nn.Sequential(
            nn.Conv2d(nc, ndf, kernel_d[0], 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(ndf, ndf * 2, kernel_d[1], 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(ndf * 2, ndf * 4, kernel_d[2], 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(ndf * 4, ndf * 8, kernel_d[3], 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(ndf * 8, 1, kernel_d[4], 1, 0, bias=False),
            nn.Sigmoid()
            # state size. 1
        )
    def forward(self, input):
        """Probability that `input` images are real; shape (batch,)."""
        out = self.conv1(input)
        out = self.conv2(out)
        out = self.conv3(out)
        out = self.conv4(out)
        out = self.conv5(out)
        return out.view(-1, 1).squeeze(1)
    def generate_feature(self, input):
        """Flat per-sample feature vector built from conv2-conv4 activations.

        Fix: the original also evaluated conv5 here and never used the
        result, wasting a full forward stage; that dead computation (and
        the unused MaxPool2d(16)) has been removed.
        """
        output1 = self.conv1(input)
        output2 = self.conv2(output1)
        output3 = self.conv3(output2)
        output4 = self.conv4(output3)
        # Pool the mid-level maps before flattening (kernels 4 and 2,
        # exactly as before), then concatenate along the feature axis.
        feature1 = nn.MaxPool2d(4)(output2).view(input.size(0), -1).squeeze(1)
        feature2 = nn.MaxPool2d(2)(output3).view(input.size(0), -1).squeeze(1)
        feature3 = output4.view(input.size(0), -1).squeeze(1)
        return torch.cat((feature1, feature2, feature3), 1)
# return output | true |
6f91617ca4e562a325460d1b7a021559bebd9bf3 | Python | Hicha-m/affiche-interactive | /src/main.py | UTF-8 | 9,769 | 2.890625 | 3 | [
"MIT"
] | permissive | """
Affiche interactive
Idée Originale: Damien Muti
Code : Olivier Boesch
version 0.95 : added loop
"""
__version__ = '0.95'
# Kivy : set providers for audio and video
# with env vars (placed before the kivy imports below, which read them)
import os
os.environ["KIVY_VIDEO"] = "ffpyplayer"
os.environ["KIVY_AUDIO"] = "sdl2" # workaround for gstreamer bug on rpi3
# Kivy : set specific config
# app fullscreen and don't show cursor
from kivy.config import Config
# Config.set("graphics", "fullscreen", "auto")
Config.set("graphics", "show_cursor", "0")
# kivy imports
import json
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen, NoTransition
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.core.audio import SoundLoader
from kivy.logger import Logger
from kivy.properties import BooleanProperty
class MediaManager(Widget):
    """Media Manager: capture each keypress,
    lookup in config for medium attached to this key, switch on the right screen
    and transmit medium path to image or video widgets. Plays audio directly"""
    def __init__(self, config_file=None, **kwargs):
        """Load the key->media config and grab the keyboard."""
        super().__init__(**kwargs)
        self.config = {}
        self.load_config_file(config_file)
        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
        self._keyboard.bind(on_key_down=self.keyboard_callback)
        # current Sound object and the two screens (wired up in AfficheApp.build)
        self.sound = None
        self.video = None
        self.image = None
    def load_config_file(self, src=None):
        """load_config_file: loads a configuration file for the app that links key to media
        arguments:
        src : filename of config file (defaults to 'media/config.json')
        format of the file :
        {
        "key1": {"media_type": "video", "src": "media/videoname.mp4"},
        "key2": {"media_type": "image", "src": "media/imagename.jpg"},
        "key3": {"media_type": "sound", "src": "media/soundname.mp3"}
        }
        correct key names can be found with the key inspector app
        """
        # if a name is not provided -> defaults to config.json in the media dir
        if src is None:
            src = 'media/config.json'
        try:
            # open and load file
            Logger.info("Media Config: Loading file {}".format(src))
            with open(src, 'r') as f:
                data = f.read()
            # decode json and store dict
            self.config = json.loads(data)
            Logger.info("Media Config: Config Loaded -> {}".format(str(self.config)))
        # if we can't load config -> no need to continue : raise exception
        # NOTE(review): `raise e` rethrows; a bare `raise` would be the
        # usual idiom and keep the traceback untouched.
        except Exception as e:
            raise e
    def _keyboard_closed(self):
        """_keyboard_closed: unbind callback when keyboard is closed"""
        self._keyboard.unbind(on_key_down=self.keyboard_callback)
        self._keyboard = None
    def keyboard_callback(self, keyboard, keycode, text, modifiers):
        """keyboard_callback: get keypress and try to find media accordingly"""
        # which key ?
        Logger.info("Keyboard: Pressed {}".format(str(keycode)))
        # try to get the right medium in config (keycode[1] is the key name)
        media = self.config.get(keycode[1], None)
        Logger.info("Media: found {}".format(str(media)))
        # no medium -> finished
        if media is None:
            return
        # medium found in config but not on disk -> warning
        if not os.path.exists(media['src']):
            Logger.warning("Media: file not found {}".format(str(media)))
            return
        # medium found is a video
        if media['media_type'] == 'video':
            # get the loop parameter in config
            loop = media.get("loop", False)
            # clear if an image is displayed
            if self.screen_manager.current == 'image':
                self.image.clear_image()
            # switch to video screen
            self.screen_manager.current = 'video'
            # give medium path to the video screen
            self.video.change_media(media, loop)
        # medium found is an image
        elif media['media_type'] == 'image':
            # stop if a video is displayed
            if self.screen_manager.current == 'video':
                self.video.stop_media()
            # switch to image screen
            self.screen_manager.current = 'image'
            # give medium path to the image screen
            self.image.change_media(media)
        # medium found is a sound (played directly, no screen change)
        elif media['media_type'] == 'sound':
            # get the loop parameter in config
            loop = media.get("loop", False)
            # get absolute path
            src = os.path.join(os.getcwd(), media['src'])
            # if no sound was loaded -> try load it directly
            if self.sound is None:
                self.sound = SoundLoader.load(src)
                # if it is loaded -> play it
                if self.sound:
                    self.sound.play()
                    self.sound.loop = loop
            # there was already a sound loaded
            else:
                # if it's not the same -> stop it, try load the new one then play it
                if self.sound.source != src:
                    self.sound.stop()
                    self.sound = SoundLoader.load(src)
                    if self.sound:
                        self.sound.play()
                        self.sound.loop = loop
                # it's the same as before -> toggle state play/pause
                elif self.sound.state == 'play':
                    self.sound.stop()
                else:
                    self.sound.play()
class VideoScreen(Screen):
    """VideoScreen: display videos"""
    # TODO: make seek correctly work as play after stop doesn't show the image (no rewind)
    # should we loop the video ?
    loop = BooleanProperty(False)
    def get_video_widget(self):
        """get_video_widget: return video widget (declared in the kv file)"""
        return self.ids['video_widget']
    def on_eos_loop(self):
        """on_eos_loop: what to do on eos (end of stream)"""
        wid = self.get_video_widget()
        Logger.info("Video: {} end of stream".format(wid.source))
        if self.loop:
            # NOTE(review): redundant second lookup -- `wid` above already
            # holds the same widget.
            wid = self.get_video_widget()
            wid.seek(0)
            wid.state = 'play'
            Logger.info("Video: {} looping".format(wid.source))
    def change_media(self, media, loop):
        """change_media: change media source to new one or toggle state of the current one
        args :
        media : current medium relative path to the app directory
        loop : whether the video restarts on end of stream"""
        # should we loop this video ?
        self.loop = loop
        # get absolute path
        src = os.path.join(os.getcwd(), media['src'])
        # get video widget
        wid = self.get_video_widget()
        # if it's not the same -> load and play
        if wid.source != src:
            wid.source = src
            wid.state = 'play'
        # it's the same -> toggle state play/stop
        elif wid.state == 'play':
            # rewind video before pause
            wid.seek(0)
            wid.state = 'stop'
        else:
            wid.state = 'play'
    def stop_media(self):
        """stop_media : rewind and stop current media"""
        wid = self.get_video_widget()
        wid.seek(0)
        wid.state = 'stop'
class ImageScreen(Screen):
    """Screen showing a single still image (black when no source is set)."""
    def get_image_widget(self):
        """Return the Image widget declared in the kv file."""
        return self.ids['image_widget']
    def change_media(self, media):
        """Display `media` (a config entry with a 'src' path).

        Selecting the image that is already displayed blanks the screen,
        so the same key acts as an on/off toggle.
        """
        target = os.path.join(os.getcwd(), media['src'])
        widget = self.get_image_widget()
        widget.source = target if widget.source != target else ""
    def clear_image(self):
        """Blank the screen by unsetting the image source."""
        self.get_image_widget().source = ""
# Load the kv file describing the screens' widgets.
# note : this could be set automatically if the kv file is named affiche.kv
# (because the app class is named AfficheApp)
Builder.load_file('afficheinteractive.kv')
class AfficheApp(App):
    """Main application: wires the media manager to the two screens."""
    def build(self):
        """Assemble the UI and return the root widget."""
        self.media_manager = MediaManager()
        video = VideoScreen(name='video')
        image = ImageScreen(name='image')
        # The media manager drives both screens on keypresses.
        self.media_manager.video = video
        self.media_manager.image = image
        root = ScreenManager(transition=NoTransition())
        root.add_widget(image)
        root.add_widget(video)
        self.media_manager.screen_manager = root
        return root
    def _on_keyboard_settings(self, window, *largs):
        """Overridden to keep F1 from opening the kivy settings panel."""
        pass
# Entry point: start the kivy event loop when run as a script.
if __name__ == '__main__':
    AfficheApp().run()
| true |
6b308d6b1b0ae7c93b95030d0c2207d3e1179777 | Python | kritikakaram/Customer-Churn-Prediction | /Churn_Prediction.py | UTF-8 | 5,933 | 2.9375 | 3 | [] | no_license |
# Importing the required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn as sk
# Seaborn colour palette (assigned but never used below).
color = sns.color_palette()
from sklearn import neighbors
from sklearn.neighbors import KNeighborsClassifier
# Reading the dataset
data = pd.read_csv("WA_Fn-UseC_-Telco-Customer-Churn.csv")
# NOTE(review): the bare expressions below (head/columns/info) only show
# output in an interactive session; as a plain script they are no-ops.
data.head(10)
data.columns
len(data.columns)
data.info()
# Converting the column "TotalCharges" from object to float.
# errors="coerce" turns unparsable entries (blank strings) into NaN.
data["TotalCharges"]= pd.to_numeric(data["TotalCharges"],errors="coerce")#.fillna(0,downcast="infer")
data.isnull().sum()
data.head()
# Exploratory Data Analysis
# Droping the column "customerID"
data.drop("customerID",axis=1,inplace =True)
# Converting the column "TotalCharges" from object to float.
# NOTE(review): duplicate of the conversion above; harmless but redundant.
data["TotalCharges"]= pd.to_numeric(data["TotalCharges"],errors="coerce")#.fillna(0,downcast="infer")
#Finding the median of the column TotalCharges
a = data["TotalCharges"].median()
# replacing na values in TotalCharges with Median
data["TotalCharges"].fillna(a, inplace = True)
# Replacing "No internet service" and "No phone service" with "No"
data.replace("No internet service","No",inplace = True)
data["MultipleLines"].replace("No phone service","No",inplace = True)
# Binning the Tenure column into 5 equal-width intervals
data.tenure.value_counts()
data["tenure"]= pd.cut(data["tenure"],bins=5)
# Row-normalised (%) churn rate per tenure bin
tenure_churn=pd.crosstab(data["tenure"],data["Churn"]).apply(lambda x: (x/x.sum()*100),axis=1)
tenure_churn.plot.bar()
# Gender
gender_count= data.gender.value_counts()
sns.barplot(gender_count.index, gender_count.values)
# Relation between Churn and Gender
gender_churn= pd.crosstab(data["gender"],data["Churn"])
gender_churn.plot.bar()
# Now to get relation between each feature and the Target variable "Churn"
a=data.columns
a=list(a)
a.pop() # Removing the churn column
for i in (a):
    if data[i].dtype == "object" or data[i].dtype=="int64":
        df=pd.crosstab(data[i],data["Churn"]).apply(lambda x: (x/x.sum()*100),axis=1)
        print(i, "vs Churn")
        print(df)
        df.plot.bar()
        plt.show()
        print("----------------------")
        print("----------------------")
        print("----------------------")
# Churn
churn_count= data.Churn.value_counts()
sns.barplot(churn_count.index,churn_count.values)
# Separating the features and target columns
x= data.iloc[:,:-1].values
y=data.iloc[:,-1].values
col= data.columns
col=col.drop("Churn")
# Encoding the categorical data
from sklearn.preprocessing import LabelEncoder
LE_x= LabelEncoder()
for i in range(len(col)-1):
    x[:,i]=LE_x.fit_transform(x[:,i])
LE_y= LabelEncoder()
y=LE_y.fit_transform(y)
z=pd.DataFrame(x,columns=col)
# OneHot Encoding the required columns
X=pd.get_dummies(z ,columns=["tenure","InternetService","Contract","PaymentMethod"])
X["Churn"]=pd.DataFrame(y)
Y=X.iloc[:,-1]
X.drop("Churn",axis=1,inplace = True)
# Spliting the data into Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test= train_test_split(X,Y, test_size=0.3)
# Feature Scaling (scaler fitted on the training split only)
from sklearn.preprocessing import StandardScaler
sc= StandardScaler()
X_train.iloc[:,13:15]= sc.fit_transform(X_train.iloc[:,13:15])
X_test.iloc[:,13:15]= sc.transform(X_test.iloc[:,13:15])
# Dropping the column "MonthlyCharges" as it is highly correlated with "TotalCharges" and "tenure"
X_train.drop("MonthlyCharges",axis=1,inplace= True)
X_test.drop("MonthlyCharges",axis=1,inplace= True)
#############################################################33
from sklearn.ensemble import RandomForestClassifier
# Base estimator for the grid search below.
model = RandomForestClassifier()
#GRID SEARCH
n_estimators = [100, 300, 500]
max_depth = [4, 5,7]
min_samples_split = [2, 5, 6]
min_samples_leaf = [1, 2, 3]
criterion = ['gini','entropy']
from sklearn.model_selection import GridSearchCV
hyperF = dict(n_estimators = n_estimators, max_depth = max_depth,
              min_samples_split = min_samples_split,
              min_samples_leaf = min_samples_leaf, criterion=criterion)
gridF = GridSearchCV(model, hyperF, cv = 3, verbose = 1,
                      n_jobs = -1)
bestF = gridF.fit(X_train, Y_train)
best_param= gridF.best_params_
# NOTE(review): the tuned forest below hard-codes its hyperparameters;
# bestF / best_param from the grid search are computed but never used.
forestOpt = RandomForestClassifier(random_state = 1,criterion='entropy', max_depth = 7, class_weight='balanced', n_estimators = 100, min_samples_split = 6, min_samples_leaf = 2)
modelOpt = forestOpt.fit(X_train, Y_train)
y_pred = modelOpt.predict(X_test)
print(modelOpt.feature_importances_)
# Probability of the positive class, used for ROC/AUC below.
y_pred_prob =modelOpt.predict_proba(X_test)[:, 1]
from sklearn.metrics import roc_auc_score
# Calculate roc auc
roc_value = roc_auc_score(Y_test, y_pred_prob)
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
cm = (confusion_matrix(Y_test,y_pred))
# Labelled copy of the confusion matrix (built but never printed).
cm_list=cm.tolist()
cm_list[0].insert(0,'Real True')
cm_list[1].insert(0,'Real False')
#plot_confusion_matrix(cm)
print(classification_report(Y_test,y_pred))
print(accuracy_score(Y_test, y_pred))
#ROC
def plot_roc_curve(fpr, tpr):
    """Plot the ROC curve (orange) with a dashed diagonal reference line."""
    plt.plot(fpr, tpr, color='orange', label='ROC')
    plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
    for label_fn, text in ((plt.xlabel, 'False Positive Rate'),
                           (plt.ylabel, 'True Positive Rate'),
                           (plt.title, 'Receiver Operating Characteristic (ROC) Curve')):
        label_fn(text)
    plt.legend()
    plt.show()
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(Y_test, y_pred_prob)
plot_roc_curve(fpr, tpr)
#matplotlib inline
#do code to support model
#"data" is the X dataframe and model is the SKlearn object
feats = {} # a dict to hold feature_name: feature_importance
# NOTE(review): the importances come from modelOpt (fitted on X_train,
# which is one-hot encoded and has MonthlyCharges dropped), but the names
# are taken from the raw `data` columns -- the pairing looks misaligned;
# verify before trusting this chart.
for feature, importance in zip(data.columns, modelOpt.feature_importances_):
    feats[feature] = importance #add the name/value pair
importances = pd.DataFrame.from_dict(feats, orient='index').rename(columns={0: 'Feature-importance'})
importances.sort_values(by='Feature-importance').plot(kind='bar', rot=85)
| true |
87662ef04924a1d437f9a1b163479bc0eefaa831 | Python | coolsgupta/leetcode | /Longest_Substring_Without_Repeating_Characters.py | UTF-8 | 573 | 3.28125 | 3 | [] | no_license | class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
win = []
maxl = 0
for c in s:
if c not in win:
win.append(c)
else:
maxl = max(maxl, len(win))
for x in win:
win.pop(0)
if c not in win:
win.append(c)
break
return max(maxl, len(win))
s = "yfsrsrpzuya"
print (Solution().lengthOfLongestSubstring(s)) | true |
28842631ece9a93ee5cf9534738f4227f0e37a87 | Python | PlattsSEC/HackerRank | /algorithms/graph_theory/shortest_reach/prithaj.py | UTF-8 | 810 | 2.953125 | 3 | [
"MIT"
] | permissive | #Python 3.x
from collections import deque
EDGE_DISTANCE = 6

def bfs(g, s):
    """Breadth-first search from s over the adjacency dict g.

    Every edge costs EDGE_DISTANCE; returns {node: distance} where
    unreachable nodes keep the sentinel distance -1.
    """
    dist = dict.fromkeys(range(1, len(g) + 1), -1)
    dist[s] = 0
    visited = {s}
    frontier = deque([s])
    while frontier:
        node = frontier.popleft()
        for neighbour in g[node]:
            if neighbour not in visited:
                visited.add(neighbour)
                dist[neighbour] = dist[node] + EDGE_DISTANCE
                frontier.append(neighbour)
    return dist
# Fix: int(input()) instead of eval(input()) -- eval() executes arbitrary
# expressions from stdin, which is unsafe and unnecessary for an integer.
queries = int(input())
for i in range(queries):
    # n nodes, m undirected edges
    n, m = map(int, input().split())
    g = {k: set() for k in range(1, n + 1)}
    for j in range(m):
        a, b = map(int, input().split())
        g[a].add(b)
        g[b].add(a)
    start = int(input())
    val = bfs(g, start)
    # Build the output line: every node's distance except the start node,
    # each followed by a single space (same format as before).
    output = ""
    for k in val:
        if k != start:
            output = output + str(val[k]) + " "
print(output) | true |
28331b764d96742347e074ada1bf8d10fd8ff7fc | Python | RobinVercruysse/AdventOfCode | /2019/day7/Day7.py | UTF-8 | 5,270 | 2.625 | 3 | [] | no_license | from typing import List
from itertools import permutations
with open('input') as fp:
original_intcodes = [int(intcode) for intcode in fp.readline().split(',')]
amplifier_inputs = {}
def amplifier_process(intcodes: List[int], phase_setting, amplifier_index):
take_input = False
ptr = 0
while ptr < len(intcodes):
intcodestr = str(intcodes[ptr])
intcode = int(intcodestr[len(intcodestr) - 2:])
parameter_modes = intcodestr[:len(intcodestr) - 2][::-1]
if intcode == 1:
ptr1 = intcodes[ptr + 1]
if len(parameter_modes) > 0 and parameter_modes[:1] == '1':
value1 = ptr1
else:
value1 = intcodes[ptr1]
ptr2 = intcodes[ptr + 2]
if len(parameter_modes) > 1 and parameter_modes[1:2] == '1':
value2 = ptr2
else:
value2 = intcodes[ptr2]
ptr3 = intcodes[ptr + 3]
intcodes[ptr3] = value1 + value2
ptr += 4
elif intcode == 2:
ptr1 = intcodes[ptr + 1]
if len(parameter_modes) > 0 and parameter_modes[:1] == '1':
value1 = ptr1
else:
value1 = intcodes[ptr1]
ptr2 = intcodes[ptr + 2]
if len(parameter_modes) > 1 and parameter_modes[1:2] == '1':
value2 = ptr2
else:
value2 = intcodes[ptr2]
ptr3 = intcodes[ptr + 3]
intcodes[ptr3] = value1 * value2
ptr += 4
elif intcode == 3:
ptr1 = intcodes[ptr + 1]
if take_input:
intcodes[ptr1] = amplifier_inputs[amplifier_index]
else:
intcodes[ptr1] = phase_setting
take_input = True
ptr += 2
elif intcode == 4:
ptr1 = intcodes[ptr + 1]
if len(parameter_modes) > 0 and parameter_modes[:1] == '1':
value = ptr1
else:
value = intcodes[ptr1]
yield value
ptr += 2
elif intcode == 5:
ptr1 = intcodes[ptr + 1]
ptr2 = intcodes[ptr + 2]
if len(parameter_modes) > 0 and parameter_modes[:1] == '1':
value1 = ptr1
else:
value1 = intcodes[ptr1]
if len(parameter_modes) > 0 and parameter_modes[1:2] == '1':
value2 = ptr2
else:
value2 = intcodes[ptr2]
if value1 != 0:
ptr = value2
else:
ptr += 3
elif intcode == 6:
ptr1 = intcodes[ptr + 1]
ptr2 = intcodes[ptr + 2]
if len(parameter_modes) > 0 and parameter_modes[:1] == '1':
value1 = ptr1
else:
value1 = intcodes[ptr1]
if len(parameter_modes) > 0 and parameter_modes[1:2] == '1':
value2 = ptr2
else:
value2 = intcodes[ptr2]
if value1 == 0:
ptr = value2
else:
ptr += 3
elif intcode == 7:
ptr1 = intcodes[ptr + 1]
if len(parameter_modes) > 0 and parameter_modes[:1] == '1':
value1 = ptr1
else:
value1 = intcodes[ptr1]
ptr2 = intcodes[ptr + 2]
if len(parameter_modes) > 1 and parameter_modes[1:2] == '1':
value2 = ptr2
else:
value2 = intcodes[ptr2]
ptr3 = intcodes[ptr + 3]
if value1 < value2:
intcodes[ptr3] = 1
else:
intcodes[ptr3] = 0
ptr += 4
elif intcode == 8:
ptr1 = intcodes[ptr + 1]
if len(parameter_modes) > 0 and parameter_modes[:1] == '1':
value1 = ptr1
else:
value1 = intcodes[ptr1]
ptr2 = intcodes[ptr + 2]
if len(parameter_modes) > 1 and parameter_modes[1:2] == '1':
value2 = ptr2
else:
value2 = intcodes[ptr2]
ptr3 = intcodes[ptr + 3]
if value1 == value2:
intcodes[ptr3] = 1
else:
intcodes[ptr3] = 0
ptr += 4
elif intcode == 99:
raise Exception()
def get_result(phase_settings):
    """Run the 5-amplifier feedback loop for one phase-setting order.

    Each amplifier gets a fresh copy of the program; amplifier_inputs[i]
    starts at 0 and is overwritten with the previous amplifier's output
    just before amplifier i is resumed.  Returns the last output produced
    (the thruster signal) once any amplifier halts.
    """
    amplifiers = []
    for amplifier in range(5):
        amplifier_inputs[amplifier] = 0
        amplifiers.append(amplifier_process(original_intcodes.copy(), phase_settings[amplifier], amplifier))
    output = 0
    try:
        current_amplifier = 0
        while True:
            output = next(amplifiers[current_amplifier])
            if current_amplifier >= 4:
                current_amplifier = 0
            else:
                current_amplifier += 1
            amplifier_inputs[current_amplifier] = output
    # A halted amplifier ends its generator; the resulting exception
    # (StopIteration is an Exception subclass) breaks the loop.
    except Exception:
        return output
# Part 2: try every feedback phase order of 5..9 and keep the best signal.
highest_signal = 0
best_permutation = None
for permutation in permutations([5, 6, 7, 8, 9]):
    signal = get_result(permutation)
    if signal > highest_signal:
        highest_signal = signal
        best_permutation = permutation
print(best_permutation)
print(highest_signal)
| true |
415d13196bc7a2637af1b2dd56941d22cd2ee5a0 | Python | chinatip/problem-solving | /codeforces/112A.py | UTF-8 | 172 | 3.109375 | 3 | [] | no_license | from sys import stdin, stdout
# Codeforces 112A: compare two words case-insensitively.
a = stdin.readline().rstrip().lower()
b = stdin.readline().rstrip().lower()
# Python 2 print statements; output is comparator-style -1 / 0 / 1.
if a > b:
    print 1
elif a == b:
    print 0
else:
    print -1
| true |
7140df7bc4748d28460abc8bfd1aada1da9d9b37 | Python | CarmenSaldana/NounNumberCaseOrder | /sup_scripts/pars_compl_hu_tr_UD.py | UTF-8 | 6,001 | 2.703125 | 3 | [] | no_license | '''
This script runs on Python 3 and relies on having the UD treebanks (v 2.1) and libraries in CLIQS. Follow the instructions on https://github.com/Futrell/cliqs
'''
import pandas as pd
import sys
sys.path.append('/Users/carmen/UD/cliqs-master/cliqs/')#Path to folder containing corpora.py
import corpora
import numpy as np
from scipy import stats
# Inflection labels per language: number values and the case inventory.
# The first case in each list is the unmarked nominative.
dict_number={'tr': ['Number=Sing','Number=Plur'], 'hu': ['Number=Sing','Number=Plur']}
dict_cases = {'tr':['Nom','Gen', 'Acc', 'Dat', 'Loc', 'Abl','Equ', 'Ins'], 'hu':['Nom','Gen', 'Acc', 'Dat', 'Loc', 'Abl','Ess', 'Ins', 'Ine', 'All', 'Ill', 'Ade', 'Ela','Sub','Sup','Del','Tra','Ter','Tem']}
def get_sentences(language):
    """All sentences of the UD corpus registered for `language`, as a list."""
    return list(corpora.ud_corpora[language].sentences())
def prob(system, phrase):
    """Relative frequency of `phrase` among the items of `system`."""
    return system.count(phrase) / float(len(system))

def get_entropy_system(system):
    """Shannon entropy (bits) of the items in `system`, ignoring 'NA'."""
    observed = [item for item in system if item != 'NA']
    entropy = 0
    for item in set(observed):
        p = prob(observed, item)
        entropy -= p * np.log2(p)
    return entropy
def pars_conservative(sentences,marked_value):
    """Form/lemma ratios for number vs case marking on nouns.

    For every noun, record its lemma under up to three columns:
    'Unmarked' (singular nominative), 'Number' (plural nominative) and
    'Case' (singular with `marked_value`).  For each lemma attested in
    both the unmarked and a marked column, the ratio of marked to
    unmarked token counts is computed.

    Returns (ratio_num, ratio_case) -- two lists of floats.
    """
    form_type=[]
    for sentence in sentences:
        for n in sentence.nodes():
            # node 0 is the artificial root
            if n!=0:
                pos = sentence.node[n]['pos']
                inflection = sentence.node[n]['infl']
                numUn_caseUn = (pos == 'NOUN' and all(infl in inflection for infl in ['Number=Sing', 'Nom']))
                numUn_caseM = (pos == 'NOUN' and all(infl in inflection for infl in ['Number=Sing', marked_value]))
                numM_caseU = (pos == 'NOUN' and all(infl in inflection for infl in ['Number=Plur', 'Nom']))
                list_bool_value = [numUn_caseUn, numM_caseU, numUn_caseM]
                form_type.append([sentence.node[n]['lemma'] if i else 'NA' for i in list_bool_value])
    df_lemmas_pars = pd.DataFrame(form_type, columns=['Unmarked', 'Number','Case'])
    # lemmas attested both unmarked and with the relevant marking
    common_number = [i for i in set(list(df_lemmas_pars['Unmarked'])) if i in set(list(df_lemmas_pars['Number'])) and i !='NA']
    common_case = [i for i in set(list(df_lemmas_pars['Unmarked'])) if i in set(list(df_lemmas_pars['Case'])) and i !='NA']
    ratio_num = [list(df_lemmas_pars['Number']).count(i)/list(df_lemmas_pars['Unmarked']).count(i) for i in common_number ]
    ratio_case = [list(df_lemmas_pars['Case']).count(i)/list(df_lemmas_pars['Unmarked']).count(i) for i in common_case ]
    return ratio_num, ratio_case
def test_parsability(language, dict_number, dict_cases):
    """Median number vs case ratios plus a Mann-Whitney U comparison.

    Note: after the loop, `pars` holds the result of the *last* case
    value only; its ratio_num component (index -2) is the same for every
    call, since pars_conservative computes number ratios against 'Nom'
    regardless of the case passed in.
    """
    sentences = get_sentences(language)
    list_case = dict_cases[language]
    case_pars=[]
    # skip the unmarked 'Nom' at index 0
    for case_value in list_case[1:]:
        pars=pars_conservative(sentences, case_value)
        case_pars.extend(pars[-1])
    return (np.median(pars[-2]), np.median(case_pars), stats.mannwhitneyu(pars[-2],case_pars))
def test_parsability_pl_acc(language, dict_number, dict_cases):
    """Like test_parsability, but restricted to the accusative.

    dict_cases[language][2:3] selects only 'Acc' (index 2 in both the
    Turkish and Hungarian case lists above).
    """
    sentences = get_sentences(language)
    list_case = dict_cases[language][2:3]
    case_pars=[]
    for case_value in list_case:
        pars=pars_conservative(sentences, case_value)
        case_pars.extend(pars[-1])
    return (np.median(pars[-2]), np.median(case_pars), stats.mannwhitneyu(pars[-2],case_pars))
def freq_infl(sentences,list_infl):
    """Per-inflection noun counts and normalised lemma entropies.

    For every noun, record whether it carries any/each inflection in
    `list_infl`, plus its lemma per inflection ('NA' when absent).
    Entropies are normalised by the entropy of a uniform distribution
    over as many outcomes as there are matching tokens.

    Returns (norm_h, norm_h_by, df_sums, h_lemmas_by, h_lemmas, df_lemmas).
    """
    freq=[]
    freq_by=[]
    lemmas=[]
    lemmas_by=[]
    for sentence in sentences:
        for n in sentence.nodes():
            # node 0 is the artificial root
            if n!=0:
                pos = sentence.node[n]['pos']
                inflection = sentence.node[n]['infl']
                bool_value = (pos == 'NOUN' and any(infl in inflection for infl in list_infl))
                list_bool_value = [pos == 'NOUN' and infl in inflection for infl in list_infl]
                freq.append(bool_value)
                freq_by.append(list_bool_value)
                if bool_value: lemmas.append(sentence.node[n]['lemma'])
                lemmas_by.append([sentence.node[n]['lemma'] if i else 'NA' for i in list_bool_value])
    df= pd.DataFrame(freq_by, columns=list_infl)
    df_lemmas = pd.DataFrame(lemmas_by, columns=list_infl)
    # token counts per inflection and overall
    sums=[sum(df[infl])for infl in list_infl]
    df_sums = pd.DataFrame([[sum(sums)]+sums], columns=['total']+list_infl)
    h_lemmas=get_entropy_system(lemmas)
    h_lemmas_by = df_lemmas.apply(get_entropy_system)
    # entropy of range(k) is log2(k), i.e. the uniform upper bound
    norm_h_by=[round((h_lemmas_by[i]/(get_entropy_system(list(range(df_sums[i][0]))))),3) for i in list_infl]
    norm_h = [round(h_lemmas/(get_entropy_system(list(range(df_sums['total'][0])))),3)]
    return norm_h, norm_h_by, df_sums, h_lemmas_by, h_lemmas, df_lemmas
def integration_complexity(language):
    """Normalised lemma-entropy ('integration complexity') per inflection.

    Pairs the overall 'Number'/'Case' values with the per-inflection
    values returned by freq_infl, exactly as before.
    """
    sents = get_sentences(language)
    number_infl = dict_number[language]
    case_infl = dict_cases[language]
    number_stats = freq_infl(sents, number_infl)
    case_stats = freq_infl(sents, case_infl)
    labels = ['Number', 'Case'] + number_infl + case_infl
    values = number_stats[0] + case_stats[0] + number_stats[1] + case_stats[1]
    return list(zip(labels, values))
#############################################################
# parsability
#############################################################
'''
Turkish
'''
print('#################################################')
# Parsability: form/lemma ratios for number vs case marking (Turkish).
print('Parsability number & case in Turkish UD corpus:')
print (test_parsability('tr', dict_number, dict_cases))
print (test_parsability_pl_acc('tr', dict_number, dict_cases))
'''
Hungarian
'''
# Parsability: form/lemma ratios for number vs case marking (Hungarian).
print('Parsability number & case in Hungarian UD corpus:')
print (test_parsability('hu', dict_number, dict_cases))
print (test_parsability_pl_acc('hu', dict_number, dict_cases))
print('#################################################')
#############################################################
# Integration complexity
#############################################################
'''
Turkish
'''
print('#################################################')
# Integration complexity: normalised lemma entropies (Turkish).
print('Integration complexity number & case Turkish UD corpus:')
print(integration_complexity('tr'))
'''
Hungarian
'''
# Integration complexity: normalised lemma entropies (Hungarian).
print('Integration complexity number & case Hungarian UD corpus:')
print(integration_complexity('hu'))
print('#################################################')
| true |
0e8d203a9f0bf545914d5dbeb306afb60d6b780a | Python | CorleoneDany/Les_Aventures_De_Riem | /Les_Aventures_De_Riem.py | UTF-8 | 2,862 | 3.03125 | 3 | [] | no_license | # TO DO : Remplacer le rectangle par une image de Riem
# TODO: create the sound effects and the events that will trigger them
# TODO: fix the `path` variable so it adapts to every machine
import os
import time
try:
    import pygame
except ImportError:
    import subprocess
    # NOTE(review): falls back to running a local batch file to install
    # pygame -- Windows-only, and the import is not retried afterwards.
    subprocess.call([r"Install.bat"])
pygame.init()
# Pre-set mixer parameters (22 kHz, 16-bit signed, stereo, 4 KiB buffer).
pygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=4096)
# Directory containing this script, used to locate sound assets.
path = os.path.dirname(os.path.realpath(__file__))
screenWidth = 1000
screenHeight = 600
win = pygame.display.set_mode((screenWidth, screenHeight))
pygame.display.set_caption("Les aventures de Riem")
class Personnage:
    """Simple movable square character.

    Position, size, speed and colour live as plain class attributes;
    each instance overrides them by direct assignment (see below).
    """
    # top-left position, in pixels
    x = 300
    y = 300
    # sprite size
    width = 50
    height = 50
    # movement speed, pixels per frame
    velocity = 10
    # jumping state (declared but not used by the current game loop)
    isJump = False
    jumpCount = 10
    # RGB fill colour
    color = (255, 255, 255)
# Main-loop flag; set to False when the window is closed.
run = True
# Player 1: red, faster, starts in the top-left corner.
personnage1 = Personnage()
personnage1.velocity = 20
personnage1.x = 0
personnage1.y = 0
personnage1.color = (255,0,0)
# Player 2: green, keeps the class defaults for position and speed.
personnage2 = Personnage()
personnage2.color = (0,255,0)
while run:
    # ~10 frames per second
    pygame.time.delay(100)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    keys = pygame.key.get_pressed()
    # Player 1 moves with the arrow keys; the comparisons keep the square
    # inside the window bounds.
    if keys[pygame.K_LEFT] and personnage1.x > personnage1.velocity:
        personnage1.x -= personnage1.velocity
    if keys[pygame.K_RIGHT] and personnage1.x < screenWidth - personnage1.width - personnage1.velocity:
        personnage1.x += personnage1.velocity
    if keys[pygame.K_UP] and personnage1.y > personnage1.velocity:
        personnage1.y -= personnage1.velocity
    if keys[pygame.K_DOWN] and personnage1.y < screenHeight - personnage1.height - personnage1.velocity:
        personnage1.y += personnage1.velocity
    # Player 2 moves with WASD.
    if keys[pygame.K_a] and personnage2.x > personnage2.velocity:
        personnage2.x -= personnage2.velocity
    if keys[pygame.K_d] and personnage2.x < screenWidth - personnage2.width - personnage2.velocity:
        personnage2.x += personnage2.velocity
    if keys[pygame.K_w] and personnage2.y > personnage2.velocity:
        personnage2.y -= personnage2.velocity
    if keys[pygame.K_s] and personnage2.y < screenHeight - personnage2.height - personnage2.velocity:
        personnage2.y += personnage2.velocity
    # Collision: squares are 50x50, so centres closer than 50px on both
    # axes means overlap -> player 1 is blacked out, moved off-screen, and
    # the game-over music plays.
    if abs((personnage1.x+25) - (personnage2.x+25)) < 50 and abs((personnage1.y+25) - (personnage2.y+25)) < 50:
        personnage1.color = (0,0,0)
        personnage1.x = 9999
        personnage1.y = 9999
        pygame.mixer.music.load(path + "/Son/game_over.OGG")
        pygame.mixer.music.play()
    win.fill((0))
    pygame.draw.rect(win, personnage1.color, (personnage1.x, personnage1.y, personnage1.width, personnage1.height))
    pygame.draw.rect(win, personnage2.color, (personnage2.x, personnage2.y, personnage2.width, personnage2.height))
    pygame.display.update()
pygame.quit() | true |
dec5cca0268eb06413cfa2b1a2375431fa9383f5 | Python | adichouhan14/python-assignment | /py assignments/module 4/fruit.py | UTF-8 | 908 | 2.5625 | 3 | [] | no_license | fruits={1:{'name':'Apple','scientific name':'Malus domestica','producers':['United States','Turkey'],'neutrition values':{'carbohydrates':'13.81g','fat':'0.17g','protein':'0.26g'}},
2:{'name':'Mango','scientific name':'Mangifera indica','producers':['Índia','China','Thailand'],'neutrition values':{'carbohydrates':'15g','fat':'0.4g','protein':'0.8g'}},
3:{'name':'Guava','scientific name':'Psidium guajava','producers':['Índia','China','Thailand'],'neutrition values':{'carbohydrates':'14g','fat':'1g','protein':'2.6g'}},
}
def _protein_grams(fruit):
    """Numeric protein content parsed from strings like '0.26g'."""
    return float(fruit['neutrition values']['protein'].rstrip('g'))

# --- Fruit with the highest protein content overall ---
# Fix: the original compared the raw strings, and string comparison is
# lexicographic (e.g. '15g' < '2g'); compare parsed numbers instead.
best = max(fruits.values(), key=_protein_grams)
print(best['name'])
print(best['neutrition values']['protein'])

# --- Highest-protein fruit among those produced in China ---
# Fix: the original indexed the full `fruits` dict with the position of
# the maximum in the *filtered* list, so it could print the wrong fruit
# (it reported Mango although Guava has more protein).
china_fruits = [f for f in fruits.values() if 'China' in f['producers']]
best_china = max(china_fruits, key=_protein_grams)
print(best_china['name'])
print(best_china['neutrition values']['protein'])
| true |
f72979794cab708532fd065a5f3a93847f298eb9 | Python | pablolupo84/CRUDapp | /00_test/ClientViewApp.py | UTF-8 | 14,563 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import ttk
from tkinter import *
from tkinter import messagebox
from tkinter.scrolledtext import ScrolledText
import sqlite3
class ClientesFrame(ttk.Frame):
    """Clients tab: CRUD entry form, a TreeView of all clients and action
    buttons, persisted to the local SQLite database file ``CLIENTES``."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        #----------------- input fields -----------------------
        self.miFrame = Frame(self, width=350, height=400)
        self.miFrame.pack()
        self.datacuadroNombre = StringVar()
        self.datacuadroTelefono = StringVar()
        self.cuadroNombre = Entry(self.miFrame, textvariable=self.datacuadroNombre)
        self.cuadroNombre.grid(row=1, column=1, padx=10, pady=1,columnspan=3)
        self.cuadroNombre.config(justify="center")
        self.cuadroTelefono = Entry(self.miFrame, textvariable=self.datacuadroTelefono)
        self.cuadroTelefono.grid(row=2, column=1, padx=10, pady=10,columnspan=3)
        self.cuadroTelefono.config(justify="center")
        self.botonCreateDB = Button(self.miFrame, text="Create DB", width=10,command=lambda:self.crearDB())
        self.botonCreateDB.grid(row=1, column=4, padx=10, pady=10)
        self.botonLimpiar = Button(self.miFrame, text="Limpiar", width=10,command=lambda:self.borrarInputBox())
        self.botonLimpiar.grid(row=2, column=4, padx=10, pady=10)
        #----------------- labels -----------------------
        self.NombreLabel = Label(self.miFrame, text="Nombre: ")
        self.NombreLabel.grid(row=1, column=0, padx=10, pady=10)
        self.TelefonoLabel = Label(self.miFrame, text="Telefono: ")
        self.TelefonoLabel.grid(row=2, column=0, padx=10, pady=10)
        #----------------- clients viewer (TreeView) -----------------------
        self.miFrame_3 = Frame(self)
        self.miFrame_3.pack()
        self.tituloLabel=Label(self.miFrame_3,text="Clientes",fg="blue",bg="white",font=("Times New Roman",20))
        self.tituloLabel.grid(row=0, column=1, padx=10, pady=10,sticky="we")
        self.treeVentas = ttk.Treeview(self.miFrame_3,columns = ("ID_USUARIO","NOMBRE_USUARIO","TELEFONO"))
        self.treeVentas.grid(row=1,column=1,padx=10,pady=10)
        self.treeVentas['show']='headings'
        self.treeVentas.heading('#0', text='column0', anchor=tk.W)
        self.treeVentas.heading('#1', text='ID_USUARIO', anchor=tk.W)
        self.treeVentas.heading('#2', text='NOMBRE_USUARIO', anchor=tk.W)
        self.treeVentas.heading('#3', text='TELEFONO', anchor=tk.W)
        self.treeVentas.column('#0',width=70,minwidth=70,stretch=tk.YES)
        self.treeVentas.column('#1',width=80,minwidth=80,stretch=tk.YES)
        self.treeVentas.column('#2',width=150,minwidth=150,stretch=tk.YES)
        self.treeVentas.column('#3',width=150,minwidth=150,stretch=tk.YES)
        # Initial population of the grid from the database.
        for row in self.consultarClientes():
            self.treeVentas.insert('',END, values=row)
        self.scrollVert2=Scrollbar(self.miFrame_3,command=self.treeVentas.yview)
        self.scrollVert2.grid(row=1,column=2,sticky="nsnew")
        self.treeVentas.config(yscrollcommand=self.scrollVert2.set)
        #----------------- CRUD buttons -----------------------
        self.miFrame_2 = Frame(self)
        self.miFrame_2.pack()
        self.botonCreate = Button(self.miFrame_2, text="Nuevo", width=10,command=lambda:self.InsertarData())
        self.botonCreate.grid(row=4, column=0, padx=10, pady=10)
        self.botonReadUSER =Button(self.miFrame_2, text="Buscar", width=10,command=lambda:self.ReadDataUser())
        self.botonReadUSER.grid(row=4, column=1, padx=10, pady=10)
        self.botonUpdate = Button(self.miFrame_2, text="Actualizar", width=10,command=lambda:self.updateData())
        self.botonUpdate.grid(row=4, column=2, padx=10, pady=10)
        self.botonDelete = Button(self.miFrame_2, text="Borrar", width=10,command=lambda:self.deleteData())
        self.botonDelete.grid(row=4, column=3, padx=10, pady=10)
    #----------------- functions -----------------------
    def UpdateTreeViewClientes(self):
        # Clear the TreeView and reload every row from the CLIENTES table.
        for row in self.treeVentas.get_children():
            self.treeVentas.delete(row)
        for row in self.consultarClientes():
            self.treeVentas.insert('',END, values=row)
    def consultarClientes(self):
        """Return all rows of the CLIENTES table (empty list on any error,
        e.g. when the table has not been created yet)."""
        miConexion = sqlite3.connect("CLIENTES")
        miCursor = miConexion.cursor()
        arreglo = []
        try:
            miCursor.execute("SELECT * FROM CLIENTES")
            listamiCursor=miCursor.fetchall() # fetch all rows
            for ventas in listamiCursor:
                arreglo.append(ventas)
            miCursor.close()
            return arreglo
        except:
            # Bare except: any failure silently yields an empty list.
            return arreglo
    def crearDB(self):
        # Create both backing databases (each in its own SQLite file).
        self.crearDB_Clientes()
        self.crearDB_ventas()
    def crearDB_Clientes(self):
        """Create the CLIENTES table; shows an info box either way."""
        miConexion = sqlite3.connect("CLIENTES")
        miCursor = miConexion.cursor()
        try:
            miCursor.execute('''
                CREATE TABLE CLIENTES(
                ID INTEGER PRIMARY KEY AUTOINCREMENT,
                NOMBRE_USUARIO VARCHAR(50),
                TELEFONO VARCHAR(50))
                ''')
            miConexion.commit()
            messagebox.showinfo("ClientView", "BBDD CLIENTES creada con exito!!")
            miConexion.close()
        except:
            # CREATE TABLE fails when the table already exists.
            messagebox.showinfo("ClientView", "BBDD CLIENTES YA EXISTE!!")
    def crearDB_ventas(self):
        """Create the VENTAS table; shows an info box either way."""
        miConexion = sqlite3.connect("VENTAS")
        miCursor = miConexion.cursor()
        try:
            miCursor.execute('''
                CREATE TABLE VENTAS(
                ID_VENTAS INTEGER PRIMARY KEY AUTOINCREMENT,
                ID_USUARIO INTEGER,
                DESCRIPCION VARCHAR(50),
                TOTAL_VENTA REAL,
                DESCUENTO REAL)
                ''')
            miConexion.commit()
            messagebox.showinfo("ClientView", "BBDD Ventas creada con exito!!")
            miConexion.close()
        except:
            messagebox.showinfo("ClientView", "BBDD Ventas YA EXISTE!!")
    def leerInfoInputBox(self):
        # Current form values as [nombre, telefono].
        listadata = [self.datacuadroNombre.get(),self.datacuadroTelefono.get()]
        return listadata
    def borrarInputBox(self):
        # Clear both entry fields.
        self.datacuadroNombre.set("")
        self.datacuadroTelefono.set("")
        print("ClientView - Se borran todos los campos")
    def InsertarData(self):
        """Insert the form values as a new CLIENTES row and refresh the grid."""
        try:
            miConexion = sqlite3.connect("CLIENTES")
            miCursor = miConexion.cursor()
            print("Successfully Connected to SQLite")
            listadata=self.leerInfoInputBox()
            count = miCursor.execute("INSERT INTO CLIENTES VALUES (NULL,?,?)", listadata)
            miConexion.commit()
            print("Record inserted successfully into CLIENTES table ", miCursor.rowcount)
            messagebox.showinfo("ClientView", "BBDD creada con exito!!")
            miConexion.close()
            self.UpdateTreeViewClientes()
        except:
            print("Failed to insert data into sqlite table")
        finally:
            # NOTE(review): if connect() itself failed, miConexion is unbound
            # here and this finally block raises NameError — confirm.
            if (miConexion):
                miConexion.close()
                print("The SQLite connection is closed")
    def ReadDataUser(self):
        """Look up the client whose name matches the form and load it back
        into the entry fields; clears the form when not found."""
        try:
            miConexion = sqlite3.connect("CLIENTES")
            miCursor = miConexion.cursor()
            sql_update_query = """SELECT * FROM CLIENTES WHERE NOMBRE_USUARIO = ?"""
            name=str(self.cuadroNombre.get())
            miCursor.execute(sql_update_query,(name,))
            listamiCursor=miCursor.fetchone() # fetch the matching row
            if(listamiCursor!=None):
                self.datacuadroNombre.set(listamiCursor[1])
                self.datacuadroTelefono.set(listamiCursor[2])
            else:
                messagebox.showinfo("ClientView", "NO ENCONTRADO!!")
                self.borrarInputBox()
            miConexion.commit()
            miConexion.close()
            self.UpdateTreeViewClientes()
        except:
            print("Failed to ReadData data into sqlite table")
        finally:
            if (miConexion):
                miConexion.close()
                print("The SQLite connection is closed")
    def updateData(self):
        """Update the row matching the form's name with the form values."""
        try:
            miConexion = sqlite3.connect("CLIENTES")
            miCursor = miConexion.cursor()
            sql_update_query="""SELECT * FROM CLIENTES WHERE NOMBRE_USUARIO = ?"""
            name=str(self.cuadroNombre.get())
            miCursor.execute(sql_update_query,(name,))
            listamiCursor=miCursor.fetchone() # fetch the matching row
            if(listamiCursor!=None):
                # NOTE(review): this loop runs once per *column* of the
                # fetched row, re-executing the same UPDATE each time; a
                # single execution would suffice — confirm intent.
                for usuario in listamiCursor:
                    data= self.leerInfoInputBox()
                    data.append(listamiCursor[0])
                    sql_update_query = """UPDATE CLIENTES set NOMBRE_USUARIO = ? ,TELEFONO = ? where ID = ?"""
                    miCursor.execute(sql_update_query, data)
                messagebox.showinfo("ClientView App", "BBDD ACTUALIZADA con exito!!")
            else:
                messagebox.showinfo("ClientView App", "No se actualizo!!")
            miConexion.commit()
            miCursor.close()
            self.UpdateTreeViewClientes()
        except:
            print("Failed to Actualizar data into sqlite table")
        finally:
            if (miConexion):
                miConexion.close()
                print("The SQLite connection is closed")
    def deleteData(self):
        """Delete the row matching the form's name (by its primary key)."""
        try:
            miConexion = sqlite3.connect("CLIENTES")
            miCursor = miConexion.cursor()
            sql_update_query="""SELECT * FROM CLIENTES WHERE NOMBRE_USUARIO = ?"""
            name=str(self.cuadroNombre.get())
            miCursor.execute(sql_update_query,(name,))
            listamiCursor=miCursor.fetchone() # fetch the matching row
            if(listamiCursor!=None):
                sql_update_query = """DELETE FROM CLIENTES WHERE ID = ?"""
                miCursor.execute(sql_update_query, (listamiCursor[0],))
                messagebox.showinfo("ClientView App", "dATO ELIMINADO!!")
            else:
                messagebox.showinfo("ClientView App", "No se PUDO ELIMINAR!!")
            miConexion.commit()
            miCursor.close()
            self.UpdateTreeViewClientes()
        except:
            print("Failed to deleteData data into sqlite table")
        finally:
            if (miConexion):
                miConexion.close()
                print("The SQLite connection is closed")
class VentasFrame(ttk.Frame):
    """Sales tab: a read-only TreeView over the SQLite ``VENTAS`` database."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.miFrame_3 = Frame(self)
        self.miFrame_3.pack()
        self.tituloLabel=Label(self.miFrame_3,text="Ventas",fg="blue",bg="white",font=("Times New Roman",20))
        self.tituloLabel.grid(row=0, column=1, padx=10, pady=10,sticky="we")
        self.treeVentas = ttk.Treeview(self.miFrame_3,columns = ("ID_VENTAS","ID_USUARIO","DESCRIPCION","TOTAL_VENTA","DESCUENTO"))
        self.treeVentas.grid(row=1,column=1,padx=10,pady=10)
        self.treeVentas['show']='headings'
        self.treeVentas.heading('#0', text='column0', anchor=tk.W)
        self.treeVentas.heading('#1', text='ID_VENTAS', anchor=tk.W)
        self.treeVentas.heading('#2', text='ID_USUARIO', anchor=tk.W)
        self.treeVentas.heading('#3', text='DESCRIPCION', anchor=tk.W)
        self.treeVentas.heading('#4', text='TOTAL_VENTA', anchor=tk.W)
        self.treeVentas.heading('#5', text='DESCUENTO', anchor=tk.W)
        self.treeVentas.column('#0',width=90,minwidth=90,stretch=tk.YES)
        self.treeVentas.column('#1',width=90,minwidth=90,stretch=tk.YES)
        self.treeVentas.column('#2',width=90,minwidth=90,stretch=tk.YES)
        self.treeVentas.column('#3',width=90,minwidth=90,stretch=tk.YES)
        self.treeVentas.column('#4',width=90,minwidth=90,stretch=tk.YES)
        self.treeVentas.column('#5',width=90,minwidth=90,stretch=tk.YES)
        # Initial population from the VENTAS table.
        for row in self.consultarVentas():
            self.treeVentas.insert('',END, values=row)
        self.scrollVert2=Scrollbar(self.miFrame_3,command=self.treeVentas.yview)
        self.scrollVert2.grid(row=1,column=2,sticky="nsnew")
        self.treeVentas.config(yscrollcommand=self.scrollVert2.set)
    def UpdateTreeViewVentas(self):
        # Clear and reload the sales grid.
        for row in self.treeVentas.get_children():
            self.treeVentas.delete(row)
        for row in self.consultarVentas():
            self.treeVentas.insert('',END, values=row)
    def consultarVentas(self):
        """Return all rows of the VENTAS table (empty list on any error)."""
        miConexion = sqlite3.connect("VENTAS")
        miCursor = miConexion.cursor()
        arreglo = []
        try:
            miCursor.execute("SELECT * FROM VENTAS")
            listamiCursor=miCursor.fetchall() # fetch all rows
            for ventas in listamiCursor:
                arreglo.append(ventas)
            miCursor.close()
            return arreglo
        except:
            return arreglo
    def consultarVentasporClientes(self,id_usuario):
        # NOTE(review): dead/broken helper — ``self.conectar`` and
        # ``self.CerrarConexion`` are not defined anywhere in this class,
        # and ``%s`` is not a valid sqlite3 placeholder (use ``?``). Confirm
        # this was copied from a MySQL version and never ported.
        cnx = self.conectar()
        cursor = cnx.cursor()
        sql_qry="""SELECT * FROM ventas WHERE ID_USUARIO = %s"""
        cursor.execute(sql_qry,(id_usuario,))
        arreglo = cursor.fetchall()
        self.CerrarConexion(cnx)
        return arreglo
    def ModificarVenta(self):
        """Delete the sale currently selected in the TreeView."""
        try:
            # NOTE(review): ``Ventas`` is not defined/imported in this file;
            # this handler likely always falls into the except branch.
            miVenta=Ventas()
            itemVenta = self.treeVentas.focus()
            #print(itemVenta)
            idVentas=int(self.treeVentas.item(itemVenta,"values")[0])
            # NOTE(review): the answer of askquestion is never checked, so
            # the row is deleted even if the user answers "no".
            opcion=messagebox.askquestion("Eliminar","Desea eliminar la venta Selecionada?")
            miVenta.EliminarVenta(idVentas)
            self.UpdateTreeViewVentas()
        except:
            messagebox.showwarning("ModificarVenta","No ha selecionado ninguna venta")
class Application(ttk.Frame):
    """Top-level frame: a ttk.Notebook holding the Clientes and Ventas tabs."""
    def __init__(self, main_window):
        super().__init__(main_window)
        main_window.title("ClientesApp - Gestion Clientes-Ventas PL")
        self.notebook = ttk.Notebook(self)
        # Tab 1: client CRUD form + grid.
        self.greeting_frame = ClientesFrame(self.notebook)
        self.notebook.add(
            self.greeting_frame, text="Clientes", padding=10)
        # Tab 2: read-only sales grid.
        self.about_frame = VentasFrame(self.notebook)
        self.notebook.add(
            self.about_frame, text="Ventas", padding=10)
        self.notebook.pack(padx=10, pady=10)
        self.pack()
main_window = tk.Tk()  # root window
app = Application(main_window)
app.mainloop()  # blocks here until the window is closed
3ccad72b1a2cfacd2d6fe88917c3285b6dfcf695 | Python | Albus/bssbridge | /bssbridge/lib/ftp/__init__.py | UTF-8 | 650 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import aioftp
import pydantic
class FtpUrl(pydantic.AnyUrl):
    """pydantic URL type restricted to ftp/ftps; requires a user and
    defaults the port to 21 when none is given."""
    user_required: bool = True
    allowed_schemes: frozenset = {"ftp", "ftps"}
    @property
    def is_secure(self) -> bool:
        # ftps means FTP over TLS.
        return self.scheme == "ftps"
    def __init__(self, url: pydantic.StrictStr, **kwargs):
        # BUG FIX: the original read kwargs['port'], which raised KeyError
        # whenever no port kwarg was supplied at all; .get() lets the
        # `or 21` default actually apply.
        super(FtpUrl, self).__init__(url=url, port=kwargs.get('port') or 21,
                                     **{k: kwargs[k] for k in kwargs if k not in ["port"]})
def get_client(url: FtpUrl) -> aioftp.Client:
    # Build (but do not yet connect) an aioftp client context for *url*;
    # TLS is enabled when the scheme is ftps.
    return aioftp.Client.context(host=url.host, port=url.port, user=url.user,
                                 password=url.password, ssl=url.is_secure)
| true |
2c209d5c41f5e5d249d411cb93e4d8e99f3fb510 | Python | flogothetis/Data-Structures-Python | /Trees/Red-Black Tree/RedBlackTree.py | UTF-8 | 4,095 | 3.203125 | 3 | [] | no_license | from Node import Node
class RedBlackTree:
    """Red-black binary search tree supporting insert, in-order and
    pre-order traversal.

    Colour is tracked via ``Node.isBlack`` and the child side via
    ``Node.isLeftChild`` (attributes of the imported ``Node`` class).
    """
    def __init__(self):
        self.root=None
        self.size=0  # number of inserted keys
    def rightRotation(self,node):
        # Rotate the subtree rooted at *node* to the right: node's left
        # child x takes node's place, node becomes x's right child.
        if(node is None):
            return
        x=node.left
        y=x.left
        x.parent = node.parent
        if(node.parent==None):
            self.root =x
        else:
            if(node.isLeftChild):
                x.isLeftChild=True
                x.parent.left=x
            else:
                x.isLeftChild=False
                x.parent.right=x
        # Re-attach x's former right subtree as node's new left subtree.
        if(x.right is not None):
            node.left=x.right
            node.left.isLeftChild=True
            node.left.parent=node
        else:
            node.left=None
        x.right=node
        node.parent=x
        node.isLeftChild=False
    def leftRotation(self,node):
        # Mirror image of rightRotation.
        if(node is None):
            return
        x=node.right
        y=x.right
        x.parent = node.parent
        if(node.parent==None):
            self.root =x
        else:
            if(node.isLeftChild):
                x.isLeftChild=True
                x.parent.left=x
            else:
                x.isLeftChild=False
                x.parent.right=x
        if(x.left is not None):
            node.right=x.left
            node.right.isLeftChild=False
            node.right.parent=node
        else:
            node.right=None
        x.left=node
        node.parent=x
        node.isLeftChild=True
    def leftRightRotation(self, node):
        # Double rotation for the left-right (zig-zag) case.
        self.leftRotation(node.left)
        self.rightRotation(node)
    def rightLeftRotation(self, node):
        # Double rotation for the right-left (zig-zag) case.
        self.rightRotation(node.right)
        self.leftRotation(node)
    def inOrder(self):
        # Print all keys in ascending order (space separated).
        self.inOrderRec(self.root)
    def inOrderRec(self,root):
        if(root is None):
            return
        self.inOrderRec(root.left)
        print ( root.key, end =' ')
        self.inOrderRec(root.right)
    def rotateTree(self,node):
        # Black-aunt case of the red-red violation: choose single or double
        # rotation from the node/parent child sides, then recolour.
        # NOTE(review): the else-branch reads ``node.parent.isRightChild``;
        # only ``isLeftChild`` is ever assigned in this file, so ``Node``
        # must define ``isRightChild`` or this raises AttributeError — confirm.
        if(node.isLeftChild):
            if(node.parent.isLeftChild):
                self.rightRotation(node.parent.parent)
                node.isBlack=False
                node.parent.isBlack=True
                node.parent.right.isBlack=False
            else:
                self.rightLeftRotation(node.parent.parent)
                node.isBlack=True
                node.right.isBlack=False
                node.left.isBlack=False
        else:
            if(node.parent.isRightChild):
                self.leftRotation(node.parent.parent)
                node.isBlack=False
                node.parent.isBlack=True
                node.parent.left.isBlack=False
            else:
                self.leftRightRotation(node.parent.parent)
                node.isBlack = True
                node.right.isBlack=False
                node.left.isBlack=False
    def correctTree(self,node):
        # Resolve a red-red violation at *node*: rotate when the aunt is
        # black (or absent), otherwise colour-flip aunt/parent/grandparent.
        # NOTE(review): the aunt is selected by *node's* side rather than
        # the parent's side, which differs from the textbook algorithm —
        # confirm this handles zig-zag shapes correctly.
        if(node.isLeftChild):
            #Black Aunt
            if(node.parent.parent.right is None or node.parent.parent.right.isBlack):
                return self.rotateTree(node)
            #Color Flip
            if(node.parent.parent.right!=None ):
                node.parent.parent.right.isBlack=True
            node.parent.isBlack=True
            node.parent.parent.isBlack=False
        else:
            if (node.parent.parent.left is None or node.parent.parent.left.isBlack):
                return self.rotateTree(node)
            #Color Flip
            if (node.parent.parent.left != None):
                node.parent.parent.left.isBlack = True
            node.parent.isBlack = True
            node.parent.parent.isBlack = False
    def checkColor(self,node):
        # Walk from *node* up to the root, fixing red-red violations.
        if(node==self.root):
            node.isBlack=True
            return
        if(node.isBlack==False and node.parent.isBlack==False):
            self.correctTree(node)
        self.checkColor(node.parent)
    def insert(self,key):
        """Insert *key* (duplicates go to the left subtree)."""
        #Create a new Node
        newNode= Node(key)
        if(self.root is None):
            self.root=newNode
        else:
            #else invoke recursive function
            self.insertNode(self.root,newNode)
        #check violation
        self.checkColor(newNode)
        self.size+=1
        self.root.isBlack=True
    def insertNode(self,root,newNode):
        # Standard BST descent; sets parent/isLeftChild on attachment.
        if(root is None):
            root=newNode
            return
        if(newNode.key <= root.key):
            if(root.left==None):
                root.left=newNode
                newNode.parent=root
                newNode.isLeftChild=True
                return
            self.insertNode(root.left,newNode)
        else:
            if (root.right == None):
                root.right = newNode
                newNode.parent = root
                newNode.isLeftChild = False
                return
            self.insertNode(root.right,newNode)
    def preOrder(self):
        # Print keys in pre-order (root, left, right).
        self.preOrderRec(self.root)
    def preOrderRec(self, root):
        if root is None:
            return
        print(root.key, end=" ")
        self.preOrderRec(root.left)
        self.preOrderRec(root.right)
# Demo: insert 7..1 in descending order and print both traversals.
tree=RedBlackTree()
tree.insert(7)
tree.insert(6)
tree.insert(5)
tree.insert(4)
tree.insert(3)
tree.insert(2)
tree.insert(1)
print("Inoder Traversal of Created Tree")
tree.inOrder()
print()
# NOTE(review): the label says "PostOrder" but the call below is preOrder.
print("PostOrder Traversal of Created Tree")
tree.preOrder()
| true |
2db62300904791a5befd88e145cdc9af282ad33a | Python | 981377660LMT/algorithm-study | /20_杂题/atc競プロ/競プロ典型 90 問/todo047 - Monochromatic Diagonal(★7).py | UTF-8 | 1,962 | 3.609375 | 4 | [] | no_license | # 如果行列字符相等 那么就是这种颜色
# 如果行列字符不相等 那么就是另外一种颜色
# !求有多少条正对角线同一颜色组成
# n<=1e6
# 1.如果n很小(n<=2000) 可以全部算出来
# R G B
# 变换规律:表示成0 1 2 结果为-(p1+p2)模三
# !2.n很大的时候需要哈希判断子串相等
# 左下半,判断
# 右上半,判断
from typing import Sequence
import sys
MAPPING = {'B': 0, 'W': 1, 'R': 2}
sys.setrecursionlimit(int(1e9))
input = sys.stdin.readline
MOD = int(1e9 + 7)
class StringHasher:
    """Polynomial rolling hash over a character sequence.

    Precomputes prefix hashes and base powers so the hash of any slice
    can be read off in O(1).
    """
    _BASE = 131
    _MOD = 998244353
    _OFFSET = 96

    @staticmethod
    def setBASE(base: int) -> None:
        StringHasher._BASE = base

    @staticmethod
    def setMOD(mod: int) -> None:
        StringHasher._MOD = mod

    @staticmethod
    def setOFFSET(offset: int) -> None:
        StringHasher._OFFSET = offset

    def __init__(self, sequence: Sequence[str]):
        """Build prefix-hash and base-power tables for *sequence*."""
        n = len(sequence)
        self._sequence = sequence
        prefix = [0] * (n + 1)
        powers = [0] * (n + 1)
        powers[0] = 1
        for idx, ch in enumerate(sequence, start=1):
            prefix[idx] = (
                prefix[idx - 1] * StringHasher._BASE + ord(ch) - self._OFFSET
            ) % StringHasher._MOD
            powers[idx] = powers[idx - 1] * StringHasher._BASE % StringHasher._MOD
        self._prefix = prefix
        self._base = powers

    def getHashOfSlice(self, left: int, right: int) -> int:
        """Hash of ``s[left:right]`` (half-open, 0-based)."""
        assert 0 <= left <= right <= len(self._sequence)
        full = self._prefix[right]
        head = self._prefix[left] * self._base[right - left]
        return (full - head) % StringHasher._MOD
# Commented-out contest input reading (never enabled):
# n = int(input())
# s1 = input()
# s2 = input()
# Smoke-test instances; the diagonal-counting solution itself was never written.
a = StringHasher(sequence='abcdefg')
b = StringHasher(sequence='abcdefg')
# TODO: original author's note (没看懂) — "didn't understand this yet".
| true |
fe9c1aa24f87454c37ba8b1d2fa676b71e6e8494 | Python | gaberosser/moveright | /core/register.py | UTF-8 | 3,700 | 2.65625 | 3 | [] | no_license | import sqlite3
import os
import copy
from config import cfg
from core import get_logger
import datetime
import collections
import itertools
LOGGER = get_logger("register")
DEFAULT_KWARGS = cfg["sqlite"]
class AccessLog(object):
    """Append-only SQLite access log; one table per logical log name.

    Connection kwargs default to the ``sqlite`` section of the project
    config; the database file's directory is created on demand.
    """
    # Column definitions in order — drives both CREATE TABLE and the INSERT
    # placeholder list in log().
    _schema = collections.OrderedDict([
        ("dt", "TIMESTAMP NOT NULL"),
        ("outcode", "INTEGER NOT NULL"),
        ("property_type", "INTEGER NOT NULL"),
        ("result", "TEXT"),
        ("success", "INTEGER NOT NULL CHECK( success IN (0, 1) )"),
        ("num_retries", "INTEGER")
    ])
    # Zero-argument callables that supply a value when the caller omits it.
    _default_for_insert = {
        "dt": datetime.datetime.now
    }
    def __init__(self, **kwargs):
        # kwargs are passed straight to sqlite3.connect; "database" is required.
        if len(kwargs) == 0:
            kwargs = copy.copy(DEFAULT_KWARGS)
        # Needed so TIMESTAMP columns round-trip as datetime objects.
        kwargs["detect_types"] = sqlite3.PARSE_DECLTYPES
        assert "database" in kwargs, "Required kwarg `database` not supplied"
        db_dir = os.path.dirname(kwargs["database"])
        if not os.path.isdir(db_dir):
            os.makedirs(db_dir)
            LOGGER.info("Created new directory %s for sqlite database.", db_dir)
        self.connection = sqlite3.connect(**kwargs)
        LOGGER.info("Using sqlite DB at %s", kwargs["database"])
        self.table_names = None  # set of existing table names; see update_table_names
        self.update_table_names()
    @property
    def cursor(self):
        # NOTE: returns a fresh cursor on every access.
        return self.connection.cursor()
    def update_table_names(self):
        # Refresh the cached set of table names from sqlite_master.
        ret = self.cursor.execute("SELECT name FROM sqlite_master WHERE type = 'table'")
        vals = ret.fetchall()
        # fetchall returns 1-tuples; chain flattens them into plain names.
        self.table_names = set(itertools.chain(*vals))
    def _create_if_not_exists_sql(self, table_name):
        """Build the CREATE TABLE IF NOT EXISTS statement from _schema.

        Table names cannot be bound as SQL parameters, so *table_name* is
        interpolated directly — callers must pass trusted names only.
        """
        sql = [f"CREATE TABLE IF NOT EXISTS {table_name} ("]
        last_ = list(self._schema.keys())[-1]
        for attr_name, attr_det in self._schema.items():
            the_el = " ".join([attr_name, attr_det])
            if attr_name != last_:
                # comma on all but the final line
                the_el += ","
            sql.append(the_el)
        sql.append(")")
        return "".join(sql)
    def create_access_log_table(self, table_name, overwrite=False):
        """Create *table_name* (optionally dropping an existing one first)."""
        if overwrite:
            sql = f"""
            DROP TABLE {table_name}
            """
            try:
                self.cursor.execute(sql)
                LOGGER.info("Dropped table %s.", table_name)
            except Exception:
                # Best effort: the table may simply not exist yet.
                pass
        sql = self._create_if_not_exists_sql(table_name)
        self.cursor.execute(sql)
        self.update_table_names()
        if table_name not in self.table_names:
            raise KeyError(f"I just tried to create a table called {table_name}, but it isn't in the list of table "
                           f"names after an update. Did creation fail?")
    def log(self, table_name, **insert_kwargs):
        """Insert one row into *table_name*, creating the table on demand.

        Raises KeyError for kwargs not in the schema; missing fields fall
        back to _default_for_insert (e.g. dt -> now()).
        """
        if table_name not in self.table_names:
            self.create_access_log_table(table_name)
        # check that the insert kwargs include only fields in the schema
        unknown_kwargs = set(insert_kwargs).difference(self._schema)
        if len(unknown_kwargs) > 0:
            unknown_str = ",".join(unknown_kwargs)
            raise KeyError(f"{len(unknown_kwargs)} unknown insert_kwargs: {unknown_str}.")
        ins_str = f"INSERT INTO {table_name} VALUES("
        ins_vals = []
        last_ = list(self._schema.keys())[-1]
        for k in self._schema:
            val = insert_kwargs.get(k)
            if val is None and k in self._default_for_insert:
                val = self._default_for_insert[k]()
            ins_vals.append(val)
            ins_str += "?"
            if k != last_:
                ins_str += ", "
        ins_str += ");"
        # Values are bound via placeholders; only the table name is interpolated.
        self.cursor.execute(ins_str, tuple(ins_vals))
        self.connection.commit()
| true |
fe5ebe910efca48d8b301da2ef45949044acf435 | Python | dm36/interview-practice | /pierre_practice/ch1/chained_comparison_operators.py | UTF-8 | 139 | 3.21875 | 3 | [] | no_license | print (1 < 2 < 3)
print (1 < 2 and 2 < 3)  # True - explicit form of the chained `1 < 2 < 3` above
print (1 < 3 > 2)  # True - chains may mix directions: (1 < 3) and (3 > 2)
print (1 < 3 and 3 > 2)  # True - explicit form of the line above
print (1 == 2 or 2 < 3)  # True - `or` succeeds via the right operand
print (1 == 1 or 100 == 1)  # True - short-circuits on the (True) left operand
| true |
c296724dffe8589891409bbaddd55767d233b86f | Python | DivyaNairCent/COMP_237 | /Youtube_Spam_Classifier.py | UTF-8 | 4,663 | 3.09375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
COMP237 - NLPProject - Group 3
Youtube Spam Classifier
@authors:
Divya Nair - 301169854
Diego Narvaez - 301082195
Sreelakshmi Pushpan - 301170860
Nestor Romero - 301133331
Jefil Tasna John Mohan - 301149710
"""
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import StratifiedShuffleSplit
import os
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
import re
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from pandas import DataFrame
### 1. Load data into Pandas Dataframe
data = pd.read_csv('./Youtube03-LMFAO.csv')
### 2. Basic data exploration
"""
print(data.head())
print(data.describe(include='all'))
print(data.CONTENT.describe())
"""
#Dataset with only the selected columns
data_new = data[['CONTENT','CLASS']].copy()
#Comments preprocessing: strip non-alphanumerics, lowercase, drop
#stopwords and lemmatise each remaining token.
lemmatizer = WordNetLemmatizer()
corpus = []
# NOTE(review): stopwords.words('english') is rebuilt for every word of
# every comment; hoisting it into a set once would speed this loop up a lot.
for i in range(0,len(data_new)):
    review = re.sub('[^a-zA-Z0-9:)]+', ' ', data_new['CONTENT'][i])
    review = review.lower()
    review = review.split()
    review = [lemmatizer.lemmatize(word) for word in review if not word in stopwords.words('english')]
    review = ' '.join(review)
    corpus.append(review)
corpus = DataFrame(corpus,columns=['comments'])
data_new['CONTENT'] = corpus['comments']
data_new['CONTENT'].eq('').values.any()
# NOTE(review): sample(frac=1) returns a shuffled copy that is discarded;
# data_new itself is not shuffled here.
data_new.sample(frac=1)
### 3. Data preparation for model building - NLTK
### 4. Output highlights
### 5. Downscale data
data_X_raw = data_new['CONTENT']
data_y = data_new['CLASS']
count_vectorizer = CountVectorizer()
data_X_vector = count_vectorizer.fit_transform(data_X_raw) # Fit the Data
model_tfidf = TfidfTransformer()
data_X = model_tfidf.fit_transform(data_X_vector)
"""
print(type(data_X_vector))
print(count_vectorizer.vocabulary_)
words_analysis = list(range(len(count_vectorizer.vocabulary_)))
for key in count_vectorizer.vocabulary_:
    value = count_vectorizer.vocabulary_[key]
    words_analysis[value] = key
print(len(count_vectorizer.get_feature_names()))
print(data_X_vector.shape)
print(data_X_vector[:5])
print(data_X[0:5])
words_analysis[795] #high tfidf
words_analysis[619] #high tfidf
words_analysis[668] #low tfidf
"""
### 7. Split dataset 75-25
split_data = StratifiedShuffleSplit(n_splits=5, test_size=0.25, random_state=0)
split_data.get_n_splits(data_X, data_y)
# NOTE(review): the loop overwrites the variables on each of the 5 splits,
# so only the *last* split is actually used for training/testing below.
for train_index, test_index in split_data.split(data_X, data_y):
    X_train, X_test = data_X[train_index], data_X[test_index]
    y_train, y_test = data_y[train_index], data_y[test_index]
### 8. Naive Bayes Classifier
classifier = GaussianNB()
classifier.fit(X_train.toarray(),y_train)
classifier.score(X_test.toarray(),y_test)
y_pred = classifier.predict(X_test.toarray())
### 9. Cross validate model 5-fold
### 10. Testing results, confusion matrix and accuracy
score = cross_val_score(classifier, X_train.toarray(), y_train, scoring='accuracy', cv=5);
print(score);
print('Mean of Score: ', score.mean())
results = pd.DataFrame(data = {'comments' : X_test, 'result' : y_pred, 'expected': y_test })
accuracy = accuracy_score(y_test,y_pred)
print('\n','Accuracy')
print(accuracy, '\n\n')
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred), '\n')
print("Classification Report:")
# NOTE(review): this rebinds the name `classification_report`, shadowing the
# imported sklearn function; a second call to it later would fail.
classification_report=classification_report(y_test,y_pred)
print(classification_report)
### 11. Test the model on hand-written comments (a mix of ham and spam).
input_data = [
    'wierd but funny¿',
    'Party Rock....lol...who wants to shuffle!!!',
    'wow!!!!!! increible song!!!!!!!!!',
    'Best song ever!!!!',
    'give it a like',
    'Check out this video on YouTube:',
    'One of my favorite videos',
    'Divya and Jefil could totally make this dance',
    'Sreelakshmi great recommendation!!',
    'Diego this a new recommendation for your playlist',
    'Nestor was this song popular in Colombia?'
    ]
# Transform input data using count vectorizer
input_tc = count_vectorizer.transform(input_data)
type(input_tc)
# Transform vectorized data using tfidf transformer
input_tfidf = model_tfidf.transform(input_tc)
type(input_tfidf)
# Predict the output categories
predictions = classifier.predict(input_tfidf.toarray())
# Print the outputs (class 0 = Ham, class 1 = Spam)
for sent, category in zip(input_data, predictions):
    if category == 0:
        label = 'Ham'
    else:
        label = 'Spam'
    print('\nInput:', sent, '\nPredicted category:', \
          category,'-', label)
| true |
017447e9f59f97feb00b2e0127c3bc22be47e2bb | Python | nicholas5416/MyTools | /Scripts/Other/random_cipher.py | UTF-8 | 374 | 3.21875 | 3 | [] | no_license | import os
import itertools

# One character per decimal digit; every 8-character product is one candidate.
char = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

# Write every 8-digit combination (10**8 lines) into 8wei.txt.
# PERFORMANCE FIX: the original spawned one shell per line via
# os.system("echo ... >> 8wei.txt") — 100 million subprocesses — inside an
# 8-level nested loop; itertools.product plus a direct append write produces
# the identical file contents in a single process.
with open("8wei.txt", "a") as out:
    for combo in itertools.product(char, repeat=8):
        out.write("".join(combo) + "\n")
print ("char Over.")
input()
# Each of the 8 positions has 10 possibilities, i.e. 10**8 lines in total.
| true |
fe53540d19dae2badad47596bab8e2bf2d36babf | Python | suryateja-207/PythonPackagesDownloadUtility | /script.py | UTF-8 | 1,765 | 2.875 | 3 | [] | no_license | import json
import subprocess
import argparse
def parse_dependencies(json_file):
    """Return the "Dependencies" list from *json_file*, or None on error.

    The file is expected to contain {"Dependencies": ["pkg==ver", ...]}.
    """
    try:
        # RESOURCE FIX: the original opened the file and never closed it;
        # json.load also replaces the manual read + json.loads pair.
        with open(json_file) as fh:
            return json.load(fh)["Dependencies"]
    except Exception as e:
        print("Exception: %s %s" %(type(e), e))
        print("Couldn't open file: %s" % json_file)
def install_dependency(dependency):
    """pip-install a single requirement string.

    Returns 0 on success and 1 on failure (process-exit-code style).
    """
    try:
        subprocess.check_output(["pip", "install", dependency])
        print("Succcesfully installed %s" %dependency)
    except Exception as e:
        print("Exception: %s %s" %(type(e), e))
        # BUG FIX: the original indexed dependency.split("==")[1], which
        # raised IndexError inside this handler for requirements without a
        # pinned version; partition() tolerates that case.
        name, _, version = dependency.partition("==")
        print("Couldn't install dependency: %s %s" % (name, version))
        return 1
    return 0
def installer(json_file):
    """Install every dependency listed in *json_file* and report failures."""
    print (json_file)
    # ROBUSTNESS FIX: parse_dependencies returns None on error; the
    # original then crashed iterating it. Treat that as "nothing to do".
    dlist = parse_dependencies(json_file) or []
    icount = 0          # how many installed successfully
    flist = []          # requirements that failed
    for item in dlist:
        if install_dependency(item) == 0:
            icount = icount + 1
        else:
            flist.append(item)
    if flist:
        print("Packages failed to install:")
        for item in flist:
            print (item.split("==")[0])
    elif len(dlist) == icount:
        print("Success")
# CLI entry point: -f/--json_file points at a JSON document shaped like
# {"Dependencies": ["pkg==ver", ...]}.
parser = argparse.ArgumentParser(description="Dependency installer for python."
                                 "Expects input as a json file with"
                                 "dependencies in a list")
parser.add_argument("-f", "--json_file", type=str, help="path to json file")
args = parser.parse_args()
if args.json_file:
    installer(args.json_file)
else:
    # No file supplied: show usage instead of failing.
    parser.print_help()
f42562014372fa6bbb3490c2092fac1c9215152c | Python | MariOdyssey/Weeping-Hall | /MethodsPractice/002两数相加.py | UTF-8 | 975 | 3.234375 | 3 | [] | no_license | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def addTwoNumbers(self, l1, l2) -> list:
        """Add two non-negative numbers whose digits are stored
        least-significant-first in the lists *l1* and *l2*.

        Returns the digit list of the sum in the same order.

        BUG FIX: the original crashed on unequal lengths (IndexError),
        mis-handled the carry (added it after the %10 instead of before),
        and its `if not l1[i]==None or l2[i]==None` guard was an
        operator-precedence accident that was always taken.
        """
        result = []
        carry = 0
        for i in range(max(len(l1), len(l2))):
            # Treat a missing digit as 0 once one list runs out.
            d1 = l1[i] if i < len(l1) else 0
            d2 = l2[i] if i < len(l2) else 0
            total = d1 + d2 + carry
            result.append(total % 10)
            carry = total // 10
        if carry:
            result.append(carry)
        return result
| true |
30a5e01dfbd8e6793c5788e72d65a6385156938b | Python | fengyxp/Decision-Tree | /ID3/chooseBestFeatureToSplit.py | UTF-8 | 1,065 | 3.234375 | 3 | [] | no_license | from calc_shannon_ent import calcShannonEnt
from splitDataSet_function import splitDataSet
# 选择最好的数据集划分方式
def chooseBestFeatureToSplit(dataSet):
    '''
    Choose the feature whose split yields the largest information gain.
    :param dataSet: list of samples; the last column is the class label
    :return: index of the best feature (-1 if no split improves entropy)
    '''
    numFeatures = len(dataSet[0]) - 1  # number of candidate features
    baseEntropy = calcShannonEnt(dataSet)  # entropy before any split
    bestInfoGain = 0.0
    bestFeature = -1  # index of the best feature so far
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]  # values of feature i
        uniqueVals = set(featList)  # distinct values of feature i
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            # BUG FIX: the original used the undefined name `prop` here,
            # which raised NameError on the first call.
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy  # information gain of feature i
        if (infoGain > bestInfoGain):
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
| true |
2b4738bbdc205180d19b432cbcacfb08506b4c4a | Python | shmehta21/DS_Algo | /two_sum.py | UTF-8 | 868 | 3.765625 | 4 | [] | no_license |
def twoSum(nums, target):
    """
    :type nums: List[int]
    :type target: int
    :rtype: List[int]
    : O(n^^2) complexity

    Return the indices [i, j] (i < j) of the first pair summing to
    *target*, or [] when no pair exists.

    BUG FIX: the original's inner loop started at index 1 (missing pair
    (0, 0)-adjacent cases and re-testing pairs twice), and its manual
    ``i += 1`` / ``j += 1`` statements were no-ops inside ``for`` loops.
    """
    for i in range(len(nums)):
        # Only look at later elements so each pair is tested once.
        for j in range(i + 1, len(nums)):
            if nums[i] + nums[j] == target:
                return [i, j]
    return []
def two_sum_eff(nums, target):
    '''
    O(n) complexity

    Single pass with a value->index map: for each element, check whether
    its complement was already seen. Returns [current_index,
    earlier_index] or [] when no pair sums to target.
    '''
    seen = {}
    for idx, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [idx, seen[complement]]
        seen[value] = idx
    return []
if __name__ == '__main__':
nums = [2,7,9,11]
target = 9
result = two_sum_eff(nums, target)
print(result)
| true |
6af83041a0953ebe66111b104c1c34ea697b042a | Python | Kamesh-Mishra/Data_Science | /Data Analytics/hands_on_activity.py | UTF-8 | 3,604 | 3.8125 | 4 | [] | no_license |
"""
Analysis of Salaries Data ( Hand On Activity )
1. Which Male and Female Professor has the highest and the lowest salaries
2. Which Professor takes the highest and lowest salaries.
3. Missing Salaries - should be mean of the matching salaries of those
whose service is the same
4. Missing phd - should be mean of the matching service
5. How many are Male Staff and how many are Female Staff.
Show both in numbers and Graphically using Pie Chart.
Show both numbers and in percentage
6. How many are Prof, AssocProf and AsstProf.
Show both in numbers adn Graphically using a Pie Chart
7. Who are the senior and junior most employees in the organization.
8. Draw a histogram of the salaries divided into bin starting
from 50K and increment of 15K
"""
import pandas as pd
df = pd.read_csv("Salaries.csv")
# 1. Which Male and Female Professor has the highest and the lowest salaries
# NOTE(review): this only answers the lowest-paid *female* professor; the
# male case and the highest salaries are not computed here.
df1 = df[(df["salary"]==df[(df['sex']== "Female") & (df["rank"] == "Prof")]["salary"].min())]
print(df1)
# 2. Which Professor takes the highest and lowest salaries.
df1 = df[(df["salary"]==df[(df["rank"] == "Prof")]["salary"].min())]
print(df1)
df2 = df[(df["salary"]==df[(df["rank"] == "Prof")]["salary"].max())]
print(df2)
# 3. Missing Salaries - should be mean of the matching salaries of those
# whose service is the same
# NOTE(review): fillna returns a new frame that is only printed; `df`
# itself is never updated with the imputed values.
df2 =df[(df["service"] == 18)]["salary"].mean()
df1 =df[(df["service"] == 18)]
df1 = df1.fillna(df2)
print(df1)
df3 =df[(df["service"] == 2)]["salary"].mean()
df4 =df[(df["service"] == 2)]
df4 = df4.fillna(df3)
print(df4)
########## or ###########
# Generic version: impute each row whose salary is null from the mean
# salary of rows with the same years of service.
for i in df[df["salary"].isnull()]["service"].values:
    df4 =df[(df["service"] == i)]
    df3 =df[(df["service"] == i)]["salary"].mean()
    df4 = df4.fillna(df3)
    print(df4)
# 4. Missing phd - should be mean of the matching service
for i in df[df["phd"].isnull()]["service"].values:
    df4 =df[(df["service"] == i)]
    df3 =df[(df["service"] == i)]["phd"].mean()
    df4 = df4.fillna(df3)
    print(df4)
# 5. How many are Male Staff and how many are Female Staff.
# Show both in numbers and Graphically using Pie Chart.
# Show both numbers and in percentage
# NOTE(review): these two value_counts results are discarded (not printed).
df["sex"].value_counts()
df["sex"].value_counts(normalize = True)
import matplotlib.pyplot as plt
labels = "Male","female"
sizes = [df["sex"].value_counts()[0],df["sex"].value_counts()[1]]
explode = 0,0
colors = ["yellow","pink"]
plt.pie(sizes,explode,labels,colors, autopct='%1.2f%%', shadow=True)
plt.show()
# 6. How many are Prof, AssocProf and AsstProf.
# Show both in numbers adn Graphically using a Pie Chart
print(df["rank"].value_counts())
import matplotlib.pyplot as plt
labels = df["rank"].value_counts().index[0],df["rank"].value_counts().index[1],df["rank"].value_counts().index[2]
sizes = [df["rank"].value_counts().values[0],df["rank"].value_counts().values[1],df["rank"].value_counts().values[2]]
explode = 0,0,0
colors = ["R","G","B"]
plt.pie(sizes,explode,labels,colors , autopct = '%1.2f%%', shadow = True)
plt.show()
# 7. Who are the senior and junior most employees in the organization.
# NOTE(review): head()/tail() results are discarded — wrap in print() to see them.
df["service"].sort_values().head()
df['service'].sort_values().tail()
# 8. Draw a histogram of the salaries divided into bin starting
# from 50K and increment of 15K
import matplotlib.pyplot as plt
a = df["salary"].sort_values().values
plt.hist(a, bins = [50000,65000,80000,95000,110000,125000,140000,155000,170000,185000,200000])
# NOTE(review): plt.axis expects one sequence [xmin, xmax, ymin, ymax];
# two positional numbers will raise — presumably plt.xlim was intended.
plt.axis(25000,215000)
plt.xlabel("salary")
plt.ylabel("candidate")
plt.show()
| true |
11ffd9928288c7fd873a9bbea08e718fe0e3e1ba | Python | Gnahue/sanFranciscoBiclas | /prediccion/modelo/algoritmo/serialization.py | UTF-8 | 324 | 3.125 | 3 | [] | no_license | import pickle
def serialize_tree(tree, file_name):
    """Pickle ``tree`` into ``file_name``.

    Uses a ``with`` block so the file handle is closed even when
    ``pickle.dump`` raises (the original leaked the handle on error).
    """
    print('Serializando arbol')
    with open(file_name, 'wb') as output:
        pickle.dump(tree, output)
def desserialize_tree(file_name):
    """Load and return the pickled tree stored in ``file_name``.

    The file is opened in a ``with`` block; the original never closed it.
    """
    print('Desserializando arbol: ' + file_name)
    with open(file_name, 'rb') as pkl_file:
        return pickle.load(pkl_file)
| true |
dd25ae628eb5ce8d770e62defd50a21aaa01f25f | Python | magnusoy/OpenCV-Python-Applications | /blending images/blending_images.py | UTF-8 | 644 | 2.640625 | 3 | [
"MIT"
] | permissive | import cv2
import numpy as np
# Load the background (road) and foreground (car) images in BGR order.
# NOTE(review): cv2.imread returns None when a file is missing -- presumably
# road.jpg/car.jpg live next to this script; confirm before running.
img1 = cv2.imread("road.jpg")
img2 = cv2.imread("car.jpg")
# Grayscale copy of the car image, used only to build the masks below.
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
# Pixels brighter than 240 (the near-white background) become 255 in `mask`.
ret, mask = cv2.threshold(img2_gray, 240, 255, cv2.THRESH_BINARY)
# Inverted mask selects the car itself instead of its background.
mask_inv = cv2.bitwise_not(mask)
# Keep the road only where the car image is background...
road = cv2.bitwise_and(img1, img1, mask=mask)
# ...and keep the car pixels only where they are foreground.
car = cv2.bitwise_and(img2, img2, mask=mask_inv)
# Pixel-wise sum composites the car onto the road.
result = cv2.add(road, car)
# Show every intermediate stage plus the final composite.
cv2.imshow("img1", img1)
cv2.imshow("img2", img2)
cv2.imshow("road background", road)
cv2.imshow("car no background", car)
cv2.imshow("mask", mask)
cv2.imshow("mask inverse", mask_inv)
cv2.imshow("result", result)
# Block until any key is pressed.
cv2.waitKey(0)
cv2.destroyAllWindows() | true |
7d73f8d5b59c849092fd18da4bed0d5122dd30ec | Python | julianje/ImageInference | /models/error_analysis_per_sample.py | UTF-8 | 7,181 | 2.578125 | 3 | [] | no_license | import csv
import sys
from Bishop import *
# Load the map we're simulating.
WORLD = sys.argv[1]
# Load the (encoded) coordinates of the door(s) and observation for this map.
ENCODED_DOORS = sys.argv[2]
ENCODED_OBSERVATIONS = sys.argv[3]
# Initialize the number of utility functions we want to sample.
NUM_SAMPLES = int(sys.argv[4])
# Initialize the number of paths we want to sample per utility function.
NUM_PATHS = 1000
# Initialize the iteration of the current world.
WORLD_NUM = sys.argv[5]
# Set the stage of the path we're analyzing.
STAGE = "entering"
# Set the path for storing the model predictions.
PATH = "../data/experiment_3/model/predictions/Manhattan/" \
+ "error_analysis_per_sample/" + WORLD_NUM + "/"
# Computes the likelihood that one agent was in the room based on the
# observations.
def scene_likelihood_one_agent(agent_0, observations):
    """Estimate P(observations | one agent) by forward simulation.

    Samples NUM_PATHS paths for ``agent_0`` under its current policy and
    counts the fraction whose relevant segment (selected by the global
    STAGE setting) contains every observed state.

    Returns [scene_matches, scene_likelihood, simulations_0] where
    ``scene_matches`` is a 0/1 vector over sampled paths and
    ``scene_likelihood`` is its mean.
    """
    # Generate paths according to the current map and policy.
    simulations_0 = agent_0.SimulateAgents(NUM_PATHS, ResampleAgent=False,
        Simple=False, Verbose=False, replan=False)
    observed = set(observations)
    scene_matches = np.zeros(NUM_PATHS)
    for i in range(len(simulations_0.States)):
        state_sequence_0 = simulations_0.States[i]
        # Bug fix: use floor division so the slice index stays an int under
        # Python 3 ("/" yields a float there and slicing fails); identical
        # result under Python 2 integer division.
        midpoint = len(state_sequence_0) // 2
        if STAGE == "entering":
            path_0 = state_sequence_0[:midpoint]
        elif STAGE == "exiting":
            path_0 = state_sequence_0[midpoint:]
        elif STAGE == "either":
            path_0 = state_sequence_0
        else:
            # Previously an unknown STAGE left path_0 unbound (NameError).
            raise ValueError("Unknown STAGE: " + str(STAGE))
        # A path explains the scene iff it visits every observation.
        scene_matches[i] = 1.0 if observed.issubset(path_0) else 0.0
    scene_likelihood = sum(scene_matches) * 1.0 / NUM_PATHS
    return [scene_matches, scene_likelihood, simulations_0]
# Computes the likelihood that two agents were in the room based on the
# observations.
def scene_likelihood_two_agents(agent_1, agent_2, observations):
    """Estimate P(observations | two agents) by forward simulation.

    Samples NUM_PATHS paths for each agent and counts the fraction of
    path pairs that jointly explain the observations: the paths must be
    distinct, each must contain at least one observation, and together
    they must cover all of them.

    Returns [scene_matches, scene_likelihood, simulations_1,
    simulations_2].
    """
    # Generate paths according to the current map and policy.
    simulations_1 = agent_1.SimulateAgents(NUM_PATHS, ResampleAgent=False,
        Simple=False, Verbose=False, replan=False)
    simulations_2 = agent_2.SimulateAgents(NUM_PATHS, ResampleAgent=False,
        Simple=False, Verbose=False, replan=False)
    observed = set(observations)
    scene_matches = np.zeros(NUM_PATHS)
    for i in range(len(simulations_1.States)):
        state_sequence_1 = simulations_1.States[i]
        state_sequence_2 = simulations_2.States[i]
        # Bug fix: floor division keeps the slice indices ints on Python 3
        # (the original "/" produces floats there and slicing fails).
        mid_1 = len(state_sequence_1) // 2
        mid_2 = len(state_sequence_2) // 2
        if STAGE == "entering":
            path_1 = state_sequence_1[:mid_1]
            path_2 = state_sequence_2[:mid_2]
        elif STAGE == "exiting":
            path_1 = state_sequence_1[mid_1:]
            path_2 = state_sequence_2[mid_2:]
        elif STAGE == "either":
            path_1 = state_sequence_1
            path_2 = state_sequence_2
        else:
            # Previously an unknown STAGE left the paths unbound (NameError).
            raise ValueError("Unknown STAGE: " + str(STAGE))
        # Two identical paths cannot be evidence for two distinct agents.
        if path_1 == path_2:
            continue
        intersection_1 = observed.intersection(path_1)
        intersection_2 = observed.intersection(path_2)
        # Each path must explain part of the scene and their union must
        # explain all of it.
        if (intersection_1 and intersection_2
                and intersection_1.union(intersection_2) == observed):
            scene_matches[i] = 1.0
    scene_likelihood = sum(scene_matches) * 1.0 / NUM_PATHS
    return [scene_matches, scene_likelihood, simulations_1, simulations_2]
# Transform x- and y-coordinates into a state representation.
def transform_state(agent, coords):
    """Flatten a coordinate pair into a single state index.

    Assumes ``coords`` is (row, col) on a row-major grid of width
    ``agent.Plr.Map.mapwidth`` -- consistent with how callers decode
    "x y" pairs, but worth confirming against Bishop's map layout.
    """
    row, col = coords[0], coords[1]
    return row * agent.Plr.Map.mapwidth + col
# Create three agents for this map (while suppressing print output).
sys.stdout = open(os.devnull, "w")
agent_0 = LoadObserver("../stimuli/experiment_3/"+WORLD, Silent=True)
agent_1 = LoadObserver("../stimuli/experiment_3/"+WORLD, Silent=True)
agent_2 = LoadObserver("../stimuli/experiment_3/"+WORLD, Silent=True)
sys.stdout = sys.__stdout__
# Decode the coordinates of the doors and the observation for this map.
doors = [[int(num) for num in pair.split(" ")] \
for pair in ENCODED_DOORS.split("-")]
observations = [transform_state(agent_0, \
[int(num) for num in pair.split(" ")]) \
for pair in ENCODED_OBSERVATIONS.split("-")]
# Sample utility functions and compute the likelihood of the scene given
# a set of sampled paths.
print("Map: "+WORLD)
results_one_agent = [0] * NUM_SAMPLES
results_two_agents = [0] * NUM_SAMPLES
for i in range(NUM_SAMPLES):
# Let the user know which sample we're on.
print("Utility function #: "+str(i+1))
# Sample which door each agent will use.
door_0 = random.choice(doors)
door_1 = random.choice(doors)
door_2 = random.choice(doors)
agent_0.SetStartingPoint(door_0[0], Verbose=False)
agent_1.SetStartingPoint(door_1[0], Verbose=False)
agent_2.SetStartingPoint(door_2[0], Verbose=False)
agent_0.Plr.Map.ExitState = door_0[1]
agent_1.Plr.Map.ExitState = door_1[1]
agent_2.Plr.Map.ExitState = door_2[1]
# Run the planner for each agent (while supressing print output).
sys.stdout = open(os.devnull, "w")
agent_0.Plr.Prepare()
agent_1.Plr.Prepare()
agent_2.Plr.Prepare()
sys.stdout = sys.__stdout__
# Randomly sample utility functions (instead of reward functions) so that
# agents aren't influenced by how far the goals are.
agent_0.Plr.Utilities = np.array([random.random()*100 \
for goal in agent_0.Plr.Map.ObjectNames])
agent_1.Plr.Utilities = np.array([random.random()*100 \
for goal in agent_1.Plr.Map.ObjectNames])
agent_2.Plr.Utilities = np.array([random.random()*100 \
for goal in agent_2.Plr.Map.ObjectNames])
# Compute the likelihood of the observations if one or two agents were in
# the room.
results_one_agent[i] = scene_likelihood_one_agent(agent_0, observations)
results_two_agents[i] = scene_likelihood_two_agents(agent_1, agent_2,
observations)
# Open a file for writing the data.
with open(PATH+WORLD+"_agents_posterior.csv", "w") as file:
writer = csv.writer(file)
for i in range(NUM_SAMPLES):
# Compute the mean likelihood over sampled reward functions for one
# agent.
mean_likelihood_one_agent = np.mean([results_one_agent[j][1] \
for j in range(i+1)])
# Compute the mean likelihood over sampled reward functions for two
# agents.
mean_likelihood_two_agents = np.mean([results_two_agents[j][1] \
for j in range(i+1)])
# Compute the posterior using Bayes' theorem.
evidence = mean_likelihood_one_agent + mean_likelihood_two_agents
posterior = 0 if evidence == 0 \
else (mean_likelihood_two_agents/evidence)
# Write the data to a file.
writer.writerow([i+1, WORLD, mean_likelihood_one_agent, \
mean_likelihood_two_agents, posterior])
| true |
ebe879419b82bc16ab9b26a02313f924ce5d79ea | Python | makeevmr/My-works | /hw5/task2/Наркоманы.py | UTF-8 | 1,893 | 3.421875 | 3 | [] | no_license | T1 = int(input("Введите время заполнения шприца "))
# Tick-by-tick simulation of a syringe being filled (user 1), passed to
# user 2, and injected.  Each simulated second prints a symbol on user 1's
# line (N=filling, p=passing, .=idle) and on user 2's line (.=idle,
# I=injecting); T1 (fill time) is read just above this block.
T2 = int(input("Введите время передачи шприца "))
T3 = int(input("Введите время инъекции шприца "))
seconds = int(input("Введите время "))
# Per-phase countdown copies of the three durations.
T1_1 = T1
T2_1 = T2
T3_1 = T3
# kol_spots: idle seconds accumulated since the previous injection ended;
# kol_periods: completed injection-to-injection intervals;
# kol_sec: summed interval lengths, for the average printed at the end.
kol_spots = 0
kol_periods = 0
kol_sec = 0
print('1: ', end='')
print('2:')
a = False # True while user 2 holds the syringe, False otherwise
d = False # False until the first injection has finished, then True
for i in range(seconds):
    flag = False
    if T1_1 != 0: # filling the syringe
        print('N ', end='')
        T1_1 -= 1
    else:
        if a is True: # user 2 already has the syringe - just wait
            print('. ', end='')
        else:
            if T2_1 != 0: # passing the syringe to user 2
                T2_1 -= 1
                print('p ', end='')
                if T2_1 == 0: # hand-off complete
                    flag = True
                    a = True
                    T1_1 = T1
                    T2_1 = T2
                    if d is True:
                        kol_spots += 1
                    print('.')
    if a is False:
        if d is True:
            kol_spots += 1
        print('.')
    if a is True and flag is False:
        if T3_1 != 0:
            T3_1 -= 1
            print('I')
            if T3_1 == 0:
                if d is True:
                    kol_sec = kol_sec + kol_spots + T3
                    kol_periods += 1
                    kol_spots = 0
                T3_1 = T3
                d = True
                a = False
# Average time between consecutive injection endings, if at least one
# complete interval was observed.
if kol_periods != 0:
    print(kol_sec/kol_periods)
else:
    print('Нет двух концов инъекции')
| true |
4db1be01e11239d9a41e43ea6a52fcd500304635 | Python | josegreg/hello-world | /course2/clase3/ex3_1.py | UTF-8 | 762 | 3.796875 | 4 | [] | no_license | '''
Crear un diccionario que almacene los valores de un producto
(nombre, marca, descripcion,costos)
'''
# Product record: name, brand, description, cost and the stores carrying it.
lista_prod = {
    "nombre" : "Fabuloso",
    "marca" : "Ml",
    "descripcion" : "Limpiador de pisos líquido",
    "costos" : 15,
    "tiendas" : ["Gigante","Soriana","Comercial"]
}
print(lista_prod)
'''
Crear un dicconario que almacene los datos de un experimento
'''
# Three measurement records: who measured, elapsed time, pH and the
# temperature (stored as a display string).
med1 = {
    "nombre" : "Jose",
    "tiempo" : 2.5,
    "PH" : 8,
    "temperatura" : "33.5C"
}
med2 = {
    "nombre" : "Alberto",
    "tiempo" : 1.87,
    "PH" : 4,
    "temperatura" : "34C"
}
med3 = {
    "nombre" : "Oscar",
    "tiempo" : 3.2,
    "PH" : 8,
    "temperatura" : "25C"
}
# Index the measurements by their (string) sequence number.
exp_med = {
    "1" : med1,
    "2" : med2,
    "3" : med3
}
print(exp_med) | true |
0251d30c2ac0f575bf6cc7ebcb5582974ff2b8b9 | Python | vputz/marion-biblio | /marion_biblio/csv_queries.py | UTF-8 | 1,967 | 3.03125 | 3 | [] | no_license | """
A module for doing quick queries from a CSV input file to JSON query output.
This is meant to be for simple queries where the result will not be
stored separately from the initial CSV files, ie for one-off upload-and-view
queries.
This may change in the future
"""
import csv
import json
import fileinput
def location_and_value(filename, parameters, locationFunc):
    """Geocode a CSV of locations into plottable nodes.

    Returns a dict of::

        { 'nodes': { id(int): { lat, lon, text, value } },
          'not_located': [ location-string, ... ] }

    ``parameters`` maps column roles to (stringable) column indices:
    ``locationColumn`` is required; ``textColumn`` and ``valueColumn``
    are optional (without ``textColumn`` the text is '' and without
    ``valueColumn`` no value key is set).  ``locationFunc(location, True)``
    must return a dict with at least lat/lon keys, or None when the
    location cannot be resolved.

    Node ids count every input row (1-based), including rows that fail,
    so ids are stable row numbers.  The first row is not skipped.
    """
    result = {'nodes': {}, 'not_located': []}
    node_index = 0
    # Hoisted out of the loop; a missing/invalid locationColumn is a
    # programming error and now fails fast instead of silently skipping
    # every row.
    location_col = int(parameters['locationColumn'])
    with open(filename, encoding="ISO-8859-1") as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            node_index = node_index + 1
            try:
                location = row[location_col]
                loc = locationFunc(location, True)
                if loc is None:  # idiom fix: was "loc == None"
                    result['not_located'].append(location)
                    continue
                node = loc
                if 'textColumn' in parameters:
                    node['text'] = row[int(parameters['textColumn'])]
                else:
                    node['text'] = ''
                if 'valueColumn' in parameters:
                    node['value'] = row[int(parameters['valueColumn'])]
                result['nodes'][node_index] = node
            except Exception:
                # Best-effort per-row handling (short/bad rows are simply
                # skipped), but no longer a bare "except:" that would also
                # swallow KeyboardInterrupt/SystemExit.
                continue
    return result
return result
| true |
43daccccd05cd090e11a724772bfccfec8bbad02 | Python | LianBorda/TrabajosPython | /py2_retos1/reto3.py | UTF-8 | 305 | 4.3125 | 4 | [] | no_license |
# Two fixed operands; the script prints their sum, difference, product
# and quotient with Spanish labels.
n_1 = 89
n_2= 34
print(f'Los numeros ingresados son:{n_1} y {n_2}')
print('SUMA:')
s= n_1+n_2
print(f'{ n_1} + {n_2} = {s}')
print('RESTA:')
r= n_1-n_2
print(f'{n_1} - {n_2} = {r}')
print('PRODUCTO:')
m= n_1*n_2
print(f'{n_1} * {n_2} = {m}')
print('DIVISION:')
# True division: the result is a float even though both operands are ints.
d= n_1/n_2
print(f'{n_1} / {n_2} = {d}')
| true |
e9972001599df78ed3179c3d27b25c71cf5d7e38 | Python | TaeYeon-kim-ai/STUDY_1.py | /tf114/tf11_3_cancer.py | UTF-8 | 1,732 | 2.984375 | 3 | [] | no_license | # 이진 분류
from sklearn.datasets import load_breast_cancer
import tensorflow as tf
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# Binary classification of the breast-cancer dataset with a single
# logistic-regression unit, written against the TF1 graph API.
datasets = load_breast_cancer()
x_data = datasets.data
y_data = datasets.target.reshape(-1,1)
print(x_data.shape, y_data.shape) #(569, 30) (569,1)
# Graph inputs: 30 features per sample, one 0/1 label.
x = tf.placeholder(tf.float32, shape = [None, 30])
y = tf.placeholder(tf.float32, shape = [None, 1])
w = tf.Variable(tf.random_normal([30, 1]), name = 'weight')
b = tf.Variable(tf.random_normal([1]), name = 'bias')
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, train_size = 0.8, random_state = 64)
#MinMax
# Scaler is fit on the training split only, then applied to both splits.
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
#hypothesis
hypothesis = tf.sigmoid(tf.matmul(x, w) + b)
#COMPILE
#loss
# Binary cross-entropy.  NOTE(review): tf.log(hypothesis) can produce NaN
# when the sigmoid saturates; tf.nn.sigmoid_cross_entropy_with_logits is
# the numerically stable alternative -- confirm before changing.
cost = -tf.reduce_mean(y * tf.log(hypothesis) + (1-y) * tf.log(1 - hypothesis))
train = tf.train.AdamOptimizer(learning_rate=0.0007).minimize(cost)
# Threshold the sigmoid output at 0.5 to get hard 0/1 predictions.
predicted = tf.cast(hypothesis > 0.5, dtype = tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, y), dtype = tf.float32))
with tf.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # 2001 full-batch gradient steps, logging the loss every 50 steps.
    for step in range(2001):
        sess.run(train, feed_dict={x:x_train, y:y_train})
        if step % 50 == 0:
            print(step, '\t loss', sess.run(cost, feed_dict={x:x_train, y:y_train}))
    print('Acc :', sess.run(accuracy, feed_dict={x:x_test, y:y_test}))
    print('score :', accuracy_score(y_test, sess.run(predicted, feed_dict={x:x_test})))
# Acc : 0.9649123
# score : 0.9649122807017544 | true |
11cba371b6e13919a972f8722e9b3ea305f85de3 | Python | welfare-state-analytics/tesseract-comparison | /edits/real_dates_cleaner.py | UTF-8 | 599 | 2.640625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import os
import re
# Work inside the annotated-newspaper data directory (hard-coded path).
os.chdir('/home/robinsaberi/Git_Repos/Tesseract/annotated_newspaper_data/manual_content')
# One encoded identifier string per entry, e.g. "<name>_ed1_p2_par3".
data = np.loadtxt('real_date.txt', delimiter=',', dtype=str)
# Maybe make non-names ints?
def stringExtract(string):
    """Split an id like 'name_ed1_p2_par3' into (name, edition, page, paragraph).

    The numeric parts are returned as strings, taken from the first three
    runs of digits after the first underscore.
    """
    name, _, remainder = string.partition('_')
    digit_runs = re.findall(r'\d+', remainder)
    edition, page, paragraph = digit_runs[:3]
    return (name, edition, page, paragraph)
# Build a 4-column frame from the parsed tuples and label the columns.
real_date_fixed = pd.DataFrame(list(map(stringExtract, data)))
real_date_fixed.columns = ['name','edition','page','paragraph']
real_date_fixed.to_csv('real_date_fixed.csv', index=False) | true |
323b620fcbf70a204c13a987f845a3f5cd9f8fc0 | Python | harsh-2024/Generators-using-turtle | /shape_generator.py | UTF-8 | 519 | 3.46875 | 3 | [] | no_license | import turtle
from turtle import Turtle, Screen
import random
# One shared turtle and screen for the whole script; colormode(255)
# lets colours be given as 0-255 RGB tuples.
timmy = Turtle()
my_screen = Screen()
turtle.colormode(255)
def color_picker():
    """Return a random (r, g, b) colour with each channel in 0..255."""
    channels = tuple(random.randint(0, 255) for _ in range(3))
    return channels
def draw_shape(sides):
    """Draw a regular polygon with ``sides`` edges of length 100
    using the module-level turtle ``timmy``."""
    turn = 360 / sides
    for _ in range(sides):
        timmy.forward(100)
        timmy.right(turn)
# Draw polygons from triangle up to decagon, each in a random colour.
for sides in range(3, 11):
    timmy.color(color_picker())
    timmy.pensize(5)
    draw_shape(sides)
| true |
1625e377b3463ebb2670c8b7fb241ceeff4ae3bc | Python | new-power-new-life/Leetcode-30-Days-Code-Challenge-April-2020 | /Week4/3 - LRU Cache/Solution.py | UTF-8 | 1,544 | 3.71875 | 4 | [] | no_license | #
# Created on Fri Apr 24 2020
#
# Title: Leetcode - LRU Cache
#
# Author: Vatsal Mistry
# Web: mistryvatsal.github.io
#
# Using OrderedDict because it remembers key insertion order, so the least recently used entry can be evicted with popitem(last=False).
from collections import OrderedDict
class LRUCache:
    """Fixed-capacity least-recently-used cache built on OrderedDict.

    Keys are kept ordered from least- to most-recently used, so the
    LRU entry is always at the front of the OrderedDict.
    """

    def __init__(self, capacity):
        # Maximum number of entries held at once.
        self.cache_capacity = capacity
        # Backing store; insertion order doubles as the recency order.
        self.hashmap = OrderedDict()

    def get(self, key):
        """Return the value for ``key`` (marking it most recent), or -1."""
        if key not in self.hashmap:
            return -1
        # Refresh recency by moving the key to the most-recent end.
        self.hashmap.move_to_end(key)
        return self.hashmap[key]

    def put(self, key, value):
        """Insert or overwrite ``key``; evict the LRU entry when full."""
        if key in self.hashmap:
            # Existing key: just refresh its recency before overwriting.
            self.hashmap.move_to_end(key)
        elif len(self.hashmap) == self.cache_capacity:
            # popitem(last=False) removes the least recently used key.
            self.hashmap.popitem(last=False)
        self.hashmap[key] = value
| true |
f387658b5aed9e7ac014c0fb10147a0bac996a4e | Python | agorji/DeepProfiler | /deepprofiler/dataset/indexing.py | UTF-8 | 1,327 | 2.640625 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | import pandas as pd
import deepprofiler.dataset.metadata
def write_compression_index(config):
    """Rewrite each channel's image path as '<plate>/<basename>.png' and
    save the updated index as compressed.csv.
    """
    metadata = deepprofiler.dataset.metadata.Metadata(config["paths"]["index"], dtype=None)
    frame = metadata.data
    file_format = config["dataset"]["images"]["file_format"]

    def as_png(filename):
        # Swap the original extension for .png, keeping a leading slash.
        return "/" + filename.replace("." + file_format, ".png")

    for channel in config["dataset"]["images"]["channels"]:
        # Keep only the basename of the original path...
        frame[channel] = frame[channel].str.split("/").str[-1]
        # ...then prefix it with the plate id.
        frame[channel] = frame["Metadata_Plate"].astype(str) + frame[channel].map(as_png)
    frame.to_csv(config["paths"]["compressed_metadata"] + "/compressed.csv")
## Split a metadata file in a number of parts
def split_index(config, parts):
    """Split index.csv into ``parts`` files (index-000.csv, ...) grouped
    by plate/well so that no well is split across parts.
    """
    metadata_dir = config["paths"]["metadata"]
    index = pd.read_csv(metadata_dir + "/index.csv")
    # One row per (plate, well) pair.
    plate_wells = index.groupby(["Metadata_Plate", "Metadata_Well"]).count()["Metadata_Site"]
    plate_wells = plate_wells.reset_index().drop(["Metadata_Site"], axis=1)
    part_size = len(plate_wells) // parts
    for part in range(parts):
        # Every part takes part_size well groups; the last one also
        # absorbs the remainder.
        if part < parts - 1:
            selection = plate_wells[part * part_size:(part + 1) * part_size]
        else:
            selection = plate_wells[part * part_size:]
        rows = pd.merge(index, selection, on=["Metadata_Plate", "Metadata_Well"])
        rows.to_csv(metadata_dir + "/index-{0:03d}.csv".format(part), index=False)
    print("All set")
| true |
ff82cf1631511d4bc06f802e89fa336f99f03ad2 | Python | Aasthaengg/IBMdataset | /Python_codes/p02238/s778634630.py | UTF-8 | 541 | 3.4375 | 3 | [] | no_license | def dfs(v): # vを訪問する
global time
seen[v] = 1
start[v] = time
time += 1
for next_v in graph[v]:
if seen[next_v]: continue
dfs(next_v)
end[v] = time
time += 1
# Read the adjacency-list graph: first line is the vertex count, then one
# line per vertex: "id degree v1 v2 ..." (vertex ids are 1-based in the
# input and converted to 0-based here).
n = int(input())
graph = []
for _ in range(n):
    line = list(map(int, input().split()))
    graph.append([v - 1 for v in line[2:]])
seen = [0] * n # 0: not yet visited, 1: visited
start = [0] * n
end = [0] * n
time = 1
# Start a DFS from every still-unvisited vertex so that disconnected
# components are covered too.
for v in range(n):
    if not seen[v]: dfs(v)
# Report the 1-based vertex id with its DFS discovery and finish times.
for i in range(n):
    print(i+1, start[i], end[i])
| true |
afc0473760f5045aeb43312cc40a3a450d27b7b0 | Python | allie-rae/Sorting | /src/iterative_sorting/iterative_sorting.py | UTF-8 | 1,615 | 4.53125 | 5 | [] | no_license | # TO-DO: Complete the selection_sort() function below
#
# Algorithm
# Start with current index = 0
# For all indices EXCEPT the last index:
# a. Loop through elements on right-hand-side of current index and find the smallest element
# b. Swap the element at current index with the smallest element found in above loop
#
#
def selection_sort(arr):
    """Sort ``arr`` in place (ascending) by repeated selection; returns arr."""
    for position in range(len(arr) - 1):
        # Locate the smallest element to the right of `position`.
        best = position
        for candidate in range(position + 1, len(arr)):
            if arr[candidate] < arr[best]:
                best = candidate
        # Move it into its final place.
        arr[position], arr[best] = arr[best], arr[position]
    return arr
# Loop through your array
# Compare each element to its neighbor
# If elements in wrong position (relative to each other, swap them)
# If no swaps performed, stop. Else, go back to the element at index 0 and repeat step 1.
# TO-DO: implement the Bubble Sort function below
# Demo input consumed by the print() call further down.
arr1 = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]
def bubble_sort(arr):
    """Sort ``arr`` in place (ascending) with early-exit bubble sort.

    Returns the same list for convenience.  After pass ``i`` the last
    ``i`` positions already hold their final values, so each pass scans
    a shorter prefix; a pass with no swaps means the list is sorted.
    """
    for i in range(len(arr)):
        swapped = False
        # Improvement: the tail beyond len(arr)-1-i is already sorted,
        # so it no longer needs to be rescanned each pass.
        for j in range(len(arr) - 1 - i):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:  # idiom fix: was "swapped == False"
            break
    return arr
# Quick smoke test of bubble_sort on the demo list above.
print(bubble_sort(arr1))
# STRETCH: implement the Count Sort function below
def count_sort(arr, maximum=-1):
    """Counting sort for non-negative integers; returns a new sorted list.

    The original was an unimplemented stub that returned its input.
    ``maximum`` may be supplied to skip the max() scan; the default -1
    means "derive it from the data".  Empty input is returned unchanged.
    Raises ValueError on negative elements, which counting sort cannot
    index.
    """
    if not arr:
        return arr
    if min(arr) < 0:
        raise ValueError("count_sort only supports non-negative integers")
    if maximum == -1:
        maximum = max(arr)
    # counts[v] = number of occurrences of value v in the input.
    counts = [0] * (maximum + 1)
    for value in arr:
        counts[value] += 1
    ordered = []
    for value, occurrences in enumerate(counts):
        ordered.extend([value] * occurrences)
    return ordered
| true |
d483519f0e77b74dc36f64c1c25edcc7e2804dd5 | Python | mackilroy007/CF-ADS-Bumberger-Mark | /ExerciseIntermediate2.py | UTF-8 | 660 | 4.78125 | 5 | [] | no_license | value = int(input("Please enter the amount of money to withdraw:\n"))
# Reject amounts that cannot be paid out with 10-euro notes and above.
if (value % 10 !=0) :
    print("please enter a value divisible by 10")
else:
    # Greedy change-making: largest notes first, remainder carried down.
    if (value >= 100) :
        hundred = int(value/100)
        value = value % 100
        print("%d notes of 100€" %hundred)
    if (value >= 50) :
        fifty = int(value/50)
        value = value % 50
        print("%d notes of 50€" %fifty)
    if (value >= 20) :
        twenty = int(value/20)
        value = value % 20
        print("%d notes of 20€" %twenty)
    if (value >= 10) :
        ten = int(value/10)
        print("%d notes of 10€" %ten)
07300008621f0a727a93cf786264176a1649bcc4 | Python | CSi-CJ/PythonDemo | /demo/demo07.py | UTF-8 | 426 | 3.25 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# owner:CSi
# datetime:2020/11/12 16:09
# software: PyCharm
# The for statement
words = ['gzl', 'lml', 'wsl']
for val in words:
    print(val, len(val))
# Iterate over a copy (words[:]) so inserting into the original list
# while looping is safe.
for v in words[:]:
    if v == 'wsl':
        words.insert(0, v)
print(words)
# The range() function
for i in range(5):
    print(i)
# Generate a sequence over an explicit interval
for num in range(5, 10):
    print(num)
# ...and with a step of 3
for nu in range(0, 10, 3):
    print(nu)
| true |
54de8c1f468eb40e647584450fa24b719373cc58 | Python | bupthl/Python | /Python从菜鸟到高手/projects/bra/analyze/demo01.py | UTF-8 | 1,714 | 2.96875 | 3 | [] | no_license | '''
--------《Python从菜鸟到高手》源代码------------
欧瑞科技版权所有
作者:李宁
如有任何技术问题,请加QQ技术讨论群:264268059
或关注“极客起源”订阅号或“欧瑞科技”服务号或扫码关注订阅号和服务号,二维码在源代码根目录
如果QQ群已满,请访问https://geekori.com,在右侧查看最新的QQ群,同时可以扫码关注公众号
“欧瑞学院”是欧瑞科技旗下在线IT教育学院,包含大量IT前沿视频课程,
请访问http://geekori.com/edu或关注前面提到的订阅号和服务号,进入移动版的欧瑞学院
“极客题库”是欧瑞科技旗下在线题库,请扫描源代码根目录中的小程序码安装“极客题库”小程序
关于更多信息,请访问下面的页面
https://geekori.com/help/videocourse/readme.html
'''
from pandas import *
from matplotlib.pyplot import *
import sqlite3
import sqlalchemy
# Read the sales table from the local SQLite database.
engine = sqlalchemy.create_engine('sqlite:///bra.sqlite')
# Use the SimHei font so Chinese labels render correctly.
rcParams['font.sans-serif'] = ['SimHei']
sales = read_sql('select source,size1 from t_sales',engine)
# Sales count per cup size.
size1Count = sales.groupby('size1')['size1'].count()
print(size1Count)
size1Total = size1Count.sum()
print(size1Total)
print(type(size1Count))
# Turn the Series into a one-column frame (column label means "sales count").
size1 = size1Count.to_frame(name='销量')
print(size1)
options.display.float_format = '{:,.2f}%'.format
# Prepend a percentage-share column (label means "share").
size1.insert(0,'比例', 100 * size1Count / size1Total)
print(size1)
# Name the index (label means "cup size").
size1.index.names=['罩杯']
print(size1)
# Visualisation: a DataFrame is made of one or more Series.
print(size1['销量'])
labels = ['A罩杯','B罩杯','C罩杯','D罩杯']
size1['销量'].plot(kind='pie',labels = labels, autopct='%.2f%%')
axis('equal')
legend()
show() | true |
ddab9a2bee42da66380d1bc407944d6fa0992503 | Python | mitmedialab/MediaCloud-Text-Viz-Study | /scripts/generate_viz_data.py | UTF-8 | 4,008 | 2.875 | 3 | [
"MIT"
] | permissive | """
Script to generate data for user study.
Every participant should answer the questions based on the same data.
"""
import mediacloud
import logging.config
import json
import sys
import os
import numpy as np
#from gensim.models.keyedvectors import KeyedVectors
from gensim.models import Word2Vec
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import cosine_similarity
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
OUTPUT_DIR = 'server/static'
MODEL_DIR = 'vector-models'
NORMALIZE = False
# set up logger
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info("-------------------------------------------------------------------")
try:
MC_API_KEY = os.environ['MC_API_KEY']
except KeyError:
logging.error('You need to define the MC_API_KEY environment variable.')
sys.exit(0)
mc = mediacloud.api.MediaCloud(MC_API_KEY)
try:
TOPIC_ID = sys.argv[1]
except IndexError:
logging.error('You need to specify a topic id as an argument.')
sys.exit(0)
NUM_WORDS = 50
COS_SIM_THRESHOLD = 0.95 # approx. equal to cos(18 degs)
def filter_vocab(words, model):
    """
    Remove words not in model vocabulary or duplicates
    """
    # Removal is deferred to the end so `words` is not mutated while it
    # is being iterated.
    to_be_removed = []
    unique_words = []
    for w in words:
        try:
            # Probe the embedding; KeyError means the term is out of vocab.
            model[w['term'].strip()]
        except KeyError:
            to_be_removed.append(w)
            try:
                print 'Not in vocabulary:', w['term']
            except UnicodeEncodeError:
                print 'Not in vocabulary (Unicode Error)'
        if w['term'] not in unique_words:
            unique_words.append(w['term'])
        else:
            to_be_removed.append(w)
    # NOTE(review): a word that is both out-of-vocab AND a duplicate is
    # appended twice, so the second remove() below strips another equal
    # entry or raises ValueError -- worth confirming with real data.
    for w in to_be_removed:
        words.remove(w)
    return words
if __name__ == '__main__':
# Get top words
top_words = mc.topicWordCount(TOPIC_ID, num_words=NUM_WORDS)
# Load word2vec model
model_file_path = os.path.join(BASE_DIR, MODEL_DIR, 'w2v-topic-model-{}'.format(TOPIC_ID))
model = Word2Vec.load(model_file_path)
# Remove words not in model vocab or duplicates
top_words = filter_vocab(top_words, model)
# Get 2D embeddings
embeddings = [model[w['term'].strip()] for w in top_words]
pca = PCA(n_components=2)
two_d_embeddings = pca.fit_transform(np.asarray(embeddings))
# Construct JSON (counts, embeddings, cosine similarities)
results = []
for i, word in enumerate(top_words):
# find other words that are most similar to this word
similar = []
for other_word in top_words:
if word['term'] != other_word['term']: # avoid double-counting
# get similar words based on 2D embeddings
temp = [{'term': x['term'], 'index': j} for j,x in enumerate(top_words)]
other_i = filter(lambda x: x['term'] == other_word['term'], temp)[0]['index']
this_i = filter(lambda x: x['term'] == word['term'], temp)[0]['index']
sim_score_2d = np.asscalar(cosine_similarity(two_d_embeddings[this_i].reshape(1,-1),
two_d_embeddings[other_i].reshape(1,-1)))
if sim_score_2d > COS_SIM_THRESHOLD:
similar.append({'text': other_word['term'], 'count': other_word['count'],
'score': sim_score_2d})
w2v_x = two_d_embeddings[i][0]
w2v_y = two_d_embeddings[i][1]
if NORMALIZE:
norm = np.linalg.norm(two_d_embeddings[i])
w2v_x /= norm
w2v_y /= norm
results.append({'text': word['term'], 'count': word['count'],
'similar': similar,'w2v_x': float(w2v_x),
'w2v_y': float(w2v_y)})
# Write json object to file
output_file_path = os.path.join(BASE_DIR, OUTPUT_DIR, 'vizData.json')
with open(output_file_path, 'w') as output:
output.write(json.dumps(results))
| true |
e21a6e11f75ed30fa202d33b6c667d8982839f38 | Python | rahmattamhar074/belajar_python | /modul_satu/latihan15.py | UTF-8 | 141 | 3.296875 | 3 | [] | no_license | print('konversi suhu celcius ke fahrenheit')
# Read a Celsius temperature and print the Fahrenheit conversion.
c = int(input('masukkan nilai celcius :'))
f = c*9/5+32
print('nilai suhu dalam fahrenheit', f)
| true |
3c90cd515e9cba7b78f5a114898af67a67dea507 | Python | iLoveTux/timestamp | /tstamp/tstamp.py | UTF-8 | 2,226 | 3.6875 | 4 | [] | no_license | """
Author: iLoveTux
Date: 06/04/14
purpose: to provide a class which abstracts away handling of
current timestamps.
usage:
>>> from timestamp import Timestamp
>>> timestamp = Timestamp()
>>> print timestamp
Wednesday June 04, 17:51:59
>>> print timestamp.epoch
1401918719
>>> print timestamp.timestamp
20140604175159
>>> print timestamp.strftime('%X')
17:51:59
>>> print timestamp.strftime('%x')
06/04/14
>>> print timestamp.strftime('%c')
06/04/14 17:51:59
>>> print timestamp.strftime('%C')
NOTE: timestamp.strftime follows the table here:
https://docs.python.org/2/library/time.html#time.strftime
"""
from time import time
from datetime import datetime
class Timestamp(object):
    """Snapshot of a point in time with convenient formatting accessors.

    Wraps an epoch value (seconds since Jan 1, 1970, defaulting to "now")
    and exposes it as raw seconds, a friendly string, a filename-safe
    stamp, and arbitrary strftime formats.
    """

    def __init__(self, epoch=None):
        # Bug fix: the original tested "if epoch:", which silently
        # replaced a legitimate epoch of 0 with the current time.
        # (It also stored the builtin ``format`` in self._format;
        # nothing read it, so that line was dropped.)
        if epoch is None:
            epoch = time()
        self._epoch = epoch
        self._timestamp = datetime.fromtimestamp(self._epoch)

    @property
    def epoch(self):
        """
        returns the time (in seconds) since Jan 1, 1970 at midnight.
        """
        return int(self)

    @property
    def friendly(self):
        """
        returns a friendly, human-readable version of the date and time.
        """
        return self._timestamp.strftime('%A %B %d, %X')

    @property
    def timestamp(self):
        """
        returns a pretty useful representation of the time:
        YYYYmmddhhmmss.  Handy for adding a timestamp to filenames.
        """
        return self._timestamp.strftime('%Y%m%d%H%M%S')

    def strftime(self, format):
        """
        returns the time formatted according to `format`, following
        https://docs.python.org/2/library/time.html#time.strftime
        """
        return self._timestamp.strftime(format)

    def __repr__(self):
        return str(self._epoch)

    def __str__(self):
        """
        returns a friendly, human-readable version of the date and time.
        """
        return self.friendly

    def __int__(self):
        """
        returns the time (in seconds) since Jan 1, 1970 at midnight.
        """
        return int(self._epoch)
| true |
6851dcd59ec361c26838d8a272e80fa83377d97a | Python | vilhjalmurkari/Backgammon | /testing.py | UTF-8 | 211 | 3.1875 | 3 | [] | no_license | myDict = {}
# Lists are unhashable, so str(array) is used as a stand-in dict key
# (myDict itself is created just above).
array = [1,2,3,4]
myDict[str(array)] = 10
print(myDict[str(array)])
print(len(myDict))
# Re-assigning through the same key overwrites; the dict stays size 1.
myDict[str(array)] = 30
print(len(myDict))
# Augmented assignment reads and writes the same single entry.
myDict[str(array)] += 10
print(len(myDict))
print(myDict[str(array)]) | true |
9647791ae807413d709092d574691f50aa9a25e7 | Python | AroMorin/DNNOP | /backend/algorithms/spiking1.py | UTF-8 | 3,753 | 2.875 | 3 | [
"MIT"
] | permissive | """It is expected that the hyper_params object passed to the class is compatible
with the chosen algorithm. Thus, since Learner is chosen here, it is expected that
the hyper_params object will contain the expected information/params in the
expected locations.
We need to create an optimizer object. This object will be initialized with the
desired hyper parameters. An example of hyper params is the number of Anchors.
The optimizer object will own the pool.?
"""
from __future__ import division
from .algorithm import Algorithm
from .spiking1_backend.hyper_parameters import Hyper_Parameters
from .spiking1_backend.engine import Engine
class SPIKING1(Algorithm):
    """Adapter exposing the spiking search engine through the Algorithm API
    (construct -> step(feedback) -> print_state)."""
    def __init__(self, model, alg_params):
        # NOTE(review): banner says "Learner7" but the class is SPIKING1 —
        # looks like a leftover from the algorithm this file was derived from.
        print ("Using Learner7 algorithm")
        super(SPIKING1, self).__init__()
        self.hyper_params = Hyper_Parameters(alg_params) # Create a hyper parameters object
        self.engine = Engine(model, self.hyper_params) # Create a pool object
        self.populations = False
        self.model = model
        self.minimizing = self.hyper_params.minimizing
        self.initial_score = self.hyper_params.initial_score
        self.top_score = self.initial_score
        self.target = None
        self.set_target()
    def set_target(self):
        # Widen the stopping target by the tolerance, in the direction that
        # makes it easier to reach (up when minimizing, down when maximizing).
        if self.minimizing:
            self.target = self.hyper_params.target + self.hyper_params.tolerance
        else:
            self.target = self.hyper_params.target - self.hyper_params.tolerance
    def step(self, feedback):
        """This method takes in the environment, runs the models against it,
        obtains the scores and accordingly updates the models.
        """
        # feedback is an (inference, score) pair produced by the caller.
        inference, score = feedback
        print(inference)
        #print(score.item())
        #score = self.regularize(score)
        self.engine.analyze(score, self.top_score)
        self.engine.set_elite()
        self.engine.update_state()
        self.engine.generate()
        self.engine.update_weights()
        self.update_top_score(score)
    def regularize(self, score):
        # Penalize the score by 1% of the engine vector's norm (currently
        # unused; see the commented-out call in step()).
        norm = self.engine.vector.norm()
        score = score+(0.01*norm)
        return score
    def update_top_score(self, score):
        """Analysis is still needed even if there's no improvement,
        so other modules know that this as well. Hence, can't "return" after
        initial condition.
        """
        if self.engine.jumped:
            self.top_score = score
        else:
            # No jump: decay the recorded best by 0.1% toward "worse" so a
            # stale best can eventually be overtaken.  The sign of top_score
            # decides whether "worse" means multiplying up or down.
            v = 0.001
            if self.minimizing and self.top_score>0.:
                self.top_score = self.top_score*(1.+v)
            elif self.minimizing and self.top_score<0.:
                self.top_score = self.top_score*(1.-v)
            elif not self.minimizing and self.top_score>0.:
                self.top_score = self.top_score*(1.-v)
            elif not self.minimizing and self.top_score<0.:
                self.top_score = self.top_score*(1.+v)
    def print_state(self):
        # Console dump of the engine's current search state.
        if self.engine.analyzer.replace:
            print ("------Setting new Elite-------")
        if self.engine.frustration.jump:
            print("------WOOOOOOHHOOOOOOOO!-------")
        if self.engine.analyzer.improved:
            print("Improved!")
        print ("Top Score: %f" %self.top_score)
        print("Memory: %d" %self.engine.frustration.count)
        print("Frustration: %f" %self.engine.frustration.tau)
        print("Integrity: %f" %self.engine.integrity.value)
        print("Bin: ", self.engine.integrity.step_size.bin)
        print("Step size: %f" %self.engine.integrity.step_size.value)
        print("Selections: %d" %self.engine.noise.num_selections)
        print("P: ", self.engine.selection_p.p[0:10])
        print("Variance(P): %f\n" %self.engine.selection_p.variance)
#
| true |
c39a9b958240a90d0391f6bca2dfb0c3ed4a536e | Python | hehehe47/LeetCode | /IXL/winner.py | UTF-8 | 629 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/19 22:53
# @Author : Patrick
# @File : winner.py
# @Software: PyCharm
def winner(andrea, maria, s):
    # Write your code here
    # Decide the match winner from per-round points.  `s` selects where the
    # scoring starts: 'Even' keeps all rounds, anything else drops round 0.
    score_and, score_mar = 0, 0
    i = 0 if s == 'Even' else 1
    a = andrea[i:]
    m = maria[i:]
    j = 0
    while j < len(a):
        if j % 2 == 0:
            # Only even-indexed rounds (after the offset) are scored; each
            # tally is the signed point difference in that player's favour.
            score_and += a[j] - m[j]
            score_mar += m[j] - a[j]
        j += 1
    if score_and == score_mar:
        return 'Tie'
    elif score_and > score_mar:
        return 'Andrea'
    else:
        return 'Maria'
print(winner([1, 2, 3], [2, 1, 3], 'Odd'))  # quick demo run
| true |
a09bf33f90e3158992497a1c499dc15e4326bc1f | Python | Kolhar-Prashant/Projec-Euler-solutions. | /Permuted multiples.py | UTF-8 | 353 | 3.59375 | 4 | [] | no_license |
def check(n):
    """Append n to the global perm_mul when 2n..6n are all digit permutations of n."""
    # A multiple is a permutation of n only if its digit *multiset* matches
    # n's exactly.  The previous test only required each digit of the multiple
    # to appear somewhere in str(n), ignoring digit counts and length, so
    # non-permutations could slip through.  Comparing the sorted digit strings
    # is the exact test.
    digits = sorted(str(n))
    for multiplier in range(2, 7):
        if sorted(str(n * multiplier)) != digits:
            return 0
    perm_mul.append(n)
perm_mul = []  # qualifying numbers found by check(); filled as a side effect
# Scan every candidate below one million and report the matches.
for num in range(2,1000000):
    check(num)
print("Number to contain all the permutations of digits in it's multiples from 2x to 6x is :",perm_mul)
595bc4fceb030d957f65d2708b2c89e0bd8f7149 | Python | MrFiona/python_module_summary | /绘图工具matplotlib/解析csv文件绘图.py | UTF-8 | 2,199 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time : 2017-11-02 12:03
# Author : MrFiona
# File : draw_practice.py
# Software: PyCharm Community Edition
from pandas import Series, DataFrame
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from operator import itemgetter
from matplotlib.ticker import MultipleLocator
#todo select a font that can render the Chinese axis labels
mpl.rcParams['font.sans-serif'] = ['SimHei']
df = pd.read_csv('ca_list_copy(2).csv')
#todo mean of the 'zwyx' (salary) column
value = df['zwyx'].mean()
#todo replace zero salaries in 'zwyx' with the column mean
df['zwyx'] = df['zwyx'].replace(0,value)
#todo report the mean for 'zwyx' and value counts for every other column
for col in df.columns:
    if col == 'zwyx':
        print(col + ' mean:\t', df[col].mean())
    else:
        print(col + ' count:\t', df[col].value_counts())
#todo unique job titles ('zwmc'); note: the result is not stored anywhere
df['zwmc'].unique()
#todo group rows by city ('dd')
group = df.groupby('dd')
#todo collect (city, highest salary) pairs for plotting
city_info_list = []
for ele in group:
    print('max:\t', ele[1]['zwyx'].max())
    city_info_list.append((ele[0], ele[1]['zwyx'].max()))
#todo persist the result data
# df.to_excel('result_csv.xls')
def set_label(rects):
    # Annotate each plotted point with its own value (x positions 1..len).
    i = 1
    for rect in rects:
        plt.text(i, rect, rect)
        i += 1
#todo keep only the 10 cities with the highest salary
city_info_list.sort(key=itemgetter(1), reverse=True)
top_city_info_list = city_info_list[:10]
fig, ax = plt.subplots()
fig.set_facecolor('peru')
# plt.title(u'城市最高薪资对比图', fontsize=15, color='blue')
fig.set_size_inches(w=10, h=6)
x = [ele[0] for ele in top_city_info_list]
y = [str(ele[1]) for ele in top_city_info_list]
ax.plot(range(1, len(y) + 1), y, 'o--r', linewidth=2, mfc='y', mec='b', ms=8, alpha=0.8)
set_label(y)
plt.ylabel(u'最高薪资', fontsize=15, color='m')
plt.xlabel(u'城市', fontsize=15, color='b')
# NOTE(review): y holds *strings*, so min()/max() compare lexicographically and
# eval() parses the extremes back to numbers — the axis range can be wrong when
# salary digit counts differ; confirm intended before relying on it.
plt.axis([0, len(x)+1, eval(min(y)) - 2000, eval(max(y)) + 2000])
plt.xticks(range(1, 12))
ax.set_xticklabels(x,rotation=-45)
plt.legend([u'10大城市最高薪资对比图'], loc='upper center', fontsize=14, facecolor='c', shadow=True)
#todo set the y-axis major tick interval
ax.yaxis.set_major_locator( MultipleLocator(1500) )
plt.grid(color='peru', linestyle='--')
plt.show() | true |
671ff57b856361ec53494117d24bbcca9d90ce27 | Python | irtafaqohh/prediksi-ratiing | /latihan2.py | UTF-8 | 202 | 4.0625 | 4 | [] | no_license | nilai1 = 80
nilai2 = 80
# Compare nilai1 with nilai2 and report the relation (messages in Indonesian).
if(nilai1 < nilai2):
    print("Nilai 1 kurang dari nilai 2")
elif(nilai1 == nilai2):
    print("Nilai 1 dan Nilai 2 sama")
else:
    # Reaching this branch means nilai1 > nilai2.  The old message claimed
    # the opposite ("Nilai 2 lebih besar dari nilai 1"), which was backwards.
    print("Nilai 1 lebih besar dari nilai 2")
| true |
6bf412e79ad7801c8d447f4d8873af27b2d9245b | Python | Txiag/URI-Python | /Beginner/1009.py | UTF-8 | 122 | 3.5625 | 4 | [] | no_license | Nome = input()
valor_fixo = float(input())      # fixed monthly salary
total_vendas = float(input())    # total sales for the month
salario = valor_fixo + 0.15 * total_vendas  # 15% commission on sales
print('TOTAL = R$ {:.2f}'.format(salario)) | true |
793c6fab5a1cf7b0924c163bcadd2869d333933d | Python | robertmatej/Python_ALX | /zjazd4/tworzenie_pliku_wykonwczego/hello.py | UTF-8 | 122 | 3.0625 | 3 | [] | no_license | import sys
# Greet the first command-line argument when one is given, otherwise greet
# the world.
cli_args = sys.argv[1:]
if cli_args:
    print(f" Hello world {cli_args[0]} :)")
else:
    print("Hello world !")
input() | true |
d98d1323825c47ebdd2d832b62e597995767ea45 | Python | AlexCS1337/TsvDemos2015 | /fileRenamer.py | UTF-8 | 901 | 2.90625 | 3 | [] | no_license | """
File renaming script
Lindsay Ward, 12/09/2011
Modified 16/06/2014 - just use .replace instead of slicing the start; assume that's safe
"""
import os
# Remove start (from LearnJCU): strip the LMS-generated filename prefix and
# the word "attempt" from every file in DIRECTORY.
START_TEXT = "Project Milestone 2 (Full Site) & Peer Assessment_"
DIRECTORY = "/Users/sci-lmw1/Google Drive/CP2010/CP2010 2014/StudentWork/"
os.chdir(DIRECTORY)
for filename in os.listdir('.'):
    # get rid of LearnJCU's massive file name text and also the "attempt" text
    newName = filename.replace(START_TEXT, "").replace("attempt", "")
    # when testing, I use the print line and comment out the rename line, then I swap them over
    os.rename(filename, newName)
    # print(newName)
"""
# Replace %20 with space
os.chdir("/Users/sci-lmw1/Google Drive/JCU General/Resource Packages (Templates)/")
for filename in os.listdir('.'):
newName = filename.replace("%20", " ")
os.rename(filename, newName)
"""
| true |
fa74f4a6184e9b3df7346c28ac423e94cb956754 | Python | dennlinger/hypergraph-document-store | /SSDBM_figures/runtime_eval/get_entities.py | UTF-8 | 1,611 | 2.84375 | 3 | [
"MIT"
] | permissive | """
Gets n entities and their respective degrees.
"""
from PostgresConnector_SSDBM import PostgresConnector
import numpy as np
import json
if __name__ == "__main__":
pc = PostgresConnector(port=5436)
with pc as opc:
# noinspection SqlNoDataSourceInspection
opc.cursor.execute("SELECT t.term_text, COUNT(*) as degree "
"FROM terms t, term_occurrence toc "
"WHERE t.term_id = toc.term_id "
" AND t.is_entity = true "
"GROUP BY t.term_text, t.term_id")
res = opc.cursor.fetchall()
np.random.seed(3019)
indices = np.random.choice(len(res), 3000, replace=False)
formatted = {}
for idx in indices:
if len(formatted) >= 1990:
break
# "proper sanitizing", they said
if "'" in res[idx][0]:
continue
else:
key = res[idx][0]
formatted[key] = {"degree": res[idx][1]}
# adding manually 10 highly relevant terms
formatted["Hillary Clinton"] = {"degree": 58778}
formatted["London"] = {"degree": 15540}
formatted["North Carolina"] = {"degree": 5742}
formatted["CBS News"] = {"degree": 4120}
formatted["Asia"] = {"degree": 2957}
formatted["Conservative Party (UK)"] = {"degree": 2347}
formatted["2016-FA"] = {"degree": 2022}
formatted["Nigeria"] = {"degree": 1828}
formatted["Jill Stein"] = {"degree": 1811}
formatted["2016-09-07"] = {"degree": 1270}
with open("entities.json", "w") as f:
json.dump(formatted, f, indent=2, ensure_ascii=False)
| true |
12fdca310d57c96402aebeb509633ed860b74f02 | Python | jhharwood88/Pytest-PS | /07/demos/code/gilded_rose/test_gilded_rose.py | UTF-8 | 1,754 | 3.484375 | 3 | [
"MIT"
] | permissive | import csv
import pytest
from gilded_rose import Item, GildedRose
def read_items():
    """Load Gilded Rose test cases from items.csv.

    Each row becomes a (name, sell_in, quality,
    expected_sell_in, expected_quality) tuple.
    """
    with open("items.csv") as f:
        return [
            (
                row['name'],
                int(row['sell_in']),
                int(row['quality']),
                int(row['expected_sell_in']),
                int(row['expected_quality']),
            )
            for row in csv.DictReader(f)
        ]
@pytest.mark.parametrize("name, sell_in, quality, expected_sell_in, expected_quality",
                         read_items())
def test_update_items(name, sell_in, quality, expected_sell_in, expected_quality):
    # One test per spreadsheet row: build the item, advance one day, then
    # check both post-update fields against the expected columns.
    item = Item(name, sell_in, quality)
    gr = GildedRose([item])
    gr.update_quality()
    assert item.sell_in == expected_sell_in
    assert item.quality == expected_quality
# We read in the items from a spreadsheet, where each test case contains a row from the spreadsheet. This will give values for pre and post argument being called. We use a paramataraized test where the arguemnts will be a list of original arguemtsn as well as the read items argument that reads from the spreadsheet. The item consturctor will create the items, and then put them in the gilded rose item list. We can then update the quality, and then asser that the update quality method is working correctly. This will validate the spreadsheet, and then we can evaluate the tests themselves by using a coverage report to make sure its fully covering the gilded rose methods. We can see from full to partially covered items, as well as uncovered meaning we can then go back and refine our tests cases. | true |
da6916b28de2f78a236b81d1c79a7975fc8f22a7 | Python | Jimmy-INL/google-research | /simulation_research/signal_processing/spherical/associated_legendre_function.py | UTF-8 | 3,282 | 2.9375 | 3 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library of computing associated Legendre function of the first kind."""
import math
from jax import lax
import jax.numpy as jnp
import numpy as np
def gen_normalized_legendre(l_max, x):
  r"""Computes the normalized associated Legendre functions (ALFs).

  The normalization is chosen so that the resulting spherical harmonics form
  an orthonormal basis of L^2(S^2); a factor of \sqrt(1 / (4 𝛑)) is folded
  into the values.  The table is filled with three standard recurrences: one
  along the diagonal, one for the first off-diagonal, and a three-term
  recurrence in the degree for everything else.

  Args:
    l_max: Maximum degree; both degrees and orders run over [0, 1, ..., l_max].
    x: 1D vector of type `float32`/`float64` holding the sample points
      (essentially cos(θ) for colatitude θ).

  Returns:
    Array of shape `(l_max + 1, l_max + 1, len(x))` containing the normalized
    ALF values at `x`.
  """
  dtype = lax.dtype(x)
  if dtype not in (jnp.float32, jnp.float64):
    raise TypeError(
        'x.dtype={} is not supported, see docstring for supported types.'
        .format(dtype))
  if x.ndim != 1:
    raise ValueError('x must be a 1D array.')
  p = np.zeros((l_max + 1, l_max + 1, x.shape[0]))
  # Seed value: p(0, 0) = 1 / (2 * sqrt(pi)).
  p[0, 0] = 0.5 / np.sqrt(math.pi)
  # Diagonal entries p(l, l) via the sin(theta) recurrence.
  sin_theta = np.sqrt(1.0 - x * x)
  for l in range(1, l_max + 1):
    p[l, l] = -np.sqrt(1.0 + 0.5 / l) * sin_theta * p[l - 1, l - 1]
  # First off-diagonal entries p(l + 1, l).
  for l in range(l_max):
    p[l + 1, l] = np.sqrt(2.0 * l + 3.0) * x * p[l, l]
  # Remaining entries via the three-term recurrence in the degree.
  for m in range(l_max + 1):
    m_sq = m * m
    for l in range(m + 2, l_max + 1):
      l_sq = l * l
      two_l = 2.0 * l
      prev_sq = (l - 1.0) * (l - 1.0)
      d0 = np.sqrt((4.0 * l_sq - 1.0) / (l_sq - m_sq))
      d1 = np.sqrt(((two_l + 1.0) * (prev_sq - m_sq)) / ((two_l - 3.0) * (l_sq - m_sq)))
      p[l, m] = d0 * x * p[l - 1, m] - d1 * p[l - 2, m]
  return jnp.asarray(p)
| true |
16822e29a86c7ca840690bdb023140ae64d6d97b | Python | Girin7716/KNU-Algorithm-Study | /src/kimkihyun/week_11/BOJ_2589/2589.py | UTF-8 | 1,751 | 3.234375 | 3 | [] | no_license | # 보물섬
# L: land, W: sea
from collections import deque
N,M = map(int,input().split())              # grid size: N rows, M columns
board = [list(input()) for _ in range(N)]   # each row is a string of 'L'/'W'
dx = [1,0,-1,0]                             # row deltas for the 4 cardinal moves
dy = [0,1,0,-1]                             # column deltas
def bfs(x,y):
    """Breadth-first search over land cells starting at (x, y).

    Returns the largest shortest-path distance from the start to any
    reachable land cell.
    """
    dist = [[0] * M for _ in range(N)]  # 0 = unvisited, else distance + 1
    dist[x][y] = 1
    queue = deque([(x, y)])
    farthest = 0
    while queue:
        r, c = queue.popleft()
        farthest = max(farthest, dist[r][c] - 1)
        for dr, dc in ((1, 0), (0, 1), (-1, 0), (0, -1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < N and 0 <= nc < M and dist[nr][nc] == 0 and board[nr][nc] != 'W':
                dist[nr][nc] = dist[r][c] + 1
                queue.append((nr, nc))
    return farthest
answer = 0
# Run a BFS from every land cell; the longest shortest path over land is the
# answer (the "treasure" distance).
for i in range(N):
    for j in range(M):
        if board[i][j] == 'L':
            answer = max(answer,bfs(i,j))
print(answer)
# from collections import deque
#
# N,M = map(int,input().split())
# board = [list(input()) for _ in range(N)]
#
# dx = [1,0,-1,0]
# dy = [0,1,0,-1]
#
# answer = -int(1e9)
#
# def bfs(x,y):
# q = deque()
# visited = [[0 for _ in range(M)] for _ in range(N)]
#
# q.append((x,y,0))
# maxValue = 0
# while q:
# x,y,dist = q.popleft()
# visited[x][y] = dist
# maxValue = max(maxValue,dist)
# for i in range(4):
# nx = x + dx[i]
# ny = y + dy[i]
# if nx < 0 or nx >= N or ny < 0 or ny >= M or visited[nx][ny] != 0 or board[nx][ny] == 'W':
# continue
# visited[nx][ny] = dist+1
# q.append((nx,ny,dist+1))
#
# return maxValue
#
# for i in range(N):
# for j in range(M):
# if board[i][j] == 'L':
# answer = max(answer,bfs(i,j))
#
# print(answer) | true |
5b3ce6cb86c45dfc8f8729430d8002b48d1a6979 | Python | Jawmo/Hope | /engine/handler/input_handler.py | UTF-8 | 3,986 | 3.1875 | 3 | [
"MIT"
] | permissive | from engine.lex import Lex
from engine.gen import Gen
from pprint import pprint
class Input_Handler():
    """Resolves a player's typed target string against items, NPCs, players,
    the player herself, and her inventory/hands, in a fixed priority order."""
    def __init__(self):
        # Fixed: the original `def __init__():` omitted `self`, so calling
        # Input_Handler() raised a TypeError.
        pass
    def full_handler(self, user_input, input_kwargs):
        """Try every target source in priority order; return the first hit
        or None when nothing matches."""
        target = None
        print("HANDLER | Searching for target:", user_input)
        if target is None:
            print("Checking 1")
            target = Input_Handler.target_items_in_room(self, user_input, input_kwargs)
        if target is None:
            print("Checking 2")
            target = Input_Handler.target_npcs_in_room(self, user_input, input_kwargs)
        if target is None:
            print("Checking 3")
            target = Input_Handler.target_players_in_room(self, user_input, input_kwargs)
        if target is None:
            print("Checking 4")
            target = Input_Handler.target_self(self, user_input, input_kwargs)
        if target is None:
            print("Checking 5")
            target = Input_Handler.target_self_inventory(self, user_input, input_kwargs)
        if target is None:
            print("Checking 6")
            target = Input_Handler.target_items_in_self_r_hand(self, user_input, input_kwargs)
        if target is None:
            print("Checking 7")
            target = Input_Handler.target_items_in_self_l_hand(self, user_input, input_kwargs)
        if target is not None:
            print("HANDLER | Target in Full Handler:", pprint(target), target.name)
            # pprint(vars(target))
        else:
            print("HANDLER | Target in Full Handler:", target)
        return target
    def target_items_in_room(self, user_input, input_kwargs):
        # check if the target is an item in the room (last match wins)
        target = None
        for y in Gen.items_in_room(self):
            if Lex.first_three(y.keyword) in user_input:
                target = y
                print("HANDLER | Target Found: item in room, ", target)
        return target
    def target_items_in_container(self, user_input, input_kwargs):
        # check if the target is inside input_kwargs['target_parent']
        target = None
        for y in Gen.items_in_container(input_kwargs['target_parent']):
            if Lex.first_three(y.keyword) in user_input:
                target = y
                print("HANDLER | Target Found: item in container, ", target)
        return target
    def target_npcs_in_room(self, user_input, input_kwargs):
        # check if the target is an NPC in the room (first match wins)
        for npc in Gen.npcs_in_room(self):
            if user_input in npc.name.lower():
                target = npc
                print("HANDLER | Target Found: NPC, ", target)
                break
        else:
            target = None
        return target
    def target_players_in_room(self, user_input, input_kwargs):
        # check if the target is a player in the room (first match wins)
        for player in Gen.players_in_room(self):
            if user_input in player.name:
                target = player
                print("HANDLER | Target Found: player, ", target)
                break
        else:
            target = None
        return target
    def target_self(self, user_input, input_kwargs):
        # check if you are the target (exact name match)
        if self.name == user_input:
            target = self
            print("HANDLER | Target Found: self, ", target)
        else:
            target = None
        return target
    def target_self_inventory(self, user_input, input_kwargs):
        # check if the target is in your inventory (last match wins)
        target = None
        for inv in self.inventory:
            if self.inventory[inv]['contents']:
                item = self.inventory[inv]['contents']
                if Lex.first_three(item.keyword) in user_input:
                    target = item
        return target
    def target_items_in_self_r_hand(self, user_input, input_kwargs):
        # check right hand
        target = None
        print("hand user_input:", user_input)
        item = self.inventory['r_hand']['contents']
        if item:
            if Lex.first_three(item.keyword) in user_input:
                # Fixed: the original assigned the undefined name `item` here,
                # raising a NameError whenever the keyword actually matched.
                target = item
                print("HANDLER | Target Found: self r_hand, ", target)
        return target
    def target_items_in_self_l_hand(self, user_input, input_kwargs):
        # check left hand next
        target = None
        print("hand user_input:", user_input)
        item = self.inventory['l_hand']['contents']
        if item:
            if Lex.first_three(item.keyword) in user_input:
                # Fixed: same undefined-name bug as the right-hand check.
                target = item
                print("HANDLER | Target Found: self l_hand, ", target)
        return target
969757c0b52b302870fed315363c7e60ea27cd45 | Python | adrianamendez/BigDataLab1 | /lab1/mapper.py | UTF-8 | 857 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python
# mapper.py
# encoding: utf-8
import sys
from bs4 import BeautifulSoup,SoupStrainer
soup = BeautifulSoup(sys.stdin, features="lxml")
title = soup.findAll('title')
places = soup.findAll('places')
topics = soup.findAll('topics')
def emit_counts(tags, prefix):
    # Emit one "<prefix><token>\t1" map-reduce record per whitespace-separated
    # token of each tag's string content.  str() deliberately keeps the old
    # behaviour for tags whose .string is None (the token "None" is emitted).
    for tag in tags:
        for token in str(tag.string).split():
            print('%s\t%s' % (prefix + token, "1"))
# The three categories only differed in tag list and prefix, so the
# triplicated loop is collapsed into one helper.
# Prefixes: A = title words, B = place words, C = topic words.
emit_counts(title, "A ")
emit_counts(places, "B ")
emit_counts(topics, "C ")
| true |
633ae175aa3605955c51a31f2512067d0bcc5c2c | Python | takumi152/atcoder | /abc147c.py | UTF-8 | 894 | 2.625 | 3 | [] | no_license | def main():
    n = int(input())
    a = [0 for _ in range(n)]        # a[i]: number of statements person i makes
    xy = [None for _ in range(n)]    # xy[i][k] = (person 1-based, claimed honesty 0/1)
    for i in range(n):
        a[i] = int(input())
        xy[i] = [None for _ in range(a[i])]
        for j in range(a[i]):
            xy[i][j] = tuple(map(int, input().split()))
    ans = 0
    # Brute-force every subset of "honest" people (bitmask i) and keep the
    # largest subset whose members' statements are all consistent with it.
    for i in range(1 << n):
        m = [0 for _ in range(n)]    # m[j] = 1 iff person j is assumed honest
        p = 0                        # popcount of the mask
        for j in range(n):
            m[j] = (1 if i & (1 << j) else 0)
            p += (1 if i & (1 << j) else 0)
        if p < ans:
            continue
        good = True
        for j in range(n):
            if m[j]:
                # Every statement made by an honest person must match the mask.
                for k in range(a[j]):
                    if m[xy[j][k][0]-1] != xy[j][k][1]:
                        good = False
                        break
                if not good:
                    break
        if good:
            ans = p
    print(ans)
if __name__ == '__main__':
main()
| true |
edf3c3f0f88382f9fc80223abbcf3410778f08d0 | Python | gbeine/pyfronius | /pyfronius/fronius.py | UTF-8 | 15,228 | 2.578125 | 3 | [
"MIT"
] | permissive | import json
import logging
import urllib.request
_LOGGER = logging.getLogger(__name__)
# Fronius Solar API v1 endpoint templates.  The placeholders are filled with
# (protocol, host) and, for the per-device URLs, a trailing device id.
URL_POWER_FLOW = "{}://{}/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
URL_SYSTEM_METER = "{}://{}/solar_api/v1/GetMeterRealtimeData.cgi?Scope=System"
URL_SYSTEM_INVERTER = "{}://{}/solar_api/v1/GetInverterRealtimeData.cgi?Scope=System"
URL_DEVICE_METER = "{}://{}/solar_api/v1/GetMeterRealtimeData.cgi?Scope=Device&DeviceId={}"
URL_DEVICE_STORAGE = "{}://{}/solar_api/v1/GetStorageRealtimeData.cgi?Scope=Device&DeviceId={}"
URL_DEVICE_INVERTER_CUMULATIVE = "{}://{}/solar_api/v1/GetInverterRealtimeData.cgi?Scope=Device&DeviceId={}&DataCollection=CumulationInverterData"
URL_DEVICE_INVERTER_COMMON = "{}://{}/solar_api/v1/GetInverterRealtimeData.cgi?Scope=Device&DeviceId={}&DataCollection=CommonInverterData"
class Fronius:
'''
Interface to communicate with the Fronius Symo over http / JSON
Attributes:
host The ip/domain of the Fronius device
useHTTPS Use HTTPS instead of HTTP
timeout HTTP timeout in seconds
'''
def __init__(self, host, useHTTPS = False, timeout = 10):
'''
Constructor
'''
self.host = host
self.timeout = timeout
if useHTTPS:
self.protocol = "https"
else:
self.protocol = "http"
def current_power_flow(self):
'''
Get the current power flow of a smart meter system.
'''
url = URL_POWER_FLOW.format(self.protocol, self.host)
_LOGGER.debug("Get current system power flow data for {}".format(url))
return self._current_data(url, self._system_power_flow)
def current_system_meter_data(self):
'''
Get the current meter data.
'''
url = URL_SYSTEM_METER.format(self.protocol, self.host)
_LOGGER.debug("Get current system meter data for {}".format(url))
return self._current_data(url, self._system_meter_data)
def current_system_inverter_data(self):
'''
Get the current inverter data.
The values are provided as cumulated values and for each inverter
'''
url = URL_SYSTEM_INVERTER.format(self.protocol, self.host)
_LOGGER.debug("Get current system inverter data for {}".format(url))
return self._current_data(url, self._system_inverter_data)
def current_meter_data(self, device = 0):
'''
Get the current meter data for a device.
'''
url = URL_DEVICE_METER.format(self.protocol, self.host, device)
_LOGGER.debug("Get current meter data for {}".format(url))
return self._current_data(url, self._device_meter_data)
def current_storage_data(self, device = 0):
'''
Get the current storage data for a device.
'''
url = URL_DEVICE_STORAGE.format(self.protocol, self.host, device)
_LOGGER.debug("Get current storage data for {}".format(url))
return self._current_data(url, self._device_storage_data)
def current_inverter_data(self, device = 1):
'''
Get the current inverter data of one device.
'''
url = URL_DEVICE_INVERTER_COMMON.format(self.protocol, self.host, device)
_LOGGER.debug("Get current inverter data for {}".format(url))
return self._current_data(url, self._device_inverter_data)
def _fetch_json(self, url):
_LOGGER.info("Fetch data from {}".format(url))
request = urllib.request.urlopen(url)
return json.loads(request.read().decode())
def _status_data(self, json):
sensor = {}
sensor['timestamp'] = { 'value': json['Head']['Timestamp'] }
sensor['status'] = json['Head']['Status']
sensor['status_code'] = { 'value': json['Head']['Status']['Code'] }
sensor['status_reason'] = { 'value': json['Head']['Status']['Reason'] }
sensor['status_message'] = { 'value': json['Head']['Status']['UserMessage'] }
return sensor
def _current_data(self, url, fun):
json = self._fetch_json(url)
sensor = self._status_data(json)
# break if Data is empty
if not json['Body'] or not json['Body']['Data']:
_LOGGER.info("No data returned from {}".format(url))
return sensor
else:
return fun(sensor, json['Body']['Data'])
    def _system_power_flow(self, sensor, data):
        # Map the Site / Inverters sections of a power-flow response onto flat
        # sensor keys; units are passed where the field has one.
        _LOGGER.debug("Converting system power flow data: '{}'".format(data))
        site = data['Site']
        inverter = data['Inverters']['1'] # TODO: implement more inverters
        self._copy(inverter, sensor, "Battery_Mode", 'battery_mode')
        self._copy(inverter, sensor, "SOC", 'state_of_charge', '%')
        self._copy(site, sensor, "BatteryStandby", 'battery_standby')
        self._copy(site, sensor, "E_Day", 'energy_day', 'Wh')
        self._copy(site, sensor, "E_Total", 'energy_total', 'Wh')
        self._copy(site, sensor, "E_Year", 'energy_year', 'Wh')
        self._copy(site, sensor, "Meter_Location", 'meter_location')
        self._copy(site, sensor, "Mode", 'meter_mode')
        self._copy(site, sensor, "P_Akku", 'power_battery', 'W')
        self._copy(site, sensor, "P_Grid", 'power_grid', 'W')
        self._copy(site, sensor, "P_Load", 'power_load', 'W')
        self._copy(site, sensor, "P_PV", 'power_photovoltaics', 'W')
        self._copy(site, sensor, "rel_Autonomy", 'relative_autonomy', '%')
        self._copy(site, sensor, "rel_SelfConsumption", 'relative_self_consumption', '%')
        return sensor
def _system_meter_data(self, sensor, data):
_LOGGER.debug("Converting system meter data: '{}'".format(data))
sensor['meters'] = { }
for i in data:
sensor['meters'][i] = self._meter_data(data[i])
return sensor
    def _system_inverter_data(self, sensor, data):
        # Sum DAY/TOTAL/YEAR energy and AC power over all inverters while also
        # recording the per-inverter values under sensor['inverters'].
        _LOGGER.debug("Converting system inverter data: '{}'".format(data))
        sensor['energy_day'] = { 'value': 0, 'unit': "Wh" }
        sensor['energy_total'] = { 'value': 0, 'unit': "Wh" }
        sensor['energy_year'] = { 'value': 0, 'unit': "Wh" }
        sensor['power_ac'] = { 'value': 0, 'unit': "W" }
        sensor['inverters'] = {}
        if "DAY_ENERGY" in data:
            for i in data['DAY_ENERGY']['Values']:
                sensor['inverters'][i] = { }
                sensor['inverters'][i]['energy_day'] = { 'value': data['DAY_ENERGY']['Values'][i], 'unit': data['DAY_ENERGY']['Unit'] }
                sensor['energy_day']['value'] += data['DAY_ENERGY']['Values'][i]
        if "TOTAL_ENERGY" in data:
            for i in data['TOTAL_ENERGY']['Values']:
                sensor['inverters'][i]['energy_total'] = { 'value': data['TOTAL_ENERGY']['Values'][i], 'unit': data['TOTAL_ENERGY']['Unit'] }
                sensor['energy_total']['value'] += data['TOTAL_ENERGY']['Values'][i]
        if "YEAR_ENERGY" in data:
            for i in data['YEAR_ENERGY']['Values']:
                # NOTE(review): unit is read from TOTAL_ENERGY here and in the
                # PAC branch below — possibly a copy/paste slip; confirm
                # against the Solar API before changing.
                sensor['inverters'][i]['energy_year'] = { 'value': data['YEAR_ENERGY']['Values'][i], 'unit': data['TOTAL_ENERGY']['Unit'] }
                sensor['energy_year']['value'] += data['YEAR_ENERGY']['Values'][i]
        if "PAC" in data:
            for i in data['PAC']['Values']:
                sensor['inverters'][i]['power_ac'] = { 'value': data['PAC']['Values'][i], 'unit': data['TOTAL_ENERGY']['Unit'] }
                sensor['power_ac']['value'] += data['PAC']['Values'][i]
        return sensor
def _device_meter_data(self, sensor, data):
_LOGGER.debug("Converting meter data: '{}'".format(data))
sensor.update(self._meter_data(data))
return sensor
def _device_storage_data(self, sensor, data):
_LOGGER.debug("Converting storage data from '{}'".format(data))
if 'Controller' in data:
controller = self._controller_data(data['Controller'])
sensor.update(controller)
if 'Modules' in data:
sensor['modules'] = { }
module_count = 0;
for module in data['Modules']:
sensor['modules'][module_count] = self._module_data(module)
module_count += 1
return sensor
    def _device_inverter_data(self, sensor, data):
        # Flatten a single inverter's CommonInverterData fields.
        # NOTE(review): unlike the other converters, no unit strings are
        # passed to _copy here; confirm whether that is intentional.
        _LOGGER.debug("Converting inverter data from '{}'".format(data))
        self._copy(data, sensor, "DAY_ENERGY", 'energy_day')
        self._copy(data, sensor, "TOTAL_ENERGY", 'energy_total')
        self._copy(data, sensor, "YEAR_ENERGY", 'energy_year')
        self._copy(data, sensor, "FAC", 'frequency_ac')
        self._copy(data, sensor, "IAC", 'current_ac')
        self._copy(data, sensor, "IDC", 'current_dc')
        self._copy(data, sensor, "PAC", 'power_ac')
        self._copy(data, sensor, "UAC", 'voltage_ac')
        self._copy(data, sensor, "UDC", 'voltage_dc')
        return sensor
    def _meter_data(self, data):
        # Flatten one meter record (currents, energies, powers, voltages,
        # then the Details block) into snake_case keys.
        # NOTE(review): the 'H' unit for frequency and 'W' for power factor
        # look off (Hz / dimensionless expected); confirm before changing.
        meter = {}
        self._copy(data, meter, "Current_AC_Phase_1", 'current_ac_phase_1', 'A')
        self._copy(data, meter, "Current_AC_Phase_2", 'current_ac_phase_2', 'A')
        self._copy(data, meter, "Current_AC_Phase_3", 'current_ac_phase_3', 'A')
        self._copy(data, meter, "EnergyReactive_VArAC_Sum_Consumed", 'energy_reactive_ac_consumed', 'Wh')
        self._copy(data, meter, "EnergyReactive_VArAC_Sum_Produced", 'energy_reactive_ac_produced', 'Wh')
        self._copy(data, meter, "EnergyReal_WAC_Minus_Absolute", 'energy_real_ac_minus', 'Wh')
        self._copy(data, meter, "EnergyReal_WAC_Plus_Absolute", 'energy_real_ac_plus', 'Wh')
        self._copy(data, meter, "EnergyReal_WAC_Sum_Consumed", 'energy_real_consumed', 'Wh')
        self._copy(data, meter, "EnergyReal_WAC_Sum_Produced", 'energy_real_produced', 'Wh')
        self._copy(data, meter, "Frequency_Phase_Average", 'frequency_phase_average', 'H')
        self._copy(data, meter, "PowerApparent_S_Phase_1", 'power_apparent_phase_1', 'W')
        self._copy(data, meter, "PowerApparent_S_Phase_2", 'power_apparent_phase_2', 'W')
        self._copy(data, meter, "PowerApparent_S_Phase_3", 'power_apparent_phase_3', 'W')
        self._copy(data, meter, "PowerApparent_S_Sum", 'power_apparent', 'W')
        self._copy(data, meter, "PowerFactor_Phase_1", 'power_factor_phase_1', 'W')
        self._copy(data, meter, "PowerFactor_Phase_2", 'power_factor_phase_2', 'W')
        self._copy(data, meter, "PowerFactor_Phase_3", 'power_factor_phase_3', 'W')
        self._copy(data, meter, "PowerFactor_Sum", 'power_factor', 'W')
        self._copy(data, meter, "PowerReactive_Q_Phase_1", 'power_reactive_phase_1', 'W')
        self._copy(data, meter, "PowerReactive_Q_Phase_2", 'power_reactive_phase_2', 'W')
        self._copy(data, meter, "PowerReactive_Q_Phase_3", 'power_reactive_phase_3', 'W')
        self._copy(data, meter, "PowerReactive_Q_Sum", 'power_reactive', 'W')
        self._copy(data, meter, "PowerReal_P_Phase_1", 'power_real_phase_1', 'W')
        self._copy(data, meter, "PowerReal_P_Phase_2", 'power_real_phase_2', 'W')
        self._copy(data, meter, "PowerReal_P_Phase_3", 'power_real_phase_3', 'W')
        self._copy(data, meter, "PowerReal_P_Sum", 'power_real', 'W')
        self._copy(data, meter, "Voltage_AC_Phase_1", 'voltage_ac_phase_1', 'V')
        self._copy(data, meter, "Voltage_AC_Phase_2", 'voltage_ac_phase_2', 'V')
        self._copy(data, meter, "Voltage_AC_Phase_3", 'voltage_ac_phase_3', 'V')
        self._copy(data, meter, "Voltage_AC_PhaseToPhase_12", 'voltage_ac_phase_to_phase_12', 'V')
        self._copy(data, meter, "Voltage_AC_PhaseToPhase_23", 'voltage_ac_phase_to_phase_23', 'V')
        self._copy(data, meter, "Voltage_AC_PhaseToPhase_31", 'voltage_ac_phase_to_phase_31', 'V')
        self._copy(data, meter, "Meter_Location_Current", 'meter_location')
        self._copy(data, meter, "Enable", 'enable')
        self._copy(data, meter, "Visible", 'visible')
        if "Details" in data:
            self._copy(data['Details'], meter, "Manufacturer", 'manufacturer')
            self._copy(data['Details'], meter, "Model", 'model')
            self._copy(data['Details'], meter, "Serial", 'serial')
        return meter
def _controller_data(self, data):
    """Normalize one battery controller's raw readings into a flat dict.

    Each (source key, target key, unit) triple is forwarded to self._copy,
    which stores a ``{'value': ..., 'unit': ...}`` entry under the target
    key.  Manufacturer details are copied only when a "Details" section
    is present in *data*.
    """
    controller = {}
    measurements = (
        ("Capacity_Maximum", 'capacity_maximum', 'Ah'),
        ("DesignedCapacity", 'capacity_designed', 'Ah'),
        ("Current_DC", 'current_dc', 'A'),
        ("Voltage_DC", 'voltage_dc', 'V'),
        ("Voltage_DC_Maximum_Cell", 'voltage_dc_maximum_cell', 'V'),
        ("Voltage_DC_Minimum_Cell", 'voltage_dc_minimum_cell', 'V'),
        ("StateOfCharge_Relative", 'state_of_charge', '%'),
        ("Temperature_Cell", 'temperature_cell', 'C'),
    )
    for source_key, target_key, unit in measurements:
        self._copy(data, controller, source_key, target_key, unit)
    self._copy(data, controller, "Enable", 'enable')
    if "Details" in data:
        details = data['Details']
        for source_key, target_key in (("Manufacturer", 'manufacturer'),
                                       ("Model", 'model'),
                                       ("Serial", 'serial')):
            self._copy(details, controller, source_key, target_key)
    return controller
def _module_data(self, data):
    """Normalize one battery module's raw readings into a flat dict.

    Mirrors _controller_data but with the extra per-cell min/max
    temperatures, cycle count, and cell status fields.
    """
    module = { }
    measurements = (
        ("Capacity_Maximum", 'capacity_maximum', 'Ah'),
        ("DesignedCapacity", 'capacity_designed', 'Ah'),
        ("Current_DC", 'current_dc', 'A'),
        ("Voltage_DC", 'voltage_dc', 'V'),
        ("Voltage_DC_Maximum_Cell", 'voltage_dc_maximum_cell', 'V'),
        ("Voltage_DC_Minimum_Cell", 'voltage_dc_minimum_cell', 'V'),
        ("StateOfCharge_Relative", 'state_of_charge', '%'),
        ("Temperature_Cell", 'temperature_cell', 'C'),
        ("Temperature_Cell_Maximum", 'temperature_cell_maximum', 'C'),
        ("Temperature_Cell_Minimum", 'temperature_cell_minimum', 'C'),
        # NOTE(review): unit 'C' for a cycle count looks like a copy-paste
        # slip in the original -- preserved as-is; confirm before changing.
        ("CycleCount_BatteryCell", 'cycle_count_cell', 'C'),
    )
    for source_key, target_key, unit in measurements:
        self._copy(data, module, source_key, target_key, unit)
    self._copy(data, module, "Status_BatteryCell", 'status_cell')
    self._copy(data, module, "Enable", 'enable')
    if "Details" in data:
        details = data['Details']
        for source_key, target_key in (("Manufacturer", 'manufacturer'),
                                       ("Model", 'model'),
                                       ("Serial", 'serial')):
            self._copy(details, module, source_key, target_key)
    return module
def _copy(self, source, target, sid, tid, unit = None):
    """Copy the reading *sid* from *source* into target[tid].

    The stored entry is always ``{'value': ...}`` plus an optional
    ``'unit'``.  Source values may be plain scalars or dicts of the form
    ``{'Value': ..., 'Unit': ...}``.  A missing key or a None value is
    normalized to 0, and an explicit *unit* argument overrides any unit
    found in the source.
    """
    raw = source[sid] if sid in source else None
    if isinstance(raw, dict) and 'Value' in raw:
        entry = {'value': raw['Value']}
        if "Unit" in raw:
            entry['unit'] = raw['Unit']
    elif sid in source:
        entry = {'value': raw}
    else:
        entry = {'value': 0}
    if unit is not None:
        entry['unit'] = unit
    if entry['value'] is None:
        entry['value'] = 0
    target[tid] = entry
    return
| true |
6fc2124ac7768a6201f603b6eff51f0539f0d0fd | Python | rhiroyuki/46SimplePythonExercises | /exercise12.py | UTF-8 | 329 | 4.21875 | 4 | [] | no_license | """
Define a procedure histogram() that takes a list of integers and prints
a histogram to the screen.
For example, histogram([4, 9, 7]) should print the following:
****
*********
*******
"""
def histogram(intList):
    """Print a horizontal histogram: one row of '*' per integer in intList.

    Example: histogram([4, 9, 7]) prints
        ****
        *********
        *******
    """
    for count in intList:
        print('*' * count)


# Backward-compatible alias: the original (misspelled) public name.
histogram2 = histogram

if __name__ == '__main__':
    # Bug fix: the original defined histogram2() but called histogram(),
    # which raised NameError when the script was run.
    histogram([4, 9, 7])
| true |
5e3ca6aeb91c4fabefebdbc2e9c3744098c8841e | Python | lijolawrance/Solutions | /Richest_customer_wealth.py | UTF-8 | 225 | 2.96875 | 3 | [] | no_license | import numpy as np
def maximumWealth(accounts):
    """Return the wealth of the richest customer.

    accounts[i][j] is the money customer i holds in bank j; a customer's
    wealth is the sum of their row.  The original also recomputed the
    answer with NumPy and printed it -- leftover debug output that added
    nothing -- so the result is now computed in a single pass.
    """
    return max(map(sum, accounts))
# Demo input: three customers, three banks each; the richest holds 2+8+7 = 17.
ac = [[2, 8, 7], [7, 1, 2], [1, 9, 5]]
d = maximumWealth(ac)
print(d)
| true |
4bd5754e8bde9e4954165253bfab4c5981fc4d13 | Python | andyyu/coding-problems | /isomorphic_strings.py | UTF-8 | 916 | 4.125 | 4 | [] | no_license | # Andy Yu
'''
Given two strings s and t, determine if they are isomorphic.
Two strings are isomorphic if the characters in s can be replaced to get t.
All occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character but a character may map to itself.
For example,
Given "egg", "add", return true.
Given "foo", "bar", return false.
Given "paper", "title", return true.
Note:
You may assume both s and t have the same length.
Difficult: Easy
Solution Notes:
Clever solution (not mine) -
def is_isomorphic(s, t):
return len(set(zip(s, t))) == len(set(s)) == len(set(t))
O(n) time
O(1) space
'''
def is_isomorphic(s, t):
    """Return True if s and t are isomorphic.

    Characters must map one-to-one in both directions: every occurrence
    of a character in s maps to the same character in t, and no two
    characters of s map to the same character of t.
    """
    forward = {}
    backward = {}
    for a, b in zip(s, t):
        # setdefault records the first pairing and returns the existing
        # one on later occurrences; any disagreement breaks the bijection.
        if forward.setdefault(a, b) != b or backward.setdefault(b, a) != a:
            return False
    return True
| true |
8cdc9bb9477bc64fc18e1267f2b5ea181bb20a9f | Python | mzemlyanikin/depthai-experiments | /gen2-coronamask/main.py | UTF-8 | 5,569 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
from pathlib import Path
import cv2
import depthai as dai
import numpy as np
import time
import argparse
# Command-line interface: exactly one frame source must be selected
# (-cam for the live DepthAI camera, -vid for a video file).
parser = argparse.ArgumentParser()
parser.add_argument('-nd', '--no-debug', action="store_true", help="Prevent debug output")
parser.add_argument('-cam', '--camera', action="store_true",
                    help="Use DepthAI 4K RGB camera for inference (conflicts with -vid)")
parser.add_argument('-vid', '--video', type=str,
                    help="Path to video file to be used for inference (conflicts with -cam)")
args = parser.parse_args()

# Refuse to start with no source at all.
if not args.camera and not args.video:
    raise RuntimeError(
        "No source selected. Use either \"-cam\" to run on RGB camera as a source or \"-vid <path>\" to run on video"
    )

debug = not args.no_debug
def create_pipeline():
    """Build the DepthAI pipeline: color camera -> MobileNet detector.

    Exposes an XLink output "rgb" for preview frames and "nn" for
    detections.  In video mode (-vid) the camera is not linked to the
    network; an XLinkIn node named "detection_in" lets the host push
    frames instead.
    """
    # Start defining a pipeline
    pipeline = dai.Pipeline()

    # Define a source - color camera
    camRgb = pipeline.createColorCamera()
    camRgb.setPreviewSize(300, 300)  # matches the model's expected input size
    camRgb.setInterleaved(False)
    camRgb.setFps(40)

    # Define a neural network that will make predictions based on the source frames
    nn = pipeline.createMobileNetDetectionNetwork()
    nn.setConfidenceThreshold(0.5)
    nn.setBlobPath(str(Path("models/model.blob").resolve().absolute()))
    nn.setNumInferenceThreads(2)
    # nn.input.setBlocking(False)

    if args.camera:
        camRgb.preview.link(nn.input)
    else:
        # Video mode: frames are fed from the host via "detection_in".
        detection_in = pipeline.createXLinkIn()
        detection_in.setStreamName("detection_in")
        detection_in.out.link(nn.input)

    # Create outputs
    xoutRgb = pipeline.createXLinkOut()
    xoutRgb.setStreamName("rgb")
    camRgb.preview.link(xoutRgb.input)

    nnOut = pipeline.createXLinkOut()
    nnOut.setStreamName("nn")
    nn.out.link(nnOut.input)

    return pipeline
def to_planar(arr: np.ndarray, shape: tuple) -> list:
    """Resize *arr* to *shape* and return it flattened in planar (CHW) order."""
    resized = cv2.resize(arr, shape)
    planar = resized.transpose(2, 0, 1)
    return planar.flatten()
class FPSHandler:
    """Frame-rate bookkeeping for the capture loop.

    Tracks overall throughput (fps) and, via tick()/tick_fps(), event
    rates per name.  When reading from a video file it can also sleep in
    next_iter() so playback matches the file's native frame rate.
    """
    def __init__(self, cap=None):
        # Wall-clock anchors for overall FPS; framerate is only known
        # when a cv2.VideoCapture is supplied (file playback).
        self.timestamp = time.time()
        self.start = time.time()
        self.framerate = None if cap is None else cap.get(cv2.CAP_PROP_FPS)
        self.frame_cnt = 0
        self.ticks = {}
        self.ticks_cnt = {}

    def next_iter(self):
        # When replaying a file (not the live camera), pace the loop to
        # the file's frame rate before stamping the new frame.
        if not args.camera:
            remaining = (self.timestamp + 1.0 / self.framerate) - time.time()
            if remaining > 0:
                time.sleep(remaining)
        self.timestamp = time.time()
        self.frame_cnt += 1

    def tick(self, name):
        # First tick records the epoch for *name*; later ticks count events.
        if name not in self.ticks:
            self.ticks[name] = time.time()
            self.ticks_cnt[name] = 0
        else:
            self.ticks_cnt[name] += 1

    def tick_fps(self, name):
        started = self.ticks.get(name)
        if started is None:
            return 0
        return self.ticks_cnt[name] / (time.time() - started)

    def fps(self):
        return self.frame_cnt / (self.timestamp - self.start)
# Class ids emitted by the detector; indices 1 and 3 both mean "no mask".
labelMap = ["background", "no mask", "mask", "no mask"]

if args.camera:
    fps = FPSHandler()
else:
    cap = cv2.VideoCapture(str(Path(args.video).resolve().absolute()))
    fps = FPSHandler(cap)

# Pipeline defined, now the device is connected to
with dai.Device(create_pipeline()) as device:
    # Start pipeline
    device.startPipeline()

    if args.camera:
        qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    else:
        detIn = device.getInputQueue("detection_in")

    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)

    if args.video:
        cap = cv2.VideoCapture(str(Path(args.video).resolve().absolute()))

    detections = []
    frame = None

    # nn data (bounding box locations) are in <0..1> range - they need to be normalized with frame width/height
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    def shouldRun():
        # Camera mode runs until 'q'; video mode also stops at end of file.
        return cap.isOpened() if args.video else True

    def getFrame():
        if args.video:
            return cap.read()
        else:
            return True, qRgb.get().getCvFrame()

    while shouldRun():
        read_correctly, frame = getFrame()

        if not read_correctly:
            break

        fps.next_iter()

        if not args.camera:
            # Video mode: push each frame to the device manually as a
            # 300x300 planar BGR ImgFrame so the NN can run on it.
            tstamp = time.monotonic()
            lic_frame = dai.ImgFrame()
            lic_frame.setData(to_planar(frame, (300, 300)))
            lic_frame.setTimestamp(tstamp)
            lic_frame.setType(dai.RawImgFrame.Type.BGR888p)
            lic_frame.setWidth(300)
            lic_frame.setHeight(300)
            detIn.send(lic_frame)

        detections = qDet.get().detections

        cv2.putText(frame, "Fps: {:.2f}".format(fps.fps()), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color=(255, 255, 255))

        # Draw each detection: bounding box, class label, and confidence.
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
            cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)

        cv2.imshow("rgb", frame)

        if cv2.waitKey(1) == ord('q'):
            break
| true |
5cfb795ab7bed98bef5d905b6f6482a4c109a08f | Python | coole123/NPTEL_Assignments | /The Joy of Computing using Python/Assignments6_Digits.py | UTF-8 | 603 | 3.984375 | 4 | [] | no_license | #You are given a number A which contains only digits 0's and 1's. Your task is to make all digits same by
#just flipping one digit (i.e. 0 to 1 or 1 to 0 ) only. If it is possible to make all the digits same
#by just flipping one digit then print 'YES' else print 'NO'.
def canMakeAllSame(str):
    """Return True if flipping exactly one digit of the 0/1 string makes
    all digits equal -- i.e. the string contains exactly one '0' or
    exactly one '1'.

    The parameter keeps its original (builtin-shadowing) name so existing
    keyword callers are unaffected; the builtin str() is not needed here.
    The manual counting loop was replaced with str.count, which counts
    the same characters in one C-level pass.
    """
    zeros = str.count('0')
    # Everything that is not '0' was counted as a one by the original loop.
    ones = len(str) - zeros
    return (zeros == 1 or ones == 1)
# Read the 0/1 string from stdin and report whether one flip equalizes it.
num=input()
if(canMakeAllSame(num)):
    print("YES")
else:
    print("NO")
| true |
18de0a231fcb61ed2f2c7d3aa18cfab507c87337 | Python | yingning/text-scene | /text_scene/preprocessing/data_utils.py | UTF-8 | 20,780 | 2.609375 | 3 | [] | no_license | import re
import csv
import numpy as np
import pandas as pd
from math import ceil
from os.path import basename
from collections import defaultdict, Counter
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.preprocessing.sequence import pad_sequences
from paths import (
CAPTIONS_FILE,
SENTENCES_CSV,
REJECTED_IMGS_FILE,
ANNOTATED_IMGS_FILE,
REDO_IMGS_FILE,
IMG_URLS,
COMBINED_MTURK_RESULTS_CSV,
MTURK_RESULTS_CSV,
BATCH_URLS_CSV,
COMBINED_BATCH_RESULTS_CSV
)
# Decoding tables: MTurk answers arrive as small integer codes (as strings);
# each table maps a code to its human-readable label.  q3 applies to
# man-made scenes and q4 to natural ones; 'NA' marks the question that did
# not apply to the image.
q1map = {'0': 'indoors', '1': 'outdoors'}
q2map = {'0': 'man-made', '1': 'natural'}
q3map = {'0': 'transportation_urban',
         '1': 'restaurant',
         '2': 'recreation',
         '3': 'domestic',
         '4': 'work_education',
         '5': 'other_unclear',
         'NA': 'NA'}
q4map = {'0': 'body_of_water',
         '1': 'field_forest',
         '2': 'mountain',
         '3': 'other_unclear',
         'NA': 'NA'}
def make_datadict(results_csv, keep_url=False):
    """Parse an MTurk results csv into {image: [[q1, q2, q3, q4], ...]}.

    Integer answer codes are decoded through the module-level
    q1map..q4map tables; anything that is not a digit code (or 'NA' for
    q3/q4) is kept verbatim.  An image may carry several annotation
    lists, one per completed assignment.

    Parameters
    ----------
    results_csv: path to the results csv; the header row is skipped.
    keep_url: if True, key the dict by the full image url; otherwise by
        the bare file name.

    Returns
    -------
    datadict: defaultdict mapping each image key to its list of
        4-element annotation lists.
    """
    datadict = defaultdict(list)
    with open(results_csv, 'r') as csvf:
        reader = csv.reader(csvf)
        next(reader)
        for row in reader:
            key = row[0] if keep_url else url2filename(row[0])
            a1 = q1map[row[2]] if row[2].isdigit() else row[2]
            a2 = q2map[row[3]] if row[3].isdigit() else row[3]
            a3 = q3map[row[4]] if (row[4].isdigit() or row[4] == 'NA') else row[4]
            a4 = q4map[row[5]] if (row[5].isdigit() or row[5] == 'NA') else row[5]
            datadict[key].append([a1, a2, a3, a4])
    return datadict
def write_sentence_csv(datadict, captions_file, out_csv):
    """
    Given a datadict constructed from a results csv, write a new
    csv file by mapping each annotated image to its 5 corresponding
    captions and labeling each caption with the label given in results csv.
    """
    # NOTE: Python 2 module ('wb' csv mode, print statement below).
    with open(captions_file) as cfile, open(out_csv, 'wb') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(['sentence', 'q1', 'q2', 'q3', 'q4', 'img_file'])
        n_sents = 0
        for line in cfile:
            split_line = line.split()
            # Replace bare numbers with a placeholder token.
            split_line = [w if not w.isdigit()
                          else '<NUMBER>'
                          for w in split_line]
            # Caption lines are keyed "<img_file>#<caption_index> <words...>".
            img_file = split_line[0].split('#')[0]
            if img_file in datadict:
                n_sents += 1
                sentence = ' '.join(split_line[1:]).lower()
                annotations = datadict[img_file]
                if len(annotations) != 4: # need to have 4 annotation fields
                    # Multiple assignments: take the first annotation list.
                    annotations = annotations[0]
                assert len(annotations) == 4
                writer.writerow([sentence] + annotations + [img_file])
    print "Wrote sentence csv with %i sentences." % n_sents
def get_img_lists(img_url_file=IMG_URLS, log_file=ANNOTATED_IMGS_FILE,
                  keep_url=False):
    """Return (annotated, not_annotated) for the image pool.

    `annotated` is the set of images recorded in *log_file*;
    `not_annotated` is a list of every image in *img_url_file* not in
    that set.  With keep_url=False both sides are reduced to bare file
    names before comparison.
    """
    all_images = set()
    with open(img_url_file, 'rb') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip header row
        for row in reader:
            if keep_url:
                all_images.add(row[0])
            else:
                all_images.add(url2filename(row[0]))
    annotated = set()
    with open(log_file, 'r') as log:
        for line in log:
            if keep_url:
                annotated.add(line.strip())
            else:
                annotated.add(url2filename(line.strip()))
    not_annotated = list(all_images - annotated)
    return annotated, not_annotated
#########################################
### Annotation using MTurk layout/UI ###
#########################################
def write_batch_urls_csv(img_urls, outcsv=BATCH_URLS_CSV, n_imgs=100):
    """get image urls for unannotated images and write them to
    `outcsv`."""
    # NOTE(review): n_imgs is accepted but never used here; the caller
    # (make_batch) does the sampling -- confirm before removing it.
    with open(outcsv, 'wb') as out:
        writer = csv.writer(out)
        writer.writerow(['img_url'])  # single-column header
        for img_url in img_urls:
            writer.writerow([img_url.strip()])
def write_annotated_urls(img_urls, outfile=ANNOTATED_IMGS_FILE):
    """Append every url in *img_urls* (stripped, one per line) to the
    annotated-images log file."""
    with open(outfile, 'a') as log:
        log.writelines(url.strip() + '\n' for url in img_urls)
def make_batch(n_imgs=100):
    """Create image url csv file for images to be annotated in the batch
    and write those image urls to annotated_imgs.txt."""
    # Sample n_imgs urls without replacement from the not-yet-annotated
    # pool, then record them both as the new batch csv and in the log.
    _, unannotated = get_img_lists(keep_url=True)
    to_annotate = np.random.choice(unannotated, size=(n_imgs,), replace=False)
    write_batch_urls_csv(to_annotate)
    write_annotated_urls(to_annotate)
def write_results_from_batch_csv(batch_csv, outcsv):
    """write results csv from mturk generated batch results.
    Writes to `outcsv`."""
    df = pd.read_csv(batch_csv)
    with open(outcsv, 'wb') as out:
        writer = csv.writer(out)
        writer.writerow(['image_url', 'worker_id', 'q1', 'q2', 'q3', 'q4'])
        for _, row in df.iterrows():
            img_url = row['Input.img_url'].strip()
            worker_id = row['WorkerId'].strip()
            q1 = int(row['Answer.Answer_1'])
            q2 = int(row['Answer.Answer_2'])
            # q3/q4 may be 'NA', free text, or NaN; int() raises ValueError
            # on those and the raw cell value is kept instead.
            try:
                q3 = int(row['Answer.Answer_3'])
            except ValueError:
                q3 = row['Answer.Answer_3']
            try:
                q4 = int(row['Answer.Answer_4'])
            except ValueError:
                q4 = row['Answer.Answer_4']
            # NOTE(review): because of the elif, only one of q3/q4 is
            # normalized to 'NA' when both are null -- confirm that both
            # can never be NaN for a valid assignment.
            if pd.isnull(q4):
                q4 = 'NA'
            elif pd.isnull(q3):
                q3 = 'NA'
            writer.writerow([img_url, worker_id, q1, q2, q3, q4])
def append_batch_results(batch_results_csv=COMBINED_BATCH_RESULTS_CSV,
                         mturk_results_csv=MTURK_RESULTS_CSV):
    """Append every data row of the batch results csv to the cumulative
    MTurk results csv (the batch header row is skipped)."""
    with open(mturk_results_csv, 'ab') as mcsv, open(batch_results_csv, 'rb') as bcsv:
        reader = csv.reader(bcsv)
        next(reader)  # skip header
        writer = csv.writer(mcsv, lineterminator='\n')
        for row in reader:
            writer.writerow(row)
####################
### Data loaders ###
####################
def sentences_df(sentence_csv=SENTENCES_CSV, labels='full', drop_unk=True,
                 label_unk=None, distant=None, keep_filename=False,
                 special_tokens=False):
    """
    Create a dataframe out of the data in `sentence_csv`.
    Each row contains a sentence and its label. The label set
    is determined by the value of the `labels` parameter
    ('full', 'in_out', 'man_nat', '3way', or 'function').
    """
    # label_unk may carry a pre-built DataFrame; otherwise read from disk.
    # NOTE(review): the `distant` parameter is unused in this function --
    # confirm before removing it from the signature.
    df = label_unk if isinstance(label_unk, pd.DataFrame) else pd.read_csv(sentence_csv)
    # start and end tokens
    if special_tokens:
        ins_toks = lambda row: ' '.join(['<s>'] + row['sentence'].split() + ['<e>'])
        df['sentence'] = df.apply(lambda row: ins_toks(row), axis=1)
    if not keep_filename:
        df = df.drop(['img_file'], 1)
    if labels == 'full':
        # Full label: all applicable answers joined with '/'.
        if drop_unk:
            df = df[df.q3 != 'other_unclear']
            df = df[df.q4 != 'other_unclear']
        def full_map(q1, q2, q3, q4):
            label = [q1, q2, q3, q4]
            # Non-applicable answers are NaN (floats); keep only strings.
            label = [l for l in label if type(l) is str]
            return '/'.join(label)
        df['label'] = df.apply(
            lambda x: full_map(x['q1'], x['q2'], x['q3'], x['q4']), axis=1)
        df = df.drop(['q1', 'q2', 'q3', 'q4'], 1)
        return df
    elif labels == 'in_out':
        # Binary indoors/outdoors label (q1 only).
        df = df.drop(['q2', 'q3', 'q4'], 1)
        df.columns = ['sentence', 'label']
        return df
    elif labels == 'man_nat':
        # Binary man-made/natural label (q2 only).
        df = df.drop(['q1', 'q3', 'q4'], 1)
        df.columns = ['sentence', 'label']
        return df
    elif labels == '3way':
        # Joint q1/q2 label, e.g. "indoors/man-made".
        def threeway_map(q1, q2, q3, q4):
            return '/'.join([q1, q2])
        df['label'] = df.apply(
            lambda x: threeway_map(x['q1'], x['q2'], x['q3'], x['q4']), axis=1)
        df = df.drop(['q1', 'q2', 'q3', 'q4'], 1)
        return df
    elif labels == 'function':
        # Scene-function label: q3 when present, otherwise 'natural'.
        if drop_unk:
            df = df[df.q3 != 'other_unclear']
            df = df[df.q4 != 'other_unclear']
        def fn_map(q1, q2, q3, q4):
            if isinstance(q3, str):
                return q3
            elif isinstance(q4, str):
                return 'natural'
        df['label'] = df.apply(
            lambda x: fn_map(x['q1'], x['q2'], x['q3'], x['q4']), axis=1)
        df = df.drop(['q1', 'q2', 'q3', 'q4'], 1)
        return df
def load_dataset(df, ngram_order=1, pad=False, stem=False, omit_stop=False,
                 word2idx=None, truncate=False):
    """
    Creates numpy arrays out of a dataframe. If `pad` is set to
    `True`, X array will be of size (n_samples, maxlen), where each
    element of a sample is an index corresponding to `word2idx`.
    Otherwise, X array will be of size (n_samples, vocab_size+1),
    where each element of a sample is either 1, indicating that
    the word corresponding to that index is in the sentence (bag
    of words/ngrams representation).
    """
    sentences = df['sentence'].values
    vocab = []
    stemmer = PorterStemmer()
    stop = stopwords.words('english')
    # First pass: collect the vocabulary of tokens (or 1..n-grams).
    for sentence in sentences:
        # Tokenize on hyphens/spaces; drop punctuation (and stopwords
        # when omit_stop is set).
        if omit_stop:
            sentence = [w
                        for w in re.split("-| ", sentence)
                        if w not in stop and w not in "?.,-!()"]
        else:
            sentence = [w
                        for w in re.split("-| ", sentence)
                        if w not in "?.,-!()"]
        if ngram_order == 1:
            for word in sentence:
                if stem:
                    vocab.append(stemmer.stem(word))
                else:
                    vocab.append(word)
        else:
            # all n from 1 to n
            for n in range(1, ngram_order+1):
                for ngram in zip(*[sentence[i:] for i in range(n)]):
                    vocab.append(ngram)
    if not word2idx:
        # start at 1 to allow masking in Keras
        word2idx = {w:i for i,w in enumerate(set(vocab), start=1)}
    # Second pass: map every sentence to its list of vocabulary indices,
    # using the same tokenization as above.
    X_ind = []
    for i,sentence in enumerate(sentences):
        if omit_stop:
            sentence = [w
                        for w in re.split("-| ", sentence)
                        if w not in stop and w not in "?.,-!()"]
        else:
            sentence = [w
                        for w in re.split("-| ", sentence)
                        if w not in "?.,-!()"]
        if ngram_order == 1:
            if stem:
                indices = [word2idx[stemmer.stem(w)] for w in sentence]
            else:
                indices = [word2idx[w] for w in sentence]
            X_ind.append(indices)
        else:
            indices = []
            for n in range(1, ngram_order+1):
                for ngram in zip(*[sentence[i:] for i in range(n)]):
                    indices.append(word2idx[ngram])
            X_ind.append(indices)
    # Bag-of-ngrams matrix: one row per sentence, 1 marks presence.
    X = np.zeros((len(sentences), len(word2idx)+1))
    for i,sample in enumerate(X_ind):
        for idx in sample:
            X[i,idx] = 1
    # Labels are encoded to integer class ids.
    l_enc = LabelEncoder()
    y = l_enc.fit_transform(df['label'].values)
    if pad:
        # Sequence representation (index lists padded to equal length)
        # instead of the bag-of-words matrix built above.
        if truncate:
            X = pad_sequences(X_ind, padding='post', maxlen=20, truncating='post')
        else:
            X = pad_sequences(X_ind, padding='post')
    return X, y, word2idx, l_enc
#######################
### Majority voting ###
#######################
def write_majority_vote_csv(results_csv, outfile):
    """
    Given the results of crowdsourced annotation with more than 1 assignment
    per HIT, perform majority voting and write the results to `outfile`.
    Does not write results with one or more questions without majority
    consensus.

    Parameters
    ----------
    results_csv: csv containing image urls and annotations (4 answers each)
        with responses from more than one annotator
    outfile: csv file to be written
    """
    datadict = make_datadict(results_csv)
    # keep_all=False drops images without full consensus.
    voted_datadict = majority_vote_dict(datadict, keep_all=False)
    with open(outfile, 'w') as out:
        header = ['image_url', 'worker_id', 'q1', 'q2', 'q3', 'q4']
        writer = csv.writer(out)
        writer.writerow(header)
        for img_file, answers in voted_datadict.items():
            row = list(answers)
            row.insert(0, 'majority') # worker_id
            row.insert(0, img_file)
            writer.writerow(row)
def majority_vote_dict(datadict, keep_all=True):
    """
    Performs majority voting on annotated data. For each question
    for each image file, select as the correct the answer given by
    two or more annotators. If there is no majority consensus,
    no answer will be selected as correct.

    Parameters
    ----------
    datadict: dictionary created by `datadict` method mapping each
        image filename to a list of lists where each sublist contains
        the annotators answers to each of the 4 questions asked in the
        task
    keep_all: boolean, if True, keep result for each image whether or
        not all of the corresponding questions have majority consensus;
        if False, discard annotations for images that contain at least
        one question without majority consensus from `voted_datadict`.

    Returns
    -------
    voted_datadict: dictionary mapping each image filename to either
        a list of 4 answers corresponding to the majority consensus
        for each question, or a list of lists corresponding to the
        original responses (if no consensus).
    """
    voted_datadict = {}
    # return (value, count) tuple for most common value
    most_common = lambda x: Counter(x).most_common(1)[0]
    # A majority needs at least 2 votes and at least half the assignments.
    is_majority = lambda x,n: x[1] >= max(2., ceil(n / 2.))
    nb_no_majority_imgs = 0
    nb_no_majority_questions = 0
    for img_file, answer_lists in datadict.items():
        if len(answer_lists) == 1:
            # Single assignment: nothing to vote on.
            voted_datadict[img_file] = answer_lists[0]
        else:
            no_majority_img = False
            # Transpose so answers[i] collects every annotator's reply to Qi.
            answers = zip(*[answer_list for answer_list in answer_lists])
            majority = [most_common(a) for a in answers]
            majority_answers = []
            for i, a in enumerate(majority):
                if is_majority(a, len(answers[i])):
                    majority_answers.append(a[0])
                else:
                    # No consensus: keep the raw tuple of answers, which
                    # also marks the image as unresolved (tuples below).
                    majority_answers.append(answers[i])
                    no_majority_img = True
                    nb_no_majority_questions += 1
            # check to make sure majorities for q3 and q4 both aren't NA
            is_na = lambda x: x == 'NA'
            if sum([is_na(m) for m in majority_answers]) > 1:
                no_majority_img = True
                majority_answers[0] = ('no', 'majority')
            # check to rule out invalid combos
            if majority_answers[1] == 'man-made' and \
               majority_answers[3] in ['field_forest', 'body_of_water']:
                no_majority_img = True
                majority_answers[0] = ('invalid', 'data')
            if no_majority_img:
                #print "no majority: %s" % img_file
                nb_no_majority_imgs += 1
            voted_datadict[img_file] = majority_answers
    if not keep_all:
        # NOTE: Python 2 -- .items() returns a list here, so deleting
        # entries while looping over it is safe.
        voted_datadict = dict(voted_datadict)
        for img_file, answers in voted_datadict.items():
            if any(isinstance(a, tuple) for a in answers):
                del voted_datadict[img_file]
    print "No majority for %i questions/%i images." % \
        (nb_no_majority_questions, nb_no_majority_imgs)
    return voted_datadict
##################################
### Annotation result analysis ###
##################################
def unique_workers(results_csv):
    """Print per-worker HIT counts (most active first) and return the
    number of distinct workers found in *results_csv*."""
    worker_ids = Counter()
    with open(results_csv) as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip header
        for row in reader:
            worker_ids[row[1]] += 1  # column 1 is the worker id
    for k,v in worker_ids.most_common():
        print "%s: %s" % (k, v)
    return len(worker_ids)
def make_kappa_matrix(datadict, labels='full'):
    """
    Create a N x M matrix where N is the number of images
    and M is the number of labels (14 for full) to be used
    for computing fleiss's kappa
    """
    datadict = dict(datadict)
    # Pick the label-collapsing function for the requested scheme.
    if labels == 'full':
        def full_map(q1, q2, q3, q4):
            label = [q1, q2, q3, q4]
            label = [l for l in label if l != 'NA']
            return '/'.join(label)
        label_map = full_map
    elif labels == 'in_out':
        label_map = lambda q1,q2,q3,q4: q1
    elif labels == 'man_nat':
        label_map = lambda q1,q2,q3,q4: q2
    elif labels == '3way':
        def threeway_map(q1, q2, q3, q4):
            return '/'.join([q1, q2])
        label_map = threeway_map
    elif labels == 'function':
        def fn_map(q1, q2, q3, q4):
            if q3 == 'NA':
                return 'natural'
            else:
                return q3
        label_map = fn_map
    # Collapse each image's annotations to labels; drop images with fewer
    # than 3 assignments.
    # NOTE: Python 2 -- .items() is a list, so del inside the loop is safe.
    for img, answer_lists in datadict.items():
        datadict[img] = [label_map(*a) for a in answer_lists]
        if len(datadict[img]) < 3:
            del datadict[img]
    # Count, per image, how many annotators chose each label.
    labels = set([a for answer_list in datadict.values() for a in answer_list])
    label2id = {l:i for i,l in enumerate(labels)}
    nb_labels = len(labels)
    matrix = np.zeros((len(datadict), nb_labels), dtype='int32')
    for i, (img, label_lists) in enumerate(datadict.items()):
        for label in label_lists:
            matrix[i, label2id[label]] += 1
    return matrix
def fleiss_kappa(results_csv, labels='full'):
    """Compute Fleiss's kappa for the annotations in *results_csv* under
    the given label scheme (see make_kappa_matrix)."""
    from scripts import fleiss  # local import: project-internal helper
    datadict = make_datadict(results_csv)
    matrix = make_kappa_matrix(datadict, labels=labels)
    return fleiss.kappa(matrix)
def write_no_majority_list():
    """Append to REDO_IMGS_FILE the url of every image whose combined
    results still contain a question without majority consensus."""
    with open(REDO_IMGS_FILE, 'a') as r:
        datadict = make_datadict(COMBINED_MTURK_RESULTS_CSV, keep_url=True)
        voted_dict = majority_vote_dict(datadict, keep_all=True)
        for img_file, answers in voted_dict.items():
            # Tuples mark unresolved questions (see majority_vote_dict).
            if any(isinstance(a, tuple) for a in answers):
                r.write(img_file + '\n')
########################
### Helper functions ###
########################
def combine_csvs(csv1, csv2, outcsv):
    """Concatenate the data rows of two csvs (both headers skipped) into
    *outcsv* and report what was written."""
    with open(csv1,'rb') as c1, open(csv2,'rb') as c2, open(outcsv, 'wb') as outfile:
        c1reader = csv.reader(c1)
        c2reader = csv.reader(c2)
        next(c1reader)  # skip header of first csv
        next(c2reader)  # skip header of second csv
        writer = csv.writer(outfile)
        for row in c1reader:
            writer.writerow(row)
        for row in c2reader:
            writer.writerow(row)
    print "Combined %s and %s into %s." % (basename(csv1),
                                           basename(csv2),
                                           basename(outcsv))
def url2filename(url):
    """Return the final path component of *url* (the bare file name)."""
    return url.rsplit('/', 1)[-1]
def load_bin_vec(fname, vocab):
    """
    Loads 300x1 word vecs from Google (Mikolov) word2vec. Taken from
    CNN_sentence https://github.com/yoonkim/CNN_sentence
    """
    # NOTE(review): Python 2 code -- under Python 3, f.read(1) yields
    # bytes, so the ch == ' ' / ch != '\n' comparisons would never match.
    word_vecs = {}
    with open(fname, "rb") as f:
        # Header line: "<vocab_size> <vector_dim>".
        header = f.readline()
        vocab_size, layer1_size = map(int, header.split())
        binary_len = np.dtype('float32').itemsize * layer1_size
        for line in range(vocab_size):
            # Read one space-terminated word, byte by byte.
            word = []
            while True:
                ch = f.read(1)
                if ch == ' ':
                    word = ''.join(word)
                    break
                if ch != '\n':
                    word.append(ch)
            # Keep the vector only for in-vocabulary words; otherwise
            # skip over its bytes.
            if word in vocab:
                word_vecs[word] = np.fromstring(f.read(binary_len),
                                                dtype='float32')
            else:
                f.read(binary_len)
    return word_vecs
def add_unknown_words(word_vecs, vocab, k=300):
    """Add a random vector (uniform in [-0.25, 0.25]^k) for every vocab
    word missing from *word_vecs*, plus a generic '<unk>' vector.
    Mutates *word_vecs* in place and prints how many words were added."""
    unknown_words = []
    for word in vocab:
        if word not in word_vecs:
            unknown_words.append(word)
            word_vecs[word] = np.random.uniform(-0.25,0.25,k)
    word_vecs['<unk>'] = np.random.uniform(-0.25,0.25,k)
    print "Added %i unknown words to word vectors." % len(unknown_words)
    #print unknown_words
| true |
1b745bcdb3998dfaa4b7533ae2b7b73e99c92777 | Python | grschafer/Tarrasque | /tarrasque/binding.py | UTF-8 | 7,020 | 3.03125 | 3 | [] | no_license | import collections
# One decoded replay tick as produced by the skadi stream iterator.
Snapshot = collections.namedtuple(
    "Snapshot",
    ["tick", "user_messages", "game_events", "world", "modifiers"],
)

# Dota 2 replays advance the simulation 30 ticks per game-clock second.
TICKS_PER_SECOND = 30
class StreamBinding(object):
"""
The StreamBinding class is Tarrasque's metaphor for the replay. Every
Tarrasque entity class has a reference to an instance of this
class, and when the tick of the instance changes, the data returned by
those classes changes. This makes it easy to handle complex object graphs
without explicitly needing to pass the Skadi demo object around.
.. note:: Where methods on this class take absolute tick values (i.e. the
``start`` and ``end`` arguments to :meth:`iter_ticks`), special string
arguments may be passed. These are:
* ``"start"`` - The start of the replay
* ``"draft"`` - The start of the draft
* ``"pregame"`` - The end of the draft phase
* ``"game"`` - The time when the game clock hits 0
* ``"postgame"`` - The time the ancient is destroyed
* ``"end"`` - The last tick in the replay
These values will not be 100% accurate, but should be good +-50 ticks
"""
@property
def user_messages(self):
"""
The user messages for the current tick.
"""
return self._user_messages
@property
def game_events(self):
"""
The game events in the current tick.
"""
from .gameevents import GameEvent
events = []
for data in self._game_events:
events.append(GameEvent(stream_binding=self, data=data))
return events
# Just another layer of indirection
# These are properties for autodoc reasons mostly
@property
def world(self):
"""
The Skadi wold object for the current tick.
"""
return self._snapshot.world
@property
def tick(self):
"""
The current tick.
"""
return self._snapshot.tick
@property
def demo(self):
"""
The Skadi demo object that the binding is reading from.
"""
return self._demo
@property
def modifiers(self):
"""
The Skadi modifiers object for the tick.
"""
return self._snapshot.modifiers
@property
def string_tables(self):
"""
The string_table provided by Skadi.
"""
return self._stream.string_tables
@property
def prologue(self):
"""
The prologue of the replay.
"""
return self._stream.prologue
def __init__(self, demo, start_tick=None, start_time=None):
self._demo = demo
self._user_messages = []
self._game_events = []
# Do this to bootstrap go_to_tick("end")
self._state_change_ticks = {
"end": self.demo.file_info.playback_ticks - 2,
}
self.go_to_tick("end")
self._state_change_ticks = {
"start": 0,
"draft": self._time_to_tick(self.info.draft_start_time),
"pregame": self._time_to_tick(self.info.pregame_start_time),
"game": self._time_to_tick(self.info.game_start_time),
"postgame": self._time_to_tick(self.info.game_end_time),
"end": self.demo.file_info.playback_ticks - 2
}
if start_tick is not None:
self.go_to_tick(start_tick)
elif start_time is not None:
self.go_to_time(start_time)
else:
self.go_to_tick("game")
def iter_ticks(self, start=None, end=None, step=1):
"""
A generator that iterates through the demo's ticks and updates the
:class:`StreamBinding` to that tick. Yields the current tick.
The start parameter defines the tick to iterate from, and if not set, the
current tick will be used instead.
The end parameter defines the point to stop iterating; if not set,
the iteration will continue until the end of the replay.
The step parameter is the number of ticks to consume before yielding
the tick; the default of one means that every tick will be yielded. Do
not assume that the step is precise; the gap between two ticks will
always be larger than the step, but usually not equal to it.
"""
if start is None:
start = self.tick
elif start in self._state_change_ticks:
start = self._state_change_ticks[start]
if end in self._state_change_ticks:
end = self._state_change_ticks[end]
if end is not None:
assert start < end
if start > self.demo.file_info.playback_ticks or start < 0:
raise IndexError("Tick {} out of range".format(tick))
self._user_messages = []
self._game_events = []
last_tick = start - step - 1
self._stream = self.demo.stream(tick=start)
for snapshot in self._stream:
self._snapshot = Snapshot(*snapshot)
if end is not None and self.tick >= end:
break
self._user_messages.extend(self._snapshot.user_messages)
self._game_events.extend(self._snapshot.game_events)
if self.tick - last_tick < step:
continue
else:
last_tick = self.tick
yield self.tick
self._user_messages = []
self._game_events = []
def go_to_tick(self, tick):
"""
Moves to the given tick, or the nearest tick after it. Returns the tick
moved to.
"""
if tick in self._state_change_ticks:
tick = self._state_change_ticks[tick]
if tick > self.demo.file_info.playback_ticks or tick < 0:
raise IndexError("Tick {} out of range".format(tick))
self._stream = self.demo.stream(tick=tick)
self._snapshot = Snapshot(*next(iter(self._stream)))
self._user_messages = self._snapshot.user_messages[:]
self._game_events = self._snapshot.game_events[:]
return self.tick
def _time_to_tick(self, time):
"""
Converts a time to a tick.
"""
current_time = self.info.game_time
return int(self.tick + (time - current_time) * TICKS_PER_SECOND) - 2
def go_to_time(self, time):
"""
Moves to the tick with the given game time. Could potentially overshoot,
but not by too much. Will not undershoot.
Returns the tick it has moved to.
"""
target_tick = self._time_to_tick(time)
for tick in self.iter_ticks(start=target_tick):
if self.info.game_time > time:
return tick
    def __iter__(self):
        # Iterating the binding is iter_ticks() with its defaults: from the
        # current tick to the end of the replay, yielding every tick.
        return self.iter_ticks()
@property
def players(self):
"""
A list of :class:`Player` objects, one for each player in the game.
This excludes spectators and other non-hero-controlling players.
"""
from . import Player
return [p for p in Player.get_all(self) if
p.index != None and p.team != "spectator"]
@property
def info(self):
"""
The :class:`GameInfo` object for the replay.
"""
from .gameinfo import GameInfo
info = GameInfo.get_all(self)
assert len(info) == 1
return info[0]
@staticmethod
def from_file(filename, *args, **kwargs):
"""
Loads the demo from the filename, and then initialises the
:class:`StreamBinding` with it, along with any other passed arguments.
"""
import skadi.demo
demo = skadi.demo.construct(filename)
return StreamBinding(demo, *args, **kwargs) | true |
eeb133f0bd974909d1c09063ed98c2345a4f57db | Python | ohtjqkd/zun1121 | /venv/downloader.py | UTF-8 | 673 | 2.703125 | 3 | [] | no_license | import pandas, datetime
def download(keyword, contents):
    """Normalise scraped company fields and save them as a timestamped CSV.

    contents: mapping of column name -> list of values, with at least the
    keys 'company', 'homepage', 'address' and 'email' (email holds lists of
    "mailto:" links).  Returns True on success, False if writing the file
    fails.
    """
    # %H (hour) -- the original "%h" is a non-portable strftime alias for
    # the abbreviated month name, which was clearly not intended here.
    now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
    df = pandas.DataFrame(contents)
    # Trim surrounding whitespace; None/empty values pass through unchanged.
    df['company'] = df['company'].apply(lambda x: x.strip() if x else x)
    df['homepage'] = df['homepage'].apply(lambda x: x.strip() if x else x)
    # Drop the trailing "지도보기" ("view map") link text from addresses.
    df['address'] = df['address'].apply(lambda x: x.replace(" 지도보기", "") if x else x)
    df['email'] = df['email'].apply(lambda x: [mail.replace("mailto:", "") for mail in x] if x else x)
    try:
        df.to_csv(path_or_buf=f'{now}_{keyword}.csv', encoding="utf-8-sig")
        print("다운로드 완료")
    except OSError:  # only I/O failures count as a failed download
        print("다운로드 실패")
        return False
    return True
393070c82ecc79fc3f69de264bcac9eb3f914e0d | Python | komo-fr/AtCoder | /abc/146/b/main.py | UTF-8 | 234 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python3
import string

# AtCoder ABC146 B: Caesar-shift each uppercase letter of S forward by N.
N = int(input().split()[0])
S = input()
# A doubled alphabet lets a shift of up to 26 wrap around without any
# modular arithmetic.
table = list(string.ascii_uppercase * 2)
ans = "".join(table[table.index(ch) + N] for ch in S)
print(ans)
| true |
d0fe853ee897831332714d110444e711102e7a79 | Python | RotemHalbreich/Ariel_OOP_2020 | /Classes/week_09/TA/simon_group/5-1-while-loop.py | UTF-8 | 781 | 4.65625 | 5 | [] | no_license | # With while loop we can execute a set of statements as long as a condition is true.
# Print i as long as i is less than 6 (prints 1..5):
i = 1
while i < 6:
    print(i)
    i += 1
# break: leave the loop immediately, even though the condition is still true.
# Exit the loop when i is 3 (3 is printed before breaking, so 1..3 print):
i = 1
while i < 6:
    print(i)
    if i == 3:
        break
    i += 1
# continue: skip the rest of the current iteration and re-test the condition.
# Skip the print when i is 3 (prints 1, 2, 4, 5, 6):
i = 0
while i < 6:
    i += 1
    if i == 3:
        continue
    print(i)
# else on a while runs only when the loop finishes WITHOUT hitting break;
# here the break fires at i == 3, so the message below is never printed:
i = 1
while i < 6:
    print(i)
    i += 1
    if i == 3:
        break
else: # runs once when the condition becomes false -- skipped after a break
    print("i is no longer less than 6") # not reached in this example: the loop exits via break
22796b22b50c296a1f2b785d7867474abff12735 | Python | d-kozak/enticing | /scripts/tests/distribute_corpus.test.py | UTF-8 | 1,259 | 2.59375 | 3 | [
"MIT"
] | permissive | import unittest
from distribute_corpus import handle_args, split_files
class HandleArgumentsTest(unittest.TestCase):
def test_incorrect_arguments(self):
self.assertRaises(ValueError, lambda: handle_args([]))
class SplitFilesTest(unittest.TestCase):
def test_just_one_server(self):
res = split_files(['one', 'two', 'three'], ['main'])
self.assertDictEqual(res, {
'main': ['one', 'two', 'three']
})
def test_two_servers(self):
res = split_files(['one', 'two', 'three'], ['main', 'sec'])
self.assertDictEqual(res, {
'main': ['one', 'two'],
'sec': ['three']
})
def test_three_servers(self):
res = split_files(['one', 'two', 'three'], ['main', 'sec', 'foo'])
self.assertDictEqual(res, {
'main': ['one'],
'sec': ['two'],
'foo': ['three']
})
def test_three_servers_six_files(self):
res = split_files(['one', 'two', 'three', 'four', 'five', 'six'], ['main', 'sec', 'foo'])
self.assertDictEqual(res, {
'main': ['one', 'two'],
'sec': ['three', 'four'],
'foo': ['five', 'six']
})
if __name__ == '__main__':
    # Discover and run the test cases above when executed as a script.
    unittest.main()
| true |
a6b4ec605bf086afe961868f681fdd3e38377d66 | Python | mojiayi/mojiayi-learn-python | /basic-sort-algorithmic/count-sort.py | UTF-8 | 753 | 3.796875 | 4 | [] | no_license |
#coding=utf-8
'''
计数排序 (counting sort)

Generates `length` random digits, then sorts them with a stable counting
sort, printing every intermediate structure along the way.
'''
import random

def randomInt(length):
    # Build a list of `length` random digits in [0, 9].
    return [random.randint(0, 9) for _ in range(length)]

# -- script entry -------------------------------------------------------
length = 15
arr = randomInt(length)
print('before sort=', arr)

# Histogram: count[v] = number of occurrences of value v.
count = [0] * (max(arr) + 1)
for item in arr:
    count[item] += 1
print('count=', count)

# Prefix sums: count[v] becomes the number of elements <= v, i.e. the
# final 1-based position of the last v in the sorted output.
for index in range(1, len(count)):
    count[index] += count[index - 1]
print('count result=', count)

# Place elements from the back so equal values keep their relative order
# (this is what makes counting sort stable).
result = [0] * length
for index in range(length - 1, -1, -1):
    result[count[arr[index]] - 1] = arr[index]
    count[arr[index]] -= 1
print('after sort=', result)
78956770a1a61e6279a8bfe0e85c4679434e4f61 | Python | expert-m/simple-ciphers | /ciphers/binary_gamma_chiper.py | UTF-8 | 895 | 3.28125 | 3 | [] | no_license | from typing import Sequence
class BinaryGammaCipher:
    """Gamma (XOR keystream) cipher over an arbitrary alphabet.

    Every symbol is mapped to a number through ``alphabet``/``table`` and
    XOR-ed with the number for the corresponding symbol of the repeating
    key.  Decoding applies the same XOR and maps back to the alphabet.
    """
    @classmethod
    def encode(cls,
               message: str,
               key: str,
               table: Sequence,
               alphabet: Sequence) -> list:
        klen = len(key)
        return [
            table[alphabet.index(ch)] ^ table[alphabet.index(key[pos % klen])]
            for pos, ch in enumerate(message)
        ]

    @classmethod
    def decode(cls,
               message: Sequence,
               key: str,
               table: Sequence,
               alphabet: Sequence) -> str:
        klen = len(key)
        return ''.join(
            alphabet[table.index(code ^ table[alphabet.index(key[pos % klen])])]
            for pos, code in enumerate(message)
        )
| true |
73e515a5f56f5cdc2bcdeacd02a2d588a0ef9ea4 | Python | LiliaG-hiramatsu/POO | /Proyecto/EfectorFinal.py | UTF-8 | 1,733 | 3.109375 | 3 | [] | no_license | from time import time, localtime, strftime
from Reporte import Reporte
class EfectorFinal(Reporte):
    """Pistola de pintura (end effector): on/off state, colour and speed.

    Every state change is timestamped, printed and logged through
    Reporte.reportar; the human-readable message is returned to the caller.
    """

    # Colour codes accepted by ElegirColor.
    _NOMBRES_COLOR = {1: 'azul', 2: 'amarillo', 3: 'rojo'}

    def CambiarEstadoEF(self, EstadoEfector):
        """Turn the paint gun on (1) or off (0), log and return the message."""
        if EstadoEfector not in (0, 1):
            # Previously an unknown state fell through and crashed with an
            # UnboundLocalError on `l`; fail with a clear error instead.
            raise ValueError("EstadoEfector debe ser 0 o 1")
        fecha = strftime(" %a, %d %b %Y %H:%M:%S ", localtime())
        if EstadoEfector == 1:
            self.EstadoEF = 1
            l = "\n " + fecha + "\nPistola de pintura activada"
        else:
            self.EstadoEF = 0
            l = "\n " + fecha + "\nPistola de pintura desactivada"
        print(l)
        self.reportar(l)
        return l

    def ElegirColor(self, EstadoEfector, Color):
        """Select the paint colour (1=azul, 2=amarillo, 3=rojo) and log it.

        When the gun is off, the colour is still stored but the message
        notes that the gun is deactivated (matching the original behavior).
        """
        if EstadoEfector == 1:
            if Color not in self._NOMBRES_COLOR:
                # Previously this path crashed with an UnboundLocalError.
                raise ValueError("Color desconocido: {}".format(Color))
            self.Color = Color
            l = "Pintando de color " + self._NOMBRES_COLOR[Color]
        else:
            l = "La pistola de pintura se encuentra desactivada\n"
            if Color in self._NOMBRES_COLOR:
                self.Color = Color
                l += "Cambio de color a " + self._NOMBRES_COLOR[Color]
        print(l)
        self.reportar(l)
        return l

    def CambiarVel(self, vel):
        """Set the effector speed in rad/s, log and return the message."""
        self.Vel_ef = vel
        l = "La velocidad del efector final es " + str(vel) + " rad/s\n"
        print(l)
        self.reportar(l)
        return l

    def __init__(self):
        self.EstadoEF = 0  # starts switched off (APAGADO)
        self.Vel_ef = None
        self.Color = None
        Reporte.__init__(self)
| true |
e1230a4af5275ea155a543758736ba73ab9cf396 | Python | Martinjoh1/Creative-Writting | /a08_upc_start.py | UTF-8 | 5,320 | 3.65625 | 4 | [
"MIT"
] | permissive | ######################################################################
# Author: John Martin TODO: Change this to your names
# Username: MartinJoh TODO: Change this to your usernames
#
# Assignment: A08: UPC Bar Codes
#
# Purpose: Determine how to do some basic operations on lists
#
######################################################################
# Acknowledgements:
#
# None: Original work
# licensed under a Creative Commons
# Attribution-Noncommercial-Share Alike 3.0 United States License.
####################################################################################
import turtle
#guard=[]
#upc=[]
# draw( ,hieight )
#if color == 0
# t.color ("black")
#t.begin_fill
#draw rectangle
#t.end_fill()
def is_valid_input(barcode):
    '''
    Return True when `barcode` is a string of exactly 12 decimal digits.

    :param barcode: user-supplied UPC string
    :return: bool
    '''
    # The original only checked the length, so any 12 characters slipped
    # through and broke the binary conversion later on; the prompt asks
    # for digits [0-9] only.
    return len(barcode) == 12 and barcode.isdigit()
def convert_to_list (input_code):
    '''
    Split the (possibly numeric) code into a mutable list of characters.

    :param input_code: the 12 digit code as entered by the user
    :return: list of single-character strings
    '''
    return list(str(input_code))
def is_valid_modulo(upc_list):
    '''
    Append the UPC check digit to `upc_list` and return the list.

    Digits at odd positions (1st, 3rd, ... i.e. even 0-based indices) are
    weighted by 3, the rest by 1; the check digit is whatever brings the
    weighted total up to a multiple of 10.

    :param upc_list: list of single-digit strings
    :return: the same list with the integer check digit appended
    '''
    odd_sum = 0   # digits at 0-based indices 0, 2, 4, ... (weight 3)
    even_sum = 0  # digits at 0-based indices 1, 3, 5, ... (weight 1)
    for position, digit in enumerate(upc_list):
        if position % 2 == 0:
            odd_sum += int(digit)
        else:
            even_sum += int(digit)
    total_sum = even_sum + 3 * odd_sum
    # Bug fixes vs. the original: the parity test used the digit *value*
    # instead of its position, and a remainder of 0 returned the raw sum
    # instead of appending the check digit 0.
    check_number = (10 - total_sum % 10) % 10
    upc_list.append(check_number)
    return upc_list
def convert_binary(upc_barcode_list):
    """
    Translate the digit list (including the appended check digit, which may
    be an int) into the printed bar-pattern bit string:

        guard + 7 left-encoded digits + center + right-encoded digits + guard

    Returns "?" if any element is not a digit 0-9.
    """
    # Left-half and right-half 7-bit encodings for each digit.
    left_codes = {"0": "0001101", "1": "0011001", "2": "0010011",
                  "3": "0111101", "4": "0100011", "5": "0110001",
                  "6": "0101111", "7": "0111011", "8": "0110111",
                  "9": "0001011"}
    right_codes = {"0": "1110010", "1": "1100110", "2": "1101100",
                   "3": "1000010", "4": "1011100", "5": "1001110",
                   "6": "1010000", "7": "1000100", "8": "1001000",
                   "9": "1110100"}
    guardbar = "101"
    centerbar = "01010"
    binaryleft = ""
    binaryright = ""
    # str() lets the integer check digit appended by is_valid_modulo be
    # encoded too; the original looked it up with string keys and silently
    # dropped it.
    for digit in upc_barcode_list[0:7]:
        code = left_codes.get(str(digit))
        if code is None:
            return "?"
        binaryleft += code
    for digit in upc_barcode_list[7:]:
        code = right_codes.get(str(digit))
        if code is None:
            return "?"  # the original silently skipped bad right-hand digits
        binaryright += code
    print(guardbar + binaryleft + centerbar + binaryright + guardbar)
    return guardbar + binaryleft + centerbar + binaryright + guardbar
def binary_image(binary):
    '''
    Draw the bit string as a barcode: each '1' becomes a 4px-wide vertical
    bar, each '0' an equal-width gap.

    :param binary: string of '0'/'1' characters
    :return: None
    '''
    # The unused `guard` list and enumerate index from the original were
    # dropped; iterating the string directly is equivalent.
    binny = turtle.Turtle()
    binny.speed(0)
    binny.pensize(4)
    binny.shape("arrow")
    binny.penup()
    binny.setpos(-200, 150)
    binny.pendown()
    (x, y) = binny.pos()
    for bit in binary:
        if bit == "1":
            binny.goto(x, y - 200)
            binny.goto(x, y)
        x += 4
        binny.penup()
        binny.goto(x, y)
        binny.pendown()
def make_numbers(input_code):
    '''
    Write the human-readable code centred underneath the barcode.

    :param input_code: the 12 digit code string
    :return: None
    '''
    # Renamed the original local `bin`, which shadowed the builtin.
    pen = turtle.Turtle()
    pen.speed(0)
    pen.pensize(4)
    pen.shape("arrow")
    pen.penup()
    pen.right(90)
    pen.forward(100)
    pen.right(90)
    pen.forward(50)
    pen.pendown()
    pen.write(input_code, False, "center", ("Arial", 20, "normal"))
def main():
    '''Drive the program: prompt for a code, validate, encode and draw.'''
    screen = turtle.Screen()
    code = input("Enter a 12 digit code [0-9]: ")
    while not is_valid_input(code):
        code = input("Invalid number. Enter a 12 digit code [0-9]: ")
    digits = convert_to_list(code)
    with_check = is_valid_modulo(digits)
    bits = convert_binary(with_check)
    binary_image(bits)
    make_numbers(code)
    screen.exitonclick()


if __name__ == "__main__":
    main()
| true |
6ebb9359d690c1fe8067f98162b98c4be4bbb381 | Python | Abhi-Tiw1/ML-pipeline | /ml_pipeline_example.py | UTF-8 | 2,362 | 2.5625 | 3 | [
"MIT"
] | permissive |
"""
Example code
Code implements the ml pipeline on the iris dataset to do binary calssification
"""
from sklearn import datasets
from ml_pipeline import *
def run_ml_pipeline(X, y, fs_alg, nof, feats, clfrs, samp_type, no_out_vec, no_iters=50):
    """Call get_cv_out for `no_iters` random seeds and stack the results.

    Returns (all selected feature names across iterations, stacked
    per-iteration metric rows of width `no_out_vec`).
    """
    tot_metric = np.empty((0, no_out_vec))
    f_names_tot = np.empty((0))
    # Bug fix: the original re-assigned `no_iters = 50` here, silently
    # ignoring the caller's value of the parameter.
    for rseed in range(no_iters):
        # one cross-validation run per seed
        f_names, out_metric = get_cv_out(X, y, fs_alg, nof, np.array(feats),
                                         clfrs, samp_type, rseed)
        tot_metric = np.vstack((tot_metric, out_metric))
        f_names_tot = np.hstack((f_names_tot, f_names))
    return f_names_tot, tot_metric
iris = datasets.load_iris()
X = iris.data
y = iris.target
# keep only the first two classes to make the problem binary
X=X[:100,:]
y=y[:100]
target_names = iris.target_names
# feature names (iris has four numeric features)
feats=np.array(['1','2','3','4'])
# sampling type (no over-/under-sampling)
samp_type='none'
# number of features to select: 2 out of 4
nof=2
# classifiers to use
clfrs=['svm_rbf', 'svm_lnr','rf20', 'knn10','lr']
# feature selection algorithm - recursive feature elimination
fs_alg='rfe'
# metric names for performance measurement
metrics_= ['bacc','f1','mcc','pre','rec','coh']
no_metrics=len(metrics_)
# output column names --> one per (classifier, metric) pair
out_col_perfold=get_out_col_names(clfrs,metrics_)
# final column set also stores information about the balance of the dataset
cols = out_col_perfold+['balance']
no_fin_cols=len(cols)
# accumulator for the final results matrix
fin_arr_out=np.empty((0,no_fin_cols))
print('Balance is',np.round(np.mean(y),3))
print('Shape of arrays is ',X.shape,y.shape,'\n--------------')
no_out_vec=len(out_col_perfold)
f_names_tot, tot_metric= run_ml_pipeline(X,y,fs_alg,nof,feats,clfrs, samp_type, no_out_vec)
# save the per-fold results / feature analysis for this selection algorithm
outpath_fin='./ml_pipeline_out/'
if not os.path.exists(outpath_fin):
    os.makedirs(outpath_fin)
out_vec=save_perfold_res(tot_metric,out_col_perfold,f_names_tot,outpath_fin)
out_vec=np.round(out_vec,3)
# append the class balance as the last column of the summary row
out_vec=np.hstack((out_vec,np.round(np.mean(y),3)))
# stack the summary row onto the final results matrix
fin_arr_out=np.vstack((fin_arr_out,out_vec))
# write the aggregated results to CSV
fnm_all='output_fin.csv'
save_df_results(fin_arr_out,cols,outpath_fin,fnm_all)
| true |
64863c45be696c6a089ac87202930eda54ef2466 | Python | TkachenkoBrothers/ZI | /Decoder.py | UTF-8 | 8,086 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
import pyaudio
from wave import struct as wave_struct
import wave
from numpy import blackman
from numpy.fft import rfft
from numpy import array
import numpy
import sys, time, os
import curses
from threading import Thread
from config import*
loop_running = False  # global flag polled by Decoder._loop; the loop runs only while True
#WINDOW = blackman(vars.CHUNK) # Using blackman window. For more information see
try:  # Windows: msvcrt offers non-blocking console polling.
    import msvcrt
    def kbfunc():
        # Return the pressed key's ordinal, or 0 when no key is waiting.
        return ord(msvcrt.getch()) if msvcrt.kbhit() else 0
except:  # Unix/Mac: fall back to select() on stdin.
    import select
    def kbfunc():
        # Poll stdin for 1 ms (the original comment wrongly said 0.1 s);
        # return the typed line, or 0 if nothing is available.
        inp, out, err = select.select([sys.stdin], [], [], 0.001)
        return sys.stdin.readline() if sys.stdin in inp else 0
def play(vars):
    """Play WaveTest.wav through the default output device.

    vars supplies CHUNK, the number of frames streamed per iteration.
    """
    wav = wave.open(r"WaveTest.wav", "rb")
    pa = pyaudio.PyAudio()
    out = pa.open(format=pa.get_format_from_width(wav.getsampwidth()),
                  channels=wav.getnchannels(),
                  rate=wav.getframerate(),
                  output=True)
    # Stream chunk by chunk until readframes() signals end of file.
    data = wav.readframes(vars.CHUNK)
    while data != '':
        out.write(data)
        data = wav.readframes(vars.CHUNK)
    out.stop_stream()
    out.close()
    pa.terminate()
    wav.close()
class Decoder:
    """Microphone-side decoder for a frequency-shift audio text protocol.

    Each CHUNK of samples is reduced to its dominant frequency; runs of
    frequencies near the config targets (Target_0/Target_1 for bits,
    Target_b for a byte separator, Target_Begin for start-of-message) are
    then translated into a bit string and finally into text.  The shared
    `vars` config object supplies CHUNK, RATE, SAMPLE_SIZE and Target_*.
    """
    term_width = 80 # Stores the character width of the terminal
    def __init__(self, vars):
        # NOTE(review): hard-coded width; shadows the class default of 80.
        self.term_width = int(168)
        self.freq_list = []  # dominant frequency measured for each chunk
        self.binstr = ''  # decoded bit string: '0'/'1' plus 'b' separators
        self.finStr = ''  # final decoded text
        # Window applied to each chunk before the FFT to reduce leakage.
        self.WINDOW = blackman(vars.CHUNK) # Using blackman window. For more information see
    def _open_audio(self, vars):
        """ Opens the audio device for listening """
        audio = pyaudio.PyAudio()
        stream = None
        while True: # Fix for Mac OS
            stream = audio.open(format = pyaudio.paInt16,
                    channels = 1,
                    rate = vars.RATE,
                    input = True,
                    output = False, # TODO
                    frames_per_buffer = vars.CHUNK)
            try:
                # On Mac OS, the first call to stream.read usually fails,
                # so reopen the stream until a read succeeds.
                data = stream.read(vars.CHUNK)
                break
            except:
                stream.close()
        self.audio = audio
        self.stream = stream
    def _loop(self, vars):
        """ Record until the global loop_running flag is cleared, storing
        the dominant frequency of every chunk in self.freq_list. """
        last_n = [0] * vars.SAMPLE_SIZE # Stores the values of the last N frequencies.
        # This list is used as an array
        curpos = 0 # Stores the index to the array where we will store our next value
        last_avg = 1 # Stores the average of the last N set of samples.
        # This value will be compared to the current average to detect
        # the change in note
        # play stream and find the frequency of each chunk
        #i = 0
        while loop_running:
            perfect_cnt = 0
            data = self.stream.read(vars.CHUNK)
            # unpack the samples and apply the Blackman window
            # (len(data)/2 because each 16-bit sample is two bytes;
            # integer division is fine here)
            indata = array(wave_struct.unpack("%dh"%(len(data)/2), data))*self.WINDOW
            # Take the fft and square each value to get the power spectrum
            fftData=abs(rfft(indata))**2
            # find the maximum bin (skipping the DC component at index 0)
            which = fftData[1:].argmax() + 1
            # use quadratic interpolation around the max for sub-bin accuracy
            thefreq = 0
            if which != len(fftData)-1:
                y0, y1, y2 = numpy.log(fftData[which-1:which+2:])
                x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
                # convert the interpolated bin index to Hz
                thefreq = (which+x1)*vars.RATE/vars.CHUNK
            else:
                thefreq = which*vars.RATE/vars.CHUNK
            # Store this freq in the ring buffer of recent frequencies
            last_n[curpos] = int(thefreq)
            curpos += 1
            if curpos == vars.SAMPLE_SIZE:
                curpos = 0
                # NOTE(review): this_avg/last_avg are computed but unused.
                this_avg = sum(last_n) / vars.SAMPLE_SIZE # Compute the average
            #print(thefreq)
            self.freq_list.append(thefreq)
            #i += 1
            #if i == 1800:
            # break
    def theMostCommonFreq(self, start, end, vars):
        """ Classify freq_list[start:end] by which target frequency
        (within +/-10 Hz) occurs most often.

        Returns 0 or 1 for a bit, 2 for the byte separator (Target_b),
        -1 for the begin marker (Target_Begin).  Falls through to an
        implicit None on a tie or when nothing matches. """
        amount_zeros = 0
        amount_ones = 0
        amount_of_b = 0
        amount_of_begin = 0
        for a in range(start, end, 1):
            if (self.freq_list[a] >= vars.Target_0 - 10) and (self.freq_list[a] <= vars.Target_0 + 10):
                amount_zeros = amount_zeros+1
            if (self.freq_list[a] >= vars.Target_1 - 10) and (self.freq_list[a] <= vars.Target_1 + 10):
                amount_ones = amount_ones + 1
            if (self.freq_list[a] >= vars.Target_b - 10) and (self.freq_list[a] <= vars.Target_b + 10):
                amount_of_b = amount_of_b + 1
            if (self.freq_list[a] >= vars.Target_Begin - 10) and (self.freq_list[a] <= vars.Target_Begin + 10):
                amount_of_begin = amount_of_begin + 1
        #if amount_ones == 0 and amount_zeros == 0 and amount_of_b == 0:
        # return -1
        if amount_zeros > amount_ones and amount_zeros > amount_of_b and amount_zeros > amount_of_begin:
            return 0
        if amount_ones > amount_zeros and amount_ones > amount_of_b and amount_ones > amount_of_begin:
            return 1
        if amount_of_b > amount_ones and amount_of_b > amount_zeros and amount_of_b > amount_of_begin:
            return 2
        if amount_of_begin > amount_ones and amount_of_begin > amount_zeros and amount_of_begin > amount_of_b:
            return -1
    def analize_freq_list(self, vars):
        """ Walk freq_list, find where the begin marker ends and the first
        bit starts, then classify fixed-size windows into '0'/'1'/'b'
        symbols appended to self.binstr. """
        signal_size = 0
        beg = 0
        end = 0
        # Locate the first begin-marker chunk and the first bit chunk
        # (the bit search uses a wider +/-30 Hz tolerance).
        for a in range(0, len(self.freq_list), 1):
            if (self.freq_list[a] >= vars.Target_Begin - 10) and (self.freq_list[a] <= vars.Target_Begin+10)and beg == 0:
                beg = a
            if (self.freq_list[a] >= vars.Target_0 - 30) and (self.freq_list[a] <= vars.Target_0 + 30)or(self.freq_list[a] >= vars.Target_1 - 30) and (self.freq_list[a] <= vars.Target_1 +30):
                end = a - 1
                break
        # NOTE(review): the measured window size is immediately overridden
        # by the hard-coded value 16 (chunks per symbol).
        signal_size = end - beg
        signal_size = 16
        for s in range(end, len(self.freq_list), signal_size):
            if self.theMostCommonFreq(s, s + signal_size, vars) == 0:
                self.binstr += '0'
            if self.theMostCommonFreq(s, s + signal_size, vars) == 1:
                self.binstr += '1'
            if self.theMostCommonFreq(s, s + signal_size, vars) == 2:
                self.binstr += 'b'
            if self.theMostCommonFreq(s, s + signal_size, vars) == -1:
                break
    def transformBinStr(self):
        """ Split self.binstr on the 'b' separators, parse each group of
        bits as a binary character code and build self.finStr.
        Codes above 128 are rendered as '*'. """
        arr = []
        b = ''
        k = []
        self.finStr = ''
        # Collect the positions of all 'b' separators.
        for a in range(0, len(self.binstr), 1):
            if self.binstr[a] == 'b':
                k.append(a)
        for m in range(0, len(k), 1):
            if m > 0:
                # Bits between the previous separator and this one.
                # NOTE(review): the upper bound k[m]-1 drops the last bit
                # before each separator -- presumably intentional padding;
                # verify against the encoder.
                b = ''
                for i in range(k[m-1]+1, k[m]-1, 1):
                    b += self.binstr[i]
                arr.append(int(b, 2))
            if m == len(k)-1:
                # Bits after the final separator.
                b = ''
                for p in range(k[m]+1, len(self.binstr), 1):
                    b += self.binstr[p]
                arr.append(int(b, 2))
        for j in arr:
            if j <= 128:
                self.finStr += chr(j)
            else:
                self.finStr += "*"
    def _close_audio(self):
        """ Call this function at the end """
        self.stream.close()
        self.audio.terminate()
    def process(self, vars):
        # Decode whatever was captured by _loop and print the result.
        print 'loop_finish'
        self.analize_freq_list(vars)
        print self.binstr
        self._close_audio()
        self.transformBinStr()
        self.binstr = ''
        print self.finStr
    def decode(self, vars):
        # Reset state, open the microphone and capture until loop_running
        # is cleared (process() must be called afterwards to decode).
        self.term_width = int(168)
        self.freq_list = []
        self.binstr = ''
        self.finStr = ''
        self._open_audio(vars)
        self._loop(vars)
| true |