blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e570bdf18b78cee5262c7807f75a04290748dffa | Python | ChloeZ52/punchin-bot | /src/bot.py | UTF-8 | 5,906 | 2.859375 | 3 | [] | no_license | import os
import random
import discord
from dotenv import load_dotenv
from discord.ext import commands
from datetime import datetime
from gsheet import *
# Load configuration from the .env file into process environment variables.
load_dotenv()
TOKEN = os.getenv("DISCORD_TOKEN")
GUILD = os.getenv("DISCORD_GUILD")
PUNCHIN_CHANNEL = os.getenv("PUNCHIN_CHANNEL")
SPREADSHEET_ID = os.getenv("SPREADSHEET_ID")
TIME_FORMAT = os.getenv("TIME_FORMAT")
DATE_FORMAT = os.getenv("DATE_FORMAT")
DATE_RANGE = os.getenv("DATE_RANGE")
USER_RANGE = os.getenv("USER_RANGE")
# Enable the members privileged intent
# (required so guild.members and on_member_join receive real data).
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix="$", intents=intents)
@bot.event
async def on_ready():
    """Log the configured guild and its member list once the bot connects."""
    guild = discord.utils.find(lambda g: g.name == GUILD, bot.guilds)
    print(
        f"{bot.user.name} is connected to the following guild:\n"
        f"{guild.name}(id: {guild.id})\n"
    )
    members = "\n - ".join([member.name for member in guild.members])
    print(f"Guild Members:\n - {members}\n")
@bot.event
async def on_voice_state_update(member, before, after):
    """Record voice-channel joins/leaves ("punch in/out") in Google Sheets.

    On join: announces in the punch-in channel and appends the current
    time to the member's cell for today.  On leave: announces and turns
    the stored start time into a "start ~ end" range.
    """
    # Check if action is triggered in the right guild
    # NOTE(review): if both before.channel and after.channel are None,
    # `guild` is never bound and the next line raises UnboundLocalError —
    # presumably that combination never occurs for this event; verify.
    if before.channel != None:
        guild = before.channel.guild
    elif after.channel != None:
        guild = after.channel.guild
    if guild.name != GUILD:
        return
    user = member.name
    now = datetime.now()
    current_time = now.strftime(TIME_FORMAT)
    punchin_channel = discord.utils.find(
        lambda g: g.name == PUNCHIN_CHANNEL, bot.get_all_channels()
    )
    sheet = gsheet()
    # Insert today's date if not found
    date_index = sheet.get_today_index(SPREADSHEET_ID)
    if date_index == None:
        today = datetime.today()
        sheet.append_value(
            SPREADSHEET_ID, DATE_RANGE, {"values": [[today.strftime(DATE_FORMAT)]]}
        )
    # Find the cell address to insert current_time
    update_index = sheet.get_update_address(SPREADSHEET_ID, user)
    update_range = f"{update_index}:{update_index}"
    # When user joins the voice channel
    if before.channel == None and after.channel != None:
        await punchin_channel.send(f"{user} joins the Study Room at {current_time}")
        prev_time = sheet.get_value(SPREADSHEET_ID, update_range)
        # Append the join time to any earlier content in today's cell.
        new_value = (
            current_time if len(prev_time) == 0 else prev_time[0][0] + current_time
        )
        sheet.insert_value(SPREADSHEET_ID, update_range, {"values": [[new_value]]})
    # When user leaves the voice channel
    elif before.channel != None and after.channel == None:
        await punchin_channel.send(f"{user} leaves the Study Room at {current_time}")
        prev_time = sheet.get_value(SPREADSHEET_ID, update_range)
        # Close the open interval: "<join time> ~ <leave time>\n".
        new_value = f"{prev_time[0][0]} ~ {current_time}\n"
        sheet.insert_value(SPREADSHEET_ID, update_range, {"values": [[new_value]]})
@bot.event
async def on_member_join(member):
    """Add a newly joined guild member to the user row in Google Sheets.

    The member's name is appended to USER_RANGE only when it is not
    already present, so re-joins do not create duplicate columns.
    """
    user = member.name
    print(f"{user} joined!\n")
    sheet = gsheet()
    user_arr = sheet.get_value(SPREADSHEET_ID, USER_RANGE)
    # A plain membership test replaces the original index()/ValueError
    # dance, which fell through (without appending) when the user sat at
    # position 0 and returned for any other position — same net effect,
    # but explicit.
    if user in user_arr[0]:
        return
    print(f"Adding user {user} to Google Sheets...")
    user_arr[0].append(user)
    sheet.insert_value(SPREADSHEET_ID, USER_RANGE, {"values": user_arr})
@bot.command(name="echo", help="Use the bot to echo input")
async def echo_input(ctx, *, arg):
await ctx.send(f"This is from user {ctx.message.author.name}: {arg}")
@bot.command(name="stop", help="Shutdown the bot")
async def stop_bot(ctx):
await ctx.send("Stopping the bot...")
await bot.logout()
@bot.command(name="get_my_study_record", help="Retrieve user's study time record")
async def send_to_google_sheet(ctx):
user = ctx.author.name
sheet = gsheet()
dates_arr = sheet.get_value(SPREADSHEET_ID, DATE_RANGE)
user_index = sheet.get_user_index(SPREADSHEET_ID, user)
user_range = f"{user_index}:{user_index}"
user_data_arr = sheet.get_value(SPREADSHEET_ID, user_range)
output = ""
for i, dates in enumerate(dates_arr):
if i != 0:
date = dates[0]
user_data = "" if len(user_data_arr[i]) == 0 else user_data_arr[i][0]
user_data = user_data.replace("\n", " | ")
output += date + ": | "
output += user_data + "\n"
await ctx.send(output)
@bot.command(name="get_my_study_hours", help="Retrieve user's study hours")
async def send_to_google_sheet(ctx):
user = ctx.author.name
sheet = gsheet()
dates_arr = sheet.get_value(SPREADSHEET_ID, DATE_RANGE)
user_index = sheet.get_user_index(SPREADSHEET_ID, user)
user_range = f"{user_index}:{user_index}"
user_data_arr = sheet.get_value(SPREADSHEET_ID, user_range)
output = ""
for i, dates in enumerate(dates_arr):
if i != 0:
date = dates[0]
output += date + ": "
if len(user_data_arr[i]) == 0:
hours = 0
else:
hours = 0
for time in user_data_arr[i][0].split("\n"):
if len(time) != 0:
start_time = time.split("~")[0]
try:
end_time = time.split("~")[1]
hour_digit = int(end_time.split(":")[0]) - int(
start_time.split(":")[0]
)
minutes_digit = int(end_time.split(":")[1]) - int(
start_time.split(":")[1]
)
hours += hour_digit + minutes_digit / 60
# Might be in the middle of study time
except IndexError:
hours += 0
output += str(round(hours, 2)) + "\n"
print(output)
await ctx.send(output)
bot.run(TOKEN) | true |
600575b08e3f2d3506e223a27be184809a5b7a08 | Python | Ashish-012/Competitive-Coding | /dp/stairCase.py | UTF-8 | 597 | 3.703125 | 4 | [] | no_license | '''
We pass a list qb to store the paths we already found with which we can reach our goal. We check if a path exists in our list to
reach the goal we don't calculate it again we just return its value. So this decreases the complexity from O(2^n) to O(n) as we
dont need to calculate all the paths we already calculated in previous iterations.
'''
def countStair(n, qb):
    """Count the ways to climb *n* stairs taking 1, 2 or 3 steps at a time.

    *qb* is the memo table, indexed by remaining stair count with entries
    initialised to None; memoisation drops the cost from O(3^n) to O(n).
    """
    if n == 0:
        return 1
    if n < 0:
        return 0
    # Compare against None explicitly: a bare truthiness test would also
    # skip any legitimately cached falsy value.
    if qb[n] is not None:
        return qb[n]
    path = countStair(n-1, qb) + countStair(n-2, qb) + countStair(n-3, qb)
    qb[n] = path
    return path
# Demo: number of ways to climb a 10-step staircase.
n = 10
qb = [None]*100
print(countStair(n, qb))
| true |
58530165177836adfe8d0ad6271058c345a514d9 | Python | nlavie/Differential-Gene-Expression- | /untitled folder/statistics/geneanalysis.py | UTF-8 | 2,500 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import pandas as pd
def analyze(data, column):
    """Print how many rows of *data* fall below the 0.05 and 0.1 cut-offs in *column*."""
    for threshold, template in ((0.05, '%s: %s < 0.05'), (0.1, '%s: %s < 0.1')):
        significant = data[data[column] < threshold]
        print(template % (column, significant.shape[0]))
# Summarise how many genes pass the FDR cut-offs for each contrast
# (h2c: healthy vs convalescent, h2hm: vs hemorrhagic, h2f: vs fever).
ttest = pd.read_csv('ttest.fdr.csv')
print('Results by ttest:')
analyze(ttest, '2-fdr h2c')
analyze(ttest, '2-fdr h2hm')
analyze(ttest, '2-fdr h2f')
# Skipping for performance
# ranksum = pd.read_csv('ranksum.fdr.csv')
# print('Results by ranksums:')
# analyze(ranksum, '2-fdr h2c')
# analyze(ranksum, '2-fdr h2hm')
# analyze(ranksum, '2-fdr h2f')
def plot_DE(data, column, title):
    """Save a rank-ordered scatter plot of *column* to '<column>.png'."""
    ranked = data.sort_values(column)[column]
    ranks = list(range(ranked.count()))
    plt.scatter(ranks, ranked, c='r', marker='x')
    plt.axhline(y = 0.05)  # significance threshold line
    plt.xlabel('rank')
    plt.ylabel(column)
    plt.title(title)
    plt.savefig('%s.png' % column)
    plt.clf()
# One plot per contrast/test combination (1-/2-tailed, raw and FDR-corrected).
plot_DE(ttest, 'h2c 1-pvalue', 'Healthy to Convalescent Differential Expression 1-tailed')
plot_DE(ttest, '1-fdr h2c', 'Healthy to Convalescent Differential Expression 1-tailed FDR')
plot_DE(ttest, 'c2h 1-pvalue', 'Convalescent to Healthy Differential Expression 1-tailed')
plot_DE(ttest, '1-fdr c2h', 'Convalescent to Healthy Differential Expression 1-tailed FDR')
plot_DE(ttest, 'h2c 2-pvalue', 'Healthy to Convalescent Differential Expression 2-tailed')
plot_DE(ttest, '2-fdr h2c', 'Healthy to Convalescent Differential Expression 2-tailed FDR')
plot_DE(ttest, 'h2hm 1-pvalue', 'Healthy to Hemorrhagic Differential Expression 1-tailed')
plot_DE(ttest, '1-fdr h2hm', 'Healthy to Hemorrhagic Differential Expression 1-tailed FDR')
plot_DE(ttest, 'hm2h 1-pvalue', 'Hemorrhagic to Healthy Differential Expression 1-tailed')
plot_DE(ttest, '1-fdr hm2h', 'Hemorrhagic to Healthy Differential Expression 1-tailed FDR')
plot_DE(ttest, 'h2hm 2-pvalue', 'Healthy to Hemorrhagic Differential Expression 2-tailed')
plot_DE(ttest, '2-fdr h2hm', 'Healthy to Hemorrhagic Differential Expression 2-tailed FDR')
plot_DE(ttest, 'h2f 1-pvalue', 'Healthy to Fever Differential Expression 1-tailed')
plot_DE(ttest, '1-fdr h2f', 'Healthy to Fever Differential Expression 1-tailed FDR')
plot_DE(ttest, 'f2h 1-pvalue', 'Fever to Healthy Differential Expression 1-tailed')
plot_DE(ttest, '1-fdr f2h', 'Fever to Healthy Differential Expression 1-tailed FDR')
plot_DE(ttest, 'h2f 2-pvalue', 'Healthy to Fever Differential Expression 2-tailed')
plot_DE(ttest, '2-fdr h2f', 'Healthy to Fever Differential Expression 2-tailed FDR')
| true |
5a5e1bb7028f8a7593f463fbe8bf6e8f5f33aec0 | Python | raja08blr/PythonExamples | /privacy_link_scraper.py | UTF-8 | 4,065 | 2.78125 | 3 | [] | no_license | '''Author ***RAJAREDDY*****'''
import requests
from bs4 import BeautifulSoup
import xlwt
from xlwt import Workbook
import time
import ast
# url to scrape
url = "https://www.microsoft.com/en-in/store/top-free/apps/pc"
base = "https://www.microsoft.com"
# get all categories
def getCategory(url):
    """Return (name, href) tuples for every store category except the
    leading "all categories" entry.  ' & ' in names becomes '_and_'."""
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'lxml')
    menu = soup.find('div', {"id": "refine-by-menu-title-Category"})
    categories = [
        (anchor['aria-label'].replace(' & ', '_and_'), anchor['href'])
        for anchor in menu.findAll('a')
    ]
    # Drop the "all categories" item from the front of the list.
    return categories[1:]
# info of the app
# add error handling
def writeItemInfo(url,row,category,sheet):
    """Fetch one app page and write [name, category, privacy-policy URL]
    into *sheet* at the given *row*.

    Returns 0 on success and -1 after ~10 failed retries; the caller
    folds this into its row-counter increment.
    """
    policy = "NULL"
    app_page = requests.get(url)
    i = 0
    # Retry loop for non-200 responses: a 404 pauses for manual
    # confirmation on stdin, anything else waits 5 s before retrying.
    while app_page.status_code != 200:
        print("status code "+ str(app_page.status_code))
        i +=1
        if i>10:
            return -1
        if app_page.status_code == 404:
            input("continue?")
        else:
            time.sleep(5)
        app_page = requests.get(url)
    # ----------------here --------
    data = BeautifulSoup(app_page.text,'lxml')
    name = data.find('h1',{"id":"DynamicHeading_productTitle"}).getText()
    print("name : "+ name)
    itemTemp = data.find('h4',text="Additional terms")
    items = []
    if itemTemp != None:
        items = itemTemp.find_parent('div').findAll('a')
    # Each anchor carries a JSON-like 'data-m' attribute; the entry
    # whose 'cN' is 'PrivacyPolicy Uri' links the privacy policy.
    for item in items:
        cn = ast.literal_eval(item.attrs['data-m'])['cN']
        if cn == 'PrivacyPolicy Uri':
            policy = item['href']
    content = [name,category,policy]
    print(content)
    for t in range(len(content)):
        sheet.write(row,t,content[t])
    return 0
# get links of all items on current page
def getLinksOnPage(ref):
    """Collect the hrefs of every app tile on the current listing page."""
    print("got link on page\n")
    grid = ref.find('div', {"class": "c-group f-wrap-items context-list-page"})
    return [anchor['href'] for anchor in grid.findAll('a')]
# creates .xls file , use pandas for xlsx
def create_excel(cat_url,category):
    """Walk every page of *cat_url* and save one .xls workbook for the
    category with columns: name, category, privacy policy.

    NOTE(review): the output directory is hard-coded to a developer's
    Windows home folder; adjust `file` before running elsewhere.
    """
    print("creating excel \n")
    file = "C:\\Users\\rmishra1\\Desktop\\work\\policyReader\\scraper\\"+category+".xls"
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet(category)
    content = ["name","category","privacy policy"]
    for i in range(len(content)):
        sheet.write(0,i,content[i])
    val = 1
    while True:
        content = requests.get(cat_url)
        # Poll until the listing page responds with 200.
        while content.status_code != 200:
            time.sleep(5)
            content = requests.get(cat_url)
        ref = BeautifulSoup(content.text,'lxml')
        next_btn = ref.find('a', {"aria-label": "next page"})
        if next_btn != None:
            next_btn = next_btn['href']
        elif cat_url[-2] != '-':
            # No "next" link but the URL doesn't look like a final page:
            # retry once after a pause before treating it as the end.
            time.sleep(15)
            content = requests.get(cat_url)
            ref = BeautifulSoup(content.text,'lxml')
            next_btn = ref.find('a', {"aria-label": "next page"})
            next_btn = next_btn['href']
        else:
            next_btn = '-1'
        #print(next_btn)
        # NOTE: 'list' shadows the built-in within this function.
        list = getLinksOnPage(ref)
        for l in list:
            print(l)
            val+=1 + writeItemInfo(base+l,val,category,sheet)
        print(next_btn[-2])
        workbook.save(file)
        # '-' in the sentinel position (set above) marks the final page.
        if next_btn[-2] == '-':
            break
        cat_url = base+next_btn
def scrape():
    """Scrape every category of the Microsoft Store top-free app list."""
    cat = getCategory(url)
    print(cat)
    for c in cat:
        print("now scraping: "+ str(c[0]))
        create_excel(base+c[1],c[0])
# Kick off the scrape when the module is executed.
scrape()
| true |
5d633760f1e0b0c962f6745c3a9cd1a2264e4a65 | Python | McFlyWYF/Python | /Test.py | UTF-8 | 2,359 | 4.40625 | 4 | [] | no_license | import sys
print (sys.argv)
print("Hello World");print("中北大学")
if False:
    print("Answer")
    '''这是多行
    注释'''
    print("True")
else:
    print("Answer")
    print("False")
print('我是***') # simple print statement
import sys;
x = 'runoob'; print(x + '\n')
x = 'a';y = 'b';
print(x,y) # both values printed on one line
print(x),print(y)
'''变量赋值不需要类型声明
在使用前必须赋值,赋值以后变量才会被创建
'''
# Variables need no type declaration, but must be assigned before use.
count = 1 # integer
number = 2.6 # float
name = "wang" # string
print (count)
print(number)
print(name)
# Assigning several variables at once
a = b = c = 1
d,e,f = 1,2.3,"jone"
print(a,b,c)
print(d,e,f)
'''标准数据类型
1.Numbers 数字
2.String 字符串
3.List 列表
4.Tuple 数组
5。Dictionary 字典
'''
# Python numbers (numeric values are immutable)
'''数字数据类型存储数值,不可改变的数据类型'''
var1 = 1
var2 = 2
print(var1)
# Delete the reference to the object
del var1
# Python 2 supported four numeric types:
# int, long, float, complex
# complex numbers: real and imaginary parts are both floats
print(complex(1.1,2.2))
print(1.2 + 2.3j)
# Taking a substring:
# indices may be empty or negative
# variable[start : end]
a = "hello world this is china "
print(a[6:11])
print(a * 2) # printed twice
print(a + "ok") # string concatenation
# Lists: ordered collections of characters, numbers, strings or nested
# lists; written with [] and sliceable with [:]
# NOTE: the names list/tuple/dict below shadow the built-ins (tutorial code).
list = ['python',520,'a',23.45]
listadd = ['android']
print(list) # print the whole list
print(list[0]) # first element, like an array
print(list[0:2]) # first and second elements
print(list * 2) # repeated twice
print(list + listadd)
# Reassign the first element
list[0] = "hello"
print(list)
# Tuples: like lists, written with (), and not reassignable
tuple = ('python',123,23.56,"hello world")
tupleadd = ('android')
print(tuple)
print(tuple[0])
print(tuple[0:3])
print(tuple * 2)
# The statement below is invalid because tuples forbid item assignment
'''tuple[0] = "hello"
print(tuple)
'''
# Dictionaries: unordered collections accessed by key, written with
# "{}", made of key/value pairs
dict = {"name":"Jhon","number":123}
dict[1] = "One"
dict[2] = "Two"
print(dict[1]) # value stored under key 1
print(dict[2])
print(dict) # the whole dictionary
print(dict.keys()) # all keys
print(dict.values()) # all values
| true |
e7292a2dc6f7601cbde357022600bfcd6606125d | Python | Halogen/Agent-based-covid-modelling | /covmod-master/covmod/main.py | UTF-8 | 479 | 3.125 | 3 | [
"MIT"
] | permissive | """
File Doc
"""
import numpy as np
class Stan():
    """
    Small demo class: holds a scratch integer ``temp`` and a derived
    random value ``val``.
    """
    def __init__(self):
        seed_value = 1
        self.temp = 5
        self.val = self.tester(seed_value)
    def tester(self, temp_value):
        """
        Return ``temp_value`` shifted by a random int in [0, 10) plus
        the current ``self.temp``.
        """
        return temp_value + np.random.randint(10) + self.temp
    def other_func(self):
        """
        Square ``temp`` in place (a second public method keeps pylint happy).
        """
        self.temp = self.temp ** 2
| true |
d2c16d0f8333550514227971bdb56289f7a15887 | Python | zeionara/clusterizer | /clusterizer/filter.py | UTF-8 | 2,677 | 2.78125 | 3 | [
"BSD-3-Clause"
] | permissive | import re, csv, time
import datetime, argparse
from clusterizer.loggers import log, log_percents
from clusterizer.performance import measure
from clusterizer.filesystem import read_file, write_file, clear_file, extract
def parse_args():
    """Build and evaluate the command-line interface for the filter tool."""
    parser = argparse.ArgumentParser(description = 'Filter out articles which do not contain given keywords')
    parser.add_argument('-in', '--input', help = 'input file contating lemmatized text for filtering', type = str, required = True, dest = 'input_filename')
    parser.add_argument('-out', '--output', help = 'output file for saving text after filtering', type = str, required = True, dest = 'output_filename')
    parser.add_argument('-k', '--keywords', help = 'file containing keywords for filtering', type = str, required = True, dest = 'keywords_filename')
    parser.add_argument('-n', '--number-of-documents', help = 'number of documents to analyze from the input file',
        type = int, required = False, default = -1, dest = 'number_of_documents')
    parser.add_argument('-s', '--step', help = """step to output logging messages about the process (percents) -
    make sense only if whole-size parameter also has been set up""", type = float, required = False, default = -1, dest = 'log_step')
    parser.add_argument('-w', '--whole-size', help = 'whole number of articles in the document - useful for generating logging messages',
        type = int, required = False, default = -1, dest = 'whole_size')
    parser.add_argument('-i', '--index', help = 'index of the line (starting from 0) contating text for lemmatization if input file has .csv format',
        type = int, required = False, dest = 'index')
    parser.add_argument('-v', '--verbose', help = 'turn on logging messages', action = 'store_const', const = True, default = False, dest = 'verbose')
    parser.add_argument('-c', '--clear', help = 'clear output file before start', action = 'store_const', const = True, default = False, dest = 'clear')
    return parser.parse_args()
def main():
    """Extract the documents matching the keyword file from the input file."""
    args = parse_args()
    log('Starting extraction of the required documents...', args.verbose)
    if args.clear:
        clear_file(args.output_filename)
    # NOTE(review): split('.')[1] assumes exactly one dot in the path;
    # filenames with extra dots (or none) will misbehave — verify inputs.
    is_csv = args.input_filename.split('.')[1] == 'csv'
    # CSV inputs are parsed row-by-row and the text column selected by
    # --index; plain-text inputs are consumed line-by-line unchanged.
    measure(args.whole_size)(extract)(args.input_filename, args.output_filename,
        (lambda file: csv.reader(file)) if is_csv else (lambda file: file),
        (lambda row: row[args.index]) if is_csv else (lambda row: row),
        args.whole_size, args.log_step, args.number_of_documents, args.verbose, args.keywords_filename)
    log('Finished extraction of the required documents!', args.verbose)
if __name__ == '__main__':
main() | true |
59440d35d82564d693338329b38abe9dadc52c86 | Python | bishnusilwal/python-files | /10.convert_seconds_to_day,hour,minutes,second.py | UTF-8 | 654 | 4.46875 | 4 | [] | no_license | #Lab_Exercise_1
"""
Q10. Write a Python program to convert seconds to day, hour, minutes and seconds.
"""
print("This program will convert seconds to day, hour, minutes and seconds")
givenTime = int(input("Write the second to which you want to convert"))
time = givenTime
day = time // (24*3600) # // Divides and returns the integer value of the quotient. It dumps the digits after the decimal.
time = time % (24*3600) # % Divides and returns the value of the remainder.
hour = time // (3600)
time = time % 3600
minutes = time // 60
time = time % 60
second =time
print("The answer is",day,"days",hour,"hours",minutes,"minutes",second,"seconds")
| true |
abac4b0f32aaf32d02717133468793dc92961109 | Python | fank-cd/python_leetcode | /Problemset/house-robber-ii/house-robber-ii.py | UTF-8 | 844 | 3.21875 | 3 | [] | no_license |
# @Title: 打家劫舍 II (House Robber II)
# @Author: 2464512446@qq.com
# @Date: 2020-11-04 11:54:16
# @Runtime: 40 ms
# @Memory: 13.4 MB
class Solution:
    def rob(self, nums: list[int]) -> int:
        """Maximum loot from houses arranged in a circle (LeetCode 213).

        Because the houses form a ring, the first and last house cannot
        both be robbed, so the answer is the better of solving the
        linear house-robber DP on nums[1:] or on nums[:-1].
        """
        if not nums:
            return 0
        size = len(nums)
        if size <= 2:
            return max(nums)

        def helper(nums):
            # Linear (non-circular) house robber on *nums*.
            if not nums:
                return 0
            size = len(nums)
            if size <= 2:
                return max(nums)
            dp = [0] * size
            dp[0] = nums[0]
            dp[1] = max(nums[0], nums[1])
            for i in range(2, size):
                # Either skip house i, or rob it on top of dp[i-2].
                dp[i] = max(dp[i - 1], nums[i] + dp[i - 2])
            return dp[-1]

        # Leftover debug print statements removed.
        return max(helper(nums[1:]), helper(nums[:size - 1]))
| true |
405161a07b89b18f889c93dda63af16231343efb | Python | kirankumbhar/python_data_structures_algorithms | /data_structures/python/linked_list/reverse.py | UTF-8 | 697 | 3.84375 | 4 | [] | no_license | from singly_linked_list import (
Node,
get_input_for_linked_list,
create_linked_list,
print_linked_list,
)
def reverse_linked_list(head: Node):
    """Reverse a singly linked list in place and return the new head."""
    reversed_head = None
    node = head
    while node:
        # Detach the node and push it onto the front of the reversed list.
        following = node.next
        node.next = reversed_head
        reversed_head = node
        node = following
    return reversed_head
if __name__ == "__main__":
elements = get_input_for_linked_list()
linked_list_head = create_linked_list(elements)
print_linked_list(linked_list_head)
print("======= Reversed Linked List =======")
new_head = reverse_linked_list(linked_list_head)
print_linked_list(new_head) | true |
a73dff241b0d4a05d383cb050c2a2fd20a0119b6 | Python | Baazigar007/Smart-Traffic-Light | /STL_Py/venv/Images/image_car_count.py | UTF-8 | 772 | 3.046875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 01:12:51 2019
@author: Baazigar
"""
import cv2
def counting():
    """Detect cars in a static image with a Haar cascade, print the count
    and display the image with bounding boxes.

    NOTE(review): image and cascade paths are hard-coded to a specific
    Windows machine; parameterise before reuse.
    """
    img = "C:\\Users\\Baazigar\\Desktop\\Images for project\\cars front.jpg"
    cascade_src = 'C:\\Users\\Baazigar\\Desktop\\Images for project\\opencv-samples-master\\vehicle-detection-haar\\cars3.xml'
    car_cascade = cv2.CascadeClassifier(cascade_src)
    img = cv2.imread(img,1)
    #img = cv2.resize(img,(16*100,9*100))
    # Haar cascades operate on grayscale input.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cars = car_cascade.detectMultiScale(gray, 1.05 ,2)
    print (len(cars))
    # Draw a yellow rectangle around each detection.
    for (x,y,w,h) in cars:
        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,255),2)
    cv2.imshow('rectangled', img)
    cv2.waitKey(0)
counting() | true |
872dbc27bf54853b38fc1a9bd85e8e8ff037ac9c | Python | ThanatosDi/EpubConv_Python | /modules/engine/fanhuaji.py | UTF-8 | 6,994 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | import asyncio
import json
from typing import IO, Any, Callable, Dict, List, Optional, Tuple, Type, Union
import aiohttp
import requests
API = 'https://api.zhconvert.org'
class FanhuajiEngine():
    """Client for the zhconvert.org ("Fanhuaji") text conversion API.

    Offers a blocking entry point (convert) and an asyncio-based one
    (async_convert) that splits long texts into chunks.
    API reference: https://docs.zhconvert.org/api/convert/
    """

    # Keyword arguments accepted by the /convert endpoint.  Kept as a
    # single class-level list so convert() and async_convert() cannot
    # drift apart (they previously each held their own copy).
    ALLOW_KEYS = [
        'text',
        'converter',
        'ignoreTextStyles',
        'jpTextStyles',
        'jpStyleConversionStrategy',
        'jpTextConversionStrategy',
        'modules',
        'userPostReplace',
        'userPreReplace',
        'userProtectReplace',
    ]

    def _check_kwargs(self, kwargs: dict) -> None:
        """Validate conversion kwargs.

        Raises:
            FanhuajiInvalidKey: a key is not accepted by the API.
            FanhuajiMissNecessarykey: 'text' or 'converter' is missing.
        """
        error_keys = [key for key in kwargs.keys() if key not in self.ALLOW_KEYS]
        if error_keys:
            raise FanhuajiInvalidKey(f"Invalid key: {', '.join(error_keys)}")
        if kwargs.get('text', None) is None or kwargs.get('converter', None) is None:
            raise FanhuajiMissNecessarykey(f"Miss necessary key")

    def _request(self, endpoint: str, payload: dict):
        """Blocking GET against the API; returns the decoded JSON response."""
        with requests.get(f'{API}{endpoint}', data=payload) as response:
            if response.status_code == 200:
                response.encoding = 'utf-8'
                return json.loads(response.text)
        raise RequestError(
            f'zhconvert Request error. status code: {response.status_code}')

    async def _async_request(self, session, endpoint: str, payload: dict):
        """Async GET against the API using *session*; returns decoded JSON.

        The caller owns *session*.  Closing it here (as the original
        code did) broke multi-chunk conversions: the second chunk was
        sent over an already-closed session.
        """
        async with session.get(f'{API}{endpoint}', data=payload) as response:
            if response.status == 200:
                return await response.json()
        raise AsyncRequestError(
            f'zhconvert AsyncRequest error. status code: {response.status}')

    def _slice(self, content: str) -> Optional[List[str]]:
        """Split *content* into 50,000-character chunks (API size limit)."""
        chunks = []
        chunks_count = len(content) // 50_000 + 1
        for i in range(0, chunks_count):
            chunks.append(content[50_000 * i:50_000 * (i + 1)])
        return chunks

    def _text(self, response) -> Union[None, str]:
        """Extract the converted text, or None when the API reports an error."""
        if response['code'] != 0:
            return None
        return response['data']['text']

    def convert(self, **kwargs):
        """Convert text synchronously.

        Required kwargs: ``text`` (the text to convert) and ``converter``
        (one of Simplified, Traditional, China, Taiwan, WikiSimplified,
        WikiTraditional).  Optional kwargs mirror the API parameters
        (ignoreTextStyles, jpTextStyles, jpStyleConversionStrategy,
        jpTextConversionStrategy, modules, userPostReplace,
        userPreReplace, userProtectReplace); see
        https://docs.zhconvert.org/api/convert/ for their semantics.
        """
        self._check_kwargs(kwargs)
        response = self._request('/convert', kwargs)
        return self._text(response)

    async def async_convert(self, **kwargs):
        """Async variant of convert(); long text is sent in 50k chunks.

        Note: only ``text`` and ``converter`` are forwarded per chunk,
        matching the original behaviour.
        """
        self._check_kwargs(kwargs)
        converter = kwargs['converter']
        chunks = self._slice(kwargs['text'])
        texts = []
        # One session serves every chunk and is reliably closed on exit.
        async with aiohttp.ClientSession() as session:
            for chunk in chunks:
                payload = {
                    'text': chunk,
                    'converter': converter,
                }
                response = await self._async_request(session, '/convert', payload)
                texts.append(self._text(response))
        return ''.join(texts)
class RequestError(Exception):
    """Blocking zhconvert request returned a non-200 status."""
    pass
class AsyncRequestError(Exception):
    """Async zhconvert request returned a non-200 status."""
    pass
class FanhuajiInvalidKey(Exception):
    """A keyword argument is not accepted by the /convert endpoint."""
    pass
class FanhuajiMissNecessarykey(Exception):
    """A required keyword argument ('text' or 'converter') is missing."""
    pass
| true |
3c9287685927c8bde8d1b6529d3768d30c6a811d | Python | BPie/figpie | /figpie/printer.py | UTF-8 | 4,491 | 2.734375 | 3 | [] | no_license | #encoding=utf-8
from __future__ import absolute_import
from __future__ import print_function
import sys
import re
from collections import OrderedDict, defaultdict, deque
from operator import getitem
from blessed import Terminal
from . import properties as props
from . import style
class Printer:
    """Renders application state to a blessed Terminal: header (path),
    current options/edit view, and a footer with prompt and warnings.

    NOTE(review): dict.iteritems() below is Python-2-only, while the
    f-string-free .format() style also suggests py2-era code; running
    under Python 3 would raise AttributeError — confirm target version.
    """
    def __init__(self, terminal, debug=None):
        self._t = terminal
        self._style = style.StyleFactory(self._t)
        # Which cell attributes are rendered; missing keys default to False.
        self._displaied_attrs = defaultdict( lambda: False, {
            'type': True,
            'name': True,
            'short': True,
            'value': True})
        self._loc_cache = {}
        self._debug = debug
    def __call__(self, state, input_handler):
        # Full redraw: clear, then header, body and footer.
        self._clear()
        self._print_header(state)
        self._print_current(state)
        self._print_footer(input_handler.input_value, state.warnings)
    ''' Marks (highlights) short(cut) in a given name'''
    def _mark_short_in_name(self, name, short, f=None):
        # default formatter
        if not f:
            f = self._t.cyan
        # need to add info about short if not found in name
        if not re.findall(short, name, flags=re.IGNORECASE):
            name = '[{}]: '.format(short) + name
        return re.sub(short, f(short), name, 1, re.IGNORECASE)
    def _clear(self):
        print(self._t.clear())
    @property
    def _current_loc(self):
        # (row, col) of the terminal cursor.
        return self._t.get_location()
    ''' Prints a footer. '''
    def _print_footer(self, input_value, warnings, prefix_msg=''):
        th = self._t.height
        prompt = '{} >>> {}'.format(prefix_msg, input_value)
        # Bottom four screen rows: prompt, spacer, separator, warnings.
        msgs = {
            th: prompt,
            th-1: ' ',
            th-2: '_'*self._t.width,
            th-3: warnings
        }
        for h, msg in msgs.iteritems():
            with self._t.location(0, h):
                print(msg, end='')
                if msg == prompt:
                    # Remember where the prompt cursor ended up.
                    x = self._current_loc[1]
                    y = self._current_loc[0]
                    self._loc_cache['prompt'] = x, y
    def _print_current(self, state):
        # Dispatch the body rendering on the state's mode.
        if state.mode in ('container', 'enum'):
            self._print_options(state)
        elif state.mode in ('property',):
            self._print_edit(state)
        elif state.mode in ('action',):
            pass
        else:
            raise RuntimeError('wrong state: {}'.format(state))
    def _print_options(self, state):
        if state.mode not in ('container', 'enum'):
            raise RuntimeError('wrong state {}'.format(state))
        with self._t.location(0, 2):
            print(self._t.center(' ~~~< options >~~~ '))
        for short, cell in state.options.iteritems():
            self._print_option(cell, short)
    def _print_edit(self, state):
        assert state.mode == 'property'
        with self._t.location(0,2):
            print(self._t.center(' ~~~< edit >~~~ '))
        self._print_option(state.current, '>>')
        # for short, cell in self._parent_options.iteritems():
        #     if cell == self._current:
        #         formatter = None
        #     else:
        #         formatter = self._t.dim
        #     self._print_option(cell, short, formatter)
    ''' Returns styled string for given cell's attribute.
        Returns empty string if attrname is not found in _displaied_attrs dict.
    '''
    def _get_styled_attr(self, cell, attrname, str_template):
        if not self._displaied_attrs[attrname] or not hasattr(cell, attrname):
            return ''
        attr_val = getattr(cell, attrname)
        filled_str = str_template.format(attr_val)
        styled_str = self._style(cell, attrname)(filled_str)
        return styled_str
    def _print_header(self, state):
        # Breadcrumb-style path; the last segment is emphasised.
        with self._t.location(0,0):
            tail = state.path[:-1]
            head = state.path[-1]
            print(self._t.center(self._t.black_on_white('/'.join(tail))
                + self._t.bold_black_on_white('/'+head)))
    def _print_option(self, cell, short, style=None):
        # NOTE: the 'style' parameter shadows the module-level 'style' import.
        msg = ''
        attrstr = lambda n,t: self._get_styled_attr(cell, n, t)
        marked = self._mark_short_in_name(cell.name, short)
        msg += self._t.rjust(attrstr('type', '[{}] '), width=15)
        msg += self._t.center(marked, width=10)
        msg += self._t.ljust(attrstr('value', '= {}'), width=15)
        if style is None:
            style = self._style()
        print(style(self._t.center(msg)))
| true |
7cadd19145262a6e7a0d5667702bcaabee18848c | Python | zuckinhobot/index | /index/main.py | UTF-8 | 1,277 | 2.734375 | 3 | [] | no_license | from index.structure import *
import pickle
def next(self, file_idx) -> TermOccurrence:
    """Read the next TermOccurrence from an index file.

    NOTE(review): this looks like a method pasted out of a class — it
    takes ``self`` but is defined at module level, shadows the built-in
    ``next``, reads 4 bytes that are then ignored, binds
    ``next_from_file`` without using it, and pops from
    ``self.lst_occurrences_tmp`` which is never defined here.
    Presumably leftover/experimental code; verify before use.
    """
    bytes_doc_id = file_idx.read(4)
    if not bytes_doc_id:
        return None
    try:
        next_from_file = pickle.load(file_idx)
        return self.lst_occurrences_tmp.pop()
    except Exception as e:
        return None
# Sample TermOccurrence(term_id, doc_id, term_freq) records consumed by
# storeData() below.
t= [TermOccurrence(1,1,1),
    TermOccurrence(2,1,1),
    TermOccurrence(1,2,2),
    TermOccurrence(2,2,1),
    TermOccurrence(1,3,1),
    TermOccurrence(3,3,1),
    TermOccurrence(1,4,1),
    TermOccurrence(1,5,1),
    TermOccurrence(1,6,1),
    TermOccurrence(2,6,1),
    TermOccurrence(3,7,1),
    TermOccurrence(3,8,1)]
def storeData():
    """Append one pickled [term_id, doc_id, term_freq] record to
    'examplePickle' for every occurrence in the module-level list ``t``.

    Binary mode is required by pickle; append mode preserves any
    previously stored records.
    """
    # Open the file once instead of reopening it for every record, and
    # let the context manager guarantee it is closed.  (The original
    # also carried unused sample dicts left over from a tutorial.)
    with open('examplePickle', 'ab') as dbfile:
        for occurrence in t:
            record = [occurrence.term_id, occurrence.doc_id, occurrence.term_freq]
            pickle.dump(record, dbfile)
# Its important to use binary mode
def loadData():
    """Print every pickled record stored in 'examplePickle'.

    storeData() writes one pickled list per record, so pickle.load must
    be called repeatedly until EOFError.  The original code loaded only
    the first record and then indexed the resulting list with its own
    element values, as if it were a dict — which printed garbage or
    raised IndexError.
    """
    # Binary mode is required for reading pickled data as well.
    with open('examplePickle', 'rb') as dbfile:
        while True:
            try:
                record = pickle.load(dbfile)
            except EOFError:
                break
            print(record)
if __name__ == '__main__':
storeData()
loadData() | true |
61baec39a0def44a48fb77ffb3382989dd01c0a6 | Python | atdavidpark/problem-solving | /baekjoon/10799.py | UTF-8 | 540 | 3.203125 | 3 | [] | no_license | '''
def solve(str):
s = []
ret = 0
i = 0
while i < len(str):
if str[i] == '(':
s.append(str[i])
if str[i + 1] == ')':
# count
s.pop()
ret += len(s)
i += 1
else:
# close
s.pop()
ret += 1
i += 1
return ret
print(solve(input()))
'''
# BOJ 10799: count the iron-bar pieces produced by laser cuts, where the
# input is a string of '(' and ')' — '()' is a laser, any other pair is
# a bar.  Golfed single-pass solution.
ans = d = i = 0
for c in input():
    j = c < ')' # ( : 1 / ) : 0  ('(' sorts before ')')
    d += 2 * j - 1  # running nesting depth: +1 for '(', -1 for ')'
    # ~(-d) == d - 1, so the added term simplifies to:
    #   i      when c == '('  (1 if the previous char was also '(')
    #   d * i  when c == ')'  (a laser '()' cuts all d enclosing bars)
    ans += (d - (~(-d) * j)) * i
    i = j  # remember whether this char was '('
print(ans)
| true |
fc5354d3370044fa917196d5521d7d2bfb4894bb | Python | dsamruddhi/Physics-Informed-Neural-Networks | /examples/harmonic_oscillator/harmonic_oscillator.py | UTF-8 | 4,386 | 3.484375 | 3 | [] | no_license | """
Harmonic Oscillator: Implementation of a PINN to calculate the force acting on a spring when extended to a location 'x'
at time 't'. x and t are related as x = sin(t). We assume the underlying relation is given by Hooke's law as
F(x) = -kx - (0.1)sin(x) where k = 0.1 and compare the performance of a normal neural network with a
physics informed neural network in terms of predicting the force at different time instants.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Fix the RNG so TF weight initialisation is reproducible.
    tf.random.set_seed(468)
    # The `""""..."""` literals below are plain string statements used
    # as section banners; they have no runtime effect.
    """" Global parameters """
    EPOCHS = 50
    k = 1  # spring constant used in the force model below
    """" Define the harmonic oscillator process """
    def get_positions(time_instants):
        # Position of the oscillator: x = sin(t).
        return np.sin(time_instants)
    def get_force(positions):
        # Ground-truth force model implemented here: F(x) = -kx + 0.1*sin(x).
        # NOTE(review): the module docstring states "-0.1*sin(x)" — the
        # sign differs from the code; confirm which is intended.
        return -k*positions + 0.1*np.sin(positions)
    t = np.linspace(0, 10, num=101)
    x = get_positions(t)
    F = get_force(x)
    plt.figure(1)
    plt.plot(t, F, 'b')
    plt.legend(["Actual force"])
    plt.xlabel("Time (t)")
    plt.ylabel("Force")
    plt.title("Force acting on the spring vs. time")
    plt.show()
    """" Collect data samples from the forward process """
    # Four sparse measurement instants serve as the training data.
    ts = [0, 3, 6.3, 9.3]
    xs = get_positions(ts)
    F_data = get_force(xs)
    plt.figure(2)
    plt.plot(t, F, 'b')
    plt.plot(ts, F_data, 'o', color="black")
    plt.legend(["Actual force", "Measured force"])
    plt.xlabel("Time (t)")
    plt.ylabel("Force")
    plt.title("Force acting on the spring vs. time")
    plt.show()
    """" Simple Network to predict force using only measured data """
    simple_model = Sequential()
    simple_model.add(Dense(32, activation="tanh"))
    simple_model.add(Dense(1))
    simple_model.compile(optimizer='adam', loss='mse')
    simple_model.fit(xs, F_data, epochs=EPOCHS)
    F_pred = simple_model.predict(x)
    plt.figure(3)
    plt.plot(t, F, 'b')
    plt.plot(ts, F_data, 'o', color="black")
    plt.plot(t, F_pred, '--', color="red")
    plt.legend(["Actual force", "Measured force", "NN prediction"])
    plt.xlabel("Time (t)")
    plt.ylabel("Force")
    plt.title("Force predicted by simple Neural Network")
    plt.show()
    """" Physics Informed Network to predict force using data and PDE loss """
    pinn_model = Sequential()
    pinn_model.add(Dense(32, activation="tanh"))
    pinn_model.add(Dense(1))
    optimizer = tf.keras.optimizers.Adam()
    def pinn_loss(x, F_act, F_pred):
        """MSE data loss plus a physics residual penalising deviation
        from F = -kx + 0.1*sin(x), weighted by reg_param."""
        reg_param = 0.1
        mse_loss = tf.reduce_mean((F_act-F_pred)**2)
        temp = -k*x + 0.1*tf.sin(x)
        temp = tf.cast(temp, tf.float32)
        pde_loss = tf.reduce_mean((F_pred - temp)**2)
        return mse_loss + reg_param*pde_loss
    @tf.function
    def train_step(x, F_act):
        """One gradient-descent step of the PINN on a single sample."""
        with tf.GradientTape() as pinn_tape:
            F_pinn = pinn_model(x, training=True)
            loss = pinn_loss(x, F_act, F_pinn)
        gradients = pinn_tape.gradient(loss, pinn_model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, pinn_model.trainable_variables))
    # NOTE: the loop variable 'input' shadows the built-in of that name.
    for epoch in range(0, EPOCHS):
        for input, F_act in zip(xs, F_data):
            input = input.reshape((1, 1))
            F_act = np.float32(F_act)
            train_step(input, F_act)
    print("PINN training complete")
    F_pinn_pred = pinn_model.predict(x)
    """" Performance: NN vs. PINN """
    plt.figure(4)
    plt.plot(t, F, color="blue")
    plt.plot(ts, F_data, 'o', color="black")
    plt.plot(t, F_pred, '--', color="red")
    plt.plot(t, F_pinn_pred, '-', color="green")
    plt.legend(["Actual force", "Measured force", "NN prediction", "PINN prediction"])
    plt.xlabel("Time (t)")
    plt.ylabel("Force")
    plt.title("NN vs. PINN")
    plt.show()
    """" Extrapolation performance: NN vs. PINN """
    # Evaluate both models outside the training window t in [0, 10].
    t_new = np.linspace(10, 20, num=101)
    x_new = get_positions(t_new)
    F_new_act = get_force(x_new)
    F_new_NN = simple_model.predict(x_new)
    F_new_PINN = pinn_model.predict(x_new)
    plt.figure(5)
    plt.plot(t_new, F_new_act, color="blue")
    plt.plot(t_new, F_new_NN, "--", color="red")
    plt.plot(t_new, F_new_PINN, '-', color="green")
    plt.legend(["Actual force", "NN prediction", "PINN prediction"])
    plt.xlabel("Time (t)")
    plt.ylabel("Force")
    plt.title("Extrapolation: NN vs. PINN")
    plt.show()
| true |
81b87b915e2d984077536a719ae87375250e62b0 | Python | leonuz/Challenges_2021_Public | /misc/flying-spaghetti-monster/solve/fsm.py | UTF-8 | 3,792 | 3 | 3 | [] | no_license | import inspect
import itertools
import json
import random
import string
import networkx
import sympy
GEN_CHUNK = 8192
x = sympy.symbols("x")
def linfunc_gen(n=None):
    """Yield random linear sympy polynomials of the form p*x + c.

    The p coefficients run through the primes in order; the constant terms
    are a random permutation of [0, GEN_CHUNK) offset by the chunk index, so
    every yielded polynomial is distinct.

    :param n: approximate number of polynomials needed; ``None`` yields
        an unbounded stream.
    """
    if n is None:
        chunk_indices = itertools.count()
    else:
        # Enough chunks of GEN_CHUNK polynomials to cover n with slack.
        chunk_indices = range(n // GEN_CHUNK + 2)
    primes = iter(sympy.ntheory.generate.Sieve())
    for chunk in chunk_indices:
        offsets = random.sample(range(GEN_CHUNK), GEN_CHUNK)
        base = chunk * GEN_CHUNK
        for prime, offset in zip(primes, offsets):
            yield sympy.Poly(prime * x + offset + base, x)
class FSM():
    """Finite-state machine over a complete digraph whose edges each carry a
    linear polynomial; walking an input string composes those polynomials.

    Edge attributes: "f" (sympy Poly), "fe" (its expression form, used for
    serialisation) and "n" (ordinal of the character consumed by that edge).
    """
    def __init__(self, g):
        # g: networkx.DiGraph prepared by new() or load().
        self.g = g
    @classmethod
    def new(cls, alphabet=string.printable):
        """Build a fresh, randomised FSM over `alphabet`."""
        # We shuffle the alphabet so that monotonic state numbers don't leak it
        alphabet = list(alphabet)
        random.shuffle(alphabet)
        g = networkx.complete_graph(alphabet, create_using=networkx.DiGraph)
        # Add loop edges so we can repeat characters
        for n in g.nodes:
            g.add_edge(n, n)
        # Now generate random linear functions for each edge
        ec = len(g.edges)
        # Visit edges in random order so edge position does not correlate
        # with the prime-indexed polynomial assigned to it.
        for e, f in zip(random.sample(tuple(g.edges), ec), linfunc_gen(ec)):
            ed = g.edges[e]
            # "n": ordinal of the character consumed when entering e[1].
            ed["f"], ed["n"] = f, ord(e[1])
            ed["fe"] = fe = f.as_expr()
        # Now that we've added the values to the edges, we can anonymise the
        # node into a monotonic series of integer values
        return cls(networkx.convert_node_labels_to_integers(g))
    @classmethod
    def load(cls, fobj):
        """Rebuild an FSM from the tab-separated format written by save()."""
        g = networkx.DiGraph()
        for edge_repr in fobj.readlines():
            # Each line: "<a>-><b>\t<char ordinal>\t<polynomial expression>"
            a_b, n, fe = edge_repr.strip().split("\t")
            a, b = a_b.split("->")
            a, n, b = map(int, (a, n, b))
            f = sympy.Poly(fe)
            g.add_edge(a, b, n=n, f=f, fe=fe)
        return cls(g)
    def save(self, fobj):
        """Serialise all edges to `fobj`, again in random order so the file
        does not leak construction order."""
        for a, b in random.sample(tuple(self.g.edges), len(self.g.edges)):
            ed = self.g.edges[(a,b)]
            fobj.write(f"{a}->{b}\t{ed['n']}\t{ed['fe']}\n")
    def as_edges(self, inputs):
        """Yield the edge traversed for each character of `inputs`.

        Raises LookupError when a character has no matching edge.
        """
        # Convert an input iterable into a series of edge traversals. Note that
        # we add an extra edge from the 0th element to itself to ensure it is
        # explicitly included in the edge list.
        first, *rest = inputs
        # The first character fixes the start state via its self-loop edge.
        for e in networkx.selfloop_edges(self.g):
            if self.g.edges[e]["n"] == ord(first):
                yield e
                a = e[0]
                break
        else:
            raise LookupError(f"Failed to find loop edge for {first!r}")
        # Subsequent characters follow the outgoing edge labelled with them.
        for c in rest:
            for b, ed in self.g[a].items():
                if ed["n"] == ord(c):
                    yield a, b
                    a = b
                    break
            else:
                raise LookupError(f"Failed to find edge from {a!r} for {c!r}")
    def get_comp(self, inputs):
        """Return (composed polynomial, final state) for the path of `inputs`."""
        # Compose the linear functions along a path of input values
        fa = sympy.Poly(x)  # start from the identity f(x) = x
        for e in self.as_edges(inputs):
            fb = self.g.edges[e]["f"]
            fa = fb.compose(fa)
        return fa, e[1]
if __name__ == "__main__":
    print("Creating new FSM")
    fsm_obj = FSM.new()
    # Persist the machine so challenges can be replayed/verified later.
    fsm_obj.save(open("fsm.txt", "w"))
    print("Creating canned challenges")
    canned_data = []
    # One challenge per non-empty, non-comment line of canned-inputs.txt.
    for data in (l.strip() for l in open("canned-inputs.txt").readlines()):
        if not data or data.startswith("#"):
            continue
        # An optional per-challenge timeout may follow a ';' separator.
        data, *maybe_timeout = data.split(";")
        f, sf = fsm_obj.get_comp(data)
        datum = {
            "data": data,
            "f_expr": str(f.as_expr()),
            "final_state": sf,
        }
        if maybe_timeout:
            (timeout, ) = maybe_timeout
            datum["timeout"] = int(timeout)
        canned_data.append(datum)
    json.dump(canned_data, open("canned.json", "w"))
    print("Done")
| true |
8dac6fa4e87c5309e291d2951f1c63200a7694a8 | Python | ChengaoJ/Natural-Language-Processing | /HW1 Spell Correction/hw1/SmoothUnigramModel.py | UTF-8 | 1,197 | 3.59375 | 4 | [] | no_license | import math, collections
class SmoothUnigramModel:
    """Unigram language model with add-one (Laplace-style) smoothing."""
    def __init__(self, corpus):
        """Initialize your data structures in the constructor."""
        self.unigramCounts = collections.defaultdict(lambda: 0)
        self.totalCount = 0
        # Retained for backward compatibility with code reading this
        # attribute; score() no longer mutates it (see below).
        self.zeroCount = 0
        self.train(corpus)
    def train(self, corpus):
        """ Takes a corpus and trains your language model.
        Compute any counts or other corpus statistics in this function.
        """
        for sentence in corpus.corpus:
            for datum in sentence.data:
                self.unigramCounts[datum.word] += 1
                self.totalCount += 1
    def score(self, sentence):
        """ Takes a list of strings as argument and returns the log-probability of the
        sentence using your language model.

        Bug fix: the unseen-word count used in the smoothing denominator is
        computed locally per call. Previously it accumulated on
        self.zeroCount across calls, so scoring several sentences (or the
        same sentence twice) silently changed the result.
        """
        # Number of words in *this* sentence never seen during training.
        zero_count = sum(1 for word in sentence if word not in self.unigramCounts)
        score = 0.0
        for word in sentence:
            # .get() avoids defaultdict insertion, so scoring does not grow
            # the vocabulary as a side effect.
            count = self.unigramCounts.get(word, 0)
            score += math.log(count + 1)
            score -= math.log(self.totalCount + zero_count)
        return score
b559088605ab7b312623559618ec7c3b5fb19ffe | Python | parvatijay2901/FaceNet_FR | /Web/imutils/video/count_frames.py | UTF-8 | 1,616 | 3.0625 | 3 | [
"MIT"
] | permissive | # import the necessary packages
from ..convenience import is_cv3
import cv2
def count_frames(path, override=False):
    """Count the total number of frames in a video file.

    :param path: path to the video file
    :param override: force the slow, read-every-frame fallback
    :return: total number of frames
    """
    # grab a pointer to the video file and initialize the total
    # number of frames read
    video = cv2.VideoCapture(path)
    total = 0
    # if the override flag is passed in, revert to the manual
    # method of counting frames
    if override:
        total = count_frames_manual(video)
    else:
        # The frame-count property can be missing or unreliable depending
        # on the OpenCV version and installed codecs, so fall back to
        # counting manually on failure.
        try:
            # check if we are using OpenCV 3
            if is_cv3():
                total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
            # otherwise, we are using OpenCV 2.4
            else:
                total = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        except Exception:
            total = count_frames_manual(video)
    # release the video file pointer
    video.release()
    # return the total number of frames in the video
    return total
def count_frames_manual(video):
    """Count frames by reading the stream until it is exhausted.

    Slow but reliable fallback used when the frame-count property is
    unavailable or wrong.
    """
    total = 0
    grabbed, _ = video.read()
    while grabbed:
        total += 1
        grabbed, _ = video.read()
    return total
66b2bbb6fce1d04ed1638775adb670a6c4722932 | Python | ChristopherStavros/Python_Study | /Projects/Intro_and_ATBSwPy/textToSpeech.py | UTF-8 | 343 | 2.765625 | 3 | [] | no_license | '''
pip install pyttsx3
python -m pip install pypiwin32
https://rdmilligan.wordpress.com/2015/04/01/text-to-speech-using-python/
'''
import pyttsx3
# Initialise the text-to-speech engine once at startup.
engine = pyttsx3.init()
#text = "Welcome to Steez-corp corporate headquarters in Hotlanta"
# The text to speak is read from stdin (input() prints no prompt).
text_input = input()
def speak(text):
    """Queue `text` for speech and block until playback completes."""
    engine.say(text)
    engine.runAndWait()
speak(text_input)
88cace8c88f4a656b709d41b65d65730cc54b1ef | Python | AlexanderAlexeyuk/Vascualar_Age | /Проект для сосудистого возраста.py | UTF-8 | 4,049 | 2.8125 | 3 | [] | no_license | import tkinter as tk
from tkinter import ttk
import sqlite3
class Main(tk.Frame):
    """Main window frame: a toolbar plus a table of all patient records."""
    def __init__(self, root):
        super().__init__(root)
        self.init_main()
        # `db` is the module-level DB instance created in the __main__ block.
        self.db = db
        self.view_records()
    def init_main(self):
        """Build the toolbar and the record table."""
        toolbar = tk.Frame(bg='#d7d8e0', bd=2)
        toolbar.pack(side=tk.TOP, fill=tk.X)
        btn_open_dialog = tk.Button(toolbar, text='Ввести данные пациента', command=self.open_dialog, bg='#d7d8e0', bd=0,
                                    compound=tk.TOP)# image=self.add_img)
        btn_open_dialog.pack(side=tk.LEFT)
        # Table with one row per patient record (headings only, no tree column).
        self.tree = ttk.Treeview(self, columns=('number', 'surname', 'password_age', 'vascular_age'), height=15, show='headings')
        self.tree.column('number', width=30, anchor=tk.CENTER)
        self.tree.column('surname', width=305, anchor=tk.CENTER)
        self.tree.column('password_age', width=130, anchor=tk.CENTER)
        self.tree.column('vascular_age', width=130, anchor=tk.CENTER)
        self.tree.heading('number', text='№')
        self.tree.heading('password_age', text='Паспортный возраст')
        self.tree.heading('vascular_age', text='Сосудистый возраст')
        self.tree.heading('surname', text='ФИО')
        self.tree.pack()
    def records(self, surname, password_age, vascular_age):
        """Persist a new record and refresh the table."""
        self.db.insert_data(surname, password_age, vascular_age)
        self.view_records()
    def view_records(self):
        """Reload the table contents from the database."""
        self.db.c.execute('''SELECT * FROM patients_data_1''')
        # Clear all existing rows, then insert one row per DB record.
        [self.tree.delete(i) for i in self.tree.get_children()]
        [self.tree.insert('', 'end', values=row) for row in self.db.c.fetchall()]
    def open_dialog(self):
        """Open the 'add patient' dialog window."""
        Child()
class Child(tk.Toplevel):
    """Modal dialog for entering one patient's data."""
    def __init__(self):
        super().__init__(root)
        self.init_child()
        # `app` is the module-level Main instance; used to save records.
        self.view = app
    def init_child(self):
        """Build and arrange the dialog widgets."""
        self.title('Ввести данные пациента')
        self.geometry('400x220+400+300')
        self.resizable(False, False)
        label_name = tk.Label(self, text='ФИО пациента')
        label_name.place(x=50, y=10)
        # NOTE(review): both age labels show the same text 'Возраст:' --
        # they should probably distinguish passport age from vascular age.
        label_password_age = tk.Label(self, text='Возраст:')
        label_password_age.place(x=50, y=60)
        label_vascular_age = tk.Label(self, text='Возраст:')
        label_vascular_age.place(x=50, y=110)
        self.entry_surname = ttk.Entry(self)
        self.entry_surname.place(x=200, y=10)
        self.entry_password_age = ttk.Entry(self)
        self.entry_password_age.place(x=200, y=60)
        self.entry_vascular_age = ttk.Entry(self)
        self.entry_vascular_age.place(x=200, y=110)
        # self.combobox = ttk.Combobox(self, values=[u'Паспортный', u'Сосудистый'])
        # self.combobox.current(0)
        # self.combobox.place(x=200, y=80)
        btn_cancel = ttk.Button(self, text='Закрыть', command=self.destroy)
        btn_cancel.place(x=300, y=170)
        btn_ok = ttk.Button(self, text='Ввести')
        btn_ok.place(x=220, y=170)
        # On click, save the entered values through Main.records().
        btn_ok.bind('<Button-1>', lambda event: self.view.records(self.entry_surname.get(),self.entry_password_age.get(), self.entry_vascular_age.get()))
        # Make the dialog modal and give it keyboard focus.
        self.grab_set()
        self.focus_set()
class DB:
    """Thin wrapper around the SQLite store of patient records.

    Exposes `conn` (the connection) and `c` (a shared cursor) because the
    UI code queries through them directly.
    """
    def __init__(self):
        """Open (or create) the database file and ensure the table exists."""
        self.conn = sqlite3.connect('patients_data_1.db')
        self.c = self.conn.cursor()
        create_table = '''CREATE TABLE IF NOT EXISTS patients_data_1 (id integer primary key, surname text, password_age real, vascular_age real)'''
        self.c.execute(create_table)
        self.conn.commit()
    def insert_data(self, surname, password_age, vascular_age):
        """Insert one patient record; the id is assigned by SQLite."""
        insert_row = '''INSERT INTO patients_data_1 (surname, password_age, vascular_age) VALUES (?, ?, ?)'''
        self.c.execute(insert_row, (surname, password_age, vascular_age))
        self.conn.commit()
if __name__ == "__main__":
    root = tk.Tk()
    # Module-level singletons: Main and Child read `db`, `root` and `app`
    # as globals, so the creation order here matters.
    db = DB()
    app = Main(root)
    app.pack()
    root.title("Диспансеризация")
    root.geometry("650x450+300+200")
    root.resizable(False, False)
    root.mainloop()
fb2244c29847a712d896201e1f63129d5502fcd6 | Python | morbrian/carnd-vdat | /lessons_functions.py | UTF-8 | 15,373 | 2.796875 | 3 | [] | no_license | import matplotlib.image as mpimg
import numpy as np
import cv2
from skimage.feature import hog
import os
import os.path as path
import matplotlib.pyplot as plt
from scipy.ndimage.measurements import label
def save_hog_sequence(output_image_name, title, image, hog_vis, file_features):
    """
    save the image to the output file
    :param output_image_name: file name of image describing hog processing
    :param title: title of image
    :param image: image data
    :param hog_vis: single or array of hog channels
    :param file_features: other features, like spatial or histo
    """
    # Heuristic: a list of 3 channel images has len == 3, while a single 2-D
    # hog image has len == its row count (> 3), so len > 3 means one channel.
    hog_vis_count = 1 if len(hog_vis) > 3 else 3
    # One panel for the source image, one per hog channel, one for features.
    row_size = hog_vis_count + 2
    fig = plt.figure(figsize=(25, 4))
    # Panel 1: the original image.
    subplot = plt.subplot(1, row_size, 1)
    subplot.axis('off')
    subplot.set_title(title)
    plt.imshow(image)
    if hog_vis_count == 1:
        # Panel 2: the single hog visualisation.
        subplot = plt.subplot(1, row_size, 2)
        subplot.axis('off')
        subplot.set_title('single channel hog')
        plt.imshow(hog_vis, cmap='gray')
    else:
        # Panels 2-4: one hog visualisation per color channel.
        for i in range(3):
            subplot = plt.subplot(1, row_size, i + 2)
            subplot.axis('off')
            subplot.set_title('hog ch{}'.format(i))
            plt.imshow(hog_vis[i], cmap='gray')
    # Last panel: the concatenated spatial/histogram feature vector.
    subplot = plt.subplot(1, row_size, row_size)
    subplot.set_ylim([0, 500])
    subplot.set_title('color+spatial feats')
    subplot.plot(file_features)
    plt.savefig(output_image_name, bbox_inches='tight', dpi=50)
    plt.close(fig)
    print("saved to: {}".format(output_image_name))
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
                     vis=False, feature_vec=True):
    """
    Produce HOG features using skimage and optionally return visual representations.
    :param img: image to process
    :param orient: number of orientation summary cells
    :param pix_per_cell: number of pixels to include
    :param cell_per_block: number of cells in each block
    :param vis: use True to generate the hog visual representation
    :param feature_vec: True if the returned vector should be raveled
    :return: features, or (features, hog_image) when vis is True
    """
    # skimage's hog() already returns (features, hog_image) when
    # visualise=True and just the feature array otherwise, so a single
    # call covers both cases.
    return hog(img, orientations=orient,
               pixels_per_cell=(pix_per_cell, pix_per_cell),
               cells_per_block=(cell_per_block, cell_per_block),
               transform_sqrt=True,
               visualise=vis, feature_vector=feature_vec)
def convert_color(image, conv):
    """Convert an RGB image into the requested color space.

    :param image: RGB image array
    :param conv: one of 'HSV', 'LUV', 'HLS', 'YUV', 'YCrCb'
    :return: the converted image
    :raises ValueError: for an unknown conversion name (the original fell
        through and silently returned None, which surfaced later as a
        confusing error far from the cause)
    """
    conversions = {
        'HSV': cv2.COLOR_RGB2HSV,
        'LUV': cv2.COLOR_RGB2LUV,
        'HLS': cv2.COLOR_RGB2HLS,
        'YUV': cv2.COLOR_RGB2YUV,
        'YCrCb': cv2.COLOR_RGB2YCrCb,
    }
    if conv not in conversions:
        raise ValueError("Unknown color conversion: {}".format(conv))
    return cv2.cvtColor(image, conversions[conv])
# Define a function to compute binned color features
def bin_spatial(img, size=(32, 32)):
    """Down-sample the image to `size` and flatten it into a 1-D feature vector."""
    resized = cv2.resize(img, size)
    return resized.ravel()
# Define a function to compute color histogram features
# NEED TO CHANGE bins_range if reading .png files with mpimg!
def color_hist(img, nbins=32, bins_range=(0, 255)):
    """Concatenated per-channel color histogram of a 3-channel image.

    :param img: image array of shape (H, W, 3)
    :param nbins: number of histogram bins per channel
    :param bins_range: value range covered by the bins (use (0, 1.0) for
        float images loaded from .png with mpimg)
    :return: 1-D array of length 3 * nbins
    """
    channel_hists = [
        np.histogram(img[:, :, channel], bins=nbins, range=bins_range)[0]
        for channel in range(3)
    ]
    return np.concatenate(channel_hists)
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(image_file, color_space='RGB', spatial_size=(32, 32),
                     hist_bins=32, orient=9,
                     pix_per_cell=8, cell_per_block=2, hog_channel='ALL',
                     spatial_feat=True, hist_feat=True, hog_feat=True,
                     tag='', vis=False, vis_folder=None, flip=False):
    """Load one image file and build its combined feature vector.

    Concatenates (in order) spatial binning, color-histogram and HOG
    features, each of which can be toggled off. When `vis` is True a
    diagnostic figure is also written under `vis_folder`.

    :param image_file: path to a .png/.jpg/.jpeg image
    :param hog_channel: channel index, or 'ALL' for all three channels
    :param flip: horizontally mirror the image first (data augmentation)
    :raises ValueError: for any other file extension
    :return: 1-D numpy array of concatenated features
    """
    file_features = []
    vis_features = []
    hog_vis = None
    if vis is True and vis_folder is not None:
        if not path.exists(vis_folder):
            os.makedirs(vis_folder)
    # Histogram range for 8-bit images.
    bins_range = (0, 255)
    # bins_range = (0, 1.0)
    file_extension = path.splitext(image_file)[1]
    if file_extension == ".png":
        # cv2 loads BGR; convert so all inputs reach the pipeline as RGB.
        image = cv2.imread(image_file)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    elif file_extension == ".jpg" or file_extension == ".jpeg":
        image = mpimg.imread(image_file)
    else:
        raise ValueError("UNKNOWN FILE EXTENSION: {}".format(image_file))
    if flip is True:
        image = np.fliplr(image)
    # apply color conversion if other than 'RGB'
    if color_space != 'RGB':
        feature_image = convert_color(image, color_space)
    else:
        feature_image = np.copy(image)
    if spatial_feat is True:
        spatial_features = bin_spatial(feature_image, size=spatial_size)
        file_features.append(spatial_features)
        if vis is True:
            vis_features.append(spatial_features)
    if hist_feat is True:
        # Apply color_hist()
        hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=bins_range)
        file_features.append(hist_features)
        if vis is True:
            vis_features.append(hist_features)
    if hog_feat is True:
        # Call get_hog_features()
        if hog_channel == 'ALL':
            # One HOG vector (and, if requested, one visualisation) per channel.
            hog_features = []
            hog_vis = []
            for channel in range(feature_image.shape[2]):
                if vis is True:
                    hfeat, hvis = get_hog_features(feature_image[:, :, channel],
                                                   orient, pix_per_cell, cell_per_block,
                                                   vis=vis, feature_vec=True)
                    hog_features.append(hfeat)
                    hog_vis.append(hvis)
                else:
                    hog_features.append(get_hog_features(feature_image[:, :, channel],
                                                         orient, pix_per_cell, cell_per_block,
                                                         vis=vis, feature_vec=True))
            hog_features = np.ravel(hog_features)
        else:
            # Single selected channel.
            if vis is True:
                hog_features, hog_vis = \
                    get_hog_features(feature_image[:, :, hog_channel], orient,
                                     pix_per_cell, cell_per_block, vis=vis, feature_vec=True)
            else:
                hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
                                                pix_per_cell, cell_per_block, vis=vis, feature_vec=True)
        # Append the new feature vector to the features list
        file_features.append(hog_features)
    if vis is True:
        save_hog_sequence('/'.join([vis_folder, "{}-hog-sequence.jpg".format(tag)]),
                          tag, image, hog_vis, np.concatenate(vis_features))
    return np.concatenate(file_features)
def extract_features_list(imgs, color_space='RGB', spatial_size=(32, 32),
                          hist_bins=32, orient=9,
                          pix_per_cell=8, cell_per_block=2, hog_channel='ALL',
                          spatial_feat=True, hist_feat=True, hog_feat=True,
                          tag='', vis_count=5, vis_folder=None):
    """Extract feature vectors for a list of image files.

    Every image contributes two vectors: one as-is and one horizontally
    mirrored (cheap data augmentation). The first `vis_count` images also
    get diagnostic figures written to `vis_folder`.

    :return: list of 1-D feature arrays, two per input image
    """
    features = []
    for i, image_path in enumerate(imgs):
        vis = i < vis_count
        for flip in (False, True):
            item_tag = "{}-{}{}".format(tag, i, "-flip" if flip else "")
            features.append(
                extract_features(image_path, color_space=color_space, spatial_size=spatial_size,
                                 hist_bins=hist_bins, orient=orient,
                                 pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                                 hog_channel=hog_channel, spatial_feat=spatial_feat,
                                 hist_feat=hist_feat, hog_feat=hog_feat,
                                 tag=item_tag, vis=vis, vis_folder=vis_folder, flip=flip))
    return features
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of `img` with each ((x1, y1), (x2, y2)) box drawn on it."""
    annotated = np.copy(img)
    for top_left, bottom_right in bboxes:
        cv2.rectangle(annotated, top_left, bottom_right, color, thick)
    return annotated
# Define a single function that can extract features using hog sub-sampling and make predictions
def find_cars(img, classifier, ystart=400, ystop=704, xstart=0, xstop=1279, scale=1,
              orient=9, pix_per_cell=4, cell_per_block=2, cells_per_step=2,
              spatial_size=(32, 32), hist_bins=32, grid=False):
    """Slide a window over a region of `img` and collect car detections.

    HOG features are computed once for the whole (scaled) search region and
    sub-sampled per window, which is much cheaper than recomputing HOG per
    window.

    :param img: RGB frame
    :param classifier: fitted estimator with predict() over the stacked
        spatial + histogram + HOG feature vector
    :param ystart/ystop/xstart/xstop: search region in image coordinates
    :param scale: scale factor of the sliding window relative to 64 px
    :param grid: when True, return every window (useful for visualising
        the search grid) instead of only positive predictions
    :return: list of ((x1, y1), (x2, y2)) boxes in original image coords

    Fix: the deprecated ``np.int`` alias (removed in NumPy 1.24) has been
    replaced with the builtin ``int`` it aliased.
    """
    img_tosearch = img[ystart:ystop, xstart:xstop, :]
    ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2LUV)
    if scale != 1:
        imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1]/scale), int(imshape[0]/scale)))
    ch1 = ctrans_tosearch[:, :, 0]
    ch2 = ctrans_tosearch[:, :, 1]
    ch3 = ctrans_tosearch[:, :, 2]
    # Define blocks and steps as above
    nxblocks = (ch1.shape[1] // pix_per_cell)-1
    nyblocks = (ch1.shape[0] // pix_per_cell)-1
    nfeat_per_block = orient*cell_per_block**2
    # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
    window = pix_per_cell * 8
    nblocks_per_window = (window // pix_per_cell)-1
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step
    # Compute individual channel HOG features for the entire image
    hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
    bboxes = []
    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb*cells_per_step
            xpos = xb*cells_per_step
            # Extract HOG for this patch
            hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
            xleft = xpos*pix_per_cell
            ytop = ypos*pix_per_cell
            # Extract the image patch, resized to the classifier's 64x64 input
            subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64, 64))
            # Get color features
            spatial_features = bin_spatial(subimg, size=spatial_size)
            hist_features = color_hist(subimg, nbins=hist_bins)
            # Stack features in the same order used at training time
            test_features = np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)
            test_prediction = classifier.predict(test_features)
            if test_prediction == 1 or grid is True:
                # Map window coordinates back into the original image frame
                xbox_left = int(xleft*scale)
                ytop_draw = int(ytop*scale)
                win_draw = int(window*scale)
                bboxes.append(((xbox_left+xstart, ytop_draw+ystart),
                               (xbox_left+win_draw+xstart, ytop_draw+win_draw+ystart)))
    return bboxes
def add_heat(heatmap, bbox_list):
    """Increment (in place) every heatmap pixel covered by each box.

    Each box has the form ((x1, y1), (x2, y2)); the heatmap is indexed
    [row, col] = [y, x]. Returns the mutated heatmap.
    """
    for (x1, y1), (x2, y2) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap
def apply_threshold(heatmap, threshold):
    """Zero out (in place) all pixels at or below `threshold`; return the map."""
    cold = heatmap <= threshold
    heatmap[cold] = 0
    return heatmap
def nonoverlapping_bboxes(labels):
    """Convert a label result into one bounding box per labelled region.

    :param labels: (label_array, region_count) pair as returned by
        scipy.ndimage label()
    :return: list of ((xmin, ymin), (xmax, ymax)) boxes, one per region
    """
    bboxes = []
    for region in range(1, labels[1] + 1):
        # Pixel coordinates belonging to this region.
        ys, xs = (labels[0] == region).nonzero()
        bboxes.append(((np.min(xs), np.min(ys)), (np.max(xs), np.max(ys))))
    return bboxes
def draw_bboxes(img, bboxes, color=(0, 0, 255), thick=4):
    """Draw each bounding box directly onto `img` (mutates it) and return it."""
    for top_left, bottom_right in bboxes:
        cv2.rectangle(img, top_left, bottom_right, color, thick)
    return img
def extract_and_save_blocks(image, bboxes, output_folder):
    """Crop every bounding box out of `image`, resize to 64x64 and save it
    as blockNNNNN.jpg under `output_folder` (the folder must already exist)."""
    counter = 0
    for bbox in bboxes:
        file = '/'.join([output_folder, "block{:05d}.jpg".format(counter)])
        print(bbox)
        # bbox is ((x1, y1), (x2, y2)); numpy indexing is [row, col] = [y, x].
        block = image[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0], :]
        # NOTE(review): cv2.imwrite expects BGR input; since BGR<->RGB is the
        # same channel swap this works when `image` is RGB -- confirm the
        # caller's channel order.
        cv2.imwrite(file, cv2.cvtColor(cv2.resize(block, (64, 64)), cv2.COLOR_BGR2RGB))
        counter += 1
def produce_heatmap(image, raw_bboxes, threshold=1):
    """Build a thresholded detection heatmap matching `image`'s 2-D shape.

    :param image: reference image (only its height/width are used)
    :param raw_bboxes: list of ((x1, y1), (x2, y2)) detection boxes
    :param threshold: pixels with a count <= threshold are zeroed
    :return: float heatmap clipped to [0, 255]
    """
    # Builtin `float` instead of the `np.float` alias, which was removed in
    # NumPy 1.24 and raised AttributeError there.
    heat = np.zeros_like(image[:, :, 0]).astype(float)
    # Add heat to each box in box list
    heat = add_heat(heat, raw_bboxes)
    # Apply threshold to help remove false positives
    heat = apply_threshold(heat, threshold)
    # Visualize the heatmap when displaying
    heatmap = np.clip(heat, 0, 255)
    return heatmap
def fuse_bboxes(heatmap):
    """Label the heatmap's connected regions and return one box per region."""
    return nonoverlapping_bboxes(label(heatmap))
| true |
df0e66af169048c12c549a6222eb61c0b994e11b | Python | alanbeech/PlantWateringSystem | /main.py | UTF-8 | 1,097 | 2.984375 | 3 | [] | no_license | import utime
from machine import I2C
from pico_i2c_lcd import I2cLcd
from machine import ADC
# display stuff
I2C_ADDR = 0x27
I2C_NUM_ROWS = 4
I2C_NUM_COLS = 20
i2c = I2C(0, sda=machine.Pin(0), scl=machine.Pin(1), freq=400000)
lcd = I2cLcd(i2c, I2C_ADDR, I2C_NUM_ROWS, I2C_NUM_COLS)
# temperature stuff
adc = ADC(machine.Pin(27))
sensor_temp = machine.ADC(4)
conversion_factor = 3.3 / (65535)
while True:
# read on board temperature sensor
reading = sensor_temp.read_u16() * conversion_factor
# adjust it to temperature value
temperature = 27 - (reading - 0.706) / 0.001721
lcd.clear()
lcd.move_to(0, 0)
lcd.putstr("temp: {}".format(temperature))
# read capacitive moisture sensor value
mreading = adc.read_u16()
# output reading
print(mreading)
# some better maths will be needed below to trigger based on change boundaries
if mreading > 30000:
lcd.move_to(0, 1)
lcd.putstr("moisture: Dry")
else:
lcd.move_to(0, 1)
lcd.putstr("moisture: Wet")
# wait 2 seconds and loop again
utime.sleep(2)
| true |
04d1ca48a5afa43e8ad6797d5d7e7d67d990c47e | Python | blue9057/config | /genhost.py | UTF-8 | 1,360 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
import os
import socket
def readconf():
    """Parse hosts.cfg into an {alias: domain} dict.

    Each useful line has the form: "<alias> <domain> [extra ignored fields]";
    lines containing '#' are treated as comments and skipped.

    Bug fixes vs. the original:
      * open in text mode ("rb" made `'#' in line` raise TypeError on
        Python 3, str vs bytes, and produced bytes keys/values)
      * filter on the field count of each line, not `len(lines)` (the
        number of lines in the whole file), which crashed on short lines
    """
    entries = {}
    with open("hosts.cfg", "r") as fd:
        for raw in fd:
            if '#' in raw:
                continue
            fields = raw.split()
            if len(fields) >= 2:
                entries[fields[0]] = fields[1]
    return entries
def readhost(path="/etc/hosts"):
    """Return {hostname: 0} for the second field of every line in `path`.

    :param path: hosts file to parse; defaults to /etc/hosts so existing
        callers keep the old behavior (the parameter generalizes the
        function and makes it testable).

    Note: like the original, only the second whitespace-separated field of
    each line is taken (extra aliases are ignored) and there is no comment
    filtering.
    """
    hosts = {}
    with open(path, "r") as fd:
        for raw in fd:
            fields = raw.split()
            if len(fields) > 1:
                hosts[fields[1]] = 0
    return hosts
def main():
    """Regenerate /etc/hosts from hosts.cfg when any alias is missing.

    Resolves each configured domain via DNS, rewrites everything up to the
    "MY_HOSTS" marker unchanged, then appends one "<ip>\t<alias>" line per
    configured alias, and installs the result with sudo cp.
    """
    config = readconf()
    host = readhost()
    alias_ip_dict = {}
    update = False
    for key in config.keys():
        if key not in host:
            update = True
            print("%s is not in host" % key)
    if update:
        for key in config.keys():
            domain = config[key]
            print("Querying %s" % domain)
            alias_ip_dict[key] = socket.gethostbyname(domain)
        # Text mode throughout: the original opened these files in binary
        # mode and then wrote str objects (and tested `"MY_HOSTS" in line`
        # against bytes), which raises TypeError on Python 3.
        with open("/etc/hosts", "r") as fdh, open("/tmp/hosts_append", "w") as fd:
            # Copy everything before the managed section verbatim.
            for line in fdh:
                if "MY_HOSTS" in line:
                    break
                fd.write(line)
            fd.write("### MY_HOSTS ###\n")
            for key, ip in alias_ip_dict.items():
                fd.write("%016s\t%s\n" % (ip, key))
        os.system("sudo cp /tmp/hosts_append /etc/hosts")
if __name__ == '__main__':
main()
| true |
b62026f3ad3f4beccf2945fdbb2b9b76d9c7554e | Python | moshthepitt/apr | /users/tests.py | UTF-8 | 1,841 | 2.578125 | 3 | [] | no_license | """Tests for users app"""
from django.test import TestCase, override_settings
from model_mommy import mommy
from django.conf import settings
from users.utils import get_client_id
@override_settings(APR_CLIENTID_PREFIX="D.")
class TestUtils(TestCase):
    """Tests for users.utils helpers."""
    def test_get_client_id(self):
        """get_client_id builds '<prefix><letter><sep><next number>' ids."""
        user = mommy.make('auth.User')
        customer = mommy.make('customers.Customer', user=user)
        # Existing client with explicit id D.K18: the next "K" client
        # should be numbered 19.
        mommy.make(
            'users.Client',
            first_name="A",
            last_name="Keller",
            client_id="{}K18".format(settings.APR_CLIENTID_PREFIX),
            creator=user,
            customer=customer)
        client2 = mommy.make(
            'users.Client',
            first_name="A",
            last_name="Kelly",
            creator=user,
            customer=customer)
        # Empty last name: the id letter falls back to the first name.
        client3 = mommy.make(
            'users.Client',
            first_name="A",
            last_name="",
            creator=user,
            customer=customer)
        # Fresh letter "Z": numbering starts at 1.
        client4 = mommy.make(
            'users.Client',
            first_name="A",
            last_name="Z",
            creator=user,
            customer=customer)
        self.assertEqual(
            "D.K 19",
            get_client_id(
                client2,
                prefix=settings.APR_CLIENTID_PREFIX,
                separator=" ",
                use_name=True))
        self.assertEqual(
            "D.A 1",
            get_client_id(
                client3,
                prefix=settings.APR_CLIENTID_PREFIX,
                separator=" ",
                use_name=True))
        self.assertEqual(
            "D.Z 1",
            get_client_id(
                client4,
                prefix=settings.APR_CLIENTID_PREFIX,
                separator=" ",
                use_name=True))
| true |
f2d637a1d1aeb0336b9f5b811f878cd5ea9c7241 | Python | rlatjdxo0717/Python-Practice | /data03/pack01/module02.py | UTF-8 | 245 | 3.734375 | 4 | [] | no_license | print("편한 따옴표를 쓰세요.")
# Comment shortcut: Ctrl + /
# There is no separate variable declaration -- assignment creates the variable.
name = 10
# print() with comma-separated arguments inserts a space between them.
print("이름은",name,"입니다")
# Concatenating with + requires an explicit str() conversion of the int.
print("이름은" + str(name) + "입니다")
age = 100 # a variable comes into existence when a value is assigned to it
print(age)
900b725c00c1a66cd202c0ea63d54941dff39d8c | Python | philiphyx/Scraper | /Quote-Generator.py | UTF-8 | 599 | 2.625 | 3 | [] | no_license | # from twilio.rest import TwilioRestClient
import requests
# Twilio credentials -- only needed if the SMS-sending import above is
# ever re-enabled.
# account_sid = " <Your sid> "
# auth_token = " <Your auth_token> "
#
# ourNumber = " <Your number> "
# Forismatic API query: one random English quote, JSON-encoded.
requestParams = {
    "method": "getQuote",
    "key": "457653",
    "format": "json",
    "lang": "en"
}
url = "http://api.forismatic.com/api/1.0/"
requestToApi = requests.post(url, params=requestParams)  # Request the quote from the API
json = requestToApi.json()  # Decode the JSON body of the response
# NOTE(review): the name `json` shadows the stdlib json module -- rename it
# if that module is ever needed later in this script.
print(json)
finishedQuote = json['quoteText'] + " -" + json['quoteAuthor']  # The finished quote!
print(finishedQuote)
d3716e471855d98bdc7005d90161d58c5c846e33 | Python | narendra-ism/Python_tutorial_basic_ | /Pyhton tutorial /30Explicit_String_Conversion.py | UTF-8 | 173 | 3.359375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 8 17:53:50 2018
@author: narendra
"""
# Turn 3.14 into a string on line 3!
print("The value of pi is around " + str(3.14)) | true |
505620f10981f603b237208c61c53b6eaa566016 | Python | patildayananda/Raspberry-Pi | /buzzr PPT.py | UTF-8 | 1,137 | 3.015625 | 3 | [] | no_license | import os
"""SOS-style buzzer pattern on a Raspberry Pi GPIO pin (BCM pin 22).

Rewritten as valid Python 3: the original used smart quotes, Python 2
print statements, a raw `input()` compared against an int, and misspelled
the RPi.GPIO package as `Rpi.GPIO`, so it could not run at all.
"""
import os
import time
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)
GPIO.setup(22, GPIO.OUT)


def _pulse(duration):
    """Drive the buzzer high for `duration` seconds, then low for the same."""
    GPIO.output(22, GPIO.HIGH)
    time.sleep(duration)
    GPIO.output(22, GPIO.LOW)
    time.sleep(duration)


def buzzer_loop():
    """Sound the pattern: three short pulses, then the long-pulse group."""
    # dot dot dot
    for _ in range(3):
        _pulse(.1)
    # dash dash dash (two long pulses followed by one short, as in the
    # original timing sequence)
    for _ in range(2):
        _pulse(.2)
    _pulse(.1)


os.system('clear')
print("…..buzzer code……")
count = int(input("how many times would you like to run loop? : "))
while count > 0:
    buzzer_loop()
    count = count - 1
    time.sleep(1)

# Release the GPIO pins before exiting.
GPIO.cleanup()
| true |
88447c3d5def339c0ebcb92592dcaa52360a76d0 | Python | kylepollina/fmri_natural_language_processing | /src/svm.py | UTF-8 | 1,763 | 2.921875 | 3 | [] | no_license | from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
class SVM:
    """PCA dimensionality reduction + support-vector classification for
    12-class fMRI sample data."""
    def __init__(self,
                 training_samples,
                 training_labels,
                 test_samples,
                 test_labels,
                 pca_n_components,
                 cv_folds,
                 decision_function_shape):
        """Store the data splits and configure the PCA and SVC estimators."""
        self.X_train = training_samples
        self.y_train = training_labels
        self.X_test = test_samples
        self.y_test = test_labels
        self.pca = PCA(n_components=pca_n_components)
        self.cv_folds = cv_folds
        self.learning_function = SVC(gamma='scale', decision_function_shape=decision_function_shape)
        # Plain state flags. The original also defined *methods* named
        # istrained/isverbose, but these attributes shadowed them, so the
        # methods were unreachable dead code and have been removed.
        self.istrained = False
        self.isverbose = True
    def print(self, string):
        """Print `string` only when verbose output is enabled."""
        if self.isverbose:
            print(string)
    def train(self):
        """Fit PCA on the training set, project both sets, and fit the SVC."""
        self.print("Training svm...")
        self.X_train_pca = self.pca.fit_transform(self.X_train)
        # Project the test set with the components fitted on the training
        # set. The original called fit_transform() again here, which re-fit
        # PCA on the test data and put the two sets in incompatible
        # feature spaces.
        self.X_test_pca = self.pca.transform(self.X_test)
        self.learning_function.fit(self.X_train_pca, self.y_train)
        self.istrained = True
    def test(self):
        """Predict the test set; store predictions, a 12x12 confusion matrix
        (rows = predicted label, columns = true label) and the accuracy."""
        # Local import: this module never imported numpy, so the original
        # raised NameError on `np` here.
        import numpy as np
        self.print("Testing svm...")
        predictions = self.learning_function.predict(self.X_test_pca)
        confusion_matrix = np.zeros((12, 12))
        for predicted_label, correct_label in zip(predictions, self.y_test):
            confusion_matrix[predicted_label][correct_label] += 1
        self.confusion_matrix = confusion_matrix
        self.predictions = predictions
        self.accuracy_score = accuracy_score(predictions, self.y_test)
4ff28881b35bfbd2cec3a3adb1dbd849b7870e91 | Python | Aasthaengg/IBMdataset | /Python_codes/p03631/s132642454.py | UTF-8 | 119 | 3.53125 | 4 | [] | no_license | a = input()
# Build the reverse of `a` with a slice instead of an index loop.
b = a[::-1]
# A string is a palindrome iff it equals its own reverse.
if a == b:
    print("Yes")
else:
print("No") | true |
4ef6f42674b8c9ad829925d4377850c9bcd9efc2 | Python | marcosvnl/exerciciosPythom3 | /ex079.py | UTF-8 | 668 | 4.4375 | 4 | [] | no_license | # Crie um programa onde o usúario possa digitar vários valores numericos e cadastreos em uma lista. Caso o número
# já exista lá dentro ele não será adicionado. No final, serão exibidos todos os valores ínicos digitados em
# ordem crescente.
# Collect unique integers from the user, then print them in ascending order.
números = []
while True:
    num = int(input('Digite um número: '))
    if num not in números:
        números.append(num)
    else:
        print('Valor existente, não sera adicioinado')
    resposta = ''
    # Keep asking until a valid S/N answer.  [:1] (instead of the original
    # [0]) avoids an IndexError when the user just presses Enter.
    while resposta not in ('S', 'N'):
        resposta = str(input('Quer continuar a cadastra números [S/N]')).strip().upper()[:1]
    if resposta == 'N':
        break
números.sort()
print(números)
| true |
7bd08e06ec2ef947c3676db3b6cc0382dc64453f | Python | nekonoyume/mquant_study | /book/할수있다퀀트투자/chapter_10_42.py | UTF-8 | 1,490 | 2.84375 | 3 | [] | no_license | from raw_data_access.Fnguide import Fnguide
from raw_data_access.Krx import Krx
def chapter_10_42_for_2019_2nd_quarter():
    """Screen KRX stocks by PBR and GP/A for 2019 Q2 and print CSV rows.

    GP/A = gross profit / total assets.
    Reference: http://www.stockpedia.co.kr/report/view/2912
    Screening idea: lower PBR is better, higher GP/A is better.
    Known issues (from the original author): stocks not filed under
    consolidated IFRS (e.g. 'CJ Seafood') and stocks with no gross-profit
    line (e.g. 'Meritz Financial Group') are skipped by the guards below.
    """
    fnguide = Fnguide()
    krx = Krx()
    # CSV header: stock name, PBR, GP/A (printed in Korean)
    print('종목명,PBR,GP/A')
    stock_item_list = krx.get_all_stock_item_list()
    # NOTE(review): only items from index 1200 on are processed -- looks like
    # a resume point from a previous partial run; confirm before reuse.
    stock_item_list = stock_item_list[1200:]
    for stock_item in stock_item_list:
        df = None
        # stock_item[0] is presumably the ticker, stock_item[1] the name
        # (inferred from how they are used below) -- TODO confirm.
        df = fnguide.get_fnguide_dataframe(stock_item[0], 'IFRS(연결)')
        if df is None:
            continue
        df = df.set_index('IFRS(연결)')
        if 'Net Quarter' not in df.columns:
            continue
        df = df['Net Quarter']
        if "2019/06" not in df.columns:
            continue
        series = df['2019/06']
        # Index labels are 1-tuples after the column selection above.
        pbr = series[('PBR',)]
        asset = float(series[('자산총계',)])  # total assets
        df = fnguide.get_fnguide_financial_dataframe(stock_item[0], 'IFRS(연결)')
        df = df.set_index('IFRS(연결)')
        series = df['2019/06']
        if '매출총이익' not in series.index:  # gross-profit line missing
            continue
        sales_profit = series['매출총이익']
        gpa = round(sales_profit/asset, 2)
        print(stock_item[1] + ',' + str(pbr) + ',' + str(gpa))
chapter_10_42_for_2019_2nd_quarter() | true |
970242c9a6c81a9f1617e8ad483e20715a55e345 | Python | KONASANI-0143/Dev | /requirements/venky_task/inheritance/multi level inheritance.py | UTF-8 | 297 | 2.71875 | 3 | [] | no_license | class a:
    def m1(self):
        """Print which class (the base class `a`) defines this method."""
        print("this is m1 of a")
class b(a):
    """Second level of the inheritance chain: inherits m1 from a."""
    def m2(self):
        print("this is a m2 of b")
class c(b):
    """Third level of the inheritance chain: inherits m1 and m2."""
    def m3(self):
        print("this is a m3 of c")
class d(c):
    """Most-derived class: inherits m1, m2 and m3 from the chain above."""
    def m4(self):
        print("this is a m4 of d")
# Instance of the most-derived class; every method from the chain is available.
r=d()
r.m1()
r.m2()
r.m3()
r.m4()
print()
# __bases__ lists only the immediate base class (c), not the full chain.
p=d.__bases__
print(p)
| true |
38228149d0401c00f6d3bf9a7dc4c6985c3626df | Python | OldFuzzier/Data-Structures-and-Algorithms- | /HashTable/136_Single_Number.py | UTF-8 | 759 | 3.625 | 4 | [] | no_license | #
# coding=utf-8
# 136. Single Number
# Myway: hashset, search in hashset O(1), then Time Complexity: O(n)
class Solution(object):
    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        XOR all values together: pairs cancel out (a ^ a == 0, a ^ 0 == a),
        so the result is the one value that appears exactly once.
        O(n) time and O(1) extra space -- the original hash-set version
        needed O(n) space for the set.
        """
        result = 0
        for num in nums:
            result ^= num
        return result
# PCway: 两次for, 一次用dict计数,一次进行search
# class Solution(object):
# def singleNumber(self, nums):
# dic = {}
# for num in nums:
# dic[num] = dic.get(num, 0)+1
# for key, val in dic.items():
# if val == 1:
# return key
| true |
b8d188d0831ea18bd050769a1febcff77d7a3202 | Python | abirmoy/Python-Everyday | /Word Frequency/word_frequency.py | UTF-8 | 1,375 | 3.625 | 4 | [] | no_license | # https://code.tutsplus.com/tutorials/counting-word-frequency-in-a-file-using-python--cms-25965
import re
import json
def listToString(s):
    """Concatenate the items of `s` into a single string.

    Same result as the original character-by-character `+=` loop, but uses
    str.join, which is linear instead of quadratic in the total length.
    """
    return "".join(s)
# Count word frequencies in PG_English.txt and write "count -word" lines to Output.txt.
frequency = {}
document_text = open('PG_English.txt', 'r', encoding='utf-8')
text_string = document_text.read().lower()
# Match whole words of 3-15 lowercase letters only.
match_pattern = re.findall(r'\b[a-z]{3,15}\b', text_string)
for word in match_pattern:
    count = frequency.get(word,0)
    frequency[word] = count + 1
frequency_list = frequency.keys()
word_frequency =[]
word_list_alphabet =[] # STORING WORDS FOR SORTING
text_file = open("Output.txt", "w", encoding='utf8')
for words in frequency_list:
    # ## for save as text
    text_file.write(str(frequency[words])+" -")
    text_file.write(str(words) + '\n')
    # ## deffault tutorial
    # word_frequency.append(words)
    # word_frequency.append(frequency[words])
    # # print(words, frequency[words])
    # # SAVE AS LIST FOR SORTING
    # a = str(words) + '-' + str(frequency[words]) + '\n'
    # word_list_alphabet.append(a)
    # # print(word_frequency)
    # # print(word_list_alphabet.sort())
    # print(word_list_alphabet)
    # text_file.write(listToString(word_list_alphabet))
text_file.close() | true |
ce98a4667307803bbb78de9eee8098df903d5549 | Python | rachelli12/module4 | /coupon_calculations.py | UTF-8 | 1,966 | 3.953125 | 4 | [] | no_license | """
Program name: coupon_calculations.py
Author: Rachel Li
Last modified date: 06/09/2020
The purpose of this program is to write function that accepts
amount of purchase, cash coupon, percent coupon,
and it will calculate and return total order item.
"""
#calculate price after cash coupon and percent coupon
def calculate_price(price, cash_coupon, percent_coupon):
    """Apply a cash coupon, then a percent coupon, to a purchase price.

    The original enumerated the (5, 10) x (10, 15, 20) coupon combinations,
    but every branch computed exactly the same formula, and any other coupon
    values made the function fail with UnboundLocalError on return.
    Collapsed to the single formula, which also generalizes to arbitrary
    coupon values while returning identical results for the original ones.

    Args:
        price: item price before discounts.
        cash_coupon: flat discount amount subtracted first.
        percent_coupon: percentage discount applied after the cash coupon.

    Returns:
        The discounted, pre-shipping cost.
    """
    return (price - cash_coupon) * (1 - percent_coupon / 100)
def calculate_shipping(pre_shipping_cost):
    """Add a shipping charge based on the pre-shipping order total.

    Tiers: under $10 -> $5.95; $10 up to $30 -> $7.95; $30 and up -> free.

    Bug fix: the original never assigned `shipping` in the $30-$50 branch,
    so any total in that range raised NameError.  Free shipping (matching
    the explicit $50+ tier) is assumed for $30-$50 --
    TODO(review): confirm the intended charge for that tier.
    """
    if pre_shipping_cost < 10.00:
        shipping = 5.95
    elif pre_shipping_cost < 30.00:
        shipping = 7.95
    else:
        # $30 and up: no charge (assumption for the formerly-broken 30-50 tier)
        shipping = 0.00
    return pre_shipping_cost + shipping
# Interactive driver: read purchase details, apply coupons and shipping, add 6% tax.
price = float(input("Price of item:$ "))
cash_coupon = float(input("Cash Coupon 5 or 10:$ "))
percent_coupon = float(input("Percent discount 10, 15 or 20: "))
tax = 1.06  # 6% sales-tax multiplier
# NOTE(review): tax is applied to the shipped total, i.e. shipping is taxed too -- confirm intent.
cost = calculate_shipping(calculate_price(price, cash_coupon, percent_coupon))*tax
print(f'The total price after tax is ${cost:.2f}')
# Vestigial guard: everything above already ran at import time; this does nothing.
if __name__ == '__main__':
    pass
| true |
eb188d1d6be35002588867567da32956990a7f1d | Python | antonis357/ShadowCloak-BE | /shadowcloak/stylometry/api/services/attribution.py | UTF-8 | 5,037 | 3.328125 | 3 | [] | no_license | from stylometry.models import Author
from rest_framework.exceptions import APIException
import nltk
import matplotlib
import math
# John Burrows’ Delta Method
def find_author_with_burrows_delta(texts_by_author, anonymous_text):
    """Attribute `anonymous_text` to a candidate author via John Burrows' Delta.

    Args:
        texts_by_author: mapping of Author primary key -> that candidate's
            corpus text (presumed from the Author.objects lookup below --
            TODO confirm against callers).
        anonymous_text: the text of unknown authorship.

    Returns:
        The display name of the candidate with the lowest Delta score.
    """
    word_tokens_by_author = {}
    whole_corpus = []
    # Create a list of word tokens for each author
    for author in texts_by_author:
        tokens = nltk.word_tokenize(texts_by_author[author])
        # Filter out punctuation
        word_tokens_by_author[author] = ([token for token in tokens
                                          if any(c.isalpha() for c in token)])
        # Convert papers to lowercase to count all tokens of the same word together
        # regardless of case
        word_tokens_by_author[author] = (
            [token.lower() for token in word_tokens_by_author[author]])
        whole_corpus += word_tokens_by_author[author]
    # Get a frequency distribution; the 50 most common words are the features.
    whole_corpus_freq_dist = list(nltk.FreqDist(whole_corpus).most_common(50))
    # The main data structure that holds features of the whole corpus
    features = [word for word,freq in whole_corpus_freq_dist]
    feature_freqs = {}
    for author in texts_by_author:
        # Create a dictionary for each candidate's features
        feature_freqs[author] = {}
        # A helper value containing the number of tokens in the author's subcorpus
        overall = len(word_tokens_by_author[author])
        # Calculate each feature's presence in the subcorpus
        for feature in features:
            presence = word_tokens_by_author[author].count(feature)
            feature_freqs[author][feature] = presence / overall
    # The data structure into which we will be storing the "corpus standard" statistics
    corpus_features = {}
    # For each feature...
    for feature in features:
        # Create a sub-dictionary that will contain the feature's mean
        # and standard deviation
        corpus_features[feature] = {}
        # Calculate the mean of the frequencies expressed in the subcorpora
        feature_average = 0
        for author in texts_by_author:
            feature_average += feature_freqs[author][feature]
        feature_average /= len(texts_by_author)
        corpus_features[feature]["Mean"] = feature_average
        # Calculate the standard deviation using the basic formula for a sample
        feature_stdev = 0
        for author in texts_by_author:
            diff = feature_freqs[author][feature] - corpus_features[feature]["Mean"]
            feature_stdev += diff*diff
        feature_stdev /= (len(texts_by_author) - 1)
        feature_stdev = math.sqrt(feature_stdev)
        corpus_features[feature]["StdDev"] = feature_stdev
    feature_zscores = {}
    for author in texts_by_author:
        feature_zscores[author] = {}
        for feature in features:
            # Z-score definition = (value - mean) / stddev
            # We use intermediate variables to make the code easier to read
            feature_val = feature_freqs[author][feature]
            feature_mean = corpus_features[feature]["Mean"]
            feature_stdev = corpus_features[feature]["StdDev"]
            feature_zscores[author][feature] = ((feature_val-feature_mean) /
                                                feature_stdev)
    # Tokenize the test case
    tokens_of_anonymous_text = nltk.word_tokenize(anonymous_text)
    # Filter out punctuation and lowercase the tokens
    tokens_of_anonymous_text = [token.lower() for token in tokens_of_anonymous_text
                                if any(c.isalpha() for c in token)]
    # Calculate the test case's features
    overall = len(tokens_of_anonymous_text)
    testcase_freqs = {}
    for feature in features:
        presence = tokens_of_anonymous_text.count(feature)
        testcase_freqs[feature] = presence / overall
    # Calculate the test case's feature z-scores
    testcase_zscores = {}
    for feature in features:
        feature_val = testcase_freqs[feature]
        feature_mean = corpus_features[feature]["Mean"]
        feature_stdev = corpus_features[feature]["StdDev"]
        testcase_zscores[feature] = (feature_val - feature_mean) / feature_stdev
        # print("Anonymous text's z-score for feature", feature, "is", testcase_zscores[feature])
    # Calculate Delta score between each author and unknown text z-scores
    delta_score_by_author = {}
    for author in texts_by_author:
        delta = 0
        for feature in features:
            delta += math.fabs((testcase_zscores[feature] -
                                feature_zscores[author][feature]))
        delta /= len(features)
        # Resolve the candidate's display name from the database by primary key.
        author = Author.objects.filter(pk=author).values().first()
        delta_score_by_author[author.get("name")] = delta
        print( "Delta score for candidate", author.get("name"), "is", delta )
    # Find author name with the lowest Delta score
    probable_author = min(delta_score_by_author, key=delta_score_by_author.get)
return probable_author | true |
15b8fe5315e42d730ba379078d8811c58ba5f1f0 | Python | Vrokm/LeetCode-record | /695/695.py | UTF-8 | 1,038 | 3.15625 | 3 | [] | no_license | class Solution:
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
max_area = 0
self.area = 1
def dfs(x, y, flag, grid):
dx = [-1, 1, 0, 0]
dy = [0, 0, 1, -1]
for i in range(4):
newx = x+dx[i]
newy = y+dy[i]
if newx>=0 and newx<m and newy>=0 and newy<n and flag[newx][newy]==0 and grid[newx][newy]==1:
flag[newx][newy] = 1
self.area += 1
dfs(newx, newy, flag, grid)
flag = copy.deepcopy(grid)
for i in range(m):
for j in range(n):
flag[i][j] = 0
for i in range(m):
for j in range(n):
if flag[i][j]==0 and grid[i][j]==1:
flag[i][j] = 1
self.area = 1
dfs(i, j, flag, grid)
max_area = max(max_area, self.area)
return max_area | true |
559c7421abb54ebe43e81611818303161fab1836 | Python | lento234/masterthesis | /figures/style/flipbook/flipbook.py | UTF-8 | 1,242 | 2.65625 | 3 | [] | no_license |
"""
Latex code in the begin of the \mainmatter
\rfoot[]{\setlength{\unitlength}{1mm}
\begin{picture}(0,0)
\put(-10,-5){\includegraphics[scale=0.1]{./figures/style/flipbook/pic\thepage.png}}
\end{picture}}
"""
import os
import glob
import re
numbers = re.compile(r'(\d+)')
def numericalSort(value):
    """Sort key that compares embedded digit runs numerically.

    re.split with a capturing group leaves the digit runs at the odd
    indexes; converting those to int makes e.g. 'pic10' sort after 'pic2'.
    """
    tokens = numbers.split(value)
    return [int(tok) if idx % 2 else tok for idx, tok in enumerate(tokens)]
# NOTE: Python 2 script (bare `print` statements); shells out to ImageMagick's
# `convert` and to `mv`.
part1 = sorted(glob.glob('./raw_part1/*.png'), key=numericalSort)
part2 = sorted(glob.glob('./raw_part2/*.png'), key=numericalSort)
# Constants
startNum = 25
startNum2 = len(part1[startNum::])
convertString = 'convert %s -colorspace Gray -density 10 -flop animation%g.png'
#convertString = 'convert %s -colorspace Gray -density 10 -flop -contrast-stretch -75x0 animation%g.png'
#os.system('rm *.png')
# Convert part1 (from frame `startNum` on), then part2 continuing the numbering.
for i,fileName in enumerate(part1[startNum::]):
    os.system(convertString % (fileName, i))
    print i
for i,fileName in enumerate(part2):
    os.system(convertString% (fileName, startNum2 + i))
    print startNum2 + i
# Reorder and rename: walk the converted frames in reverse into pic1..picN.
convertedFiles = sorted(glob.glob('*.png'), key=numericalSort)
for i,fileName in enumerate(convertedFiles[::-1]):
    os.system('mv %s pic%g.png' % (fileName, i+1))
    print i
| true |
c051b66a8f4e6d94be3ce838ba48ea18c23e295e | Python | lj1918/SmartTrader | /SmartTraderFrameWork/trader/stConstant.py | UTF-8 | 960 | 2.546875 | 3 | [] | no_license | from SmartTraderFrameWork.trader.language import constant
# Copy every public constant from the language-specific `constant` module into
# this module's namespace (at module level, locals() is the module dict).
# Interesting mechanism.
d = locals()
for name in dir(constant):
    if '__' not in name:
        d[name] = constant.__getattribute__(name)
d = {}  # NOTE(review): only rebinds the name `d`; the constants copied above remain module attributes
def test_en():
    from SmartTraderFrameWork.trader.language.english import constant
    # Copy the English constants into this function's locals() snapshot.
    # NOTE(review): unlike module level, assigning into locals() inside a
    # function does not create real local variables -- the loop effectively
    # only prints the constant values.
    d = locals()
    for name in dir(constant):
        if '__' not in name:
            d[name] = constant.__getattribute__(name)
            print(constant.__getattribute__(name))
def test_cn():
    from SmartTraderFrameWork.trader.language.chinese import constant
    # Same as test_en, but for the Chinese constant set.
    d = locals()
    for name in dir(constant):
        if '__' not in name:
            d[name] = constant.__getattribute__(name)
            print(constant.__getattribute__(name))
if __name__ == '__main__':
test_en() | true |
21c49bc9064f2cb7899e2fdab8e25dd56fecc06f | Python | johnelutz/toga | /src/core/toga/widgets/base.py | UTF-8 | 10,200 | 2.84375 | 3 | [
"BSD-3-Clause"
] | permissive | from builtins import id as identifier
from colosseum import CSS
from toga.platform import get_platform_factory
class Point:
    """An immutable-by-convention 2-D position given as (top, left).

    Args:
        top (int or float): The y coordinate of the point.
        left (int or float): The x coordinate of the point.
    """

    def __init__(self, top, left):
        self.top = top
        self.left = left

    def __repr__(self):
        # Note the historical (left, top) — i.e. (x, y) — display order.
        return '<Point ({},{})>'.format(self.left, self.top)
class Layout:
    """ The :obj:`Layout` is the mathematical representation of a box.
    It has attributes like width, height, top, left to describe its bounding box.
    With the :obj:`dirty` flag it can track whether its own layout, as well as
    the layout of all children, needs to be reevaluated.
    Args:
        node (:obj:`toga.Widget`): The widget that the layout should be attached to.
        width (int): The width of the box.
        height (int): The height of the box.
        top (int): The y coordinate of the top side of the box.
        left (int): The x coordinate of the left side of the box.
    """
    def __init__(self, node, width=None, height=None, top=0, left=0):
        self.node = node
        self.width = width
        self.height = height
        self.top = top
        self.left = left
        self._dirty = True
    def __repr__(self):
        if self.node:
            return '<Layout%s (%sx%s @ %s,%s)>' % (
                {
                    True: ' (dirty)',
                    False: '',
                    None: ' (evaluating)'
                }[self._dirty],
                self.width, self.height,
                self.absolute.left, self.absolute.top
            )
        else:
            return '<Layout%s (%sx%s @ %s,%s)>' % (
                {
                    True: ' (dirty)',
                    False: '',
                    None: ' (evaluating)'
                }[self._dirty],
                self.width, self.height,
                self.left, self.top
            )
    def __eq__(self, value):
        # Geometry-only equality; the dirty flag and node are not compared.
        return all([
            self.width == value.width,
            self.height == value.height,
            self.top == value.top,
            self.left == value.left
        ])
    def reset(self):
        # Back to the freshly-constructed geometry; does not touch the dirty flag.
        self.width = None
        self.height = None
        self.top = 0
        self.left = 0
    ######################################################################
    # Layout dirtiness tracking.
    #
    # If dirty == True, the layout is known to be invalid.
    # If dirty == False, the layout is known to be good.
    # If dirty is None, the layout is currently being re-evaluated.
    ######################################################################
    @property
    def dirty(self):
        return self._dirty
    @dirty.setter
    def dirty(self, value):
        # Propagate the flag down the whole widget subtree.
        self._dirty = value
        for child in self.node.children:
            child.layout.dirty = value
    ######################################################################
    # Implied geometry properties
    ######################################################################
    @property
    def right(self):
        return self.left + self.width
    @property
    def bottom(self):
        return self.top + self.height
    @property
    def absolute(self):
        # Absolute position: parent's origin, plus parent's own offset, plus ours.
        if self.node.parent:
            parent_layout = self.node.parent.layout
            return Point(
                top=parent_layout.origin.top + parent_layout.top + self.top,
                left=parent_layout.origin.left + parent_layout.left + self.left,
            )
        else:
            return Point(top=self.top, left=self.left)
    @property
    def origin(self):
        # Top-left corner of the coordinate system (top, left) are relative to.
        if self.node.parent:
            parent_layout = self.node.parent.layout
            return Point(
                top=parent_layout.origin.top + parent_layout.top,
                left=parent_layout.origin.left + parent_layout.left,
            )
        else:
            return Point(top=0, left=0)
class Widget:
    """ This is the base widget implementation that all widgets in Toga
    derive from.
    It defines the interface for core functionality for children, styling,
    layout and ownership by specific App and Window.
    Apart from the above, this is an abstract implementation which must
    be made concrete by some platform-specific code for the _apply_layout
    method.
    Args:
        id (str): An identifier for this widget.
        style (:obj:`colosseum.CSSNode`): An optional style object.
            If no style is provided then a new one will be created for the widget.
        factory (:obj:`module`): A python module that is capable of returning an
            implementation of this class with the same name (optional & normally not needed).
    """
    def __init__(self, id=None, style=None, factory=None):
        self._id = id if id else identifier(self)
        self._parent = None
        self._children = None
        self._window = None
        self._app = None
        self._impl = None
        self._layout_in_progress = False
        self.layout = Layout(self)
        if style:
            self.style = style.copy()
        else:
            self.style = CSS()
        self._font = None
        self.factory = get_platform_factory(factory)
    def __repr__(self):
        return "<%s:%s>" % (self.__class__.__name__, id(self))
    @property
    def id(self):
        """ The node identifier. This id can be used to target CSS directives
        Returns:
            The widgets identifier as a ``str``.
        """
        return self._id
    @property
    def parent(self):
        """ The parent of this node.
        Returns:
            The parent :class:`toga.Widget`.
        """
        return self._parent
    @property
    def children(self):
        """ The children of this node.
        This *always* returns a list, even if the node is a leaf
        and cannot have children.
        Returns:
            A list of the children for this widget.
        """
        if self._children is None:
            return []
        else:
            return self._children
    def add(self, child):
        """ Add a widget as a child of this one.
        Args:
            child (:class:`toga.Widget`): A widget to add as a child to this widget.
        Raises:
            ValueError: If this widget is a leaf, and cannot have children.
        """
        if self._children is None:
            raise ValueError('Widget cannot have children')
        self._children.append(child)
        # Propagate ownership and parentage to the new child.
        child.app = self.app
        child._parent = self
        if self.parent:
            self.parent.layout.dirty = True
        if self._impl:
            self._impl.add_child(child._impl)
    @property
    def app(self):
        """ The App to which this widget belongs.
        On setting the app we also iterate over all children of this widget and set them to the same app.
        Returns:
            The :class:`toga.App` to which this widget belongs.
        Raises:
            ValueError: If the widget is already associated with another app.
        """
        return self._app
    @app.setter
    def app(self, app):
        if self._app is not None:
            if self._app != app:
                raise ValueError("Widget %s is already associated with an App" % self)
        elif app is not None:
            self._app = app
            self._impl.set_app(app)
            if self._children is not None:
                for child in self._children:
                    child.app = app
    @property
    def window(self):
        """ The Window to which this widget belongs.
        On setting the window, we automatically update all children of this widget to belong to the same window.
        Returns:
            The :class:`toga.Window` to which the widget belongs.
        """
        return self._window
    @window.setter
    def window(self, window):
        self._window = window
        if self._impl:
            self._impl.set_window(window)
        if self._children is not None:
            for child in self._children:
                child.window = window
    @property
    def style(self):
        """ The style object for this widget.
        Returns:
            The style object :class:`colosseum.CSSNode` of the widget.
        """
        return self._style
    @style.setter
    def style(self, value):
        # Bind the style object to this widget so style changes apply to it.
        self._style = value.bind(self)
    @property
    def font(self):
        """ Font the widget.
        Returns:
            The :class:`toga.Font` of the widget.
        """
        return self._font
    @font.setter
    def font(self, font):
        self._font = font
        self._impl.set_font(font)
    @property
    def enabled(self):
        # Delegated straight to the platform implementation.
        return self._impl.enabled
    @enabled.setter
    def enabled(self, value):
        self._impl.enabled = value
    def rehint(self):
        # Ask the platform backend to recompute this widget's size hints.
        self._impl.rehint()
    def _update_layout(self, **style):
        """Force a layout update on the widget.
        The update request can be accompanied by additional style information
        (probably min_width, min_height, width or height) to control the
        layout.
        """
        # Re-entrancy guard: children call back into their parents' layout.
        if self._layout_in_progress:
            return
        self._layout_in_progress = True
        if style:
            self.style.set(**style)
        # Recompute layout for this widget
        self.style.apply()
        # Update the layout parameters for all children.
        # This will also perform a leaf-first update of
        # the constraint on each widget.
        self._update_child_layout()
        # Set the constraints the widget to adhere to the new style.
        self._impl.apply_layout()
        self._impl.apply_sub_layout()
        self._layout_in_progress = False
    def _update_child_layout(self):
        # Recursively relayout every child (no-op for leaf widgets).
        if self._children is not None:
            for child in self.children:
                child._update_layout()
        # # FIXME some wigets need their _update_child_layout() function get called.
        # try:
        #     child._impl._update_child_layout()
        # except:
        #     pass
| true |
6ee6377707c77f0ab147299f8de7252c4b59d52a | Python | command-z-z/OpenCV | /OpenCV/contours.py | UTF-8 | 1,691 | 2.796875 | 3 | [] | no_license | import cv2
import numpy as np
import matplotlib.pyplot as plt
def find_contours(img_input,blur,threshold):
    """Threshold `img_input`, find external contours >= 500 px^2, draw and show them.

    Args:
        img_input: BGR image (OpenCV ndarray).
        blur: Gaussian kernel size (must be odd for cv2.GaussianBlur).
        threshold: nominal threshold value.
            NOTE(review): THRESH_OTSU is passed below, so OpenCV chooses the
            threshold automatically and this argument is effectively ignored.
    """
    # gray
    gray = cv2.cvtColor(img_input,cv2.COLOR_BGR2GRAY)
    # Gaussian blur to suppress noise before thresholding
    img_blur = cv2.GaussianBlur(gray,(blur,blur),0)
    # threshold ([1] keeps the image; [0] would be the computed threshold value)
    img = cv2.threshold(img_blur,threshold,255,cv2.THRESH_OTSU)[1]
    # open operation (erode then dilate) to remove small specks
    kernel = np.ones((2,2),np.uint8)
    img_erode = cv2.erode(img,kernel,iterations=1)
    img = cv2.dilate(img_erode,kernel,iterations=1)
    # invert
    img = ~img
    # findContours; CHAIN_APPROX_SIMPLE compresses straight runs to endpoints
    contours,nada = cv2.findContours(img.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    # make contour frame
    frame = img_input.copy()
    print(len(contours))
    for c in contours:
        if cv2.contourArea(c) < 500:
            continue
        M = cv2.moments(c)
        # print( M )
        # Centroid from the spatial moments
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        x, y, w, h = cv2.boundingRect(c)
        # Bounding-box center (only used by the commented-out debug circles)
        rx = x + int(w / 2)
        ry = y + int(h / 2)
        # plot contours
        cv2.drawContours(frame, [c], 0, (0, 0, 255), 2)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.circle(frame,(cx,cy),5,(0, 255, 0), 2)
        print("top_left:",(x,y))
        print("top_right:",(x+w,y))
        print("bottom_left:", (x, y+h))
        print("bottom_right:", (x + w, y+h))
        # cv2.circle(frame, (cx, cy), 2, (0, 0, 255), 2)
        # cv2.circle(frame, (rx, ry), 2, (0, 255, 0), 2)
    cv2.imshow('result1',img)
    cv2.imshow('result',frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Demo: detect contours in the bundled sample image with kernel 5, threshold 127.
img = cv2.imread('screw3.png')
find_contours(img,5,127)
| true |
7a9cc6430c26ece298991c8d31c4495303776594 | Python | USF-Computer-Networking/lanchat-shyamsn97 | /Test2server.py | UTF-8 | 441 | 2.796875 | 3 | [] | no_license | #Used to test ChatClient
import socket
udp_port = 6000  # NOTE(review): defined but unused -- bind() below uses port 0 (ephemeral)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# Bind to all interfaces on an OS-assigned port.
s.bind(('0.0.0.0',0))
while True:
    print('listening on port:', s.getsockname())
    # Blocking receive of up to 1000 bytes; r == (payload, sender_address).
    r = s.recvfrom(1000)
    print("Recieved : %s"%(r[0].decode('utf-8')))
    print(r[1])
    reply = "hey its me!"
    client_address = r[1]
    # Echo a fixed reply back to the sender.
    s.sendto(reply.encode('utf-8'), client_address)
print("sent") | true |
c40db093043e042423364ba6f4f8d881a864dec4 | Python | mattjp/leetcode | /practice/medium/0763-Partition_Labels.py | UTF-8 | 1,194 | 3.25 | 3 | [] | no_license | class Solution:
def partitionLabels(self, S: str) -> List[int]:
# ababcbacadefegdehijhklij
# a: |-------| [0,8]
# b: |---| [1,5]
# c: |--| [4,7]
# d: |----| [9,14]
# e: |----| [10,15]
# f: | [11,11]
# g: | [13,13]
# h: |--| [16,19]
# i: |----| [17,22]
# j: |----| [18,23]
# k: | [20,20]
# l: | [21,21]
seen = set()
segments = []
S_reversed = S[::-1]
for i,ch in enumerate(S):
if ch not in seen:
j = len(S) - 1 - S_reversed.index(ch) # find last index of the given char
segments.append([i,j])
seen.add(ch)
output = [segments.pop(0)]
while segments:
top = segments.pop(0)
if top[0] > output[-1][1]:
output.append(top) # no overlap, therefore end of the current segment
else:
output[-1][1] = max(output[-1][1], top[1])
return list(map(lambda x: x[1]-x[0]+1, output)) # return the lengths of the segments
| true |
a32dec8077ccc80edc669b7d4d815f772aa58ca2 | Python | wanzhs/crawlDistrict | /district.py | UTF-8 | 4,334 | 2.875 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import time
from Queue import Queue
from threading import Thread
import pandas as pd
import requests
from lxml import etree
# Python 2 compatibility hack: re-expose sys.setdefaultencoding and force
# UTF-8 as the default str<->unicode codec (this module handles Chinese text).
reload(sys)
sys.setdefaultencoding('utf-8')
def getUrl(url, num_retries=20):
    """GET `url` and return its body decoded as GBK.

    Retries up to `num_retries` times with a 10s pause on any exception;
    implicitly returns None once the retries are exhausted.
    """
    headers = {
        'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"}
    try:
        response = requests.get(url, headers=headers)
        # The target pages are decoded as GBK.
        response.encoding = 'GBK'
        data = response.text
        return data
    except BaseException as e:
        if num_retries > 0:
            time.sleep(10)
            print(url)
            print("request fail,retry!")
            return getUrl(url, num_retries - 1)
        else:
            print("retry fail!")
            print("errors:%s" % e + "" + url)
def getProvice(url):
    """Scrape the province index page; return dicts with name/code/link/level/parentCode."""
    provice = []
    data = getUrl(url)
    selector = etree.HTML(data)
    proviceList = selector.xpath('//tr[@class="provincetr"]')
    for i in proviceList:
        proviceName = i.xpath('td/a/text()')
        proviceLink = i.xpath('td/a/@href')
        for j in range(len(proviceLink)):
            parentCode = 0  # provinces are top-level
            # href looks like "NN.html": strip the 5-char ".html" suffix for the code
            provinceCode = proviceLink[j][0:-5]
            # url ends in "index.html" (10 chars): replace it with the province href
            proviceURL = url[:-10] + proviceLink[j]
            provice.append({'name': proviceName[j], 'code': complementDistrictCode(provinceCode), 'link': proviceURL,
                            'level': 0,
                            'parentCode': parentCode})
    return provice
def getCity(url_list):
    """Scrape every province page in `url_list`; return city dicts (level 1)."""
    city_all = []
    for url in url_list:
        data = getUrl(url)
        selector = etree.HTML(data)
        cityList = selector.xpath('//tr[@class="citytr"]')
        city = []
        for i in cityList:
            cityCode = i.xpath('td[1]/a/text()')
            cityLink = i.xpath('td[1]/a/@href')
            cityName = i.xpath('td[2]/a/text()')
            for j in range(len(cityLink)):
                # Parent (province) code: the 2 digits before ".html" in the URL
                parentCode = url[-7:-5]
                cityURL = url[:-7] + cityLink[j]
                city.append(
                    {'name': cityName[j], 'code': complementDistrictCode(cityCode[j]), 'link': cityURL, 'level': 1,
                     'parentCode': complementDistrictCode(parentCode)})
        city_all.extend(city)
    return city_all
def getCounty(url_list):
    """Scrape county rows from every city page using 5 worker threads.

    Returns a list of county dicts (level 2).
    """
    queue_county = Queue()
    thread_num = 5
    county = []
    def produce_url(url_list):
        # Seed the work queue with every city page URL.
        for url in url_list:
            queue_county.put(url)
    def getData():
        # Worker: drain the queue until empty.
        # NOTE(review): workers append to the shared `county` list concurrently;
        # this relies on CPython's GIL making list.append effectively atomic.
        while not queue_county.empty():
            url = queue_county.get()
            data = getUrl(url=url)
            selector = etree.HTML(data)
            countyList = selector.xpath('//tr[@class="countytr"]')
            for i in countyList:
                countryCode = i.xpath('td[1]/a/text()')
                countyLink = i.xpath('td[1]/a/@href')
                countyName = i.xpath('td[2]/a/text()')
                for j in range(len(countyLink)):
                    # Parent (city) code: the 4 digits before ".html" in the URL
                    parentCode = url[-9:-5]
                    countyURL = url[:-9] + countyLink[j]
                    county.append(
                        {'code': complementDistrictCode(countryCode[j]), 'link': countyURL, 'name': countyName[j],
                         'level': 2,
                         'parentCode': complementDistrictCode(parentCode)})
    def run(url_list):
        # Fill the queue, start the workers, and wait for all of them.
        produce_url(url_list)
        ths = []
        for _ in range(thread_num):
            th = Thread(target=getData)
            th.start()
            ths.append(th)
        for th in ths:
            th.join()
    run(url_list)
    return county
def complementDistrictCode(code):
    """Right-pad a district code with zeros to the canonical 9-digit width."""
    text = str(code)
    return text + '0' * (9 - len(text))
if __name__ == '__main__':
    # Fetch provinces (level 0) and dump them to CSV.
    pro = getProvice("http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2019/index.html")
    df_province = pd.DataFrame(pro)
    df_province.info()
    df_province.to_csv('province.csv', sep=',', header=True, index=False)
    # Fetch cities (level 1) under every province.
    city = getCity(df_province['link'])
    df_city = pd.DataFrame(city)
    df_city.info()
    df_city.to_csv('city.csv', sep=',', header=True, index=False)
    # Fetch counties (level 2) under every city.
    county = getCounty(df_city['link'])
    df_county = pd.DataFrame(county)
    df_county.info()
df_county.to_csv('county.csv', sep=',', header=True, index=False) | true |
9424bcf6daeae9330bfcdc313dc571ace2f269c4 | Python | foss2cyber/Incident-Playbook | /Incident-Response/Tools/beagle/tests/backend/test_graphistry.py | UTF-8 | 571 | 2.625 | 3 | [
"MIT"
] | permissive | import mock
from beagle.backends.graphistry import Graphistry
class MockGraphistry(Graphistry):
    """Test double whose no-op __init__ skips Graphistry's real setup."""
    def __init__(self):
        pass
def test_anonymize_graph():
    """anonymize_graph() must strip the 'properties' field from every node."""
    graphistry = MockGraphistry()
    # Stub to_json so anonymize_graph operates on a fixed 2-node, 1-edge graph.
    graphistry.to_json = mock.MagicMock()
    graphistry.to_json.return_value = {
        "nodes": [{"id": 1, "properties": 4}, {"id": 2, "properties": 3}],
        "links": [{"source": 2, "target": 1, "data": "foo"}],
    }
    G = graphistry.anonymize_graph()
    for node in G.nodes(data=True):
        # tuple of id, data
        assert "properties" not in node[1]
| true |
b9c54ff1fab12950bc2eb8c30cadc71432675a5b | Python | CoreyDeJong/data-structures-and-algorithms-python | /data_structures/hashtable/test_hashtable.py | UTF-8 | 1,575 | 3.375 | 3 | [] | no_license | import pytest
from hashtable import *
from linked_list import *
def test_add():
    """After adding pairs, get() returns the stored [key, value] bucket entry."""
    table = Hashmap(10)
    for name, age in (('Corey', 37), ('Kristin', 33), ('Porter', 14)):
        table.add(name, age)
    assert table.get('Corey') == [['Corey', 37]]
def test_contains_true():
    """contains() reports True for a key that was added."""
    table = Hashmap(10)
    for name, age in (('Corey', 37), ('Kristin', 33), ('Porter', 14)):
        table.add(name, age)
    assert table.contains('Corey') == True
def test_contains_false():
    """contains() reports False for a key that was never added."""
    table = Hashmap(10)
    for name, age in (('Corey', 37), ('Kristin', 33), ('Porter', 14)):
        table.add(name, age)
    assert table.contains('Bob') == False
# hashmap = Hashmap(1024)
# hashmap.add('corey','37')
# .get will return the value of the key
# hashmap.get('corey')
# this will return '37'
##### Left Join #######
def test_demo1():
    """left_join_hashtable keeps every key of the left map, pairing each with
    the right map's value, or None when the key is absent on the right."""
    hashmap1 = {
        'fond' : 'enamored',
        'wrath' : 'anger',
        'diligent' : 'employed',
        'outfil' : 'garb',
        'guide' : 'usher',
    }
    hashmap2 = {
        'fond' : 'averse',
        'wrath' : 'delight',
        'diligent' : 'idle',
        'guide' : 'follow',
        'flow' : 'jam',
    }
    actual = left_join_hashtable(hashmap1, hashmap2)
    # 'outfil' exists only on the left, so its right-hand value is None;
    # 'flow' exists only on the right and is dropped by the left join.
    expected = [
        ['fond', 'enamored', 'averse'],
        ['wrath', 'anger', 'delight'],
        ['diligent', 'employed', 'idle'],
        ['outfil', 'garb', None],
        ['guide', 'usher', 'follow']
    ]
    assert actual == expected
| true |
c737c4bd0df20c8a1dcfc7c76f21949bca1eecdc | Python | AkiraRafhaelJP/GraphCollection | /TugasSatuMetnum.py | UTF-8 | 606 | 3.578125 | 4 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
# 100 evenly spaced sample points over [-10, 10]
x = np.linspace(-10, 10, 100)
#f(x)= e^x + 5x^2
y1 = np.exp(x) + 5 * x ** 2
plt.plot(x, y1, 'b-', label='f(x)=e^x + 5x^2')
#f(x)= x^3 + 2x^2+ 10x - 20
y2 = x ** 3 + 2 * x ** 2 + 10 * x - 20
plt.plot(x, y2, 'g--', label='f(x)= x^3 + 2x^2+ 10x - 20')
#f(x)= xe^-x + 1
y3 = x * np.exp(-x) + 1
plt.plot(x, y3, 'm:', label='f(x)= xe^-x + 1')
# Cosmetics: legend, grid, fixed viewport, axis labels, then show the figure.
plt.legend()
plt.grid(True, linestyle='-')
plt.xlim([-10, 10])
plt.ylim([-10, 10])
plt.title('Grafik Tugas 1 MetNum')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
| true |
5a6bc8dd75d6376fb0566d1806d4f5e9fc3e8103 | Python | shantanusohni/python-drinking-app | /main.py | UTF-8 | 396 | 2.53125 | 3 | [] | no_license | import time
from plyer import notification
if __name__ == "__main__":
    # Remind the user to drink water once per hour, forever.
    while True:
        notification.notify(
            title = "pls drink water now !!",
            message = "8 litres required",  # fixed typo: was "8 litter requied"
            app_icon ="C:\\Users\\shree\\Desktop\\drinkwater_app\\icon.ico",
            timeout=10
        )
        time.sleep(60*60)  # fixed comment: 60*60 is 3600 SECONDS, i.e. one hour
| true |
2d6cc5724448b751150670b49f0a8c446821b3b1 | Python | Ze1598/medium-articles | /Find your favorite artists in Spotify playlists with Python/analyze_spotify_playlist_data.py | UTF-8 | 1,366 | 3.65625 | 4 | [] | no_license | import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
# Load the data and sort it by artist frequency in descending order
def pre_process_data(file_name: str) -> pd.DataFrame:
    """Read the CSV, order rows by descending "Frequency" and renumber the index."""
    frame = pd.read_csv(file_name)
    ordered = frame.sort_values("Frequency", ascending=False)
    # Fresh 0..n-1 index so positional access matches the new order
    return ordered.reset_index(drop=True)
# Plot a column chart for the 10 most frequent artists
def plot_column_chart(data: pd.DataFrame) -> None:
    """Render a Plotly bar chart of the ten most frequent artists."""
    chart = px.bar(
        data.head(n=10), x="Artist", y="Frequency",
        text="Frequency",
        title=f"Top 10 Artists (out of {data.shape[0]} artists)"
    )
    # Data labels drawn inside the columns
    chart.update_traces(textposition="inside")
    # Bare axes, uniform label sizing and a centered title
    chart.update_layout(
        xaxis_title="",
        yaxis_title="",
        uniformtext_minsize=14,
        uniformtext_mode="hide",
        title_x=0.5
    )
    # Display the finalized plot
    chart.show()
if __name__ == "__main__":
    # Full pipeline: load + sort the CSV, then plot the top artists
    source_csv = "artists_frequencies.csv"
    artist_data = pre_process_data(source_csv)
    plot_column_chart(artist_data)
| true |
907cb99dbb2f739dc701815715d285fd92d7fad0 | Python | fouriaux/hpcbench | /hpcbench/campaign.py | UTF-8 | 3,323 | 2.59375 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | """HPCBench campaign helper functions
"""
import re
from . toolbox.collections_ext import (
Configuration,
nameddict,
)
def from_file(campaign_file):
    """Load a campaign from a YAML file and fill in its default values.

    :param campaign_file: path to YAML file
    :return: memory representation of the YAML file
    :rtype: dictionary
    """
    loaded = Configuration.from_file(campaign_file)
    return fill_default_campaign_values(loaded)
def fill_default_campaign_values(campaign):
    """Fill an existing campaign with default
    values for optional keys
    :param campaign: dictionary (Configuration: supports attribute access)
    :return: object provided in parameter, mutated in place
    :rtype: dictionary
    """
    default_campaign = dict(
        output_dir="hpcbench-%Y%m%d-%H:%M:%S"
    )
    for key, value in default_campaign.items():
        campaign.setdefault(key, value)
    campaign.setdefault('network', {})
    campaign['network'].setdefault('nodes', ['localhost'])
    campaign.network.setdefault('tags', {})
    campaign.benchmarks.setdefault('*', {})
    # Normalize each tag entry to a LIST of patterns, then pre-process every
    # pattern in place: "match" -> compiled regex, "nodes" -> set of names.
    for tag in list(campaign.network.tags):
        config = campaign.network.tags[tag]
        if isinstance(config, dict):
            config = [config]
        campaign.network.tags[tag] = config
        for pattern in config:
            for mode in list(pattern):
                if mode == 'match':
                    pattern[mode] = re.compile(pattern[mode])
                elif mode == 'nodes':
                    if not isinstance(pattern[mode], list):
                        raise Exception('Invalid "nodes" value type.'
                                        ' list expected')
                    pattern[mode] = set(pattern[mode])
                else:
                    # BUG FIX: the message was passed logging-style as a second
                    # Exception argument and was never interpolated; format it.
                    raise Exception('Unknown tag association pattern: %s'
                                    % mode)
    set_export_campaign_section(campaign)
    return campaign
def set_export_campaign_section(campaign):
    """Add default values for the ``export`` section
    (Elasticsearch host, connection parameters and index name).
    """
    export = campaign.setdefault('export', nameddict())
    elastic = export.setdefault('elasticsearch', nameddict())
    elastic.setdefault('host', 'localhost')
    elastic.setdefault('connection_params', {})
    elastic.setdefault('index_name', 'hpcbench-{date}')
    return campaign
def get_benchmark_types(campaign):
    """Yield the ``type`` of every benchmark referenced in the configuration.

    :return: benchmarks
    :rtype: string generator
    """
    for group in campaign.benchmarks.values():
        for bench in group.values():
            yield bench.type
def get_metrics(campaign):
    """Yield (metadata, metrics) pairs for every category of every benchmark
    suite on every tagged host of the campaign.

    :return: metrics
    :rtype: dictionary generator
    """
    for hostname, host_driver in campaign.traverse():
        for tag, tag_driver in host_driver.traverse():
            for suite, bench_obj in tag_driver.traverse():
                for category, cat_obj in bench_obj.traverse():
                    metadata = dict(
                        hostname=hostname,
                        tag=tag,
                        category=category,
                        suite=suite,
                    )
                    yield (metadata, cat_obj.metrics)
| true |
0421b69192e93ac6de917031311d47e2d5d96e60 | Python | SilviaAlarcon/programacionPython | /tiposEstructuradosYArchivos/ejercicio6.py | UTF-8 | 1,807 | 4.15625 | 4 | [] | no_license | '''Función con un argumento para crear un diccionario que defina como clave
el nip de una persona y como valor el nombre de pila de esta. La función
retorna dicho diccionario con dichos datos.'''
def usuario(datos_usuarios):
    """Ask how many users to add, then read a NIP and a name for each one.

    Returns the same dict, updated in place (key = NIP, value = first name).
    """
    total = int(input('Número de usuarios que quieres agregar: '))
    for _ in range(total):
        clave = int(input('Ingrese un NIP: '))
        valor = str(input('Ingrese el nombre: '))
        datos_usuarios[clave] = valor
    return datos_usuarios
def imprimir_datos(datos_usuarios):
    """Print the complete NIP -> name dictionary."""
    print(f'Datos NIP/Personas: \n{datos_usuarios}')
def buscar_consultar(nip_usuario, datos_usuarios):
    """Look a person up by NIP and print the name, or an error message
    when the NIP is not registered."""
    print(datos_usuarios.get(nip_usuario, 'Error, no existe el NIP'))
def main():
    """Interactive driver: load users, optionally print them, optionally search one."""
    datos_usuarios = {}
    usuario(datos_usuarios)
    opcion_imprimir = int(input('Para imprimir los datos presiona 1, para salir presiona 2: \n'))
    if opcion_imprimir == 1:
        imprimir_datos(datos_usuarios)
    else:
        print('Datos guardados')
    opcion_busqueda = int(input('Para buscar a una persona presiona 1, para salir presiona 2: \n'))
    if opcion_busqueda == 1:
        nip = int(input('Dame el NIP: '))
        buscar_consultar(nip, datos_usuarios)
    else:
        print('Fin del programa')
# Run the interactive flow only when executed directly as a script.
if __name__ == '__main__':
    main()
6e081c41df487a6114d172ea8396d4057b1646a6 | Python | mohammaddanish85/Python-Coding | /List Data type.py | UTF-8 | 638 | 4.78125 | 5 | [] | no_license | # This program demonstrate the concept of list in python.
# Lists are similar to arrays in C/Java: they represent a group of elements.
# Unlike an array, a list can store elements of different types.
# NOTE: renamed the variable from `list` — shadowing the builtin `list` type
# would break any later call such as list(...) in this module.
items = [2, 'Danish', 23.5, 'Ariba']  # Create a list having different element types.
print(items)  # Print the entire list.
print(items[2])  # Print the element at index 2 (the third element).
print(items[1:3])  # Print the slice holding the elements at indexes 1 and 2.
0aedf57203f6c50aaedd122ac42ec2680702aed8 | Python | AjinkyaTaranekar/AlgorithmX | /Codeforces/677/A.py | UTF-8 | 160 | 3.09375 | 3 | [] | no_license | n,h = map(int,input().split())
a = list(map(int, input().split()))
# Each of the first n friends needs one unit of width, two if taller than the fence h.
res = sum(2 if a[i] > h else 1 for i in range(n))
print(res)
a30e5773dc3585e668e349d0f37b9842da184fa8 | Python | mhmadwrekat/data-structures-and-algorithms-2 | /python/code_challenges/stack-and-queue/stack-and-queue.py | UTF-8 | 1,530 | 3.828125 | 4 | [
"MIT"
] | permissive | class Node:
    def __init__(self , data=None):
        """Create a singly linked node holding `data` with no successor."""
        self.data = data
        # Link to the next node in the chain (None marks the end).
        self.next = None
class Stack:
    """LIFO stack backed by a singly linked chain of Node objects."""
    def __init__(self, node=None):
        self.top = node
    def push(self, value):
        """Wrap `value` in a Node and make it the new top."""
        entry = Node(value)
        entry.next = self.top
        self.top = entry
    def pop(self):
        """Remove and return the top value, or None when the stack is empty."""
        if self.top is None:
            return None
        popped = self.top.data
        self.top = self.top.next
        return popped
    def peek(self):
        """Return the top value without removing it (None when empty)."""
        if not self.is_empty():
            return self.top.data
    def is_empty(self):
        """True when the stack holds no nodes."""
        return not self.top
class Queue:
    """FIFO queue backed by a singly linked chain with front/rear pointers."""
    def __init__(self):
        self.front = None
        self.rear = None
    def enqueue(self, value):
        """Append a value at the rear of the queue."""
        node = Node(value)
        if self.rear is None:
            self.front = node
            self.rear = node
        else:
            self.rear.next = node
            self.rear = node
    def dequeue(self):
        """Remove and return the front value, or None when the queue is empty."""
        if self.front is None:
            return None
        value = self.front.data
        self.front = self.front.next
        # BUG FIX: when the last node is removed, rear must be reset too.
        # Previously rear kept pointing at the detached node, so the next
        # enqueue appended to it while front stayed None — every element
        # added after emptying the queue was silently lost.
        if self.front is None:
            self.rear = None
        return value
    def is_empty(self):
        """True when the queue holds no nodes."""
        return not self.front
    def peek(self):
        """Return the front value without removing it (None when empty)."""
        if not self.is_empty():
            return self.front.data
# Ad-hoc smoke test: exercises Stack and Queue and prints the results.
if __name__ == "__main__":
    print('test stack')
    stack = Stack()
    print(stack.is_empty())  # True
    stack.push(1)
    stack.push(2)
    stack.push(3)
    print(stack.peek())  # 3 (last value pushed)
    print(stack.pop())  # 3
    print(stack.peek())  # 2
    print(stack.pop())  # 2
    print(stack.is_empty())  # False (1 is still stacked)
    print('testing queue')
    q = Queue()
    print(q.is_empty())  # True
    q.enqueue(4)
    q.enqueue(5)
    q.enqueue(6)
    print(q.peek())  # 4 (first value enqueued)
    print(q.dequeue())  # 4
    print(q.peek())  # 5
    print(q.dequeue())  # 5
    print(q.is_empty())  # False (6 is still queued)
| true |
79746cd349f6998bb4b4071d468980958593d2ad | Python | ntsd/Documents2Database | /test/drawRectangle.py | UTF-8 | 1,524 | 3.09375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import wx
class MyFrame(wx.Frame):
    """500x500 wx frame that lets the user rubber-band a rectangle with the mouse."""
    # Class-level default: True while the left mouse button is held down.
    isLeftDown = False
    def __init__(self, parent):
        wx.Frame.__init__(self, parent, id=wx.ID_ANY, size=wx.Size(500, 500))
        bSizer1 = wx.BoxSizer(wx.VERTICAL)
        self.m_panel = wx.Panel(self, wx.ID_ANY)
        bSizer1.Add(self.m_panel, 3, wx.EXPAND | wx.ALL, 5)
        self.bmp = wx.EmptyBitmap(500, 500)
        self.staticBMP = wx.StaticBitmap(self.m_panel, wx.ID_ANY, self.bmp)
        self.SetSizer(bSizer1)
        # bind event
        self.staticBMP.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.staticBMP.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.staticBMP.Bind(wx.EVT_MOTION, self.OnMove)
    def drawRectangle(self, pos, pos2):
        """Outline the rectangle spanned by the two corner points in blue."""
        dc = wx.ClientDC(self.staticBMP)
        dc.SetBrush( wx.TRANSPARENT_BRUSH )
        dc.SetPen(wx.Pen(wx.Colour(0,0,255)) )
        dc.DrawRectangle(pos[0], pos[1], pos2[0]-pos[0], pos2[1]-pos[1])
    # Class-level default for the drag anchor (note: placed between methods).
    pos = None
    def OnLeftDown(self, event):
        # Start a drag: both corners begin at the click position.
        self.pos = event.GetPosition()
        self.pos2 = event.GetPosition()
        self.isLeftDown = True
    def OnLeftUp(self, event):
        # Finish the drag and draw the final rectangle.
        self.isLeftDown = False
        self.drawRectangle(self.pos, self.pos2)
    def OnMove(self, event):
        # Track the moving corner while the button is held down.
        if self.isLeftDown:
            self.pos2 = event.GetPosition()
            #dc = wx.ClientDC(self.staticBMP)
            #dc.DrawBitmap(self.bmp, 0, 0)
# Launch the wx application when run as a script.
if __name__ == '__main__':
    app = wx.App()
    frame = MyFrame(None)
    frame.Show(True)
    app.MainLoop()
53223539f859fd719b458f07525cc3eae9bfd9ba | Python | ogawanobuya/ai-stock-predicting | /ai_stock_4.py | UTF-8 | 5,927 | 2.8125 | 3 | [] | no_license | # coding=utf-8
import random
import numpy as np
import pandas as pd
from collections import deque
import keras
from keras import regularizers
from keras.models import Sequential, clone_model
from keras.layers import Dense, Dropout, Activation, LSTM
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# ai_stock_4:強化学習(DQN)による株取引最適化
class Agent(object):
    """DQN agent: an LSTM + dense Q-network over two actions (0 = sell, 1 = buy)."""
    def __init__(self):
        self.input_shape = (8, 9)
        self.num_actions = 2  # 0 = sell, 1 = buy
        initializer = keras.initializers.he_normal()
        # Build the Q-network layer stack.
        network = Sequential()
        network.add(LSTM(32, dropout=0.3, batch_input_shape=(None, self.input_shape[0], self.input_shape[1]), return_sequences=False))
        network.add(Dense(32, activation='relu', kernel_initializer=initializer))
        network.add(Dropout(0.3))
        network.add(Dense(self.num_actions, activation='relu', kernel_initializer=initializer))
        self.model = network
    def evaluate(self, state, model=None):
        """Return the Q-values predicted for one (unbatched) state."""
        network = model if model else self.model
        batched = np.expand_dims(state, axis=0)  # add batch size dimension
        return network.predict(batched)[0]
    def act(self, state, epsilon=0):
        """Epsilon-greedy action selection: explore with probability epsilon."""
        if np.random.rand() <= epsilon:
            return random.choice([0, 1])
        return np.argmax(self.evaluate(state))
class Observer(object):
    """Maintains a sliding window of the most recent `time_length` states."""
    def __init__(self, input_shape):
        self.time_length = input_shape[0]
        self._states = []
    def observe(self, state):
        """Push a new state into the window and return the window as an array."""
        if not self._states:
            # First observation: replicate it to fill the whole window.
            self._states = [state] * self.time_length
        else:
            self._states.append(state)
            self._states.pop(0)  # drop the oldest entry
        return np.array(self._states)
class Environment(object):
    """Market environment over normalized features (x) and S&P target values (y)."""
    def __init__(self, limit_days):
        self.limit_days = limit_days
        self.total_days = 253
        frame = pd.read_csv('data/test_data.csv')
        features = frame.drop(['Date'], axis=1).values
        target = frame['^GSPC'].values.reshape(-1, 1)
        # Normalize the numerical values
        feature_scaler = StandardScaler()
        target_scaler = StandardScaler()
        self.x = feature_scaler.fit_transform(features)
        self.y = target_scaler.fit_transform(target)
    def begin(self):
        """Pick a random episode start index that leaves room for a full episode."""
        start = random.randint(0, self.total_days - self.limit_days)
        return self.x[start], start
    def step(self, state_idx, action):  # action: 0 = sell, 1 = buy
        """Advance one day; reward is the normalized price move in the chosen direction."""
        successor = self.x[state_idx + 1]
        if action == 0:
            reward = self.y[state_idx + 1] - self.y[state_idx]
        else:
            reward = self.y[state_idx] - self.y[state_idx + 1]
        return successor, reward
class Trainer(object):
    """Runs the DQN training loop: experience collection, replay batches and a
    periodically synchronized target network."""
    def __init__(self, agent, optimizer, limit_days):
        self.agent = agent
        self.observer = Observer(agent.input_shape)
        self.env = Environment(limit_days)
        self.experience = []
        # Frozen copy of the online network, used to compute target Q-values.
        self._target_model = clone_model(self.agent.model)
        self.agent.model.compile(optimizer=optimizer, loss="mse")
        self.limit_days = limit_days
    def get_batch(self, batch_size, gamma):
        """Sample a random replay batch and build the (X, y) training targets."""
        batch_indices = np.random.randint(
            low=0, high=len(self.experience), size=batch_size)
        X = np.zeros((batch_size,) + self.agent.input_shape)
        y = np.zeros((batch_size, self.agent.num_actions))
        for i, b_i in enumerate(batch_indices):
            s, a, r, next_s = self.experience[b_i]
            X[i] = s
            y[i] = self.agent.evaluate(s)
            # future reward
            Q_sa = np.max(self.agent.evaluate(next_s, model=self._target_model))
            # Bellman target, overwriting only the action actually taken.
            y[i, a] = r + gamma * Q_sa
        return X, y
    def train(self, gamma=0.99, initial_epsilon=0.4, final_epsilon=0.001,
              memory_size=500, observation_epochs=8, training_epochs=20, batch_size=4):
        """Play episodes, filling the replay memory first (observation phase),
        then train on replay batches with a linearly annealed epsilon."""
        fmt = "Epoch {:d}/{:d} | Score: {} | epsilon={:.4f}"
        self.experience = deque(maxlen=memory_size)
        epochs = observation_epochs + training_epochs
        epsilon = initial_epsilon
        for e in range(epochs):
            # initialize
            rewards = []
            # NOTE(review): resets the observer's private frame cache directly.
            self.observer._states = []
            initial_state, state_idx = self.env.begin()
            state = self.observer.observe(initial_state)
            game_days = 1
            # NOTE(review): '>' means the epoch with index observation_epochs
            # itself still only observes (observation_epochs + 1 rounds) — confirm.
            is_training = True if e > observation_epochs else False
            # let's play the game
            while True:
                if not is_training:
                    # Observation phase: act fully at random (epsilon = 1).
                    action = self.agent.act(state, epsilon=1)
                else:
                    action = self.agent.act(state, epsilon)
                next_state, reward = self.env.step(state_idx, action)
                next_state = self.observer.observe(next_state)
                self.experience.append((state, action, reward, next_state))
                rewards.append(reward)
                state = next_state
                state_idx += 1
                game_days += 1
                if is_training:
                    X, y = self.get_batch(batch_size, gamma)
                    self.agent.model.train_on_batch(X, y)
                if game_days >= self.limit_days:
                    break
            score = sum(rewards)
            if is_training:
                # Sync the target network and anneal epsilon once per epoch.
                self._target_model.set_weights(self.agent.model.get_weights())
                if epsilon > final_epsilon:
                    epsilon -= (initial_epsilon - final_epsilon) / epochs
            if e % 1 == 0:
                print (fmt.format(e + 1, epochs, score, epsilon))
# Script entry point: train a DQN agent on 30-day trading episodes.
if __name__ == "__main__":
    agent = Agent()
    trainer = Trainer(agent, Adam(), limit_days=30)
    trainer.train()
| true |
0f59f250caf76a60a5d89eee760be141b7317099 | Python | lucapro/pytradfri | /pytradfri/smart_task.py | UTF-8 | 9,636 | 2.828125 | 3 | [
"MIT"
] | permissive | """Smart tasks set timers to turn on/off lights in various ways.
> Currently supporting wake up
SmartTask # return top level info
TaskControl # Change top level values
StartAction # Get top level info on start action
StartActionItem # Get info on specific device in task
StartActionItemController # change values for task
"""
from datetime import (datetime as dt)
import datetime
from .command import Command
from .const import (
ATTR_ID,
ATTR_LIGHT_DIMMER,
ATTR_DEVICE_STATE,
ATTR_SMART_TASK_LIGHTS_OFF,
ATTR_SMART_TASK_NOT_AT_HOME,
ATTR_SMART_TASK_TRIGGER_TIME_INTERVAL,
ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR,
ATTR_SMART_TASK_TRIGGER_TIME_START_MIN,
ATTR_SMART_TASK_TYPE,
ATTR_SMART_TASK_WAKE_UP,
ATTR_TRANSITION_TIME,
ATTR_REPEAT_DAYS,
ATTR_START_ACTION,
ROOT_START_ACTION,
ROOT_SMART_TASKS
)
from .resource import ApiResource
from .util import BitChoices
# Weekday bit-flag choices used to decode a task's repeat-days bitmask
# (bit semantics come from BitChoices — presumably one bit per entry, in order).
WEEKDAYS = BitChoices(
    (
        ('mon', 'Monday'),
        ('tue', 'Tuesday'),
        ('wed', 'Wednesday'),
        ('thu', 'Thursday'),
        ('fri', 'Friday'),
        ('sat', 'Saturday'),
        ('sun', 'Sunday')
    )
)
class SmartTask(ApiResource):
    """Represent a smart task."""
    def __init__(self, gateway, raw):
        """Initialize the class."""
        super().__init__(raw)
        self._gateway = gateway
    @property
    def path(self):
        """Return gateway path."""
        return [ROOT_SMART_TASKS, self.id]
    @property
    def state(self):
        """Boolean representing the light state of the transition."""
        return self.raw.get(ATTR_DEVICE_STATE) == 1
    @property
    def task_type_id(self):
        """Return type of task."""
        return self.raw.get(ATTR_SMART_TASK_TYPE)
    @property
    def task_type_name(self):
        """Return the task type in plain text.
        (Own interpretation of names.)
        Falls through to None for task types not listed below.
        """
        if self.is_wake_up:
            return "Wake Up"
        if self.is_not_at_home:
            return "Not At Home"
        if self.is_lights_off:
            return "Lights Off"
    @property
    def is_wake_up(self):
        """Boolean representing if this is a wake up task."""
        return self.raw.get(ATTR_SMART_TASK_TYPE) == ATTR_SMART_TASK_WAKE_UP
    @property
    def is_not_at_home(self):
        """Boolean representing if this is a not home task."""
        return self.raw.get(
            ATTR_SMART_TASK_TYPE) == ATTR_SMART_TASK_NOT_AT_HOME
    @property
    def is_lights_off(self):
        """Boolean representing if this is a lights off task."""
        return self.raw.get(ATTR_SMART_TASK_TYPE) == ATTR_SMART_TASK_LIGHTS_OFF
    @property
    def repeat_days(self):
        """Return int (bit) for enabled weekdays.

        See repeat_days_list for the decoded day names."""
        return self.raw.get(ATTR_REPEAT_DAYS)
    @property
    def repeat_days_list(self):
        """Binary representation of weekdays the event takes place."""
        return WEEKDAYS.get_selected_values(self.raw.get(ATTR_REPEAT_DAYS))
    @property
    def task_start_parameters(self):
        """Return hour and minute that task starts."""
        return self.raw.get(ATTR_SMART_TASK_TRIGGER_TIME_INTERVAL)[0]
    @property
    def task_start_time(self):
        """Return the time the task starts.
        Time is set according to iso8601.
        """
        return datetime.time(
            self.task_start_parameters[
                ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR],
            self.task_start_parameters[
                ATTR_SMART_TASK_TRIGGER_TIME_START_MIN])
    @property
    def task_control(self):
        """Return a TaskControl for mutating this task."""
        return TaskControl(
            self,
            self.state,
            self.path,
            self._gateway)
    @property
    def start_action(self):
        """Return start action object."""
        return StartAction(self, self.path)
    def __repr__(self):
        """Return a readable name for smart task."""
        state = 'on' if self.state else 'off'
        return '<Task {} - {} - {}>'.format(
            self.id, self.task_type_name, state)
class TaskControl:
    """Class to control the tasks."""
    def __init__(self, task, state, path, gateway):
        """Initialize TaskControl."""
        self._task = task
        self.state = state
        self.path = path
        self._gateway = gateway
    @property
    def tasks(self):
        """Return one StartActionItem per device entry in the start action."""
        return [StartActionItem(
            self._task,
            i,
            self.state,
            self.path,
            self.raw) for i in range(len(self.raw))]
    def set_dimmer_start_time(self, hour, minute):
        """Set start time for task (hh:mm) in iso8601.
        NB: dimmer starts 30 mins before time in app
        """
        # This is to calculate the difference between local time
        # and the time in the gateway
        d1 = self._gateway.get_gateway_info().current_time
        d2 = dt.utcnow()
        diff = d1 - d2
        # Year 100 is a dummy date: only the offset-adjusted hour/minute are used.
        newtime = dt(100, 1, 1, hour, minute, 00) - diff
        command = {
            ATTR_SMART_TASK_TRIGGER_TIME_INTERVAL:
                [{
                    ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR: newtime.hour,
                    ATTR_SMART_TASK_TRIGGER_TIME_START_MIN: newtime.minute
                }]
        }
        return self._task.set_values(command)
    @property
    def raw(self):
        """Return raw data that it represents."""
        return self._task.raw[ATTR_START_ACTION]
class StartAction:
    """Class to control the start action-node."""
    def __init__(self, start_action, path):
        """Initialize StartAction class."""
        self.start_action = start_action
        self.path = path
    @property
    def state(self):
        """Return state of start action task."""
        return self.raw.get(ATTR_DEVICE_STATE)
    @property
    def devices(self):
        """Return a StartActionItem for every device in the start action."""
        return [StartActionItem(
            self.start_action,
            i,
            self.state,
            self.path,
            self.raw) for i in range(
                len(self.raw[ROOT_START_ACTION]))]
    @property
    def raw(self):
        """Return raw data that it represents."""
        return self.start_action.raw[ATTR_START_ACTION]
class StartActionItem:
    """Represent the settings of a single device inside a task."""
    def __init__(self, task, index, state, path, raw):
        """Initialize StartActionItem."""
        self.task = task
        self.index = index
        self.state = state
        self.path = path
        self._raw = raw
    @property
    def devices_dict(self):
        """Return a merged dict holding every OTHER device's settings, so this
        item's own entry can be replaced when writing the task back."""
        json_list = {}
        # Idiom: enumerate() replaces the original's manual z = z + 1 counter.
        for z, x in enumerate(self._raw[ROOT_START_ACTION]):
            if z != self.index:
                json_list.update(x)
        return json_list
    @property
    def id(self):
        """Return ID (device id) of task."""
        return self.raw.get(ATTR_ID)
    @property
    def item_controller(self):
        """Return a controller used to edit this item's settings."""
        return StartActionItemController(
            self,
            self.raw,
            self.state,
            self.path,
            self.devices_dict)
    @property
    def transition_time(self):
        """A transition runs for this long from the time in task_start.
        Raw value is stored in tenths of a second; returned in minutes.
        """
        return self.raw.get(ATTR_TRANSITION_TIME) / 60 / 10
    @property
    def dimmer(self):
        """Return dimmer level."""
        return self.raw.get(ATTR_LIGHT_DIMMER)
    @property
    def raw(self):
        """Return raw data that it represents."""
        return self.task.raw[ATTR_START_ACTION][ROOT_START_ACTION][self.index]
    def __repr__(self):
        """Return a readable name for this class."""
        return '<StartActionItem (Device: {} - Dimmer: {} - Time: {})>'\
            .format(self.id, self.dimmer, self.transition_time)
class StartActionItemController:
    """Class to edit settings for a task."""
    def __init__(self, item, raw, state, path, devices_dict):
        """Initialize StartActionItemController."""
        self._item = item
        self.raw = raw
        self.state = state
        self.path = path
        self.devices_dict = devices_dict
    def set_dimmer(self, dimmer):
        """Set final dimmer value for task."""
        # Rebuild the full start-action payload: this device with the new
        # dimmer, plus the untouched settings of every other device.
        command = {
            ATTR_START_ACTION: {
                ATTR_DEVICE_STATE: self.state,
                ROOT_START_ACTION: [{
                    ATTR_ID: self.raw[ATTR_ID],
                    ATTR_LIGHT_DIMMER: dimmer,
                    ATTR_TRANSITION_TIME: self.raw[ATTR_TRANSITION_TIME]
                }, self.devices_dict]
            }
        }
        return self.set_values(command)
    def set_transition_time(self, transition_time):
        """Set time (mins) for light transition."""
        command = {
            ATTR_START_ACTION: {
                ATTR_DEVICE_STATE: self.state,
                ROOT_START_ACTION: [{
                    ATTR_ID: self.raw[ATTR_ID],
                    ATTR_LIGHT_DIMMER: self.raw[ATTR_LIGHT_DIMMER],
                    # minutes -> stored unit (x 60 seconds x 10 tenths)
                    ATTR_TRANSITION_TIME: transition_time * 10 * 60
                }, self.devices_dict]
            }
        }
        return self.set_values(command)
    def set_values(self, command):
        """
        Set values on task control.
        Returns a Command.
        """
        return Command('put', self._item.path, command)
| true |
a6dcbce5dd6bca2f3c19155aa4dd05764452401d | Python | cj23/T3 | /Ignition/Scripts/Shared/gwfile.py | UTF-8 | 18,436 | 2.625 | 3 | [] | no_license |
__dbName = "LocalDB" # Set this to the name of your database connection.
__skipAudit = True # Set this to exclude sql queries from audits.
# Nothing below here should need to change.
# One table row per file or directory. The primary key ("Unique") is an MD5
# hash of path + name; Type is 1 for files (see writeFile) and 2 for
# directories (see makeDirectory). Size is declared but is not referenced by
# any query in this module's visible code — presumably reserved; verify.
__tableName = "Gateway_File_System"
__pathColumn = "FilePath"
__nameColumn = "FileName"
__blobColumn = "Contents"
__typeColumn = "Type"
__createdColumn = "Created"
__modifiedColumn = "Modified"
__uniqueColumn = "Unique"
__sizeColumn = "Size"
def __init():
    """Bootstrap hook: make sure the backing table exists in the database."""
    __checkDB()
def __checkDB():
    """Verify the schema is present; CREATE TABLE ... IF NOT EXISTS makes
    this safe to run unconditionally."""
    __setupDB()
def __setupDB():
    """Create the virtual file-system table if it does not exist yet."""
    ddl = ("CREATE TABLE IF NOT EXISTS `%s` ("
        "`%s` VARCHAR(512), "
        "`%s` VARCHAR(128), "
        "`%s` BLOB, "
        "`%s` SMALLINT DEFAULT 1, "
        "`%s` DATETIME DEFAULT CURRENT_TIMESTAMP, "
        "`%s` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, "
        "`%s` BINARY(32), "
        "PRIMARY KEY(`%s`))") % (
        __tableName, __pathColumn, __nameColumn, __blobColumn, __typeColumn,
        __createdColumn, __modifiedColumn, __uniqueColumn, __uniqueColumn)
    system.db.runUpdateQuery(query=ddl, database=__dbName, skipAudit=__skipAudit)
# Debug helper: inline the prepared-statement arguments into the query text.
# The rendered query is currently not printed; uncomment the line below to log it.
def __printQuery(sql, args=None):
    if args is None:  # avoid the shared mutable-default-argument pitfall
        args = []
    for a in args:
        sql = sql.replace("?", "'%s'" % (a), 1)
    #print sql
# Normalize a file path and split it into its parent path and item name.
# Raises ValueError when the path exceeds the 512-character column limit.
def __getPathAndName(filepath):
    if len(filepath) > 512:
        raise ValueError('File path is too long.')
    if len(filepath) < 1:
        filepath = "/"
    elif filepath[0] != "/":  # '<>' was Python-2-only syntax; '!=' works everywhere
        filepath = "/%s" % (filepath)
    if filepath[-1] == "/":
        filepath = filepath[:-1]
    fp = filepath.split("/")
    path = "/".join(fp[:-1]) + "/"
    name = fp[-1]
    return (path, name)
def getPathAndName(filepath):
    """Split a filepath into the actual path and item name.

    String filepath: full path to file.
    return (String, String): (path, name) tuple.
    """
    return __getPathAndName(filepath)
# Writes a (binary) file to the Gateway database.
# If the file already exists its contents will be updated (or appended to).
#
# String filepath: full path of file to write.
# String / byte[] data: file contents to write.
# boolean append: append to existing contents instead of replacing them.
# return boolean: if file was successfully written.
def writeFile(filepath, data, append=False):
    (path, name) = __getPathAndName(filepath)
    # BUG FIX: the old check compared a type OBJECT against strings
    # (type(data) in ('str', 'java.lang.String')), which is always False, so
    # string data was never converted to bytes. Compare type names instead.
    # 'String' covers java.lang.String under Jython — confirm on the gateway.
    if type(data).__name__ in ('str', 'unicode', 'String'):
        from java.lang import String
        data = String(data).getBytes()
    makeDirectory(path, True)
    # BUG FIX: open the transaction against the configured connection;
    # beginTransaction() with no argument used the project default database.
    tx = system.db.beginTransaction(__dbName)
    sql = "SET @data=?"
    result = system.db.runPrepUpdate(sql, [data], database=__dbName, tx=tx, skipAudit=__skipAudit)
    sql = "INSERT INTO `%s` (`%s`, `%s`, `%s`, `%s`, `%s`) VALUES(?, ?, @data, 1, MD5(CONCAT(`%s`, `%s`))) ON DUPLICATE KEY UPDATE `%s`=CONCAT(IF(NOT %s OR ISNULL(`%s`),'',`%s`),@data)" % (
        __tableName, __pathColumn, __nameColumn, __blobColumn, __typeColumn, __uniqueColumn, __pathColumn, __nameColumn, __blobColumn, bool(append), __blobColumn, __blobColumn)
    #try:
    result = system.db.runPrepUpdate(sql, [path, name], database=__dbName, tx=tx, skipAudit=__skipAudit)
    system.db.commitTransaction(tx)
    system.db.closeTransaction(tx)
    return result > 0
    #except PacketTooBigException:
    #    raise ValueError('File size is too large.')
# Load a (binary) file from the Gateway database.
#
# String filepath: full path of file to read.
# return byte[]: file contents, or None when the file does not exist.
def readFileAsBytes(filepath):
    (path, name) = __getPathAndName(filepath)
    sql = "SELECT `%s` FROM `%s` WHERE `%s`=? AND `%s`=?" % (__blobColumn, __tableName, __pathColumn, __nameColumn)
    result = system.db.runPrepQuery(sql, [path, name], database=__dbName)
    # Identity check ('== None' was unidiomatic); the two guards fold into one.
    if result is None or len(result) < 1:
        return None
    return result[0][__blobColumn]
# Load a (text) file from the Gateway database.
#
# String filepath: full path of file to read.
# String encoding: charset used to decode the stored bytes.
# return String: file contents, or "" when the file does not exist.
def readFileAsString(filepath, encoding="UTF-8"):
    blob = readFileAsBytes(filepath)
    if blob is None:  # identity check; was '== None'
        return ""
    from org.apache.commons.io import IOUtils
    return IOUtils.toString(blob, encoding)
# Download a file from the Gateway database to the client machine.
#
# String filepath: full path of file to download.
# String dlPath: full path to downloaded file.
# boolean overwrite: replace an existing local file.
# return boolean: if file was successfully downloaded.
def downloadFile(filepath, dlPath, overwrite=False):
    # BUG FIX: the old code called loadFile(), which does not exist in this
    # module; readFileAsBytes() is the reader it refers to.
    data = readFileAsBytes(filepath)
    if data is not None:  # was the removed-in-Py3 '<>' operator
        if overwrite or not system.file.fileExists(dlPath):
            system.file.writeFile(dlPath, data.tolist())
            return True
    return False
# Make a directory/folder.
#
# String filepath: full path of directory to create.
# boolean recursive: also create every missing parent directory.
# return boolean: if directory was successfully made.
def makeDirectory(filepath, recursive=True):
    if recursive:
        return makeDirectoryRecursive(filepath)
    else:
        (path, name) = __getPathAndName(filepath)
        sql = "INSERT INTO `%s` (`%s`, `%s`, `%s`, `%s`) VALUES(?, ?, 2, MD5(CONCAT(`%s`, `%s`))) ON DUPLICATE KEY UPDATE `%s`=`%s`" % (
            __tableName, __pathColumn, __nameColumn, __typeColumn, __uniqueColumn, __pathColumn, __nameColumn, __nameColumn, __nameColumn)
        # BUG FIX: this branch passed tx=tx although no transaction was ever
        # opened here, so it always raised NameError. Run it stand-alone.
        result = system.db.runPrepUpdate(sql, [path, name], database=__dbName, skipAudit=__skipAudit)
        return result > 0
# Make a directory/folder, creating every missing level of the path.
#
# String filepath: full path of directory to create.
# return boolean: if at least one database row was touched.
def makeDirectoryRecursive(filepath):
    if len(filepath) > 512:
        raise ValueError('File path is too long.')
    if len(filepath) < 1:
        return False
    segments = filepath.split("/")
    parent = "/"
    tx = system.db.beginTransaction(__dbName)
    created = 0
    for segment in segments:
        if not segment:
            continue  # skip empty pieces produced by leading/trailing slashes
        sql = "INSERT INTO `%s` (`%s`, `%s`, `%s`, `%s`) VALUES(?, ?, 2, MD5(CONCAT(`%s`, `%s`))) ON DUPLICATE KEY UPDATE `%s`=`%s`" % (
            __tableName, __pathColumn, __nameColumn, __typeColumn, __uniqueColumn, __pathColumn, __nameColumn, __nameColumn, __nameColumn)
        created += system.db.runPrepUpdate(sql, [parent, segment], database=__dbName, tx=tx, skipAudit=__skipAudit)
        parent = "%s%s/" % (parent, segment)
    system.db.commitTransaction(tx)
    system.db.closeTransaction(tx)
    return created > 0
# Check if a file exists on the Gateway database.
#
# String filepath: full path of file to check.
# return boolean: if file exists (None when the query itself returned None).
def fileExists(filepath):
    (path, name) = __getPathAndName(filepath)
    sql = "SELECT 1 FROM `%s` WHERE `%s`=? AND `%s`=?" % (__tableName, __pathColumn, __nameColumn)
    result = system.db.runPrepQuery(sql, [path, name], database=__dbName)
    if result is None:  # identity check; was '== None'
        return None
    return len(result) > 0
def move(src, dest, makeDir=True):
    """Move a file (or a directory and everything underneath it).

    String src: full path of file to move.
    String dest: full path of destination file.
    boolean makeDir: build directory path to destination file.
    return boolean: if file was successfully moved.
    """
    (src_dir, src_name) = __getPathAndName(src)
    (dest_dir, dest_name) = __getPathAndName(dest)
    if makeDir:
        makeDirectory(dest_dir, True)
    tx = system.db.beginTransaction(__dbName)
    # Re-point the entry itself and refresh its MD5 identity hash.
    sql = "UPDATE `%s` SET `%s`=?, `%s`=?, `%s`=MD5(CONCAT(`%s`, `%s`)) WHERE `%s`=? AND `%s`=?" % (
        __tableName, __pathColumn, __nameColumn, __uniqueColumn, __pathColumn, __nameColumn, __pathColumn, __nameColumn)
    params = [dest_dir, dest_name, src_dir, src_name]
    __printQuery(sql, params)
    moved = system.db.runPrepUpdate(sql, params, database=__dbName, tx=tx, skipAudit=__skipAudit)
    # Rewrite the path prefix of every descendant of a moved directory.
    sql = "UPDATE `%s` SET `%s`=CONCAT(?, SUBSTRING(`%s`, LENGTH(?) + 1)), `%s`=MD5(CONCAT(`%s`, `%s`)) WHERE `%s` LIKE ?" % (
        __tableName, __pathColumn, __pathColumn, __uniqueColumn, __pathColumn, __nameColumn, __pathColumn)
    params = ["%s%s/" % (dest_dir, dest_name), "%s%s/" % (src_dir, src_name), "%s%s/%s" % (src_dir, src_name, '%')]
    __printQuery(sql, params)
    moved += system.db.runPrepUpdate(sql, params, database=__dbName, tx=tx, skipAudit=__skipAudit)
    system.db.commitTransaction(tx)
    system.db.closeTransaction(tx)
    return moved > 0
# Rename a file.
#
# String src: full path of file to rename.
# String newname: new name to give file.
# return boolean: if file was successfully renamed.
def rename(src, newname):
    if len(src) < 1 or len(newname) < 1:
        return False
    (srcpath, srcname) = __getPathAndName(src)
    # CONSISTENCY FIX: the name column in the WHERE clause was interpolated
    # without backticks (%s=?), unlike every other query in this module.
    sql = "UPDATE `%s` SET `%s`=?, `%s`=MD5(CONCAT(`%s`, `%s`)) WHERE `%s`=? AND `%s`=?" % (
        __tableName, __nameColumn, __uniqueColumn, __pathColumn, __nameColumn, __pathColumn, __nameColumn)
    result = system.db.runPrepUpdate(sql, [newname, srcpath, srcname], database=__dbName, skipAudit=__skipAudit)
    return result > 0
# Permanently delete a file on the Gateway database.
# Deleting a directory also deletes its files and subdirectories.
#
# String filepath: full path of file to delete.
# return boolean: if anything was deleted.
def deletePermanent(filepath):
    (folder, item) = __getPathAndName(filepath)
    sql = "DELETE FROM `%s` WHERE ((`%s`=? AND `%s`=?) OR (`%s` LIKE ?))" % (__tableName, __pathColumn, __nameColumn, __pathColumn)
    params = [folder, item, "%s%s/%s" % (folder, item, "%")]
    __printQuery(sql, params)
    return system.db.runPrepUpdate(sql, params, database=__dbName, skipAudit=__skipAudit) > 0
# Copy a file on the Gateway database.
#
# String src: full path of file to copy.
# String dest: full path to pasted file.
# boolean recursive: copy subdirectories and files as well.
# boolean makeDir: build directory path to destination file.
# return boolean: if file was successfully copied.
def copy(src, dest, recursive=True, makeDir=True):
    if recursive:
        return copyRecursive(src, dest, makeDir)
    (src_dir, src_name) = __getPathAndName(src)
    (dest_dir, dest_name) = __getPathAndName(dest)
    if makeDir:
        makeDirectory(dest_dir, True)
    # Duplicate the single row under the new path/name with a fresh MD5 key.
    sql = "INSERT INTO `%s`(`%s`, `%s`, `%s`, `%s`, `%s`) SELECT ?, ?, `%s`, `%s`, MD5(CONCAT(`%s`, `%s`)) FROM `%s` WHERE `%s`=? AND `%s`=?" % (
        __tableName, __pathColumn, __nameColumn, __typeColumn, __blobColumn, __uniqueColumn, __typeColumn, __blobColumn, __pathColumn, __nameColumn, __tableName, __pathColumn, __nameColumn)
    copied = system.db.runPrepUpdate(sql, [dest_dir, dest_name, src_dir, src_name], database=__dbName, skipAudit=__skipAudit)
    return copied > 0
# Copy a file on the Gateway database.
# Copying a directory will also copy its files and subdirectories.
#
# String src: full path of file to copy.
# String dest: full path to pasted file.
# boolean makeDir: build directory path to destination file.
# return boolean: if file was successfully copied.
def copyRecursive(src, dest, makeDir=True):
    """Clone *src* (and its whole subtree) under *dest* in one statement.

    The inner UNION produces the copied rows with their destination
    paths: the first SELECT is the entry itself (path/name supplied as
    bind parameters), the second rewrites every descendant's path by
    replacing the source prefix with the destination prefix. The outer
    SELECT recomputes the unique column as MD5(path + name) over those
    destination values.
    """
    (srcpath, srcname) = __getPathAndName(src)
    (destpath, destname) = __getPathAndName(dest)
    #print srcpath, srcname
    #print destpath, destname
    #if len(destname) < 1:
    #destname = srcname
    if makeDir:
        # Ensure the destination directory chain exists first.
        makeDirectory(destpath, True)
    sql = "INSERT INTO `%s` (`%s`, `%s`, `%s`, `%s`, `%s`) " % (__tableName, __pathColumn, __nameColumn, __typeColumn, __blobColumn, __uniqueColumn)
    sql += "SELECT `%s`, `%s`, `%s`, `%s`, MD5(CONCAT(`%s`,`%s`)) AS `%s` FROM (" % (__pathColumn, __nameColumn, __typeColumn, __blobColumn, __pathColumn, __nameColumn, __uniqueColumn)
    # First branch: the copied entry itself, aliased to the destination
    # path and name. Second branch: all descendants, with the source
    # prefix swapped for the destination prefix via SUBSTRING.
    sql += "SELECT ? AS `%s`, ? AS `%s`, `%s`, `%s` FROM `%s` WHERE `%s`=? AND `%s`=? UNION SELECT " % (__pathColumn, __nameColumn, __typeColumn, __blobColumn, __tableName, __pathColumn, __nameColumn)
    sql += "CONCAT(?, SUBSTRING(`%s`, LENGTH(?) + 1)) " % (__pathColumn)
    #sql += "CONCAT(?, SUBSTRING(`%s`, INSTR(`%s`, ?)+1)) " % (__pathColumn, __pathColumn)
    # "aaagwfilezzz" is just the mandatory alias for the derived table.
    sql += "AS `%s`, `%s`, `%s`, `%s` FROM `%s` WHERE `%s` LIKE ? ) aaagwfilezzz" % (__pathColumn, __nameColumn, __typeColumn, __blobColumn, __tableName, __pathColumn)
    args = [destpath, destname, srcpath, srcname,
        #destpath, "/%s/" % (srcname),
        "%s%s/" % (destpath, destname), "%s%s/" % (srcpath, srcname),
        "%s%s/%s" % (srcpath, srcname, '%')]
    __printQuery(sql, args)
    result = system.db.runPrepUpdate(sql, args, database=__dbName, skipAudit=__skipAudit)
    #for a in args:
    #    sql = sql.replace('?', "'%s'" % a, 1)
    #print sql
    return result > 0
# Lightweight record describing one row of the gateway file table.
class File():
    path = None      # directory portion of the location
    name = None      # entry name within *path*
    type = None      # type code: 2 = directory (see isDirectory); default 1 appears to mean regular file
    created = None   # creation timestamp as stored in the table
    modified = None  # last-modified timestamp as stored in the table
    def __init__(self, path, name, type=1, created=None, modified=None):
        """Capture one table row; *type* defaults to 1 (non-directory)."""
        self.path = path
        self.name = name
        self.type = type
        self.created = created
        self.modified = modified
    def getFullPath(self):
        """Return "<path>/<name>"."""
        return "%s/%s" % (self.path, self.name)
    def getPath(self):
        return self.path
    def getName(self):
        return self.name
    def isDirectory(self):
        """True when this entry is a directory (type code 2)."""
        return self.type == 2
    def getCreated(self):
        return self.created
    def getModified(self):
        """Return the last-modified timestamp (correctly spelled accessor)."""
        return self.modified
    # (fixed) Backward-compatible alias: the original accessor name was
    # misspelled "getModifed"; keep it so existing callers do not break.
    getModifed = getModified
# Get a list of file items in the given path.
#
# String path: full path to directory to search (used as a LIKE pattern).
# boolean recursive: when True, a '%' wildcard is appended so every
#                    descendant path matches as well.
# return File[]: list of File items in the given path.
def getFiles(path, recursive=False):
    """Query the file table and wrap each row in a File object."""
    sql = "SELECT `%s`,`%s`,`%s`,`%s`,`%s` FROM `%s` WHERE `%s` LIKE ?" % (
        __pathColumn, __nameColumn, __typeColumn, __createdColumn, __modifiedColumn, __tableName, __pathColumn)
    result = system.db.runPrepQuery(sql, ["%s%s" % (path, '%' if recursive else '')], database=__dbName)
    files = []
    for r in result:
        file = File(r[__pathColumn], r[__nameColumn], r[__typeColumn], r[__createdColumn], r[__modifiedColumn])
        files.append(file)
    return files
# Get a simple dataset of file items in the given path.
#
# String path: full path to directory to search (used as a LIKE pattern).
# boolean recursive: when True, a '%' wildcard is appended. NOTE(review):
#                    this defaults to True here but getFiles() defaults
#                    to False -- confirm the asymmetry is intended.
# return PyDataset: dataset of file items, directories first (type DESC),
#                   then alphabetical by name.
def getFilesDataset(path, recursive=True):
    """Return the raw query result (path, name, type, created, modified)."""
    sql = "SELECT `%s`,`%s`,`%s`,`%s`,`%s` FROM `%s` WHERE `%s` LIKE ? ORDER BY `%s` DESC, `%s` ASC" % (
        __pathColumn, __nameColumn, __typeColumn, __createdColumn, __modifiedColumn, __tableName, __pathColumn, __typeColumn, __nameColumn)
    result = system.db.runPrepQuery(sql, ["%s%s" % (path, '%' if recursive else '')], database=__dbName)
    return result
# Get a dataset of file items in the given path that is useful
# for creating a file browser in a Table component.
#
# String path: full path to directory to search (exact LIKE pattern --
#              include '%' yourself for prefix matching).
# String filter: name filter, passed lowercased straight into LIKE.
# return PyDataset: columns Type, Name, Size (blob length), Date Modified.
def getTableViewDataset(path, filter=""):
    """Build the Table-component dataset for one directory listing.

    NOTE(review): *filter* gets no wildcards added, so the default ""
    matches only rows whose name is the empty string; directories still
    appear via the type=2 disjunct. Callers presumably pass '%...%'
    patterns -- confirm intended.
    """
    #files = getFilesDataset(path)
    sql = "SELECT `%s`,`%s`,`%s`, LENGTH(`%s`) AS `%s` FROM `%s` WHERE `%s` LIKE ? AND (`%s`=2 OR LOWER(`%s`) LIKE ?) ORDER BY `%s` DESC, `%s` ASC" % (
        __nameColumn, __typeColumn, __modifiedColumn, __blobColumn, __sizeColumn, __tableName, __pathColumn, __typeColumn, __nameColumn, __typeColumn, __nameColumn)
    files = system.db.runPrepQuery(sql, [path, filter.lower()], database=__dbName)
    ds = []
    for f in files:
        row = [f[__typeColumn], f[__nameColumn], f[__sizeColumn], f[__modifiedColumn]]
        ds.append(row)
    return system.dataset.toDataSet(['Type', 'Name', 'Size', 'Date Modified'], ds)
# Get a dataset of file names in the given path that is useful for
# creating a file browser in a List component.
#
# String path: full path (LIKE pattern) of the directory to search.
# return PyDataset: single-column ('Name') dataset, directories first
#                   (type DESC), then alphabetical by name.
def getListViewDataset(path):
    """Return a one-column dataset of the entry names under *path*."""
    query = "SELECT `%s` FROM `%s` WHERE `%s` LIKE ? ORDER BY `%s` DESC, `%s` ASC" % (__nameColumn, __tableName, __pathColumn, __typeColumn, __nameColumn)
    rows = system.db.runPrepQuery(query, [path], database=__dbName)
    names = [[row[__nameColumn]] for row in rows]
    return system.dataset.toDataSet(['Name'], names)
# Get a dataset of file items in the given path that is useful
# for creating a file browser in a Tree View component.
#
# String path: full path to directory to search.
# return PyDataset: one row per entry with the styling columns a Tree
#                   View component expects (normal + selected variants).
def getTreeViewDataset(path):
    """Map each file row to a Tree View row with fixed default styling."""
    files = getFilesDataset(path)
    ds = []
    for f in files:
        # (fixed) removed a dead local that built "<path>/<name>" and was
        # never used.
        row = [f[__pathColumn], f[__nameColumn], "default","color(255,255,255,255)","color(0,0,0,255)","",None,"","default","color(250,214,138,255)","color(0,0,0,255)","",None]
        ds.append(row)
    headers = ["path","text","icon","background","foreground","tooltip","border","selectedText","selectedIcon","selectedBackground","selectedForeground","selectedTooltip","selectedBorder"]
    return system.dataset.toDataSet(headers, ds)
# Show the GWFileBrowser window as a modal dialog.
#
# PyDictionary params: window parameters forwarded to openWindowInstance.
# return: the window root container's Result property, or None when the
#         GWFileBrowser window does not exist.
def __showModal(params=None):
    """Open the GWFileBrowser window, re-host its content pane in a modal
    Swing JDialog, block until the dialog closes, then return the root
    container's Result value."""
    from javax.swing import JDialog
    windowName = 'GWFileBrowser'
    if windowName in system.gui.getWindowNames():
        window = system.nav.openWindowInstance(windowName, params)
        system.nav.centerWindow(window)
        rc = window.getRootContainer()
        #rc.load()
        cp = window.getContentPane()
        # Hide the Ignition window; its content pane is shown in the
        # modal dialog instead.
        window.setVisible(False)
        dlg = JDialog(None, True)  # second arg True => modal dialog
        dlg.setContentPane(cp)
        dlg.setSize(window.getWidth(), window.getHeight())
        dlg.setMinimumSize(window.getMinimumSize())
        dlg.setMaximumSize(window.getMaximumSize())
        dlg.setLocation(window.getX(), window.getY())
        #dlg.setLocationRelativeTo(None)
        dlg.setTitle(window.getTitle())
        dlg.setVisible(True)  # blocks here until the dialog is dismissed
        system.nav.closeWindow(window)
        return rc.Result
    return None
# Dialog modifiers -- bit-flag values intended for the *modifiers*
# argument of openFile()/saveFile() (forwarded as the 'Modifiers' window
# parameter). Their exact visual effect is defined by the GWFileBrowser
# window itself -- confirm against that window's scripting.
DLG_DB_ICON = 1
DLG_DARK_BG = 2
# Get the scope that the module is called from.
#
# return int: 0 = Gateway, 1 = Client, 2 = Designer, -1 = unknown.
def __getGlobalScope():
    """Map the Ignition ApplicationScope to a small integer code."""
    from com.inductiveautomation.ignition.common.model import ApplicationScope
    scope = ApplicationScope.getGlobalScope()
    checks = (
        (ApplicationScope.isGateway, 0),
        (ApplicationScope.isClient, 1),
        (ApplicationScope.isDesigner, 2),
    )
    for predicate, code in checks:
        if predicate(scope):
            return code
    return -1
# Open File dialog
#
# String extension: not implemented.
# String defaultLocation: initial path to select.
# int modifiers: DLG_* bit flags.
# return String: selected filepath, or None if cancelled / not in client scope.
def openFile(extension="", defaultLocation="/", modifiers=0):
    """Show the modal Open dialog; client scope only."""
    # The dialog can only be shown in the client scope.
    if __getGlobalScope() != 1:
        return None
    params = {'Title' : 'Open', 'InitialPath': defaultLocation, 'Modifiers': modifiers, 'ExtensionFilter':extension, 'ExtensionFilterName': ""}
    return __showModal(params)
# Save File dialog
#
# String filename: default file location to select.
# String extension: not implemented.
# String typeDesc: not implemented.
# int modifiers: DLG_* bit flags.
# return String: selected filepath, or None if cancelled / not in client scope.
def saveFile(filename="/", extension="", typeDesc="", modifiers=0):
    """Show the modal Save dialog; client scope only."""
    # The dialog can only be shown in the client scope.
    if __getGlobalScope() != 1:
        return None
    params = {'Title' : 'Save', 'InitialPath': filename, 'Modifiers': modifiers, 'ExtensionFilter':extension, 'ExtensionFilterName': typeDesc}
    return __showModal(params)
# Get path and name of an existing location from user input.
#
# String filepath: user-supplied filepath.
# return (String, String): (path, name) tuple when the parent path
#                          exists, otherwise ("/", "").
def getUserPathAndName(filepath):
    """Normalise user input into a (path, name) pair, falling back to
    the root when the parent path does not exist."""
    (path, name) = __getPathAndName(filepath)
    # (fixed) removed a dead re-assignment of *filepath* that was never
    # read afterwards.
    if fileExists(path):
        return (path, name)
    return ("/", "")
# Initialise. Called each time the module is loaded (module-import side
# effect; __init is defined elsewhere in this module).
__init()
| true |
eba43c3885093d5c4e3bb9ba5c43a921f0b9e943 | Python | HongzoengNg/UqerCodeGenerator | /utils/validation.py | UTF-8 | 3,830 | 3.390625 | 3 | [
"MIT"
] | permissive | # -*- coding:utf-8 -*-
'''
File: validation.py
File Created: Saturday, 26th January 2019
Author: Hongzoeng Ng (kenecho@hku.hk)
-----
Last Modified: Saturday, 26th January 2019
Modified By: Hongzoeng Ng (kenecho@hku.hk>)
-----
Copyright @ 2018 KenEcho
'''
import datetime
def validate_portfolio(portfolio):
    """Validate a symbol->weight portfolio mapping.

    :params:
        portfolio: e.g. {"000001.XSHG": 0.25}
    :return: (message, ok) -- *message* accumulates one line per problem
        (empty when valid), *ok* is True only when every symbol is a
        6-digit code suffixed ".XSHG"/".XSHE" and every weight is in
        [0, 1].
    """
    msg = ""
    symbol_correct = True
    weight_correct = True
    for symbol, weight in portfolio.items():
        # A valid symbol is 11 chars: 6 digits + ".XSHG" or ".XSHE".
        valid_symbol = (
            len(symbol) == 11
            and symbol[:-5].isdigit()
            and (symbol.endswith(".XSHG") or symbol.endswith(".XSHE"))
        )
        if not valid_symbol:
            msg += "Invalid symbol: {}\n".format(symbol)
            symbol_correct = False
        if not (0 <= weight <= 1):
            weight_correct = False
            msg += "Invalid weight: {}\n".format(weight)
    if symbol_correct and weight_correct:
        return msg, True
    if not weight_correct:
        msg += "Weight should between 0 and 1 (included 0 and 1)\n"
    return msg, False
def validate_date(start_date, end_date):
    """Validate an ISO start/end date pair against the supported range.

    Anything after a 'T' (time portion) is discarded. Both dates must be
    non-empty, lie within ['2010-01-01', '2019-02-01'], and start must be
    strictly before end. Returns (message, ok) where *message* collects
    one line per problem.
    """
    msg = ""
    date_correct = True
    start_date = start_date.split("T")[0]
    end_date = end_date.split("T")[0]
    min_date = datetime.datetime.strptime("2010-01-01", "%Y-%m-%d")
    max_date = datetime.datetime.strptime("2019-02-01", "%Y-%m-%d")
    start = None
    end = None
    if start_date == "":
        date_correct = False
        msg += "Error: The start date cannot be empty!\n"
    else:
        start = datetime.datetime.strptime(start_date, "%Y-%m-%d")
        if start < min_date:
            date_correct = False
            msg += "The start cannot earlier than '2010-01-01'\n"
        if start > max_date:
            date_correct = False
            msg += "The start cannot later than '2019-02-01'\n"
    if end_date == "":
        date_correct = False
        msg += "Error: The end date cannot be empty!\n"
    else:
        end = datetime.datetime.strptime(end_date, "%Y-%m-%d")
        if end < min_date:
            date_correct = False
            msg += "The end cannot earlier than '2010-01-01'\n"
        if end > max_date:
            date_correct = False
            msg += "The end cannot later than '2019-02-01'\n"
    # The ordering checks only make sense when both dates were parsed.
    if start is None or end is None:
        return msg, date_correct
    if start > end:
        date_correct = False
        msg += "The start date cannot be later than the end date\n"
    elif start == end:
        date_correct = False
        msg += "The start date cannot be equal to the end date\n"
    return msg, date_correct
def validate_sma_lma(sma, lma):
    """Validate short-term (*sma*) and long-term (*lma*) moving-average
    window lengths.

    Each value must be non-empty (not ""), lie in [1, 100], and the
    short window must be strictly smaller than the long one. Returns
    (message, ok) with one message line per problem.
    """
    msg = ""
    ma_correct = True
    if sma == "":
        ma_correct = False
        msg += "Error: The Short-Term value cannot be empty!\n"
    else:
        if sma < 1:
            ma_correct = False
            msg += "Short-Term value should not be less than 1\n"
        if sma > 100:
            ma_correct = False
            msg += "Short-Term value should not be greater than 100\n"
    if lma == "":
        ma_correct = False
        msg += "Error: The Long-Term value cannot be empty!\n"
    else:
        if lma < 1:
            ma_correct = False
            msg += "Long-Term value should not be less than 1\n"
        if lma > 100:
            ma_correct = False
            msg += "Long-Term value should not be greater than 100\n"
    # The relative comparison only makes sense when both are provided.
    if sma == "" or lma == "":
        return msg, ma_correct
    if sma > lma:
        ma_correct = False
        msg += "The Short-Term value cannot be greater than the Long-Term value\n"
    elif sma == lma:
        ma_correct = False
        msg += "The Short-Term value cannot be equal to the Long-Term value\n"
    return msg, ma_correct
| true |
4eb917ecc35067b2d3ddeeebd5c45482f4f4e8df | Python | SwordYi/py_2048 | /src/2048.py | UTF-8 | 8,343 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
ProductName:2048
Author:Sword
CreateTime:2018-12-17
ModefiedTime:2018-12-19
"""
import random, os, msvcrt
class Game:
    """Console 2048 for Windows: a 4x4 board driven by msvcrt key input.

    The current round's score is self.score; the all-time best score is
    persisted to the file named by _BEST_FILE_. A tile reaching
    _WinScore_ wins the round.
    """
    def __init__(self, _WinScore_ = 32, _BEST_FILE_ = "bestScore.dat"):
        """Create an empty board, load the best score, seed two '2' tiles."""
        self.matrix = [[0 for n in range(4)] for m in range(4)]  # 0 = empty cell
        self.score = 0
        self._BEST_FILE_ = _BEST_FILE_
        self.best = self.readFile()
        self._WinScore_ = _WinScore_  # tile value that counts as a win
        # Drop two starting tiles of value 2 on random empty cells.
        initNumFlag = 0
        while True:
            s = divmod(random.randrange(0, 16), 4)  # random (row, col)
            if self.matrix[s[0]][s[1]] == 0:
                self.matrix[s[0]][s[1]] = 2
                initNumFlag += 1
            if initNumFlag == 2:
                break
    # Zero-value rendering: show '' instead of 0.
    def notzero(self, s):
        return s if s != 0 else ''
    # Clear the screen and draw banner, help text, scores and the board.
    def display(self):
        self.CLS()
        print(r"----------------------------------2048----------------------------------")
        print(r"操作说明:上(W/K/↑) 下(S/J/↓) 左(A/H/←) 右(D/L/→) 退出(Q)")
        print(r"当前积分:%d,最高积分:%d" % (self.score, self.best))
        print("\n\
┌────┬────┬────┬────┐\n\
│%4s│%4s│%4s│%4s│\n\
├────┼────┼────┼────┤\n\
│%4s│%4s│%4s│%4s│\n\
├────┼────┼────┼────┤\n\
│%4s│%4s│%4s│%4s│\n\
├────┼────┼────┼────┤\n\
│%4s│%4s│%4s│%4s│\n\
└────┴────┴────┴────┘"
            %(self.notzero(self.matrix[0][0]), self.notzero(self.matrix[0][1]), self.notzero(self.matrix[0][2]), self.notzero(self.matrix[0][3]),
            self.notzero(self.matrix[1][0]), self.notzero(self.matrix[1][1]), self.notzero(self.matrix[1][2]), self.notzero(self.matrix[1][3]),
            self.notzero(self.matrix[2][0]), self.notzero(self.matrix[2][1]), self.notzero(self.matrix[2][2]), self.notzero(self.matrix[2][3]),
            self.notzero(self.matrix[3][0]), self.notzero(self.matrix[3][1]), self.notzero(self.matrix[3][2]), self.notzero(self.matrix[3][3]))
        )
    # Slide/merge every row to the left. For each target cell (i, j),
    # scan the cells to its right (k):
    #   - empty target: pull the first non-zero tile into it;
    #   - equal tiles: merge into the target, add to score, next target;
    #   - unequal tiles: park the blocking tile just right of the target.
    def moveLeft(self):
        for i in range(4):
            for j in range(3):
                for k in range(j+1, 4):
                    if self.matrix[i][j] == 0 and self.matrix[i][k] > 0:
                        self.matrix[i][j] = self.matrix[i][k]
                        self.matrix[i][k] = 0
                    elif self.matrix[i][j] > 0 and self.matrix[i][j] == self.matrix[i][k]:
                        self.matrix[i][j] = self.matrix[i][j] + self.matrix[i][k]
                        self.matrix[i][k] = 0
                        self.score += self.matrix[i][j]
                        break
                    elif self.matrix[i][j] > 0 and self.matrix[i][k] > 0 \
                            and self.matrix[i][j] != self.matrix[i][k]:
                        self.matrix[i][j+1] = self.matrix[i][k]
                        if k != j + 1:
                            self.matrix[i][k] = 0
                        break
    # Slide/merge every row to the right (mirror of moveLeft).
    def moveRight(self):
        for i in range(4):
            for j in range(3, -1, -1):
                for k in range(j-1, -1, -1):
                    if self.matrix[i][j] == 0 and self.matrix[i][k] > 0:
                        self.matrix[i][j] = self.matrix[i][k]
                        self.matrix[i][k] = 0
                    elif self.matrix[i][j] > 0 and self.matrix[i][j] == self.matrix[i][k]:
                        self.matrix[i][j] = self.matrix[i][j] + self.matrix[i][k]
                        self.matrix[i][k] = 0
                        self.score += self.matrix[i][j]
                        break
                    elif self.matrix[i][j] > 0 and self.matrix[i][k] > 0 \
                            and self.matrix[i][j] != self.matrix[i][k]:
                        self.matrix[i][j-1] = self.matrix[i][k]
                        if k != j - 1:
                            self.matrix[i][k] = 0
                        break
    # Slide/merge every column upwards (same scheme, iterating rows).
    def moveUp(self):
        for j in range(4):
            for i in range(3):
                for k in range(i+1, 4):
                    if self.matrix[i][j] == 0 and self.matrix[k][j] > 0:
                        self.matrix[i][j] = self.matrix[k][j]
                        self.matrix[k][j] = 0
                    elif self.matrix[i][j] > 0 and self.matrix[i][j] == self.matrix[k][j]:
                        self.matrix[i][j] = self.matrix[i][j] + self.matrix[k][j]
                        self.matrix[k][j] = 0
                        self.score += self.matrix[i][j]
                        break
                    elif self.matrix[i][j] > 0 and self.matrix[k][j] > 0 \
                            and self.matrix[i][j] != self.matrix[k][j]:
                        self.matrix[i+1][j] = self.matrix[k][j]
                        if k != i + 1:
                            self.matrix[k][j] = 0
                        break
    # Slide/merge every column downwards (mirror of moveUp).
    def moveDown(self):
        for j in range(4):
            for i in range(3, -1, -1):
                for k in range(i-1, -1, -1):
                    if self.matrix[i][j] == 0 and self.matrix[k][j] > 0:
                        self.matrix[i][j] = self.matrix[k][j]
                        self.matrix[k][j] = 0
                    elif self.matrix[i][j] > 0 and self.matrix[i][j] == self.matrix[k][j]:
                        self.matrix[i][j] = self.matrix[i][j] + self.matrix[k][j]
                        self.matrix[k][j] = 0
                        self.score += self.matrix[i][j]
                        break
                    elif self.matrix[i][j] > 0 and self.matrix[k][j] > 0 \
                            and self.matrix[i][j] != self.matrix[k][j]:
                        self.matrix[i-1][j] = self.matrix[k][j]
                        if k != i - 1:
                            self.matrix[k][j] = 0
                        break
    # Is there at least one empty cell left on the board?
    def isHaveSpace(self):
        for i in range(4):
            for j in range(4):
                if self.matrix[i][j] == 0:
                    return True
        return False
    # Put a new '2' on a random empty cell (no-op when the board is full).
    def addRandomNum(self):
        while self.isHaveSpace() == True:
            s = divmod(random.randrange(0, 16), 4)
            if self.matrix[s[0]][s[1]] == 0:
                self.matrix[s[0]][s[1]] = 2
                return
    # Game-over check: the game is lost only when no cell is empty and no
    # orthogonal neighbours are equal (i.e. no move can change anything).
    def isfailed(self):
        for i in range(4):
            for j in range(4):
                if self.matrix[i][j] == 0 or \
                    i > 0 and self.matrix[i][j] == self.matrix[i-1][j] or \
                    i < 3 and self.matrix[i][j] == self.matrix[i+1][j] or \
                    j > 0 and self.matrix[i][j] == self.matrix[i][j-1] or \
                    j < 3 and self.matrix[i][j] == self.matrix[i][j+1] :
                    return False
        print("--------------------------游戏失败--------------------------")
        return True
    # Win check: any tile has reached the target value.
    def isWin(self):
        for i in range(4):
            for j in range(4):
                if self.matrix[i][j] >= self._WinScore_:
                    print("--------------------------游戏胜利--------------------------")
                    return True
        return False
    # Load the persisted best score (0 when the file does not exist).
    def readFile(self):
        bestScore = 0
        if os.path.exists(self._BEST_FILE_):
            with open(self._BEST_FILE_, 'r') as f:
                bestScore = int(f.read())
        return bestScore
    # Persist the best score to disk.
    def writeFile(self):
        with open(self._BEST_FILE_, 'w') as f:
            f.write(str(self.best))
    # Keep self.best in sync with the current score.
    def updateBest(self):
        if self.score > self.best:
            self.best = self.score
    # Ask whether to start a new round; loops until y/Y or n/N is typed.
    def isStartNewGame(self):
        while True:
            d = input("是否重新开始一局新的游戏(y/n):")
            if d in ['y', 'Y']:
                return True
            elif d in ['n', 'N']:
                return False
            else:
                print("输入错误,请重新输入!")
    # Clear the console (Windows 'cls').
    def CLS(self):
        os.system('cls')
    # Main loop: read one key, apply the matching move, spawn a new tile,
    # redraw, then check the win/lose conditions.
    def start(self):
        self.display()
        while True:
            d = msvcrt.getch()  # read one raw keystroke
            if d == b'\xe0':  # arrow keys arrive as a two-byte sequence
                d2 = msvcrt.getch()
            while msvcrt.kbhit():  # drain any extra buffered keystrokes
                msvcrt.getch()
            if d in [b'q', b'Q']:
                self.writeFile()
                print("退出游戏,本次积分为:%d" % (self.score))
                break
            elif d in [b'w', b'W', b'k', b'K'] or (d == b'\xe0' and d2 == b'H'):
                self.moveUp()
            elif d in [b's', b'S', b'j', b'J'] or (d == b'\xe0' and d2 == b'P'):
                self.moveDown()
            elif d in [b'a', b'A', b'h', b'H'] or (d == b'\xe0' and d2 == b'K'):
                self.moveLeft()
            elif d in [b'd', b'D', b'l', b'L'] or (d == b'\xe0' and d2 == b'M'):
                self.moveRight()
            else:
                continue  # unrecognised key: ignore without spawning a tile
            self.updateBest()
            self.addRandomNum()
            self.display()
            # After each move, check whether the round has ended.
            if self.isfailed() == True or self.isWin() == True:
                self.writeFile()
                print("游戏结束,本次积分为:%d" % (self.score))
                if self.isStartNewGame() == True:  # restart: re-init this object and redraw
                    self.__init__(int(inputWinScore()))
                    self.display()
                else:
                    break
# Prompt for the game's winning tile value.
def inputWinScore():
    """Keep asking until the user types an integer >= 4; return it."""
    while True:
        raw = input("请输入游戏赢的最大值(必需大于等于4):")
        if raw.isdigit() and int(raw) >= 4:
            return int(raw)
        print("请重新输入数字!")
if __name__ == "__main__":
print(r"----------------------------------2048----------------------------------")
game = Game(int(inputWinScore()))
game.start() | true |
6db80f4d50d1f622bad37f6835b3731ba102a983 | Python | sabriozgur/euler-python | /euler061.py | UTF-8 | 1,809 | 3.390625 | 3 | [] | no_license | """
Question : 61
Start Time : 17.11.19 ~
End Time : 19.11.19 15:25
Total Time Spent : 3-4 hours
Complexity : worst: O(n*k!) best O(nk) #trivial for loops are ignored k: number of figurate types
Answer : 28684
"""
from collections import defaultdict
import sys
def polygonal(x, r):
    """Return the x-th r-gonal (figurate) number.

    Generalized from six hard-coded branches (r in 3..8) to the closed
    form P(r, x) = x * ((r-2)*x - (r-4)) / 2, which reproduces the
    triangular/square/pentagonal/... formulas exactly and additionally
    works for any r >= 3 (previously other r silently returned None).
    Integer division is exact because figurate numbers are integers.
    """
    return x * ((r - 2) * x - (r - 4)) // 2
# For each polygonal family r in 3..8, collect its 4-digit members whose
# two 2-digit halves are both >= 10, so the number can take part in a
# 2-digit chain (no half may have a leading zero).
polygonals = defaultdict(list)
for i in range(3,9):
    nth_polygonals = list()
    for n in range(200):
        nth_polygonal = polygonal(n, i)
        if 999 < nth_polygonal < 10000 and (nth_polygonal % 100) > 9 and int(nth_polygonal / 100) > 9 :
            nth_polygonals.append(nth_polygonal)
    polygonals[i] = nth_polygonals

# Index each family's numbers by their leading two digits (starts) and
# trailing two digits (ends) for O(1) chain extension during the search.
starts = defaultdict(lambda: defaultdict(list))
ends = defaultdict(lambda: defaultdict(list))
for i in range(3,9):
    for num in polygonals[i]:
        starts[i][ int(num / 100) ].append(num)
        ends[i][ num % 100 ].append(num)
# Depth-first search for the cyclic chain (Project Euler 61).
#
# found_list: the 2-digit halves of the chain so far; found_list[-1] is
#   the prefix the next number must start with, found_list[0] is the
#   prefix the final number must wrap back to.
# nth_list: polygonal orders (3..8) not yet used in the chain; mutated
#   in place for backtracking.
def search(found_list, nth_list):
    needed_start = found_list[-1]
    # NOTE(review): nth_list is mutated (remove/append) while being
    # iterated -- fragile in general; confirm it behaves as intended for
    # this input size.
    for i in nth_list:
        for num in starts[i][needed_start]:
            nth_list.remove(i)
            # Candidate closing number: starts with needed_start, ends
            # with the chain's first half.
            last_number = needed_start * 100 + found_list[0]
            if nth_list:
                search(found_list + [num % 100], nth_list)
            elif last_number in polygonals[i]:
                # Each 2-digit half appears once as a high part and once
                # as a low part around the cycle, hence sum * 101.
                print("Found:", found_list, sum(found_list) * 101)
                sys.exit()
            nth_list.append(i)  # backtrack: restore family i
    return []
# Try every family as the chain's first member; the remaining five
# families are available for the rest of the cycle.
for i in range(3,9):
    list_ = list(range(3,9))
    list_.remove(i)
    for num in polygonals[i]:
        search([int(num / 100), num % 100], list_)
| true |
e42e8245176bfd811fb502d6289408fc7de65958 | Python | EshikaShah/Purchasing-Pattern-in-Starbucks | /EDA.py | UTF-8 | 2,916 | 3.328125 | 3 | [] | no_license | import Notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
cleaned_portfolio, cleaned_profile, offers, transactions = Notebook.cleaning_data()
''' Exploratory Data Analysis '''
# To find out the maximum no of customer's belonging to which age group
def by_age_count(cleaned_profile):
    """Bar-plot how many customers fall into each age-by-decade bucket."""
    sns.countplot(x="age_by_decade",data=cleaned_profile, palette='rocket')
# Gender Distribution of our customer
def by_gender_count(cleaned_profile):
    """Bar-plot the gender distribution (F/M/O).

    NOTE(review): the plotted counts are hard-coded from a previous run
    (snapshot below) rather than computed from *cleaned_profile* -- they
    will go stale if the data changes.
    """
    '''cleaned_profile['F'].value_counts()
    0 8696
    1 6129
    cleaned_profile['M'].value_counts()
    1 8484
    0 6341
    cleaned_profile['O'].value_counts()
    0 14613
    1 212
    '''
    x = ["F", "M", "O"]
    y = [6129,8484,212]
    plt.bar(x, y, color='c')
# Gender counts by membership year
def gender_by_year(cleaned_profile):
    """Grouped bar chart: membership sign-ups per year, split by gender."""
    # Count rows per (year, gender) pair.
    membership_date = cleaned_profile.groupby(['became_member_on', 'gender']).size()
    membership_date = membership_date.reset_index()
    membership_date.columns = ['became_member_on', 'gender', 'count']
    # plot a bar graph for membership counts as a function of gender
    plt.figure(figsize=(10, 5))
    sns.barplot(x='became_member_on', y='count', hue='gender', data=membership_date)
    plt.xlabel('Membership Start Year')
    plt.ylabel('Count');
    plt.title('Gender counts by membership year')
# An Overview of what income range facilitates more membership
def by_income_range(cleaned_profile):
    """Bar-plot the count of customers per income-range bucket."""
    sns.countplot(x="income_range",data=cleaned_profile)
def by_member_year(cleaned_profile):
    """Bar-plot the count of customers per membership start year."""
    sns.countplot(x="became_member_on",data=cleaned_profile, palette='Set3')
# Comparing the Gender-wise distribution of our customer's income
def income_by_gender(cleaned_profile):
    """Overlay income KDE curves per gender (one-hot F/M/O columns)."""
    x = cleaned_profile[cleaned_profile['F']==1]
    y = cleaned_profile[cleaned_profile['M']==1]
    z = cleaned_profile[cleaned_profile['O']==1]
    sns.kdeplot(x['income'],label='Female')
    sns.kdeplot(y['income'],label='Male')
    sns.kdeplot(z['income'],label='Other')
# Some data visualization of events related to offers
# Some numerical data regarding the offer events
'''
offers['offer viewed'].value_counts()
0 98945
1 49860
offers['offer received'].value_counts()
0 82304
1 66501
offers['offer completed'].value_counts()
0 116361
1 32444'''
# Representation of people who viewed vs. didn't view the offer on receiving it
def offers1():
    """Pie chart: viewed vs. not-viewed among received offers.

    NOTE(review): counts are hard-coded from a previous run of the
    offers-event tallies above -- they will go stale if the data changes.
    """
    x = ["Viewed", "Not viewed"]
    y = [49860,16641]
    plt.pie(y, labels = x,autopct='%1.2f%%', explode=(0.0,0.1), colors=['#ff9999','#66b3ff'])
def offers2():
    """Pie chart: completed vs. left-after-viewing among viewed offers.

    NOTE(review): counts are hard-coded snapshots, same caveat as offers1.
    """
    x = ["Completed", "Left viewed"]
    y = [32444,17416]
    plt.pie(y, labels = x,autopct='%1.2f%%', explode=(0.0,0.1), colors = ['#99ff99','#ffcc99'])
| true |
1f17d4df92695cadcd92388ee24f5b88a503e652 | Python | LiZhenzhuBlog/Kidney-Surgery-Guide-System | /VTKCallBackExample.py | UTF-8 | 2,948 | 2.90625 | 3 | [] | no_license | import vtk
def GetOrientation(caller, ev):
    """Observer callback: report which object fired which event, then
    print the current camera orientation.

    The camera is supplied via the function attribute GetOrientation.cam,
    which is assigned below before the observer is registered.
    """
    # Just do this to demonstrate who called callback and the event that triggered it.
    print(caller.GetClassName(), "Event Id:", ev)
    # Now print the camera orientation.
    CameraOrientation(GetOrientation.cam)
def CameraOrientation(cam):
    """Pretty-print the camera's position, focal point, clipping range,
    view-up vector and distance in aligned columns."""
    fmt1 = "{:>15s}"  # right-aligned label column
    fmt2 = "{:9.6g}"  # compact numeric format
    print(fmt1.format("Position:"), ', '.join(map(fmt2.format, cam.GetPosition())))
    print(fmt1.format("Focal point:"), ', '.join(map(fmt2.format, cam.GetFocalPoint())))
    print(fmt1.format("Clipping range:"), ', '.join(map(fmt2.format, cam.GetClippingRange())))
    print(fmt1.format("View up:"), ', '.join(map(fmt2.format, cam.GetViewUp())))
    print(fmt1.format("Distance:"), fmt2.format(cam.GetDistance()))
def MakeAxesActor():
    """Build a labelled XYZ axes actor with cylinder shafts, used as the
    orientation marker in the corner widget below."""
    axes = vtk.vtkAxesActor()
    axes.SetShaftTypeToCylinder()
    axes.SetXAxisLabelText('X')
    axes.SetYAxisLabelText('Y')
    axes.SetZAxisLabelText('Z')
    axes.SetTotalLength(1.0, 1.0, 1.0)
    # Tweak the default proportions slightly for readability.
    axes.SetCylinderRadius(0.5 * axes.GetCylinderRadius())
    axes.SetConeRadius(1.025 * axes.GetConeRadius())
    axes.SetSphereRadius(1.5 * axes.GetSphereRadius())
    return axes
use_function_callback = True  # NOTE(review): set but never read below

colors = vtk.vtkNamedColors()

# Renderer / window / interactor plumbing.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# A cone (height = golden ratio) as the scene's main geometry.
source = vtk.vtkConeSource()
source.SetCenter(0,0,0)
source.SetRadius(1)
source.SetHeight(1.6180339887498948482)
source.SetResolution(128)
source.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colors.GetColor3d("peacock"))
actor.GetProperty().SetAmbient(0.3)
actor.GetProperty().SetDiffuse(0.0)
actor.GetProperty().SetSpecular(1.0)
actor.GetProperty().SetSpecularPower(20.0)

# Black bounding-box outline around the cone.
outline = vtk.vtkOutlineFilter()
outline.SetInputData(source.GetOutput())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.GetProperty().SetColor(colors.GetColor3d("Black"))
outlineActor.SetMapper(outlineMapper)

ren.AddActor(actor)
ren.AddActor(outlineActor)
ren.SetBackground(colors.GetColor3d("AliceBlue"))
renWin.SetSize(512,512)

# Explicit initial camera placement.
camera = vtk.vtkCamera()
camera.SetPosition(4.6,-2.0,3.8)
camera.SetFocalPoint(0.0,0.0,0.0)
camera.SetClippingRange(3.2,10.2)
camera.SetViewUp(0.3,1.0,0.13)
ren.SetActiveCamera(camera)
renWin.Render()
renWin.SetWindowName("CallBack")

# Orientation-marker widget in the lower-left corner.
axes1 = MakeAxesActor()
om1 = vtk.vtkOrientationMarkerWidget()
om1.SetOrientationMarker(axes1)
om1.SetViewport(0,0,0.2,0.2)
om1.SetInteractor(iren)
om1.EnabledOn()
om1.InteractiveOn()

# Hand the active camera to the callback, then print its orientation
# after every interaction.
GetOrientation.cam = ren.GetActiveCamera()
iren.AddObserver('EndInteractionEvent', GetOrientation)
iren.Initialize()
iren.Start()
| true |
5c6b6df87ac07ab36449e54930d371611623f03b | Python | alexizzzotov/first_test | /game.py | UTF-8 | 992 | 3.6875 | 4 | [] | no_license | from random import *
words = ('апельсин','мандарин','яблоко', 'банан','грейпфрукт','виноград','клубника','черешня','абрикос','смородина')
word = choice(words)
correct = word
jumble = ''
while word:
posit = randrange(len(word))
jumble += word[posit]
word = word[:posit] + word[posit+1:]
print('Добро пожаловать в игру Анаграмма! Переставьте буквы так, чтобы получилось слово (для завершения нажмите Enter)')
print('Вот анаграмма:',jumble)
guess = input('Попробуйте отгадать исходное слово:')
while guess !=correct and guess !='':
print('Попробуйте еще раз')
guess = input('Попробуйте отгадать исходное слово:')
if guess == correct:
print('Поздравляю! Вы правильно угадали слово')
| true |
292f0a103a8695ec7bbc2c5417efeb3444f97bd8 | Python | jijuntao/Python | /python_practice/practice_8.py | UTF-8 | 10,761 | 3.953125 | 4 | [] | no_license | # 定义函数 def
# Defining a function with def.
def greet_user():
    print('hello')
greet_user()

# Passing information to a function.
def function(name):
    print('hello '+name.title())
function('jesse')

# Arguments vs. parameters:
# parameters appear in the def; arguments are supplied at call time.
# Positional arguments: call-site order must match the parameter order.
def describe_prt(animal_type,pet_name):
    print('I have a ' + animal_type + ' .'+'\nMy '+animal_type+"'s name is "+pet_name.title()+' .')
describe_prt('dog','sanyue')

# Keyword arguments: name each argument explicitly; order is irrelevant.
def describe_prt(animal_type,pet_name):
    print('I have a ' + animal_type + ' .'+'\nMy '+animal_type+"'s name is "+pet_name.title()+' .')
describe_prt(animal_type='dog',pet_name='sanyue')

# Default values: assigned in the def; callers may override or omit.
def describe_prt(animal_type,pet_name= 'sanyue'):
    print('I have a ' + animal_type + ' .'+'\nMy '+animal_type+"'s name is "+pet_name.title()+' .')
describe_prt(animal_type='dog')
# Return values: use return to hand results back instead of printing.
# Making an argument optional: give it an empty-string default.
def name(first_name,last_name,middle_name=''):
    """Return a neatly formatted full name."""
    if middle_name:
        full_name = first_name+' '+middle_name+' '+last_name
    else:
        full_name = first_name+' '+last_name
    return full_name.title()
a = name('jami','hendrix')
print(a)

# Returning a dictionary.
def bulid_person(first_name,last_name):
    """Return a dictionary of information about a person."""
    person = {'first':first_name,'last':last_name}
    return person
b = bulid_person('jimi','hendrix')
print(b)
# Using a function together with a while loop; 'q' quits at any prompt.
def get_formatted_name(first_name, last_name):
    """Return a neatly formatted full name."""
    full_name = first_name + ' ' + last_name
    return full_name.title()
while True:
    print("\nPlease tell me your name:")
    print("(enter 'q' at any time to quit)")
    f_name = input("First name: ")
    if f_name == 'q':
        break
    l_name = input("Last name: ")
    if l_name == 'q':
        break
    formatted_name = get_formatted_name(f_name, l_name)
    print("\nHello, " + formatted_name + "!")
# Passing a list to a function.
def greet_users(names):
    """Print a simple greeting to each user in the list."""
    for name in names:
        msg = "Hello, " + name.title() + "!"
        print(msg)
usernames = ['hannah', 'ty', 'margot']
greet_users(usernames)

# Modifying a list inside a function (mutation is visible to the caller).
def print_models(unprinted_designs, completed_models):
    """Simulate printing each design until none are left.

    Each printed design is moved into completed_models.
    """
    while unprinted_designs:
        current_design = unprinted_designs.pop()
        # Simulate producing a 3D print from the design.
        print("Printing model: " + current_design)
        completed_models.append(current_design)
def show_completed_models(completed_models):
    """Show all the models that were printed."""
    print("\nThe following models have been printed:")
    for completed_model in completed_models:
        print(completed_model)
unprinted_designs = ['iphone case', 'robot pendant', 'dodecahedron']
completed_models = []
print_models(unprinted_designs, completed_models)
show_completed_models(completed_models)
# Preventing a function from modifying a list: pass a slice copy so the
# function's changes do not affect the original list.
def print_models(unprinted_designs, completed_models):
    """Simulate printing each design until none are left.

    Each printed design is moved into completed_models.
    """
    while unprinted_designs:
        current_design = unprinted_designs.pop()
        # Simulate producing a 3D print from the design.
        print("Printing model: " + current_design)
        completed_models.append(current_design)
def show_completed_models(completed_models):
    """Show all the models that were printed."""
    print("\nThe following models have been printed:")
    for completed_model in completed_models:
        print(completed_model)
unprinted_designs = ['iphone case', 'robot pendant', 'dodecahedron']
completed_models = []
# The [:] slice hands the function a copy, leaving the original intact.
print_models(unprinted_designs[:], completed_models)
show_completed_models(completed_models)
# Passing an arbitrary number of positional arguments.
def make_pizza(*toppings): # the * makes Python collect the arguments into a tuple named toppings
    """Print all toppings the customer ordered."""
    print(toppings)
make_pizza('pepperoni')

# Mixing positional and arbitrary arguments: the *-parameter that absorbs
# the rest must come last. Python matches positional and keyword
# arguments first, then collects the remainder into the final parameter.
def make_pizza(size, *toppings):
    """Summarise the pizza about to be made."""
    print("\nMaking a " + str(size) +"-inch pizza with the following toppings:")
    for topping in toppings:
        print("- " + topping)
make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')

# Arbitrary keyword arguments: when you don't know in advance what
# key-value pairs will be passed, ** collects them all into a dict.
def build_profile(first, last, **user_info): # ** creates a dict named user_info
    """Build a dictionary of everything we know about a user.

    NOTE(review): this prints the dict but returns None, so the final
    print below shows None -- the textbook version returns profile.
    """
    profile = {}
    profile['first_name'] = first
    profile['last_name'] = last
    for key, value in user_info.items():
        profile[key] = value
    print(profile)
user_profile = build_profile('albert', 'einstein',location='princeton',field='physics')
print(user_profile)
# 8-1 编写一个名为display_message(),打印句子,指出你在本章学习什么
def display_message():
print("I'm learning function .")
display_message()
# 8-2 编写一个名为favorite_book()的函数,传入形参,打印消息我最喜欢的书为。。。
def favorite_book(title):
print('One of my favorite books is '+title.title())
favorite_book('alice in Wonderland')
# 8-3 编写一个名为make_shirt()的函数,需要一个尺码和字样,使用位置实参制作T恤,使用关键字实参调用
def make_shirt(size,logo):
print('The shirt size is '+size+' and logo is '+logo+'.')
make_shirt('38','cat')
make_shirt(size='40',logo='fish')
# Exercise 8-4: same as 8-3 but the logo defaults to 'I love Python'.
def make_shirt(size, logo='I love Python'):
    """Print the size and logo of the shirt; logo defaults to the slogan."""
    print('The shirt size is {} and logo is {}.'.format(size, logo))

make_shirt('大号')
make_shirt(size='中号')
make_shirt('小号', 'Java')
# Exercise 8-5: name a city and its country; country defaults to China,
# and at least one example city below is outside the default country.
def describe_city(city, country='China'):
    """Print which country the given city is in."""
    print('{} is in {}'.format(city.title(), country))

describe_city('sichuan')
describe_city('anhui')
describe_city('luoshanji')
# Exercise 8-6: combine a city and its country into one display string.
def city_country(city,county):
    """Return the inputs joined as 'City,Country' in title case.

    The second parameter is (mis)named 'county' in the original code but
    actually holds the country; the name is kept for compatibility.
    """
    msg = city + ',' + county
    return msg.title()

# Interactive driver (runs on import): keeps asking for city/country pairs
# until the user types 'q' at either prompt. Prompts are in Chinese
# ("enter city and country", "type q to quit").
while True:
    print('请输入城市和国家:')
    print('输入q即可退出')
    c_s = input('city:')
    if c_s == 'q':
        break
    g_j = input('country:')
    if g_j == 'q':
        break
    c_c = city_country(c_s, g_j)
    # BUG FIX: the original printed the raw input c_s, silently discarding
    # the formatted result returned by city_country().
    print(c_c)
# Exercise 8-7: describe a music album; track count is optional.
def make_album(name, album_name, num=''):
    """Return a dict describing an album.

    Parameters:
        name: the artist's name.
        album_name: the album title.
        num: optional track count; included only when truthy.
    """
    msg = {'name': name, 'album_name': album_name}
    if num:
        msg['num'] = num
    # BUG FIX: the exercise asks for the dict to be *returned*; the
    # original only printed it and implicitly returned None.
    return msg
# Exercise 8-8: loop asking the user for an artist and album title, build
# the dict with make_album() and print it; 'q' quits at either prompt.
def make_album(name,album_name):
    """Return a dict holding the artist name and album title."""
    msg = {'name':name,'album_name':album_name}
    return msg
# Interactive driver (runs on import). Prompts are in Chinese: "enter the
# artist and album title", "press q to quit".
while True:
    print('======请输入歌手和专辑名称======')
    print('======按q即可退出======')
    msg1 = input('name')
    if msg1 == 'q':
        break
    msg2 = input('album_name')
    if msg2 == 'q':
        break
    msg3 = make_album(msg1,msg2)
    print(msg3)
# Exercise 8-9: print each magician's name from the module-level list.
magic = ['magic1', 'magic2', 'magic3']

def show_magicians():
    """Print every name in the global ``magic`` list, one per line."""
    for magician in magic:
        print(magician)

show_magicians()
# Exercise 8-10: move every name out of `magic`, prefixing it with
# 'the Great ', then print the modified names.
magic = ['magic1', 'magic2', 'magic3']
mag = []

def make_great(magic, mag):
    """Empty *magic*, appending 'the Great <name>' to *mag* for each entry.

    Names are consumed from the end of *magic* with pop(), so *mag* ends
    up in reverse order and *magic* is left empty.
    """
    while magic:
        mag.append('the Great ' + magic.pop())

def show_magicians(mag):
    """Print each name in *mag* on its own line."""
    for name in mag:
        print(name)

make_great(magic, mag)
show_magicians(mag)
# Exercise 8-11: as 8-10, but pass a *copy* (magic[:]) to make_great() so
# the original list survives; both lists are then printed.
magic = ['magic1', 'magic2', 'magic3']
mag = []

def make_great(magic, mag):
    """Consume *magic* from the end, appending 'the Great <name>' to *mag*."""
    while magic:
        mag.append('the Great ' + magic.pop())

def show_magicians(mag):
    """Print each name in *mag* on its own line."""
    for name in mag:
        print(name)

make_great(magic[:], mag)
show_magicians(magic)
show_magicians(mag)
# Exercise 8-12: accept any number of sandwich ingredients.
def add_food(*foods):
    """Print each requested ingredient on its own line."""
    for item in foods:
        print(item)

add_food('apple')
add_food('orange', 'bananas')
# Exercise 8-13: build a profile dict for a specific user.
def build_profile(first, last, **user_info):
    """Return a dict of everything we know about a user.

    The double star gathers every extra keyword argument into the
    ``user_info`` dict, which is merged into the result.
    """
    profile = {'first_name': first, 'last_name': last}
    profile.update(user_info)
    return profile

user_profile = build_profile('ji', 'juntao', location='杭州', sex='man')
print(user_profile)
# Exercise 8-14: store a car's details. Manufacturer and model are always
# required; any further facts arrive as keyword arguments.
def msg_car(manufacturer, type, **informations):
    """Return a dict describing one car.

    NOTE: the parameter name 'type' shadows the builtin; it is kept
    unchanged for compatibility with keyword callers.
    """
    cars = {'manufacturer': manufacturer, 'type': type}
    cars.update(informations)
    return cars

car = msg_car('subaru', 'outback', color='blue', tow_package=True)
print(car)
3ea6c8ba73e08e0f37ec2ea5c67911af2f96ba74 | Python | davidwagner/audit-bear | /web2py/applications/audit_bear/modules/dateMod.py | UTF-8 | 11,372 | 3.109375 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/python
"""
-----Date Module-----
-Scope: This module is a collection of related functions that are hopefully useful for dealing with date anomalies and for splitting up the main data structure based on Election-Day Voting, Pre-Voting Days, and Other Days.
-Usage: The "main" function serves as a usage example. You can import this module in any file in which you want access to these functions.
-Class DateMod: Initialise this class with an AuditLog object and the election date parsed from the 68a file.
Update: This file didn't prove as universally useful as intended. It was updated to remove aspects that were not being used.
------------------------
"""
import sys
import datetime
import dateutil.parser
import auditLog
import ballotImage
class DateMod:
def __init__(self,data, date):
#Dependant on a valid path and date parse from the 68a text file
self.eday = '' #Parsed Election Date from 68.lst file
# Filled with dateNoms()
self.D1 = {}
self.D2 = {}
self.D3 = {}
self.valid = {}
if not isinstance(data, auditLog.AuditLog):
raise Exception('Must pass valid AuditLog object')
if self.daygrab(data, date):
#print 'Election Date Retrieved from 68a'
pass
else:
#print 'No 68a Supplied or unable to parse. Inferring Election Day...'
pass
self.dateNoms(data)
self.validParse()
return
def daygrab(self, data, date):
"""
Gets date from l68a file or infer eday
"""
if not date:
self.inferEday(data)
return False
else:
self.eday = date.date()
return True
def inferEday(self, data):
d = {}
for line in data:
key = line.dateTime[0:10]
if key in d:
d[key] += 1
else:
d.update({key: 1})
self.eday = dateutil.parser.parse(max(d.iterkeys(), key=d.get)).date()
return
def validDate(self, date):
"""
Helper function for dateNoms. Test if opening/closing date is reasonable.
"""
if date == None: return False
elif date.date() > self.eday: return False
elif date.date() < (self.eday - datetime.timedelta(33)): return False
else: return True
def dateNoms(self, data):
"""
This function provides a dictionary/list of machines that experiance a variety
of different date issues. The machines are divided into 3 distinct categories
to be furthur parsed into intelligent reports.
-Type 1:
Returns machines who closed on election day but had manual adjustments made
to their datetime. Includes all machines who were never adjusted until
election day and those who were off by an hour.
-Type 2:
Returns machines which needed adjustments made to their date, but it was
never done. This really means machines opened and closed on impossible
dates. It won't catch machines who needed adjusting by an hour or so.
-Type 3:
Returns machines that had date bugs. This means that the date randomly
leaped forward or backward in time or decided its date was 00/00/00 for
some amount of events. Sometimes the date is manually changed back or
sometimes it jumps back on its own, these are all listed because more
importantly, the machine was experiancing some serious bugs.
"""
"""
State that tracks opening and closing events.
This ensures that only the last opening closing states of a machine
are saved. This is because pre-voting opening and closing times are
not considered in this analysis. State is overwritten with 1 when last
opening occurs (election day usually). These are the times used.
"""
ostate = 0
#0: Machine never opened or closed
#1: Machine Opened
#2: Machine Opened and Closed Sucessfully
#3: Machine Closed without an Open
timeset,startset = False, False
start, end = None, None
jump = False
jumpEvents = 0
jumpB, jumpF, jumpZ = False, False, False
temp = data[0].serialNumber
lastTime = data[0].dateTime
for line in data:
try:
cTime = dateutil.parser.parse(line.dateTime)
except ValueError:
cTime = None
#--New Machine, process last one--
if line.serialNumber != temp:
if ostate == 2:
#Populate D1
if end != None and end.date() == self.eday:
if not self.validDate(start):
self.D1.update({temp:(start, end, 'N/A')})
elif start.date() == self.eday:
if timeset:
start = start - diff
self.D1.update({temp:(start, end, end-start)})
self.valid.update({temp:(start, end, end-start)})
else: pass
#Populate D2
"""
Note: I check for invalid opening as well. This is because
It is more likely that something ending on a invalid date
without opening on one resulted from a date jump and goes
with D3
"""
elif (not self.validDate(end)) and (not self.validDate(start)):
self.D2.update({temp:(start, end, end-start)})
#Populate D3
if jump:
self.D3.update({temp:(startJ,jumpValue,jumpEvents)})
#Machine Neither Closed Nor Opened
if ostate == 0: print 'Machine ', temp , ' not closed nor opened'
#Machine Opened and Not Closed
elif ostate == 1: print 'Machine', temp, ' Not closed'
temp = line.serialNumber
timeset, startset = False, False
end, start, diff = None, None, None
ostate = 0
jump = False
jumpB, jumpF, jumpZ = False, False, False
jumpEvents = 0
#--Record opening state and times--
if line.eventNumber == '0001672': #Open Event
ostate = 1
start = cTime
elif line.eventNumber == '0001673': #Machine Close
if ostate == 1:
end = cTime
ostate = 2
#Machine Closed without an Open
elif ostate == 0:
ostate = 3
print 'Machine ', temp,' closed without open event?!'
#--If time was adjusted while machine was open record delta--
elif line.eventNumber == '0000117' and (ostate==1 or ostate==2):
startset = True
elif line.eventNumber == '0001656' and startset:
#Mark changes occuring when open and on eday
if cTime != None and cTime.date() == self.eday:
if lastTime == None:
diff = datetime.timedelta(0)
timeset = True
else:
diff = lastTime - cTime
#We don't care for changes less then 1 minute
if abs(diff) > datetime.timedelta(0,60):
timeset = True
else:
timeset = False
startset = False
#--Find any date jumping and record date anomalies resulting from bugs--
"""
This currently doesn't really adapt well to machines experiancing
many different jumps. This could be added, but might not be all
that useful. A machine experiencing date bugs is a machine
experiencing date bugs. Event count is cumulative among multiple
jumps however.
"""
if ostate == 1:
if line.eventNumber != '0001656' and not jumpB and not jumpF and not jumpZ:
#00/00/00 Jump
if cTime == None:
jump, jumpZ = True, True
startJ = lastTime
jumpValue = 'Invalid Date'
#Backward Jump
elif lastTime > cTime:
jump,jumpB = True, True
startJ = lastTime
jumpValue = cTime
#Forward Jump (Threshold is arbitrary but works OK)
elif (cTime-lastTime) > datetime.timedelta(33):
jump,jumpF = True, True
startJ = lastTime
jumpValue = cTime
if jumpB:
if cTime > startJ:
jumbB = False
else:
jumpEvents += 1
if jumpF:
if cTime.date() == startJ.date():
jumpF = False
else:
jumpEvents += 1
if jumpZ:
if cTime != None:
jumpZ = False
else:
jumpEvents += 1
elif ostate == 2:
jumpF, jumpB, jumpZ = False, False, False
lastTime = cTime
return
def validParse(self):
"""
This function takes the dictionary from dateNoms and decides if the
datestamp is believable. Uses pretty simple heuristics but works
well to create a list of valid machines for analysis that monitors
poll closing times
"""
d = {}
#Machine must be open by this time to be assumed valid if open for 12 hours+
timeopen = dateutil.parser.parse('07:30:00')
for k,v in self.valid.iteritems():
if v[2] > datetime.timedelta(hours=12) and v[0] < timeopen:
d.update({k:v})
else: pass
self.valid = d
return
def __del__(self):
del self.eday
del self.D1
del self.D2
del self.D3
del self.valid
return
def count(data):
    """Return how many distinct machine serial numbers appear in *data*."""
    return len({entry.serialNumber for entry in data})
if __name__== "__main__":
path = sys.argv[1]
try: f = open(path, 'r')
except:
print 'Invalid arg'
exit()
data = auditLog.AuditLog(f)
f.close()
dateclass = DateMod(data, dateutil.parser.parse('11/02/2010'))
count = count(data)
for k,v in dateclass.D1.iteritems():
print k, v[0], v[1], v[2]
for k,v in dateclass.D2.iteritems():
print k,v[0], v[1], v[2]
for k,v in dateclass.D3.iteritems():
print k,v[0], v[1], v[2]
print 'Lengths', len(dateclass.D1), len(dateclass.D2), len(dateclass.D3), len(dateclass.valid)
print 'Count', count, '\n'
| true |
b60ba7c6bb159af475c802246b6e94a0604aab17 | Python | kranthiakssy/Thesis | /DDPG/spyder/Bkup_20210528/ddpg_gym_env.py | UTF-8 | 2,277 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue May 25 10:34:09 2021
@author: kranthi
"""
# Importing packages
#import os
#import torch as T
#import torch.nn as nn
#import torch.nn.functional as F
#import torch.optim as optim
import numpy as np
import gym
import matplotlib.pyplot as plt
# Importing local functions
#from ddpg_module import CriticNetwork, ActorNetwork, OUActionNoise, ReplayBuffer
from ddpg_agent import Agent
# Function for plotting scores
def plotLearning(scores, filename, x=None, window=5):
    """Plot the trailing running average of *scores* and save it to *filename*.

    Args:
        scores: sequence of per-episode scores.
        filename: path the figure is written to.
        x: optional x-axis values; defaults to the episode index.
        window: number of trailing scores averaged at each point.
    """
    n = len(scores)
    running_avg = np.empty(n)
    for t in range(n):
        lo = max(0, t - window)
        running_avg[t] = np.mean(scores[lo:t + 1])
    if x is None:
        x = list(range(n))
    plt.ylabel('Score')
    plt.xlabel('Game')
    plt.plot(x, running_avg)
    plt.savefig(filename)
# Defining gym environment (other continuous-control candidates listed)
env = gym.make('Pendulum-v0') # 'LunarLanderContinuous-v2', 'MountainCarContinuous-v0',
# 'Pendulum-v0'
# Define all the state and action dimensions, and the bound of the action
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
action_bound = env.action_space.high[0]
print("State Dim: {0}\n Action Dim: {1}\n Action Bound: {2}"\
    .format(state_dim, action_dim, action_bound))
# Agent creation: alpha/beta are the actor/critic learning rates, tau is
# the soft target-update rate, layer sizes follow the original DDPG paper.
agent = Agent(alpha=0.000025, beta=0.00025, input_dims=[state_dim], tau=0.001, env=env,
              batch_size=64, layer1_size=400, layer2_size=300, n_actions=action_dim,
              action_bound=action_bound)
#agent.load_models()
np.random.seed(0)
score_history = []
# Training loop: one episode per iteration (only 10 here; 1000 intended).
for i in range(10): #1000 episodes
    obs = env.reset()
    done = False
    score = 0
    while not done:
        act = agent.choose_action(obs)
        new_state, reward, done, info = env.step(act)
        # Store the transition and take one learning step per env step.
        agent.remember(obs, act, reward, new_state, int(done))
        agent.learn()
        score += reward
        obs = new_state
        env.render()
    score_history.append(score)
    # Periodically checkpoint the actor/critic networks.
    if i % 25 == 0:
        agent.save_models()
    print('episode ', i, 'score %.2f' % score,
          'trailing 100 games avg %.3f' % np.mean(score_history[-100:]))
filename = 'LunarLander-alpha000025-beta00025-400-300.png'
plotLearning(score_history, filename, window=100)
ef18a7ceeb5f15ec3c6cf449ef06957d60d1f4c0 | Python | milindl/auto-aggregator | /post_reader.py | UTF-8 | 6,893 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/env python3
from post import Post
import datetime
from fuzzywuzzy import fuzz
import re
import dateutil.parser as dp
import parsedatetime as pdt
import calendar
class PostReader:
    '''
    Class to parse posts and populate them with information: extracts
    origin/destination locations (keyword map + fuzzy matching) and
    date/time details from a Post's free-text content.
    '''
    def __init__(self, mapping_file = 'mappings', stoplist = set()):
        '''
        Initialize PostReader with mappings etc.
        mapping_file provides location -> keyword maps, one per line:
          location1;keyword1,key phrase,key phrase two
          location2;key phrase three,keyword2,keyword3
        stoplist is the set() of words not to include in token tuples.
        A default is provided inside the function body.
        (NOTE: the mutable default set() is never mutated here, only read.)
        '''
        if len(stoplist) == 0:
            self.stoplist = set('in for on at to from or around'.split())
        else:
            self.stoplist = stoplist
        # Parse "location;kw1,kw2,..." lines into {keyword: location}.
        # FIX: use a context manager so the file handle is closed
        # (the original leaked it).
        loc_map = {}
        with open(mapping_file) as loc_file:
            for line in loc_file:
                ans = line.split(';')[0]
                for key in line.split(';')[1].replace('\n', '').split(','):
                    loc_map[key] = ans
        self.loc_map = loc_map

    def loc_tokenizer(self, preceding_word, suceeding_word, pst):
        '''
        Returns a tuple containing possible location strings in pst.content.
        This looks for strings of the form 'to (keyword1) (keyword2)' or
        '(keyword1) (keyword2) to'.
        'to' is the preceding or succeeding word respectively, and the
        keywords are possible location names.
        '''
        # Pad words with spaces if they are alphanumeric; otherwise leave
        # them, as they may be special characters (^, $ etc).
        preceding_word = re.sub(r'([a-zA-Z]+)', r'\g<1> ', preceding_word)
        suceeding_word = re.sub(r'([a-zA-Z]+)', r' \g<1>', suceeding_word)
        # Match two words, falling back to a single word, around the anchor
        m = re.search(preceding_word +
                      '([a-zA-Z]+)? ([a-zA-Z]+)?' +
                      suceeding_word,
                      pst.content) or re.search(preceding_word +
                                                '([a-zA-Z]+)?' +
                                                suceeding_word,
                                                pst.content)
        if not m:
            return ()
        # Return a tuple of lowercased words that are not in the stoplist.
        final = []
        for g in m.groups():
            # BUG FIX: optional groups are None when unmatched; the
            # original called g.lower() unconditionally and crashed with
            # AttributeError on inputs such as 'come to 123'.
            if g is None:
                continue
            if g.lower() not in self.stoplist:
                final.append(g.lower())
        return tuple(final)

    def best_scoring_value(self, groups):
        '''
        Finds the best fuzzy match.
        Compares each elem of the group with each keyphrase/word in loc_map
        and returns the location with the best match.
        '''
        best_match = ''
        best_score = 0
        groups = list(groups)
        # Append the whole of the group to the things to be checked.
        # For instance, for the group ('a', 'b'), 'a b' will also be matched.
        groups.append(' '.join(groups))
        for g in groups:
            for key in self.loc_map:
                score = fuzz.ratio(key, g)  # compute once per (key, g) pair
                if score > best_score:
                    best_score = score
                    best_match = self.loc_map[key]
        return best_match

    def get_temporal(self, pst):
        '''
        Return tuple containing best possible temporal match for Post pst.
        Raises ValueError when no date can be found in the content.
        '''
        cal = pdt.Calendar()
        # Censor some punctuation from post content, then flatten 1+ spaces
        # into 1. This seems to improve guessing.
        content = re.sub('([^a-zA-Z0-9\:\-\+])', ' \g<1> ', pst.content)
        content = re.sub('[ ]+', ' ', content)
        # "Naked" dates are day numbers with no month attached,
        # e.g. "31st morning", "1st night".
        naked_content = re.search('([0-3]?[0-9])[ ]?(st|nd|rd|th)? (?!Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|am|pm|A\.M\.|P\.M\.)',
                                  content, re.I)
        if naked_content:
            # Assumption: only one such naked date exists
            d = int(naked_content.groups()[0])
            month_name = ''
            # The month is the posting month if the naked day has not yet
            # passed; otherwise it is the next month.
            if d >= pst.posting_date.day:
                month_name = calendar.month_name[pst.posting_date.month]
            else:
                month_name = calendar.month_name[(pst.posting_date.month % 12) + 1]
            content = re.sub('([0-3]?[0-9])[ ]?(st|nd|rd|th)? (?!Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|am|pm|A\.M\.|P\.M\.|noon)[A-Za-z]+',
                             '\g<0>' + month_name + ' ', content, re.I)
        # Obtain a tuple containing the best guesses from PDT's NLP
        nlp_guess = cal.nlp(content, sourceTime = pst.posting_date)
        if len(nlp_guess) == 0:
            raise ValueError('Your string does not have a date in it')
        # With exactly one parse, return it directly to the caller
        # (wrapped so the return type is always a tuple of datetimes).
        if len(nlp_guess) == 1:
            return (nlp_guess[0][0],)
        # With >= 2 parses, separate them into Time and Date guesses.
        time_parsed = []
        date_parsed = []
        weird_parsed = [] # Strings which fail dateutil's parse but pass PDT
        for guess in nlp_guess:
            try:
                # If the time is 00:00, it's probably a pure date
                if dp.parse(guess[-1]).time() == dp.parse('00:00').time():
                    date_parsed.append(guess)
                else:
                    time_parsed.append(guess)
            except ValueError:
                weird_parsed.append(guess)
        # TODO: currently the first member of time_parsed and date_parsed
        # is used. Find a way to pick the best candidate instead.
        if len(time_parsed) > 0 and len(date_parsed) > 0:
            return (dp.parse(time_parsed[0][-1]), dp.parse(date_parsed[0][-1]))
        elif len(date_parsed) > 0:
            return (dp.parse(date_parsed[0][-1]),)
        else:
            return (weird_parsed[0][0],)

    def read_post(self, p):
        '''
        Read a raw post and populate p.to, p.frm, p.date and p.time from
        its content; returns the (mutated) post.
        '''
        to_groups = self.loc_tokenizer('to', '', p) or self.loc_tokenizer('for', '', p)
        best_to = self.best_scoring_value(to_groups)
        from_groups = self.loc_tokenizer('from', '', p) or self.loc_tokenizer('', 'to', p)
        best_from = self.best_scoring_value(from_groups)
        p.to = best_to
        p.frm = best_from
        dates = self.get_temporal(p)
        # One element: a combined datetime. Two elements: (time, date).
        if len(dates) == 1:
            p.date = dates[0].date()
            p.time = dates[0].time()
        else:
            p.date = dates[1].date()
            p.time = dates[0].time()
        return p
| true |
957071af869dcaecb59c0c65898fc235815f5301 | Python | hwangpo/intensity_duration_frequency_analysis | /idf_analysis/in_out.py | UTF-8 | 1,269 | 2.65625 | 3 | [
"MIT"
] | permissive | __author__ = "Markus Pichler"
__credits__ = ["Markus Pichler"]
__maintainer__ = "Markus Pichler"
__email__ = "markus.pichler@tugraz.at"
__version__ = "0.1"
__license__ = "MIT"
import pandas as pd
def csv_args(unix=False):
    """Return pandas CSV keyword arguments for the chosen dialect.

    unix=True selects ',' as separator with '.' decimals; otherwise the
    German/Excel style ';' separator with ',' decimals is used.
    """
    if unix:
        return {'sep': ',', 'decimal': '.'}
    return {'sep': ';', 'decimal': ','}
def import_series(filename, series_label='precipitation', index_label='datetime', unix=False):
    """Read a time series from a csv / parquet / pickle file.

    :param filename: path of the file; the extension selects the reader.
    :param series_label: name given to the resulting series.
    :param index_label: name given to the (datetime) index.
    :param unix: whether to use a "," as separator and a "." as decimal sign
        (True) or ";" and "," (False). Only used for csv files.
    :type unix: bool
    :return: the time series
    :rtype: pandas.Series
    :raises NotImplementedError: for unsupported file extensions.
    """
    if filename.endswith('csv'):
        # FIX: read_csv(squeeze=True) was removed in pandas 2.0;
        # .squeeze('columns') converts the single-column frame to a Series
        # and works on pandas >= 1.4 as well.
        ts = pd.read_csv(filename, index_col=0, header=None,
                         names=[series_label], **csv_args(unix)).squeeze('columns')
        ts.index = pd.to_datetime(ts.index)
        ts.index.name = index_label
        return ts
    elif filename.endswith('parquet'):
        return pd.read_parquet(filename, columns=[series_label])[series_label].rename_axis(index_label, axis='index')
    elif filename.endswith('pkl'):
        return pd.read_pickle(filename).rename(series_label).rename_axis(index_label, axis='index')
    else:
        # FIX: the old message claimed only csv was implemented, although
        # parquet and pkl are handled above.
        raise NotImplementedError('Sorry, but only csv, parquet and pkl files are implemented. Maybe there will be more options soon.')
| true |
1f2ee8144c9b6c134f1dd52e9afa269212d1c71f | Python | chenshanghao/LeetCode_learning | /Problem_347/my_solution_not_good.py | UTF-8 | 661 | 3.03125 | 3 | [] | no_license | class Solution(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
dict_key_nums= {}
for i in nums:
if i in dict_key_nums:
dict_key_nums[i] += 1
else:
dict_key_nums[i] = 1
results = []
for i in range(k):
max_key, max_value = 0, 0
for k,v in dict_key_nums.items():
if v > max_value:
max_key, max_value = k, v
results.append(max_key)
del dict_key_nums[max_key]
return results
| true |
fe9b7e386d7d0fcc2b1693e2b396634201e455df | Python | userpest/blockcrypt | /src/EncryptedBlockDevice.py | UTF-8 | 4,837 | 2.640625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/python2.7
import math
import logging
from Crypto.Hash import SHA256, HMAC
import struct
from util import *
class SectorHmacDoesntMatch(Exception):
    """Raised when a sector's stored HMAC does not match the computed one."""
    def __init__(self, message):
        # FIX: pass the message to Exception.__init__ so str(exc) and
        # tracebacks show it; the original called super().__init__() with
        # no arguments, leaving the exception's own message empty.
        super(SectorHmacDoesntMatch, self).__init__(message)
        self.message = message
#TODO:merge DiskDrivers and EncryptedBlockDevice into one
class EncryptedBlockDevice(object):
    """File-like wrapper over a sector-addressed disk driver that encrypts
    and decrypts whole sectors transparently.

    crypto_driver must provide encrypt(sector, data) / decrypt(sector, data);
    disk_driver must provide read(sector) / write(sector, data) plus the
    attributes sector_size and size.
    """
    def __init__(self,crypto_driver,disk_driver):
        self.crypto = crypto_driver
        self.device = disk_driver
        self.sector_size = disk_driver.sector_size
        # Current byte position within the logical (decrypted) device
        self.offset=0
        self.size = disk_driver.size
    def seek(self,offset,from_what=0):
        # Mirrors file.seek(): 0 = absolute, 1 = relative, 2 = from end.
        if from_what==0:
            self.offset=offset
        elif from_what==1:
            self.offset+=offset
        elif from_what==2:
            self.offset=self.size+offset
    def get_current_sector(self):
        """
        returns a pair (sector_number,sector_offset) for the current offset
        """
        return (int(math.floor(self.offset/self.sector_size)), self.offset%self.sector_size)
    def _read(self,size):
        # Generator yielding one raw (still encrypted) sector at a time,
        # together with the byte range within that sector the caller wants.
        # Advances self.offset as it goes.
        sector_size = self.sector_size
        device = self.device
        while size > 0:
            (sector, sector_offset) = self.get_current_sector()
            read_size = min(size,sector_size-sector_offset)
            read_begin = sector_offset
            read_end = min(sector_offset+size, sector_size)
            size-=read_size
            self.offset+=read_size
            ciphertext = self.read_sector(sector)
            yield (sector,ciphertext,read_begin,read_end,read_size)
    def read_sector(self,sector):
        # Raw (encrypted) sector read; subclasses may add verification here.
        return self.device.read(sector)
    def read(self,size):
        # Decrypt each touched sector and splice out the requested bytes.
        buf = bytearray()
        crypto = self.crypto
        for (sector,ciphertext,read_begin, read_end,read_size) in self._read(size):
            plaintext = crypto.decrypt(sector,ciphertext)
            buf+=plaintext[read_begin:read_end]
        return buf
    #meh
    def write_sector(self,sector,data):
        # Raw (encrypted) sector write; subclasses may add HMAC updates here.
        self.device.write(sector,data)
    def write(self,plaintext):
        # Read-modify-write per sector: decrypt the existing sector, splice
        # in the new bytes, re-encrypt and write back.
        #TODO:optimize
        crypto = self.crypto
        device = self.device
        sector_size = self.sector_size
        plaintext_size = len(plaintext)
        while plaintext_size > 0 :
            (sector, sector_offset) = self.get_current_sector()
            write_begin = sector_offset
            write_end = min(plaintext_size+sector_offset,sector_size)
            write_size = write_end-write_begin
            ciphertext = self.read_sector(sector)
            sector_data = crypto.decrypt(sector,ciphertext)
            sector_data = sector_data[:write_begin]+plaintext[:write_size]+sector_data[write_end:]
            ciphertext = crypto.encrypt(sector,sector_data)
            self.write_sector(sector,ciphertext)
            plaintext=plaintext[write_size:]
            self.offset+=write_size
            plaintext_size-=write_size
    def flush(self):
        self.device.flush()
class EncryptedBlockDeviceWithHmac(EncryptedBlockDevice):
def __init__(self,crypto_driver,disk_driver,hmac_key):
super(EncryptedBlockDeviceWithHmac,self).__init__(crypto_driver,disk_driver)
self.hmac_key = hmac_key
self.hmac_size=SHA256.digest_size
self.hmac_entry_size=self.hmac_size
active_sectors = int(math.floor(self.size/(self.sector_size+self.hmac_entry_size)))
self.hmac_pos = active_sectors*self.sector_size
self.hmac_section_begin=self.hmac_pos
self.realsize = self.size
self.size -= self.hmac_entry_size*active_sectors
self.compute_hmac = True
self.check_hmac = True
self.active_sectors = active_sectors
def save_sector_hmac(self,sector,data):
#print "saving hmac for %d" % (sector)
hmac = self.get_hmac(data)
buf=hmac.digest()
#print to_hex(buf)
offset2 = self.offset
self.seek(self.get_sector_hmac_offset(sector))
self.compute_hmac=False
self.check_hmac = False
self.write(buf)
self.seek(offset2)
self.compute_hmac = True
def write_sector(self,sector,data):
if self.compute_hmac:
self.save_sector_hmac(sector,data)
self.device.write(sector,data)
def get_sector_hmac_offset(self,sector):
return int(self.hmac_section_begin + sector*self.hmac_entry_size)
def get_hmac(self,data):
return HMAC.new(self.hmac_key,data, SHA256)
def read_sector(self,sector):
buf = self.device.read(sector)
if self.check_hmac :
computed_hmac = self.get_hmac(buf).digest()
self.check_hmac = False
offset2 = self.offset
self.seek(self.get_sector_hmac_offset(sector))
data = self.read(self.hmac_entry_size)
self.seek(offset2)
self.check_hmac = True
saved_hmac = data
if computed_hmac != saved_hmac:
s = "sector %d has been modified " % (sector)
print s
print "reading from %d" % (self.get_sector_hmac_offset(sector))
print to_hex(computed_hmac)
print to_hex(str(saved_hmac))
raise SectorHmacDoesntMatch(s)
return buf
def compute_disk_hmac(self):
#print
for i in range(0,self.active_sectors):
s = self.device.read(i)
self.save_sector_hmac(i,s)
#print "zuo"
#print self.hmac_section_begin
def find_modified_sectors(self):
ret = []
for i in range(0,int(math.floor(self.size/self.sector_size))):
if not self.sector_hmac_valid(i):
ret.append(i)
return ret
| true |
ecc93f5662508b12b61a8557d5967fd1d607d26b | Python | yukuku/euler | /p3.py | UTF-8 | 261 | 3 | 3 | [] | no_license | import math
def pf(n):
    """Return the prime factorisation of n as a list, smallest factor first.

    Repeatedly strips the smallest remaining factor. FIX: uses integer
    division (//=) instead of '/', which under Python 3 is true division
    and silently turns n into a float, losing exactness for large inputs.
    """
    res = []
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            n //= divisor
            res.append(divisor)
            # keep the same divisor: it may divide n more than once
        else:
            divisor += 1
    if n != 1:
        # whatever remains is itself prime
        res.append(n)
    return res
# Project Euler #3: prime factors of 600851475143 (Python 2 print statement).
print pf(600851475143)
| true |
12d906e89e36081b126f32f82198aad164dd08cf | Python | Arushi-V/PythonWithCM | /2021.05.22/fun2.py | UTF-8 | 140 | 3.671875 | 4 | [] | no_license | def voterAge(age):
  # Print "Adult" when the given age reaches the voting age of 18,
  # otherwise print "Not Adult".
  if (age >= 18):
    print("Adult")
  else :
    print("Not Adult")
# Prompt for the user's age (runs at import) and classify them.
age = int(input("Enter your age : "))
voterAge(age)
| true |
898ac44730ed55f68aeadb0985f3beb2534318c3 | Python | Dreamer-WangYiQiang/SepsisTrajectoryPrediction_Release | /inputPredictionPermaDrop.py | UTF-8 | 2,505 | 2.734375 | 3 | [] | no_license | # Written by Dale Larie, send questions to Dale.B.Larie@gmail.com
# Last updated 05/15/2020
# Script that will generate LSTM Cytokine prediction models with a permanent dropout layer
import numpy as np
from keras import backend as K
from keras.layers.core import Lambda
import os
import math
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Flatten
import tensorflow.python.util.deprecation as deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False #suppress depreciation warnings
# make regressor do rolling prediction as well
# make lstm for just oxydef
def PermaDropout(rate):
return Lambda(lambda x: K.dropout(x, level=rate))
# 5 observations for the network to predict on
numForPredictions = 5
# percent of training data to use
percent = 10
# creating a directory to store the models in
pathToModelsDir = "./inputPredictionPermaDropout/"
try:
os.mkdir(pathToModelsDir)
except:
pass
# parameters for making the LSTM neural network
def makeLSTM():
model = Sequential()
model.add(LSTM(units=100,return_sequences=True,input_shape=(numForPredictions,11)))
model.add(LSTM(units=100,return_sequences=False,input_shape=(numForPredictions,11)))
model.add(PermaDropout(0.1))
model.add(Dense(units = 300))
model.add(Dense(units = 200))
# model.add(Dense(units = 112))
model.add(Dense(units = 1))
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['mean_absolute_error'])
model.summary()
return model
# -------------------------------------#
# main section
data = np.load("TrainingData.npy")
# ignore oxygen deficit
data = data[1:,:,:]
data = np.transpose(data,(2,1,0))
# expect shape = (X,6,11)
np.random.shuffle(data)
# make the dataset smaller
percent = percent/100
endPoint = math.floor(data.shape[0]*percent)
data = data[0:endPoint,:,:]
# for each network
for networkNum in range(11):
try:
lstmModel = keras.models.load_model(pathToModelsDir + "model" + str(networkNum) + ".h5")
print("loaded lstm" + str(networkNum))
except:
lstmModel = makeLSTM()
print("created lstm" + str(networkNum))
features = data[:,1:,:] # shape = (X, 5, 11)
labels = data[:,0:1,networkNum] # shape = (X, 1)
lstmModel.fit(features,labels, validation_split = .1, epochs = 10, batch_size = 1028, verbose = 1)
lstmModel.save(pathToModelsDir + "/model" + str(networkNum) + ".h5")
| true |
cf3f67dfe6a3c6fe8fac9618618faaeaa40b6629 | Python | wtsi-hgi/hail | /python/hail/representation/genomeref.py | UTF-8 | 7,702 | 2.859375 | 3 | [
"MIT"
] | permissive | from hail.java import handle_py4j, jiterable_to_list
from hail.typecheck import *
from hail.representation.interval import Interval
from hail.utils import wrap_to_list
from hail.history import *
class GenomeReference(HistoryMixin):
"""An object that represents a `reference genome <https://en.wikipedia.org/wiki/Reference_genome>`__.
:param str name: Name of reference. Must be unique and not one of Hail's predefined references "GRCh37" and "GRCh38".
:param contigs: Contig names.
:type contigs: list of str
:param lengths: Dict of contig names to contig lengths.
:type lengths: dict of str to int
:param x_contigs: Contigs to be treated as X chromosomes.
:type x_contigs: str or list of str
:param y_contigs: Contigs to be treated as Y chromosomes.
:type y_contigs: str or list of str
:param mt_contigs: Contigs to be treated as mitochondrial DNA.
:type mt_contigs: str or list of str
:param par: List of intervals representing pseudoautosomal regions.
:type par: list of :class:`.Interval`
>>> contigs = ["1", "X", "Y", "MT"]
>>> lengths = {"1": 249250621, "X": 155270560, "Y": 59373566, "MT": 16569}
>>> par = [Interval.parse("X:60001-2699521")]
>>> my_ref = GenomeReference("my_ref", contigs, lengths, "X", "Y", "MT", par)
"""
    @handle_py4j
    @record_init
    @typecheck_method(name=strlike,
                      contigs=listof(strlike),
                      lengths=dictof(strlike, integral),
                      x_contigs=oneof(strlike, listof(strlike)),
                      y_contigs=oneof(strlike, listof(strlike)),
                      mt_contigs=oneof(strlike, listof(strlike)),
                      par=listof(Interval))
    def __init__(self, name, contigs, lengths, x_contigs=[], y_contigs=[], mt_contigs=[], par=[]):
        # NOTE(review): mutable default arguments ([]) are shared across
        # calls; they are only read here, but None defaults would be safer.
        # Normalise the scalar-or-list arguments to lists.
        contigs = wrap_to_list(contigs)
        x_contigs = wrap_to_list(x_contigs)
        y_contigs = wrap_to_list(y_contigs)
        mt_contigs = wrap_to_list(mt_contigs)
        # Unwrap the Py4J handles of the PAR intervals for the JVM call.
        par_jrep = [interval._jrep for interval in par]
        # Construct the JVM-side GenomeReference and keep a handle to it.
        jrep = (Env.hail().variant.GenomeReference
                .apply(name,
                       contigs,
                       lengths,
                       x_contigs,
                       y_contigs,
                       mt_contigs,
                       par_jrep))
        self._init_from_java(jrep)
        # Cache the Python-side values so the property accessors below can
        # answer without a JVM round-trip.
        self._name = name
        self._contigs = contigs
        self._lengths = lengths
        self._x_contigs = x_contigs
        self._y_contigs = y_contigs
        self._mt_contigs = mt_contigs
        self._par = par
        super(GenomeReference, self).__init__()
    @handle_py4j
    def __str__(self):
        """Return the JVM-side string representation of the reference."""
        return self._jrep.toString()
    def __repr__(self):
        # Built entirely from the Python-side cached attributes, so unlike
        # __str__ this makes no JVM call.
        return 'GenomeReference(name=%s, contigs=%s, lengths=%s, x_contigs=%s, y_contigs=%s, mt_contigs=%s, par=%s)' % \
               (self.name, self.contigs, self.lengths, self.x_contigs, self.y_contigs, self.mt_contigs, self.par)
    @handle_py4j
    def __eq__(self, other):
        """Two references are equal when their JVM representations are equal.

        NOTE(review): assumes ``other`` is a GenomeReference; comparing with
        an unrelated type raises AttributeError instead of returning
        NotImplemented -- confirm whether callers rely on this.
        """
        return self._jrep.equals(other._jrep)
    @handle_py4j
    def __hash__(self):
        """Delegate hashing to the JVM object (kept consistent with __eq__)."""
        return self._jrep.hashCode()
    # The following read-only properties return the Python-side values
    # cached in __init__, so none of them makes a JVM round-trip.
    @property
    def name(self):
        """Name of genome reference.
        :rtype: str
        """
        return self._name
    @property
    def contigs(self):
        """Contig names.
        :rtype: list of str
        """
        return self._contigs
    @property
    def lengths(self):
        """Dict of contig name to contig length.
        :rtype: dict of str to int
        """
        return self._lengths
    @property
    def x_contigs(self):
        """X contigs.
        :rtype: list of str
        """
        return self._x_contigs
    @property
    def y_contigs(self):
        """Y contigs.
        :rtype: list of str
        """
        return self._y_contigs
    @property
    def mt_contigs(self):
        """Mitochondrial contigs.
        :rtype: list of str
        """
        return self._mt_contigs
    @property
    def par(self):
        """Pseudoautosomal regions.
        :rtype: list of :class:`.Interval`
        """
        return self._par
@typecheck_method(contig=strlike)
def contig_length(self, contig):
"""Contig length.
:param contig: Contig
:type contig: str
:return: Length of contig.
:rtype: int
"""
if contig in self._lengths:
return self._lengths[contig]
else:
raise KeyError("Contig `{}' is not in reference genome.".format(contig))
    @classmethod
    @record_classmethod
    @handle_py4j
    def GRCh37(cls):
        """Reference genome for GRCh37.

        Data from `GATK resource bundle <ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/b37/human_g1k_v37.dict>`__.

        Each call builds a fresh Python wrapper around the JVM-side GRCh37
        reference via :meth:`._from_java`.

        >>> grch37 = GenomeReference.GRCh37()

        :rtype: :class:`.GenomeReference`
        """
        return GenomeReference._from_java(Env.hail().variant.GenomeReference.GRCh37())
    @classmethod
    @record_classmethod
    @handle_py4j
    def GRCh38(cls):
        """Reference genome for GRCh38.

        Data from `GATK resource bundle <ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/hg38/Homo_sapiens_assembly38.dict>`__.

        Each call builds a fresh Python wrapper around the JVM-side GRCh38
        reference via :meth:`._from_java`.

        >>> grch38 = GenomeReference.GRCh38()

        :rtype: :class:`.GenomeReference`
        """
        return GenomeReference._from_java(Env.hail().variant.GenomeReference.GRCh38())
    @classmethod
    @record_classmethod
    @handle_py4j
    @typecheck_method(file=strlike)
    def from_file(cls, file):
        """Load reference genome from a JSON file.

        The JSON file must have the following format:

        .. code-block:: text

            {"name": "my_reference_genome",
             "contigs": [{"name": "1", "length": 10000000},
                         {"name": "2", "length": 20000000},
                         {"name": "X", "length": 19856300},
                         {"name": "Y", "length": 78140000},
                         {"name": "MT", "length": 532}],
             "xContigs": ["X"],
             "yContigs": ["Y"],
             "mtContigs": ["MT"],
             "par": [{"start": {"contig": "X","position": 60001},"end": {"contig": "X","position": 2699521}},
                     {"start": {"contig": "Y","position": 10001},"end": {"contig": "Y","position": 2649521}}]
            }

        **Notes**

        `name` must be unique and not overlap with Hail's pre-instantiated references: "GRCh37" and "GRCh38".
        The contig names in `xContigs`, `yContigs`, and `mtContigs` must be present in `contigs`. The intervals listed in
        `par` must have contigs in either `xContigs` or `yContigs` and have positions between 0 and the contig length given
        in `contigs`.

        Parsing and validation happen on the JVM side; the result is wrapped
        via :meth:`._from_java`.

        :param file: Path to JSON file.
        :type file: str

        :rtype: :class:`.GenomeReference`
        """
        return GenomeReference._from_java(Env.hail().variant.GenomeReference.fromFile(Env.hc()._jhc, file))
    @handle_py4j
    def _init_from_java(self, jrep):
        # Attach the py4j proxy for the JVM-side GenomeReference.
        self._jrep = jrep
    @classmethod
    def _from_java(cls, jrep):
        """Wrap an existing JVM GenomeReference without re-validating it.

        Bypasses ``__init__`` via ``__new__`` and copies every field out of
        the JVM object into plain Python values so the properties above work
        without further JVM calls.
        """
        gr = GenomeReference.__new__(cls)
        gr._init_from_java(jrep)
        gr._name = jrep.name()
        gr._contigs = [str(x) for x in jrep.contigs()]
        # lengths() yields scala pairs: _1 is the contig name, _2 its length.
        gr._lengths = {str(x._1()): int(x._2()) for x in jiterable_to_list(jrep.lengths())}
        gr._x_contigs = [str(x) for x in jiterable_to_list(jrep.xContigs())]
        gr._y_contigs = [str(x) for x in jiterable_to_list(jrep.yContigs())]
        gr._mt_contigs = [str(x) for x in jiterable_to_list(jrep.mtContigs())]
        gr._par = [Interval._from_java(x) for x in jrep.par()]
        return gr
| true |
07eb13eef85e9160bc09f83b6f869aa56da5442c | Python | daniel-reich/turbo-robot | /hZ4HzhboCJ5dDiNve_7.py | UTF-8 | 896 | 4.21875 | 4 | [] | no_license | """
Create a function that takes a string and returns the reversed string. However
there's a few rules to follow in order to make the challenge interesting:
* The UPPERCASE/lowercase positions must be kept in the same order as the original string (see example #1 and #2).
* Spaces must be kept in the same order as the original string (see example #3).
### Examples
special_reverse_string("Edabit") ➞ "Tibade"
special_reverse_string("UPPER lower") ➞ "REWOL reppu"
special_reverse_string("1 23 456") ➞ "6 54 321"
### Notes
N/A
"""
def special_reverse_string(txt):
    """Reverse the non-space characters of txt while keeping spaces in place
    and re-applying the original string's upper/lower-case pattern."""
    # Reversed pool of every non-space character, consumed left to right.
    pool = iter([c for c in txt if c != ' '][::-1])
    out = []
    for ch in txt:
        if ch == ' ':
            out.append(' ')
        else:
            rc = next(pool)
            # Case follows the position in the original text; caseless
            # characters (digits, etc.) are unaffected by lower().
            out.append(rc.upper() if ch.isupper() else rc.lower())
    return ''.join(out)
| true |
23489e2dc44df4e7d1eea312c3d6480524b4cc0b | Python | jmsleiman/rosalind | /python/fib/main.py | UTF-8 | 603 | 2.96875 | 3 | [
"BSD-2-Clause-Views"
] | permissive | #!/usr/bin/python
# -*- coding: <utf-8> -*-
# Copyright 2015 Joseph M. Sleiman <www.jmsleiman.com>
#
# Licensed under the Simplified FreeBSD License
import sys
def main(n, k):
    """Rosalind-style rabbit recurrence F(x) = F(x-1) + k * F(x-2).

    Seeds the sequence with [0, 1, 1], extends it up to index n, prints the
    whole list, and returns the final term (1 for n < 3).
    """
    mature = list()
    mature.append(0)
    mature.append(1)
    mature.append(1)
    x = 3
    while (x <= n):
        mature.append(mature[x - 1] + mature[x - 2] * k)
        x = x + 1
    # print() with a single argument behaves identically in Python 2 and 3;
    # the original `print mature` statement was Python-2-only.
    print(mature)
    return mature[-1]
if __name__ == "__main__":
    # Expects exactly two CLI arguments: main.py <n> <k>.
    if (len(sys.argv) == 3):
        # print() with one argument is valid in both Python 2 and 3; the
        # original used Python-2-only print statements.
        print(main(int(sys.argv[1]), int(sys.argv[2])))
    else:
        print("Proper usage of this module: \n main.py n k")
| true |
69cc8bc4aa1f891531644429f87e455323f88ea9 | Python | Garrison50/unit-2-brid | /1.3 convert.py | UTF-8 | 266 | 3.921875 | 4 | [] | no_license | def main ():
count = 0
while count <= 5:
celcius = eval(input("What is the Celcius temperature?"))
farenheit =9/5 * celcius + 32
print ("The temperature is", round(farenheit, 1), "degrees Farenheit!")
count = count + 1
# Script entry point: run the conversion loop.
main()
| true |
e20e2528f7919cff1e93164149fae9c5cb849862 | Python | LudovicSchorpp/TM_Ludovic_Schorpp | /codes_flopy/Chabart/notebooks/Chabart_fun.py | UTF-8 | 1,000 | 3.15625 | 3 | [] | no_license | ## hand made function for the Chabart model
import numpy as np
import pandas as pd
#1 well data
def pump_data(file, nrow=38, ncol=42, layer=0, fac=1):
    """Read a semicolon-separated grid of pumping values from a CSV file.

    Values above 3000 are treated as nodata markers and zeroed out. Every
    remaining non-zero cell yields a ((layer, row, col), value * fac) record
    with a DIS-style cellid. `fac` converts units by multiplying the raw
    file values.
    """
    frame = pd.read_csv(file, sep=";", header=None, na_values=None)
    frame[frame > 3000] = 0  # mask nodata markers
    records = []
    for r in range(nrow):
        for c in range(ncol):
            value = frame.iloc[r, c]
            if value != 0:
                records.append(((layer, r, c), value * fac))
    return records
| true |
3e3566cfe902832f8f1a0cdee90ca48940b0a3fb | Python | grimmi/learnpython | /decipherthis.py | UTF-8 | 1,415 | 4.15625 | 4 | [] | no_license | '''
Decipher this!
You are given several secret messages you need to decipher. Here are the conditions:
The first letter corresponds to ASCII character code (case sensitive)
The second letter needs to be switched to the last letter
The last letter needs to be switched to the second letter
If it only has one letter, it will be unchanged
If it only has two letters, you will just need to convert the ASCII character code to a letter
Keepin' it simple -- there are no special characters
Example:
decipherThis('72olle 103doo 100ya'); // 'Hello good day'
decipherThis('82yade 115te 103o'); // 'Ready set go'
taken from: https://www.codewars.com/kata/581e014b55f2c52bb00000f8/train/python
'''
def decipher_this(words):
    """Decode a space-separated cipher: each token starts with an ASCII code
    for its first letter, and the token's second and last letters are swapped.
    """
    def _decode(token):
        # Peel the maximal run of leading digits: the ASCII code.
        digits = ""
        for ch in token:
            if not ch.isdigit():
                break
            digits += ch
        head = chr(int(digits))
        rest = token[len(digits):]
        if not rest:
            return head
        if len(rest) == 1:
            # Second and last letters coincide -- nothing to swap.
            return head + rest
        # Swap second (rest[0]) and last (rest[-1]) letters of the word.
        return head + rest[-1] + rest[1:-1] + rest[0]

    return ' '.join(_decode(token) for token in words.split(' '))
print(decipher_this("65 119esi 111dl 111lw 108dvei 105n 97n 111ka")) | true |
dca75c3ae074a34d58ef2e1048a3e55c1e71c580 | Python | oceanbreak/Videoplatform_Run_Script | /lib/folder_struct/LogFileGenerator.py | UTF-8 | 1,511 | 2.96875 | 3 | [] | no_license | """
Module to write log file
"""
from lib.data.DataCollection import DataCollection
import os
import time
class LogFileGeneraror:
    """Appends semicolon-separated log rows for a DataCollection to a
    timestamped CSV file created under the given directory."""

    def __init__(self, file_path : str, data_collection : DataCollection):
        self.path = file_path
        self.data_collection = data_collection
        self.file_name = self.generateFileName()
        self.file_full_path = os.path.join(self.path, self.file_name)
        # Write the header row immediately so the file always starts with it.
        self._append_row(self.data_collection.logHeader())

    def writeLogString(self):
        """Append one data row built from the collection's current values."""
        self._append_row(self.data_collection.toLogItemsList())

    def _append_row(self, items):
        # None entries become empty cells in the semicolon-separated row.
        cells = [item if item != None else '' for item in items]
        with open(self.file_full_path, 'a') as fs:
            fs.write(';'.join(cells) + '\n')

    def generateFileName(self, prefix='log', extension='csv'):
        """Build '<prefix>_YYYYMMDD_HHMMSS.<extension>' from the collection's
        datetime (assumes a 4-digit year -- the year is not zero-padded)."""
        t = self.data_collection.datetime.data
        stamp = f'{t.year}{t.mon:0>2}{t.day:0>2}_{t.hour:0>2}{t.min:0>2}{t.sec:0>2}'
        return f'{prefix}_{stamp}.{extension}'
| true |
0368a56f1c1d5e79d6dfdf0a495d628c0a02cbb5 | Python | l1legend/Football-Simulator | /Football Simulator/team.py | UTF-8 | 3,515 | 3.90625 | 4 | [] | no_license | import copy
import random
class Team:
    """
    A club in the league: owns a roster of players, tracks its win/loss
    record, and manages its funds (wages out, prize money in).
    """

    def __init__(self, name):
        self.name = name
        self.players = []
        self.wins = 0
        self.losses = 0
        self.money = 1000000  # starting funds

    def weekly_salary(self):
        """Total weekly wages owed to every player on the roster."""
        return sum(player.salary() for player in self.players)

    def pay_players(self):
        """Deduct one week of wages from the club's funds."""
        self.money -= self.weekly_salary()

    def rating(self):
        """Team strength: the sum of all player skill values."""
        return sum(player.skill for player in self.players)

    def __str__(self):
        return '%s %s' % (self.name, self.rating())
class Game:
    """
    One match between a home and an away side, belonging to a league.
    After play(), home_team_won is True for a home win and False for an away
    win; ties count for the away side.
    """

    def __init__(self, league, home_team, away_team):
        self.league = league
        self.home_team = home_team
        self.away_team = away_team
        self.home_team_won = None  # unknown until play() runs
        print('%s vs. %s' % (self.home_team, self.away_team))

    def play(self):
        """Decide the game by comparing team ratings and record the winner."""
        print('Play begins')
        # (A detailed match simulation could go here.)
        print('Play ends')
        if self.home_team.rating() > self.away_team.rating():
            print('%s wins' % self.home_team)
            self.home_team_won = True
        else:
            print('%s wins.' % self.away_team)
            self.home_team_won = False
class League:
    """
    A league of teams. Pairs teams up for rounds of games, keeps the
    standings, and applies each game's financial consequences.
    """

    def __init__(self, name, teams, players):
        self.name = name
        self.teams = teams
        self.players = players
        self.rounds_played = 0

    def play_round(self):
        """Randomly pair off every team and play one game per pairing."""
        print('Round begins')
        pool = copy.copy(self.teams)
        for _ in range(len(self.teams) // 2):
            # Pick the home side first, then the away side, without replacement.
            home = random.choice(pool)
            pool.remove(home)
            away = random.choice(pool)
            pool.remove(away)
            match = Game(self, home, away)
            match.play()
            self.resolve_game(match)
        print('Round ends')
        self.rounds_played += 1
        # Show the standings after every round.
        self.ladder()

    def ladder(self):
        """Print the standings, most wins first."""
        for team in sorted(self.teams, key=lambda t: t.wins, reverse=True):
            print('%s %s wins' % (team, team.wins))

    def resolve_game(self, game):
        """Record a result: update standings, award prize money, pay wages."""
        if game.home_team_won:
            winner, loser = game.home_team, game.away_team
        else:
            winner, loser = game.away_team, game.home_team
        winner.wins += 1
        loser.losses += 1
        # The winner earns a random prize of up to 200000.
        winner.money += round(200000 * random.random())
        # Both squads draw their weekly wages regardless of the result.
        game.home_team.pay_players()
        game.away_team.pay_players()
| true |
c0ec9227a11c002828c680bbdb53c330e5fd63d6 | Python | yuanswife/nodule-predict | /count_pos.py | UTF-8 | 897 | 3.03125 | 3 | [] | no_license | #!usr/bin/env python
# -*-coding:utf-8-*-
import os
import sys
import glob
import pandas
"""filefolder/csv文件s 对这些csv文件进行操作 count每个csv下的条目"""
def count_pos(src_dir):
    """Count the total number of annotation rows across all CSV files
    directly under src_dir (src_dir must end with a path separator).

    :param src_dir: directory prefix globbed as ``src_dir + "*.csv"``
    :return: total row count over every matched CSV
    """
    count = 0
    for csv_file in glob.glob(src_dir + "*.csv"):
        df_annos = pandas.read_csv(csv_file)
        # len(df) is the row count; the original iterated df.iterrows()
        # just to increment a counter per row.
        count += len(df_annos)
    return count
if __name__ == '__main__':
    # The source directory path is taken as the first command-line argument.
    if len(sys.argv) < 2:
        print('Usage: {} <source directory> )'.format(sys.argv[0]))
        sys.exit(1)
    src_dir = sys.argv[1]
    print("src_dir:", src_dir)
    if not os.path.isdir(src_dir):
        print('Invalid source directory: {}'.format(src_dir))
        sys.exit(2)
    count = count_pos(src_dir=src_dir)
    print("count:", count)
| true |
c7e00353b5e5df91d295271923d213dfeaea8abd | Python | emilyrmurray1/comp110-21f-workspace | /exercises/ex02/count_letters.py | UTF-8 | 216 | 3.6875 | 4 | [] | no_license | """Counting letters in a string."""
__author__ = "730238066"
from typing import Counter
letter: str = input("What letter do you want to search for?: ")
word: str = input("Enter a word: ")
# Count how often the chosen letter occurs in the word. The original printed
# Counter(letter) -- a tally of the single search character -- and never used
# `word` at all.
print(Counter(word)[letter]) | true
3fd2fbfb1df67f2cfabc6df8388aface88a55341 | Python | gauravgpta93/LeetCode | /Code/128.py | UTF-8 | 484 | 2.75 | 3 | [] | no_license | class Solution:
def longestConsecutive(self, nums: List[int]) -> int:
if not nums:
return 0
num_set = set(nums)
longest = current = 1
for num in nums:
if num - 1 not in num_set:
next_val = num + 1
while next_val in num_set:
current += 1
next_val += 1
longest = max(longest, current)
current = 1
return longest
| true |
10c0bbe18d179703d79032a94967f89aa74ffea5 | Python | vasantidatta/PythonProjectDemo | /Demo_1/string_concatening_demo.py | UTF-8 | 192 | 3.453125 | 3 | [] | no_license | # String conactening :- is combining two strings to produce new string
# Three sample strings used to demonstrate concatenation with `+`.
va = "cry cry till you die "
pr = "bye bye"
ro = " success under my foot"
# casefold() returns an aggressively lower-cased copy (va is already lower case).
print(va.casefold())
# `+` joins the three strings into one new string.
print(va+pr+ro)
| true |
c714de32083cbc3c23b763fb1501352081118d2b | Python | alejandroverita/python-courses | /segundo_proyecto/archives.py | UTF-8 | 1,777 | 4.125 | 4 | [] | no_license | def read():
names = []
with open("./archives/name.txt", "r", encoding="utf-8") as f:
for line in f:
# strip() is used to delete spaces in the line
if len(line.strip()) > 0:
names.append(line.strip())
if len(names) > 0:
print(names)
else:
print("Archivo vacio")
def write(texto):
    """Create or overwrite ./archives/new.txt with the text plus a newline."""
    with open("./archives/new.txt", "w", encoding="utf-8") as f:
        f.write(texto + "\n")
def add_name(name):
    """Append one name, on its own line, to ./archives/name.txt."""
    with open("./archives/name.txt", "a", encoding="utf-8") as f:
        f.write(name + "\n")
def delete_name(name):
    # TODO: unimplemented stub -- the menu in run() (options 1-4) never
    # calls it yet.
    pass
def run():
    """Interactive menu loop: create the file, add a name, list names, or
    quit. Non-numeric input is caught and reported instead of crashing."""
    start_program = True
    while start_program:
        try:
            print(
                """
                ----------------------------------------------------------------------
                Seleccione un numero:
                1. Crear un nuevo archivo
                2. Agregar nombre
                3. Listar nombre
                4. Salir del programa
                ----------------------------------------------------------------------
                """
            )
            n = int(input("Ingrese un valor: "))
            if n == 1:
                texto = input("Ingresa un texto: ")
                write(texto)
            elif n == 2:
                name = input("Ingrese un nombre: ")
                add_name(name)
            elif n == 3:
                read()
            elif n == 4:
                start_program = False
                print("Gracias por usar el programa")
        except ValueError:
            # int() failed on non-numeric input.
            print("Error, seleccione una opcion valida")


if __name__ == "__main__":
    run()
| true |
f91fbbd7c151f0743428099294493e580cd3dd27 | Python | byronwasti/SoftDes | /inclass/classes.py | UTF-8 | 1,002 | 3.953125 | 4 | [] | no_license | import time
'''
# WE MAKING SOME CLASS STUFF
class Basic:
def __init__(self):
self.var1
self.var2
self.var3 = True
def func1(self, var):
self.var1
var
copy.copy( class ) is to make a shallow copy
copy.deepcopy ( class ) is to make a really deep copy
'''
class Rectangle:
    """Toy rectangle defined by two corner points; demonstrates operator
    overloading (__add__/__radd__/__sub__/__rsub__)."""

    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2
        self.center = [False, False]  # placeholder until find_center fills it

    def __str__(self):
        return "I am a rectangle!"

    def __add__(self, number):
        # Embeds the added number in the middle of the word "rectangle".
        return "I am a rect%dngle! " % number

    def __radd__(self, number):
        # number + rect delegates to rect + number.
        return self + number

    def __sub__(self, number):
        return "I am a rect%dngle! " % number

    def __rsub__(self, number):
        # number - rect delegates to rect - number.
        return self - number
def find_center(rect):
    """Compute the midpoint of the rectangle's two corner points, store it in
    rect.center, and return it."""
    for i in range(2):  # range() works on Python 2 and 3; xrange was py2-only
        # The midpoint is the average of the corners. The original computed
        # (p1 - p2) / 2, which is half the diagonal vector, not the center.
        rect.center[i] = (rect.p1[i] + rect.p2[i]) / 2.0
    return rect.center
# Demo: exercise find_center and the overloaded +/- operators.
# print() with a single argument works in both Python 2 and 3; the original
# used Python-2-only print statements.
rect = Rectangle([30,422],[5,3])
print(find_center(rect))
print(rect + 1)
print(rect - 4)
print(1 + rect)
print(4 - rect)
| true |
4bdd9b14377a24ff1685d59e56636a2da4913463 | Python | shahaddhafer/Murtada_Almutawah | /Challenges/weekFive/dayTwo.py | UTF-8 | 3,500 | 4.40625 | 4 | [] | no_license | # Queue - FIFO
# ------------------------------------------------
class Node:
    """One link of the queue: a payload value plus a pointer to the next node."""

    def __init__(self, value):
        self.value = value
        self.next = None  # set when a later value is enqueued behind this one
class Queue:
    """FIFO queue over a singly linked list: enqueue at the tail, dequeue
    from the head."""

    def __init__(self):
        self.head = None
        self.tail = None

    def front(self):
        """Return the value at the front without removing it (None if empty)."""
        if self.head is None:
            return None
        return self.head.value

    def isEmpty(self):
        """True when the queue holds no values."""
        return self.head is None

    def enqueue(self, val):
        """Append val at the tail of the queue."""
        node = Node(val)
        if self.head is None:
            # First element: head and tail are the same node.
            self.head = self.tail = node
        else:
            self.tail.next = node
            self.tail = node

    def dequeue(self):
        """Remove and return the front value; None when the queue is empty."""
        if self.head is None:
            return None
        value = self.head.value
        if self.head is self.tail:
            # Removing the last element empties the queue.
            self.head = self.tail = None
        else:
            self.head = self.head.next
        return value

    def contains(self, val):
        """Whether val appears anywhere in the queue."""
        node = self.head
        while node:
            if node.value == val:
                return True
            node = node.next
        return False

    def size(self):
        """Number of values currently stored."""
        total = 0
        node = self.head
        while node:
            total += 1
            node = node.next
        return total

    def display(self):
        """Render the queue front-to-back as '[v1->v2->...]'."""
        values = []
        node = self.head
        while node:
            values.append(str(node.value))
            node = node.next
        return "[" + "->".join(values) + "]"
if __name__ == "__main__":
    # Smoke-test drive: show the queue empty, filled, probed, and drained.
    queue = Queue()
    print(queue.display(), ',isEmpty ?:',
          queue.isEmpty(), ',Size:', queue.size(), ',Front:', queue.front())
    queue.enqueue(1)
    queue.enqueue(2)
    queue.enqueue(3)
    print('Dose the queue contain 4 ?', queue.contains(4))
    print('Dose the queue contain 2 ?', queue.contains(2))
    print('Dose the queue contain 1 ?', queue.contains(1))
    print('Dose the queue contain 3 ?', queue.contains(3))
    print(queue.display(), ',isEmpty ?:',
          queue.isEmpty(), ',Size:', queue.size(), ',Front:', queue.front())
    # Drain the queue, printing each removed value.
    while(queue.isEmpty() is False):
        print('Dequeuing ..', queue.dequeue(),
              ',Front:', queue.front(), queue.display())
    print(queue.display(), ',isEmpty ?:',
          queue.isEmpty(), ',Size:', queue.size(), ',Front:', queue.front())
| true |
02a6491ec04226ca7c48d696cdeb991370ef1bf5 | Python | mikaelvilen/ot-harjoitustyo | /src/db.py | UTF-8 | 1,314 | 3 | 3 | [] | no_license | import sqlite3
def init_database():
    """Drop and recreate the HIGH_SCORES table in highscores.db."""
    conn = sqlite3.connect('highscores.db')
    conn.execute('''
    DROP TABLE IF EXISTS HIGH_SCORES''')
    conn.execute('''CREATE TABLE HIGH_SCORES
        (NAME TEXT NOT NULL,
        SCORE INT NOT NULL)''')
    conn.commit()
    conn.close()
def set_sample_scores():
    """Seed the HIGH_SCORES table with five fixed demo entries."""
    conn = sqlite3.connect('highscores.db')
    conn.execute("INSERT INTO HIGH_SCORES (NAME, SCORE) \
            VALUES ('random9000', 9000)")
    conn.execute("INSERT INTO HIGH_SCORES (NAME, SCORE) \
            VALUES ('xyz', 8000)")
    conn.execute("INSERT INTO HIGH_SCORES (NAME, SCORE) \
            VALUES ('asd', 5000)")
    conn.execute("INSERT INTO HIGH_SCORES (NAME, SCORE) \
            VALUES ('qwerty', 4000)")
    conn.execute("INSERT INTO HIGH_SCORES (NAME, SCORE) \
            VALUES ('555', 100)")
    conn.commit()
    conn.close()
def get_scores():
    """Return up to five (name, score) tuples, highest score first.

    The connection is closed in a finally block -- the original leaked the
    connection on every call (it was opened but never closed).
    """
    conn = sqlite3.connect('highscores.db')
    try:
        cursor = conn.execute(
            "SELECT * FROM HIGH_SCORES ORDER BY SCORE DESC LIMIT 5")
        return [(row[0], row[1]) for row in cursor]
    finally:
        conn.close()
def set_new_score(name, score):
    """Insert one (name, score) row.

    Uses ? placeholders, so values are bound by the driver (SQL-injection
    safe) rather than interpolated into the statement text.
    """
    conn = sqlite3.connect('highscores.db')
    conn.execute("INSERT INTO HIGH_SCORES (NAME, SCORE) \
            VALUES (?, ?)", (name, score))
    conn.commit()
    conn.close()
| true |
943f9f8ba881afd473242d8e65549b12d46a4587 | Python | Haeroo/recipe-app-api | /app/core/tests/test_models.py | UTF-8 | 1,716 | 2.921875 | 3 | [] | no_license | from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@mnp.ca', password='testadmin123'):
    """ Create a sample user for tests that need an existing account """
    return get_user_model().objects.create_user(email, password)
class ModelTests (TestCase):
    """Unit tests for the custom user model and the Tag model."""

    def test_create_user_with_email_successful(self):
        """ A test to make sure a new user was created successfully """
        email = 'test@mnp.ca'
        password = '6TuttiFruity9'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        # check_password verifies the raw password against the stored hash.
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normalized(self):
        """ This test checks to see if the email domain name is normalized """
        email = "test@MNP.COM"
        user = get_user_model().objects.create_user(email, 'test123')
        self.assertEqual(user.email, email.lower())

    def test_new_user_invalid_email(self):
        """ This test confirms that creating a user without an email fails """
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')

    def test_create_superuser(self):
        """Test confirms a new superuser has is_superuser & is_staff set"""
        user = get_user_model().objects.create_superuser(
            'test@test.com',
            'test123')
        self.assertTrue(user.is_staff)
        self.assertTrue(user.is_superuser)

    def test_tag_model_as_string(self):
        """ Test the tag string representation """
        tag = models.Tag.objects.create(
            user=sample_user(),
            name='Vegan'
        )
        # str(tag) is expected to be the tag's name.
        self.assertEqual(str(tag), tag.name)
| true |
1071efd80aa6738e6916cd6c5a101149b3631533 | Python | 592McAvoy/3D-reconstruction | /experimental code/img_process/change_obj.py | UTF-8 | 3,316 | 2.6875 | 3 | [] | no_license | import numpy as np
import os
from shutil import copyfile
def load_obj(filename):
    """Parse a Wavefront .obj file.

    Returns a tuple (v, f, vt, vn) of numpy arrays:
      v  -- vertex coordinates (one row per 'v' line)
      f  -- faces as 0-based [vertex, texcoord, normal] index triples
      vt -- texture coordinates, trimmed to (u, v)
      vn -- vertex normals
    """
    def get_num(string, type):
        # 'list' handles a face component like "3/2/1", already split on "/";
        # empty components (e.g. "1//2") map to index 0.
        if type == 'int':
            return int(string)
        elif type == 'float':
            return float(string)
        elif type == 'list':
            li = []
            for s in string:
                # Was `s is not ""`: an identity test on strings, which only
                # worked by accident of small-string interning.
                if s != "":
                    li.append(int(s) - 1)  # .obj indices are 1-based
                else:
                    li.append(0)
            return li
        else:
            print('Wrong type specified')

    vertices = []
    faces = []
    texcoords = []
    normals = []
    # `with` guarantees the file is closed; the original opened it as `f`,
    # called close(), then reused the same name for the faces array.
    with open(filename, 'r') as obj_file:
        for line in obj_file:
            parts = line.split()  # renamed from `str`, which shadowed the builtin
            if len(parts) == 0:
                continue
            if parts[0] == '#':
                continue
            elif parts[0] == 'v':
                vertices.append([get_num(s, 'float') for s in parts[1:]])
            elif parts[0] == 'f':
                faces.append([get_num(s.split("/"), 'list') for s in parts[1:]])
            elif parts[0] == 'vt':
                texcoords.append([get_num(s, 'float') for s in parts[1:]])
            elif parts[0] == 'vn':
                normals.append([get_num(s, 'float') for s in parts[1:]])
    v = np.asarray(vertices)
    vn = np.asarray(normals)
    f = np.asarray(faces)
    vt = np.asarray(texcoords)
    if len(vt) > 0:
        vt = vt[:, :2]  # keep only (u, v); drop the optional w coordinate
    return (v, f, vt, vn)
def process_obj(mesh_v, scale):
    """Center the mesh on its vertex mean and rescale it.

    NOTE: mesh_v is modified IN PLACE (it must be a float numpy array, or the
    in-place /= will fail) and is also returned for convenience. After the
    call, the largest (signed) vertex component equals `scale`; assumes the
    centered mesh has a positive maximum -- confirm for degenerate inputs.
    """
    center = np.mean(mesh_v, axis=0)
    mesh_v -= center
    max_val = np.max(mesh_v)
    mesh_v /= max_val
    mesh_v *= scale
    return mesh_v
def write_obj( mesh_v, mesh_f, mesh_vt, mesh_vn, filepath, verbose=True):
    """Write a Wavefront .obj file from vertex/face/texcoord/normal arrays.

    mesh_f is expected to hold 0-based indices (as produced by load_obj);
    they are shifted back to the .obj file's 1-based convention on output.
    mesh_vt / mesh_vn may be None to omit those sections.
    """
    with open( filepath, 'w') as fp:
        for v in mesh_v:
            # 3 components: position only; 6: position plus per-vertex color.
            if len(v) == 3:
                fp.write( 'v %f %f %f\n' % ( v[0], v[1], v[2] ) )
            elif len(v) == 6:
                fp.write( 'v %f %f %f %f %f %f\n' % ( v[0], v[1], v[2], v[3], v[4], v[5] ) )
        if mesh_vt is not None:
            for t in mesh_vt:
                fp.write( 'vt %f %f\n' % ( t[0], t[1] ) )
        if mesh_vn is not None:
            for n in mesh_vn:
                fp.write( 'vn %f %f %f\n' % ( n[0], n[1], n[2] ) )
        for f in mesh_f+1: # Faces are 1-based, not 0-based in obj files
            # Shape (3,) / (1,3) / (3,1): one index per corner, reused for
            # vertex, texcoord and normal; (3,2): v/vt pairs; (3,3): v/vt/vn.
            if f.shape == (1,3) or f.shape == (3,) or f.shape == (3,1):
                fp.write( 'f %d/%d/%d %d/%d/%d %d/%d/%d\n' % ( f[0],f[0],f[0],f[1],f[1],f[1],f[2],f[2],f[2]) )
            elif f.shape == (3,2):
                fp.write( 'f %d/%d %d/%d %d/%d\n' % ( f[0,0],f[0,1],f[1,0],f[1,1],f[2,0],f[2,1]) )
            elif f.shape == (3,3):
                fp.write( 'f %d/%d/%d %d/%d/%d %d/%d/%d\n' % ( f[0,0],f[0,1],f[0,2],f[1,0],f[1,1],f[1,2],f[2,0],f[2,1],f[2,2]) )
            else:
                print("strange faces shape!")
    if verbose:
        print ('mesh saved to: ', filepath)
if __name__ == "__main__":
    # Batch job: rescale every mesh listed (one name per line) in meta.txt.
    with open("meta.txt", 'r') as meta_file:
        cnt = 0
        for line in meta_file:
            # Process at most 2001 entries (cnt 0..2000).
            if cnt > 2000:
                break
            cnt = cnt + 1
            # rstrip('\n') instead of line[:-1]: slicing would eat the last
            # character of a final line that has no trailing newline.
            name = line.rstrip('\n')
            in_file = "./comb_obj/" + name + ".obj"
            out_file = "./scaled_obj/" + name + ".obj"
            v, f, vt, vn = load_obj(in_file)
            v_out = process_obj(v, 10)
            write_obj(v_out, f, vt, vn, out_file)
| true |
b53573209ec0b12f241254619fac5699635e817f | Python | emabellor/TGActivityDetection | /python/projects/CNN/test/Classification/trajectorycorrection.py | UTF-8 | 3,846 | 2.84375 | 3 | [] | no_license | from tkinter import Tk
from tkinter.filedialog import askopenfilename
import cv2
import json
import numpy as np
from classutils import ClassUtils
from classdescriptors import ClassDescriptors
import math
threshold_angle = 45
threshold_rp = 100
def main():
    """Pick a pose-list JSON via a file dialog, draw the trajectory on a
    white plane with OpenCV, mark RP (reference-point) poses in red per the
    Ko loitering method, and display the result."""
    print('Initializing main function')
    # Hide the empty tkinter root window; only the file dialog is wanted.
    Tk().withdraw()
    init_dir = ClassUtils.activity_base_path
    options = {
        'initialdir': init_dir,
        'filetypes': (("JSON Files", "*.json"),
                      ("All files", "*.*"))
    }
    filename = askopenfilename(**options)
    # askopenfilename returns an empty string when the dialog is cancelled,
    # so test for falsiness -- the original `is None` check never fired.
    if not filename:
        raise Exception('Filename not selected!!!')
    print('Filename selected: {0}'.format(filename))
    # World-coordinate window that gets rescaled onto the drawing plane.
    min_x = -900
    max_x = 1200
    min_y = -900
    max_y = 1200
    delta_x = max_x - min_x
    delta_y = max_y - min_y
    width_plane = 800
    height_plane = 800
    # White canvas.
    img_plane = np.zeros((height_plane, width_plane, 3), np.uint8)
    img_plane[:, :] = (255, 255, 255)
    with open(filename, 'r') as f:
        json_txt = f.read()
    json_data = json.loads(json_txt)
    list_poses = json_data["listPoses"]
    list_rp, list_action_poses = ClassDescriptors.get_moving_action_poses(list_poses)
    # Draw each consecutive pose pair as a black line segment.
    for i in range(1, len(list_poses)):
        pose1 = list_poses[i - 1]
        pose2 = list_poses[i]
        global_pos1 = pose1['globalPosition']
        global_pos2 = pose2['globalPosition']
        pt_plane1 = transform_point(global_pos1, min_x, min_y, width_plane, height_plane, delta_x, delta_y)
        pt_plane2 = transform_point(global_pos2, min_x, min_y, width_plane, height_plane, delta_x, delta_y)
        thickness = 3
        cv2.line(img_plane, pt_plane1, pt_plane2, (0, 0, 0), thickness)
        # Loitering detection follows the Ko method:
        # http://ijssst.info/Vol-15/No-2/data/3251a254.pdf
        if i == 1:
            # Mark the very first trajectory point with a large red square.
            # (The original tested `i == 0`, which can never be true because
            # the loop starts at 1, so the start marker was never drawn.)
            rect_rad = 6
            pt1 = pt_plane1[0] - rect_rad, pt_plane1[1] - rect_rad
            pt2 = pt_plane1[0] + rect_rad, pt_plane1[1] + rect_rad
            cv2.rectangle(img_plane, pt1, pt2, (0, 0, 255), -1)
        # Is the current pose one of the detected RP indices?
        is_rp = False
        for rp in list_rp:
            if 'index' not in rp:
                print('Hello!')
            if rp['index'] == i:
                is_rp = True
                break
        if is_rp:
            # RP pose: large red marker.
            rect_rad = 6
            pt1 = pt_plane2[0] - rect_rad, pt_plane2[1] - rect_rad
            pt2 = pt_plane2[0] + rect_rad, pt_plane2[1] + rect_rad
            cv2.rectangle(img_plane, pt1, pt2, (0, 0, 255), -1)
        else:
            # Ordinary pose: small green marker.
            rect_rad = 3
            pt1 = pt_plane2[0] - rect_rad, pt_plane2[1] - rect_rad
            pt2 = pt_plane2[0] + rect_rad, pt_plane2[1] + rect_rad
            cv2.rectangle(img_plane, pt1, pt2, (0, 255, 0), -1)
    print('Total trajectories: {0}'.format(len(list_action_poses)))
    cv2.namedWindow('main_window', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('main_window', img_plane)
    print('Image Loaded! Press a key to continue!')
    cv2.waitKey()
    print('Done')
def transform_point(point, min_x, min_y, width_plane, height_plane, delta_x, delta_y):
    """Map a world-coordinate (x, y) point onto plane pixel coordinates.

    The y axis is flipped so that increasing world y goes up on the image.
    """
    px = int((point[0] - min_x) * width_plane / delta_x)
    py = height_plane - int((point[1] - min_y) * height_plane / delta_y)
    return px, py
if __name__ == '__main__':
    # Script entry point.
    main()
| true |
9749f5dc6e164a6a5c931137d799055c88ff9daa | Python | ajitmourya7/Zookeeper | /Topics/Program with numbers/Good rest on vacation/main.py | UTF-8 | 247 | 3.3125 | 3 | [] | no_license | # put your python code here
# Trip-cost inputs, one integer per line.
days = int(input())
cost_per_day = int(input())
one_way_flight_cost = int(input())
# A hotel night is needed for every day except the last (days - 1 nights).
cost_per_night_in_hotel = int(input())
print((days * cost_per_day) + ((days - 1) * cost_per_night_in_hotel) + (one_way_flight_cost * 2)) | true |
3d6b9a07cdbdc6fea2c389aabddd9ca167fea324 | Python | gaylonalfano/30-days-of-python | /Day17-data-pipeline-jupyter-pandas-fastapi/server/main.py | UTF-8 | 1,691 | 2.671875 | 3 | [] | no_license | import os
import typing as t
import pandas as pd
from fastapi import FastAPI
# Project root: two directory levels up from this file (server/main.py).
BASE_DIR = os.path.dirname(
    os.path.dirname(os.path.abspath(__file__))
)  # /17-data-pipeline
CACHE_DIR = os.path.join(BASE_DIR, "cache")
# print(os.path.abspath(__file__)) # .../main.py
# print(BASE_DIR) # /Users.../17-data-pipeline-jupyter-pandas-fastapi
# print(CACHE_DIR) # /Users/.../17-data-pipeline-jupyter-pandas-fastapi/cache
# Cleaned CSV consumed by the /box-office endpoint -- presumably produced by
# the notebook pipeline stage; confirm before changing the path.
dataset = os.path.join(CACHE_DIR, "movies-box-office-dataset-cleaned.csv")
app = FastAPI()
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.get("/box-office")
def read_box_office_data():
"""
Retrieve box office dataset using Pandas.
"""
df = pd.read_csv(dataset)
# Return a DICT of the Rank column in our CSV:
# {'Rank': 7095, 'Release_Group': 'Rififi 2000 Re-release', 'Worldwide': 463593, 'Domesti
# c': 460226, 'Domestic_%': 0.992737163848462, 'Foreign': 3367, 'Foreign_%': 0.007262836151538094, 'Year': 2000
# , 'Filename': '2000.csv'}
return df.to_dict("Rank")
# test_df = pd.read_csv(dataset)
# print(test_df.to_dict("Rank"))
# {'Rank': 7095, 'Release_Group': 'Rififi 2000 Re-release', 'Worldwide': 463593, 'Domesti
# c': 460226, 'Domestic_%': 0.992737163848462, 'Foreign': 3367, 'Foreign_%': 0.007262836151538094, 'Year': 2000
# , 'Filename': '2000.csv'}
# run.sh (chmod +x run.sh)
# uvicorn main:app --reload
# main: the file 'main.py' (the Python 'module')
# app: the objected created inside of main.py with the line app = FastAPI()
# --reload: make the server restart after code changes.
# NOTE: Need to ./run.sh from INSIDE /server directory
# Otherwise, need to modify to: server.main:app --reload
| true |
c3bf33adb9e4b485f7859572342df81fffe44b60 | Python | Carolinacapote/AirBnB_clone_v3 | /api/v1/views/cities.py | UTF-8 | 2,061 | 2.84375 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/python3
"""
Script that starts a Flask Blueprint
"""
from api.v1.views import app_views
from models import storage
from models.city import City
from flask import abort
from flask import jsonify
from flask import request
@app_views.route('/cities/<id>', methods=['GET'], strict_slashes=False)
def get_city(id=None):
    """ Retrieves a City by id as (dict, 200); 404s when no such City exists """
    city = storage.get(storage.classes['City'], id)
    if city is None:
        abort(404)
    return city.to_dict(), 200
@app_views.route('/states/<id>/cities', methods=['GET'], strict_slashes=False)
def get_cities(id):
    """ Retrieves the list of all cities of a specific State; 404s when the
    State id is unknown """
    state = storage.get(storage.classes['State'], id)
    if state is None:
        abort(404)
    cities = state.cities
    return jsonify([obj.to_dict() for obj in cities])
@app_views.route('/cities/<id>', methods=['DELETE'], strict_slashes=False)
def delete_city(id):
    """ Deletes a City object, persists the change, and returns ({}, 200);
    404s when no such City exists """
    city = storage.get(storage.classes['City'], id)
    if city is None:
        abort(404)
    city.delete()
    storage.save()
    return {}, 200
@app_views.route('/states/<id>/cities', methods=['POST'], strict_slashes=False)
def create_city(id):
    """ Creates a City linked to the State with the given id.

    Errors: 404 unknown State, 400 'Not a JSON' for a non-JSON body,
    400 'Missing name' when the payload lacks a name. Returns the new
    city's dict with status 201 on success.
    """
    state = storage.get(storage.classes['State'], id)
    if state is None:
        abort(404)
    data = request.get_json()
    if data is None:
        abort(400, 'Not a JSON')
    # Guard clause instead of nesting; `in data` rather than `in data.keys()`.
    if 'name' not in data:
        abort(400, 'Missing name')
    new_city = City(name=data.get('name'), state_id=id)
    new_city.save()
    return new_city.to_dict(), 201
@app_views.route('/cities/<id>', methods=['PUT'], strict_slashes=False)
def update_city(id):
    """Update the City identified by `id` with the JSON request body.

    Every key/value pair in the payload is applied to the City except
    the immutable keys 'id', 'state_id', 'created_at' and 'updated_at'.

    Errors:
        400 "Not a JSON" -- the request body is not valid JSON.
        404 -- no City matches the given id.

    Returns the updated City's dict with status 200.  (Previously the
    handler only applied the 'name' key and returned None -- an implicit
    500 -- whenever 'name' was absent from the payload.)
    """
    data = request.get_json()
    if data is None:
        abort(400, 'Not a JSON')
    city = storage.get(storage.classes['City'], id)
    if city is None:
        abort(404)
    for key, value in data.items():
        # Never let a client overwrite identity/bookkeeping fields.
        if key not in ('id', 'state_id', 'created_at', 'updated_at'):
            setattr(city, key, value)
    city.save()
    return city.to_dict(), 200
| true |
5ce2fc625e8e9ea4c1228352a93664a39ece6867 | Python | DeepLearningHB/PythonMentoring | /Python_Mentoring_01/20195161_문현승-Lab01/20195161_문현승_Lab02_Task01.py | UTF-8 | 74 | 3.5 | 4 | [] | no_license | a=int(input("1num: "))
b=int(input("2num: "))
print(a+b, a-b, a**b, a%b) | true |
b55628ce3b712c989639c851bac4999859e6be0b | Python | migzpogi/PokerCalculator | /PokerCalcMain.py | UTF-8 | 4,348 | 3.390625 | 3 | [
"MIT"
] | permissive | import argparse
from configparser import ConfigParser
import logging.config
import sys
from deuces.Card import Card
from deuces.Evaluator import Evaluator
from lib.checkers import input_type_checker
from lib.mydeck import MyDeck
log = logging.getLogger(__name__)
def handle_args(args):
    """Parse the command-line card input for the poker calculator.

    :param args: list of raw CLI tokens (typically sys.argv[1:])
    :return: argparse.Namespace with `board` and `hand` attributes
    """
    parser = argparse.ArgumentParser(
        description='Poker Calculater. Please input your cards:')
    parser.add_argument('-b', '--board', action='store', nargs='*', default=None,
                        dest='board', help='The board cards.')
    parser.add_argument('-c', '--hand', action='store', nargs=2, default=None,
                        dest='hand', help='The hand cards.')
    # Called with no arguments at all: show usage and bail out.
    if not args:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args(args)
def calculate_deuces(board_arg, hand_arg):
    """Evaluate a poker hand with the deuces library.

    :param board_arg: board cards as a list of strings (len > 2)
    :param hand_arg: the two hole cards as a list of strings
    :return: tuple of (deuces score, human-readable hand class)
    """
    log.debug('Calculating Deuces of {} and {}'.format(board_arg, hand_arg))
    # Invalid card strings are fatal: report and exit.
    if not input_type_checker(board_arg, hand_arg):
        log.error('Please see documentation for list of valid input.')
        sys.exit(1)
    # Translate string representations into deuces Card ints.
    board_cards = [Card.new(card) for card in board_arg]
    hand_cards = [Card.new(card) for card in hand_arg]
    evaluator = Evaluator()
    score = evaluator.evaluate(board_cards, hand_cards)
    hand_class = evaluator.class_to_string(evaluator.get_rank_class(score))
    return (score, hand_class)
def draw_from_deck(number, exclude):
    """Draw `number` cards from a fresh deck, skipping unwanted ones.

    :param number: how many cards to return
    :param exclude: cards that must not appear in the result
    :return: list of drawn cards, none of which are in `exclude`
    """
    deck = MyDeck()
    drawn_cards = []
    # Keep pulling until enough acceptable cards are collected; draws
    # that hit a card in `exclude` are simply discarded.
    while len(drawn_cards) < number:
        candidate = deck.draw()
        if candidate not in exclude:
            drawn_cards.append(candidate)
    return drawn_cards
def compute_win_percentage(board_arg, hand_arg):
    """
    Computes your winning percentage using a Monte Carlo Simulation.

    Repeatedly completes the board with random cards and deals a random
    opponent hand, counting how often our hand wins (a lower deuces
    score is a stronger hand; ties count as losses).

    :param board_arg: list of string with len > 2 (known board cards)
    :param hand_arg: list of string with len = 2 (our hole cards)
    :return: float in [0, 1], fraction of simulations won
    """
    log.debug('Computing win percentage of {} and {}'.format(board_arg, hand_arg))
    if not input_type_checker(board_arg, hand_arg):
        # Bug fix: invalid input previously fell through with empty
        # board/hand lists and the simulation ran on nonsense data.
        # Fail loudly, consistent with calculate_deuces().
        log.error('Please see documentation for list of valid input.')
        sys.exit(1)
    # convert string to Card object
    board = [Card.new(c) for c in board_arg]
    hand = [Card.new(c) for c in hand_arg]
    simulation_count = 3000
    win_count = 0
    evaluator = Evaluator()
    for _ in range(simulation_count):
        # Fill the board up to 5 cards, then deal the opponent 2 cards,
        # never reusing a card that is already visible.
        board_fill = 5 - len(board)
        simulate_board = board + draw_from_deck(board_fill, board + hand)
        simulate_hand = draw_from_deck(2, simulate_board + hand)
        my_score = evaluator.evaluate(simulate_board, hand)
        enemy_score = evaluator.evaluate(simulate_board, simulate_hand)
        if my_score < enemy_score:  # lower deuces score == stronger hand
            win_count += 1
        log.debug('Your score: {} {} vs {} {}'.format(
            evaluator.class_to_string(evaluator.get_rank_class(my_score)),
            my_score,
            evaluator.class_to_string(evaluator.get_rank_class(enemy_score)),
            enemy_score))
    # The original kept a separate `runs` counter, but it was always
    # equal to simulation_count -- divide directly.
    return win_count / simulation_count
if __name__ == '__main__':
    # Load runtime settings and wire up logging before anything else;
    # the log file location is read from conf/settings.ini.
    config = ConfigParser()
    config.read('./conf/settings.ini')
    logging.config.fileConfig(disable_existing_loggers=False,
                              fname='./logs/logging_config.ini',
                              defaults={'logfilename': config.get('logs', 'path')})

    # Parse the user's card input from the command line.
    user_input_cards = handle_args(sys.argv[1:])
    # Idiom fix: compare against None with `is`, not `==`.
    if user_input_cards.board is None or user_input_cards.hand is None:
        log.error('Please supply board cards and hand cards.')
    else:
        board = user_input_cards.board
        hand = user_input_cards.hand
        print(calculate_deuces(board, hand))
        print(compute_win_percentage(board, hand))
| true |