blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
814cfbb5223fbd3ebe571095ff4fab04ef7dc6da | Python | manikshahkataria/web-scraping-of-ebay | /ebay.py | UTF-8 | 2,511 | 2.828125 | 3 | [] | no_license | import csv
import requests
from bs4 import BeautifulSoup
def get_page(url):
    """Download *url* and return it parsed as a BeautifulSoup tree.

    Returns None (after printing the status code) when the server does
    not answer with a success status, so callers must handle None.
    """
    # The timeout keeps the scraper from hanging forever on a dead host.
    response = requests.get(url, timeout=10)
    # response.ok is True for 2xx status codes; anything else is a failure.
    if not response.ok:
        print('server responded', response.status_code)
        return None
    # response.text is the raw HTML of the page; 'lxml' selects the parser.
    return BeautifulSoup(response.text, 'lxml')
def get_detail_data(soup):
    """Extract title, price and items-sold from a product-page soup.

    Every field falls back to an empty/zero default when the page does
    not contain the expected element, so a partial page never crashes
    the scraper.
    """
    # Title: the h1 text begins with a prefix word; keep everything after
    # it (the old code kept only the first word of the actual title).
    try:
        h1 = soup.find('h1', id='itemTitle')
        parts = h1.text.split(' ')
        title = ' '.join(parts[1:])
    except (AttributeError, IndexError):
        title = ''
    # Price: the span text looks like "US $24.99" -> [region, amount].
    try:
        price_data = soup.find('span', id='prcIsum').text.strip().split(' ')
        continent = price_data[0]
        price = price_data[1]
        currency = price[:1]  # leading symbol, e.g. '$'
    except (AttributeError, IndexError):
        price = ''
    # Sold count: first token of the "N sold" link, 0 when absent.
    try:
        sold = soup.find('a', class_='vi-txt-underline').text.split(' ')[0]
    except (AttributeError, IndexError):
        sold = 0
    data = {
        'title': title,
        'price': price,
        # 'currency' is intentionally omitted: it is undefined whenever
        # the price lookup above fails.
        'sold': sold
    }
    return data
def get_index_data(soup):
    """Collect the product URLs from a search-results page soup.

    Each returned link is later fed back through get_page() /
    get_detail_data() to scrape one product at a time.
    """
    try:
        anchors = soup.find_all('a', class_='s-item__link')
    except:
        anchors = []
    return [anchor.get('href') for anchor in anchors]
def write_csv(data, url):
    """Append one scraped product as a row of output.csv.

    Columns: title, price, sold, source URL.
    """
    # newline='' stops the csv module from emitting blank rows on
    # Windows; utf-8 keeps non-ASCII product titles intact.
    with open('output.csv', 'a', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow([data['title'], data['price'], data['sold'], url])
def main():
    """Scrape every product linked from one page of eBay watch results."""
    # Previously-used single-product URLs, kept for reference:
    # url='https://www.ebay.com/itm/Rolex-Datejust-31-Black-MOP-Jubilee-Diamond-Dial-Ladies-18kt-Yellow-Gold/183515884131?_trkparms=%26rpp_cid%3D5cb7586a7b34a72b115fe0a3%26rpp_icid%3D5cb7586a7b34a72b115fe0a2'
    # url='https://www.ebay.com/itm/SEIKO-SARB033-Mechanical-Automatic-Stainless-Steel-Mens-Watch-Made-In-Japan/254605873598?hash=item3b47b151be:g:y7EAAOSw4YZeyT62'
    url = 'https://www.ebay.com/sch/i.html?&_nkw=watches&_pgn=1'
    for product_url in get_index_data(get_page(url)):
        details = get_detail_data(get_page(product_url))
        print(details)
        write_csv(details, product_url)
# Script entry point: run the scraper when executed directly.
# (The trailing "| true |" below is dataset-dump residue, not Python.)
if __name__=='__main__':
    main() | true |
f13e252ee4b7853490d50761b50c28e4b83972b5 | Python | iorodeo/water_channel_ros | /software/setpt_source/nodes/setpt_joystick_source.py | UTF-8 | 2,071 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
from __future__ import division
import roslib
roslib.load_manifest('setpt_source')
import rospy
import threading
import math
from joy.msg import Joy
from std_msgs.msg import Header
from msg_and_srv.msg import SetptMsg
class SetptSource(object):
    """Joystick-driven setpoint generator.

    Maps the joystick's first axis onto a velocity goal, then integrates
    an acceleration-limited velocity into a relative position setpoint
    published on the 'setpt_rel' topic.
    """

    def __init__(self):
        self.initialized = False
        # Update-loop timing.
        self.setpt_update_rate = rospy.get_param("setpt_update_rate", 50)
        self.rate = rospy.Rate(self.setpt_update_rate)
        self.dt = 1 / self.setpt_update_rate
        self.lock = threading.Lock()
        # Motion limits for the generated trajectory.
        self.vel_max = rospy.get_param("velocity_max", 1.000)
        self.acc_max = rospy.get_param("acceleration_max", 1.000)
        # Integrator state.
        self.vel_setpt = 0
        self.vel_setpt_goal = 0
        self.pos_setpt = 0
        # Joystick input drives the velocity goal via the callback.
        self.joystick_sub = rospy.Subscriber('joy', Joy, self.joystick_callback)
        # Outgoing setpoint message and publisher.
        self.setptMsg = SetptMsg()
        self.setpt_rel_pub = rospy.Publisher('setpt_rel', SetptMsg)
        self.initialized = True

    def update(self):
        """Advance the setpoint one time step and publish it."""
        self.setptMsg.header.stamp = rospy.get_rostime()
        with self.lock:
            # Acceleration required to reach the goal this step, clamped
            # to the configured maximum while preserving its sign.
            accel = (self.vel_setpt_goal - self.vel_setpt) / self.dt
            if abs(accel) > self.acc_max:
                accel = math.copysign(self.acc_max, accel)
            self.vel_setpt += accel * self.dt
            self.pos_setpt += self.vel_setpt * self.dt
            self.setptMsg.velocity = self.vel_setpt
            self.setptMsg.position = self.pos_setpt
        self.setpt_rel_pub.publish(self.setptMsg)

    def joystick_callback(self, data):
        """Store joystick axis 0 (scaled by vel_max) as the velocity goal."""
        if self.initialized:
            with self.lock:
                self.vel_setpt_goal = data.axes[0] * self.vel_max
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Start the ROS node and publish setpoints until shutdown.
    rospy.init_node('joystick_position')
    setpt = SetptSource()
    while not rospy.is_shutdown():
        setpt.update()
        setpt.rate.sleep()
| true |
2d55765b028d1f1ae9a37f05c89da06a42a2923f | Python | ReritoO-dev/Enchanted-Bot | /Cogs/Matchmaking.py | UTF-8 | 42,273 | 2.59375 | 3 | [] | no_license | import asyncio
import functools
import Config
import discord
import datetime
from discord.ext import commands, tasks
import logging
import Utils
import random
def match_check(match):
    """Clamp both fighters' live stats back into their legal ranges.

    Health and mana are capped at the account maximums, while strength
    and defense are floored at the equipped weapon / armor bonus (or 0
    when nothing is equipped) and otherwise rounded to one decimal.
    Returns the same two-element match list, mutated in place.
    """
    for idx in range(2):
        fighter = match[idx]
        stats = fighter['account']['stats']
        # Health and mana can never exceed the account maximums.
        if fighter['health'] > stats['health']:
            fighter['health'] = stats['health']
        if fighter['mana'] > stats['endurance']:
            fighter['mana'] = stats['endurance']
        # Strength is floored at the weapon bonus, otherwise rounded.
        weapon = fighter['account']['weapon']
        strength_floor = weapon['effect'] if weapon is not None else 0
        if stats['strength'] < strength_floor:
            stats['strength'] = strength_floor
        else:
            stats['strength'] = round(stats['strength'], 1)
        # Defense is floored at the armor bonus, otherwise rounded.
        armor = fighter['account']['armor']
        defense_floor = armor['effect'] if armor is not None else 0
        if stats['defense'] < defense_floor:
            stats['defense'] = defense_floor
        else:
            stats['defense'] = round(stats['defense'], 1)
    return match
class Matchmaking(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.battles = 0
self.battling_users = []
self.chats = []
self.matchmaking.start()
self.ticket_garbage.start()
def cog_unload(self):
logging.info("Shutting down matchmaking system")
self.matchmaking.cancel()
logging.info("Shutting down queue cleaning system")
self.ticket_garbage.cancel()
async def construct_embeds(self, match, turn):
SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
for _ in range(2):
field_description = ""
field_description = "╔ ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬\n"
for chat in self.chats:
found = False
if match[0]['account']['user_id'] in chat[0]["ids"]:
for c in chat[1:]:
field_description += f"│ **{c['user']}**: {c['msg']}\n"
found = True
if not found:
field_description += "│ *No chat logs*\n"
field_description += "╚ ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬"
if turn == _:
embed = discord.Embed(color = Config.TURNCOLOR, description="It's your turn. React with a number to use a spell. Or react with 💤 to pass")
else:
embed = discord.Embed(color = Config.NOTTURN, description="It is " + match[int(not bool(_))]['ctx'].author.name + "'s turn, Please wait for them to cast a spell.")
equipped_string = ""
for spell in match[_]['account']['slots']:
if spell is None:
equipped_string += "\n> *Nothing is written on this page...*"
continue
for x in Utils.get_users_spells(match[_]['account']):
if spell == x['id']:
spell = x
if spell is not None:
equipped_string += "\n> "+spell['emoji']+" **" +" ["+spell['type']+"] "+ spell['name'] + "** - [ "+str(spell['damage'])+" Effect] [ "+str(spell['cost'])+" Cost]"
embed.description += "\n\n**Spellbook**:" + equipped_string
for __ in range(2):
weapon_additive_string = ""
if match[__]['account']['weapon'] is not None:
weapon_additive_string = " [+"+str(match[__]['account']['weapon']['effect'])+ match[__]['account']['weapon']['emoji'] +"]"
armor_additive_string = ""
if match[__]['account']['armor'] is not None:
armor_additive_string = " [+" + str(match[__]['account']['armor']['effect']) + \
match[__]['account']['armor']['emoji'] + "]"
embed.add_field(name=Utils.get_rank_emoji(match[__]['account']['power']) + match[__]['ctx'].author.name + match[__]['account']['selected_title'], value="Health: " + str(match[__]['health']) + "/" + str(match[__]['account']['stats']['health']).translate(SUB) + Config.EMOJI['hp'] + "\nMana: " + str(match[__]['mana']) + "/" + str(match[__]['account']['stats']['endurance']).translate(SUB) + Config.EMOJI['flame'] + "\nStrength: " + str(match[__]['account']['stats']['strength']) + weapon_additive_string + "\nDefense: " + str(match[__]['account']['stats']['defense']) + armor_additive_string)
embed.title = "Battle against " + match[int(not bool(_))]['ctx'].author.name + match[int(not bool(_))]['account']['selected_title']
footer_string = ""
for effect in match[_]['effects']:
footer_string += " | " + str(effect['amount']) + "x " + effect['name'] + " effect for " + str(effect['turns']) + " turns."
embed.set_footer(text="You gain 3 mana at the beginning of your turn." + footer_string)
embed.add_field(name="💬 **Chat**", value=field_description, inline=False)
await match[_]['message'].edit(embed=embed)
async def construct_embeds_with_message(self, turn, match, message):
SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
for _ in range(2):
field_description = "╔ ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬\n"
for chat in self.chats:
found = False
if match[0]['account']['user_id'] in chat[0]["ids"]:
for c in chat[1:]:
field_description += f"│ **{c['user']}**: {c['msg']}\n"
found = True
if not found:
field_description += "│ *No chat logs*\n"
field_description += "╚ ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬"
if turn == _:
embed = discord.Embed(color = Config.OK, description=message)
else:
embed = discord.Embed(color = Config.DAMAGE, description=message)
equipped_string = ""
for spell in match[_]['account']['slots']:
if spell is None:
equipped_string += "\n> *Nothing is written on this page...*"
continue
for x in Utils.get_users_spells(match[_]['account']):
if spell == x['id']:
spell = x
if spell is not None:
equipped_string += "\n> "+spell['emoji']+" **" +" ["+spell['type']+"] "+ spell['name'] + "** - [ "+str(spell['damage'])+" Effect] [ "+str(spell['cost'])+" Cost]"
embed.description += "\n\n**Spellbook**:" + equipped_string
for __ in range(2):
weapon_additive_string = ""
if match[__]['account']['weapon'] is not None:
weapon_additive_string = " [+"+str(match[__]['account']['weapon']['effect'])+ match[__]['account']['weapon']['emoji'] +"]"
armor_additive_string = ""
if match[__]['account']['armor'] is not None:
armor_additive_string = " [+" + str(match[__]['account']['armor']['effect']) + \
match[__]['account']['armor']['emoji'] + "]"
embed.add_field(name=Utils.get_rank_emoji(match[__]['account']['power']) + match[__]['ctx'].author.name + match[__]['account']['selected_title'], value="Health: " + str(match[__]['health']) + "/" + str(match[__]['account']['stats']['health']).translate(SUB) + Config.EMOJI['hp'] + "\nMana: " + str(match[__]['mana']) + "/" + str(match[__]['account']['stats']['endurance']).translate(SUB) + Config.EMOJI['flame'] + "\nStrength: " + str(match[__]['account']['stats']['strength']) + weapon_additive_string + "\nDefense: " + str(match[__]['account']['stats']['defense']) + armor_additive_string)
embed.title = "Battle against " + match[int(not bool(_))]['ctx'].author.name + match[int(not bool(_))]['account']['selected_title']
footer_string = ""
for effect in match[_]['effects']:
footer_string += " | " + str(effect['amount']) + "x " + effect['name'] + " effect for " + str(effect['turns']) + " turns."
embed.set_footer(text="You gain 3 mana at the beginning of your turn." + footer_string)
embed.add_field(name="💬 **Chat**", value=field_description, inline=False)
await match[_]['message'].edit(embed=embed)
    async def battle_thread(self, match):
        """Run one complete battle between the two players in ``match``.

        ``match`` is a two-element list of dicts, each holding a player's
        ``ctx``, ``account`` record and battle ``message``.  This coroutine
        drives the reaction-based turn loop, applies spell effects, pays
        out rewards/penalties at the end, and always releases both players
        from the battling list in its ``finally`` clause.
        """
        try:
            logging.info("Battle thread started: Current threads: " + str(self.battles))
            # Lock both players out of queueing a second battle.
            self.battling_users.append(match[0]['ctx'].author.id)
            self.battling_users.append(match[1]['ctx'].author.id)
            turn = random.randint(0, 1)  # a random player starts
            total_turns = 1
            draw = False
            match[0]['health'] = match[0]['account']['stats']['health']
            # Ping each player with a jump link to their battle message;
            # the pings self-delete after 10 seconds.
            embed = discord.Embed(title="Match Started", color = Config.MAINCOLOR, description= "[jump]("+match[0]['message'].jump_url+")")
            one_message = await match[0]['ctx'].send(match[0]['ctx'].author.mention, embed=embed)
            await one_message.delete(delay=10)
            embed = discord.Embed(title="Match Started", color=Config.MAINCOLOR,
                                  description="[jump](" + match[1]['message'].jump_url + ")")
            one_message = await match[1]['ctx'].send(match[1]['ctx'].author.mention, embed=embed)
            await one_message.delete(delay=10)
            # Initialise battle state: full health/mana, no effects, no AFK strikes.
            match[1]['health'] = match[1]['account']['stats']['health']
            match[0]['mana'] = match[0]['account']['stats']['endurance']
            match[1]['mana'] = match[1]['account']['stats']['endurance']
            match[0]['effects'] = []
            match[1]['effects'] = []
            match[0]['afk'] = 0
            match[1]['afk'] = 0
            for _ in range(2):
                # Fold equipment bonuses into the base stats for this battle.
                if match[_]['account']['armor'] is not None:
                    match[_]['account']['stats']['defense'] += match[_]['account']['armor']['effect']
                if match[_]['account']['weapon'] is not None:
                    match[_]['account']['stats']['strength'] += match[_]['account']['weapon']['effect']
                # One numbered reaction per occupied spell slot, plus "pass".
                if match[_]['account']['slots'][0] is not None:
                    await match[_]['message'].add_reaction("1️⃣")
                if match[_]['account']['slots'][1] is not None:
                    await match[_]['message'].add_reaction("2️⃣")
                if match[_]['account']['slots'][2] is not None:
                    await match[_]['message'].add_reaction("3️⃣")
                if match[_]['account']['slots'][3] is not None:
                    await match[_]['message'].add_reaction("4️⃣")
                await match[_]['message'].add_reaction("💤")
            # Main turn loop: runs until someone's health or mana hits zero.
            while match[0]['health'] > 0 and match[1]['health'] > 0 and match[0]['mana'] > 0 and match[1]['mana'] > 0:
                if match[turn]['afk'] > 2:
                    # Three AFK strikes: zero the player out so the loop ends.
                    match[turn]['health'] = 0
                    match[turn]['mana'] = 0
                    continue
                # Apply lingering effects (poison/blind) at the start of the round.
                for _ in range(2):
                    effects_remove = []
                    for effect in match[_]['effects']:
                        match[_][effect['type']] -= effect['amount']
                        match[_][effect['type']] = round(match[_][effect['type']], 1)
                        effect['turns'] -= 1
                        if effect['turns'] < 1:
                            effects_remove.append(effect)
                    for effect in effects_remove:
                        match[_]['effects'].remove(effect)
                # add mana to player
                match[turn]['mana'] += 3
                match = match_check(match)
                # NOTE(review): this break only exits the small for loop and
                # has no other effect — presumably meant to end the battle
                # when an effect kills a player; the while condition handles
                # that on the next iteration anyway.
                for _ in range(2):
                    if match[_]['health'] <= 0 or match[_]['mana'] <= 0:
                        break
                total_turns += 1
                await self.construct_embeds(match, turn)
                try:
                    # Map reaction emoji -> spell slot index (4 = pass).
                    reaction_dict = {'1️⃣': 0, '2️⃣': 1, '3️⃣': 2, '4️⃣': 3, '💤': 4}
                    def check(payload):
                        # Accept only the acting player reacting on their own
                        # message, and only slots that actually hold a spell.
                        if payload.user_id == match[turn]['ctx'].author.id and payload.message_id == match[turn]['message'].id:
                            if str(payload.emoji) in reaction_dict.keys():
                                if reaction_dict[str(payload.emoji)] < 4:
                                    return match[turn]['account']['slots'][reaction_dict[str(payload.emoji)]] is not None
                                else:
                                    return True
                        return False
                    # The player may have reacted before their turn started,
                    # so first scan the message's existing reactions.
                    temp_msg = await match[turn]['ctx'].channel.fetch_message(match[turn]['message'].id)
                    reaction = None
                    for temp_reaction in temp_msg.reactions:
                        users = await temp_reaction.users().flatten()
                        if match[turn]['ctx'].author.id in [x.id for x in users] and temp_reaction.me:
                            # NOTE(review): can_continue appears unused here.
                            can_continue = True
                            reaction = temp_reaction
                            try:
                                await temp_reaction.remove(match[turn]['ctx'].author)
                            except:
                                logging.error("Cannot remove emoji (not a big deal)")
                    if reaction is None:
                        # Otherwise wait up to 30 seconds for a new reaction.
                        payload = await self.bot.wait_for('raw_reaction_add', timeout=30.0, check=check)
                        reaction = payload.emoji
                        try:
                            await match[turn]['message'].remove_reaction(payload.emoji, match[turn]['ctx'].author)
                        except:
                            logging.error("Cannot remove emoji (not big deal)")
                    if str(reaction) == "💤":
                        # Pass: no spell cast, turn flips to the other player.
                        turn = int(not bool(turn))
                        continue
                    else:
                        spell = Utils.get_spell(match[turn]['account']['class'], match[turn]['account']['slots'][reaction_dict[str(reaction)]])
                        # Pay the cost: most spells cost mana, DRAIN costs
                        # health, MANA pays inside its own branch below.
                        if spell['type'] not in ["MANA", "DRAIN"]:
                            match[turn]['mana'] -= spell['cost']
                        elif spell['type'] == "DRAIN":
                            match[turn]['health'] -= spell['cost']
                        # spell types
                        if spell['type'] == "DAMAGE":
                            # Plain attack: (damage + strength) * scaling - defense.
                            calculated_damage = round(((spell['damage'] + match[turn]['account']['stats']['strength']) * spell['scalling']) - match[int(not bool(turn))]['account']['stats']['defense'], 1)
                            if calculated_damage < 0:
                                calculated_damage = 0
                            match[int(not bool(turn))]['health'] -= calculated_damage
                            match[int(not bool(turn))]['health'] = round(match[int(not bool(turn))]['health'], 1)
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[int(not bool(turn))]['ctx'].author.name+" takes `" + str(calculated_damage) + "` damage total (`" + str(match[int(not bool(turn))]['account']['stats']['defense']) + "` blocked)")
                            turn = int(not bool(turn))
                        elif spell['type'] == "HEAL":
                            # Restore the caster's health.
                            match[turn]['health'] += spell['damage']
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[turn]['ctx'].author.name+" gains `" + str(spell['damage']) + "` health.")
                            turn = int(not bool(turn))
                        elif spell['type'] == "STUN":
                            # Damage plus a 50% chance message of stunning.
                            calculated_damage = round(((spell['damage'] + match[turn]['account']['stats']['strength']) * spell['scalling']) - match[int(not bool(turn))]['account']['stats']['defense'], 1)
                            if calculated_damage < 0:
                                calculated_damage = 0
                            match[int(not bool(turn))]['health'] -= calculated_damage
                            match[int(not bool(turn))]['health'] = round(match[int(not bool(turn))]['health'], 1)
                            match = match_check(match)
                            chance = random.randint(0, 1)
                            # NOTE(review): on a successful stun only the text
                            # differs and the turn still flips — the opponent
                            # does not actually appear to lose a turn here.
                            if chance == 1:
                                await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[int(not bool(turn))]['ctx'].author.name+" takes `" + str(calculated_damage) + "` damage total (`" + str(match[int(not bool(turn))]['account']['stats']['defense']) + "` blocked) and is stunned. (loses next turn)")
                            elif chance == 0:
                                await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[int(not bool(turn))]['ctx'].author.name+" takes `" + str(calculated_damage) + "` damage total (`" + str(match[int(not bool(turn))]['account']['stats']['defense']) + "` blocked) the stun failed...")
                            turn = int(not bool(turn))
                        elif spell['type'] == "MANA":
                            # Convert the caster's health into mana.
                            match[turn]['mana'] += spell['damage']
                            match[turn]['health'] -= spell['damage']
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[turn]['ctx'].author.name+" transforms `" + str(spell['damage']) + "` health into mana.")
                            turn = int(not bool(turn))
                        elif spell['type'] == "DRAIN":
                            # Steal mana from the opponent at a health cost.
                            match[turn]['mana'] += spell['damage']
                            match[int(not bool(turn))]['mana'] -= spell['damage']
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[turn]['ctx'].author.name+" stole `" + str(spell['damage']) + "` mana from "+match[int(not bool(turn))]['ctx'].author.name+" using `" + str(spell['cost']) + "` health.")
                            turn = int(not bool(turn))
                        elif spell['type'] == "PEN":
                            # Self-buff: raise the caster's strength.
                            match[turn]['account']['stats']['strength'] += spell['damage']
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[turn]['ctx'].author.name+" boosted their Strength from `" + str(match[turn]['account']['stats']['strength'] - spell['damage']) + "` to `"+str(match[turn]['account']['stats']['strength'])+"`")
                            turn = int(not bool(turn))
                        elif spell['type'] == "ARMOR":
                            # Self-buff: raise the caster's defense.
                            match[turn]['account']['stats']['defense'] += spell['damage']
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[turn]['ctx'].author.name+" boosted their Defense from `" + str(match[turn]['account']['stats']['defense'] - spell['damage']) + "` to `"+str(match[turn]['account']['stats']['defense'])+"`")
                            turn = int(not bool(turn))
                        elif spell['type'] == "POISON":
                            # Damage-over-time effect against the opponent's health.
                            effect = {'name': "Poison", 'turns': random.randint(2, 8), 'type': 'health', 'amount': round((spell['damage'] + match[turn]['account']['stats']['strength']) * spell['scalling'] / match[int(not bool(turn))]['account']['stats']['defense'], 1)}
                            match[int(not bool(turn))]['effects'].append(effect)
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[int(not bool(turn))]['ctx'].author.name+" gets effect `" + effect['name'] + "` of `"+str(effect['amount'])+"` magnitude for `"+str(effect['turns'])+"` turns.")
                            turn = int(not bool(turn))
                        elif spell['type'] == "BLIND":
                            # Drain-over-time effect against the opponent's mana.
                            effect = {'name': "Blinding", 'turns': random.randint(2, 8), 'type': 'mana', 'amount': round((spell['damage'] + match[turn]['account']['stats']['strength']) * spell['scalling'] / match[int(not bool(turn))]['account']['stats']['defense'], 1)}
                            match[int(not bool(turn))]['effects'].append(effect)
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[int(not bool(turn))]['ctx'].author.name+" gets effect `" + effect['name'] + "` of `"+str(effect['amount'])+"` magnitude for `"+str(effect['turns'])+"` turns.")
                            turn = int(not bool(turn))
                        elif spell['type'] == 'STEAL':
                            # Life steal: damage the opponent, heal the caster.
                            calculated_damage = round(((spell['damage'] + match[turn]['account']['stats']['strength']) * spell['scalling']) - match[int(not bool(turn))]['account']['stats']['defense'], 1)
                            if calculated_damage < 0:
                                calculated_damage = 0
                            match[int(not bool(turn))]['health'] -= calculated_damage
                            match[turn]['health'] += calculated_damage
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. "+match[turn]['ctx'].author.name+" stole `" + str(spell['damage']) + "` health from "+match[int(not bool(turn))]['ctx'].author.name)
                            turn = int(not bool(turn))
                        elif spell['type'] == "IMPAIR":
                            # Debuff: lower the opponent's defense (floor of 1).
                            before_stat = match[int(not bool(turn))]['account']['stats']['defense']
                            match[int(not bool(turn))]['account']['stats']['defense'] -= spell['damage']
                            if match[int(not bool(turn))]['account']['stats']['defense'] < 1:
                                match[int(not bool(turn))]['account']['stats']['defense'] = 1
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. " + match[int(not bool(turn))]['ctx'].author.name + "'s defense falls from `" + str(before_stat) + "` to `" + str(match[int(not bool(turn))]['account']['stats']['defense']) + "`.")
                            turn = int(not bool(turn))
                        elif spell['type'] == "WEAKEN":
                            # Debuff: lower the opponent's strength (floor of 1).
                            before_stat = match[int(not bool(turn))]['account']['stats']['strength']
                            match[int(not bool(turn))]['account']['stats']['strength'] -= spell['damage']
                            if match[int(not bool(turn))]['account']['stats']['strength'] < 1:
                                match[int(not bool(turn))]['account']['stats']['strength'] = 1
                            match = match_check(match)
                            await self.construct_embeds_with_message(turn, match, match[turn]['ctx'].author.name + " casted **" + spell['name'] + "**. " + match[int(not bool(turn))]['ctx'].author.name + "'s strength falls from `" + str(before_stat) + "` to `" + str(match[int(not bool(turn))]['account']['stats']['strength']) + "`.")
                            turn = int(not bool(turn))
                        # Brief pause so both players can read the result.
                        await asyncio.sleep(5)
                        continue
                except Exception as e:
                    if isinstance(e, asyncio.TimeoutError):
                        # No reaction within 30s: AFK strike, turn is forfeit.
                        embed = discord.Embed(title="AFK WARNING", color=Config.MAINCOLOR,
                                              description="Your battle is still going! You lost this turn because you took over 30 seconds to choose a spell.\n\n[Click to go to fight](" + match[turn]['message'].jump_url + ")")
                        timeout_msg = await match[turn]['ctx'].send(match[turn]['ctx'].author.mention, embed=embed)
                        await timeout_msg.delete(delay=20)
                        match[turn]['afk'] += 1
                        turn = int(not bool(turn))
                        continue
                    elif isinstance(e, discord.errors.NotFound):
                        # A battle message was deleted: call the match a draw.
                        draw = True
                        break
            # Battle over: build each player's result embed and pay rewards.
            person_lost = False
            for _ in range(2):
                try:
                    await match[_]['message'].clear_reactions()
                except:
                    logging.error("Cannot remove emoji (not a big deal)")
                if draw:
                    embed = discord.Embed(color = Config.MAINCOLOR, description="**DRAW**")
                elif match[_]['mana'] > 0 and match[_]['health'] > 0 or person_lost:
                    # Winner rewards: keys, rubies, coins, power and XP.
                    amount = random.randint(1, 3)
                    money = random.randint(5, 15)
                    coins = random.randint(12, 20)
                    power = random.randint(7, 9)
                    upgrade_emoji = Config.EMOJI['up1']
                    if power == 8:
                        upgrade_emoji = Config.EMOJI['up2']
                    elif power == 9:
                        upgrade_emoji = Config.EMOJI['up3']
                    xp = round(round(total_turns / 2, 1) * 100)
                    rankstring = Utils.get_rank_emoji(match[_]['account']['power'] + power) + " " + upgrade_emoji + "\n\n"
                    mystring = rankstring + "+`" + str(amount) + "` <:key:670880439199596545>\n+`" + str(money) + "` " + Config.EMOJI['ruby']+"\n+`" + str(coins) + "` " + Config.EMOJI['coin'] + "\n+`" + str("{:,}".format(xp)) + "` " + Config.EMOJI['xp']
                    match[_]['account']['keys'] += amount
                    if match[_]['account']['keys'] > 9:
                        # Ten keys auto-convert into a chest.
                        match[_]['account']['keys'] -= 10
                        match[_]['account']['chests'] += 1
                        mystring += "\n+`1` " + Config.EMOJI['chest']
                    if 'xp' not in match[_]['account']:
                        match[_]['account']['xp'] = 0
                    Config.USERS.update_one({'user_id': match[_]['ctx'].author.id}, {'$inc': {'rubies': money, 'power': power, "coins": coins}, '$set': {'chests': match[_]['account']['chests'], 'keys': match[_]['account']['keys'], 'xp': match[_]['account']['xp'] + xp}})
                    embed = discord.Embed(color = Config.MAINCOLOR, description="**Congratulations! You have won!**\n\n" + mystring)
                else:
                    # Loser: smaller rewards plus a power penalty.
                    person_lost = True
                    power = random.randint(5, 7)
                    upgrade_emoji = Config.EMOJI['down1']
                    if power == 6:
                        upgrade_emoji = Config.EMOJI['down2']
                    elif power == 7:
                        upgrade_emoji = Config.EMOJI['down3']
                    money = random.randint(3, 9)
                    coins = random.randint(4, 10)
                    xp = round(round(total_turns / 2, 1) * 100)
                    match[_]['account']['power'] -= power
                    if match[_]['account']['power'] < 2:
                        # Power never drops below 1.
                        match[_]['account']['power'] = 1
                        power = 0
                    rankstring = Utils.get_rank_emoji(match[_]['account']['power']) + " " + upgrade_emoji + "\n\n"
                    if 'xp' not in match[_]['account']:
                        match[_]['account']['xp'] = 0
                    Config.USERS.update_one({'user_id': match[_]['ctx'].author.id}, {'$inc': {'rubies': money, "coins": coins}, '$set': {'power': match[_]['account']['power'], 'xp': match[_]['account']['xp'] + xp}})
                    embed = discord.Embed(color = Config.MAINCOLOR, description="**You lost...**\n\n" + rankstring + "+`" + str(money) + "` " + Config.EMOJI['ruby'] + "\n+`" + str(coins) + "` " + Config.EMOJI['coin'] + "\n+`" + str("{:,}".format(xp)) + "` " + Config.EMOJI['xp'])
                for __ in range(2):
                    embed.add_field(name=Utils.get_rank_emoji(match[__]['account']['power']) + match[__]['ctx'].author.name + match[__]['account']['selected_title'], value="Health: " + str(match[__]['health']) + Config.EMOJI['hp'] + "\nMana: " + str(match[__]['mana']) + Config.EMOJI['flame'])
                embed.title = "Battle against " + match[int(not bool(_))]['ctx'].author.name + match[int(not bool(_))]['account']['selected_title']
                try:
                    await match[_]['message'].edit(embed=embed)
                except:
                    logging.error("While cleaning up match message is not found. ignorning.")
                # NOTE(review): the cleanup below runs once per player, so
                # the 'battles' counter is incremented twice for each user —
                # confirm against the original file's indentation.
                logging.info("Cleaning up a battle")
                Config.USERS.update_many({'user_id': {'$in': [match[0]['ctx'].author.id, match[1]['ctx'].author.id]}}, {'$inc': {'battles': 1}})
                if match[0]['ctx'].author.id in self.battling_users:
                    self.battling_users.remove(match[0]['ctx'].author.id)
                if match[1]['ctx'].author.id in self.battling_users:
                    self.battling_users.remove(match[1]['ctx'].author.id)
                # Wear down this player's equipped gear and report breakage.
                broken_items = Utils.decrease_durability(match[_]['account']['user_id'])
                if len(broken_items) > 0:
                    embed = discord.Embed(title="Broken Tools",
                                          description=match[_]['ctx'].author.mention + "! Your " + " and ".join(
                                              [x['name'] for x in broken_items]) + " broke!",
                                          color=Config.MAINCOLOR)
                    await match[_]['ctx'].send(content=match[_]['ctx'].author.mention, embed=embed)
        except:
            # Any unexpected error disbands the battle gracefully.
            logging.error("Battle has errored! It has been disbanded and players were unqueued.")
            embed = discord.Embed(color=Config.MAINCOLOR, title="Battle has ended", description="The battle has ended.")
            for _ in match:
                try:
                    await _['message'].edit(embed=embed)
                except:
                    pass
        finally:
            # Always free the thread slot and unlock both players.
            self.battles -= 1
            if match[0]['ctx'].author.id in self.battling_users:
                self.battling_users.remove(match[0]['ctx'].author.id)
            if match[1]['ctx'].author.id in self.battling_users:
                self.battling_users.remove(match[1]['ctx'].author.id)
@commands.command()
async def clear_q(self, ctx):
if ctx.author.id not in Config.OWNERIDS:
await ctx.send("You do not have permission to do this")
else:
Utils.matchmaking = []
await ctx.send("All tickets in matchmaking Queue have been cleared.")
    @commands.command(aliases=['b'])
    @commands.bot_has_permissions(add_reactions=True, manage_messages=True, send_messages=True, external_emojis=True)
    async def battle(self, ctx):
        """Queue the invoking player for a PvP battle.

        Rejects the request when queues are closed or the player is mid
        battle; otherwise posts a "searching" embed and files (or moves)
        a matchmaking ticket that expires after ten minutes.
        """
        msg, account = await Utils.get_account_lazy(self.bot, ctx, ctx.author.id)
        if account is None:
            # No account; the lazy helper already informed the user.
            return
        if not Config.OPEN_QUEUES:
            # Global maintenance switch: queues are disabled.
            embed = discord.Embed(color=Config.MAINCOLOR, title="Enchanted Maintenance",
                                  description="Queuing is disabled at the moment. Enchanted is under maintenance.")
            if msg is None:
                msg = await ctx.send(embed=embed)
            else:
                await msg.edit(embed=embed)
            return
        if ctx.author.id in self.battling_users:
            # One battle at a time per user.
            embed=discord.Embed(color=Config.MAINCOLOR, title="Error entering Queue", description="You are already battling someone. Please finish that battle first.")
            if msg is None:
                msg = await ctx.send(embed=embed)
            else:
                await msg.edit(embed=embed)
            return
        prefix = Utils.fetch_prefix(ctx)
        # "Searching" embed; the timestamp doubles as the timeout display.
        embed=discord.Embed(color=Config.MAINCOLOR, title="Looking for match... <a:lg:670720658166251559>", description="You are in queue. Once you find a match you will begin battling.", timestamp=datetime.datetime.utcnow() + datetime.timedelta(minutes=10))
        embed.set_footer(text=f'type {prefix}cancel to stop searching | timeout at ')
        if msg is None:
            msg = await ctx.send(embed=embed)
        else:
            await msg.edit(embed=embed)
        # If the player already has a ticket, repoint it at this message
        # instead of filing a duplicate.
        for ticket in Utils.matchmaking:
            if ticket['account']['user_id'] == ctx.author.id:
                await ticket['message'].edit(embed=discord.Embed(title="Entered Queue somewhere else", description="You have started looking for a match in a different location.", color = Config.MAINCOLOR))
                ticket['ctx'] = ctx
                ticket['message'] = msg
                return
        Utils.send_ticket({'power': account['power'], 'ctx': ctx, 'account': account, 'message': msg, 'expire': datetime.datetime.utcnow() + datetime.timedelta(minutes=10)})
    @commands.command()
    async def cancel(self, ctx):
        """Remove the invoking player's ticket from the battle queue."""
        msg, account = await Utils.get_account_lazy(self.bot, ctx, ctx.author.id)
        if account is None:
            return
        remove_ticket = None
        # Find the player's ticket; removal happens after the loop so the
        # list is not mutated while being iterated.
        for ticket in Utils.matchmaking:
            if ticket['account']['user_id'] == ctx.author.id:
                await ticket['message'].edit(embed=discord.Embed(title="Canceled Matchmaking", description="Matchmaking has been canceled.", color = Config.MAINCOLOR))
                await ticket['message'].delete(delay=10)
                await ticket['ctx'].message.delete(delay=10)
                remove_ticket = ticket
        if remove_ticket is not None:
            Utils.matchmaking.remove(remove_ticket)
            embed=discord.Embed(color=Config.MAINCOLOR, title="Matchmaking Canceled", description="You have exited the battle queue.")
            if msg is None:
                msg = await ctx.send(embed=embed)
            else:
                await msg.edit(embed=embed)
            await msg.delete(delay=10)
            await ctx.message.delete(delay=10)
        else:
            # The player was not queued at all.
            embed=discord.Embed(color=Config.MAINCOLOR, title="You look confused.", description="You are not actively looking for a battle. Use ]battle to start looking for one.")
            if msg is None:
                msg = await ctx.send(embed=embed)
            else:
                await msg.edit(embed=embed)
            await msg.delete(delay=10)
            await ctx.message.delete(delay=10)
@battle.error
async def battle_error(self, error, ctx):
if isinstance(error, commands.BotMissingPermissions):
await ctx.send(embed=discord.Embed(title="Uh oh..", description="I'm missing some permissions, please make sure i have the following:\n\nadd_reactions, manage_messages, send_messages, external_emojis"), color=Config.ERRORCOLOR)
async def after_battle(self, task, match):
logging.info("Callback for after match has been called.")
try:
task.result()
except:
logging.error("Battle has errored! It has been disbanded and players were unqueued.")
embed = discord.Embed(color = Config.MAINCOLOR, title="Battle has ended", description="The battle has ended.")
for _ in match:
await _['message'].edit(embed=embed)
finally:
self.battles -= 1
if match[0]['ctx'].author.id in self.battling_users:
self.battling_users.remove(match[0]['ctx'].author.id)
if match[1]['ctx'].author.id in self.battling_users:
self.battling_users.remove(match[1]['ctx'].author.id)
loop = 0
for chat in self.chats:
if match[0]['ctx'].author.id in chat[0]["ids"]:
self.chats.remove(self.chats[loop])
loop += 1
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def chat(self, ctx, *, choice:str=None):
msg, account = await Utils.get_account_lazy(self.bot, ctx, ctx.author.id)
if account is None:
return
if choice is None:
prefix = Utils.fetch_prefix(ctx)
embed = discord.Embed(title="Emotes", description="", color = Config.MAINCOLOR)
i = 0
for cosmetic in account['cosmetics']:
if cosmetic["type"] == "emote":
i += 1
embed.description += "> " + str(i) + " | **" + cosmetic["value"] + "**\n"
embed.set_footer(text=f"Get more emotes from the shop | use {prefix}chat <index> to chat in battle")
if msg is None:
await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
return
try:
emotes = []
for cosmetic in account['cosmetics']:
if cosmetic["type"] == "emote":
emotes.append(cosmetic)
choice = int(choice)
if choice > len(emotes) or choice < 1:
embed = discord.Embed(title="Hmmmm...", description="You only have " + str(len(emotes)) + " Emotes. Try using a number 1-" + str(len(emotes)),
color=Config.MAINCOLOR)
if msg is None:
await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
return
else:
choice = choice - 1
loop = 0
for chat in self.chats:
if ctx.author.id in chat[0]["ids"]:
if len(chat) > 5:
self.chats[loop].remove(self.chats[loop][1])
self.chats[loop].append({'user': str(ctx.author.name), 'msg': emotes[choice]['value']})
embed = discord.Embed(description=f"Chat sent!\n**{str(ctx.author.name)}**: {emotes[choice]['value']}", color=Config.MAINCOLOR)
if msg is None:
message = await ctx.send(embed=embed)
await asyncio.sleep(5)
await message.delete()
await ctx.message.delete()
else:
await msg.edit(embed=embed)
return
loop += 1
embed = discord.Embed(title="Whoops..", description=f"You can only use this command when you're battling!", color=Config.MAINCOLOR)
if msg is None:
await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
return
except ValueError:
embed = discord.Embed(title="Hmmmm...", description="Thats not a emote index. Try using a number 1-" + str(len(emotes)),
color=Config.MAINCOLOR)
if msg is None:
await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
return
@tasks.loop(seconds=10)
async def matchmaking(self):
if len(Utils.matchmaking) > 1:
logging.info("Starting matching")
matched = Utils.match_tickets()
for match in matched:
logging.info("Found match")
await match[0]['message'].edit(embed=discord.Embed(color = Config.MAINCOLOR, title="Match found!", description="Battling " + match[1]['ctx'].author.name))
await match[1]['message'].edit(embed=discord.Embed(color = Config.MAINCOLOR, title="Match found!", description="Battling " + match[0]['ctx'].author.name))
self.battles += 1
match[0]['message']
id1 = match[0]['ctx'].author.id
id2 = match[1]['ctx'].author.id
self.chats = [[{"ids": [id1, id2]}]]
battle = self.bot.loop.create_task(self.battle_thread(match))
#battle.add_done_callback(functools.partial(self.after_battle, match=match))
logging.info("Matching completed.")
@tasks.loop(seconds=30)
async def ticket_garbage(self):
if len(Utils.matchmaking) > 0:
logging.info("Started queue cleaning")
to_delete = []
for ticket in Utils.matchmaking:
if ticket['expire'] < datetime.datetime.utcnow():
to_delete.append(ticket)
for ticket in to_delete:
await ticket['message'].edit(embed=discord.Embed(color=Config.MAINCOLOR, title="Matchmaking Canceled", description="timout has been reached. Please type `]battle` to join the queue again."))
Utils.matchmaking.remove(ticket)
logging.info("Cleaned ticket from queue.")
logging.info("Queue cleaning completed.")
def setup(bot):
    """Extension entry point: discord.py calls this from load_extension."""
    cog = Matchmaking(bot)
    bot.add_cog(cog)
| true |
acb18d3c6751bafb2a94784c12d4e8b16112d3fc | Python | Nermin-Ghith/ihme-modeling | /gbd_2019/cod_code/fataldiscontinuities/side_splitting/apply_side_splitting.py | UTF-8 | 17,815 | 3.046875 | 3 | [] | no_license | import pandas as pd
import numpy as np
from multiprocessing import Pool
from db_queries import get_population
from shock_tools import *
def fill_side_columns_with_non_null_values(df):
df['deaths_a'] = df['deaths_a'].fillna(0)
df['deaths_b'] = df['deaths_b'].fillna(0)
df["side_a"] = df["side_a"].fillna("")
df["side_b"] = df["side_b"].fillna("")
return df
def convert_to_lists_of_ints(row, column):
locs = row[column]
locs = locs.replace("'", "")
locs = convert_str_to_list(locs)
locs = list(map(float, locs))
for loc in locs:
assert type(loc) == float, "a location id is not an int"
return locs
def split_sides_with_known_deaths(side_a, side_b, deaths_a, deaths_b,
best, year, high, low, row, pop,
event_population, event_df):
deaths_one = best
if np.isnan(deaths_b):
deaths_b = 0
if deaths_b == None:
deaths_b = 0
if deaths_a == None:
deaths_a = 0
if np.isnan(deaths_a):
deaths_a = 0
if len(side_b) == 0:
deaths_a += deaths_b
deaths_b = 0
if side_b is None:
deaths_a += deaths_b
deaths_b = 0
known_deaths = deaths_a + deaths_b
unkown_deaths = best - known_deaths
side_a_population = pop[(pop['location_id'].isin(side_a)) &
(pop['year_id'] == year)]['population'].sum()
side_b_population = pop[(pop['location_id'].isin(side_b)) &
(pop['year_id'] == year)]['population'].sum()
side_a_percentage = side_a_population / event_population
side_b_percentage = side_b_population / event_population
# add unknown deaths on, splitting by population
deaths_a = deaths_a + (unkown_deaths * side_a_percentage)
deaths_b = deaths_b + (unkown_deaths * side_b_percentage)
# split out deaths by population of location
for location in side_a:
# grab the population for that location
location_pop = pop[(pop['location_id'] == location) &
(pop['year_id'] == year)]['population'].sum()
# find the percentage of population for that event
population_percentage = location_pop / side_a_population
row['best'] = deaths_a * population_percentage
row['high'] = float("nan")
row['low'] = float("nan")
row['location_id'] = location
row['split_status'] = 1
# insert the unique locations row into the event df
event_df = event_df.append(row)
for location in side_b:
# grab the population for that location
location_pop = pop[(pop['location_id'] == int(location)) &
(pop['year_id'] == int(year))]['population'].sum()
# find the percentage of population for that event
population_percentage = location_pop / side_b_population
row['best'] = deaths_b * population_percentage
row['high'] = float("nan")
row['low'] = float("nan")
row['location_id'] = location
row['split_status'] = 1
# insert the unique locations row into the event df
event_df = event_df.append(row)
assert np.isclose(deaths_one, event_df['best'].sum(), atol=50), "{} SEI {} deaths, {}, {}, {}".format(row['source_event_id'].iloc[0], deaths_one-event_df['best'].sum(), deaths_a, deaths_b, side_b)
return event_df
def split_sides_for_terrorism(side_a, side_b, deaths_a, deaths_b,
best, year, high, low, row, pop,
event_population, event_df):
# add unknown deaths on, splitting by population
# side_b will be one death
deaths_one = best
side_b = set(side_b) - set(side_a)
side_a_population = pop[(pop['location_id'].isin(side_a)) &
(pop['year_id'] == year)]['population'].sum()
side_b_population = pop[(pop['location_id'].isin(side_b)) &
(pop['year_id'] == year)]['population'].sum()
deaths_a = best - 1
deaths_b = 1
# split out deaths by population of location
for location in side_a:
# grab the population for that location
location_pop = pop[(pop['location_id'] == location) &
(pop['year_id'] == year)]['population'].sum()
# find the percentage of population for that event
population_percentage = location_pop / side_a_population
row['best'] = deaths_a * population_percentage
row['high'] = float("nan")
row['low'] = float("nan")
row['location_id'] = location
row['split_status'] = 1
# insert the unique locations row into the event df
event_df = event_df.append(row)
for location in side_b:
# grab the population for that location
location_pop = pop[(pop['location_id'] == location) &
(pop['year_id'] == year)]['population'].sum()
# find the percentage of population for that event
population_percentage = location_pop / side_b_population
# write over the row's value to append to event_df
row['best'] = deaths_b * population_percentage
row['high'] = float("nan")
row['low'] = float("nan")
row['location_id'] = location
row['split_status'] = 1
# insert the unique locations row into the event df
event_df = event_df.append(row)
assert np.isclose(deaths_one, event_df['best'].sum(), atol=10), "{} SEI {} deaths".format(row['source_event_id'], deaths_one-event_df['best'].sum())
return event_df
def split_sides_for_war(side_a, side_b, deaths_a, deaths_b,
best, year, high, low, row, pop,
event_population, event_df, location_of_event):
best = float(best)
deaths_one = best
side_a_population = pop[(pop['location_id'].isin(side_a)) &
(pop['year_id'] == year)]['population'].sum()
side_b_population = pop[(pop['location_id'].isin(side_b)) &
(pop['year_id'] == year)]['population'].sum()
event_locations_population = pop[(pop['location_id'].isin(location_of_event)) &
(pop['year_id'] == year)]['population'].sum()
side_a_percentage = side_a_population / event_population
side_b_percentage = side_b_population / event_population
location_deaths = .99 * best
if np.isnan(location_deaths):
location_deaths = 0
if location_deaths == None:
location_deaths = 0
remaining_deaths = best - location_deaths
# calculate deaths based on percentage of population
deaths_a = (remaining_deaths * side_a_percentage)
deaths_b = (remaining_deaths * side_b_percentage)
for location in location_of_event:
location_pop = pop[(pop['location_id'] == location) &
(pop['year_id'] == year)]['population'].sum()
# find the percentage of population for that event
population_percentage = location_pop / event_locations_population
# write over the row's value to append to event_df
row['best'] = location_deaths * population_percentage
row['high'] = float("nan")
row['low'] = float("nan")
row['location_id'] = location
row['split_status'] = 1
event_df = event_df.append(row)
# split out deaths by population of location
for location in side_a:
# grab the population for that location
location_pop = pop[(pop['location_id'] == location) &
(pop['year_id'] == year)]['population'].sum()
# find the percentage of population for that event
population_percentage = location_pop / side_a_population
# write over the row's value to append to event_df
row['best'] = deaths_a * population_percentage
row['high'] = float("nan")
row['low'] = float("nan")
row['location_id'] = location
row['split_status'] = 1
# insert the unique locations row into the event df
event_df = event_df.append(row)
for location in side_b:
# grab the population for that location
location_pop = pop[(pop['location_id'] == location) &
(pop['year_id'] == year)]['population'].sum()
# find the percentage of population for that event
population_percentage = location_pop / side_b_population
# write over the row's value to append to event_df
row['best'] = deaths_b * population_percentage
row['high'] = float("nan")
row['low'] = float("nan")
row['location_id'] = location
row['split_status'] = 1
# insert the unique locations row into the event df
event_df = event_df.append(row)
assert np.isclose(deaths_one, event_df['best'].sum(), atol=10), "{} SEI {} deaths".format(str(row['source_event_id'].iloc[0]), deaths_one-event_df['best'].sum())
return event_df
def split_sides_by_population(side_a, side_b, deaths_a, deaths_b,
best, year, high, low, row, pop,
event_population, event_df):
best = float(best)
deaths_one = best
side_a_population = pop[(pop['location_id'].isin(side_a)) &
(pop['year_id'] == year)]['population'].sum()
side_b_population = pop[(pop['location_id'].isin(side_b)) &
(pop['year_id'] == year)]['population'].sum()
side_a_percentage = side_a_population / event_population
side_b_percentage = side_b_population / event_population
# calculate deaths based on percentage of population
deaths_a = (best * side_a_percentage)
deaths_b = (best * side_b_percentage)
# split out deaths by population of location
for location in side_a:
# grab the population for that location
location_pop = pop[(pop['location_id'] == location) &
(pop['year_id'] == year)]['population'].sum()
# find the percentage of population for that event
population_percentage = location_pop / side_a_population
# write over the row's value to append to event_df
row['best'] = deaths_a * population_percentage
row['high'] = float("nan")
row['low'] = float("nan")
row['location_id'] = location
row['split_status'] = 1
# insert the unique locations row into the event df
event_df = event_df.append(row)
for location in side_b:
# grab the population for that location
location_pop = pop[(pop['location_id'] == location) &
(pop['year_id'] == year)]['population'].sum()
# find the percentage of population for that event
population_percentage = location_pop / side_b_population
# write over the row's value to append to event_df
row['best'] = deaths_b * population_percentage
row['high'] = float("nan")
row['low'] = float("nan")
row['location_id'] = location
row['split_status'] = 1
# insert the unique locations row into the event df
event_df = event_df.append(row)
assert np.isclose(deaths_one, event_df['best'].sum(), atol=10), "{} SEI {} deaths".format(row['source_event_id'], deaths_one-event_df['best'].sum())
return event_df
def iterate_through_df_and_split_sides(df):
final = pd.DataFrame()
df = fill_side_columns_with_non_null_values(df)
pop = get_population(location_id=-1, decomp_step="step1", year_id=-1, location_set_id=21)
# iterate through each row of the dataframe
for index, row in df.iterrows():
event_df = pd.DataFrame()
locations = convert_to_lists_of_ints(row, "location_id")
side_a = convert_to_lists_of_ints(row, "side_a")
side_b = convert_to_lists_of_ints(row, "side_b")
deaths_a = row['deaths_a']
deaths_b = row['deaths_b']
year = row['year_id']
best = float(row['best'])
high = row['high']
low = row['low']
cause_id = row['cause_id']
row = pd.DataFrame(row).transpose()
has_side_a = not (side_a == [])
has_side_b = not (side_b == [])
has_location = not (np.isnan(locations)).all()
has_sides = ((has_side_a | has_side_b) and not has_location)
has_deaths_by_side = ((deaths_a != 0) | (deaths_b != 0))
if has_sides:
all_locs = list(set(side_a) | set(side_b))
event_population = pop[(pop['location_id'].isin(all_locs)) &
(pop['year_id'] == year)]['population'].sum()
side_a, side_b = deduplicate_locations_within_sides(side_a, side_b)
if has_deaths_by_side:
event_df = split_sides_with_known_deaths(side_a, side_b, deaths_a, deaths_b,
best, year, high, low, row, pop,
event_population, event_df)
elif cause_id == 855:
location_of_event = row['location_of_event'].iloc[0]
event_df = split_sides_for_war(side_a, side_b, deaths_a, deaths_b,
best, year, high, low, row, pop,
event_population, event_df, location_of_event)
else:
event_df = split_sides_by_population(side_a, side_b, deaths_a, deaths_b,
best, year, high, low, row, pop,
event_population, event_df)
elif has_location:
og_best = best
event_population = pop[(pop['location_id'].isin(locations)) &
(pop['year_id'] == year)]['population'].sum()
assert has_location, "Hmm, there are no sides and no location"
if len(locations) > 1:
if cause_id == 855:
location_of_event = row['location_of_event'].iloc[0]
location_of_event = (location_of_event & set(locations))
if len(location_of_event) > 0:
location_deaths = best * .99
best = best - location_deaths
for location in set(locations):
location_pop = pop[(pop['location_id'] == location) &
(pop['year_id'] == year)]['population'].sum()
population_percentage = location_pop / event_population
row['best'] = best * population_percentage
if cause_id == 855:
if int(location) in location_of_event:
row['best'] = row['best'] + (location_deaths / len(location_of_event))
row['high'] = float('nan')
row['low'] = float('nan')
row['location_id'] = location
row['split_status'] = 1
event_df = event_df.append(row)
assert np.isclose(og_best, event_df['best'].sum(), atol=10), "{}, {}, {}, {}, {}".format(og_best, event_df['best'].sum(), locations, best, row['best'])
else:
row['split_status'] = 0
event_df = row
assert np.isclose(og_best, event_df['best'].sum(), atol=10), "{}, {}".format(og_best, event_df['best'].sum())
event_df.reset_index(drop=True, inplace=True)
final = final.append(event_df, ignore_index=True)
return final
def deduplicate_locations_within_sides(side_a, side_b):
side_a = set(side_a)
side_b = set(side_b)
if side_a == side_b:
side_b = set()
side_a = side_a - side_b
return side_a, side_b
def parallelize(df, func):
workers = 10
df_split = np.array_split(df, workers)
pool = Pool(workers)
df = pd.concat(pool.map(func, df_split))
pool.close()
pool.join()
return df
def add_locations_from_side(df, source):
loc_map = pd.read_csv("FILEPATH".format(source))
loc_map = loc_map.query("map_type_hierarchy_kept == True")
loc_map = loc_map.query("source_col == 'location_id'")
loc_map = loc_map[['source_event_id','location_id']]
loc_map = loc_map.groupby(['source_event_id'], as_index=False).agg({"location_id":set})
loc_map = loc_map.rename(columns={"location_id":"location_of_event"})
og_shape = df.copy().shape[0]
df['source_event_id'] = df['source_event_id'].apply(lambda x: str(x))
loc_map['source_event_id'] = loc_map['source_event_id'].apply(lambda x: str(x))
df = pd.merge(left=df, right=loc_map, how='left', on='source_event_id')
assert og_shape == df.shape[0]
return df
def run_side_splitting(df, source):
original_death_count = df.copy()['best'].sum()
df = add_locations_from_side(df, source)
df['best'] = df['best'].fillna(0)
print(df['best'].dtype)
assert np.isclose(df['best'].sum(), original_death_count, atol=50)
df = parallelize(df, iterate_through_df_and_split_sides)
split_death_count = df['best'].sum()
difference = split_death_count - original_death_count
assert np.isclose(difference, 0, atol=50), (
"deaths before split does not equal deaths after split: Difference {}".format(difference))
# report_locations_split(df)
return df
| true |
ea4fc68df40d77b85e11086901e00850857a69e7 | Python | markjoeljimenez/pydfs-lineup-optimizer | /pydfs_lineup_optimizer/sites/sites_registry.py | UTF-8 | 693 | 2.578125 | 3 | [
"MIT"
] | permissive | from collections import defaultdict
from typing import Type, DefaultDict, Dict
from pydfs_lineup_optimizer.settings import BaseSettings
class SitesRegistry:
SETTINGS_MAPPING = defaultdict(dict) # type: DefaultDict[str, Dict[str, Type[BaseSettings]]]
@classmethod
def register_settings(cls, settings_cls: Type[BaseSettings]) -> Type[BaseSettings]:
cls.SETTINGS_MAPPING[settings_cls.site][settings_cls.sport] = settings_cls
return settings_cls
@classmethod
def get_settings(cls, site: str, sport: str) -> Type[BaseSettings]:
try:
return cls.SETTINGS_MAPPING[site][sport]
except KeyError:
raise NotImplementedError
| true |
22832d0b97a3e3ec3a073776d477d01c2fcffcd0 | Python | msproteomicstools/msproteomicstools | /gui/openswathgui/models/ChromatogramTransition.py | UTF-8 | 10,746 | 2.953125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================================
msproteomicstools -- Mass Spectrometry Proteomics Tools
=========================================================================
Copyright (c) 2013, ETH Zurich
For a full list of authors, refer to the file AUTHORS.
This software is released under a three-clause BSD license:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of any author or any participating institution
may be used to endorse or promote products derived from this software
without specific prior written permission.
--------------------------------------------------------------------------
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------
$Maintainer: Hannes Roest$
$Authors: Hannes Roest$
--------------------------------------------------------------------------
"""
CHROMTYPES = {
0 : "Protein",
1 : "Peptide",
2 : "Precursor",
3 : "Transition"
}
CHROMTYPES_r = dict([ (v,k) for k,v in CHROMTYPES.items()])
class ChromatogramTransition(object):
"""
Internal tree structure object representing one row in the in the left side tree.
This is the bridge between the view and the data model
Pointers to objects of :class:`.ChromatogramTransition` are passed to
callback functions when the selection of the left side tree changes. The
object needs to have store information about all the column present in the
rows (PeptideSequence, Charge, Name) which are requested by the
:class:`.PeptideTree` model.
Also it needs to know how to access the raw data as well as meta-data for a
certain transition. This is done through getData, getLabel etc.
"""
def __init__(self, name, charge, subelements, peptideSequence=None, fullName=None, datatype="Precursor"):
self._name = name
self._charge = charge
self._fullName = fullName
self._peptideSequence = peptideSequence
self._subelements = subelements
self.mytype = CHROMTYPES_r[datatype]
def getSubelements(self):
return self._subelements
def getPeptideSequence(self):
if self._peptideSequence is None:
return self.getName()
return self._peptideSequence
def getName(self):
"""
Get name of precursor
Returns
-------
str:
Name of precursor
"""
return self._name
def getCharge(self):
"""
Get charge of precursor
Returns
-------
int:
Charge
"""
return self._charge
def getType(self):
return CHROMTYPES[self.mytype]
def getData(self, run):
"""
Get raw data for a certain object
If we have a single precursors or a peptide with only one precursor, we
show the same data as for the precursor itself. For a peptide with
multiple precursors, we show all precursors as individual curves. For a
single transition, we simply plot that transition.
Parameters
----------
run : :class:`.SwathRun` or :class:`.SqlSwathRun`
SwathRun object which will be used to retrieve data
Returns
-------
list of pairs (timearray, intensityarray):
Returns the raw data of the chromatograms for a given run. The
dataformat is a list of transitions and each transition is a pair
of (timearray,intensityarray)
"""
if CHROMTYPES[self.mytype] == "Precursor" :
return run.get_data_for_precursor(self.getName())
elif CHROMTYPES[self.mytype] == "Peptide" :
prec = run.get_precursors_for_sequence(self.getName())
if len(prec) == 1:
return run.get_data_for_precursor(prec[0])
else:
# Peptide view with multiple precursors
# -> Sum up the data for all individual precursors
final_data = []
for p in prec:
timedata = None
intdata = None
import numpy
for data in run.get_data_for_precursor(p):
if timedata is None:
timedata = numpy.array(data[0])
intdata = numpy.array(data[1])
else:
intdata = intdata + numpy.array(data[1])
final_data.append( [timedata, intdata] )
return final_data
elif CHROMTYPES[self.mytype] == "Transition" :
return run.get_data_for_transition(self.getName())
return [ [ [0], [0] ] ]
def getRange(self, run):
"""
Get the data range (leftWidth/rightWidh) for a specific run
Parameters
----------
run : :class:`.SwathRun`
SwathRun object which will be used to retrieve data
Returns
-------
list of float:
A pair of floats representing the data range (leftWidth/rightWidh) for a specific run
"""
if CHROMTYPES[self.mytype] == "Precursor" :
return run.get_range_data(self.getName())
elif CHROMTYPES[self.mytype] == "Peptide" :
prec = run.get_precursors_for_sequence(self.getName())
if len(prec) == 1:
return run.get_range_data(prec[0])
elif CHROMTYPES[self.mytype] == "Transition" :
# TODO
return [ [0,0] ]
return [ [0,0] ]
def getProbScore(self, run):
"""
Get the probabilistic score for a specific run and current precursor
Parameters
----------
run : :class:`.SwathRun`
SwathRun object which will be used to retrieve data
Returns
-------
float:
The probabilistic score for a specific run and current precursor
"""
if CHROMTYPES[self.mytype] == "Precursor" :
return run.get_score_data(self.getName())
elif CHROMTYPES[self.mytype] == "Peptide" :
prec = run.get_precursors_for_sequence(self.getName())
if len(prec) == 1:
return run.get_score_data(prec[0])
else:
# For multiple precursors, the probability score is not defined
return None
elif CHROMTYPES[self.mytype] == "Transition" :
return None
return None
def getIntensity(self, run):
"""
Get the intensity for a specific run and current precursor
Parameters
----------
run : :class:`.SwathRun`
SwathRun object which will be used to retrieve data
Returns
-------
float:
The intensity for a specific run and current precursor
"""
if CHROMTYPES[self.mytype] == "Precursor" :
return run.get_intensity_data(self.getName())
elif CHROMTYPES[self.mytype] == "Peptide" :
prec = run.get_precursors_for_sequence(self.getName())
if len(prec) == 1:
return run.get_intensity_data(prec[0])
else:
# For multiple precursors, the intensity is currently not computed
return None
elif CHROMTYPES[self.mytype] == "Transition" :
return None
return None
def getAssayRT(self, run):
"""
Get the intensity for a specific run and current precursor
Parameters
----------
run : :class:`.SwathRun`
SwathRun object which will be used to retrieve data
Returns
-------
float:
The intensity for a specific run and current precursor
"""
if CHROMTYPES[self.mytype] == "Precursor" :
return run.get_assay_data(self.getName())
elif CHROMTYPES[self.mytype] == "Peptide" :
prec = run.get_precursors_for_sequence(self.getName())
if len(prec) == 1:
return run.get_assay_data(prec[0])
else:
# For multiple precursors, the intensity is currently not computed
return None
elif CHROMTYPES[self.mytype] == "Transition" :
return None
return None
def getLabel(self, run):
"""
Get the labels for a curve (corresponding to the raw data from getData
call) for a certain object.
If we have a single precursors or a peptide with only one precursor, we
show the same data as for the precursor itself. For a peptide with
multiple precusors, we show all precursors as individual curves. For a
single transition, we simply plot that transition.
Parameters
----------
run : :class:`.SwathRun`
SwathRun object which will be used to retrieve data
Returns
-------
list of str:
The labels to display for each line in the graph
"""
if CHROMTYPES[self.mytype] == "Precursor" :
return run.get_transitions_for_precursor_display(self.getName())
elif CHROMTYPES[self.mytype] == "Peptide" :
prec = run.get_precursors_for_sequence(self.getName())
if len(prec) == 1:
return run.get_transitions_for_precursor_display(prec[0])
else:
# Peptide view with multiple precursors
return prec
elif CHROMTYPES[self.mytype] == "Transition" :
return [self.getName()]
return [ "" ]
| true |
695f960ef47b0862065fcdb74ea18558663798e2 | Python | krishnakalyan3/PythonAlgorithms | /palindrome.py | UTF-8 | 384 | 3.78125 | 4 | [] | no_license | example = ["mom","dad","abcba","test"]
def isPal(word):
if len(word) <= 1:
return True
else:
left = 0
right = len(word) - 1
while left < right:
if word[right] == word[left]:
left += 1
right -= 1
else:
return False
return True
def isPal1(word):
return word == "".join(reversed(word))
for ex in example:
print isPal1(ex)
| true |
81c64481600c09952b6c69b085148105974c81bb | Python | shohirose/atcoder | /dp_contest/python/B.py | UTF-8 | 414 | 3.078125 | 3 | [] | no_license | def calc_cost(n, k, h):
cost = [1e10 for _ in range(n)]
cost[0] = 0
for i in range(1, n):
for j in range(1, min(i+1, k+1)):
cost[i] = min(cost[i], cost[i-j] + abs(h[i-j] - h[i]))
return cost
def main():
n, k = list(map(int, input().split()))
h = list(map(int, input().split()))
cost = calc_cost(n, k, h)
print(cost[n-1])
if __name__ == "__main__":
main() | true |
d57647d338221bb38172a20a4fe099964cd6bb5d | Python | HoweChen/PythonNote-CYH | /Data Structure/stack.py | UTF-8 | 359 | 4.03125 | 4 | [] | no_license | class Stack(object):
def __init__(self, value_list=None):
self.stack = []
if value_list:
for item in value_list:
self.push(item)
def push(self, val=None):
if val:
self.stack.append(val)
def pop(self):
return self.stack.pop()
test = Stack()
test.push(1)
print(test.pop())
| true |
5fa571ec86049c1813e2724bbe42dd13f2152ff6 | Python | jCrompton/jjabrams_rl | /src/block_builder.py | UTF-8 | 5,145 | 2.53125 | 3 | [] | no_license | import numpy as np
import keras
import string
from keras.models import Model
from keras import layers
from keras.layers import Activation, Dense, Input, BatchNormalization, Conv2D, SeparableConv2D, Conv2DTranspose
from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras import backend as K
class Blocks:
def __init__(self, separable=True, activation='relu', transpose=False):
print('Initialized the building blocks module')
self.conv = SeparableConv2D if separable else Conv2D
if transpose:
self.conv = Conv2DTranspose
self.activation = activation
def residual_block(self, input_tensor, kernel_size, filters, number_of_conv_blocks, number_of_id_blocks, stage, strides=(2, 2)):
conv_block = input_tensor
for i in range(number_of_conv_blocks):
block_name = '{}_CONV{}'.format(stage, string.ascii_lowercase[i%26])
conv_block = self.conv_block(conv_block, kernel_size, filters, i, block_name, strides=strides)
id_block = conv_block
for j in range(number_of_id_blocks):
block_name = '{}_ID{}'.format(stage, string.ascii_lowercase[j%26])
id_block = self.identity_block(id_block, kernel_size, filters, j, block_name)
return id_block
def identity_block_T(self, input_tensor, kernel_size, filters, stage, block):
pass
def identity_block(self, input_tensor, kernel_size, filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: defualt 3, the kernel size of middle conv layer at main path
filters: list of integers, the filterss of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = self.conv(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation(self.activation)(x)
x = self.conv(filters2, kernel_size,
padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation(self.activation)(x)
x = self.conv(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation(self.activation)(x)
return x
def conv_block(self, input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: defualt 3, the kernel size of middle conv layer at main path
filters: list of integers, the filterss of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
Note that from stage 3, the first conv layer at main path is with strides=(2,2)
And the shortcut should have strides=(2,2) as well
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = self.conv(filters1, (1, 1), strides=strides,
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation(self.activation)(x)
x = self.conv(filters2, kernel_size, padding='same',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation(self.activation)(x)
x = self.conv(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = self.conv(filters3, (1, 1), strides=strides,
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation(self.activation)(x)
return x
| true |
9f9c8a857bc0316c6de1b4421a737ccca5b616e5 | Python | sh1doy/AntBook | /sec2_7/MinimumScalarProduct.py | UTF-8 | 281 | 3 | 3 | [] | no_license | # Minimum_Scalar_Product
import random
# Problem size and two random test vectors with entries in [-100000, 100000].
n = 800
v1=[random.randint(-100000,100000) for i in range(n)]
v2=[random.randint(-100000,100000) for i in range(n)]
def solve(v1, v2):
    """Return the minimum scalar (dot) product of the two vectors.

    Achieved by pairing the smallest entries of one vector with the
    largest entries of the other (sort one ascending, the other
    descending, multiply pairwise, and sum).

    Fixed: operates on sorted() copies instead of sorting the caller's
    lists in place (the original mutated both arguments as a side effect).
    """
    ascending = sorted(v1)
    descending = sorted(v2, reverse=True)
    return sum(a * b for a, b in zip(ascending, descending))
# Print the minimum scalar product of the two random vectors.
print(solve(v1, v2))
dd2ead593a49a4e602dafd3227094b77055920b8 | Python | fox34/AdventOfCode2019 | /Intcode_Computer.py | UTF-8 | 6,600 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env python3
class Intcode_Computer:
    """Advent of Code 2019 "Intcode" virtual machine.

    Executes a list of integer instructions, supporting arithmetic, I/O,
    jumps, comparisons, relative-base addressing (opcodes 1-9) and HALT (99).
    """
    # Parameter modes
    # POSITION_MODE: read from / write to the address given by the parameter
    PARAM_MODE_POSITION = 0
    # IMMEDIATE_MODE (read only): use the parameter value itself
    PARAM_MODE_IMMEDIATE = 1
    # RELATIVE_MODE: read/write at the parameter address shifted by a variable offset
    PARAM_MODE_RELATIVE = 2
    def __init__(self, instructions):
        # The program; run() executes on a copy, so it can be re-run unchanged.
        self.instructions = instructions
    # Simple input: Ask user
    def read_input(self):
        return int(input("INPUT: "))
    # Simple output: Print to command line
    def process_output(self, output):
        print("OUTPUT:", output)
    # Execute the instructions
    def run(self):
        # Working memory (copy of the program)
        memory = self.instructions[:]
        # Sparse storage for arbitrarily large addresses beyond the original memory
        memory_random_access = {}
        # Base pointer for parameter mode "RELATIVE"
        relative_memory_pointer = 0
        # Current opcode pointer
        current_instruction_pointer = 0
        # Loop over all opcodes
        while True:
            # Assumption: opcodes always live in the normal memory range,
            # never in the extended random-access storage.
            # May need to be corrected in the future.
            if current_instruction_pointer >= len(memory):
                raise Exception("Opcode pointer overflow")
            # Zero-pad to 5 digits: the last two digits are the opcode, the
            # leading digits the parameter modes.  (The f-prefix is redundant;
            # the template has no replacement fields.)
            instruction = f"%05d" % int(memory[current_instruction_pointer])
            instruction = list(instruction)
            instruction.reverse()
            current_opcode = int(instruction[1] + instruction[0])
            # After the reverse, parameter_modes[i] is the mode of parameter i+1.
            parameter_modes = instruction[2:]
            def read_memory(num, mode = -1):
                # Read the value of parameter `num` (1-based).  mode == -1 means
                # "use the mode encoded in the current instruction".
                if mode == -1:
                    mode = int(parameter_modes[num-1])
                if mode == self.PARAM_MODE_POSITION:
                    param_pos = int(memory[current_instruction_pointer+num])
                elif mode == self.PARAM_MODE_IMMEDIATE:
                    param_pos = current_instruction_pointer+num
                elif mode == self.PARAM_MODE_RELATIVE:
                    param_pos = relative_memory_pointer + int(memory[current_instruction_pointer+num])
                else:
                    raise Exception("Invalid parameter mode")
                if param_pos >= len(memory):
                    # Uninitialized extended memory reads as 0.
                    if param_pos in memory_random_access:
                        return int(memory_random_access[param_pos])
                    else:
                        return 0
                else:
                    return int(memory[param_pos])
            def write_memory(num, val):
                # Write `val` at the destination addressed by parameter `num`.
                mode = int(parameter_modes[num-1])
                if mode == self.PARAM_MODE_POSITION:
                    param_pos = int(memory[current_instruction_pointer+num])
                elif mode == self.PARAM_MODE_IMMEDIATE:
                    raise Exception("Cannot write in immediate mode")
                elif mode == self.PARAM_MODE_RELATIVE:
                    param_pos = relative_memory_pointer + int(memory[current_instruction_pointer+num])
                else:
                    raise Exception("Invalid parameter mode")
                if param_pos >= len(memory):
                    memory_random_access[param_pos] = int(val)
                else:
                    memory[param_pos] = int(val)
            # Dispatch on the opcode
            if current_opcode == 1:
                # ADD
                param1 = read_memory(1)
                param2 = read_memory(2)
                result = param1 + param2
                write_memory(3, result)
                current_instruction_pointer += 4
            elif current_opcode == 2:
                # MUL
                param1 = read_memory(1)
                param2 = read_memory(2)
                result = param1 * param2
                write_memory(3, result)
                current_instruction_pointer += 4
            elif current_opcode == 3:
                # INPUT (write the value into memory)
                write_memory(1, self.read_input())
                current_instruction_pointer += 2
            elif current_opcode == 4:
                # OUTPUT (read the value from memory)
                self.process_output(read_memory(1))
                current_instruction_pointer += 2
            elif current_opcode == 5:
                # Jump if true
                param1 = read_memory(1)
                if (param1 != 0):
                    current_instruction_pointer = read_memory(2)
                else:
                    current_instruction_pointer += 3
            elif current_opcode == 6:
                # Jump if false
                param1 = read_memory(1)
                if (param1 == 0):
                    current_instruction_pointer = read_memory(2)
                else:
                    current_instruction_pointer += 3
            elif current_opcode == 7:
                # less than
                param1 = read_memory(1)
                param2 = read_memory(2)
                if (param1 < param2):
                    write_memory(3, 1)
                else:
                    write_memory(3, 0)
                current_instruction_pointer += 4
            elif current_opcode == 8:
                # equals
                param1 = read_memory(1)
                param2 = read_memory(2)
                if (param1 == param2):
                    write_memory(3, 1)
                else:
                    write_memory(3, 0)
                current_instruction_pointer += 4
            elif current_opcode == 9:
                # adjust relative memory address base
                param1 = read_memory(1)
                relative_memory_pointer += param1
                current_instruction_pointer += 2
            elif current_opcode == 99:
                # HALT
                #print("HALT")
                break
            else:
                raise Exception("Invalid opcode @ " + str(current_instruction_pointer) + ": " + str(current_opcode))
| true |
db6acb341a979a79ed1bcb101805542ae4b4f85c | Python | team-titians/HNGi7_Titans_Task2 | /scripts/FuadGbadamosi.py | UTF-8 | 241 | 2.953125 | 3 | [] | no_license | name = "Fuad Gbadamosi"
# HNGi7 stage-2 identification details (`name` is defined above).
email = "gfadebayo16@gmail.com"
hngid = "HNG-05744"
language = "Python3"
# Build the required greeting with str.format instead of chained concatenation.
outputFormat = (
    "Hello World, this is {} with HNGi7 ID {} and email {} "
    "using {} for stage 2 task".format(name, hngid, email, language)
)
print(outputFormat)
| true |
d1adca513435847adfcf435ba23391bef6325836 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2094/60829/265374.py | UTF-8 | 570 | 3.046875 | 3 | [] | no_license | a=str(input())
# Validity check for the user-supplied "number" string `a`; prints "True"/"False".
if a.isdigit():
    # All digits: a plain non-negative integer.
    print("True")
elif a[0]=="-":
    # Negative candidate: rebuild the string without the leading minus sign.
    zz=""
    for i in range(1,len(a)):
        zz=zz+a[i]
    if zz.isdigit():
        print("True")
    # NOTE(review): no else branch here -- inputs like "-x" print nothing at
    # all; confirm whether that matches the expected output records.
else:
    # Look for an exponent marker 'e' (scientific notation).
    judge=0
    for i in range(0,len(a)):
        if a[i]=="e":
            judge=1
            break
    if judge==0:
        print("False")
    else:
        # Split around position i (the 'e'): xx = mantissa part, yy = the rest.
        xx=""
        for x in range(0,i):
            xx=xx+a[x]
        yy=""
        for y in range(i,len(a)):
            yy=yy+a[y]
        # NOTE(review): yy starts at index i, so it still contains the 'e'
        # itself and yy.isdigit() can never be True -- this branch always
        # prints "False".  Presumably range(i+1, len(a)) was intended;
        # verify against the recorded grader output before changing.
        if xx.isdigit() and yy.isdigit():
            print("True")
        else:
            print("False")
fe7a2b041b85e5b9d2fea688304a7fc9cd5a0a75 | Python | ywtail/leetcode | /56_581_1.py | UTF-8 | 1,907 | 4.375 | 4 | [] | no_license | # coding:utf-8
# 581. Shortest Unsorted Continuous Subarray 最短未排序的连续子数组
# 125ms beats 36.67%
class Solution(object):
    def findUnsortedSubarray(self, nums):
        """Length of the shortest contiguous subarray which, if sorted
        ascending, makes the whole array sorted ascending.  O(n) time,
        O(1) extra space.

        :type nums: List[int]
        :rtype: int
        """
        size = len(nums)
        if size < 2:
            return 0
        # Right-to-left scan with a running minimum: the leftmost index
        # greater than everything to its right must be re-sorted.
        left = -1
        running_min = nums[-1]
        for idx in reversed(range(size - 1)):
            if nums[idx] > running_min:
                left = idx
            else:
                running_min = nums[idx]
        if left == -1:
            # Already sorted (non-decreasing).
            return 0
        # Left-to-right scan with a running maximum: the rightmost index
        # smaller than everything to its left must be re-sorted.
        right = -1
        running_max = nums[0]
        for idx in range(1, size):
            if nums[idx] < running_max:
                right = idx
            else:
                running_max = nums[idx]
        return right - left + 1
# Demo calls (NOTE: Python 2 print-statement syntax; the class above is
# version-agnostic).  Expected outputs are shown in the comments.
solution = Solution()
print solution.findUnsortedSubarray([2, 6, 4, 8, 10, 9, 15])
# 5
print solution.findUnsortedSubarray([1, 3, 5, 8, 4, 2, 1, 9, 7, 10])
# 8
'''
题目:
给定一个整数数组,你需要找到一个连续的子数组,如果你只按升序对这个子数组进行排序,那么整个数组也按升序排序。
您需要找到最短的子阵列并输出其长度。
示例1:
输入:[2,6,4,8,10,9,15]
输出:5
说明:您需要按升序对[6,4,8,10,9]进行排序,使整个数组以升序排列。
注意:
然后输入数组的长度在[1,10,000]的范围内。
输入数组可能包含重复项,因此这里升序表示<=。
分析:
时间复杂度 O(N),空间复杂度 O(1)
- 先找出待排序数组的左侧索引:
从右往左遍历,如果当前 num[i]>min_num,则更新左侧索引 left_index=i
- 再找出待排序数组的右侧索引
从左往右遍历,如果当前 num[i]<max_num,则更新右侧索引 right_index=i
''' | true |
c41d669d6497888a0c74ff0e86f64409a90c8cbb | Python | PdxCodeGuild/class_Binary_Beasts | /Students/Theo/Python/lab_object_oriented_programming.py | UTF-8 | 2,587 | 4.375 | 4 | [] | no_license | '''
Theo Cocco
Object Oriented Programming Lab
Monday, March 15, 2021
'''
# Bank Account Class
'''
class BankAccount:
def __init__(self, accountnumber, name, balance):
self.accountnumber = accountnumber
self.name = name
self.balance = balance
def deposit(self, x):
self.balance = self.balance + x
def withdrawl(self, x):
self.balance = self.balance - x
def bankfees(self):
self.balance = self.balance - (.05 * self.balance)
def display(self):
return (f'Welcome {self.name}, your account number is: {self.accountnumber}, and your balance is {self.balance}')
account1 = BankAccount(555, 'Theo', 1000000000.50)
account1.deposit(10000)
account1.withdrawl(5000)
account1.bankfees()
print(account1.display())
# Welcome Theo, your account number is: 555, and your balance is 950004750.475
'''
# Rectangle Class
"""
class Rectangle:
def __init__(self, l, w):
self.l = l
self.w = w
def perimeter(self):
p = 2 * (self.l + self.w)
return p
def area(self):
a = self.l * self.w
return a
def display(self):
return (f'''
Length: {self.l}
Width: {self.w}
Perimeter: {self.perimeter()}
Area: {self.area()}''')
rectangle1 = Rectangle(5,6)
print(rectangle1.display())
'''
Length: 5
Width: 6
Perimeter: 22
Area: 30
'''
class Parallelepipede(Rectangle):
def __init__(self, h, *args):
super().__init__(*args)
self.h = h
def volume(self):
v = self.l * self.w * self.h
return v
def display(self):
return (f'''
Length: {self.l}
Width: {self.w}
Perimeter: {self.perimeter()}
Area: {self.area()}
Volume: {self.volume()}''')
parallelepipede1 = Parallelepipede(7,6,6)
print(parallelepipede1.display())
'''
Length: 6
Width: 6
Perimeter: 24
Area: 36
Volume: 252
'''
"""
# Person Class
"""
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def display(self):
return (f'Name: {self.name}, Age: {self.age}')
theo = Person('Theo', 27)
print(theo.display())
# Name: Theo, Age: 27
class Student(Person):
def __init__(self, section, *args):
super().__init__(*args)
self.section = section
def display(self):
return (f'Name: {self.name}, Age: {self.age}, Section: {self.section}')
ares = Student('Band', 'Ares', 7)
print(ares.display())
# Name: Ares, Age: 7, Section: Band
""" | true |
f804b9220f768d3b643531ff9dd92bbbf0d62e66 | Python | lordraindance2/cardgame | /objects/User.py | UTF-8 | 544 | 2.671875 | 3 | [] | no_license |
class User(object):
    """A card-game user tied to a Discord account.

    Fix: the original properties/setters read and wrote the very attribute
    name they were bound to (e.g. ``return self.cards`` inside the ``cards``
    property), which recursed infinitely as soon as a ``User`` was
    constructed.  Values are now stored in underscore-prefixed backing
    attributes; the public interface (constructor signature, ``primary_key``,
    ``cards``, ``balance``) is unchanged.
    """

    def __init__(self, pk, discord_id, cards, balance):
        self.pk = pk
        self.cards = cards        # routed through the property setter
        self.discord_id = discord_id
        self.balance = balance    # routed through the property setter

    @property
    def primary_key(self):
        """Primary key of this user record."""
        return self.pk

    @property
    def cards(self):
        """The user's card collection."""
        return self._cards

    @property
    def balance(self):
        """The user's currency balance."""
        return self._balance

    @cards.setter
    def cards(self, cards):
        self._cards = cards

    @balance.setter
    def balance(self, balance):
        self._balance = balance
| true |
366a91e548ea0ac8481c723b45db1387f4143260 | Python | lhyugithub/lhyu | /pyele_sdds.py | UTF-8 | 27,507 | 2.53125 | 3 | [] | no_license | from typing import Optional
import os, sys
from pathlib import Path
from subprocess import Popen, PIPE
import re
import numpy as np
import tempfile
import shlex
import collections
#----------------------------------------------------------------------
def strfind(string, pattern):
    """Return the start indices of all non-overlapping regex matches of
    *pattern* in *string* (empty list when there is no match)."""
    matches = re.finditer(pattern, string)
    return [match.start() for match in matches]
#----------------------------------------------------------------------
def str2num(string):
    """Convert whitespace-separated numeric text into a flat float array.

    Accepts either a single string ("1 2 3") or a list of such strings
    (one per row); a list is parsed row-by-row and flattened.
    """
    if isinstance(string, str):
        return np.array([float(tok) for tok in string.split()])
    elif isinstance(string, list):
        rows = [[] for _ in string]
        for row_index, line in enumerate(string):
            rows[row_index] = [float(tok) for tok in line.split()]
        return np.array(rows).flatten()
    else:
        raise TypeError('str2num only accepts a string or a list of strings.')
#----------------------------------------------------------------------
def query(sdds_filepath, suppress_err_msg=False):
    """Parse `sddsquery` output for an SDDS file into metadata dicts.

    Returns (param_dict, column_dict).  Each maps a parameter/column name
    to a dict of its SDDS attributes ('UNITS', 'SYMBOL', 'TYPE', ...);
    parameters additionally carry '_index', their order in the file.
    Requires the `sddsquery` executable on PATH.
    """
    p = Popen(['sddsquery', sdds_filepath], stdout=PIPE, stderr=PIPE,
              encoding='utf-8')
    output, error = p.communicate()
    #if isinstance(output, bytes):
        #output = output.decode('utf-8')
        #error = error.decode('utf-8')
    if error and (not suppress_err_msg):
        print('sddsquery stderr:', error)
        print('sddsquery stdout:', output)
    # Header lines announce how many columns/parameters follow.
    # (column_header/param_header are only bound when the count is nonzero,
    # and are only used below under the matching nonzero check.)
    m = re.search(r'(\d+) columns of data:', output)
    if m is not None:
        nColumns = int(m.group(1))
        column_header = m.group(0)
    else:
        nColumns = 0
    m = re.search(r'(\d+) parameters:', output)
    if m is not None:
        nParams = int(m.group(1))
        param_header = m.group(0)
    else:
        nParams = 0
    column_dict = {}
    if nColumns != 0:
        # Everything after the column header; rows 3..3+nColumns hold the table.
        m = re.search(r'(?<='+column_header+r')[\w\W]+', output)
        column_str_list = m.group(0).split('\n')[3:(3+nColumns)]
        for s in column_str_list:
            # Six space-separated fields, then a free-form description.
            ss = re.search(r'([^ ]+) +'*6+'(.+)', s).groups()
            column_dict[ss[0]] = {
                'UNITS': ss[1], 'SYMBOL': ss[2], 'FORMAT': ss[3],
                'TYPE': ss[4], 'FIELD LENGTH': ss[5], 'DESCRIPTION': ss[6]}
        assert len(column_dict) == nColumns
    param_dict = {}
    if nParams != 0:
        m = re.search(r'(?<='+param_header+r')[\w\W]+', output)
        param_str_list = m.group(0).split('\n')[2:(2+nParams)]
        # Character classes allowing SDDS unit/symbol notation (e.g. 'm$be$nc').
        unit_pattern = r'[\w\$\(\)<>\*\^\'/,]+'
        symbol_pattern = r'[\w\$\(\)<>\*\^\'/, ]+'
        type_pattern = r'short|long|float|double|character|string'
        for index, s in enumerate(param_str_list):
            ss = re.search(
                r'([\w\./]+) +({0:s}) +({1:s}) +({2:s}) +(.+)'.format(
                    unit_pattern, symbol_pattern, type_pattern), s).groups()
            param_dict[ss[0]] = {'UNITS': ss[1], 'SYMBOL': ss[2].strip(),
                                 'TYPE': ss[3], 'DESCRIPTION': ss[4],
                                 '_index': index}
        assert len(param_dict) == nParams
    # deal with the special cases: 'enx0' units/symbol are split oddly by the
    # regex above, so stitch them back together here.
    if 'enx0' in param_dict:
        if (param_dict['enx0']['UNITS'] == 'm$be$nc') and \
           (param_dict['enx0']['SYMBOL'].split() == ['$gp$rm','NULL']):
            param_dict['enx0']['UNITS'] = 'm$be$nc $gp$rm'
            param_dict['enx0']['SYMBOL'] = 'NULL'
    return param_dict, column_dict
#----------------------------------------------------------------------
def printout(sdds_filepath, param_name_list=None,
             column_name_list=None, str_format='',
             show_output=False, show_cmd=False,
             suppress_err_msg=False):
    """
    Extract parameter and column values from an SDDS file via `sddsprintout`.

    Returns (param_dict, column_dict).  Parameters collapse to scalars for
    single-page files and stay as per-page lists for multi-page files;
    numeric columns are converted with str2num().  Requires the
    `sddsprintout` executable on PATH.

    If "str_format" is specified, you must make sure that all the data
    type of the specified paramter or column name list must be the same.
    An example of "str_format" is '%25.16e'.

    Fixes vs. the previous revision:
    * `param_info_dict` is now always queried.  It used to be bound only
      when `param_name_list` was None, so passing an explicit parameter
      list raised NameError during parsing.
    * The dead `if False:` "old version" branches were removed.
    """
    if os.name == 'posix':
        newline_char = '\n'
    elif os.name == 'nt':
        newline_char = '\r\n'
    # Metadata is needed unconditionally: parameter TYPEs drive the parsing
    # below, and column TYPEs drive the numeric conversion.
    param_info_dict, column_info_dict = query(
        sdds_filepath, suppress_err_msg=suppress_err_msg)
    if column_name_list is None:
        column_name_list = list(column_info_dict)
    if column_name_list == []:
        column_option_str = ''
    else:
        column_option_str = '-columns=' + '(' + ','.join(column_name_list) + ')'
        if str_format != '':
            column_option_str += ",format="+str_format
    if param_name_list is None:
        param_name_list = list(param_info_dict)
    if param_name_list == []:
        param_option_str = ''
    else:
        param_option_str = '-parameters=' + '(' + ','.join(param_name_list) + ')'
        if str_format != '':
            param_option_str += ",format="+str_format
    if (not column_option_str) and (not param_option_str):
        raise ValueError('You must specify at least one of -columns and -parameters.')
    if os.name == 'nt':
        # Double the backslashes so the Windows path survives quoting.
        sp = sdds_filepath.split('\\')
        double_backslashed_sdds_filepath = ('\\'*2).join(sp)
        sdds_filepath = double_backslashed_sdds_filepath
    # ---- First pass: parameters only ----
    cmd_list = ['sddsprintout', sdds_filepath]
    if param_option_str:
        cmd_list.append(param_option_str)
    if show_cmd:
        print(cmd_list)
    p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, encoding='utf-8')
    output, error = p.communicate()
    if error and (not suppress_err_msg):
        print('sddsprintout stderr:', error)
        print('sddsprintout stdout:', output)
    if (param_name_list == []):
        param_dict = {}
    else:
        param_dict = collections.defaultdict(list)
        # Match "name = value" pairs; [n]/[a] let the value class match "nan",
        # [i]/[f] let it match "inf".
        for k, v_str in re.findall(
                '([\w /\(\)\$\^\*\.]+)[ ]*=[ ]*([naife\d\.\+\-]+)[ \n]?',
                output):
            if '(' in k:
                # Strip a trailing "(units)" annotation from the name.
                first_para_ind = k.index('(')
                k_stripped = k[:first_para_ind].strip()
            else:
                k_stripped = k.strip()
            # If the parameter name is picking up the previous parameter's
            # non-digit values as characters, remove those here.
            k_stripped = k_stripped.split()[-1]
            if param_info_dict[k_stripped]['TYPE'] == 'double':
                param_dict[k_stripped].append(float(v_str))
            elif param_info_dict[k_stripped]['TYPE'] in ('long', 'short'):
                param_dict[k_stripped].append(int(v_str))
            elif param_info_dict[k_stripped]['TYPE'] == 'string':
                pass  # string values are extracted separately below
            else:
                raise ValueError(
                    f'Unexpected TYPE: {param_info_dict[k_stripped]["TYPE"]}')
        # Extract string-typed parameters with a per-name regex pass.
        if 'string' in [q_d['TYPE'] for q_d in param_info_dict.values()]:
            ordered_param_name_list = [None] * len(param_name_list)
            for param_name, q_d in param_info_dict.items():
                ordered_param_name_list[q_d['_index']] = param_name
            for param_name, q_d in param_info_dict.items():
                if q_d['TYPE'] != 'string':
                    continue
                _extracted = re.findall(f'{param_name}[ ]*=[ ]*(.+)[=\n]', output)
                vals = [v.split('=')[0].strip() for v in _extracted]
                try:
                    # Chop off the following parameter's name if the regex
                    # overran into it.
                    next_param_name = ordered_param_name_list[
                        ordered_param_name_list.index(param_name)+1]
                    vals = [v.replace(next_param_name, '').strip()
                            for v in vals]
                except IndexError:
                    pass
                param_dict[param_name] = vals
        # Collapse per-page lists: single-page files yield scalars,
        # multi-page files keep the lists.
        len_list = [len(v) for _, v in param_dict.items()]
        assert len(set(len_list)) == 1 # i.e., all having the same length
        _temp_dict = {}
        if len_list[0] == 1:
            # Only single "page"
            for k, v in param_dict.items():
                _temp_dict[k] = v[0]
        else:
            # Multiple "pages"
            for k, v in param_dict.items():
                _temp_dict[k] = v # keep it as a list
        param_dict = _temp_dict
        # Check if all the specified parameters have been correctly extracted
        _extracted_param_names = list(param_dict)
        for name in param_name_list:
            if name not in _extracted_param_names:
                print(f'* ERROR: Paramter "{name}" was not extracted')
        for name in _extracted_param_names:
            if name not in param_name_list:
                print(f'* WARNING: Unrequested Parameter "{name}" was extracted')
    # ---- Second pass: columns only ----
    cmd_list = ['sddsprintout', sdds_filepath]
    if column_option_str:
        cmd_list.append(column_option_str)
    #use_comma_delimiter = False
    use_comma_delimiter = True
    if use_comma_delimiter:
        cmd_list.append("-spreadsheet=(delimiter=',')")
    if show_cmd:
        print(cmd_list)
    p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, encoding='utf-8')
    output, error = p.communicate()
    if error and (not suppress_err_msg):
        print('sddsprintout stderr:', error)
        print('sddsprintout stdout:', output)
    if (column_name_list == []):
        column_dict = {}
    else:
        if not use_comma_delimiter:
            # Fixed-width table parsing (legacy path; only used if the
            # delimiter flag above is switched off).
            column_title_divider_pattern = '---' + newline_char
            column_start_ind = strfind(output, column_title_divider_pattern)
            if column_start_ind != []:
                column_start_ind = column_start_ind[0] + len(column_title_divider_pattern)
            column_data_str = output[column_start_ind:]
            rows_str = column_data_str.splitlines()
            column_dict = dict.fromkeys(column_name_list)
            for col_name in column_name_list:
                column_dict[col_name] = [[] for i in rows_str]
            # Rows may wrap across lines; track the column offset as we fill.
            col_ind_offset = 0
            row_counter = 0
            for r in rows_str:
                str_list = [c for c in r.split(' ') if c]
                for j,st in enumerate(str_list):
                    column_dict[column_name_list[j+col_ind_offset]][row_counter] = st
                if ( len(str_list)+col_ind_offset ) != len(column_name_list):
                    col_ind_offset += len(str_list)
                else:
                    col_ind_offset = 0
                    row_counter += 1
            for col_name in column_name_list:
                # Drop the empty tail entries, then convert to numbers when possible.
                column_dict[col_name] = column_dict[col_name][:row_counter]
                try:
                    column_dict[col_name] = str2num(column_dict[col_name])
                except ValueError:
                    pass
        else:
            rows = [s.strip() for s in output.split('\n') if s.strip() != '']
            column_dict = collections.defaultdict(list)
            col_title_rowind = 1
            for row in rows[(col_title_rowind+1):]:
                for col_name, v in zip(column_name_list, row.split("','")):
                    if col_name != v:
                        column_dict[col_name].append(v.strip())
                    else:
                        # "col_name" and "v" being equal means this is the
                        # title line of a subsequent SDDS "page"; skip it.
                        pass
            _temp_dict = {}
            for col_name in column_name_list:
                if column_info_dict[col_name]['TYPE'] == 'double':
                    _temp_dict[col_name] = str2num(column_dict[col_name])
                else:
                    _temp_dict[col_name] = column_dict[col_name]
            column_dict = _temp_dict
        # Check if all the specified columns have been correctly extracted
        _extracted_column_names = list(column_dict)
        for name in column_name_list:
            if name not in _extracted_column_names:
                print(f'* ERROR: Column "{name}" was not extracted')
        for name in _extracted_column_names:
            if name not in column_name_list:
                print(f'* WARNING: Unrequested Column "{name}" was extracted')
    if show_output:
        print(output)
    if error and (not suppress_err_msg):
        print(error)
    return param_dict, column_dict
def sdds2dicts(sdds_filepath, str_format=''):
    """Read an entire SDDS file into plain Python dicts.

    Returns (output, meta):
      * output has optional 'params'/'columns' keys holding the actual
        values (columns as numpy arrays, long/short types cast to int);
      * meta holds the sddsquery metadata in the same layout.
    Both are empty-ish when the file has neither parameters nor columns.
    """
    meta_params, meta_columns = query(sdds_filepath, suppress_err_msg=True)
    meta = {}
    if meta_params:
        meta['params'] = meta_params
    if meta_columns:
        meta['columns'] = meta_columns
    output = {}
    if (meta_params == {} and meta_columns == {}):
        # Nothing to extract; avoid a pointless sddsprintout run.
        return output, meta
    params, columns = printout(
        sdds_filepath, param_name_list=None, column_name_list=None,
        str_format=str_format, show_output=False, show_cmd=False,
        suppress_err_msg=True)
    if params:
        for _k, _v in params.items():
            if meta['params'][_k]['TYPE'] in ('long', 'short'):
                try:
                    params[_k] = int(_v)
                except TypeError:
                    # Multi-page value: a list of ints rather than a scalar.
                    params[_k] = np.array(_v).astype(int)
                except:
                    # Log the offending key/value for debugging, then re-raise.
                    sys.stderr.write(f'** key: {_k}\n')
                    sys.stderr.write('** value:\n')
                    sys.stderr.write(str(_v))
                    sys.stderr.write('\n')
                    sys.stderr.flush()
                    raise
        output['params'] = params
    if columns:
        # Columns always become numpy arrays; integer types are cast.
        for _k, _v in columns.items():
            if meta['columns'][_k]['TYPE'] in ('long', 'short'):
                columns[_k] = np.array(_v).astype(int)
            else:
                columns[_k] = np.array(_v)
        output['columns'] = columns
    return output, meta
def dicts2sdds(
    sdds_output_pathobj, params=None, columns=None,
    params_units=None, columns_units=None, params_descr=None, columns_descr=None,
    params_symbols=None, columns_symbols=None,
    params_counts=None, columns_counts=None, outputMode='ascii',
    tempdir_path: Optional[str] = None, suppress_err_msg=True):
    """Write parameter/column dicts out as an SDDS file.

    Serializes the data to a temporary plain-text file, converts it with
    plaindata2sdds(), and removes the temp file.  Value types are inferred
    per entry: float -> 'double', int -> 'long', str -> 'string'.  The
    optional *_units/_descr/_symbols/_counts dicts supply per-name SDDS
    attributes.  All columns must have the same length.
    """
    sdds_output_pathobj = Path(sdds_output_pathobj)
    sdds_output_filepath = str(sdds_output_pathobj)
    # Scratch file for the plain-data representation; deleted at the end.
    tmp = tempfile.NamedTemporaryFile(
        dir=tempdir_path, delete=False, prefix='tmpDicts2sdds_', suffix='.txt')
    plaindata_txt_filepath = str(Path(tmp.name).resolve())
    tmp.close()
    lines = []
    if params is None:
        param_name_list = []
        param_type_list = []
        param_unit_list = None
        param_descr_list = None
        param_symbol_list = None
        param_count_list = None
    else:
        param_name_list = list(params)
        param_type_list = []
        param_unit_list = []
        param_descr_list = []
        param_symbol_list = []
        param_count_list = []
        if params_units is None:
            params_units = {}
        if params_descr is None:
            params_descr = {}
        if params_symbols is None:
            params_symbols = {}
        if params_counts is None:
            params_counts = {}
        # One line per parameter value; type inferred from the Python type.
        for name in param_name_list:
            v = params[name]
            if isinstance(v, float):
                s = f'{v:.16g}'
                param_type_list.append('double')
            elif isinstance(v, (int, np.integer)):
                s = f'{v:d}'
                param_type_list.append('long')
            elif isinstance(v, str):
                s = f'"{v}"'
                param_type_list.append('string')
            else:
                raise ValueError(f'Unexpected data type for paramter "{name}"')
            lines.append(s)
            param_unit_list.append(params_units.get(name, None))
            param_descr_list.append(params_descr.get(name, None))
            param_symbol_list.append(params_symbols.get(name, None))
            param_count_list.append(params_counts.get(name, None))
    if columns is None:
        column_name_list = []
        column_type_list = []
        column_unit_list = None
        column_descr_list = None
        column_symbol_list = None
        column_count_list = None
    else:
        column_name_list = list(columns)
        column_type_list = []
        column_unit_list = []
        column_descr_list = []
        column_symbol_list = []
        column_count_list = []
        if columns_units is None:
            columns_units = {}
        if columns_descr is None:
            columns_descr = {}
        if columns_symbols is None:
            columns_symbols = {}
        if columns_counts is None:
            columns_counts = {}
        for name in column_name_list:
            column_unit_list.append(columns_units.get(name, None))
            column_descr_list.append(columns_descr.get(name, None))
            column_symbol_list.append(columns_symbols.get(name, None))
            column_count_list.append(columns_counts.get(name, None))
        nCols = len(column_name_list)  # NOTE: currently unused
        # Write the number of rows
        nRows = np.unique([len(columns[name]) for name in column_name_list])
        if len(nRows) == 1:
            nRows = nRows[0]
        else:
            raise ValueError('All the column data must have the same length')
        # Tab-prefixed row count precedes the column data.
        lines.append(f'\t{nRows:d}')
        # One output line per row; the first row fixes each column's type,
        # later rows must match it.
        M = zip(*[columns[name] for name in column_name_list])
        for iRow, row in enumerate(M):
            tokens = []
            for iCol, v in enumerate(row):
                if isinstance(v, float):
                    s = f'{v:.16g}'
                    if iRow != 0:
                        assert column_type_list[iCol] == 'double'
                    else:
                        column_type_list.append('double')
                elif isinstance(v, (int, np.integer)):
                    s = f'{v:d}'
                    if iRow != 0:
                        assert column_type_list[iCol] == 'long'
                    else:
                        column_type_list.append('long')
                elif isinstance(v, str):
                    s = f'"{v}"'
                    if iRow != 0:
                        assert column_type_list[iCol] == 'string'
                    else:
                        column_type_list.append('string')
                else:
                    raise ValueError(
                        f'Unexpected data type for column "{column_name_list[iCol]}" index {iRow:d}')
                tokens.append(s)
            lines.append(' '.join(tokens))
    with open(plaindata_txt_filepath, 'w') as f:
        f.write('\n'.join(lines))
    plaindata2sdds(
        plaindata_txt_filepath, sdds_output_filepath, outputMode=outputMode,
        param_name_list=param_name_list, param_type_list=param_type_list,
        param_unit_list=param_unit_list, param_descr_list=param_descr_list,
        param_symbol_list=param_symbol_list, param_count_list=param_count_list,
        column_name_list=column_name_list, column_type_list=column_type_list,
        column_unit_list=column_unit_list, column_descr_list=column_descr_list,
        column_symbol_list=column_symbol_list, column_count_list=column_count_list,
        suppress_err_msg=suppress_err_msg)
    # Best-effort cleanup of the scratch file.
    try:
        os.remove(plaindata_txt_filepath)
    except IOError:
        pass
def sdds2plaindata(
    sdds_filepath, output_txt_filepath, param_name_list=None, column_name_list=None,
    suppress_err_msg=True):
    """Dump an SDDS file to plain text via the `sdds2plaindata` tool.

    When the parameter/column lists are not given, every parameter and
    column reported by sddsquery is exported.
    """
    meta_params, meta_columns = query(sdds_filepath, suppress_err_msg=True)
    if param_name_list is None:
        param_name_list = list(meta_params)
    if column_name_list is None:
        column_name_list = list(meta_columns)
    cmd_list = [
        'sdds2plaindata', sdds_filepath, output_txt_filepath, '"-separator= "']
    cmd_list.extend(f'-parameter={name}' for name in param_name_list)
    cmd_list.extend(f'-column={name}' for name in column_name_list)
    proc = Popen(cmd_list, stdout=PIPE, stderr=PIPE, encoding='utf-8')
    stdout, stderr = proc.communicate()
    if stderr and (not suppress_err_msg):
        print('sdds2plaindata stderr:', stderr)
        print('sdds2plaindata stdout:', stdout)
def plaindata2sdds(
    input_txt_filepath, sdds_output_filepath, outputMode='ascii',
    param_name_list=None, param_type_list=None, param_unit_list=None,
    param_descr_list=None, param_symbol_list=None, param_count_list=None,
    column_name_list=None, column_type_list=None, column_unit_list=None,
    column_descr_list=None, column_symbol_list=None, column_count_list=None,
    suppress_err_msg=True):
    """Convert a plain-data text file into SDDS via the `plaindata2sdds` tool.

    Each parameter/column is described by a definition option of the form
    ``-parameter=name,type[,units=...,description=...,symbol=...,count=N]``
    (same for ``-column=``).  Types must be one of 'string', 'long',
    'short', 'double'.  Descriptions must not contain commas (they would
    break the option syntax).

    Improvement: the previously-duplicated ~30-line option builders for
    parameters and columns are factored into one helper; behavior and the
    generated command line are unchanged.
    """
    def _defn_options(flag, names, types, units, descrs, symbols, counts):
        # Build one definition option string per name for the given flag
        # ('-parameter' or '-column'); None attribute lists mean "omitted".
        n = len(names)
        assert n == len(types)
        if units is None:
            units = [None] * n
        assert n == len(units)
        if descrs is None:
            descrs = [None] * n
        assert n == len(descrs)
        if symbols is None:
            symbols = [None] * n
        assert n == len(symbols)
        if counts is None:
            counts = [None] * n
        assert n == len(counts)
        opts = []
        for name, dtype, unit, descr, symbol, count in zip(
                names, types, units, descrs, symbols, counts):
            assert dtype in ('string', 'long', 'short', 'double')
            opt = f'{flag}={name},{dtype}'
            if unit is not None:
                opt += f',units="{unit}"'
            if descr is not None:
                assert ',' not in descr
                opt += f',description="{descr}"'
            if symbol is not None:
                opt += f',symbol="{symbol}"'
            if count is not None:
                opt += f',count={count:d}'
            opts.append(opt)
        return opts

    if outputMode not in ('ascii', 'binary'):
        raise ValueError('"outputMode" must be either "ascii" or "binary".')
    cmd_list = [
        'plaindata2sdds', input_txt_filepath, sdds_output_filepath,
        '-inputMode=ascii', f'-outputMode={outputMode}', '"-separator= "',]
    if param_name_list is not None:
        cmd_list.extend(_defn_options(
            '-parameter', param_name_list, param_type_list, param_unit_list,
            param_descr_list, param_symbol_list, param_count_list))
    if column_name_list is not None:
        cmd_list.extend(_defn_options(
            '-column', column_name_list, column_type_list, column_unit_list,
            column_descr_list, column_symbol_list, column_count_list))
    # Re-tokenize through the shell-quoting rules so the quoted fragments
    # (e.g. the separator option) end up as single arguments.
    shell_cmd = ' '.join(cmd_list)
    cmd_list = shlex.split(shell_cmd, posix=True)
    p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, encoding='utf-8')
    output, error = p.communicate()
    if error and (not suppress_err_msg):
        print('plaindata2sdds stderr:', error)
        print('plaindata2sdds stdout:', output)
| true |
5398a861a6a245ac5ca3541266256b228385989a | Python | ThinkChaos/CStyle | /cstyle/rules/meta/wrapper.py | UTF-8 | 1,236 | 3.609375 | 4 | [] | no_license | """*Meta rules* that wrap rules."""
def line(n, rule):
"""Apply rule to `n`th `line` only."""
return lambda l, i: (
i['lineno'] == (n if n >= 0 else i['nlines'] + n) and rule(l, i)
)
def not_line(n, rule):
"""Apply rule to all but `n`th `line`."""
return lambda l, i: (
i['lineno'] != (n if n >= 0 else i['nlines'] + n) and rule(l, i)
)
def shift_line(n, rule, skip_comments=True):
"""Move by `n` lines in file. No-op when moving out of bounds."""
def wrap(line, info):
old_index = info['line_index']
new_index = old_index + n
if 0 <= new_index < info['nlines']:
new_lineno, new_line = info['lines'][new_index]
info['line_index'] = new_index
old_lineno, info['lineno'] = info['lineno'], new_lineno
res = rule(new_line, info)
info['lineno'], info['line_index'] = old_lineno, old_index
return res
return False
return wrap
def next_line(rule):
"""Apply `rule` to next line of the file. No-op on last line."""
return shift_line(1, rule)
def prev_line(rule):
"""Apply `rule` to previous line of the file. No-op on first line."""
return shift_line(-1, rule)
| true |
b016ebf93a852deddad6e35fc65782db1a26e713 | Python | kylewillis21/TwitterBots | /scheduled_tweet.py | UTF-8 | 790 | 2.78125 | 3 | [] | no_license | import sched, tweepy
import time as time_module
import accessToken
# Authenticate to Twitter
auth = tweepy.OAuthHandler(accessToken.CONSUMER_KEY, accessToken.CONSUMER_SECRET)
auth.set_access_token(accessToken.ACCESS_TOKEN, accessToken.ACCESS_SECRET)
# Create API object
api = tweepy.API(auth)
# Create a tweet function
def tweet():
api.update_status("Message")
# Create a scheduler function that runs the tweet program at specified time
scheduler = sched.scheduler(time_module.time, time_module.sleep)
t = time_module.strptime('2020-02-29 12:11:00', '%Y-%m-%d %H:%M:%S') # Enter the time and date that you would like the tweet to go out
t = time_module.mktime(t)
scheduler_e = scheduler.enterabs(t, 1, tweet, ())
scheduler.run()
| true |
c24f2fe61c7c7604eee245186efa24919463217f | Python | Sn0wled/pythonbase | /lesson_002/05_zoo.py | UTF-8 | 1,147 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# есть список животных в зоопарке
animals = ['lion', 'kangaroo', 'elephant', 'monkey', ]
# посадите медведя (bear) между львом и кенгуру
# и выведите список на консоль
animals.insert(1, 'bear')
print(animals)
# добавьте птиц из списка birds в последние клетки зоопарка
birds = ['rooster', 'ostrich', 'lark', ]
# и выведите список на консоль
animals.extend(birds)
print(animals)
# уберите слона
# и выведите список на консоль
del animals[animals.index('elephant')]
print(animals)
# выведите на консоль в какой клетке сидит лев (lion) и жаворонок (lark).
# Номера при выводе должны быть понятны простому человеку, не программисту.
print('Лев сидит в клетке номер ', animals.index('lion') + 1, ', а жаворонок в клетке номер ', animals.index('lark') + 1)
| true |
40a421d1fb4f822388b15849c7edeb6c96c2d8c5 | Python | ZombiePy/CryptocurrencyDataMiner | /Utilities/functions.py | UTF-8 | 3,511 | 3.078125 | 3 | [
"MIT"
] | permissive | from DataGathering.csv_data_parser import CsvDataParser
import os
import re
def mqtt_receiving(crypto, output_file_path):
"""Function that creates instance of CsvDataParsing and running it"""
client_name = crypto + '1'
data_parser = CsvDataParser(crypto, client_name, output_file_path)
def on_message_func(client, userdata, msg):
nonlocal data_parser
data_parser.add_message(msg.topic, msg.payload)
data_parser.run(on_message_func)
def get_dates(crypto='BTC'):
"""Gets all dates from saved files in chosen crypto
:parameter crypto - cryptocurrency """
file_names = get_prices_files(crypto)
dates = set()
for file_name in file_names:
date_csv = file_name.split('_')[1]
date = date_csv.split('.')[0]
dates.add(date)
return dates
def get_output_path(path_type='Prices'):
"""Functions that searching for output path
:parameter path_type - 'Prices' or 'Plots' based on what program is looking for"""
active_path = os.getcwd()
if os.path.isdir('Data'):
output_path = os.path.join(active_path, 'Data', 'Output', path_type)
else:
os.chdir('..')
output_path = get_output_path(path_type)
return output_path
def get_file_path(crypto, date):
"""Creating path with file name for saveing data
:parameter date - current date
:parameter crypto - cryptocurrency symbol"""
output_path = get_output_path()
file_name = crypto + '_' + date + '.csv'
return os.path.join(output_path, file_name)
def get_prices_files(crypto):
"""Listing all files with data for chosen crypoo
:parameter crypto - cryptocurrency symbol"""
output_file_path = get_output_path()
files = os.listdir(output_file_path)
files_given_crypto = list()
for file_price in files:
if re.search(crypto, file_price):
files_given_crypto.append(file_price)
return files_given_crypto
def get_last_date():
"""Getting last date based on saved files"""
dates = get_dates()
return sorted(dates)[-1]
def get_plot_path(date, plot_type, crypto):
"""Creating path with file name for plots
:parameter date - current date
:parameter crypto - cryptocurrency symbol
:parameter plot_type - defines witch type of plot is being creating"""
file_name = crypto.upper() + '_' + plot_type + '_' + date + '.png'
return os.path.join(get_output_path('Plots'), file_name)
def list_to_html_table(list_of_data):
"""Creating sting that looks like html code of table from list of data"""
table_content = ""
for sublist in list_of_data:
table_content += " <tr>\n"
for data in sublist:
table_content += " <td>" + str(data) + "</td>\n"
table_content += " </tr>\n"
return table_content[:-1]
def add_subscriber(email, name):
"""Adding subscriber to list"""
subscribers_path = os.path.join("..", 'Data', 'Input', 'subscribers.csv')
with open(subscribers_path, 'a') as subscribers:
subscribers.write('\n' + email + ',' + name)
def remove_subscriber(email, name):
"""Removing subscriber from the list """
subscribers_path = os.path.join("..", 'Data', 'Input', 'subscribers.csv')
with open(subscribers_path, '') as subscribers:
lines = subscribers.readlines()
subscribers.seek(0)
for line in lines:
if line != email + "," + name:
subscribers.write(line)
subscribers.truncate()
| true |
3764f1f347682e83f7ee852969089d5d5921092d | Python | fernandoCV19/codigoOpenCV | /tamanio.py | UTF-8 | 2,789 | 2.96875 | 3 | [] | no_license | import cv2
import math
from google.colab.patches import cv2_imshow
def encontrarCentro(approx):
x,y,w,h = cv2.boundingRect(approx)
return (x + w/2,y + h/2)
def esCuadrado(approx):
x,y,w,h = cv2.boundingRect(approx)
if (float(w)/h > 0.9 and float(w)/h < 1.1):
return True
else:
return False
def direccion(x):
if (x < centroX):
return "Izquierda"
else:
return "Derecha"
def areaCuadrilateros(approx):
x,y,w,h = cv2.boundingRect(approx)
return w*h
def areaTriangulo(approx):
x,y,w,h = cv2.boundingRect(approx)
return (w*h)/2
def direccionATomar(image):
global centroX
global centroY
global areaMasGrande
centroX = image.shape[1]/2
centroY = image.shape[0]/2
areaMasGrande = 0
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
canny = cv2.Canny(gray, 10, 150)
canny = cv2.dilate(canny, None, iterations = 1)
canny = cv2.erode(canny, None, iterations = 1)
cnts,herarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
epsilon = 0.01 * cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, epsilon, True)
if len(approx) == 3:
areaAux = areaTriangulo(approx)
if (areaAux > areaMasGrande):
areaMasGrande = areaAux
elif len(approx) == 4:
areaAux = areaCuadrilateros(approx)
if (areaAux > areaMasGrande):
areaMasGrande = areaAux
for c in cnts:
epsilon = 0.01 * cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, epsilon, True)
if len(approx) == 3:
areaAux = areaTriangulo(approx)
if (areaAux/areaMasGrande) > 0.6 and grande:
x,y = encontrarCentro(approx)
return direccion(x)
elif (areaAux/areaMasGrande) <= 0.6 and (areaAux/areaMasGrande) > 0.4 and mediano:
x,y = encontrarCentro(approx)
return direccion(x)
elif (areaAux/areaMasGrande) <= 0.4 and pequeno:
x,y = encontrarCentro(approx)
return direccion(x)
elif len(approx) == 4:
areaAux = areaCuadrilateros(approx)
if (areaAux/areaMasGrande) > 0.6 and grande:
x,y = encontrarCentro(approx)
return direccion(x)
elif (areaAux/areaMasGrande) <= 0.6 and (areaAux/areaMasGrande) > 0.4 and mediano:
x,y = encontrarCentro(approx)
return direccion(x)
elif (areaAux/areaMasGrande) <= 0.4 and pequeno:
x,y = encontrarCentro(approx)
return direccion(x)
cv2_imshow(image)
print("No encontrado")
cv2.destroyAllWindows()
pequeno = True
mediano = False
grande = False
centroX = 0
centroY = 0
areaMasGrande = 0
image = cv2.imread("/content/drive/MyDrive/DeteccionDeFormas/AhoraSi.png")
direccionATomar(image) | true |
8c794ccd871e80661cd9a5e5f0d501172391a078 | Python | Jawish-a/cashier | /cashier.py | UTF-8 | 659 | 3.859375 | 4 | [] | no_license | def main():
# write code here
items = []
while True:
item_name = input("Item (enter \"done\" when finished): ")
if item_name == "done":
break
item_price = int(input("Price: "))
item_qty = int(input("Quantity: "))
items.append({"name":item_name, "price": item_price, "quantity": item_qty})
print("-------------------")
print("receipt")
print("-------------------")
total = 0
for item in items:
print(str(item["quantity"]) + item["name"] + str(item["price"]*item["quantity"]) + "KD" )
total += item["price"]*item["quantity"]
print("-------------------")
print("Total Price: " + str(total) + "KD")
if __name__ == '__main__':
main() | true |
a23d24eb5273664e5282f6b3a1f34d3d8b7bcf8b | Python | Cwei1/Deep-Learning | /HW 5 - AGN News Set.py | UTF-8 | 4,944 | 3.046875 | 3 | [] | no_license | import pandas as pd
import keras
# Cardy Wei
# Professor Curro
# Deep Learning Assignment 5
max_len = 1012
num_classes = 4
epochs = 3
batch_size = 128
train = pd.read_csv('ag_news_csv/train.csv', names=["class", "title", "desc"])
test = pd.read_csv('ag_news_csv/test.csv', names=["class", "title", "desc"])
x_train = train["title"] + " " + train["desc"]
x_test = test["title"] + " " + test["desc"]
y_train = train["class"] - 1
y_test = test["class"] - 1
x_train = x_train[20000:]
y_train = y_train[20000:]
x_val = x_train[:20000]
y_val = y_train[:20000]
t = keras.preprocessing.text.Tokenizer()
t.fit_on_texts(x_train)
x_train = t.texts_to_sequences(x_train)
x_val = t.texts_to_sequences(x_val)
x_test = t.texts_to_sequences(x_test)
x_train = keras.preprocessing.sequence.pad_sequences(x_train, padding="post", truncating="post", maxlen=max_len)
x_val = keras.preprocessing.sequence.pad_sequences(x_val, padding="post", truncating="post", maxlen=max_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, padding="post", truncating="post", maxlen=max_len)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = keras.Sequential()
model.add(keras.layers.Embedding(len(t.word_counts), 64, input_length=max_len))
model.add(keras.layers.MaxPooling1D(pool_size = 2))
model.add(keras.layers.Dropout(.4))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
model.summary()
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_val, y_val))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# C:\Users\cardy\Desktop>python MLHW5.py
# Using TensorFlow backend.
# _________________________________________________________________
# Layer (type) Output Shape Param #
# =================================================================
# embedding_1 (Embedding) (None, 1012, 64) 4102272
# _________________________________________________________________
# max_pooling1d_1 (MaxPooling1 (None, 506, 64) 0
# _________________________________________________________________
# dropout_1 (Dropout) (None, 506, 64) 0
# _________________________________________________________________
# flatten_1 (Flatten) (None, 32384) 0
# _________________________________________________________________
# dense_1 (Dense) (None, 4) 129540
# =================================================================
# Total params: 4,231,812
# Trainable params: 4,231,812
# Non-trainable params: 0
# _________________________________________________________________
# Train on 100000 samples, validate on 20000 samples
# Epoch 1/3
# 2018-10-10 17:20:00.836661: I T:\src\github\tensorflow\tensorflow\core\platform\cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
# 2018-10-10 17:20:01.478405: I T:\src\github\tensorflow\tensorflow\core\common_runtime\gpu\gpu_device.cc:1405] Found device 0 with properties:
# name: GeForce 940MX major: 5 minor: 0 memoryClockRate(GHz): 0.8605
# pciBusID: 0000:01:00.0
# totalMemory: 2.00GiB freeMemory: 1.66GiB
# 2018-10-10 17:20:01.486599: I T:\src\github\tensorflow\tensorflow\core\common_runtime\gpu\gpu_device.cc:1484] Adding visible gpu devices: 0
# 2018-10-10 17:20:02.951061: I T:\src\github\tensorflow\tensorflow\core\common_runtime\gpu\gpu_device.cc:965] Device interconnect StreamExecutor with strength 1 edge matrix:
# 2018-10-10 17:20:02.955509: I T:\src\github\tensorflow\tensorflow\core\common_runtime\gpu\gpu_device.cc:971] 0
# 2018-10-10 17:20:02.960616: I T:\src\github\tensorflow\tensorflow\core\common_runtime\gpu\gpu_device.cc:984] 0: N
# 2018-10-10 17:20:02.965537: I T:\src\github\tensorflow\tensorflow\core\common_runtime\gpu\gpu_device.cc:1097] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 1412 MB memory) -> physical GPU (device: 0, name: GeForce 940MX, pci bus id: 0000:01:00.0, compute capability: 5.0)
# 100000/100000 [==============================] - 57s 565us/step - loss: 0.4873 - acc: 0.8383 - val_loss: 0.1987 - val_acc: 0.9401
# Epoch 2/3
# 100000/100000 [==============================] - 50s 502us/step - loss: 0.1835 - acc: 0.9407 - val_loss: 0.1204 - val_acc: 0.9645
# Epoch 3/3
# 100000/100000 [==============================] - 50s 501us/step - loss: 0.1216 - acc: 0.9608 - val_loss: 0.0715 - val_acc: 0.9804
# Test loss: 0.25114226884512525
# Test accuracy: 0.9206842105263158 | true |
e44b2b8d89ea5a6e88d7868fd2b754c642cf6f13 | Python | lucasmazz/python-minimal-chat | /main.py | UTF-8 | 1,868 | 2.9375 | 3 | [
"Unlicense"
] | permissive | # -*- coding: utf-8 -*-
"""
@date 09-26-13
"""
import socket, threading, sys, getopt, time
class Server( threading.Thread ):
"""
"""
def __init__( self, ip, port ):
threading.Thread.__init__(self)
self.ip = ip
self.port = port
self.start()
def run(self):
tcp = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
tcp.bind( ( self.ip, self.port ) )
tcp.listen(1)
conn, client = tcp.accept()
print 'Connected by ', client
while True:
try:
msg = conn.recv(1024)
if not msg: break
print client, msg
except Exception, e:
break
conn.close()
print "The connection with {0} was closed ".format( client )
class Connect( threading.Thread ):
"""
"""
def __init__( self, ip, port ):
threading.Thread.__init__(self)
self.ip = ip
self.port = port
self.start()
def run(self):
tcp = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
def connect():
try:
tcp.connect( (self.ip, self.port) )
except Exception, e:
print e
time.sleep(1)
connect()
connect()
msg = raw_input()
while msg <> '@close' : #tecla
tcp.send( msg )
msg = raw_input()
sys.stdout.flush()
tcp.close()
def help():
print """
--------------------------------------------------------
Help window
--------------------------------------------------------
"""
sys.exit()
def main(argv):
receive = None
send = None
try:
opts, args = getopt.getopt(argv,"ho:c:", ["help", "open=", "connect="])
except getopt.GetoptError:
help()
for opt, arg in opts:
if opt in ('-h', '--help'):
#chama a tela de ajuda
help()
elif opt in ("-o", "--open"):
receive = Server( arg[:arg.find(':')], int( arg[arg.find(':')+1:]) )
elif opt in ("-c", "--connect"):
send = Connect( arg[:arg.find(':')], int( arg[arg.find(':')+1:]) )
if __name__ == "__main__":
main(sys.argv[1:])
| true |
6034fe543cd4d4318b07ac2f1d1ca7a354191d66 | Python | abdulaziz-damlahi/BRINGME | /venv/Lib/site-packages/libbiomedit/lib/deserialize.py | UTF-8 | 3,575 | 2.859375 | 3 | [] | no_license | from typing import Union, Tuple, Any, Dict, Optional, Sequence
import collections
import warnings
import dataclasses
from .classify import classify, __origin_attr__
def deserialize(T: type):
"""Creates a deserializer for the type :T:. It handles dataclasses,
sequences, typing.Optional and primitive types.
:returns: A deserializer, converting a dict, list or primitive to :T:
"""
return _deserializers.get(classify(T), lambda x: x)(T)
_deserializers = {}
def _deserializer(T: type):
def decorator(f):
_deserializers[T] = f
return f
return decorator
@_deserializer(Any)
def deserialize_any(_: type):
return lambda x: x
@_deserializer(Tuple)
def deserialize_tuple(T: type):
item_types = T.__args__
if len(item_types) == 2 and item_types[1] is ...:
item_type = item_types[0]
def _deserialize(data: tuple):
return tuple(deserialize(item_type)(item) for item in data)
return _deserialize
def _deserialize(data: tuple):
if len(item_types) != len(data):
raise ValueError(
f"Wrong number ({len(data)}) of items for {repr(T)}")
return tuple(deserialize(T)(item) for T, item in zip(item_types, data))
return _deserialize
@_deserializer(Sequence)
def deserialize_seq(T: type):
seq_type = getattr(T, __origin_attr__, None)
try:
item_type = T.__args__[0]
except AttributeError:
raise ValueError(
f"Sequence of type {seq_type.__name__} without item type")
if seq_type is collections.abc.Sequence:
seq_type = list
def _deserialize(data):
return seq_type(map(deserialize(item_type), data))
return _deserialize
@_deserializer(dataclasses.dataclass)
def deserialize_dataclass(T):
fields = dataclasses.fields(T)
def _deserialize(data):
unexpected_keys = set(data.keys()) - set(f.name for f in fields)
if unexpected_keys:
warnings.warn(
f"{T.__name__}: Unexpected keys: " +
", ".join(unexpected_keys))
converted_data = {f.name: deserialize(
get_deserialize_method(f))(data[f.name]) for f in fields
if f.name in data}
return T(**converted_data)
return _deserialize
def get_deserialize_method(f: dataclasses.Field) -> type:
return f.metadata.get("deserialize", f.type)
@_deserializer(Optional)
def deserialize_optional(T: type):
T1, T2 = T.__args__
if isinstance(None, T1):
opt_type = T2
else:
opt_type = T1
def _deserialize(data):
if data is None:
return None
return opt_type(data)
return _deserialize
@_deserializer(Union)
def deserialize_union(T: type):
types = T.__args__
def _deserialize(data):
types_by_name = {t.__name__: t for t in types}
type_name = data.get("type")
if type_name is None:
raise ValueError(
f"Union[{', '.join(types_by_name)}]: missing `type` item")
T = types_by_name.get(type_name)
if T is None:
raise ValueError(
f"Union[{', '.join(types_by_name)}]: "
f"unexpected type `{type_name}`")
return deserialize(T)(data["arguments"])
return _deserialize
@_deserializer(Dict)
def deserialize_dict(T: type):
key_type, val_type = T.__args__
def _deserialize(data):
return {
deserialize(key_type)(key): deserialize(val_type)(val)
for key, val in data.items()}
return _deserialize
| true |
60d947b0be5ecd5c986e43d92daa6613443cfc07 | Python | allprecisely/Adventofcode | /Day 7/Puzzle_2.py | UTF-8 | 2,673 | 2.8125 | 3 | [] | no_license | def puzzle(inp):
# Тут все грустно, потому что пока не решил
with open(inp) as f:
arr = f.readlines()
d_lets = dict()
res = 0
elf1 = elf2 = elf3 = elf4 = elf5 = 0
let1 = let2 = let3 = let4 = let5 = ''
for i in range(len(arr)):
let = arr[i].split()
if let[1] not in d_lets:
d_lets[let[1]] = set(), {let[7]}
if let[7] not in d_lets:
d_lets[let[7]] = {let[1]}, set()
else:
d_lets[let[7]][0].add(let[1])
else:
d_lets[let[1]][1].add(let[7])
if let[7] not in d_lets:
d_lets[let[7]] = {let[1]}, set()
else:
d_lets[let[7]][0].add(let[1])
# for i, j in d_lets.items():
# print(i, j)
count = len(d_lets)
tmp = set()
while 0 < count:
flag = True
for key, value in d_lets.items():
if not value[0] and key not in tmp:
if elf1 == 0:
elf1 = ord(key) - 4
let1 = key
tmp.add(key)
elif elf2 == 0:
elf2 = ord(key) - 4
let2 = key
tmp.add(key)
elif elf3 == 0:
elf3 = ord(key) - 4
let3 = key
tmp.add(key)
elif elf4 == 0:
elf4 = ord(key) - 4
let4 = key
tmp.add(key)
elif elf5 == 0:
elf5 = ord(key) - 4
let5 = key
tmp.add(key)
while flag:
if elf1:
elf1 = elf1 - 1
if elf1 == 0:
vacl = let1
flag = False
if elf2:
elf2 = elf2 - 1
if elf2 == 0:
vacl = let2
flag = False
if elf3:
elf3 = elf3 - 1
if elf3 == 0:
vacl = let3
flag = False
if elf4:
elf4 = elf4 - 1
if elf4 == 0:
vacl = let4
flag = False
if elf5:
elf5 = elf5 - 1
if elf5 == 0:
vacl = let5
flag = False
res += 1
print(res)
for j in d_lets[vacl][1]:
d_lets[j][0].discard(vacl)
del d_lets[vacl]
count -= 1
print(d_lets)
return res
print(puzzle('test.txt'))
print(puzzle('input.txt')) | true |
c06c1bf46e5457236f22c9e72ca4d4572cefd946 | Python | xnegx/python-sysadmin | /HandsOn/Aula01/exercicio.py | UTF-8 | 448 | 2.703125 | 3 | [] | no_license | #!usr/bin/python
# -*- coding: utf-8 -*-
import requests
import json
url = 'http://localhost:5000/usuarios/'
nome = "Rafael Medeiros"
email = "rafael.medeiros@dexter.com.br"
resultados = json.loads(requests.get(url).text)
for u in resultados["usuarios"]:
if u['email'] == email:
print "Usuario ja cadastrado"
exit(1)
data = json.dumps({"nome":nome,"email":email})
headers = {"Content-Type":"application/json"}
print requests.post(url, data=data,headers=headers).text | true |
da6e1ee0cfc7ae25f032c74a7061bebfb6066868 | Python | johnngugi/exercism | /python/atbash-cipher/atbash_cipher.py | UTF-8 | 759 | 3.34375 | 3 | [] | no_license | import string
# your functions
def decode(cipher):
list1 = dict(zip(string.ascii_lowercase, range(26)))
list2 = dict(zip(range(25, -1, -1), string.ascii_lowercase))
plain = ""
for i in cipher.lower():
if i.isalpha():
plain += list2[(list1[i] + 26) % 26]
return plain
def encode(plain):
list1 = dict(zip(string.ascii_lowercase, range(26)))
list2 = dict(zip(range(25, -1, -1), string.ascii_lowercase))
cypher = ""
for i in plain.lower():
if i in list1:
cypher += list2[list1[i]]
elif i in '0123456789':
cypher += i
else:
continue
cypher = "".join([cypher[0+i:5+i] + " " for i in range(0, len(cypher), 5)])
return cypher.strip()
| true |
60da1376ffec6544f45c73402d5e672b652a8bf9 | Python | Gamiginy/netpro | /sql.py | UTF-8 | 1,336 | 2.828125 | 3 | [] | no_license | import sqlalchemy
import sqlalchemy.ext.declarative
from sqlalchemy.orm import sessionmaker
import codecs
Base = sqlalchemy.ext.declarative.declarative_base()
class Word(Base):
__tablename__ = 'words'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
english = sqlalchemy.Column(sqlalchemy.String)
japanese = sqlalchemy.Column(sqlalchemy.String)
part = sqlalchemy.Column(sqlalchemy.Integer)
section = sqlalchemy.Column(sqlalchemy.Integer)
part_section = sqlalchemy.Column(sqlalchemy.String)
def __init__(self, id, en, jp, part, section, part_section):
self.id = id
self.english = en
self.japanese = jp
self.part = part
self.section = section
self.part_section = part_section
url = 'postgresql+psycopg2://masuda:hogehoge@localhost:5432/netpro'
engine = sqlalchemy.create_engine(url, echo=True)
# スキーマ作成
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
def get_session():
return session
def add_words():
fin = codecs.open("words.txt", "r", "utf-8")
counter = 1
for line in fin:
print(line)
word = line.split(",")
session.add(Word(counter, word[0], word[1], word[2], word[3], word[4]))
counter += 1
session.commit()
fin.close()
| true |
9ebefdbcfa5837a90f3e766eebfe17e4f065286e | Python | how2how/ToyHome | /commander/thirdparty/covertutils/handlers/functiondict.py | UTF-8 | 5,074 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | # from abc import ABCMeta, abstractmethod
from covertutils.exceptions import *
from covertutils.handlers import BaseHandler
from covertutils.helpers import defaultArgMerging
import marshal, types
from threading import Thread
# from multiprocessing import Queue
try:
from queue import Queue # Python 3
except ImportError:
from Queue import Queue # Python 2
class FunctionDictHandler( BaseHandler ) :
"""
This class provides a per-stream function dict.
If a message is received from a `stream`, a function corresponding to this particular stream will be executed with single argument the received message.
The function's return value will be sent across that stream to the message's sender.
Ideal for simple `remote shell` implementation.
The FunctionDictHandler class implements the `onMessage()` function of the BaseHandler class.
The `function_dict` passed to this class `__init__()` must have the above format:
.. code:: python
def os_echo( message ) :
from os import popen
resp = popen( "echo %s" % 'message' ).read()
return resp
function_dict = { 'echo' : os_echo }
Note: The functions must be **absolutely self contained**. In the above example the `popen()` function is imported inside the `os_echo`. This is to ensure that `popen()` will be available, as there is no way to tell if it will be imported from the handler's environment.
Well defined functions for that purpose can be found in :mod:`covertutils.payloads`. Also usable for the :class:`StageableHandler` class
.. code:: python
from covertutils.payloads import GenericStages
pprint( GenericStages )
{'shell': {'function': <function __system_shell at 0x7fc347472320>,
'marshal': 'c\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x02\\x00\\x00\\x00C\\x00\\x00\\x00s&\\x00\\x00\\x00d\\x01\\x00d\\x02\\x00l\\x00\\x00m\\x01\\x00}\\x01\\x00\\x01|\\x01\\x00|\\x00\\x00\\x83\\x01\\x00j\\x02\\x00\\x83\\x00\\x00}\\x02\\x00|\\x02\\x00S(\\x03\\x00\\x00\\x00Ni\\xff\\xff\\xff\\xff(\\x01\\x00\\x00\\x00t\\x05\\x00\\x00\\x00popen(\\x03\\x00\\x00\\x00t\\x02\\x00\\x00\\x00osR\\x00\\x00\\x00\\x00t\\x04\\x00\\x00\\x00read(\\x03\\x00\\x00\\x00t\\x07\\x00\\x00\\x00messageR\\x00\\x00\\x00\\x00t\\x06\\x00\\x00\\x00result(\\x00\\x00\\x00\\x00(\\x00\\x00\\x00\\x00s\\x15\\x00\\x00\\x00covertutils/Stages.pyt\\x0e\\x00\\x00\\x00__system_shell\\x04\\x00\\x00\\x00s\\x06\\x00\\x00\\x00\\x00\\x01\\x10\\x01\\x12\\x01'}}
"""
# __metaclass__ = ABCMeta
def __init__( self, recv, send, orchestrator, **kw ) :
"""
:param dict function_dict: A dict containing `(stream_name, function)` tuples. Every time a message is received from `stream_name`, `function(message)` will be automatically executed.
"""
super( FunctionDictHandler, self ).__init__( recv, send, orchestrator, **kw )
self.stage_storage = {}
self.stage_storage['COMMON'] = {}
self.stage_storage['COMMON']['handler'] = self
self.processed_responses = Queue()
# try :
# self.function_dict = kw['function_dict']
for stream, stage in kw['function_dict'].items() :
self.addStage( stream, stage )
# except :
# raise NoFunctionAvailableException( "No Function dict provided to contructor" )
def onMessage( self, stream, message ) :
"""
:raises: :exc:`NoFunctionAvailableException`
"""
super( FunctionDictHandler, self ).onMessage( stream, message )
# print message
self.stage_storage[stream]['queue'].put( message )
# print "Put to Queue"
ret = self.processed_responses.get(True)
# print "Processed: "+ret
return ret
def onChunk( self, stream, message ) : pass
def onNotRecognised( self ) : pass
def stageWorker( self, init, worker, storage ) :
# print "Handler: Worker Started"
if not init(storage) : return
# print "Handler: Init Run Started"
while storage['on'] :
# print "Try to GET from Queue"
message = storage['queue'].get( block = True )
# print "Handler: Work() Run"
ret = worker(storage, message)
# print ret, type(ret)
self.processed_responses.put( ret )
self.stage_storage[stream] = {}
def getStage( self, stage_obj ) :
# Recognize the type of stage
# Assume 'marshal' for now
stage_dict = marshal.loads( stage_obj )
# print stage_dict
# print stage_dict['init']
if stage_dict['init'] == None :
stage_init = _dummy_init
else :
stage_init = types.FunctionType(stage_dict['init'], globals(), "stage_init_func")
stage_work = types.FunctionType(stage_dict['work'], globals(), "stage_work_func")
# print stage_init
return stage_init, stage_work
def addStage( self, stream, stage_obj ) :
self.stage_storage[stream] = {}
self.stage_storage[stream]['queue'] = Queue()
self.stage_storage[stream]['on'] = True
self.stage_storage[stream]['COMMON'] = self.stage_storage['COMMON']
# print stream
stage_init, stage_worker = self.getStage( stage_obj )
self.orchestrator.addStream( stream )
stage_thread = Thread( target = self.stageWorker, args = ( stage_init, stage_worker, self.stage_storage[stream] ) )
stage_thread.daemon = True
stage_thread.start()
pass
def _dummy_init (storage) :
return True
| true |
43e6e2e53d4790455166a95e09600a232631a8a3 | Python | rweel/greenhouse | /Data uit ecxellijst greenhouse project.py | UTF-8 | 291 | 2.90625 | 3 | [] | no_license | # voobeeld om excel file te openen
import pandas as pd
df = pd.read_excel (r'C:\Users\ruben\Documents\test-greenhouse-data.xlsx') #(use "r" before the path string to address special character, such as '\'). Don't forget to put the file name at the end of the path + '.xlsx'
print (df)
| true |
0c2e93d706ff54481ed40dcbe7faa3c0fb1b8f12 | Python | incous/ProjectEuler | /030.py | UTF-8 | 341 | 3.859375 | 4 | [] | no_license | def fifthPowerSum(number):
numberArr = list(str(number))
sum = 0
for ch in numberArr:
sum += int(ch) ** 5
return sum
def fourthPowerSum(number):
numberArr = list(str(number))
sum = 0
for ch in numberArr:
sum += int(ch) ** 4
return sum
total = 0
for ci in range(2,1000000):
if ci == fifthPowerSum(ci): total += ci
print total
| true |
42b944b07b3f9ff0e60bfc2b1410c2a8eade29b1 | Python | SybelBlue/SybelBlue | /Algorithms/HW2Scratch.py | UTF-8 | 1,341 | 3.46875 | 3 | [] | no_license | import random
# range bound
k = 10
# team sizes
n = 10
m = 7
def preprocess(t0, t1):
def freq_list(t):
out = [0] * k
for height in t:
out[height] += 1
return out
freq0 = freq_list(t0)
freq1 = freq_list(t1)
team0_val = [0] * k
team1_val = [0] * k
team_last = [0, 0]
for i in range(k):
team_last[0] += freq0[i]
team_last[1] += freq1[i]
team0_val[i] += team_last[0]
team1_val[i] += team_last[1]
return team0_val, team1_val
def evaluate(team_vals, a, b):
def player_count(team_number):
team = team_vals[team_number]
top = min(k - 1, b)
if a == 0:
lower = 0
else:
# in order to include players
# of height a
lower = team[a - 1]
return team[top] - lower
return player_count(0), player_count(1)
if __name__ == '__main__':
def build_team(size):
return [random.randint(0, k - 1) for _ in range(size)]
teams = build_team(m), build_team(n)
team_vals = preprocess(teams[0], teams[1])
print(teams)
print(team_vals)
a, b = 0, 0
while a >= 0 and b >= 0:
a = int(input(":"))
b = int(input(":"))
eval = evaluate(team_vals, a, b)
print(eval)
print(eval[0] >= eval[1])
| true |
359bd6982cd30b7b2e4aa69b1ac97d077d92d41f | Python | mattratt/causql | /stackexchange/scraper.py | UTF-8 | 3,641 | 3.40625 | 3 | [] | no_license | #!/usr/bin/python
import sys
from urllib2 import urlopen
class Scraper:
    """Cursor-based text scanner over a document fetched from a URL or file.

    The whole document is read into ``self.data`` and a cursor ``self.pos``
    tracks the current position.  ``move*``/``pull*`` methods advance the
    cursor as search keys are located; ``scout``/``comes*`` methods look
    ahead without moving it.

    NOTE(review): method names mix snake_case and camelCase; both spellings
    are kept as-is for backward compatibility with existing callers.
    """

    def __init__(self, html):
        """Load the document.

        ``html`` is either an http(s) URL (fetched) or a local file path.
        Sets ``self.url`` (resolved URL for remote input, the given path
        otherwise), ``self.data`` (full text) and ``self.pos`` (cursor, 0).
        """
        if html.startswith(("http://", "https://")):
            # Lazy import keeps the module usable on both Python 2 and 3.
            try:
                from urllib2 import urlopen           # Python 2
            except ImportError:
                from urllib.request import urlopen    # Python 3
            f = urlopen(html)
            self.url = f.geturl()
        else:
            f = open(html, 'r')
            self.url = html
        try:
            self.data = f.read()
        finally:
            f.close()  # always release the handle, even if read() raises
        self.pos = 0

    def move_to(self, key):
        """Advance the cursor to just past the next occurrence of ``key``.

        Returns the distance from the old cursor to the start of ``key``,
        or -1 (cursor unchanged) if ``key`` is not found.
        """
        p = self.data.find(key, self.pos)
        if p == -1:
            return -1
        dist = p - self.pos
        self.pos = p + len(key)
        return dist

    def moveBack(self, key):
        """Move the cursor to just past the last occurrence of ``key``
        *before* the current position.

        Returns the distance from the old cursor back to the start of
        ``key``, or -1 (cursor unchanged) if not found.
        """
        p = self.data.rfind(key, 0, self.pos)
        if p == -1:
            return -1
        dist = self.pos - p
        self.pos = p + len(key)
        return dist

    def scout(self, key):
        """Return the absolute position of the next ``key`` without moving
        the cursor, or -1 if absent (str.find already uses -1)."""
        return self.data.find(key, self.pos)

    def comes_before(self, key, other):
        """True iff ``key`` occurs after the cursor and before the next
        ``other`` (or ``other`` does not occur at all)."""
        posKey = self.scout(key)
        if posKey < 0:
            return False
        posOther = self.scout(other)
        return posOther == -1 or posKey < posOther

    def comesFirst(self, choices):
        """Return the element of ``choices`` occurring earliest after the
        cursor (cursor unchanged), or None if none occur."""
        firstChoice = None
        # None sentinel instead of sys.maxint (which no longer exists in Py3).
        firstPos = None
        for choice in choices:
            pos = self.scout(choice)
            if pos > -1 and (firstPos is None or pos < firstPos):
                firstChoice = choice
                firstPos = pos
        return firstChoice

    def peek(self, rng):
        """Debug helper: return ``"<pos>: <before>|<after>"`` showing up to
        ``rng`` characters on each side of the cursor."""
        start = max(0, self.pos - rng)
        end = min(len(self.data), self.pos + rng)
        return str(self.pos) + ": " + self.data[start:self.pos] + "|" + self.data[self.pos:end]

    # pull functions raise ValueError (str.index) if we don't find the key(s)

    def pullUntil(self, key):
        """Return the text between the cursor and the next ``key``; leave
        the cursor just past ``key``.  Raises ValueError if absent."""
        pEnd = self.data.index(key, self.pos)
        good = self.data[self.pos:pEnd]
        self.pos = pEnd + len(key)
        return good

    def pull_from_to(self, keyStart, keyEnd):
        """Skip to just past the next ``keyStart`` and return everything up
        to the following ``keyEnd`` (cursor ends past ``keyEnd``).
        Raises ValueError if either key is absent."""
        self.pos = self.data.index(keyStart, self.pos) + len(keyStart)
        return self.pullUntil(keyEnd)

    def pullLine(self):  # returns the rest of the current line (getting rid of the newline)
        """Return the rest of the current line; the newline is consumed but
        not returned.  Raises ValueError if no newline remains."""
        return self.pullUntil("\n")
# misc conversion funcs
dateMonths = {"Jan": "01", "Feb": "02", "Mar": "03", "Apr": "04", "May": "05", "Jun": "06",
"Jul": "07", "Aug": "08", "Sep": "09", "Oct": "10", "Nov": "11", "Dec": "12"}
# "Thursday, Dec 13" -> "20071213"
def parseDateYahooShort(dateStr):
# sys.stderr.write("parsing '%s'\n" % dateStr)
dateElts = dateStr.split()
dateMonth = dateMonths[dateElts[1]]
dateDay = dateElts[2]
if (len(dateDay) < 2):
dateDay = "0" + dateDay
if (int(dateMonth) < 9):
dateYear = "2008"
else:
dateYear = "2007"
return dateYear + dateMonth + dateDay
# "Sundy December 9, 2007" -> 20071209
def parseDateYahooLong(date):
sys.stderr.write("parsing '%s'\n" % date)
dayofweek, month, day, year = date.strip().split()
month = dateMonths[month[:3]]
if (len(day) < 3):
day = "0" + day[:1] # rid the comma
else:
day = day[:2]
return year + month + day
# "1:00 pm ET" -> 1300
# "1:00pm ET" -> 1300
def parseTimeYahoo(time):
elts = time.split()
if (len(elts) == 3):
h, m = elts[0].split(":")
ap = elts[1]
else:
h, m = elts[0][:-2].split(":")
ap = elts[0][-2:]
if (ap == "pm"):
h = str(int(h) + 12)
elif (int(h) < 10):
h = "0" + h
return h + m
| true |
8c07688cf60d18fcf148857aa917f53478a85d25 | Python | bicepjai/myclasses | /2016/caltech-cs1156x/week9-final/finals.py | UTF-8 | 7,810 | 2.890625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import random
from sklearn import svm
from sklearn.cluster import KMeans
features_train = np.loadtxt("data/features.train")
features_test = np.loadtxt("data/features.test")
R_train,C_train = features_train.shape
R_test,C_test = features_test.shape
print("==========================")
print("problem 11")
# plotting the points
r_xs = [1,0,0]
r_ys = [0,1,-1]
l_xs = [-1,0,0,-2]
l_ys = [0,2,-2,0]
#plt.plot(r_xs, r_ys, 'bs')
#plt.plot(l_xs, l_ys, 'rs')
#plt.axis([-3, 3, -3, 3])
#plt.show()
zr_xs = [ x2**2 - 2*x1 - 1 for (x1,x2) in [(1, 0), (0, 1), (0, -1)]]
zr_ys = [ x1**2 - 2*x2 + 1 for (x1,x2) in [(1, 0), (0, 1), (0, -1)]]
zl_xs = [ x2**2 - 2*x1 - 1 for (x1,x2) in list(zip(l_xs,l_ys))]
zl_ys = [ x1**2 - 2*x2 + 1 for (x1,x2) in list(zip(l_xs,l_ys))]
#plt.plot(zr_xs, zr_ys, 'bs')
#plt.plot(zl_xs, zl_ys, 'rs')
#plt.axis([-6, 6, -6, 6])
#plt.show()
print("==========================")
print("problem 13")
N = 100
f_x = lambda X: np.sign(X[:,1] - X[:,0] + 0.25*np.sin(np.pi*X[:,0]))
runs = 1000
C = 1
gamma = 1.5
nof_0_e_ins = 0
for run in range(runs):
X_in = np.ones((N,2))
X_in[:,0] = np.random.uniform(-1,1,N)
X_in[:,1] = np.random.uniform(-1,1,N)
Y_in = f_x(X_in)
# Y_in.shape = (N,1)
#svm rbf
model_svm = svm.SVC(kernel='rbf',gamma=gamma, C=C)
model_svm.fit(X_in,Y_in)
# predicting SVM
Y_svm_in = model_svm.predict(X_in)
p_in_svm = np.mean((Y_svm_in != Y_in).astype(int))
if(p_in_svm == 0):
nof_0_e_ins += 1
print("nof_0_e_ins %:",nof_0_e_ins*100/1000)
# hypothesis for problems 14 thru 18
f_x = lambda X: np.sign(X[:,1] - X[:,0] + 0.25*np.sin(np.pi*X[:,0]))
def kmeans_rbf_lloyd(X,Y,k,gamma):
R,C = X.shape
uks_idx = np.random.randint(R, size=k)
uks = X[uks_idx]
iters = 0
converged = False
while( not converged ):
iters += 1
# forming sks
min_obj_l = []
for i in range(k):
wrt_uki = np.square(np.linalg.norm(uks[i] - X,axis=1))
min_obj_l.append(wrt_uki)
min_obj = np.column_stack(min_obj_l)
sks = np.argmin(min_obj, axis=1)
#check for empty cluster
empty_cluster = []
for i in range(k):
if(len(sks[sks[:] == i]) == 0):
empty_cluster.append(False)
else:
empty_cluster.append(True)
if(not all(empty_cluster)):
uks_idx = np.random.randint(R, size=k)
uks = X[uks_idx]
continue
# forming uks
uks_l = []
for i in range(k):
uks_l.append(np.mean(X[sks[:] == i],axis=0))
# convergence check
prev_uks = uks
uks = np.vstack(uks_l)
converged = np.allclose(prev_uks,uks,1e-10)
if(iters > 1000):
print("iters exceeded 1000")
break
# print("iters",iters)
# finding ws
rdf_uki_l = [np.ones(R)]
for i in range(k):
wrt_uki = np.exp(-gamma*np.square(np.linalg.norm(uks[i] - X,axis=1)))
rdf_uki_l.append(wrt_uki)
phi = np.column_stack(rdf_uki_l)
phi_dagger=np.dot(np.linalg.pinv(np.dot(phi.T,phi)),phi.T)
w = np.dot(phi_dagger,Y)
return uks,w
def kernel_vs_regular(N, runs, K, gamma):
kernel_beat_regular = 0
p_ins_kernel = []
p_ins_regular = []
p_outs_kernel = []
p_outs_regular = []
for run in range(runs):
# seperable data points
X_in = np.ones((N,2))
X_in[:,0] = np.random.uniform(-1,1,N)
X_in[:,1] = np.random.uniform(-1,1,N)
Y_in = f_x(X_in)
while(np.all(Y_in==1) or np.all(Y_in==-1)):
X_in = np.ones((N,2))
X_in[:,0] = np.random.uniform(-1,1,N)
X_in[:,1] = np.random.uniform(-1,1,N)
Y_in = f_x(X_in)
X_out = np.ones((N,2))
X_out[:,0] = np.random.uniform(-1,1,N)
X_out[:,1] = np.random.uniform(-1,1,N)
Y_out = f_x(X_out)
# k means clustering rbf lloyds regular form
uks,w = kmeans_rbf_lloyd(X_in,Y_in,K,gamma)
# predicting kmeans Ein
hx_sum = np.zeros(N)
for i in range(K):
hx_uki_wi = w[i+1] * np.exp(-gamma*np.linalg.norm(uks[i] - X_in,axis=1))
hx_sum = hx_sum + hx_uki_wi
g_of_xin = np.sign(hx_sum + w[0]*np.ones(N))
p_in_kmeans = np.mean((g_of_xin != Y_in).astype(int))
p_ins_regular.append(p_in_kmeans)
# predicting kmeans Eout
hx_sum = np.zeros(N)
for i in range(K):
hx_uki_wi = w[i+1] * np.exp(-gamma*np.linalg.norm(uks[i] - X_out,axis=1))
hx_sum = hx_sum + hx_uki_wi
g_of_xout = np.sign(hx_sum + w[0]*np.ones(N))
p_out_kmeans = np.mean((g_of_xout != Y_out).astype(int))
p_outs_regular.append(p_out_kmeans)
#svm rbf kernel form
model_svm = svm.SVC(kernel='rbf',gamma=gamma, C=C)
model_svm.fit(X_in,Y_in)
# predicting SVM Ein
Y_svm_in = model_svm.predict(X_in)
p_in_svm = np.mean((Y_svm_in != Y_in).astype(int))
p_ins_kernel.append(p_in_svm)
# predicting SVM Eout
Y_svm_out = model_svm.predict(X_out)
p_out_svm = np.mean((Y_svm_out != Y_out).astype(int))
p_outs_kernel.append(p_out_svm)
if(p_in_kmeans > p_in_svm):
kernel_beat_regular += 1
#print("p_in_svm_rbf:",p_in_svm,"p_in_kmeans:",p_in_kmeans)
#print("p_out_svm_rbf:",p_out_svm,"p_out_kmeans:",p_out_kmeans)
print("kernel_beat_regular",kernel_beat_regular)
return p_ins_regular,p_outs_regular,p_ins_kernel,p_outs_kernel
print("==========================")
print("problem 14")
N = 100
runs = 100
K = 9
gamma = 1.5
p_ins_regular,p_outs_regular,p_ins_kernel,p_outs_kernel = kernel_vs_regular(N, runs, K, gamma)
print("==========================")
print("problem 15")
N = 100
runs = 100
K = 12
gamma = 1.5
p_ins_regular,p_outs_regular,p_ins_kernel,p_outs_kernel = kernel_vs_regular(N, runs, K, gamma)
print("==========================")
print("problem 16")
N = 100
runs = 100
K = 9
gamma = 1.5
print("K",K)
p_ins_regular,p_outs_regular,p_ins_kernel,p_outs_kernel = kernel_vs_regular(N, runs, K, gamma)
print("p_ins_regular",np.mean(p_ins_regular))
print("p_outs_regular",np.mean(p_outs_regular))
print("p_ins_kernel",np.mean(p_ins_kernel))
print("p_outs_kernel",np.mean(p_outs_kernel))
K = 12
print("K",K)
p_ins_regular,p_outs_regular,p_ins_kernel,p_outs_kernel = kernel_vs_regular(N, runs, K, gamma)
print("p_ins_regular",np.mean(p_ins_regular))
print("p_outs_regular",np.mean(p_outs_regular))
print("p_ins_kernel",np.mean(p_ins_kernel))
print("p_outs_kernel",np.mean(p_outs_kernel))
print("==========================")
print("problem 17")
N = 100
runs = 100
K = 9
gamma = 1.5
print("gamma",gamma)
p_ins_regular,p_outs_regular,p_ins_kernel,p_outs_kernel = kernel_vs_regular(N, runs, K, gamma)
print("p_ins_regular",np.mean(p_ins_regular))
print("p_outs_regular",np.mean(p_outs_regular))
print("p_ins_kernel",np.mean(p_ins_kernel))
print("p_outs_kernel",np.mean(p_outs_kernel))
gamma = 2
print("gamma",gamma)
p_ins_regular,p_outs_regular,p_ins_kernel,p_outs_kernel = kernel_vs_regular(N, runs, K, gamma)
print("p_ins_regular",np.mean(p_ins_regular))
print("p_outs_regular",np.mean(p_outs_regular))
print("p_ins_kernel",np.mean(p_ins_kernel))
print("p_outs_kernel",np.mean(p_outs_kernel))
print("==========================")
print("problem 18")
N = 100
runs = 100
K = 9
gamma = 1.5
p_ins_regular,p_outs_regular,p_ins_kernel,p_outs_kernel = kernel_vs_regular(N, runs, K, gamma)
print("Ein=0 in p_ins_regular",sum(p == 0.0 for p in p_ins_regular))
| true |
a6cc2461ad5a05fc47e4fb156635499c5c7195c3 | Python | NSS-Day-Cohort-42/rare-server-fakes-news | /models/subscription.py | UTF-8 | 229 | 2.546875 | 3 | [] | no_license | class Subscription():
def __init__(self, id, user_id, subscribe_id, begin, end):
self.id = id
self.user_id = user_id
self.subscribe_id = subscribe_id
self.begin = begin
self.end = end
| true |
99e39dca45e7447c1b43faa99216af95e0be79c7 | Python | Timurusus/TestFramework | /connector.py | UTF-8 | 402 | 2.953125 | 3 | [] | no_license | import pyodbc
class Connector:
def __init__(self, driver, server, database):
conn = pyodbc.connect(driver=driver,
server=server,
database=database
)
self.cursor = conn.cursor()
def execute_query(self, query):
self.cursor.execute(query)
return self.cursor.fetchone()[0]
| true |
9dc1f766d15e959763ffcec0fc3e131a3d6cf17c | Python | TylerDahneke/HomeDepotMap | /heap.py | UTF-8 | 5,721 | 3.96875 | 4 | [] | no_license | class MaxHeap:
def __init__(self, capacity=50):
'''Constructor creating an empty heap with default capacity = 50 but allows heaps of other capacities to be created.'''
self.size = capacity
self.items = [None] * (self.size + 1)
self.num_items = 0
def enqueue(self, item):
'''inserts "item" into the heap, returns true if successful, false if there is no room in the heap
"item" can be any primitive or ***object*** that can be compared with other
items using the < operator'''
# Should call perc_up
if self.is_full():
return False
else:
self.num_items += 1
self.items[self.num_items] = item
self.perc_up(self.num_items)
return True
def peek(self):
'''returns max without changing the heap, returns None if the heap is empty'''
if self.is_empty():
return None
return self.items[1]
def dequeue(self):
'''returns max and removes it from the heap and restores the heap property
returns None if the heap is empty'''
# Should call perc_down
if self.is_empty():
return None
r_ans = self.items[1]
self.items[1], self.items[self.num_items] = self.items[self.num_items], None
self.perc_down(1)
self.num_items -= 1
return r_ans
def contents(self):
'''returns a list of contents of the heap in the order it is stored internal to the heap.
(This may be useful for in testing your implementation.)'''
return [i for i in self.items if i is not None]
def build_heap(self, alist):
'''Discards all items in the current heap and builds a heap from
the items in alist using the bottom-up construction method.
If the capacity of the current heap is less than the number of
items in alist, the capacity of the heap will be increased to accommodate
exactly the number of items in alist'''
# Bottom-Up construction. Do NOT call enqueue
if len(alist) > self.size:
self.num_items = self.size = len(alist)
self.items = [None] + alist
else:
self.num_items = len(alist)
remaining_size = self.size - len(alist)
self.items = [None] + alist + [None] * remaining_size
counter = self.num_items
while counter > 1:
self.perc_down(get_parent(counter))
counter -= 1
def is_empty(self):
'''returns True if the heap is empty, false otherwise'''
return not self.num_items
def is_full(self):
'''returns True if the heap is full, false otherwise'''
return self.num_items == self.size
def is_leaf(self, pos):
truth_table = [False, False]
if get_left(pos) > self.size or self.items[get_left(pos)] is None:
truth_table[0] = True
if get_right(pos) > self.size or self.items[get_right(pos)] is None:
truth_table[1] = True
return all(truth_table)
def get_capacity(self):
'''this is the maximum number of a entries the heap can hold
1 less than the number of entries that the array allocated to hold the heap can hold'''
return self.size
def get_size(self):
'''the actual number of elements in the heap, not the capacity'''
return self.num_items
def perc_down(self, i):
'''where the parameter i is an index in the heap and perc_down moves the element stored
at that location to its proper place in the heap rearranging elements as it goes.'''
truth_table = [False, False]
if get_left(i) < self.size and self.items[get_left(i)] is not None and self.items[get_left(i)] > self.items[i]:
truth_table[0] = True
if get_right(i) < self.size and self.items[get_right(i)] is not None and self.items[get_right(i)] > self.items[
i]:
truth_table[1] = True
if all(truth_table):
if self.items[get_left(i)] > self.items[get_right(i)]:
self.swap_left(i)
else:
self.swap_right(i)
elif truth_table[0]:
self.swap_left(i)
elif truth_table[1]:
self.swap_right(i)
else:
pass
def perc_up(self, i):
'''where the parameter i is an index in the heap and perc_up moves the element stored
at that location to its proper place in the heap rearranging elements as it goes.'''
if i > 1 and self.items[i] > self.items[get_parent(i)]:
self.items[i], self.items[get_parent(i)] = self.items[get_parent(i)], self.items[i]
self.perc_up(get_parent(i))
def heap_sort_ascending(self, alist):
'''perform heap sort on input alist in ascending order
This method will discard the current contents of the heap, build a new heap using
the items in alist, then mutate alist to put the items in ascending order'''
self.build_heap(alist)
pos = len(alist) - 1
while not self.is_empty():
alist[pos] = self.dequeue()
pos -= 1
def swap_left(self, i):
self.items[i], self.items[get_left(i)] = self.items[get_left(i)], self.items[i]
self.perc_down(get_left(i))
def swap_right(self, i):
self.items[i], self.items[get_right(i)] = self.items[get_right(i)], self.items[i]
self.perc_down(get_right(i))
def get_left(pos):
return pos * 2
def get_right(pos):
return pos * 2 + 1
def get_parent(pos):
return int(pos / 2)
def get_smallest(item_1, item_2):
if item_1 is None:
return
| true |
00de39f95040b3d5cb724cf407f8c451627a76af | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_5/prksel001/question1.py | UTF-8 | 954 | 4.15625 | 4 | [] | no_license | """UCT BBS
Limpho Parkies
17-04-2014"""
#variables
welcome=('Welcome to UCT BBS\nMENU\n(E)nter a message\n(V)iew message\n(L)ist files\n(D)isplay file\ne(X)it')
MENU=''
message="no message yet"
while MENU!='X' and MENU!='x':
print(welcome)
MENU=input('Enter your selection:\n')
if MENU=='E' or MENU=='e':
message=input('Enter the message:\n')
elif MENU=='V' or MENU=='v':
print('The message is:',message)
elif MENU=='L' or MENU=='l':
print('List of files: 42.txt, 1015.txt')
elif MENU=='D' or MENU=='d':
filname=input('Enter the filename:\n')
if filname=='42.txt':
print('The meaning of life is blah blah blah ...')
elif filname=='1015.txt':
print('Computer Science class notes ... simplified\nDo all work\nPass course\nBe happy')
else:
print('File not found')
if MENU=='X' or MENU=='x':
print('Goodbye!') | true |
ef06a005e15140daa42d3f55ba495f2f1c7dac28 | Python | Tatsinnit/P | /Tst/RegressionTests/Rvm/run_all_unittests.py | UTF-8 | 1,462 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import glob
import os
import paralleltests
import shutil
import sys
import tempfile
import tools
def usageError():
raise Exception(
"Expected exactly one command line argument: "
"the maximum number of tests to run in parallel.\n"
"Usage: run_all_unittests.py parallel-test-count"
)
def getTests():
""" Returns a list of unit tests.
"""
script_dir = os.path.dirname(os.path.abspath(__file__))
unittest_dir = os.path.join(script_dir, "Unit", "Test", "*")
names = [os.path.basename(f) for f in glob.glob(unittest_dir)]
return sorted([f for f in names if f[0] != '.'])
def buildCommand(script_directory, temporary_directory, test_name):
""" Builds the command to run a unit test.
"""
return [ "python"
, os.path.join(script_directory, "run_unittest.py")
, test_name
, temporary_directory
]
def main(argv):
if len(argv) == 0:
parallelism = tools.findAvailableCpus() / 2
elif len(argv) == 1:
parallelism = int(argv[0])
else:
usageError()
temp_dir = tempfile.mkdtemp()
tools.progress("Temporary directory: %s" % temp_dir)
script_dir = os.path.dirname(os.path.abspath(__file__))
exit_code = paralleltests.runTests(
parallelism,
getTests(),
temp_dir,
lambda temp_dir, test_name: buildCommand(script_dir, temp_dir, test_name))
shutil.rmtree(temp_dir)
sys.exit(exit_code)
if __name__ == "__main__":
main(sys.argv[1:])
| true |
4d4ff9f174b528c623f7f8d860629ef20df6eef9 | Python | srntqn/docker-rest-api | /containers.py | UTF-8 | 2,173 | 2.53125 | 3 | [] | no_license | import docker
from flask import make_response, abort
client = docker.from_env()
def listContainers():
all_containers = []
for c in client.containers.list(all=True):
all_containers.append([c.short_id, c.status, c.name])
return all_containers
def getContainer(name):
try:
c = client.containers.get(f"{name}")
except docker.errors.DockerException as error:
abort(
404, str(error)
)
container_params = [c.short_id, c.status, c.name]
return container_params
def pullImage(name):
client.images.pull(f'{name}:latest')
return f"Successfully pulled {name} image."
def createContainer(name):
try:
client.containers.run(f'{name}:latest', name={name}, detach=True)
except docker.errors.DockerException as error:
abort(
404, str(error)
)
return f"Container {name} is running."
def changeContainerStatus(name, status):
if status not in ('running', 'exited'):
abort(
500, "Please use correct status code."
)
else:
try:
container = client.containers.get(f"{name}")
except docker.errors.DockerException as error:
abort(
404, str(error)
)
if container.status == status:
abort(
500, f'Container {name} already {status}.'
)
else:
if container.status == "running":
container.stop()
else:
container.start()
return f'Container {name} is {status}.'
def removeContainer(name):
try:
container = client.containers.get(f"{name}")
except docker.errors.DockerException as error:
abort(
404, str(error)
)
if container.status == "running":
container.stop()
container.remove()
else:
container.remove()
return f"Successfully removed {name} container."
def getContainerLogs(name, tail='all'):
try:
container = client.containers.get(f"{name}")
except docker.errors.DockerException as error:
abort(
404, str(error)
)
return container.logs(tail=tail)
| true |
6b5d5f7a08f43a0377b69e9d5ae83bff6c17a763 | Python | sKapshuk/amis_python | /km73/Kapshuk_Sergiy/5/task2.py.py | UTF-8 | 275 | 3.3125 | 3 | [] | no_license | from random import randrange
e = int(input('кількість чисел='))
a = [randrange(0, 10) for i in range(e)]
print(a)
count = 0
for i in range(e):
for j in range(e):
if (a[j] == a[i]) and (i != j):
count = count + 1
print(int(count/2)) | true |
0b2eaa21e7d9f81d7f4bfd0d7e7b94192528e1b3 | Python | Sangewang/PythonBasicLearn | /ClassTen/Time.py | UTF-8 | 372 | 2.796875 | 3 | [] | no_license | from time import *
from random import *
date1 = (2008,1,1,0,0,0,-1,-1,-1)
time1 = mktime(date1)
print '%d convert to %s'%(time1,asctime(localtime(time1)))
date2 = (2009,1,1,0,0,0,-1,-1,-1)
time2 = mktime(date2)
print '%d convert to %s'%(time2,asctime(localtime(time2)))
rand_time = uniform(time1,time2)
print '%d convert to %s'%(rand_time,asctime(localtime(rand_time)))
| true |
61e937a84625fcd46674a8cc03bbf34072261020 | Python | SailBotPitt/SailBot | /stationKeeping.py | UTF-8 | 18,130 | 2.84375 | 3 | [
"MIT"
] | permissive | import logging
import math
import time
from eventUtils import Event, EventFinished, Waypoint
from windvane import windVane
import constants as c
if c.config["MAIN"]["DEVICE"] == "pi":
from GPS import gps
"""
# Challenge Goal:
- To demonstrate the ability of the boat to remain close to one position and respond to time-based commands.
# Description:
- The boat will enter a 40 x 40m box and attempt to stay inside the box for 5 minutes.
- It must then exit within 30 seconds to avoid a penalty.
# Scoring:
- 10 pts max
- 2 pts per minute within the box during the 5 minute test (the boat may exit and reenter multiple times).
- 2 pts per minute will be deducted for time within the box after 5½ minutes.
    - The final score will be reduced by 50% if any RC is performed from the start of the 5 minute event until the boat’s final exit.
- The final score will be to X.X precision
# Assumptions: (based on guidelines)
- front is upstream
# Strategy:
- 1.) wait till fall behind 80%
- 2.) sail to 90%, until at 90%
- 3.) set sail flat
- 4.) if behind 75%, go to step 2, repeat
- 5.) GTFO (find&sail to best point) after time limit
- DO NOT JUST DROP SAIL
- how we won event first time was dropping sail
- and floating from front to end for total of 5 minute duration travel
"""
REQUIRED_ARGS = 4
class Station_Keeping(Event):
"""
Attributes:
- event_info (array) - 4 GPS coordinates forming a 40m^2 rectangle that the boat must remain in
event_info = [(b1_lat, b1_long),(b2_lat, b2_long),(b3_lat, b3_long),(b4_lat, b4_long)]
"""
def __init__(self, event_info):
if (len(event_info) != REQUIRED_ARGS):
raise TypeError(f"Expected {REQUIRED_ARGS} arguments, got {len(event_info)}")
super().__init__(event_info)
logging.info("Station_Keeping moment")
'''#see SK_perc_guide() notes on calculating go-to points
#running:
#1.) wait till fall behind 80%
#2.) sail to 90%, until at 90%
#3.) set sail flat
#4.) if behind 75%, go to step 2, repeat
#5.) GTFO (find&sail to best point) after time limit
#DO NOT JUST DROP SAIL
#how we won event first time was dropping sail
#and floating from front to end for total of 5 minute duration travel'''
self.time_perc = 5*60 * (70/100) #time to leave, 5 minute limit * %
#ALGO ARRAY========================================
#send in wanted %s in desmos calculation, and return what they are (m,b in y=mx+b; or x,y cord of center of box on %'s line)
type_inpArr = [ 0, 0, 0, 1] #m,b or x,y
perc_inpArr = [80,75,90,90] #%'s
self.cool_arr = self.SK_perc_guide(perc_inpArr,type_inpArr,self.event_info)
del type_inpArr, perc_inpArr
#whats contained in cool_arr:
#(0,1)80-line, (2,3)75-line,
#(4,5)90-line, (6,7)90-point,
#[always auto put on end of cool_arr thats not caluclated from input]:
#(8,9)Front-line (here cause of cart_perimiter_scan),
#(10,11)Left-line, (12,13)Right-line,
#(14,15)Back-line
#(16) mid m line for line check
#other Algo sets========================================
self.start = True
self.escape_x, self.escape_y = None,None #determining best out to go to to leave box based on angle of run (wind)
self.skip = False #faster if statement for time; holdout from prev notation
#time calc
self.start_time = time.time()
    def next_gps(self):
        """
        Decide the next waypoint for the station keeping event.

        Returns:
            Waypoint: point to sail toward, or
            None: loosen the sail and drift in place.

        Side effects: latches self.skip / self.escape_* once the time limit
        is reached, and caches each decision in self.last_pnt_x/y, which is
        returned verbatim when no branch below fires.
        """
        #time based checks, off-set the set GPS========================================
        curr_time = time.time()
        # Time is up (or escape already latched): leave the box for good.
        if self.skip or curr_time - self.start_time >= self.time_perc:
            # Compute the best exit point only once, then latch self.skip so
            # later calls short-circuit straight here without the time math.
            if self.escape_x == None:
                self.skip = True #faster if statement
                self.escape_x, self.escape_y = self.cart_perimiter_scan(self.cool_arr[-7:-1]) #i thought the func name sounded cool
            self.last_pnt_x, self.last_pnt_y = self.escape_x,self.escape_y
            return Waypoint(self.escape_x,self.escape_y)

        #if not in box========================================
        # Ordered by importance: handle too far forward/back before too far
        # left/right; placed before the sailing logic because being outside
        # the box overrides everything else.
        # cool_arr slices: [-9:-7]=front line, [-7:-5]=left, [-5:-3]=right,
        # [-3:-1]=back, [-1]=slope of the front/back mid line.
        #past front line of box
        if not( self.SK_line_check(self.cool_arr[-9:-7], self.cool_arr[-3:-1],self.cool_arr[-1]) ):
            logging.info("too forward")
            # Loosen sail, do nothing; drift back into the box.
            #.adjustSail(90)
            self.last_pnt_x, self.last_pnt_y = None,None
            return None
        #past bottom (back) line of box
        elif not( self.SK_line_check(self.cool_arr[-3:-1], self.cool_arr[-9:-7],self.cool_arr[-1]) ):
            logging.info("too back")
            # Sail back in: head for the 90% centre point.
            self.last_pnt_x, self.last_pnt_y = self.cool_arr[6],self.cool_arr[7]
            return Waypoint(self.cool_arr[6],self.cool_arr[7])
        #past left line of box
        elif not( self.SK_line_check(self.cool_arr[-7:-5], self.cool_arr[-5:-3],self.cool_arr[-1]) ):
            logging.info("too left")
            # Head for the intersect of a line +35 degrees off the wind
            # direction with the left box line ("mini cart scan").
            t_x, t_y = self.mini_cart_perimiter_scan(self.cool_arr[-7:-5],"L")
            self.last_pnt_x, self.last_pnt_y = t_x, t_y
            return Waypoint(t_x, t_y)
        #past right line of box
        elif not( self.SK_line_check(self.cool_arr[-5:-3], self.cool_arr[-7:-5],self.cool_arr[-1]) ):
            logging.info("too right")
            # Same as above, mirrored: -35 degrees off the wind to the right line.
            t_x, t_y = self.mini_cart_perimiter_scan(self.cool_arr[-5:-3],"R")
            self.last_pnt_x, self.last_pnt_y = t_x, t_y
            return Waypoint(t_x, t_y)

        #passed checks: SAILING; DOING THE EVENT========================================
        # Initial phase: wait for the boat to fall behind the 80% line before
        # the sail/drift cycle begins.
        if self.start:
            logging.info("start if")
            #if not moving and behind 80% (cool_arr[0:2] = 80% line m,b)
            if self.SK_line_check(self.cool_arr[0:2], self.cool_arr[-3:-1],self.cool_arr[-1]):
                logging.info("start: behind 80%; ending start")
                self.start = False
                self.last_pnt_x, self.last_pnt_y = self.cool_arr[6],self.cool_arr[7]
                return Waypoint(self.cool_arr[6],self.cool_arr[7]) #go to 90% point
            # Still within the box but ahead of 80%: fall through to the
            # fallback return below (drift on last_pnt).
            else: logging.info("start: ahead 80%")
        # Main sail/drift cycle.
        else:
            #if behind 75% (cool_arr[2:4]): sail back to the 90% point
            if self.SK_line_check(self.cool_arr[2:4], self.cool_arr[-3:-1],self.cool_arr[-1]):
                self.last_pnt_x, self.last_pnt_y = self.cool_arr[6],self.cool_arr[7]
                return Waypoint(self.cool_arr[6],self.cool_arr[7]) #go to 90% point
            #if past or at 90% (cool_arr[4:6]); redundancy reduction
            elif not(self.SK_line_check(self.cool_arr[4:6], self.cool_arr[-3:-1],self.cool_arr[-1])):
                self.last_pnt_x, self.last_pnt_y = None,None
                return None #loosen sail, do nothing

        # Fallback when no branch above fires: repeat the last decision.
        # NOTE(review): last_pnt_x/y must be initialised before the first
        # call reaches this line (the first "ahead 80%" pass assigns nothing
        # above) -- confirm __init__/Event sets them. Also note this returns
        # Waypoint(None, None) for a cached drift, while the drift branches
        # above return plain None -- confirm the caller treats both the same.
        return Waypoint(self.last_pnt_x, self.last_pnt_y) #fall back return if nested if's dont pass
#give %-line of box and other lines(details in SK) used in algo
def SK_perc_guide(self,inp_arr,type_arr,buoy_arr):
'''#calc front/back/sides mid point
#find the parameter lat/long value per percent
#calc line 75%/80%/90% (give long, if lat) towards front between them
#input an array of wanted %'s, return array with matching x/y's (in own array, array of arrays)
#saves on calc times
#https://www.desmos.com/calculator/yjeqtqunbh
#go with s scaling
#1.)find midpoints
#2.)between mid1, mid3: perc scale x/y's
#works no matter rotation of boat
#last in inp_arr returns x/y point that is % way of the box
#rest is m/b's
#add m/b of back line, at end of ret_arr
#add m of front/back midpoint line, at end of ret_arr
#0: m/b, 1: x/y
#then add other details need to the very end'''
ret_arr = []
mid_arr=[]#, m_arr=[], b_arr=[]
# midpoints ==========================
#annoying optimization work; done with skips in 'i' indexing
'''
- i=x cord of buoy, even index in array
- need to find with different combos of all the buoy coords to find the point in the middle of them
- organize those combos with a FAST FOR STATEMENT using index skipping
- can see depreciated "straightforward" way of doing it below, this looks nicer (and takes less memory?)
- need to find these midpoints to calcualte the perc lines/points given in input of function
'''
# (12,13,34,24);(front,left,back,right)
# 02,04,46,26
a = [0, 2, 0, 4, 4, 6, 2, 6] #optimizing code with for rather then long ass list
# 0,1, 2,3, 4,5, 6,7
for i in range(4): # 0,1,2,3
#TODO: remove nice variables: just fill in and make two lines (optimization)
#NOTE: nah, fuck that^^^
j1 = a[i*2] # 0 - 0 - 4 - 2
k1 = j1 +1 # 1 - 1 - 5 - 3
j2 = a[(i*2) +1] # 2 - 4 - 6 - 6 next over in "a"
k2 = j2 +1 # 3 - 5 - 7 - 7
p = (buoy_arr[j1] + buoy_arr[j2])/2 # 0+1/2: j1,j2
mid_arr.append(p) #p
mid_arr.append(self.SK_f(p, buoy_arr[j1], buoy_arr[k1], buoy_arr[j2], buoy_arr[k2])) #p,j1,k1,j2,k2
'''
#DEPRECIATED
#DO NOT DELETE, USE FOR EXPLANATION FOR OPTIMIZATION
# m's and b's ==========================
# dont wanna just delete cause dont wanna rewrite if somehow need them
# (mid13,mid24; 1,2; 3,4; mid12,mid34)
m_arr.append(self.SK_m(mid_arr[2], mid_arr[3], mid_arr[6], mid_arr[7])) # mid13 - mid24 (2,4) m2
#m_arr.append(self.SK_m(buoy_arr[0], buoy_arr[1], buoy_arr[2], buoy_arr[3])) # 1 - 2 front
m_arr.append(self.SK_m(buoy_arr[4], buoy_arr[5], buoy_arr[6], buoy_arr[7])) # 3 - 4 back
#m_arr.append(self.SK_m(mid_arr[0], mid_arr[1], mid_arr[4], mid_arr[5])) # mid12 - mid34 (1,3) down center
b_arr.append(self.SK_v(mid_arr[2], mid_arr[3], mid_arr[6], mid_arr[7])) # mid13 - mid24 (2,4)
#b_arr.append(self.SK_v(buoy_arr[0], buoy_arr[1], buoy_arr[2], buoy_arr[3])) # 1 - 2
b_arr.append(self.SK_v(buoy_arr[4], buoy_arr[5], buoy_arr[6], buoy_arr[7])) # 3 - 4
#b_arr.append(self.SK_v(mid_arr[0], mid_arr[1], mid_arr[4], mid_arr[5])) # mid12 - mid34 (1,3)
#front/back mid line for facing use
m_arr.append(self.SK_m(mid_arr[0],mid_arr[1],mid_arr[4],mid_arr[5]))'''
m2 = self.SK_m(mid_arr[2], mid_arr[3], mid_arr[6], mid_arr[7]) #slope between side line's midpoints
#newline: s-scale
#adding all perc line/point into return array (from input in function)
for i in range(len(inp_arr)):
perc = inp_arr[i]/100
x = perc*mid_arr[0] + (1-perc)*mid_arr[4]
y = perc*mid_arr[1] + (1-perc)*mid_arr[5]
if type_arr[i] == 1: #x/y
ret_arr.append(x)
ret_arr.append(y)
continue
else:
ret_arr.append( m2[0] ) #m
ret_arr.append( y-m2[0]*x ) #b
#sides-line for cart_perimiter_scan; additional additions to return
#front
ret_arr.append( self.SK_m(buoy_arr[0], buoy_arr[1], buoy_arr[2], buoy_arr[3]) ) #m buoy1,buoy2
ret_arr.append( self.SK_v(buoy_arr[0], buoy_arr[1], buoy_arr[2], buoy_arr[3]) ) #b buoy1,buoy2
#left
ret_arr.append( self.SK_m(buoy_arr[0], buoy_arr[1], buoy_arr[4], buoy_arr[5]) ) #m buoy1,buoy3
ret_arr.append( self.SK_v(buoy_arr[0], buoy_arr[1], buoy_arr[4], buoy_arr[5]) ) #b buoy1,buoy3
#right
ret_arr.append( self.SK_v(buoy_arr[2], buoy_arr[3], buoy_arr[6], buoy_arr[7]) ) #m buoy2,buoy4
ret_arr.append( self.SK_v(buoy_arr[2], buoy_arr[3], buoy_arr[6], buoy_arr[7]) ) #b buoy2,buoy4
'''#back-line, m of middle-line(linecheck); additional additions to return
ret_arr.append(m_arr[1])
ret_arr.append(b_arr[1])
ret_arr.append(m_arr[2])
#ret_arr.append(b_arr[2])'''
#back
ret_arr.append( self.SK_m(buoy_arr[4], buoy_arr[5], buoy_arr[6], buoy_arr[7]) ) #m buoy3,buoy4
ret_arr.append( self.SK_v(buoy_arr[4], buoy_arr[5], buoy_arr[6], buoy_arr[7]) ) #b buoy3,buoy4
ret_arr.append( self.SK_m(mid_arr[0],mid_arr[1],mid_arr[4],mid_arr[5]) )
return ret_arr
#if past line
    def SK_line_check(self,Tarr,Barr,mid_m):
        """Return True while the boat is still behind the target line.

        Tarr, Barr -- (m, b) slope/intercept pairs of the compare and back
        lines; mid_m -- slope of the course mid-line, used only to decide
        whether to compare along x (steep course) or along y (shallow).
        NOTE(review): reads module-level `gps` and self.DEBUG/gps_spoof;
        assumes gps.updategps() refreshes gps.latitude/longitude in place.
        """
        #TRUE: BEHIND LINE
        #FALSE: AT OR PAST LINE
        #Ix/y: current location of boat
        # gps.longitude, gps.latitude
        #Tarr: m/b compare line
        #arr: m/b Back line,
        #Fa:front
        #Fb:mid
        #Fc:back
        Fa=0;Fb=0;Fc=0 #temp sets
        #check if sideways =========================
        #input x/y as Buoy x/y's to func
        gps.updategps()
        if self.DEBUG: self.gps_spoof()
        if abs(mid_m) < 1: #Barr is secretly the mid m line shhhhhhh (LOOK AT ME)
            #sideways: course is near-horizontal, so compare x coordinates
            #x=(y-b)/m
            Fa= (gps.latitude-Tarr[1])/Tarr[0]
            Fb= gps.longitude
            Fc= (gps.latitude-Barr[1])/Barr[0]
        else:
            #rightways: course is steep, compare y coordinates
            #y=mx+b
            Fa= Tarr[0]*gps.longitude +Tarr[1]
            Fb= gps.latitude
            Fc= Barr[0]*gps.longitude +Barr[1]
        # Orientation of the two lines decides which comparison means "past".
        if Fa > Fc: #upright
            if Fa >= Fb: return False #past or equal
            else: return True #behind
        else: #upside down
            if Fa <= Fb: return False #past or equal
            else: return True #behind
#find best point of run to leave box
def cart_perimiter_scan(self,arr):
#DETAILS
'''#https://www.desmos.com/calculator/rz8tfc8fwn
#see what mid point closest (Left,Back,Right)
#cartesian with rand radius
#find point at perimeter at -45 or 125 (left,right) degrees (LDeg,RDeg line)
#find m/b of both
#x = r * cos( θ )
#y = r * sin( θ ); r=5(doesnt matter)
#take I() of LDeg,LSide; LDeg,BSide; RDeg,RSide; RDeg,BSide
#find closest, sail to
#arr: back-line,left-line,right-line (m,b's) 01,23,45
#find x,y's of degrees at best run points left and right'''
#STRAIGHTFORWARD EXPLANATION:
#make a point in the 2 best directions of run using cart math
#make a line between the boat and both points
#find intersection of those two lines and the boat
#determine with intersection is closest
gps.updategps()
if self.DEBUG: self.gps_spoof()
lat = gps.latitude; long = gps.longitude
t = math.pi/180
o = windVane.position
r=(5/6371000) * (180 / math.pi)
lx = r*math.cos( 135*t+o*t)+lat #left side run point
ly = r*math.sin( 135*t+o*t)+long
rx = r*math.cos(-135*t+o*t)+lat #right side run point
ry = r*math.sin(-135*t+o*t)+long
#into m,b's
lm = self.SK_m(lx,ly,lat,long)
lb = self.SK_v(lx,ly,lat,long)
rm = self.SK_m(rx,ry,lat,long)
rb = self.SK_v(rx,ry,lat,long)
#del t,o,lx,ly,rx,ry
#find intersects of LDeg,LSide; LDeg,BSide; RDeg,RSide; RDeg,BSide
t_arr=[]
t_arr.append( self.SK_I(lm,lb,arr[2],arr[3]) ) #x1(0)
t_arr.append( lm*t_arr[0]+lb ) #y1(1)
t_arr.append( self.SK_I(lm,lb,arr[0],arr[1]) ) #x2(2)
t_arr.append( lm*t_arr[2]+lb ) #y2(3)
t_arr.append( self.SK_I(rm,rb,arr[4],arr[5]) ) #x3(4)
t_arr.append( rm*t_arr[4]+rb ) #y3(5)
t_arr.append( self.SK_I(rm,rb,arr[0],arr[1]) ) #x4(6)
t_arr.append( rm*t_arr[6]+rb ) #y4(7)
#use distance equation and find closest
sd = self.SK_d(t_arr[0],t_arr[1],lat,long)
si = -1
for i in range(3):
a = self.SK_d(t_arr[2*(i+1)],t_arr[(2*i)+3],lat,long) #skip 0,1
if a<sd: sd=a;si=i
return t_arr[si+1],t_arr[si+2]
#same concept as cart_perm_scan
#used when OUTSIDE BOX to find best line to attack INTO BOX
#just for when left/right of box
def mini_cart_perimiter_scan(self,arr,case):
gps.updategps()
if self.DEBUG: self.gps_spoof()
lat = gps.latitude; long = gps.longitude
t = math.pi/180
o = windVane.position
r=(5/6371000) * (180 / math.pi)
if case == "L":
x = r*math.cos( 55*t+o*t)+lat #+35 from windvane
y = r*math.sin( 55*t+o*t)+long
elif case == "R":
x = r*math.cos(125*t+o*t)+lat #-35
y = r*math.sin(125*t+o*t)+long
else:
raise TypeError("mini_cart_perimiter_scan ERROR")
m = self.SK_m(x,y,lat,long)
b = self.SK_v(x,y,lat,long)
ret1= self.SK_I(arr[0],arr[1],m,b)
return ret1, m*ret1 + b
if __name__ == "__main__":
pass | true |
6f10379d91b385887653dd2def8beacd7b73cec7 | Python | JimXiongGM/MachineLearningPractice | /Algorithm/CLiMF/CLiMF_Epinions/CLiMF_Epinions_FromScratch.py | UTF-8 | 4,760 | 2.8125 | 3 | [] | no_license | import numpy as np
import pickle # for model preservation
from Epinions_Preprocessing import load_epinions, get_sample_users
class sigmoid: # callable sigmoid function class
    """Wraps a value x; calling the instance evaluates the logistic function
    1/(1+e^-x), and derivative() evaluates its first derivative."""

    def __init__(self, x):
        self.x = x

    def __call__(self):
        return 1/(1+np.exp(-self.x))

    def derivative(self):
        expx = np.exp(self.x)
        return expx/(1+expx)**2
# i = user
# j, k = item
class CLiMF:
    """Collaborative Less-is-More Filtering: learns user/item latent factors
    U and V by maximising a smoothed reciprocal-rank objective with
    stochastic gradient ascent."""
    def __init__(self, data, lamb=0.001, gamma=0.0001, dimension=10, max_iters=25):
        self.__data = data # Scipy sparse metrix => user->(item, count)
        self.__lambda = lamb # Regularization constant lambda
        self.__gamma = gamma # Learning rate
        self.__max_iters = max_iters
        # Small random init keeps the initial predicted scores near zero.
        self.U = 0.01 * np.random.random_sample((data.shape[0], dimension))
        self.V = 0.01 * np.random.random_sample((data.shape[1], dimension))
    def load(self, filename="CLiMF_model.pickle"):
        """Restore a previously saved model's attributes from *filename*."""
        with open(filename, 'rb') as model_file:
            model_dict = pickle.load(model_file)
        self.__dict__.update(model_dict)
    def save(self, filename="CLiMF_model.pickle"):
        """Pickle all attributes (data, hyperparameters, U, V) to *filename*."""
        with open(filename, 'wb') as model_file:
            pickle.dump(self.__dict__, model_file)
    def __f(self, i):
        """Predicted scores f_ij = <U[i], V[j]> for user i's observed items j."""
        items = self.__data[i].indices
        fi = dict((j, np.dot(self.U[i], self.V[j])) for j in items)
        return fi # Get <U[i], V[j]> for all j in data[i]
    # Objective function (predict)
    # U: user latent factor
    # V: item latent factor
    def F(self):
        """Regularised CLiMF objective F(U, V) summed over all users."""
        F = 0
        for i in range(len(self.U)):
            fi = self.__f(i)
            for j in fi:
                F += np.log(sigmoid(fi[j])())
                for k in fi:
                    F += np.log(1 - sigmoid(fi[k]-fi[j])())
        F -= 0.5 * self.__lambda * (np.sum(self.U * self.U) + np.sum(self.V * self.V)) # Forbenius norm
        return F
    # Stochastic gradient ascent (maximize the objective function)
    def __train_one_round(self):
        """One gradient-ascent sweep over all users.
        NOTE(review): the dV accumulation below groups the two derivative
        terms differently from the CLiMF paper's gradient -- confirm the
        parenthesisation against the published formula."""
        for i in range(len(self.U)):
            dU = -self.__lambda * self.U[i]
            fi = self.__f(i)
            for j in fi:
                # Calculate dV
                dV = sigmoid(-fi[j])() - self.__lambda * self.V[j]
                for k in fi:
                    dV += sigmoid(fi[j]-fi[k]).derivative() * (1/(1-sigmoid(fi[k] - fi[j])())) - (1/(1-sigmoid(fi[j] - fi[k])())) * self.U[i]
                self.V[j] += self.__gamma * dV
                # Calculate dU
                dU += sigmoid(-fi[j])() * self.V[j]
                for k in fi:
                    dU += (self.V[j] - self.V[k]) * sigmoid(fi[k] - fi[j])() / (1-sigmoid(fi[k] - fi[j])())
            self.U[i] += self.__gamma * dU
    def train(self, verbose=False, sample_users=None, max_iters=-1):
        """Run up to *max_iters* rounds (<= 0 means the constructor default);
        with verbose=True print the objective and train MRR each round."""
        if max_iters <= 0:
            max_iters = self.__max_iters
        for time in range(max_iters):
            self.__train_one_round()
            if verbose:
                print('iteration:', time+1)
                print('F(U, V) =', self.F())
                print('Train MRR =', aMRRevaluate(self.__data, self, sample_users))
# average Mean Reciprocal Rank
def aMRRevaluate(data, climf_model, sample_users=None):
    """Average reciprocal rank of each user's first relevant item under the
    model's scores; evaluates every user when sample_users is falsy."""
    if not sample_users:
        sample_users = range(len(climf_model.U))
    reciprocal_ranks = []
    for user in sample_users:
        relevant = set(data[user].indices)
        # Score every item for this user: <U[user], V[item]>.
        scores = np.sum(np.tile(climf_model.U[user], (len(climf_model.V), 1)) * climf_model.V, axis=1)
        for rank, item in enumerate(np.argsort(scores)[::-1]):
            if item in relevant:
                reciprocal_ranks.append(1.0/(rank+1))
                break
    return np.mean(reciprocal_ranks)
def main():
    """Load Epinions, report aMRR before/after training (or loading) a
    CLiMF model, dump the latent factors, and save the model."""
    TRAIN = True # Train or Load the model
    print("Loading Epinions dataset...")
    train_data, test_data = load_epinions()
    train_sample_users, test_sample_users = get_sample_users(train_data, test_data)
    model = CLiMF(train_data)

    def report(label):
        # Shared before/after evaluation printout.
        print(label)
        print("aMRR of training data:", aMRRevaluate(train_data, model, train_sample_users))
        print("aMRR of test data:", aMRRevaluate(test_data, model, test_sample_users))

    report("Before training:")
    if TRAIN:
        print("Training...")
        model.train(verbose=True, sample_users=train_sample_users)
    else:
        print("Load pre-trained model...")
        model.load()
    report("After training:")
    print("Result of U, V")
    print("U:", model.U)
    print("V:", model.V)
    model.save() # save model
if __name__ == "__main__":
    # Test sigmoid callable class
    # print(sigmoid(-87)())
    # print(sigmoid(87).derivative())
    # Entry point: load the dataset, then train/evaluate the CLiMF model.
    main()
| true |
f636efcded9e09dc55a109c19a5c213a2f49562b | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_4/srkmoh002/ndom.py | UTF-8 | 1,522 | 3.09375 | 3 | [] | no_license | # question2
def ndom_to_decimal(a):
    """Read the decimal digits of *a* as base-6 digits and return the value
    they denote (e.g. ndom 14 -> decimal 10)."""
    value = 0
    weight = 1
    remaining = a
    while remaining != 0:
        remaining, digit = divmod(remaining, 10)
        value += digit * weight
        weight *= 6
    return int(value)
def decimal_to_ndom(a):
    """Encode decimal *a* in ndom form: its base-6 digits written out as a
    decimal number (e.g. decimal 10 -> ndom 14)."""
    ndom = 0
    place = 1
    remaining = a
    while remaining != 0:
        remaining, digit = divmod(remaining, 6)
        ndom += digit * place
        place *= 10
    return int(ndom)
def ndom_add(a,b):
    """Add two ndom numbers (base-6 values whose digits are written in
    decimal notation, e.g. ndom 14 == decimal 10) and return an ndom result.
    Replaces the two hand-rolled conversion loops the original duplicated
    inline: int(str(x), 6) reads decimal digits as base-6 digits directly."""
    total = int(str(a), 6) + int(str(b), 6)
    # Re-encode the decimal sum as ndom: base-6 digits into decimal places.
    ndom = 0
    place = 1
    while total != 0:
        total, digit = divmod(total, 6)
        ndom += digit * place
        place *= 10
    return ndom
def ndom_multiply(a,b):
    """Multiply two ndom numbers (base-6 digits written in decimal) and
    return the product in the same ndom notation.
    Uses int(str(x), 6) in place of the original's duplicated digit loops."""
    product = int(str(a), 6) * int(str(b), 6)
    # Re-encode the decimal product as ndom digits.
    ndom = 0
    place = 1
    while product != 0:
        product, digit = divmod(product, 6)
        ndom += digit * place
        place *= 10
    return ndom
| true |
5c537d981382e13a4a678b028c9a539a573740ca | Python | Jaskaran23/Programming-for-big-data-1-cmpt-732 | /Assignement3/word count-improved.py | UTF-8 | 960 | 2.703125 | 3 | [] | no_license | from pyspark import SparkConf, SparkContext
import sys
import re,string
import operator
assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
# add more functions as necessary
def words_once(line):
    """Yield a (lower-cased token, 1) pair for every chunk of *line* split
    on punctuation/whitespace runs; empty tokens are yielded too."""
    splitter = re.compile(r'[%s\s]+' % re.escape(string.punctuation))
    for token in splitter.split(line):
        yield (token.lower(), 1)
def get_key(kv):
    """Sort key for a (word, count) pair: the word."""
    word = kv[0]
    return word
def output_format(kv):
    """Render a (word, count) pair as the output line 'word count'."""
    word, count = kv
    return '%s %i' % (word, count)
def main(inputs, output):
    """Word-count over the text files at *inputs*, writing sorted
    'word count' lines to *output* (uses the module-global SparkContext sc)."""
    text=sc.textFile(inputs).repartition(8)
    words=text.flatMap(words_once)
    # BUG FIX: the old filter was `len(x) > 0` where x is a (word, count)
    # pair -- always length 2, so nothing was ever dropped. Test the word
    # itself so empty tokens produced by words_once are removed.
    word_notempty=words.filter(lambda x: len(x[0])>0)
    wordcount=word_notempty.reduceByKey(operator.add)
    outdata = wordcount.sortBy(get_key).map(output_format)
    outdata.saveAsTextFile(output)
if __name__ == '__main__':
    conf = SparkConf().setAppName('Wordcount Improved')
    # sc is intentionally module-level: main() reads it as a global.
    sc = SparkContext(conf=conf)
    assert sc.version >= '2.3' # make sure we have Spark 2.3+
    # CLI: <input path> <output path>
    inputs = sys.argv[1]
    output = sys.argv[2]
    main(inputs, output)
| true |
01301d222c22140a592d3ab2e62a05e38f38c2be | Python | wangyy161/test_learning | /demo/point_ifin.py | UTF-8 | 2,226 | 3.421875 | 3 | [] | no_license |
def isinpolygon(point,vertex_lst:list, contain_boundary=True):
    """Cheap pre-test: is *point* inside the axis-aligned bounding box of
    the vertices? contain_boundary decides whether edges count as inside."""
    lngs, lats = zip(*vertex_lst)
    lng, lat = point
    if contain_boundary:
        return (min(lngs) <= lng <= max(lngs)) and (min(lats) <= lat <= max(lats))
    return (min(lngs) < lng < max(lngs)) and (min(lats) < lat < max(lats))
def isintersect(poi,spoi,epoi):
    #Inputs: test point, edge start point, edge end point; all [lng, lat].
    #The cast ray is the eastward parallel of latitude through the point.
    #Possible bug: regions spanning the prime meridian or the 180th meridian
    #may be handled incorrectly.
    #Returns True (ray crosses the edge), False (no crossing), or None
    #(the point lies exactly on a vertex or on the edge itself).
    lng, lat = poi
    slng, slat = spoi
    elng, elat = epoi
    if poi == spoi:
        #point coincides with a vertex: boundary case
        return None
    if slat==elat: #edge parallel to / collinear with the ray, or degenerate
        return False
    if slat>lat and elat>lat: #edge entirely above the ray
        return False
    if slat<lat and elat<lat: #edge entirely below the ray
        return False
    if slat==lat and elat>lat: #ray hits the lower endpoint (spoi)
        return False
    if elat==lat and slat>lat: #ray hits the lower endpoint (epoi)
        return False
    if slng<lng and elat<lat: #NOTE(review): reads like "edge west of the ray"
        #but mixes slng with elat -- verify against the algorithm's source.
        return False
    #solve for the crossing's longitude along the edge
    xseg=elng-(elng-slng)*(elat-lat)/(elat-slat)
    if xseg == lng:
        #the point lies exactly on the polygon edge
        return None
    if xseg<lng: #crossing is west of the ray's origin
        return False
    return True #a genuine crossing east of the point
def isin_multipolygon(poi,vertex_lst, contain_boundary=True):
    """Ray-casting point-in-polygon test over consecutive vertex pairs;
    an odd number of eastward ray crossings means the point is inside."""
    # Reject cheaply via the bounding box first.
    if not isinpolygon(poi, vertex_lst, contain_boundary):
        return False
    crossings = 0
    for edge_start, edge_end in zip(vertex_lst, vertex_lst[1:]):
        hit = isintersect(poi, edge_start, edge_end)
        if hit is None:
            # On a vertex/edge: the answer is the boundary policy itself.
            return (False, True)[contain_boundary]
        if hit:
            crossings += 1
    return crossings % 2 == 1
if __name__ == '__main__':
    # Smoke test: a point inside the rectangle spanned by the vertices.
    # (Removed the trailing " | true" residue that raised NameError at runtime.)
    vertex_lst = [[0,0],[1,1],[1,2],[0,2],[0,0]]
    poi = [0.82,0.75]
    print(isin_multipolygon(poi,vertex_lst, contain_boundary=True))
c10d65d6701c21d1d0dd4c5324006906af1f4a21 | Python | Dz6666/Python_CMDB | /Devops_CMDB/hello/views_listview.py | UTF-8 | 2,226 | 2.65625 | 3 | [] | no_license | from django.views.generic import ListView # 导入ListView
from hello.models import User # 导入model
from django.shortcuts import render, reverse, redirect
class IndexView(ListView):
    """
    Scenarios where ListView fits:
    getlist : list all records
    create : create a record
    """
    # Shared class-level configuration.
    # Template file to render.
    template_name = "hello/index.html"
    # Query the User table (object_list = User.objects.all()).
    model = User
    # Name of the template variable carrying the queryset (default: object_list).
    context_object_name = "users"
    keyword = ""
    # Search endpoint, e.g. http://ip/hello/index3/?keyword=kk
    def get_queryset(self):
        print("搜索功能")
        # Start from the parent's full queryset (User.objects.all()).
        queryset = super(__class__, self).get_queryset()
        print("data_all=",queryset)
        # Optional ?keyword= GET parameter narrows the results.
        self.keyword = self.request.GET.get("keyword","")
        # Case-insensitive substring match on the name column.
        if self.keyword:
            queryset = queryset.filter(name__icontains=self.keyword)
            print("keyword=",queryset)
        return queryset
    # Pass the search keyword through to the template context.
    def get_context_data(self, **kwargs):
        print("搜索后的数据传入前端")
        # Build on the parent's context, then add the keyword to it.
        context = super(__class__, self).get_context_data(**kwargs)
        print("data_all=",context)
        context['keyword'] = self.keyword
        print("keyword=",context)
        # Hand the context to the template (hello/index.html).
        return context
    # Form submission: create a User row, then re-render the list.
    # (Removed the trailing " | true" residue that raised NameError at runtime.)
    def post(self, request):
        data = request.POST.dict()
        print(data)
        User.objects.create(**data)
        users = User.objects.all()
        return render(request, 'hello/index.html', {"users":users})
db7b3cb92b3049b66d53e7b6bf168c87da968cb0 | Python | xiaohuione/code-learning | /python/argparse_test.py | UTF-8 | 336 | 2.875 | 3 | [] | no_license | # encoding: utf-8
import argparse

# Demo of argparse: one positional argument plus two required options.
parser = argparse.ArgumentParser()
parser.add_argument('bar', help='one of the bars to be frobbled')
parser.add_argument('--foo', required=True)
parser.add_argument('--nargs2', nargs=2, required=True)

args = ['bar', '--foo', 'foo', '--nargs2', 'a', 'b']
option = parser.parse_args(args)
# BUG FIX: `print option` is Python-2-only syntax; the call form works on
# both Python 2 and 3.
print(option)
| true |
dd35e88095d6a68520f7fc05a163b8585df26cdf | Python | Cationiz3r/C4T-Summer | /Session-4/list/update/update2.py | UTF-8 | 176 | 3.71875 | 4 | [] | no_license |
# Replace the first and last list entries, then print the list on one line.
# (Removed the trailing " | true" residue that raised NameError at runtime.)
print()
myList = ["Games", "Games", "Still Games", "Games?"]
myList[0] = "Movies"
myList[len(myList) -1] = "Comics"
for item in myList:
    print(item, end=" ")
print()
print()
321537f6daebe401a3278df3d178a10bc9a4aefe | Python | DReiser7/w2v_did | /utils/count_columns.py | UTF-8 | 1,065 | 2.96875 | 3 | [] | no_license | import pandas as pd
if __name__ == "__main__":
    # Count how many rows of a Common Voice test.tsv carry one of the kept
    # Spanish accent labels; prints the unmatched labels as it goes.
    base_dir = 'C:/workspaces/BA/Corpora/cv-corpus-6.1-2020-12-11/es/'
    test_tsv = base_dir + 'test.tsv'
    list_of_attributes_taken = ['mexicano',
                                'caribe',
                                'andino',
                                'centrosurpeninsular',
                                'americacentral',
                                'rioplatense',
                                'nortepeninsular',
                                'surpeninsular']
    test_data = pd.read_table(test_tsv, sep='\t')
    counter_empty = 0
    counter_taken = 0
    counter_total = 0
    # Column index 7 is assumed to be the accent column -- TODO confirm
    # against the Common Voice TSV schema.
    for i in range(0, len(test_data)):
        accent = test_data.iloc[i, 7]
        counter_total = counter_total + 1
        if accent in list_of_attributes_taken:
            counter_taken = counter_taken + 1
        else:
            print(accent)
    print('total: ', str(counter_total))
    print('taken empty: ', str(counter_taken))
    # NOTE(review): counter_empty is never incremented, so this always
    # prints 0 -- presumably it was meant to count NaN/empty accents.
    print('empty: ', str(counter_empty))
    print('filled in %: ', str(100 / counter_total * counter_taken))
| true |
af30410601f05db262269e8a4a4b9e399952e417 | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_edmarisov_B.py | UTF-8 | 859 | 3.015625 | 3 | [] | no_license | def process(input):
input = input.replace('\n', '')
while True:
final = input
input = input.replace('--', '-')
input = input.replace('++', '+')
if final == input:
break;
count = 0
was_plus = False
for c in input:
if c == '-':
count = count + 1
if was_plus:
count = count + 1
elif c == '+':
was_plus = True
return count
if __name__ == '__main__':
res = ''
i = 0
with open('B-large.in', 'r') as file:
first = True
for line in file:
if first:
first = False
continue;
i = i + 1
res = res + ("Case #%s: %s\n" % (i, process(line)))
with open('output', 'w+') as file:
print res
file.write(res) | true |
4f7e04a7a7f96788d9672ebaa06b6adac09829bd | Python | kunst1080/twitter-image-dump | /common.py | UTF-8 | 929 | 2.75 | 3 | [] | no_license | import os
import sys
import twitter
def get_twitter():
    """Return an authenticated twitter.Twitter client, reading OAuth
    credentials from $HOME/twitter.key; aborts via die() on any problem."""
    config_file = os.getenv("HOME") + os.sep + "twitter.key"
    # Guard clause: die() exits the process, so no else-branch is needed.
    if not os.path.isfile(config_file):
        die(config_file + ":file not exists.")
    conf = None
    try:
        with open(config_file, 'r') as key_file:
            conf = config_to_dictionary(key_file)
    except IOError:
        die(config_file + ":file cannot open.")
    auth = twitter.OAuth(consumer_key=conf["CONSUMER_KEY"],
                         consumer_secret=conf["CONSUMER_SECRET"],
                         token=conf["ACCESS_TOKEN"],
                         token_secret=conf["ACCESS_TOKEN_SECRET"])
    return twitter.Twitter(auth=auth)
def config_to_dictionary(file):
    """Parse KEY=value lines from the open *file* into a dict."""
    return dict(line.strip().split("=") for line in file)
def die(message):
    """Print an ERROR line to stderr and abort with exit status 1."""
    print("ERROR:" + message, file=sys.stderr)
    raise SystemExit(1)  # what sys.exit(1) raises
| true |
186871bbeba69ab33ac1364582c80932857b4f76 | Python | nickstenning/honcho | /honcho/environ.py | UTF-8 | 3,230 | 2.859375 | 3 | [
"MIT"
] | permissive | from collections import OrderedDict
from collections import defaultdict
from collections import namedtuple
import shlex
import os
import re
PROCFILE_LINE = re.compile(r'^([A-Za-z0-9_-]+):\s*(.+)$')
class Env(object):
    """Thin accessor over honcho's raw configuration mapping."""

    def __init__(self, config):
        self._c = config

    @property
    def port(self):
        """The configured port as an int; ValueError on a non-numeric value."""
        raw = self._c['port']
        try:
            return int(raw)
        except ValueError:
            raise ValueError(f"invalid value for port: '{raw}'")

    @property
    def procfile(self):
        """Path of the Procfile, joined onto the configured app root."""
        return os.path.join(self._c['app_root'], self._c['procfile'])

    def load_procfile(self):
        """Read the Procfile from disk and parse it."""
        with open(self.procfile) as handle:
            return parse_procfile(handle.read())
class Procfile(object):
    """A data structure representing a Procfile"""

    def __init__(self):
        # Declaration order of process types matters, hence OrderedDict.
        self.processes = OrderedDict()

    def add_process(self, name, command):
        # Duplicate names would silently clobber a command otherwise.
        assert name not in self.processes, \
            "process names must be unique within a Procfile"
        self.processes[name] = command
def parse_procfile(contents):
    """Build a Procfile from raw text; lines not matching 'name: command'
    are silently ignored."""
    procfile = Procfile()
    for raw_line in contents.splitlines():
        match = PROCFILE_LINE.match(raw_line)
        if match:
            procfile.add_process(match.group(1), match.group(2))
    return procfile
def parse(content):
    """
    Parse the content of a .env file (a line-delimited KEY=value format) into a
    dictionary mapping keys to values.
    """
    values = {}
    for line in content.splitlines():
        # shlex in POSIX mode handles quoting and drops '#' comments.
        tokens = list(shlex.shlex(line, posix=True))
        if len(tokens) < 3:
            continue
        name, op = tokens[0], tokens[1]
        # Only accept well-formed assignments to identifier-like names.
        if op != '=' or not re.match(r'[A-Za-z_][A-Za-z_0-9]*', name):
            continue
        value = ''.join(tokens[2:]).replace(r'\n', '\n').replace(r'\t', '\t')
        values[name] = value
    return values
# Launch spec for one process instance; fields mirror the constructor
# arguments of honcho.process.Process.
ProcessParams = namedtuple("ProcessParams", "name cmd quiet env")
def expand_processes(processes, concurrency=None, env=None, quiet=None, port=None):
    """
    Get a list of the processes that need to be started given the specified
    list of process types, concurrency, environment, quietness, and base port
    number.
    Returns a list of ProcessParams objects, which have `name`, `cmd`, `env`,
    and `quiet` attributes, corresponding to the parameters to the constructor
    of `honcho.process.Process`.
    """
    # An explicit PORT in the supplied environment overrides the argument.
    if env is not None and env.get("PORT") is not None:
        port = int(env.get("PORT"))
    if quiet is None:
        quiet = []
    # Default concurrency is one instance per process type.
    con = defaultdict(lambda: 1)
    if concurrency is not None:
        con.update(concurrency)
    out = []
    for name, cmd in processes.items():
        for i in range(con[name]):
            n = "{0}.{1}".format(name, i + 1)  # e.g. "web.1", "web.2", ...
            c = cmd
            q = name in quiet
            e = {'HONCHO_PROCESS_NAME': n}
            if env is not None:
                e.update(env)
            if port is not None:
                e['PORT'] = str(port + i)  # consecutive ports within one type
            params = ProcessParams(n, c, q, e)
            out.append(params)
        if port is not None:
            port += 100  # each process type gets its own 100-port block
    return out
| true |
e9cbe21ebb6f60b9deae446165fce85095a34e82 | Python | foreverxujiahuan/Pytorch- | /第三章_深度学习基础/line_regression_pytorch.py | UTF-8 | 1,919 | 2.921875 | 3 | [] | no_license |
import torch
from torch import nn
import numpy as np
import torch.utils.data as Data
from torch.nn import init
import torch.optim as optim
torch.manual_seed(1)
torch.set_default_tensor_type('torch.FloatTensor')
# Generate the synthetic dataset: labels = X @ true_w + true_b + noise.
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)),
                        dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()),
                       dtype=torch.float)
# Data loading.
batch_size = 10
# Bundle training features and labels together.
dataset = Data.TensorDataset(features, labels)
# Wrap the dataset in a DataLoader.
data_iter = Data.DataLoader(
    dataset=dataset,  # torch TensorDataset format
    batch_size=batch_size,  # mini batch size
    shuffle=True,  # shuffle the data (recommended for SGD)
    num_workers=2,  # worker processes for loading
)
# Define the model: a single linear layer.
net = nn.Sequential(
    nn.Linear(num_inputs, 1)
    # further layers could be appended here
)
# Initialise the model parameters.
init.normal_(net[0].weight, mean=0.0, std=0.01)
# the bias could also be set directly: net[0].bias.data.fill_(0)
init.constant_(net[0].bias, val=0.0)
# Loss function.
loss = nn.MSELoss()
# Optimiser.
optimizer = optim.SGD(net.parameters(), lr=0.03)
# Train the model.
if __name__ == '__main__':
    num_epochs = 3
    for epoch in range(1, num_epochs + 1):
        for X, y in data_iter:
            output = net(X)
            loss_ = loss(output, y.view(-1, 1))
            optimizer.zero_grad()  # clear gradients; equivalent to net.zero_grad()
            loss_.backward()
            optimizer.step()
        print('epoch %d, loss: %f' % (epoch, loss_.item()))
    # Compare the learned parameters with the generating ones.
    dense = net[0]
    print(true_w, dense.weight.data)
    print(true_b, dense.bias.data)
| true |
931adc68a68af37e326106879b1f990067376d64 | Python | selvesandev/python-ess | /functions/functions.py | UTF-8 | 164 | 3.328125 | 3 | [] | no_license | def my_func(a, b):
"""
DOCSTRING : Information about this function
:param a:
:param b:
:return:
"""
return a + b
print(my_func(1, 2))  # demo: my_func returns a + b, so this prints 3
| true |
1dcaed61d1018b97713931b8e807e23f039b2662 | Python | SS4G/AlgorithmTraining | /exercise/leetcode/python_src/by2017_Sep/Leet393.py | UTF-8 | 1,524 | 3.296875 | 3 | [] | no_license | class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
i = 0
while i < len(data):
num = data[i]
types = self.judge(num)
if types == 6:
return False
# print("i=", i, " types=", types)
j = 1
while j < types:
if i+j >= len(data):
return False
num = data[i+j]
if self.judge(num) != 5:
return False
j += 1
i = i + types
return True
def judge(self, x):
x &= 0xff
FIRSTMASK_2B = 0xc0
FIRSTMASK_3B = 0xe0
FIRSTMASK_4B = 0xf0
SECONDMASK = 0x80
#print("bin = ", bin(x), x)
if x & 0x80 == 0:
#print("type = 1")
return 1 # 1byte 1st
if x & 0xe0 == FIRSTMASK_2B:
#print("type = 2")
return 2 # 2byte 1st
if x & 0xf0 == FIRSTMASK_3B:
#print("type = 3")
return 3 # 3byte 1st
if x & 0xf8 == FIRSTMASK_4B:
#print("type = 4")
return 4 # 4byte 1st
if x & 0xc0 == SECONDMASK:
#print("type = 5")
return 5 # nbyte start
#print("type = 6")
return 6 # unknow type
if __name__ == "__main__":
    # Ad-hoc check: 227 (0xE3) leads a 3-byte sequence but 83 is not a
    # continuation byte, so this sample should print False.
    s = Solution()
    data = [39, 89, 227, 83, 132, 95, 10, 0]
    # print(s.judge(227))
    print(s.validUtf8(data))
| true |
41b8df2d44a99ec2c88be1eaf4193a157ac6b57e | Python | fosskers/alg-a-day | /day03-cat/cat.py | UTF-8 | 319 | 2.875 | 3 | [] | no_license | from syshelp import get_args # This is in my python-libs repo.
def cat(filename):
    '''Print the contents of a given file.'''
    with open(filename) as source:
        print(source.read(), end='')
if __name__ == '__main__':
    # get_args enforces exactly one CLI argument: the file to print.
    args = get_args('EXACTLY', 1)
    if args:
        cat(args[0])
| true |
fcd42795665419748fff61e6e2d886a4202e8411 | Python | vaishnav67/WebMining | /5. TF-IDF/Exp5.py | UTF-8 | 5,239 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from collections import Counter
from num2words import num2words
import nltk
import os
import string
import numpy as np
import copy
import pandas as pd
import pickle
import re
import math
# Corpus location: a "documents" directory next to the working directory.
title = "documents"
alpha = 0.3
folders = [x[0] for x in os.walk(str(os.getcwd())+'/'+title+'/')]
# Drop the trailing slash from the walk root.
folders[0] = folders[0][:len(folders[0])-1]
dirite=os.listdir(folders[0])
# dataset: (filename, title) pairs for every document.
# NOTE(review): str.strip(".txt") strips the CHARACTERS '.', 't', 'x' from
# both ends, not the ".txt" suffix (e.g. "text.txt" -> "ex"); probably
# removesuffix(".txt") was intended -- confirm before relying on titles.
dataset=[]
for i in dirite:
    dataset.append((i,i.strip(".txt")))
def convert_lower_case(data):
    """Lower-case *data* elementwise via numpy's string routines."""
    lowered = np.char.lower(data)
    return lowered
def remove_stop_words(data):
    """Drop English stopwords and single-character tokens; the kept tokens
    are re-joined with one leading space before each word."""
    stop_words = stopwords.words('english')
    kept = [w for w in word_tokenize(str(data))
            if w not in stop_words and len(w) > 1]
    return "".join(" " + w for w in kept)
def remove_punctuation(data):
    """Turn punctuation (except commas) into spaces, squeezing doubled
    spaces as it goes; commas are deleted outright at the end."""
    symbols = "!\"#$%&()*+-./:;<=>?@[\]^_`{|}~\n"
    for mark in symbols:
        data = np.char.replace(data, mark, ' ')
        data = np.char.replace(data, "  ", " ")
    data = np.char.replace(data, ',', '')
    return data
def remove_apostrophe(data):
    """Delete every apostrophe from *data* (so "don't" becomes "dont")."""
    return np.char.replace(data, "'", "")
def stemming(data):
    """Porter-stem every token; tokens come back joined with one leading
    space before each word."""
    stemmer = PorterStemmer()
    return "".join(" " + stemmer.stem(w) for w in word_tokenize(str(data)))
def convert_numbers(data):
    """Spell out integer tokens as English words (e.g. "42" -> "forty-two"),
    then replace the hyphens num2words inserts with spaces; non-numeric
    tokens pass through unchanged."""
    tokens = word_tokenize(str(data))
    new_text = ""
    for w in tokens:
        try:
            w = num2words(int(w))
        except Exception:
            # Not an integer token (or num2words failed): keep it as-is.
            # (Was a bare `except: a = 0`, which also swallowed
            # KeyboardInterrupt/SystemExit and left a dead variable.)
            pass
        new_text = new_text + " " + w
    new_text = np.char.replace(new_text, "-", " ")
    return new_text
def preprocess(data):
    """Full normalisation pipeline used for both documents and queries.
    NOTE(review): punctuation/number/stemming steps run more than once;
    this may be deliberate (stemming can re-expose digits/punctuation) but
    is worth confirming against the original tutorial."""
    data = convert_lower_case(data)
    data = remove_punctuation(data)
    data = remove_apostrophe(data)
    data = remove_stop_words(data)
    data = convert_numbers(data)
    data = stemming(data)
    data = remove_punctuation(data)
    data = convert_numbers(data)
    data = stemming(data)
    data = remove_punctuation(data)
    data = remove_stop_words(data)
    return data
# Read and preprocess every document's body and title into token lists.
N = len (dataset)
processed_text = []
processed_title = []
for i in dataset[:N]:
    file = open(title+'/'+i[0], 'r', encoding="utf8", errors='ignore')
    text = file.read().strip()
    file.close()
    processed_text.append(word_tokenize(str(preprocess(text))))
    processed_title.append(word_tokenize(str(preprocess(i[1]))))
# DF[w]: first the set of documents containing w (body or title), then
# collapsed in place to the document-frequency count.
DF = {}
for i in range(N):
    tokens = processed_text[i]
    for w in tokens:
        try:
            DF[w].add(i)
        except:
            DF[w] = {i}  # first sighting of w: start its document set
    tokens = processed_title[i]
    for w in tokens:
        try:
            DF[w].add(i)
        except:
            DF[w] = {i}
for i in DF:
    DF[i] = len(DF[i])
total_vocab_size = len(DF)
total_vocab = [x for x in DF]
def doc_freq(word):
    """Document frequency of *word* across the corpus; 0 if unseen.
    (Replaces a bare try/except around the lookup -- dict.get expresses the
    same default without swallowing unrelated exceptions.)"""
    return DF.get(word, 0)
# tf-idf weight for each (document, term) pair; title tokens count toward
# term frequency, but only body tokens receive a weight here.
doc = 0
tf_idf = {}
for i in range(N):
    tokens = processed_text[i]
    counter = Counter(tokens + processed_title[i])
    words_count = len(tokens + processed_title[i])
    for token in np.unique(tokens):
        tf = counter[token]/words_count
        df = doc_freq(token)
        idf = np.log((N+1)/(df+1))  # smoothed idf
        tf_idf[doc, token] = tf*idf
    doc += 1
def cosine_sim(a, b):
    """Cosine similarity: dot(a, b) normalised by both vector lengths."""
    return np.dot(a, b)/(np.linalg.norm(a)*np.linalg.norm(b))
# Dense document-term matrix built from the sparse tf_idf dict.
D = np.zeros((N, total_vocab_size))
for i in tf_idf:
    try:
        ind = total_vocab.index(i[1])
        D[i[0]][ind] = tf_idf[i]
    except:
        pass  # term not found in the vocabulary list: skip it
def gen_vector(tokens):
    """tf-idf query vector over the corpus vocabulary for *tokens*."""
    Q = np.zeros((len(total_vocab)))
    counter = Counter(tokens)
    words_count = len(tokens)
    query_weights = {}  # NOTE(review): never used; looks like dead code
    for token in np.unique(tokens):
        tf = counter[token]/words_count
        df = doc_freq(token)
        idf = math.log((N+1)/(df+1))  # same smoothing as the corpus idf
        try:
            ind = total_vocab.index(token)
            Q[ind] = tf*idf
        except:
            pass  # token not in the vocabulary: leave its weight at 0
    return Q
def cosine_similarity(k, query):
    """Print the top-*k* documents ranked by cosine similarity to *query*."""
    print("Cosine Similarity")
    preprocessed_query = preprocess(query)
    tokens = word_tokenize(str(preprocessed_query))
    print("\nQuery:", query)
    print("")
    print(tokens)
    d_cosines = []
    query_vector = gen_vector(tokens)
    for d in D:
        d_cosines.append(cosine_sim(query_vector, d))
    # argsort ascending, keep the k largest, best first.
    out = np.array(d_cosines).argsort()[-k:][::-1]
    for i in out:
        print(dataset[i][1])
    for i in out:
        print(dataset[i][1]+" is the highest in terms of cosine similarity")
        break  # only the single top document is announced
cosine_similarity(5, "Web mining")
def euclid_dist(a, b):
    """Euclidean distance between equal-length vectors *a* and *b*.
    BUG FIX: the original computed math.sqrt(...) but discarded the result
    and returned the squared distance. Rankings by this score are
    unaffected (sqrt is monotonic). Also renames the local that shadowed
    the builtin `sum`."""
    total = 0
    for i in range(0, len(a)):
        total += pow((a[i]-b[i]), 2)
    return math.sqrt(total)
def euclidean_distance(k,query):
    """Print *k* documents ranked by their euclid_dist score to *query*."""
    print("Euclidean Distance")
    preprocessed_query = preprocess(query)
    tokens = word_tokenize(str(preprocessed_query))
    print("\nQuery:", query)
    print("")
    print(tokens)
    euc_dis = []
    query_vector = gen_vector(tokens)
    for d in D:
        euc_dis.append(euclid_dist(query_vector,d))
    # NOTE(review): [-k:] keeps the k LARGEST distance scores, i.e. the
    # least similar documents; if "closest" was intended (to mirror the
    # cosine ranking), the usual slice would be [:k] -- confirm intent.
    out = np.array(euc_dis).argsort()[-k:]
    for i in out:
        print(dataset[i][1])
    for i in out:
        print(dataset[i][1]+" is the highest in terms of euclidean distance")
        break  # only the first selected document is announced
euclidean_distance(5, "Web mining")
| true |
93dc243a88dc1569d8c701f8188834b67ae1834f | Python | analysiscenter/batchflow | /batchflow/models/torch/modules/core.py | UTF-8 | 3,637 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | """ Defaults module. """
import inspect
from torch import nn
from ..blocks import Block
from ..repr_mixin import LayerReprMixin
class DefaultModule(LayerReprMixin, nn.Module):
    """ Module for default model parts.
    Allows to use `module` key for initialization:
        - if the value is a nn.Module, then it is used directly
        - otherwise, `module` is expected to be a module constructor, which is initialized with the rest of the kwargs.
    In other cases, relies on :class:`~.torch.layers.MultiLayer` for actual operations.
    Key `disable_at_inference` can be used to turn off the module at inference.
    That allows to use augmentations such as `torchvision.Compose` as part of the model.
    Implements additional logic of working with inputs and outputs:
        - if `input_type` is `tensor` and `output_type` is `tensor`,
        then this module expects one tensor and outputs one tensor.
        - if `input_type` is `list` and `output_type` is `tensor`,
        then this module slices the list with `input_index` and outputs one tensor.
        - if `input_type` is `tensor` and `output_type` is `list`,
        then this module expects one tensor and wraps the output in list.
        - if `input_type` is `list` and `output_type` is `list`,
        then this module slices the list with `input_index` and appends the output to the same list, which is returned.
    """
    VERBOSITY_THRESHOLD = 3
    def __init__(self, inputs=None, input_type='tensor', output_type='tensor', input_index=-1,
                 disable_at_inference=False, **kwargs):
        super().__init__()
        self.kwargs = kwargs
        self.input_type = input_type
        self.input_index = input_index
        self.output_type = output_type
        self.disable_at_inference = disable_at_inference
        self.initialize(inputs, **kwargs)
    def initialize(self, inputs, **kwargs):
        """ Make underlying block or reuse existing one. """
        # Parse inputs type: list or individual tensor
        inputs_is_list = isinstance(inputs, list)
        if inputs_is_list and self.input_type != 'list':
            raise TypeError(f'Input type is list with `input_type={self.input_type}`!')
        inputs = inputs[self.input_index] if inputs_is_list else inputs
        # Parse module: a ready nn.Module or a plain callable is used as-is;
        # a class is instantiated with the remaining kwargs (plus `inputs`
        # when its __init__ accepts such an argument).
        if 'module' in kwargs:
            module_constructor = kwargs['module']
            if isinstance(module_constructor, nn.Module):
                module = module_constructor
            elif callable(module_constructor) and not isinstance(module_constructor, type):
                module = module_constructor
            else:
                kwargs = {**kwargs, **kwargs.get('module_kwargs', {})}
                if 'inputs' in inspect.getfullargspec(module_constructor.__init__)[0]:
                    kwargs['inputs'] = inputs
                module = module_constructor(**kwargs)
            self.block = module
        else:
            # No explicit module: delegate everything to the generic Block.
            self.block = Block(inputs=inputs, **kwargs)
    def forward(self, inputs):
        # Parse inputs type: list or individual tensor
        inputs_is_list = isinstance(inputs, list)
        tensor = inputs[self.input_index] if inputs_is_list else inputs
        # Apply layer (skipped in eval mode when disable_at_inference is set)
        if self.training or (self.disable_at_inference is False):
            output = self.block(tensor)
        else:
            output = tensor
        # Prepare output type: sequence or individual tensor
        if self.output_type == 'list':
            if inputs_is_list:
                output = inputs + [output]
            else:
                output = [output]
        return output
| true |
6a3a81edafc0e1641e3fd3b699ff63eca0803d08 | Python | drd/karmabot | /src/facets/help.py | UTF-8 | 1,740 | 2.671875 | 3 | [] | no_license | import thing
import command
from itertools import chain
def numbered(strs):
    """Lazily prefix each string with its 1-based position: "1. foo", ..."""
    return (u"{0}. {1}".format(pos, text)
            for pos, text in enumerate(strs, start=1))
@thing.facet_classes.register
class HelpFacet(thing.ThingFacet):
    """Facet that attaches "help" commands to every thing."""
    name = "help"
    # Registering the command set at class-definition time wires these
    # commands into the global `thing` command tree.
    commands = command.thing.add_child(command.FacetCommandSet(name))
    # A topic rendered alone, or with its help text appended.
    short_template = u"\"{0}\""
    full_template = short_template + u": {1}"
    @classmethod
    def does_attach(cls, thing):
        # Help is available on every thing unconditionally.
        return True
    def get_topics(self, thing):
        """Map each visible command's topic to its help text, substituting
        the thing's name for {thing} in both."""
        topics = dict()
        for cmd in chain(command.thing, thing.iter_commands()):
            if cmd.visible:
                topic = cmd.format.replace("{thing}", thing.name)
                help = cmd.help.replace("{thing}", thing.name)
                topics[topic] = help
        return topics
    def format_help(self, thing, full=False):
        """Sorted help lines; full=True appends each topic's description."""
        line_template = self.full_template if full else self.short_template
        help_lines = [line_template.format(topic, help)
                      for topic, help in self.get_topics(thing).items()]
        help_lines.sort()
        return help_lines
    @commands.add(u"help {thing}", help=u"view command help for {thing}")
    def help(self, thing, context):
        context.reply(u"Commands: " + u", ".join(self.format_help(thing)))
    @commands.add(u"help {thing} {topic}", help=u"view help for {topic} on {thing}")
    def help_topic(self, thing, topic, context):
        # Topics are displayed quoted, so strip surrounding quotes on lookup.
        topic = topic.strip(u"\"")
        topics = self.get_topics(thing)
        if topic in topics:
            context.reply(self.full_template.format(topic, topics[topic]))
        else:
            context.reply(u"I know of no such help topic.")
context.reply(u"I know of no such help topic.")
| true |
14b3401021ca167f708edb993d4791a0d9aef611 | Python | Mixiz/python_study | /lesson_6/TrafficLight.py | UTF-8 | 958 | 3.71875 | 4 | [] | no_license | # Класс Светофор. Включаемся и моргаем цветами
import time
class TrafficLight:
    """Traffic light that reports the colour lit at the current moment.

    The cycle is red (7 s) -> yellow (2 s) -> green (10 s), anchored at the
    instant the class object was created (class attribute ``__time``).
    """

    COLORS = ('красный', 'желтый', 'зеленый')
    __PERIODS = (7, 2, 10)
    __TOTALTIME = sum(__PERIODS)
    __color = COLORS[0]
    __time = time.time()

    def running(self):
        """Determine the currently lit colour and print it."""
        elapsed = (time.time() - self.__time) % self.__TOTALTIME
        threshold = 0
        for colour, duration in zip(self.COLORS, self.__PERIODS):
            threshold += duration
            if elapsed < threshold:
                self.__color = colour
                break
        print(f'Горит {self.__color}')
if __name__ == '__main__':
    light = TrafficLight()
    light.running()
    # Keep polling the light until the user asks to quit.
    while True:
        answer = input("Узнать светофор. q - выйти\n")
        if answer == 'q':
            break
        light.running()
| true |
7a712c3f81a2f6dbea1c17a2b214286b1b159a50 | Python | simarjot16/zipExtractor | /zipExtractor.py | UTF-8 | 219 | 2.625 | 3 | [] | no_license | from zipfile import as ZipFile as zp
# The module-level import above is malformed ("from zipfile import as ZipFile
# as zp" is a SyntaxError), so bind the name correctly here.
from zipfile import ZipFile

file_path = ""  # Define the path of the zipfile over here

# Guard against the unset placeholder path: ZipFile("") would raise
# FileNotFoundError before doing anything useful.
if file_path:
    with ZipFile(file_path, "r") as zip_:
        zip_.printdir()  # print every member of the archive
        zip_.extractall()  # extract all members into the current directory
else:
    print("Please set file_path to the location of a .zip archive first.")
| true |
135c058acd44aa69f589152bfb6cab6ed6e3f9d1 | Python | qianfuzhuang/-selenium12306- | /01.request第一.py | UTF-8 | 263 | 2.75 | 3 | [] | no_license | import requests
if __name__ == "__main__":
    target_url = 'https://www.sougou.com/'
    # Fetch the page and keep its decoded body.
    page_html = requests.get(url=target_url).text
    print(page_html)
    # Persist the page next to the script for offline inspection.
    with open('./sougou.html', 'w', encoding='utf-8') as output_file:
        output_file.write(page_html)
    print("over")
255c5dde66fe851760b00fe3f33f25c82c90e942 | Python | hongfel3/Knowledge-Distillation-CNN | /models/gscnn/custom_functional.py | UTF-8 | 5,396 | 2.96875 | 3 | [
"MIT"
] | permissive | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
import torch.nn.functional as F
import numpy as np
def calc_pad_same(in_siz, out_siz, stride, ksize):
    """Total padding needed for a convolution to map *in_siz* to *out_siz*.

    Derived from the standard relation out = (in + pad - ksize) / stride + 1,
    solved for pad.

    :param in_siz: input spatial size
    :param out_siz: desired output spatial size
    :param stride: convolution stride
    :param ksize: kernel size
    :return: total padding width (left + right, or top + bottom)
    """
    total_pad = stride * (out_siz - 1) - in_siz + ksize
    return total_pad
def conv2d_same(input, kernel, groups, bias=None, stride=1, padding=0, dilation=1):
    """Grouped 2-D convolution with manual symmetric 'same' padding.

    The input is zero-padded so the spatial size of the output equals that
    of the input (asserted below).
    """
    _, _, in_h, in_w = input.shape
    _, _, k_h, k_w = kernel.shape
    # For a stride-1 same-size convolution the total pad is simply ksize - 1
    # (this is calc_pad_same(s, s, 1, k) inlined).
    total_pw = k_w - 1
    total_ph = k_h - 1
    left = total_pw // 2
    top = total_ph // 2
    padded = F.pad(input, (left, total_pw - left, top, total_ph - top))
    result = F.conv2d(padded, kernel, bias=bias, stride=stride, padding=padding,
                      dilation=dilation, groups=groups)
    assert result.shape == input.shape
    return result
def gradient_central_diff(input, cuda):
    # NOTE(review): this unconditional return short-circuits the function --
    # it hands back the raw input as both the "x" and "y" gradient, and every
    # line below it is unreachable dead code. This looks like a deliberate
    # bypass/ablation of the central-difference filter; confirm intent before
    # deleting or re-enabling the code below.
    return input, input
    kernel = [[1, 0, -1]]
    kernel_t = 0.5 * torch.Tensor(kernel) * -1.  # pytorch implements correlation instead of conv
    # (Unreachable) select the device for the difference kernel: an int is a
    # CUDA device index (-1 meaning CPU), a bool toggles the default GPU.
    if type(cuda) is int:
        if cuda != -1:
            kernel_t = kernel_t.cuda(device=cuda)
    else:
        if cuda is True:
            kernel_t = kernel_t.cuda()
    n, c, h, w = input.shape
    # (Unreachable) grouped depthwise convolutions: horizontal then vertical
    # central differences applied per channel.
    x = conv2d_same(input, kernel_t.unsqueeze(0).unsqueeze(0).repeat([c, 1, 1, 1]), c)
    y = conv2d_same(input, kernel_t.t().unsqueeze(0).unsqueeze(0).repeat([c, 1, 1, 1]), c)
    return x, y
def compute_single_sided_diferences(o_x, o_y, input):
    """Overwrite the borders of o_x / o_y (both N,C,H,W) in place with
    one-sided differences of *input*, and return the two tensors.
    """
    # Top row: forward difference.
    top_diff = input[:, :, 1, :].clone() - input[:, :, 0, :].clone()
    o_y[:, :, 0, :] = top_diff
    # Left column: forward difference.
    left_diff = input[:, :, :, 1].clone() - input[:, :, :, 0].clone()
    o_x[:, :, :, 0] = left_diff
    # Bottom row: backward difference.
    bottom_diff = input[:, :, -1, :].clone() - input[:, :, -2, :].clone()
    o_y[:, :, -1, :] = bottom_diff
    # Right column: backward difference.
    right_diff = input[:, :, :, -1].clone() - input[:, :, :, -2].clone()
    o_x[:, :, :, -1] = right_diff
    return o_x, o_y
def numerical_gradients_2d(input, cuda=False):
    """Batched numerical gradients via a grouped-conv central difference.

    Matches np.gradient(image) except that the return order here is (x, y)
    for an image, while numpy returns (y, x). The single-sided border
    differences are re-computed elsewhere.

    :param input: tensor of shape (N, C, H, W)
    :param cuda: whether to run the difference kernel on the GPU
    :return: tuple (x, y) of gradient tensors
    """
    _, _, height, width = input.shape
    assert height > 1 and width > 1
    return gradient_central_diff(input, cuda)
def convTri(input, r, cuda=False):
    """
    Convolves an image by a 2D triangle filter (the 1D triangle filter f is
    [1:r r+1 r:-1:1]/(r+1)^2, the 2D version is simply conv2(f,f'))
    :param input:
    :param r: integer filter radius
    :param cuda: move the kernel to gpu
    :return:
    """
    if (r <= 1):
        raise ValueError()
    n, c, h, w = input.shape
    # NOTE(review): this early return makes convTri an identity pass-through
    # (only the r <= 1 validation above runs); everything below is
    # unreachable dead code. This looks like a deliberate disabling of the
    # triangle smoothing -- confirm before deleting or re-enabling it.
    return input
    f = list(range(1, r + 1)) + [r + 1] + list(reversed(range(1, r + 1)))
    kernel = torch.Tensor([f]) / (r + 1) ** 2
    # (Unreachable) device selection: int = CUDA device index (-1 for CPU),
    # bool = default GPU toggle.
    if type(cuda) is int:
        if cuda != -1:
            kernel = kernel.cuda(device=cuda)
    else:
        if cuda is True:
            kernel = kernel.cuda()
    # padding w
    input_ = F.pad(input, (1, 1, 0, 0), mode='replicate')
    input_ = F.pad(input_, (r, r, 0, 0), mode='reflect')
    input_ = [input_[:, :, :, :r], input, input_[:, :, :, -r:]]
    input_ = torch.cat(input_, 3)
    t = input_
    # padding h
    input_ = F.pad(input_, (0, 0, 1, 1), mode='replicate')
    input_ = F.pad(input_, (0, 0, r, r), mode='reflect')
    input_ = [input_[:, :, :r, :], t, input_[:, :, -r:, :]]
    input_ = torch.cat(input_, 2)
    # (Unreachable) separable filtering: horizontal pass then vertical pass,
    # applied per channel via grouped convolution.
    output = F.conv2d(input_,
                      kernel.unsqueeze(0).unsqueeze(0).repeat([c, 1, 1, 1]),
                      padding=0, groups=c)
    output = F.conv2d(output,
                      kernel.t().unsqueeze(0).unsqueeze(0).repeat([c, 1, 1, 1]),
                      padding=0, groups=c)
    return output
def compute_normal(E, cuda=False):
    """Estimate a per-pixel edge-normal orientation (radians in [0, pi))
    from edge map E. Drops into ipdb if NaNs appear before or after the
    computation.
    """
    if torch.sum(torch.isnan(E)) != 0:
        print('nans found here')
        import ipdb;
        ipdb.set_trace()
    smoothed = convTri(E, 4, cuda)
    grad_x, grad_y = numerical_gradients_2d(smoothed, cuda)
    grad_xx, _ = numerical_gradients_2d(grad_x, cuda)
    grad_xy, grad_yy = numerical_gradients_2d(grad_y, cuda)
    # Ratio of second derivatives; the 1e-5 terms avoid division by zero.
    ratio = grad_yy * torch.sign(-(grad_xy + 1e-5)) / (grad_xx + 1e-5)
    orientation = torch.remainder(torch.atan(ratio), np.pi)
    if torch.sum(torch.isnan(orientation)) != 0:
        print('nans found here')
        import ipdb;
        ipdb.set_trace()
    return orientation
def compute_normal_2(E, cuda=False):
    # NOTE(review): near-duplicate of compute_normal(); the only difference
    # is the extra (Oyy, Oxx) in the return value. Consider folding the two
    # into one shared helper.
    if torch.sum(torch.isnan(E)) != 0:
        print('nans found here')
        import ipdb;
        ipdb.set_trace()
    # Smooth before differentiating (convTri currently passes the input
    # through unchanged -- see convTri).
    E_ = convTri(E, 4, cuda)
    Ox, Oy = numerical_gradients_2d(E_, cuda)
    Oxx, _ = numerical_gradients_2d(Ox, cuda)
    Oxy, Oyy = numerical_gradients_2d(Oy, cuda)
    # Orientation estimate in [0, pi); the 1e-5 terms avoid division by zero.
    aa = Oyy * torch.sign(-(Oxy + 1e-5)) / (Oxx + 1e-5)
    t = torch.atan(aa)
    O = torch.remainder(t, np.pi)
    if torch.sum(torch.isnan(O)) != 0:
        print('nans found here')
        import ipdb;
        ipdb.set_trace()
    return O, (Oyy, Oxx)
def compute_grad_mag(E, cuda=False):
    """Smoothed gradient magnitude of E, rescaled so its maximum is 1.

    The 1e-6 inside the square root keeps the gradient of sqrt finite
    at zero-magnitude pixels.
    """
    smoothed = convTri(E, 4, cuda)
    grad_x, grad_y = numerical_gradients_2d(smoothed, cuda)
    magnitude = torch.sqrt(grad_x * grad_x + grad_y * grad_y + 1e-6)
    return magnitude / magnitude.max()
cc80087b979c682c57fc4b270fa02bd4572968c6 | Python | Fantomster/py | /deep_learning/common_and_base_features_ai/base_nets.py | UTF-8 | 3,541 | 3.109375 | 3 | [] | no_license | """
Vector data - densely connected network ( Dense layers ).
Image data - 2D convnets
Sound data (for example, waveform)—Either 1D convnets (preferred) or RNNs.
Text data—Either 1D convnets (preferred) or RNNs.
Timeseries data—Either RNNs (preferred) or 1D convnets.
Other types of sequence data — Either RNNs or 1D convnets. Prefer RNNs if data
ordering is strongly meaningful (for example, for timeseries, but not for text).
Video data—Either 3D convnets (if you need to capture motion effects) or a combination of a frame-level 2D convnet for feature extraction followed by
either an RNN or a 1D convnet to process the resulting sequences.
Volumetric data—3D convnets.
"""
# Densely connected networks
# for binary classification
# NOTE(review): this module is a cookbook of template architectures; names
# such as num_input_features, num_classes, num_values, height, width,
# channels, num_timesteps and num_features are placeholders that the caller
# must define before any of these snippets can actually run.
from keras import models, layers
model = models.Sequential()
model.add(layers.Dense(32, activation='relu', input_shape=(num_input_features,)))
model.add(layers.Dense(32, activation='relu'))
# Single sigmoid unit -> probability of the positive class.
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy')
# Single-label categorical classification
# if targets integers, use sparse_categorical_crossentropy
model = models.Sequential()
model.add(layers.Dense(32, activation='relu', input_shape=(num_input_features,)))
model.add(layers.Dense(32, activation='relu'))
# One softmax unit per class; the outputs sum to 1.
model.add(layers.Dense(num_classes, activation='softmax'))
# one-hot encoded targets
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# Multilabel categorical classification
# use K-hot encoded
model = models.Sequential()
model.add(layers.Dense(32, activation='relu', input_shape=(num_input_features,)))
model.add(layers.Dense(32, activation='relu'))
# Independent sigmoids: each class can be on/off separately.
model.add(layers.Dense(num_classes, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy')
# regression
# use mean_squared_error and mean_absolute_error for loss function
model = models.Sequential()
model.add(layers.Dense(32, activation='relu', input_shape=(num_input_features,)))
model.add(layers.Dense(32, activation='relu'))
# Linear output layer (no activation) for unbounded regression targets.
model.add(layers.Dense(num_values))
model.compile(optimizer='rmsprop', loss='mse')
# Convnets
# Typical image-classification network
model = models.Sequential()
model.add(layers.SeparableConv2D(32, 3, activation='relu', input_shape=(height, width, channels)))
model.add(layers.SeparableConv2D(64, 3, activation='relu'))
model.add(layers.MaxPooling2D(2))
model.add(layers.SeparableConv2D(64, 3, activation='relu'))
model.add(layers.SeparableConv2D(128, 3, activation='relu'))
model.add(layers.MaxPooling2D(2))
model.add(layers.SeparableConv2D(64, 3, activation='relu'))
model.add(layers.SeparableConv2D(128,3, activation='relu'))
# Global average pooling avoids a huge Flatten -> Dense parameter count.
model.add(layers.GlobalAveragePooling2D())
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(num_classes, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# RNN
# binary classification of vector sequences
model = models.Sequential()
model.add(layers.LSTM(32, input_shape=(num_timesteps, num_features)))
# NOTE(review): strictly binary classification conventionally uses a single
# sigmoid unit; num_classes sigmoid units imply a multilabel setup -- confirm.
model.add(layers.Dense(num_classes, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy')
# Stacked binary classification of vector sequences
# return_sequences=True feeds the full sequence into the next LSTM layer.
model = models.Sequential()
model.add(layers.LSTM(32, return_sequences=True, input_shape=(num_timesteps, num_features)))
model.add(layers.LSTM(32, return_sequences=True))
model.add(layers.LSTM(32))
model.add(layers.Dense(num_classes, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy')
eb117429db15dc84b33488eff7ac28f1e194106c | Python | AngelNovoselski/Zoo | /test_animal.py | UTF-8 | 942 | 3.484375 | 3 | [] | no_license | from animal import Animal
import unittest
class TestAnimal(unittest.TestCase):
    """Unit tests for the Animal model.

    NOTE(review): the Animal constructor/method semantics documented below
    are inferred from these assertions, not from Animal itself -- confirm
    against animal.py.
    """
    def setUp(self):
        # Animal(species, age, name, gender, weight, life_expectancy);
        # presumably age and life_expectancy are in days (365 / 5475) --
        # TODO confirm in animal.py.
        self.cat_animal = Animal("cat", 365, "Pena", "Female", 5, 5475)
    def test_animal_init(self):
        # The constructor should store every argument unchanged.
        self.assertEqual("cat", self.cat_animal.species)
        self.assertEqual(365, self.cat_animal.age)
        self.assertEqual("Pena", self.cat_animal.name)
        self.assertEqual("Female", self.cat_animal.gender)
        self.assertEqual(5, self.cat_animal.weight)
        self.assertEqual(5475, self.cat_animal.life_expectancy)
    def test_grow(self):
        # grow(2, 1) is expected to add 1 to age and 2 to weight
        # (argument meaning assumed from the expected values -- TODO confirm).
        self.cat_animal.grow(2, 1)
        self.assertEqual(366, self.cat_animal.age)
        self.assertEqual(7, self.cat_animal.weight)
    def test_eat(self):
        # eat(2) should increase weight by the amount eaten (5 -> 7).
        self.cat_animal.eat(2)
        self.assertEqual(7, self.cat_animal.weight)
    def test_dying(self):
        # A young animal (age far below life_expectancy) should not be dying.
        self.assertFalse(self.cat_animal.dying())
# Allow running this module directly: python test_animal.py
if __name__ == '__main__':
    unittest.main()
| true |
dc73c81c6982899f4d23173003a857abd66652fb | Python | Highstaker/Book-progress-site | /myresite/myreprogress/admin.py | UTF-8 | 1,503 | 2.515625 | 3 | [] | no_license | from django.contrib import admin
from django.db.models import Max
from .models import BookPage, Book
class BookPageAdmin(admin.ModelAdmin):
    """Admin configuration for BookPage.

    Page numbers are assigned automatically (next number within the page's
    book) and kept consistent on deletion via Book.validatePageNumbers().
    """
    # fields NOT to show in Edit Page.
    list_display = ('__str__', 'page_number', 'page_name', 'storyboarded', 'sketched', 'colored', 'edited', 'proofread',)
    list_filter = ('book',)
    readonly_fields = ('page_number',)  # valid page number is assigned via overridden save() in model
    actions = ('delete_selected',)

    fieldsets = (
        (None, {
            'fields': ('book', 'page_name',
                       'storyboarded', 'sketched', 'colored', 'edited', 'proofread',
                       )
        }),
    )

    def save_model(self, request, obj, form, change):
        """Assign the next free page number on creation, then save."""
        if not change:
            # set the page number only on creation!
            max_page = BookPage.objects.filter(book=obj.book).aggregate(Max('page_number'))['page_number__max']
            # aggregate(Max(...)) yields None when the book has no pages yet;
            # default to 0 so the very first page becomes number 1 instead of
            # raising TypeError (None + 1).
            obj.page_number = (max_page or 0) + 1
        obj.save()  # the parent does only this

    def delete_model(self, request, obj):
        """Delete a single page and renumber the remaining pages of its book."""
        book = obj.book
        super(BookPageAdmin, self).delete_model(request, obj)
        book.validatePageNumbers()

    def delete_selected(self, request, obj):
        # kinda overriding default 'delete_selected' action to make it perform page validation afterwards
        books = {i.book for i in obj}
        obj.delete()
        for b in books:
            # perform validation for all books the pages of which were deleted
            b.validatePageNumbers()
class BookAdmin(admin.ModelAdmin):
    # Columns shown on the Book change-list page.
    list_display = ('book_name', 'book_slug',)
# Register both models so they appear in the Django admin site.
admin.site.register(BookPage, BookPageAdmin)
admin.site.register(Book, BookAdmin)
| true |
a6cacebb97fbba41724b70aa0b922c1dcda3ee2a | Python | EricMontague/Object-Oriented-Design-in-Python | /library_management_system/app/services/fine_service.py | UTF-8 | 2,319 | 2.71875 | 3 | [] | no_license | from app.models.fine import Fine
from uuid import uuid4
class _FineService:
    """In-memory registry of library fines, keyed by member id.

    Access the module-level ``fine_service`` singleton rather than
    constructing this class directly.
    """

    _FINE_PER_DAY_IN_CENTS = 25

    def __init__(self):
        # member_id -> list of Fine objects (paid and unpaid alike)
        self._fines_by_member_id = {}

    def get_amount_due(self, member_id):
        """Sum amount_due across all of the member's fines.

        Raises ValueError if the member has no fines recorded at all.
        """
        fines = self._fines_by_member_id.get(member_id, [])
        if not fines:
            raise ValueError("User does not currently have any outstanding fines.")
        return sum(fine.amount_due for fine in fines)

    def create_fine(self, book, member, amount=0):
        """Record a new fine for *book*; 0/omitted amount uses the daily rate."""
        fine_amount = amount or self._FINE_PER_DAY_IN_CENTS
        new_fine = Fine(uuid4(), fine_amount, book, member)
        # NOTE(review): when the member has no *unpaid* fines this resets the
        # whole list, discarding previously paid fines -- confirm intended.
        if not self.owes_money(member.user_id):
            self._fines_by_member_id[member.user_id] = []
        self._fines_by_member_id[member.user_id].append(new_fine)

    def increment_fine(self, book, member, amount=0):
        """Grow the member's fine for *book*; 0/omitted amount uses the daily rate."""
        if not self.owes_money(member.user_id):
            raise ValueError("User does not currently have any outstanding fines")
        step = amount or self._FINE_PER_DAY_IN_CENTS
        for fine in self._fines_by_member_id[member.user_id]:
            if fine.book == book:
                fine.increment(step)
                break

    def owes_money(self, member_id):
        """True when the member has at least one unpaid fine."""
        if member_id not in self._fines_by_member_id:
            return False
        return any(not fine.is_paid()
                   for fine in self._fines_by_member_id[member_id])

    def owes_money_for_book(self, book, member):
        """True when the member has an unpaid fine for this specific book."""
        if member.user_id not in self._fines_by_member_id:
            return False
        return any(fine.book == book and not fine.is_paid()
                   for fine in self._fines_by_member_id[member.user_id])

    def charge(self, amount, book, member):
        """Mark the member's fine for *book* as paid.

        NOTE(review): *amount* is accepted but never used -- the first
        matching fine is simply marked paid in full; confirm intended.
        """
        if not self.owes_money(member.user_id):
            raise ValueError("User does not currently have any outstanding fines")
        for fine in self._fines_by_member_id[member.user_id]:
            if fine.book == book:
                fine.pay()
                break

    def get_fine_per_day(self):
        """The flat daily fine, in cents."""
        return self._FINE_PER_DAY_IN_CENTS


fine_service = _FineService()
| true |
083b333e56c1b74bc507bd0c3e866c087500a9fb | Python | bad-bit/thames | /thames.py | UTF-8 | 7,442 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
#
#Thames - A software to scrape the internet to identify the themes of websites built on WordPress.
#Author - Vaibhav Choudhari (Twitter - badbit0)
from threading import Thread
from os import path
import requests
import json
import re
import time
import sys
import os
import argparse
#tic = time.perf_counter()
urls = []
themes = []
def main():
    """Parse CLI options, query the Serpstack SERP API for every dork and
    result page, append the raw JSON responses to serpstack_20.json, then
    hand off to scraper() for URL extraction.
    """
    parser = argparse.ArgumentParser(
        description='A software to scrape the web for WordPress websites and to identify their themes.',
        prog='thames.py',
        usage='%(prog)s --help <for help> -k <Serpstack API key> -d <comma seperated Google Dorks in double quotes> -f (OPTIONAL) <path to Google Dork files> -v (OPTIONAL) <verbosity level>')
    parser.add_argument("-k", "--key", help="Your API key as received from Serpstack", required=True, dest='key')
    parser.add_argument("-d", "--dork", help="Comma seperated Google Dorks. Eg: thames.py -d \"intitle: Wordpress, site:.wordpress.com\"", type=str)
    parser.add_argument("-f", "--file", help=" Full path of file listing your search terms / Google Dorks. Eg: thames.py -f C:\somedir\dorkfile.txt", dest='file')
    parser.add_argument("-v", "--verbose", help="Verbosity level", action='count', default=0, dest='verb')
    parser.add_argument("-p", "--page", help="Number of Google search result pages to scrape. Default value is set to 5 pages.", default=5, dest='page', type=int)
    args = parser.parse_args()
    # Remove artefacts of a previous run so responses are not appended twice.
    if sys.platform.startswith("win"):
        cwd = os.getcwd()
        #need to change directory to CWD for Windows systems where in Python is not in the environment variables.
        os.chdir(cwd)
        if os.path.isfile("serpstack_20.json"):
            os.remove("serpstack_20.json")
        if os.path.isfile("Output.txt"):
            os.remove("Output.txt")
    elif sys.platform.startswith("linux"):
        if os.path.isfile("serpstack_20.json"):
            os.remove("serpstack_20.json")
        if os.path.isfile("Output.txt"):
            os.remove("Output.txt")
    # Dorks come either from a file (one per line) or from -d (comma separated).
    if args.file:
        with open(args.file, "r") as dorkfile:
            query = dorkfile.read().splitlines()
    elif args.dork:
        query = args.dork.split(",")
    else:
        print("[--] Please input dorks as comma seperated values in double quotes or input a file containing a list of dorks.\nType thames.py --help for more info.")
        exit()
    url = "http://api.serpstack.com/search"
    api_key = args.key
    #query = ["intitle: Wordpress"] #, "Proudly powered by WordPress", "site:.wordpress.com"]
    num = "10"
    page = args.page
    print(r"""
 ___ _ __
  | |_| /\ |\/| |_ (_
  | | | /--\ | | |__ _) . py v1.0
                  - badbit0
  """)
    print("[+] Execution began!\nScraping "+str(page)+" pages of Google for the given dork(s).\n")
    if page >= 20:
        print("""[~] Please note that the number of search results given by Google usually do not exceed 150. Thus, increasing
        the number of pages beyond 20 doesn't really increase the number of scraped URLs. The problem is not with the tool, that is just how
        Google works. :D""")
    # One API request per (dork, result page); the raw JSON bodies are
    # appended verbatim and split back apart later in scraper().
    for each_query in query:
        page = args.page
        for page_no in range(1, page+1):
            page_num = str(page_no)
            # NOTE(review): the dork is concatenated into the URL without
            # percent-encoding; spaces/special characters rely on the HTTP
            # library's lenient handling -- consider requests' params= instead.
            request = url+"?"+"access_key="+api_key+"&"+"query="+each_query+"&"+"num="+num+"&"+"page="+page_num
            if args.verb == 1:
                print("[+] Scraping URLs from Google results from page: "+page_num+" for the dork: "+each_query)
            api_request = requests.get(request)
            response = api_request.text
            with open("serpstack_20.json", "a") as file:
                file.write(response)
    print("\n")
    scraper(args.verb)
def scraper(verb_value):
    """Parse the raw Serpstack responses in serpstack_20.json, collect every
    organic-result URL into the module-level ``urls`` list, then run
    locator() to probe each site.

    :param verb_value: verbosity level from the -v flag, forwarded to locator.
    """
    with open("serpstack_20.json", "r") as file:
        jArray = file.read()
    # Each API call appended one JSON object; join them into one JSON array.
    # NOTE(review): this assumes the literal "}{" never occurs inside a
    # response string value -- confirm against the Serpstack payload format.
    newJArray = jArray.replace("}{", "},{")
    json_data = json.loads(f'[{newJArray}]')
    try:
        for response_obj in json_data:
            for results in response_obj['organic_results']:
                urls.append(results['url'])
    except KeyError:
        # Serpstack omits 'organic_results' when the quota is exhausted or a
        # query failed; treat the missing key as fatal.
        print("[-] Your API usage limit has been exhausted on https://www.serpstack.com. If not, please try some other dorks.")
        exit()
    print("[+] Total URLs scraped = " + str(len(urls)))
    print("\n")
    # BUG FIX: the original code did `Thread(target=locator(verb_value))`,
    # which calls locator() immediately in this thread and passes its return
    # value (None) to Thread -- so the started thread did nothing. Pass the
    # callable and its arguments separately, and join so the work is still
    # complete before scraper() returns.
    t = Thread(target=locator, args=(verb_value,))
    t.start()
    t.join()
def locator(verb_value):
    """Visit every scraped URL, detect WordPress installs, pull the theme
    slug out of each page's source, and write the findings to Output.txt.

    :param verb_value: verbosity level from the -v flag
                       (1 = list themes found, 2 = per-site CMS details).
    """
    count_url = 0
    count_theme = 0
    print("[+] Attempting to extract themes from scraped websites.\nThis should take time.")
    for each_url in urls:
        #stripping TLD logic goes here.
        count_url += 1
        try:
            req = requests.get(each_url, timeout=30)
            # NOTE(review): requests follows redirects by default, so a 302
            # status is rarely observed here -- confirm this branch is needed.
            if req.status_code == 302:
                print(" [*]The url: "+each_url+" was redirected." )
            source = req.text
            # WordPress pages reference wp-content / wp-includes assets.
            if "wp-content" in source or "wp-includes" in source:
                if verb_value == 2:
                    print("[+] The CMS for the website "+each_url+" is WordPress")
                try:
                    finder = re.search(r"themes/[a-zA-Z0-9]+|theme\\/[a-zA-Z0-9]+|themeSlug\":\"[a-zA-Z0-9-]+\\/[a-zA-Z0-9-]+", source)
                    # finder is None when no theme path matched; .group() then
                    # raises AttributeError, which lands in the handler below.
                    theme_match = finder.group()
                    themes.append("The theme for the domain: "+each_url+" is - "+theme_match)
                except Exception:
                    if verb_value == 2:
                        print("[-] Theme not found for - "+each_url+"\nThe CMS for the website might not be WordPress\n")
            else:
                if verb_value == 2:
                    print("[-] The CMS for the website "+each_url+" is not WordPress")
        except Exception:
            # Connection errors, timeouts, etc. -- skip to the next URL.
            print("[-] URL - "+each_url+" seems unreachable, moving to next URL \n\n")
    #print("\n\n[*] Total URLs listed = "+str(count_url))
    # A new list for only unique hits of "/themes/<theme_name>" from each
    # website's source, as a website can have multiple instances of
    # "/themes/<theme_name>" in its source.
    to_store = []
    locked = []
    not_found_list = []
    notctr = 0
    if verb_value == 1:
        print("\n\n[+] Printing themes found: \n")
    for all_themes in themes:
        count_theme += 1
        # The [themes] entries contain regex junk; the replace chain below
        # strips the path prefixes and leaves only the theme names.
        final = all_themes.replace("themes/", "").replace(r"theme\/", "").replace("themeSlug\":\"pub\\/", "").replace("themeSlug\":\"premium\\/", "")
        if verb_value == 1:
            print(final)
        to_store.append(final)
    for x in to_store:
        with open("Output.txt", "a") as result:
            result.write(x+"\n")
    print("\n[*] Total themes found = "+str(count_theme)+"\nThe result has been stored in \"Output.txt\" file. Please copy the output to some other destination if required. The file will be deleted in the next execution.")
    if verb_value == 1:
        for stored_urls in themes:
            # Extract just the URL out of each "The theme for the domain: <URL> is - <theme>" string.
            lock = re.search(r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", stored_urls)
            p = lock.group()
            locked.append(p)
        print("\n\n")
        for not_found in urls:
            if not_found not in locked:
                notctr += 1
                nf = "[*] The theme was not found for the URL: "+not_found
                not_found_list.append(nf)
    print("[*] Theme couldn't be found for "+str(notctr)+" websites.\nThe websites might not be using WordPress.\n")
    if verb_value == 2:
        print("The websites are listed below:\n")
        for all_webs in not_found_list:
            print(all_webs)


# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
| true |
cf033e72114a10f6482c13dc4496548b37550497 | Python | djputta/FYP | /PlayerClient.py | UTF-8 | 5,126 | 2.875 | 3 | [] | no_license | from Player import HumanPlayer, DumbAIPlayer, SLDumbAIPlayer, LDumbAIPlayer, LMiniMax, SLMiniMax, RandomAI
import socket
import pickle
from Bet import Bet
class PlayerClient():
players = {0: HumanPlayer, 1: DumbAIPlayer, 2: SLDumbAIPlayer,
3: LDumbAIPlayer, 4: LMiniMax, 5: SLMiniMax, 6: RandomAI}
def __init__(self, type=0, host='127.0.0.1', port=65445):
self.HOST = host # Standard loopback interface address (localhost)
self.PORT = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.HOST, self.PORT))
self.all_bets = []
self.bet_history = dict(zip(range(2, 7), [0 for _ in range(5)]))
self.bot = self.players[type]()
self.out = False
self.game_over = False
self.num_dice = 0
self.dice_list = []
self.called = False
self.opp_called = False
self.went_previously = False
def receive_info(self):
'''
Receives the number of dice and your dice for the round.
'''
self.num_dice = pickle.loads(self.sock.recv(131072))
self.sock.sendall(pickle.dumps("OK"))
print("There are " + str(self.num_dice) + " dice left in the game.")
print()
old_dice = self.dice_list
self.dice_list = pickle.loads(self.sock.recv(131072))
self.sock.sendall(pickle.dumps("OK"))
if self.called and len(old_dice) > len(self.dice_list):
print("You have called unsuccessfully")
print()
pass
elif self.called and len(old_dice) == len(self.dice_list):
print("You have called successfully")
print()
elif self.opp_called and len(old_dice) > len(self.dice_list):
print("Somebody has called successfully against you")
print()
elif self.opp_called and len(old_dice) == len(self.dice_list) and self.went_previously:
print("Somebody has called unsuccessfully against you")
print()
# print("Dice is:", self.dice_list)
self.bot.dice_list = self.dice_list
def place_bet(self, prob, bluff):
'''
Tell's whatever bet you are using to place a bet and to send it.
'''
last_bet = None if len(self.all_bets) == 0 else self.all_bets[-1]
bet = self.bot.place_bet(self.num_dice, self.bet_history, prob, bluff, last_bet)
if isinstance(bet, str): # If you have called
self.sock.sendall(pickle.dumps(str(bet)))
return True
self.all_bets.append(bet)
self.bet_history[bet.dice_value] = bet.num_of_dice
self.sock.sendall(pickle.dumps(str(bet)))
return False
def play_round(self, prob=.1, bluff=.1):
'''
prob: cutoff probability for deciding to place or call a bet
bluff: probability to place a random bet
Play a round of Perudo
'''
if not self.out:
self.receive_info()
while True:
response = pickle.loads(self.sock.recv(24))
self.sock.sendall(pickle.dumps("OK"))
if response == 'S':
self.called = self.place_bet(prob, bluff)
self.went_previously = True
if self.called:
self.all_bets = []
self.bet_history = dict(zip(range(2, 7), [0 for _ in range(5)]))
return
elif response == 'call':
print("Someone has called")
self.opp_called = True
self.all_bets = []
self.bet_history = dict(zip(range(2, 7), [0 for _ in range(5)]))
return
else:
self.opp_called = False
self.went_previously = False
print("The previous bet is: ", end='')
response = response.split()
bet = Bet(int(response[1]), int(response[0]))
print(repr(bet))
print()
self.all_bets.append(bet)
self.bet_history[bet.dice_value] = bet.num_of_dice
else:
print("You are out")
pass
def check_out(self):
check = pickle.loads(self.sock.recv(16384))
self.sock.sendall(pickle.dumps("OK"))
self.out = check
def check_game_over(self):
self.game_over = pickle.loads(self.sock.recv(16384))
self.sock.sendall(pickle.dumps("OK"))
def check_won(self):
won = pickle.loads(self.sock.recv(16384))
self.sock.sendall(pickle.dumps("OK"))
return won
def reset(self):
self.all_bets = []
self.bet_history = dict(zip(range(2, 7), [0 for _ in range(5)]))
self.out = False
self.game_over = False
self.opp_called = False
self.called = False
def num_games(self):
_num_games = pickle.loads(self.sock.recv(1024))
self.sock.sendall(pickle.dumps("OK"))
return _num_games
| true |
ee4cd8ded2dba9c34c59c93b2bc943e39d5d1e98 | Python | itrowa/arsenal | /CS61A/code/ch2-_sets.py | UTF-8 | 5,377 | 4.09375 | 4 | [] | no_license | # 1. 利用linked_list实现无序版的set;
# 2. 利用linked_list实现有版的set;
# 3. 利用二叉树实现有序版的set
# 最后需要分析一下3种实现方式的算法复杂度.
# 链表系统.
from _linked_list_object import *
# tree系统
from _tree_obj import *
# ################################
# BinaryTree 为二叉树set做准备.
# ################################
class BinaryTree(Tree):
"""
在Tree的基础上定义二叉树.
只有左右两只的Tree.叫做二叉树.
默认每个树都有分支. 没有实际元素的就用empty表示.
"""
empty = Tree(None)
empty.is_empty = True
def __init__(self, entry, left = empty, right = empty):
for branch in (left, right):
assert isinstance(branch, BinaryTree) or branch.is_empty
Tree.__init__(self, entry, (left, right))
self.is_empty = False
@property
def left(self):
return self.branches[0]
@property
def right(self):
return self.branches[1]
def is_leaf(self):
return self.left.is_empty and self.right.is_empty
def __repr__(self):
if self.is_leaf():
return 'Bin({0})'.format(self.entry)
elif self.right.is_empty:
return'Bin({0},{1})'.format(self.entry, self.left)
else:
# @?@ if else是什么语法....
left = 'Bin.empty' if self.left.is_empty else repr(self.left)
return 'Bin{0}, {1}, {2}'.format(self.entry, left, self.right)
# ################################
# 辅助函数
# ################################
def empty(s):
"""
测试链表s是不是空的
"""
return s is Link.empty
# ################################
# UNorderd set implementationa using linked list object
# ################################
def set_contains(s, v):
"""
如果一个set s 包含一个元素v则返回True
"""
if empty(s):
return False
elif s.first == v:
return True
else:
return set_contains(s.rest, v)
def adjoin_set(s, v):
"""
返回一个set, 里面的元素包含了所有的s的元素和元素v.
(把元素插入到集)
"""
# 利用已经造好的轮子
if set_contains(s, v):
return s
else:
return Link(v, s)
def intersect_set(set1, set2):
"""
Intersection of set1 and set2.
返回的集合包含了set1 和 set2的所有元素.
"""
# 利用filter_link, 找出set1的元素在set2的部分.
in_set2 = lambda v: set_contains(set2, v)
return filter_link(in_set2, set1)
def union_set(set1, set2):
"""
返回set1 和 set2的并集.
"""
# 找出set1 不在set2的部分, 这部分再和set2合并即可.
not_in_set2 = lambda v: not set_contains(set2, v)
set1_not_set2 = filter_link(not_in_set2, set1)
return extend_link(set1_not_set2, set2)
# ################################
# orderd set implementationa using linked list object
# ################################
# 假设元素是从小到大排列的.
def set_contains2(s, v):
"""
如果一个set s 包含一个元素v则返回True
"""
if empty(s) or s.first > v:
return False
elif s.first == v:
return True
else:
return set_contains2(s.rest, v)
def intersect_set2(set1, set2):
"""
Intersection of set1 and set2.
返回的集合包含了set1 和 set2的所有元素.
"""
if empty(set1) or empty(set2):
return Link.empty
else:
e1, e2 = set1.first, set2.first
if e1 == e2:
# e1保留在我们要返回的列表中. 剩下的递归处理.
return Link(e1, intersect_set2(set1.rest, set2.rest))
if e1 < e2:
# 抛弃掉e1, 因为e1和e2并不相等. 而且e1不可能再和set2的其它元素相等了.
return Link(e1, intersect_set2(set1.rest, set2))
if e1 > e2:
# 抛弃掉e2
return Link(e1, intersect_set2(set1.rest, set2))
# 利用二叉搜索树实现集合..
def set_contains3(s, v):
if s.is_empty:
return False
elif s.entry == v:
return True
elif s.entry < v:
return set_contains3(s.right, v)
elif s.entry > v:
return set_contains3(s.left, v)
def adjoin_set3(s, v):
if s.is_empty:
return Binerytree(v)
elif s.entry == v:
return s
elif s.entry < v:
# 构建二叉树, 用新的左支不变, 右支用新的
return BineryTree(s.entry, s.left, adjoin_set3(s.right, v))
elif s.entry > v:
return BineryTree(s.entry, adjoin_set3(s.left, v), s.right)
def big_tree(left, right):
"""
返回一个二叉搜索树, 元素是left到right的中间值.
>>> big_tree(0, 12)
Bin(6, Bin(2, Bin(0), Bin(4)), Bin(10, Bin(8), Bin(12)))
"""
if left > right:
return BinaryTree.empty
elif left == right:
return BinaryTree(left)
split = left + (right - left) // 2
return BinaryTree(split, big_tree(left, split - 2), big_tree(split + 2, right))
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# abstraction barrier
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ################################
# Test
# ################################
s = Link(1, Link(2, Link(3)))
set_contains(s, 3)
set_contains2(s, 3)
| true |
48309487710ac82b88a01f2174f5bb6d16d9e9be | Python | yuly3/atcoder | /ABC/ABC176/A.py | UTF-8 | 186 | 2.5625 | 3 | [] | no_license | import sys
sys.setrecursionlimit(10 ** 7)
rl = sys.stdin.readline
def solve():
N, X, T = map(int, rl().split())
print(T * -(-N // X))
if __name__ == '__main__':
solve()
| true |
66e3278e6f8fd36eace53be8fe1341103678f542 | Python | avanto85/openbci-stream | /openbci_stream/acquisition/consumer.py | UTF-8 | 4,734 | 2.8125 | 3 | [
"BSD-2-Clause"
] | permissive | """
================
OpenBCI Consumer
================
"""
import pickle
import logging
from .cyton import Cyton
from typing import Tuple, Optional, Union, Literal, List
from kafka import KafkaConsumer
# Custom type var
MODE = Literal['serial', 'wifi', None]
DAISY = Literal['auto', True, False]
########################################################################
class OpenBCIConsumer:
    """Kafka consumer for reading streamed data.
    This class can start the acquisition if the respective parameters are
    specified. To just connect with an existing stream only the **host**
    argument is needed; the other ones are used to start a stream.
    Connect with an existing stream:
    >>> with OpenBCIConsumer() as stream:
            for message in stream:
                ...
    Start serial acquisition, create a stream and connect with it:
    >>> with OpenBCIConsumer('serial', '/dev/ttyUSB0') as stream:
            for message in stream:
                ...
    Connect with a remote existing stream:
    >>> with OpenBCIConsumer(host='192.168.1.113') as stream:
            for message in stream:
                ...
    For examples and descriptions refer to the documentation:
    `Controlled execution with OpenBCIConsumer() <../notebooks/03-data_acquisition.html#Controlled-execution-with-OpenBCIConsumer()-class>`_
    Parameters
    ----------
    mode
        If specified, will try to start streaming with this connection mode.
    endpoint
        Serial port for RFduino or IP address for WiFi module.
    daisy
        Daisy board can be detected on runtime or declared specifically.
    montage
        A list means consecutive channels e.g. `['Fp1', 'Fp2', 'F3', 'Fz',
        'F4']` and a dictionary means specific channels `{1: 'Fp1', 2: 'Fp2',
        3: 'F3', 4: 'Fz', 5: 'F4'}`.
    streaming_package_size
        The streamer will try to send packages of this size; this is NOT the
        sampling rate for data acquisition.
    host
        IP address of the server that has the OpenBCI board attached; by
        default it is assumed to be the machine this is executing on
        (`localhost`).
    topics
        List of topics to listen to. Defaults to
        ``['eeg', 'aux', 'marker', 'annotation']``.
    auto_start
        If `mode` and `endpoint` are passed, start the stream automatically.
    """
    # NOTE(review): relies on module-level ``pickle`` and ``logging`` imports
    # that are not visible in this excerpt -- confirm they exist at file top.
    # ----------------------------------------------------------------------
    def __init__(self, mode: MODE = None, endpoint: Optional[str] = None,
                 daisy: DAISY = 'auto',
                 montage: Optional[Union[list, dict]] = None,
                 streaming_package_size: Optional[int] = 250,
                 host: Optional[str] = 'localhost',
                 topics: Optional[List[str]] = None,
                 auto_start: Optional[bool] = True,
                 *args,
                 **kwargs,
                 ) -> None:
        """"""
        self.bootstrap_servers = [f'{host}:9092']
        # Bug fix: ``topics`` used to be a mutable default argument shared by
        # every instance; fall back to the full topic list here instead.
        if topics is None:
            topics = ['eeg', 'aux', 'marker', 'annotation']
        self.topics = topics
        self.auto_start = auto_start
        if mode:
            # Only instantiate the board driver when this consumer is also
            # responsible for starting the acquisition.
            self.openbci = Cyton(mode=mode,
                                 endpoint=endpoint, host=host,
                                 daisy=daisy,
                                 capture_stream=False,
                                 montage=montage,
                                 streaming_package_size=streaming_package_size,
                                 *args,
                                 **kwargs,
                                 )
    # ----------------------------------------------------------------------
    def __enter__(self) -> Tuple[KafkaConsumer, Optional[Cyton]]:
        """Start the stream (when owned) and create the Kafka consumer."""
        if hasattr(self, 'openbci') and self.auto_start:
            self.openbci.start_stream()
        self.consumer = KafkaConsumer(bootstrap_servers=self.bootstrap_servers,
                                      value_deserializer=pickle.loads,
                                      auto_offset_reset='latest',
                                      )
        self.consumer.subscribe(self.topics)
        if hasattr(self, 'openbci'):
            return self.consumer, self.openbci
        else:
            return self.consumer
    # ----------------------------------------------------------------------
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Stop the stream (when owned) and close the consumer.

        Any exception raised inside the ``with`` block is logged but not
        suppressed (this method returns None, i.e. a falsy value).
        """
        if hasattr(self, 'openbci'):
            self.openbci.stop_stream()
        self.consumer.close()
        if exc_type:
            logging.warning(exc_type)
        if exc_val:
            logging.warning(exc_val)
        if exc_tb:
            logging.warning(exc_tb)
| true |
b45e663b1e0c93b19733251e65e185e8e041f2eb | Python | EDEYUAN/notes_pythonForInformatics | /Q_A_For_Chapter3.py | UTF-8 | 1,705 | 4.03125 | 4 | [] | no_license | # _*_ coding:utf-8 _*_
# __Author:edeyuan
# Version: 1.0
# Time:2018/03/17
#################### Chapter 3 ###########################
# Q3.1 重写薪水计算公式,如果员工工作时间超过40小时,按平常薪水的1.5倍支付
print 'Q3.1'
hours = raw_input('Enter Hours:')
rate = raw_input('Enter Rate:')
if hours > 40:
pay = (float(hours) - 40) * 1.5 * float(rate) + \
40 * float(rate)
else:
pay = float(hours) * float(rate)
print 'Pay:%s' % (pay)
# Q3.2 运用try和except重写支付程序,让程序可以正常处理非数字输入的情况,如果是非数字输入,打印消息并退出程序
print 'Q3.2'
errorFlag = 0
hours = raw_input('Enter Hours:')
rate = raw_input('Enter Rate:')
try:
workTime = float(hours)
payRate = float(rate)
except:
print 'Error: please enter numeric input'
errorFlag = 1
if not errorFlag:
if workTime > 40:
pay = (float(workTime) - 40) * 1.5 * float(payRate) + \
40 * float(payRate)
else:
pay = float(workTime) * float(payRate)
print 'Pay:%s' % (pay)
# Q3.3编写一个程序,提示分数在0.0和1.0之间。如果分数超出这个范围则打印出错误。如果分数在0.0和1.0之间,判读正确的Grade
print 'Q3.3'
errorFlag = 0
try:
score = float(raw_input('Enter score lie in range [0.01 1]:'))
except:
print('Bad score\n Error: please enter numeric input\n')
errorFlag = 1
if not errorFlag:
if score > 1:
print('Beyond upper limitation')
elif score >= 0.9:
print 'A'
elif score >= 0.8:
print 'B'
elif score >= 0.7:
print 'C'
elif score >= 0.6:
print 'D'
else:
print 'F' | true |
97c3044624bd570ea9b2fbab00acd76c8b01f926 | Python | danoliveiradev/PythonExercicios | /ex106.py | UTF-8 | 1,256 | 3.8125 | 4 | [
"MIT"
] | permissive | from time import sleep
# ANSI escape sequences used by titulo()/pyHelp() for coloured output.
c = ('\033[m', # 0 - reset (no colour)
     '\033[0;97;41m', # 1 - white on red
     '\033[0;97;42m', # 2 - white on green
     '\033[0;97;43m', # 3 - white on yellow
     '\033[0;97;44m', # 4 - white on blue
     '\033[0;97;45m', # 5 - white on purple
     '\033[1;30;107m' # 6 - black on white
     )
def titulo(txt, cor=0):
    """
    -> Print *txt* as a boxed, colourised title.
    :param txt: text or title to display
    :param cor: index into the colour tuple ``c`` (0 = no colour)
    :return: None
    """
    largura = len(txt) + 2
    linha = '-' * largura
    # Switch the terminal colour, draw the box, then reset the colour.
    print(c[cor], end='')
    print(linha)
    print(f'{txt:^{largura}}')
    print(linha)
    print(c[0], end='')
    sleep(1)
def pyHelp(com):
    """
    Show the interactive Python help text for *com* with coloured formatting.
    :param com: name of the function or library to look up
    :return: None
    """
    titulo(f'ACESSANDO O MANUAL DO COMANDO \'{com}\'', 2)
    # Render the built-in help() output in the "white" colour scheme.
    print(c[6], end='')
    help(com)
    print(c[0], end='')
    sleep(1)
# Main program: repeatedly prompt for a name and show its help text until
# the user types 'fim'.
while True:
    titulo('SISTEMA DE AJUDA PyHELP', 4)
    ajuda = input('Função ou Biblioteca: ').lower().strip()
    # Bug fix: the old ``ajuda in 'fim'`` did substring matching, so an empty
    # input (and 'f', 'i', 'm', 'fi', ...) also terminated the program.
    if ajuda == 'fim':
        titulo('ATÉ LOGO!', 1)
        break
    else:
        pyHelp(ajuda)
| true |
7283cfc975183c68c7c1de870f224230b7f26806 | Python | kevin-fang/leetcode | /1414 Find the Minimum Number of Fibonacci Numbers Whose Sum Is K.py | UTF-8 | 274 | 2.875 | 3 | [
"MIT"
] | permissive | class Solution:
def findMinFibonacciNumbers(self, k: int) -> int:
if k <= 1:
return k
a,b = 1,1
while b <= k:
b = a+b
a = b-a
return 1 + self.findMinFibonacciNumbers(k-a)
| true |
aa3cbc65f5c4f4c7df26bcae4e256abc166afed7 | Python | LowWeiLin/asteroids | /asteroids_game.py | UTF-8 | 6,560 | 2.984375 | 3 | [] | no_license | """
Asteroids game
"""
import numpy as np
from scipy.spatial import distance
class AsteroidsGame:
    """
    Asteroids game state and simulation.

    The world is toroidal: positions wrap at ``borders``. Every game object
    lives in a set of parallel lists (position, velocity, radius, rotation,
    type and age in steps) that are always indexed together; players have
    additional entries in ``player_alive`` and ``player_cooldown``.
    """

    def __init__(self):
        """Set up configuration, empty state, one player and four asteroids."""
        # Configs
        self.borders = np.array((800, 800))  # Screen wraps around at borders
        self.asteroids_speed = 5
        self.asteroids_max_radius = 80
        self.asteroids_min_radius = 20
        self.asteroids_split_radius_ratio = 0.5
        self.bullet_radius = 5
        self.bullet_speed = 10
        self.bullet_lifespan = 50          # bullets disappear after this many steps
        self.player_radius = 10
        self.player_acceleration = 0.5
        self.player_rotation_speed = 15    # degrees per step
        self.player_max_speed = 7
        self.player_bullet_cooldown = 20   # steps between shots
        # State: parallel lists, one entry per game object.
        self.steps = 0
        self.object_position = []
        self.object_velocity = []
        self.object_radius = []
        self.object_rotation = []
        self.object_type = []
        self.object_steps = []
        self.player_alive = []
        self.player_cooldown = []
        # Initialize
        self.add_player()
        for _ in range(4):
            self.add_asteroid()

    def move_all(self):
        """
        Move every object by its velocity, wrapping around the borders.
        """
        self.object_position = (
            (np.array(self.object_position) + np.array(self.object_velocity))
            % self.borders
        ).tolist()

    def add_player(self, position=None):
        """
        Add a player at *position* (random when None).
        """
        # Bug fix: test against None explicitly -- ``position if position``
        # raises "truth value is ambiguous" for numpy arrays and silently
        # discards a legitimate (0, 0) position.
        self.object_position.append(
            position if position is not None else np.random.rand(2) * self.borders
        )
        self.object_velocity.append(np.zeros(2))
        self.object_radius.append(self.player_radius)
        self.object_rotation.append(0 * np.random.rand() * 360)
        self.object_type.append("player")
        self.object_steps.append(0)
        self.player_alive.append(1)
        self.player_cooldown.append(0)

    def add_asteroid(self, radius=None, position=None, velocity=None):
        """
        Add an asteroid; radius, position and velocity are randomised or
        defaulted when not given.
        """
        self.object_position.append(
            position if position is not None else np.random.rand(2) * self.borders
        )
        self.object_velocity.append(
            velocity if velocity is not None
            else np.random.rand(2) * self.asteroids_speed
        )
        self.object_radius.append(
            radius if radius is not None else self.asteroids_max_radius
        )
        self.object_rotation.append(0)
        self.object_type.append("asteroid")
        self.object_steps.append(0)

    def add_bullet(self, position, velocity):
        """
        Add a bullet at *position* moving with *velocity*.
        """
        self.object_position.append(position)
        self.object_velocity.append(velocity)
        self.object_radius.append(self.bullet_radius)
        self.object_rotation.append(0)
        self.object_type.append("bullet")
        self.object_steps.append(0)

    def remove_bullets(self):
        """
        Remove bullets that exceeded their lifespan.
        """
        expired = [
            i
            for i, kind in enumerate(self.object_type)
            if kind == "bullet" and self.object_steps[i] > self.bullet_lifespan
        ]
        self.remove_objects(expired)

    def remove_objects(self, indexes):
        """
        Remove the objects at *indexes* from all per-object lists.

        NOTE(review): ``player_alive``/``player_cooldown`` are not trimmed
        here, so removing a player would desynchronise them -- currently
        only bullets are ever removed.
        """
        fields = [
            "object_position",
            "object_velocity",
            "object_radius",
            "object_rotation",
            "object_type",
            "object_steps",
        ]
        for field in fields:
            setattr(
                self,
                field,
                [x for i, x in enumerate(getattr(self, field)) if i not in indexes],
            )

    def apply_actions(self, actions):
        """
        Apply per-player action dicts (``rotate_left``, ``rotate_right``,
        ``accelerate_forward``, ``shoot``) to the matching player objects.
        """
        player_object_indexes = [
            i for i, kind in enumerate(self.object_type) if kind == "player"
        ]
        for player_index, player_actions in enumerate(actions):
            if not self.player_alive[player_index]:
                continue
            idx = player_object_indexes[player_index]
            for action, value in player_actions.items():
                if action == "rotate_left" and value:
                    self.object_rotation[idx] += self.player_rotation_speed
                    self.object_rotation[idx] %= 360
                if action == "rotate_right" and value:
                    self.object_rotation[idx] -= self.player_rotation_speed
                    self.object_rotation[idx] %= 360
                if action == "accelerate_forward" and value:
                    rot = np.radians(self.object_rotation[idx])
                    self.object_velocity[idx] += self.player_acceleration * np.array(
                        [np.sin(rot), np.cos(rot)]
                    )
                    # Clamp to the player's maximum speed.
                    speed = np.linalg.norm(self.object_velocity[idx])
                    if speed > self.player_max_speed:
                        self.object_velocity[idx] = (
                            self.object_velocity[idx] / speed * self.player_max_speed
                        )
                if action == "shoot" and value:
                    if self.player_cooldown[player_index] == 0:
                        rot = np.radians(self.object_rotation[idx])
                        self.add_bullet(
                            self.object_position[idx],
                            self.bullet_speed * np.array([np.sin(rot), np.cos(rot)]),
                        )
                        self.player_cooldown[player_index] = self.player_bullet_cooldown

    def step(self, actions):
        """
        Advance the game one step: age objects, tick down cooldowns, apply
        *actions*, move everything, cull expired bullets, check collisions.
        """
        # Bug fix: removed the dead ``if not True: return`` placeholder that
        # was left over from an unfinished liveness check.
        self.steps += 1
        self.object_steps = (np.array(self.object_steps) + 1).tolist()
        self.player_cooldown = (np.array(self.player_cooldown) - 1).clip(0).tolist()
        # Apply actions
        self.apply_actions(actions)
        # Move objects
        self.move_all()
        self.remove_bullets()
        # Check collisions
        self.collide()

    def collide(self):
        """
        Count pairwise collisions.

        Returns
        -------
        numpy.ndarray
            For each object, the number of other objects it overlaps with.
        """
        dist = distance.cdist(self.object_position, self.object_position, "euclidean")
        radii = np.asarray(self.object_radius, dtype=float)
        # Bug fix: two circles overlap when their centre distance is at most
        # the *sum* of their radii; the old code only subtracted one object's
        # radius. Debug print statements removed.
        collision = (dist <= radii[:, None] + radii[None, :]) * 1
        np.fill_diagonal(collision, 0)
        return np.sum(collision, axis=1)
if __name__ == "__main__":
    print("hello")
    # Smoke run: build a game and advance it a single step with no actions.
    game = AsteroidsGame()
    for _ in range(1):
        game.step({})
| true |
99cfdd46a7c07423dbed3c9989e88a4f64d23523 | Python | rebeccabilbro/rebeccabilbro.github.io | /_drafts/mushroom_tutorial_reboot.py | UTF-8 | 9,734 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python
# coding: utf-8
# # Model Selection Tutorial with Yellowbrick
#
# In this tutorial, we are going to look at scores for a variety of [scikit-learn](http://scikit-learn.org) models and compare them using visual diagnostic tools from [Yellowbrick](http://www.scikit-yb.org) in order to select the best model for our data.
#
#
# ## The Model Selection Triple
#
# Discussions of machine learning are frequently characterized by a singular focus on model selection. Be it logistic regression, random forests, Bayesian methods, or artificial neural networks, machine learning practitioners are often quick to express their preference. The reason for this is mostly historical. Though modern third-party machine learning libraries have made the deployment of multiple models appear nearly trivial, traditionally the application and tuning of even one of these algorithms required many years of study. As a result, machine learning practitioners tended to have strong preferences for particular (and likely more familiar) models over others.
#
# However, model selection is a bit more nuanced than simply picking the "right" or "wrong" algorithm. In practice, the workflow includes:
#
# 1. selecting and/or engineering the smallest and most predictive feature set
# 2. choosing a set of algorithms from a model family, and
# 3. tuning the algorithm hyperparameters to optimize performance.
#
# The **model selection triple** was first described in a 2015 [SIGMOD](http://cseweb.ucsd.edu/~arunkk/vision/SIGMODRecord15.pdf) paper by Kumar et al. In their paper, which concerns the development of next-generation database systems built to anticipate predictive modeling, the authors cogently express that such systems are badly needed due to the highly experimental nature of machine learning in practice. "Model selection," they explain, "is iterative and exploratory because the space of [model selection triples] is usually infinite, and it is generally impossible for analysts to know a priori which [combination] will yield satisfactory accuracy and/or insights."
#
# Recently, much of this workflow has been automated through grid search methods, standardized APIs, and GUI-based applications. In practice, however, human intuition and guidance can more effectively hone in on quality models than exhaustive search. By visualizing the model selection process, data scientists can steer towards final, explainable models and avoid pitfalls and traps.
#
# The Yellowbrick library is a diagnostic visualization platform for machine learning that allows data scientists to steer the model selection process. Yellowbrick extends the scikit-learn API with a new core object: the Visualizer. Visualizers allow visual models to be fit and transformed as part of the scikit-learn `Pipeline` process, providing visual diagnostics throughout the transformation of high dimensional data.
#
#
# ## About the Data
#
# This tutorial uses the mushrooms data from the Yellowbrick :doc:`api/datasets` module. Our objective is to predict if a mushroom is poisonous or edible based on its characteristics.
#
# _NOTE: The YB version of the mushrooms data differs from the mushroom dataset from the [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/). The Yellowbrick version has been deliberately modified to make modeling a bit more of a challenge._
#
# The data include descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family. Each species was identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended (this latter class was combined with the poisonous one).
#
# Our file, "agaricus-lepiota.txt," contains information for 3 nominally valued attributes and a target value from 8124 instances of mushrooms (4208 edible, 3916 poisonous).
#
# Let's load the data:
# In[1]:
from yellowbrick.datasets import load_mushroom
# Load the (deliberately modified) mushroom dataset: X has three nominal
# features per sample, y is the edible/poisonous target.
X, y = load_mushroom()
print(X[:5]) # inspect the first five rows
# ## Feature Extraction
#
# Our data, including the target, is categorical. We will need to change these values to numeric ones for machine learning. In order to extract this from the dataset, we'll have to use scikit-learn transformers to transform our input dataset into something that can be fit to a model. Luckily, scikit-learn does provide transformers for converting categorical labels into numeric integers: [`sklearn.preprocessing.LabelEncoder`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) and [`sklearn.preprocessing.OneHotEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html).
#
# We'll use a combination of scikit-learn's `Pipeline` object ([here's great post on using pipelines](http://zacstewart.com/2014/08/05/pipelines-of-featureunions-of-pipelines.html) by [Zac Stewart](https://twitter.com/zacstewart)), `OneHotEncoder`, and `LabelEncoder`:
#
# ```python
# from sklearn.pipeline import Pipeline
# from sklearn.preprocessing import OneHotEncoder, LabelEncoder
#
#
# y = LabelEncoder().fit_transform(y) # Label-encode targets before modeling
# model = Pipeline([
# ('one_hot_encoder', OneHotEncoder()), # One-hot encode columns before modeling
# ('estimator', estimator)
# ])
# ```
# ## Modeling and Evaluation
#
# ### Common metrics for evaluating classifiers
#
# **Precision** is the number of correct positive results divided by the number of all positive results (e.g. _How many of the mushrooms we predicted would be edible actually were?_).
#
# **Recall** is the number of correct positive results divided by the number of positive results that should have been returned (e.g. _How many of the mushrooms that were poisonous did we accurately predict were poisonous?_).
#
# The **F1 score** is a measure of a test's accuracy. It considers both the precision and the recall of the test to compute the score. The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst at 0.
#
# precision = true positives / (true positives + false positives)
#
# recall = true positives / (false negatives + true positives)
#
# F1 score = 2 * ((precision * recall) / (precision + recall))
#
#
# Now we're ready to make some predictions!
#
# Let's build a way to evaluate multiple estimators — first using traditional numeric scores (which we'll later compare to some visual diagnostics from the Yellowbrick library).
# In[2]:
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
def score_model(X, y, estimator, **kwargs):
    """
    Fit *estimator* inside a one-hot-encoding pipeline on label-encoded
    targets and print its resubstitution F1 score.
    """
    target = LabelEncoder().fit_transform(y)
    pipeline = Pipeline([
        ('one_hot_encoder', OneHotEncoder()),
        ('estimator', estimator)
    ])
    # Fit and score on the same data (training-set evaluation).
    pipeline.fit(X, target, **kwargs)
    score = f1_score(target, pipeline.predict(X))
    print("{}: {}".format(estimator.__class__.__name__, score))
# In[3]:
# Try them all!
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression, SGDClassifier
from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier
# Candidate classifiers spanning several scikit-learn model families:
# SVMs, linear models, nearest neighbours and tree ensembles.
models = [
    SVC(gamma='auto'), NuSVC(gamma='auto'), LinearSVC(),
    SGDClassifier(max_iter=100, tol=1e-3), KNeighborsClassifier(),
    LogisticRegression(solver='lbfgs'), LogisticRegressionCV(cv=3),
    BaggingClassifier(), ExtraTreesClassifier(n_estimators=100),
    RandomForestClassifier(n_estimators=100)
]
# Print a resubstitution F1 score for each candidate.
for model in models:
    score_model(X, y, model)
# ### Preliminary Model Evaluation
#
# Based on the results from the F1 scores above, which model is performing the best?
# ## Visual Model Evaluation
#
# Now let's refactor our model evaluation function to use Yellowbrick's `ClassificationReport` class, a model visualizer that displays the precision, recall, and F1 scores. This visual model analysis tool integrates numerical scores as well color-coded heatmap in order to support easy interpretation and detection, particularly the nuances of Type I and Type II error, which are very relevant (lifesaving, even) to our use case!
#
#
# **Type I error** (or a **"false positive"**) is detecting an effect that is not present (e.g. determining a mushroom is poisonous when it is in fact edible).
#
# **Type II error** (or a **"false negative"**) is failing to detect an effect that is present (e.g. believing a mushroom is edible when it is in fact poisonous).
# In[7]:
from sklearn.pipeline import Pipeline
from yellowbrick.classifier import ClassificationReport
def visualize_model(X, y, estimator):
    """
    Fit *estimator* inside a one-hot-encoding pipeline and display a
    Yellowbrick ClassificationReport (precision/recall/F1 heatmap).
    """
    target = LabelEncoder().fit_transform(y)
    pipeline = Pipeline([
        ('one_hot_encoder', OneHotEncoder()),
        ('estimator', estimator)
    ])
    report = ClassificationReport(
        pipeline, classes=['edible', 'poisonous'],
        cmap="YlGn", size=(600, 360)
    )
    # Fit and score on the same data, then render the figure.
    report.fit(X, target)
    report.score(X, target)
    report.poof()
# Render a colour-coded classification report for each candidate estimator.
for model in models:
    visualize_model(X, y, model)
# ## Reflection
#
# 1. Which model seems best now? Why?
# 2. Which is most likely to save your life?
# 3. How is the visual model evaluation experience different from numeric model evaluation?
| true |
11f9c24749e5f36688f7862492f647b3fa39c7cc | Python | jgirardet/MyCartable | /tests/python/types/test_annee.py | UTF-8 | 594 | 2.640625 | 3 | [] | no_license | from mycartable.types.annee import Annee
def test_init(qtbot, bridge):
a = Annee.new(id=2345, niveau="aah", parent=bridge)
assert a.id == 2345
assert a.niveau == "aah"
with qtbot.waitSignal(a.niveauChanged):
a.niveau = "EEE"
assert a.niveau == "EEE"
def test_getMenuesAnnees(fk):
for i in range(4):
fk.f_annee(2016 - (i * i))
a = Annee()
assert a.getMenuAnnees() == [
{"id": 2007, "niveau": "cm2007"},
{"id": 2012, "niveau": "cm2012"},
{"id": 2015, "niveau": "cm2015"},
{"id": 2016, "niveau": "cm2016"},
]
| true |
476321650669a715d51c5904748a39ba87632300 | Python | victormartinez/ecommerceapi | /ecommerce_api/core/cart/exceptions.py | UTF-8 | 287 | 2.6875 | 3 | [
"MIT"
] | permissive | from typing import Iterable, Optional
class ProductsNotFound(Exception):
    """Raised when one or more requested product ids cannot be resolved."""

    def __init__(self, product_ids: Optional[Iterable[int]] = None):
        # Preserve the falsy-to-empty behaviour of ``product_ids or []``.
        self.product_ids = product_ids if product_ids else []
        self.message = "One or more products are invalid."
        super().__init__(self.message)
| true |
4d57056bb6bacd4a8f26dadacf3b736ff7ad6b1e | Python | Mr-Phoebe/ProgramLanguage | /Python Example/python 100 examples/039.py | GB18030 | 758 | 4.21875 | 4 | [] | no_license | # -*- coding: UTF-8 -*-
'''
39
ĿһѾź顣һҪԭĹɽС
1. жϴǷһȻٿDzм
Ԫ֮κһλá
2.Դ룺
'''
if __name__ == '__main__':
a = [1,4,6,9,13,16,19,28,40,100,0]
print 'original list is:'
for i in range(len(a)):
print a[i]
number = int(raw_input("insert a new number:\n"))
if number > a[len(a) - 1]:
a.append(number)
else:
for i in range(len(a)):
if a[i] > number:
a.insert(i,number)
print a
| true |
d15f173bb19dc66341ba263e65117d594f425ebd | Python | helgadenes/aperdrift | /modeling/scan2fits.py | UTF-8 | 9,655 | 2.5625 | 3 | [] | no_license | # scan2fits: Create XX & YY beam models from drift scans
# K.M.Hess 19/02/2019 (hess@astro.rug.nl)
__author__ = "Kelley M. Hess"
__date__ = "$04-jun-2019 16:00:00$"
__version__ = "0.2"
from glob import glob
import os
from argparse import ArgumentParser, RawTextHelpFormatter
from astropy.coordinates import SkyCoord, EarthLocation, FK5
from astropy.io import fits
from astropy.time import Time
from astropy.table import Table
import astropy.units as u
from astropy.wcs import WCS
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
from modules.telescope_params import westerbork
def taskid2equinox(taskid):
    """Derive the observing date from a taskid (YYMMDD prefix) and return it
    as a decimal year, used to compute apparent calibrator coordinates.
    """
    tid = str(taskid)
    # The first six digits of the taskid encode the observation date.
    obs_date = Time('{}-{}-{}'.format(2000 + int(tid[0:2]), tid[2:4], tid[4:6]))
    return obs_date.decimalyear
def make_gifs(root):
    """Assemble the per-beam PNGs under *root* into two animated GIFs using
    ImageMagick's ``convert`` (must be available on the PATH).
    """
    commands = (
        'convert -delay 50 {}*db0_reconstructed.png {}all_beams0.gif'.format(root, root),
        'convert -delay 50 {}*_difference.png {}diff_xx-yy.gif'.format(root, root),
    )
    for command in commands:
        os.system(command)
    return
def parse_args():
    """Parse the command-line options for the beam-modelling script.

    Returns
    -------
    argparse.Namespace
        Parsed options: ``calibname``, ``taskid``, ``root``, ``make_gifs``.
    """
    parser = ArgumentParser(
        description="Make a model of all 40 beams from drift scans.",
        formatter_class=RawTextHelpFormatter)
    parser.add_argument(
        '-c', '--calibname', default='Cyg A',
        help="Specify the calibrator. (default: '%(default)s').")
    parser.add_argument(
        '-t', "--taskid", default="190531207",
        help="The first taskid in the set. (default: '%(default)s').")
    parser.add_argument(
        '-o', '--root',
        default='/Users/hess/apertif/scheduling/aperdrift/modeling/CygA_190531/',
        help="Specify the root directory. \n(default: '%(default)s').")
    parser.add_argument(
        '-g', '--make_gifs', action='store_true',
        help="(Re)Make gifs of figures? (default is False).")
    return parser.parse_args()
def main():
    """Build XX and YY beam maps for all 40 Apertif beams from drift-scan
    CSV exports, write them as FITS images (linear and dB), save diagnostic
    PNGs per beam and optionally assemble the PNGs into GIFs.
    """
    args = parse_args()
    # NOTE(review): ``np.warnings`` was removed in newer numpy releases --
    # confirm the pinned numpy version still exposes it.
    np.warnings.filterwarnings('ignore')
    # Find calibrator position
    calib = SkyCoord.from_name(args.calibname)
    # Put all the output from drift_scan_auto_corr.ipynb in a unique folder per source, per set of drift scans.
    datafiles = glob(args.root + '*exported_data.csv')
    datafiles.sort()
    posfiles = glob(args.root + '*hadec.csv')
    posfiles.sort()
    # Put calibrator into apparent coordinates (because that is what the telescope observes it in.)
    test = calib.transform_to('fk5')
    calibnow = test.transform_to(FK5(equinox='J{}'.format(taskid2equinox(args.taskid))))
    corr_im = []
    diff_im = []
    for beam in range(40):
        print(beam, end=' ')
        # Create the vectors which contain all data from all scans for a given beam which has been specified above.
        x, y, z_xx, z_yy = [], [], [], []
        for file, pos in zip(datafiles, posfiles):
            data = Table.read(file, format='csv')
            hadec = Table.read(pos, format='csv')
            hadec_start = SkyCoord(ra=hadec['ha'], dec=hadec['dec'], unit=(u.rad, u.rad))  # From ALTA (same as above)
            time_mjd = Time(data['time'] / (3600 * 24), format='mjd')
            lst = time_mjd.sidereal_time('apparent', westerbork().lon)
            # Hour-angle offset of each time sample from the calibrator.
            HAcal = lst - calibnow.ra  # in sky coords
            dHAsky = HAcal - hadec_start[beam].ra + (24 * u.hourangle)  # in sky coords in hours
            dHAsky.wrap_at('180d', inplace=True)
            dHAphys = dHAsky * np.cos(hadec_start[beam].dec.deg * u.deg)  # physical offset in hours
            x = np.append(x, dHAphys.deg)
            y = np.append(y, np.full(len(dHAphys.deg), hadec_start[beam].dec.deg))
            # Median-subtract the XX and YY autocorrelations for this beam.
            z_xx = np.append(z_xx, data['auto_corr_beam_' + str(beam) + '_xx'] - np.median(
                data['auto_corr_beam_' + str(beam) + '_xx']))
            z_yy = np.append(z_yy, data['auto_corr_beam_' + str(beam) + '_yy'] - np.median(
                data['auto_corr_beam_' + str(beam) + '_yy']))
        # # Add a fake drift that goes to zero power at 1 deg above last scan
        # x=np.append(x,dHAphys.deg)
        # y=np.append(y,np.full(len(dHAphys.deg),max(y)+1.0))
        # z_xx=np.append(z_xx,np.full(len(dHAphys.deg),1))
        # z_yy=np.append(z_yy,np.full(len(dHAphys.deg),1))
        # # Add a fake drift that goes to zero power at 1 deg below first scan
        # x=np.append(x,dHAphys.deg)
        # y=np.append(y,np.full(len(dHAphys.deg),min(y)-1.0))
        # z_xx=np.append(z_xx,np.full(len(dHAphys.deg),1))
        # z_yy=np.append(z_yy,np.full(len(dHAphys.deg),1))
        # Create the 2D grid and do a cubic interpolation
        cell_size = 105. / 3600.
        tx = np.arange(min(x), max(x), cell_size)
        ty = np.arange(min(y), max(y), cell_size)
        XI, YI = np.meshgrid(tx, ty)
        gridcubx = interpolate.griddata((x, y), z_xx, (XI, YI), method='cubic')  # median already subtracted
        gridcuby = interpolate.griddata((x, y), z_yy, (XI, YI), method='cubic')
        # Find the reference pixel at the apparent coordinates of the calibrator
        ref_pixy = (calibnow.dec.deg - min(y)) / cell_size
        ref_pixx = (-min(x)) / cell_size
        # Find the peak of the primary beam to normalize
        norm_xx = np.max(gridcubx[int(ref_pixy)-3:int(ref_pixy)+4, int(ref_pixx)-3:int(ref_pixx)+4])
        norm_yy = np.max(gridcuby[int(ref_pixy) - 3:int(ref_pixy) + 4, int(ref_pixx) - 3:int(ref_pixx) + 4])
        if beam == 0:
            # Beam 0's peak is reused to normalise the overview PNGs below.
            norm0_xx = np.max(gridcubx[int(ref_pixy) - 3:int(ref_pixy) + 4, int(ref_pixx) - 3:int(ref_pixx) + 4])
            norm0_yy = np.max(gridcuby[int(ref_pixy) - 3:int(ref_pixy) + 4, int(ref_pixx) - 3:int(ref_pixx) + 4])
        # Convert to decibels
        db_xx = np.log10(gridcubx/norm_xx) * 10.
        db_yy = np.log10(gridcuby/norm_yy) * 10.
        # db0_xx = np.log10(gridcubx/norm0_xx) * 10.
        # db0_yy = np.log10(gridcuby/norm0_yy) * 10.
        # Build a TAN WCS centred on the calibrator's catalogue position.
        wcs = WCS(naxis=2)
        wcs.wcs.cdelt = np.array([-cell_size, cell_size])
        wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
        wcs.wcs.crval = [calib.ra.to_value(u.deg), calib.dec.to_value(u.deg)]
        wcs.wcs.crpix = [ref_pixx, ref_pixy]
        header = wcs.to_header()
        hdux_db = fits.PrimaryHDU(db_xx, header=header)
        hduy_db = fits.PrimaryHDU(db_yy, header=header)
        hdux = fits.PrimaryHDU(gridcubx/norm_xx, header=header)
        hduy = fits.PrimaryHDU(gridcuby/norm_yy, header=header)
        # hdulx = fits.HDUList([hdux])
        # hduly = fits.HDUList([hduy])
        # Save the FITS files
        hdux_db.writeto(args.root + '{}_{}_{:02}xx_db.fits'.format(args.calibname.replace(" ", ""), args.taskid[:-3],
                                                                   beam), overwrite=True)
        hduy_db.writeto(args.root + '{}_{}_{:02}yy_db.fits'.format(args.calibname.replace(" ", ""), args.taskid[:-3],
                                                                   beam), overwrite=True)
        hdux.writeto(args.root + '{}_{}_{:02}xx.fits'.format(args.calibname.replace(" ", ""), args.taskid[:-3],
                                                             beam), overwrite=True)
        hduy.writeto(args.root + '{}_{}_{:02}yy.fits'.format(args.calibname.replace(" ", ""), args.taskid[:-3],
                                                             beam), overwrite=True)
        # Two-panel figure: XX and YY reconstructions normalised to beam 0.
        fig1 = plt.figure(figsize=(6, 9))
        ax1 = fig1.add_subplot(2, 1, 1, projection=wcs.celestial)
        ax1.grid(lw=1, color='white')
        ax1.set_title("Beam {:02} - XX Correlation - Cubic".format(beam))
        ax1.set_ylabel("Declination [J2000]")
        ax1.set_xlabel("Right Ascension [J2000]")
        im1 = ax1.imshow(gridcubx/norm0_xx, vmax=0.10, vmin=-0.03, cmap='magma', animated=True)
        ax2 = fig1.add_subplot(2, 1, 2, projection=wcs.celestial)
        ax2.grid(lw=1, color='white')
        ax2.set_title("Beam {:02} - YY Correlation - Cubic".format(beam))
        ax2.set_ylabel("Declination [J2000]")
        ax2.set_xlabel("Right Ascension [J2000]")
        im2 = ax2.imshow(gridcuby/norm0_yy, vmax=0.10, vmin=-0.03, cmap='magma', animated=True)
        corr_im.append([im1, im2])
        plt.savefig(args.root + '{}_{}_{:02}db0_reconstructed.png'.format(args.calibname.replace(" ", ""),
                                                                          args.taskid, beam))
        plt.close('all')
        # Plot the difference between XX and YY for every beam
        diffcub = gridcubx/norm_xx - gridcuby/norm_yy
        fig2 = plt.figure(figsize=(10, 9))
        ax1 = fig2.add_subplot(1, 1, 1, projection=wcs.celestial)
        ax1.grid(lw=1, color='white')
        ax1.set_title("Beam {:02} - Difference (XX$-$YY)".format(beam))
        ax1.set_ylabel("Declination [J2000]")
        ax1.set_xlabel("Right Ascension [J2000]")
        ax1.scatter(ref_pixx, ref_pixy, marker='x', color='black')
        im3 = ax1.imshow(diffcub, vmin=-0.1, vmax=0.1)
        plt.colorbar(im3)
        diff_im.append([im3])
        plt.savefig(args.root + '{}_{}_{:02}_difference.png'.format(args.calibname.replace(" ", ""),
                                                                    args.taskid, beam))
        plt.close('all')
    if args.make_gifs:
        make_gifs(args.root)
if __name__ == '__main__':
    # Script entry point.
    main()
| true |
9efe26791d8b1f635b1bef9e5542842e5d708a45 | Python | sanjay7884/hun1 | /even.py | UTF-8 | 213 | 2.984375 | 3 | [] | no_license | #sanjay
n1=int(input())
a=input().split()
for i in range(n1):
if(i%2==0):
if(int(a[i])%2==1):
print(a[i],end=' ')
else:
if(int(a[i])%2==0):
print(a[i],end=' ')
| true |
b53a929fe64f8162a6e849ef4409702619d3dc92 | Python | jamesbailz/github_assignment_1_repo | /agentframework.py | UTF-8 | 6,482 | 3.9375 | 4 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 11 16:50:46 2021
Module that initialises arbitrary agents (y, x), manipulates them and
calculates values from them.
Module classes:
Agent
Module functions:
__init__
str
move
eat
distance_between
share_with_neighbours
get_y
get_x
set_y
set_x
"""
#import statements
import random
#Variable set up
rando = random.random
#Set up Agent class
class Agent ():
#set up Agent class methods
#Initialise Agents
def __init__ (self, i, environment, agents, y, x, store):
'''
Initial Agent set up
Parameters
----------
i : int
Assigns each agent with a unique i.d.
environment : float
Environment with which the agents interact
agents : int
Full co-ordinates of the agents
y : int
y value of the agents co-ordinate
x : int
x value of the agents co-ordinate
store : int
Value of the amount stored within an agent
Returns
-------
None.
'''
self.i = i
self.environment = environment
self.agents = agents
self.store = store
self.nstore = 0
self.neighbours = []
self.shared_with = []
self.shared_amount = 0
self._y = y
self._x = x
#Agent description
def __str__ (self):
'''
Taking and conversion of numbers to strings,
for input into an agent description
Returns
-------
string
Agent: i.d.; y value; x value; store value
'''
return ("i = " + str(self.i) + ", y = " + str(self._y) \
+ ", x = " + str(self._x) + ", store = " + str(self.store))
#Move agents
def move (self):
'''
Moves the agents around the environment,
based on random number generation.
If random number < 0.5, agent moves positively in given direction.
If random number > 0.5, agent moves negatively in given direction.
Returns
-------
None.
'''
if rando () <0.5:
self._y = (self._y + 1) % 100
else:
self._y = (self._y - 1) % 100
if rando () <0.5:
self._x = (self._x + 1) % 100
else:
self._x = (self._x - 1) % 100
#Agents eat the environment
def eat (self):
'''
Determines if an agent will 'eat' the environment and,
add value to its store.
If agent > 10, environment will be eaten by a value of 10,
and this will be added to the agents store.
Returns
-------
None.
'''
if self.environment [self._y][self._x] > 10:
self.environment [self._y][self._x] -= 10
self.store += 10
'''
#Initial distance between points function dec.
def distance_between (agents_row_a, agents_row_b):
"""
Given two arbitrary agents, return the distance between them
agents_row_a : int
agents_row_b : int
return : float
>>> a = agentframework.Agent(0,environment,agents,random.randint(0,99),random.randint(0,99))
>>> a.x = 1
>>> a.y = 2
>>> b = agentframework.Agent(1,environment,agents,random.randint(0,99),random.randint(0,99))
>>> b.x = 4
>>> b.y = 6
>>> distance_between(a,b)
5.0
"""
return (((agents_row_a.y - agents_row_b.y)**2)
+ ((agents_row_a.x - agents_row_b.x)**2))**0.5
'''
#Final distance between agents
def distance_between (self, agents):
'''
Given two agents (self and agent), return the distance between them.
Parameters
----------
agents : int
Full co-ordinates of the agents
Returns
-------
float
Distance between self and agent co-ordinates
'''
return (((self._y - agents._y)**2) + ((self._x - agents._x)**2))**0.5
#Agents search for close neighbours and share resources
    def share_with_neighbours (self, neighbourhood):
        '''
        Determines if an agent is within the neighbourhood of another agent.
        If they are, calculates store amount agent 1 will share with agent 2.
        Note, agents will not share if they have done so already.
        Parameters
        ----------
        neighbourhood : int
            A value used to determine whether or not an agent is proximal
            to another agent.
        Returns
        -------
        None.
        '''
        # NOTE(review): the `neighbourhood` parameter is never used in this
        # body; `self.neighbours` appears to be populated elsewhere — confirm.
        #print (type(self.neighbours))
        for agent in self.neighbours:
            #Check not already shared with the agent
            # (only the partner's shared_with list is consulted here, while
            # the append below records into self.shared_with — asymmetric;
            # presumably each agent of a pair runs this once. Verify.)
            if (self.i not in agent.shared_with):
                # NOTE(review): `share_amount` is not initialised by __init__
                # (which sets `shared_amount`), so this line looks like it
                # would raise AttributeError unless the attribute is assigned
                # elsewhere — verify the intended attribute name.
                total = self.share_amount + agent.share_amount
                average = total / 2
                # Accumulate the shared average into a staging store (nstore)
                # instead of mutating `store` directly.
                self.nstore = self.nstore + average
                agent.nstore = agent.nstore + average
                # Record the partner's id so the pair will not share twice.
                self.shared_with.append (agent.i)
                # print ("i=" + str(self.i) + ", store: " + str(self.store) +\
                #     " shares with i = " + str(agent.i) + ", store: " +\
                #     str(agent.store) + ": avg = " + str(average))
#getter method
def get_y(self):
'''
Function to get an attribute value for y
Returns
-------
int
y value
'''
return self._y
def get_x(self):
'''
Function to get an attribute value for x
Returns
-------
int
x value
'''
return self._x
#setter method
def set_y(self, value):
'''
Function to set an attribute value for y
Parameters
----------
value : int
y value
Returns
-------
None.
'''
self._y = value
def set_x(self, value):
'''
Function to set an attribute value for x
Parameters
----------
value : int
x value
Returns
-------
None.
'''
self._x = value
#Property object creation
y = property (get_y, set_y)
x = property (get_x, set_x)
#Call doctest
# When executed as a script, run any doctests embedded in this module's
# docstrings (rather than running a simulation).
if __name__ == "__main__":
    import doctest
    doctest.testmod ()
| true |
f9fd944c55a3787405ccd8b08a7dcaf975a0a4c5 | Python | iiAnderson/highways-exploration | /reader/csv_file.py | UTF-8 | 1,143 | 3.3125 | 3 | [] | no_license | import csv
class CSVFile():
    """Apply row filters to a CSV file and write the surviving rows out.

    Attributes
    ----------
    csv_reader : csv.reader positioned just after the header row.
    headers : list[str]
        Column names taken from the first row of the input file.
    filters : list of callables
        Each is called as ``func(headers, row)``; a truthy return value is
        written to the output.
    """
    def __init__(self, file_path):
        # newline='' is the csv module's documented open mode: it stops
        # universal-newline translation from corrupting quoted fields and
        # doubling row terminators on Windows. The handle is kept so it can
        # be released via close() (the original leaked it).
        self._file = open(file_path, newline='')
        self.csv_reader = csv.reader(self._file, delimiter=',')
        self.headers = self.get_csv_headers(self.csv_reader)
        self.filters = []
    def get_csv_headers(self, csv_reader):
        """Consume and return the next row of *csv_reader* (the header)."""
        return next(csv_reader)
    def add_filter(self, func):
        """Register ``func(headers, row)``; truthy results are kept."""
        self.filters.append(func)
    def close(self):
        """Release the input file handle (safe to call more than once)."""
        if not self._file.closed:
            self._file.close()
    def to_file(self, output_location):
        """Stream the remaining input rows through every filter and write
        the kept rows (behind the header) to *output_location*."""
        output_rows = []
        i = 0
        for row in self.csv_reader:
            for func in self.filters:
                func_output = func(self.headers, row)
                if func_output:
                    output_rows.append(func_output)
            i += 1
            if i % 100000 == 0:
                print(f"Processed {i} Records")
        self._to_file(output_location, output_rows)
    def _to_file(self, output_location, rows):
        """Write *rows* behind a header row; helper for :meth:`to_file`."""
        # newline='' lets csv.writer control the row terminator itself.
        with open(output_location, mode='w', newline='') as output_file:
            writer = csv.writer(output_file, delimiter=',', quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
            writer.writerow(self.headers)
            writer.writerows(rows)
| true |
12f60086a92c0cbc33553d6463d22f8baf9c1b3d | Python | chrddav/CSC-121 | /CSC121_Lab04_Lab04P1.py | UTF-8 | 646 | 3.203125 | 3 | [] | no_license | FPG = float(input("Enter patient's fasting plasma glucose (FPG) level: "))
def _report_fpg(fpg):
    """Print the diagnostic category for a fasting plasma glucose value.

    > 125 -> diabetes; > 100 -> pre-diabetes; otherwise healthy
    (thresholds taken verbatim from the original script).
    """
    if fpg > 125:
        print('This patient has diabetes')
    elif fpg > 100:
        print('This patient has pre-diabetes')
    else:
        print('This patient has healthy fpg level')


# Classify the first reading (FPG was read on the line above), then keep
# prompting for further patients until the user answers something other
# than 'y'.
_report_fpg(FPG)
again = input("Enter another patients FPG level?[y/n]: ")
while again == 'y':
    FPG = float(input("Enter patient's fasting plasma glucose (FPG) level: "))
    _report_fpg(FPG)
    # bug fix: the loop's re-prompt was copy-pasted from another exercise
    # ("Calculate commission for another house?[y/n]"); ask about FPG instead.
    again = input("Enter another patients FPG level?[y/n]: ")
65f6b8b5fec9ece65a62b4f49ca83b5c909cc208 | Python | rrbiz662/neighborhood-map | /neighborhood-map-project/server.py | UTF-8 | 1,340 | 2.59375 | 3 | [] | no_license | #!usr/bin/env python2
import json
import requests
import os
from flask import Flask, request, make_response
from flask_cors import cross_origin
from urllib import quote
app = Flask(__name__)
# API constants.
# NOTE(review): an apparently live Yelp API key is committed in source —
# move it to an environment variable / untracked config and rotate the key.
API_KEY = "zboWotd5QomCFouN96e-YRf7deALxng825rC-GpXWbeoTGZmaOYtCy" \
          "l6U9eMOEJd09KNTzo6H12cbxoQb_jetLKrD_NHDf1fqVfYmAlEgv" \
          "G6TZdx2qvNPiVmLWvqWnYx"
API_HOST = "https://api.yelp.com"      # base URL for Yelp Fusion calls
SEARCH_PATH = "/v3/businesses/search"  # business-search endpoint
SEARCH_LIMIT = 5                       # max businesses returned per query
# About a 6 mile radius.
RADIUS = 10000  # radius value passed to Yelp (10000 ~ "6 mile radius" per the comment above)
@app.route("/yelprequest/")
@cross_origin()
def yelp_request():
    """Forwards data request to Yelp and returns the data to the client.

    Reads the ``location`` query parameter, proxies a business search to
    the Yelp Fusion API, and relays the result as JSON with CORS enabled.
    """
    params = {
        "term": "",  # no keyword filter — search by location only
        # NOTE(review): raises AttributeError if ``location`` is absent from
        # the query string — confirm callers always send it.
        "location": request.args.get("location").replace(" ", "+"),
        "limit": SEARCH_LIMIT,
        "radius": RADIUS
    }
    # Add required header to request.
    headers = {
        'Authorization': 'Bearer %s' % API_KEY,
    }
    url = '{0}{1}'.format(API_HOST, quote(SEARCH_PATH.encode('utf8')))
    # Send request to YELP.
    results = requests.request('GET', url, headers=headers, params=params)
    # Build response to return to the client.
    # NOTE(review): results.text is already a JSON document; json.dumps here
    # wraps it in an extra layer of string quoting — verify the client
    # actually expects a double-encoded payload.
    response = make_response(json.dumps(results.text), 200)
    response.headers["Content-Type"] = "application/json"
    return response
if __name__ == '__main__':
    # Running module as a program.
    # Fresh random secret on every start: sessions will not survive restarts.
    app.secret_key = os.urandom(24)
    # NOTE(review): debug mode exposes the Werkzeug interactive debugger on
    # 0.0.0.0 — disable before deploying.
    app.debug = True
    app.run(host="0.0.0.0", port=5000)
| true |
a80f40d45c84665417a6e48d99d24f0ff721c6f4 | Python | amspector100/discrete-latent | /dlatent/encoders.py | UTF-8 | 2,490 | 2.8125 | 3 | [] | no_license | import torch
import torch.nn as nn
from .utils.weight_dropout import WeightDropout, dropout_dim
class LSTMEncoder(nn.Module):
    """
    LSTM encoder: a stack of bidirectional LSTMs interleaved with stride-2
    depthwise convolutions that halve the sequence length, followed by a
    final LSTM and a linear projection to the latent dimension.

    :param d_embedding: dimension of WORD embeddings
    :param d_hidden: dimension of hidden state
    :param d_latent: dimension of LATENT embeddings
    :param n_downsize: Number of times to downsize by
    a factor of 2.
    :param kernel_size: Kernel to use in downsizing step.
    :param weight_dropout: dropout prob on each LSTM's hidden-to-hidden weights
    :param input_dropout: dropout prob applied to the encoder input
    :param inter_dropout: dropout prob applied between LSTM/conv stages
    :param output_dropout: dropout prob applied before the final projection
    """
    def __init__(self, d_embedding, d_hidden, d_latent,
                 n_downsize=2, kernel_size=7,
                 weight_dropout=0.5, input_dropout=0.4,
                 inter_dropout=0.3, output_dropout=0.4):
        super().__init__()
        # Save parameters
        self.n_downsize = n_downsize
        self.weight_dropout = weight_dropout
        self.input_dropout = input_dropout
        self.inter_dropout = inter_dropout
        self.output_dropout = output_dropout
        # LSTM layers with weightdropout.
        # Each LSTM is bidirectional with d2 // 2 units per direction, so the
        # concatenated output size of layer k is dims[k + 1].
        dims = [d_embedding] + [d_hidden] * (n_downsize + 1)
        lstm_layers = [nn.LSTM(d1, d2 // 2, bidirectional=True)
                       for d1, d2 in zip(dims[:-1], dims[1:])]
        lstm_layers = [WeightDropout(l, 'weight_hh_l0', weight_dropout)
                       for l in lstm_layers]
        self.lstm_layers = nn.ModuleList(lstm_layers)
        # Downsizing convolutional layers: depthwise (groups=d_hidden),
        # stride 2, "same"-style padding — each one halves the seq length.
        conv_layers = [nn.Conv1d(d_hidden, d_hidden, kernel_size, 2,
                                 kernel_size // 2, groups=d_hidden)
                       for _ in range(n_downsize)]
        self.conv_layers = nn.ModuleList(conv_layers)
        # Final output projection to the latent dimension.
        self.linear = nn.Linear(d_hidden, d_latent)
    def forward(self, x):
        """
        :param x: torch tensor of size seqlen by batchsize by d_embedding
        returns: output, torch tensor of size seqlen / 2**n_downsize by
        batchsize by d_latent
        """
        # dropout_dim presumably zeroes whole slices along dim 0 (time steps)
        # when training — confirm against utils.weight_dropout.
        x = dropout_dim(x, self.input_dropout, 0, self.training)
        # zip stops at the shorter sequence, so this pairs the first
        # n_downsize LSTMs with the n_downsize conv layers.
        for lstm, conv in zip(self.lstm_layers, self.conv_layers):
            x, _ = lstm(x)
            x = dropout_dim(x, self.inter_dropout, 0, self.training)
            # Conv1d expects (batch, channels, seq): permute in, convolve,
            # permute back to (seq, batch, channels), then ReLU.
            x = conv(x.permute(1, 2, 0)).permute(2, 0, 1).relu()
        # The one remaining LSTM runs after the last downsizing stage.
        x, _ = self.lstm_layers[-1](x)
        x = dropout_dim(x, self.output_dropout, 0, self.training)
        x = self.linear(x)
        return x
| true |
7688f1015583bd425806d1261b9df181511c820e | Python | w00j00ng/WJ_PyProjects | /yong.py | UTF-8 | 777 | 2.9375 | 3 | [] | no_license | import os
def mkdir(projectName):
    """Create a directory named *projectName* under the current directory.

    If the name is taken, try "<name> (1)", "<name> (2)", ... until one is
    free.

    Returns
    -------
    str
        The relative path of the directory that was created.
    """
    suffix = 0
    while True:
        if suffix == 0:
            fdir = "./" + projectName
        else:
            fdir = "./" + projectName + " (" + str(suffix) + ")"
        try:
            os.makedirs(fdir)
            return fdir
        except FileExistsError:
            # bug fix: the original never advanced the counter, so an
            # existing directory made the loop spin forever; its bare
            # `except` also hid unrelated errors (e.g. permission denied)
            # inside the same infinite loop.
            suffix += 1
def getText(fname):
    """Read and return the contents of ./<fname> (".txt" appended if absent).

    Returns
    -------
    str or None
        The file's text, or None (after printing a message) when the file
        cannot be read.
    """
    if not fname.endswith(".txt"):
        fname += ".txt"
    try:
        # Context manager closes the handle even if read() raises —
        # the original leaked the handle on a read error.
        with open("./" + fname, "r", encoding="utf-8") as inputFile:
            return inputFile.read()
    except OSError:
        # Narrowed from a bare `except`: only I/O failures mean "no file".
        print("No such File")
        return None
| true |
3bdfe91f83a4c690e9ea3f7f1914c09b789fe6df | Python | Donghyun-34/KUCIS | /project/금보원 과제/머신러닝(numpy로만 구현)/LogisticRegression.py | UTF-8 | 3,612 | 3.1875 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.metrics import f1_score, recall_score
class LogisticRegression:
    """Binary logistic regression trained with batch gradient descent."""

    def __init__(self, learning_rate=0.01, threshold=0.01, max_iterations=100000, fit_intercept=True, verbose=False):
        self._learning_rate = float(learning_rate)  # gradient step size
        self._max_iterations = max_iterations       # upper bound on descent steps
        self._threshold = threshold                 # stop once cost drops below this
        self._fit_intercept = fit_intercept         # prepend a bias column when True
        self._verbose = verbose                     # log progress every 100 steps

    def get_coeff(self):
        """Return the learned weight vector (theta)."""
        return self._W

    def add_intercept(self, x_data):
        """Prepend a column of ones so the first weight acts as a bias term."""
        ones = np.ones((x_data.shape[0], 1))
        return np.concatenate((ones, x_data), axis=1)

    def sigmoid(self, z):
        """Logistic function 1 / (1 + e^-z)."""
        return 1 / (1 + np.exp(-z))

    def cost(self, h, y):
        """Mean binary cross-entropy between predictions *h* and labels *y*."""
        return np.mean(-y * np.log(h) - (1 - y) * np.log(1 - h))

    def fit(self, x_data, y_data):
        """Fit the weights by gradient descent.

        Stops early (returning False) once the cost falls below the
        convergence threshold supplied at construction time.
        """
        num_examples, _num_features = np.shape(x_data)
        if self._fit_intercept:
            x_data = self.add_intercept(x_data)
        # weights start at zero
        self._W = np.zeros(x_data.shape[1])
        for step in range(self._max_iterations):
            hypothesis = self.sigmoid(x_data @ self._W)
            diff = hypothesis - y_data              # residual per example
            current_cost = self.cost(hypothesis, y_data)
            # batch gradient of the cross-entropy loss
            self._W = self._W - self._learning_rate * (x_data.T @ diff) / num_examples
            if current_cost < self._threshold:
                return False
            if (self._verbose == True and step % 100 == 0):
                print('cost :', current_cost)
                print('diff :', diff)

    def predict_prob(self, x_data):
        """Return P(class == 1) for each row of *x_data*."""
        if self._fit_intercept:
            x_data = self.add_intercept(x_data)
        return self.sigmoid(x_data @ self._W)

    def predict(self, x_data):
        """Return hard 0/1 labels using a 0.5 probability threshold."""
        return self.predict_prob(x_data).round()
# Train the hand-rolled logistic regression on the Kaggle credit-card fraud
# dataset and visualise the resulting confusion matrix.
df = pd.read_csv('./creditcard.csv')
# Subset of the anonymised PCA features V1..V28 used as predictors.
feature_names = ['V1', 'V3', 'V4', 'V7', 'V9', 'V10', 'V11', 'V12', 'V14', 'V16', 'V17', 'V18']
data = df[feature_names]
target = df['Class']  # 1 = fraud, 0 = legitimate (per the class names below)
# Fixed random_state keeps the 70/30 split reproducible.
X_train, X_test, y_train, y_test = train_test_split(data, target, train_size=0.70, test_size=0.30, random_state=1)
X = np.array(X_train)
y = np.array(y_train)
model = LogisticRegression(learning_rate=0.1, verbose=True)
model.fit(X, y)
predict_data = model.predict(X_test)
class_names = ['not_fraud', 'fraud']
matrix = confusion_matrix(y_test, predict_data)
# Create pandas dataframe
dataframe = pd.DataFrame(matrix, index=class_names, columns=class_names)
# Create heatmap
sns.heatmap(dataframe, annot=True, cbar=None, cmap="Blues", fmt='g')
plt.title("Confusion Matrix"), plt.tight_layout()
plt.ylabel("True Class"), plt.xlabel("Predicted Class")
plt.show()
# Report RMSE of the 0/1 predictions against the true labels.
mse = mean_squared_error(y_test, predict_data)
rmse = np.sqrt(mse)
print(rmse)
32d967735b3c9304ede57561d1223b2afa5f4f93 | Python | IshtiaqueNafis/onlineShop | /Class/Aceesories.py | UTF-8 | 228 | 2.640625 | 3 | [] | no_license | from Class.Product import Product
class Accessories(Product):
    """A Product that is an accessory for another product.

    NOTE(review): the attribute/parameter name ``acessory_for`` is
    misspelled, but renaming it would break existing callers.
    """
    def __init__(self, id, name, price, description, acessory_for):
        # Delegate the shared product fields to the base class, then record
        # which product this item is an accessory for.
        super().__init__(id, name, price, description)
        self.acessory_for = acessory_for
| true |
efe663ae9e4518f350c2a7391e7629b4ffb95907 | Python | alexanderdrent/Uncertainty-Analysis-Windmaster | /model/prototype/scratchpad.py | UTF-8 | 574 | 2.5625 | 3 | [
"MIT"
] | permissive | '''
Created on 1 Apr 2019
@author: jhkwakkel
'''
import pandas as pd

# IDs of assets that received investment (first column of the CSV).
investments = pd.read_csv('./data/investIDwindmaster.csv')
investment_set = set(investments.iloc[:,0])
# Conversion-asset table; the real header sits after a 17-line preamble.
all_assets = pd.read_csv("./data/conversionAssets.csv",
                         header=17, usecols=["Conversion asset ID [string] – Must be unique",
                                             "assetTypes",
                                             "Asset description – human readable form – [String]"])
# Second of the selected columns — presumably "assetTypes"; verify, since
# usecols keeps the file's column order, not the order of the list above.
descr = all_assets.iloc[:,1]
# Print the overlap between those values and the invested IDs.
print(set(descr).intersection(investment_set))
7e51c439fe171036e069de709f99ae2d7f61acb8 | Python | TaurusYin/Translator | /Filehandler.py | UTF-8 | 4,171 | 2.578125 | 3 | [] | no_license | import glob
import re
from time import sleep, ctime
import threading
import docx
from docx import Document
import BaiduTranslator
import os
# coding=utf8
def remove_doc_space(para):
    """Return *para*.text with all whitespace stripped.

    Removes ideographic (em) spaces, non-breaking spaces, ASCII spaces and
    newlines from a docx paragraph's text.
    """
    # bug fix: the original character class was [\u3000,\xa0]; the literal
    # comma inside it also deleted every ',' from the text, but the comment
    # ("remove em space") shows the comma was meant as a separator.
    para_text = re.sub(u'[\u3000\xa0]', u'', para.text)  # remove em space / nbsp
    para_text = para_text.replace(' ', '')  # remove en space
    para_text = para_text.replace("\n", '')  # remove empty rows
    return para_text
def remove_txt_space(para):
    """Return the string *para* with all whitespace stripped.

    Accepts bytes (decoded as UTF-8) as well as str. Removes ideographic
    (em) spaces, non-breaking spaces, ASCII spaces and newlines.
    """
    # bug fix: the original tested `isinstance(para, str)` and then called
    # the Python-2-only `unicode` builtin, which raises NameError for every
    # str input on Python 3; decoding bytes covers the py2 intent instead.
    if isinstance(para, bytes):
        para = para.decode("utf-8")
    # bug fix: the original class [\u3000,\xa0] also stripped literal commas;
    # the comma was meant as a separator, not a character to remove.
    para_text = re.sub(u'[\u3000\xa0]', u'', para)  # remove em space / nbsp
    para_text = para_text.replace(' ', '')  # remove en space
    para_text = para_text.replace("\n", '')  # remove empty rows
    return para_text
def thread_start(threads):
    """Start every thread in *threads* as a daemon, pause 2 s, then join all.

    The pause is deliberate throttling: the Baidu Translator API rate-limits
    frequent queries (see the original author's note below).
    """
    for t in threads:
        t.setDaemon(True)
        t.start()
    sleep(2)
    # This case origin from Baidu Translator interface has the access controller
    # which does not allow the users to query the translator more frequently.
    # The sleep() is used to avoid the access limit error
    for t in threads:
        t.join()
def task(file_obj):
    """Translate one .docx or .txt file from Chinese to English via Baidu.

    The whitespace-stripped paragraphs are concatenated, translated in one
    request, and the result is written next to the input as
    "<name>_result.<ext>" (a .docx input produces a .docx result, a .txt
    input a .txt result). Files matching "~$..." (Office lock files) are
    skipped.
    """
    res, result_list, translated_res = '', [], ''
    (filepath, tempfilename) = os.path.split(file_obj)
    filename, extension = os.path.splitext(tempfilename)
    # Result file lives beside the source, suffixed with "_result".
    wpath = filepath + '/' + filename + '_result' + extension
    if re.match(r'.+(docx)$', file_obj, re.M) and not re.match(r'.+\~\$.+', file_obj, re.M): # docx format
        # exclude the case C:/My Received Files\~$test.docx
        print("task of " + file_obj + " start")
        doc_obj = docx.Document(file_obj)
        result_list = map(remove_doc_space, doc_obj.paragraphs)
        for para in result_list:
            # NOTE(review): `is not ''` is an identity comparison; it works
            # only when the interpreter interns the empty string — `!= ''`
            # is the correct test.
            if (para is not ''): # remove empty rows
                res = res + para
        translated_res = BaiduTranslator.translate(res, fromLang='zh', toLang='en')
        wstr = translated_res['trans_result'][0]['dst'].encode("utf-8")
        document = Document()
        document.add_paragraph(wstr)
        document.save(wpath)
    if re.match(r'.+(txt)$', file_obj, re.M) and not re.match(r'.+\~\$.+', file_obj, re.M): # txt format
        print("task of " + file_obj + " start")
        f = open(file_obj, "r")
        result_list = f.readlines()
        result_list = map(remove_txt_space, result_list)
        for para in result_list:
            if (para is not ''): # remove empty rows
                res = res + para
        translated_res = BaiduTranslator.translate(res, fromLang='zh', toLang='en')
        # NOTE(review): writing bytes (.encode) to a text-mode file is a
        # Python-2 idiom; on Python 3 this write would raise TypeError.
        wstr = translated_res['trans_result'][0]['dst'].encode("utf-8")
        wfile_obj = open(wpath, 'w')
        wfile_obj.write(wstr)
        f.close()
        wfile_obj.close()
def translate_from_path(path):
    """Translate every file directly under *path* in batches of 10 threads,
    then write a per-file success/fail summary to <path>/summary.txt.

    Returns
    -------
    str
        The summary text ("Total tasks is N" followed by one
        "<file>: success|fail" line per input file).
    """
    re_str = "/*"
    glob_path = path + re_str
    files = glob.glob(pathname=glob_path)
    threads = []
    threads_num = 0
    for file in files:
        threads_num = threads_num + 1
        t = threading.Thread(target=task, args=(file,))
        threads.append(t)
        if threads_num == 10:  # 10 tasks are putting into one bulk for multi-processing
            threads_num = 0
            thread_start(threads)
            threads = []
        if file is files[-1]:  # flush the final, possibly partial, bulk
            thread_start(threads)
    # summarize tasks: a task succeeded iff its "<name>_result.<ext>" exists
    files_after = glob.glob(pathname=glob_path)
    wfile_obj = open(path + '/summary.txt', 'w+')
    restful_output = "Total tasks is " + str(len(files)) + " \n"
    for file in files:
        (filepath, tempfilename) = os.path.split(file)
        filename, extension = os.path.splitext(tempfilename)
        # bug fix: the result path was built with a hard-coded '\\' here
        # while task() writes it with '/', so on non-Windows systems every
        # file was reported as "fail"; os.path.join matches os.path.split
        # on either platform.
        wpath = os.path.join(filepath, filename + '_result' + extension)
        if wpath in files_after:
            wfile_obj.writelines(filename + extension + ": success \n")
            restful_output = "{0}{1}{2}: success\n".format(restful_output, filename, extension)
        else:
            wfile_obj.writelines(filename + extension + ": fail \n")
            restful_output = "{0}{1}{2}: fail\n".format(restful_output, filename, extension)
    wfile_obj.close()
    return restful_output
# translate_path('C:/Users/eqsvimp/PycharmProjects/Translator/testfiles')
# print
| true |
3ac32241e4b9806dd0c07ececc178bf67ddfa9e5 | Python | joescottdave/salary-me | /code/rent-scraper/csv-clean.py | UTF-8 | 1,085 | 2.859375 | 3 | [] | no_license | import csv
def _parse_int(cell):
    """Return int(cell), or '' when the cell is not a valid integer."""
    try:
        return int(cell)
    except ValueError:
        return ''


def _parse_pounds(cell):
    """Extract the integer following '£' in the first token of *cell*.

    Thousands separators are stripped ("£1,200" -> 1200); returns '' when
    the token has no '£' or no parsable number.
    """
    try:
        token = cell.split(' ')[0].replace(',', '')
        return int(token[token.index('£') + 1:])
    except ValueError:
        return ''


# bug fix / cleanup: the original never closed either file handle and left
# per-row debug prints (the '£' index) in place; 'with' guarantees the
# output is flushed and both handles are released even if a row raises.
with open('rents8.csv', 'r') as infile, open('rents-2.csv', 'w') as outfile:
    f = csv.DictReader(infile, delimiter=',')
    g = csv.writer(outfile, delimiter=',')
    fhead = f.fieldnames
    g.writerow(['postcode', 'totalprops', 'last14', 'average', 'median', 'avgtom'])
    for row in f:
        newrow = [
            row[fhead[0]],                 # postcode (copied verbatim)
            _parse_int(row[fhead[1]]),     # totalprops
            row[fhead[2]],                 # last14 (copied verbatim)
            _parse_pounds(row[fhead[3]]),  # average rent in pounds
            _parse_pounds(row[fhead[4]]),  # median rent in pounds
        ]
        try:
            newrow.append(int(row[fhead[5]].split(' ')[0]))  # avgtom
        except ValueError:
            newrow.append('')
        g.writerow(newrow)
a7a650038f378e196fd33781c172b40364630481 | Python | KirillShmilovich/active_learning | /active_learning/acquisition.py | UTF-8 | 3,076 | 3.015625 | 3 | [
"MIT"
] | permissive | from scipy.stats import norm
import numpy as np
def PI(mu, std, **kwargs):
    """
    Probability of improvement acquisition function.
    INPUT:
    - mu: mean of predicted point in grid
    - std: sigma (square root of variance) of predicted point in grid
    - fmax / fMax: observed or predicted maximum value
      (depending on noise p.19 [Brochu et al. 2010])
    - epsilon: trade-off parameter (>=0)
    OUTPUT:
    - PI: probability of improvement for candidate point
    As described in:
    E Brochu, VM Cora, & N de Freitas (2010):
    A Tutorial on Bayesian Optimization of Expensive Cost Functions,
    arXiv:1012.2599, http://arxiv.org/abs/1012.2599.
    """
    # consistency fix: this function read kwargs["fmax"] while EI in this
    # module reads kwargs["fMax"]; accept either casing so the acquisition
    # functions can be called uniformly.
    fMax = kwargs["fmax"] if "fmax" in kwargs else kwargs["fMax"]
    epsilon = kwargs["epsilon"]
    Z = (mu - fMax - epsilon) / std
    return norm.cdf(Z)
def EI(mu, std, **kwargs):
    """
    Expected improvement acquisition function.
    INPUT:
    - mu: mean of predicted point in grid
    - std: sigma (square root of variance) of predicted point in grid
    - fMax / fmax: observed or predicted maximum value
      (depending on noise p.19 [Brochu et al. 2010])
    - epsilon: trade-off parameter (>=0); [Lizotte 2008] suggests 0.01
      (scaled by the signal variance if necessary, p.14 [Brochu et al. 2010])
    OUTPUT:
    - EI: expected improvement for candidate point
    As described in:
    E Brochu, VM Cora, & N de Freitas (2010):
    A Tutorial on Bayesian Optimization of Expensive Cost Functions,
    arXiv:1012.2599, http://arxiv.org/abs/1012.2599.
    """
    # consistency fix: PI in this module historically read kwargs["fmax"]
    # while EI read kwargs["fMax"]; accept either casing.
    fMax = kwargs["fMax"] if "fMax" in kwargs else kwargs["fmax"]
    epsilon = kwargs["epsilon"] if "epsilon" in kwargs else 0.01
    Z = (mu - fMax - epsilon) / std
    return (mu - fMax - epsilon) * norm.cdf(Z) + std * norm.pdf(Z)
def Exploitacquisition(mu, std, **kwargs):
    """Pure-exploitation acquisition: Phi((mu - fmax - epsilon) / std).

    Reads ``fmax`` (current best value) and ``epsilon`` (trade-off
    parameter) from *kwargs*; numerically identical to PI above.
    """
    z_score = (mu - kwargs["fmax"] - kwargs["epsilon"]) / std
    return norm.cdf(z_score)
def UCB(mu, std, **kwargs):
    """
    Upper confidence bound acquisition function.
    INPUT:
    - mu: predicted mean
    - std: sigma (square root of variance) of predicted point in grid
    - t: number of iteration
    - d: dimension of optimization space
    - v: hyperparameter, default 1 (bounds hold for reasonably smooth
      kernels, [Srinivas et al., 2010])
    - delta: small constant (prob of regret), default 0.1
    OUTPUT:
    - UCB: upper confidence bound for candidate point
    As described in:
    E Brochu, VM Cora, & N de Freitas (2010):
    A Tutorial on Bayesian Optimization of Expensive Cost Functions,
    arXiv:1012.2599, http://arxiv.org/abs/1012.2599.
    """
    t = kwargs["t"]
    d = kwargs["d"]
    v = kwargs.get("v", 1)
    delta = kwargs.get("delta", 0.1)
    # Srinivas et al. beta_t schedule, scaled by v.
    beta = 2 * np.log((t ** (d / 2.0 + 2)) * (np.pi ** 2) / (3.0 * delta))
    kappa = np.sqrt(v * beta)
    return mu + kappa * std
| true |
6253c0d3afaa9b57c19402381fdd3afdc0cafd42 | Python | SURYA-MANUSANI/Deep-Learning | /Convolution Neural Network/Optimizers.py | UTF-8 | 1,445 | 3.21875 | 3 | [] | no_license | import numpy as np
class Sgd:
    """Plain stochastic gradient descent: w <- w - lr * grad."""

    def __init__(self, learning_rate):
        # Stored as float so integer rates behave consistently.
        self.learning_rate = float(learning_rate)

    def calculate_update(self, weight_tensor, gradient_tensor):
        """Return the weights after one descent step."""
        return weight_tensor - self.learning_rate * gradient_tensor
class SgdWithMomentum:
    """SGD with classical momentum: v <- m*v - lr*grad; w <- w + v."""

    def __init__(self, learning_rate, momentum_rate):
        self.learning_rate = float(learning_rate)
        self.momentum_rate = momentum_rate
        self.v = 0  # running velocity, carried across update calls

    def calculate_update(self, weight_tensor, gradient_tensor):
        """Advance the velocity and return the updated weights."""
        self.v = self.momentum_rate * self.v - self.learning_rate * gradient_tensor
        return weight_tensor + self.v
class Adam:
    """Adam optimizer with bias-corrected first/second moment estimates."""

    def __init__(self, learning_rate, mu, rho):
        self.learning_rate = float(learning_rate)
        self.mu = mu    # beta1: first-moment (mean) decay rate
        self.rho = rho  # beta2: second-moment (variance) decay rate
        self.v = 0      # first moment: running mean of gradients
        self.r = 0      # second moment: running mean of squared gradients
        self.k = 1      # 1-based step counter used for bias correction

    def calculate_update(self, weight_tensor, gradient_tensor):
        """Return the weights after one Adam step (advances the counter)."""
        self.v = self.mu * self.v + (1 - self.mu) * gradient_tensor
        self.r = self.rho * self.r + (1 - self.rho) * np.square(gradient_tensor)
        # Bias-corrected moment estimates (moments start at zero).
        v_hat = self.v / (1 - np.power(self.mu, self.k))
        r_hat = self.r / (1 - np.power(self.rho, self.k))
        # Epsilon inside the sqrt, matching the original formulation.
        step = self.learning_rate * (v_hat / (np.sqrt(r_hat + np.finfo(float).eps)))
        self.k = self.k + 1
        return weight_tensor - step