seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 โ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k โ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
14366507238 | from pymongo import MongoClient
import datetime
""" this module contains methods for connecting to a mongodb database server, adding individual games
to the database, adding a group of games from a file, updating overall stats, updating
individual player stats, and updating player vs player stats."""
client = MongoClient('mongodb://user:readonly@ds064748.mlab.com:64748/pingpongstats')
db = client.pingpongstats
games = db.games
playerNames = db.playerNames
g_stats = db.g_stats
pl_stats = db.pl_stats
vs_stats = db.vs_stats
def reconnect(uname, pword, dbname):
    """Re-authenticate against the mLab host and rebind the module-level
    collection handles on success.

    Args:
        uname: MongoDB username.
        pword: MongoDB password.
        dbname: database name appended to the connection URI.

    Returns:
        True when the new connection answers server_info(), False otherwise.
        (The original fell through and returned None on success, so callers
        could not distinguish success from failure.)
    """
    global client, db, games, playerNames, g_stats, pl_stats, vs_stats
    client2 = MongoClient('mongodb://%s:%s@ds064748.mlab.com:64748/%s' % (uname, pword, dbname))
    # NOTE(review): server_info() raises ServerSelectionTimeoutError on an
    # unreachable host rather than returning a falsy value -- confirm that
    # callers expect an exception in that case.
    if client2.server_info():
        client = client2
        db = client.pingpongstats
        games = db.games
        playerNames = db.playerNames
        g_stats = db.g_stats
        pl_stats = db.pl_stats
        vs_stats = db.vs_stats
        return True
    else:
        return False
def add_data_obo(player1, score1, player2, score2):
    """Record a single game: register both players and insert one game doc.

    Args:
        player1, player2: player names.
        score1, score2: scores (str or int); the higher score wins.
    """
    # update_one/count_documents replace Collection.update()/count(),
    # which were removed in PyMongo 4; upsert keeps playerNames deduped.
    playerNames.update_one({'name': player1}, {'$set': {'name': player1}}, upsert=True)
    playerNames.update_one({'name': player2}, {'$set': {'name': player2}}, upsert=True)
    # Decide winner/loser once instead of duplicating the insert in each branch.
    if int(score1) > int(score2):
        winner, w_score, loser, l_score = player1, int(score1), player2, int(score2)
    else:
        winner, w_score, loser, l_score = player2, int(score2), player1, int(score1)
    # NOTE(review): deriving game_id from the collection size is racy under
    # concurrent writers; an ObjectId or a counter document would be safer.
    games.insert_one({'winner': winner,
                      'loser': loser,
                      'winningScore': w_score,
                      'losingScore': l_score,
                      'dateAdded': datetime.datetime.now(),
                      'game_id': games.count_documents({}) + 1})
def add_data(file):
    """Bulk-load games from a whitespace-delimited score sheet.

    Each line of `file` must read: player1 score1 player2 score2.
    """
    # open score sheet file
    with open(file, 'r') as infile:
        for line in infile:
            player1, score1, player2, score2 = line.split()
            # Delegate to the single-game path so the two code paths
            # (interactive entry vs bulk load) cannot drift apart.
            add_data_obo(player1, score1, player2, score2)
def update_g_stats():
    """Recompute the league-wide stats document from games and pl_stats.

    Each superlative is stored as a (value, names) tuple where ties with the
    current leader are joined into a comma-separated name string.
    """
    def leaders(cursor, field, seed, better):
        # Scan players once, tracking the best value of `field` and the
        # comma list of everyone who matched it along the way.  (A value
        # equal to the seed before any leader is found still appends with a
        # leading ', ' -- behaviour preserved from the original scans.)
        best, names = seed, ''
        for player in cursor:
            if player[field] == best:
                names = names + ', ' + player['name']
            if better(player[field], best):
                best = player[field]
                names = player['name']
        return best, names

    # count_documents replaces count(), which PyMongo 4 removed.
    tot_games = games.count_documents({})
    ot_games = games.count_documents({'winningScore': {'$gt': 21}})
    high = leaders(pl_stats.find(), 'Highest Score', 0, lambda a, b: a > b)
    low = leaders(pl_stats.find(), 'Lowest Score', 99, lambda a, b: a < b)
    hi_pct = leaders(pl_stats.find({'Total Games': {'$gt': 4}}),
                     'Overall Win %', 0, lambda a, b: a > b)
    low_pct = leaders(pl_stats.find({'Total Games': {'$gt': 4}}),
                      'Overall Win %', 1, lambda a, b: a < b)
    win_streak = leaders(pl_stats.find(), 'Longest Win Streak', 0,
                         lambda a, b: a > b)
    lose_streak = leaders(pl_stats.find(), 'Longest Losing Streak', 0,
                          lambda a, b: a > b)
    data = {'Total Games': tot_games,
            'Games to OT': ot_games,
            'Highest Score': high,
            'Lowest Score': low,
            'Highest Win % (>4 games)': hi_pct,
            'Lowest Win % (>4 games)': low_pct,
            'Longest Win Streak': win_streak,
            'Longest Losing Streak': lose_streak}
    # replace_one supersedes the removed Collection.update().  Filtering on
    # the full snapshot matches the original behaviour: an unchanged
    # snapshot is replaced in place, a changed one inserts a new document.
    g_stats.replace_one(data, data, upsert=True)
def update_pl_stats(name):
    """Recompute and upsert the per-player stats document for `name`.

    Aggregates every game the player appears in: totals, win percentage,
    score averages/extremes, overtime numbers (winningScore > 21) and the
    longest win/loss streaks in collection order.
    """
    for player in playerNames.find({'name': name}):
        pname = player['name']
        involved = {'$or': [{'winner': pname}, {'loser': pname}]}
        # count_documents replaces Cursor.count(), removed in PyMongo 4.
        tot_games = games.count_documents(involved)
        if tot_games == 0:
            # Previously this divided by zero; a player with no games on
            # record simply has nothing to aggregate.
            continue
        wins = games.count_documents({'winner': pname})
        losses = games.count_documents({'loser': pname})
        winpct = float('%.2f' % (wins / tot_games * 100))
        # Collect scores and margins in one pass per role.
        scores, scoresw, marginsv = [], [], []
        for game in games.find({'winner': pname}):
            scores.append(game['winningScore'])
            scoresw.append(game['winningScore'])
            marginsv.append(game['winningScore'] - game['losingScore'])
        scoresl, marginsd = [], []
        for game in games.find({'loser': pname}):
            scores.append(game['losingScore'])
            scoresl.append(game['losingScore'])
            marginsd.append(game['winningScore'] - game['losingScore'])
        avg = float('%.2f' % (sum(scores) / tot_games))
        least = min(scores)
        most = max(scores)
        # Averages over wins / losses guard against empty denominators.
        if wins == 0:
            avg_scorew = 0
            avg_winby = 0
        else:
            avg_scorew = float('%.2f' % (sum(scoresw) / wins))
            avg_winby = float('%.2f' % (sum(marginsv) / wins))
        if losses == 0:
            avg_scorel = 0
            avg_loseby = 0
        else:
            avg_scorel = float('%.2f' % (sum(scoresl) / losses))
            avg_loseby = float('%.2f' % (sum(marginsd) / losses))
        # Overtime: any game whose winning score exceeded 21.
        ot = games.count_documents(dict(involved, winningScore={'$gt': 21}))
        otw = games.count_documents({'winner': pname, 'winningScore': {'$gt': 21}})
        otl = games.count_documents({'loser': pname, 'winningScore': {'$gt': 21}})
        # otw == 0 also covers ot == 0, so the division below is safe.
        ot_pct = 0 if otw == 0 else float('%.2f' % (otw / ot * 100))
        # Bug fix: the old scan never flushed the trailing streak, let games
        # not involving the player break a run, and could call max() on an
        # empty list.  Scan only this player's games and track the best run.
        win_streak = _longest_streak(pname, 'winner')
        loss_streak = _longest_streak(pname, 'loser')
        data = {'name': pname,
                'Total Games': tot_games,
                'Total Wins': wins,
                'Total Losses': losses,
                'Overall Win %': winpct,
                'Average Score': avg,
                'Lowest Score': least,
                'Highest Score': most,
                'Average Margin of Victory': avg_winby,
                'Average Score of Win': avg_scorew,
                'Average Margin of Defeat': avg_loseby,
                'Average Score of Loss': avg_scorel,
                'Total Games to OT': ot,
                'Total OT Games Won': otw,
                'Total OT Games Lost': otl,
                'OT Win %': ot_pct,
                'Longest Win Streak': win_streak,
                'Longest Losing Streak': loss_streak}
        # Filter on the name only so each player keeps exactly one stats
        # document (the old full-dict filter inserted a fresh snapshot
        # whenever any value changed).
        pl_stats.replace_one({'name': pname}, data, upsert=True)


def _longest_streak(pname, role):
    """Longest run of consecutive games (collection order) where `pname`
    appears under `role` ('winner' or 'loser'), among the player's games."""
    best = 0
    run = 0
    for game in games.find({'$or': [{'winner': pname}, {'loser': pname}]}):
        if game[role] == pname:
            run += 1
            if run > best:
                best = run
        else:
            run = 0
    return best
def update_vs_stats(player1, player2):
    """Recompute and upsert head-to-head stats for player1 vs player2.

    Mirrors update_pl_stats but restricted to games between the two
    players; overtime means winningScore > 21.
    """
    for player in playerNames.find({'name': player1}):
        pname = player['name']
        # Bug fix: the inner loop used to rebind the `player2` parameter as
        # its loop variable; bind the opponent document to its own name.
        for opp_doc in playerNames.find({'name': player2}):
            opp = opp_doc['name']
            win_q = {'winner': pname, 'loser': opp}
            loss_q = {'loser': pname, 'winner': opp}
            either_q = {'$or': [win_q, loss_q]}
            # count_documents replaces Cursor.count() (removed, PyMongo 4).
            vs_games = games.count_documents(either_q)
            vs_wins = games.count_documents(win_q)
            vs_losses = games.count_documents(loss_q)
            # One pass per role gathers scores and margins.
            scores, scoresw, marginsv = [], [], []
            for game in games.find(win_q):
                scores.append(game['winningScore'])
                scoresw.append(game['winningScore'])
                marginsv.append(game['winningScore'] - game['losingScore'])
            scoresl, marginsd = [], []
            for game in games.find(loss_q):
                scores.append(game['losingScore'])
                scoresl.append(game['losingScore'])
                marginsd.append(game['winningScore'] - game['losingScore'])
            if vs_games == 0:
                vs_winpct = 0
                vs_avg = 0
                vs_least = 0
                vs_most = 0
            else:
                vs_winpct = float('%.2f' % (vs_wins / vs_games * 100))
                vs_avg = float('%.2f' % (sum(scores) / vs_games))
                vs_least = min(scores)
                vs_most = max(scores)
            if vs_wins == 0:
                vs_avg_scorew = 0
                vs_avg_winby = 0
            else:
                vs_avg_scorew = float('%.2f' % (sum(scoresw) / vs_wins))
                vs_avg_winby = float('%.2f' % (sum(marginsv) / vs_wins))
            if vs_losses == 0:
                vs_avg_scorel = 0
                vs_avg_loseby = 0
            else:
                vs_avg_scorel = float('%.2f' % (sum(scoresl) / vs_losses))
                vs_avg_loseby = float('%.2f' % (sum(marginsd) / vs_losses))
            vs_ot = games.count_documents(dict(either_q, winningScore={'$gt': 21}))
            vs_otw = games.count_documents(dict(win_q, winningScore={'$gt': 21}))
            vs_otl = games.count_documents(dict(loss_q, winningScore={'$gt': 21}))
            # vs_otw == 0 also covers vs_ot == 0, so the division is safe.
            vs_ot_pct = 0 if vs_otw == 0 else float('%.2f' % (vs_otw / vs_ot * 100))
            # Streaks over head-to-head games in collection order; the [0]
            # seed keeps max() safe when there are no games, and a single
            # pass replaces the two original scans.
            wcount, lcount = 0, 0
            wstreaks, lstreaks = [0], [0]
            for game in games.find(either_q):
                if game['winner'] == pname:
                    wcount += 1
                    wstreaks.append(wcount)
                    lstreaks.append(lcount)
                    lcount = 0
                else:
                    lcount += 1
                    lstreaks.append(lcount)
                    wstreaks.append(wcount)
                    wcount = 0
            vs_win_streak = max(wstreaks)
            vs_loss_streak = max(lstreaks)
            data = {'name': pname,
                    'opponent': opp,
                    'Games': vs_games,
                    'Wins': vs_wins,
                    'Losses': vs_losses,
                    'Win %': vs_winpct,
                    'Average Score': vs_avg,
                    'Lowest Score': vs_least,
                    'Highest Score': vs_most,
                    'Average Margin of Victory': vs_avg_winby,
                    'Average Score of Win': vs_avg_scorew,
                    'Average Margin of Defeat': vs_avg_loseby,
                    'Average Score of Loss': vs_avg_scorel,
                    'Games to OT': vs_ot,
                    'Games Won in OT': vs_otw,
                    'Games Lost in OT': vs_otl,
                    'OT Win %': vs_ot_pct,
                    'Longest Win Streak': vs_win_streak,
                    'Longest Losing Streak': vs_loss_streak}
            # Filter on the (player, opponent) pair so each matchup keeps a
            # single document instead of accumulating snapshots.
            vs_stats.replace_one({'name': pname, 'opponent': opp}, data,
                                 upsert=True)
| laxnumber13/pingpong-Python | pingpong.py | pingpong.py | py | 18,945 | python | en | code | 1 | github-code | 13 |
41490232045 | import sqlite3
from . import connect
async def get_user_list(program: str) -> list:
    """Return every row from the `program` table in sending.db.

    NOTE(review): table names cannot be bound as SQL parameters, so
    `program` is interpolated straight into the statement -- safe only if
    callers pass a trusted/whitelisted table name; confirm upstream
    validation.
    """
    table_query = f"""SELECT * from {program}"""
    cursor, connection = await connect.make_connection("hseabitbot/user/sending.db")
    try:
        cursor.execute(table_query)
        content = cursor.fetchall()
    finally:
        # Connection is released even when the query fails.
        await connect.close_connection(cursor, connection)
    return content
async def get_user_state_id(user_id: int) -> str:
    """Look up the state_id stored for a user in user_id.db.

    Args:
        user_id: primary key in main_table.

    Returns:
        The state_id of the first matching row.

    Raises:
        IndexError: when no row exists for `user_id`.
    """
    # Bind the id as a qmark parameter instead of f-string interpolation:
    # avoids SQL injection and lets sqlite handle value quoting.
    table_query = "SELECT state_id FROM main_table WHERE id = ?;"
    cursor, connection = await connect.make_connection("hseabitbot/user/user_id.db")
    try:
        cursor.execute(table_query, (user_id,))
        content = cursor.fetchall()
    finally:
        await connect.close_connection(cursor, connection)
    return content[0][0]
| playerr17/hseabitbotv2 | hseabitbot/db_commands/get_info.py | get_info.py | py | 788 | python | en | code | 0 | github-code | 13 |
def solution(s):
    """Alternate character case per word: even indices upper, odd lower.

    The index restarts at 0 for every space-separated word; spacing is
    preserved because the split/join both use a single-space separator.

    >>> solution("try hello world")
    'TrY HeLlO WoRlD'
    """
    def weird(word):
        # Case depends on the position inside the word, not the sentence.
        return ''.join(c.upper() if i % 2 == 0 else c.lower()
                       for i, c in enumerate(word))

    # str.join avoids the quadratic += concatenation and the trailing-space
    # trim of the original; the builtin name `list` is no longer shadowed.
    return ' '.join(weird(word) for word in s.split(' '))
#์ถ์ฒ: ํ๋ก๊ทธ๋๋จธ์ค ์ฝ๋ฉ ํ
์คํธ ์ฐ์ต, https://programmers.co.kr/learn/challenges | CKtrace/Programmers | Programmers Level 1/์ด์ํ_๋ฌธ์_๋ง๋ค๊ธฐ.py | ์ด์ํ_๋ฌธ์_๋ง๋ค๊ธฐ.py | py | 352 | python | ko | code | 0 | github-code | 13 |
3845365899 | from random import choice
from data import question_data
from question_model import Question
from quiz_brain import QuizBrain
def play_quiz():
    """Build Question objects from `question_data` and run the quiz loop."""
    question_bank = []
    for question in question_data:
        new_question = Question(question['question'], question['correct_answer'])
        question_bank.append(new_question)
    quiz_brainner = QuizBrain(question_bank)
    # Ask until the bank is exhausted, then report the final score.
    while quiz_brainner.still_has_questions():
        quiz_brainner.next_question()
    quiz_brainner.end_quiz()
# Run the quiz as a module-level side effect of importing/executing this file.
play_quiz()
| gteachey/100daysofcode_python | day017/main.py | main.py | py | 665 | python | en | code | 0 | github-code | 13 |
37942049415 | from turtle import Turtle
from random import choice, randint
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 10
class CarManager:
    """Spawns and advances the oncoming cars for the crossing game."""
    def __init__(self):
        # Cars start slow; next_level() accelerates them.
        self.step = STARTING_MOVE_DISTANCE
        self.car_list = []
        self.append_cars()
    def append_cars(self):
        # 1-in-6 chance per call to spawn a car at a random height on the
        # right edge, keeping traffic density random rather than constant.
        is_car = randint(1, 6)
        if is_car == 1:
            new_car = Turtle()
            new_car.shape("square")
            new_car.penup()
            new_car.shapesize(stretch_wid=1, stretch_len=2)
            new_car.color(choice(COLORS))
            new_car.goto(300, randint(-255, 255))
            self.car_list.append(new_car)
    def move_car(self):
        # Possibly spawn a new car, then slide every car leftwards one step.
        self.append_cars()
        for car in self.car_list:
            car.backward(self.step)
    def next_level(self):
        # Called when the player reaches the top: speed all cars up.
        self.step += MOVE_INCREMENT
| Lukasz2506/The_Turtle_Crossing_Capstone | car_manager.py | car_manager.py | py | 872 | python | en | code | 0 | github-code | 13 |
31831512579 | from collections import deque
import pygame
from Point import Point
class Snake:
    """Aggregates the snake's body, movement vector and input controller."""
    def __init__(self, startPointX, startPointY):
        # Grid coordinates of the head's starting cell.
        self.snakeStartPoint = Point(startPointX, startPointY)
        self.headcolor = None  # assigned later, outside this class
        self.bodyColor = None  # assigned later, outside this class
        self.body = Body(self.snakeStartPoint)
        # Velocity in cells per tick; the snake starts moving left.
        self.snakeXMovement = -1
        self.snakeYMovement = 0
        self.snakeController = SnakeController(self)
        self.currentDirection = "left"
        self.lastDirection = ""
self.lastDirection = ""
class Body:
    """The snake body as a deque of Points, head at index 0."""
    def __init__(self, startingPoint):
        # Initial body: head plus `length` segments trailing to the right
        # (each new segment is one cell right of the previous one).
        self.length = 3
        self.deque = deque([startingPoint])
        for i in range(self.length):
            x = self.deque[i].x
            y = self.deque[i].y
            x += 1
            self.deque.append(Point(x, y))
    def rotate(self, xSpeed, ySpeed):
        # Advance by recycling the tail segment into the new head position:
        # current head plus the velocity vector.
        temp = self.deque.pop()
        temp.x = self.deque[0].x
        temp.y = self.deque[0].y
        temp.x += xSpeed
        temp.y += ySpeed
        self.deque.appendleft(temp)
    def grow(self, direction):
        # Append a new segment offset from the current tail along the given
        # direction, then bump the tracked length.
        incr = 1
        if self.length == 1:
            # NOTE(review): the doubled offset for a single-segment body
            # looks intentional but is worth confirming.
            incr = 2
        newPoint = Point(self.deque[self.length].x,self.deque[self.length].y)
        if direction == "up":
            newPoint.y -= incr
        elif direction == "down":
            newPoint.y += incr
        elif direction == "left":
            newPoint.x -= incr
        elif direction == "right":
            newPoint.x += incr
        self.deque.append(newPoint)
        self.length += 1
class SnakeController:
    """Applies direction commands to a Snake, rejecting 180-degree turns."""
    def __init__(self, snake):
        self.snake = snake
        self.lastTicks = pygame.time.get_ticks()
    def changeDirection(self, direction):
        """Update the snake's velocity and heading for `direction`.

        A turn is ignored when it would reverse the current heading;
        'paused' always zeroes the velocity; unknown commands are no-ops.
        """
        # command -> (dx, dy, heading that forbids this command)
        moves = {
            "up": (0, -1, "down"),
            "down": (0, 1, "up"),
            "left": (-1, 0, "right"),
            "right": (1, 0, "left"),
            "paused": (0, 0, None),
        }
        if direction not in moves:
            return
        dx, dy, blocked_from = moves[direction]
        if blocked_from is not None and self.snake.currentDirection == blocked_from:
            return
        self.snake.snakeXMovement = dx
        self.snake.snakeYMovement = dy
        self.snake.currentDirection = direction
| ftherien99/_SnA.I.ke | dev/Snake.py | Snake.py | py | 2,812 | python | en | code | 0 | github-code | 13 |
25403483279 | import csv
try:
from urllib.request import Request, urlopen
from urllib.error import URLError
except ImportError:
from urllib2 import Request, urlopen, URLError
from codecs import iterdecode
import smf
def query_morningstar(self, exchange, symbol, url_ending):
    """Query Morningstar for the data we want.

    Returns a csv.reader over the response body on success, or an error
    string/tuple on failure (the error flags on `self` are set so callers
    can tell the difference).
    """
    # NOTE(review): the '®ion' below looks like mojibake of '&region'
    # (HTML '&reg' entity decoded somewhere upstream).  All call sites use
    # the same string so the equality checks still match, but the URL sent
    # to Morningstar contains the '®' character -- confirm against the feed.
    #Determine whether we want key ratios or financials & query Morningstar.
    if url_ending == '®ion=usa&culture=en-US&cur=USD&order=desc':
        url = ('http://financials.morningstar.com/ajax/exportKR2CSV.html?'
               '&callback=?&t=%s:%s%s' % (exchange, symbol, url_ending))
    else:
        url = ('http://financials.morningstar.com/ajax/ReportProcess4CSV.html?'
               '&t=%s:%s%s' % (exchange, symbol, url_ending))
    req = Request(url)
    #Catch errors.
    try:
        response = urlopen(req)
    except URLError as e:
        # Flag both caches as stale so callers re-query next time.
        self.keyratio_flag[0] = '1'
        self.financial_flag[0] = '1'
        if hasattr(e, 'reason'):
            return e.reason
        elif hasattr(e,'code'):
            return 'Error', e.code
    #Verify response csv isn't empty.
    sniff = response.readline()
    # NOTE(review): readline() returns bytes here, so str(sniff) is "b''"
    # and never equals '' -- this empty-response guard can never fire, and
    # the sniffed line is consumed either way.  Worth confirming against
    # the live feed before changing.
    if str(sniff) == '':
        self.keyratio_flag[0] = '1'
        self.financial_flag[0] = '1'
        return 'Not Available'
    #Discard first line if called by fetch_keyratios().
    if url_ending == '®ion=usa&culture=en-US&cur=USD&order=desc':
        response.readline()
    return csv.reader(iterdecode(response,'utf-8'))
def fetch_keyratios(self, ticker, datacode):
    """Get Morningstar key ratio data and return desired element to user.

    The parsed csv is cached on `self.keyratio_data`, keyed by the flag
    pair (error flag, last ticker); on a failed query the error value from
    query_morningstar is returned instead of data.
    """
    #Check for sane user input for datacode.
    if datacode < 1 or datacode > 946:
        return 'Invalid Datacode'
    #Check whether flags indicate that we already have the data we need.
    if self.keyratio_flag[0] == '1' or self.keyratio_flag[1] != ticker:
        #Query NASDAQ for exchange and check for errors.
        exchange = smf.find_exchange(self, ticker)
        if exchange not in ['XNYS', 'XASE', 'XNAS']:
            return exchange
        #Query Morningstar for key ratios and check for errors.
        url_ending = '®ion=usa&culture=en-US&cur=USD&order=desc'
        self.keyratio_reader = query_morningstar(self, exchange, ticker,
                                                 url_ending)
        if self.keyratio_flag[0] == '1':
            return self.keyratio_reader
        #Set flags and read data into memory upon successful query.
        else:
            self.keyratio_flag[0] = '0'
            self.keyratio_flag[1] = ticker
            self.keyratio_data = [row for row in self.keyratio_reader]
            #Append day for ISO standard dates.
            for idx in range (2, 12):
                self.keyratio_data[0][idx] += '-01'
    #Check for existing datacode -> value map, if none exists then create it.
    if not hasattr(self, 'key_datacode_map'):
        self.key_datacode_map = keyratio_datacode_map()
    #Lookup and return value from map.
    row, col = self.key_datacode_map[datacode]
    element = self.keyratio_data[row][col]
    #Strip , from str so we can convert to float
    return element.replace(',','')
def keyratio_datacode_map():
    """Create a dictionary mapping datacodes to (row, col) in data."""
    # Rows of the key-ratio sheet that carry no useful data.
    unusable_rows = {16, 17, 18, 28, 29, 38, 39, 40, 41, 46, 51, 56, 61, 62,
                     63, 69, 70, 71, 92, 93, 98, 99, 100}
    usable_rows = [r for r in range(109) if r not in unusable_rows]
    table = {}
    for datacode in range(1, 947):
        # Eleven data columns per row; column 0 holds the row label.
        row_index, col_index = divmod(datacode - 1, 11)
        table[datacode] = (usable_rows[row_index], col_index + 1)
    return table
def fetch_financials(self, fin_type, ticker, datacode):
    """Get Morningstar financial data and return desired element to user.

    `fin_type` 'qtr' requests quarterly income-statement data, anything
    else requests annual data.  NOTE(review): both paths cache into the
    same self.financial_data attribute even though the freshness flags are
    tracked separately -- a qtr fetch overwrites cached annual data;
    confirm this is intended.
    """
    if datacode < 1 or datacode > 162:
        return 'Invalid Datacode'
    #Check whether flags indicate that we already have the data we need.
    flags = self.financial_flag
    if fin_type == 'qtr':
        flags = self.qfinancial_flag
    if flags[0] == '1' or flags[1] != ticker:
        #Query NASDAQ for exchange and check for errors.
        exchange = smf.find_exchange(self,ticker)
        if exchange not in ['XNYS', 'XASE', 'XNAS']:
            return exchange
        #Query Morningstar for financials and check for errors.
        if fin_type == 'qtr':
            url_ending = ('®ion=usa&culture=en-US&cur=USD&reportType=is'
                          '&period=3&dataType=A&order=desc&columnYear=5&rounding=3'
                          '&view=raw&r=113199&denominatorView=raw&number=3')
        else:
            url_ending = ('®ion=usa&culture=en-US&cur=USD&reportType=is'
                          '&period=12&dataType=A&order=desc&columnYear=5&rounding=3'
                          '&view=raw&r=113199&denominatorView=raw&number=3')
        financial_reader = query_morningstar(self, exchange, ticker, url_ending)
        if flags[0] == '1':
            return financial_reader
        #Set flags and read data into memory upon successful query.
        else:
            flags[0] = '0'
            flags[1] = ticker
            financial_data_setup(self, financial_reader)
    #Check for existing datacode -> value map, if none exists then create it.
    if not hasattr(self, 'fin_datacode_map'):
        self.fin_datacode_map = financial_datacode_map()
    #Lookup and return value from map.
    row, col = self.fin_datacode_map[datacode]
    element = self.financial_data[row][col]
    #Strip , from str so we can convert to float
    return element.replace(',','')
def financial_data_setup(self, financial_reader):
    """Setup our own data structure since Morningstar csv format varies.

    Builds self.financial_data with exactly one row per header_list entry
    (in that order), inserting 'No Data' placeholder rows for headers the
    csv does not contain, plus the leading TTM header row.
    """
    header_list = ['Revenue', 'Cost of revenue', 'Gross profit',
                   'Research and development', 'Sales, General and '
                   'administrative', 'Depreciation and amortization',
                   'Interest expense', 'Other operating expenses',
                   'Total costs and expenses', 'Total operating expenses',
                   'Operating income', 'Interest Expense',
                   'Other income (expense)', 'Income before taxes',
                   'Income before income taxes', 'Provision for income taxes',
                   'Net income from continuing operations',
                   'Net income from discontinuing ops', 'Other', 'Net income',
                   'Net income available to common shareholders', 'Basic',
                   'Diluted', 'EBITDA']
    #For row in Morningstar csv, if row[0] is in our list add the row.
    #Otherwise add an empty row.
    self.financial_data = []
    raw_financial_data = [row for row in financial_reader]
    rfd_header = [h[0] for h in raw_financial_data]
    ttm_count = 0
    for d in header_list:
        for i in raw_financial_data:
            #Handle corner case of first row.
            try:
                if i[1] == 'TTM' and ttm_count == 0:
                    self.financial_data.append(i)
                    ttm_count = 1
                    continue
            #Skip appending Morningstar categories ie: 'Costs and expenses'.
            # NOTE(review): this bare except swallows any error from i[1],
            # not just short rows -- worth narrowing to IndexError.
            except:
                continue
            #Append our data and placeholder rows
            if i[0] == d:
                self.financial_data.append(i)
            elif d not in rfd_header:
                # Appending d to rfd_header ensures only one placeholder
                # row is emitted per missing header.
                rfd_header.append(d)
                self.financial_data.append(['No Data', 'N/A', 'N/A', 'N/A',
                                            'N/A', 'N/A','N/A'])
    #Append day for ISO standard dates.
    for idx in range (2, 7):
        self.financial_data[0][idx] += '-01'
def financial_datacode_map():
    """Create a dictionary mapping datacodes to (row, col) in data."""
    # Six data columns per row; column 0 of each row holds the label.
    table = {}
    for code in range(1, 163):
        row_index, col_index = divmod(code - 1, 6)
        table[code] = (row_index, col_index + 1)
    return table
1288838401 | from langdetect import detect
def sent_detection(sent, direct):
    """Language-ID filter adapted for the fasttext pipeline.

    Accepts `sent` when it detects as the source language of the
    `direct` ("src-tgt") pair, or as any language outside the block list
    (the target language plus a few closely related Slavic languages).
    """
    source_lang, target_lang = direct.split('-')
    blocked = [target_lang, 'uk', 'bg', 'cs', 'mk']
    detected = detect(sent)
    if detected == source_lang:
        return True
    return detected not in blocked
if __name__ == '__main__':
    # Smoke test: English text checked against the en-ru direction.
    text = 'hello world!'
    print(sent_detection(text, 'en-ru'))
| eleldar/Translator | OpenAPI/api/tools/language_detection.py | language_detection.py | py | 393 | python | en | code | 0 | github-code | 13 |
11505751522 |
# The sum of the squares of the first ten natural numbers is,
# 1^2 + 2^2 + ... + 10^2 = 385
#
# The square of the sum of the first ten natural numbers is,
# (1 + 2 + ... + 10)^2 = 55^2 = 3025
#
# Hence the difference between the sum of the squares of the
# first ten natural numbers and the square of the sum is
# 3025 - 385 = 2640
#
# Find the difference between the sum of the squares of the
# first one hundred natural numbers and the square of the sum.
import math

# Closed-form (Faulhaber) versions of the two quantities:
#   sum of squares: n(n+1)(2n+1)/6    square of sum: (n(n+1)/2)^2
n = 100
sumsq = n * (n + 1) * (2 * n + 1) // 6
sqsum = (n * (n + 1) // 2) ** 2
print("sumsq=%u sqsum=%u diff=%u" % (sumsq, sqsum, sqsum - sumsq))
| rflynn/euler | 06.py | 06.py | py | 659 | python | en | code | 1 | github-code | 13 |
43243582029 | import os
import asyncio
import glob
import unittest
from unittest.mock import (AsyncMock,
patch)
from merge_files.mergers.async_ import AsyncFileMerger
from merge_files.utils import list_files
class TestAsyncFileMerger(unittest.TestCase):
    """
    A test suite for the AsyncFileMerger class
    """
    def setUp(self):
        """
        Set up the test case by initializing test data and creating a file merger instance
        """
        self.input_dir = os.path.join(os.getcwd(), 'tests', 'data', 'input')
        self.input_list = list_files(self.input_dir)
        self.filename = 'test_output.dat'
        self.output_dir = os.path.join(os.getcwd(), 'tests', 'data', 'output')
        self.output_file = os.path.join(self.output_dir, self.filename)
        self.chunk_size_file = 3
        self.chunk_size_line = 2
        self.file_merger = AsyncFileMerger(self.input_list, self.output_dir, self.filename,
                                           self.chunk_size_file, self.chunk_size_line)
        self.chunks = self.file_merger._divide_files_into_chunks()
    def tearDown(self):
        """
        Clean up the test case by deleting the output file if it exists
        """
        if os.path.exists(self.output_file):
            os.remove(self.output_file)
    @patch.object(AsyncFileMerger, '_create_intermediate')
    def test_split_into_files(self, mock_create_intermediate):
        """
        Test that files are merged asynchronously and intermediate files are created
        """
        # Mock the coroutine so that it immediately returns
        mock_create_intermediate.coro.return_value = None
        # NOTE(review): this loop is never closed; try/finally with
        # loop.close() (or asyncio.run) would avoid ResourceWarning noise.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Call _split_into_files
        _ = loop.run_until_complete(self.file_merger._split_into_files())
        file_extension = "*.dat"
        # Combine the directory path with the file extension using glob
        files = glob.glob(os.path.join(self.input_dir, file_extension))
        # Count the number of files
        num_files = len(files)
        # Check that create_intermediate was called the expected number of times
        expected_num_calls = (num_files + self.chunk_size_file - 1) // self.chunk_size_file
        self.assertEqual(mock_create_intermediate.call_count, expected_num_calls)
        # Check that create_intermediate was called with the expected arguments
        calls = [call[0] for call in mock_create_intermediate.call_args_list]
        simplified_calls = [([os.path.basename(f) for f in file_list], os.path.basename(
            output_file)) for (file_list, output_file) in calls]
        expected_calls = [
            (['file1.dat', 'file2.dat', 'file3.dat'], 'test_output.dat.0'),
            (['file4.dat', 'file5.dat'], 'test_output.dat.1')
        ]
        self.assertCountEqual(simplified_calls, expected_calls)
    @patch.object(AsyncFileMerger, '_split_into_files', new_callable=AsyncMock)
    @patch.object(AsyncFileMerger, '_merge_intermediate_files')
    def test_merge_files_w_chunks(self, mock_merge_intermediate_files, mock_split_into_files):
        """
        Test that files are merged and intermediate files are merged with the expected argument
        """
        mock_split_into_files.return_value = []
        # Call merge_files
        self.file_merger.merge_files()
        # Assert that merge_files_async was called
        mock_split_into_files.assert_awaited_once()
        # Check that merge_intermediate_files was called with the expected argument
        mock_merge_intermediate_files.assert_called_once_with(
            [], delete=True)
if __name__ == '__main__':
    unittest.main()
| redrussianarmy/file-merger | tests/test_async.py | test_async.py | py | 3,705 | python | en | code | 2 | github-code | 13 |
34262000291 | from flask import Flask, request, render_template, redirect, url_for
import datetime
import mysql.connector
from mysql.connector import Error
app = Flask(__name__, template_folder="templates")
def create_table():
    """Create the `posts` table in mydatabase if it does not exist yet.

    NOTE(review): connection credentials are hard-coded here and repeated
    in every view -- they belong in configuration/environment variables.
    """
    connection = mysql.connector.connect(
        host='localhost',
        user='dina',
        password='28MySql!',
        database='mydatabase'
    )
    try:
        cursor = connection.cursor()
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS posts (
                id INT AUTO_INCREMENT PRIMARY KEY,
                description VARCHAR(1000),
                datetime DATE
            )
        """)
        print("Table 'posts' created or already exists.")
    except Error as e:
        print("Error while creating or connecting to the 'posts' table:", e)
    finally:
        # NOTE(review): if connection.cursor() itself raised, `cursor` is
        # unbound here and this finally raises NameError; the same pattern
        # repeats in every view below.
        cursor.close()
        connection.close()
        print("MySQL connection is closed")
@app.route("/")
def home():
    """Render the list of all posts; ensures the table exists first."""
    create_table()
    connection = mysql.connector.connect(
        host='localhost',
        user='dina',
        password='28MySql!',
        database='mydatabase'
    )
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT id, description, datetime FROM posts")
        raw_rows = cursor.fetchall()
        # Template expects dicts, not tuples.
        rows = [{"id": i[0], "description": i[1], "datetime": i[2]} for i in raw_rows]
        return render_template('list.html', list=rows)
    except Error as e:
        # NOTE(review): only printing here means the view falls through and
        # returns None, which Flask turns into a 500 -- confirm intended.
        print("Error while fetching posts from the 'posts' table:", e)
    finally:
        cursor.close()
        connection.close()
        print("MySQL connection is closed")
@app.route("/create", methods=["GET", "POST"])
def view_create():
    """Show the create form (GET) or insert a new post and redirect (POST)."""
    if request.method == "GET":
        return render_template('create.html')
    elif request.method == "POST":
        description = request.form.get("description")
        date = datetime.datetime.now().strftime("%Y-%m-%d")
        connection = mysql.connector.connect(
            host='localhost',
            user='dina',
            password='28MySql!',
            database='mydatabase'
        )
        try:
            cursor = connection.cursor()
            # Parameterized insert: values are bound, not interpolated.
            query = "INSERT INTO posts (description, datetime) VALUES (%s, %s)"
            values = (description, date)
            cursor.execute(query, values)
            connection.commit()
            print("Data inserted successfully.")
        except Error as e:
            connection.rollback()
            print("Error while inserting data into the 'posts' table:", e)
        finally:
            cursor.close()
            connection.close()
            print("MySQL connection is closed")
        # Redirect regardless of success so a refresh cannot double-post.
        return redirect(url_for('home'))
@app.route("/change", methods=["GET", "POST"])
def view_change():
if request.method == "GET":
pk = request.args.get('pk', default=0, type=int)
connection = mysql.connector.connect(
host='localhost',
user='dina',
password='28MySql!',
database='mydatabase'
)
try:
cursor = connection.cursor()
query = "SELECT id, description, datetime FROM posts WHERE id = %s"
values = (pk,)
cursor.execute(query, values)
row = cursor.fetchone()
if row:
post = {"id": row[0], "description": row[1], "datetime": row[2]}
return render_template('change.html', post=post)
else:
return redirect(url_for('home'))
except Error as e:
print("Error while fetching post from the 'posts' table:", e)
finally:
cursor.close()
connection.close()
print("MySQL connection is closed")
elif request.method == "POST":
pk = request.form.get("pk")
description = request.form.get("description")
date = datetime.datetime.now().strftime("%Y-%m-%d")
connection = mysql.connector.connect(
host='localhost',
user='dina',
password='28MySql!',
database='mydatabase'
)
try:
cursor = connection.cursor()
query = "UPDATE posts SET description = %s, datetime = %s WHERE id = %s"
values = (description, date, pk)
cursor.execute(query, values)
connection.commit()
print("Data updated successfully.")
except Error as e:
connection.rollback()
print("Error while updating data in the 'posts' table:", e)
finally:
cursor.close()
connection.close()
print("MySQL connection is closed")
return redirect(url_for('home'))
@app.route("/delete", methods=["GET"])
def view_delete():
pk = request.args.get('pk', default=0, type=int)
connection = mysql.connector.connect(
host='localhost',
user='dina',
password='28MySql!',
database='mydatabase'
)
try:
cursor = connection.cursor()
query = "DELETE FROM posts WHERE id = %s"
values = (pk,)
cursor.execute(query, values)
connection.commit()
print("Data deleted successfully.")
except Error as e:
connection.rollback()
print("Error while deleting data from the 'posts' table:", e)
finally:
cursor.close()
connection.close()
print("MySQL connection is closed")
return redirect(url_for('home'))
# Run the development server on all interfaces.
# NOTE(review): debug=True enables the Werkzeug debugger (arbitrary code
# execution through the browser console) -- never use it in production.
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=8000, debug=True)
| dinywka/flask_todo | main.py | main.py | py | 5,510 | python | en | code | 0 | github-code | 13 |
6142130941 | """
Module information
"""
class Checkout:
    """Point-of-sale checkout with per-item prices and quantity discounts."""

    class Discount:
        """A bundle discount: `numItems` units for a total of `price`."""

        def __init__(self, numItems: int, price: float):
            self.numItems = numItems
            self.price = price

    def __init__(self):
        self.prices = {}     # item name -> unit price
        self.discounts = {}  # item name -> Discount
        self.items = {}      # item name -> quantity scanned

    def addItemPrice(self, item: str, price: float) -> None:
        """Register (or overwrite) the unit price for `item`."""
        self.prices[item] = price

    def addItem(self, item: str) -> None:
        """Scan one unit of `item`; raises if no price was registered."""
        if item not in self.prices:
            raise Exception("Bad Item")
        if item in self.items:
            self.items[item] += 1
        else:
            self.items[item] = 1

    def calculateTotal(self) -> float:
        """Total for all scanned items, applying any registered discounts."""
        total = 0
        for item, cnt in self.items.items():
            total += self.calculateItemTotal(item, cnt)
        return total

    def calculateItemTotal(self, item: str, cnt: int) -> float:
        """Subtotal for `cnt` units of `item`, discounted when applicable."""
        total = 0
        if item in self.discounts:
            discount = self.discounts[item]
            if cnt >= discount.numItems:
                total += self.calculateItemDiscountedTotal(item, cnt, discount)
            else:
                total += self.prices[item] * cnt
        else:
            total += self.prices[item] * cnt
        return total

    def calculateItemDiscountedTotal(self, item: str, cnt: int, discount) -> float:
        """Price `cnt` units as whole discount bundles plus leftovers at unit price."""
        total = 0
        # Bug fix: use floor division. `/` produced a fractional bundle
        # count (e.g. 4 items on a 3-for-X deal charged 4/3 bundles).
        numDiscounts = cnt // discount.numItems
        total += numDiscounts * discount.price
        remaining = cnt % discount.numItems
        total += remaining * self.prices[item]
        return total

    def addDiscount(self, item: str, numItems: int, price: float) -> None:
        """Register a bundle discount for `item`: `numItems` for `price`."""
        self.discounts[item] = self.Discount(numItems, price)
| michael-c-hoffman/TestDrivenDevelopmentPythonPytest | checkout/checkout/__init__.py | __init__.py | py | 1,674 | python | en | code | 0 | github-code | 13 |
from functools import partial
from typing import Callable, Dict, Optional, Union

import torch
import torch.nn as nn
from omegaconf import OmegaConf
from ray.air.checkpoint import Checkpoint
from ray.air.config import DatasetConfig, RunConfig, ScalingConfig
from ray.data.preprocessor import Preprocessor
from ray.train.torch import TorchTrainer as TorchTrainerBase
from ray.train.torch.config import TorchConfig
from ray.train.trainer import GenDataset
class TorchTrainer(TorchTrainerBase):
    """Ray ``TorchTrainer`` that injects model/optimizer/scheduler factories
    into the per-worker training loop.

    NOTE(review): the binding lambda below takes no arguments, so the
    ``train_loop_config`` dict forwarded to the base class is never passed
    into ``train_loop_per_worker`` here -- confirm the loop reads its
    configuration some other way.
    """
    def __init__(
        self,
        model_factory: Callable[[], nn.Module],
        optimizer_factory: Callable[[nn.Module], torch.optim.Optimizer],
        lr_scheduler_factory: Callable[[torch.optim.Optimizer], torch.optim.lr_scheduler.LRScheduler],
        train_loop_per_worker: Union[Callable[[], None], Callable[[Dict], None]],
        *,
        train_loop_config: Optional[Dict] = None,
        torch_config: Optional[TorchConfig] = None,
        scaling_config: Optional[ScalingConfig] = None,
        dataset_config: Optional[Dict[str, DatasetConfig]] = None,
        run_config: Optional[RunConfig] = None,
        datasets: Optional[Dict[str, GenDataset]] = None,
        preprocessor: Optional[Preprocessor] = None,
        resume_from_checkpoint: Optional[Checkpoint] = None,
    ):
        # Convert an OmegaConf container to a plain dict for Ray.
        # NOTE(review): assumes `datasets`, when provided, is an OmegaConf
        # container -- OmegaConf.to_container() rejects plain dicts.
        if datasets is not None:
            datasets: Dict[str, GenDataset] = OmegaConf.to_container(datasets)
        super().__init__(
            # Close over the factories; Ray invokes this on each worker.
            train_loop_per_worker=lambda: train_loop_per_worker(
                model_factory=model_factory,
                optimizer_factory=optimizer_factory,
                lr_scheduler_factory=lr_scheduler_factory,
            ),
            train_loop_config=train_loop_config,
            torch_config=torch_config,
            scaling_config=scaling_config,
            dataset_config=dataset_config,
            run_config=run_config,
            datasets=datasets,
            preprocessor=preprocessor,
            resume_from_checkpoint=resume_from_checkpoint,
        )
| vivym/x2r | x2r/trainers/pytorch/trainer.py | trainer.py | py | 2,019 | python | en | code | 1 | github-code | 13 |
import csv
from datetime import datetime

import matplotlib.pyplot as plt
filename = "data/sitka_weather_2018_simple.csv"
with open(filename) as f:
reader = csv.reader(f)
header_row = next(reader)
for index, column_header in enumerate(header_row):
print(index, column_header)
# Get date and rain from file
dates, rains = [], []
for row in reader:
current_date = datetime.strptime(row[2], '%Y-%m-%d')
rain = float(row[3])
dates.append(current_date)
rains.append(rain)
plt.style.use('seaborn')
fig, ax = plt.subplots()
ax.plot(dates, rains, linewidth=3)
fig.autofmt_xdate()
ax.set_title("The annual rain fall amount in Stika 2018", fontsize=24)
ax.set_xlabel('')
ax.set_ylabel('Rain fall', fontsize=12)
plt.show()
| fadiabji/data_science | stika_rainfall.py | stika_rainfall.py | py | 816 | python | en | code | 0 | github-code | 13 |
import random

import pygame
from pygame.locals import *
pygame.init()
# 600x600 game window.
screen=(pygame.display.set_mode((600,600)))
pygame.display.set_caption('ria')
# Color palette (RGB tuples).
red=(255,0,0)
green=(0,255,0)
blue=(0,0,255)
white=(250,250,250)
yellow=(255,255,0)
# Bird position, pipe geometry and score (all in pixels).
birdx=300
birdy=300
xtop=400
ytop=0
toplen=200
xdown=400
score=0
birdonscreen=0
toppipe=0
bottompipe=0
ydown=500#because the legs of the bottom is 600 and the pole' lenght 200, 600-200=400
downlen=200
# Sprites, scaled to fit the window.
bird=pygame.image.load('bird2.png')
bird=pygame.transform.scale(bird,(50,50))
background=pygame.image.load('flappy background.jpg')
background=pygame.transform.scale(background,(600,600))
# Possible y-positions for the bottom pipe after it recycles off-screen.
ydownlist=[300,400,500]
#hight of toppipe+ the hight of the gap=? the hight of the screen-?= the hight of the bottom pipe
while True:  # main game loop (idiom fix: was `while 1>0`)
    pygame.display.update()
    screen.fill(white)
    screen.blit(background,(0,0))
    # blit() returns the bird's rect, used below for collision tests.
    birdonscreen=screen.blit(bird,(birdx,birdy))
    birdy=birdy+1  # gravity: drift down one pixel per frame
    toppipe=pygame.draw.rect(screen,blue,(xtop,ytop,20,toplen))
    bottompipe=pygame.draw.rect(screen,blue,(xdown,ydown,20,downlen))
    # Scroll both pipes two pixels to the left each frame.
    xtop=xtop+-2
    xdown=xdown-2
    if xdown==0:  # bottom pipe cleared the screen: score and recycle it
        score=score+1
        xdown=600
    if xtop==0:  # top pipe cleared: recycle and randomize the gap
        ydown=random.choice(ydownlist)
        xtop=600
    if birdonscreen.colliderect(bottompipe):  # hit the bottom pipe
        print('your score was',score)
        pygame.quit()
        exit()
    if birdonscreen.colliderect(toppipe):  # hit the top pipe
        print('your score was',score)
        pygame.quit()
        exit()
    if birdy==600:  # fell to the bottom of the screen
        print('your score was',score)
        # Bug fix: this exit path skipped pygame.quit(), unlike every
        # other game-over path, leaving the display subsystem running.
        pygame.quit()
        exit()
    for event in pygame.event.get():
        if event.type==QUIT:  # window close button
            print('your score was',score)
            pygame.quit()
            exit()
        if event.type==KEYDOWN:
            if event.key==K_SPACE:
                birdy=birdy+-30  # flap: jump 30 pixels up
| RiaShitole/Coding-Python | flappy bird.py | flappy bird.py | py | 2,478 | python | en | code | 0 | github-code | 13 |
class EmptyQueueError(Exception):
    """Raised when dequeue() is called on an empty queue.

    Bug fix: this class previously did not subclass Exception, so
    `raise EmptyQueueError(...)` itself failed with
    "exceptions must derive from BaseException".
    """
    pass
class Node:
    """A singly linked node holding a payload value and its priority."""

    def __init__(self, value, pr):
        # Link is None until the node is threaded into a queue.
        self.link = None
        self.info = value
        self.priority = pr
class PriorityQueue:
    """Linked-list priority queue; lower priority numbers dequeue first.

    Items with equal priority are kept in insertion (FIFO) order.
    """

    def __init__(self):
        self.start = None

    def is_empty(self):
        """True when the queue holds no nodes."""
        return self.start is None

    def size(self):
        """Number of queued items, counted by walking the list."""
        count = 0
        node = self.start
        while node is not None:
            count += 1
            node = node.link
        return count

    def enqueue(self, data, data_priority):
        """Insert `data`, keeping the list sorted by ascending priority."""
        fresh = Node(data, data_priority)
        if self.is_empty() or data_priority < self.start.priority:
            # New head: queue is empty, or strictly more urgent than the head.
            fresh.link = self.start
            self.start = fresh
            return
        # Walk to the last node whose priority is <= the new one so that
        # equal priorities keep FIFO order.
        node = self.start
        while node.link is not None and node.link.priority <= data_priority:
            node = node.link
        fresh.link = node.link
        node.link = fresh

    def dequeue(self):
        """Remove and return the most urgent (lowest number) item."""
        if self.is_empty():
            raise EmptyQueueError("Queue is empty")
        value = self.start.info
        self.start = self.start.link
        return value

    def display(self):
        """Print the queue as `| item(priority) | ...`, or an empty notice."""
        if self.is_empty():
            print("Queue is empty")
            return
        print('\nQueue:\n')
        node = self.start
        queue = '|'
        while node is not None:
            queue = queue + ' ' + str(node.info) + '(' + str(node.priority) + ') |'
            node = node.link
        print(queue)
24512065612 | """
PATTERNS
Character Description Example Pattern Code Exammple Match
\d A digit file_\d\d file_25
\w Alphanumeric \w-\w\w\w A-b_1
\s White space a\sb\sc a b c
\D A non digit \D\D\D ABC
\W Non-alphanumeric \W\W\W\W\W *-+=)
\S Non-whitespace \S\S\S\S Yoyo
"""
import re
# Sample text and a literal digit-by-digit search for the phone number.
text = "My telephone number is 809-434-4322"
phone = re.search(r'\d\d\d-\d\d\d-\d\d\d\d', text)
"""
QUANTIFIERS
Character Description Example Pattern Code Exammple Match
+ Occurs one or more times Version \w-\w+ Version A-b1_1
{3} Occurs exactly 3 times \D{3} abc
{2,4} Occurs 2 to 4 times \d{2,4} 123
{3,} Occurs 3 or more \w{3,} anycharacters
\* Occurs zero or more times A\*B\*C* AAACC
? Once or none plurals? plural
"""
# Same search written with {n} quantifiers, then recompiled with capture
# groups so the three number fields can be extracted from the match.
# (Fix: the last line previously carried trailing corrupted text that
# broke the file.)
phone_pattern = re.search(r'\d{3}-\d{3}-\d{4}', text)
phone_pattern = re.compile(r'(\d{3})-(\d{3})-(\d{4})')
results = re.search(phone_pattern, text)
import data_reader
import resources
#CATMAP = event_dao.get_all_categories()
def get_domains_ids():
    """Build forward/reverse lookup tables mapping each domain to a compact
    integer id, assigned in the order the domains are read.
    """
    domain_to_id = dict()
    id_to_domain = dict()
    for domain in data_reader.get_domains(resources.DOMAIN_PATH):
        # Id = current table size: a repeated domain overwrites its entry
        # without growing the table, matching the original behavior.
        fresh_id = len(domain_to_id)
        domain_to_id[domain] = fresh_id
        id_to_domain[fresh_id] = domain
    return (domain_to_id, id_to_domain)
#(DOMAIN_TO_ID,ID_TO_DOMAIN) = get_domains_ids()
| mrtamb9/eventracking | static_resources.py | static_resources.py | py | 437 | python | en | code | 0 | github-code | 13 |
import numpy as np
def AbsorptionMarkov():
    """Analyse an absorbing Markov chain (state 1 absorbing, states 2-4 transient).

    Prints the fundamental matrix N = (I - Q)^-1, expected visit counts,
    average absorption time, and absorption probabilities B = N * R.
    Interactive: asks for a start state n and a target state j.
    NOTE(review): user-entered states are mapped into Q with `state - 2`,
    i.e. transient states are assumed to be numbered 2..4 -- confirm.
    """
    # 1.1 Transition matrix
    P_matrix = [[1, 0, 0, 0], [0.22, 0.11, 0.58, 0.09], [0.15, 0.27, 0.20, 0.38], [0.19, 0.57, 0.19, 0.05]]
    vector = [0.1, 0.3, 0.2, 0.4]
    def printMatrix ( matrix ):
        # Pretty-print a matrix row by row, tab-separated.
        for i in range ( len(matrix) ):
            for j in range ( len(matrix[i]) ):
                print(matrix[i][j], end="\t")
            print ()
    print("Transition matrix:")
    printMatrix(P_matrix)
    # 1.2 Fundamental matrix: Q is P restricted to the transient states
    # (rows/columns 1..3); N = (I - Q)^-1.
    Q = []
    for i in range(1, len(P_matrix)):
        arr_row = []
        for j in range(1,len(P_matrix)):
            arr_row.append(P_matrix[i][j])
        Q.append(arr_row)
    fundamental_matrix = np.linalg.inv(np.identity(len(Q)) - Q)
    print("Fundamental_matrix: \n", fundamental_matrix, "\n")
    # # 1.3 The average number of steps that the chain is in the state j when the process started with the state n
    condition_n = int(input("Start condition: "))
    condition_j = int(input("Last condition: "))
    print("The average number of steps that the chain is in the state j when the process started with the state n: \n", fundamental_matrix[condition_n - 2][condition_j - 2], "\n")
    # 1.4 The average number of steps that the circuit is in state j when the initial state is not specified
    # (sum of column j-2 of the fundamental matrix).
    numsteps_arr = 0
    for i in range(len(fundamental_matrix)):
        numsteps_arr = numsteps_arr + fundamental_matrix[i][condition_j - 2]
    print("The average number of steps that the circuit is in state j when the initial state is not specified: \n", numsteps_arr, "\n")
    # 1.5 Average absorption time: row sums of the fundamental matrix.
    avarage_absorption = np.dot(fundamental_matrix, np.ones((len(fundamental_matrix))))
    print("ะverage step numbers:", avarage_absorption)
    # 1.6 Probability of absorption: B = N * R, where R holds transition
    # probabilities from transient states into the absorbing state.
    probability = []
    for i in range(1, len(P_matrix)):
        row = []
        for j in range(len(P_matrix) - 3):
            row.append(P_matrix[i][j])
        probability.append(row)
    B = np.dot(fundamental_matrix, probability)
    print("Probability of absorption:\n", B)
def RegularMarkov():
    """Analyse a regular (ergodic) three-state Markov chain.

    Prints the limiting matrix, the fundamental matrix Z = (I - P + W)^-1,
    and several mean-first-passage statistics.
    NOTE(review): user-entered states are mapped with `state - 1`, i.e.
    states are assumed to be numbered 1..3 -- confirm.
    """
    # 2.1 Transition matrix
    P_matrix = [[0.36, 0.26, 0.38], [0.93, 0.01, 0.06], [0.05, 0.54, 0.41]]
    vector = [0.25, 0.26, 0.49]
    def printMatrix ( matrix ):
        # Pretty-print a matrix row by row, tab-separated.
        for i in range ( len(matrix) ):
            for j in range ( len(matrix[i]) ):
                print(matrix[i][j], end="\t")
            print ()
    print("Transition matrix:")
    printMatrix(P_matrix)
    # 2.2 Limiting distribution: iterate v*P to convergence, then stack the
    # stationary row vector into a square matrix W.
    w = vector
    for i in range(1000):
        w = np.dot(w, P_matrix).tolist()
    probability = []
    for j in range(len(P_matrix)):
        probability.append(w)
    probability = np.array(probability)
    print('Final matrix:')
    print(probability)
    # 2.3 Fundamental matrix Z = (I - P + W)^-1.
    fundamental_matrix = np.linalg.inv(np.identity(len(P_matrix)) - P_matrix + probability)
    print("Fundamental_matrix: \n", fundamental_matrix)
    # 2.4 average time in a given state for n = 4 steps
    fundamential_dg = np.diagflat(np.diag(fundamental_matrix))
    l = len(fundamental_matrix)
    E = [[1] * (l) for i in range(l)]
    # Mean first passage matrix M = (I - Z + E*Z_dg) * diag(1/w).
    M = np.dot((np.eye(l) - fundamental_matrix + np.dot(E, fundamential_dg)), np.diagflat(1 / probability[0]))
    print('Average time in a given state for n = 4 steps:')
    print(np.dot(vector, fundamental_matrix) - probability[0] + 4 * probability[0], "\n")
    # 2.5 the average time of the circuit in a given state (in the state j when the process started with the state n);
    condition_n = int(input("Start condition: "))
    condition_j = int(input("Last condition: "))
    print("The average time of the circuit in a given state (in the state j when the process started with the state n) ",M[ condition_n-1][condition_j-1], "\n")
    # 2.6 the average time of the circuit to the specified state (when the initial state is not preset);
    print("The average time of the circuit to the specified state (when the initial state is not preset): ",np.dot(vector, M), "\n")
    # 2.7 the average time of exit of the circuit to a given state in stationary mode (when the initial state is not set).
    print("The average time of exit of the circuit to a given state in stationary mode (when the initial state is not set): ",np.dot(probability[0], M), "\n")
def start():
    """Interactive menu: run the absorbing- or regular-chain analysis."""
    print("Choose task you want to solve: \n1 - Absorption Markov Chain\n2 - Regular Markov Chain")
    number = int(input(""))
    if number == 1:
        AbsorptionMarkov()
        again()
    elif number == 2:
        RegularMarkov()
        again()
    else:
        print("Wrong number!")
def again():
    """Ask whether to run another task; re-enters start() on "yes".

    NOTE(review): start() and again() recurse into each other, so a very
    long interactive session could in principle hit the recursion limit.
    """
    print("Would you like to try again?:\n1 - yes\n2 - no")
    choice = int(input(""))
    if choice == 1:
        start()
    elif choice == 2:
        return 0
# Fix: guard the entry point so merely importing this module no longer
# launches the interactive menu (a blocking input() side effect).
if __name__ == '__main__':
    start()
| YaroslavaMykhailenko/laboratory_3_Mykhailenko | laboratory_3.py | laboratory_3.py | py | 5,002 | python | en | code | 0 | github-code | 13 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: no prior migrations to depend on.
    dependencies = [
    ]
    operations = [
        # Create the Repository table: auto primary key plus a two-letter
        # language code (currently only 'RS' = Rust).
        migrations.CreateModel(
            name='Repository',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('language', models.CharField(default='RS', max_length=2, choices=[('RS', 'Rust')])),
            ],
        ),
    ]
| frewsxcv/lop.farm | app/repository/migrations/0001_initial.py | 0001_initial.py | py | 528 | python | en | code | 7 | github-code | 13 |
from __future__ import print_function

import sys

from .interface import CDB
# Process exit codes returned by main().
OK=0
BAD_ARGS = 1
BAD_VERB = 2
BAD_KEY = 3
def usage():
    """Print command-line usage to stderr.

    Fix: the delete verb is spelled `del` (what main() actually accepts),
    not `delete` as previously advertised.
    """
    print("Usage:", file=sys.stderr)
    print("\tpython -m cdb.tool DBNAME get KEY", file=sys.stderr)
    print("\tpython -m cdb.tool DBNAME set KEY VALUE", file=sys.stderr)
    print("\tpython -m cdb.tool DBNAME del KEY", file=sys.stderr)
def main(argv):
    """Entry point for the cdb command-line tool.

    argv follows sys.argv: argv[0] is the program name, then
    DBNAME VERB KEY [VALUE]. Returns a process exit code (OK/BAD_*).
    """
    if not (4 <= len(argv) <= 5):
        usage()
        return BAD_ARGS
    # argv[0] is the module/script name; unpack the remaining arguments,
    # padding the optional VALUE with None.
    dbname, verb, key, value = (argv[1:] + [None])[:4]
    # Keys must be integers. Bug fix: int() raises ValueError/TypeError,
    # never KeyError, so the old handler could not fire and bad keys
    # crashed -- and even its message path then continued with a str key.
    try:
        key = int(key)
    except (TypeError, ValueError):
        print("Key Must Be int Class", file=sys.stderr)
        return BAD_KEY
    if verb not in {'get', 'set', 'del'}:
        usage()
        return BAD_VERB
    cdb = CDB(dbname)
    try:
        if verb == 'get':
            sys.stdout.write(cdb[key])
        elif verb == 'set':
            cdb[key] = value
        elif verb == 'del':
            del cdb[key]
    except KeyError:
        print("Key not found", file=sys.stderr)
        return BAD_KEY
    return OK
# Fix: this line previously carried trailing corrupted text that broke
# the file; restore the clean entry-point guard.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
# TEMPORARY:
# cd I:/Users/welfj/Documents/Programming/Python/python-lua-dithering
import os
import time
import sys
from math import floor
import cv2
from PIL import Image
# Make the local `libs/` package importable without installing it.
sys.path.append('libs/')
import dithering
# from python-lua-dithering.libs import dithering
# import libs.dithering
# from libs.dithering import *
# FUNCTIONS #######################################
def process_frames(full_file_name, max_width, max_height, frame_skipping):
    """Convert one input file (mp4/gif/jpeg/png/jpg) from `inputs/` into
    ComputerCraft text frames under `outputs/size_WxH/<name>/data/`, plus
    an `info.txt` Lua table with frame count, dimensions and file count.

    NOTE(review): the split('.')[1] extension parse assumes a single dot
    in the filename -- names like `a.b.mp4` would break it.
    """
    extension = full_file_name.split('.')[1] # get the extension after the '.'
    # get the name before the '.', and optionally add '_extended'
    file_name = full_file_name.split('.')[0] + (' extended' if extended_chars else '')
    input_path = 'inputs/' + full_file_name
    output_file_name = file_name.replace(' ', '_')
    print('Processing \'' + file_name + '\'')
    # mp4s are read through OpenCV; everything else through PIL.
    if extension == 'mp4':
        video = cv2.VideoCapture(input_path)
        old_image = None
    else:
        video = None
        old_image = Image.open(input_path)
    new_height = max_height
    new_width = get_new_width(extension, video, old_image, input_path, new_height, max_width)
    # Build outputs/size_WxH/<name>/data, creating each level if missing.
    output_folder_size_name = 'outputs/' + 'size_' + str(new_width) + 'x' + str(new_height)
    if not os.path.exists(output_folder_size_name):
        os.mkdir(output_folder_size_name)
    output_folder_name = output_folder_size_name + '/' + output_file_name
    if not os.path.exists(output_folder_name):
        os.mkdir(output_folder_name)
    output_data_folder_name = output_folder_name + '/data'
    if not os.path.exists(output_data_folder_name):
        os.mkdir(output_data_folder_name)
    if extension == 'mp4':
        used_frame_count, data_frames_count = process_mp4_frames(output_data_folder_name, video, frame_skipping, new_width, new_height)
    elif extension == 'gif':
        used_frame_count, data_frames_count = process_gif_frames(output_data_folder_name, old_image, new_width, new_height)
    elif extension == 'jpeg' or extension == 'png' or extension == 'jpg':
        used_frame_count, data_frames_count = process_image_frame(output_data_folder_name, old_image, new_width, new_height)
    else:
        print('Entered an invalid file type; only mp4, gif, jpeg, png and jpg extensions are allowed!')
    # Write a Lua-style table so the ComputerCraft side can read the metadata.
    output_info_file = create_output_file(output_folder_name, 'info')
    string = '{frame_count=' + str(used_frame_count) + ',width=' + str(new_width) + ',height=' + str(new_height) + ',data_files=' + str(data_frames_count) + '}'
    output_info_file.write(string)
    output_info_file.close()
def get_new_width(extension, video, old_image, input_path, new_height, max_width):
    """Return the output width: `max_width` when stretching is enabled,
    otherwise a width that preserves the source aspect ratio at `new_height`.
    """
    if extension == 'mp4':
        # get information about the video file
        old_width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
        old_height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
    elif extension == 'gif' or extension == 'jpeg' or extension == 'png' or extension == 'jpg':
        try:
            old_width = old_image.size[0]
            old_height = old_image.size[1]
        except IOError:
            print('Can\'t load!')
    else:
        print('Entered an invalid file type; only mp4, gif, jpeg, png and jpg extensions are allowed!')
    # NOTE(review): with an invalid extension (or a failed PIL load) and
    # stretching disabled, old_width/old_height are never assigned and the
    # return below raises NameError.
    if new_width_stretched:
        return max_width
    else:
        return int(new_height * old_width / old_height)
def create_output_file(folder, name):
    """Open (truncating) `<folder>/<name>.txt` for writing and return the handle."""
    return open(folder + '/' + str(name) + '.txt', 'w')
def try_create_new_output_file(line_num, file_byte_count, output_file, output_data_folder_name, data_frames_count):
    """Advance the per-file line counter and, once the current data file has
    reached the byte budget (`max_bytes_per_file`), rotate to a fresh
    numbered data file. Returns the updated bookkeeping tuple.
    """
    line_num = line_num + 1
    if file_byte_count < max_bytes_per_file:
        # Still within budget: keep writing to the current file.
        return line_num, file_byte_count, output_file, data_frames_count
    # Budget exhausted: close the old file (if any) and open the next one.
    if output_file:
        output_file.close()
    fresh_file = create_output_file(output_data_folder_name, data_frames_count)
    return 1, 0, fresh_file, data_frames_count + 1
def process_mp4_frames(output_data_folder_name, video, frame_skipping, new_width, new_height):
    """Read an OpenCV video frame by frame, keeping every `frame_skipping`-th
    frame, and dither each kept frame into the rotating data files.

    Returns (frames_written, data_file_count).
    """
    i = 0
    used_frame_count = 0
    actual_frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    new_frame_count = floor(actual_frame_count / frame_skipping)
    file_byte_count = 0
    output_file = create_output_file(output_data_folder_name, 1)
    data_frames_count = 2
    line_num = 0
    while True:
        start_frame_time = time.time()
        hasFrames, cv2_frame = video.read()
        if hasFrames:
            if i % frame_skipping == 0:
                used_frame_count += 1
                line_num, file_byte_count, output_file, data_frames_count = try_create_new_output_file(line_num, file_byte_count, output_file, output_data_folder_name, data_frames_count)
                # cv2_frame = cv2.cvtColor(cv2_frame, cv2.COLOR_BGR2RGB)
                # Reserve one column for the trailing newline character.
                cv2_frame = cv2.resize(cv2_frame, (new_width - 1, new_height))
                pil_frame = Image.fromarray(cv2_frame) # pil pixels can be read faster than cv2 pixels, it seems
                get_frame_time = time.time() - start_frame_time # 40 frames/s
                file_byte_count += process_frame(pil_frame, used_frame_count, line_num, new_width, new_height, output_file, new_frame_count, start_frame_time, get_frame_time)
            i += 1
        else:
            # End of the video stream: release resources and report totals.
            video.release()
            output_file.close()
            return used_frame_count, data_frames_count - 1
def process_gif_frames(output_data_folder_name, old_image, new_width, new_height):
    """Walk every frame of a PIL GIF, keeping every `frame_skipping`-th one,
    and dither each kept frame into the rotating data files.

    Returns (frames_written, data_file_count).
    """
    i = 0
    used_frame_count = 0
    file_byte_count = 0
    output_file = create_output_file(output_data_folder_name, 1)
    data_frames_count = 2
    line_num = 0
    try:
        while True:
            start_frame_time = time.time()
            if i % frame_skipping == 0:
                used_frame_count += 1
                line_num, file_byte_count, output_file, data_frames_count = try_create_new_output_file(line_num, file_byte_count, output_file, output_data_folder_name, data_frames_count)
                # Reserve one column for the trailing newline character.
                new_image = old_image.resize((new_width - 1, new_height), Image.ANTIALIAS)
                get_frame_time = time.time() - start_frame_time
                file_byte_count += process_frame(new_image, used_frame_count, line_num, new_width, new_height, output_file, None, start_frame_time, get_frame_time)
            old_image.seek(old_image.tell() + 1)  # advance to the next frame
            i += 1
    except EOFError:
        # PIL signals "no more frames" by raising EOFError from seek().
        # Bug fix: the bare `except:` here also silently swallowed real
        # errors (KeyboardInterrupt, bugs inside process_frame, ...).
        output_file.close()
        return used_frame_count, data_frames_count - 1
def process_image_frame(output_data_folder_name, old_image, new_width, new_height):
    """Dither a single still image into one data file.

    Returns (1, 1): one frame written into one data file.
    """
    output_file = create_output_file(output_data_folder_name, 1)
    start_frame_time = time.time()
    # Reserve one column for the trailing newline character.
    resized = old_image.resize((new_width - 1, new_height), Image.ANTIALIAS)
    used_frame_count = 1
    get_frame_time = time.time() - start_frame_time
    process_frame(resized, used_frame_count, 0, new_width, new_height, output_file, 1, start_frame_time, get_frame_time)
    output_file.close()
    return used_frame_count, 1
def process_frame(frame, used_frame_count, line_num, new_width, new_height, output_file, frame_count, start_frame_time, get_frame_time):
    """Dither one PIL frame into a single line of ComputerCraft characters,
    append it to `output_file`, and return the number of bytes written.

    NOTE(review): prev_char / prev_char_count are initialized but never
    used -- likely leftovers from an abandoned run-length idea.
    """
    preparing_loop_start_time = time.time()
    # not sure if it is necessary to convert the frame into RGBA!
    frame = frame.convert('RGBA')
    # load the pixels of the frame
    frame_pixels = frame.load()
    # initializes empty variables for the coming 'for y, for x' loop
    prev_char = None
    prev_char_count = 0
    string = ''
    # the \n character at the end of every line needs to have one spot reserved
    # this should ideally be done at the resizing of the frame stage instead!
    modified_width = new_width - 1
    preparing_loop_end_time = time.time()
    # measure the time it takes for the coming 'for y, for x' loop to execute
    looping_start_time = time.time()
    for y in range(new_height):
        for x in range(modified_width):
            # the brightness of a pixel determines which character will be used in ComputerCraft for that pixel
            brightness = get_brightness(frame_pixels[x, y])
            if not extended_chars:
                char = dithering.get_closest_char_default(brightness)
            else:
                char = dithering.get_closest_char_extended(brightness)
            string += char
        # the last character in a frame doesn't need a return character after it
        if y < new_height - 1:
            # add a return character to the end of each horizontal line,
            # so ComputerCraft can draw the entire frame with one write() statement
            string += '\\n'
    looping_end_time = time.time()
    writing_start_time = time.time()
    # gives each frame its own line in the outputted file, so lines can easily be found and parsed
    if line_num > 1:
        final_string = '\n' + string
    else:
        final_string = string
    output_file.write(final_string)
    writing_end_time = time.time()
    preparing_loop_time = preparing_loop_end_time - preparing_loop_start_time
    looping_time = looping_end_time - looping_start_time
    writing_time = writing_end_time - writing_start_time
    # Only refresh the console stats periodically (and on the final frame).
    if used_frame_count % frames_to_update_stats == 0 or used_frame_count == frame_count:
        print_stats(used_frame_count, frame_count, start_frame_time, get_frame_time, preparing_loop_time, looping_time, writing_time)
    if used_frame_count == frame_count:
        print()
    # Report the encoded size so the caller can enforce the per-file budget.
    string_byte_count = len(final_string.encode('utf8'))
    return string_byte_count
def get_brightness(tup):
    """Perceived brightness of an RGB(A) pixel, scaled to 0.0-1.0.

    Uses the Rec. 709 luma weights; an alpha channel (4th component)
    scales the result. Bug fix: 3-component RGB tuples previously fell
    into the else branch and always returned 0.
    """
    # red, green and blue values aren't equally bright to the human eye
    brightness = (0.2126 * tup[0] + 0.7152 * tup[1] + 0.0722 * tup[2]) / 255
    if len(tup) == 4:
        return brightness * tup[3] / 255
    return brightness
def print_stats(used_frame_count, frame_count, start_frame_time, get_frame_time, preparing_loop_time, looping_time, writing_time):
    """Overwrite the current console line with progress, speed and ETA.

    NOTE(review): speed_2..speed_5 feed only the commented-out debug print
    at the bottom; the live output uses progress + speed + eta.
    """
    # progress
    progress = 'Frame ' + str(used_frame_count) + '/'
    if frame_count:
        progress = progress + str(frame_count)
    else:
        progress = progress + '?'
    # speed of processing the frame
    elapsed = time.time() - start_frame_time
    if elapsed > 0:
        processed_fps = floor(1 / elapsed)
    else:
        processed_fps = '1000+'
    speed = ', speed: {} frames/s'.format(processed_fps)
    # speed of getting the frame
    if get_frame_time > 0:
        processed_fps = floor(1 / get_frame_time)
    else:
        processed_fps = '1000+'
    speed_2 = ', get frame: {} frames/s'.format(processed_fps)
    # preparing for the 'for y, for x' loop
    if preparing_loop_time > 0:
        processed_fps = floor(1 / preparing_loop_time)
    else:
        processed_fps = '1000+'
    speed_3 = ', preparing loop: {} frames/s'.format(processed_fps)
    # speed of the 'for y, for x' loop
    if looping_time > 0:
        processed_fps = floor(1 / looping_time)
    else:
        processed_fps = '1000+'
    speed_4 = ', pixel loop: {} frames/s'.format(processed_fps)
    # writing speed
    if writing_time > 0:
        processed_fps = floor(1 / writing_time)
    else:
        processed_fps = '1000+'
    speed_5 = ', writing: {} frames/s'.format(processed_fps)
    # calculate how long it should take for the program to finish
    if frame_count:
        frames_left = frame_count - used_frame_count
        seconds_left = elapsed * frames_left
        eta_hours = floor(seconds_left / 3600)
        eta_minutes = floor(seconds_left / 60) % 60
        eta_seconds = floor(seconds_left) % 60
        # makes sure each value is always 2 characters wide when printed
        if eta_hours < 10:
            eta_hours = '0' + str(eta_hours)
        if eta_minutes < 10:
            eta_minutes = '0' + str(eta_minutes)
        if eta_seconds < 10:
            eta_seconds = '0' + str(eta_seconds)
        eta = ', {}:{}:{} left'.format(eta_hours, eta_minutes, eta_seconds)
    else:
        eta = ', ? left'
    # clears the line that will be printed on of any straggling characters
    tab = ' '
    print(tab + progress + speed + eta, end='\r', flush=True)
    # sys.stdout.write("\033[F") # Cursor up one line
    # sys.stdout.write("\033[K") # Clear to the end of line
    # print(tab + progress + speed + eta, end='\r', flush=True)
    # print(tab + progress + speed + speed_2 + speed_3 + speed_4 + speed_5 + eta, end='\r', flush=True)
# USER SETTINGS #######################################
# default is False
# if true, the program assumes 94 characters are available, instead of the usual 20
# 94 are available by replacing Tekkit's default characters in default.png, see the instructions below
extended_chars = False
# how to get the extended character set (characters are replaced with grayscale blocks):
# 1. go to %appdata%/.technic/modpacks/tekkit/bin
# 2. remove the minecraft.jar file and replace it with 'minecraft.jar versions/new/minecraft.jar',
# which can be found inside the same folder of this program
# 3. tekkit's characters should now all be replaced with 94 grayscale colors, instead of the default 19
# 4. when you want to go back to the default font,
# replace the new minecraft.jar file with 'minecraft.jar versions/old/minecraft.jar'
# if true, the original aspect ratio won't be kept so the width can be stretched to max_width
new_width_stretched = True
# normally, files that have been put in the 'inputs' folder will be moved to 'temp inputs' once they've been processed
# they'll remain in the 'inputs' folder after being processed when this is set to False
move_processed_files = True
# a file compression method
# 1 means every frame of the video is kept, 3 means every third frame of the video is kept
frame_skipping = 1
# 100 MB GitHub file limit. 9.5e7 is 95 million.
# (per-data-file byte budget used by try_create_new_output_file)
max_bytes_per_file = 9.5e7
# how many frames have to be processed before the stats in the console are updated
frames_to_update_stats = 100
# this determines the width and height of the output frames
# see tekkit/config/mod_ComputerCraft.cfg to set your own max_width and max_height values
# (max_width, max_height)
output_dimensions = (
    # (9, 8), # for single characters, this is 8x8 without the '\n' char at the end
    (30, 30),
    # (77, 31), # max 8x5 monitor size in ComputerCraft, used because 8x6 doesn't always work
    # (77, 38), # max 8x6 monitor size in ComputerCraft
    # (227, 85), # 1080p
    # (426, 160), # 1440p
    # (640, 240), # 4k
)
# EXECUTION OF THE PROGRAM #######################################
print()
t0 = time.time()
# Process every input file once per configured output size.
for dimension in output_dimensions:
    max_width, max_height = dimension
    for name in os.listdir('inputs'):
        if name != '.empty': # '.empty' prevents the folder from being removed on GitHub
            process_frames(name, max_width, max_height, frame_skipping)
            print()
# Optionally archive processed inputs so re-runs skip them.
if move_processed_files:
    for name in os.listdir('inputs'):
        if name != '.empty':
            os.rename('inputs/' + name, 'temp inputs/' + name)
# print the time it took to run the program
time_elapsed = time.time() - t0
minutes = floor(time_elapsed / 60)
seconds = time_elapsed % 60
print('Done! Duration: {}m, {:.2f}s'.format(minutes, seconds))
# sys.stdout.write("\033[F") # Cursor up one line
# sys.stdout.write("\033[K") # Clear to the end of line
# print('Done! Duration: {}m, {:.2f}s'.format(minutes, seconds), end='\r', flush=True)
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404

from .forms import *
from .models import *
# Create your views here.
@login_required(login_url='/accounts/login/')
def home(request):
    '''
    returns the homepage of the application
    '''
    # Fix: call .all() -- previously the unbound manager method was handed
    # to the template (Django templates call it, but pass the queryset).
    residence = Hood.objects.all()
    person = Resident.get_human(jina=request.user)
    return render(request, 'index.html', {'content': residence, 'eye': person})
@login_required(login_url='/accounts/login/')
def profile(request, name):
    """Render the profile page for the resident identified by `name`."""
    resident = Resident.get_human(jina=name)
    return render(request, 'profile.html', {'content': resident})
@login_required(login_url='/accounts/login/')
def new_resident(request, operation, name):
    """Create a Resident profile in the hood `name`.

    `operation == 'join'` also bumps the hood's occupant count.
    Bug fix: the count was previously incremented on every GET and every
    invalid POST of the form; it is now incremented only after a
    successful save.
    """
    current_user = request.user
    hood = get_object_or_404(Hood, name=name)
    home = Hood.get_hood(jina=name)
    if request.method == 'POST':
        form = NewResidentForm(request.POST, request.FILES)
        if form.is_valid():
            resident = form.save(commit=False)
            resident.name = current_user
            resident.home = hood
            resident.save()
            if operation == 'join':
                hood.occupants += 1
                hood.save()
            return redirect('home')
    else:
        form = NewResidentForm()
    # GET, or POST with validation errors: show the (possibly bound) form.
    return render(request, 'new_being.html', {'form': form, 'content': home})
@login_required(login_url='/accounts/login/')
def residence(request, jina):
    """Render a neighbourhood page together with its services and events."""
    hood = Hood.get_hood(jina=jina)
    services = Service.get_service(jina=jina)
    events = Event.get_event(jina=jina)
    return render(request, 'area.html', {'content': hood, 'addon': services, 'news': events})
@login_required(login_url='/accounts/login/')
def new_business(request, jina):
    """Create a business owned by the current user inside hood ``jina``."""
    current_user = request.user
    home = Hood.get_hood(jina=jina)
    area = get_object_or_404(Hood, name=jina)
    # GET: show an empty form.
    if request.method != 'POST':
        return render(request, 'bizna.html', {'form': NewBusiness(), 'content': home})
    # POST: save when valid, then always return to the homepage.
    form = NewBusiness(request.POST, request.FILES)
    if form.is_valid():
        business = form.save(commit=False)
        business.owner = current_user
        business.neighbourhood = area
        business.save()
    return redirect('home')
@login_required(login_url='/accounts/login/')
def new_service(request, jina):
    """Register a service offered inside hood ``jina``."""
    home = Hood.get_hood(jina=jina)
    area = get_object_or_404(Hood, name=jina)
    # GET: show an empty form.
    if request.method != 'POST':
        return render(request, 'service.html', {'form': NewService(), 'content': home})
    # POST: save when valid, then always return to the homepage.
    form = NewService(request.POST, request.FILES)
    if form.is_valid():
        service = form.save(commit=False)
        service.area = area
        service.save()
    return redirect('home')
@login_required(login_url='/accounts/login/')
def new_event(request, jina):
    """Create an event taking place inside hood ``jina``."""
    home = Hood.get_hood(jina=jina)
    area = get_object_or_404(Hood, name=jina)
    # GET: show an empty form.
    if request.method != 'POST':
        return render(request, 'event.html', {'form': NewEvent(), 'content': home})
    # POST: save when valid, then always return to the homepage.
    form = NewEvent(request.POST, request.FILES)
    if form.is_valid():
        event = form.save(commit=False)
        event.area = area
        event.save()
    return redirect('home')
@login_required(login_url='/accounts/login/')
def new_hood(request):
    """Create a brand new neighbourhood from the submitted form."""
    # GET: show an empty form.
    if request.method != 'POST':
        return render(request, 'pop.html', {'form': NewHoodForm()})
    # POST: save when valid, then always return to the homepage.
    form = NewHoodForm(request.POST, request.FILES)
    if form.is_valid():
        hood = form.save(commit=False)
        hood.save()
    return redirect('home')
@login_required(login_url='/accounts/login/')
def find_hood(request):
    """Search neighbourhoods by the ``hood`` query parameter."""
    query = request.GET.get('hood')
    if not query:
        # No (or empty) search term: render the bare search page.
        return render(request, 'search.html')
    results = Hood.get_hood(jina=query)
    return render(request, 'search.html', {'title': query, 'content': results})
@login_required(login_url='/accounts/login/')
def change(request, name):
    """Move the resident identified by ``name`` to the hood given in the
    ``home`` query parameter, then return to the homepage.

    Without a ``home`` parameter the hood-picker page is rendered instead.
    """
    if 'home' in request.GET and request.GET['home']:
        place = request.GET.get('home')
        # Dropped a leftover `print(new)` debug statement.
        Resident.change_hood(iden=name, hood=place)
        return redirect('home')
    else:
        return render(request, 'gum.html')
| damunza/hood-ip | app/views.py | views.py | py | 4,374 | python | en | code | 0 | github-code | 13 |
29613459522 | import io
import os

import yaml  # PyYAML: required by yaml.safe_load() below - this import was missing (NameError)
from boto3.session import Session
from boto3.s3.transfer import TransferConfig

# Keep a reference to the logger *factory*: the original code rebound the name
# `logger` to a logger instance and then tried to call that instance again to
# rebuild the logger with the configured level.
from LoraLogger import logger as make_logger

# Bootstrap a default logger, then rebuild it once the configured level is known.
logger = make_logger(__name__, "INFO")
with open('../es-syns-config/config.yaml', 'r') as f:
    config = yaml.safe_load(f)
logger = make_logger(__name__, config['LOGGERS']['main'])
config = config['MAIN']
class Ceph3BOTO3():
    """Thin wrapper around a boto3 S3 client/resource pair pointed at a
    Ceph RGW (S3-compatible) endpoint: bucket CRUD plus file/folder
    upload and download helpers."""
    def __init__(self, ak=None, sk=None, url=None):
        # prod credentials
        # NOTE(review): this logs the access key in clear text - confirm and remove.
        logger.info(ak)
        access_key = ak
        secret_key = sk
        self.session = Session(aws_access_key_id=access_key,
                               aws_secret_access_key=secret_key)
        self.s3_client = self.session.client('s3', endpoint_url=url)
        self.s3_resource = self.session.resource('s3', endpoint_url=url)
    def get_bucket(self):
        # Return all bucket names (also prints them as a side effect).
        buckets = [bucket['Name']
                   for bucket in self.s3_client.list_buckets()['Buckets']]
        print(buckets)
        return buckets
    def create_bucket(self, bkname):
        self.s3_client.create_bucket(Bucket=bkname)
        # ACL = 'private', 'public-read', 'public-read-write', 'authenticated-read'
        # self.s3_client.create_bucket(Bucket='new_bucket', ACL='public-read')
    def delete_bucket(self, bkname):
        # Deletes the bucket itself; S3 rejects this unless the bucket is empty.
        self.s3_client.delete_bucket(Bucket=bkname)
    def get_bucket_content(self, bkname):
        # Print every object key in the bucket.
        bk = self.s3_resource.Bucket(bkname)
        for obj in bk.objects.all():
            print(obj.key)
    def upload_folder(self, folder, bucket):
        # Walk `folder` recursively and upload each file under its local
        # relative path, using multipart transfers for larger files.
        config = TransferConfig(multipart_threshold=1024*25, max_concurrency=10,
                                multipart_chunksize=1024*25, use_threads=True)
        # bucket = self.s3_resource.Bucket(bucket)
        print('upload started')
        for subdir, dirs, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(subdir, file)
                print('uploading '+full_path)
                self.s3_client.upload_file(
                    full_path, # file itself
                    bucket,
                    full_path, # name
                    Config=config
                )
                print('completed uploading '+full_path)
        print('folder upload completed')
    def upload_file(self, bkname, from_file_path, bucket_file_path):
        # Read the whole file into memory, then PUT it at bucket_file_path.
        with open(from_file_path, 'rb') as f:
            fo = io.BytesIO(f.read())
        resp = self.s3_client.put_object(
            Bucket=bkname,
            Key=bucket_file_path, # target file name
            Body=fo
        )
        return resp
    def download_folder(self, bkname, from_bucket_dir, to_dir):
        # Mirror every object under `from_bucket_dir` into `to_dir`,
        # creating intermediate directories as needed.
        bucket = self.s3_resource.Bucket(bkname)
        for obj in bucket.objects.filter(Prefix=from_bucket_dir):
            target_path = os.path.join(to_dir, obj.key)
            print(os.path.dirname(target_path))
            if not os.path.exists(os.path.dirname(target_path)):
                os.makedirs(os.path.dirname(target_path))
            bucket.download_file(obj.key, target_path) # save to same path
    def download_file(self, bkname, from_bucket_path, to_file_path):
        self.s3_resource.meta.client.download_file(
            bkname, from_bucket_path, to_file_path)
    def delete(self, bucket, key):
        # Delete a single object from a bucket.
        self.s3_resource.Object(bucket, key).delete()
if __name__ == '__main__':
    # Smoke test: pull the current synonym set from the 'es-synonym' bucket
    # using credentials from the YAML config loaded at import time.
    ceph = Ceph3BOTO3(ak=config['access_key'], sk=config[
        'secret_key'], url=config['end_point'])
    ceph.download_folder(bkname='es-synonym',
                         from_bucket_dir='current', to_dir='analysis')
| adrwong/ModelMeshSystem | user_toolkit/s3access.py | s3access.py | py | 3,582 | python | en | code | 0 | github-code | 13 |
"""Pick five random numbers in [1, 100]; among them, report the values
greater than 50: print the list, then the count and the running total of
the matching values.

Example:
    a = [1, 83, 22, 77, 19] -> matching values 83, 77; count 2; total 160
"""
import random

numbers = []
count = 0
total = 0
for _ in range(5):
    value = random.randint(1, 100)
    numbers.append(value)
    if value > 50:
        total += value
        count += 1
print(numbers)
print("개수 :", count)
print("총합 :", total)
| Songmsu/python | H์ผ์ฐจ๋ฐฐ์ด/์ผ์ฐจ๋ฐฐ์ด3_๋ฌธ์ _๋์ ํฉ_๊ฐ์/์ผ์ฐจ๋ฐฐ์ด3_๋ฌธ์ 01_๋น๊ต_๋ฌธ์ .py | ์ผ์ฐจ๋ฐฐ์ด3_๋ฌธ์ 01_๋น๊ต_๋ฌธ์ .py | py | 631 | python | ko | code | 0 | github-code | 13 |
72935272978 | import copy
from typing import List, Tuple
def bfs(r: int, c: int, maps: List, target: str) -> Tuple[int, int, int]:
    """Breadth-first search over a grid of row strings.

    Starting at (r, c), walk 4-directionally, treating "X" as a wall and
    marking visited cells as "X" in a private copy of the grid.

    Returns (row, col, distance) of the first cell containing `target`,
    or (0, 0, -1) when it is unreachable.
    """
    from collections import deque  # O(1) popleft; list.pop(0) was O(n) per step

    # A shallow copy is enough: rows are immutable strings that are replaced
    # wholesale below, so the caller's list is never mutated.
    maps = list(maps)
    queue = deque([(r, c, 0)])
    H, W = len(maps), len(maps[0])
    while queue:
        r, c, cnt = queue.popleft()
        # Out of bounds (moves are +/-1 from valid cells) or wall/visited.
        if r in (-1, H) or c in (-1, W) or maps[r][c] == "X":
            continue
        if maps[r][c] == target:
            return (r, c, cnt)
        # Mark visited by stamping an "X" into the row.
        maps[r] = maps[r][:c] + "X" + maps[r][c + 1:]
        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            queue.append((r + dr, c + dc, cnt + 1))
    return (0, 0, -1)
def solution(maps: List) -> int:
    """Minimum number of moves to reach the lever "L" from the start "S"
    and then the exit "E"; -1 when either leg is impossible."""
    start_row, start_col = next(
        (i, row.index("S")) for i, row in enumerate(maps) if "S" in row
    )
    lever_row, lever_col, to_lever = bfs(start_row, start_col, maps, "L")
    _, _, to_exit = bfs(lever_row, lever_col, maps, "E")
    if -1 in (to_lever, to_exit):
        return -1
    return to_lever + to_exit
| Zeka-0337/Problem-Solving | programmers/level_2/๋ฏธ๋ก ํ์ถ.py | ๋ฏธ๋ก ํ์ถ.py | py | 919 | python | en | code | 0 | github-code | 13 |
class Solution:
    def killProcess(self, pid: List[int], ppid: List[int], kill: int) -> List[int]:
        """Return every process id terminated when `kill` is killed: the
        process itself plus all of its descendants, in BFS order."""
        children = defaultdict(list)
        root = -1
        for child, parent in zip(pid, ppid):
            if parent == 0:
                root = child
            else:
                children[parent].append(child)
        # Killing the root takes down every process.
        if kill == root:
            return pid
        # Breadth-first walk; `killed` doubles as the FIFO queue and the result.
        killed = [kill]
        index = 0
        while index < len(killed):
            killed.extend(children[killed[index]])
            index += 1
        return killed
| ysonggit/leetcode_python | 0582_KillProcess.py | 0582_KillProcess.py | py | 593 | python | en | code | 1 | github-code | 13 |
def quine():
    """Return the source of this function's body (everything after the
    `def` line), looked up via `inspect`."""
    import inspect
    source = inspect.getsource(quine)
    # Drop the `def quine():` header by splitting on the first newline
    # instead of slicing by a magic character count, which silently broke
    # if the signature ever changed length.
    return source.split("\n", 1)[1]
#prints its code
def example():
s = 's = %r\nprint(s %% s)'
print(s % s)
# Demo: print quine()'s own body, a blank separator, then run the classic
# two-line self-printing example.
print(quine())
print("\n\n")
example()
| th3spis/basilisk | unlockInfosec/self_print.py | self_print.py | py | 268 | python | en | code | 0 | github-code | 13 |
74564296338 | #!/usr/bin/env python
"""
_DeleteJobs_
MySQL implementation for creating a deleting a job
"""
from WMCore.Database.DBFormatter import DBFormatter
class DeleteJobs(DBFormatter):
    """
    _DeleteJobs_

    Delete jobs from bl_runjob
    """

    sql = """DELETE FROM bl_runjob WHERE id = :id
             """

    def execute(self, jobs, conn = None, transaction = False):
        """
        _execute_

        Delete the given bl_runjob rows.

        :param jobs: list of bl_runjob ids; a no-op when empty
        :param conn: optional existing DB connection to reuse
        :param transaction: whether to run inside an open transaction
        """
        if not jobs:
            return

        # One bind dict per id (the unused `result` local was dropped).
        binds = [{'id': jobid} for jobid in jobs]
        self.dbi.processData(self.sql, binds, conn = conn,
                             transaction = transaction)
        return
| dmwm/WMCore | src/python/WMCore/BossAir/MySQL/DeleteJobs.py | DeleteJobs.py | py | 746 | python | en | code | 44 | github-code | 13 |
29283590393 | from typing import List, Union
from structures import Order, TradingSessionModel, Transaction, Error, OrderStatus
from uuid import UUID, uuid4
from order_book import OrderBook
from traders import Trader
from datetime import datetime, timezone
from utils import utc_now
class TradingSession:
    """In-memory order-matching session: tracks connected traders, an active
    order book, and the transactions produced by matching bids and asks."""
    def __init__(self):
        self.id = uuid4()
        self.session_data = TradingSessionModel(created_at=utc_now(), id=self.id)
        self.created_at = utc_now()
        # todo: rename order_book to active_book. Active book is a subset of orders, which are outstanding.
        # todo: the order book is an anonymized active book, formatted in such a way that consists prices and corresponding number of orders.
        self.order_book = OrderBook(self.session_data.active_book)
        self.traders = {}  # Dictionary to hold Trader instances
    def __str__(self):
        return f'{self.session_data}'
    def is_trader_connected(self, trader: Trader) -> bool:
        # Membership test by trader id against the session's active trader list.
        trader_ids = [t for t in self.session_data.active_traders]
        return trader.id in trader_ids
    def connect_trader(self, trader: Trader):
        # Update the joined_at and session_id fields for the traders
        trader.data.joined_at = utc_now()
        trader.join_session(self.id)  # This will set the session_id for the traders
        # Add the traders to the session
        self.session_data.active_traders.append(trader.data.id)
        self.traders[trader.data.id] = trader  # Add the Trader instance to the dictionary
    def to_dict(self):
        # Serializable snapshot of the whole session state.
        return self.session_data.model_dump()
    def get_trader(self, trader_id: UUID) -> Trader:
        return self.traders.get(trader_id, None)  # Fetch the Trader instance by UUID
    def place_order(self, trader_id: UUID, order_type: str, quantity: int, price: float) -> Union[Order, Error]:
        """Validate available funds/stock, block the required amount, record
        the order in the active book and full history, then run one matching
        pass.  Returns the new Order, or an Error on validation failure.

        NOTE(review): timestamps here use naive datetime.utcnow() while the
        rest of the class uses utc_now() - confirm which convention is meant.
        """
        trader = self.get_trader(trader_id)
        if not trader:
            return Error(message="Trader not found in a given session", created_at=datetime.utcnow())
        if order_type == "bid":
            required_cash = price * quantity
            # Available cash = cash minus what is already committed to open bids.
            if trader.data.cash - trader.data.blocked_cash < required_cash:
                return Error(message="Not enough cash for bid", created_at=datetime.utcnow())
            trader.data.blocked_cash += required_cash
        if order_type == "ask":
            # Available stock = holdings minus what is committed to open asks.
            if trader.data.stocks - trader.data.blocked_stocks < quantity:
                return Error(message="Not enough stocks for ask", created_at=datetime.utcnow())
            trader.data.blocked_stocks += quantity
        # Create the order and include the session_id
        order = Order(
            id=uuid4(),
            session_id=self.id,  # Add this line to set the session_id
            trader=trader.data,
            order_type=order_type,
            quantity=quantity,
            price=price,
            created_at=datetime.utcnow()
        )
        self.session_data.active_book.append(order)
        self.session_data.full_order_history.append(order)
        self.match_orders()
        return order
    def check_order_validity(self, order: Order) -> bool:
        # Validation logic here...
        # NOTE(review): unimplemented stub - currently returns None, not bool.
        pass
    def match_orders(self):
        """Match the best bid against the best ask.  At most ONE transaction
        is created per call: the method returns after the first match."""
        # Sort bid orders in descending order and ask orders in ascending order by price
        bid_orders = sorted([order for order in self.session_data.active_book if order.order_type == "bid"],
                            key=lambda x: x.price, reverse=True)
        ask_orders = sorted([order for order in self.session_data.active_book if order.order_type == "ask"],
                            key=lambda x: x.price)
        # Check for matching orders
        for ask_order in ask_orders:
            for bid_order in bid_orders:
                if bid_order.price >= ask_order.price:
                    # Match found, create a transaction
                    self.create_transaction(buyer_order=bid_order, seller_order=ask_order,
                                            quantity=min(bid_order.quantity, ask_order.quantity), price=ask_order.price)
                    return
    def create_transaction(self, buyer_order: Order, seller_order: Order, quantity: int, price: float):
        """Settle a matched bid/ask pair: move cash and stock, unblock funds,
        drop executed orders from the active book, and record the transaction.

        NOTE(review): both orders are marked EXECUTED in full even when
        quantity < order.quantity, so partial fills are lost - confirm.
        """
        # Mark the orders as executed
        buyer_order.order_status = OrderStatus.EXECUTED
        seller_order.order_status = OrderStatus.EXECUTED
        # Update portfolios
        transaction_value = quantity * price
        buyer = self.get_trader(buyer_order.trader.id)  # Assuming you have a get_trader method
        seller = self.get_trader(seller_order.trader.id)
        buyer.data.cash -= transaction_value
        buyer.data.stocks += quantity
        # NOTE(review): cash was blocked at the *bid* price in place_order but
        # is unblocked here at the *ask* price; a price improvement leaves
        # residual blocked_cash - verify the accounting.
        buyer.data.blocked_cash -= transaction_value  # Unblock the cash
        seller.data.cash += transaction_value
        seller.data.stocks -= quantity
        seller.data.blocked_stocks -= quantity  # Unblock the stocks
        # Remove executed orders from the active book
        self.session_data.active_book = [order for order in self.session_data.active_book if
                                         order.order_status == OrderStatus.ACTIVE]
        # Create and record the transaction
        transaction = Transaction(
            buyer_order=buyer_order,
            seller_order=seller_order,
            quantity=quantity,
            price=price,
            created_at=datetime.utcnow()
        )
        self.session_data.transaction_history.append(transaction)
        print('Transaction created:', transaction.quantity, transaction.price)
        return transaction
| chapkovski/trader_london | session.py | session.py | py | 5,558 | python | en | code | 0 | github-code | 13 |
13670911003 | DOCUMENTATION = r"""
---
module: secretsmanager_secret
version_added: 1.0.0
short_description: Manage secrets stored in AWS Secrets Manager
description:
- Create, update, and delete secrets stored in AWS Secrets Manager.
- Prior to release 5.0.0 this module was called C(community.aws.aws_secret).
The usage did not change.
author:
- "REY Remi (@rrey)"
options:
name:
description:
- Friendly name for the secret you are creating.
required: true
type: str
state:
description:
- Whether the secret should be exist or not.
default: 'present'
choices: ['present', 'absent']
type: str
overwrite:
description:
- Whether to overwrite an existing secret with the same name.
- If set to C(True), an existing secret with the same I(name) will be overwritten.
- If set to C(False), a secret with the given I(name) will only be created if none exists.
type: bool
default: True
version_added: 5.3.0
recovery_window:
description:
- Only used if state is absent.
- Specifies the number of days that Secrets Manager waits before it can delete the secret.
- If set to 0, the deletion is forced without recovery.
default: 30
type: int
description:
description:
- Specifies a user-provided description of the secret.
type: str
default: ''
replica:
description:
- Specifies a list of regions and kms_key_ids (optional) to replicate the secret to
type: list
elements: dict
version_added: 5.3.0
suboptions:
region:
description:
- Region to replicate secret to.
type: str
required: true
kms_key_id:
description:
- Specifies the ARN or alias of the AWS KMS customer master key (CMK) in the
destination region to be used (alias/aws/secretsmanager is assumed if not specified)
type: str
required: false
kms_key_id:
description:
- Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be
used to encrypt the I(secret) values in the versions stored in this secret.
type: str
secret_type:
description:
- Specifies the type of data that you want to encrypt.
choices: ['binary', 'string']
default: 'string'
type: str
secret:
description:
- Specifies string or binary data that you want to encrypt and store in the new version of the secret.
- Mutually exclusive with the I(json_secret) option.
default: ""
type: str
json_secret:
description:
- Specifies JSON-formatted data that you want to encrypt and store in the new version of the
secret.
- Mutually exclusive with the I(secret) option.
type: json
version_added: 4.1.0
resource_policy:
description:
- Specifies JSON-formatted resource policy to attach to the secret. Useful when granting cross-account access
to secrets.
required: false
type: json
version_added: 3.1.0
rotation_lambda:
description:
- Specifies the ARN of the Lambda function that can rotate the secret.
type: str
rotation_interval:
description:
- Specifies the number of days between automatic scheduled rotations of the secret.
default: 30
type: int
notes:
- Support for I(purge_tags) was added in release 4.0.0.
extends_documentation_fragment:
- amazon.aws.region.modules
- amazon.aws.common.modules
- amazon.aws.tags
- amazon.aws.boto3
"""
EXAMPLES = r"""
- name: Add string to AWS Secrets Manager
community.aws.secretsmanager_secret:
name: 'test_secret_string'
state: present
secret_type: 'string'
secret: "{{ super_secret_string }}"
- name: Add a secret with resource policy attached
community.aws.secretsmanager_secret:
name: 'test_secret_string'
state: present
secret_type: 'string'
secret: "{{ super_secret_string }}"
resource_policy: "{{ lookup('template', 'templates/resource_policy.json.j2', convert_data=False) | string }}"
- name: remove string from AWS Secrets Manager
community.aws.secretsmanager_secret:
name: 'test_secret_string'
state: absent
secret_type: 'string'
secret: "{{ super_secret_string }}"
- name: Only create a new secret, but do not update if it already exists by name
community.aws.secretsmanager_secret:
name: 'random_string'
state: present
secret_type: 'string'
secret: "{{ lookup('community.general.random_string', length=16, special=false) }}"
overwrite: false
"""
RETURN = r"""
secret:
description: The secret information
returned: always
type: complex
contains:
arn:
description: The ARN of the secret.
returned: always
type: str
sample: arn:aws:secretsmanager:eu-west-1:xxxxxxxxxx:secret:xxxxxxxxxxx
description:
description: A description of the secret.
returned: when the secret has a description
type: str
sample: An example description
last_accessed_date:
description: The date the secret was last accessed.
returned: always
type: str
sample: '2018-11-20T01:00:00+01:00'
last_changed_date:
description: The date the secret was last modified.
returned: always
type: str
sample: '2018-11-20T12:16:38.433000+01:00'
name:
description: The secret name.
returned: always
type: str
sample: my_secret
rotation_enabled:
description: The secret rotation status.
returned: always
type: bool
sample: false
version_ids_to_stages:
description: Provide the secret version ids and the associated secret stage.
returned: always
type: dict
sample: { "dc1ed59b-6d8e-4450-8b41-536dfe4600a9": [ "AWSCURRENT" ] }
tags:
description:
- A list of dictionaries representing the tags associated with the secret in the standard boto3 format.
returned: when the secret has tags
type: list
elements: dict
contains:
key:
description: The name or key of the tag.
type: str
example: MyTag
returned: success
value:
description: The value of the tag.
type: str
example: Some value.
returned: success
tags_dict:
description: A dictionary representing the tags associated with the secret.
type: dict
returned: when the secret has tags
example: {'MyTagName': 'Some Value'}
version_added: 4.0.0
"""
import json
from traceback import format_exc
try:
from botocore.exceptions import BotoCoreError
from botocore.exceptions import ClientError
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils._text import to_bytes
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
class Secret(object):
    """An object representation of the Secret described by the self.module args"""

    def __init__(
        self,
        name,
        secret_type,
        secret,
        resource_policy=None,
        description="",
        kms_key_id=None,
        tags=None,
        lambda_arn=None,
        rotation_interval=None,
        replica_regions=None,
    ):
        self.name = name
        self.description = description
        self.replica_regions = replica_regions
        self.kms_key_id = kms_key_id
        # Map the module's 'binary'/'string' choice onto the boto3 parameter name.
        if secret_type == "binary":
            self.secret_type = "SecretBinary"
        else:
            self.secret_type = "SecretString"
        self.secret = secret
        self.resource_policy = resource_policy
        self.tags = tags or {}
        self.rotation_enabled = False
        if lambda_arn:
            self.rotation_enabled = True
            self.rotation_lambda_arn = lambda_arn
            self.rotation_rules = {"AutomaticallyAfterDays": int(rotation_interval)}

    @property
    def create_args(self):
        """Keyword arguments for secretsmanager.create_secret()."""
        args = {"Name": self.name}
        if self.description:
            args["Description"] = self.description
        if self.kms_key_id:
            args["KmsKeyId"] = self.kms_key_id
        if self.replica_regions:
            add_replica_regions = []
            for replica in self.replica_regions:
                if replica["kms_key_id"]:
                    add_replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]})
                else:
                    add_replica_regions.append({"Region": replica["region"]})
            args["AddReplicaRegions"] = add_replica_regions
        if self.tags:
            args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags)
        args[self.secret_type] = self.secret
        return args

    @property
    def update_args(self):
        """Keyword arguments for secretsmanager.update_secret()."""
        args = {"SecretId": self.name}
        if self.description:
            args["Description"] = self.description
        if self.kms_key_id:
            args["KmsKeyId"] = self.kms_key_id
        args[self.secret_type] = self.secret
        return args

    @property
    def secret_resource_policy_args(self):
        """Keyword arguments for secretsmanager.put_resource_policy()."""
        args = {"SecretId": self.name}
        if self.resource_policy:
            args["ResourcePolicy"] = self.resource_policy
        return args

    @property
    def boto3_tags(self):
        # Bug fix: this referenced the non-existent attribute `self.Tags`
        # (AttributeError on every access); the tags dict lives in `self.tags`.
        return ansible_dict_to_boto3_tag_list(self.tags)

    def as_dict(self):
        # Bug fix: popping `self.__dict__` directly deleted the instance's
        # `tags` attribute as a side effect; work on a copy instead.
        result = dict(self.__dict__)
        result.pop("tags")
        return snake_dict_to_camel_dict(result)
class SecretsManagerInterface(object):
    """An interface with SecretsManager.

    Thin wrapper around the boto3 secretsmanager client that converts AWS
    failures into Ansible module failures and honours check mode.
    """

    def __init__(self, module):
        self.module = module
        self.client = self.module.client("secretsmanager")

    def get_secret(self, name):
        """Return the DescribeSecret response for *name*, or None when the secret does not exist."""
        try:
            secret = self.client.describe_secret(SecretId=name)
        except self.client.exceptions.ResourceNotFoundException:
            secret = None
        except Exception as e:
            self.module.fail_json_aws(e, msg="Failed to describe secret")
        return secret

    def get_resource_policy(self, name):
        """Return the GetResourcePolicy response for *name*, or None when the secret does not exist."""
        try:
            resource_policy = self.client.get_resource_policy(SecretId=name)
        except self.client.exceptions.ResourceNotFoundException:
            resource_policy = None
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to get secret resource policy")
        return resource_policy

    def create_secret(self, secret):
        """Create *secret* in AWS; also configures rotation when enabled."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            created_secret = self.client.create_secret(**secret.create_args)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to create secret")

        if secret.rotation_enabled:
            response = self.update_rotation(secret)
            created_secret["VersionId"] = response.get("VersionId")
        return created_secret

    def update_secret(self, secret):
        """Update the value, description, and KMS key of an existing secret."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)

        try:
            response = self.client.update_secret(**secret.update_args)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to update secret")
        return response

    def put_resource_policy(self, secret):
        """Validate the secret's resource policy is JSON, then attach it."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)

        try:
            json.loads(secret.secret_resource_policy_args.get("ResourcePolicy"))
        except (TypeError, ValueError) as e:
            self.module.fail_json(msg=f"Failed to parse resource policy as JSON: {str(e)}", exception=format_exc())

        try:
            response = self.client.put_resource_policy(**secret.secret_resource_policy_args)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to update secret resource policy")
        return response

    def remove_replication(self, name, regions):
        """Stop replicating the secret to the given list of region names."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            # (dropped an unused `replica_regions = []` local that was dead code)
            response = self.client.remove_regions_from_replication(SecretId=name, RemoveReplicaRegions=regions)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to replicate secret")
        return response

    def replicate_secret(self, name, regions):
        """Replicate the secret to *regions* (dicts with region/kms_key_id keys)."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            replica_regions = []
            for replica in regions:
                if replica["kms_key_id"]:
                    replica_regions.append({"Region": replica["region"], "KmsKeyId": replica["kms_key_id"]})
                else:
                    # No key given: AWS falls back to alias/aws/secretsmanager.
                    replica_regions.append({"Region": replica["region"]})
            response = self.client.replicate_secret_to_regions(SecretId=name, AddReplicaRegions=replica_regions)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to replicate secret")
        return response

    def restore_secret(self, name):
        """Cancel a pending deletion of the secret."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            response = self.client.restore_secret(SecretId=name)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to restore secret")
        return response

    def delete_secret(self, name, recovery_window):
        """Schedule the secret for deletion; recovery_window == 0 forces immediate deletion."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            if recovery_window == 0:
                response = self.client.delete_secret(SecretId=name, ForceDeleteWithoutRecovery=True)
            else:
                response = self.client.delete_secret(SecretId=name, RecoveryWindowInDays=recovery_window)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to delete secret")
        return response

    def delete_resource_policy(self, name):
        """Detach the resource policy from the secret."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            response = self.client.delete_resource_policy(SecretId=name)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to delete secret resource policy")
        return response

    def update_rotation(self, secret):
        """Enable rotation with the configured Lambda, or cancel rotation when disabled."""
        if secret.rotation_enabled:
            try:
                response = self.client.rotate_secret(
                    SecretId=secret.name,
                    RotationLambdaARN=secret.rotation_lambda_arn,
                    RotationRules=secret.rotation_rules,
                )
            except (BotoCoreError, ClientError) as e:
                self.module.fail_json_aws(e, msg="Failed to rotate secret secret")
        else:
            try:
                response = self.client.cancel_rotate_secret(SecretId=secret.name)
            except (BotoCoreError, ClientError) as e:
                self.module.fail_json_aws(e, msg="Failed to cancel rotation")
        return response

    def tag_secret(self, secret_name, tags):
        """Add the given boto3-format tag list to the secret."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            self.client.tag_resource(SecretId=secret_name, Tags=tags)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to add tag(s) to secret")

    def untag_secret(self, secret_name, tag_keys):
        """Remove the given tag keys from the secret."""
        if self.module.check_mode:
            self.module.exit_json(changed=True)
        try:
            self.client.untag_resource(SecretId=secret_name, TagKeys=tag_keys)
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Failed to remove tag(s) from secret")

    def secrets_match(self, desired_secret, current_secret):
        """Compare secrets except tags and rotation

        Args:
            desired_secret: camel dict representation of the desired secret state.
            current_secret: secret reference as returned by the secretsmanager api.

        Returns: bool
        """
        if desired_secret.description != current_secret.get("Description", ""):
            return False
        if desired_secret.kms_key_id != current_secret.get("KmsKeyId"):
            return False
        # Comparing values requires fetching the current (decrypted) value.
        current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name"))
        if desired_secret.secret_type == "SecretBinary":
            desired_value = to_bytes(desired_secret.secret)
        else:
            desired_value = desired_secret.secret
        if desired_value != current_secret_value.get(desired_secret.secret_type):
            return False
        return True
def rotation_match(desired_secret, current_secret):
    """Compare secrets rotation configuration

    Args:
        desired_secret: camel dict representation of the desired secret state.
        current_secret: secret reference as returned by the secretsmanager api.

    Returns: bool
    """
    enabled = desired_secret.rotation_enabled
    if enabled != current_secret.get("RotationEnabled", False):
        return False
    if not enabled:
        # Rotation disabled on both sides: nothing further to compare.
        return True
    return (
        desired_secret.rotation_lambda_arn == current_secret.get("RotationLambdaARN")
        and desired_secret.rotation_rules == current_secret.get("RotationRules")
    )
def compare_regions(desired_secret, current_secret):
    """Compare secrets replication configuration

    Args:
        desired_secret: camel dict representation of the desired secret state.
        current_secret: secret reference as returned by the secretsmanager api.

    Returns: tuple of (replica dicts to start replicating, region names to stop replicating)
    """
    regions_to_set_replication = []
    regions_to_remove_replication = []

    # replica_regions is None when replication is not managed by this task.
    if desired_secret.replica_regions is None:
        return regions_to_set_replication, regions_to_remove_replication

    # Bug fix: the previous implementation removed entries from the desired
    # list while iterating it AND appended a removal for every non-matching
    # (current, desired) pair, so a still-desired region could be scheduled
    # for removal. Plain set differences give the intended results.
    current_regions = {status["Region"] for status in current_secret.get("ReplicationStatus", [])}
    desired_regions = {replica["region"] for replica in desired_secret.replica_regions}

    regions_to_set_replication = [
        replica for replica in desired_secret.replica_regions if replica["region"] not in current_regions
    ]
    regions_to_remove_replication = [
        status["Region"]
        for status in current_secret.get("ReplicationStatus", [])
        if status["Region"] not in desired_regions
    ]

    return regions_to_set_replication, regions_to_remove_replication
def main():
    """Module entry point: parse arguments, then reconcile the requested
    secret state (value, resource policy, rotation, tags, replication)."""
    replica_args = dict(
        region=dict(type="str", required=True),
        kms_key_id=dict(type="str", required=False),
    )
    module = AnsibleAWSModule(
        argument_spec={
            "name": dict(required=True),
            "state": dict(choices=["present", "absent"], default="present"),
            "overwrite": dict(type="bool", default=True),
            "description": dict(default=""),
            "replica": dict(type="list", elements="dict", options=replica_args),
            "kms_key_id": dict(),
            "secret_type": dict(choices=["binary", "string"], default="string"),
            "secret": dict(default="", no_log=True),
            "json_secret": dict(type="json", no_log=True),
            "resource_policy": dict(type="json", default=None),
            "tags": dict(type="dict", default=None, aliases=["resource_tags"]),
            "purge_tags": dict(type="bool", default=True),
            "rotation_lambda": dict(),
            "rotation_interval": dict(type="int", default=30),
            "recovery_window": dict(type="int", default=30),
        },
        mutually_exclusive=[["secret", "json_secret"]],
        supports_check_mode=True,
    )
    changed = False
    state = module.params.get("state")
    secrets_mgr = SecretsManagerInterface(module)
    recovery_window = module.params.get("recovery_window")
    secret = Secret(
        module.params.get("name"),
        module.params.get("secret_type"),
        module.params.get("secret") or module.params.get("json_secret"),
        description=module.params.get("description"),
        replica_regions=module.params.get("replica"),
        kms_key_id=module.params.get("kms_key_id"),
        resource_policy=module.params.get("resource_policy"),
        tags=module.params.get("tags"),
        lambda_arn=module.params.get("rotation_lambda"),
        rotation_interval=module.params.get("rotation_interval"),
    )
    purge_tags = module.params.get("purge_tags")
    current_secret = secrets_mgr.get_secret(secret.name)
    if state == "absent":
        if current_secret:
            if not current_secret.get("DeletedDate"):
                # Not yet scheduled for deletion: schedule (or force) it now.
                result = camel_dict_to_snake_dict(
                    secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)
                )
                changed = True
            elif current_secret.get("DeletedDate") and recovery_window == 0:
                # Already scheduled, but a forced (no-recovery) delete was requested.
                result = camel_dict_to_snake_dict(
                    secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window)
                )
                changed = True
            else:
                result = "secret already scheduled for deletion"
        else:
            result = "secret does not exist"
    if state == "present":
        if current_secret is None:
            result = secrets_mgr.create_secret(secret)
            if secret.resource_policy and result.get("ARN"):
                result = secrets_mgr.put_resource_policy(secret)
            changed = True
        else:
            # current_secret exists; decide what to do with it
            if current_secret.get("DeletedDate"):
                secrets_mgr.restore_secret(secret.name)
                changed = True
            if not secrets_mgr.secrets_match(secret, current_secret):
                overwrite = module.params.get("overwrite")
                if overwrite:
                    result = secrets_mgr.update_secret(secret)
                    changed = True
            if not rotation_match(secret, current_secret):
                result = secrets_mgr.update_rotation(secret)
                changed = True
            # Reconcile the attached resource policy (add, replace, or delete).
            current_resource_policy_response = secrets_mgr.get_resource_policy(secret.name)
            current_resource_policy = current_resource_policy_response.get("ResourcePolicy")
            if compare_policies(secret.resource_policy, current_resource_policy):
                if secret.resource_policy is None and current_resource_policy:
                    result = secrets_mgr.delete_resource_policy(secret.name)
                else:
                    result = secrets_mgr.put_resource_policy(secret)
                changed = True
            # tags=None means "leave tags alone"; {} with purge_tags removes all.
            if module.params.get("tags") is not None:
                current_tags = boto3_tag_list_to_ansible_dict(current_secret.get("Tags", []))
                tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags, purge_tags)
                if tags_to_add:
                    secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add))
                    changed = True
                if tags_to_remove:
                    secrets_mgr.untag_secret(secret.name, tags_to_remove)
                    changed = True
            regions_to_set_replication, regions_to_remove_replication = compare_regions(secret, current_secret)
            if regions_to_set_replication:
                secrets_mgr.replicate_secret(secret.name, regions_to_set_replication)
                changed = True
            if regions_to_remove_replication:
                secrets_mgr.remove_replication(secret.name, regions_to_remove_replication)
                changed = True
        # Re-describe so the returned facts reflect all changes made above.
        result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name))
        if result.get("tags", None) is not None:
            result["tags_dict"] = boto3_tag_list_to_ansible_dict(result.get("tags", []))
        result.pop("response_metadata")
    module.exit_json(changed=changed, secret=result)
if __name__ == "__main__":
main()
| ansible-collections/community.aws | plugins/modules/secretsmanager_secret.py | secretsmanager_secret.py | py | 24,718 | python | en | code | 174 | github-code | 13 |
23876222138 | import datetime
import random
import string
from typing import Optional, Literal, List
import pymongo
from database import MongoSingleton
from features.schedule.models import Event, DatetimeGranularity, GuildScheduleConfig
from .AbstractScheduleDB import AbstractScheduleDB
class ScheduleDB(AbstractScheduleDB):
    """MongoDB-backed implementation of AbstractScheduleDB.

    Guild configuration lives in the ``Onigiri-Fillings.Guilds`` collection,
    events in ``Onigiri-Fillings.Events``. Methods are declared ``async`` to
    satisfy the abstract interface, but the underlying pymongo calls are
    blocking.
    """
    def __init__(self):
        # Reuse the process-wide Mongo client exposed by the singleton.
        self.db = MongoSingleton.conn().client
        self.guilds = self.db["Onigiri-Fillings"]["Guilds"]
        self.events = self.db["Onigiri-Fillings"]["Events"]
    async def get_guild_exists(self, guild_id: int) -> bool:
        """Return True if a config document exists for this guild."""
        return bool(self.guilds.find_one({'guild_id': guild_id}))
    async def get_available_event_id(self, guild_id) -> str:
        """Return a random 4-digit event id not yet used in this guild."""
        event_id = None
        while not event_id:
            new_id = ''.join(random.choices(string.digits, k=4))
            if not await self.get_event_exists(guild_id, new_id):
                event_id = new_id
        return event_id
    async def get_event_exists(self, guild_id, event_id: str) -> bool:
        """Return True if an event with this id exists in this guild."""
        return bool(self.events.find_one({"$and": [{"guild_id": guild_id}, {"event_id": event_id}]}))
    async def get_guild(self, guild_id: int) -> Optional[GuildScheduleConfig]:
        """Return the guild config, or None if the guild is unknown."""
        guild = self.guilds.find_one({'guild_id': guild_id})
        if not guild:
            return None
        else:
            return GuildScheduleConfig.from_mongo(guild)
    async def get_all_guilds(self) -> List[GuildScheduleConfig]:
        """Return configs for every registered guild."""
        return [GuildScheduleConfig.from_mongo(x) for x in self.guilds.find()]
    async def get_enabled_guilds(self) -> List[GuildScheduleConfig]:
        """Return configs for guilds whose schedule is enabled."""
        return [GuildScheduleConfig.from_mongo(x) for x in self.guilds.find({"enabled": True})]
    async def get_event(self, guild_id: int, event_id: str) -> Optional[Event]:
        """Return one event, or None if not found."""
        event = self.events.find_one({"$and": [{"guild_id": guild_id}, {"event_id": event_id}]})
        if event:
            return Event.from_mongo(event)
        else:
            return None
    async def get_all_events(self, guild_id: int) -> List[Event]:
        """Return events sorted by datetime/granularity/note.

        NOTE(review): ``guild_id`` is accepted but not used in the query, so
        this returns events across *all* guilds — confirm that is intended
        (compare with get_guild_events below).
        """
        return [Event.from_mongo(k) for k in self.events.find(sort=[
            ("datetime", pymongo.DESCENDING),
            ('datetime_granularity', pymongo.ASCENDING),
            ('note', pymongo.DESCENDING)
        ])]
    async def get_guild_events(self, guild_id: int) -> List[Event]:
        """Return this guild's events sorted by datetime/granularity/note."""
        return [Event.from_mongo(k) for k in self.events.find(
            {"guild_id": guild_id},
            sort=[
                ("datetime", pymongo.DESCENDING),
                ('datetime_granularity', pymongo.ASCENDING),
                ('note', pymongo.DESCENDING)
            ]
        )]
    async def create_guild(self, guild: GuildScheduleConfig) -> GuildScheduleConfig:
        """Insert a new guild config document and return it."""
        self.guilds.insert_one(guild.to_dict())
        return guild
    async def create_event(self, event: Event) -> Event:
        """Insert a new event document and return it."""
        self.events.insert_one(event.to_dict())
        return event
    async def update_guild(self, guild: GuildScheduleConfig) -> GuildScheduleConfig:
        """Replace the stored guild config wholesale and return it."""
        self.guilds.replace_one({"guild_id": guild.guild_id}, guild.to_dict())
        return guild
    async def update_event(self, event: Event) -> Event:
        """Replace the stored event document wholesale and return it."""
        self.events.replace_one({"$and": [{"guild_id": event.guild_id, "event_id": event.event_id}]}, event.to_dict())
        return event
    async def delete_guild(self, guild_id: int) -> None:
        """Delete one guild config document."""
        self.guilds.delete_one({"guild_id": guild_id})
    async def delete_event(self, guild_id: int, event_id: str) -> None:
        """Delete one event document."""
        self.events.delete_one({"$and": [{"guild_id": guild_id, "event_id": event_id}]})
    async def set_guild_enable(self, guild_id: int) -> None:
        """Mark the guild's schedule as enabled."""
        self.guilds.find_one_and_update({"guild_id": guild_id}, {"$set": {"enabled": True}})
    async def set_guild_disable(self, guild_id: int) -> None:
        """Mark the guild's schedule as disabled."""
        self.guilds.find_one_and_update({"guild_id": guild_id}, {"$set": {"enabled": False}})
    async def set_guild_talent(self, guild_id: int, talent: str) -> None:
        """Set the guild's talent field."""
        self.guilds.find_one_and_update({"guild_id": guild_id}, {"$set": {"talent": talent}})
    async def set_guild_description(self, guild_id: int, description: str) -> None:
        """Set the guild's description field."""
        self.guilds.find_one_and_update({"guild_id": guild_id}, {"$set": {"description": description}})
    async def set_guild_channel(self, guild_id: int, schedule_channel: int) -> None:
        """Set the channel id the schedule is posted in."""
        self.guilds.find_one_and_update(
            {"guild_id": guild_id},
            {"$set": {"schedule_channel_id": schedule_channel}}
        )
    async def set_guild_messages(self, guild_id: int, schedule_messages: List[int]) -> None:
        """Set the ids of the schedule messages owned by the bot."""
        self.guilds.find_one_and_update(
            {"guild_id": guild_id},
            {"$set": {"schedule_message_ids": schedule_messages}}
        )
    async def set_guild_editors(self, guild_id: int, editors: List[int]) -> None:
        """Set the role ids allowed to edit the schedule."""
        self.guilds.find_one_and_update(
            {"guild_id": guild_id},
            {"$set": {"editor_role_ids": editors}}
        )
    async def set_event_title(self, guild_id: int, event_id: str, title: str) -> None:
        """Set one event's title."""
        self.events.find_one_and_update(
            {'$and': [{"guild_id": guild_id, "event_id": event_id}]},
            {"$set": {"title": title}}
        )
    async def set_event_datetime(self, guild_id: int, event_id: str, dt: Optional[datetime.datetime]) -> None:
        """Set (or clear, with None) one event's datetime."""
        self.events.find_one_and_update(
            {'$and': [{"guild_id": guild_id, "event_id": event_id}]},
            {"$set": {"datetime": dt}}
        )
    async def set_event_datetime_granularity(self, guild_id: int, event_id: str, dt_g: DatetimeGranularity) -> None:
        """Set one event's datetime granularity (stored as a sub-document)."""
        self.events.find_one_and_update(
            {'$and': [{"guild_id": guild_id, "event_id": event_id}]},
            {"$set": {"datetime_granularity": dt_g.to_dict()}}
        )
    async def set_event_type(self, guild_id: int, event_id: str, t: Literal[0, 1, 2, 3, 4]) -> None:
        """Set one event's type code (0-4)."""
        self.events.find_one_and_update(
            {'$and': [{"guild_id": guild_id, "event_id": event_id}]},
            {"$set": {"type": t}}
        )
    async def set_event_stashed(self, guild_id: int, event_id: str, stashed: bool) -> None:
        """Set one event's stashed (hidden) flag."""
        self.events.find_one_and_update(
            {'$and': [{"guild_id": guild_id, "event_id": event_id}]},
            {"$set": {"stashed": stashed}}
        )
    async def set_event_url(self, guild_id: int, event_id: str, url: str) -> None:
        """Set one event's URL."""
        self.events.find_one_and_update(
            {'$and': [{"guild_id": guild_id, "event_id": event_id}]},
            {"$set": {"url": url}}
        )
    async def set_event_note(self, guild_id: int, event_id: str, note: str) -> None:
        """Set one event's note."""
        self.events.find_one_and_update(
            {'$and': [{"guild_id": guild_id, "event_id": event_id}]},
            {"$set": {"note": note}}
        )
| HuzzNZ/Onigiri | features/schedule/database/ScheduleDB.py | ScheduleDB.py | py | 6,754 | python | en | code | 0 | github-code | 13 |
from flask import Flask, redirect, url_for, render_template,request,session
from datetime import timedelta
#initiallize flask
app=Flask(__name__)
# NOTE(review): hard-coded secret key — load from config/env for production.
app.secret_key="H3!!0"
# "Permanent" sessions expire after 2 minutes of inactivity.
app.permanent_session_lifetime=timedelta(minutes=2)
#first page just on ip, route is '/' , used render_template, created a templates folder, with index.html
@app.route("/")
def home():
    """Render the landing page."""
    return render_template("index.html")
#use of dynamic content as variables to show on html code
@app.route("/login",methods=["POST","GET"])
def login():
    """Log a user in: store the name from the form field 'nm' in the session."""
    if request.method == "POST":
        # Keep the session past browser close, up to permanent_session_lifetime.
        session.permanent=True
        user=request.form['nm']
        session["user"]=user
        # NOTE(review): the /user route takes no `usr` argument, so this only
        # becomes a query-string parameter — confirm it is needed.
        return redirect(url_for("user",usr=user))
    else:
        # Already logged in? Skip the form and go straight to the profile.
        if "user" in session:
            return redirect(url_for("user"))
        return render_template("login.html")
@app.route("/user")
def user():
    """Show the profile page for the logged-in user, or redirect to /login."""
    if "user" in session:
        user=session["user"]
        return render_template("user.html",user=user)
    else:
        return redirect(url_for("login"))
@app.route("/test")
def test():
    """Render the scratch/test template."""
    return render_template("new.html")
@app.route("/logout")
def logout():
    """Drop the user from the session and return to the login page."""
    session.pop("user",None)
    return redirect(url_for("login"))
#to access phone as well use ip of server computer
if __name__=="__main__":
    # NOTE(review): hard-coded LAN IP — use host="0.0.0.0" to listen on all
    # interfaces, and disable debug outside development.
    app.run(host='192.168.1.104',debug=True)
from fuzzywuzzy import process
# Load the city database: one city name per line.
# NOTE(review): hard-coded Windows path — confirm for other machines.
with open(r"d:\indian_cities_data.txt","r") as f:
    cities=f.read().split('\n')
def get_cities(query,database,limit=5):
    """Return up to `limit` (name, score) fuzzy matches for `query`."""
    return process.extract(query=query,choices=database,limit=limit)
print("Enter city name")
cname = input()
r_cities=get_cities(cname,cities)
# Similarity score (0-100) of the best match.
rank = r_cities[0][1]
if rank == 100:
    # Exact match: show only the best hit.
    print(r_cities[0])
else:
    print(r_cities)
| MuskanChaddha/-Fuzzy-String-Matching | code.py | code.py | py | 388 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMerchantOperatorRoleCreateModel(object):
    """Request model for creating a Koubei merchant operator role.

    Exposes three optional fields (auth_code, role_id, role_name) and
    converts to/from the plain-dict form used by the Alipay OpenAPI gateway.
    """

    # Field names, in serialisation order.
    _FIELDS = ("auth_code", "role_id", "role_name")

    def __init__(self):
        # Backing fields for the public properties; all optional.
        self._auth_code = None
        self._role_id = None
        self._role_name = None

    @property
    def auth_code(self):
        return self._auth_code

    @auth_code.setter
    def auth_code(self, value):
        self._auth_code = value

    @property
    def role_id(self):
        return self._role_id

    @role_id.setter
    def role_id(self, value):
        self._role_id = value

    @property
    def role_name(self):
        return self._role_name

    @role_name.setter
    def role_name(self, value):
        self._role_name = value

    def to_alipay_dict(self):
        """Serialise the truthy fields into a plain dict."""
        params = {}
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            # Nested API objects know how to serialise themselves.
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = KoubeiMerchantOperatorRoleCreateModel()
        for field in KoubeiMerchantOperatorRoleCreateModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/KoubeiMerchantOperatorRoleCreateModel.py | KoubeiMerchantOperatorRoleCreateModel.py | py | 1,854 | python | en | code | 241 | github-code | 13 |
import bitstring
# Get input file name
def getFilename():
    """Prompt (in Portuguese) for the name of the file to compress; returns it."""
    print("Insira o arquivo que sofrerรก compressรฃo (com extensรฃo):")
    filename = input()
    return filename
def getN():
    """Prompt for the block side length N and return it as int."""
    print("Insira o tamanho do lado de cada bloco (N): ")
    N = int(input())
    return N
def getM():
    """Prompt for the number of quantisation intervals M and return it as int."""
    print("Insira a quantidade de intervalos (M): ")
    M = int(input())
    return M
def writeCompressed(imageFilename, compressed):
    """Pack `compressed` (an iterable of lists of bit-strings like "0101")
    into a binary <name>.du file next to the input image.

    NOTE(review): slicing off the last 4 chars assumes a 3-character
    extension — confirm for other extensions.
    """
    compressedName = imageFilename[:-4] + ".du"
    output = open(compressedName, "wb")
    # Start translation of the original file
    bitArray = ""
    for _list in compressed:
        for elem in _list:
            #print(elem)
            bitArray += elem
    # Counting useless bits
    artificialBits = bytes([0])
    # Checking if will be necessary to add artificial bits
    if (len(bitArray) % 8):
        quo = int(int(len(bitArray) / 8) + 1)
        artificialBits = bytes([8 * quo - len(bitArray)])
    # Creating bitstream from string
    deployArray = bitstring.BitArray("0b" + bitArray)
    # Writing how many of the final bits are useless
    #output.write(artificialBits)
    # NOTE(review): the write above is commented out, so `artificialBits`
    # (the padding count) is computed but never stored — confirm intended.
    # Writing bitstream
    output.write(deployArray.tobytes())
    # NOTE(review): `output` is never closed — consider a `with` block.
| EduardoLR10/imageCompressors | ciqa/my_io.py | my_io.py | py | 1,200 | python | en | code | 1 | github-code | 13 |
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
import inference
import cv2
import numpy as np
# Load a sample image and resize to the 64x64 input the model expects.
image = cv2.imread('./test_data/00.0.jpg')
image = cv2.resize(image, (64, 64))
# np.save('test_inference.npy', image)
result = inference.predict(image)
# `rank` is the list of (no, accuracy) candidate records returned by predict.
rank = result['rank']
i = 0
for r in rank:
    i += 1
    print('{} : {}'.format(r['no'], r['accuracy']))
# Print how many candidates were returned.
print(i)
# i = 1
# for r in result['rank']:
#     print('{} : {} : {}'.format(str(i), r['no'], str(r['accuracy'])))
#     i += 1
| keydepth/facedetect | bin/stub_inference.py | stub_inference.py | py | 477 | python | en | code | 0 | github-code | 13 |
"""Collect JHU Cases/Deaths data"""
import os
import sys
import pandas as pd
import numpy as np
from termcolor import colored
from cowidev import PATHS
from cowidev.utils.utils import export_timestamp
from cowidev.jhu._parser import _parse_args
from cowidev.jhu.shared import (
load_population,
load_owid_continents,
inject_owid_aggregates,
inject_per_million,
inject_days_since,
inject_cfr,
inject_population,
inject_rolling_avg,
inject_exemplars,
inject_doubling_days,
inject_weekly_growth,
inject_biweekly_growth,
standard_export,
ZERO_DAY,
)
from cowidev.grapher.db.utils.slack_client import send_warning, send_success
from cowidev.grapher.db.utils.db_imports import import_dataset
from cowidev import PATHS
from cowidev.utils.s3 import obj_to_s3
INPUT_PATH = PATHS.INTERNAL_INPUT_JHU_DIR
OUTPUT_PATH = PATHS.DATA_JHU_DIR
TMP_PATH = PATHS.INTERNAL_TMP_DIR
LOCATIONS_CSV_PATH = PATHS.INTERNAL_INPUT_JHU_STD_FILE
# Coloured prefixes for console diagnostics.
ERROR = colored("[Error]", "red")
WARNING = colored("[Warning]", "yellow")
DATASET_NAME = "COVID-19 - Johns Hopkins University"
# (location, ISO date, metric) triples whose daily value is a known reporting
# artefact; discard_rows() blanks the matching new_<metric> value.
LARGE_DATA_CORRECTIONS = [
    ("Austria", "2022-04-21", "deaths"),
    ("Austria", "2022-04-22", "deaths"),
    ("Brazil", "2021-09-18", "cases"),
    ("Chile", "2020-07-17", "deaths"),
    ("Chile", "2022-03-21", "deaths"),
    ("China", "2020-04-17", "deaths"),
    ("Denmark", "2021-12-21", "deaths"),
    ("Ecuador", "2020-09-07", "deaths"),
    ("Ecuador", "2021-07-20", "deaths"),
    ("Finland", "2022-03-07", "deaths"),
    ("India", "2021-06-10", "deaths"),
    ("Mexico", "2020-10-05", "deaths"),
    ("Mexico", "2021-06-01", "deaths"),
    ("Moldova", "2021-12-31", "deaths"),
    ("Norway", "2022-03-17", "deaths"),
    ("South Africa", "2021-11-23", "cases"),
    ("South Africa", "2022-01-06", "deaths"),
    ("Spain", "2020-06-19", "deaths"),
    ("Turkey", "2020-12-10", "cases"),
    ("United Kingdom", "2022-01-31", "cases"),
    ("United Kingdom", "2022-02-01", "deaths"),
    ("United Kingdom", "2022-04-06", "deaths"),
]
def print_err(*args, **kwargs):
    """print() to stderr; returns None like print()."""
    return print(*args, file=sys.stderr, **kwargs)
def get_metric(metric, region):
    """Load one JHU time series ("confirmed" or "deaths") for `region`.

    Reads the wide CSV from INPUT_PATH and returns a long-format frame with
    Country/Region, date, the cumulative total_* column and the derived
    daily-change new_* column. Exits the process on an unknown metric.
    """
    file_path = os.path.join(INPUT_PATH, f"time_series_covid19_{metric}_{region}.csv")
    df = pd.read_csv(file_path).drop(columns=["Lat", "Long"])
    # Map the JHU metric name onto our output column name.
    if metric == "confirmed":
        metric = "total_cases"
    elif metric == "deaths":
        metric = "total_deaths"
    else:
        print_err("Unknown metric requested.\n")
        sys.exit(1)
    # Relabel as 'International'
    df.loc[df["Country/Region"].isin(["Diamond Princess", "MS Zaandam"]), "Country/Region"] = "International"
    # Exclude special entities
    df = df[-df["Country/Region"].isin(["Summer Olympics 2020", "Winter Olympics 2022", "Antarctica"])]
    # Relabel Hong Kong to its own time series
    # These dependent territories are promoted to top-level series of their own.
    subregion_to_region = [
        "Anguilla",
        "Aruba",
        "Bermuda",
        "Bonaire, Sint Eustatius and Saba",
        "British Virgin Islands",
        "Cayman Islands",
        "Cook Islands",
        "Curacao",
        "Falkland Islands (Malvinas)",
        "Faroe Islands",
        "French Polynesia",
        "Gibraltar",
        "Greenland",
        "Hong Kong",
        "Isle of Man",
        "Macau",
        "Montserrat",
        "New Caledonia",
        "Saint Helena, Ascension and Tristan da Cunha",
        "Saint Pierre and Miquelon",
        "Turks and Caicos Islands",
        "Wallis and Futuna",
    ]
    msk = df["Province/State"].isin(subregion_to_region)
    df.loc[msk, "Country/Region"] = df.loc[msk, "Province/State"]
    # df.loc[df["Province/State"] == "Hong Kong", "Country/Region"] = "Hong Kong"
    # Aggregate provinces into one national series per country.
    national = df.drop(columns="Province/State").groupby("Country/Region", as_index=False).sum()
    df = national.copy()  # df = pd.concat([national, subnational]).reset_index(drop=True)
    # Wide -> long: one row per country per date.
    df = df.melt(id_vars="Country/Region", var_name="date", value_name=metric)
    df.loc[:, "date"] = pd.to_datetime(df["date"], format="%m/%d/%y").dt.date
    df = df.sort_values("date")
    # Only start country series when total_cases > 0 or total_deaths > 0 to minimize file size
    cutoff = (
        df.loc[df[metric] == 0, ["date", "Country/Region"]]
        .groupby("Country/Region", as_index=False)
        .max()
        .rename(columns={"date": "cutoff"})
    )
    df = df.merge(cutoff, on="Country/Region", how="left")
    df = df[(df.date >= df.cutoff) | (df.cutoff.isna())].drop(columns="cutoff")
    # Daily change derived from the cumulative series.
    df.loc[:, metric.replace("total_", "new_")] = df[metric] - df.groupby("Country/Region")[metric].shift(1)
    return df
def load_data():
    """Merge global cases and deaths into one frame keyed by date/country."""
    global_cases = get_metric("confirmed", "global")
    global_deaths = get_metric("deaths", "global")
    return pd.merge(global_cases, global_deaths, on=["date", "Country/Region"], how="outer")
def load_locations():
    """Load the JHU->OWID country-name mapping (keep_default_na avoids
    parsing names like "Namibia" -> NaN)."""
    return pd.read_csv(LOCATIONS_CSV_PATH, keep_default_na=False).rename(
        columns={"Country": "Country/Region", "Our World In Data Name": "location"}
    )
def _load_merged():
    """Join the merged case/death data with the OWID location names."""
    df_data = load_data()
    df_locs = load_locations()
    return df_data.merge(df_locs, how="left", on=["Country/Region"])
def check_data_correctness(df_merged):
    """Validate the merged frame; return True when no hard errors were found.

    Hard errors: unmapped country names, duplicate (date, location) rows.
    Missing population figures only produce a warning (console + Slack).
    """
    errors = 0
    # Check that every country name is standardized
    df_uniq = df_merged[["Country/Region", "location"]].drop_duplicates()
    if df_uniq["location"].isnull().any():
        print_err("\n" + ERROR + " Could not find OWID names for:")
        print_err(df_uniq[df_uniq["location"].isnull()])
        errors += 1
    # Drop missing locations for the further checks โ that error is addressed above
    df_merged = df_merged.dropna(subset=["location"])
    # Check for duplicate rows
    if df_merged.duplicated(subset=["date", "location"]).any():
        print_err("\n" + ERROR + " Found duplicate rows:")
        print_err(df_merged[df_merged.duplicated(subset=["date", "location"])])
        errors += 1
    # Check for missing population figures
    df_pop = load_population()
    pop_entity_diff = set(df_uniq["location"]) - set(df_pop["location"]) - set(["International"])
    if len(pop_entity_diff) > 0:
        # this is not an error, so don't increment errors variable
        print("\n" + WARNING + " These entities were not found in the population dataset:")
        print(pop_entity_diff)
        print()
        formatted_msg = ", ".join(f"`{entity}`" for entity in pop_entity_diff)
        send_warning(
            channel="corona-data-updates",
            title="Some entities are missing from the population dataset",
            message=formatted_msg,
        )
    return errors == 0
def hide_recent_zeros(df: pd.DataFrame) -> pd.DataFrame:
    """Blank trailing zero days that are likely reporting gaps, in place.

    For one location's frame: if the series last reported a positive value
    at least `threshold` high (100 cases / 10 deaths) and then went to zero
    for fewer than 7 days up to the latest date, the trailing daily values
    are set to NaN. If a metric has no positive value at all, the frame is
    returned untouched immediately (matching the original early-return).
    """
    latest = df.date.max()
    for metric, threshold in (("cases", 100), ("deaths", 10)):
        col = f"new_{metric}"
        last_positive = df.loc[df[col] > 0, "date"].max()
        if pd.isnull(last_positive):
            return df
        if last_positive != latest:
            last_value = df.loc[df.date == last_positive, col].item()
            if last_value >= threshold and (latest - last_positive).days < 7:
                df.loc[df.date > last_positive, col] = np.nan
    return df
def discard_rows(df):
    """Blank daily values that are known artefacts, keeping cumulative totals.

    Negative daily changes, entries listed in LARGE_DATA_CORRECTIONS, and
    suspicious trailing zeros (via hide_recent_zeros) all become NaN in the
    new_cases / new_deaths columns; this also NaNs the 7-day averages later.
    """
    # For all rows where new_cases or new_deaths is negative, keep the
    # cumulative value but drop the daily change.
    for col in ("new_cases", "new_deaths"):
        df.loc[df[col] < 0, col] = np.nan
    # Custom data corrections
    for location, day, metric in LARGE_DATA_CORRECTIONS:
        df.loc[(df.location == location) & (df.date.astype(str) == day), f"new_{metric}"] = np.nan
    # Per-location pass: hide a recent run of zeros that looks like a
    # temporary reporting outage (up to 7 days).
    df = df.sort_values(["location", "date"]).groupby("location").apply(hide_recent_zeros)
    return df
def load_standardized(df):
    """Run the standard derivation pipeline over the merged frame.

    Adds OWID aggregates, growth/doubling metrics, per-million columns,
    rolling averages, CFR, days-since and exemplar columns; returns the
    frame sorted by location and date.
    """
    df = df[["date", "location", "new_cases", "new_deaths", "total_cases", "total_deaths"]]
    df = discard_rows(df)
    df = inject_owid_aggregates(df)
    df = inject_weekly_growth(df)
    df = inject_biweekly_growth(df)
    df = inject_doubling_days(df)
    df = inject_per_million(
        df,
        [
            "new_cases",
            "new_deaths",
            "total_cases",
            "total_deaths",
            "weekly_cases",
            "weekly_deaths",
            "biweekly_cases",
            "biweekly_deaths",
        ],
    )
    df = inject_rolling_avg(df)
    df = inject_cfr(df)
    df = inject_days_since(df)
    df = inject_exemplars(df)
    return df.sort_values(by=["location", "date"])
def export(df_merged):
    """Write locations.csv plus the standard per-metric CSVs to OUTPUT_PATH.

    Returns whatever standard_export returns (truthiness is checked by main).
    """
    df_loc = df_merged[["Country/Region", "location"]].drop_duplicates()
    df_loc = df_loc.merge(load_owid_continents(), on="location", how="left")
    df_loc = inject_population(df_loc)
    # Nullable Int64 keeps missing populations as <NA> instead of floats.
    df_loc["population_year"] = df_loc["population_year"].round().astype("Int64")
    df_loc["population"] = df_loc["population"].round().astype("Int64")
    df_loc = df_loc.sort_values("location")
    df_loc.to_csv(os.path.join(OUTPUT_PATH, "locations.csv"), index=False)
    # The rest of the CSVs
    return standard_export(load_standardized(df_merged), OUTPUT_PATH, DATASET_NAME)
def clean_global_subnational(metric):
    """Download and reshape the global subnational series for one metric.

    `metric` is the JHU name ("confirmed" or "deaths"); the output uses
    cases/deaths column names plus a 7-day smoothed daily change. Fetches
    the CSV straight from the JHU GitHub repository.
    """
    url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_{metric}_global.csv"
    metric = "cases" if metric == "confirmed" else "deaths"
    df = (
        pd.read_csv(url, na_values="")
        .drop(columns=["Lat", "Long"])
        .dropna(subset=["Province/State"])
        .melt(id_vars=["Country/Region", "Province/State"], var_name="date", value_name=f"total_{metric}")
        .rename(columns={"Country/Region": "location1", "Province/State": "location2"})
    )
    df["date"] = pd.to_datetime(df.date).dt.date.astype(str)
    df = df.sort_values(["location1", "location2", "date"])
    df[f"new_{metric}"] = df[f"total_{metric}"] - df.groupby(["location1", "location2"])[f"total_{metric}"].shift(1)
    df[f"new_{metric}_smoothed"] = (
        df.groupby(["location1", "location2"]).rolling(7)[f"new_{metric}"].mean().droplevel(level=[0, 1]).round(2)
    )
    # No third-level region in the global file.
    df["location3"] = pd.NA
    return df
def clean_us_subnational(metric):
    """Download and reshape the US county-level series for one metric.

    Same output shape as clean_global_subnational, with location2 = state
    and location3 = county; location1 is fixed to "United States".
    """
    url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_{metric}_US.csv"
    metric = "cases" if metric == "confirmed" else "deaths"
    df = (
        pd.read_csv(url)
        .drop(
            columns=[
                "UID",
                "iso2",
                "iso3",
                "code3",
                "FIPS",
                "Country_Region",
                "Lat",
                "Long_",
                "Combined_Key",
                "Population",
            ],
            # The deaths file has a Population column the cases file lacks.
            errors="ignore",
        )
        .melt(id_vars=["Province_State", "Admin2"], var_name="date", value_name=f"total_{metric}")
        .rename(columns={"Province_State": "location2", "Admin2": "location3"})
    )
    df["date"] = pd.to_datetime(df.date).dt.date.astype(str)
    df = df.sort_values(["location2", "location3", "date"])
    df[f"new_{metric}"] = df[f"total_{metric}"] - df.groupby(["location2", "location3"])[f"total_{metric}"].shift(1)
    df[f"new_{metric}_smoothed"] = (
        df.groupby(["location2", "location3"]).rolling(7)[f"new_{metric}"].mean().droplevel(level=[0, 1]).round(2)
    )
    df["location1"] = "United States"
    return df
def create_subnational():
    """Build the combined subnational cases/deaths file and upload it to S3.

    NOTE(review): the "(unknown)" placeholders below look like scrubbed uses
    of the `filename` variable (which is otherwise unused) — confirm the
    intended archive/object names before relying on this function.
    """
    global_cases = clean_global_subnational("confirmed")
    global_deaths = clean_global_subnational("deaths")
    us_cases = clean_us_subnational("confirmed")
    us_deaths = clean_us_subnational("deaths")
    df = pd.concat(
        [
            pd.merge(global_cases, global_deaths, on=["location1", "location2", "location3", "date"], how="outer"),
            pd.merge(us_cases, us_deaths, on=["location1", "location2", "location3", "date"], how="outer"),
        ]
    ).sort_values(["location1", "location2", "location3", "date"])[
        [
            "location1",
            "location2",
            "location3",
            "date",
            "total_cases",
            "new_cases",
            "new_cases_smoothed",
            "total_deaths",
            "new_deaths",
            "new_deaths_smoothed",
        ]
    ]
    # Drop rows before the first confirmed case to keep the file small.
    df = df[df.total_cases > 0]
    filename = "subnational_cases_deaths"
    compression = {"method": "zip", "archive_name": f"(unknown).csv"}
    # df.to_csv(os.path.join(OUTPUT_PATH, f"(unknown).zip"), index=False, compression=compression)
    obj_to_s3(df, s3_path="s3://covid-19/public/jhu/(unknown).zip", compression=compression, public=True)
def main(logger, skip_download=False):
    """Run the full JHU ETL: download, validate, export, subnational, timestamp.

    :param logger: logger used for progress and error messages.
    :param skip_download: if True, reuse the CSVs already on disk.
    :raises ValueError: when the correctness check or the export fails.
    """
    if not skip_download:
        logger.info("\nAttempting to download latest CSV files...")
        # BUG FIX: download_csv requires the logger argument; the previous
        # bare call raised TypeError.
        download_csv(logger)
    df_merged = _load_merged()
    if check_data_correctness(df_merged):
        logger.info("Data correctness check %s.\n" % colored("passed", "green"))
    else:
        logger.error("Data correctness check %s.\n" % colored("failed", "red"))
        raise ValueError("Data correctness check failed.")
    if export(df_merged):
        logger.info("Successfully exported CSVs to %s\n" % colored(os.path.abspath(OUTPUT_PATH), "magenta"))
    else:
        logger.error("JHU export failed.\n")
        raise ValueError("JHU export failed.")
    logger.info("Generating subnational fileโฆ")
    create_subnational()
    # Export timestamp
    export_timestamp(PATHS.DATA_TIMESTAMP_JHU_FILE)
def download_csv(logger):
    """Fetch the latest global confirmed/deaths CSVs from GitHub into INPUT_PATH.

    Uses curl via os.system; `-f` makes curl fail silently on HTTP errors,
    but the exit status is not checked here.
    """
    files = ["time_series_covid19_confirmed_global.csv", "time_series_covid19_deaths_global.csv"]
    for file in files:
        logger.info(file)
        os.system(
            f"curl --silent -f -o {INPUT_PATH}/{file} -L"
            f" https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/{file}"
        )
def update_db():
    """Import the exported dataset CSV into the Grapher database."""
    import_dataset(
        dataset_name=DATASET_NAME,
        namespace="owid",
        csv_path=os.path.join(OUTPUT_PATH, DATASET_NAME + ".csv"),
        # Charts treat the x axis as days counted from ZERO_DAY.
        default_variable_display={"yearIsDay": True, "zeroDay": ZERO_DAY},
        source_name="Johns Hopkins University CSSE COVID-19 Data",
        slack_notifications=False,
    )
def run_step(step: str, skip_download):
    """Dispatch one pipeline step: "download", "etl" or "grapher-db".

    BUG FIX: main() and download_csv() both require a logger as their first
    argument; the previous calls omitted it and raised TypeError.
    """
    logger = logging.getLogger(__name__)
    if step == "download":
        download_csv(logger)
    if step == "etl":
        main(logger, skip_download=skip_download)
    elif step == "grapher-db":
        update_db()
if __name__ == "__main__":
    # CLI entry point: parse --step/--skip-download and dispatch.
    args = _parse_args()
    run_step(step=args.step, skip_download=args.skip_download)
| ovenprint/pandemic-project | data/covid-19-data-master 16.5.22/scripts/src/cowidev/jhu/__main__.py | __main__.py | py | 15,505 | python | en | code | 0 | github-code | 13 |
"""Breuer state."""
import numpy as np
from toqito.perms import symmetric_projection
from toqito.states import max_entangled
def breuer(dim: int, lam: float) -> np.ndarray:
    r"""
    Produce a Breuer bound entangled state [HPBreuer]_.

    Gives a Breuer bound entangled state for two qudits of local dimension
    :code:`dim`, with the :code:`lam` parameter describing the weight of the
    singlet component as described in [HPBreuer]_.

    This function was adapted from the QETLAB package.

    Examples
    ==========
    We can generate a Breuer state of dimension :math:`4` with weight
    :math:`0.1`. For any weight above :math:`0`, the state will be bound
    entangled, that is, it will satisfy the PPT criterion, but it will be
    entangled.

    >>> from toqito.states import breuer
    >>> breuer(2, 0.1)
    [[ 0.3  0.  -0.   0. ]
     [ 0.   0.2  0.1  0. ]
     [-0.   0.1  0.2 -0. ]
     [ 0.   0.   -0.  0.3]]

    References
    ==========
    .. [HPBreuer] H-P. Breuer. Optimal entanglement criterion for mixed quantum states.
        E-print: arXiv:quant-ph/0605036, 2006.

    :raises ValueError: If `dim` is not an even positive integer.
    :param dim: Dimension of the Breuer state (even, positive).
    :param lam: The weight of the singlet component.
    :return: Breuer state of dimension :code:`dim` with weight :code:`lam`.
    """
    if dim % 2 == 1 or dim <= 0:
        raise ValueError(f"The value {dim} must be an even positive integer.")

    # Antidiagonal matrix with alternating +/-1 entries.
    v_mat = np.fliplr(np.diag((-1) ** np.mod(np.arange(1, dim + 1), 2)))
    # BUG FIX: removed a stray `max_entangled(dim)` call whose result was
    # discarded — it only wasted work.
    psi = np.dot(np.kron(np.identity(dim), v_mat), max_entangled(dim))
    # Weighted mixture of the singlet projector and the (normalized)
    # symmetric projection.
    return lam * (psi * psi.conj().T) + (1 - lam) * 2 * symmetric_projection(dim) / (
        dim * (dim + 1)
    )
| vprusso/toqito | toqito/states/breuer.py | breuer.py | py | 1,734 | python | en | code | 118 | github-code | 13 |
# for i in range(0,50):
# if i/2 !=0:
# return i
# def linear_search(arr, target):
# for i in range(0,len(arr)):
# if arr[i]==target:
# print(arr[i],'is present in array')
# array=[1,2,3,4,5,6,7,8,9,10]
# linear_search(array,4)
def binary_search(arr, target):
    """Return the index of `target` in the sorted sequence `arr`, or -1.

    BUG FIX: the previous version returned `mid - 1` / `mid + 1` (candidate
    indices, not results) on the first comparison instead of narrowing the
    search window, and fell off the loop returning None. This is the
    standard iterative binary search; -1 consistently signals "not found".
    """
    low = 0
    high = len(arr) - 1
    while low <= high:
        mid = (low + high) // 2
        if arr[mid] == target:
            return mid
        elif arr[mid] < target:
            # Target is in the upper half.
            low = mid + 1
        else:
            # Target is in the lower half.
            high = mid - 1
    return -1
# BUG FIX: the original passed a set, which is unordered and not indexable,
# so arr[mid] raised TypeError; a sorted list is required. Also guard against
# index 0 being falsy and against a None/-1 "not found" result.
array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
bs = binary_search(array, 5)
if bs is not None and bs != -1:
    print(bs, 'is found')
else:
    print('item is not found')
| ahmedsharabasy/opp-and-data-structure | code python/review.py | review.py | py | 720 | python | en | code | 0 | github-code | 13 |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 1 18:52:40 2021
Problem 66: Diophantine equation
https://projecteuler.net/problem=66
@author: Admin
"""
import math
def solvePell(n):
    """Return the fundamental solution (x, y) of Pell's equation x² - n·y² = 1.

    Uses the continued-fraction expansion of sqrt(n), tracking the
    convergents until one satisfies the equation. `n` must be a positive
    non-square integer (for square n the loop would never terminate).

    BUG FIX: uses math.isqrt instead of int(math.sqrt(n)) — float sqrt can
    round the wrong way for large n, which would corrupt the expansion.
    """
    a0 = math.isqrt(n)  # floor(sqrt(n)), exact for arbitrarily large n
    y, z, r = a0, 1, a0 << 1
    e1, e2 = 1, 0
    f1, f2 = 0, 1
    while True:
        # Next partial quotient r of the continued fraction of sqrt(n).
        y = r * z - y
        z = (n - y * y) // z
        r = (a0 + y) // z
        # Advance the convergents' numerators/denominators.
        e1, e2 = e2, e1 + e2 * r
        f1, f2 = f2, f1 + f2 * r
        a, b = f2 * a0 + e2, f2
        if a * a - n * b * b == 1:
            return a, b
def solution():
    """Project Euler 66: find d <= 1000 maximising the minimal x with
    x² - d·y² = 1, and print it in the "x: {x} d: {d}" format."""
    best_x, best_d = 0, 0
    for d in range(2, 1001):
        # Pell's equation has no solution when d is a perfect square.
        if math.isqrt(d) ** 2 == d:
            continue
        x, _ = solvePell(d)
        if x > best_x:
            best_x, best_d = x, d
    print("x: {} d: {}".format(best_x, best_d))
# Run only when executed as a script, so importing this module stays cheap.
if __name__ == "__main__":
    solution()
| KubiakJakub01/ProjectEuler | src/Problem66.py | Problem66.py | py | 813 | python | en | code | 0 | github-code | 13 |
bot_template = "BOT : {0}"
user_template = "USER : {0}"  # prefix used when echoing the user's input
def respond(message):
    """Build the bot's echo reply for a user message."""
    return "I can hear, you said " + message
def send_message(message):
    """Print the user's message and the bot's reply using the shared templates."""
    print(user_template.format(message))
    print(bot_template.format(respond(message)))
# Simple REPL: echo every line the user types (Ctrl-C to quit).
while True:
    a=input()
    send_message(a)
| bhatnagaranshika02/ChatBots | EchoBot.py | EchoBot.py | py | 332 | python | en | code | 2 | github-code | 13 |
import os
import torch
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
def visualize_embeddings_from_checkpoint(checkpoint_path, save_path):
    """
    Load the checkpoint, extract token embeddings, and visualize them using PCA.
    Args:
        checkpoint_path (str): Path to the saved checkpoint.
        save_path (str): Path to save the generated visualization.
    """
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    checkpoint = torch.load(checkpoint_path)
    # Check if the token embeddings are in the checkpoint
    if "tok_embeddings" not in checkpoint:
        print("No token embeddings found in the checkpoint.")
        return
    # Retrieve the token embeddings
    # Move to CPU first so .numpy() works for GPU checkpoints.
    tok_embeddings = checkpoint["tok_embeddings"].cpu().numpy()
    # Reduce dimensionality using PCA
    pca = PCA(n_components=2)
    reduced_embeddings = pca.fit_transform(tok_embeddings)
    # Plot the reduced embeddings
    plt.figure(figsize=(10, 10))
    plt.scatter(reduced_embeddings[:, 0], reduced_embeddings[:, 1], alpha=0.5)
    plt.xlabel("Principal Component 1")
    plt.ylabel("Principal Component 2")
    plt.title("Token Embeddings Visualization")
    # Save the figure
    plt.savefig(save_path)
    # Close the figure to free matplotlib memory.
    plt.close()
| Gallifantjack/llama2_prebias | vizualisations/visualize_embd.py | visualize_embd.py | py | 1,221 | python | en | code | 0 | github-code | 13 |
import subprocess
import pyautogui
import io
import sys
import platform
import socket
import re
import uuid
import json
import psutil
import logging
import random
import os
import requests
import webbrowser
from pyrogram import Client, filters
from PIL import ImageGrab, Image
from io import BytesIO
from win10toast import ToastNotifier
import mouse
from pyrogram.types import Message, BotCommand, InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
toaster = ToastNotifier()
# Pyrogram session named "my_idiot"; API credentials come from the session file.
app = Client("my_idiot")
# NOTE(review): `global` at module level is a no-op statement.
global OWNER_ID
# Hard-coded super-owner: the only account allowed to remove other owners.
OWNER_ID = 1829043559
# Load the list of owners from a file
with open("owners.json", "r") as f:
    owners = json.load(f)
@app.on_message(filters.command(["delowner"]))
async def del_owner(client, message):
    """/delowner <id|mention>: remove a user from the owners list.

    Only the hard-coded OWNER_ID may use this command; silently ignores
    everyone else. NOTE(review): after `int(user_id)` the value can never be
    None (int() raises on bad input instead), so the "Invalid user ID" branch
    is unreachable and a non-numeric argument without a mention entity will
    raise — confirm and consider a try/except around the conversion.
    """
    id = message.from_user.id
    global owners
    text = message.text.split()
    global OWNER_ID
    if id == OWNER_ID:
        if len(text) == 1:
            # If the user didn't provide any arguments, send a message asking for them
            await message.reply("Please provide the ID or mention of the user you want to remove as owner.")
        else:
            # Get the user ID from the message text
            user_id = text[1]
            if not user_id.isdigit():
                # If the provided argument is not a number, try to extract the user ID from a mention
                # NOTE(review): assumes entity index 1 is the mention — confirm.
                user_id = message.entities[1].user.id
            user_id = int(user_id)
            # If user_id is None, the provided argument is invalid
            if user_id is None:
                await message.reply("Invalid user ID or mention.")
            else:
                # Remove the user from the owners list
                if user_id in owners:
                    owners.remove(user_id)
                    # Persist the updated list so it survives restarts.
                    with open("owners.json", "w") as f:
                        json.dump(owners, f)
                    await message.reply(f"User {user_id} removed from owners list.")
                else:
                    await message.reply(f"User {user_id} is not an owner.")
# Define a filter to check if a user is an owner
def is_owner(message):
    """Return True if the message sender's id is in the owners list."""
    return message.from_user.id in owners
@app.on_message(filters.command("addowner"))
def add_owner(client, message):
    """/addowner [id] (or reply to a user): add a user to the owners list.

    Only existing owners may use it. NOTE(review): this handler is sync while
    the others are async, and message.reply is awaited elsewhere — confirm
    the replies are actually delivered with this client.
    """
    if is_owner(message):
        user_id = None
        # Prefer the replied-to user; fall back to a numeric argument.
        if message.reply_to_message:
            user_id = message.reply_to_message.from_user.id
        elif len(message.command) == 2:
            try:
                user_id = int(message.command[1])
            except ValueError:
                pass
        if user_id:
            if user_id not in owners:
                owners.append(user_id)
                # Persist so the change survives restarts.
                with open("owners.json", "w") as f:
                    json.dump(owners, f)
                message.reply(f"{user_id} has been added as an owner.")
            else:
                message.reply(f"{user_id} is already an owner.")
        else:
            message.reply("Please reply to a message from the user you want to add as an owner or provide a valid user ID.")
@app.on_message(filters.command("help"))
async def help_command(client, message):
    """/help: reply with a list of registered bot commands.

    NOTE(review): `client.list_commands` is not part of the documented
    Pyrogram Client API — confirm this method exists (get_bot_commands is
    the documented equivalent).
    """
    # Get a list of all the registered commands
    commands = await client.list_commands()
    # Create a string that lists all the available commands
    help_text = "Available commands:\n\n"
    for command in commands:
        help_text += f"/{command.command} - {command.description}\n"
    await message.reply(help_text)
@app.on_message(filters.command("restart"))
def restart(client, message):
    """Restart the bot process in place (owners only)."""
    if is_owner(message):
        message.reply("Restarting...")
        # Replace the current process with a fresh interpreter running the
        # same script and arguments; os.execl never returns on success.
        os.execl(sys.executable, sys.executable, *sys.argv)
    else:
        # "No permission to restart" (Russian, kept verbatim).
        message.reply("ะะตั ะฟัะฐะฒ ะดะปั ัะตััะฐััะฐ!!!")
@app.on_message(filters.command("bw"))
async def black_and_white_filter(client: Client, message: Message):
    """Convert a received photo to greyscale and send it back.

    Only photos are accepted: Pillow cannot open video files, so the original
    `message.video` branch always crashed inside Image.open().
    """
    if not message.photo:
        await message.reply("Please send a photo.")
        return
    # Download the media file to disk.
    file = await message.download()
    try:
        with Image.open(file) as img:
            # "L" mode = 8-bit greyscale.
            img = img.convert("L")
            # Re-encode to PNG in memory and send it back.
            with BytesIO() as buffer:
                img.save(buffer, format="PNG")
                buffer.seek(0)
                await message.reply_photo(buffer)
    finally:
        # Always remove the temporary download, even when conversion fails
        # (the original leaked the file on any exception).
        os.remove(file)
@ app.on_message(filters.command("press"))
def press_key(client, message):
    """Press a key combination on the host: /press <key> [key ...]."""
    combo = message.text.split()[1:]
    # All arguments are sent together as one hotkey chord.
    pyautogui.hotkey(*combo)
    # eg /press q u a x a l e r t
@app.on_message(filters.command(['rickroll']))
def press_rick(client, message):
    """Open the classic rickroll video in the host's default browser."""
    webbrowser.get().open("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
    message.reply("Rickrolled OMG OMG OMG")
def get_ip():
    """Return the host's public IP address as reported by the ipify API."""
    payload = requests.get('https://api64.ipify.org?format=json').json()
    return payload["ip"]
@app.on_message(filters.command(['pcinfo']))
def getSystemInfo(client, message):
    """Reply with basic host-system information (OS, network, CPU, RAM)."""
    info = {}
    info['platform'] = platform.system()
    info['platform-release'] = platform.release()
    info['platform-version'] = platform.version()
    info['architecture'] = platform.machine()
    info['hostname'] = socket.gethostname()
    info['ip-address'] = get_ip()
    # uuid.getnode() returns the MAC as a 48-bit int; format as aa:bb:cc:dd:ee:ff.
    info['mac-address'] = ':'.join(re.findall('..', '%012x' % uuid.getnode()))
    info['processor'] = platform.processor()
    info['ram'] = str(round(psutil.virtual_memory().total / (1024.0 ** 3))) + " GB"
    # Render "key:value" lines directly.  The original dumped to JSON and then
    # stripped braces/quotes and replaced commas with newlines, which also
    # corrupted any value that itself contained a comma, brace or quote.
    result = "\n".join(f"{key}:{value}" for key, value in info.items())
    message.reply_text(result)
@app.on_message(filters.command(['web']))
async def open_link(_, message):
    """Open the URL given as /web <link> in the host's default browser."""
    # Guard: the command must carry exactly one argument.
    try:
        link = message.text.split(' ')[1]
    except IndexError:
        await message.reply_text("Please provide a link to open!")
        return
    # Default to https:// when no scheme was given.
    if not link.startswith(('http://', 'https://')):
        link = 'https://' + link
    webbrowser.get().open(link)
    await message.reply_text(f"Opened {link} in your default web browser!")
@ app.on_message(filters.command("alert"))
def show_alert(client, message):
    """Show a Windows toast notification titled with the sender's name."""
    body = " ".join(message.command[1:])
    toaster.show_toast(message.from_user.first_name, body, duration=10)
async def get_media_messages(client, channel_username):
    """Collect up to 1000 messages from a chat that carry media attachments."""
    return [
        msg
        async for msg in client.iter_messages(channel_username, limit=1000)
        if msg.media
    ]
@ app.on_message(filters.command("close"))
def close_program(client, message):
    """Acknowledge the request and terminate the bot process."""
    message.reply_text("Closing the program...")
    # SystemExit unwinds the interpreter cleanly.
    sys.exit()
@app.on_message(filters.command("screenshot"))
def take_screenshot(client, message):
    """Capture the full host screen and send it back as a photo."""
    # Take a screenshot of the entire screen
    screenshot = ImageGrab.grab()
    # Convert the screenshot to bytes (in-memory PNG; nothing touches disk)
    buffer = io.BytesIO()
    screenshot.save(buffer, format="PNG")
    buffer.seek(0)
    # Send the screenshot to the user who invoked the command
    client.send_photo(
        chat_id=message.chat.id,
        photo=buffer,
        caption="ัะตัั"
    )
# Handler function for the media command
@app.on_message(filters.command("media"))
def media_command_handler(client, message):
    """Send an inline keyboard with system-volume control buttons."""
    # NOTE(review): another function with this exact name is defined further
    # down for /cursor.  Both handlers stay registered (registration happens
    # at decoration time), but the module-level name is rebound to the later
    # one -- consider renaming for clarity.
    # Define the inline keyboard markup with the volume control buttons
    volume_control_markup = InlineKeyboardMarkup([
        [
            InlineKeyboardButton("๐ -10", callback_data="decrease_volume_10"),
            InlineKeyboardButton("๐", callback_data="mute_volume"),
            InlineKeyboardButton("๐", callback_data="unmute_volume"),
            InlineKeyboardButton("๐ +10", callback_data="increase_volume_10")
        ]
    ])
    # Send the volume control buttons with a message
    client.send_message(
        message.chat.id,
        "Select a volume control option:",
        reply_markup=volume_control_markup
    )
@ app.on_message(filters.command("cursorm", prefixes="/"))
def move_cursor(client: Client, message: Message):
    """Move the host mouse cursor: /cursorm <up|down|left|right> <pixels>."""
    parts = message.text.split()
    # Validate arguments up front: the original raised IndexError on a bare
    # /cursorm and ValueError on a non-numeric pixel count.
    if len(parts) != 3 or not parts[2].isdigit():
        client.send_message(message.chat.id, "Invalid direction provided. Please use /cursor <direction> <pixels>")
        return
    direction = parts[1].lower()
    pixels = int(parts[2])
    # Relative (dx, dy) offset for each supported direction.
    offsets = {
        "down": (0, pixels),
        "up": (0, -pixels),
        "right": (pixels, 0),
        "left": (-pixels, 0),
    }
    if direction in offsets:
        dx, dy = offsets[direction]
        mouse.move(dx, dy, absolute=False, duration=0.1)
    else:
        client.send_message(message.chat.id, "Invalid direction provided. Please use /cursor <direction> <pixels>")
# Handler function for the /cursor command (cursor-pad inline keyboard).
@app.on_message(filters.command("cursor"))
def media_command_handler(client, message):
    """Send an inline keyboard that moves/clicks the host mouse cursor."""
    # NOTE(review): this rebinds the name `media_command_handler` defined
    # earlier for /media; both handlers remain registered via the decorators.
    # Define the inline keyboard markup with the cursor control buttons
    cursor_control_markup = InlineKeyboardMarkup([
        [
            InlineKeyboardButton("ะะะ", callback_data="click"),
            InlineKeyboardButton("โฌ", callback_data="left"),
            InlineKeyboardButton("โฌ", callback_data="up"),
            InlineKeyboardButton("โก", callback_data="right"),
            InlineKeyboardButton("โฌ", callback_data="down")
        ]
    ])
    # Send the cursor control buttons with a message
    client.send_message(
        message.chat.id,
        "Cursor position:",
        reply_markup=cursor_control_markup
    )
# Callback handlers for the cursor pad.  Each moves the host mouse 450 px
# relative to its current position; function names are arbitrary because
# registration happens via the decorator, not the name.
@app.on_callback_query(filters.create(lambda _, __, query: query.data == "left"))
def r(client, callback_query):
    mouse.move(-450, 0, absolute=False, duration=0)
@app.on_callback_query(filters.create(lambda _, __, query: query.data == "right"))
def _handler(client, callback_query):
    mouse.move(450, 0, absolute=False, duration=0)
@app.on_callback_query(filters.create(lambda _, __, query: query.data == "up"))
def allback_handler(client, callback_query):
    mouse.move(0, -450, absolute=False, duration=0)
@app.on_callback_query(filters.create(lambda _, __, query: query.data == "down"))
def decrease_handler(client, callback_query):
    mouse.move(0, 450, absolute=False, duration=0)
# Single left click at the current cursor position.
@app.on_callback_query(filters.create(lambda _, __, query: query.data == "click"))
def decrease_vandler(client, callback_query):
    mouse.click()
# Handler functions for the volume control buttons (Windows-only: they shell
# out to the nircmd utility, whose volume scale is 0..65535, so 6553 ~ 10%).
@app.on_callback_query(filters.create(lambda _, __, query: query.data == "decrease_volume_10"))
def decrease_volume_callback_handler(client, callback_query):
    # Decrease the volume by 10 using the nircmd utility
    os.system("nircmd.exe changesysvolume -6553")
    # Send a confirmation message
    client.answer_callback_query(callback_query.id, "Volume decreased by 10.")
@app.on_callback_query(filters.create(lambda _, __, query: query.data == "mute_volume"))
def mute_volume_callback_handler(client, callback_query):
    # Mute the volume using the nircmd utility
    os.system("nircmd.exe mutesysvolume 1")
    # Send a confirmation message
    client.answer_callback_query(callback_query.id, "Volume muted.")
# NOTE(review): this second definition reuses the name
# `mute_volume_callback_handler`; both stay registered via the decorators,
# but the duplicate name is confusing -- consider renaming to
# unmute_volume_callback_handler.
@app.on_callback_query(filters.create(lambda _, __, query: query.data == "unmute_volume"))
def mute_volume_callback_handler(client, callback_query):
    # Unmute the volume using the nircmd utility
    os.system("nircmd.exe mutesysvolume 0")
    # Send a confirmation message
    client.answer_callback_query(callback_query.id, "Volume unmuted.")
@app.on_callback_query(filters.create(lambda _, __, query: query.data == "increase_volume_10"))
def increase_volume_callback_handler(client, callback_query):
    # Increase the volume by 10 using the nircmd utility
    os.system("nircmd.exe changesysvolume 6553")
    # Send a confirmation message
    client.answer_callback_query(callback_query.id, "Volume increased by 10.")
@app.on_message(filters.command("click"))
def uh(client, message):
    """Perform a single left mouse click on the host machine."""
    mouse.click()
@app.on_message(filters.command("volume"))
def volume_command_handler(client, message):
    """Set the system master volume: /volume <0-100> (Windows, via nircmd)."""
    # Parse the volume level from the command arguments.
    try:
        volume_level = int(message.command[1])
    except (IndexError, ValueError):
        client.send_message(message.chat.id, "Please specify a valid volume level.")
        return
    # Clamp to the valid percentage range; nircmd expects 0..65535 and the
    # original happily forwarded out-of-range values.
    volume_level = max(0, min(100, volume_level))
    nircmd_volume = int(volume_level * 655.35)
    # Set the system volume using the Windows command line.
    subprocess.run(["nircmd.exe", "setsysvolume", str(nircmd_volume)])
    # Send a confirmation message.
    client.send_message(message.chat.id, f"Volume set to {volume_level}%")
@ app.on_message(filters.private)
def run_command(client, message):
    """Execute any private message as a shell command and reply with its output.

    SECURITY: this deliberately runs arbitrary text through the shell
    (shell=True) with no authorization check beyond the chat being private --
    that is this remote-admin bot's purpose, but worth flagging.
    """
    command = message.text
    try:
        msg = client.send_message(message.chat.id, "Running command...")
        output = subprocess.check_output(command, shell=True)
        if len(output) > 4096:
            # Telegram messages cap at 4096 chars; the original appended an
            # empty bytes object, silently dropping the ellipsis.
            output = output[:4093] + b"..."
        if "shutdown" in command:
            client.edit_message_text(chat_id=message.chat.id, message_id=msg.id, text="ะงะผะพ ะฑะปััั")
        else:
            client.edit_message_text(chat_id=message.chat.id, message_id=msg.id, text=f"probably success \n {output.decode('cp866')}")
    except subprocess.CalledProcessError as e:
        # Non-zero exit status: report the failure in place of the
        # "Running command..." placeholder.
        client.edit_message_text(chat_id=message.chat.id, message_id=msg.id, text=f"Command failed with error:\n{e}")
app.run()  # Start the client and block until the process is stopped.
| quaxxido/pyrogram-cmdline | main.py | main.py | py | 14,175 | python | en | code | 1 | github-code | 13 |
26777352361 | """
imgur
A package for putting images on imgur
@category silly
@version $ID: 1.1.1, 2015-02-19 17:00:00 CST $;
@author KMR, Jason
@licence GNU GPL v.3
"""
import re
import pyimgur
from PIL import Image, ImageFont, ImageDraw
class imgur:
    """Render a hex string as pixel colours into a PNG and upload it to imgur.

    `conf` must provide 'imgPath' (output file), 'pixelsize' (square size of
    each rendered pixel) and 'imgurAPI' (imgur client id).
    """
    conf = None
    imagePath = None

    def __init__(self, conf):
        self.conf = conf
        self.imagePath = conf['imgPath']

    def makeImg(self, text):
        """Render *text* (6 hex digits per pixel, RRGGBB) into conf['imgPath']."""
        text = re.sub(r'\s', '', text)
        # Integer division: under Python 3 "/" yields a float, which broke
        # range() and the image-size arithmetic below.
        pixels = len(text) // 6
        xsize = 1
        ysize = 1
        imageRatios = [[16, 9],
                       [8, 5],
                       [5, 4],
                       [5, 3],
                       [4, 3],
                       [3, 2]]
        # check if we can make a square
        if (pixels ** (0.5)) % 1 == 0:
            xsize = int(pixels ** (0.5))
            ysize = xsize
        else:
            # if not see if it fits another ratio
            for ratio in imageRatios:
                if pixels % ratio[0] == 0 and pixels % ratio[1] == 0:
                    xsize = ratio[0] * ((pixels / (ratio[0] * ratio[1])) ** (0.5))
                    ysize = ratio[1] * ((pixels / (ratio[0] * ratio[1])) ** (0.5))
                    break
            # if it doesn't fit a normal ratio, look for any factor pair
            # no flatter than 10:1
            if xsize * ysize != pixels:
                for x in range(int(pixels ** (0.5)), pixels // 10, -1):
                    if pixels % x == 0:
                        ysize = x
                        xsize = pixels // ysize
                        break
            # if we still dont have a ratio let's just do our best
            # (we'll lose a little data but ehh)
            if xsize * ysize != pixels:
                xsize = 5 * ((pixels / 20) ** (0.5))
                ysize = 4 * ((pixels / 20) ** (0.5))
        pix = self.conf['pixelsize']
        xsize = int(xsize)
        ysize = int(ysize)
        size = (xsize * pix, ysize * pix)
        im = Image.new('RGB', size)
        draw = ImageDraw.Draw(im)
        p = 0
        # Consume 6 hex digits per rendered pixel, column by column.
        for x in range(0, xsize):
            for y in range(0, ysize):
                r = int(text[p + 0:p + 2], 16)
                g = int(text[p + 2:p + 4], 16)
                b = int(text[p + 4:p + 6], 16)
                p += 6
                colour = (r, g, b, 255)
                draw.rectangle(
                    (
                        ((x * pix), (y * pix)),
                        (((x * pix) + pix),
                         (pix + (pix * y)))
                    ),
                    fill=colour
                )
        # Save exactly once; the original saved the image twice and returned
        # the (always-None) result of Image.save().
        im.save(self.conf['imgPath'], 'PNG')

    def uploadImage(self, imageTitle):
        """Upload the rendered image to imgur and return its public link."""
        im = pyimgur.Imgur(self.conf['imgurAPI'])
        return im.upload_image(self.conf['imgPath'], title=imageTitle).link
36146872446 | import logging
import pytest
# Explicitly import package-scoped fixtures (see explanation in pkgfixtures.py)
from pkgfixtures import host_with_saved_yum_state
@pytest.fixture(scope="package")
def host_with_hvm_fep(host):
    """Package-scoped host fixture that requires Xen's hvm_fep option.

    Fails early when the running Xen command line does not enable HVM Forced
    Emulation Prefix, which some of the XTF tests rely on.
    """
    logging.info("Checking for HVM FEP support")
    if 'hvm_fep' not in host.ssh(['xl', 'info', 'xen_commandline']).split():
        pytest.fail("HVM FEP is required for some of the XTF tests")
    yield host
@pytest.fixture(scope="package")
def host_with_dynamically_disabled_ept_sp(host):
    """
    Disable EPT superpages before running XTF.
    The XSA-304 POC will crash hosts with vulnerable hardware if EPT SP are enabled.
    """
    logging.info("Switching EPT superpages to secure")
    host.ssh(['xl', 'set-parameters', 'ept=no-exec-sp'])
    yield host
    # Teardown: restore the (faster) default once the package is done.
    logging.info("Switching back EPT superpages to fast")
    host.ssh(['xl', 'set-parameters', 'ept=exec-sp'])
@pytest.fixture(scope="package")
def host_with_git_and_gcc(host_with_saved_yum_state):
    """Host with git and gcc installed; yum state is restored afterwards by
    the wrapped host_with_saved_yum_state fixture."""
    host = host_with_saved_yum_state
    host.yum_install(['git', 'gcc'])
    yield host
@pytest.fixture(scope="package")
def xtf_runner(host_with_git_and_gcc):
    """Clone and build XTF on the host; yield the path to its xtf-runner.

    The build happens in a mktemp directory on the host, which is removed on
    setup failure and again at teardown.
    """
    host = host_with_git_and_gcc
    logging.info("Download and build XTF")
    tmp_dir = host.ssh(['mktemp', '-d'])
    try:
        host.execute_script(f"""set -eux
cd {tmp_dir}
git clone git://xenbits.xen.org/xtf.git
cd xtf
make -j$(nproc)
""")
    except Exception:
        # Don't leave a half-built tree behind if the clone/build fails.
        logging.info("Setup failed: delete temporary directory.")
        host.ssh(['rm', '-rf', tmp_dir])
        raise
    yield f"{tmp_dir}/xtf/xtf-runner"
    # teardown
    logging.info("Delete XTF")
    host.ssh(['rm', '-rf', tmp_dir])
@pytest.fixture(scope="package")
def host_with_dom0_tests(host_with_saved_yum_state):
    """Host with the xen-dom0-tests package installed (yum state restored later)."""
    host = host_with_saved_yum_state
    host.yum_install(['xen-dom0-tests'])
    yield host
| xcp-ng/xcp-ng-tests | tests/xen/conftest.py | conftest.py | py | 1,846 | python | en | code | 3 | github-code | 13 |
25686750956 | import torch
import torch.nn as nn
import os
class DnCNN_RL(nn.Module):
    """Residual-learning wrapper: the inner DnCNN predicts the noise map,
    which the caller subtracts from the noisy input."""

    def __init__(self, channels, num_of_layers=17):
        super(DnCNN_RL, self).__init__()
        self.dncnn = DnCNN(channels=channels, num_of_layers=num_of_layers)

    def forward(self, x):
        """Return the predicted noise residual for input batch x."""
        return self.dncnn(x)
class BUIFD(nn.Module):
    """Blind denoiser that fuses a prior image with a noise-level estimate."""

    def __init__(self, channels, num_of_layers=17):
        super(BUIFD, self).__init__()
        self.dncnn = DnCNN(channels=channels, num_of_layers=num_of_layers)
        self.noisecnn = NoiseCNN(channels=channels)
        self.FinalFusionLayers = FinalFusionLayers(channels=channels)

    def forward(self, x):
        """Return (predicted noise, predicted per-pixel noise level)."""
        # Prior estimate: subtract the DnCNN-predicted noise from the input.
        prior = x - self.dncnn(x)
        # Per-pixel noise-level estimate in [0, 1] (sigmoid output).
        noise_level = self.noisecnn(x)
        # Fuse input, prior and noise level into the final denoised image,
        # then convert back to a noise prediction.
        denoised = self.FinalFusionLayers(x, prior, noise_level)
        return x - denoised, noise_level
class DnCNN(nn.Module):
    """Plain DnCNN: Conv+ReLU, (num_of_layers-2) x Conv+BN+ReLU, final Conv.

    All convolutions are 3x3, stride 1, padding 1 and bias-free, so spatial
    dimensions are preserved.
    """

    def __init__(self, channels, num_of_layers=17):
        super(DnCNN, self).__init__()
        kernel_size = 3
        padding = 1
        features = 64
        stages = [
            nn.Conv2d(in_channels=channels, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False),
            nn.ReLU(inplace=True),
        ]
        for _ in range(num_of_layers - 2):
            stages += [
                nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False),
                nn.BatchNorm2d(features),
                nn.ReLU(inplace=True),
            ]
        stages.append(nn.Conv2d(in_channels=features, out_channels=channels, kernel_size=kernel_size, padding=padding, bias=False))
        self.dncnn = nn.Sequential(*stages)

    def forward(self, x):
        """Map a (B, C, H, W) batch through the network; shape is preserved."""
        return self.dncnn(x)
class NoiseCNN(nn.Module):
    """Noise-level estimator: 5x5 convs ending in a sigmoid, output in [0, 1].

    Note the hidden stack runs `num_of_layers` times (not num_of_layers-2 as
    in DnCNN) -- behaviour kept as-is.
    """

    def __init__(self, channels, num_of_layers=5):
        super(NoiseCNN, self).__init__()
        kernel_size = 5
        padding = 2
        features = 64
        stages = [
            nn.Conv2d(in_channels=channels, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False),
            nn.ReLU(inplace=True),
        ]
        for _ in range(num_of_layers):
            stages += [
                nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False),
                nn.BatchNorm2d(features),
                nn.ReLU(inplace=True),
            ]
        stages.append(nn.Conv2d(in_channels=features, out_channels=channels, kernel_size=kernel_size, padding=padding, bias=False))
        self.noisecnn = nn.Sequential(*stages)
        self.sigmoid_mapping = nn.Sigmoid()

    def forward(self, x):
        """Return the per-pixel noise-level map, squashed into [0, 1]."""
        return self.sigmoid_mapping(self.noisecnn(x))
class FinalFusionLayers(nn.Module):
    """Fuse the noisy input, the prior and the noise level into one image.

    The three inputs are expanded into five channels-groups and passed through
    three bias-free 3x3 convolutions.
    """

    def __init__(self, channels):
        super(FinalFusionLayers, self).__init__()
        kernel_size = 3
        padding = 1
        features = 16
        dilation = 1
        self.fusion_layers = nn.Sequential(
            nn.Conv2d(in_channels=5 * channels, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False, dilation=dilation),
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False, dilation=dilation),
            nn.Conv2d(in_channels=features, out_channels=channels, kernel_size=kernel_size, padding=padding, bias=False, dilation=dilation),
        )

    def forward(self, a, b, c):
        """Fuse noisy input *a*, prior *b* and noise level *c* (same shapes)."""
        noisy_input, prior, noise_level = a, b, c
        # Five channel groups: raw input, prior, noise level, and the two
        # confidence-weighted mixes.
        stacked = torch.cat(
            (
                noisy_input,
                prior,
                noise_level,
                noisy_input * (1 - noise_level),
                prior * noise_level,
            ),
            1,
        )
        return self.fusion_layers(stacked)
| majedelhelou/BUIFD | Training/models.py | models.py | py | 4,188 | python | en | code | 13 | github-code | 13 |
11371426462 | from google.cloud import bigquery
from MySQLdb import connect, cursors
import os, json
# writeable part of the filesystem for Cloud Functions instance
gc_write_dir = "/tmp"
def get_file_mysql(mysql_configuration):
    """
    Query data via MySQLdb from the configured MySQL server and dump every row
    as one JSON object per line into gc_write_dir/mysql.txt.
    Returns the full path of the written file.
    """
    file_location = gc_write_dir + "/" + "mysql.txt"
    cnx = None
    cursor = None
    try:
        # Construct the connection and run the configured query.
        cnx = connect(user=mysql_configuration["user"], passwd=mysql_configuration["psswd"], host=mysql_configuration["host"],
                      db=mysql_configuration["database"], port=mysql_configuration["port"],
                      cursorclass=cursors.DictCursor)
        cursor = cnx.cursor()
        cursor.execute(mysql_configuration["query"])
        results = cursor.fetchall()
        # Write to the absolute path we return.  The original opened a
        # relative "mysql.txt", silently depending on the caller having
        # chdir()'d into gc_write_dir first.
        with open(file_location, "w") as output_file:
            for row in results:
                # default=str handles dates/decimals that json can't encode.
                output_file.write(json.dumps(row, default=str) + "\n")
        print("Query <" + mysql_configuration["query"] + "> has completed successfully.")
    finally:
        try:
            cursor.close()
            cnx.close()
        except Exception:
            # cursor/cnx may be None if the connection never came up.
            print("Connection has not been established.")
    return file_location
def give_file_gbq(path_to_file, bq_configuration):
    """
    Download file from *path_to_file* to BigQuery table using *bq_configuration* settings.
    """
    # construct Client object with the path to the table in which data will be stored
    client = bigquery.Client(project = bq_configuration["project_id"])
    dataset_ref = client.dataset(bq_configuration["dataset_id"])
    table_ref = dataset_ref.table(bq_configuration["table_id"])
    # determine uploading options: newline-delimited JSON, schema autodetected,
    # write disposition taken from the request (WRITE_TRUNCATE by default)
    job_config = bigquery.LoadJobConfig()
    job_config.source_format = "NEWLINE_DELIMITED_JSON"
    job_config.write_disposition = bq_configuration["write_disposition"]
    job_config.autodetect = True
    # upload the file to BigQuery table; job.result() blocks until completion
    # and raises on failure
    with open(path_to_file, "rb") as source_file:
        job = client.load_table_from_file(source_file, table_ref, location = bq_configuration["location"], job_config = job_config)
    job.result()
    print("The Job " + job.job_id + " in status " + job.state + " for table " + bq_configuration["project_id"] + "." + bq_configuration["dataset_id"] + "." + bq_configuration["table_id"] + ".")
def mysql(request):
    """
    Cloud Function entry point: read MySQL + BigQuery settings from the POST
    body, export the MySQL query result to a file, then load it into BigQuery.
    """
    try:
        # get POST data from Flask.request object
        request_json = request.get_json()
        mysql_configuration = request_json["mysql"]
        bq_configuration = request_json["bq"]
        # Fill in defaults: full-table query, standard MySQL port, US location.
        if not mysql_configuration.get("query"):
            mysql_configuration["query"] = "SELECT * FROM " + mysql_configuration["table_id"]
        if not mysql_configuration.get("port"):
            mysql_configuration["port"] = 3306
        if not bq_configuration.get("location"):
            bq_configuration["location"] = "US"
        # The destination table is always fully replaced.
        bq_configuration["write_disposition"] = "WRITE_TRUNCATE"
    except Exception as error:
        print("An error occured with POST request data.")
        print(str(error))
        raise SystemExit
    # go to writable directory (only /tmp is writable on Cloud Functions)
    os.chdir(gc_write_dir)
    # get the file from MySQL server
    try:
        mysql_file = get_file_mysql(mysql_configuration)
    except Exception as error:
        print("An error occured trying to get file from MySQL server.")
        print(str(error))
        raise SystemExit
    # upload the file to BigQuery
    try:
        give_file_gbq(mysql_file, bq_configuration)
    except Exception as error:
        print("An error occured trying to upload file to Google BigQuery.")
        print(str(error))
| OWOX/BigQuery-integrations | mysql/main.py | main.py | py | 3,847 | python | en | code | 46 | github-code | 13 |
25320300329 | import re
from itertools import product
in_file = "input.txt"
def read_input_lines():
    """Return the puzzle input as per-mask blocks (text after each 'mask = ')."""
    with open(in_file, 'r') as fh:
        flattened = fh.read().replace("\n", " ")
    # The leading split element is empty/preamble, so drop it.
    return flattened.split("mask = ")[1:]
def format_input():
    """Parse each mask block into [mask, (addr, value), (addr, value), ...]."""
    data = read_input_lines()
    # Matches "mem[<address>] = <value>" assignments.
    pattern = re.compile("mem\[(\d+)\] = (\d+)")
    data_list = []
    for line in data:
        addresses = re.findall(pattern, line)
        # The first 36 characters of the block are the mask string itself.
        data_list.append([line[:36]] + addresses)
    return data_list
def intstr_to_bin(value):
    """Convert a decimal string (or int) to its binary digits, no prefix."""
    return format(int(value), "b")
def puzzle1():
    """Part 1: the mask overwrites value bits at every non-X position."""
    memory = {}
    for block in format_input():
        mask = block[0]
        # Positions whose bit is forced by the mask (anything but 'X').
        forced_bits = [(pos, bit) for pos, bit in enumerate(mask) if bit != "X"]
        for addr, raw_value in block[1:]:
            bits = intstr_to_bin(raw_value).rjust(36, "0")
            for pos, bit in forced_bits:
                bits = bits[:pos] + bit + bits[pos + 1:]
            memory[addr] = bits
    result = sum(int(bits, 2) for bits in memory.values())
    print(f"Sum of memory values: {result}")
def puzzle2():
    """Part 2: the mask is applied to the *address*; X bits float over 0/1."""
    data = format_input()
    memory_register = {}
    for n in data:
        mask = n[0]
        # All positions that affect the address: '1' forces a one, 'X' floats.
        mask_ind = [(i, v) for i, v in enumerate(mask) if v != "0"]
        for address in n[1:]:
            value = intstr_to_bin(address[0])
            value36 = value.rjust(36, "0")
            # Every 0/1 combination for the floating X positions.
            x_list = product(["0", "1"], repeat=len([x[1] for x in mask_ind if x[1] == "X"]))
            x_add = [x[0] for x in mask_ind if x[1] == "X"]
            x_replace = []
            for x in x_list:
                x_replace.append(list(zip(x_add, x)))
            # First force the mask's '1' (and overwrite X placeholders)...
            for m in mask_ind:
                value36 = value36[:m[0]] + m[1] + value36[m[0] + 1:]
            # ...then expand each floating combination into its own address.
            for x_set in x_replace:
                for char in x_set:
                    value36 = value36[:char[0]] + char[1] + value36[char[0] + 1:]
                memory_register[value36] = address[1]
    result = sum([int(value) for value in memory_register.values()])
    print(f"Sum of memory values: {result}")
# Run both puzzle parts when executed as a script.
if __name__ == '__main__':
    puzzle1()
    puzzle2()
| voidlessVoid/advent_of_code_2020 | day_14/dominik/main.py | main.py | py | 2,103 | python | en | code | 0 | github-code | 13 |
41431592643 |
import numpy as np
import scipy
import scipy.io as sio
import sys
import os
import casadi as cas
import casadi.tools as ct
from typing import Union, List, Dict, Tuple, Optional, Callable
from abc import ABC
from dataclasses import dataclass
from enum import Enum, auto
import copy
import pdb
# sys.path.append(os.path.join('..'))
# import sysid as sid
# from . import base
# import system
from blrsmpc.sysid import sysid as sid
from . import base
from blrsmpc import system
class StateSpaceSMPC(base.SMPCBase):
    """Stochastic MPC formulation built on an identified ARX state-space model.

    The lifted ARX matrices are precomputed once; output uncertainty is then
    propagated along the prediction horizon via `_get_covariance`.
    """

    def __init__(self, sid_model: sid.StateSpaceModel, settings: base.SMPCSettings):
        super().__init__(sid_model, settings)
        self._prepare_covariance_propagation()

    def _prepare_covariance_propagation(self):
        """Precompute the ARX system matrices and the output scaling matrix."""
        # Model weights with scaling and bias folded in.
        W, W0 = self.sid_model._include_scaling_and_bias()
        sys_A, sys_B, sys_C, sys_offset_ARX = system.get_ABC_ARX(
            W = W,
            W0 = W0,
            l = self.sid_model.data_setup.T_ini,
            n_y = self.sid_model.n_y,
            n_u = self.sid_model.n_u,
        )
        self.sys_A = sys_A
        self.sys_B = sys_B
        self.sys_C = sys_C
        self.sys_offset_ARX = sys_offset_ARX
        self.sys_W = W
        self.sys_W0 = W0
        # Output scaling: diagonal scaler when outputs were standardized,
        # identity otherwise.
        if self.sid_model.blr.state['scale_y']:
            self.S_y = np.diag(self.sid_model.blr.scaler_y.scale_)
        else:
            self.S_y = np.eye(self.sid_model.n_y)

    def _get_covariance(self, m: cas.SX, P0) -> cas.SX:
        """One covariance propagation step.

        Given the current regressor m and state covariance P0, return the
        propagated output covariance and the next state covariance.
        """
        Sigma_e = self.sid_model.blr.Sigma_e
        # Without cross-covariances, keep only the diagonal of Sigma_e.
        if not self.settings.with_cov:
            Sigma_e = np.diag(np.diag(Sigma_e))
        # Apply the same input standardization the BLR model was fit with.
        if self.sid_model.blr.state['scale_x']:
            mu_x = self.sid_model.blr.scaler_x.mean_
            s_x = self.sid_model.blr.scaler_x.scale_
            m = (m - mu_x)/s_x
        if self.sid_model.blr.state['add_bias']:
            m = cas.vertcat(m, 1)
        # Predictive output covariance of the BLR model (noise + parameter
        # uncertainty), rescaled to physical units via S_y.
        Sigma_y_new = (Sigma_e*(m.T@self.sid_model.blr.Sigma_p_bar@m)+Sigma_e)
        Sigma_y_new = self.S_y@Sigma_y_new@self.S_y.T
        P_next = self.sys_A@P0@self.sys_A.T + self.sys_C.T@Sigma_y_new@self.sys_C
        Sigma_y_prop = self.sys_C@P_next@self.sys_C.T
        return Sigma_y_prop, P_next

    def _get_y_and_Sigma_y_pred(self, opt_x: ct.struct_symSX, opt_p: ct.struct_symSX) -> Tuple[cas.SX, cas.SX]:
        """ Propagate the system dynamics and uncertainty along the horizon.

        Returns the stacked predicted outputs and their block-diagonal
        covariance (one n_y x n_y block per prediction step).
        """
        # Past measurements/inputs followed by the decision variables.
        y_seq = opt_p['y_past']+opt_x['y_pred']
        u_seq = opt_p['u_past']+opt_x['u_pred']
        T_ini = self.sid_model.data_setup.T_ini
        N = self.sid_model.data_setup.N
        n_y = self.sid_model.n_y
        P0 = np.zeros(self.sys_A.shape)
        Sigma_y_pred = cas.SX.zeros(self.sid_model.n_y*N, self.sid_model.n_y*N)
        y_pred_calc = cas.SX.zeros(self.sid_model.n_y*N)
        for k in range(self.sid_model.data_setup.N):
            # Regressor: the T_ini most recent outputs and inputs.
            xk = cas.vertcat(*y_seq[k:k+T_ini], *u_seq[k:k+T_ini])
            yk_pred = self.sys_W@xk + self.sys_W0
            Sigma_y_k, P0 = self._get_covariance(xk, P0)
            Sigma_y_pred[k*n_y:(k+1)*n_y, k*n_y:(k+1)*n_y] = Sigma_y_k
            y_pred_calc[k*n_y:(k+1)*n_y] = yk_pred
        return y_pred_calc, Sigma_y_pred
| 4flixt/2023_Stochastic_MSM | blrsmpc/smpc/ss_smpc.py | ss_smpc.py | py | 3,147 | python | en | code | 6 | github-code | 13 |
3746250048 | import json
import logging
import os
import re
import sys
from multiprocessing import Pool
from mwm import Mwm
from mwm.ft2osm import read_osm2ft
class PromoIds(object):
    """Injects promo-catalog city/country geo IDs into a countries tree.

    `countries`/`cities` map OSM ids to paid-bundle counts; mwm/types/osm2ft
    paths point at the generated map data used for feature lookup.
    """

    def __init__(self, countries, cities, mwm_path, types_path, osm2ft_path):
        self.countries = countries
        self.cities = cities
        self.mwm_path = mwm_path
        self.types_path = types_path
        self.osm2ft_path = osm2ft_path

    def inject_into_country(self, country):
        """Scan all leaf mwms of *country* and attach promo geo IDs in place."""
        nodes = self._get_nodes(country)
        # Each leaf mwm is scanned in a worker process.
        with Pool() as pool:
            proposed_ids = pool.map(self._find, (n["id"] for n in nodes), chunksize=1)
        countries_ids = [
            ids for node_ids in proposed_ids for ids in node_ids["countries"]
        ]
        if countries_ids:
            country["top_countries_geo_ids"] = countries_ids
        for idx, node_ids in enumerate(proposed_ids):
            if not node_ids["cities"]:
                continue
            node = nodes[idx]
            best = self._choose_best_city(node_ids["cities"])
            node["top_city_geo_id"] = best["id"]
            # Serialize negative ids as unsigned 64-bit values.
            if best["id"] < 0:
                node["top_city_geo_id"] += 1 << 64

    def _find(self, leaf_id):
        """Return promo cities and countries found inside one leaf mwm."""
        result = {"countries": [], "cities": []}
        ft2osm = load_osm2ft(self.osm2ft_path, leaf_id)
        for feature in Mwm(os.path.join(self.mwm_path, leaf_id + ".mwm")):
            osm_id = ft2osm.get(feature.index(), None)
            types = feature.readable_types()
            if "sponsored-promo_catalog" in types and osm_id in self.cities:
                city = self._get_city(osm_id, types)
                result["cities"].append(city)
            if "place-country" in types and osm_id in self.countries:
                result["countries"].append(osm_id)
        return result

    @staticmethod
    def _get_nodes(root):
        """Flatten the country tree into its leaf (mwm) nodes."""
        def __get_nodes(node, mwm_nodes):
            # Inner nodes carry a "g" list of children; leaves do not.
            if "g" in node:
                for item in node["g"]:
                    __get_nodes(item, mwm_nodes)
            else:
                mwm_nodes.append(node)
        mwm_nodes = []
        __get_nodes(root, mwm_nodes)
        return mwm_nodes

    def _get_city(self, osm_id, types):
        """Build the city record (id, guide count, place-* types) for osm_id."""
        city = {"id": osm_id, "count_of_guides": self.cities[osm_id], "types": []}
        for t in types:
            if t.startswith("place"):
                city["types"].append(t)
        if not city["types"]:
            # A promo-catalog city without a place type is malformed data.
            logging.error(
                f"Incorrect types for sponsored-promo_catalog "
                f"feature osm_id {osm_id}"
            )
            sys.exit(3)
        return city

    def _choose_best_city(self, proposed_cities):
        """Pick the city with most guides; city-type rank breaks ties."""
        def key_compare(city):
            return city["count_of_guides"], self._score_city_types(city["types"])
        return max(proposed_cities, key=key_compare)

    def _score_city_types(self, types):
        """Score a city by the most significant of its place types."""
        return max(self._city_type_to_int(t) for t in types)

    @staticmethod
    def _city_type_to_int(t):
        """Rank place types: capitals (by admin level) > city > town > other."""
        if t == "place-town":
            return 1
        if t == "place-city":
            return 2
        m = re.match(r"^place-city-capital?(-(?P<admin_level>\d+)|)$", t)
        if m:
            # Missing admin level defaults to 1 (national capital); lower
            # admin levels rank higher.
            admin_level = int(m.groupdict("1")["admin_level"])
            if 1 <= admin_level <= 12:
                return 14 - admin_level
        return 0
def load_promo_ids(path):
    """Read a promo JSON file and map each OSM id to its paid-bundle count."""
    with open(path) as f:
        payload = json.load(f)
    return {entry["osmid"]: entry["paid_bundles_count"] for entry in payload["data"]}
def load_osm2ft(osm2ft_path, mwm_id):
    """Load the feature-id -> osm-id mapping for one mwm; exit(3) if missing."""
    osm2ft_name = os.path.join(osm2ft_path, mwm_id + ".mwm.osm2ft")
    if not os.path.exists(osm2ft_name):
        logging.error(f"Cannot find {osm2ft_name}")
        sys.exit(3)
    with open(osm2ft_name, "rb") as f:
        # ft2osm=True: keys are feature ids, values are osm ids.
        return read_osm2ft(f, ft2osm=True, tuples=False)
def inject_promo_ids(
    countries_json,
    promo_cities_path,
    promo_countries_path,
    mwm_path,
    types_path,
    osm2ft_path,
):
    """Mutate *countries_json* in place, attaching promo geo IDs per country."""
    promo_ids = PromoIds(
        load_promo_ids(promo_countries_path),
        load_promo_ids(promo_cities_path),
        mwm_path,
        types_path,
        osm2ft_path,
    )
    for country in countries_json["g"]:
        promo_ids.inject_into_country(country)
| organicmaps/organicmaps | tools/python/post_generation/inject_promo_ids.py | inject_promo_ids.py | py | 4,244 | python | en | code | 7,565 | github-code | 13 |
34823949171 | # Find simplified formula for bank balance sheet change as a result of
# ELA collateral seizure
# Christopher Gandrud
# MIT License
################################################################################
# Import SymPy
import sympy as sp
from sympy.abc import eta, gamma
# Define symbols (eta and gamma imported)
A, AD = sp.symbols('A A_d')

# NOTE: a stray `print(sp.latex(lambdaA))` was removed here -- `lambdaA` was
# never defined anywhere in the script, so it crashed with a NameError before
# producing any output.

#######
# Expanded form expressions for the change in non-performing loans after ELA
# collateral seizure: NPL share before vs. after the seized assets (AD) and
# their non-performing fraction (eta * gamma * AD) leave the balance sheet.
expr_npl1 = (gamma * A) / A
expr_npl2 = (gamma * A - eta * gamma * AD) / (A - AD)

# Subtract and print simplified form as LaTeX
print(sp.latex((expr_npl1 - expr_npl2)))
| christophergandrud/ela_fiscal_costs | formal_modelling/ela_balance_sheet_effects.py | ela_balance_sheet_effects.py | py | 653 | python | en | code | 2 | github-code | 13 |
43285290370 | # Python3
# 14-3-21
# Documentation
# Here it goes:
from tkinter import *
import configparser
class salary:
    """Main application window, configured from main_config.ini."""

    def __init__(self, root):
        self.config = configparser.ConfigParser()
        self.config.read('main_config.ini')
        # To show all sections of config file:
        # print(self.config.sections())
        self.root = root
        self.root.title(f"{self.config['DEFAULT']['Title']}")
        # configparser values are plain strings; use getboolean() so that
        # "False"/"0"/"no" in the INI reliably disable fullscreen instead of
        # being passed through as a raw string.
        self.root.attributes('-fullscreen', self.config.getboolean('Settings', 'Fullscreen'))
        self.root.geometry('1350x700+300+200')
if __name__ == "__main__":
    # Create the Tk root window, build the app and enter the event loop.
    master = Tk()
    main = salary(master)
    master.mainloop()
| AdityaSawant0912/Salary_Management_v2 | main.py | main.py | py | 636 | python | en | code | 0 | github-code | 13 |
14849260021 | import requests
import pymongo
import pandas as pd
from selenium import webdriver
from scrapy.http import TextResponse
import getpass
import time
import re
class instagram_crawling():
    def __init__(self):
        """Open Chrome via Selenium and navigate to Instagram's login page."""
        login_url = "https://www.instagram.com/accounts/login/?source=auth_switcher"
        self.driver = webdriver.Chrome()
        self.driver.get(login_url)
    # When an element may still be rendering, retry recursively until it
    # appears (translated from the original Korean comment).
    def check_response(self, webdriver, selector, func , start_time):
        """Poll the page for *selector* until it is rendered or ~10s elapse.

        Behaviour depends on *func*: 'login' waits for a single element,
        'input_keyword' for at least one match, 'initial_crawling' for exactly
        8 matches.  Returns the element(s) on success or False on timeout.
        NOTE(review): the recursion has no sleep between attempts, so a slow
        page can exhaust Python's recursion limit before the 10s timeout.
        """
        try:
            if func == 'login':
                result = webdriver.find_element_by_css_selector(selector)
            elif func == 'input_keyword':
                if time.time() - start_time > 10:
                    print("=========== no response in function : {} ===========".format(func))
                    return False
                result = webdriver.find_elements_by_css_selector(selector)
                while len(result) == 0:
                    return self.check_response(webdriver, selector, func, start_time)
            elif func == 'initial_crawling':
                if time.time() - start_time > 10:
                    print("=========== no response in function : {} ===========".format(func))
                    return False
                result = webdriver.find_elements_by_css_selector(selector)
                while len(result) != 8:
                    return self.check_response(webdriver, selector, func, start_time)
            return result
        except Exception:
            # For func == 'login', find_element raises while the element is
            # missing, so retries happen through this handler.
            if time.time() - start_time > 10:
                print("=========== no response in function : {} ===========".format(func))
                return False
            return self.check_response(webdriver, selector, func, start_time)
    # Login function (selenium webdriver).
    def login(self):
        """Prompt for credentials on the console and log into Instagram.

        After submitting, dismisses the "turn on notifications?" modal by
        clicking "Not now".  NOTE(review): if the modal never appears the
        driver is quit, yet the subsequent click is still attempted.
        """
        my_id = input('id๋ฅผ ์๋ ฅํ์ธ์: ')
        my_password = getpass.getpass('password๋ฅผ ์๋ ฅํ์ธ์: ')
        self.driver.find_element_by_css_selector('._2hvTZ.pexuQ.zyHYP[type=text]').send_keys(my_id)
        self.driver.find_element_by_css_selector('._2hvTZ.pexuQ.zyHYP[type=password]').send_keys(my_password)
        self.driver.find_element_by_css_selector('.sqdOP.L3NKy.y3zKF[type=submit]').click()
        # If the notification-settings modal pops up, click "later"
        # (translated from the original Korean comment).
        start_time = time.time()
        alert_modal = self.check_response(self.driver, 'body > div.RnEpo.Yx5HN > div > div', 'login', start_time)
        if alert_modal == False:
            print('================ login error ================')
            self.driver.quit()
        self.driver.find_element_by_css_selector('body > div.RnEpo.Yx5HN > div > div > div.mt3GC > button.aOOlW.HoLwm').click()
# ๊ฒ์์ด๋ฅผ ์
๋ ฅํ๊ณ ๊ฒ์๋ฌผ์ด ๊ฐ์ฅ ๋ง์ ๊ฒ์๊ฒฐ๊ณผ๋ฅผ ํด๋ฆญ (seleniun webdriver)
def input_keyword(self, word):
keyword = word
self.driver.find_element_by_css_selector('#react-root > section > nav > div._8MQSO.Cx7Bp > div > div > div.LWmhU._0aCwM > input').send_keys(keyword)
# ํค์๋ ์
๋ ฅํ๊ณ , ํค์๋์ ๋ํ ๊ฒ์ ๋ฆฌ์คํธ๊ฐ ๋จ๋๋ฐ ๊ฑธ๋ฆฌ๋ ์๊ฐ๋งํผ ๊ธฐ๋ค๋ ค์ค๋ค
start_time = time.time()
search_list = self.check_response(self.driver, '#react-root > section > nav > div._8MQSO.Cx7Bp > div > div > div.LWmhU._0aCwM > div:nth-child(4) > div.drKGC > div > a', 'input_keyword', start_time)
if search_list == False:
print('================ input_keyword rendering error ================')
self.driver.quit()
# ์์นญ๋ a ์๋ฆฌ๋จผํธ๋ค ์ค์ ๊ฒ์๋ฌผ ๊ฐฏ์๊ฐ ๊ฐ์ฅ ๋ง์ a ์๋ฆฌ๋จผํธ๋ฅผ ๋ฝ๋๋ค
number_list = []
for element in search_list:
splited_element = element.text.split('๊ฒ์๋ฌผ')
if len(splited_element) == 2:
number_list.append(int(re.sub(",", "", splited_element[1])))
else:
number_list.append(0)
max_number = max(number_list)
click_index = number_list.index(max_number)
search_list[click_index].click()
# ์ฒ์ ๋ ๋๋ง๋ ํ๋ฉด์ 24๊ฐ์ ๊ฒ์๋ฌผ์ ํฌ๋กค๋งํ๋ ํจ์
def initial_crawling(self):
# ๊ฒ์ ํค์๋๋ฅผ ํด๋ฆญ ํ ์ดํ ๊ฒ์๋ฌผ ๋ ๋๋ง์ด ์๋ฃ๋์๋์ง ํ์ธ
start_time = time.time()
result = self.check_response(self.driver, '#react-root > section > main > article > div:nth-child(3) > div > div', 'initial_crawling', start_time)
if result == False:
print('================ initial_crawling rendering error ================')
self.driver.quit()
urls = self.driver.find_elements_by_xpath('//*[@id="react-root"]/section/main/article/div[2]/div/div/div/a')
urls = [url.get_attribute("href") for url in urls]
hash_tag_df = pd.DataFrame(columns=['time', 'hash_tag'])
for i in range(len(urls)):
req = requests.get(urls[i])
response = TextResponse(req.url, body=req.text, encoding="utf-8")
hash_tag_list = response.css("meta[property='instapp:hashtags']")
hash_tag_list = ['#' + hash_tag.attrib['content'] for hash_tag in hash_tag_list]
hash_tags = ', '.join(hash_tag_list)
data = [
{'time': 0, 'hash_tag': hash_tags}
]
df = pd.DataFrame(data)
hash_tag_df = hash_tag_df.append(df)
return hash_tag_df.reset_index().drop(columns=['index'])
# ์คํฌ๋กค๋ด๋ฆฌ๋ฉด์ 12๊ฐ์ฉ ์ถ๊ฐ๋๋ ๊ฒ์๋ฌผ์ ํฌ๋กค๋งํ๋ ํจ์ (๋๋ฒ์งธ๋ง ์คํ) - chrome ์ ์ฒดํ๋ฉด ๊ธฐ์ค
# ํ์ฌ ๋ธ๋ผ์ฐ์ ํ๋ฉด ํฌ๊ธฐ์ ๋ฐ๋ผ ์ถ๊ฐ์ ์ผ๋ก ๋ถ๋ฌ์ค๋ div ๊ฐฏ์๊ฐ ๋ฌ๋ผ์ง
# ์ ์ฒดํ๋ฉด ๊ธฐ์ค div์ urls ์ด ๊ฐฏ์ = 24 -> 36 -> 45 -> 45 -> ...
def second_crawling(self):
hash_tag_df = pd.DataFrame(columns=['time', 'hash_tag'])
for i in range(0, 1):
self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
time.sleep(1.5)
urls = self.driver.find_elements_by_xpath('//*[@id="react-root"]/section/main/article/div[2]/div/div/div/a')
urls = [url.get_attribute("href") for url in urls]
for i in range(len(urls) - 12, len(urls)):
req = requests.get(urls[i])
response = TextResponse(req.url, body=req.text, encoding="utf-8")
hash_tag_list = response.css("meta[property='instapp:hashtags']")
hash_tag_list = ['#' + hash_tag.attrib['content'] for hash_tag in hash_tag_list]
hash_tags = ', '.join(hash_tag_list)
data = [
{'time': 0, 'hash_tag': hash_tags}
]
df = pd.DataFrame(data)
hash_tag_df = hash_tag_df.append(df)
return hash_tag_df.reset_index().drop(columns=['index'])
# ์ธ๋ฒ์งธ๋ถํฐ๋ ์ด ํจ์ ๋ฐ๋ณตํ๊ธฐ - ๊ฒ์๋ฌผ 9๊ฐ์ฉ ํฌ๋กค๋ง
def repeat_crawling(self):
hash_tag_df = pd.DataFrame(columns=['time', 'hash_tag'])
for i in range(0, 1):
self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
time.sleep(1.5)
urls = self.driver.find_elements_by_xpath('//*[@id="react-root"]/section/main/article/div[2]/div/div/div/a')
urls = [url.get_attribute("href") for url in urls]
for i in range(len(urls) - 9, len(urls)):
req = requests.get(urls[i])
response = TextResponse(req.url, body=req.text, encoding="utf-8")
hash_tag_list = response.css("meta[property='instapp:hashtags']")
hash_tag_list = ['#' + hash_tag.attrib['content'] for hash_tag in hash_tag_list]
hash_tags = ', '.join(hash_tag_list)
data = [
{'time': 0, 'hash_tag': hash_tags}
]
df = pd.DataFrame(data)
hash_tag_df = hash_tag_df.append(df)
return hash_tag_df.reset_index().drop(columns=['index'])
# Use the instagram_crawling class above to crawl hashtags and return the
# combined DataFrame.
def crawling_start(keyword, repeat_num, mongo_save=0):
    """Run a complete crawl for *keyword* and return the hashtag DataFrame.

    Parameters
    ----------
    keyword:
        Search term (topic) typed into the Instagram search box.
    repeat_num:
        How many times repeat_crawling() is executed; the number of data
        rows is roughly:
            repeat_num = 2 -> 24 + 12 + (9 * 2)
            repeat_num = 3 -> 24 + 12 + (9 * 3)
            repeat_num = 4 -> 24 + 12 + (9 * 4)
    mongo_save:
        Whether to also store the rows in the MongoDB on the AWS server:
        1 = save, 0 = do not save.
    """
    keyword = keyword  # NOTE(review): self-assignment, kept as-is (no-op)
    num = repeat_num
    result_df = pd.DataFrame(columns=['time', 'hash_tag'])
    insta = instagram_crawling()
    insta.login()
    insta.input_keyword(keyword)
    initial_df = insta.initial_crawling()
    result_df = pd.concat([result_df, initial_df])
    second_df = insta.second_crawling()
    result_df = pd.concat([result_df, second_df])
    # Repeat the final crawling step 'num' times; a failure of one round is
    # reported but does not abort the whole crawl.
    for i in range(0, num):
        try:
            df = insta.repeat_crawling()
            result_df = pd.concat([result_df, df])
        except Exception as exc:
            print('์๋ฌ๊ฐ ๋ฐ์ํ์ต๋๋ค : ', exc)
    result_df = result_df.reset_index().drop(columns=['index'])
    # When the third argument is 1, persist the rows to the AWS MongoDB.
    # NOTE(review): database credentials are hard-coded in the URI below;
    # consider moving them to configuration/environment variables.
    if mongo_save == 1:
        mongo_df_list = result_df.to_dict("records")
        client = pymongo.MongoClient('mongodb://root:dss@13.124.100.70:27017')
        db = client.insta_crawling
        collection = db.data
        for i in range(len(mongo_df_list)):
            collection.insert(mongo_df_list[i])
    return result_df.drop(columns=['time'])
| yeejun007/instagram_crawling | insta_crawling.py | insta_crawling.py | py | 10,398 | python | en | code | 0 | github-code | 13 |
31171795542 | from turtle import left
num = list(map(int, input().split()))
type = 0
for i in num :
if i > 10000 or i < 1 :
print("์
๋ ฅ ์ค๋ฅ")
type = 1
else :
None
if type == 0 :
f = num[0] * num[3]
f1 = num[2] * num[1]
mom = num[1] * num[3]
son = f - f1
left = 2
while True :
if mom % left !=0 and son % 2 != 0:
break
elif mom % left == 0 and son % left == 0:
mom //= left
son //= left
else :
left += 1
print(f'{son}/{mom}')
else :
None | 64542/- | 05.py | 05.py | py | 573 | python | en | code | 0 | github-code | 13 |
25680771087 | import numpy as np
import pandas as pd
from flask import Flask, render_template, request
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def create_sim():
    # Load the movie metadata and build a cosine-similarity matrix over the
    # bag-of-words counts of the combined-features column ('comb').
    # NOTE: this re-reads data.csv and recomputes the matrix on every call;
    # callers are expected to cache the returned pair (see rcmd below).
    data = pd.read_csv('data.csv')
    cv = CountVectorizer()
    count_matrix = cv.fit_transform(data['comb'])
    sim = cosine_similarity(count_matrix)
    return data,sim
def rcmd(m):
    """Return up to 10 movie titles most similar to *m*.

    Returns the string 'This movie is not available' when the title is not
    present in the dataset; otherwise a list of up to 10 titles, closest
    first (index 0 of the similarity row is the movie itself and is skipped).
    """
    # Fix: the original assigned `data`/`sim` as locals, so the bare
    # try/except rebuilt the similarity matrix on every request.  Declaring
    # them global makes the intended caching actually work; NameError is the
    # precise "not built yet" signal.
    global data, sim
    m = m.lower()
    try:
        data.head()
        sim.shape
    except NameError:
        data, sim = create_sim()
    if m not in data['movie_title'].unique():
        return 'This movie is not available'
    i = data.loc[data['movie_title'] == m].index[0]
    # Rank all movies by similarity to movie i, best first.
    lst = sorted(enumerate(sim[i]), key=lambda x: x[1], reverse=True)
    lst = lst[1:11]  # drop the movie itself, keep the 10 closest
    recs = []
    for idx, _score in lst:
        recs.append(data['movie_title'][idx])
    return recs
# Flask application serving the movie-recommender UI.
app = Flask(__name__)
@app.route("/")
def home():
    # Landing page: renders the movie search form.
    return render_template('home.html')
@app.route("/recommend")
def recommend():
    """Render recommendations for the ``?movie=`` query parameter.

    ``rcmd`` returns either a list of titles or an error string; the
    template flag ``t`` distinguishes the two cases ('s' = string message,
    'l' = list of recommendations).
    """
    movie = request.args.get('movie')
    r = rcmd(movie)
    movie = movie.upper()
    # Fix: use isinstance instead of the fragile type(r) == type('string').
    if isinstance(r, str):
        return render_template('recommend.html', movie=movie, r=r, t='s')
    else:
        return render_template('recommend.html', movie=movie, r=r, t='l')
# Start the Flask development server when this module is run directly.
if __name__ == '__main__':
    app.run()
| rahulbhadja/movie-recommendation-webapp | main.py | main.py | py | 1,390 | python | en | code | 3 | github-code | 13 |
# Conditional algorithm: compute y piecewise from the entered x:
#   x > 0  -> y = 2x - 10
#   x == 0 -> y = 0
#   x < 0  -> y = 2|x| - 1
x = int(input('ะะฒะตะดะธัะต ั
: '))
if x > 0:
    y = 2 * x - 10
elif x == 0:
    y = 0
else:
    y = 2 * abs(x) - 1 # abs() returns the absolute value of the number
print(f'y = {y}')
| mutedalien/PY_algo_interactive | less_1/task_3.py | task_3.py | py | 258 | python | ru | code | 0 | github-code | 13 |
1127895517 | import hashlib
import urllib.parse
import logging
import mimetypes
from ckan.plugins.toolkit import request, requires_ckan_version
from ckan.lib.munge import munge_tag
import ckanext.geodatagov.model as geodatagovmodel
from ckan import __version__ as ckan_version
requires_ckan_version("2.9")
from . import blueprint
import ckanext.geodatagov.cli as cli
# Register the EOT font MIME type so .eot resources get a correct Content-Type.
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
# Keep a handle to the genuine md5: the monkey patch below broke S3 uploads
# (S3 ETags need real MD5), so code needing true MD5 must use hashlib.md5_orig.
hashlib.md5_orig = hashlib.md5
# Monkey-patch to make FIPS-enabled hosts work: hashlib.md5 raises under
# FIPS, so it is globally aliased to sha1.  NOTE(review): any digest compared
# against an external MD5 checksum must use md5_orig instead.
hashlib.md5 = hashlib.sha1
# ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
from sqlalchemy import exc
from sqlalchemy import event
from sqlalchemy.pool import Pool
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
    """Pessimistic liveness check ("pre-ping") run on every pool checkout.

    Executes ``SELECT 1`` on the raw DBAPI connection; if the probe fails,
    raises ``DisconnectionError`` so SQLAlchemy discards the stale connection
    and retries the checkout with a fresh one.
    """
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")
    except Exception:
        # Stale/broken connection: tell the pool to drop it and retry.
        # (Narrowed from BaseException so KeyboardInterrupt etc. propagate.)
        raise exc.DisconnectionError()
    finally:
        # Always release the probe cursor; the original leaked it on failure.
        cursor.close()
import ckan.plugins as p
import ckan.model as model
import ckanext.harvest.plugin
import json
from ckan.logic.converters import convert_from_extras
from ckan.lib.navl.validators import ignore_missing
from sqlalchemy.util import OrderedDict
log = logging.getLogger(__name__)
try:
from ckanext.harvest.logic.schema import harvest_source_show_package_schema
except ImportError as e:
log.critical('Harvester not available %s' % str(e))
RESOURCE_MAPPING = {
# ArcGIS File Types
'esri rest': ('Esri REST', 'Esri REST API Endpoint'),
'arcgis_rest': ('Esri REST', 'Esri REST API Endpoint'),
'web map application': ('ArcGIS Online Map', 'ArcGIS Online Map'),
'arcgis map preview': ('ArcGIS Map Preview', 'ArcGIS Map Preview'),
'arcgis map service': ('ArcGIS Map Service', 'ArcGIS Map Service'),
'wms': ('WMS', 'ArcGIS Web Mapping Service'),
'wfs': ('WFS', 'ArcGIS Web Feature Service'),
'wcs': ('WCS', 'Web Coverage Service'),
# CSS File Types
'css': ('CSS', 'Cascading Style Sheet File'),
'text/css': ('CSS', 'Cascading Style Sheet File'),
# CSV File Types
'csv': ('CSV', 'Comma Separated Values File'),
'text/csv': ('CSV', 'Comma Separated Values File'),
# EXE File Types
'exe': ('EXE', 'Windows Executable Program'),
'application/x-msdos-program': ('EXE', 'Windows Executable Program'),
# HyperText Markup Language (HTML) File Types
'htx': ('HTML', 'Web Page'),
'htm': ('HTML', 'Web Page'),
'html': ('HTML', 'Web Page'),
'htmls': ('HTML', 'Web Page'),
'xhtml': ('HTML', 'Web Page'),
'text/html': ('HTML', 'Web Page'),
'application/xhtml+xml': ('HTML', 'Web Page'),
'application/x-httpd-php': ('HTML', 'Web Page'),
# Image File Types - BITMAP
'bm': ('BMP', 'Bitmap Image File'),
'bmp': ('BMP', 'Bitmap Image File'),
'pbm': ('BMP', 'Bitmap Image File'),
'xbm': ('BMP', 'Bitmap Image File'),
'image/bmp': ('BMP', 'Bitmap Image File'),
'image/x-ms-bmp': ('BMP', 'Bitmap Image File'),
'image/x-xbitmap': ('BMP', 'Bitmap Image File'),
'image/x-windows-bmp': ('BMP', 'Bitmap Image File'),
'image/x-portable-bitmap': ('BMP', 'Bitmap Image File'),
# Image File Types - Graphics Interchange Format (GIF)
'gif': ('GIF', 'GIF Image File'),
'image/gif': ('GIF', 'GIF Image File'),
# Image File Types - ICON
'ico': ('ICO', 'Icon Image File'),
'image/x-icon': ('ICO', 'Icon Image File'),
# Image File Types - JPEG
'jpe': ('JPEG', 'JPEG Image File'),
'jpg': ('JPEG', 'JPEG Image File'),
'jps': ('JPEG', 'JPEG Image File'),
'jpeg': ('JPEG', 'JPEG Image File'),
'pjpeg': ('JPEG', 'JPEG Image File'),
'image/jpeg': ('JPEG', 'JPEG Image File'),
'image/pjpeg': ('JPEG', 'JPEG Image File'),
'image/x-jps': ('JPEG', 'JPEG Image File'),
'image/x-citrix-jpeg': ('JPEG', 'JPEG Image File'),
# Image File Types - PNG
'png': ('PNG', 'PNG Image File'),
'x-png': ('PNG', 'PNG Image File'),
'image/png': ('PNG', 'PNG Image File'),
'image/x-citrix-png': ('PNG', 'PNG Image File'),
# Image File Types - Scalable Vector Graphics (SVG)
'svg': ('SVG', 'SVG Image File'),
'image/svg+xml': ('SVG', 'SVG Image File'),
# Image File Types - Tagged Image File Format (TIFF)
'tif': ('TIFF', 'TIFF Image File'),
'tiff': ('TIFF', 'TIFF Image File'),
'image/tiff': ('TIFF', 'TIFF Image File'),
'image/x-tiff': ('TIFF', 'TIFF Image File'),
# JSON File Types
'json': ('JSON', 'JSON File'),
'text/x-json': ('JSON', 'JSON File'),
'application/json': ('JSON', 'JSON File'),
# KML File Types
'kml': ('KML', 'KML File'),
'kmz': ('KML', 'KMZ File'),
'application/vnd.google-earth.kml+xml': ('KML', 'KML File'),
'application/vnd.google-earth.kmz': ('KML', 'KMZ File'),
# MS Access File Types
'mdb': ('ACCESS', 'MS Access Database'),
'access': ('ACCESS', 'MS Access Database'),
'application/mdb': ('ACCESS', 'MS Access Database'),
'application/msaccess': ('ACCESS', 'MS Access Database'),
'application/x-msaccess': ('ACCESS', 'MS Access Database'),
'application/vnd.msaccess': ('ACCESS', 'MS Access Database'),
'application/vnd.ms-access': ('ACCESS', 'MS Access Database'),
# MS Excel File Types
'xl': ('EXCEL', 'MS Excel File'),
'xla': ('EXCEL', 'MS Excel File'),
'xlb': ('EXCEL', 'MS Excel File'),
'xlc': ('EXCEL', 'MS Excel File'),
'xld': ('EXCEL', 'MS Excel File'),
'xls': ('EXCEL', 'MS Excel File'),
'xlsx': ('EXCEL', 'MS Excel File'),
'xlsm': ('EXCEL', 'MS Excel File'),
'excel': ('EXCEL', 'MS Excel File'),
'openXML': ('EXCEL', 'MS Excel File'),
'application/excel': ('EXCEL', 'MS Excel File'),
'application/x-excel': ('EXCEL', 'MS Excel File'),
'application/x-msexcel': ('EXCEL', 'MS Excel File'),
'application/vnd.ms-excel': ('EXCEL', 'MS Excel File'),
'application/vnd.ms-excel.sheet.macroEnabled.12': ('EXCEL', 'MS Excel File'),
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': ('EXCEL', 'MS Excel File'),
# MS PowerPoint File Types
'ppt': ('POWERPOINT', 'MS PowerPoint File'),
'pps': ('POWERPOINT', 'MS PowerPoint File'),
'pptx': ('POWERPOINT', 'MS PowerPoint File'),
'ppsx': ('POWERPOINT', 'MS PowerPoint File'),
'pptm': ('POWERPOINT', 'MS PowerPoint File'),
'ppsm': ('POWERPOINT', 'MS PowerPoint File'),
'sldx': ('POWERPOINT', 'MS PowerPoint File'),
'sldm': ('POWERPOINT', 'MS PowerPoint File'),
'application/powerpoint': ('POWERPOINT', 'MS PowerPoint File'),
'application/mspowerpoint': ('POWERPOINT', 'MS PowerPoint File'),
'application/x-mspowerpoint': ('POWERPOINT', 'MS PowerPoint File'),
'application/vnd.ms-powerpoint': ('POWERPOINT', 'MS PowerPoint File'),
'application/vnd.ms-powerpoint.presentation.macroEnabled.12': ('POWERPOINT', 'MS PowerPoint File'),
'application/vnd.ms-powerpoint.slideshow.macroEnabled.12': ('POWERPOINT', 'MS PowerPoint File'),
'application/vnd.ms-powerpoint.slide.macroEnabled.12': ('POWERPOINT', 'MS PowerPoint File'),
'application/vnd.openxmlformats-officedocument.presentationml.slide': ('POWERPOINT', 'MS PowerPoint File'),
'application/vnd.openxmlformats-officedocument.presentationml.presentation': ('POWERPOINT', 'MS PowerPoint File'),
'application/vnd.openxmlformats-officedocument.presentationml.slideshow': ('POWERPOINT', 'MS PowerPoint File'),
# MS Word File Types
'doc': ('DOC', 'MS Word File'),
'docx': ('DOC', 'MS Word File'),
'docm': ('DOC', 'MS Word File'),
'word': ('DOC', 'MS Word File'),
'application/msword': ('DOC', 'MS Word File'),
'application/vnd.ms-word.document.macroEnabled.12': ('DOC', 'MS Word File'),
'application/vnd.openxmlformats-officedocument.wordprocessingml.document': ('DOC', 'MS Word File'),
# Network Common Data Form (NetCDF) File Types
'nc': ('CDF', 'NetCDF File'),
'cdf': ('CDF', 'NetCDF File'),
'netcdf': ('CDF', 'NetCDF File'),
'application/x-netcdf': ('NETCDF', 'NetCDF File'),
# PDF File Types
'pdf': ('PDF', 'PDF File'),
'application/pdf': ('PDF', 'PDF File'),
# PERL File Types
'pl': ('PERL', 'Perl Script File'),
'pm': ('PERL', 'Perl Module File'),
'perl': ('PERL', 'Perl Script File'),
'text/x-perl': ('PERL', 'Perl Script File'),
# QGIS File Types
'qgis': ('QGIS', 'QGIS File'),
'application/x-qgis': ('QGIS', 'QGIS File'),
# RAR File Types
'rar': ('RAR', 'RAR Compressed File'),
'application/rar': ('RAR', 'RAR Compressed File'),
'application/vnd.rar': ('RAR', 'RAR Compressed File'),
'application/x-rar-compressed': ('RAR', 'RAR Compressed File'),
# Resource Description Framework (RDF) File Types
'rdf': ('RDF', 'RDF File'),
'application/rdf+xml': ('RDF', 'RDF File'),
# Rich Text Format (RTF) File Types
'rt': ('RICH TEXT', 'Rich Text File'),
'rtf': ('RICH TEXT', 'Rich Text File'),
'rtx': ('RICH TEXT', 'Rich Text File'),
'text/richtext': ('RICH TEXT', 'Rich Text File'),
'text/vnd.rn-realtext': ('RICH TEXT', 'Rich Text File'),
'application/rtf': ('RICH TEXT', 'Rich Text File'),
'application/x-rtf': ('RICH TEXT', 'Rich Text File'),
# SID File Types - Primary association: Commodore64 (C64)?
'sid': ('SID', 'SID File'),
'mrsid': ('SID', 'SID File'),
'audio/psid': ('SID', 'SID File'),
'audio/x-psid': ('SID', 'SID File'),
'audio/sidtune': ('SID', 'MID File'),
'audio/x-sidtune': ('SID', 'SID File'),
'audio/prs.sid': ('SID', 'SID File'),
# Tab Separated Values (TSV) File Types
'tsv': ('TSV', 'Tab Separated Values File'),
'text/tab-separated-values': ('TSV', 'Tab Separated Values File'),
# Tape Archive (TAR) File Types
'tar': ('TAR', 'TAR Compressed File'),
'application/x-tar': ('TAR', 'TAR Compressed File'),
# Text File Types
'txt': ('TEXT', 'Text File'),
'text/plain': ('TEXT', 'Text File'),
# Extensible Markup Language (XML) File Types
'xml': ('XML', 'XML File'),
'text/xml': ('XML', 'XML File'),
'application/xml': ('XML', 'XML File'),
# XYZ File Format File Types
'xyz': ('XYZ', 'XYZ File'),
'chemical/x-xyz': ('XYZ', 'XYZ File'),
# ZIP File Types
'zip': ('ZIP', 'Zip File'),
'application/zip': ('ZIP', 'Zip File'),
'multipart/x-zip': ('ZIP', 'Zip File'),
'application/x-compressed': ('ZIP', 'Zip File'),
'application/x-zip-compressed': ('ZIP', 'Zip File'),
}
def split_tags(tag):
    """Split a raw tag string on ', ' and '>' and return the munged,
    non-empty tag names."""
    pieces = []
    for chunk in tag.split(', '):
        pieces.extend(chunk.split('>'))
    munged = (munge_tag(piece) for piece in pieces)
    return [name for name in munged if name != '']
# Copied from ckanext-harvest, but additionally unwraps single-item list
# values (e.g. validation settings) to their first element.
def harvest_source_convert_from_config(key, data, errors, context):
    """Expand the JSON harvest-source config stored under *key* into
    individual single-key tuple entries of *data*."""
    raw = data[key]
    if not raw:
        return
    for cfg_key, cfg_value in json.loads(raw).items():
        data[(cfg_key, )] = cfg_value[0] if isinstance(cfg_value, list) else cfg_value
class DataGovHarvest(ckanext.harvest.plugin.Harvest):
    """Harvest plugin subclass customising the source form, its config
    schema and the search facets shown for harvest sources."""

    def package_form(self):
        # Use the geodatagov-specific harvest source form template.
        return 'source/geodatagov_source_form.html'

    def show_package_schema(self):
        '''
        Returns the schema for mapping package data from the database into a
        format suitable for the form
        '''
        schema = harvest_source_show_package_schema()
        # Replace the default config converter with the variant above that
        # unwraps single-item list values.
        schema['config'] = [convert_from_extras, harvest_source_convert_from_config, ignore_missing]
        return schema

    def dataset_facets(self, facets_dict, package_type):
        # Only override facets for harvest sources; other package types keep
        # their defaults.
        if package_type != 'harvest':
            return facets_dict
        return OrderedDict([('organization_type', 'Organization Types'),
                            ('frequency', 'Frequency'),
                            ('source_type', 'Type'),
                            ('organization', 'Organizations'),
                            # ('publisher', 'Publisher'),
                            ])

    def organization_facets(self, facets_dict, organization_type, package_type):
        # Same facet override for organization pages listing harvest sources.
        if package_type != 'harvest':
            return facets_dict
        return OrderedDict([('frequency', 'Frequency'),
                            ('source_type', 'Type'),
                            # ('publisher', 'Publisher'),
                            ])
def get_filename_and_extension(resource):
    """Best-effort extraction of (filename, extension) from a resource URL.

    Returns ('', '') for query-string URLs, URLs containing 'URL', or paths
    whose last segment does not look like a plausible file name (extension
    of 2-4 characters, name longer than 4 characters).
    """
    url = resource.get('url').rstrip('/')
    if '?' in url or 'URL' in url:
        return '', ''
    path = urllib.parse.urlparse(url).path
    segments = path.split('/')
    candidate = segments[-1]
    suffix = candidate.split('.')[-1].lower()
    looks_like_file = (2 <= len(suffix) <= 4
                       and len(candidate) > 4
                       and len(segments) > 1)
    if looks_like_file:
        return candidate, suffix
    return '', ''
def change_resource_details(resource):
    # Normalise a resource's 'format' and 'name' in place, using the
    # RESOURCE_MAPPING table and falling back to the URL's file extension.
    formats = list(RESOURCE_MAPPING.keys())
    resource_format = resource.get('format', '').lower().lstrip('.')
    filename, extension = get_filename_and_extension(resource)
    if not resource_format:
        # No declared format: fall back to the extension from the URL.
        resource_format = extension
    if resource.get('name', '') in ['Unnamed resource', '', None]:
        # Remember that the harvested name was only a placeholder.
        resource['no_real_name'] = True
    if resource_format in formats:
        resource['format'] = RESOURCE_MAPPING[resource_format][0]
        if resource.get('name', '') in ['Unnamed resource', '', None]:
            resource['name'] = RESOURCE_MAPPING[resource_format][1]
            if filename:
                # NOTE(review): self-assignment is a no-op -- this probably
                # intended resource['name'] = filename; confirm before changing.
                resource['name'] = resource['name']
    elif resource.get('name', '') in ['Unnamed resource', '', None]:
        # Unknown format: best-effort upper-cased extension plus generic name.
        if extension and not resource_format:
            resource['format'] = extension.upper()
        resource['name'] = 'Web Resource'
    if filename and not resource.get('description'):
        # Use the filename as a minimal description when none is present.
        resource['description'] = filename
def related_create_auth_fn(context, data_dict=None):
    """Auth function that unconditionally denies 'related_create'."""
    return dict(success=False)
def related_update_auth_fn(context, data_dict=None):
    """Auth function that unconditionally denies 'related_update'."""
    return dict(success=False)
class Demo(p.SingletonPlugin):
    """Main geodatagov plugin: search facets, tag/extras roll-up handling,
    resource format normalisation, plus custom actions, helpers, auth
    functions and CLI commands."""

    p.implements(p.IConfigurer)
    p.implements(p.IConfigurable)
    p.implements(p.IPackageController, inherit=True)
    p.implements(p.ITemplateHelpers)
    p.implements(p.IActions, inherit=True)
    p.implements(p.IAuthFunctions)
    p.implements(p.IClick)

    def get_commands(self) -> list:
        # IClick: expose the primary CLI command set.
        return cli.get_commands()

    # IConfigurer
    def update_config(self, config):
        p.toolkit.add_template_directory(config, 'templates')
        p.toolkit.add_resource('fanstatic_library', 'geodatagov')

    # Cached at class level from the 'saml2.user_edit' config option.
    edit_url = None

    def configure(self, config):
        log.info('plugin initialized: %s', self.__class__.__name__)
        self.__class__.edit_url = config.get('saml2.user_edit')

    @classmethod
    def saml2_user_edit_url(cls):
        # Template helper: URL where a SAML2 user edits their profile.
        return cls.edit_url

    # IPackageController
    def before_dataset_view(self, pkg_dict):
        # Promote a comma/'>'-separated 'tags' extra into real package tags
        # and drop the extra itself.
        for num, extra in enumerate(pkg_dict.get('extras', [])):
            if extra['key'] == 'tags':
                tags = pkg_dict.get('tags', [])
                tags.extend([dict(name=tag, display_name=tag) for tag
                             in split_tags(extra['value'])])
                pkg_dict['tags'] = tags
                pkg_dict['extras'].pop(num)
                break
        # Enrich the organization dict with its type and terms of use,
        # looked up from the group extras table.
        organization = pkg_dict.get('organization')
        if organization:
            result = model.Session.query(model.GroupExtra.value).filter_by(
                key='organization_type', group_id=organization['id']).first()
            if result:
                organization['organization_type'] = result[0]
            result = model.Session.query(model.GroupExtra.value).filter_by(
                key='terms_of_use', state='active',
                group_id=organization['id']).first()
            if result:
                organization['terms_of_use'] = result[0]
        return pkg_dict

    def before_dataset_index(self, pkg_dict):
        # Also index tags held in the rolled-up 'extras_tags' value.
        tags = pkg_dict.get('tags', [])
        tags.extend(tag for tag in split_tags(pkg_dict.get('extras_tags', '')))
        pkg_dict['tags'] = tags
        # Copy organization_type / terms_of_use from the owning group extras
        # into the index document.
        org_name = pkg_dict['organization']
        group = model.Group.get(org_name)
        if group and ('organization_type' in group.extras):
            pkg_dict['organization_type'] = group.extras['organization_type']
        if group and ('terms_of_use' in group.extras):
            pkg_dict['terms_of_use'] = group.extras['terms_of_use']
        # Normalise the sortable title.
        title_string = pkg_dict.get('title_string')
        if title_string:
            pkg_dict['title_string'] = title_string.strip().lower()
        # category tags: collect '__category_tag_*' JSON lists into vocab_*
        # fields plus an aggregated vocab_category_all field.
        cats = {}
        for extra in pkg_dict:
            if extra.startswith('__category_tag_'):
                cat = pkg_dict[extra]
                if cat:
                    try:
                        cat_list = json.loads(cat)
                        cats['vocab_%s' % extra] = cat_list
                        new_list = cats.get('vocab_category_all', [])
                        new_list.extend(cat_list)
                        cats['vocab_category_all'] = new_list
                    except ValueError:
                        # Ignore extras whose value is not valid JSON.
                        pass
        pkg_dict.update(cats)
        return pkg_dict

    def before_dataset_search(self, search_params):
        fq = search_params.get('fq', '')
        # Default relevance sort is popularity-based.
        if search_params.get('sort') in (None, 'rank'):
            search_params['sort'] = 'views_recent desc'
        if search_params.get('sort') in ('none'):
            # NOTE(review): ('none') is a plain string, so this is a
            # substring-membership test, not a one-element tuple -- confirm
            # whether ('none',) was intended.
            search_params['sort'] = 'score desc, name asc'
        # only show collections on bulk update page and when the facet is
        # explicitly added
        try:
            path = request.path
        except BaseException:
            # when there is no request we get a
            # TypeError: No object (name: request) has been registered for this thread
            path = ''
        if 'collection_package_id' not in fq and 'bulk_process' not in path:
            log.info('Added FQ to collection_package_id')
            fq += ' -collection_package_id:["" TO *]'
        else:
            log.info('NOT Added FQ to collection_package_id')
        search_params['fq'] = fq
        return search_params

    def after_dataset_show(self, context, data_dict):
        # Unpack the 'extras_rollup' JSON blob back into individual extras.
        current_extras = data_dict.get('extras', [])
        new_extras = []
        for extra in current_extras:
            if extra['key'] == 'extras_rollup':
                rolledup_extras = json.loads(extra['value'])
                for key, value in list(rolledup_extras.items()):
                    new_extras.append({"key": key, "value": value})
            else:
                new_extras.append(extra)
        data_dict['extras'] = new_extras
        # Normalise resource formats/names for display.
        if 'resources' in data_dict:
            for resource in data_dict['resources']:
                change_resource_details(resource)
        return data_dict

    # ITemplateHelpers
    def get_helpers(self):
        # Imported lazily to avoid a circular import at module load time.
        from ckanext.geodatagov import helpers as geodatagov_helpers
        return {
            'get_validation_profiles': geodatagov_helpers.get_validation_profiles,
            'get_validation_schema': geodatagov_helpers.get_validation_schema,
            'saml2_user_edit_url': self.saml2_user_edit_url,
            'get_harvest_source_type': geodatagov_helpers.get_harvest_source_type,
            'get_harvest_source_config': geodatagov_helpers.get_harvest_source_config,
            'get_collection_package': geodatagov_helpers.get_collection_package,
        }

    # IActions
    def get_actions(self):
        from ckanext.geodatagov import logic as geodatagov_logic
        actions = {
            'resource_show': geodatagov_logic.resource_show,
            'organization_show': geodatagov_logic.organization_show,
            'location_search': geodatagov_logic.location_search,
            'organization_list': geodatagov_logic.organization_list,
            'group_show': geodatagov_logic.group_show,
            'group_catagory_tag_update': geodatagov_logic.group_catagory_tag_update,
            'datajson_create': geodatagov_logic.datajson_create,
            'datajson_update': geodatagov_logic.datajson_update,
            'doi_create': geodatagov_logic.doi_create,
            'doi_update': geodatagov_logic.doi_update,
            'package_show_rest': geodatagov_logic.package_show_rest
        }
        if p.toolkit.check_ckan_version(min_version='2.8'):
            # "chain" actions to avoid using unexistent decorator at CKAN 2.3
            log.info('adding chained actions to {}'.format(ckan_version))
            update_func = geodatagov_logic.package_update
            update_func.chained_action = True
            create_func = geodatagov_logic.package_create
            create_func.chained_action = True
            actions.update({
                'package_update': update_func,
                'package_create': create_func})
        log.info('get_actions {} {}'.format(ckan_version, actions))
        return actions

    # IAuthFunctions
    def get_auth_functions(self):
        from ckanext.geodatagov import auth as geodatagov_auth
        return {
            'related_create': geodatagov_auth.related_create,
            'related_update': geodatagov_auth.related_update,
            'group_catagory_tag_update': geodatagov_auth.group_catagory_tag_update,
        }
class Miscs(p.SingletonPlugin):
    ''' Places for something that has nowhere to go otherwise.

    Registers templates/resources, initialises the geodatagov database
    tables and exposes the datapusher blueprint.
    '''
    p.implements(p.IConfigurer)
    p.implements(p.IConfigurable)
    p.implements(p.IBlueprint)

    # IConfigurer
    def update_config(self, config):
        p.toolkit.add_template_directory(config, 'templates')
        p.toolkit.add_resource('fanstatic_library', 'geodatagov')

    # IConfigurable
    def configure(self, config):
        log.info('plugin initialized: %s', self.__class__.__name__)
        # Create this extension's database tables if missing.
        geodatagovmodel.setup()

    # IBlueprint
    def get_blueprint(self):
        return blueprint.datapusher
class S3Test(p.SingletonPlugin):
    """Plugin that registers the secondary CLI command group via IClick."""
    p.implements(p.IClick)

    def get_commands(self) -> list:
        # See ckanext.geodatagov.cli.get_commands2 for the command list.
        return cli.get_commands2()
| GSA/ckanext-geodatagov | ckanext/geodatagov/plugin.py | plugin.py | py | 22,178 | python | en | code | 34 | github-code | 13 |
# Frequency analysis: count how often each latin letter occurs in text.txt,
# convert the counts to rounded shares, and append "letter share" lines to
# analysis.txt.  Files are opened with 'with' so they are closed even when
# an exception occurs (the original left both handles open on failure).
alfabit = "abcdefghijklmnopqrstuvwxyz"

sym_dict = dict()
# Build a letter -> occurrence-count dictionary for the input text.
with open("text.txt", "r") as input_text:
    for i_line in input_text:
        for sym in i_line.lower():
            if sym in alfabit:
                sym_dict[sym] = sym_dict.get(sym, 0) + 1

# Total number of letters seen.
all_sym = sum(sym_dict.values())

# Build a share -> letters dictionary; along the way replace each count in
# sym_dict with that letter's rounded share.
share_dict = dict()
for name, value in sym_dict.items():
    round_share = round(value / all_sym, 3)
    sym_dict[name] = round_share
    share_dict.setdefault(round_share, []).append(name)

# Sort the letter lists of share_dict.
for key, value in share_dict.items():
    share_dict[key] = sorted(value)

print(share_dict)
print(sym_dict)

# Append the results, one "letter share" per line.
with open("analysis.txt", "a") as analysis_file:
    for key, value in share_dict.items():
        for item in value:
            analysis_file.write(f"{item} {str(key)} \n")
# done!
| ilnrzakirov/Python_basic | Module22/08_frequency_analysis/main.py | main.py | py | 1,813 | python | ru | code | 0 | github-code | 13 |
26216522140 | import os
import urllib.request
from abc import ABC, abstractmethod
from typing import Iterable
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedGroupKFold
from lrbenchmark.typing import TrainTestPair, XYType
class Dataset(ABC):
    """Abstract interface for benchmark data sets: produces train/test
    splits and may support drawing (and removing) a random sample."""

    @abstractmethod
    def get_splits(self, seed: int = None) -> Iterable[TrainTestPair]:
        """
        Retrieve data from this dataset.

        This function is responsible for splitting the data in subset for
        training and testing in a way that is appropriate for the data set.
        Depending on the implementation, the data set may be returned at once,
        as a K-fold series, or otherwise.

        Parameters
        ----------
        seed : int, optional
            Optional random seed to be used for splitting. The default is None.

        Returns
        -------
        Iterable[TrainTestPair]
            one or more subsets as an iterable, each element being a tuple
            `((X_train, y_train), (X_test, y_test))`, where:
            - `X_train` is a `numpy.ndarray` of features for records in the training set
            - `y_train` is a `numpy.ndarray` of labels for records in the training set
            - `X_test` is a `numpy.ndarray` of features for records in the test set
            - `y_test` is a `numpy.ndarray` of labels for records in the test set
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def is_binary(self) -> bool:
        """
        Binary flag to indicate whether this data set has two labels (i.e. is
        binary) or more than two labels. Datasets with multple labels are
        typically used to develop common-source models.

        A data set is designed to either develop specific-source models or
        common-source models. A specific-source data set typically has two
        class labels: `0` indicates the defense's hypothesis and `1` for the
        prosecutor's hypothesis. Both the training set and the test set sample
        from both classes. A common-source data set has multiple class labels,
        and if the data set is split, a class label should not appear in more
        than one split.

        Returns
        -------
        bool
            `True` if the data set has two labels;
            `False` if the data set has multiple labels.
        """
        raise NotImplementedError

    def pop(self, fraction: float, seed: int = None) -> XYType:
        """
        Draws a random sample from the data set.

        The returned data will be removed.

        Parameters
        ----------
        fraction : float
            The size of the sample as a fraction of the _original_ data set
            size, i.e. subsequent calls will return arrays of (approximately)
            the same size.
        seed : int, optional
            Optional random seed. The default is None.

        Raises
        ------
        NotImplementedError
            If the method is not implemented by this data set.

        Returns
        -------
        XYType
            A tuple of `(X, y)`, with `X` being numpy arrays of features and
            `y` the corresponding labels.
        """
        raise NotImplementedError
class CommonSourceKFoldDataset(Dataset, ABC):
    """Base class for common-source data sets split with grouped k-fold.

    Splits keep all records sharing a source label (`y`) in the same fold,
    so a source never appears in both train and test of one split.
    """
    def __init__(self, n_splits):
        super().__init__()
        self.n_splits = n_splits
        self._data = None  # lazy cache for the (X, y) pair loaded by `load`
    @abstractmethod
    def load(self) -> XYType:
        """Load and return the full `(X, y)` arrays; implemented by subclasses."""
        raise NotImplementedError
    def get_x_y(self) -> XYType:
        """Return the cached `(X, y)` pair, loading it on first access."""
        if self._data is None:
            X, y = self.load()
            self._data = (X, y)
        return self._data
    def get_splits(self, seed: int = None) -> Iterable[TrainTestPair]:
        """Yield `((X_train, y_train), (X_test, y_test))` per grouped fold."""
        X, y = self.get_x_y()
        cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True,
                                  random_state=seed)
        # cv.split requires an x, y and groups. We don't have y yet, therefore we set it to -1.
        # (the source labels act as the `groups`; the constant -1 array makes
        # stratification a no-op, leaving pure group-aware splitting)
        for train_idxs, test_idxs in cv.split(X, y=np.array([-1] * len(X)),
                                              groups=y):
            yield (X[train_idxs], y[train_idxs]), (X[test_idxs], y[test_idxs])
    @property
    def is_binary(self) -> bool:
        # Common-source data sets have one label per source, i.e. many labels.
        return False
class InMemoryCommonSourceKFoldDataset(CommonSourceKFoldDataset):
    """Common-source k-fold data set backed by arrays already held in memory."""

    def __init__(self, X, y, n_splits):
        # Stash the data before delegating split bookkeeping to the parent.
        self._X, self._y = X, y
        super().__init__(n_splits=n_splits)

    def load(self) -> XYType:
        """Hand back the in-memory features and labels unchanged."""
        return self._X, self._y

    def __repr__(self):
        return "InMemoryDataset"
class XTCDataset(CommonSourceKFoldDataset):
    """XTC tablet measurements (diameter/thickness/weight/purity), one source per batch."""

    def __init__(self, n_splits):
        super().__init__(n_splits)

    def load(self) -> XYType:
        """
        Loads XTC dataset
        """
        data_file = 'Champ_data.csv'
        url = "https://raw.githubusercontent.com/NetherlandsForensicInstitute/placeholder" # @todo publish dataset to github
        print(f"{self.__repr__()} is not yet available for download")
        xtc_folder = os.path.join('resources', 'drugs_xtc')
        download_dataset_file(xtc_folder, data_file, url)
        frame = pd.read_csv(os.path.join(xtc_folder, data_file), delimiter=',')
        feature_columns = ["Diameter", "Thickness", "Weight", "Purity"]
        # Features come from the physical measurements; labels are batch ids.
        return frame[feature_columns].to_numpy(), frame['batchnumber'].to_numpy()

    def __repr__(self):
        return "XTC dataset"
class GlassDataset(CommonSourceKFoldDataset):
    """Elemental-composition glass data set; each glass item is one source."""
    def __init__(self, n_splits):
        super().__init__(n_splits)
    def load(self) -> XYType:
        """Download (if needed) and merge the three measurement CSV files."""
        datasets = {
            'duplo.csv': 'https://raw.githubusercontent.com/NetherlandsForensicInstitute/elemental_composition_glass/main/duplo.csv',
            'training.csv': 'https://raw.githubusercontent.com/NetherlandsForensicInstitute/elemental_composition_glass/main/training.csv',
            'triplo.csv': 'https://raw.githubusercontent.com/NetherlandsForensicInstitute/elemental_composition_glass/main/triplo.csv'
        }
        glass_folder = os.path.join('resources', 'glass')
        # Elemental isotope measurement columns used as features.
        features = ["K39", "Ti49", "Mn55", "Rb85", "Sr88", "Zr90", "Ba137",
                    "La139", "Ce140", "Pb208"]
        df = None
        for file, url in datasets.items():
            download_dataset_file(glass_folder, file, url)
            df_temp = pd.read_csv(os.path.join(glass_folder, file),
                                  delimiter=',')
            # The Item column starts with 1 in each file,
            # this is making it ascending across different files
            # NOTE(review): the conditional expression spans the whole sum,
            # i.e. (df_temp['Item'] + max(df['Item'])) if df is not None
            # else df_temp['Item'] - the line wrap makes this easy to misread.
            df_temp['Item'] = df_temp['Item'] + max(
                df['Item']) if df is not None else df_temp['Item']
            # the data from all 3 files is added together to make one dataset
            df = pd.concat([df, df_temp]) if df is not None else df_temp
        X = df[features].to_numpy()
        y = df['Item'].to_numpy()
        return X, y
    def __repr__(self):
        return "Glass dataset"
def download_dataset_file(folder: str, file: str, url: str):
location = os.path.join(folder, file)
if not os.path.isfile(location):
print(f'downloading {file}')
try:
urllib.request.urlretrieve(url, location)
except Exception as e:
print(f"Could not download {file} because of: {e}")
| NetherlandsForensicInstitute/lr-benchmark | lrbenchmark/dataset.py | dataset.py | py | 7,311 | python | en | code | 0 | github-code | 13 |
44150359402 | """
Task:
Remove duplicate words from the given string.
Example:
'alpha beta beta gamma gamma gamma delta alpha beta beta gamma gamma gamma delta'
returns => 'alpha beta gamma delta'
"""
"""
Steps:
1) split the string based on " "
2) iterate over the splitted list of strings
3) check if the word is more than one
4) return the word
5) join the words together
"""
sampleStr = 'alpha beta gamma alpha beta gamma xeta beta beta' # 'alpha beta gamma xeta'


def removeDuplicateStrings(str):
    """Return *str* with duplicate words removed, keeping first-seen order.

    Words are delimited by single spaces.  The result is space-joined, as the
    task specification requires ('alpha beta gamma delta'); the previous
    version incorrectly joined with commas.
    """
    seen = set()   # O(1) membership test instead of scanning the output list
    output = []    # unique words in order of first appearance
    for word in str.split(" "):
        if word not in seen:
            seen.add(word)
            output.append(word)
    return " ".join(output)


print(removeDuplicateStrings(sampleStr))
| oris-96/Python-Algorithms | remove_duplicate_words.py | remove_duplicate_words.py | py | 805 | python | en | code | 0 | github-code | 13 |
12861907040 | """ะะตะพะฑั
ะพะดะธะผะพ ัะพะทะดะฐัั ััะธย ัะปะพะฒะฐััย ะธ ะฝะฐะฟะธัะฐัั ััะฝะบัะธั,
ะบะพัะพัะฐั ัะผะพะถะตั ะฑัะฐัั ัะปะพะฒะฐัะธ ะธ ะฟัะพะธะทะฒะพะดะธัั ะธั
ย ัะปะธัะฝะธะตย ะฒ ะพะดะธะฝ"""
def fun_dict(x, y, z):
    """Merge the three given dictionaries into a single new dict.

    Later arguments win on key collisions; the inputs are not modified.
    """
    merged = {}
    merged.update(x)
    merged.update(y)
    merged.update(z)
    return merged
d1 = dict(a=1, b=2, c=3)
d2 = dict(d=4, e=5, f=6)
d3 = dict(g=7, h=8, i=9)
print("Dictionary 1:", d1)
print("Dictionary 2:", d2)
print("Dictionary 3:", d3)
print("Combined dictionary:", fun_dict(d1, d2, d3))
| nestelementary/Contacts | G117_Nesteruk_DZ_5_Dictionaries.py | G117_Nesteruk_DZ_5_Dictionaries.py | py | 539 | python | ru | code | 0 | github-code | 13 |
18168416245 | import glob
import os
import re
import shutil
import subprocess
import warnings
from distutils.dir_util import copy_tree
from distutils.file_util import copy_file, move_file
from shutil import rmtree
from setuptools import Distribution, Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
# extend the package files by directory or by file
def extend_pynq_utils_package(data_list):
for data in data_list:
if os.path.isdir(data):
pynq_utils_files.extend(
[
os.path.join("..", root, f)
for root, _, files in os.walk(data)
for f in files
]
)
elif os.path.isfile(data):
pynq_utils_files.append(os.path.join("..", data))
# Get the version string; use a context manager so the file handle is closed
# promptly (the previous version opened it and never closed it).
with open("./pynqutils/version.txt", "r") as ver_file:
    ver_str = ver_file.readline()

# read the contents of your README file
from pathlib import Path

this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()

# Collect the package data files shipped with the wheel.
pynq_utils_files = []
extend_pynq_utils_package(
    [
        "pynqutils/",
        "pynqutils/version.txt",
    ]
)

# Required packages
required = ["setuptools>=24.2.0", "pynqmetadata>=0.0.1", "cffi", "tqdm", "numpy", "python-magic>=0.4.25"]

setup(
    name="pynqutils",
    version=ver_str,
    description="Utilities for PYNQ",
    long_description=long_description,
    long_description_content_type='text/markdown',
    url="https://github.com/Xilinx/PYNQ-Utils",
    author="pynq",
    author_email="pynq_support@xilinx.com",
    packages=find_packages(),
    install_requires=required,
    python_requires=">=3.5.2",
    package_data={
        "pynqutils": pynq_utils_files,
    },
    zip_safe=False,
    license="BSD 3-Clause",
)
| Xilinx/PYNQ-Utils | setup.py | setup.py | py | 1,825 | python | en | code | 1 | github-code | 13 |
# Standard library
import os

# Third-party
import h5py
import numpy as np  # used by CNN_Dataset.__getitem__ (np.expand_dims)
import torch
from torch.utils.data import Dataset
''' Define a class to contain the data that will be included in the dataloader
sent to the 3D-CNN '''
class CNN_Dataset(Dataset):
    """Map-style dataset over an HDF5 file of voxelised protein-ligand complexes.

    Each top-level HDF5 key is a PDB id; its dataset holds the 3-D feature
    grid and its 'affinity' attribute is the regression label.
    """
    def __init__(self, hdf_path, feat_dim=22):
        # `feat_dim` records the expected per-voxel feature count; it is
        # stored but not validated here.
        super(CNN_Dataset, self).__init__()
        self.hdf_path = hdf_path
        self.feat_dim = feat_dim
        # The file stays open for the dataset's lifetime; call close() when done.
        self.hdf = h5py.File(self.hdf_path, 'r')
        self.data_info_list = []
        # append PDB id and affinity label to data_info_list
        for pdbid in self.hdf.keys():
            affinity = float(self.hdf[pdbid].attrs['affinity'])
            self.data_info_list.append([pdbid, affinity])
    def close(self):
        # Release the underlying HDF5 handle.
        self.hdf.close()
    def __len__(self):
        return len(self.data_info_list)
    def __getitem__(self, idx):
        pdbid, affinity = self.data_info_list[idx]
        # [:] materialises the HDF5 dataset as an in-memory array.
        data = self.hdf[pdbid][:]
        x = torch.tensor(data)
        # np.expand_dims gives the scalar label shape (1,); requires numpy
        # to be imported as `np` at module scope.
        y = torch.tensor(np.expand_dims(affinity, axis=0))
        return x,y, pdbid
| caiyingchun/HAC-Net | src/CNN/CNN_dataset.py | CNN_dataset.py | py | 900 | python | en | code | null | github-code | 13 |
4762871099 | from colorsys import hls_to_rgb #conversion between different color models
from cmath import phase #cmath also has a lot of cool complex functions you can graph
from math import pi, floor
from PIL import Image, ImageDraw
#The below method translates the native coordinates of an image's pixels
#so that (0, 0) is at the center rather than the top left corner.
#It then negates the y coordinate, so that y increases rather
#than decreases with the height of the pixel.
def image_to_complex_coordinates(x, y, imageWidth, imageHeight):
    """Map pixel coordinates (origin top-left, y pointing down) to math
    coordinates with the origin at the image centre and y pointing up."""
    cx = imageWidth // 2
    cy = imageHeight // 2
    return x - cx, cy - y
def complex_to_hls(z):
    """Encode a complex value as an HLS colour: hue from the argument,
    lightness from the magnitude (0 at |z| = 0, approaching 1 as |z| grows),
    and full saturation."""
    hue = 0.5 + phase(z) / (2 * pi)
    lightness = 1 - 2 ** (-abs(z))
    return hue, lightness, 1
#Graphs the complex function complex_func (which maps complex numbers to complex numbers)
def graph(complex_func, center=0, scale=.01, width=500, height=500):
    """Render a domain-colouring plot of *complex_func* as a PIL Image.

    Each pixel is mapped to a complex input z (shifted by *center*, scaled by
    *scale*); the output w = complex_func(z) is coloured with hue from its
    argument and lightness from its magnitude.  Pixels where the function
    raises are left white, interpreting the failure as "very large there".
    """
    im = Image.new("RGB", (width, height), 'white')
    draw = ImageDraw.Draw(im)
    for x in range(width):
        for y in range(height):
            u, v = image_to_complex_coordinates(x, y, width, height)
            z = u + v*1j
            z = (z-center)*scale
            try:
                w = complex_func(z)
            except (ZeroDivisionError, OverflowError, ValueError):
                #We interpret these errors as complex_func being very large at z.
                #No need to color since the image is white by default
                continue
            h, l, s = complex_to_hls(w)
            w_rgb = hls_to_rgb(h, l, s)
            # Scale the 0-1 RGB floats up to 0-255 ints for PIL.
            w_rgb = tuple(floor(255*t) for t in w_rgb)
            draw.point([x, y], w_rgb)
    return im
#Example code
#from cmath import sin
#graph(sin, scale = .05)
| MattBroe/complex-function-grapher | complex_grapher.py | complex_grapher.py | py | 1,568 | python | en | code | 0 | github-code | 13 |
42834664258 | # Logging tests
import unittest
import calipertest as cat
class CaliperLogTest(unittest.TestCase):
    """Caliper log test cases."""

    def _check_log_targets(self, target_cmd, env, log_targets):
        """Run *target_cmd* under *env* and fail unless every entry of
        *log_targets* appears as a substring of some output line.

        Extracted because the same for/else scan was duplicated verbatim in
        three test methods.
        """
        report_out, _ = cat.run_test(target_cmd, env)
        lines = report_out.decode().splitlines()
        for target in log_targets:
            for line in lines:
                if target in line:
                    break
            else:
                # for/else: no line matched this target.
                self.fail('%s not found in log' % target)

    def test_log_verbose(self):
        """At verbosity 3 the full lifecycle/statistics output must appear."""
        target_cmd = ['./ci_test_basic']
        env = {
            'CALI_LOG_VERBOSITY': '3',
            'CALI_LOG_LOGFILE': 'stdout'
        }
        log_targets = [
            'CALI_LOG_VERBOSITY=3',
            '== CALIPER: Releasing channel default',
            '== CALIPER: Releasing Caliper thread data',
            'Process blackboard',
            'Thread blackboard',
            'Metadata tree',
            'Metadata memory pool'
        ]
        self._check_log_targets(target_cmd, env, log_targets)

    def test_cali_config_error_msg(self):
        """An unknown CALI_CONFIG entry must produce a clear error message."""
        target_cmd = ['./ci_test_basic']
        env = {
            'CALI_LOG_VERBOSITY': '0',
            'CALI_LOG_LOGFILE': 'stdout',
            'CALI_CONFIG': 'blagarbl'
        }
        log_targets = [
            '== CALIPER: CALI_CONFIG: error: Unknown config or parameter: blagarbl'
        ]
        self._check_log_targets(target_cmd, env, log_targets)

    def test_scope_parse_error_msgs(self):
        """An invalid attribute-scope value must produce a clear error message."""
        target_cmd = ['./ci_test_basic']
        env = {
            'CALI_LOG_VERBOSITY': '0',
            'CALI_LOG_LOGFILE': 'stdout',
            'CALI_CALIPER_ATTRIBUTE_DEFAULT_SCOPE': 'bar'
        }
        log_targets = [
            'Invalid value "bar" for CALI_CALIPER_ATTRIBUTE_DEFAULT_SCOPE'
        ]
        self._check_log_targets(target_cmd, env, log_targets)

    def test_log_silent(self):
        """At verbosity 0 with no errors there must be no output at all."""
        target_cmd = ['./ci_test_basic']
        env = {
            'CALI_LOG_VERBOSITY': '0',
            'CALI_LOG_LOGFILE': 'stdout'
        }
        report_out, _ = cat.run_test(target_cmd, env)
        lines = report_out.decode().splitlines()
        self.assertTrue(len(lines) == 0)


if __name__ == "__main__":
    unittest.main()
class Solution_recur:
    """Recursive longest-palindromic-substring solver.

    Memoises results in `record` and keeps the running best in
    `max_palindrome`, which is used to prune branches that cannot win.
    """

    def __init__(self):
        self.record = {}
        self.max_palindrome = ''

    def longestPalindrome(self, s: str) -> str:
        if len(s) < 2 or self.testPalindrome(s):
            # Base case: s itself is a palindrome (or trivially short).
            if len(s) > len(self.max_palindrome):
                self.max_palindrome = s
            self.record[s] = s
            return s
        if len(s) - 1 <= len(self.max_palindrome):
            # No proper substring of s can beat the current best - prune.
            return self.max_palindrome
        # Recurse on both one-shorter substrings and keep the longer answer.
        head = self.record.setdefault(s[:-1], self.longestPalindrome(s[:-1]))
        tail = self.record.setdefault(s[1:], self.longestPalindrome(s[1:]))
        return max(head, tail, key=len)

    def testPalindrome(self, s: str):
        # A string is a palindrome iff it equals its own reverse.
        return s == s[::-1]
class Solution_dummy:
    """Breadth-first longest-palindrome solver.

    Repeatedly trims one character from either end of queued candidates,
    keeping the longest palindrome seen and skipping branches that are
    already too short to beat it.
    """

    def __init__(self):
        self.max_palindrome = ''
        self.queue = []

    def longestPalindrome(self, s: str) -> str:
        if len(s) < 2:
            return s
        self.queue.append(s)
        while self.queue:
            current = self.queue.pop(0)
            if len(current) < 2 or self.testPalindrome(current):
                if len(current) > len(self.max_palindrome):
                    self.max_palindrome = current
            elif len(current) - 1 > len(self.max_palindrome):
                # Only expand branches that could still beat the best so far.
                self.queue.append(current[:-1])
                self.queue.append(current[1:])
        return self.max_palindrome

    def testPalindrome(self, s: str):
        # A string is a palindrome iff it equals its own reverse.
        return s == s[::-1]
class Solution_dp:
    """Dynamic-programming solver: dp[l][r] records whether s[l:r+1] is a
    palindrome, filled from the rightmost start index leftwards so inner
    spans are always available before their enclosing spans."""

    def longestPalindrome(self, s: str) -> str:
        n = len(s)
        if n < 2:
            return s
        dp = [[False] * n for _ in range(n)]
        best_l = best_r = 0  # bounds of the longest palindrome found so far
        for offset in range(n):
            left = n - 1 - offset
            for right in range(left, n):
                if left == right:
                    # Single character: trivially a palindrome.
                    dp[left][right] = True
                elif s[left] == s[right] and (right - left == 1
                                              or dp[left + 1][right - 1]):
                    # Matching ends around an (empty or palindromic) interior.
                    dp[left][right] = True
                if dp[left][right] and right - left > best_r - best_l:
                    best_l, best_r = left, right
        return s[best_l:best_r + 1]
class Solution_expand:
    """Expand-around-center solver: for each index try both an odd-length
    and an even-length center and keep the widest expansion found."""

    def longestPalindrome(self, s: str) -> str:
        if len(s) < 2:
            return s
        best = ''
        for center in range(len(s) - 1):
            # Odd-length candidate first, then even-length; earlier finds
            # win ties, matching first-found semantics.
            for candidate in (self.expand_check(s, center, center),
                              self.expand_check(s, center, center + 1)):
                if len(candidate) > len(best):
                    best = candidate
        return best

    def expand_check(self, s, left, right):
        """Grow [left, right] outwards while the end characters match and
        return the palindromic slice (empty if the initial pair differs)."""
        n = len(s)
        while s[left] == s[right]:
            left, right = left - 1, right + 1
            if left < 0 or right >= n:
                break
        return s[left + 1:right]
# Quick manual smoke-test of the expand-around-center solver.
s=Solution_expand()
tests=["babad","","cbbd","abcdcb","babaddtattarrattatddetartrateedredividerb"]
for test in tests:
    print(s.longestPalindrome(test))
39751079792 | from typing import Dict, Union
# Third Party Imports
from sqlalchemy import Column, Integer, String
# RAMSTK Local Imports
from .. import RAMSTK_BASE
from .baserecord import RAMSTKBaseRecord
class RAMSTKMethodRecord(RAMSTK_BASE, RAMSTKBaseRecord): # type: ignore
    """Class to represent ramstk_method in the RAMSTK Common database."""

    # Fallback values used when a record is created without explicit data.
    __defaults__ = {
        "description": "Method Description",
        "method_type": "unknown",
        "name": "Method Name",
    }
    __tablename__ = "ramstk_method"
    __table_args__ = {"extend_existing": True}

    # Surrogate primary key for the method record.
    method_id = Column(
        "fld_method_id",
        Integer,
        primary_key=True,
        autoincrement=True,
        nullable=False,
    )

    name = Column("fld_name", String(256), default=__defaults__["name"])
    description = Column(
        "fld_description", String(512), default=__defaults__["description"]
    )
    method_type = Column(
        "fld_method_type", String(256), default=__defaults__["method_type"]
    )

    def get_attributes(self) -> Dict[str, Union[int, str]]:
        """Retrieve current values of the RAMSTKMethod data model attributes.

        :return: {method_id, name, description, method_type} pairs.
        :rtype: dict
        """
        return {
            "method_id": self.method_id,
            "name": self.name,
            "description": self.description,
            "method_type": self.method_type,
        }
| ReliaQualAssociates/ramstk | src/ramstk/models/dbrecords/commondb_method_record.py | commondb_method_record.py | py | 1,432 | python | en | code | 34 | github-code | 13 |
13039855737 | import os
import shutil
import csv
def organizeFolderGAPED(original, pos, neg, neut):
# Copies each image in the GAPED database to the corresponding folder
# Make a dictionary of file names to valence
dict = {}
files = os.listdir(original)
for file in files:
if '.txt' in file and 'SD' not in file and 'readme' not in file:
with open(os.path.join(original, file), 'r') as f:
for l in f:
l = l.split()
dict[l[0][:-4]] = l[1]
# Walk through the images and categorize files as pos/neg/neut according to valence
for roots, dirs, files, in os.walk(original):
for file in files:
if '.bmp' in file:
if float(dict[file[:-4]]) < 40:
shutil.copy(os.path.join(roots, file), neg)
elif float(dict[file[:-4]]) > 60:
shutil.copy(os.path.join(roots, file), pos)
else:
shutil.copy(os.path.join(roots, file), neut)
def organizeFolderOASIS(original, pos, neg, neut):
# Copies each image in the GAPED database to the corresponding folder
# Make a dictionary of file names to valence
dict = {}
with open(os.path.join(original, 'OASIS.csv'), 'r') as file:
reader = csv.reader(file)
for row in reader:
dict[row[1].strip()] = row[4]
# Walk through the images and categorize files as pos/neg/neut according to normalized valence
for roots, dirs, files, in os.walk(original):
for file in files:
if '.jpg' in file:
if (float(dict[file[:-4]])-1)*100/6 < 40:
shutil.copy(os.path.join(roots, file), neg)
elif (float(dict[file[:-4]])-1)*100/6 > 60:
shutil.copy(os.path.join(roots, file), pos)
else:
shutil.copy(os.path.join(roots, file), neut)
if __name__ == '__main__' :
    # Resolve dataset and output folders relative to the working directory.
    project_path = os.getcwd()
    gaped = os.path.join(project_path, 'GAPED/GAPED')
    oasis = os.path.join(project_path,'oasis')
    pos = os.path.join(project_path, 'Positive')
    neg = os.path.join(project_path, 'Negative')
    neut = os.path.join(project_path, 'Neutral')
    # NOTE(review): the Positive/Negative/Neutral folders are assumed to
    # already exist; if one is missing, shutil.copy would write a file with
    # that folder's name instead - confirm they are pre-created.
    organizeFolderOASIS(oasis, pos, neg, neut)
    organizeFolderGAPED(gaped, pos, neg, neut)
| harrysha1029/organize_images_GAPED_OASIS | organize.py | organize.py | py | 2,241 | python | en | code | 3 | github-code | 13 |
34421041078 | # coding=utf-8
"""Tools used for solving the Day 16: Proboscidea Volcanium puzzle."""
# Standard library imports:
import itertools
import re
from typing import Iterable
# Third party imports:
from aoc_tools.algorithms.a_star_search import Node, a_star_search
class Room(Node):
    """Location within the volcano's tunnel network with a single Valve."""
    __slots__ = ["_name", "_neighbours_map"]
    def __init__(self, name: str, neighbours_map: dict[str, list[str]],
                 parent: "Room" = None):
        super().__init__(parent=parent)
        self._name = name
        self._neighbours_map = neighbours_map
    @property
    def id(self) -> str:
        """Provide a string identifier unique to this Room."""
        return self._name
    @property
    def g(self) -> int:
        """Compute the cost for reaching this Room from the search start point."""
        # Each tunnel hop costs 1 minute; the search root costs 0.
        # NOTE(review): `_parent` / `parent` come from the Node base class.
        return 0 if self._parent is None else self.parent.g + 1
    @property
    def h(self) -> int:
        """Estimate the cost for reaching the search goal from this Room."""
        # No heuristic is attempted, so A* degenerates to Dijkstra here.
        return 0
    def get_successors(self) -> Iterable["Room"]:
        """List all nodes to search that are directly reachable from this Room."""
        adjacent_rooms = self._neighbours_map[self._name]
        for s_name in adjacent_rooms:
            yield Room(
                name=s_name, parent=self, neighbours_map=self._neighbours_map)
class TunnelNetwork:
    """Interconnected locations inside a volcano, each hosting a single valve."""
    __slots__ = ["_neighbours_map", "_flow_rates_map", "_travel_map"]
    def __init__(self, neighbours_map: dict[str, list[str]],
                 flow_rates_map: dict[str, int]):
        # Tunnel adjacency list and per-location valve flow rates.
        self._neighbours_map = neighbours_map
        self._flow_rates_map = flow_rates_map
        # Pre-compute an all-pairs shortest-travel-time table once up front.
        self._travel_map = self._build_travel_map(locations=list(neighbours_map.keys()))
    def _build_travel_map(self, locations: list[str]) -> dict[str, dict[str, int]]:
        """Register the lowest travel time between all pairs of locations."""
        times_map = {from_: {to_: None for to_ in locations} for from_ in locations}
        for from_ in locations:
            for to_ in locations:
                if from_ == to_:
                    # No travel required:
                    times_map[from_][to_] = 0
                elif times_map[from_][to_] is None and times_map[to_][from_] is None:
                    # Compute the travel time for an unseen pair:
                    travel_time = self._find_shortest_travel(from_=from_, to_=to_)
                    times_map[from_][to_] = travel_time
                    times_map[to_][from_] = travel_time
                else:
                    # Update the travel time for an already seen pair:
                    # (`or` is safe: off-diagonal travel times are always > 0)
                    travel_time = times_map[from_][to_] or times_map[to_][from_]
                    times_map[from_][to_] = travel_time
                    times_map[to_][from_] = travel_time
        return times_map
    def _find_shortest_travel(self, from_: str, to_: str) -> int:
        """Find the shortest travel time between locations using A* search."""
        start = Room(name=from_, parent=None, neighbours_map=self._neighbours_map)
        goal_room = a_star_search(start=start, goal_func=lambda node: node.id == to_)
        # g of the goal node is the number of hops, i.e. minutes of travel.
        return goal_room.g
    @property
    def relevant_locations(self) -> list[str]:
        """List all locations known by this TunnelNetwork with non-damaged valves."""
        # Valves with zero flow can never contribute pressure release.
        return list(loc for loc, fr in self._flow_rates_map.items() if fr > 0)
    def get_travel(self, from_: str, to_: str) -> int:
        """Time to reach one location, starting at another location."""
        return self._travel_map[from_][to_]
    def get_flow(self, location: str) -> int:
        """Provide the flow rate of the pressure-release valve at a given location."""
        return self._flow_rates_map[location]
    @classmethod
    def from_scan_report(cls, scan_report: list[str]) -> "TunnelNetwork":
        """Create a new TunnelNetwork from a scan report of valves and tunnels."""
        rx = r"^.+(?P<name>[A-Z]{2}) .+=(?P<fr>\d+); .+valves? (?P<neighbours>.+)$"
        neighbours_map = {}
        flow_rates_map = {}
        for scan_line in scan_report:
            name, flow_rate, neighbours = re.match(pattern=rx, string=scan_line).groups()
            neighbours_map.update({name: neighbours.split(", ")})
            flow_rates_map.update({name: int(flow_rate)})
        return cls(neighbours_map=neighbours_map, flow_rates_map=flow_rates_map)
class Valve(Node):
    """Mechanical device able to release pressure from the volcano's pipe network."""
    __slots__ = ["_name", "_open_time", "_network", "output"]
    def __init__(self, name: str, open_time: int, network: TunnelNetwork,
                 parent: "Valve" = None):
        super().__init__(parent=parent)
        self._name = name
        # Minutes this valve will stay open; clamped at 0 if time ran out.
        self._open_time = max(0, open_time)
        self._network = network
        # Total pressure this valve releases over its remaining open time.
        self.output = self._open_time * network.get_flow(location=name)
    @property
    def id(self) -> str:
        """Provide a string identifier unique to this Valve."""
        return self._name
    @property
    def g(self) -> int:
        """Compute the cost for reaching this Valve from the search start point."""
        # Costs are negated releases, so minimising cost maximises release.
        return (0 if self._parent is None else self.parent.g) - self.output
    @property
    def h(self) -> int:
        """Estimate the cost for reaching the search goal from this Valve."""
        # Credit every remaining valve with its best-case open time; this
        # over-estimates the achievable release (i.e. under-estimates cost).
        total = 0
        for location in self._remaining_locations():
            travel_time = self._network.get_travel(from_=self._name, to_=location)
            open_time = max(0, self._open_time - travel_time - 1)
            flow_rate = self._network.get_flow(location=location)
            total -= open_time * flow_rate
        return total
    def get_successors(self) -> Iterable["Node"]:
        """List all nodes to search that are directly reachable from this Valve."""
        for s_name in self._remaining_locations():
            travel_time = self._network.get_travel(from_=self._name, to_=s_name)
            # -1 accounts for the minute spent opening the valve on arrival.
            s_valve = Valve(
                name=s_name, open_time=self._open_time - travel_time - 1,
                parent=self, network=self._network)
            if s_valve.output > 0:
                yield s_valve
    def _remaining_locations(self) -> list[str]:
        """List all locations not included in this Valve's lineage."""
        all_locations = self._network.relevant_locations
        lineage_locations = [step.id for step in self.lineage]
        return [n for n in all_locations if n not in lineage_locations]
class DoubleValve(Node):
    """Pair of Valve devices operated by you and one of the elephants."""
    __slots__ = ["_name_1", "_name_2", "_open_time_1", "_open_time_2",
                 "_network", "output"]
    def __init__(self, name_1: str, name_2: str, open_time_1: int, open_time_2: int,
                 network: TunnelNetwork, parent: "DoubleValve" = None):
        super().__init__(parent=parent)
        self._name_1 = name_1
        self._name_2 = name_2
        # Remaining open minutes for each actor's valve, clamped at 0.
        self._open_time_1 = max(0, open_time_1)
        self._open_time_2 = max(0, open_time_2)
        self._network = network
        # Combined pressure released by both opened valves.
        output_1 = self._open_time_1 * network.get_flow(location=name_1)
        output_2 = self._open_time_2 * network.get_flow(location=name_2)
        self.output = output_1 + output_2
    @property
    def id(self) -> str:
        """Provide a string identifier unique to this DoubleValve."""
        return "-".join([self._name_1, self._name_2])
    @property
    def g(self) -> int:
        """Compute the cost for reaching this DoubleValve from the search start point."""
        # Costs are negated releases, so minimising cost maximises release.
        return (0 if self._parent is None else self._parent.g) - self.output
    @property
    def h(self) -> int:
        """Estimate the cost for reaching the search goal from this DoubleValve."""
        # Credit each remaining valve with the better of the two actors'
        # best-case open times - an optimistic (cost under-) estimate.
        total = 0
        for location in self._remaining_locations():
            travel_time_1 = self._network.get_travel(from_=self._name_1, to_=location)
            travel_time_2 = self._network.get_travel(from_=self._name_2, to_=location)
            open_time_1 = max(0, self._open_time_1 - travel_time_1 - 1)
            open_time_2 = max(0, self._open_time_2 - travel_time_2 - 1)
            flow_rate = self._network.get_flow(location=location)
            open_time = max(open_time_1, open_time_2)
            total -= open_time * flow_rate
        return total
    def get_successors(self) -> Iterable["Node"]:
        """List all nodes to search that are directly reachable from this DoubleValve."""
        # NOTE(review): successors always move BOTH actors to distinct new
        # valves; states where one actor idles are never generated - confirm
        # that is intended for end-game positions with one valve left.
        remaining_loc = self._remaining_locations()
        for s_name_1, s_name_2 in itertools.product(remaining_loc, remaining_loc):
            if s_name_1 == s_name_2:
                continue
            travel_time_1 = self._network.get_travel(from_=self._name_1, to_=s_name_1)
            travel_time_2 = self._network.get_travel(from_=self._name_2, to_=s_name_2)
            # -1 per actor accounts for the minute spent opening the valve.
            s_double_valve = DoubleValve(
                name_1=s_name_1, open_time_1=self._open_time_1 - travel_time_1 - 1,
                name_2=s_name_2, open_time_2=self._open_time_2 - travel_time_2 - 1,
                parent=self, network=self._network)
            if s_double_valve.output > 0:
                yield s_double_valve
    def _remaining_locations(self) -> list[str]:
        """List all locations not included in this DoubleValve's lineage."""
        all_locations = self._network.relevant_locations
        ids = [step.id for step in self.lineage]
        # Each lineage id is "NAME1-NAME2"; split to recover both locations.
        lineage_locations = list(itertools.chain(*[id_.split("-") for id_ in ids]))
        return [n for n in all_locations if n not in lineage_locations]
class ValveSim:
    """App able to find the best valve-opening strategy for maximum pressure release."""

    def __init__(self, network: TunnelNetwork, total_time: int):
        """Store the tunnel network and the number of minutes available."""
        self._network = network
        self._total_time = total_time

    def find_max_release(self) -> int:
        """Compute the total pressure released by applying an optimized opening plan."""
        start = Valve(
            name="AA", open_time=self._total_time, parent=None, network=self._network)
        # Search until no valve worth opening remains; node costs are negated
        # releases, so the minimal-cost goal is the maximal total release.
        goal_valve = a_star_search(
            start=start, goal_func=lambda node: len(list(node.get_successors())) == 0)
        return -goal_valve.g

    def find_max_release_with_help(self) -> int:
        """Compute the total pressure released if you have help from one elephant."""
        start = DoubleValve(
            name_1="AA", open_time_1=self._total_time, parent=None,
            name_2="AA", open_time_2=self._total_time, network=self._network)
        goal_valve = a_star_search(
            start=start, goal_func=lambda node: len(list(node.get_successors())) == 0)
        return -goal_valve.g

    @classmethod
    def from_scan_report(cls, scan_report: list[str], total_time: int) -> "ValveSim":
        """Create a new ValveSim from a scan report of valves and tunnels.

        Parsing of the report is delegated entirely to
        TunnelNetwork.from_scan_report; the previous local regex re-parse
        built an unused ``flows_map`` and has been removed as dead code.
        """
        network = TunnelNetwork.from_scan_report(scan_report=scan_report)
        return cls(network=network, total_time=total_time)
| JaviLunes/AdventCode2022 | src/aoc2022/day_16/tools.py | tools.py | py | 11,399 | python | en | code | 0 | github-code | 13 |
3710607553 | import numpy as np
import tensorflow as tf
DEBUG = True
def reshape_in(x):
    """Fold the crop axis into the batch axis before feeding the model.

    Input is (batch, ncrops, timesteps, features); the returned tensor is
    (batch * ncrops, timesteps, features), together with the original batch
    size and crop count so the caller can undo the merge afterwards.
    """
    batch, crops, steps, features = x.shape
    if DEBUG:
        tf.print("\nModelMultiCrop inputs = ", batch, crops, steps, features)
    flat = tf.reshape(x, (-1, steps, features))  # (batch * crops, steps, features)
    if DEBUG:
        tf.print("inputs reshape = ", flat.shape)
    return flat, batch, crops
def reshape_out(x,bs,ncrops):
    '''
    Undo reshape_in's crop folding on the model's per-segment scores:
    recover the crop axis, average the scores across crops, and restore the
    trailing singleton channel so the result is (bs, 32, 1).

    NOTE(review): debug output here uses plain print(), while reshape_in
    uses tf.print(); inside a tf.function these print() calls only run at
    trace time - confirm which behaviour is intended.
    '''
    if DEBUG: print("scores = ", x.shape) ## (bs * ncrops , 32 ,1)
    ## get scores for each crop
    x = tf.reshape(x, (bs, ncrops, -1)) ## ( bs , ncrops , 32)
    if DEBUG: print("scores reshape = ", x.shape)
    ##########################################
    ## mean across the ncrops
    x = tf.reduce_mean(x, axis=1) ## (bs , 32)
    if DEBUG: print("scores mean = ", x.shape)
    ##########################################
    x = tf.expand_dims(x, axis=2) ## (bs , 32 , 1)
    if DEBUG: print("scores final = ", x.shape , x.dtype)
    return x
## with generator
def train_gen(model, normal_loader, abnormal_loader, num_iterations, optimizer, loss_obj, ncrops):
    '''
    Train `model` for up to `num_iterations` steps, pairing one normal batch
    with one abnormal batch per step (concatenated normal-first).

    if features are divided in ncrops
    the input data need to be reshaped into ( bs * ncrops , ts , features) before feed to model

    Returns the list of per-step loss tensors.
    '''
    losses = []
    for i, (normal_in, abnormal_in) in enumerate(zip(normal_loader, abnormal_loader)):
        if i >= num_iterations: break
        data_in = tf.concat((normal_in, abnormal_in), axis=0)
        # NOTE(review): `ncrops` doubles as an on/off flag here and is then
        # overwritten with the actual crop count by reshape_in - confirm
        # callers never rely on the argument value afterwards.
        if ncrops: data_in , bs , ncrops = reshape_in(data_in)
        with tf.GradientTape() as tape:
            scores = model(data_in)
            if ncrops: scores = reshape_out(scores, bs , ncrops)
            # Target is all-zeros; the loss object defines the MIL objective.
            loss = loss_obj(tf.zeros_like(scores), scores)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        losses.append(loss)
        if i % 10 == 0:
            print(f"\nTRAIN_GEN{i}\nnormal_in abnormal_in : {normal_in.shape}{normal_in.dtype} {abnormal_in.shape}{abnormal_in.dtype}")
            print(f"data_in : {data_in.shape}")
            print("train_step scores",np.shape(scores))
            print(f'Iteration {i}: Loss {loss:.4f}\n')
    return losses
## with tf.dataset
def train_tfdata(model, normal_dataset, abnormal_dataset, num_iterations, optimizer, loss_obj, batch_size):
    """Train `model` for `num_iterations` steps, sampling `batch_size` normal
    and `batch_size` abnormal examples per step (without replacement within a
    step).  Returns the list of per-step loss tensors.
    """
    losses = []
    for i in range(num_iterations):
        # Fresh random index draw each iteration (replacement across steps).
        normal_indices = np.random.choice(len(normal_dataset), size=batch_size, replace=False)
        abnormal_indices = np.random.choice(len(abnormal_dataset), size=batch_size, replace=False)
        normal_features = np.stack([normal_dataset[j] for j in normal_indices])
        abnormal_features = np.stack([abnormal_dataset[j] for j in abnormal_indices])
        normal_in = tf.convert_to_tensor(normal_features, dtype=tf.float32)
        abnormal_in = tf.convert_to_tensor(abnormal_features, dtype=tf.float32)
        # Normal examples first, abnormal second, as the loss layout expects.
        data_in = tf.concat((normal_in, abnormal_in), axis=0)
        with tf.GradientTape() as tape:
            scores = model(data_in)
            loss = loss_obj(tf.zeros_like(scores), scores)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        losses.append(loss)
        if i % 10 == 0:
            print(f"\n{i}\nnormal_in abnormal_in : {normal_in.shape} {abnormal_in.shape}")
            print(f"data_in : {data_in.shape}")
            print("train_step scores",np.shape(scores))
            print(f'Iteration {i}: Loss {loss:.4f}')
    return losses
14382415898 | # View Pulls Data from models.py during runtime and then process/calculate it and then it send that calculated data to a template.
from django.conf import settings
from django.contrib import messages
from django.core.mail import send_mail # -->send_mail is a function that allows us to send an E-mail with the respective settings.
from django.shortcuts import redirect, render
from django.contrib.auth import login,logout, authenticate # --> This django module is used for authentication.
from .models import Newsletter
# Create your views here.
def newsletter(request):
    """Handle newsletter sign-ups; always renders the landing page.

    On POST, stores the submitted address and sends a confirmation e-mail
    (fail_silently=False, so SMTP failures raise).
    NOTE(review): the address is saved without de-duplication or format
    validation, and the subject/body strings contain typos ("Sinup",
    "sining") - they are runtime strings, so fixing them changes behaviour.
    """
    if request.method=="POST":
        email = request.POST.get("email")
        emaillist = Newsletter(email=email)
        emaillist.save()
        # send_mail(subject, message, from_email, to_list, fail_silently=True/False)
        subject = 'Your Sinup is Completed'
        message = 'Thankyou you for sining up to our Newsletter :)'
        from_email = settings.EMAIL_HOST_USER
        to_list = [emaillist.email]
        send_mail(subject,message,from_email,to_list, fail_silently=False)
        return render(request, 'index.html') #At this stage we are sending data to the template named index.html
    return render(request, 'index.html')
def loginuser(request):
    """Dual-purpose view: authenticate a user, or mass-mail all subscribers.

    POST containing a "username" field -> credential check; success renders
    send.html, failure re-renders login.html.
    POST without "username" -> treated as the send-mail form: e-mails every
    address stored in Newsletter.
    NOTE(review): the mass-send branch never checks that request.user is
    authenticated - confirm access control is enforced elsewhere.
    Any other method falls through to login.html.
    """
    if request.method=="POST":
        if "username" in request.POST:
            # If field "username" exists
            username = request.POST.get('username')
            password = request.POST.get('password')
            # print(username, password)
            # check if user has entered correct credentials
            user = authenticate(username=username, password=password)
            # print(user)
            if user is not None:
                # A backend authenticated the credentials
                login(request, user)
                return render(request,'send.html')
            else:
                # No backend authenticated the credentials
                return render(request, 'login.html')
        else:
            # If "username" does not exist in request.POST
            emaillist = Newsletter.objects.all()
            print(emaillist)
            subject = request.POST.get("subject")
            message = request.POST.get("message")
            from_email = settings.EMAIL_HOST_USER
            # Flat list of every subscriber address; all go in one send_mail
            # call (every recipient sees the full To: list).
            to_list = emaillist.values_list("email", flat = True)
            to_list = list(to_list)
            print(subject, message, from_email, to_list)
            send_mail(subject,message,from_email,to_list, fail_silently=False)
            return render(request, 'send.html')
            # return render(request, 'send.html')
    return render(request, 'login.html')
| Bhupendrachouhan19/Mass_Emailing | sendemail/views.py | views.py | py | 2,725 | python | en | code | 1 | github-code | 13 |
13490202276 | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import optparse
import random
from numpy import inf
import time
import matplotlib.pyplot as plt
from heapq import * #priorityqueue
import math
from copy import deepcopy
# we need to import python modules from the $SUMO_HOME/tools directory
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
from sumolib import checkBinary
import traci #To interface with SUMO simulations
import sumolib #To query node/edge stuff about the network
import pickle #To save/load traffic light states
isSmart = dict(); #Store whether each vehicle does our routing or not
pSmart = 1.0; #Adoption probability
carsOnNetwork = []; # Per-timestep count of vehicles on the network (for plotting)
max_edge_speed = 0.0; # Fastest edge speed seen; set by generate_additionalfile
hmetadict = dict() # NOTE(review): never written in this file - possibly vestigial
oldids = dict() # detector id -> vehicle ids seen last step (dedup rerouting)
clusterthresh = 5 #Time between cars before we split to separate clusters
mingap = 2.5 #Minimum allowed space between cars
clusters = dict() # lane id -> ordered list of vehicle clusters (queue model)
timedata = dict() # vehicle id -> [depart time, arrive time, predicted travel time]
def run(netfile, rerouters):
    #netfile is the filepath to the network file, so we can call sumolib to get successors
    #rerouters is the list of induction loops on edges with multiple successor edges
    #We want to reroute all the cars that passed some induction loop in rerouters using A*
    """Execute the TraCI control loop.

    Steps the "main" simulation until no vehicles remain, assigning each new
    vehicle to smart/default routing with probability pSmart, rerouting smart
    vehicles via the detectors, and finally saving a congestion plot.
    """
    network = sumolib.net.readNet(netfile)
    dontBreakEverything() #Run test simulation for a step to avoid it overwriting the main one or something??
    while traci.simulation.getMinExpectedNumber() > 0:
        traci.simulationStep() #Tell the simulator to simulate the next time step
        #Decide whether new vehicles use our routing
        for vehicle in traci.simulation.getDepartedIDList():
            isSmart[vehicle] = random.random() < pSmart
            # [depart time, arrival time (filled later), predicted time (filled later)]
            timedata[vehicle] = [traci.simulation.getTime(), -1, -1]
        for vehicle in traci.simulation.getArrivedIDList():
            timedata[vehicle][1] = traci.simulation.getTime()
            # Compare measured travel time against the cluster-sim prediction.
            print("Actual minus expected:")
            print( (timedata[vehicle][1]-timedata[vehicle][0]) - timedata[vehicle][2])
        reroute(rerouters, network, True) #Reroute cars (including simulate-ahead cars)
        carsOnNetwork.append(len(traci.vehicle.getIDList())) #Track number of cars on network (for plotting)
    #After we're done simulating...
    plt.figure()
    plt.plot(carsOnNetwork)
    plt.xlabel("Time (s)")
    plt.ylabel("Cars on Network")
    plt.title("Congestion, Adoption Prob=" + str(pSmart))
    #plt.show() #NOTE: Blocks code execution until you close the plot
    plt.savefig("Plots/Congestion, AP=" + str(pSmart)+".png")
def reroute(rerouters, network, rerouteAuto=True):
    """Ask every induction-loop detector to reroute the vehicles it has seen.

    Set the local flag below to False to fall back to SUMO's default routing.
    """
    use_astar = True
    if not use_astar:
        return
    for detector in rerouters:
        QueueReroute(detector, network, rerouteAuto)
        #BFReroute(detector, network, rerouteAuto)
def QueueReroute(detector, network, rerouteAuto=True):
    """Reroute (via the queue-based look-ahead sim) every smart vehicle that
    just crossed `detector`. With rerouteAuto=True, vehicles already seen at
    this detector last step are skipped via `oldids`.
    """
    ids = traci.inductionloop.getLastStepVehicleIDs(detector) #All vehicles to be rerouted
    if len(ids) == 0:
        #No cars to route, we're done here
        return
    # getRoadID: Returns the edge id the vehicle was last on
    # (all vehicles at one detector share the same edge)
    edge = traci.vehicle.getRoadID(ids[0])
    for vehicle in ids:
        if rerouteAuto and detector in oldids and vehicle in oldids[detector]:
            #print("Duplicate car " + vehicle + " at detector " + detector)
            continue
        #Decide whether we route this vehicle
        if not vehicle in isSmart and rerouteAuto:
            print("Oops, don't know " + vehicle)
            isSmart[vehicle] = random.random() < pSmart
        if rerouteAuto and isSmart[vehicle]: #and detector[0:5]=="IL_in":
            #tstart = time.time()
            saveStateInfo(edge) #Saves the traffic state and traffic light timings
            #Swap to test sim, load current state
            traci.switch("test")
            #tstart = time.time()
            # Predicted seconds until this vehicle reaches its destination.
            tcluster = doClusterSim(edge, network, vehicle)
            #print(traci.vehicle.getRoute(vehicle))
            #print(edge)
            #print(tcluster)
            # Record the prediction only when made at the network entrance.
            if edge == "start":
                timedata[vehicle][2] = tcluster
            traci.switch("main")
def doClusterSim(prevedge, net, vehicle):
    """Fast-forward the cluster model until `vehicle` finishes its route.

    Loads the saved traffic state for `prevedge`, then steps the queue model
    in half-second increments; returns the simulated seconds elapsed.
    """
    loadClusters(prevedge, net)
    starttime = traci.simulation.getTime()
    simtime = starttime
    while stepClusters(net, simtime, vehicle) != "DONE":
        simtime += 0.5
    return simtime - starttime
def loadClusters(prevedge, net):
    """Rebuild the global `clusters` structure from the saved traffic state.

    Each lane maps to an ordered list of clusters (front of edge first); a
    cluster is a dict with keys "pos", "time", "arrival", "departure", and
    "cars" (list of (vehicle, departure, tag) tuples). Vehicles closer than
    `clusterthresh` seconds (in gap and in free-flow travel time) join the
    previous cluster; otherwise a new cluster starts.
    """
    loadStateInfo(prevedge, net)
    #Test clusters
    #Cluster data structures
    #print("Start load clusters")
    for edge in traci.edge.getIDList():
        if edge[0] == ":":
            #Skip internal edges (=edges for the inside of each intersection)
            continue
        for lanenum in range(traci.edge.getLaneNumber(edge)):
            lane = edge + "_" + str(lanenum)
            clusters[lane] = []
            for vehicle in reversed(traci.lane.getLastStepVehicleIDs(lane)): #Reversed so we go from end of edge to start of edge - first clusters to leave are listed first
                #Process vehicle into cluster somehow
                #If nearby cluster, add to cluster in sorted order (could probably process in sorted order)
                if len(clusters[lane]) > 0 and abs(clusters[lane][-1]["time"] - traci.simulation.getTime()) < clusterthresh and abs(clusters[lane][-1]["pos"] - traci.vehicle.getLanePosition(vehicle))/net.getEdge(edge).getSpeed() < clusterthresh:
                    #Add to cluster. pos and time track newest added vehicle to see if the next vehicle merges
                    #Departure time (=time to fully clear cluster) increases, arrival doesn't
                    clusters[lane][-1]["pos"] = traci.vehicle.getLanePosition(vehicle)
                    clusters[lane][-1]["time"] = traci.simulation.getTime()
                    clusters[lane][-1]["departure"] = traci.simulation.getTime() + (traci.lane.getLength(lane)-clusters[lane][-1]["pos"])/net.getEdge(edge).getSpeed()
                    clusters[lane][-1]["cars"].append((vehicle, clusters[lane][-1]["departure"], "Load append"))
                else:
                    #Else make a new cluster
                    newcluster = dict()
                    newcluster["pos"] = traci.vehicle.getLanePosition(vehicle)
                    newcluster["time"] = traci.simulation.getTime()
                    # NOTE(review): uses lane 0's length here but `lane`'s length in
                    # the merge branch above - confirm whether lanes can differ in length.
                    newcluster["arrival"] = traci.simulation.getTime() + (traci.lane.getLength(edge+"_0")-newcluster["pos"])/net.getEdge(edge).getSpeed()
                    newcluster["departure"] = newcluster["arrival"]
                    newcluster["cars"] = [(vehicle, newcluster["departure"], "Load new")]
                    clusters[lane].append(newcluster)
def stepClusters(net, time, vehicleOfInterest):
    """Advance the queue/cluster model one tick.

    Moves every car whose scheduled departure has passed from its current
    lane cluster onto a cluster of the next edge on its route (zipper-style
    merging; traffic-light handling is still a stub that only prints).
    Returns "DONE" as soon as `vehicleOfInterest` reaches the last edge of
    its route, else "NOT DONE". Mutates the module-level `clusters` and
    `edgelist` (edge priority rotation).
    """
    #print("Start stepClusters")
    #print(time)
    #print(vehicleOfInterest)
    #print(clusters)
    #Sanity check for debugging infinite loops where the vehicle of interest disappears
    notEmpty = False
    for thing in clusters:
        for thingnum in range(len(clusters[thing])):
            for testcartuple in clusters[thing][thingnum]["cars"]:
                if testcartuple[0] == vehicleOfInterest:
                    notEmpty = True
                    break
    if not notEmpty:
        raise Exception("AAAAAAHHHHHH!!!!!!! Can't find vehicle of interest!")
    reflist = deepcopy(edgelist) #Want to reorder edge list to handle priority stuff, but don't want to mess up the for loop indexing
    for edge in reflist:
        if edge[0] == ":":
            #Skip internal edges (=edges for the inside of each intersection)
            continue
        for lanenum in range(traci.edge.getLaneNumber(edge)):
            lane = edge + "_" + str(lanenum)
            for cluster in clusters[lane]:
                if cluster["arrival"] > time:
                    #This and future clusters don't arrive yet, done on this edge
                    break
                cartuple = cluster["cars"][0]
                # Pop cars off the front of the cluster while their scheduled
                # departure time has passed.
                while cartuple[1] < time:
                    #Check if route is done; if so, stop
                    if traci.vehicle.getRoute(cartuple[0])[-1] == edge:
                        #Check if we're done simulating
                        if cartuple[0] == vehicleOfInterest:
                            return "DONE"
                        cluster["cars"].pop(0) #Remove car from this edge
                        if len(cluster["cars"]) == 0:
                            clusters[lane].pop(0) #Entire cluster is done, remove it
                            break
                        cartuple = cluster["cars"][0] #If we got here, there's still stuff in the cluster
                        continue #Move on to the next car
                    #TODO: Add car to next edge. NOTE: Enforce merging collision etc. constraints here
                    node = net.getEdge(edge).getToNode()
                    #print(node.getID()) #Matches the IDs on the traffic light list
                    #print(node.getType()) #zipper #traffic_light_right_on_red #dead_end
                    #print(traci.trafficlight.getIDList())
                    #https://sumo.dlr.de/docs/TraCI/Traffic_Lights_Value_Retrieval.html
                    #If light, look up phase, decide who gets to go, merge foe streams somehow
                    #Or just separate left turn phases or something? Would mean no need to merge
                    #If no light, zipper somehow
                    if node.getID() in traci.trafficlight.getIDList():
                        #First pass: Figure out priority
                        #Second pass: Go through in priority order, shortcut when possible
                        #Then flip priority if zipper
                        # NOTE(review): traffic-light handling is unimplemented; cars at a
                        # light never advance here, which can trip the break-on-stall below.
                        print("I'm a traffic light!")
                        print(traci.trafficlight.getControlledLanes(node.getID()))
                        print(traci.trafficlight.getControlledLinks(node.getID()))
                        print(traci.trafficlight.getCompleteRedYellowGreenDefinition(node.getID()))
                        #traci.trafficlight.getRedYellowGreenState
                        #Look up current road, next road, check if appropriate link is green
                        #If G green, add to next queue
                        #If g green, make sure no G green, then add to queue (second pass? Buffer variable?)
                        #This might be a problem later if I make too many assumptions about lights, but I'll ignore for now
                        #Else wait
                    else:
                        #Assume zipper
                        route = traci.vehicle.getRoute(cartuple[0])
                        nextedge = route[route.index(edge)+1]
                        # Try each lane of the next edge until the car fits somewhere.
                        for nextlanenum in range(traci.edge.getLaneNumber(nextedge)):
                            nextlane = nextedge+"_"+str(nextlanenum)
                            if len(clusters[nextlane]) > 0 and abs(clusters[nextlane][-1]["time"] - time) < clusterthresh and abs(clusters[nextlane][-1]["pos"])/net.getEdge(nextedge).getSpeed() < clusterthresh:
                                #Make sure time isn't too close
                                if not abs(clusters[nextlane][-1]["time"] - time) < mingap:
                                    #Add to cluster. pos and time track newest added vehicle to see if the next vehicle merges
                                    #Departure time (=time to fully clear cluster) increases, arrival doesn't
                                    clusters[nextlane][-1]["pos"] = 0
                                    clusters[nextlane][-1]["time"] = time
                                    clusters[nextlane][-1]["departure"] = time + traci.lane.getLength(nextlane)/net.getEdge(nextedge).getSpeed()
                                    clusters[nextlane][-1]["cars"].append((cartuple[0], clusters[nextlane][-1]["departure"], "Zipper append"))
                                else:
                                    #No space, try next lane
                                    continue
                            else:
                                #There is no cluster nearby
                                #So make a new cluster
                                newcluster = dict()
                                newcluster["pos"] = 0
                                newcluster["time"] = time
                                newcluster["arrival"] = time + traci.lane.getLength(nextedge+"_"+str(nextlanenum))/net.getEdge(nextedge).getSpeed()
                                newcluster["departure"] = newcluster["arrival"]
                                newcluster["cars"] = [(cartuple[0], newcluster["departure"], "Zipper new cluster")]
                                clusters[nextlane].append(newcluster)
                            #If zipper merge, need to alternate priority on things
                            edgelist.append(edgelist.pop(edgelist.index(edge))) #Push edge to end of list to give it lower priority next time
                            cluster["cars"].pop(0) #Remove car from this edge
                            break #Only delete and add the car once!
                    #Inside: while cartuple[1] < time
                    if len(cluster["cars"]) == 0:
                        clusters[lane].pop(0) #Entire cluster is done, remove it
                        break
                    oldcar = cartuple[0]
                    cartuple = cluster["cars"][0] #If we got here, there are cars left in the cluster
                    if cartuple[0] == oldcar:
                        #Couldn't move oldcar to next edge, don't infinite loop
                        break
    return "NOT DONE"
##def BFReroute(detector, network, rerouteAuto=True):
##
## ids = traci.inductionloop.getLastStepVehicleIDs(detector) #All vehicles to be rerouted
## if len(ids) == 0:
## #No cars to route, we're done here
## return
##
## # getRoadID: Returns the edge id the vehicle was last on
## edge = traci.vehicle.getRoadID(ids[0])
##
## for vehicle in ids:
##
## if rerouteAuto and detector in oldids and vehicle in oldids[detector]:
## #print("Duplicate car " + vehicle + " at detector " + detector)
## continue
##
## #Decide whether we route this vehicle
## if not vehicle in isSmart and rerouteAuto:
## print("Oops, don't know " + vehicle)
## isSmart[vehicle] = random.random() < pSmart
## if rerouteAuto and isSmart[vehicle]: #and detector[0:5]=="IL_in":
## #tstart = time.time()
##
## saveStateInfo(edge) #Saves the traffic state and traffic light timings
##
## #Swap to test sim, load current state
## traci.switch("test")
## #tstart = time.time()
## loadStateInfo(edge, network)
##
##
## #Get goal
## route = traci.vehicle.getRoute(vehicle)
## goaledge = route[-1]
##
## t = 0
## keepGoing = True
## newids = dict()
## #tstart = time.time()
##
## #Initial split
## newvs = splitVehicle(vehicle, network)
## newids[detector] = newvs
##
## while(keepGoing):
##
## #Continue with counterfactual simulation
## traci.simulationStep()
## t+=1
##
## #Check if we're done
## for lanenum in range(traci.edge.getLaneNumber(goaledge)):
## testids = traci.inductionloop.getLastStepVehicleIDs("IL_"+goaledge+"_"+str(lanenum))
## for testv in testids:
## if testv in newvs:
## #Reroute the car, then say we're done
## stuff = testv.split("_")
## outroute = [edge]
## for i in range(1,len(stuff)):
## outroute.append(stuff[i])
## keepGoing = False
## break
##
## #Check if we need to split anything
## for rerouter in traci.inductionloop.getIDList():
## testids = traci.inductionloop.getLastStepVehicleIDs(rerouter)
## for testv in testids:
## if testv in newvs and not (rerouter in newids and testv in newids[rerouter]):
## #print("Splitting")
## #splittime = time.time()
## newnewvs = splitVehicle(testv, network)
## newvs.remove(testv)
## newvs = newvs + newnewvs
## if not rerouter in newids:
## newids[rerouter] = []
## newids[rerouter] += newnewvs
## #print(time.time() - splittime)
##
##
## traci.switch("main")
## traci.vehicle.setRoute(vehicle, outroute)
## #print(time.time() - tstart)
##
## if vehicle in isSmart and not isSmart[vehicle]: #TODO: Reconsider how we treat the vehicles that somehow haven't entered the network in main yet
## #TODO: Turn randomly
## #Can't just steal from old rerouteDetector code if we don't know possible routes
## #Could just turn randomly and stop if you fall off the network...
## #Or use Sumo default routing, but then we'd know what they're doing...
## #Can deal with this later, for now I'll just set psmart=1
## print("TODO: Turn randomly")
## if rerouteAuto:
## oldids[detector] = ids
##
##def splitVehicle(vehicle, network):
## newvs = []
## edge = traci.vehicle.getRoadID(vehicle)
##
## succs = getSuccessors(edge, network)
## #TODO make sure these are ordered CCW from current edge
## for succ in succs:
## route = [edge, succ]
##
##
## #rstart = time.time()
## if not str(route) in traci.route.getIDList():
## traci.route.add(str(route), route)
## else:
## #In case we already have the route, we'll get an error; ignore it
## pass
## #print(time.time() - rstart)
##
##
## lane = traci.vehicle.getLaneIndex(vehicle)
## #lane = 0
## pos = traci.vehicle.getLanePosition(vehicle)
## speed = traci.vehicle.getSpeed(vehicle)
## pos = -50
## #speed = "max"
##
## newv = vehicle+"_"+succ
## traci.vehicle.add(newv, str(route), departLane=lane, departPos=pos, departSpeed=speed)
## newvs.append(newv)
## traci.vehicle.setColor(newv, [0, 0, 255])
## traci.vehicle.setLength(newv, traci.vehicle.getLength(vehicle)/len(succs))
##
## for v in traci.edge.getLastStepVehicleIDs(edge):
## if traci.vehicle.getLanePosition(v) < traci.vehicle.getLanePosition(vehicle):
## traci.vehicle.setColor(v, [255, 0, 0])
## traci.vehicle.remove(v)
## elif traci.vehicle.getLanePosition(v) > traci.vehicle.getLanePosition(vehicle):
## traci.vehicle.setColor(v, [0, 255, 0])
## #traci.vehicle.remove(v)
## traci.vehicle.remove(vehicle)
## return newvs
def getSuccessors(edge, network):
    """Return the IDs of the successor edges of `edge`.

    Successors are the outgoing edges of the intersection this edge leads
    into, as reported by the sumolib network object.

    Parameters:
        edge: an edge ID string
        network: the network object from sumolib.net.readNet(netfile)
    Returns:
        list of edge ID strings for the successor edges
    """
    return [succ.getID() for succ in network.getEdge(edge).getOutgoing()]
def saveStateInfo(edge):
    """Snapshot the main simulation to disk, keyed by `edge`.

    Writes savestates/teststate_<edge>.xml plus a pickle of traffic light
    phases/remaining durations (which saveState does not capture).
    """
    #Copy state from main sim to test sim
    traci.simulation.saveState("savestates/teststate_"+edge+".xml")
    #saveState apparently doesn't save traffic light states despite what the docs say
    #So save all the traffic light states and copy them over
    lightStates = dict()
    for light in traci.trafficlight.getIDList():
        lightStates[light] = [traci.trafficlight.getPhase(light), traci.trafficlight.getPhaseDuration(light)]
        #Why do the built-in functions have such terrible names?!
        # Replace the full phase duration with the time remaining in this phase.
        lightStates[light][1] = traci.trafficlight.getNextSwitch(light) - traci.simulation.getTime()
    #Save lightStates to a file
    with open("savestates/lightstate_"+edge+".pickle", 'wb') as handle:
        pickle.dump(lightStates, handle, protocol=pickle.HIGHEST_PROTOCOL)
def loadStateInfo(prevedge, net):#Load traffic state
    """Restore the snapshot saved by saveStateInfo(prevedge) into the current sim.

    Reloads the XML state, then reapplies the pickled traffic-light phases and
    remaining durations. `net` is currently unused here.
    """
    traci.simulation.loadState("savestates/teststate_"+prevedge+".xml")
    #Load light state
    with open("savestates/lightstate_"+prevedge+".pickle", 'rb') as handle:
        lightStates = pickle.load(handle)
    #Copy traffic light timings
    for light in traci.trafficlight.getIDList():
        traci.trafficlight.setPhase(light, lightStates[light][0])
        traci.trafficlight.setPhaseDuration(light, lightStates[light][1])
#Magically makes the vehicle lists stop deleting themselves somehow???
def dontBreakEverything():
    """Step the "test" simulation once, then switch back to "main".

    Workaround: without this warm-up step the two TraCI instances appear to
    interfere with each other's vehicle lists.
    """
    traci.switch("test")
    traci.simulationStep()
    traci.switch("main")
def get_options():
    """Parse command-line options; returns an object exposing the `nogui` flag."""
    parser = optparse.OptionParser()
    parser.add_option(
        "--nogui",
        action="store_true",
        default=False,
        help="run the commandline version of sumo",
    )
    parsed_options, _positional = parser.parse_args()
    return parsed_options
#Generates induction loops on all the edges
def generate_additionalfile(sumoconfig, networkfile):
    """Write additional_autogen.xml with one induction loop per lane.

    Starts a throwaway "setup" TraCI instance to enumerate edges, updates the
    global max_edge_speed, and returns the list of detector IDs placed on
    lanes whose edge has more than one successor (the rerouting points).
    """
    #Create a third instance of a simulator so I can query the network
    traci.start([checkBinary('sumo'), "-c", sumoconfig,
                             "--start", "--no-step-log", "true",
                             "--xml-validation", "never"], label="setup")
    net = sumolib.net.readNet(networkfile)
    rerouters = []
    global max_edge_speed
    with open("additional_autogen.xml", "w") as additional:
        print("""<additional>""", file=additional)
        for edge in traci.edge.getIDList():
            if edge[0] == ":":
                #Skip internal edges (=edges for the inside of each intersection)
                continue
            if (net.getEdge(edge).getSpeed() > max_edge_speed):
                max_edge_speed = net.getEdge(edge).getSpeed()
                #print(edge)
            for lanenum in range(traci.edge.getLaneNumber(edge)):
                lane = edge+"_"+str(lanenum)
                #print(lane)
                # pos="-50": detector sits 50m before the end of the lane.
                print('    <inductionLoop id="IL_%s" freq="1" file="outputAuto.xml" lane="%s" pos="-50" friendlyPos="true" />' \
                      % (lane, lane), file=additional)
                if len(net.getEdge(edge).getOutgoing()) > 1:
                    rerouters.append("IL_"+lane)
        print("</additional>", file=additional)
    return rerouters
# this is the main entry point of this script
if __name__ == "__main__":
    options = get_options()
    # this script has been called from the command line. It will start sumo as a
    # server, then connect and run
    if options.nogui:
        sumoBinary = checkBinary('sumo')
    else:
        sumoBinary = checkBinary('sumo-gui')
    #NOTE: Script name is zeroth arg
    sumoconfig = sys.argv[2]
    netfile = sys.argv[1]
    rerouters = generate_additionalfile(sumoconfig, netfile)
    # this is the normal way of using traci. sumo is started as a
    # subprocess and then the python script connects and runs
    traci.start([sumoBinary, "-c", sumoconfig,
                             "--additional-files", "additional_autogen.xml",
                             "--log", "LOGFILE", "--xml-validation", "never"], label="main")
    #Second simulator for running tests. No GUI
    traci.start([checkBinary('sumo'), "-c", sumoconfig,
                             "--additional-files", "additional_autogen.xml",
                             "--start", "--no-step-log", "true",
                             "--xml-validation", "never",
                             "--step-length", "1"], label="test")
    # Build the global priority-ordered edge list, excluding internal
    # (intersection) edges. The original removed elements from `edgelist`
    # while iterating over it, which silently skips every other internal
    # edge; building a fresh list avoids that bug.
    edgelist = [edge for edge in traci.edge.getIDList() if edge[0] != ":"]
    run(netfile, rerouters)
    traci.close()
| neimandavid/traffic-routing | shortlong2_16782/runnerQueue.py | runnerQueue.py | py | 24,757 | python | en | code | 1 | github-code | 13 |
26597388590 | import xml.etree.ElementTree as ET
import utils
# Tag set to strip from the corpus; built once at import time.
unwantedTags = utils.construct_unwanted_tags()
def main():
    """Clean the word-sense corpus: run every <context> (and the tail text of
    each <head> child) through utils.process_string, then write the cleaned
    tree to processed_test2.xml."""
    tree = ET.parse('test-data2.data')
    root = tree.getroot()
    contexts = root.findall("./lexelt/instance/context")
    for context in contexts:
        context.text = utils.process_string(context.text, unwantedTags)
        for head in context:
            # `tail` is the text following the <head> element inside <context>.
            head.tail = utils.process_string(head.tail, unwantedTags)
    tree.write('processed_test2.xml')
if __name__ == "__main__":
    main()
| fever324/NLP-Projects | Project2/preprossing.py | preprossing.py | py | 511 | python | en | code | 0 | github-code | 13 |
7528379482 | from torch import distributed as dist
import torch
import os
import signal
import asyncio
from functools import wraps
from torch import multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from contextlib import contextmanager
from torch.nn.parallel.distributed import _find_tensors
from .contextlib import maybeasynccontextmanager
import logging
import inspect
import time
log = logging.getLogger(__name__)
def initialize(device, devices):
    """Idempotently initialize the NCCL process group for this worker.

    `device` is this worker's CUDA device (None means a lone size-1 group);
    `devices` is the full list of participating devices. The rendezvous port
    is offset by the first device index so that separate groups on the same
    machine use distinct ports.
    """
    if dist.is_initialized():
        log.info('Process group is already initialized')
        return
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    if device is None:
        # Single-process "group" of size 1.
        os.environ['MASTER_PORT'] = str(29500)
        dist.init_process_group('nccl', rank=0, world_size=1)
    else:
        os.environ['MASTER_PORT'] = str(29500 + devices[0])
        # Rank is this device's position in the device list.
        dist.init_process_group('nccl', rank=devices.index(device), world_size=len(devices))
@contextmanager
def processgroup(device, devices):
    """Context manager: initialize the process group on entry (see
    `initialize`), always destroy it on exit."""
    try:
        initialize(device, devices)
        yield
    finally:
        dist.destroy_process_group()
class DDP2(DDP):
    """DistributedDataParallel variant that skips input scattering when the
    module lives on a single device.

    NOTE(review): the body mirrors a specific torch version's DDP.forward and
    touches private attributes (_sync_params, _module_copies, reducer); it
    may need updating when torch is upgraded.
    """
    def forward(self, *inputs, **kwargs):
        """Modified to support not scattering inputs when it's only on one device"""
        if self.require_forward_param_sync:
            self._sync_params()
        if self.device_ids:
            if len(self.device_ids) == 1:
                # Single device: call the module directly, no scatter/gather.
                output = self.module(*inputs, **kwargs)
            else:
                inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
                outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            output = self.module(*inputs, **kwargs)
        if torch.is_grad_enabled() and self.require_backward_grad_sync:
            self.require_forward_param_sync = True
            # We'll return the output object verbatim since it is a freeform
            # object. We need to find any tensors in this object, though,
            # because we need to figure out which parameters were used during
            # this forward pass, to ensure we short circuit reduction for any
            # unused parameters. Only if `find_unused_parameters` is set.
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        else:
            self.require_forward_param_sync = False
        return output
def set_start_method():
    """If you use get_start_method to check what the start method is, you'll accidentally _set_ the start method
    and then get an error if you later try to set it. Here we open the box without killing the cat"""
    import os
    # https://github.com/pytorch/pytorch/issues/32575
    # os.environ['NCCL_BLOCKING_WAIT'] = '1'
    from multiprocessing import context
    # NOTE(review): peeks at multiprocessing internals (_default_context /
    # _actual_context); may break on future Python versions.
    ctx = context._default_context
    if ctx._actual_context is None:
        # No start method has been chosen yet; pick 'spawn'.
        mp.set_start_method('spawn')
    else:
        # A method was already chosen elsewhere; require a CUDA-safe one.
        assert ctx._actual_context._name in ('spawn', 'forkserver')
def consensus(b):
    """All-reduce a boolean across the process group: returns True only when
    every rank passed True (PRODUCT of 0/1 values is 1 iff all are 1)."""
    b = torch.tensor(float(b)).cuda()
    dist.all_reduce(b, dist.ReduceOp.PRODUCT)
    return bool(b.cpu())
def cancel(canceller):
    """Decide whether this worker should break out of its loop.

    Returns True when it is safe to stop; otherwise falls through and returns
    None (falsy). In the distributed case every rank must agree before any
    rank stops, so the local flag is put to a group-wide vote via `consensus`.
    """
    if dist.is_initialized():
        # If we're in a process group, either the whole group needs to
        # break or no-one does, else a process will be left hanging.
        cancel = canceller.is_set()
        if cancel:
            log.info('Canceller set, trying to break')
        if consensus(cancel):
            log.info('Everyone has cancelled, breaking')
            return True
    else:
        if canceller.is_set():
            log.info('Cancelled, breaking')
            return True
async def surrender():
    """Yield control to the event loop for one cycle without actually sleeping."""
    await asyncio.sleep(0)
class DeadStrand(Exception):
    """Raised by a sentinel's `check` when a launched strand has died unexpectedly."""
    pass
def coroutine_runner(f, *args, **kwargs):
    """Drive the coroutine produced by ``f(*args, **kwargs)`` to completion.

    Used as a process target so coroutine-style strand functions can be
    launched the same way as plain functions: the coroutine is repeatedly
    sent ``None`` until it finishes.
    """
    co = f(*args, **kwargs)
    try:
        while True:
            co.send(None)
    except StopIteration:
        # Coroutine finished normally.
        pass
    # Any other exception propagates unchanged; the previous
    # `except Exception as e: raise e` was a no-op that only made the
    # traceback point here instead of at the real failure site.
class ProcessSentinel:
    """Launches strand functions in child processes and watches over them.

    `launch` starts a function (or coroutine function, driven via
    `coroutine_runner`) in a new process; `check` raises `DeadStrand` if any
    child has died; `cancel` sets the shared canceller event and waits for
    children to exit, terminating stragglers after `wait` seconds.
    """
    def __init__(self, wait=15):
        # Seconds to wait for graceful shutdown before terminating children.
        self._wait = wait
        # Maps (qualname, count) -> mp.Process for every launched child.
        self._processes = {}
        # Objects (e.g. queues) that must outlive the children; see `pin`.
        self._references = []
        # Shared event that launched functions should poll for cancellation.
        self.canceller = mp.Event()
        set_start_method()
        self.serial = False
    def pin(self, obj):
        """There are sometimes objects passed to children - like queues - that need to
        stick around for as long as the children do"""
        self._references.append(obj)
    def launch(self, f, *args, **kwargs):
        """Start f(*args, **kwargs) in a new child process."""
        if (self.canceller not in args) and (self.canceller not in kwargs.values()):
            log.warning('Sentinel\'s canceller has not been passed to a launched process')
        # Keys of _processes are (name, count) tuples, so unpack when counting
        # earlier launches of `f`. (The old `for n in self._processes` compared
        # a tuple against the qualname, so count was always 0 and a second
        # launch of the same function overwrote the first entry; SerialSentinel
        # already did this correctly.)
        count = len([n for n, _ in self._processes if n == f.__qualname__])
        if inspect.iscoroutinefunction(f):
            # Coroutine functions are driven to completion by coroutine_runner.
            proc = mp.Process(
                name=f'{f.__qualname__}-{count}',
                target=coroutine_runner,
                args=(f, *args),
                kwargs=kwargs)
        else:
            proc = mp.Process(
                name=f'{f.__qualname__}-{count}',
                target=f,
                args=args,
                kwargs=kwargs)
        proc.start()
        self._processes[f.__qualname__, count] = proc
        log.info(f'Launched process {f.__qualname__}-{count}')
    def wait(self):
        """Wait up to self._wait seconds for children to exit, then terminate
        any that are still alive and drop pinned references."""
        for _ in range(int(self._wait)):
            alive = [(n, c) for (n, c), p in self._processes.items() if p.is_alive()]
            if alive:
                strs = [f'{n}-{c}' for n, c in alive]
                log.info(f'Waiting for cancellations: {", ".join(strs)} still alive')
            else:
                log.info('All processes gracefully cancelled')
                break
            time.sleep(1)
        else:
            for n, c in alive:
                log.info(f'Failed to cancel "{n}-{c}"; terminating')
                # Index with the full (name, count) key; the old
                # `self._processes[n]` raised KeyError here.
                self._processes[n, c].terminate()
        self._references = []
    def cancel(self):
        """Signal every child to stop, then wait for them."""
        log.info('Setting canceller')
        self.canceller.set()
        self.wait()
    def check(self):
        """Raise DeadStrand (after cancelling everyone) if any child has died."""
        for (n, c), p in self._processes.items():
            if not p.is_alive():
                log.info(f'Process "{n}-{c}" died unexpectedly; cancelling')
                self.cancel()
                raise DeadStrand(f'Process "{n}-{c}" died unexpectedly')
class SerialSentinel:
    """In-process counterpart of ProcessSentinel: strands are coroutines that
    this object drives by hand instead of child processes.

    `launch` creates the coroutine; `wait` and `check` advance each one with
    send(None), so "waiting" doubles as driving them toward completion.
    """
    def __init__(self, wait=15):
        # Number of drive-everything rounds before force-closing coroutines.
        self._wait = wait
        # Shared cancellation event (mp.Event for API parity with ProcessSentinel).
        self.canceller = mp.Event()
        # Maps (qualname, count) -> coroutine object.
        self._coroutines = {}
        # NOTE(review): _exited is never used in this class - possibly vestigial.
        self._exited = []
        self.serial = True
    def launch(self, f, *args, **kwargs):
        """Instantiate the coroutine f(*args, **kwargs) and track it."""
        if (self.canceller not in args) and (self.canceller not in kwargs.values()):
            log.warn('Sentinel\'s canceller has not been passed to a launched process')
        # Count earlier launches of the same function to build a unique key.
        count = len([n for n, _ in self._coroutines if n == f.__qualname__])
        co = f(*args, **kwargs)
        self._coroutines[f.__qualname__, count] = co
        log.info(f'Launched coroutine {f.__qualname__}-{count}')
    def wait(self):
        """Drive every coroutine until all finish or self._wait rounds pass;
        then force-close any still running."""
        for _ in range(int(self._wait)):
            alive = []
            for (n, c), co in self._coroutines.items():
                try:
                    co.send(None)
                except (RuntimeError, StopIteration):
                    # Finished or already closed - no longer alive.
                    pass
                else:
                    alive.append((n, c))
            if alive:
                strs = [f'{n}-{c}' for n, c in alive]
                log.info(f'Waiting for cancellations: {", ".join(strs)} still alive')
            else:
                log.info('All coroutines gracefully cancelled')
                break
        else:
            # for/else: ran out of rounds with some coroutines still alive.
            for n, c in alive:
                log.info(f'Failed to cancel "{n}-{c}"; closing')
                try:
                    self._coroutines[n, c].close()
                except RuntimeError:
                    pass
    def cancel(self):
        """Set the canceller, then drive the coroutines to completion."""
        log.info('Setting canceller')
        self.canceller.set()
        self.wait()
    def check(self):
        """Advance each coroutine one step; if one raises, cancel everything
        and re-raise its exception."""
        for (n, c), co in self._coroutines.items():
            try:
                co.send(None)
            except StopIteration:
                pass
            except Exception as e:
                log.info(f'Coroutine "{n}-{c}" died unexpectedly; cancelling')
                self.cancel()
                raise e
@contextmanager
def sentinel(serial=False):
    """Context manager yielding a Process- or SerialSentinel with guaranteed cleanup.

    However the body exits - normally, Ctrl-C, or any other exception - every
    launched strand gets cancelled. DeadStrand is re-raised without another
    cancel because `check` already cancelled everything before raising it.
    """
    sentinel = SerialSentinel() if serial else ProcessSentinel()
    try:
        yield sentinel
    except KeyboardInterrupt:
        log.info('Got a keyboard interrupt, cancelling processes')
        sentinel.cancel()
    except (DeadStrand,):
        raise
    # Bare except is deliberate: also catches BaseException subclasses so
    # children are never left running.
    except:
        sentinel.cancel()
        raise
    else:
        sentinel.cancel()
| andyljones/megastep | rebar/processes.py | processes.py | py | 8,724 | python | en | code | 117 | github-code | 13 |
30741215229 | from statistics import mean, median, mode
from time import sleep
import request
import solver
from datetime import date, timedelta
import matplotlib.pyplot as plt
# Replay every daily Nerdle-style puzzle between d1 and d2, solving each with
# the Z3-backed Solver and recording how many guesses it took.
d1 = date(2022,6,1)
d2 = date(2022,7,31)
score = []  # guesses needed per solved day
for i in range((d2-d1).days + 1):
    d = d1 + timedelta(i)
    print(d)
    # `request` is the project module that talks to the puzzle site.
    eq_pos = request.get_precondition(d)
    print(f"equation pos idx = {eq_pos}")
    s = solver.Solver(eq_pos)
    clear = False
    #s.add_distinct_condition()
    # NOTE(review): this inner `i` shadows the outer day index; harmless as
    # written (the outer `i` is reassigned each day) but fragile.
    for i in range(10):
        print(f"Searching answer {i} ",end="")
        op_cond = s.ope_choices()
        loop_flag = False
        # Exhaustively try operator combinations until an equation satisfies
        # all current constraints.
        for ops in op_cond:
            op1,op2 = ops[0],ops[1]
            #print(ops)
            #print(f"ops is {op1,op2}")
            # Prefer all-distinct digits when possible.
            dis_flag = s.add_distinct_condition()
            # Equation (operand/operator) constraint.
            s.add_operand_constraint(op1,op2)
            if s.check():
                s.createModel()
                s.pop(dis_flag)
                expr = s.get_format_expression()
                print(expr,end=" ")
                fb = request.send_answer(expr,d)
                print("check=",end="")
                for c in fb:
                    print("🟩" if c == 2 else "🟨" if c == 1 else "⬛",end="")
                print("")
                # Fully correct answer.
                if fb == [2,2,2,2,2,2]:
                    clear = True
                s.add_feedback_constraint(fb)
                loop_flag = True
                break
            # No equation satisfied the constraints with this operator pair.
            else:
                s.pop(dis_flag)
        # Fallback: no equation with all-distinct digits could be built, so
        # retry without the distinctness condition.
        if not loop_flag:
            for ops in op_cond:
                op1,op2 = ops[0],ops[1]
                s.add_operand_constraint(op1,op2)
                if s.check():
                    s.createModel()
                    s.pop(False)
                    expr = s.get_format_expression()
                    print(expr,end=" ")
                    fb = request.send_answer(expr,d)
                    print("check=",end="")
                    for c in fb:
                        print("🟩" if c == 2 else "🟨" if c == 1 else "⬛",end="")
                    print("")
                    if fb == [2,2,2,2,2,2]:
                        clear = True
                    s.add_feedback_constraint(fb)
                    loop_flag = True
                    break
                else:
                    s.pop(False)
        if clear:
            print("Solve!")
            score.append(i+1)  # i is the 0-based guess index
            break
        sleep(1)  # be polite to the remote service between guesses
print(mean(score),median(score),mode(score))
plt.hist(score)
plt.show()
29113383146 | import time
def simple_watch(i):
    """Minimal stopwatch: call with i == 0 to (re)start, any other i to read.

    Returns 0 on start, otherwise the seconds elapsed since the last start.
    The start timestamp is stashed on the function object itself, so reading
    before any start raises AttributeError.
    """
    if i == 0:
        simple_watch.start = time.time()
        return 0
    return time.time() - simple_watch.start
def to_str(t):
    """Format a duration `t` in seconds as '2h5m', '3m10s', or '42s'.

    Fractions of a second are truncated. Only the two largest non-zero units
    are shown (seconds are dropped once hours appear).
    """
    total = int(t)
    minutes, seconds = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    if hours > 0:
        return f"{hours}h{minutes}m"
    if minutes > 0:
        return f"{minutes}m{seconds}s"
    return f"{seconds}s"
def watch_as_str(i):
    """Read the shared stopwatch (see simple_watch) and format it for humans."""
    return to_str(simple_watch(i))
| Kumamoto-Hamachi/daily_useful_py | timer/simple_watch.py | simple_watch.py | py | 482 | python | en | code | 1 | github-code | 13 |
23727349660 | # %%
import heapq
from typing import List
class Solution:
    """LeetCode 871 - Minimum Number of Refueling Stops.

    Greedy with a max-heap: drive as far as current fuel allows; whenever the
    target is still out of reach, retroactively refuel at the largest-capacity
    station already passed. This replaces the original's re-sort of the
    candidate list on every shortfall (O(n^2 log n)) with heap operations
    (O(n log n)), and its bare ``except`` with an explicit emptiness check.
    """
    def minRefuelStops(self, target: int, startFuel: int,
                       stations: List[List[int]]) -> int:
        # Max-heap of station fuel amounts (negated, since heapq is a min-heap).
        reachable: List[int] = []
        stops = 0
        i = 0
        n = len(stations)
        while startFuel < target:
            # Every station now within range becomes a refueling candidate.
            while i < n and startFuel >= stations[i][0]:
                heapq.heappush(reachable, -stations[i][1])
                i += 1
            if not reachable:
                # Out of fuel with no station left to draw from: unreachable.
                return -1
            startFuel -= heapq.heappop(reachable)  # take the largest candidate
            stops += 1
        return stops
# Quick interactive smoke test (expected answer: 2 refueling stops).
a = Solution()
a.minRefuelStops(100, 10, [[10, 60], [20, 30], [30, 30], [60, 40]])
# %%
| HXLH50K/Leetcode | 871.py | 871.py | py | 656 | python | en | code | 0 | github-code | 13 |
26819057931 | import json
import os
import requests
# Read Trello API auth credentials from local
# The credential is store in a local JSON file whose location is defined in a system environment variable called
# "TRELLO_API_CONFIG_PATH"
# The credential file is a JSON map of the form
# {
# "key": "<API key>"
# "token": <token>
# }
# see https://trello.com/app-key
# see https://gcallah.github.io/DevOps/workflow/trelloapi.html
# Resolve Trello credentials: prefer the JSON config file pointed at by
# TRELLO_API_CONFIG_PATH; fall back to discrete environment variables.
CONFIG_FILE_PATH = os.getenv("TRELLO_API_CONFIG_PATH")
if CONFIG_FILE_PATH:
    # Use a context manager so the credential file handle is closed promptly
    # (the original leaked it), and reuse the path we already fetched.
    with open(CONFIG_FILE_PATH) as config_file:
        api_credential = json.load(config_file)
    key = api_credential["key"]
    token = api_credential["token"]
else:
    key = os.getenv("TRELLO_API_KEY")
    token = os.getenv("TRELLO_API_TOKEN")
def delete_attachment_by_name(card_id: str, name: str) -> None:
    """
    Delete every attachment on a Trello card whose display name matches.

    :param card_id: The ID of the Trello card whose attachments are inspected
    :param name: The display name of the attachment(s) being deleted
    """
    list_url = "https://api.trello.com/1/cards/{}/attachments?key={}&token={}".format(card_id, key, token)
    for attachment in requests.get(list_url).json():
        if attachment["name"] != name:
            continue
        requests.delete(
            "https://api.trello.com/1/cards/{}/attachments/{}?key={}&token={}".format(
                card_id,
                attachment["id"],
                key,
                token
            )
        )
def delete_all_attachments(card_id: str) -> None:
    """
    Delete every attachment of a Trello card.

    :param card_id: The ID of the Trello card whose attachments are deleted
    """
    list_url = "https://api.trello.com/1/cards/{}/attachments?key={}&token={}".format(card_id, key, token)
    for attachment in requests.get(list_url).json():
        delete_url = "https://api.trello.com/1/cards/{}/attachments/{}?key={}&token={}".format(
            card_id,
            attachment["id"],
            key,
            token
        )
        requests.delete(delete_url)
def upload_attachment(card_id: str, attachment_name: str, attachment_relative_path: str) -> requests.models.Response:
    """
    Upload a file as an attachment to a Trello card.

    :param card_id: The ID of the Trello card against which the attachment is uploaded
    :param attachment_name: Attachment display name, e.g. book.pdf
    :param attachment_relative_path: Attachment file path relative to the location of this script invocation
    :return: a response object whose Trello API response fields can be retrieved via "response.json()"
    """
    # Define the credential info
    params = (
        ('key', key),
        ('token', token),
    )
    # Define API URL
    url = "https://api.trello.com/1/cards/{}/attachments".format(card_id)
    # Open the file in a context manager so the handle is always closed; the
    # original opened it inline inside the dict and leaked the handle.
    with open(attachment_relative_path, 'rb') as attachment_file:
        files = {
            'file': (attachment_name, attachment_file),
        }
        # Fire API request to upload attachment
        return requests.post(url, params=params, files=files)
if __name__ == "__main__":
pass
| QubitPi/peitho-data | peitho_data/trello_api.py | trello_api.py | py | 3,118 | python | en | code | 0 | github-code | 13 |
74363229138 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial schema migration: creates the Film table with an auto PK and
    # an auto-populated publication timestamp.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Film',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
                ('titre', models.CharField(max_length=100)),
                ('auteur', models.CharField(max_length=42)),
                ('contenu', models.TextField(null=True)),  # optional body text
                ('date', models.DateTimeField(verbose_name='Date de parution', auto_now_add=True)),
            ],
        ),
    ]
| TeamMoRe/MoRe | bdd/migrations/0001_initial.py | 0001_initial.py | py | 700 | python | en | code | 1 | github-code | 13 |
17061080434 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class UnavailablePeriodInfo(object):
    """A date range (start_day .. end_day) during which something is unavailable.

    Follows the Alipay SDK convention: ``to_alipay_dict`` serializes only the
    fields that are set (truthy), and ``from_alipay_dict`` builds an instance
    from a response dict, returning None for an empty input.
    """

    _FIELDS = ('end_day', 'start_day')

    def __init__(self):
        self._end_day = None
        self._start_day = None

    @property
    def end_day(self):
        return self._end_day

    @end_day.setter
    def end_day(self, value):
        self._end_day = value

    @property
    def start_day(self):
        return self._start_day

    @start_day.setter
    def start_day(self, value):
        self._start_day = value

    def to_alipay_dict(self):
        """Serialize the set fields into a plain dict."""
        params = {}
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            # nested SDK objects know how to serialize themselves
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an UnavailablePeriodInfo from a response dict (None-safe)."""
        if not d:
            return None
        o = UnavailablePeriodInfo()
        for field in UnavailablePeriodInfo._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/UnavailablePeriodInfo.py | UnavailablePeriodInfo.py | py | 1,338 | python | en | code | 241 | github-code | 13 |
import numpy as np
import pylab as plt
# Schematic plot of dg/dt = sinh(V) against voltage: coordinate axes are drawn
# in black through the origin and tick labels are suppressed.
time = np.arange(-10,10,0.01)
zeros = np.zeros(len(time))
y = np.sinh(time)
plt.plot(time,y)
plt.plot(time,zeros,'black')  # horizontal axis
plt.plot(zeros,y,'black')  # vertical axis
plt.xlabel('Voltage V')
plt.ylabel('dg/dt')
plt.xticks([])
plt.yticks([])
plt.show()
| zzyxzz/code-stuff | desktop/threhold.py | threhold.py | py | 293 | python | en | code | 2 | github-code | 13 |
1376333261 | from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
class SeleniumWebDriver:
    """Factory for a headless, maximised Chrome webdriver.

    An optional ``download_path`` redirects Chrome's default download directory.
    """

    def __init__(self, download_path=None):
        self.web_driver = None
        self.download_path = download_path

    @property
    def options(self):
        """Chrome options: maximised, headless, optional download directory."""
        chrome_options = webdriver.ChromeOptions()
        for flag in ("--start-maximized", "--headless"):
            chrome_options.add_argument(flag)
        if self.download_path:
            chrome_options.add_experimental_option(
                'prefs', {'download.default_directory': self.download_path})
        return chrome_options

    @property
    def webdriver(self):
        """A freshly constructed Chrome driver using ``self.options``."""
        driver_service = Service(ChromeDriverManager().install())
        return webdriver.Chrome(service=driver_service, options=self.options)
| Parth971/github-domain-scraper | src/github_domain_scraper/driver.py | driver.py | py | 801 | python | en | code | 0 | github-code | 13 |
def linha():
    """Imprime uma linha separadora."""
    print()
    print('_' * 42)
    print()


def _detalha_troco(valor_troco):
    """Decompõe o troco em cédulas e moedas.

    Trabalha em centavos inteiros para evitar erros de ponto flutuante — o
    laço original só terminava quando o resto era exatamente 0.0, o que podia
    nunca acontecer (loop infinito). Retorna uma lista de tuplas
    (quantidade, rótulo, valor_em_reais) apenas para denominações usadas.
    """
    denominacoes = [
        (20000, 'celula (s)'), (10000, 'celula (s)'), (5000, 'celula (s)'),
        (2000, 'celula (s)'), (1000, 'celula (s)'), (500, 'celula (s)'),
        (200, 'celula (s)'), (100, 'celula (s)'),
        (50, 'moeda (s)'), (25, 'moeda (s)'), (10, 'moeda (s)'),
        (5, 'moeda (s)'), (1, 'moeda (s)'),
    ]
    restante = round(valor_troco * 100)  # centavos inteiros
    partes = []
    for centavos, rotulo in denominacoes:
        quantidade, restante = divmod(restante, centavos)
        if quantidade > 0:
            partes.append((quantidade, rotulo, centavos / 100))
    return partes


print('=' * 10, 'CALCULADORA DE TROCO', '=' * 10)
print()
valorDaCompra = float(input('Digite o valor da sua compra: '))
linha()
valorRecebido = float(input('Agora digite o valor o recebido pela compra: '))
linha()
valorTroco = valorRecebido - valorDaCompra
print('Seu troco รฉ de: R$ %.2f' % valorTroco)
for totalCedulas, moeda, cedulas in _detalha_troco(valorTroco):
    print(f'O seu troco possui {totalCedulas} {moeda} de: R$ %.2f' % cedulas)
| JosePaulodeLima/APLICATIVOSENAI | SA_3_v.1.0.3.4.py | SA_3_v.1.0.3.4.py | py | 1,688 | python | pt | code | 0 | github-code | 13 |
12134926278 | import json
from json import JSONDecodeError
def get_posts_all():
    """Load all posts from static/data/posts.json; None if missing or invalid."""
    try:
        with open('static/data/posts.json', 'r', encoding='utf-8') as file:
            return json.load(file)
    except FileNotFoundError:
        print('ะคะฐะนะป posts.json ะพััััััะฒัะตั')
    except JSONDecodeError:
        print("ะคะฐะนะป posts.json ะฝะต ัะดะฐะตััั ะฟัะตะพะฑัะฐะทะพะฒะฐัั")
def get_comments_all():
    """Load all comments from static/data/comments.json; None if missing or invalid."""
    try:
        with open('static/data/comments.json', 'r', encoding='utf-8') as file:
            return json.load(file)
    except FileNotFoundError:
        print('ะคะฐะนะป comments.json ะพััััััะฒัะตั')
    except JSONDecodeError:
        print("ะคะฐะนะป comments.json ะฝะต ัะดะฐะตััั ะฟัะตะพะฑัะฐะทะพะฒะฐัั")
def get_posts_by_user(user_name):
    """Return every post authored by the given poster name."""
    matching = []
    for post in get_posts_all():
        if post['poster_name'] == user_name:
            matching.append(post)
    return matching
def get_comments_by_post_id(post_id):
    """Return every comment attached to the given post."""
    matching = []
    for comment in get_comments_all():
        if comment['post_id'] == post_id:
            matching.append(comment)
    return matching
def search_for_posts(query):
    """Return the posts whose content contains the query (case-insensitive)."""
    needle = query.lower()
    return [post for post in get_posts_all() if needle in post['content'].lower()]
def get_post_by_pk(pk):
    """Return the single post with identifier pk, or None when absent."""
    return next((post for post in get_posts_all() if post['pk'] == pk), None)
def cut_posts(posts):
    """Truncate each post's content to at most 100 characters (in place).

    An ellipsis is appended only when the content was actually shortened;
    the original sliced to 99 characters and glued '...' onto every post,
    including ones already short enough.
    """
    for post in posts:
        content = post['content']
        if len(content) > 100:
            post['content'] = content[:100] + '...'
    return posts
def convert_tags_to_links(content):
    """Turn each #hashtag word in a string into an HTML link.

    Words starting with '##' or consisting of a lone '#' are left untouched;
    only the first occurrence of each tag word is replaced per match.
    """
    for token in content.split(' '):
        is_hashtag = len(token) > 1 and token.startswith('#') and token[1] != '#'
        if is_hashtag:
            link = '<a href="/tag/{}">{}</a>'.format(token[1:], token)
            content = content.replace(token, link, 1)
    return content
def convert_tags_in_posts(posts):
    """Convert the hashtags in every post's content into HTML links (in place)."""
    for post in posts:
        post['content'] = convert_tags_to_links(post['content'])
    return posts
def get_bookmarks():
    """Load the list of bookmarked post ids; None if the file is missing or invalid."""
    try:
        with open('static/data/bookmarks.json', 'r', encoding='utf-8') as file:
            return json.load(file)
    except FileNotFoundError:
        print('ะคะฐะนะป bookmarks.json ะพััััััะฒัะตั')
    except JSONDecodeError:
        print("ะคะฐะนะป bookmarks.json ะฝะต ัะดะฐะตััั ะฟัะตะพะฑัะฐะทะพะฒะฐัั")
def load_posts_in_bookmarks():
    """Return the posts whose pk is in the bookmarks list.

    Reads the bookmarks file once up front — the original called
    get_bookmarks() inside the filter lambda, re-reading the file once
    per post.
    """
    bookmarked = set(get_bookmarks())
    return [post for post in get_posts_all() if post['pk'] in bookmarked]
def add_bookmark(post_pk):
    """Add a post id to the bookmarks file, deduplicating the stored list."""
    bookmarks = get_bookmarks()
    bookmarks.append(post_pk)
    unique_bookmarks = list(set(bookmarks))
    with open('static/data/bookmarks.json', 'w', encoding='utf-8') as file:
        json.dump(unique_bookmarks, file)
def delete_bookmark(post_pk):
    """Remove a post id from the bookmarks list and rewrite the file.

    The file is rewritten even when post_pk was not bookmarked, matching the
    previous behaviour.
    """
    bookmarks = get_bookmarks()
    if post_pk in bookmarks:
        bookmarks.remove(post_pk)
    with open('static/data/bookmarks.json', 'w', encoding='utf-8') as file:
        json.dump(bookmarks, file)
def add_post(post_data):
    """Append a new post to the posts file."""
    posts = get_posts_all()
    posts.append(post_data)
    with open('static/data/posts.json', 'w', encoding='utf-8') as file:
        json.dump(posts, file)
74854962577 | # -*- coding: utf-8 -*-
import scrapy
class MingyanSpiderSpider(scrapy.Spider):
    """Spider that downloads the first two listing pages of lab.scrapyd.cn
    and saves each raw response body to a local HTML file."""
    name = 'mingyan_spider'
    # allowed_domains = ['mingyan.com']
    def start_requests(self):
        # Seed requests: pages 1 and 2 of the quotes lab site.
        urls = [
            'http://lab.scrapyd.cn/page/1/',
            'http://lab.scrapyd.cn/page/2/',
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response):
        # Derive the page number from the URL path (".../page/<n>/").
        page = response.url.split('/')[-2]
        filename = 'mingyan-{}.html'.format(page)
        with open(filename, 'wb') as f:
            f.write(response.body)
        self.log('ไฟๅญๆไปถ: {}'.format(filename))  # log message: "saved file: <name>"
| Ewenwan/python_study | tutorial/L48scrapy/mingyan/mingyan/spiders/mingyan_spider.py | mingyan_spider.py | py | 640 | python | en | code | 1 | github-code | 13 |
8678311356 | import numpy as np
import pandas as pd
import os
from PIL import Image, ImageOps
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.preprocessing import image
from sklearn.model_selection import train_test_split
from tqdm import tqdm
def preprocessing(train):
    """One-hot encode the neck / sleeve_length / pattern attribute columns.

    :param train: DataFrame with columns filename, neck (classes 0-7),
        sleeve_length (0-5) and pattern (0-10); NaNs are assumed already
        filled with the "unknown" class (7 / 5 / 10 respectively).
    :return: DataFrame holding the filename column plus one indicator column
        per (attribute, class) pair; unset indicators are filled with 0.

    Replaces the original's 25 copy-pasted ``.loc`` assignments with a loop
    driven by an (attribute, class-count) table — output is identical.
    """
    attribute_classes = [("neck", 8), ("sleeve_length", 6), ("pattern", 11)]
    new_columns = [train.columns[0]] + [
        "{}{}".format(attr, cls)
        for attr, n_classes in attribute_classes
        for cls in range(n_classes)
    ]
    new_train = pd.DataFrame(columns=new_columns)
    new_train.filename = train.filename
    for attr, n_classes in attribute_classes:
        for cls in range(n_classes):
            new_train.loc[train[attr] == np.float64(cls), "{}{}".format(attr, cls)] = 1
    new_train.fillna(np.float64(0), inplace=True)
    return new_train
def resize_img(im):
    """Scale a PIL image so its longest side is 350 px (preserving aspect
    ratio) and pad the shorter side with white to a square 350x350 result.

    Fixes the original, which computed the scale ratio but never applied it
    (``int(x)`` instead of ``int(x * ratio)``, so resize was a no-op) and
    built — then discarded — an unused black canvas via Image.new/paste.
    """
    print("\nbefore : ", im.size)
    old_size = im.size
    desired_size = 350
    ratio = float(desired_size) / max(old_size)
    new_size = tuple(int(x * ratio) for x in old_size)
    im = im.resize(new_size, Image.ANTIALIAS)
    delta_w = desired_size - new_size[0]
    delta_h = desired_size - new_size[1]
    padding = (delta_w // 2, delta_h // 2, delta_w - (delta_w // 2), delta_h - (delta_h // 2))
    print(padding)
    return ImageOps.expand(im, padding, fill="white")
def create_model() :
    """Build and compile the 4-conv-block CNN for 25-way multi-label output.

    Input: 350x350 RGB image. Output: 25 independent sigmoid scores
    (8 neck + 6 sleeve_length + 11 pattern one-hot indicator columns),
    trained with binary cross-entropy so each label is scored separately.
    """
    model = Sequential()
    model.add( Conv2D( filters=16, kernel_size=(5, 5), activation="relu", input_shape=(350, 350, 3) ) )
    model.add( MaxPooling2D( pool_size=(2, 2) ) )
    model.add( Dropout( 0.25 ) )
    model.add( Conv2D( filters=32, kernel_size=(5, 5), activation='relu' ) )
    model.add( MaxPooling2D( pool_size=(2, 2) ) )
    model.add( Dropout( 0.25 ) )
    model.add( Conv2D( filters=64, kernel_size=(5, 5), activation="relu" ) )
    model.add( MaxPooling2D( pool_size=(2, 2) ) )
    model.add( Dropout( 0.25 ) )
    model.add( Conv2D( filters=64, kernel_size=(5, 5), activation='relu' ) )
    model.add( MaxPooling2D( pool_size=(2, 2) ) )
    model.add( Dropout( 0.25 ) )
    model.add( Flatten() )
    model.add( Dense( 128, activation='relu' ) )
    model.add( Dropout( 0.5 ) )
    model.add( Dense( 64, activation='relu' ) )
    model.add( Dropout( 0.5 ) )
    # sigmoid (not softmax): labels are independent in multi-label setup
    model.add( Dense( 25, activation='sigmoid' ) )
    print( model.summary() )
    model.compile( optimizer='adam', loss='binary_crossentropy', metrics=[ 'accuracy' ] )
    return model
def train_model(data_folder):
    """Train the multi-label CNN on the images listed in attributes.csv.

    :param data_folder: directory containing attributes.csv and an images/ subfolder
    Saves the fitted model to ./models/cl_multi_label.h5.
    """
    attributes_path = os.path.join(data_folder, "attributes.csv")
    train = pd.read_csv(attributes_path)
    # Missing attribute values are mapped to an explicit "unknown" class
    # (neck=7, sleeve_length=5, pattern=10), matching preprocessing().
    train.neck.fillna(np.float64(7), inplace=True)
    train.sleeve_length.fillna(np.float64(5), inplace=True)
    train.pattern.fillna(np.float64(10), inplace=True)
    new_train = preprocessing(train)
    train_image = []
    image_not_in_folder = []  # row indices whose image file is missing on disk
    images_path = os.path.join(data_folder, "images")
    for i in tqdm(range(new_train.shape[0])):
        file_path = os.path.join(images_path,new_train.filename[i])
        if os.path.isfile(file_path):
            img = image.load_img(file_path)
            img = resize_img(img)
            print("after resize : ", img.size, "\n")
            img = image.img_to_array(img)
            img = img/255
            train_image.append(img)
        else:
            image_not_in_folder.append(i)
    X = np.array(train_image)
    # NOTE(review): only the FIRST missing-image index is dropped here, and
    # this raises IndexError when no image is missing at all — presumably the
    # intent was to drop every index in image_not_in_folder; verify.
    new_train.drop(new_train.index[image_not_in_folder[0]], inplace=True)
    new_train.reset_index(drop=True, inplace=True)
    y = np.array(new_train.drop(["filename"], axis=1))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.1)
    model = create_model()
    model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test), batch_size=64)
    model.save("./models/cl_multi_label.h5")
def predict_attributes(saved_model_path, image_path):
    """Load a saved model, score one image and print its top-3 labels.

    :param saved_model_path: path to a Keras .h5 model file
    :param image_path: path to the image to classify
    Prints each of the three highest-probability class names with its score.
    """
    model = load_model(saved_model_path)
    img = image.load_img(image_path)
    img = resize_img( img )
    img = image.img_to_array( img )
    img = img / 255
    # Class names in the same column order produced by preprocessing().
    classes = np.asarray(["neck0", "neck1", "neck2", "neck3", "neck4", "neck5", "neck6",
                "neck7", "sleeve_length0", "sleeve_length1", "sleeve_length2", "sleeve_length3", "sleeve_length4",
                "sleeve_length5", "pattern0", "pattern1", "pattern2", "pattern3", "pattern4", "pattern5", "pattern6",
                "pattern7", "pattern8", "pattern9", "pattern10" ])
    proba = model.predict( img.reshape( 1, 350, 350, 3 ) )
    # indices of the three largest probabilities, descending
    top_3 = np.argsort( proba[ 0 ] )[ :-4 :-1 ]
    for i in range( 3 ) :
        print( "{}".format( classes[ top_3[ i ] ] ) + " ({:.3})".format( proba[ 0 ][ top_3[ i ] ] ) )
| akshay772/predict_deep_fashion_attributes | multi_label_CNN.py | multi_label_CNN.py | py | 6,745 | python | en | code | 1 | github-code | 13 |
31666564361 | from pyspark.sql import SparkSession
from pyspark.sql.functions import from_json, col, window, collect_list, count, avg, min, max
from pyspark.sql.types import StructType, StructField, StringType, FloatType, TimestampType, IntegerType
from kafka import KafkaProducer
import json
import sys
topic = sys.argv[1]
spark = SparkSession.builder \
.appName("StockDataStream") \
.getOrCreate()
schema = StructType([
StructField("symbol", StringType()),
StructField("name", StringType()),
StructField("price", FloatType()),
StructField("volume", IntegerType()),
StructField("tstamp", TimestampType())
])
df = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "localhost:9092") \
.option("value.deserializer", "StringDeserializer") \
.option("subscribe", topic) \
.load()
simplified_df = df.selectExpr("CAST(value AS STRING)").toDF("value") \
.select(from_json(col("value"), schema).alias("temp")).select("temp.*")
aggregated_df = simplified_df.withWatermark("tstamp", "30 minutes") \
.groupBy(window("tstamp", "6 minutes"), "symbol") \
.agg(count("*").alias("cnt"),
avg("price").alias("price_avg"),
avg("volume").alias("volume_avg"),
min("price").alias("price_min"),
max("price").alias("price_max"),
min("volume").alias("volume_min"),
max("volume").alias("volume_max"))
def send_simdf_to_kafka(row, topic):
    """Serialize one raw tick Row as JSON and publish it to a Kafka topic.

    NOTE(review): a new KafkaProducer is created and flushed for every single
    row, which is expensive; consider sharing one producer per executor.
    """
    producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
                            value_serializer=lambda x: json.dumps(x).encode('utf-8'))
    # Re-shape the Row into a plain dict; the timestamp is not JSON-serializable,
    # so it is formatted as a string.
    row = {"symbol": row["symbol"], "name": row["name"], "price": row["price"], "volume": row["volume"], "tstamp": row["tstamp"].strftime('%Y-%m-%d %H:%M:%S')}
    producer.send(topic, value = row)
    producer.flush()
def send_aggdf_to_kafka(row, topic):
    """Publish a batch of aggregated window rows to a Kafka topic as one JSON map.

    `row` is a list of Rows (from DataFrame.collect()); empty batches are
    silently skipped. Window bounds are serialized as formatted strings.
    NOTE(review): as above, a fresh KafkaProducer per call is expensive.
    """
    producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
                            value_serializer=lambda x: json.dumps(x).encode('utf-8'))
    if len(row) > 0:
        # Build {batch_index: aggregated-window-dict} for the whole batch.
        d = {}
        for i, j in enumerate(row):
            d[i] = {"symbol": j["symbol"],
                    "start_time": j["window"]["start"].strftime('%Y-%m-%d %H:%M:%S'),
                    "end_time": j["window"]["end"].strftime('%Y-%m-%d %H:%M:%S'),
                    "price_avg": j["price_avg"],
                    "price_min": j["price_min"],
                    "price_max": j["price_max"],
                    "volume_avg": j["volume_avg"],
                    "volume_min": j["volume_min"],
                    "volume_max": j["volume_max"],
                    "cnt": j["cnt"]}
        producer.send(topic, value = d)
        producer.flush()
simple_query = simplified_df \
.writeStream \
.foreach(lambda row: send_simdf_to_kafka(row, "simple_data")) \
.start()
agg_query = aggregated_df \
.writeStream \
.outputMode("update") \
.trigger(processingTime = "6 minutes") \
.foreachBatch(lambda agg_df, epoch_id: send_aggdf_to_kafka(agg_df.collect(), "agg_data")) \
.start()
simple_query.awaitTermination()
agg_query.awaitTermination()
| tushar-bhat/Stock-Market-Analysis-DBT-Project | sparkstream.py | sparkstream.py | py | 3,232 | python | en | code | 1 | github-code | 13 |
27630819230 | import numpy as np
import theano
import theano.tensor as T
import theano.sandbox.cuda as cuda
from theano.misc.pycuda_utils import to_gpuarray
import scikits.cuda
from scikits.cuda import fft
from scikits.cuda import linalg
from scikits.cuda import cublas
import pycuda.gpuarray
import theano.misc.pycuda_init
import string
linalg.init()
# TODO: implement __eq__ and __hash__ correctly
# TODO: Find out if scikits.cuda.fft.fft is destructive - if so we need to specify a destroy_map
# TODO: investigate FFTW compatibility modes. Can probably set this to the fastest setting.
# TODO: investigate the effect of enabling fastmath on FFT performance (how can it be enabled?).
class ScikitsCudaOp(cuda.GpuOp): # base class for shared code between scikits.cuda-based ops
    """Base class for Theano GPU ops implemented via scikits.cuda.

    Instances carry no parameters, so equality/hash are defined per-class;
    this lets Theano's graph optimizer merge duplicate nodes.
    """
    def __eq__(self, other):
        return type(self) == type(other)
    def __hash__(self):
        return hash(type(self))
    def __str__(self):
        return self.__class__.__name__
    def output_type(self, inp):
        # Subclasses must return a CudaNdarrayType factory for their output.
        raise NotImplementedError
    def make_node(self, inp):
        # Force the input onto the GPU as a contiguous float32 array.
        inp = cuda.basic_ops.gpu_contiguous(
            cuda.basic_ops.as_cuda_ndarray_variable(inp))
        assert inp.dtype == "float32"
        return theano.Apply(self, [inp], [self.output_type(inp)()])
class CuFFTOp(ScikitsCudaOp):
    """Batched real-to-complex FFT; the complex output carries a trailing
    length-2 dimension holding (real, imag) parts as float32 pairs."""
    def output_type(self, inp):
        return cuda.CudaNdarrayType(broadcastable=[False] * (inp.type.ndim + 1)) # add one extra dim for real/imag
    def make_thunk(self, node, storage_map, _, _2):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        # Single-element lists act as closure cells so the cuFFT plan is
        # cached across calls and only rebuilt when the input shape changes.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            # construct output shape
            output_shape = list(input_shape)
            output_shape[-1] = output_shape[-1] // 2 + 1 # DFT of real input is symmetric, no need to store redundant coefficients
            output_shape += [2] # extra dimension with length 2 for real/imag
            output_shape = tuple(output_shape)
            z = outputs[0]
            # only allocate if there is no previous allocation of the right size.
            if z[0] is None or z[0].shape != output_shape:
                z[0] = cuda.CudaNdarray.zeros(output_shape)
            input_pycuda = to_gpuarray(inputs[0][0])
            # I thought we'd need to change the type on output_pycuda so it is complex64,
            # but as it turns out scikits.cuda.fft doesn't really care either way and
            # treats the array as if it is complex64 anyway.
            output_pycuda = to_gpuarray(z[0])
            # only initialise plan if necessary
            if plan[0] is None or plan_input_shape[0] != input_shape:
                plan_input_shape[0] = input_shape
                plan[0] = fft.Plan(input_shape[1:], np.float32, np.complex64, batch=input_shape[0])
            fft.fft(input_pycuda, output_pycuda, plan[0])
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk
class CuIFFTOp(ScikitsCudaOp):
    """Batched complex-to-real inverse FFT (drops the trailing real/imag dim).

    The result is UNSCALED — the caller must divide by the transform size
    (conv2d_fft does this; see the comment below about rescaling speed).
    """
    def output_type(self, inp):
        return cuda.CudaNdarrayType(broadcastable=[False] * (inp.type.ndim - 1)) # remove extra real/imag dim
    def make_thunk(self, node, storage_map, _, _2):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        # Closure cells caching the cuFFT plan across calls (rebuilt on shape change).
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            # construct output shape
            output_shape = list(input_shape[:-1]) # chop off the extra length-2 dimension for real/imag
            output_shape[-1] = (output_shape[-1] - 1) * 2 # restore full signal length
            output_shape = tuple(output_shape)
            z = outputs[0]
            # only allocate if there is no previous allocation of the right size.
            if z[0] is None or z[0].shape != output_shape:
                z[0] = cuda.CudaNdarray.zeros(output_shape)
            input_pycuda = to_gpuarray(inputs[0][0])
            # input_pycuda is a float32 array with an extra dimension, but will be
            # interpreted by scikits.cuda as a complex64 array instead.
            output_pycuda = to_gpuarray(z[0])
            # only initialise plan if necessary
            if plan[0] is None or plan_input_shape[0] != input_shape:
                plan_input_shape[0] = input_shape
                plan[0] = fft.Plan(output_shape[1:], np.complex64, np.float32, batch=output_shape[0])
            fft.ifft(input_pycuda, output_pycuda, plan[0]) # , True)
            # strangely enough, enabling rescaling here makes it run very, very slowly.
            # so do this rescaling manually afterwards!
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk
def to_complex_gpuarray(x, copyif=False):
    """
    adapted version of theano.misc.pycuda_utils.to_gpuarray that takes an array with an extra trailing
    dimension of length 2 for real/imaginary parts, and turns it into a complex64 PyCUDA GPUArray.

    :param x: CudaNdarray of float32 whose last dimension has length 2
    :param copyif: when True, non-contiguous input is copied; when False it raises
    :return: complex64 GPUArray sharing (or copying) x's memory, minus the last dim
    """
    if not isinstance(x, cuda.CudaNdarray):
        raise ValueError("We can transfer only CudaNdarray to pycuda.gpuarray.GPUArray")
    else:
        # Check if trailing dimension has length 2
        assert x.shape[-1] == 2
        # check if dtype is float32
        assert x.dtype == 'float32'
        # Check if it is c contiguous
        size = 1
        c_contiguous = True
        for i in range(x.ndim-1, -1, -1):
            # dims of length 1 impose no stride constraint
            if x.shape[i] == 1:
                continue
            if x._strides[i] != size:
                c_contiguous = False
                break
            size *= x.shape[i]
        if not c_contiguous:
            if copyif:
                x = x.copy()
            else:
                raise ValueError("We were asked to not copy memory, but the memory is not c contiguous.")
        # Now x is always c contiguous
        # Reinterpret each (real, imag) float32 pair as one complex64 element.
        px = pycuda.gpuarray.GPUArray(x.shape[:-1], np.complex64, base=x, gpudata=x.gpudata)
        return px
def bptrs(a):
    """
    Pointer array when input represents a batch of matrices.
    taken from scikits.cuda tests/test_cublas.py

    Builds a GPU array of device pointers, one per matrix in the batch,
    as required by the cublas*Batched APIs.
    """
    return pycuda.gpuarray.arange(a.ptr,a.ptr+a.shape[0]*a.strides[0],a.strides[0],
                dtype=cublas.ctypes.c_void_p)
def sc_complex_dot_batched(bx_gpu, by_gpu, bc_gpu, transa='N', transb='N', handle=None):
    """
    uses cublasCgemmBatched to compute a bunch of complex dot products in parallel

    bx_gpu, by_gpu, bc_gpu: 3D complex64 GPUArrays; with transa == transb == 'n'
    their shapes are (N, n, l), (N, l, m) and (N, n, m) and bc receives
    bx . by for each of the N matrix pairs.
    """
    if handle is None:
        handle = scikits.cuda.misc._global_cublas_handle
    assert len(bx_gpu.shape) == 3
    assert len(by_gpu.shape) == 3
    assert len(bc_gpu.shape) == 3
    assert bx_gpu.dtype == np.complex64
    assert by_gpu.dtype == np.complex64
    assert bc_gpu.dtype == np.complex64
    # Get the shapes of the arguments
    bx_shape = bx_gpu.shape
    by_shape = by_gpu.shape
    # Perform matrix multiplication for 2D arrays:
    alpha = np.complex64(1.0)
    beta = np.complex64(0.0)
    # NOTE(review): string.lower() only exists on Python 2; on Python 3 this
    # would need transa.lower() / transb.lower().
    transa = string.lower(transa)
    transb = string.lower(transb)
    if transb in ['t', 'c']:
        N, m, k = by_shape
    elif transb in ['n']:
        N, k, m = by_shape
    else:
        raise ValueError('invalid value for transb')
    if transa in ['t', 'c']:
        N2, l, n = bx_shape
    elif transa in ['n']:
        N2, n, l = bx_shape
    else:
        raise ValueError('invalid value for transa')
    if l != k:
        raise ValueError('objects are not aligned')
    if N != N2:
        raise ValueError('batch sizes are not the same')
    if transb == 'n':
        lda = max(1, m)
    else:
        lda = max(1, k)
    if transa == 'n':
        ldb = max(1, k)
    else:
        ldb = max(1, n)
    ldc = max(1, m)
    # construct pointer arrays needed for cublasCgemmBatched
    bx_arr = bptrs(bx_gpu)
    by_arr = bptrs(by_gpu)
    bc_arr = bptrs(bc_gpu)
    # cuBLAS is column-major, so the operands are passed swapped (by before bx)
    # to obtain a row-major result in bc.
    cublas.cublasCgemmBatched(handle, transb, transa, m, n, k, alpha, by_arr.gpudata,
                lda, bx_arr.gpudata, ldb, beta, bc_arr.gpudata, ldc, N)
class BatchedComplexDotOp(ScikitsCudaOp):
    """
    This version uses cublasCgemmBatched under the hood, instead of
    doing multiple cublasCgemm calls.

    Inputs/outputs are float32 CudaNdarrays with a trailing length-2
    real/imag dimension, reinterpreted on the fly as complex64.
    """
    def make_node(self, inp1, inp2):
        inp1 = cuda.basic_ops.gpu_contiguous(
            cuda.basic_ops.as_cuda_ndarray_variable(inp1))
        inp2 = cuda.basic_ops.gpu_contiguous(
            cuda.basic_ops.as_cuda_ndarray_variable(inp2))
        assert inp1.dtype == "float32"
        assert inp2.dtype == "float32"
        assert inp1.ndim == 4 # (batch, a, b, real/imag)
        assert inp2.ndim == 4
        return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])
    def output_type(self, inp):
        return cuda.CudaNdarrayType(broadcastable=[False] * inp.type.ndim)
    def make_thunk(self, node, storage_map, _, _2):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        def thunk():
            bx = inputs[0]
            by = inputs[1]
            input_shape_x = bx[0].shape # (batch, a, b, 2)
            input_shape_y = by[0].shape # (batch, b, c, 2)
            output_shape = (input_shape_x[0], input_shape_x[1], input_shape_y[2], 2) # (batch, a, c, 2)
            bz = outputs[0]
            # only allocate if there is no previous allocation of the right size.
            if bz[0] is None or bz[0].shape != output_shape:
                bz[0] = cuda.CudaNdarray.zeros(output_shape)
            input_bx_pycuda = to_complex_gpuarray(bx[0])
            input_by_pycuda = to_complex_gpuarray(by[0])
            output_b_pycuda = to_complex_gpuarray(bz[0])
            # fancy native batched version
            sc_complex_dot_batched(input_bx_pycuda, input_by_pycuda, output_b_pycuda)
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk
cufft = CuFFTOp()
cuifft = CuIFFTOp()
batched_complex_dot = BatchedComplexDotOp()
def mult_and_reduce(input_fft_v, filters_fft_v, input_shape=None, filter_shape=None):
    """
    input_fft_v is (b, ic, i0, i1//2 + 1, 2)
    filters_fft_v is (oc, ic, i0, i1//2 + 1, 2)

    Performs the pointwise complex multiply + sum over input channels of the
    convolution theorem as one batched complex dot product per frequency bin,
    returning (b, oc, i0, i1//2 + 1, 2).
    """
    if input_shape is None:
        input_shape = input_fft_v.shape # symbolic
    if filter_shape is None:
        filter_shape = filters_fft_v.shape # symbolic
    b, ic, i0, i1_f, _ = input_shape
    oc = filter_shape[0]
    # reshape to flatten the dimensions that are multiplied elemwise
    input_r = input_fft_v.reshape((b, ic, i0 * i1_f, 2))
    filters_r = filters_fft_v.reshape((oc, ic, i0 * i1_f, 2))
    # shuffle for batched dot product
    input_s = input_r.dimshuffle(2, 0, 1, 3) # (i0 * i1_f, b, ic, 2)
    filters_s = filters_r.dimshuffle(2, 1, 0, 3) # (i0 * i1_f, ic, oc, 2)
    # one (b, ic) x (ic, oc) complex dot per frequency bin
    output_s = batched_complex_dot(input_s, filters_s)
    # shuffle again
    output_r = output_s.dimshuffle(1, 2, 0, 3)
    # reshape to unflatten
    output = output_r.reshape((b, oc, i0, i1_f, 2))
    return output
def conv2d_fft(input, filters, image_shape=None, filter_shape=None):
    """
    expects bc01 input
    performs a valid convolution

    input: (b, ic, i0, i1)
    filters: (oc, ic, f0, f1)
    returns: (b, oc, i0 - f0 + 1, i1 - f1 + 1)
    """
    # use symbolic shapes to compute shape info at runtime if not specified
    if image_shape is None:
        image_shape = input.shape
    if filter_shape is None:
        filter_shape = filters.shape
    b, ic, i0, i1 = image_shape # batch size, input channels, input dim 0, input dim 1
    oc, ic_, f0, f1 = filter_shape # output channels, input channels, filter dim 0, filter dim 1
    # pad filters to input shape (zero-padding so both FFTs share one size)
    filters_padded = T.zeros((oc, ic, i0, i1))
    filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1], filters)
    # reshape for FFT
    input_flat = input.reshape((b * ic, i0, i1))
    filters_flat = filters_padded.reshape((oc * ic, i0, i1))
    # perform FFT
    input_fft_flat = cufft(input_flat) # (b * ic, i0, i1//2 + 1, 2)
    filters_fft_flat = cufft(filters_flat) # (oc * ic, i0, i1//2 + 1, 2)
    # unfold ic dimension
    input_fft_v_shape = (b, ic, i0, i1//2 + 1, 2)
    filters_fft_v_shape = (oc, ic, i0, i1//2 + 1, 2)
    input_fft_v = input_fft_flat.reshape(input_fft_v_shape)
    filters_fft_v = filters_fft_flat.reshape(filters_fft_v_shape)
    # elementwise multiply in the frequency domain + reduce over input channels
    output_fft_s = mult_and_reduce(input_fft_v, filters_fft_v,
                input_shape=input_fft_v_shape, filter_shape=filters_fft_v_shape) # (b, oc, i0, i1//2 + 1, 2)
    # reshape for IFFT
    output_fft_flat = output_fft_s.reshape((b * oc, i0, i1//2 + 1, 2))
    # perform IFFT
    output_flat = cuifft(output_fft_flat) # (b * oc, i0, i1)
    # reshape
    output_circ = output_flat.reshape((b, oc, i0, i1)) # circular!
    # slice because the convolution was circular, we need it to be valid
    output = output_circ[:, :, f0 - 1:, f1 - 1:]
    # rescale manually (CuIFFTOp skips cuFFT's own 1/N scaling for speed)
    output = (1.0 / T.cast(i0 * i1, theano.config.floatX)) * output # allow for the scale factor to move to the gpu
    # output should now be the result of a batched valid convolution of the input with the filters.
    return output
| benanne/theano_fftconv | fftconv.py | fftconv.py | py | 13,289 | python | en | code | 50 | github-code | 13 |
42680145475 | import sys # exiting nicely
import copy # copying KeyCombo's over
import re # parsing files
import evdev # I/O
from functools import reduce
# necessary grabbing of keyboard so that keymasks can be formulated
kb = evdev.InputDevice('/dev/input/event0')
KB_CAPABILITIES = kb.capabilities()[1] # list of ecodes that work on the kb
HIGHEST_SCANCODE = max(KB_CAPABILITIES)
# constants
KEYDOWN = 1 # evdev events use 1 for key being pressed down and
KEYUP = 0 # 0 for key being lifted back up
ECODES = evdev.ecodes.ecodes
# global variables that will get defined later when keys.conf is read
ONE_KEY_MODE = None # True or False
DEFAULT_LABEL = None # a string matching the regex /^[A-Z]$/
MODE_KEYS = {} # a dict of tuples of Key's
CHAR_KEYS = {} # a dict of tuples of Key's
SHIFT_KEY = None # a Key (typically LEFTSHIFT or RIGHTSHIFT)
CTRL_KEY = None # a Key (typically LEFTCTRL or RIGHTCTRL)
ALT_KEY = None # a Key (typically LEFTALT or RIGHTALT)
QUIT_KEY = None # a Key
SPECIAL_CHARS = {} # a dict; keys are str's, values are lists of Keypress'es
# functions
def scancodes_hash(codes):
    """
    Hashes an array of scancodes into a big number.

    The hash is order-insensitive: the codes are treated as a multiset.
    BUGFIX: the original sorted the caller's list in place; sorting a copy
    avoids that side effect on the argument.
    """
    # We want to treat our list of keys as though they are unordered, so we
    # sort them (a sorted copy, so the caller's list is untouched).
    codes = sorted(codes)
    # This converts an array of integers into an integer hash, based on the
    # algorithm provided by Mihai on the Theoretical Computer Science Stack
    # Exchange at the URL:
    # http://cstheory.stackexchange.com/questions/3390/is-there-a-hash-function-for-a-collection-i-e-multi-set-of-integers-that-has
    # NOTE(review): HIGHEST_SCANCODE is not necessarily prime, merely "big
    # enough for our purposes" as the original comment put it.
    big_prime = HIGHEST_SCANCODE
    output = 0
    for index, code in enumerate(codes):
        output += code*big_prime**index
    return int(output)
def keys_hash(keys):
    """
    Hashes an iterable of keyboard Key objects into a big number by
    collecting their scancodes and delegating to scancodes_hash().
    """
    scancodes = [key.code for key in keys]
    return scancodes_hash(scancodes)
class Key:
    """
    Stores the name and evdev scancode of a key.

    Raises Key.KeyDefinitionError when `name` has no KEY_<name> entry in
    evdev's ecodes table.
    """

    class KeyDefinitionError(Exception):
        """Raised when a key name cannot be resolved to a scancode.

        BUGFIX: this used to be re-defined inside the except block on every
        failure, which made it impossible for callers to catch by name.
        """
        pass

    def __init__(self, name):
        self.name = name
        try:
            self.code = ECODES['KEY_'+name]
        except KeyError as err:
            print("Badly defined key of name \"" + name +"\"!")
            # chain the KeyError so the original lookup failure is visible
            raise Key.KeyDefinitionError(name) from err
class Keypress:
    """
    One element of an output sequence: a Key plus what to do with it.
    """

    def __init__(self, key, key_action='PRESS'):
        # key_action is one of 'PRESS' (down then up), 'DOWN', or 'UP'.
        self.key = key
        self.key_action = key_action
class LayoutCombo:
    """A combo of mode keys + char keys that produces an output sequence."""
    def __init__(self, mode_code, char_code, output_code,
                 mode_position=None, char_position=None):
        """Build a combo from layout-file codes.

        mode_code/char_code are '-'/'*' strings selecting keys from the
        labeled key sets; output_code is a single letter, a '+CODE' special
        sequence, or an arbitrary Unicode string.  mode_position and
        char_position select which labeled key set to use; None means
        "use DEFAULT_LABEL".

        BUGFIX: these defaults used to be `=DEFAULT_LABEL`, which was
        evaluated at class-definition time while DEFAULT_LABEL was still
        None.  Defaulting to None and resolving inside set_mode()/set_char()
        gives the same runtime behavior without the misleading binding.
        """
        ## instance variables
        self.mode_keys = []
        self.char_keys = []
        self.output_keypresses = []
        ## build mode keys
        self.set_mode(mode_code, mode_position)
        ## build char keys
        self.set_char(char_code, char_position)
        ## build output keys
        def get_category(output_code):
            """
            Gets the category so that __init__ can determine how to construct
            the output.
            """
            lowercase_letter_matcher = re.compile(r'^[a-z]$')
            lowercase_letter_matches = lowercase_letter_matcher.match(output_code)
            if lowercase_letter_matches is not None:
                return 'ALNUM_LOWER'
            uppercase_letter_matcher = re.compile(r'^[A-Z]$')
            uppercase_letter_matches = uppercase_letter_matcher.match(output_code)
            if uppercase_letter_matches is not None:
                return 'ALNUM_UPPER'
            special_code_matcher = re.compile(r'\+[\w_]+')
            special_code_matches = special_code_matcher.match(output_code)
            if special_code_matches is not None:
                return 'SPECIAL'
            return 'UNICODE_SEQ'
        category = get_category(output_code)
        ## deal with normal uppercase and lowercase letters
        if category in ('ALNUM_LOWER', 'ALNUM_UPPER'):
            # uppercase letters are typed as shift-down, letter, shift-up
            if category[-6:] == '_UPPER':
                self.output_keypresses.append(Keypress(SHIFT_KEY, "DOWN"))
            self.output_keypresses.append(Keypress(Key(output_code.upper())))
            if category[-6:] == '_UPPER':
                self.output_keypresses.append(Keypress(SHIFT_KEY, "UP"))
        ## deal with special characters, i.e. codes; codes must begin with a
        ## plus sign, and have the rest consist of uppercase letters and
        ## underscores
        elif category == 'SPECIAL':
            try:
                self.output_keypresses = list(SPECIAL_CHARS[output_code])
            except KeyError:
                # NOTE(review): line_count is the module-level parser
                # position; it is only meaningful while a file is being read.
                print("Warning: probably an invalid special sequence code on "
                      "line " + str(line_count) + "!")
        ## deal with everything else; it will just type out the given Unicode
        ## characters (via the ctrl-shift-U hex entry method)
        elif category == 'UNICODE_SEQ':
            for char in output_code:
                # convert character to sequence, convert sequence to hex, lop
                # off the '0x' in front
                unicode_sequence = hex(ord(char))[2:]
                # hold down ctrl, shift, and u, and type the numbers of the seq
                self.output_keypresses.extend(
                    tuple((Keypress(CTRL_KEY, "DOWN"),
                           Keypress(SHIFT_KEY, "DOWN"),
                           Keypress(Key('U'), "DOWN"))) +
                    tuple((Keypress(Key(digit.upper())) for digit in unicode_sequence)) +
                    tuple((Keypress(Key('U'), "UP"),
                           Keypress(SHIFT_KEY, "UP"),
                           Keypress(CTRL_KEY, "UP"))
                         )
                    )
    def set_mode(self, mode_code, mode_position=None):
        """Set self.mode_keys from a '-'/'*' mode_code string.
        A None mode_position means DEFAULT_LABEL (resolved now, at call
        time, after keys.conf has been parsed)."""
        self.mode_keys = []
        if mode_position is None:
            mode_position = DEFAULT_LABEL
        mode_keys_map = MODE_KEYS[mode_position]
        for place, sign in enumerate(mode_code):
            if sign == '*':
                key = mode_keys_map[place]
                assert isinstance(key, Key)
                self.mode_keys.append(key)
    def set_char(self, char_code, char_position=None):
        """Set self.char_keys from a '-'/'*' char_code string.
        A None char_position means DEFAULT_LABEL (resolved at call time)."""
        self.char_keys = []
        if char_position is None:
            char_position = DEFAULT_LABEL
        char_keys_map = CHAR_KEYS[char_position]
        for place, sign in enumerate(char_code):
            if sign == '*':
                key = char_keys_map[place]
                assert isinstance(key, Key)
                self.char_keys.append(key)
    def type_out(self, ui):
        """Types the character or character sequence represented by this
        combination on the uinput device `ui`.  'PRESS' emits down+up;
        'DOWN'/'UP' emit only one half (for held modifiers)."""
        EV_KEY = evdev.ecodes.EV_KEY
        for keypress in self.output_keypresses:
            if keypress.key_action == "PRESS":
                ui.write(EV_KEY, keypress.key.code, KEYDOWN)
                print(keypress.key.name+" DOWN")
                ui.write(EV_KEY, keypress.key.code, KEYUP)
                print(keypress.key.name+" UP")
            elif keypress.key_action == "DOWN":
                ui.write(EV_KEY, keypress.key.code, KEYDOWN)
                print(keypress.key.name+" DOWN")
            elif keypress.key_action == "UP":
                ui.write(EV_KEY, keypress.key.code, KEYUP)
                print(keypress.key.name+" UP")
        ui.syn()
class LayoutComboContainer:
    """A data structure that stores the layout combos.

    self.data maps a mode-keys hash to a dict that maps a char-keys hash
    to the LayoutCombo itself.
    """
    def __init__(self):
        self.data = {}
    def store(self, layout):
        """File `layout` under its mode/char key hashes, overwriting any
        previous combo with the same keys."""
        mode_keys_hash = keys_hash(layout.mode_keys)
        char_keys_hash = keys_hash(layout.char_keys)
        # setdefault replaces the original try/except-KeyError dance
        self.data.setdefault(mode_keys_hash, {})[char_keys_hash] = layout
    def retrieve(self, mode_keys, char_keys):
        """Look up a combo; raises KeyError if no such combo is stored.

        Accepts either lists of raw scancodes (ints) or lists of Key's.
        NOTE(review): char_keys must be non-empty, since its first element
        decides which hash to use -- callers guarantee that today.
        """
        # detect whether this is an array of scancodes or keys:
        if isinstance(char_keys[0], int):
            return self.data[scancodes_hash(mode_keys)] \
                            [scancodes_hash(char_keys)]
        elif isinstance(char_keys[0], Key):
            return self.data[keys_hash(mode_keys)][keys_hash(char_keys)]
# process the conf file
# matchers and necessary variables
# A review of the variables keys.conf sets:
# [GeneralSettings]
# ONE_KEY_MODE True or False
# DEFAULT_LABEL a letter between A and Z
# [InputKeys]
# Where "X" stands in for any letter between A and Z:
# X_MODE_KEYS a list of Key's
# X_CHAR_KEYS a list of Key's
# SHIFT_KEY a Key (typically LEFTSHIFT or RIGHTSHIFT)
# CTRL_KEY a Key (typically LEFTCTRL or RIGHTCTRL)
# ALT_KEY a Key (typically LEFTALT or RIGHTALT)
# QUIT_KEY a Key
# [SpecialChars]
# SPECIAL_CHARS a dict; keys are str's, values are lists of Keypress'es
header_line_matcher = re.compile(r'^\[(\w+)\](#.*)?')
# match groups:
# 0: the whole line, if it's a valid header line (unused)
# 1: the header's name
# 2: the comment, if any (unused)
definition_line_matcher = \
re.compile(r'^\s*(\S+)\s*=\s*([^#\n]*[^#\s])?\s*(#.*)?')
# match groups:
# 0: the whole line, if it's a valid definition line (unused)
# 1: the defined variable's name
# 2: the defined variable's value
# 3: the comment, if any (unused)
keys_var_matcher = re.compile(r'([A-Z])_(MODE|CHAR)_KEYS')
# for use in identifying mod and char key set definitions
# match groups:
# 0: the whole variable name (unused)
# 1: the letter associated with the key set
# 2: whether it's a mode or char key set definition
special_char_matcher = re.compile(r'^\+[\w_]+$')
# for use in identifying valid SpecialChars definitions
symbol_matcher = re.compile(r'^((PRESS|DOWN|UP)!)?(\w+)$')
# for use in parsing of SpecialChars definitions
# match groups:
# 0: the whole symbol, if it's a valid symbol (unused)
# 1: the symbol's prefix plus the exclamation point, if any (unused)
# 2: the symbol's prefix, if any
# 3: the symbol's key name
blank_line_matcher = re.compile(r'^\s*(#.*)?')
# match groups:
# 0: the whole line, if it's a valid blank line (unused)
# 1: the comment, if any (unused)
# start processing keys.conf.  Three sections are recognized:
# [GeneralSettings] (ONE_KEY_MODE, DEFAULT_LABEL), [InputKeys] (the labeled
# X_MODE_KEYS / X_CHAR_KEYS sets plus SHIFT/CTRL/ALT/QUIT keys) and
# [SpecialChars] ('+CODE' output sequences).  Unparseable lines are
# reported with their line number.
try:
    conf_file = open('keys.conf', 'r')
except FileNotFoundError:
    print("Missing keys.conf!")
    sys.exit()
line_count = 0
header = None
for line in conf_file:
    line_count += 1
    header_line_matches = header_line_matcher.match(line)
    if header_line_matches != None: # it's a header
        new_header = header_line_matches.group(1)
        if new_header in ("GeneralSettings", "InputKeys", "SpecialChars"):
            header = new_header
        else:
            print("Invalid header in conf file! Error on line " +\
                  str(line_count))
        continue
    definition_line_matches = definition_line_matcher.match(line)
    if definition_line_matches: # it's a definition
        var_name = definition_line_matches.group(1)
        var_value = definition_line_matches.group(2)
        if header == "GeneralSettings":
            if var_name == "ONE_KEY_MODE":
                lowered = var_value.lower()
                if lowered == "true":
                    ONE_KEY_MODE = True
                elif lowered == "false":
                    ONE_KEY_MODE = False
                else:
                    print("Invalid definition in conf file! Error on line " +
                          str(line_count))
            elif var_name == "DEFAULT_LABEL":
                DEFAULT_LABEL = var_value
            else:
                print("Invalid definition in conf file! Error on line " +
                      str(line_count))
        elif header == "InputKeys":
            keys_var_matches = keys_var_matcher.match(var_name)
            if keys_var_matches:
                keyset = keys_var_matches.group(2)
                keyset_label = keys_var_matches.group(1)
                new_keys = tuple((Key(name) for name in var_value.split()))
                if keyset == "MODE":
                    MODE_KEYS[keyset_label] = new_keys
                elif keyset == "CHAR":
                    CHAR_KEYS[keyset_label] = new_keys
                else:
                    print("Invalid definition in conf file! Error on line " +
                          str(line_count))
            elif var_name == "SHIFT_KEY":
                SHIFT_KEY = Key(var_value)
                if var_value not in ("LEFTSHIFT", "RIGHTSHIFT"):
                    print("Warning: weird definition for shift key!")
            elif var_name == "CTRL_KEY":
                CTRL_KEY = Key(var_value)
                if var_value not in ("LEFTCTRL", "RIGHTCTRL"):
                    print("Warning: weird definition for ctrl key!")
            elif var_name == "ALT_KEY":
                ALT_KEY = Key(var_value)
                if var_value not in ("LEFTALT", "RIGHTALT"):
                    print("Warning: weird definition for alt key!")
            elif var_name == "QUIT_KEY":
                QUIT_KEY = Key(var_value)
            else:
                print("Invalid definition in conf file! Error on line " +
                      str(line_count))
        elif header == "SpecialChars":
            if special_char_matcher.match(var_name) != None:
                def break_down_symbol(symbol):
                    """Turn one '[PREFIX!]NAME' symbol into the (Key,
                    key_action) arguments for Keypress."""
                    symbol_matches = symbol_matcher.match(symbol)
                    prefix = symbol_matches.group(2)
                    name = symbol_matches.group(3)
                    # BUGFIX: this used to default to "PRESSED", an action
                    # LayoutCombo.type_out() does not recognize, so
                    # unprefixed symbols in special sequences were silently
                    # never typed.
                    return (Key(name), "PRESS" if prefix is None else prefix)
                key_sequence = tuple(
                    (Keypress(*args) for args in map(break_down_symbol,
                                                     var_value.split())))
                SPECIAL_CHARS[var_name] = key_sequence
            else:
                print("Invalid definition in conf file! Error on line " +\
                      str(line_count))
        continue
    blank_line_matches = blank_line_matcher.match(line)
    if blank_line_matches: # it's blank
        continue
    print("Invalid conf file! Error on line "+str(line_count))
# BUGFIX: the file handle used to be left open
conf_file.close()
### process the layout file
## matchers and necessary variables:
# absolutely disgusting:
mode_line_matcher =\
re.compile(
r'^([A-Z])?([-*]{4,})'
r'((:)|=\s*((S\+|C\+|M\+)*)([A-Z])?([-*]{4,}))\s*(#.*)?')
# match groups:
# 0: the whole line, if it's a valid mode line (unused)
# 1: the laber of the mode keys, if any
# 2: the keys of the mode this is defining
# 3: the rest of the line that isn't a comment (unused)
# 4: the colon, if it's an original mode and not a copied one
# 5: the modifier keys, if it's a copied mode
# 6: the last modifier key, if it's a copied mode (unused)
# 7: the label of the mode it's copying from, if any
# 8: the keys of mode it's copying from, if any
# 9: the comment, if any (unused)
char_line_matcher = re.compile(r'([ A-Z])([-* ]{6}) (.+);\s*(#.*)?')
# match groups:
# 0: the whole line, if it's a valid char line (unused)
# 1: the label of the char keys, if any
# 2: the keys of the char that this is defining
# 3: the char that this is defining
# 4: the comment, if any (unused)
# (blank_line_matcher reused from previous file's processing)
# Parse layout.txt.  A "mode line" either defines a new mode (ends in ':')
# or copies an existing mode with extra modifier keys ('= [S+|C+|M+]...');
# the "char lines" that follow a mode line bind char-key chords to outputs
# for that mode.  NOTE(review): a char line appearing before any mode line
# would reference `mode` before assignment -- assumed not to happen in
# well-formed layout files.
try:
    layout_file = open('layout.txt', 'r')
except FileNotFoundError:
    print("Missing layout.txt!")
    sys.exit()
layout_combos = LayoutComboContainer()
line_count = 0
for line in layout_file:
    line_count += 1
    mode_line_matches = mode_line_matcher.match(line)
    if mode_line_matches != None: # it's a mode line
        mode = mode_line_matches.group(2)
        mode_position = DEFAULT_LABEL
        if mode_line_matches.group(1) != None:
            mode_position = mode_line_matches.group(1)
        if mode_line_matches.group(4) != None:
            # original (non-copied) mode: nothing to build until its char
            # lines arrive
            pass
        else:
            # if this mode is a copy of another
            modifier_keys = []
            if mode_line_matches.group(5) != None:
                # group(5) looks like "S+C+"; the trailing '+' leaves an
                # empty final element, hence the [:-1]
                for modifier in mode_line_matches.group(5).split('+')[:-1]:
                    if modifier == 'S':
                        modifier_keys.append(SHIFT_KEY)
                    elif modifier == 'C':
                        modifier_keys.append(CTRL_KEY)
                    elif modifier == 'M':
                        modifier_keys.append(ALT_KEY)
            # select every layout combo with the same mode keys as ours:
            original_mode = mode_line_matches.group(8)
            original_mode_position = mode_line_matches.group(7)
            dummy_layout_combo = LayoutCombo(original_mode,
                                            '',
                                            '',
                                            original_mode_position)
            for layout_combo in \
                    layout_combos.data[
                        keys_hash(dummy_layout_combo.mode_keys)].values():
                # duplicate each combo under the new mode, sandwiching its
                # output between modifier-down and modifier-up keypresses
                layout_combo_copy = copy.deepcopy(layout_combo)
                layout_combo_copy.set_mode(mode, mode_position)
                layout_combo_copy.output_keypresses[0:0] = \
                    [Keypress(key, "DOWN") for key in modifier_keys]
                layout_combo_copy.output_keypresses.extend(
                    [Keypress(key, "UP") for key in modifier_keys[::-1]])
                layout_combos.store(layout_combo_copy)
        continue
    char_line_matches = char_line_matcher.match(line)
    if char_line_matches != None: # it's a char line
        char_position = DEFAULT_LABEL
        if char_line_matches.group(1) != ' ':
            char_position = char_line_matches.group(1)
        # `mode` and `mode_position` carry over from the last mode line
        layout_combo = LayoutCombo(mode,
                                   char_line_matches.group(2),
                                   char_line_matches.group(3),
                                   mode_position,
                                   char_position)
        layout_combos.store(layout_combo)
        continue
    blank_line_matches = blank_line_matcher.match(line)
    if blank_line_matches != None: # it's blank
        continue
    print("Invalid layout file! Error on line "+str(line_count))
### start keyboarding
# Main event loop: grab the real keyboard, swallow mode/char chord keys
# while they are held, and emit the mapped output through a uinput device
# when a chord is released.  Anything else is forwarded unchanged.
ui = evdev.UInput()
kb.grab() # kb was opened at very beginning of file, to read ecodes out of
mode_keys_active = set([])
char_keys_active = set([])
ALL_MODE_KEYS = set(reduce(lambda x,y: x+y, MODE_KEYS.values()))
ALL_CHAR_KEYS = set(reduce(lambda x,y: x+y, CHAR_KEYS.values()))
# Precompute the scancode sets once; the original rebuilt a list of codes
# on every single key event.
ALL_MODE_CODES = set(key.code for key in ALL_MODE_KEYS)
ALL_CHAR_CODES = set(key.code for key in ALL_CHAR_KEYS)
for event in kb.read_loop():
    if event.type == evdev.ecodes.EV_KEY:
        key_event = evdev.categorize(event)
        if key_event.keystate == KEYDOWN:
            if key_event.scancode == QUIT_KEY.code:
                ui.close()
                kb.ungrab()
                sys.exit()
            if key_event.scancode in ALL_MODE_CODES:
                mode_keys_active.add(key_event.scancode)
            elif key_event.scancode in ALL_CHAR_CODES:
                char_keys_active.add(key_event.scancode)
            else:
                ui.write_event(event)
                ui.syn()
        elif key_event.keystate == KEYUP:
            ## if it's a change in the mode keys:
            if key_event.scancode in ALL_MODE_CODES:
                mode_keys_active.remove(key_event.scancode)
            elif key_event.scancode in ALL_CHAR_CODES:
                ## figure out whether any char keys are being held down
                active_keys = kb.active_keys()
                char_keys_are_active = False
                for key in ALL_CHAR_KEYS:
                    if key.code in active_keys:
                        char_keys_are_active = True
                        break
                ## if no char keys are pressed, type the character
                if not char_keys_are_active:
                    try:
                        pressed_layout_combo = \
                            layout_combos.retrieve(list(mode_keys_active),
                                                   list(char_keys_active))
                        ## type the combo
                        pressed_layout_combo.type_out(ui)
                    except KeyError:
                        # no mapping for this chord; forward the raw key-up
                        ui.write_event(event)
                        ui.syn()
                    # zero out the active char keys
                    char_keys_active = set([])
                else:
                    ui.write_event(event)
                    ui.syn()
            else:
                ui.write_event(event)
                ui.syn()
| mszegedy/claccord | claccord.py | claccord.py | py | 22,788 | python | en | code | 0 | github-code | 13 |
73102066896 | import numpy as np
import pathlib
import glob
import os
import sys
import shutil
import uuid
class Fld:
    """Create folders automatically, stand-alone or alongside PySoftK."""
    def __init__(self):
        pass
    def fxd_name(self, testname, times=None):
        """Create an array of fixed, enumerated folder names.

        Parameters
        ----------
        testname: str
            Base name to be used for every generated name
        times: int, optional
            How many names to generate.  Falls back to ``self.times`` when
            omitted, for backward compatibility with callers that set that
            attribute (the original *only* read self.times, which this class
            never defines, so calls without the attribute always raised
            AttributeError).

        Returns
        -------
        np.ndarray
            Names of the form "<testname>_<i>" for i in range(times)
        """
        if times is None:
            times = self.times
        return np.array([str(testname) + "_" +
                         str(i) for i in range(times)])
    def _make_dir(self, dir_names):
        """Create one folder with the given name in the current working
        directory.

        Parameters
        ----------
        dir_names : str
            Name of the folder that will be created.
        """
        dir_cwd = pathlib.Path().absolute()
        # os.path.join instead of manual "/" string concatenation
        os.mkdir(os.path.join(str(dir_cwd), dir_names))
    def create(self, times=None):
        """Create `times` randomly-named folders in the current working
        directory.

        Parameters
        ----------
        times : int, optional
            Number of folders to create (default 0).
        """
        times = int(0) if times is None else int(times)
        dir_names = np.array([self._unique_name()
                              for i in range(times)])
        list(map(self._make_dir, dir_names))
        # BUGFIX: "Succesfully" typo in the status message
        print("Successfully created: " + str(len(dir_names)) + " folders")
    def _unique_name(self):
        """Return a random 32-character hex name (uuid4).

        Returns
        -------
        name : str
            A unique folder name.
        """
        name = uuid.uuid4().hex
        return name
    def seek_files(self, format_extension):
        """List files with the given extension in the current working
        directory.

        Parameters
        ----------
        format_extension : str
            Extension (without the dot) used to search.

        Returns
        -------
        inp_name : list of pathlib.Path
            The matching files.
        """
        query = "".join(("*", ".", str(format_extension)))
        inp_name = list(pathlib.Path().absolute().glob(query))
        return inp_name
    def _seek_dir(self):
        """Seek and list directories in the current working directory.

        Returns
        -------
        list of str
            Sorted sub-directory paths, excluding any __pycache__
            directories (the original only removed the first one it found,
            and carried an unused local `import re`).
        """
        directory = pathlib.Path().absolute()
        folder_dir = [f.path for f in os.scandir(directory)
                      if f.is_dir() and not f.path.endswith('__pycache__')]
        return sorted(folder_dir)
    def copy_dir(self, source, destination):
        """Move `source` to `destination`.

        NOTE(review): despite the name this *moves* (renames) the file,
        which is exactly what file_to_dir() relies on.

        Parameters
        ----------
        source : str
            Path where the file is located.
        destination : str
            Path the file is moved to.
        """
        pathlib.Path(source).rename(destination)
    def file_to_dir(self, format_extension, num_cores=None):
        """Move every file with the given extension into its own freshly
        created folder, in parallel.

        Parameters
        ----------
        format_extension : str
            Format extension used to seek.
        num_cores : int, optional
            Number of worker processes to use (default 1).  Requires the
            third-party `pathos` package.
        """
        from pathos.pools import ProcessPool
        num_cores = int(1) if num_cores is None else int(num_cores)
        files = self.seek_files(format_extension)
        names = [os.path.basename(i) for i in files]
        # one fresh randomly-named folder per file
        self.create(len(names))
        dirs = self._seek_dir()
        destinations = ["".join((dirs[i], "/", names[i]))
                        for i in range(len(names))]
        with ProcessPool(nodes=int(num_cores)) as pool:
            pool.map(self.copy_dir, files, destinations)
            pool.close()
            pool.join()
| alejandrosantanabonilla/pysoftk | pysoftk/folder_manager/folder_creator.py | folder_creator.py | py | 4,800 | python | en | code | 13 | github-code | 13 |
33250522550 | import time
import tensorflow as tf
import numpy
import muct_input
import os
import matplotlib.pyplot as plt
from tensorflow.python.framework.ops import GraphKeys
from tensorflow.python.framework.ops import convert_to_tensor
from tfobjs import *
# TF session configuration: cap GPU memory at 80% and grow allocations lazily.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
config.gpu_options.allow_growth = True
# Whether to write TensorBoard summaries / checkpoints, and where.
record_log = True
record_log_dir = './log/square_loss_relu_singlefc_conv4_k33/'
data_dir = './muct-data-bin/'
# Global step counter and exponentially decaying learning rate
# (1e-3, multiplied by 0.95 every 500 steps, smooth decay).
global_step = tf.Variable(0, trainable=False, name='global_step')
learning_rate = tf.train.exponential_decay(1e-3, global_step, 500, 0.95, staircase=False, name='learning_rate')
# Input pipelines: batches of 128 (training set, then eval/test set).
[img_train, pts_train] = muct_input.inputs(False, data_dir, 128)
[img_test, pts_test] = muct_input.inputs(True, data_dir, 128)
# ========================
sess = tf.InteractiveSession(config=config)
with tf.variable_scope('input'):
ph_x = tf.placeholder(tf.float32, [None] + img_train.shape.as_list()[1:], name='ph_img')
ph_pts = tf.placeholder(tf.float32, [None] + pts_train.shape.as_list()[1:], name='ph_pts')
is_training = tf.placeholder(tf.bool, name='ph_is_training')
# ===================48-->24
with tf.variable_scope('layer1'):
layer1 = ConvObj()
layer1.set_input(ph_x)
layer1.batch_norm(layer1.conv2d([3, 3], 64, [1, 2, 2, 1]), is_training=is_training)
layer1.set_output(tf.nn.relu(layer1.bn))
# ===================24-->12
with tf.variable_scope('layer2'):
layer2 = ConvObj()
layer2.set_input(layer1.output)
layer2.batch_norm(layer2.conv2d([3, 3], 128, [1, 2, 2, 1]), is_training=is_training)
layer2.set_output(tf.nn.relu(layer2.bn))
# ===================12-->6
with tf.variable_scope('layer3'):
layer3 = ConvObj()
layer3.set_input(layer2.output)
layer3.batch_norm(layer3.conv2d([3, 3], 256, [1, 2, 2, 1]), is_training=is_training)
layer3.set_output(tf.nn.relu(layer3.bn))
# ===================6-->3
with tf.variable_scope('layer4'):
layer4 = ConvObj()
layer4.set_input(layer3.output)
layer4.batch_norm(layer4.conv2d([3, 3], 512, [1, 2, 2, 1]), is_training=is_training)
layer4.set_output(tf.nn.relu(layer4.bn))
# ===================3*3*512-->pts
with tf.variable_scope('layer5'):
layer5 = FcObj()
layer5.set_input(layer4.output)
layer5.fc(pts_train.shape.as_list()[1])
layer5.set_output(layer5.logit)
with tf.variable_scope('loss'):
weight_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
cross_entropy = tf.reduce_mean(tf.square(layer5.output - ph_pts))
total_loss = cross_entropy + weight_loss
accuarcy = tf.reduce_mean(tf.abs(layer5.output - ph_pts))
summary_losses = [tf.summary.scalar('cross_entropy', cross_entropy), tf.summary.scalar('weight_loss', weight_loss)]
# ===================
train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss, global_step=global_step)
# ===================
merged = tf.summary.merge([summary_losses])
if record_log:
train_writer = tf.summary.FileWriter(os.path.join(record_log_dir, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(record_log_dir, 'test'), sess.graph)
sess.run(tf.global_variables_initializer())
# start the input-queue threads that feed img_train / img_test
tf.train.start_queue_runners()
saver = tf.train.Saver()
# ========restore===============
# saver.restore(sess, tf.train.get_checkpoint_state(record_log_dir).model_checkpoint_path)
# tf.train.write_graph(sess.graph_def, record_log_dir, "graph.pb", as_text = False);
# builder = tf.saved_model.builder.SavedModelBuilder("./saved_model")
# builder.add_meta_graph_and_variables(sess, ["tf-muct"])
# builder.save()
# =============================
start_time = time.time()
# Training loop: one optimizer step per iteration; every 100 steps log
# train/test accuracy, every 500 steps checkpoint the model.
# NOTE(review): the loop never terminates on its own, so the code after it
# is unreachable; the process is expected to be stopped manually.
while(True):
    [img_, pts_] = sess.run([img_train, pts_train])
    [ _] = sess.run([train_step], feed_dict={ph_x: img_, ph_pts: pts_, is_training: True})
    if global_step.eval() % 100 == 0 :
        print('step = %d, lr = %g, time = %g min' % (global_step.eval(), learning_rate.eval(), (time.time() - start_time) / 60.0))
        # evaluate fresh batches with is_training=False (batch-norm in inference mode)
        [img_, pts_] = sess.run([img_train, pts_train])
        [summary_train, acc_train] = sess.run([merged, accuarcy], feed_dict={ph_x: img_, ph_pts: pts_, is_training: False})
        [img_, pts_] = sess.run([img_test, pts_test])
        [summary_test, acc_test] = sess.run([merged , accuarcy], feed_dict={ph_x: img_, ph_pts: pts_, is_training: False})
        if record_log:
            train_writer.add_summary(summary_train, global_step.eval())
            test_writer.add_summary(summary_test, global_step.eval())
        print(acc_train)
        print(acc_test)
    if global_step.eval() % 500 == 0 :
        if record_log:
            saver.save(sess, os.path.join(record_log_dir, 'model.ckpt'), global_step.eval())
        pass
print('total time = ', time.time() - start_time, 's')
if record_log:
    train_writer.close();
    test_writer.close();
| htkseason/CNN-Facial-Points-Localization | cnn_muct.py | cnn_muct.py | py | 5,034 | python | en | code | 0 | github-code | 13 |
"""
Picking bowling balls
- Input : the number of bowling balls N (1 <= N <= 1,000) and the maximum
  ball weight M (1 <= M <= 10), followed by each ball's weight K,
  space-separated (1 <= K <= M)
- Output: the number of ways two people can pick balls of different weights
"""
from sys import stdin
n, m = map(int, stdin.readline().split())
balls = list(map(int, stdin.readline().split()))
# histogram that can hold weights 1 through 10 (index 0 unused)
array = [0] * 11
for ball in balls:
    # count how many bowling balls there are of each weight
    array[ball] += 1
result = 0
# handle each weight from 1 to m
for i in range(1, m + 1):
    n -= array[i] # exclude the balls of weight i (the ones A can pick now)
    result += array[i] * n # multiply by the number of balls B can then pick
print(result) | akana0321/Algorithm | ์ด๊ฒ์ด ์ฝ๋ฉํ
์คํธ๋ค with ํ์ด์ฌ/Previous_Question_by_Algorithm_Type/ball_choice.py | ball_choice.py | py | 852 | python | ko | code | 0 | github-code | 13 |
6793433632 | # -*- coding: UTF-8 -*-
#Original code from:
#http://ankitvad.github.io/blog/visualizingwhatsappchathistory.html
##
def hasNumbers(inputString):
    """Return True when inputString contains at least one digit character."""
    for char in inputString:
        if char.isdigit():
            return True
    return False
# Define inputs
whatsappfile="WhatsApp Chat with Misha Imtiaz.txt" #Whatsapp text file as made by whatsapp.
names=['Siddharth','Misha Imtiazโฌ'] #Exactly as it is in the Whatsapp file
#Note that the input text file might have to be manually cleansed depending on what is there.
##
#dates.py
##loading data and splitting dates
for name in names:
x = open(whatsappfile,"r")
me = name+":"
print(me)
my_date = open(name+"_date"+".txt","w")
y = x.readline().decode('utf-8-sig').encode('utf-8')
while y:
if (me in y):
temp = y.split(" ",1)
my_date.write(temp[0]+"\n")
y = x.readline().decode('utf-8-sig').encode('utf-8')
my_date.close();
del(y)
print('Data loaded and split');
##
x.close()
#Cleaning data
#cal_msg.py :
import re
b = open(whatsappfile,"r")
a = open("Messages_"+whatsappfile+".txt","w")
c = open("Exceptions_"+whatsappfile+".txt","w")
y = b.readline().decode('utf-8-sig').encode('utf-8')
while y:
if(y != '\r\n'):
temp = y.split(": ",2)
try:
x = temp[1] #SIDS
x = re.sub('([\:\;][\)\|\\\/dDOoPp\(\'\"][\(\)DOo]?)','',x)
x = re.sub('[?\.#_]','',x)
x = re.sub('[\s]+',' ',x)
a.write(x+"\n")
except:
a.write(" "+"\n")
c.write(temp[0]+'\n')
y = b.readline()
a.close();
c.close();
print("Lines causing exception in input file written to: " + "Exceptions_"+whatsappfile+".txt")
print('Data cleansed and raw messages saved to '+"messages_"+whatsappfile+".txt")
#
x = open(whatsappfile,"r")
for name in names:
#count dates and Export dates to csv
x = open(name+"_date"+".txt",'r')
y = x.read()
from collections import Counter
counted= Counter(y.split('\n'))
outfileme="counted_"+name+".csv"
with open(outfileme,'w') as f:
for k,v in counted.most_common():
f.write( "{} {}\n".format(k,v) )
f.close()
#
| Sid281088/Whatstat | WhatStat_group.py | WhatStat_group.py | py | 2,343 | python | en | code | 0 | github-code | 13 |
33524135788 | #-*- coding:utf-8 -*-
from utils import run_cmd, run_adb_cmd, RunCmdError, list_snapshots, CacheDecorator
from config import getConfig
import os, subprocess
import sys
import time
import re
class AVD:
    """One Android Virtual Device, as reported by `avdmanager list avd`."""

    def __init__(self, name, device, path, target, base, tag, optional_pairs):
        self.name = name
        self.device = device
        self.path = path
        self.target = target
        self.base = base
        self.tag = tag
        self.optional_pairs = optional_pairs
        # An AVD starts out not running; setRunning() attaches a serial.
        self.running = False
        self.serial = None

    def setRunning(self, serial):
        """Mark this AVD as currently running under the given adb serial."""
        self.running = True
        self.serial = serial

    def __repr__(self):
        if self.running:
            return '<AVD[{}] {}, {}, running with {}>'.format(
                self.name, self.device, self.tag, self.serial)
        return '<AVD[{}] {}, {}, available>'.format(
            self.name, self.device, self.tag)

    def getDetail(self):
        """Return a multi-line dump of every interesting attribute."""
        attrs = ('device', 'path', 'target', 'base', 'tag',
                 'optional_pairs', 'running', 'serial')
        lines = ['AVD<{}>.{} = {}'.format(self.name, attr, getattr(self, attr))
                 for attr in attrs]
        return '\n'.join(lines)
@CacheDecorator
def _run_avdmanager_list_avd():
    """Run `avdmanager list avd` and parse the output into raw AVD tuples.

    Returns a list of (name, device, path, target, base, tag,
    optional_pairs) tuples.  Play-Store images are skipped with a warning
    since they cannot be rooted.  Cached, because the installed AVD set is
    not expected to change while we run.
    """
    # get all available devices
    avdmanager = getConfig()['AVDMANAGER_PATH']
    output = run_cmd('{} list avd'.format(avdmanager))
    # parse result for avdmanager list avd
    avd_list = []
    try:
        lines = output.split('\n')
        assert lines[0].startswith('Parsing ')
        i = 1
        while i < len(lines) and lines[i] != '':
            name = re.match(r'Name: (.*)', lines[i].lstrip()).groups()[0]
            device = re.match(r'Device: (.*)', lines[i+1].lstrip()).groups()[0]
            path = re.match(r'Path: (.*)', lines[i+2].lstrip()).groups()[0]
            target = re.match(r'Target: (.*)', lines[i+3].lstrip()).groups()[0]
            base, tag = re.match(r'Based on: (.*) Tag/ABI: (.*)', lines[i+4].lstrip()).groups()
            # now, optional arguments - Skin, Sdcard, Snapshot
            i += 5
            optional_pairs = []
            while i < len(lines) and lines[i] != '' and not lines[i].startswith('----'):
                sep = lines[i].index(':')
                key = lines[i][:sep].strip()
                value = lines[i][sep+1:].strip()
                optional_pairs.append((key, value))
                i += 1
            # BUGFIX: guard against running off the end of the output --
            # the last AVD entry may not be followed by a '----' separator,
            # which used to raise IndexError here.
            if i < len(lines) and lines[i].startswith('----'):
                i += 1
            avd = (name, device, path, target, base, tag, optional_pairs)
            if tag.startswith('google_apis_playstore'):
                print('Warning: AVD[{}] cannot be rooted'.format(name), file=sys.stderr)
            else:
                avd_list.append(avd)
    except Exception as e:
        print('Error: Unexpected form on avdmanager list avd', file=sys.stderr)
        print('Result of avdmanger list avd -->', file=sys.stderr)
        print('//---------------------------//')
        print(output, file=sys.stderr)
        print('//---------------------------//')
        raise
    return avd_list
def get_avd_list(warned=False):
    """Return every configured AVD, marking those that are currently
    running with their adb serial."""
    avd_list = [AVD(*args) for args in _run_avdmanager_list_avd()]
    # fetch currently running devices
    res = run_adb_cmd('devices')
    assert res.count('\n') >= 2, res
    for line in res.split('\n')[1:]:
        if not line:
            continue
        serial = line.split()[0]
        # At now, assert all devices are based on emulator
        avd_name = run_adb_cmd('emu avd name', serial=serial).split()[0]
        # attach the serial to the AVD with that name
        for avd in avd_list:
            if avd.name == avd_name:
                avd.setRunning(serial)
                break
    return avd_list
def kill_emulator(serial=None):
    """Ask the emulator with the given serial to shut down; failures are
    reported but not re-raised."""
    try:
        run_adb_cmd('emu kill', serial=serial)
    except RunCmdError as err:
        print('Exception on emu kill')
        print('RunCmdError: out', err.out)
        print('RunCmdError: err', err.err)
def _check_port_is_available(port):
    """Return True when nothing is listening on `port` (i.e. lsof exits
    non-zero because it found no user of the port)."""
    try:
        run_cmd('lsof -i :{}'.format(port))
    except RunCmdError:
        return True
    return False
def emulator_run_and_wait(avd_name, serial=None, snapshot=None, wipe_data=False, writable_system=False):
    """Start the emulator for ``avd_name`` and block until Android finishes booting.

    Parameters:
        avd_name: name of the AVD to launch (must not already be running).
        serial: console port (int), an ``emulator-NNNN`` string, or None to
            pick the first free even port in 5554..5584.
        snapshot: optional snapshot name passed via ``-snapshot`` (existence
            is not verified -- see inline comment).
        wipe_data: pass ``-wipe-data`` (removes user data, including snapshots).
        writable_system: pass ``-writable-system`` and remount /system rw
            after boot.

    Returns:
        The console port (serial) the emulator was started on.

    Raises:
        RuntimeError: if the AVD is already running, no port is free, or the
            emulator fails to boot (its captured log is echoed first).
    """
    # check avd
    avd_list = get_avd_list()
    if any(a.running and a.name == avd_name for a in avd_list):
        raise RuntimeError('AVD<{}> is already running'.format(avd_name))
    # The emulator process writes stdout/stderr into this pipe; r_fd is only
    # read back to dump the log if boot fails.
    r_fd, w_fd = os.pipe()
    if serial is None:
        # Pairs for port would be one of
        # (5554,5555) (5556,5557) ... (5584,5585)
        serial = 5554
        while True:
            if _check_port_is_available(serial):
                break
            else:
                serial += 2
                if serial > 5584:
                    raise RuntimeError
        assert _check_port_is_available(serial+1) == True
        print('RunEmulator[{}]: Port set to {}, {}'.format(avd_name, serial, serial+1))
    elif type(serial) == str:
        # Accept a full 'emulator-NNNN' serial string; extract the port part.
        serial = re.match(r'emulator-(\w+)', serial).groups()[0]
    else:
        assert type(serial) == int
    # parent process
    emulator_cmd = ['./emulator',
        '-netdelay', 'none',
        '-netspeed', 'full',
        '-ports', '{},{}'.format(serial, serial+1),
        '-avd', avd_name
    ]
    if snapshot is not None and wipe_data:
        print("RunEmulator[{}, {}]: Warning, wipe_data would remove all of snapshots".format(avd_name, serial))
    if snapshot is not None:
        # This option would not raise any exception,
        # even if there is no snapshot with specified name.
        # You should check it with list_snapshots()
        emulator_cmd.append('-snapshot')
        emulator_cmd.append(snapshot)
    if wipe_data:
        # It would wipe all data on device, even snapshots.
        emulator_cmd.append('-wipe-data')
    if writable_system:
        # This option is used when modification on /system is needed.
        # It could be used for modifying /system/lib/libart.so, as MiniTracing does
        emulator_cmd.append('-writable-system')
    # Launched from the SDK 'tools' directory so the relative './emulator'
    # binary resolves; both output streams go into the pipe created above.
    proc = subprocess.Popen(' '.join(emulator_cmd), stdout=w_fd, stderr=w_fd, shell=True,
        cwd = os.path.join(getConfig()['SDK_PATH'], 'tools'))
    os.close(w_fd)
    # Poll the boot animation property until it reports 'stopped' (= booted).
    bootanim = ''
    not_found_cnt = 0
    while not bootanim.startswith('stopped'):
        try:
            print('RunEmulator[{}, {}]: shell getprop init.svc.bootanim'.format(avd_name, serial))
            bootanim = run_adb_cmd('shell getprop init.svc.bootanim', serial=serial)
        except RunCmdError as e:
            # 'not found' shortly after launch presumably means adb has not
            # registered the device yet; tolerate up to 4 such polls.
            if 'not found' in e.err and not_found_cnt < 4:
                not_found_cnt += 1
            else:
                # Boot failed: echo the emulator's own log, kill it, give up.
                print('RunEmulator[{}, {}]: Failed, check following log from emulator'.format(avd_name, serial))
                print('RunCmdError: out', e.out)
                print('RunCmdError: err', e.err)
                kill_emulator(serial=serial)
                handle = os.fdopen(r_fd, 'r')
                while True:
                    line = handle.readline()
                    if not line:
                        break
                    print(line, end='')
                handle.close()
                raise RuntimeError
        print('RunEmulator[{}, {}]: Waiting for booting emulator'.format(avd_name, serial))
        time.sleep(5)
    # turn off keyboard
    run_adb_cmd("shell settings put secure show_ime_with_hard_keyboard 0", serial=serial)
    run_adb_cmd("root", serial=serial)
    if writable_system:
        # Remount /system read-write now that adb runs as root.
        run_adb_cmd("remount", serial=serial)
        run_adb_cmd("shell su root mount -o remount,rw /system", serial=serial)
    os.close(r_fd)
    return serial
def emulator_setup(serial = None):
    """Push a standard set of sample files onto the device's /mnt/sdcard.

    File setup borrowed from Stoat: contacts (.vcf), pictures, text files,
    disk images, PDFs, audio and video clips, and archives. Paths are
    resolved relative to this module's directory.
    """
    folder, filename = os.path.split(__file__)
    # Contacts 1.vcf .. 10.vcf, generated instead of spelled out one by one.
    contact_files = ["./sdcard/{}.vcf".format(no) for no in range(1, 11)]
    other_files = [
        "./sdcard/Troy_Wolf.vcf",
        "./sdcard/pic1.jpg",
        "./sdcard/pic2.jpg",
        "./sdcard/pic3.jpg",
        "./sdcard/pic4.jpg",
        "./sdcard/example1.txt",
        "./sdcard/example2.txt",
        "./sdcard/license.txt",
        "./sdcard/first.img",
        "./sdcard/sec.img",
        "./sdcard/hackers.pdf",
        "./sdcard/Hacking_Secrets_Revealed.pdf",
        "./sdcard/Heartbeat.mp3",
        "./sdcard/intermission.mp3",
        "./sdcard/mpthreetest.mp3",
        "./sdcard/sample.3gp",
        "./sdcard/sample_iPod.m4v",
        "./sdcard/sample_mpeg4.mp4",
        "./sdcard/sample_sorenson.mov",
        "./sdcard/wordnet-3.0-1.html.aar",
        "./sdcard/sample_3GPP.3gp.zip",
        "./sdcard/sample_iPod.m4v.zip",
        "./sdcard/sample_mpeg4.mp4.zip",
        "./sdcard/sample_sorenson.mov.zip",
    ]
    for rel_path in contact_files + other_files:
        print(run_adb_cmd("push {} /mnt/sdcard/".format(os.path.join(folder, rel_path)), serial=serial))
def create_avd(name, sdkversion, tag, device, sdcard):
    """Create (or overwrite, via --force) an AVD with the avdmanager CLI.

    Parameters:
        name: AVD name passed to ``--name``.
        sdkversion: platform string, must start with 'android-' (e.g. 'android-22').
        tag: system-image tag; only 'default' and 'google_apis' are supported here.
        device: device profile name (e.g. 'Nexus 5').
        sdcard: sdcard size string (e.g. '512M').

    Raises:
        AssertionError: on an unsupported ``tag`` or malformed ``sdkversion``.
    Failures of the underlying command are reported on stdout, not raised.
    """
    avdmanager = getConfig()['AVDMANAGER_PATH']
    assert tag in ['default', 'google_apis'], tag
    assert sdkversion.startswith('android-'), sdkversion
    # Only x86 system images are targeted here.
    package = "system-images;{};{};x86".format(sdkversion, tag)
    cmd = "{} create avd --force --name '{}' --package '{}' "\
          "--sdcard {} --device '{}'".format(
            avdmanager,
            name,
            package,
            sdcard,
            device
    )
    try:
        # Command output is not needed; RunCmdError signals failure.
        run_cmd(cmd)
        print('Create avd success')
    except RunCmdError as e:
        print('CREATE_AVD failed')
        if 'Package path is not valid' in e.err:
            # The requested system image is not installed locally.
            print(e.err)
            print('Currently set package path is {}'.format(package))
            print('You would install new package with sdkmanager')
        else:
            print('RunCmdError: out')
            print(e.out)
            print('RunCmdError: err')
            print(e.err)
if __name__ == "__main__":
    '''
    Manage Android emulators from the command line.

    python emulator.py status [--detail]
    python emulator.py run DEVICE_NAME [--port PORT] [--snapshot NAME]
                           [--wipe_data] [--writable_system]
    python emulator.py exec EXPRESSION
    python emulator.py setup SERIAL
    python emulator.py create NAME [--sdkversion V] [--tag T] [--device D] [--sdcard S]
    '''
    import argparse

    parser = argparse.ArgumentParser(description='Manages android emulator')
    subparsers = parser.add_subparsers(dest='func')

    list_parser = subparsers.add_parser('status')
    list_parser.add_argument('--detail', action='store_true')

    run_parser = subparsers.add_parser('run')
    run_parser.add_argument('device_name', action='store', type=str)
    run_parser.add_argument('--port', action='store', default=None)
    run_parser.add_argument('--snapshot', action='store', default=None)
    run_parser.add_argument('--wipe_data', action='store_true')
    run_parser.add_argument('--writable_system', action='store_true')

    arbi_parser = subparsers.add_parser('exec')
    arbi_parser.add_argument('expression', action='store', type=str)

    setup_parser = subparsers.add_parser('setup')
    setup_parser.add_argument('serial', action='store', type=str)

    create_parser = subparsers.add_parser('create')
    create_parser.add_argument('name', action='store')
    create_parser.add_argument('--sdkversion', action='store', default='android-22')
    create_parser.add_argument('--tag', action='store', default='default')
    create_parser.add_argument('--device', action='store', default='Nexus 5')
    create_parser.add_argument('--sdcard', action='store', default='512M')

    args = parser.parse_args()
    if args.func == 'status':
        avd_list = get_avd_list()
        if args.detail:
            for avd in avd_list:
                print(avd.getDetail())
        else:
            for avd in avd_list:
                print(avd)
    elif args.func == 'run':
        # --port may be an integer console port or a full 'emulator-NNNN'
        # serial string; emulator_run_and_wait handles both.
        try:
            port = int(args.port)
        except (TypeError, ValueError):
            port = args.port
        emulator_run_and_wait(args.device_name,
                serial=port,
                snapshot=args.snapshot,
                wipe_data=args.wipe_data,
                writable_system=args.writable_system
                )
    elif args.func == 'exec':
        # SECURITY: exec() runs arbitrary code supplied on the command line.
        # Only use this subcommand in trusted, local debugging sessions.
        print('Executing {}...'.format(args.expression))
        retval = exec(args.expression)  # NOTE: exec() always returns None
        print('Return value for {}: {}'.format(args.expression, retval))
    elif args.func == 'setup':
        try:
            serial = int(args.serial)
        except (TypeError, ValueError):
            serial = args.serial
        emulator_setup(serial = serial)
    elif args.func == 'create':
        create_avd(args.name, args.sdkversion, args.tag, args.device, args.sdcard)
    else:
        # No subcommand given. (A bare `raise` here previously produced the
        # confusing "No active exception to re-raise".)
        parser.error('a subcommand is required: status/run/exec/setup/create')
| sjoon2455/smartMonkey_login | emulator.py | emulator.py | py | 12,803 | python | en | code | 0 | github-code | 13 |
22635526997 | import torch
from torch import Tensor
from torch.nn.parameter import Parameter
class ExtendibleLinear(torch.nn.Linear):
    """A ``torch.nn.Linear`` whose input dimension can be grown after construction.

    Existing weights are preserved; the appended input columns are allocated
    uninitialized (``torch.Tensor``), matching the original allocation style.
    """

    def updateUniverseSize(self, n):
        """Extend the layer to accept ``n`` input features (no-op if ``n <= in_features``).

        The concatenated matrix is re-wrapped in ``Parameter`` and assigned to
        ``self.weight`` (the attribute ``nn.Linear`` actually registers --
        writing ``self.weights`` would create a dead attribute), and
        ``in_features`` is updated so repeated growth starts from the new size.
        """
        f_to_add = n - self.in_features
        if f_to_add <= 0:
            return  # already wide enough
        new_columns = torch.Tensor(self.out_features, f_to_add)
        # torch.cat returns a plain Tensor; re-wrap so the module keeps a
        # registered, trainable Parameter.
        self.weight = Parameter(torch.cat((self.weight.detach(), new_columns), 1))
        self.in_features = n
class TwoLayerNet(torch.nn.Module):
    def __init__(self, D_in, D_out):
        """
        In the constructor we instantiate two nn.Linear modules and assign them as
        member variables.
        """
        # The hidden size is a weighted mean of the input and output sizes,
        # biased towards the output (weights 1 and 5). Floor division is
        # required: nn.Linear needs an integer feature count, and true
        # division (`/`) yields a float that fails at construction.
        w_in = 1
        w_out = 5
        H = (D_in * w_in + D_out * w_out) // (w_in + w_out)
        super(TwoLayerNet, self).__init__()
        self.linear1 = ExtendibleLinear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)
        self.D_out = D_out

    def updateUniverseSize(self, D_in):
        # Delegate input-dimension growth to the extendible first layer.
        self.linear1.updateUniverseSize(D_in)

    def forward(self, x):
        """
        In the forward function we accept a Tensor of input data and we must return
        a Tensor of output data. We can use Modules defined in the constructor as
        well as arbitrary operators on Tensors.
        """
        h_relu = self.linear1(x).clamp(min=0)
        y_pred = self.linear2(h_relu)
        return y_pred
# Demo driver: build a TwoLayerNet over a sliding window of past timesteps.
initial_input_size = 5
initial_output_size = 5
# Number of timesteps back to look
T = 10
I = initial_input_size
# Model input width: one (input + output) pair per remembered timestep.
U = (initial_input_size + initial_output_size) * T
O = initial_output_size
m1 = TwoLayerNet(U, O)
models = [m1]
# Seed data: random inputs and their element-wise sine as targets.
initialInput = torch.rand(I, 1)
initialOutput = torch.sin(initialInput)
for x in range(100):
    # forward pass
    # NOTE(review): m1() is called without an input tensor, but
    # TwoLayerNet.forward requires x, so this raises TypeError. Presumably a
    # window of U values built from initialInput/initialOutput was meant to
    # be fed here; the training loop looks unfinished -- TODO complete.
    y_pred = m1()
| tchordia/ML | transfer/transfer.py | transfer.py | py | 1,677 | python | en | code | 0 | github-code | 13 |
12336173358 | if __name__ == "__main__":
n = int(input())
coins = list(map(lambda i : int(i), input().split(' ')))
coins = sorted(coins, reverse=True)
total_value = 0
coin_count = 0
for value in coins:
total_value += value
count = 0
value = 0
for val in coins:
value += val
count += 1
if value > total_value / 2:
print(count)
break
| Shaharafat/Problem-Solving | codeforces/twins.py | twins.py | py | 377 | python | en | code | 0 | github-code | 13 |
3634448380 | # Given a JSON file with several dictionaries in it, find the value for a given key.
# The key will always be in one of the dictionaries.
# Remember, post an explanation with your code.
# [{"Fruit": "Apples", "Lock": 4, "Code": "Python"},
# {"Market": "Bazaar", "Funny": true, "Math": 0.003, "Fly": false},
# {"Animal": "Aardvark", "7": "Lucky", "true": 1.6}]
import json

# The .json file holds an array of JSON objects (dictionaries).
file_path = "05_names.json"

with open(file_path, 'r') as source:
    records = json.load(source)

print(records)

# Let the user pick one object by index and a key inside it.
index = int(input("Enter Index Number: "))
key = input('Enter Key : ')
print(str(records[index][key]))

# Approach:
#  1. create the .json file containing an array of JSON objects;
#  2. parse it with the standard-library json module;
#  3. look up the requested key in the requested object.
# JSON keys are always strings; the looked-up value is cast to str so any
# value type (number, bool, string) prints uniformly.
41148948963 | from datos import insertRow,readOrder
from pantallas_class import *
from datos import *
def partida():
    """Top-level game loop: keeps starting rounds until the player quits.

    Relies on the project-level ``Partidas`` screen/game object and on the
    ``readOrder``/``insertRow`` persistence helpers from the ``datos`` module.
    """
    game = Partidas()
    higescore=0
    # 'salir' ("exit") becomes True when the player quits from any screen.
    salir=False
    while not salir:
        # Fresh Partidas instance per round (resets score/lives/screens).
        game = Partidas()
        salir=game.menu_pp()
        if not salir:
            salir=game.pantalla_juego(higescore)
            higescore= game.hige_score
            scr=game.score
            # Second stage only if the player still has lives and didn't quit.
            if game.vidas>0 and salir!=True:
                game.pantalla_juego2(higescore)
            if not salir:
                salir=game.game_ov()
                # Leaderboard: always record while it has fewer than 5
                # entries; otherwise only when the score beats a stored one.
                # NOTE(review): assumes readOrder("score") returns rows of
                # (name, score) ordered best-first -- confirm in datos module.
                score=readOrder("score")
                if len(score)<5:
                    nombre=game.user_text
                    insertRow(nombre,scr)
                if len(score)>=5 and (score[0][1]<scr or score[1][1]<scr or score[2][1]<scr or score[3][1]<scr):
                    nombre=game.user_text
                    insertRow(nombre,scr)
| Aisengar/THE_QUEST_NAVE | THE_QUEST/controlador.py | controlador.py | py | 950 | python | es | code | 1 | github-code | 13 |
5438787482 | #!/usr/bin/env python
# coding: utf-8
# # 9์ฅ. ์ง๋ฆฌ ์ ๋ณด ๋ถ์ (1) ์ฃผ์๋ฐ์ดํฐ๋ถ์+๋งต
# # 1. ๋ฐ์ดํฐ ์์ง
# ### ๋ฐ์ดํฐ ํ์ผ ์ฝ์ด์ค๊ธฐ
# In[1]:
import pandas as pd

# --- 1. Data collection --------------------------------------------------
# (Converted from a Jupyter notebook: bare expressions such as CB.head()
#  were cell outputs and have no effect when run as a plain script.)
# NOTE(review): the Korean comments/strings below were mojibake-mangled with
# stray newlines in the source; split tokens have been rejoined so the file
# parses, and the byte sequences of runtime strings are otherwise preserved.
CB = pd.read_csv('./DATA/CoffeeBean.csv', encoding='CP949', index_col=0, header=0, engine='python')
CB.head()  # preview of the loaded data

# --- 2. Data preparation: normalize region names -------------------------
# Split every address into whitespace-separated tokens.
addr = []
for address in CB.address:
    addr.append(str(address).split())

print('๋ฐ์ดํฐ ๊ฐ์ : %d' % len(addr))  # record count
addr

# Map abbreviated / colloquial region names to official administrative
# district names. A lookup table replaces the original if/elif chain;
# unknown first tokens pass through unchanged, as before.
REGION_NAME_MAP = {
    "์์ธ": "์์ธํน๋ณ์",
    "์์ธ์": "์์ธํน๋ณ์",
    "๋ถ์ฐ์": "๋ถ์ฐ๊ด์ญ์",
    "์ธ์ฒ": "์ธ์ฒ๊ด์ญ์",
    "๊ด์ฃผ": "๊ด์ฃผ๊ด์ญ์",
    "๋์ ์": "๋์ ๊ด์ญ์",
    "์ธ์ฐ์": "์ธ์ฐ๊ด์ญ์",
    "์ธ์ข์": "์ธ์ขํน๋ณ์์น์",
    "๊ฒฝ๊ธฐ": "๊ฒฝ๊ธฐ๋",
    "์ถฉ๋ถ": "์ถฉ์ฒญ๋ถ๋",
    "์ถฉ๋จ": "์ถฉ์ฒญ๋จ๋",
    "์ ๋ถ": "์ ๋ผ๋ถ๋",
    "์ ๋จ": "์ ๋ผ๋จ๋",
    "๊ฒฝ๋ถ": "๊ฒฝ์๋ถ๋",
    "๊ฒฝ๋จ": "๊ฒฝ์๋จ๋",
    "์ ์ฃผ": "์ ์ฃผํน๋ณ์์น๋",
    "์ ์ฃผ๋": "์ ์ฃผํน๋ณ์์น๋",
    "์ ์ฃผ์": "์ ์ฃผํน๋ณ์์น๋",
}

addr2 = []
for i in range(len(addr)):
    addr[i][0] = REGION_NAME_MAP.get(addr[i][0], addr[i][0])
    addr2.append(' '.join(addr[i]))
addr2

addr2 = pd.DataFrame(addr2, columns=['address2'])
addr2

# Append the normalized address column to the original frame and save it.
CB2 = pd.concat([CB, addr2], axis=1)
CB2.head()

CB2.to_csv('./DATA/CoffeeBean_2.csv', encoding='CP949', index=False)

# --- 3. Data modeling: put the stores on a map ---------------------------
# folium is a third-party map-visualization library:
#get_ipython().system('pip install folium')
import folium

# Sanity-check folium with a map centered on Sungnyemun (Namdaemun), Seoul.
map_osm = folium.Map(location=[37.560284, 126.975334], zoom_start=16)
map_osm.save('./DATA/map.html')

# Reload the cleaned CSV produced above.
CB_file = pd.read_csv('./DATA/CoffeeBean_2.csv', encoding='cp949', engine='python')
CB_file.head()

# GPS coordinates produced externally with the Geocoder-Xr tool.
CB_geoData = pd.read_csv('./DATA/CB_geo.shp_2.csv', encoding='cp949', engine='python')
len(CB_geoData)

map_CB = folium.Map(location=[37.560284, 126.975334], zoom_start=15)

# One marker per coffee shop. folium expects location=[lat, lon]; the
# '์๋'/'๊ฒฝ๋' columns are assumed to be latitude/longitude -- TODO confirm
# against the Geocoder-Xr output.
for i, store in CB_geoData.iterrows():
    folium.Marker(location=[store['์๋'], store['๊ฒฝ๋']],
                  popup=store['store'],
                  icon=folium.Icon(color='red', icon='star')).add_to(map_CB)

map_CB.save('./DATA/map_CB.html')

# Open the result in a browser (Windows-specific Chrome path).
import webbrowser
webbrowser.open('C:/BigData/DATA/map_CB.html')
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open('C:/BigData/DATA/map_CB.html')
| devamateur/bigdata | week08/09์ฅ_์ฃผ์๋ฐ์ดํฐ๋ถ์.py | 09์ฅ_์ฃผ์๋ฐ์ดํฐ๋ถ์.py | py | 3,768 | python | ko | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.