seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
2033008070 | # N,M이 모두 1일 경우: 계싼 필요 X
# N 또는 M이 1일 경우: 그 방향만 확인
# 둘다 1이 아닐 경우: 모두 확인
# SWEA 14413: decide whether a partially colored grid can be completed as a
# checkerboard.  Every non-'?' cell pins the color required for its
# checkerboard parity class ((x+y) % 2).  The grid is completable unless one
# parity class demands two different colors, or both classes demand the same
# single color.
T = int(input())
for i in range(1, T + 1):
    N, M = map(int, input().split())
    board = [input() for _ in range(N)]
    odd_colors = set()   # colors forced on cells with (x+y) odd
    even_colors = set()  # colors forced on cells with (x+y) even
    for y in range(N):
        for x in range(M):
            color = board[y][x]
            if color == '?':
                continue
            if (x + y) % 2 == 0:
                even_colors.add(color)
            else:
                odd_colors.add(color)
    if len(odd_colors) == 2 or len(even_colors) == 2 or (len(odd_colors) == 1 and odd_colors == even_colors):
        print(f"#{i} impossible")
    else:
        print(f"#{i} possible")
| YeonHoLee-dev/Python | SW Expert Academy/Lv.3/[14413] 격자판 칠하기.py | [14413] 격자판 칠하기.py | py | 721 | python | ko | code | 0 | github-code | 13 |
33266633040 | # -*- coding: utf-8 -*-
from Users.models import CustomUser
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
@login_required(login_url="/users/login/")
def index(request):
    # Render the landing page; unauthenticated users are redirected to the
    # login view by the decorator above.
    return render(request, 'index.html')
@login_required(login_url="/users/login/")
def UserList(request):
result = CustomUser.objects.all().values('id',
'username',
'nickname',
'email',
'is_active',
'phone',
'department',
'last_login',
'groups__name')
return render(request, 'users/users_list.html', {'result': result}) | Donyintao/SoilServer | Users/views.py | views.py | py | 772 | python | en | code | 7 | github-code | 13 |
15184178665 | import os
from robot.libraries import BuiltIn
def getSuiteSrcDir():
    """Return the directory that contains the current Robot Framework suite.

    Resolves ``${SUITE SOURCE}``: for a suite file, its parent directory is
    returned; for a suite directory, the directory itself.  Returns '' (and
    logs an error) when the source path does not exist.
    """
    from robot.api import logger as log
    builtin = BuiltIn.BuiltIn()
    suite_source = builtin.get_variable_value('${SUITE SOURCE}')
    if os.path.isdir(suite_source):
        return suite_source
    if os.path.isfile(suite_source):
        return os.path.dirname(suite_source)
    log.error("Suite source directory not found; src={0}".format(suite_source))
    return ''
def hi():
return 'Hello' | SawarkarMayur/RobotTestWork | lib/util.py | util.py | py | 534 | python | en | code | 0 | github-code | 13 |
22957025125 | import random
import time
from heapq import heappop
from heapq import heappush
from utils import PositionManager as PosMan
class NoPathFoundException(Exception):
    """Raised when an agent cannot find any path toward its target."""
class Agent:
    """Base class for grid agents that walk from a position to a target."""

    def __init__(self, id_, position, target, env):
        self.id = id_
        self.position = position
        self.target = target
        self.env = env
        self.end = False    # set once the target has been reached
        self.stuck = False  # set when no path could be found

    @staticmethod
    def get_closer(source, target, n, get_neighbors):
        """Pick a random neighbor of *source* that is closer to *target*."""
        candidates = PosMan.get_closers_neighbors(source, target, n, get_neighbors)
        if not candidates:
            raise NoPathFoundException
        return random.choice(candidates)

    @staticmethod
    def get_farther(source, target, n, get_neighbors):
        # NOTE(review): despite its name this delegates to
        # get_closers_neighbors, exactly like get_closer — confirm intended.
        candidates = PosMan.get_closers_neighbors(source, target, n, get_neighbors)
        if not candidates:
            raise NoPathFoundException
        return random.choice(candidates)

    def move(self, source, target):
        """One movement step; subclasses implement the actual strategy."""

    def run(self):
        """Step toward the target every 0.2s until reached or blocked."""
        while not self.end:
            time.sleep(0.2)
            if not self.stuck:
                try:
                    self.move(self.position, self.target)
                except NoPathFoundException:
                    self.stuck = True
                if self.position == self.target:
                    self.end = True

    def __str__(self):
        return str(self.id)
class SimpleAgent(Agent):
    """Agent that greedily steps toward its target on every move."""

    def __init__(self, id_, position, target, env):
        super().__init__(id_, position, target, env)

    def move(self, source, target):
        step = self.get_closer(self.position, self.target, self.env.n, self.env.get_neighbors)
        self.env.next_move(self, step)
class DijkstraAgent(Agent):
    """Agent that follows shortest paths computed with Dijkstra's algorithm."""

    def __init__(self, id_, position, target, env):
        super().__init__(id_, position, target, env)

    @staticmethod
    def dijkstra(source, target, get_neighbors):
        """Return the shortest path from *source* to *target*, inclusive.

        Raises NoPathFoundException when the target is unreachable.
        """
        predecessor = {}
        visited = set()
        distance = {source: 0}
        frontier = [(0, source)]  # heap of (distance[node], node) couples
        while frontier:
            node_dist, node = heappop(frontier)
            if node in visited:
                continue
            visited.add(node)
            for neighbor in get_neighbors(node):
                if neighbor in visited:
                    continue
                candidate = node_dist + 1
                # missing entries compare as "worse than candidate"
                if distance.get(neighbor, candidate + 1) > candidate:
                    distance[neighbor] = candidate
                    heappush(frontier, (candidate, neighbor))
                    predecessor[neighbor] = node
        # Walk the predecessor chain back from the target to rebuild the path.
        reversed_path = [target]
        node = target
        while node != source:
            if node not in predecessor:
                raise NoPathFoundException
            node = predecessor[node]
            reversed_path.append(node)
        return list(reversed(reversed_path))

    def move(self, source, target):
        path = self.dijkstra(source, target, self.env.get_neighbors)
        if len(path) < 2:
            raise NoPathFoundException
        self.env.next_move(self, path[1])
class Message:
    """Base class for inter-agent messages, ranked by priority."""

    def __init__(self, priority):
        self.priority = priority


class ACKMessage(Message):
    """Acknowledgement that a previously received message was handled."""


class GiveWayMessage(Message):
    """Request that the receiver clears the given chain of positions."""

    def __init__(self, chain, priority):
        super().__init__(priority)
        self.chain = chain

    def __str__(self):
        return "{chain: %a with priority: %i}" % (self.chain, self.priority)

    def __repr__(self):
        return self.__str__()


class LetsTurnMessage(Message):
    """Request that the receiver steps aside so the sender can rotate past."""
class Messenger:
    """Mailbox mixin storing one pending message per sender.

    When several messages arrive from the same sender, only the most urgent
    one (lowest priority value) is kept.
    """

    def __init__(self, id_):
        self.id = id_
        self.received_messages = {}
        self.handler = {ACKMessage: self.ack_handler}

    def receive(self, sender, message):
        # Keep the most urgent message per sender (lower value wins).
        current = self.received_messages.get(sender)
        if current is None or current.priority > message.priority:
            self.received_messages[sender] = message

    def handle_messages(self):
        if not self.received_messages:
            return
        for sender, message in list(self.received_messages.items()):
            for msg_type, callback in self.handler.items():
                if isinstance(message, msg_type):
                    callback(sender)

    def send(self, receiver, message):
        receiver.receive(self, message)

    def ack(self, receiver):
        self.send(receiver, ACKMessage(self.id))
        self.received_messages.pop(receiver)

    def ack_handler(self, sender):
        self.received_messages.pop(sender)
class Actuator:
    """Base movement strategy driving a single agent toward a goal."""

    def __init__(self, agent):
        self.agent = agent
        self.can_end = False

    def do(self, source, target):
        """Perform one movement step; overridden by subclasses."""
class DirectWayActuator(Actuator):
    """Drives the agent straight to its own target cell."""

    def __init__(self, agent):
        super().__init__(agent)
        self.can_end = False

    def do(self, source, target):
        # Already there: flag completion instead of moving.
        if source == target:
            self.can_end = True
            return True
        return self.agent.move_or_send_give_way(source, target, self.agent.id)
class N2Actuator(Actuator):
    """Drives the agent to an intermediate border cell instead of its target."""

    def __init__(self, agent, target_bis):
        super().__init__(agent)
        self.target_bis = target_bis

    def do(self, source, target):
        # The intermediate cell, not the final target, ends this phase.
        if source == self.target_bis:
            self.can_end = True
            return True
        return self.agent.move_or_send_give_way(source, self.target_bis, self.agent.id)
class N1Actuator(N2Actuator):
    """Border actuator that, once staged, triggers a rotation with its pair."""

    def __init__(self, agent, target_bis, master_id):
        super().__init__(agent, target_bis)
        self.master_id = master_id

    def do(self, source, target):
        if source == target:
            self.can_end = True
            return True
        # Staged at the intermediate cell: ask the pair to rotate into place.
        if source == self.target_bis:
            return self.agent.move_or_send_lets_turn(target)
        return self.agent.move_or_send_give_way(source, self.target_bis, self.master_id)
class InteractiveAgent(DijkstraAgent, Messenger):
    """Shortest-path agent that resolves conflicts by messaging other agents.

    Agents act turn-wise: one only moves when ``env.activeAgent`` equals its
    id, and passes the turn on by adding ``self.next`` to ``env.activeAgent``.
    When blocked, it asks other agents to give way (GiveWayMessage) or to
    rotate along the border (LetsTurnMessage) instead of giving up.
    """

    def __init__(self, id_, position, target, env):
        DijkstraAgent.__init__(self, id_, position, target, env)
        Messenger.__init__(self, id_)
        self.handler[GiveWayMessage] = self.give_way_handler
        self.handler[LetsTurnMessage] = self.lets_turn_handler
        # True while a GiveWayMessage we sent has not been ACKed yet.
        self.waiting = False
        # Offset added to env.activeAgent when passing the turn on.
        self.next = 1
        self.actuator = self.create_actuator()

    def create_actuator(self):
        # Choose a movement strategy from where this agent's final slot (its
        # id) lies in the n x n grid: inner cells go straight to the target;
        # cells in the last two rows/columns first aim at an intermediate
        # border cell (N2), then the N1 agent rotates the pair into place.
        n = self.env.n
        target_2d = PosMan.pos_2D(self.id, n)
        if (target_2d[0] < n - 2 and target_2d[1] < n - 2) or (target_2d[0] == n-1 and target_2d[1] == n - 2):
            return DirectWayActuator(self)
        elif target_2d[0] >= n - 2 > target_2d[1]:
            if target_2d[0] == n - 2:
                target_bis = PosMan.pos_1D(n - 1, (self.id // n), n)
                return N2Actuator(self, target_bis)
            elif target_2d[0] == n - 1:
                target_bis = PosMan.pos_1D(n - 1, (self.id // n) + 1, n)
                return N1Actuator(self, target_bis, self.id - 1)
        elif target_2d[1] >= n - 2:
            if target_2d[1] == n - 2:
                # turn order continues column-wise in this border region
                self.next = n
                target_bis = PosMan.pos_1D((self.id % n), n - 1, n)
                return N2Actuator(self, target_bis)
            elif target_2d[1] == n - 1:
                self.next = -n + 1
                target_bis = PosMan.pos_1D((self.id % n) + 1, n - 1, n)
                return N1Actuator(self, target_bis, self.id - n)

    def receive(self, sender, message):
        super().receive(sender, message)

    def send(self, receiver, message):
        # Sending a GiveWayMessage blocks us until the receiver ACKs it.
        if isinstance(message, GiveWayMessage):
            self.waiting = True
        super().send(receiver, message)

    def move(self, source, target):
        did = self.actuator.do(source, target)
        if not did:
            raise NoPathFoundException

    def run(self):
        while True:
            if self.received_messages:
                self.handle_messages()
            elif (not (self.stuck or self.waiting or self.end)) and self.env.activeAgent == self.id:
                try:
                    self.move(self.position, self.target)
                except NoPathFoundException:
                    self.stuck = True
                if self.actuator.can_end:
                    if self.position == self.target:
                        self.end = True
                        self.stuck = False
                # pass the turn on unless we are awaiting an ACK
                if self.env.activeAgent == self.id and not self.waiting:
                    self.env.activeAgent += self.next

    def ack_handler(self, sender):
        # A received ACK unblocks both the waiting and the stuck state.
        super().ack_handler(sender)
        self.waiting = False
        self.stuck = False

    def give_way_handler(self, sender):
        # Ignore requests while we are waiting for an ACK ourselves.
        if self.waiting:
            return
        message = self.received_messages[sender]
        # We must stand on the head of the chain and have somewhere to go.
        if self.position != message.chain[0] or len(message.chain) < 2:
            self.ack(sender)
            return
        pos = message.chain[1]
        if self.env.is_empty(pos):
            self.env.next_move(self, pos)
            self.ack(sender)
        else:
            # Next cell occupied: forward the request down the chain.
            try:
                receiver = self.env.agents[pos]
                path = message.chain[1:]
                self.send(receiver, GiveWayMessage(path, message.priority))
            except KeyError:
                pass

    def lets_turn_handler(self, sender):
        if self.waiting:
            return

        def empty_handler(pos):
            return self.env.next_move(self, pos)

        def fill_handler(pos):
            # Only lower-priority (higher id) messengers not already
            # messaging us may be asked to give way.
            try:
                receiver = self.env.agents[pos]
                if isinstance(receiver, Messenger) and receiver not in self.received_messages \
                        and receiver.id > self.id:
                    return self.send_give_way(receiver, self.id)
            except KeyError:
                pass
            return False

        closers = PosMan.get_closers_neighbors(self.position, self.target, self.env.n, self.env.get_all_neighbors)
        old_pos = self.position
        self.do_for_empty_or_then_fill(closers, empty_handler, fill_handler)
        # Only ACK once we actually managed to move out of the way.
        if old_pos != self.position:
            self.ack(sender)

    def send_give_way(self, receiver, priority):
        def is_servant(pos):
            # An occupied cell may be traversed only by messengers of lower
            # priority (higher id) not already messaging us; empty cells are
            # always usable.
            if pos in self.env.agents:
                agent = self.env.agents[pos]
                return isinstance(agent, Messenger) and agent not in self.received_messages \
                    and agent.id > priority and agent.id != self.id

            else:
                return True

        def get_neighbors(pos):
            return self.env.get_neighbors(pos, is_servant)

        closers_empty = PosMan.get_closers_empty(receiver.position, self.env.n, self.env.is_empty)
        closer_empty = random.choice(closers_empty)
        try:
            # Plan an escape route for the receiver and ask it to follow it.
            path = self.dijkstra(receiver.position, closer_empty, get_neighbors)
            self.send(receiver, GiveWayMessage(path, priority))
            return True
        except NoPathFoundException:
            return False

    def move_or_send_lets_turn(self, target):
        if self.env.is_empty(target):
            self.env.next_move(self, target)
            return True
        # NOTE(review): looks up self.env.agents[self.target] rather than the
        # *target* parameter; callers always pass target == self.target so the
        # behavior matches — confirm before reusing with other targets.
        try:
            receiver = self.env.agents[self.target]
            if isinstance(receiver, Messenger) and receiver not in self.received_messages:
                self.send(receiver, LetsTurnMessage(self.id))
                return True
        except KeyError:
            pass
        return False

    def do_for_empty_or_then_fill(self, positions, empty_handler, fill_handler):
        """Try *empty_handler* on empty positions (random order) and, failing
        that, *fill_handler* on occupied ones; return True on first success."""
        empty_positions = []
        fill_positions = []
        for pos in positions:
            if self.env.is_empty(pos):
                empty_positions.append(pos)
            else:
                fill_positions.append(pos)
        while empty_positions:
            i = random.randint(0, len(empty_positions) - 1)
            pos = empty_positions[i]
            if empty_handler(pos):
                return True
            empty_positions.pop(i)
        while fill_positions:
            i = random.randint(0, len(fill_positions) - 1)
            pos = fill_positions[i]
            if fill_handler(pos):
                return True
            fill_positions.pop(i)
        return False

    def move_or_send_give_way(self, source, target, priority):
        def empty_handler(pos):
            return self.env.next_move(self, pos)

        def fill_handler(pos):
            try:
                receiver = self.env.agents[pos]
                if isinstance(receiver, Messenger) and receiver not in self.received_messages \
                        and receiver.id > priority and receiver.id != self.id:
                    return self.send_give_way(receiver, priority)
            except KeyError:
                pass
            return False

        closers = PosMan.get_closers_neighbors(source, target, self.env.n, self.env.get_all_neighbors)
        return self.do_for_empty_or_then_fill(closers, empty_handler, fill_handler)
| Dramine/TaquinSolver | agents.py | agents.py | py | 13,355 | python | en | code | 0 | github-code | 13 |
25123618181 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 22:34:58 2023
@author: Themanwhosoldtheworld
https://leetcode.com/problems/verifying-an-alien-dictionary/
"""
class Solution:
def isAlienSorted( self, words, order):
LexIndex = {}
for i in range(len(order)):
LexIndex[order[i]] = i
for word1, word2 in zip(words, words[1:]):
#check for fullsubstring
if (len(word1) > len(word2) and word1[:len(word2)]==word2):
return False
for c1, c2 in zip(word1, word2):
if LexIndex[c1] < LexIndex[c2]:
break
elif LexIndex[c1] > LexIndex[c2]:
return False
return True | themanwhosoldtheworld7/LeetCode-Python | Alien Dictionary.py | Alien Dictionary.py | py | 795 | python | en | code | 0 | github-code | 13 |
21476561909 | from PyQt5.QtWidgets import QWidget, QApplication, QTimeEdit, QVBoxLayout
import sys
from PyQt5 import QtGui, QtCore
class Window(QWidget):
    """Main window demonstrating a preset QTimeEdit widget."""

    def __init__(self):
        super().__init__()
        # NOTE(review): assigning self.height/self.width shadows the
        # QWidget.height()/width() methods — kept for behavior parity.
        self.title = "This is first thing"
        self.height = 700
        self.width = 1100
        self.top = 100
        self.left = 200
        self.iconName = "plioky.ico"
        self.init_window()

    def init_window(self):
        """Configure icon, title and geometry, build content, then show."""
        self.setWindowIcon(QtGui.QIcon(self.iconName))
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.my_time()
        self.show()

    def my_time(self):
        """Build a layout holding a large QTimeEdit preset to 13:15:20."""
        layout = QVBoxLayout()
        preset = QtCore.QTime()
        preset.setHMS(13, 15, 20)
        editor = QTimeEdit()
        editor.setFont(QtGui.QFont("Sanserif", 30))
        editor.setTime(preset)
        layout.addWidget(editor)
        self.setLayout(layout)
if __name__ == "__main__":
myapp = QApplication(sys.argv)
window = Window()
sys.exit(myapp.exec()) | patsonev/PyQt5 | QTimeEdit.py | QTimeEdit.py | py | 1,100 | python | en | code | 0 | github-code | 13 |
16518187129 | #!/usr/local/bin/python
"""Solution to the 05-03-2015 NPR puzzle for pyshortz blog.
@authors: Leiran Biton, John O'Brien
"""
from test.test_getargs2 import Keywords_TestCase
class Solution(object):
    """Solution engine for the week's problem.

    Searches for a kitchen-item word and a leftover word which, joined,
    spell a single food word (the reversed two-word kitchen phrase).
    """

    # imports (kept class-level as in the original design)
    from nltk.corpus import words, wordnet

    # attributes
    problem = """Think of a common two word phrase for something
    you might see in a kitchen. Reverse the words--that is, put
    the second word in front of the first--and you'll name a food,
    in one word, that you might prepare in a kitchen.
    What is it?
    """
    allwords = words.words()

    # methods
    def __init__(self
                 ,verbose=False
                 ,DEBUG=False
                 ):
        """Create the engine.

        Parameters
        ----------
        verbose : bool
            print progress information while searching
        DEBUG : bool
            stored for callers; unused by the engine itself
        """
        self.verbose = verbose
        self.DEBUG = DEBUG
        if self.verbose:
            print(self.problem)

    def word_check(self, word, larger_word):
        """Return the leftover after removing *word* from *larger_word*.

        The leftover must be a dictionary word longer than two characters
        with a WordNet entry of its own; otherwise return None.
        """
        word_index = larger_word.find(word)
        leftover_word = (larger_word[:word_index]
                         + larger_word[word_index + len(word):])
        if leftover_word in self.allwords and len(leftover_word) > 2:
            for syn in self.wordnet.synsets(leftover_word):
                if syn.name().split(".")[0] == leftover_word:
                    return leftover_word
        return None

    def word_switch(self, phrase):
        """Return *phrase* with its words reversed and spaces removed."""
        return "".join(reversed(phrase.split()))

    def _populate_matching(self, keywords, list_name):
        """Return deduplicated words whose WordNet definition mentions any
        of *keywords*; shared by populate_kitchen_items / populate_food."""
        matches = []
        if self.verbose:
            print("populating %s list using nltk..." % list_name)
        for word in self.allwords:
            for syn in self.wordnet.synsets(word):
                if any(kw.lower() in syn.definition().lower()
                       for kw in keywords):
                    matches.append(syn.name().split(".")[0].lower())
                    if self.verbose:
                        print(".", end="", flush=True)
        if self.verbose:
            # count is reported before deduplication, as before
            print("\n...added %i words to %s" % (len(matches), list_name))
        return list(set(matches))

    def populate_kitchen_items(self, keywords=None):
        """Populate self.kitchen_items with words matching *keywords*.

        Note: the original signature used a mutable default ([]); None is
        used instead and treated as an empty keyword list.
        """
        self.kitchen_items = self._populate_matching(keywords or [],
                                                     "kitchen_items")

    def populate_food(self, keywords):
        """Populate self.food with words matching *keywords*."""
        self.food = self._populate_matching(keywords, "food")

    def build_candidates(self):
        """Compare the food list against the kitchen list and collect
        (kitchen_item, leftover_word, food) triples in self.candidates."""
        self.candidates = set()
        for k_item in self.kitchen_items:
            for food in self.food:
                if (food.startswith(k_item) or food.endswith(k_item)) \
                        and k_item != food:
                    other_word = self.word_check(k_item, food)
                    if other_word:
                        self.candidates.add((k_item, other_word, food))
        if self.verbose:
            print("found %i possible partial solutions" % len(self.candidates))
if __name__ == "__main__":
    # Keywords whose presence in a WordNet definition marks a kitchen item.
    k_kws = [
        "kitchen", "utensil", "cabinet", "knife", "cupboard", "cutlery",
        "flatware", "silverware", "chef", "appliance", "implement", "dish",
        "dishes", "fixture", "cupboard", "food",
    ]
    # Keywords whose presence in a WordNet definition marks a food.
    f_kws = [
        "food", "meat", "cheese", "bread", "vegetable", "fruit", "dish",
        "recipe", "meal", "breakfast", "lunch", "sandwich", "dinner", "loaf",
        "croquette", "bean", "beans", "appetizer", "dessert",
    ]
    s = Solution(DEBUG=True, verbose=True)
    s.populate_kitchen_items(k_kws)
    s.populate_food(f_kws)
    s.build_candidates()
    for k_item, other_word, food in s.candidates:
        print("{0} + {1} -> {2}".format(k_item, other_word, food))
# found 18 possible partial solutions (pan fish)
# found 105 possible partial solutions <-- removed 1-2 letter words
# found 86 possible partial solutions
# cook + book -> cookbook
# bean + feast -> beanfeast
# bread + stuff -> breadstuff
# spice + all -> allspice
# fish + wolf -> wolffish
# pick + tooth -> toothpick
# pie + pot -> potpie
# meal + time -> mealtime
# bread + basket -> breadbasket
# fish + dollar -> dollarfish
# ling + dump -> dumpling
# meat + horse -> horsemeat
# meat + less -> meatless
# fast + break -> breakfast
# ware + oven -> ovenware
# board + cheese -> cheeseboard
# board + bread -> breadboard
# oven + ware -> ovenware
# washer + dish -> dishwasher
# sweet + sop -> sweetsop
# dish + pan -> dishpan
# pan + dish -> dishpan
# sago + sap -> sapsago
# fish + pork -> porkfish
# oat + meal -> oatmeal
# frog + bit -> frogbit
# fish + cat -> catfish
# black + berry -> blackberry
# stick + chop -> chopstick
# mess + mate -> messmate
# bread + fruit -> breadfruit
# grocer + green -> greengrocer
# bean + stalk -> beanstalk
# cheese + cake -> cheesecake
# sop + sweet -> sweetsop
# spoon + dessert -> dessertspoon
# mars + ala -> marsala
# ware + table -> tableware
# egg + cup -> eggcup
# milk + shake -> milkshake
# mince + meat -> mincemeat
# fish + sword -> swordfish
# dishonor + able -> dishonorable
# dish + washer -> dishwasher
# ware + dinner -> dinnerware
# ware + china -> chinaware
# dish + rag -> dishrag
# sop + sour -> soursop
# pan + try -> pantry
# fish + shell -> shellfish
# serve + con -> conserve
# egg + plant -> eggplant
# eat + age -> eatage
# pan + fish -> panfish
# fish + weak -> weakfish
# fish + dolphin -> dolphinfish
# fish + spade -> spadefish
# spoon + table -> tablespoon
# meat + mince -> mincemeat
# bird + oil -> oilbird
# can + ape -> canape
# pea + cow -> cowpea
# cheese + board -> cheeseboard
# fish + tile -> tilefish
# fish + rock -> rockfish
# dish + water -> dishwater
# fish + blue -> bluefish
# fish + white -> whitefish
# bread + board -> breadboard
# ling + green -> greenling
# digest + ion -> digestion
# copper + head -> copperhead
# cheese + head -> headcheese
# fish + pan -> panfish
# fish + king -> kingfish
# oat + cake -> oatcake
# black + thorn -> blackthorn
# cook + out -> cookout
# bean + bag -> beanbag
# pea + cock -> peacock
# seed + moon -> moonseed
# pen + elope -> penelope
# sup + ping -> supping
# fish + butter -> butterfish
# meal + oat -> oatmeal
# dessert + spoon -> dessertspoon
# of these 18 partial solutions, "panfish" seems promising.
# fish pan --> panfish is a legitimate solution.
# not sure where to go from here. | johnobrien/pyshortz | solutions/20150503/kitchen.py | kitchen.py | py | 7,960 | python | en | code | 0 | github-code | 13 |
17055595524 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ManjiangTestttttt(object):
    """Alipay API domain model exposing a single field ``oi``."""

    def __init__(self):
        self._oi = None

    @property
    def oi(self):
        return self._oi

    @oi.setter
    def oi(self, value):
        self._oi = value

    def to_alipay_dict(self):
        """Serialize to a plain dict; truthy values only, nested models
        are serialized via their own to_alipay_dict."""
        params = dict()
        value = self.oi
        if value:
            if hasattr(value, 'to_alipay_dict'):
                params['oi'] = value.to_alipay_dict()
            else:
                params['oi'] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a dict; None/empty input yields None."""
        if not d:
            return None
        instance = ManjiangTestttttt()
        if 'oi' in d:
            instance.oi = d['oi']
        return instance
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ManjiangTestttttt.py | ManjiangTestttttt.py | py | 771 | python | en | code | 241 | github-code | 13 |
2424123313 | import os
import json
def load_json_file(json_file):
    """Load and return the JSON content of *json_file*.

    Raises
    ------
    FileNotFoundError
        if *json_file* does not exist (the path is included in the error).
    """
    if not os.path.exists(json_file):
        # include the offending path so the error is actionable
        raise FileNotFoundError(json_file)
    # utf-8 for consistency with dump_json_data
    with open(json_file, 'r', encoding='utf-8') as f:
        json_data = json.load(f)
    return json_data
def dump_json_data(json_data, out_path):
    """Write *json_data* as pretty-printed UTF-8 JSON to *out_path*,
    creating parent directories as needed."""
    parent = os.path.dirname(out_path)
    if parent:
        # os.makedirs('') raises FileNotFoundError, so only create
        # directories when out_path actually has a parent component
        os.makedirs(parent, exist_ok=True)
    with open(out_path, 'w', encoding='utf-8') as f:
        json.dump(json_data, f, ensure_ascii=False, indent=4)
| jadehh/MagicONNX | magiconnx/utils/io.py | io.py | py | 435 | python | en | code | 0 | github-code | 13 |
71548033618 | from pathlib import Path
import glob
def replace(fn):
    """Rewrite fenced ``` code blocks in file *fn* as Jekyll highlight tags.

    Fences declaring a language become ``{% highlight lang %}`` /
    ``{% endhighlight %}`` pairs; fences without a language are kept as-is.

    Raises
    ------
    ValueError
        when a closing fence itself declares a language, which usually
        means the real closing fence is missing.
    """
    def fence_language(line):
        # Text after the last ``` up to end of line, spaces stripped;
        # empty string when the fence declares no language.  (The original
        # wrapped this in a bare except, but str.split/replace cannot fail.)
        return line.split('```')[-1].split('\n')[0].replace(' ', '')

    with Path(fn).open('r') as f:
        lines = f.readlines()

    new_lines = []
    is_start = True   # the next fence encountered opens a block
    no_lang = False   # the currently open block declared no language
    for i, line in enumerate(lines):
        if '```' not in line:
            new_lines.append(line)
            continue
        lang = fence_language(line)
        if is_start:
            if lang:
                print('replacing language {} at line {} in file {}'.format(
                    lang, i, fn
                ))
                new_lines.append('{% highlight ' + lang + ' %}\n')
            else:
                # no language: keep the fence untouched
                new_lines.append(line)
                no_lang = True
            is_start = False
        else:
            if lang:
                raise ValueError('Error. closing ``` might be missing in file', fn)
            if no_lang:
                new_lines.append(line)
                no_lang = False
            else:
                new_lines.append('{% endhighlight %}\n')
            is_start = True

    with Path(fn).open('w') as f:
        f.writelines(new_lines)
if __name__ == '__main__':
    print('Replacing ``` by highlight tags if a language is specified')
    root = Path().absolute()
    markdown_files = list(glob.iglob(str(root) + '/**/*.md', recursive=True))
    for markdown_file in markdown_files:
        replace(markdown_file)
| vict0rsch/_vict0rsch.github.io | blockquotes_to_highlight.py | blockquotes_to_highlight.py | py | 1,628 | python | en | code | 0 | github-code | 13 |
25146278846 |
# 让我测试下你工作正不正常
import sys
from cvmaj import MajInfo
def testAI(moduleName):
    """Smoke-test an AI module: it must expose discard() and action(),
    and its discard() is exercised on a fixed sample hand."""
    module = __import__(moduleName, fromlist=[''])
    exported = dir(module)
    if 'discard' not in exported or 'action' not in exported:
        raise Exception('missing implementation')
    info = MajInfo()
    info.hand = ['8p', '3m', '4m', '5m', '7m', '8m', '5s', '6s', '6s', '6s', '7s', '8s', '8s', '5p']
    info.discard = ['2m', '3m']
    chosen = module.discard(info)
    print('discard %d' % chosen)
# Run the AI module named on the command line, e.g. `python cvmaj_check.py myai`.
if len(sys.argv) > 1:
    testAI('ai.' + sys.argv[1])
| xdedss/cvmaj | cvmaj_check.py | cvmaj_check.py | py | 555 | python | en | code | 3 | github-code | 13 |
26041481256 | from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import nucleus7 as nc7
import tensorflow as tf
from ncgenes7.data_fields.images import ImageDataFields
from ncgenes7.data_fields.object_detection import DetectionDataFields
from ncgenes7.data_fields.object_detection import ObjectDataFields
from ncgenes7.utils import object_detection_utils as od_utils
from ncgenes7.utils.general_utils import broadcast_with_expand_to
from ncgenes7.utils.general_utils import check_if_dynamic_keys_flat
class DetectionsPostprocessor(nc7.model.ModelPostProcessor):
    """
    Forward input detections to outputs, zero out all detections outside of
    num_object_detections and add 1 to detection classes if needed.

    It also has a dynamic keys option, so all the additional keys that will
    be provided to its postprocessor must have first dimension after batch
    to be also object dimension, e.g. `[bs, max_num_detections, *]`.
    These additional keys are not allowed to be nested, so only 1 level dicts
    are allowed.

    Parameters
    ----------
    offset_detection_classes
        if the classes of detection should be increased by 1, e.g. should be
        set to True for second stage faster rcnn, as detection_classes there
        are 0-based

    Attributes
    ----------
    incoming_keys
        * detection_object_boxes : detection boxes in
          format [ymin, xmin, ymax, xmax], [bs, max_num_detections, 4],
          float32
        * detection_object_scores : detection scores,
          [bs, max_num_detections], float32
        * detection_object_classes : (optional) detection classes;
          if not defined assumed to be 1 on all active detections,
          [bs, max_num_detections], int32
        * num_object_detections : number of detections, [bs], int32
        * detection_object_instance_ids : (optional) detection instance ids;
          if not defined assumed to be 0 on all active detections,
          [bs, max_num_detections], int32

    generated_keys
        * detection_object_boxes : detection boxes in
          format [ymin, xmin, ymax, xmax], [bs, max_num_detections, 4],
          float32
        * detection_object_scores : detection scores,
          [bs, max_num_detections], float32
        * detection_object_classes : detection classes,
          [bs, max_num_detections], int32
        * num_object_detections : number of detections, [bs], int32
        * detection_object_instance_ids : (optional) detection instance ids;
          if not defined assumed to be 0 on all active detections,
          [bs, max_num_detections], int32
    """
    dynamic_incoming_keys = True
    dynamic_generated_keys = True

    incoming_keys = [
        DetectionDataFields.detection_object_boxes,
        DetectionDataFields.detection_object_scores,
        DetectionDataFields.num_object_detections,
        # "_"-prefixed keys mark optional inputs
        "_" + DetectionDataFields.detection_object_classes,
        "_" + DetectionDataFields.detection_object_instance_ids,
    ]
    generated_keys = [
        DetectionDataFields.detection_object_boxes,
        DetectionDataFields.detection_object_scores,
        DetectionDataFields.num_object_detections,
        DetectionDataFields.detection_object_classes,
        DetectionDataFields.detection_object_instance_ids,
    ]

    def __init__(self, *,
                 offset_detection_classes=False,
                 **postprocessor_kwargs):
        super().__init__(**postprocessor_kwargs)
        self.offset_detection_classes = offset_detection_classes

    @check_if_dynamic_keys_flat
    def process(self, *,
                detection_object_boxes,
                detection_object_scores,
                num_object_detections,
                detection_object_classes=None,
                detection_object_instance_ids=None,
                **dynamic_inputs):
        # pylint: disable=arguments-differ
        # base class has more generic signature
        offset_classes = self.offset_detection_classes
        if detection_object_classes is None:
            # synthesized classes start at 0, so they always need the offset
            offset_classes = True
        detection_object_classes = _maybe_init_classes(
            detection_object_classes, detection_object_scores)
        detection_object_instance_ids = _maybe_init_instance_ids(
            detection_object_classes, detection_object_instance_ids)
        # zero out every entry beyond num_object_detections, including all
        # dynamic (additional) inputs
        (detection_object_boxes, num_object_detections,
         detection_object_scores, detection_object_classes,
         detection_object_instance_ids, dynamic_outputs
         ) = od_utils.zero_out_invalid_pad_objects(
            detection_object_boxes,
            detection_object_scores,
            detection_object_classes,
            detection_object_instance_ids,
            **dynamic_inputs,
            num_objects=num_object_detections)
        if offset_classes:
            detection_object_classes = od_utils.offset_object_classes(
                detection_object_classes, num_objects=num_object_detections
            )
        result = {
            DetectionDataFields.detection_object_boxes: detection_object_boxes,
            DetectionDataFields.detection_object_scores:
                detection_object_scores,
            DetectionDataFields.num_object_detections: num_object_detections,
            DetectionDataFields.detection_object_classes:
                detection_object_classes,
            DetectionDataFields.detection_object_instance_ids:
                detection_object_instance_ids,
        }
        result.update(dynamic_outputs)
        return result
# pylint: disable=too-many-instance-attributes
# attributes cannot be combined or extracted further
class DetectionsFilterPostprocessor(nc7.model.ModelPostProcessor):
"""
Postprocessor which can filter detections according to scores, min and
max width
All of the parameters can be controlled during inference too
It also has a dynamic keys option, so all the additional keys that will
be provided to its postprocessor must have first dimension after batch
to be also object dimension, e.g. `[bs, max_num_detections, *]`.
These additional keys are not allowed to be nested, so only 1 level dicts
are allowed.
Parameters
----------
num_classes
number of classes; if not provided, assumes that only 1 class
min_object_width
objects with width less than this value will be removed;
can be classwise e.g. list of values; also during inference, it is
possible to provide either 1 value for all classes or list of values
for each class
max_object_width
objects with width greater than this value will be removed;
can be classwise e.g. list of values; also during inference, it is
possible to provide either 1 value for all classes or list of values
for each class
min_object_height
objects with height less than this value will be removed;
can be classwise e.g. list of values; also during inference, it is
possible to provide either 1 value for all classes or list of values
for each class
max_object_height
objects with height greater than this value will be removed;
can be classwise e.g. list of values; also during inference, it is
possible to provide either 1 value for all classes or list of values
for each class
score_threshold
objects with score less than this value will be removed;
can be classwise e.g. list of values; also during inference, it is
possible to provide either 1 value for all classes or list of values
for each class
reorder_filtered
if the objects should be reordered after filtering out, e.g. 0 objects
will be treated as paddings and set to the end of object lists and also
the number of objects will be recalculated. If not specified, then just
all the inputs which filtered out are zeroed.
classes_to_select
which classes to select; if specified, then the classes only with
this ids will be selected
Attributes
----------
incoming_keys
* detection_object_boxes : detection boxes in
format [ymin, xmin, ymax, xmax], [bs, max_num_detections, 4], float32
* detection_object_scores : detection scores, [bs, max_num_detections],
float32
* detection_object_classes : (optional) detection classes;
if not defined assumed to be 1 on all active detections,
[bs, max_num_detections], int32
* num_object_detections : number of detections, [bs], int32
* detection_object_instance_ids : (optional) detection instance ids;
if not defined assumed to be 0 on all active detections,
[bs, max_num_detections], int32
generated_keys
* detection_object_boxes : detection boxes in
format [ymin, xmin, ymax, xmax], [bs, max_num_detections, 4], float32
* detection_object_scores : detection scores ,[bs, max_num_detections],
float32,
* detection_object_classes : detection classes,
[bs, max_num_detections], int32
* num_object_detections : number of detections, [bs], int32
* detection_object_instance_ids : (optional) detection instance ids;
if not defined assumed to be 0 on all active detections,
[bs, max_num_detections], int32
"""
dynamic_incoming_keys = True
dynamic_generated_keys = True
incoming_keys = [
DetectionDataFields.detection_object_boxes,
DetectionDataFields.detection_object_scores,
DetectionDataFields.num_object_detections,
"_" + DetectionDataFields.detection_object_classes,
"_" + DetectionDataFields.detection_object_instance_ids,
]
generated_keys = [
DetectionDataFields.detection_object_boxes,
DetectionDataFields.detection_object_scores,
DetectionDataFields.num_object_detections,
DetectionDataFields.detection_object_classes,
DetectionDataFields.detection_object_instance_ids,
]
def __init__(self, *,
num_classes: int = 1,
min_object_width: Union[float, List[float]] = 0,
max_object_width: Union[float, List[float]] = 0,
min_object_height: Union[float, List[float]] = 0,
max_object_height: Union[float, List[float]] = 0,
score_threshold: Union[float, List[float]] = 0,
reorder_filtered: bool = True,
classes_to_select: Optional[Union[int, list]] = None,
**postprocessor_kwargs):
super().__init__(**postprocessor_kwargs)
min_object_width = _validate_classwise_parameters(
min_object_width, num_classes, "min_object_width")
max_object_width = _validate_classwise_parameters(
max_object_width, num_classes, "max_object_width")
min_object_height = _validate_classwise_parameters(
min_object_height, num_classes, "min_object_height")
max_object_height = _validate_classwise_parameters(
max_object_height, num_classes, "max_object_height")
score_threshold = _validate_classwise_parameters(
score_threshold, num_classes, "score_threshold")
assert all(each_threshold >= 0 for each_threshold in score_threshold), (
"object score thresholds should be >= 0")
self.num_classes = num_classes
self.min_object_width = min_object_width
self.max_object_width = max_object_width
self.min_object_height = min_object_height
self.max_object_height = max_object_height
self.score_threshold = score_threshold
self.reorder_filtered = reorder_filtered
if isinstance(classes_to_select, int):
classes_to_select = [classes_to_select]
if classes_to_select and (
not all((isinstance(each_class, int) and each_class >= 0
for each_class in classes_to_select))):
msg = ("{}: provided classes_to_select is invalid! "
"It must be either single int or a list of ints "
"(provided: {})").format(self.name, classes_to_select)
raise ValueError(msg)
self.classes_to_select = classes_to_select
    @check_if_dynamic_keys_flat
    def process(self, *,
                detection_object_boxes,
                detection_object_scores,
                num_object_detections,
                detection_object_classes=None,
                detection_object_instance_ids=None,
                **dynamic_inputs):
        """
        Filter the detections by size, score and (optionally) class id.

        All limits are exposed as default placeholders, so they can be
        overridden at inference time; whether the classwise or the
        class-agnostic variant applies is decided at graph run time from
        the placeholder values.  Depending on ``reorder_filtered``, the
        filtered-out objects are either removed (remaining objects are
        compacted and num_object_detections is recalculated) or zeroed
        in place.
        """
        # pylint: disable=arguments-differ
        # base class has more generic signature
        # pylint: disable=too-many-locals
        # cannot reduce number of locals without more code complexity
        # each limit is broadcast to one value per class so that a scalar
        # provided at inference applies to every class
        min_object_width = self.add_default_placeholder(
            self.min_object_width, "min_object_width", tf.float32,
            shape=None, broadcast_shape=[self.num_classes])
        max_object_width = self.add_default_placeholder(
            self.max_object_width, "max_object_width", tf.float32,
            shape=None, broadcast_shape=[self.num_classes])
        min_object_height = self.add_default_placeholder(
            self.min_object_height, "min_object_height", tf.float32,
            shape=None, broadcast_shape=[self.num_classes])
        max_object_height = self.add_default_placeholder(
            self.max_object_height, "max_object_height", tf.float32,
            shape=None, broadcast_shape=[self.num_classes])
        score_threshold = self.add_default_placeholder(
            self.score_threshold, "score_threshold", tf.float32,
            shape=None, broadcast_shape=[self.num_classes])
        # fill in defaults for the optional inputs
        detection_object_classes = _maybe_init_classes(
            detection_object_classes, detection_object_scores)
        detection_object_instance_ids = _maybe_init_instance_ids(
            detection_object_classes, detection_object_instance_ids)
        # classwise filtering is needed only if any limit differs per class
        apply_classwise = _parameters_are_classwise(
            min_object_width, max_object_width, min_object_height,
            max_object_height, score_threshold)
        mask_detections = _get_filtered_detections_mask_dynamic(
            self.num_classes,
            detection_object_boxes, detection_object_scores,
            detection_object_classes,
            num_object_detections,
            min_object_width, max_object_width,
            min_object_height, max_object_height, score_threshold,
            classwise=apply_classwise
        )
        # optionally restrict the result to a subset of class ids
        if self.classes_to_select:
            mask_classes, _ = od_utils.create_classes_mask(
                detection_object_classes, self.classes_to_select,
                num_objects=num_object_detections)
            mask_detections = tf.logical_and(mask_detections, mask_classes)
        if self.reorder_filtered:
            # compact the surviving objects to the front and recompute
            # num_object_detections; dynamic inputs are filtered alongside
            (detection_object_boxes_filtered, num_object_detections_filtered,
             detection_object_classes_filtered,
             detection_object_instance_ids_filtered,
             detection_object_scores_filtered,
             dynamic_inputs) = od_utils.filter_objects_over_mask(
                 mask_detections,
                 detection_object_boxes, detection_object_classes,
                 detection_object_instance_ids, detection_object_scores,
                 **dynamic_inputs)
        else:
            # keep positions; just zero out filtered objects, so
            # num_object_detections stays unchanged
            num_object_detections_filtered = num_object_detections
            detection_object_boxes_filtered = _zero_according_to_object_mask(
                mask_detections, detection_object_boxes)
            detection_object_scores_filtered = _zero_according_to_object_mask(
                mask_detections, detection_object_scores)
            detection_object_classes_filtered = _zero_according_to_object_mask(
                mask_detections, detection_object_classes)
            (detection_object_instance_ids_filtered
             ) = _zero_according_to_object_mask(mask_detections,
                                                detection_object_instance_ids)
            dynamic_inputs = {
                k: _zero_according_to_object_mask(mask_detections, v)
                for k, v in dynamic_inputs.items()
            }
        result = {
            DetectionDataFields.detection_object_boxes:
                detection_object_boxes_filtered,
            DetectionDataFields.detection_object_scores:
                detection_object_scores_filtered,
            DetectionDataFields.num_object_detections:
                num_object_detections_filtered,
            DetectionDataFields.detection_object_classes:
                detection_object_classes_filtered,
            DetectionDataFields.detection_object_instance_ids:
                detection_object_instance_ids_filtered,
        }
        result.update(dynamic_inputs)
        return result
# pylint: enable=too-many-instance-attributes
class NonMaxSuppressionPostprocessor(nc7.model.ModelPostProcessor):
    """
    Perform Non Maximum Suppression on detections.

    Will add all parameters like IOU threshold to default placeholders, so
    is possible to modify them during inference.
    All of the parameters can be controlled during inference too

    It also has a dynamic keys option, so all the additional keys that will
    be provided to its postprocessor must have first dimension after batch
    to be also object dimension, e.g. `[bs, max_num_detections, *]`.
    These additional keys are not allowed to be nested, so only 1 level dicts
    are allowed.

    Parameters
    ----------
    num_classes
        number of classes; if not provided, assumes that only 1 class
    iou_threshold
        intersection over union threshold for suppression;
        can be classwise e.g. list of values; also during inference, it is
        possible to provide either 1 value for all classes or list of values
        for each class
    score_threshold
        objects score threshold for suppression;
        can be classwise e.g. list of values; also during inference, it is
        possible to provide either 1 value for all classes or list of values
        for each class
    max_size_per_class
        max number of boxes after nms per class
    max_total_size
        max number of boxes in total over all classes
    parallel_iterations
        number of parallel iterations to use to map the nms method over the
        batch

    Attributes
    ----------
    incoming_keys
        * detection_object_boxes : detection boxes in
          format [ymin, xmin, ymax, xmax], [bs, max_num_detections, 4], float32
        * detection_object_scores : detection scores, [bs, max_num_detections],
          float32
        * detection_object_classes : (optional) detection classes;
          if not defined assumed to be 1 on all active detections,
          [bs, max_num_detections], int32
        * num_object_detections : number of detections, [bs], int32
        * detection_object_instance_ids : (optional) detection instance ids;
          if not defined assumed to be 0 on all active detections,
          [bs, max_num_detections], int32

    generated_keys
        * detection_object_boxes : detection boxes in
          format [ymin, xmin, ymax, xmax], [bs, max_num_detections, 4], float32
        * detection_object_scores : detection scores ,[bs, max_num_detections],
          float32,
        * detection_object_classes : detection classes, zero-based,
          [bs, max_num_detections], int32
        * num_object_detections : number of detections, [bs], int32
        * detection_object_instance_ids : (optional) detection instance ids;
          if not defined assumed to be 0 on all active detections,
          [bs, max_num_detections], int32
    """
    dynamic_incoming_keys = True
    dynamic_generated_keys = True
    incoming_keys = [
        DetectionDataFields.detection_object_boxes,
        DetectionDataFields.detection_object_scores,
        DetectionDataFields.num_object_detections,
        "_" + DetectionDataFields.detection_object_classes,
        "_" + DetectionDataFields.detection_object_instance_ids,
    ]
    generated_keys = [
        DetectionDataFields.detection_object_boxes,
        DetectionDataFields.detection_object_scores,
        DetectionDataFields.num_object_detections,
        DetectionDataFields.detection_object_classes,
        DetectionDataFields.detection_object_instance_ids,
    ]

    def __init__(self, *,
                 num_classes=1,
                 iou_threshold: Union[float, List[float]] = 0.6,
                 score_threshold: Union[float, List[float]] = 0.0,
                 max_size_per_class: int = 100,
                 max_total_size: int = 200,
                 parallel_iterations: int = 16,
                 **postprocessor_kwargs):
        super().__init__(**postprocessor_kwargs)
        # normalize thresholds to per-class lists (scalars stay length 1
        # and are broadcast in process())
        iou_threshold = _validate_classwise_parameters(
            iou_threshold, num_classes, "iou_threshold")
        score_threshold = _validate_classwise_parameters(
            score_threshold, num_classes, "score_threshold")
        assert all(each_threshold >= 0 for each_threshold in score_threshold), (
            "object score thresholds should be >= 0")
        assert all(each_threshold >= 0 for each_threshold in iou_threshold), (
            "iou thresholds should be >= 0")
        self.num_classes = num_classes
        self.iou_threshold = iou_threshold
        self.score_threshold = score_threshold
        self.parallel_iterations = parallel_iterations
        self.max_size_per_class = max_size_per_class
        self.max_total_size = max_total_size

    @check_if_dynamic_keys_flat
    def process(self, *,
                detection_object_boxes,
                detection_object_scores,
                num_object_detections,
                detection_object_classes=None,
                detection_object_instance_ids=None,
                **dynamic_inputs):
        """
        Apply batched multiclass NMS to the detections and return the
        suppressed boxes / scores / classes together with all dynamic
        per-object inputs gathered accordingly.
        """
        # pylint: disable=arguments-differ
        # base class has more generic signature
        # pylint: disable=too-many-locals
        # cannot reduce number of locals without more code complexity
        # thresholds are placeholders broadcast to one value per class
        iou_threshold = self.add_default_placeholder(
            self.iou_threshold, "iou_threshold", tf.float32,
            shape=None, broadcast_shape=[self.num_classes])
        score_threshold = self.add_default_placeholder(
            self.score_threshold, "score_threshold", tf.float32,
            shape=None, broadcast_shape=[self.num_classes])
        max_size_per_class = self.add_default_placeholder(
            self.max_size_per_class, "max_size_per_class", tf.int32)
        max_total_size = self.add_default_placeholder(
            self.max_total_size, "max_total_size", tf.int32)
        # fill in defaults for the optional inputs
        detection_object_classes = _maybe_init_classes(
            detection_object_classes, detection_object_scores)
        detection_object_instance_ids = _maybe_init_instance_ids(
            detection_object_classes, detection_object_instance_ids)
        # is needed to be sure that detections are zeroed
        # according to num_object_detections
        (detection_object_boxes, num_object_detections,
         detection_object_scores, detection_object_classes,
         detection_object_instance_ids, dynamic_inputs
         ) = od_utils.zero_out_invalid_pad_objects(
             detection_object_boxes,
             detection_object_scores,
             detection_object_classes,
             detection_object_instance_ids,
             **dynamic_inputs,
             num_objects=num_object_detections)
        # NOTE(review): self.parallel_iterations is stored in __init__ but
        # not forwarded to batch_multiclass_non_max_suppression here --
        # confirm whether it should be passed
        (detection_object_boxes_nms, detection_object_scores_nms,
         detection_object_classes_nms, num_object_detections_nms,
         additional_fields_nms
         ) = od_utils.batch_multiclass_non_max_suppression(
             object_boxes=detection_object_boxes,
             object_scores=detection_object_scores,
             object_classes=detection_object_classes,
             iou_threshold=iou_threshold,
             score_threshold=score_threshold,
             num_classes=self.num_classes,
             max_size_per_class=max_size_per_class,
             max_total_size=max_total_size,
             detection_object_instance_ids=detection_object_instance_ids,
             **dynamic_inputs)
        result = {
            DetectionDataFields.detection_object_boxes:
                detection_object_boxes_nms,
            DetectionDataFields.detection_object_scores:
                detection_object_scores_nms,
            DetectionDataFields.num_object_detections:
                num_object_detections_nms,
            DetectionDataFields.detection_object_classes:
                detection_object_classes_nms,
        }
        result.update(additional_fields_nms)
        return result
class ConverterToImageFramePostprocessor(nc7.model.ModelPostProcessor):
    """
    Rescale detection boxes from normalized [0, 1] coordinates to absolute
    image coordinates, i.e. x in [0, width] and y in [0, height].

    Attributes
    ----------
    incoming_keys
        * images : images with shape [bs, height, width, num_channels]
        * detection_object_boxes : detection boxes in
          format [ymin, xmin, ymax, xmax], [bs, max_num_detections, 4], float32

    generated_keys
        * detection_object_boxes : detection boxes in
          format [ymin, xmin, ymax, xmax] with coordinates in image space,
          [bs, max_num_detections, 4], float32
    """
    incoming_keys = [
        ImageDataFields.images,
        DetectionDataFields.detection_object_boxes,
    ]
    generated_keys = [
        DetectionDataFields.detection_object_boxes,
    ]

    def process(self, *,
                images,
                detection_object_boxes):
        # pylint: disable=arguments-differ
        # base class has more generic signature
        images_shape = tf.shape(images)
        # [height, width] taken dynamically from the image tensor
        image_size = [images_shape[1], images_shape[2]]
        boxes_in_image_frame = od_utils.local_to_image_coordinates(
            detection_object_boxes, image_size=image_size)
        return {
            DetectionDataFields.detection_object_boxes: boxes_in_image_frame}
class ExtractKeypointsFromHeatmaps(nc7.model.ModelPostProcessor):
    """
    Extract keypoints from their heatmaps. For it first apply gaussian smoothing
    and then select argmax of it

    Parameters
    ----------
    smoothing_kernel_size
        size of the gaussian smoothing kernel; can be changed during inference
    normalize_smoothing_kernel
        if the smoothing kernel should be normalized, e.g. kernel sum =1
    score_conversion_fn
        which function to use to convert the input heatmaps to its scores;
        if not specified, linear conversion is used; must be inside of
        'tf.nn' namespace

    Attributes
    ----------
    incoming_keys
        * detection_object_boxes : detection boxes in normalized coordinates,
          tf.float32, [bs, num_detections, 4]
        * detection_object_keypoints_heatmaps : predicted heatmaps (or logits)
          for keypoints; tf.float32,
          [bs, num_detections, map_width, map_height, num_keypoints]

    generated_keys
        * detection_object_keypoints : detection keypoints normalized
          to image coordinates in format [y, x]; tf.float32,
          [bs, num_detections, num_keypoints, 2]
        * detection_object_keypoints_scores :
          scores for keypoints, which are taken on heatmaps after gaussian
          filter application; so the values may be not normalized;
          shape [bs, num_detections, num_keypoints], tf.float32
    """
    incoming_keys = [
        DetectionDataFields.detection_object_keypoints_heatmaps,
        DetectionDataFields.detection_object_boxes,
    ]
    generated_keys = [
        DetectionDataFields.detection_object_keypoints,
        DetectionDataFields.detection_object_keypoints_scores,
    ]

    def __init__(self, *,
                 smoothing_kernel_size: int = 3,
                 normalize_smoothing_kernel: bool = False,
                 score_conversion_fn: Optional[str] = None,
                 **postprocessor_kwargs):
        super().__init__(**postprocessor_kwargs)
        self.smoothing_kernel_size = smoothing_kernel_size
        self.normalize_smoothing_kernel = normalize_smoothing_kernel
        self.score_conversion_fn = score_conversion_fn

    def process(self, detection_object_keypoints_heatmaps,
                detection_object_boxes) -> Dict[str, tf.Tensor]:
        """
        Smooth the heatmaps, take per-keypoint argmax positions and decode
        them from box-relative to image-normalized coordinates.
        """
        # pylint: disable=arguments-differ
        # base class has more generic signature
        smoothing_kernel_size = self.add_default_placeholder(
            self.smoothing_kernel_size, "smoothing_kernel_size", tf.int32)
        # optionally map heatmap logits to scores, e.g. via tf.nn.sigmoid
        if self.score_conversion_fn:
            score_conversion_fn = getattr(tf.nn, self.score_conversion_fn)
            detection_object_keypoints_heatmaps = score_conversion_fn(
                detection_object_keypoints_heatmaps)
        num_keypoints = detection_object_keypoints_heatmaps.shape.as_list()[-1]
        # collapse batch and object dimensions so the extraction runs over
        # one heatmap stack per object
        heatmaps_sq = tf.reshape(
            detection_object_keypoints_heatmaps,
            tf.concat([[-1],
                       tf.shape(detection_object_keypoints_heatmaps)[2:]], 0))
        keypoints_relative_sq, keypoints_scores_sq = (
            od_utils.extract_keypoints_from_heatmaps(
                heatmaps_sq, smoothing_kernel_size,
                self.normalize_smoothing_kernel))
        # restore the original [bs, num_detections] leading dimensions
        first_dims = tf.shape(detection_object_keypoints_heatmaps)[:2]
        keypoints_relative = tf.reshape(
            keypoints_relative_sq,
            tf.concat([first_dims, [num_keypoints, 2]], 0))
        keypoints_scores = tf.reshape(
            keypoints_scores_sq, tf.concat([first_dims, [num_keypoints]], 0))
        # map box-relative keypoints into normalized image coordinates
        keypoints = od_utils.decode_keypoints_from_boxes(
            keypoints_relative, detection_object_boxes)
        result = {
            DetectionDataFields.detection_object_keypoints: keypoints,
            DetectionDataFields.detection_object_keypoints_scores:
                keypoints_scores
        }
        return result
class KeypointsFilterPostprocessor(nc7.model.ModelPostProcessor):
    """
    Zero out keypoints whose score is below a threshold.

    Parameters
    ----------
    score_threshold
        score threshold for keypoints - all keypoints with score less it will
        be set to 0; can be changed during inference

    Attributes
    ----------
    incoming_keys
        * detection_object_keypoints : detection keypoints normalized
          to image coordinates in format [y, x]; tf.float32,
          [bs, num_detections, num_keypoints, 2]
        * detection_object_keypoints_scores : scores for keypoints;
          shape [bs, num_detections, num_keypoints], tf.float32

    generated_keys
        * detection_object_keypoints : keypoints with the below-threshold
          entries zeroed; tf.float32, [bs, num_detections, num_keypoints, 2]
        * detection_object_keypoints_scores : scores with the below-threshold
          entries zeroed; [bs, num_detections, num_keypoints], tf.float32
    """
    incoming_keys = [
        DetectionDataFields.detection_object_keypoints,
        DetectionDataFields.detection_object_keypoints_scores,
    ]
    generated_keys = [
        DetectionDataFields.detection_object_keypoints,
        DetectionDataFields.detection_object_keypoints_scores,
    ]

    def __init__(self, score_threshold: float = 0.1,
                 **postprocessor_kwargs):
        super().__init__(**postprocessor_kwargs)
        self.score_threshold = score_threshold

    def process(self, detection_object_keypoints,
                detection_object_keypoints_scores) -> Dict[str, tf.Tensor]:
        # pylint: disable=arguments-differ
        # base class has more generic signature
        score_threshold = self.add_default_placeholder(
            self.score_threshold, "score_threshold", tf.float32)
        keep_mask = tf.greater_equal(
            detection_object_keypoints_scores, score_threshold)
        filtered_scores = tf.where(
            keep_mask,
            detection_object_keypoints_scores,
            tf.zeros_like(detection_object_keypoints_scores))
        # replicate the mask over the trailing [y, x] coordinate dimension
        keep_mask_yx = tf.tile(keep_mask[..., tf.newaxis], [1, 1, 1, 2])
        filtered_keypoints = tf.where(
            keep_mask_yx,
            detection_object_keypoints,
            tf.zeros_like(detection_object_keypoints))
        return {
            DetectionDataFields.detection_object_keypoints: filtered_keypoints,
            DetectionDataFields.detection_object_keypoints_scores:
                filtered_scores,
        }
class InstanceMasksToImageFrame(nc7.model.ModelPostProcessor):
    """
    Reframe the instance masks from their box frames to the image frame.

    Parameters
    ----------
    binary_threshold
        threshold to convert values to uint8; resulted values will be 0 or 1;
        can be changed during inference

    Attributes
    ----------
    incoming_keys
        * images : images; [bs, image_height, image_width, num_channels],
          tf.float32
        * object_boxes : boxes in normalized coordinates,
          tf.float32, [bs, num_objects, 4]
        * object_instance_masks : instance masks to reframe;
          [bs, num_objects, mask_width, mask_height, {num_channels}];
          last dimension is optional; if it exists, then first the sum over
          all the channels will be taken and then this single channeled mask
          will be reframed

    generated_keys
        * object_instance_masks_on_image : instance masks reframed to image;
          [bs, num_objects, image_height, image_width]; tf.uint8
    """
    incoming_keys = [
        ImageDataFields.images,
        ObjectDataFields.object_boxes,
        ObjectDataFields.object_instance_masks,
    ]
    generated_keys = [
        ObjectDataFields.object_instance_masks_on_image,
    ]

    def __init__(self, *,
                 binary_threshold=0.5,
                 **postprocessor_kwargs):
        super().__init__(**postprocessor_kwargs)
        self.binary_threshold = binary_threshold

    def process(self, images: tf.Tensor,
                object_boxes: tf.Tensor,
                object_instance_masks: tf.Tensor
                ) -> Dict[str, tf.Tensor]:
        # pylint: disable=arguments-differ
        # base class has more generic signature
        binary_threshold = self.add_default_placeholder(
            self.binary_threshold, "binary_threshold", tf.float32)
        # collapse an optional trailing channel dimension so the masks are
        # [bs, num_objects, mask_height, mask_width]
        if len(object_instance_masks.shape) == 5:
            num_channels = object_instance_masks.shape.as_list()[-1]
            if num_channels > 1:
                object_instance_masks = tf.reduce_sum(object_instance_masks, -1)
            else:
                object_instance_masks = tf.squeeze(object_instance_masks, -1)
        true_image_sizes = od_utils.get_true_image_shapes(
            images)[:, :-1]
        decoded_masks = od_utils.decode_instance_masks_to_image(
            object_instance_masks, object_boxes, true_image_sizes)
        binary_masks = tf.cast(
            tf.greater(decoded_masks, binary_threshold), tf.uint8)
        return {
            ObjectDataFields.object_instance_masks_on_image: binary_masks
        }
class KeypointsConverterToImageFramePostprocessor(nc7.model.ModelPostProcessor):
    """
    Convert normalized keypoints in range of [0, 1] to image coordinates with
    x in [0, width] and y in [0, height].

    Attributes
    ----------
    incoming_keys
        * images : images with shape [bs, height, width, num_channels]
        * detection_object_keypoints : detection keypoints in
          format [y, x], [bs, max_num_detections, num_keypoints, 2], float32

    generated_keys
        * detection_object_keypoints : detection keypoints with coordinates
          in image frame in format [y, x],
          [bs, max_num_detections, num_keypoints, 2], float32
    """
    incoming_keys = [
        ImageDataFields.images,
        DetectionDataFields.detection_object_keypoints,
    ]
    generated_keys = [
        DetectionDataFields.detection_object_keypoints,
    ]

    def process(self, *,
                images,
                detection_object_keypoints):
        # pylint: disable=arguments-differ
        # base class has more generic signature
        # [height, width] taken dynamically from the image tensor
        image_size = [tf.shape(images)[1], tf.shape(images)[2]]
        keypoints_image_frame = (
            od_utils.keypoints_local_to_image_coordinates(
                detection_object_keypoints, image_size=image_size))
        result = {
            DetectionDataFields.detection_object_keypoints:
                keypoints_image_frame
        }
        return result
class ObjectClassesCombiner(nc7.model.ModelPostProcessor):
    """
    Postprocessor that allows to accumulate different class ids

    new class id is calculated as a flatten index of the multi dimensional array
    of shape [..., num_classes3, num_classes2, num_classes1], e.g. works similar
    to `numpy.ravel_multi_index` but on the transposed shape and reversed index.
    This is done due to the logic to have the class components in the
    priority order, e.g. first num_classes indices must be for class component
    1 and 0 for all other components.

    Parameters
    ----------
    num_classes_to_combine
        list of dicts with mapping from input key to number of classes for that
        key, e.g. [{"key1": 10}, {"key2": 2}, {"key3": 2}]; number of classes
        must include also a background, e.g. 0 class;
    mask_according_to_first_key
        if the first key should serve also as a mask, e.g. if it is 0, then
        result will be also 0 disregard of other classes
    """
    generated_keys = [
        ObjectDataFields.object_classes,
    ]
    dynamic_incoming_keys = True

    def __init__(self, *,
                 num_classes_to_combine: List[dict],
                 mask_according_to_first_key: bool = True,
                 **postprocessor_kwargs):
        super().__init__(**postprocessor_kwargs)
        self.num_classes_to_combine = num_classes_to_combine
        self.mask_according_to_first_key = mask_according_to_first_key

    def process(self, **inputs) -> Dict[str, tf.Tensor]:
        """
        Combine the per-component class id tensors into a single flattened
        class id tensor.
        """
        # pylint: disable=arguments-differ
        # base class has more generic signature
        # class tensors in the priority order given by num_classes_to_combine
        class_ids_in_order = [
            inputs[list(each_item.keys())[0]]
            for each_item in self.num_classes_to_combine]
        num_classes = [list(each_item.values())[0]
                       for each_item in self.num_classes_to_combine]
        # stride of each component inside the flattened index, i.e.
        # [1, n1, n1*n2, ...]; combined id = sum(id_i * stride_i)
        index_multiplier = tf.cast(
            tf.cumprod([1] + num_classes[:-1]), tf.int32)
        # NOTE(review): the broadcast below assumes rank-2 class tensors of
        # shape [bs, num_objects] -- confirm with callers
        new_object_classes = tf.reduce_sum(
            tf.stack(class_ids_in_order, 0)
            * index_multiplier[:, tf.newaxis, tf.newaxis],
            0)
        if self.mask_according_to_first_key:
            # a background (0) first component forces a background result
            new_object_classes = tf.where(tf.greater(class_ids_in_order[0], 0),
                                          new_object_classes,
                                          tf.zeros_like(new_object_classes))
        return {
            ObjectDataFields.object_classes: new_object_classes
        }
def _get_filtered_detections_mask_dynamic(num_classes,
                                          detection_object_boxes,
                                          detection_object_scores,
                                          detection_object_classes,
                                          num_object_detections,
                                          min_object_width,
                                          max_object_width, min_object_height,
                                          max_object_height, score_threshold,
                                          classwise: tf.Tensor):
    """
    Build the detections filter mask when `classwise` is only known at graph
    run time.

    Wraps :obj:`_get_filtered_detections_mask` in a `tf.cond` so the
    classwise / class-agnostic variant is selected dynamically from the
    boolean `classwise` tensor.  Both branches share all other arguments;
    previously the full 10-argument call was duplicated per branch.
    """
    # pylint: disable=too-many-arguments
    # all the arguments are needed to perform filter operation
    def _branch(classwise_static: bool):
        # bind the static flag; everything else is identical in both branches
        return lambda: _get_filtered_detections_mask(
            num_classes,
            detection_object_boxes, detection_object_scores,
            detection_object_classes,
            num_object_detections,
            min_object_width, max_object_width,
            min_object_height, max_object_height, score_threshold,
            classwise=classwise_static)

    return tf.cond(classwise, _branch(True), _branch(False))
def _get_filtered_detections_mask(num_classes,
                                  detection_object_boxes,
                                  detection_object_scores,
                                  detection_object_classes,
                                  num_object_detections,
                                  min_object_width,
                                  max_object_width, min_object_height,
                                  max_object_height, score_threshold,
                                  classwise: bool = False):
    """
    Build a boolean mask of detections that pass the size / score limits
    and lie inside the valid (non padded) object range.
    """
    # pylint: disable=too-many-arguments,too-many-locals
    # all the arguments are needed to perform filter operation
    if not classwise:
        # class-agnostic: a single limit set applied to every detection
        num_classes = 1
    per_class_masks = [
        _get_filtered_detections_mask_single_class(
            class_index, detection_object_boxes, detection_object_scores,
            min_object_width, max_object_width, min_object_height,
            max_object_height, score_threshold
        )
        for class_index in range(num_classes)
    ]
    if classwise:
        # restrict every per-class mask to the detections of that class;
        # class ids are 1-based here (0 is background / padding)
        class_membership = [tf.equal(detection_object_classes, class_index + 1)
                            for class_index in range(num_classes)]
        per_class_masks = [
            tf.logical_and(each_filter_mask, each_membership)
            for each_filter_mask, each_membership in zip(
                per_class_masks, class_membership)
        ]
    max_num_detections = tf.shape(detection_object_boxes)[-2]
    valid_objects_mask = od_utils.get_objects_mask_from_num_objects(
        max_num_detections, num_object_detections)
    # a detection survives if it passes the limits of any class
    passes_any_class = tf.reduce_any(
        tf.stack(per_class_masks, -1), -1)
    return tf.logical_and(passes_any_class, valid_objects_mask)
def _get_filtered_detections_mask_single_class(
        class_index,
        detection_object_boxes,
        detection_object_scores,
        min_object_width,
        max_object_width, min_object_height,
        max_object_height, score_threshold):
    """
    Build a boolean mask of detections satisfying the score / width / height
    limits of a single class.
    """
    # pylint: disable=too-many-arguments
    # all the arguments are needed to perform filter operation
    score_ok = tf.greater_equal(
        detection_object_scores, score_threshold[class_index])
    width_ok = od_utils.get_filter_mask_by_width(
        detection_object_boxes,
        min_object_width[class_index], max_object_width[class_index])
    height_ok = od_utils.get_filter_mask_by_height(
        detection_object_boxes,
        min_object_height[class_index], max_object_height[class_index])
    # all three conditions must hold for a detection to be kept
    return tf.reduce_all(tf.stack([score_ok, width_ok, height_ok], -1), -1)
def _maybe_init_classes(detection_object_classes, detection_object_scores):
    """
    Return the classes tensor, creating an all-zero int32 tensor shaped like
    the scores when no classes were provided.

    NOTE(review): the postprocessor docstrings state that missing classes are
    "assumed to be 1 on all active detections", but this initializes them to
    0 -- confirm which convention is intended.
    """
    if detection_object_classes is None:
        detection_object_classes = tf.cast(
            tf.zeros_like(detection_object_scores), tf.int32)
    return detection_object_classes
def _maybe_init_instance_ids(detection_object_classes,
                             detection_object_instance_ids):
    """
    Return the instance ids, defaulting to zeros shaped like the classes
    tensor when none were provided.
    """
    if detection_object_instance_ids is not None:
        return detection_object_instance_ids
    return tf.zeros_like(detection_object_classes)
def _parameters_are_classwise(*parameters):
    """
    Return a scalar boolean tensor that is True when any parameter has
    different values across classes, i.e. classwise filtering is required.
    """
    uniform_flags = [
        tf.reduce_all(tf.equal(each_parameter, each_parameter[0]))
        for each_parameter in parameters
    ]
    all_uniform = tf.reduce_all(tf.stack(uniform_flags))
    return tf.logical_not(all_uniform)
def _validate_classwise_parameters(parameter, num_classes, parameter_name):
    """
    Normalize a possibly classwise parameter.

    A scalar is wrapped into a single-element list (later broadcast to
    `num_classes` entries via a default placeholder); a list / tuple must
    already hold exactly `num_classes` entries and is returned unchanged.

    Parameters
    ----------
    parameter
        scalar or list / tuple of per-class values
    num_classes
        number of classes the classwise variant must cover
    parameter_name
        name of the parameter used inside the error message

    Returns
    -------
    parameter
        single-element list for a scalar input, otherwise the input itself

    Raises
    ------
    ValueError
        if a list / tuple input does not have `num_classes` entries
    """
    if not isinstance(parameter, (list, tuple)):
        return [parameter]
    # raise instead of assert so the validation survives `python -O`
    if len(parameter) != num_classes:
        raise ValueError(
            "Parameter {} should be of length num_classes, {}, to use "
            "classwise! To use it class agnostic, provide only a scalar"
            .format(parameter_name, num_classes))
    return parameter
def _zero_according_to_object_mask(mask: tf.Tensor, item: tf.Tensor
                                   ) -> tf.Tensor:
    """
    Zero out the entries of `item` where `mask` is False, first broadcasting
    the object mask to the shape and dtype of `item`.
    """
    broadcast_mask = tf.cast(broadcast_with_expand_to(mask, item), item.dtype)
    return broadcast_mask * item
| audi/ncgenes7 | ncgenes7/postprocessors/object_detection.py | object_detection.py | py | 45,248 | python | en | code | 9 | github-code | 13 |
21949012096 | def gnome_sort(arr):
"""Гномья сортировака по возрастанию"""
pointer = 1
i = 1
while i < len(arr):
while arr[i] < arr[i - 1] and i != 0:
arr[i], arr[i - 1] = arr[i - 1], arr[i]
i -= 1
pointer += 1
i = pointer
return arr
| n-inferno/algorithms | sorting/gnome_sort.py | gnome_sort.py | py | 319 | python | en | code | 0 | github-code | 13 |
23744589875 | import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
import re
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
#Main Program
web = input('Enter - ')
count = input('Enter count: ')
count = int(count)
position = input('Enter position: ')
position = int(position)
def letsGo(url,position):
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
# Retrieve all of the anchor tags
tags = soup('a')
temp=tags[position-1].get('href', None)
return temp
if len(web)<1 : web ='http://py4e-data.dr-chuck.net/known_by_Tatiana.html'
for i in range (count):
nextUrl=letsGo(web,position)
web=nextUrl
print(web)
nameResult = re.findall('_([^_]*?)\.html',web)
nameResult = nameResult[0]
print(nameResult)
| criticalhitx/PythonForEverybody_CSR | Course 3/Chap 12/bsexam3.py | bsexam3.py | py | 889 | python | en | code | 0 | github-code | 13 |
18770750089 | # n진수 게임
# N : 튜브의 마지막 회차에 나오는 가장 큰 수의 10진수
# 시간복잡도 : O(N log N)
# n 을 base 진수로 바꾸는 함수
def make_n_base_number(n: int, base: int) -> str:
if n == 0:
return '0'
n_base_number = ''
while n > 0:
n, mod = divmod(n, base)
if mod >= 10:
n_base_number = chr(mod - 10 + ord('A')) + n_base_number
continue
n_base_number = str(mod) + n_base_number
return n_base_number
# 튜브가 마지막으로 말할 때까지의 게임 결과를 반환
def make_max_n_base_lines(n: int, t: int, m: int) -> str:
max_n_base_lines = ''
number = 0
while len(max_n_base_lines) < t * m:
max_n_base_lines += make_n_base_number(number, n)
number += 1
return max_n_base_lines
# 진법 n, 미리 구할 숫자의 갯수 t, 게임에 참가하는 인원 m, 튜브의 순서 p
def solution(n: int, t: int, m: int, p: int) -> str:
answer = ''
p -= 1
max_n_base = make_max_n_base_lines(n, t, m)
for multiply in range(t):
answer += max_n_base[p + multiply * m]
return answer
| galug/2023-algorithm-study | level_2/n_base_game.py | n_base_game.py | py | 1,150 | python | ko | code | null | github-code | 13 |
7393951919 | from email import message
import enum
import discord
from discord.ext import commands
from dotenv import load_dotenv
import os.path
import csv
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
client = discord.Client()
member =list ()
def data_base_manager():
file_existence = os.path.exists('data.csv')
if file_existence == False:
f = open('data.csv', 'w')
print("CSV is created")
elif file_existence == True:
print("CSV exists")
with open('data.csv', mode='r') as infile:
reader= csv.reader(infile)
mydict = dict((rows[0],rows[0]) for rows in reader)
for n in list(mydict):
list[n]= mydict.items()
for n in list.size():
print (list[n])
#print(mydict.keys())
for n in mydict:
if n in mydict:
print (n)
data_base_manager()
tuple1 = ("fuck",2)
tuple2 = ("meaw",3)
#Fetch user
@client.event
async def on_message(message):
if message.author == client.user:
return
else:
current_user_id= message.author.id
await message.channel.send('Your uid is ' + str( current_user_id))
if current_user_id in mydict[{}]:
await message.channel.send('current user is registered.')
emojis= ['0️⃣','1️⃣','2️⃣','3️⃣','4️⃣','5️⃣','6️⃣','7️⃣','8️⃣','9️⃣','🔟']
for emoji in emojis:
await message.add_reaction(emoji)
# if '!claim' in message.content.lower():
# for n in 30:
client.run(TOKEN) | wangel1990/BiscuitCasino | ReadWritebot.py | ReadWritebot.py | py | 1,665 | python | en | code | 1 | github-code | 13 |
25451744490 | """
@Project_Description:
Observe HaarCascade Face Recognition Classifier
performance on basic to complex images.
@Author:
Can Ali Ates
"""
# Import Libraries.
import os
import cv2
# Import Classifier.
faceCascade = cv2.CascadeClassifier("Files/face_recognition.xml")
# Create a Directory to Save Results.
if not os.path.isdir("Output"):
    os.mkdir("Output")
# Arrange Suffixes to Detect Images.
suffixes = (".png", ".jpg", ".jpeg")
# Iterate Over Media Files.
for file in os.listdir("Media"):
    # Check Image File.
    if file.endswith(suffixes):
        # Read Image.
        # NOTE(review): cv2.imread returns None for unreadable files, which
        # would make cvtColor below raise — consider guarding; confirm inputs.
        image = cv2.imread(f"Media/{file}")
        # Convert Image to Gray Scale.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Arrange HaarCascade Face Classifier Parameters.
        faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
        # Print Count of Detected Faces.
        image = cv2.putText(image, f"Face Count: {len(faces)}", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_AA)
        # Draw Rectangle Around Face(s).
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
        # Show Face Detected Images.
        cv2.imshow("Face Recognition", image)
        # Save Result.
        cv2.imwrite(f"Output/{file}", image)
        # Display each result for 1.5 seconds (1500 ms).
        cv2.waitKey(1500)
# Wait for a key press, then close all windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
| canatess/basic_image_processing_projects | Face Detection on Image/main.py | main.py | py | 1,535 | python | en | code | 0 | github-code | 13 |
33759804001 | import aipy as a, numpy as n
def sim_src_Jy(fq_GHz, amp_Jy_150=.1, index=-1., dly_ns=100., phs_offset=1.):
    """Simulate a point-source spectrum (Jy) with a power law and delay phase.

    The amplitude follows amp_Jy_150 * (f / 150 MHz)**index, multiplied by a
    geometric-delay fringe exp(-2j*pi*f*dly_ns) and an extra phase offset.
    """
    power_law = amp_Jy_150 * (fq_GHz / .150) ** index
    fringe = phs_offset * n.exp(-2j * n.pi * fq_GHz.astype(n.complex64) * dly_ns)
    return power_law * fringe
def sim_srcs(aa, pol, cnt_100Jy_per_sr_Jy=.1, cnt_index=-2., avg_index=-1.,
        std_index=.25, lo_cutoff_Jy=.3, hi_cutoff_Jy=100., bl_ns=100.,
        std_ionref_arcmin=1.):
    """Simulate the summed visibility spectrum of a random point-source sky.

    Sources are drawn per log-flux bin from a power-law count distribution,
    placed uniformly on the visible hemisphere (rejection sampling), weighted
    by the antenna beam, and given random spectral indices, geometric delays
    and ionospheric refraction phases.

    Fix: replaced Python-2-only ``xrange`` with ``range`` (NameError on
    Python 3; identical iteration behavior on Python 2).
    """
    fq_GHz = aa.get_afreqs()
    # Log-spaced flux bins from the high cutoff down to the low cutoff.
    bins = 10**n.arange(n.log10(hi_cutoff_Jy), n.log10(lo_cutoff_Jy), -.3)
    spec = n.zeros_like(fq_GHz, dtype=n.complex64)
    umag = bl_ns * fq_GHz
    for i, flx in enumerate(bins[:-1]):
        flx_interval = bins[i] - bins[i+1]
        flx += flx_interval/2
        sr = 2*n.pi
        # Expected source count in this flux bin (beam weighting on the sky
        # counts is deliberately ignored, as in the original).
        cnt = int(n.around(cnt_100Jy_per_sr_Jy * flx_interval * sr * (flx / 100.)**cnt_index))
        for _ in range(cnt):
            index = avg_index + n.random.normal(scale=std_index)
            # Rejection-sample a direction cosine pair inside the unit disk.
            while True:
                x = n.sin(n.random.uniform(-n.pi/2, n.pi/2))
                y = n.sin(n.random.uniform(-n.pi/2, n.pi/2))
                z = x**2 + y**2
                if z <= 1:
                    z = n.sqrt(1-z)
                    break
            bm_resp = aa[0].bm_response((x,y,z), pol=pol[0]) * \
                    n.conj(aa[0].bm_response((x,y,z), pol=pol[1]))
            bm_resp = bm_resp.flatten()
            dly = x * bl_ns
            # Random ionospheric refraction offset, scaling as f**-2.
            dth = n.random.normal(scale=std_ionref_arcmin) * a.ephem.arcminute
            dw = dth * umag * (fq_GHz / .150)**-2
            o = n.random.uniform()
            phs = n.exp(-2j*n.pi*(dw + o))
            spec += bm_resp * sim_src_Jy(fq_GHz, flx, index=index, dly_ns=dly, phs_offset=phs)
    return spec
def sim_sync(fq_GHz, hmap, aa, bl_ns=100., mfreq=.408, pol=None, jd=2455746.5):
    """Simulate the galactic-synchrotron visibility spectrum for a baseline.

    Projects a HEALPix sky map (``hmap``, referenced to ``mfreq`` GHz) onto
    the topocentric hemisphere at Julian date ``jd``, grids the map by
    geometric delay for baseline ``bl_ns`` (scalar, 2- or 3-vector in ns),
    FFTs to a coarse spectrum, CLEANs/interpolates to the requested fine
    channels, applies a -2.5 synchrotron spectral index and converts to Jy.

    Fixes: unpacking a scalar/short ``bl_ns`` raises TypeError, which the
    original ``except(ValueError)`` never caught (crash with the default
    argument); ``dspec.size/2`` float indices crash on Python 3 (now ``//``);
    removed ``np.int``/``np.complex`` aliases replaced with builtins; dead
    ``if True/else`` and ``if False`` debug-plot blocks removed.
    """
    BL_SCALAR = 4.4
    SDF0 = fq_GHz[1] - fq_GHz[0]
    NCHAN = fq_GHz.size * 16
    MFREQ = n.average(fq_GHz)
    px = n.arange(hmap.npix())
    px_area = 4*n.pi / hmap.npix()
    # Keep only pixels above the horizon (tz > 0).
    tx,ty,tz = hmap.px2crd(px)
    valid = n.where(tz > 0)
    tx,ty,tz = tx[valid], ty[valid], tz[valid]
    top = n.array([tx, ty, tz])
    if pol is not None:
        # Evaluate the beam at the channel nearest the band center only.
        afreqs = aa.get_afreqs()
        ch = n.argmin(n.abs(afreqs - MFREQ))
        aa.select_chans(n.array([ch]))
        p1,p2 = pol
        bm_resp = aa[0].bm_response((tx,ty,tz), pol=p1) * \
                n.conj(aa[0].bm_response((tx,ty,tz), pol=p2))
        bm_resp = bm_resp.flatten()
        aa.select_chans()
    else: bm_resp = 1
    # Accept bl_ns as a scalar (x only), 2-vector (x, y) or 3-vector (x, y, z).
    bx,by,bz = 0.,0.,0.
    try: bx,by,bz = bl_ns
    except (TypeError, ValueError):
        try: bx,by = bl_ns
        except (TypeError, ValueError): bx = bl_ns
    BL_LEN = n.sqrt(bx**2 + by**2 + bz**2) * BL_SCALAR
    SDF = 2**n.floor(n.log2(1 / BL_LEN / SDF0)) * SDF0
    BW = NCHAN * SDF
    bins = n.fft.fftfreq(NCHAN, SDF)
    dbin = bins[1] - bins[0]
    # Geometric delay of each pixel, split into a bin index and a residual.
    tau = bx*tx + by*ty + bz*tz
    taubin = n.around(tau / dbin).astype(int)
    taures = tau - taubin * dbin
    phs_res = n.exp(-2j*n.pi*taures.astype(complex) * MFREQ)
    taubin.shape += (1,)
    aa.set_jultime(jd)
    # Precess topocentric coordinates back to J2000 equatorial for map lookup.
    m_precess = a.coord.convert_m('eq','eq', oepoch=aa.epoch)
    m = n.linalg.inv(n.dot(aa.eq2top_m, m_precess)) # takes top to J2000
    ex,ey,ez = n.dot(m, top)
    d = hmap[ex,ey,ez] * bm_resp
    # Frequency axes and band-selection windows.
    freq = n.fft.fftfreq(bins.size, dbin)
    freq = n.where(freq < 0, freq + BW, freq)
    while n.all(freq < MFREQ): freq += BW
    sdf = freq[1] - freq[0]
    window1 = n.where(freq + sdf/2 >= .05, 1., 0) * n.where(freq + sdf/2 < .25, 1., 0)
    freq_pad = n.arange(.05,.25, SDF0)
    sdf = freq_pad[1] - freq_pad[0]
    window2 = n.where(freq_pad + sdf/2 >= .1, 1., 0) * n.where(freq_pad + sdf/2 < .2, 1., 0)
    # Sub-delay-bin phasing, then bin the map by delay.
    d_bl = d * phs_res
    hist = n.zeros(bins.size, dtype=complex)
    a.utils.add2array(hist, taubin, d_bl)
    # Coarse spectrum from the delay histogram.
    spec = n.fft.fft(hist)
    spec = spec.compress(window1)
    freq = freq.compress(window1)
    # Fine spectrum: the window is necessary here to prevent high-freq band
    # edges from coupling into the delay-spectrum model, and then back out
    # into the final spectrum.
    w = a.dsp.gen_window(spec.size, window='blackman-harris')
    dspec = n.fft.ifft(spec*w)
    dspec, info = a.deconv.clean(dspec, n.fft.ifft(w), tol=1e-9, stop_if_div=False)
    dspec_pad = n.zeros(2*fq_GHz.size, dtype=complex)
    dspec_pad[:dspec.size//2] = dspec[:dspec.size//2]
    dspec_pad[-dspec.size//2+1:] = dspec[-dspec.size//2+1:]
    spec_pad = n.fft.fft(dspec_pad)
    spec_pad = spec_pad.compress(window2)
    # Apply the synchrotron spectral index relative to the map frequency.
    spec_pad *= (fq_GHz / mfreq)**-2.5
    # Convert brightness temperature to Jy: 2k/lambda^2 * pixel area / 1e-23.
    spec_pad *= 2 * a.const.k / (a.const.c/fq_GHz/1e9)**2 * px_area / 1e-23
    return spec_pad
| HERA-Team/hera_sandbox | src/fg_sim.py | fg_sim.py | py | 6,084 | python | en | code | 1 | github-code | 13 |
43356981667 | from collections import deque
def solution(queue1, queue2):
answer = 0
goal = sum(queue1) + sum(queue2)
len_queue = len(queue1)
queue1, queue2 = deque(queue1), deque(queue2)
sum1, sum2 = sum(queue1), sum(queue2)
if goal % 2 == 1:
return -1
while sum1 != sum2:
answer += 1
if sum1 > sum2:
tmp = queue1.popleft()
queue2.append(tmp)
sum1 -= tmp
sum2 += tmp
else:
tmp = queue2.popleft()
queue1.append(tmp)
sum1 += tmp
sum2 -= tmp
if answer > len_queue * 4:
answer = -1
break
return answer
| tr0up2r/coding-test | website/programmers/level2/187_same_sum_queues.py | 187_same_sum_queues.py | py | 684 | python | en | code | 0 | github-code | 13 |
30920818352 | from flask import Flask, Response
from flask import jsonify
from flask import make_response
from flask import request
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def index():
    """Health check on GET; Dialogflow fulfillment webhook on POST.

    Extracts the 'time.original' parameter from the first output context and
    echoes it back for 'line'/'facebook' sources; other channels get a
    refusal message.
    """
    if request.method == 'GET':
        return Response("TastySpace is up and running!"), 200
    req = request.get_json(force=True)
    source = req.get('originalDetectIntentRequest').get('source')
    time = req.get('queryResult').get('outputContexts')[0].get('parameters').get('time.original')
    print(req)
    if source == 'line' or source == 'facebook':
        return make_response(jsonify({
            "fulfillmentText": "You choose " + time + ". No problem!",
        }))
    return make_response(jsonify({
        "fulfillmentText": "Sorry: this channel will not process the current intent",
    }))
if __name__ == "__main__":
    # Development server with auto-reload; not suitable for production.
    app.run(debug=True)
| amonmoce/TastySpace | __init__.py | __init__.py | py | 1,117 | python | en | code | 0 | github-code | 13 |
70977529617 | import shioaji as sj
import pandas as pd
import numpy as np
from datetime import datetime as dt
from flask import Flask, request, abort
import sqlite3
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
app = Flask(__name__)
# 必須放上自己的Channel Access Token
line_bot_api = LineBotApi('zzUh9KHvribwehLEmlO+GhwDl/K+bxyi8xPHY8532rVhOef3fFOimt+H08o+JXCuD+bZoY2eKwNjsGdufN5l47XKyoUtlj2JTnmz1OPoffr8QBnmol3UaqwUbpce+v3OOT/8vVq5DboPe9XavbmLfQdB04t89/1O/w1cDnyilFU=')
# 必須放上自己的Channel Secret
handler = WebhookHandler('320ed24fe6ac098d810cf99649840800')
line_bot_api.push_message('U837080b63aa93f9574e7f70e3c5b2c5e', TextSendMessage(text='你可以開始了'))
# Listen for every POST request arriving at /callback.
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the request signature, then dispatch."""
    signature = request.headers['X-Line-Signature']
    payload = request.get_data(as_text=True)
    app.logger.info("Request body: " + payload)
    try:
        handler.handle(payload, signature)
    except InvalidSignatureError:
        # Signature mismatch means the request is not from LINE — reject it.
        abort(400)
    return 'OK'
# --- section divider ---
# Load the stock lookup table (code/name rows) from the local SQLite DB.
dbname = "TWStock.db"
db = sqlite3.connect(dbname)
cursorfordatabase = db.cursor()
query_for_samplefile = """SELECT * from number"""
cursorfordatabase.execute(query_for_samplefile)
required_records = cursorfordatabase.fetchall()
# Keep every row in memory; handle_message() scans this list per message.
list1 = []
for row in required_records:
    list1.append(row)
# Log in to the Shioaji trading API.
# NOTE(review): credentials are hard-coded placeholders — load them from the
# environment (like the LINE tokens should be) instead of source code.
api = sj.Shioaji()
accounts = api.login("帳號", "密碼")
# --- section divider ---
def stock(number):
    """Fetch a market snapshot for one stock code and flatten it to strings.

    Returns a list of string values appended in the order the fields appear
    in the snapshot: open, high, low, close, buy_price, sell_price, plus
    today's date ("%Y/%m/%d") appended last.

    NOTE(review): this parses the *string repr* of shioaji snapshot fields
    (tuples like "('open', 123.0)") — it is tightly coupled to that exact
    format and will break silently if the library changes its repr; confirm
    against the installed shioaji version.
    """
    contracts = [api.Contracts.Stocks[number]]
    snapshots = api.snapshots(contracts)
    # Round-trip through pandas/numpy to get an iterable of field objects.
    df = pd.DataFrame(snapshots)
    list_of_df = np.array(df)
    main = []
    for a in list_of_df:
        for a_deeper in a:
            # Strip the tuple parentheses, then match on the field-name prefix.
            a_deeper = a_deeper.__str__()
            a_deeper = a_deeper.replace("(", "").replace(")", "")
            if a_deeper[:8] == "'open', ":
                a_deeper = a_deeper.replace("'open', ", "")
                main.append(a_deeper)
            elif a_deeper[:8] == "'high', ":
                a_deeper = a_deeper.replace("'high', ", "")
                main.append(a_deeper)
            elif a_deeper[:7] == "'low', ":
                a_deeper = a_deeper.replace("'low', ", "")
                main.append(a_deeper)
            elif a_deeper[:9] == "'close', ":
                a_deeper = a_deeper.replace("'close', ", "")
                main.append(a_deeper)
            elif a_deeper[:13] == "'buy_price', ":
                a_deeper = a_deeper.replace("'buy_price', ", "")
                main.append(a_deeper)
            elif a_deeper[:14] == "'sell_price', ":
                a_deeper = a_deeper.replace("'sell_price', ", "")
                main.append(a_deeper)
    # Stamp the quote with today's date as the last element (index 6).
    now = dt.now()
    now = now.strftime("%Y/%m/%d")
    main.append(now)
    return main
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Reply with a formatted quote when the text matches a stock code or name.

    ``row[1]`` is the stock code and ``row[2]`` its display name; either one
    triggers a reply built from ``stock(row[1])``.

    Fixes: the original rebound the incoming text (``a = row[1]``) inside the
    loop, so rows after a name match were compared against a stock *code*
    instead of the user's message; both branches also duplicated the entire
    reply-building code.
    """
    text = event.message.text
    for row in list1:
        if text == row[1] or text == row[2]:
            stock_list = stock(row[1])
            # NOTE(review): indices follow the append order in stock();
            # confirm [4]/[5] really correspond to 賣價/買價 for this API.
            stock_message = "日期: " + stock_list[6] + '\n' + "開盤: " + stock_list[0] + '\n' + "最高價: " + stock_list[1] + '\n' + "最低價: " + \
                stock_list[2] + '\n' + "收盤價: " + stock_list[3] + '\n' + \
                "賣價: " + stock_list[4] + '\n' + "買價: " + stock_list[5]
            line_bot_api.reply_message(event.reply_token, TextSendMessage(text=stock_message))
# Main entry point: bind to the platform-provided port (default 5000).
import os
if __name__ == "__main__":
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| willta981165/Line_chatbot_Demo | app.py | app.py | py | 4,120 | python | en | code | 0 | github-code | 13 |
41726648074 | # Иванов М.
# Задание-1:
# Напишите функцию, возвращающую ряд Фибоначчи с n-элемента до m-элемента.
# Первыми элементами ряда считать цифры 1 1
def fibonacci(n, m):
    """Return the Fibonacci series from the n-th to the m-th element.

    The series is 1-based and starts 1, 1 (per the assignment). Returns an
    empty list when m < n. Assumes n >= 1.

    Fixes: the original m == n branch called the undefined name
    ``elem_fibonacci`` (NameError; the inner helper was ``e_fibonacci``);
    the exponential recursive helper is replaced by a linear iteration.
    """
    if m < n:
        return []
    # Advance (a, b) so that a holds the n-th Fibonacci number.
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    series = [a]
    for _ in range(m - n):
        a, b = b, a + b
        series.append(a)
    return series
print(fibonacci(3,9))
# Задача-2:
# Напишите функцию, сортирующую принимаемый список по возрастанию.
# Для сортировки используйте любой алгоритм (например пузырьковый).
# Для решения данной задачи нельзя использовать встроенную функцию и метод sort()
def sort_to_max(origin):
    """Sort *origin* in place, ascending, via bubble sort.

    The exercise forbids the built-in sort()/sorted(); after pass k the
    largest k elements have bubbled to the end, so each inner pass shrinks.
    Returns None (in-place mutation only).
    """
    size = len(origin)
    for done in range(1, size):
        for j in range(size - done):
            if origin[j] > origin[j + 1]:
                origin[j], origin[j + 1] = origin[j + 1], origin[j]
a = [2, 10, -12, 2.5, 20, -11, 4, 4, 0]
sort_to_max(a)
print(a)
# Задача-3:
# Напишите собственную реализацию стандартной функции filter.
# Разумеется, внутри нельзя использовать саму функцию filter.
def my_filter(lambda_f, array):
    """Re-implementation of filter(): keep items where lambda_f is truthy.

    Unlike the builtin, returns a list rather than a lazy iterator.
    """
    return [item for item in array if lambda_f(item)]
# Задача-4:
# Даны четыре точки А1(х1, у1), А2(x2 ,у2), А3(x3 , у3), А4(х4, у4).
# Определить, будут ли они вершинами параллелограмма.
A1 = [1,1]
A2 = [2,2]
A3 = [3,3]
A4 = [4,4]
def is_paral(A1, A2, A3, A4):
    """True when A1A2 ∥ A3A4 and A1A4 ∥ A2A3 (both cross products are zero).

    NOTE(review): degenerate (e.g. collinear) point sets also satisfy both
    parallelism tests and therefore return True — confirm whether such
    inputs should count as a parallelogram.
    """
    def cross(p, q, r, s):
        # z-component of (q - p) x (s - r); zero means the segments are parallel.
        return (q[0] - p[0]) * (s[1] - r[1]) - (q[1] - p[1]) * (s[0] - r[0])
    return cross(A1, A2, A3, A4) == 0 and cross(A1, A4, A2, A3) == 0
print(is_paral(A1,A2,A3,A4))
| Max11175/DL_homework | homework 1/дз 4/hw04_normal.py | hw04_normal.py | py | 2,532 | python | ru | code | 0 | github-code | 13 |
7318650946 | import os
from unittest import mock
import swapper
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from openwisp_controller.connection.tests.utils import CreateConnectionsMixin
from ..swapper import load_model
Build = load_model('Build')
Category = load_model('Category')
FirmwareImage = load_model('FirmwareImage')
DeviceFirmware = load_model('DeviceFirmware')
DeviceFirmware = load_model('DeviceFirmware')
Credentials = swapper.load_model('connection', 'Credentials')
class TestUpgraderMixin(CreateConnectionsMixin):
    """Test fixture helpers for firmware-upgrader tests.

    Provides get-or-create and create helpers for Category/Build/
    FirmwareImage/DeviceFirmware plus a full upgrade environment builder.
    Private ``_create_*`` methods always create; ``_get_*`` methods reuse an
    existing row when one matches.
    """
    FAKE_IMAGE_PATH = os.path.join(settings.PRIVATE_STORAGE_ROOT, 'fake-img.bin')
    FAKE_IMAGE_PATH2 = os.path.join(settings.PRIVATE_STORAGE_ROOT, 'fake-img2.bin')
    TPLINK_4300_IMAGE = 'ath79-generic-tplink_tl-wdr4300-v1-squashfs-sysupgrade.bin'
    TPLINK_4300_IL_IMAGE = (
        'ath79-generic-tplink_tl-wdr4300-v1-il-squashfs-sysupgrade.bin'
    )
    def tearDown(self):
        # Delete images individually so each one's file is removed from storage.
        for fw in FirmwareImage.objects.all():
            fw.delete()
    def _get_build(self, version="0.1", **kwargs):
        """Return an existing Build matching the options, or create one."""
        opts = {"version": version}
        opts.update(kwargs)
        try:
            return Build.objects.get(**opts)
        except Build.DoesNotExist:
            return self._create_build(**opts)
    def _get_category(self, cat_name="Test Category", **kwargs):
        """Return an existing Category matching the options, or create one."""
        opts = {"name": cat_name}
        opts.update(kwargs)
        try:
            return Category.objects.get(**opts)
        except Category.DoesNotExist:
            return self._create_category(**opts)
    def _create_category(self, **kwargs):
        """Create and validate a Category (default org when none given)."""
        opts = dict(name='Test Category')
        opts.update(kwargs)
        if 'organization' not in opts:
            opts['organization'] = self._get_org()
        c = Category(**opts)
        c.full_clean()
        c.save()
        return c
    def _create_build(self, **kwargs):
        """Create and validate a Build; an 'organization' kwarg is forwarded
        to the category lookup rather than the Build itself."""
        opts = dict(version='0.1')
        opts.update(kwargs)
        category_opts = {}
        if 'organization' in opts:
            category_opts = {'organization': opts.pop('organization')}
        if 'category' not in opts:
            opts['category'] = self._get_category(**category_opts)
        b = Build(**opts)
        b.full_clean()
        b.save()
        return b
    def _create_firmware_image(self, **kwargs):
        """Create and validate a FirmwareImage with a fake uploaded file."""
        opts = dict(type=self.TPLINK_4300_IMAGE)
        opts.update(kwargs)
        build_opts = {}
        if 'organization' in opts:
            build_opts['organization'] = opts.pop('organization')
        if 'build' not in opts:
            opts['build'] = self._get_build(**build_opts)
        if 'file' not in opts:
            opts['file'] = self._get_simpleuploadedfile()
        fw = FirmwareImage(**opts)
        fw.full_clean()
        fw.save()
        return fw
    def _get_simpleuploadedfile(self, filename=None):
        """Wrap the fake image on disk in a Django SimpleUploadedFile."""
        if not filename:
            filename = self.FAKE_IMAGE_PATH
        with open(filename, 'rb') as f:
            image = f.read()
        return SimpleUploadedFile(
            name=f'openwrt-{self.TPLINK_4300_IMAGE}',
            content=image,
            content_type='application/octet-stream',
        )
    def _create_device_firmware(self, upgrade=False, device_connection=True, **kwargs):
        """Create a DeviceFirmware (device/config/connection built on demand).

        ``upgrade=True`` triggers the upgrade flow inside save().
        """
        opts = dict()
        opts.update(kwargs)
        if 'image' not in opts:
            opts['image'] = self._create_firmware_image()
        if 'device' not in opts:
            # Device must belong to the same organization as the image.
            org = opts['image'].build.category.organization
            opts['device'] = self._create_device(organization=org)
            self._create_config(device=opts['device'])
        if device_connection:
            self._create_device_connection(device=opts['device'])
        device_fw = DeviceFirmware(**opts)
        device_fw.full_clean()
        device_fw.save(upgrade=upgrade)
        return device_fw
    def _create_upgrade_env(
        self, device_firmware=True, upgrade_operation=False, **kwargs
    ):
        """Build two devices on build 0.1 plus a newer build 0.2.

        Returns a dict with both builds, both devices, their four images and
        (optionally) the two DeviceFirmware records.
        """
        org = kwargs.pop('organization', self._get_org())
        category = kwargs.pop('category', self._get_category(organization=org))
        build1 = self._create_build(category=category, version='0.1')
        image1a = self._create_firmware_image(build=build1, type=self.TPLINK_4300_IMAGE)
        image1b = self._create_firmware_image(
            build=build1, type=self.TPLINK_4300_IL_IMAGE
        )
        # create devices
        d1 = self._create_device(
            name='device1',
            organization=org,
            mac_address='00:22:bb:33:cc:44',
            model=image1a.boards[0],
        )
        d2 = self._create_device(
            name='device2',
            organization=org,
            mac_address='00:11:bb:22:cc:33',
            model=image1b.boards[0],
        )
        ssh_credentials = self._get_credentials(organization=None)
        self._create_config(device=d1)
        self._create_config(device=d2)
        self._create_device_connection(device=d1, credentials=ssh_credentials)
        self._create_device_connection(device=d2, credentials=ssh_credentials)
        # create a new firmware build
        build2 = self._create_build(category=category, version='0.2')
        image2a = self._create_firmware_image(build=build2, type=self.TPLINK_4300_IMAGE)
        image2b = self._create_firmware_image(
            build=build2, type=self.TPLINK_4300_IL_IMAGE
        )
        data = {
            'build1': build1,
            'build2': build2,
            'd1': d1,
            'd2': d2,
            'image1a': image1a,
            'image1b': image1b,
            'image2a': image2a,
            'image2b': image2b,
        }
        # force create device firmware (optional)
        if device_firmware:
            device_fw1 = self._create_device_firmware(
                device=d1,
                image=image1a,
                upgrade=upgrade_operation,
                device_connection=False,
            )
            device_fw2 = self._create_device_firmware(
                device=d2,
                image=image1b,
                upgrade=upgrade_operation,
                device_connection=False,
            )
            data.update(
                {
                    'device_fw1': device_fw1,
                    'device_fw2': device_fw2,
                }
            )
        return data
    def _create_firmwareless_device(self, organization):
        """Create a device (with config and connection) that has no firmware."""
        d = self._create_device(
            name='firmwareless',
            mac_address='01:12:23:44:55:66',
            organization=organization,
        )
        self._create_config(device=d)
        self._create_device_connection(
            device=d, credentials=Credentials.objects.first()
        )
        return d
    def _create_device_with_connection(self, **kwargs):
        """Create a device together with its config and connection."""
        d1 = self._create_device(**kwargs)
        self._create_config(device=d1)
        self._create_device_connection(device=d1)
        return d1
def spy_mock(method, pre_action):
    """Wrap *method* so every call is recorded and *pre_action* runs first.

    The returned wrapper records each call on a MagicMock (exposed as
    ``wrapper.mock`` for assertions), invokes ``pre_action`` with the same
    arguments, then delegates to ``method`` and returns its result.

    Fix: applied functools.wraps so the wrapper keeps the wrapped method's
    name/docstring instead of appearing as a generic ``wrapper``.
    """
    magicmock = mock.MagicMock()
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        magicmock(*args, **kwargs)
        pre_action(*args, **kwargs)
        return method(*args, **kwargs)
    wrapper.mock = magicmock
    return wrapper
| openwisp/openwisp-firmware-upgrader | openwisp_firmware_upgrader/tests/base.py | base.py | py | 7,170 | python | en | code | 40 | github-code | 13 |
72366443537 | #10.Search for a value from an array.
def search(array, n):
    """Return True if *n* occurs in *array*, else False.

    Idiom fix: the manual while-loop with index bookkeeping is replaced by
    the built-in membership test, which performs the same linear scan.
    """
    return n in array
array = [2,3,4,5,10,11,12,15]
n = 10
if search(array,n):
print("found")
else:
print("not found") | karimsuzon/Developing-Python-Application | week6_taska10.py | week6_taska10.py | py | 288 | python | en | code | 0 | github-code | 13 |
18347580808 | """
Inception V3, suitable for images with around 299 x 299
Reference:
Szegedy, Christian, et al. "Rethinking the Inception Architecture for Computer Vision." arXiv preprint arXiv:1512.00567 (2015).
"""
import mxnet as mx
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=''):
    """Convolution -> BatchNorm -> ReLU building block.

    Layer names are '<name><suffix>_conv2d' / '_batchnorm' / '_relu' so that
    pretrained Inception-v3 checkpoints load by name.
    """
    out = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, name='%s%s_conv2d' % (name, suffix))
    out = mx.sym.BatchNorm(data=out, name='%s%s_batchnorm' % (name, suffix), fix_gamma=True)
    return mx.sym.Activation(data=out, act_type='relu', name='%s%s_relu' % (name, suffix))
def Inception7A(data,
                num_1x1,
                num_3x3_red, num_3x3_1, num_3x3_2,
                num_5x5_red, num_5x5,
                pool, proj,
                name):
    """Inception-A block: 1x1, reduced-5x5, double-3x3 and pooled-projection
    towers, concatenated along the channel axis."""
    tower_1x1 = Conv(data, num_1x1, name=('%s_conv' % name))
    tower_5x5 = Conv(data, num_5x5_red, name=('%s_tower' % name), suffix='_conv')
    tower_5x5 = Conv(tower_5x5, num_5x5, kernel=(5, 5), pad=(2, 2), name=('%s_tower' % name), suffix='_conv_1')
    tower_3x3 = Conv(data, num_3x3_red, name=('%s_tower_1' % name), suffix='_conv')
    tower_3x3 = Conv(tower_3x3, num_3x3_1, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_1')
    tower_3x3 = Conv(tower_3x3, num_3x3_2, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_2')
    pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
    cproj = Conv(pooling, proj, name=('%s_tower_2' % name), suffix='_conv')
    concat = mx.sym.Concat(*[tower_1x1, tower_5x5, tower_3x3, cproj], name='ch_concat_%s_chconcat' % name)
    return concat
# First Downsample
def Inception7B(data,
                num_3x3,
                num_d3x3_red, num_d3x3_1, num_d3x3_2,
                pool,
                name):
    """Inception-B (first downsample): stride-2 3x3, double-3x3 (stride-2 on
    the last conv) and stride-2 max-pool towers, channel-concatenated."""
    tower_3x3 = Conv(data, num_3x3, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=('%s_conv' % name))
    tower_d3x3 = Conv(data, num_d3x3_red, name=('%s_tower' % name), suffix='_conv')
    tower_d3x3 = Conv(tower_d3x3, num_d3x3_1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name=('%s_tower' % name), suffix='_conv_1')
    tower_d3x3 = Conv(tower_d3x3, num_d3x3_2, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=('%s_tower' % name), suffix='_conv_2')
    pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(0,0), pool_type="max", name=('max_pool_%s_pool' % name))
    concat = mx.sym.Concat(*[tower_3x3, tower_d3x3, pooling], name='ch_concat_%s_chconcat' % name)
    return concat
def Inception7C(data,
                num_1x1,
                num_d7_red, num_d7_1, num_d7_2,
                num_q7_red, num_q7_1, num_q7_2, num_q7_3, num_q7_4,
                pool, proj,
                name):
    """Inception-C block: 1x1, factorized-7x7 (1x7+7x1), double factorized-7x7
    and pooled-projection towers, channel-concatenated."""
    tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_conv' % name))
    tower_d7 = Conv(data=data, num_filter=num_d7_red, name=('%s_tower' % name), suffix='_conv')
    tower_d7 = Conv(data=tower_d7, num_filter=num_d7_1, kernel=(1, 7), pad=(0, 3), name=('%s_tower' % name), suffix='_conv_1')
    tower_d7 = Conv(data=tower_d7, num_filter=num_d7_2, kernel=(7, 1), pad=(3, 0), name=('%s_tower' % name), suffix='_conv_2')
    tower_q7 = Conv(data=data, num_filter=num_q7_red, name=('%s_tower_1' % name), suffix='_conv')
    tower_q7 = Conv(data=tower_q7, num_filter=num_q7_1, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_1')
    tower_q7 = Conv(data=tower_q7, num_filter=num_q7_2, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_2')
    tower_q7 = Conv(data=tower_q7, num_filter=num_q7_3, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_3')
    tower_q7 = Conv(data=tower_q7, num_filter=num_q7_4, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_4')
    pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
    cproj = Conv(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_tower_2' % name), suffix='_conv')
    # concat
    concat = mx.sym.Concat(*[tower_1x1, tower_d7, tower_q7, cproj], name='ch_concat_%s_chconcat' % name)
    return concat
def Inception7D(data,
                num_3x3_red, num_3x3,
                num_d7_3x3_red, num_d7_1, num_d7_2, num_d7_3x3,
                pool,
                name):
    """Inception-D (second downsample): stride-2 3x3, factorized-7x7 followed
    by stride-2 3x3, and stride-2 pooling towers, channel-concatenated."""
    tower_3x3 = Conv(data=data, num_filter=num_3x3_red, name=('%s_tower' % name), suffix='_conv')
    tower_3x3 = Conv(data=tower_3x3, num_filter=num_3x3, kernel=(3, 3), pad=(0,0), stride=(2, 2), name=('%s_tower' % name), suffix='_conv_1')
    tower_d7_3x3 = Conv(data=data, num_filter=num_d7_3x3_red, name=('%s_tower_1' % name), suffix='_conv')
    tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_1, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_1')
    tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_2, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_2')
    tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_3x3, kernel=(3, 3), stride=(2, 2), name=('%s_tower_1' % name), suffix='_conv_3')
    pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
    # concat
    concat = mx.sym.Concat(*[tower_3x3, tower_d7_3x3, pooling], name='ch_concat_%s_chconcat' % name)
    return concat
def Inception7E(data,
                num_1x1,
                num_d3_red, num_d3_1, num_d3_2,
                num_3x3_d3_red, num_3x3, num_3x3_d3_1, num_3x3_d3_2,
                pool, proj,
                name):
    """Inception-E block: 1x1 tower, split 1x3/3x1 tower, 3x3 followed by a
    split 1x3/3x1 tower, and pooled projection — all channel-concatenated."""
    tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_conv' % name))
    tower_d3 = Conv(data=data, num_filter=num_d3_red, name=('%s_tower' % name), suffix='_conv')
    tower_d3_a = Conv(data=tower_d3, num_filter=num_d3_1, kernel=(1, 3), pad=(0, 1), name=('%s_tower' % name), suffix='_mixed_conv')
    tower_d3_b = Conv(data=tower_d3, num_filter=num_d3_2, kernel=(3, 1), pad=(1, 0), name=('%s_tower' % name), suffix='_mixed_conv_1')
    tower_3x3_d3 = Conv(data=data, num_filter=num_3x3_d3_red, name=('%s_tower_1' % name), suffix='_conv')
    tower_3x3_d3 = Conv(data=tower_3x3_d3, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_1')
    tower_3x3_d3_a = Conv(data=tower_3x3_d3, num_filter=num_3x3_d3_1, kernel=(1, 3), pad=(0, 1), name=('%s_tower_1' % name), suffix='_mixed_conv')
    tower_3x3_d3_b = Conv(data=tower_3x3_d3, num_filter=num_3x3_d3_2, kernel=(3, 1), pad=(1, 0), name=('%s_tower_1' % name), suffix='_mixed_conv_1')
    pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
    cproj = Conv(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_tower_2' % name), suffix='_conv')
    # concat
    concat = mx.sym.Concat(*[tower_1x1, tower_d3_a, tower_d3_b, tower_3x3_d3_a, tower_3x3_d3_b, cproj], name='ch_concat_%s_chconcat' % name)
    return concat
# In[49]:
def get_symbol(num_classes=1000, **kwargs):
    """Build the Inception-v3 symbolic graph for ~299x299 inputs.

    Stages: stem convs -> 3x Inception-A + downsample-B -> 4x Inception-C +
    downsample-D -> 2x Inception-E -> global average pool -> FC -> softmax.
    ``**kwargs`` is accepted for model-zoo signature compatibility and ignored.
    """
    data = mx.symbol.Variable(name="data")
    # stage 1
    conv = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name="conv")
    conv_1 = Conv(conv, 32, kernel=(3, 3), name="conv_1")
    conv_2 = Conv(conv_1, 64, kernel=(3, 3), pad=(1, 1), name="conv_2")
    pool = mx.sym.Pooling(data=conv_2, kernel=(3, 3), stride=(2, 2), pool_type="max", name="pool")
    # stage 2
    conv_3 = Conv(pool, 80, kernel=(1, 1), name="conv_3")
    conv_4 = Conv(conv_3, 192, kernel=(3, 3), name="conv_4")
    pool1 = mx.sym.Pooling(data=conv_4, kernel=(3, 3), stride=(2, 2), pool_type="max", name="pool1")
    # stage 3
    in3a = Inception7A(pool1, 64,
                       64, 96, 96,
                       48, 64,
                       "avg", 32, "mixed")
    in3b = Inception7A(in3a, 64,
                       64, 96, 96,
                       48, 64,
                       "avg", 64, "mixed_1")
    in3c = Inception7A(in3b, 64,
                       64, 96, 96,
                       48, 64,
                       "avg", 64, "mixed_2")
    in3d = Inception7B(in3c, 384,
                       64, 96, 96,
                       "max", "mixed_3")
    # stage 4
    in4a = Inception7C(in3d, 192,
                       128, 128, 192,
                       128, 128, 128, 128, 192,
                       "avg", 192, "mixed_4")
    in4b = Inception7C(in4a, 192,
                       160, 160, 192,
                       160, 160, 160, 160, 192,
                       "avg", 192, "mixed_5")
    in4c = Inception7C(in4b, 192,
                       160, 160, 192,
                       160, 160, 160, 160, 192,
                       "avg", 192, "mixed_6")
    in4d = Inception7C(in4c, 192,
                       192, 192, 192,
                       192, 192, 192, 192, 192,
                       "avg", 192, "mixed_7")
    in4e = Inception7D(in4d, 192, 320,
                       192, 192, 192, 192,
                       "max", "mixed_8")
    # stage 5
    in5a = Inception7E(in4e, 320,
                       384, 384, 384,
                       448, 384, 384, 384,
                       "avg", 192, "mixed_9")
    in5b = Inception7E(in5a, 320,
                       384, 384, 384,
                       448, 384, 384, 384,
                       "max", 192, "mixed_10")
    # pool
    pool = mx.sym.Pooling(data=in5b, kernel=(8, 8), stride=(1, 1), pool_type="avg", name="global_pool")
    flatten = mx.sym.Flatten(data=pool, name="flatten")
    fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc1')
    softmax = mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
    return softmax
| zhreshold/mxnet-ssd | symbol/inceptionv3.py | inceptionv3.py | py | 9,786 | python | en | code | 763 | github-code | 13 |
18719868845 | #!/usr/bin/env python
# coding: utf-8
import os
import torch
import wandb
import time
from torch.optim.lr_scheduler import StepLR
from utils.register_dataset import register_vrd_dataset
from config import get_vrd_cfg, CHECKPOINT_DIR
from modeling.vltranse_256 import VLTransE
from utils.annotations import get_object_classes, get_predicate_classes
from modeling.word_features import get_triples_features, get_trained_triples_memo
from detectron2.data import (
DatasetCatalog,
DatasetMapper,
build_detection_train_loader,
build_detection_test_loader,
)
import detectron2.data.transforms as T
from utils.trainer import load_checkpoint
def train_log(loss, lr, it, epoch, loss_subj, loss_pred, loss_obj, loss_transr):
    """Push the current interval's training metrics to Weights & Biases,
    keyed by the global iteration number."""
    metrics = {
        "lr": lr,
        "epoch": epoch,
        "loss": loss,
        "loss_subj": loss_subj,
        "loss_pred": loss_pred,
        "loss_obj": loss_obj,
        "loss_transr": loss_transr,
    }
    wandb.log(metrics, step=it)
def train_model(model_name="vltranse_256"):
    """Train VLTransE on the VRD dataset with SGD, logging to W&B and saving
    a checkpoint once per epoch.

    Args:
        model_name: used as the W&B project name and checkpoint file prefix.

    Fixes: ``epoch`` was only assigned inside the logging branch but read in
    the checkpoint branch (possible NameError when the checkpoint iteration
    is not preceded by a log); removed a duplicate
    ``n_datapoints = len(train_dataset)`` and a dead
    ``negative_examples = {}`` assignment.
    """
    cfg = get_vrd_cfg()
    torch.manual_seed(0)
    dataset_name = "vrd"
    checkpoint_model_name = "vltranse_max_negative_8000.pt"
    # [Only run once] Register dataset with detectron2 instead of a custom dataloader.
    register_vrd_dataset(dataset_name)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = VLTransE(cfg)
    model.to(device)
    # Data-parallel across all visible GPUs.
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    # Data loader (images resized to 800x800).
    train_dataset = DatasetCatalog.get(f"{dataset_name}_train")
    train_dataloader = build_detection_train_loader(
        cfg,
        dataset=train_dataset,
        mapper=DatasetMapper(cfg, is_train=True, augmentations=[T.Resize((800, 800))]),
    )
    iter_dataloader = iter(train_dataloader)
    # How long will the training be?
    n_iters = 12001  # cfg.SOLVER.MAX_ITER
    n_datapoints = len(train_dataset)
    num_epochs = int(n_iters / n_datapoints)
    learning_rate = 0.001
    optimizer = torch.optim.SGD(
        model.parameters(), lr=learning_rate, momentum=0.9, nesterov=True
    )
    # NOTE(review): scheduler.step() is never called below, so the learning
    # rate never actually decays — confirm whether a per-epoch step was intended.
    scheduler = StepLR(optimizer, step_size=4, gamma=0.1)
    # Checkpointing: once per epoch.
    chkpoint_it = n_datapoints
    initial_it = 0  # checkpoint initial iteration to resume training
    losses = []
    # Optionally resume from an existing checkpoint.
    load_chkpoint = False
    if load_chkpoint:
        chkpoint_full_path = os.path.join(CHECKPOINT_DIR, checkpoint_model_name)
        it, start_epoch, losses = load_checkpoint(
            model, chkpoint_full_path, optimizer=optimizer
        )
        initial_it = it
    interval_cnt = 0
    # Weights & Biases setup.
    project_name = model_name
    log_interval = 20
    wandb.init(project=project_name, entity="herobaby71")
    wandb.config = {
        "seed": 0,
        "learning_rate": learning_rate,
        "gamma": 0.1,
        "momentum": 0.9,
        "epochs": num_epochs,
        "n_iters": n_iters,
        "batch_size": 1,
    }
    wandb.watch(model, log="all", log_freq=20)
    # Running loss accumulators for the current logging interval.
    total_loss = 0
    subj_loss = 0
    obj_loss = 0
    pred_loss = 0
    transr_loss = 0
    it = 0
    start_time = time.time()
    for i in range(n_iters):
        try:
            data = next(iter_dataloader)[0]
        except StopIteration:
            print(
                "iterator has reach its end at iteration {}. Initializing a new iterator.".format(
                    str(it)
                )
            )
            iter_dataloader = iter(train_dataloader)
            data = next(iter_dataloader)[0]
        # Fast-forward to the resumed position when a checkpoint was loaded.
        if i < initial_it % n_datapoints:
            continue
        if len(data["relationships"]["subj_bboxes"]) <= 1:
            # An image with at most one relationship cannot be trained on.
            print("an image has been removed for this batch")
            continue
        # Other exclusion due to a known bad label.
        if "1841.jpg" in data["file_name"]:
            print("this image has bad label and has been removed.")
            continue
        optimizer.zero_grad()
        # Forward pass with sampled negative examples.
        negative_examples = model.generate_negative_examples(data, K=100)
        triplet_losses = model(data, negative_examples)
        # Backward on the summed triplet losses.
        final_loss = (
            triplet_losses["obj"]
            + triplet_losses["pred"]
            + triplet_losses["subj"]
            + triplet_losses["transr"]
        )
        final_loss.backward()
        optimizer.step()
        # Accumulate interval statistics.
        total_loss += final_loss.item()
        subj_loss += triplet_losses["subj"].item()
        pred_loss += triplet_losses["pred"].item()
        obj_loss += triplet_losses["obj"].item()
        transr_loss += triplet_losses["transr"].item()
        interval_cnt += 1
        # Hoisted out of the logging branch so the checkpoint branch below
        # never reads an unbound name.
        epoch = it / n_datapoints
        if it > initial_it and it % log_interval == 0 and it > 0:
            current_loss = total_loss / interval_cnt
            losses.append(current_loss)
            elapsed = time.time() - start_time
            print(
                "| it {} | epoch {} | lr {} | ms/batch {:5.2f} | loss {:5.2f}".format(
                    it,
                    int(epoch),
                    scheduler.get_last_lr()[0],
                    elapsed * 1000 / log_interval,
                    current_loss,
                )
            )
            train_log(
                current_loss,
                scheduler.get_last_lr()[0],
                it,
                int(epoch),
                loss_subj=subj_loss / interval_cnt,
                loss_pred=pred_loss / interval_cnt,
                loss_obj=obj_loss / interval_cnt,
                loss_transr=transr_loss / interval_cnt,
            )
            # Reset the interval accumulators.
            total_loss = 0
            subj_loss = 0
            pred_loss = 0
            obj_loss = 0
            transr_loss = 0
            interval_cnt = 0
            start_time = time.time()
        if it > initial_it and it % chkpoint_it == 0 and it > 0:
            chkpnt = {
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "epoch": epoch,
                "it": it,
                "losses": losses,
            }
            torch.save(
                chkpnt, os.path.join(CHECKPOINT_DIR, f"{model_name}_{str(it)}.pt")
            )
        # Increment the total processed-iteration count.
        it = it + 1
train_model()
| herobaby71/vltranse | src/train.py | train.py | py | 6,656 | python | en | code | 0 | github-code | 13 |
10734536598 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> int:
        """Return the maximum width of the binary tree.

        Level-order sweep where every node carries a heap-style position
        index (left child 2*i+1, right child 2*i+2).  The width of a level
        is last_index - first_index + 1.  Indices are re-based to each
        level's first index so they do not grow without bound.
        """
        best = 1
        level = [(root, 0)]
        while level:
            first = level[0][1]
            last = level[-1][1]
            best = max(best, last - first + 1)
            next_level = []
            for node, pos in level:
                pos -= first  # re-base before assigning child indices
                if node.left:
                    next_level.append((node.left, 2 * pos + 1))
                if node.right:
                    next_level.append((node.right, 2 * pos + 2))
            level = next_level
        return best
"""
Start time: 8:41PM
approach1:
""" | Therealchainman/LeetCode | problems/maximum_width_of_binary_tree/solution.py | solution.py | py | 894 | python | en | code | 0 | github-code | 13 |
40474496115 | import inspect
from typing import Any
import pytest
from flowlayer.core.api import EngineAPI, FeatureStoreAPI, NetworkAPI, NetworkPlotAPI
@pytest.mark.parametrize(
    "cls",
    [
        NetworkPlotAPI,
        NetworkAPI,
        EngineAPI,
        FeatureStoreAPI,
    ],
)
def test_api_definition_setup(cls: Any) -> None:
    """Every public member of an API stub class must raise NotImplementedError."""
    public_names = [name for name in dir(cls) if not name.startswith("_")]
    for name in public_names:
        with pytest.raises(NotImplementedError):
            member = getattr(cls, name)
            if isinstance(member, property):
                member.fget(None)  # type: ignore
            else:
                # Call with one None per positional-or-keyword parameter.
                positional = [p for p in inspect.signature(member).parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
                member(*([None] * len(positional)))
| jsam/flowlayer | tests/core/test_api.py | test_api.py | py | 848 | python | en | code | 1 | github-code | 13 |
24822265655 | import collections
from typing import List
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group the input words into lists of mutual anagrams.

        Two words are anagrams exactly when their sorted characters match,
        so the sorted string serves as the bucket key.  Buckets (and the
        words inside them) keep first-seen input order.
        """
        buckets = {}
        for word in strs:
            signature = "".join(sorted(word))
            buckets.setdefault(signature, []).append(word)
        return list(buckets.values())
# Manual smoke test: group the classic example and print the buckets.
if __name__ == "__main__":
    strs = ["eat", "tea", "tan", "ate", "nat", "bat"]
    print(Solution().groupAnagrams(strs))
| AhmedMunna172367/leetcode-150 | arrays-and-hashing/group_anagrams.py | group_anagrams.py | py | 736 | python | en | code | 0 | github-code | 13 |
# ESERCIZI
# REVERSE
# Scrivi una funzione che ha come argomento una parola.
# Verifica e stampa la stessa parola ma al contrario.
# es. 'abcd' -> 'dcba'
def reverse(word):
    """Return *word* reversed, printing the result as a side effect."""
    flipped = "".join(reversed(word))
    print(flipped)
    # Returning the value (not just printing it) lets callers reuse it.
    return flipped
# Demo: read a word from the user and print/return it reversed.
parola = input("Scrivi una parola: ")
reverse(parola)
# CUSTOM LEN
# Scrivi una funzione che ha come argomento una parola.
# Verifica e stampa la lunghezza esatta di quella parola.
def custom_len(word):
    """Count the characters of *word* without using len().

    Fixes two defects: the original shadowed the builtin ``len`` with a
    local counter, and it discarded the computed value instead of
    returning it.  The printed Italian message is unchanged.
    """
    count = 0
    for _ in word:
        count += 1
    print(f"La tua parola è lunga {count} caratteri")
    return count
# Demo: count the characters of a second user-supplied word.
parola2 = input("Scrivi una seconda parola: ")
custom_len(parola2)
# PALINDROMO
# Scrivi una funzione che ha come argomento una parola.
# Verifica e stampa se la parola è palindroma o oppure no.
def palindromo(word):
    """Print whether *word* is a palindrome (reverse() also echoes the reversed word)."""
    reversed_word = reverse(word)
    if word == reversed_word:
        print("La tua parola è palindroma")
    else:
        print("La tua parola non è palindroma")
# Demo: palindrome check on a third user-supplied word.
parola3 = input("Scrivi una terza parola: ")
palindromo(parola3)
# ALTERNATIVA CON CICLO FOR
def palindrome(word):
    """Print whether *word* is a palindrome.

    Fixes two defects in the original: it kept comparing character pairs
    after the first mismatch, and it built the message by mutating *word*.
    The printed output is byte-identical to the original's.
    """
    suffix = "" if word == word[::-1] else " non"
    print(f"La parola {word}{suffix} è palindroma.")
# Demo: palindrome check (loop-based variant) on user input.
palindrome(input("Inserisci una parola: "))
| MartPic/python-excercises | es_funzioni.py | es_funzioni.py | py | 1,403 | python | it | code | 0 | github-code | 13 |
def stringmaker(str):
    """Capitalize the sentence; append '?' when it starts with a question word.

    Note: the parameter name shadows the builtin ``str`` but is kept for
    interface compatibility with existing callers.  The original's two
    branches differed only by the trailing '?', so they are collapsed.
    """
    suffix = "?" if str.startswith(("how", "why", "when", "where")) else ""
    return str.capitalize() + suffix
# Read sentences until the user types /end, then print them comma-separated.
phrase=[]
while True:
    # NOTE: 'str' shadows the builtin here; kept as-is, rename if refactoring.
    str=input("Say something: ")
    if(str=="/end"):
        break
    phrase.append(stringmaker(str))
print(",".join(phrase))
| Kanishq10/Practice-Programs | New python Docs/simpleprogram.py | simpleprogram.py | py | 375 | python | en | code | 0 | github-code | 13 |
15912856626 | from __future__ import absolute_import
import functools
import warnings
__author__ = "yesudeep@google.com (Yesudeep Mangalapilly)"
__all__ = [
"deprecated",
]
def deprecated(func):
    """Marks functions as deprecated.

    This is a decorator which can be used to mark functions
    as deprecated. It will result in a DeprecationWarning being
    emitted when the function is called.

    Usage::

        @deprecated
        def my_func():
            pass

        @other_decorators_must_be_upper
        @deprecated
        def my_func():
            pass

    :param func:
        The function to deprecate.
    :returns:
        Deprecated function object.
    """
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        """Wrapper that emits the deprecation warning, then delegates to *func*."""
        warnings.warn_explicit(
            "Call to deprecated function %(funcname)s." % {
                "funcname": func.__name__,
            },
            category=DeprecationWarning,
            # Fix: ``func.func_code`` is the Python 2 spelling and raises
            # AttributeError on Python 3; ``__code__`` works on 2.6+ and 3.x.
            filename=func.__code__.co_filename,
            lineno=func.__code__.co_firstlineno + 1
        )
        return func(*args, **kwargs)
    return new_func
| gorakhargosh/mom | mom/decorators.py | decorators.py | py | 1,057 | python | en | code | 37 | github-code | 13 |
22911875968 | import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
#if using termux
#import subprocess
#import shlex
#end if
# Monte-Carlo estimate of detection-error probability vs SNR (dB) for a
# two-dimensional binary signal in unit-variance Gaussian noise.
rng = np.random.default_rng()
num_samples = 1000000
# Transmitted signal for the reference hypothesis (column vector).
s0 = np.array([1,0]).reshape(2,1)
max_snr = 10
snr_db = np.arange(0, max_snr+1)
p_error_est = np.zeros(snr_db.shape[0])
p_error_th = np.zeros(snr_db.shape[0])
for i in snr_db:
    # Fresh unit-variance Gaussian noise for both coordinates.
    N_var = rng.normal(size=(2, num_samples))
    snr = 10**(0.1*i)
    y_var = snr*s0 + N_var
    # An error is counted whenever the second coordinate exceeds the first.
    p_error_val_est = np.count_nonzero(np.where(y_var[0] < y_var[1], 1, 0))/num_samples
    p_error_est[i] = p_error_val_est
    # NOTE(review): verify the erfc argument scaling (0.5*erfc(snr/2))
    # against the intended Q-function expression for this signal model.
    p_error_val_th = 0.5*sp.erfc(snr/2)
    p_error_th[i] = p_error_val_th
plt.semilogy(snr_db, p_error_est, 'o')
plt.semilogy(snr_db, p_error_th)
plt.grid()
plt.xlabel('$SNR_{dB}$')
plt.ylabel('$P_e(SNR_{dB})$')
plt.legend(["Numerical","Theory"])
plt.savefig('../../figs/chapter5/biv_pe_vs_snr.pdf')
plt.savefig('../../figs/chapter5/biv_pe_vs_snr.png')
#if using termux
#subprocess.run(shlex.split("termux-open ../../figs/chapter5/biv_pe_vs_snr.pdf"))
#else
plt.show() #opening the plot window
27781776653 |
# coding: utf-8
import numpy as np
#import PyQt4
import matplotlib
#matplotlib.use('qt4agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import itertools
# Shared multi-page PDF sink; every figure plotted below is appended here
# until closeFile() finalizes it.
pdfFile = PdfPages("Confusion_matrix.pdf")
def plot_confusion_matrix(cm, classes,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render confusion matrix *cm*, show it, and append it to the shared PDF.

    Args:
        cm: square matrix of integer counts (indexed [i, j] and formatted with 'd').
        classes: tick labels, one per class, in matrix order.
        title: figure title.
        cmap: matplotlib colormap for the heat map.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = 'd'
    # Flip the text colour on dark cells so counts stay readable.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    fig = plt.gcf()
    plt.show()
    plt.draw()
    # Persist the just-shown figure into the module-level multi-page PDF.
    pdfFile.savefig(fig)
    plt.clf()
def closeFile() :
    """Close all open figures and finalize the multi-page PDF (call once, at the end)."""
    plt.close('all')
    pdfFile.close()
| Gayatri012/MachineLearning | Neural Networks Classsification/cnm_plot.py | cnm_plot.py | py | 1,172 | python | en | code | 1 | github-code | 13 |
32072786638 | import mysql.connector
import os
import json
from flask import Flask, request, jsonify
from sqlalchemy import create_engine
from sqlalchemy import URL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import text
app = Flask(__name__)
# Local MySQL connection for the 'internet_store' schema.
# NOTE(review): credentials are hard-coded; move them to configuration/env.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root:root@localhost:3306/internet_store'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# Startup connectivity probe: run a trivial query and report the outcome.
with app.app_context():
    try:
        # db.session.execute('SELECT 1')
        db.session.execute(text('SELECT 1'))
        print('\n\n----------- Connection successful !')
    except Exception as e:
        print('\n\n----------- Connection failed ! ERROR : ', e)
# Product Table
class Product(db.Model):
    """ORM model for a store product."""

    __tablename__ = 'product'

    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    description = Column(String(250), nullable=False)
    price = Column(Integer)

    def as_dict(self):
        """Serialize every mapped column into a plain dict."""
        return dict((column.name, getattr(self, column.name)) for column in self.__table__.columns)
# Home page
@app.route('/')
def all_products():
    """List every product as JSON objects with 'id' and 'name'."""
    summaries = [
        {'id': product.id, 'name': product.name}
        for product in Product.query.all()
    ]
    return jsonify(summaries)
@app.route('/product/<int:id>')
def get_one(id):
    """Return one product as JSON, or a JSON 404 when it does not exist.

    Fixes two defects: the old ``if id:`` guard made the view return None
    (an HTTP 500) when id == 0, and the not-found branch returned a bare
    string with a 200 status instead of a 404 like delete_product does.
    """
    product = Product.query.filter_by(id=id).first()
    if not product:
        return jsonify({'message': 'Product not found'}), 404
    return jsonify({'id': product.id, 'name': product.name})
# Add new products
# class Product(db.Model):
# __tablename__ = 'product'
# __table_args__ = {'extend_existing': True}
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String(100))
@app.route('/product', methods=['POST'])
def add_product():
    """Create a product from a JSON payload containing 'name' and 'price'."""
    payload = request.get_json()
    name = payload.get('name')
    price = payload.get('price')
    if not name or not price:
        return jsonify({'message': 'Invalid product data'}), 400
    product = Product(name=name, price=price)
    db.session.add(product)
    db.session.commit()
    return jsonify({'message': 'Product added successfully'}), 201
# Delete a product by its id.
@app.route('/delete_product/<int:id>', methods=['DELETE'])
def delete_product(id):
    """Delete the product with the given id; 404 if it does not exist."""
    target = Product.query.get(id)
    if target is None:
        return jsonify({'message': 'Product not found'}), 404
    db.session.delete(target)
    db.session.commit()
    return jsonify({'message': 'Product deleted successfully'}), 200
# Edit a product by its id.
@app.route('/edit_product/<int:id>', methods=['PUT'])
def edit_product(id):
    """Update name/price of the product with the given id.

    Fixes two defects in the original handler:
    * it never returned a response after committing, so Flask raised a
      500 error on every successful update;
    * a products.json seeding loop was indented inside the handler and
      therefore re-inserted every seed product on each PUT request.  That
      misplaced seeding was removed — it belongs in a one-off startup or
      seed script, not in a request handler.
    """
    product = Product.query.get(id)
    if not product:
        return jsonify({'message': 'Product not found'}), 404
    data = request.get_json()
    name = data.get('name')
    price = data.get('price')
    if not name or not price:
        return jsonify({'message': 'Invalid product data'}), 400
    product.name = name
    product.price = price
    db.session.commit()
    return jsonify({'message': 'Product updated successfully'}), 200
# Run the development server (debug reloader; not suitable for production).
if __name__ == "__main__":
    app.run(debug=True)
| Samir-Mamedaliyev/online_shop | backend/server.py | server.py | py | 3,788 | python | en | code | 0 | github-code | 13 |
29860073446 | import discord
from discord.ext import commands
import os

client = commands.Bot(command_prefix="")


@client.event
async def on_ready():
    # Fired once the gateway connection is established and the cache is ready.
    print("Bot is ready")


@client.command()
async def hello(ctx):
    await ctx.send("Hi there, I am the bot of V&P server. This is a sever made by Vihaan and Prakhar and we want you to invite more people..")


# SECURITY FIX: the bot token was hard-coded (and therefore leaked) in this
# file.  Any token committed to source control must be treated as compromised
# and regenerated; the token is now read from the environment instead.
client.run(os.environ["DISCORD_TOKEN"])
39536998639 | import json
import os
import sys
from pathlib import Path
import numpy as np
from sklearn.model_selection import train_test_split
# NOTE(review): CREMAD_DIR is read from argv but never used below —
# label_dir is hard-coded to 'Audio_16k'; confirm which directory the
# script is meant to scan.
CREMAD_DIR = Path(sys.argv[1])
print ('Generating labels and train/validation/test groups...')
label_dir = Path('Audio_16k')
# Emotion code (embedded in the file name) -> label written to the JSON.
labeldict = {
    'ANG': 'anger',
    'HAP': 'happy',
    'DIS': 'disgust',
    'SAD': 'sad',
    'FEA': 'fear',
    'NEU': 'neutral'
}
audio_list, label_list = [], []
for x, full_audio_name in enumerate(label_dir.rglob('*.wav')):
    file_name = os.path.basename(full_audio_name).split('/')[-1]
    # The three-letter emotion code occupies characters -10..-8 of the name.
    label = str(file_name)[-10:-7]
    if label not in labeldict:
        continue
    audio_list.append(file_name)
    label_list.append(labeldict[label])
labels = {
    'Train': {},
    'Val': {},
    'Test': {}
}
# 80% train split, stratified by label; fixed seed for reproducibility.
X_train, X_rem, y_train, y_rem = train_test_split(
    audio_list, label_list,
    train_size=0.8,
    random_state=23,
    stratify=label_list)
# Now since we want the valid and test size to be equal (10% each of overall data).
# we have to define valid_size=0.5 (that is 50% of remaining data)
# NOTE(review): this second split passes no random_state, so Val/Test
# membership varies between runs; 'test_size' below is also unused
# (the literal 0.5 is passed instead).
test_size = 0.5
X_valid, X_test, y_valid, y_test = train_test_split(
    X_rem, y_rem,
    test_size=0.5,
    stratify=y_rem)
for i in range(len(X_train)):
    labels['Train'][X_train[i]] = y_train[i]
for i in range(len(X_valid)):
    labels['Val'][X_valid[i]] = y_valid[i]
for i in range(len(X_test)):
    labels['Test'][X_test[i]] = y_test[i]
with open(f'labels.json', 'w') as f:
json.dump(labels, f, indent=4) | Okko98/project | dataset/gen_labels.py | gen_labels.py | py | 1,492 | python | en | code | 0 | github-code | 13 |
11078853994 | import logging
import importlib
import functools
import collections
from typing import Dict, Any, Callable, Set, Optional
from .events import BaseEvent
from .runners import BaseRunner
from .utils import get_cls_path
from .hubs.base_hub import BaseHub
from .runnables import BaseRunnable
from .event_subscription import EventSubscription
logger = logging.getLogger(__name__)
class Kafthon():
    """Application object wiring an event hub to a runner.

    Holds the registries of event/runnable classes, per-method event
    subscriptions and named signal handlers, and lazily creates the
    app-bound ``BaseEvent`` / ``BaseRunnable`` base classes.
    """

    def __init__(self, event_hub: BaseHub, runner: BaseRunner, validate_events: bool = True):
        self._event_hub = event_hub
        # Back-reference so the hub can reach this application instance.
        event_hub._kafthon_app = self
        self._runner = runner
        self.validate_events = validate_events
        # cls-path -> class, populated by register().
        self._event_registry: Dict[str, BaseEvent] = {}
        self._runnable_registry: Dict[str, BaseRunnable] = {}
        self._method_sub_registry: Dict[Callable, Set[EventSubscription]] = collections.defaultdict(set)
        self._signal_handlers: Dict[str, Set[Callable]] = collections.defaultdict(set)
        # Created lazily by the BaseEvent / BaseRunnable properties below.
        self._BaseEvent: Optional[type] = None
        self._BaseRunnable: Optional[type] = None

    @property
    def event_hub(self):
        """The hub this app publishes and subscribes through."""
        return self._event_hub

    @property
    def BaseEvent(self):
        """App-bound event base class (created on first access)."""
        if self._BaseEvent is None:
            self._BaseEvent = type('BaseEvent', (BaseEvent,), dict(_kafthon_app=self))
        return self._BaseEvent

    @property
    def BaseRunnable(self):
        """App-bound runnable base class (created on first access)."""
        if self._BaseRunnable is None:
            self._BaseRunnable = type('BaseRunnable', (BaseRunnable,), dict(_kafthon_app=self))
        return self._BaseRunnable

    def register(self, target: Any):
        """Register an event or runnable class and bind it to this app.

        Raises:
            TypeError: if *target* is neither a BaseEvent nor a BaseRunnable subclass.
        """
        cls_path = get_cls_path(target)
        if issubclass(target, BaseEvent):
            self._event_registry[cls_path] = target
        elif issubclass(target, BaseRunnable):
            self._runnable_registry[cls_path] = target
        else:
            raise TypeError('Can only register event and runnable classes.')
        target._kafthon_app = self
        return target

    def _register_method_subscription(self, event_type, unwrap: bool, method: Callable):
        """Record that *method* handles events of *event_type*."""
        self._method_sub_registry[method].add(
            EventSubscription(
                event_type=event_type,
                unwrap=unwrap,
                handler=method
            )
        )

    def get_event_type_by_cls_path(self, cls_path):
        """Resolve a registered event class from its cls path, importing its module if needed."""
        if cls_path not in self._event_registry:
            # Importing the module registers its events as a side effect.
            module_path, _ = cls_path.split('-')
            importlib.import_module(module_path)
        return self._event_registry.get(
            cls_path
        )

    def bind_signal(self, handler: Optional[Callable] = None, signal_type: str = None):
        """Bind *handler* to a signal; usable directly or as a decorator factory.

        Raises:
            TypeError: if *signal_type* is not provided.
        """
        if signal_type is None:
            raise TypeError('signal_type argument must not be None')
        if handler is None:
            # Decorator usage: @app.bind_signal(signal_type="...")
            return functools.partial(self.bind_signal, signal_type=signal_type)
        self._signal_handlers[signal_type].add(handler)

    def fire_signal(self, signal_type: str, *args, **kwargs):
        """Call every handler bound to *signal_type*; handler errors are logged, never raised."""
        handler_set = self._signal_handlers.get(signal_type)
        if handler_set:
            for handler in handler_set:
                try:
                    handler(*args, **kwargs)
                # Fix: the original bare ``except:`` also swallowed SystemExit
                # and KeyboardInterrupt; only ordinary exceptions belong here.
                except Exception:
                    logger.exception('Error occurred during signal handling.')
| aabversteeg/kafthon | kafthon/kafthon.py | kafthon.py | py | 3,231 | python | en | code | 0 | github-code | 13 |
31282346389 | from sys import stdin, stdout
# NOTE(review): 'lines' is never populated — the append below is commented out.
lines = []
total = 0
# Sum the numeric second token of every two-token stdin line that is below 100000.
for line in stdin:
    line = line.strip()
    # lines.append(line.strip())
    # print(line)
    if ((len(line.split(" "))) == 2):
        a, b = line.split(" ")
        if (int(b) < 100000):
            print(int(b))
            total += int(b)
print(total)
| math919191/AdventOfCode22 | FinishedDays/day7test.py | day7test.py | py | 314 | python | en | code | 1 | github-code | 13 |
27767810279 | """
Temp file for testing purposes.
"""
from pathlib import Path
from .untype import untype
# Number of example files to process.
NEXAMPLES = 4
EXAMPLES_PATH = Path("code_examples")
for n in range(1, NEXAMPLES + 1):
    code_path = EXAMPLES_PATH / f"example_{n}.py"
    code = code_path.read_text()
    # Write the annotation-stripped variant alongside the original.
    cleaned_code_path = EXAMPLES_PATH / f"example_{n}(untyped).py"
    cleaned_code_path.write_text(untype(code))
| salt-die/gradual_untyping | gradual_untyping/__main__.py | __main__.py | py | 378 | python | en | code | 0 | github-code | 13 |
10892159835 | from database import db
class UsuarioModel(db.Model):
    """ORM model for an application user."""

    __tablename__ = 'usuario'

    id = db.Column(db.Integer, primary_key=True)
    usuario = db.Column(db.String(80))
    senha = db.Column(db.String(80))
    nome = db.Column(db.String(80))
    sobrenome = db.Column(db.String(80))

    def __init__(self, usuario, senha, nome, sobrenome):
        self.usuario = usuario
        self.senha = senha
        self.nome = nome
        self.sobrenome = sobrenome

    def json(self):
        """Serialize the user as a plain dict (note: includes the stored password value)."""
        return dict(
            id=self.id,
            usuario=self.usuario,
            senha=self.senha,
            nome=self.nome,
            sobrenome=self.sobrenome,
        )

    @classmethod
    def find_usuario(cls, usuario):
        """Return the first user with this login name, or None."""
        # .first() already yields None when there is no match.
        return cls.query.filter_by(usuario=usuario).first()

    def save_usuario(self):
        """Persist this (new) user."""
        db.session.add(self)
        db.session.commit()

    def update_usuario(self, senha, nome, sobrenome):
        """Update mutable fields in memory; the caller is expected to commit."""
        self.senha = senha
        self.nome = nome
        self.sobrenome = sobrenome

    def delete_usuario(self):
        """Remove this user and commit."""
        db.session.delete(self)
        db.session.commit()
alphabet = list('abcdefghijklmnopqrstuvwxyz')


def verifyShift(shift_amout):
    """Normalize a shift to the range [0, len(alphabet)).

    Fix: the original subtracted len(alphabet) at most once, so encrypt()
    raised IndexError whenever position + shift reached 2 * 26; a modulo
    handles any non-negative shift.
    """
    return shift_amout % len(alphabet)


def encrypt(plain_text, shift_amount):
    """Caesar-encrypt *plain_text* (lowercase letters only) by *shift_amount*."""
    cipher_text = ""
    for letter in plain_text:
        position = alphabet.index(letter)
        cipher_text += alphabet[verifyShift(position + shift_amount)]
    return cipher_text


def decrypt(plain_text, shift_amount):
    """Reverse encrypt(): shift each letter back by *shift_amount*."""
    cipher_text = ""
    for letter in plain_text:
        position = alphabet.index(letter)
        # Negative indices wrap around, which is exactly the rotation we want.
        cipher_text += alphabet[position - verifyShift(shift_amount)]
    return cipher_text
def cesarEncryption():
    """Interactive driver: ask for direction, message and shift, then print the result.

    NOTE(review): invalid direction input retries via recursion, so the
    stack grows with repeated bad input — a loop would be safer.
    """
    direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
    text = input("Type your message:\n").lower()
    shift = int(input("Type the shift number:\n"))
    if direction == "encode":
        print("The encode text is "+ encrypt(text, shift))
    elif direction == "decode":
        print("The decode text is " + decrypt(text, shift))
    else:
        print("Wrong chosen, please try again.\n")
        cesarEncryption()
cesarEncryption() | MarcosDaNight/100-days-of-code-Python | day-08/steps/DecryptionCeaser.py | DecryptionCeaser.py | py | 1,231 | python | en | code | 0 | github-code | 13 |
17058990304 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ReportData(object):
    """Value object for an ad report: city/line/position plus pv & uv counts."""

    # Serializable attribute names, in canonical output order.
    _FIELD_NAMES = ('city_code', 'line_code', 'position_id', 'pv', 'uv')

    def __init__(self):
        self._city_code = None
        self._line_code = None
        self._position_id = None
        self._pv = None
        self._uv = None

    @property
    def city_code(self):
        return self._city_code

    @city_code.setter
    def city_code(self, value):
        self._city_code = value

    @property
    def line_code(self):
        return self._line_code

    @line_code.setter
    def line_code(self, value):
        self._line_code = value

    @property
    def position_id(self):
        return self._position_id

    @position_id.setter
    def position_id(self, value):
        self._position_id = value

    @property
    def pv(self):
        return self._pv

    @pv.setter
    def pv(self, value):
        self._pv = value

    @property
    def uv(self):
        return self._uv

    @uv.setter
    def uv(self, value):
        self._uv = value

    def to_alipay_dict(self):
        """Serialize truthy fields, recursing into values that are themselves alipay objects."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a ReportData from a plain dict; returns None for an empty/missing dict."""
        if not d:
            return None
        o = ReportData()
        for name in ReportData._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ReportData.py | ReportData.py | py | 2,618 | python | en | code | 241 | github-code | 13 |
19147221074 | import dataclasses
from typing import Callable, Optional, Tuple
import haiku as hk
import jax.numpy as jnp
from typing_extensions import Protocol
from corax import types
from corax.jax import types as jax_types
# This definition is deprecated. Use jax_types.PRNGKey directly instead.
# TODO(sinopalnikov): migrate all users and remove this definition.
PRNGKey = jax_types.PRNGKey

# Commonly-used types.
BatchSize = int
Observation = types.NestedArray
Action = types.NestedArray
Params = types.NestedArray
NetworkOutput = types.NestedArray
QValues = jnp.ndarray
Logits = jnp.ndarray
LogProb = jnp.ndarray
Value = jnp.ndarray
RecurrentState = types.NestedArray
Entropy = jnp.ndarray

# Commonly-used function/network signatures.
QNetwork = Callable[[Observation], QValues]
LSTMOutputs = Tuple[Tuple[Logits, Value], hk.LSTMState]
PolicyValueRNN = Callable[[Observation, hk.LSTMState], LSTMOutputs]
RecurrentQNetwork = Callable[[Observation, hk.LSTMState], Tuple[QValues, hk.LSTMState]]
# SampleFn draws an action from a network output; LogProbFn scores an action under it.
SampleFn = Callable[[NetworkOutput, PRNGKey], Action]
LogProbFn = Callable[[NetworkOutput, Action], LogProb]
@dataclasses.dataclass
class FeedForwardNetwork:
    """Holds a pair of pure functions defining a feed-forward network.

    Attributes:
      init: A pure function: ``params = init(rng, *a, **k)``
      apply: A pure function: ``out = apply(params, rng, *a, **k)``
    """

    # Initializes and returns the networks parameters.
    init: Callable[..., Params]
    # Computes and returns the outputs of a forward pass.
    apply: Callable[..., NetworkOutput]
class ApplyFn(Protocol):
    """Structural type of a network apply function.

    Stochastic networks may consume ``key``; ``is_training`` toggles
    train/eval behaviour.
    """

    def __call__(
        self,
        params: Params,
        observation: Observation,
        *args,
        is_training: bool,
        key: Optional[PRNGKey] = None,
        **kwargs,
    ) -> NetworkOutput:
        ...
@dataclasses.dataclass
class TypedFeedForwardNetwork:
    """FeedForwardNetwork with more specific types of the member functions.

    Attributes:
      init: A pure function. Initializes and returns the networks parameters.
      apply: A pure function (ApplyFn). Computes and returns the outputs of a forward pass.
    """

    init: Callable[[PRNGKey], Params]
    apply: ApplyFn
def non_stochastic_network_to_typed(
    network: FeedForwardNetwork,
) -> TypedFeedForwardNetwork:
    """Wrap a non-stochastic FeedForwardNetwork as a TypedFeedForwardNetwork.

    A non-stochastic network is one whose ``apply`` method does not take a
    random key; the returned wrapper simply drops the ``is_training`` and
    ``key`` arguments before delegating, satisfying the ApplyFn protocol.

    Arguments:
      network: non-stochastic feed-forward network.

    Returns:
      corresponding TypedFeedForwardNetwork
    """

    def typed_apply(
        params: Params,
        observation: Observation,
        *args,
        is_training: bool,
        key: Optional[PRNGKey] = None,
        **kwargs,
    ) -> NetworkOutput:
        del is_training, key  # unused by non-stochastic networks
        return network.apply(params, observation, *args, **kwargs)

    return TypedFeedForwardNetwork(init=network.init, apply=typed_apply)
| ethanluoyc/corax | corax/jax/networks/base.py | base.py | py | 2,970 | python | en | code | 27 | github-code | 13 |
17842818552 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import streamlit as st
import pandas as pd
# In[ ]:
# In[2]:
def local_css(file_name):
    """Inject the contents of a local CSS file into the Streamlit page."""
    with open(file_name) as css_file:
        styles = css_file.read()
    st.markdown(f'<style>{styles}</style>', unsafe_allow_html=True)
# In[ ]:
# In[3]:
# Page-wide CSS: tint the main view and the sidebar.
st.markdown(
    """
    <style>
    .reportview-container {
        background: #8cd7b3
    }
   .sidebar .sidebar-content {
        background: #8dc5bf
    }
    </style>
    """,
    unsafe_allow_html=True
)
# In[ ]:
# In[4]:
# Load the pre-processed drug dataset (one row per drug).
df=pd.read_csv("Final_Data.csv")
# In[ ]:
# In[5]:
st.write("""
# Drug Analysis
""")
# In[ ]:
# In[6]:
# Column views used for the lookups below.
drug_name = df['drugName']
condition = df['condition']
side_effect = df['Side_Effect']
effectiveness = df['effectiveness']
# In[ ]:
# In[7]:
dummy = df['drugName'].tolist()
# In[ ]:
# In[13]:
st.sidebar.text(" ")
st.sidebar.text(" ")
drug = st.sidebar.selectbox('Drug Name:' , options=drug_name)
st.sidebar.text(" ")
st.sidebar.text(" ")
# In[ ]:
# In[9]:
# Row index of the selected drug (first match by name).
index = dummy.index(drug)
# In[ ]:
# In[11]:
if(st.sidebar.button('Show Result')):
    st.info("Information")
    st.markdown("<h3 style= 'color: blue';>Side Effects</h3>", unsafe_allow_html=True)
    a = side_effect[index]
    st.write(a)
    st.text(" ")
    st.markdown("<h3 style= 'color: blue';>Conditions</h3>", unsafe_allow_html=True)
    b = condition[index]
    st.write(b)
    st.text(" ")
    st.markdown("<h3 style= 'color: blue';>Effectiveness</h3>", unsafe_allow_html=True)
    c = effectiveness[index]
    st.write(c)
    st.text(" ")
else:
    # No result requested yet: render the empty section skeleton.
    st.text(" ")
    st.text(" ")
    st.text(" ")
    st.markdown("<h3 style= 'color: blue';>Side Effects</h3>", unsafe_allow_html=True)
    st.text(" ")
    st.text(" ")
    st.text(" ")
    st.markdown("<h3 style= 'color: blue';>Conditions</h3>", unsafe_allow_html=True)
    st.text(" ")
    st.text(" ")
    st.text(" ")
    st.markdown("<h3 style= 'color: blue';>Effectiveness</h3>", unsafe_allow_html=True)
    st.text(" ")
    st.text(" ")
    st.text(" ")
# In[ ]:
| himanshuparekh16/Data-Science_Python | updated_deployment.py | updated_deployment.py | py | 2,127 | python | en | code | 2 | github-code | 13 |
42826965590 | from json import dumps
import frappe
from frappe.model.document import Document
class InsightsDashboard(Document):
    """Frappe document controller for an Insights dashboard and its visualization rows."""

    def validate(self):
        self.validate_duplicate_items()

    def validate_duplicate_items(self):
        """Reject the document if the same visualization appears in more than one row."""
        items = [d.visualization for d in self.visualizations]
        if len(items) != len(set(items)):
            duplicates = [item for item in items if items.count(item) > 1]
            frappe.throw("Duplicate items found: {0}".format(", ".join(duplicates)))

    @frappe.whitelist()
    def get_visualizations(self):
        """Return charts (name/title/type) not already on this dashboard; pivots excluded."""
        visualizations = [row.visualization for row in self.visualizations]
        return frappe.get_all(
            "Insights Query Chart",
            filters={"name": ("not in", visualizations), "type": ["!=", "Pivot"]},
            fields=["name", "title", "type"],
        )

    @frappe.whitelist()
    def add_visualization(self, visualization, layout=None):
        """Append a visualization row with the given (or a default 8x8) layout and save."""
        if not layout:
            layout = {"w": 8, "h": 8}
        self.append(
            "visualizations",
            {
                "visualization": visualization,
                "layout": dumps(layout, indent=2),
            },
        )
        self.save()

    @frappe.whitelist()
    def refresh_visualizations(self):
        """Re-run the query behind every visualization; failures are logged, not raised."""
        for visualization in self.visualizations:
            try:
                frappe.get_doc("Insights Query", visualization.query).run()
            # Fix: ``except BaseException`` also swallowed SystemExit and
            # KeyboardInterrupt; only ordinary exceptions should be logged.
            except Exception:
                frappe.log_error(title="Error while executing query")

    @frappe.whitelist()
    def remove_visualization(self, visualization):
        """Remove the first row matching *visualization* and save."""
        for row in self.visualizations:
            if row.visualization == visualization:
                self.remove(row)
                self.save()
                break

    @frappe.whitelist()
    def update_layout(self, updated_layout):
        """Apply client-supplied layouts keyed by row name (keys may arrive as str or int)."""
        updated_layout = frappe._dict(updated_layout)
        if not updated_layout:
            return
        for row in self.visualizations:
            # row.name may be an integer that arrives stringified over the wire.
            if str(row.name) in updated_layout or row.name in updated_layout:
                new_layout = (
                    updated_layout.get(str(row.name))
                    or updated_layout.get(row.name)
                    or {}
                )
                row.layout = dumps(new_layout, indent=2)
        self.save()
| morghim/insights | insights/insights/doctype/insights_dashboard/insights_dashboard.py | insights_dashboard.py | py | 2,376 | python | en | code | null | github-code | 13 |
43260346102 | from re import match
# s: pattern where '?' wildcards become regex '.', t: string that must be embedded.
s, t = input().replace('?', '.'), input()
# Try placements right-to-left; the first (rightmost) position where the
# pattern slice matches t wins, and remaining wildcards become 'a'.
for i in range(len(s) - len(t) + 1)[::-1]:
    if match(s[i:i + len(t)], t):
        key = (s[:i] + t + s[i + len(t):]).replace('.', 'a')
        print(key)
        break
else:
    # for/else: no placement matched.
    print('UNRESTORABLE')
| Shirohi-git/AtCoder | abc071-/abc076_c.py | abc076_c.py | py | 267 | python | en | code | 2 | github-code | 13 |
7001502636 | from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, ExecuteProcess
from launch.substitutions import Command, FindExecutable
from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackageShare
def generate_launch_description():
    """Build the launch description for the force-dimension (fd) robot bringup.

    Declares the launch arguments, renders the URDF via xacro, starts the
    robot_state_publisher, a static world->fd_base transform, the
    ros2_control controller manager, and spawns the controllers.
    """
    use_fake_hardware = LaunchConfiguration('use_fake_hardware')
    use_orientation = LaunchConfiguration('use_orientation')
    use_clutch = LaunchConfiguration('use_clutch')

    # Get URDF via xacro
    robot_description_content = Command(
        [
            PathJoinSubstitution([FindExecutable(name='xacro')]),
            ' ',
            PathJoinSubstitution(
                [
                    FindPackageShare('fd_description'),
                    'config',
                    'fd.config.xacro',
                ]
            ),
            ' use_fake_hardware:=', use_fake_hardware,
            ' use_orientation:=', use_orientation,
            ' use_clutch:=', use_clutch,
        ]
    )
    robot_description = {'robot_description': robot_description_content}

    phi_controllers = PathJoinSubstitution(
        [
            FindPackageShare('fd_description'),
            'config',
            'fd_controllers.yaml',
        ]
    )

    node_robot_state_publisher = Node(
        package='robot_state_publisher',
        executable='robot_state_publisher',
        namespace='fd',
        output='screen',
        parameters=[robot_description],
    )

    # A node to publish world -> fd_base transform
    static_tf = Node(
        package='tf2_ros',
        executable='static_transform_publisher',
        name='static_transform_publisher',
        output='log',
        arguments=[
            '0.0', '0.0', '0.0', '3.1416', '0.0', '0.0',
            'world',
            'fd_base'
        ],
    )

    controller_manager_node = Node(
        package='controller_manager',
        executable='ros2_control_node',
        namespace='fd',
        parameters=[robot_description, phi_controllers],
        output={
            'stdout': 'screen',
            'stderr': 'screen',
        },
    )

    # Load controllers
    load_controllers = []
    for controller in ['fd_controller', 'joint_state_broadcaster']:
        load_controllers += [
            ExecuteProcess(
                cmd=[
                    # Fix: the original concatenated 'spawner' directly to
                    # '--controller-manager', producing the broken command
                    # 'ros2 run controller_manager spawner--controller-manager ...'.
                    'ros2 run controller_manager spawner '
                    f'--controller-manager /fd/controller_manager {controller}'
                ],
                shell=True,
                output='screen',
            )
        ]

    nodes = [
        controller_manager_node,
        node_robot_state_publisher,
        static_tf,
    ] + load_controllers

    return LaunchDescription([
        DeclareLaunchArgument(
            'use_fake_hardware',
            default_value='false',
            description='Use fake r2c hardware interfaces'),
        DeclareLaunchArgument(
            'use_orientation',
            default_value='false',
            # Fix: added the missing space between the two concatenated halves.
            description='Read angular positions.velocities '
            + '(WARNING! RPY parameterization)'
        ),
        DeclareLaunchArgument(
            'use_clutch',
            default_value='false',
            description='Enable clutch (read pos/vel/force and write force)'),
    ] + nodes)
| ICube-Robotics/forcedimension_ros2 | fd_bringup/launch/fd.launch.py | fd.launch.py | py | 3,367 | python | en | code | 7 | github-code | 13 |
38834992033 | from SortingAlgorithm import SortingAlgorithm
from bubbleSort import bubbleSort
from Experiment import Experiment
import random
def geraListaOrdenada(tam):
    """Return [0, 1, ..., tam-1] — bubble sort's best case (already sorted)."""
    # range() already yields the ascending sequence; no manual append loop needed.
    return list(range(tam))
def geraLista(tam):
    """Return a random permutation of the integers 0..tam-1 (average case)."""
    lista = list(range(tam))
    random.shuffle(lista)
    return lista
def geraListaReversa(tam):
    """Return [tam-1, ..., 1, 0] — bubble sort's worst case (reverse sorted)."""
    return list(range(tam - 1, -1, -1))
'''
vet = bubbleSort([7, 6, 5, 4, 3, 2, 1])
print(vet.vet)
#print(vet.stats.swaps)
vet.sort()
print(vet.vet)
#print(vet.stats.swaps)
'''
# Measurement points (input sizes) for the timing experiment.
intervals = [100, 200, 300, 400, 500]
# Best case (sorted), worst case (reversed) and average case (shuffled).
vectMelhor = bubbleSort(geraListaOrdenada(500), intervals)
vectPior = bubbleSort(geraListaReversa(500), intervals)
vect = bubbleSort(geraLista(500), intervals)
E1 = Experiment([vect, vectMelhor, vectPior], intervals, title="Bubble Sort")
E1.calculaTempos(bubbleSort)
E1.plotar()
x = 1 | luisfilipels/Interview-Preparation | SortingAlgorithmsOOP/TestFile.py | TestFile.py | py | 954 | python | en | code | 10 | github-code | 13 |
31721610025 | import json
import requests
# KEEP THIS FILE VERY PRIVATE BECAUSE IT CONTAINS THE API KEY
# Placeholder credentials from the Nutritionix developer console.
# SECURITY: never commit real values here; load them from the environment instead.
APP_ID = "enter app id"
APP_KEY = "enter you key"
class NXException(Exception):
    """Raised when a Nutritionix API call fails.

    Fix: the original derived from BaseException, so generic
    ``except Exception`` handlers would not catch it; BaseException is
    reserved for system-exiting exceptions like SystemExit.
    """
    pass
class NXClient(object):
    """Thin client for the Nutritionix v2 REST API.

    Wraps the instant-search and natural-language nutrients endpoints.
    Both methods raise NXException on any non-200 HTTP status.
    """
    SEARCH_END_POINT = "https://trackapi.nutritionix.com/v2/search/instant"
    FOOD_END_POINT = "https://trackapi.nutritionix.com/v2/natural/nutrients"
    # Every request authenticates via app-id / app-key headers.
    HEADERS = {"Content-Type": "application/json",
               "x-app-id": APP_ID, "x-app-key": APP_KEY}
    @classmethod
    def search(cls, txt):
        # Instant search: GET with the query as a URL parameter.
        v = requests.get(cls.SEARCH_END_POINT, params={"query": txt}, headers=cls.HEADERS)
        if v.status_code != 200:
            raise NXException("could not use nutritionix API")
        # Decode the raw body ourselves and parse it as JSON.
        return json.loads(v.content.decode('utf-8'))
    @classmethod
    def food(cls, txt):
        # Natural-language nutrients lookup: POST with a JSON body.
        body = json.dumps({"query": txt})
        v = requests.post(cls.FOOD_END_POINT, data=body, headers=cls.HEADERS)
        if v.status_code != 200:
            raise NXException("could not use nutritionix API")
        return json.loads(v.content.decode('utf-8'))
def call_food_api(food_name):
    """
    Look up nutrient data for a food description.

    :param str food_name: food name or sentence
    :rtype Dict:
    :raises NXException: if the Nutritionix API call fails
    """
    # Delegates to the natural-language nutrients endpoint.
    return NXClient.food(food_name)
| saratherv/Bot | fb_messenger_server/nutritionix.py | nutritionix.py | py | 1,217 | python | en | code | 0 | github-code | 13 |
21508766593 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.formula.api as smf
import statsmodels.api as sm
from scipy import stats
df = np.genfromtxt("proband_denovo_mutations_by_parent_age_tab_delimited.txt", delimiter = "\t", dtype = None, encoding = None, names = ["Proband_ID", "Paternal_Mutations", "Maternal_Mutations", "Paternal_Age", "Maternal_Age"])
fig, ax = plt.subplots()
ax.scatter(df["Maternal_Age"], df["Maternal_Mutations"])
ax.set_xlabel("Maternal Age")
ax.set_ylabel("Maternal Mutations")
ax.set_title("Offspring De Novo Mutations of Maternal Origin vs Maternal Age")
plt.savefig("ex2_a.png")
plt.close(fig)
fig, ax = plt.subplots()
ax.scatter(df["Paternal_Age"], df["Paternal_Mutations"])
ax.set_xlabel("Paternal Age")
ax.set_ylabel("Paternal Mutations")
ax.set_title("Offspring De Novo Mutations of Paternal Origin vs Paternal Age")
plt.savefig("ex2_b.png")
plt.close(fig)
maternal_model = smf.ols(formula = "Maternal_Mutations ~ 1 + Maternal_Age", data = df).fit()
print(maternal_model.summary())
paternal_model = smf.ols(formula = "Paternal_Mutations ~ 1 + Paternal_Age", data = df).fit()
print(paternal_model.summary())
fig, ax = plt.subplots()
ax.hist(df["Maternal_Mutations"], alpha = 0.5, label = "Maternal")
ax.hist(df["Paternal_Mutations"], alpha = 0.5, label = "Paternal")
ax.set_xlabel("Number of De Novo Mutations")
ax.set_ylabel("Frequency")
ax.set_title("De Novo Mutations by Parental Origin")
ax.legend()
plt.savefig("ex2_c.png")
print(stats.ttest_ind(df["Maternal_Mutations"],df["Paternal_Mutations"]))
new_data = df[0]
new_data.fill(0)
new_data['Paternal_Age'] = 50.5
print(paternal_model.predict(new_data)) | cefmillard/qbb2022-answers | day5-lunch/proband_parental_age_regression.py | proband_parental_age_regression.py | py | 1,686 | python | en | code | 0 | github-code | 13 |
def bubble_sort(arr):
    """Sort arr in place with bubble sort and print the number of swaps.

    Uses the "last swap" optimisation: everything after the final swap of
    a pass is already sorted, so the next pass stops there.
    """
    swap_count = 0
    # Derive the bound from the list itself instead of relying on the
    # module-level global `n`, so the function works on any list.
    end = len(arr) - 1
    while end > 0:
        last_swap = 0
        for i in range(end):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
                last_swap = i
                swap_count += 1
        end = last_swap
    print(swap_count)
# Read the element count and the space-separated elements from stdin,
# then sort and report the swap count.
n = int(input())
arr = list(map(int, input().split()))
bubble_sort(arr)
33163971800 | print("Enter 1- Addition \n 2- Subtraction \n 3- Multiplication \n 4- Division \n 5- Find the Remainder")
user = int(input())
no1 = int(input("Enter the first number - "))
no2 = int(input("Enter the second number - "))
result = 0
def add(a=None, b=None):
    """Return a + b.

    Both operands default to the numbers read from stdin (no1, no2),
    so existing no-argument calls keep working.
    """
    if a is None:
        a = no1
    if b is None:
        b = no2
    return a + b
def subtract(a=None, b=None):
    """Return a - b.

    Both operands default to the numbers read from stdin (no1, no2),
    so existing no-argument calls keep working.
    """
    if a is None:
        a = no1
    if b is None:
        b = no2
    return a - b
def multiply(a=None, b=None):
    """Return a * b.

    Both operands default to the numbers read from stdin (no1, no2),
    so existing no-argument calls keep working.
    """
    if a is None:
        a = no1
    if b is None:
        b = no2
    return a * b
def divide(a=None, b=None):
    """Return a / b (true division).

    Both operands default to the numbers read from stdin (no1, no2).
    Raises ZeroDivisionError when b is 0, as before.
    """
    if a is None:
        a = no1
    if b is None:
        b = no2
    return a / b
def remainder(a=None, b=None):
    """Return a % b.

    Both operands default to the numbers read from stdin (no1, no2).
    Raises ZeroDivisionError when b is 0, as before.
    """
    if a is None:
        a = no1
    if b is None:
        b = no2
    return a % b
# Dispatch on the chosen operation; anything else is rejected.
if user==1:
    print("The sum of the two numbers is : ", add())
elif user==2:
    print("The number after subtraction is : ", subtract())
elif user==3:
    print("The product of the two numbers is : ", multiply())
elif user==4:
    print("The number after division is : ", divide())
elif user==5:
    print("The remainder is : ", remainder())
else:
    print("Invalid Input.")
| AmanKeswani/Studies | python/calculator.py | calculator.py | py | 915 | python | en | code | 5 | github-code | 13 |
class Node:
    """A binary-tree node: one value plus left/right child links."""
    def __init__(self, val):
        # Children start empty; the caller wires up the tree explicitly.
        self.data = val
        self.left = None
        self.right = None
def printLevelOrder(root):
    """Return the level-order (BFS) traversal of a binary tree.

    Each inner list holds one depth level's node values, left to right.
    Returns None for an empty tree (preserves the original behaviour).
    """
    if root is None:
        return root
    # Local import keeps the fix self-contained; deque gives O(1) popleft
    # instead of list.pop(0)'s O(n) shift.
    from collections import deque
    queue = deque([root])
    levels = []
    while queue:
        level = []
        # Snapshot the current level size: children appended below belong
        # to the next level. (Also removes the original's shadowed `l`.)
        for _ in range(len(queue)):
            node = queue.popleft()
            level.append(node.data)
            if node.left is not None:
                queue.append(node.left)
            if node.right is not None:
                queue.append(node.right)
        levels.append(level)
    return levels
# Build a small sample tree and print its level-order traversal.
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
print(printLevelOrder(root))
31272866912 | #!/usr/bin/env python3
'''
Statistcal functions
'''
from collections import deque
from operator import add
def scan(f, g):
    '''
    Left scan over an iterator of tuples.

    :param f: two-argument function combining one accumulator element
        with the corresponding element of the next tuple
    :param g: iterator of tuples; f is applied position-wise
    '''
    running = tuple(next(g))
    for item in g:
        yield running
        running = tuple(f(a, b) for a, b in zip(running, item))
    yield running
def tail(g, n=1):
    '''Consume iterable g and return its final n items as a deque.'''
    last_items = deque(g, maxlen=n)
    return last_items
def average(xs):
    '''
    simple average
    >>> average(range(10))
    4.5
    '''
    # Pair each value with a count of one, running-sum both columns with
    # scan, and keep only the final (total, count) pair.
    pairs = ((value, 1) for value in xs)
    final = tail(scan(add, pairs))
    total, count = final[0]
    return total / count
if __name__ == '__main__':
    # Run the embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| dutc/sample-repo | sample/statistics.py | statistics.py | py | 655 | python | en | code | 2 | github-code | 13 |
28520695440 | import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import random
import torch
import torch.nn as nn
from planning.image_tool_classifier.model import resnet18
from planning.image_tool_classifier.dataset import ToolDataset
from datetime import datetime
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
# from torchvision.models import resnet18, resnet50, ResNet18_Weights
from tqdm import tqdm
from utils.data_utils import set_seed, Tee
from utils.visualize import *
# ---- experiment configuration (parsed once at import time) ----
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument(
    "--tool_dataf",
    type=str,
    default="data/image_classifier/data_classifier_08-24_epwise_final_v4",
)
parser.add_argument("--random_seed", type=int, default=3407)
parser.add_argument("--num_views", type=int, default=4)
parser.add_argument("--num_workers", type=int, default=0)
parser.add_argument("--lr", type=float, default=0.0004)
parser.add_argument("--beta1", type=float, default=0.9)
parser.add_argument("--n_epoch", type=int, default=100)
parser.add_argument("--eval_freq", type=int, default=10)
args = parser.parse_args()

set_seed(args.random_seed)

# NOTE(review): the run directory is pinned to a past timestamp so the
# script resumes/evaluates an existing checkpoint -- confirm before a
# fresh training run.
# dump_path = os.path.join(args.tool_dataf, 'dump', datetime.now().strftime("%b-%d-%H:%M:%S"))
dump_path = os.path.join(args.tool_dataf, "dump", "Sep-27-00:10:07")
# Portable, shell-free directory creation instead of os.system("mkdir -p ...").
os.makedirs(dump_path, exist_ok=True)
tee = Tee(os.path.join(dump_path, "train.log"), "w")  # mirror stdout to a log file
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def test(visualize=False):
    """Evaluate the best checkpoint on the train/valid/test splits.

    For each split: loads `net_best_v11.pth`, reports average loss and
    accuracy, and (when `visualize` is True) saves images of false
    predictions plus a confusion matrix under the dump directory.
    """
    for dataset in ["train", "valid", "test"]:
        # Output folders for correctly / incorrectly classified samples.
        os.system("mkdir -p " + os.path.join(dump_path, dataset, "images", "true"))
        os.system("mkdir -p " + os.path.join(dump_path, dataset, "images", "false"))
        test_set = ToolDataset(args, dataset)
        # Force test-time preprocessing regardless of the split name.
        test_set.phase = "test"
        dataloader = DataLoader(
            test_set,
            batch_size=1,
            shuffle=False,
            num_workers=args.num_workers,
        )
        # model = resnet50()
        # model.conv1 = nn.Conv2d(24, 64, kernel_size=7, stride=2, padding=3, bias=False)
        # num_features = model.fc.in_features # extract fc layers features
        # model.fc = nn.Linear(num_features, len(test_set.classes))
        model = resnet18(num_views=args.num_views, num_classes=len(test_set.classes))
        model_path = os.path.join(dump_path, f"net_best_v11.pth")
        pretrained_dict = torch.load(model_path, map_location=device)
        model.load_state_dict(pretrained_dict, strict=False)
        model.eval()
        model = model.to(device)
        criterion = nn.CrossEntropyLoss()
        running_loss = 0.0
        succ_hit = 0
        idx_preds = []
        for i, data in enumerate(tqdm(dataloader)):
            img, label = data
            img = img.to(device)
            label = label.to(device)
            output = model(img)
            loss = criterion(output, label)
            # Predicted class index (batch size is 1).
            pred = output.softmax(dim=1).argmax(dim=1)
            idx_preds.append(pred.cpu().numpy()[0])
            is_succ = torch.sum(pred == label)
            if visualize:
                img_paths, target = test_set.samples[i]
                if is_succ == 0:
                    # Save a visualization of every misclassified sample.
                    print(f"False prediction!")
                    vis_path = os.path.join(
                        dump_path, dataset, "images", "false", f"{str(i).zfill(3)}.png"
                    )
                    visualize_image_pred(
                        img_paths,
                        target,
                        pred.cpu().numpy(),
                        test_set.classes,
                        path=vis_path,
                    )
                # else:
                #     vis_path = os.path.join(dump_path, dataset, 'images', 'true', f'{str(i).zfill(3)}.png')
            succ_hit += is_succ
            running_loss += loss.item()
        print(
            f"test loss: {running_loss / len(test_set):.3f}; success: {succ_hit / (len(test_set))}"
        )
        if visualize:
            # Confusion matrix over the whole split.
            idx_labels = list(list(zip(*test_set.samples))[1])
            plot_cm(
                test_set,
                idx_labels,
                idx_preds,
                path=os.path.join(dump_path, dataset, "cm.png"),
            )
def train():
    """Fine-tune the tool classifier and checkpoint the best validation model.

    Warm-starts from `net_best_v11.pth`, alternates train/valid phases each
    epoch, logs per-channel image statistics, steps a ReduceLROnPlateau
    scheduler on the validation loss, and early-stops after 10 epochs
    without validation improvement.
    """
    phases = ["train", "valid"]
    datasets = {phase: ToolDataset(args, phase) for phase in phases}
    dataloaders = {
        phase: DataLoader(
            datasets[phase],
            batch_size=args.batch_size,
            shuffle=True if phase == "train" else False,
            num_workers=args.num_workers,
        )
        for phase in phases
    }
    model = resnet18(
        num_views=args.num_views, num_classes=len(datasets["train"].classes)
    )
    # Warm-start from the existing best checkpoint (strict=False tolerates
    # head/shape mismatches).
    model_path = os.path.join(dump_path, "net_best_v11.pth")
    pretrained_dict = torch.load(model_path, map_location=device)
    model.load_state_dict(pretrained_dict, strict=False)
    params = model.parameters()
    model = model.to(device)
    optimizer = torch.optim.Adam(params, lr=args.lr, betas=(args.beta1, 0.999))
    scheduler = ReduceLROnPlateau(
        optimizer, "min", factor=0.8, patience=3, verbose=True
    )
    criterion = nn.CrossEntropyLoss()
    best_epoch, best_valid_loss, best_valid_accuracy = 0, np.inf, 0
    plateau_epoch = 0
    train_stats = {
        "train_accuracy": [],
        "valid_accuracy": [],
        "train_loss": [],
        "valid_loss": [],
    }
    for epoch in range(args.n_epoch):
        # Per-channel image statistics, accumulated across both phases.
        rgb_mean_list = []
        rgb_std_list = []
        for phase in phases:
            running_loss = 0.0
            succ_hit = 0
            model.train(phase == "train")
            for i, data in enumerate(
                tqdm(dataloaders[phase], desc=f"epoch {epoch}/{args.n_epoch}")
            ):
                img, label = data
                img = img.to(device)
                label = label.to(device)
                output = model(img)
                loss = criterion(output, label)
                succ_hit += torch.sum(
                    output.softmax(dim=1).argmax(dim=1) == label
                ).item()
                if phase == "train":
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                # Channels are interleaved RGB across views; strided slices
                # pick out each colour channel.
                rgb_mean = np.array(
                    [
                        torch.mean(img[:, ::3]).cpu(),
                        torch.mean(img[:, 1::3]).cpu(),
                        torch.mean(img[:, 2::3]).cpu(),
                    ]
                )
                rgb_std = np.array(
                    [
                        torch.std(img[:, ::3]).cpu(),
                        torch.std(img[:, 1::3]).cpu(),
                        torch.std(img[:, 2::3]).cpu(),
                    ]
                )
                rgb_mean_list.append(rgb_mean)
                rgb_std_list.append(rgb_std)
                running_loss += loss.item()
            loss_avg = running_loss / len(datasets[phase])
            accuracy = succ_hit / len(datasets[phase])
            print(
                f"[{epoch}] {phase} loss: {loss_avg:.6f} "
                + f"{phase} success {accuracy:.6f}"
            )
            train_stats[f"{phase}_loss"].append(min(loss_avg, 1))
            train_stats[f"{phase}_accuracy"].append(accuracy)
            stats_mean = np.mean(np.stack(rgb_mean_list), axis=0)
            stats_std = np.mean(np.stack(rgb_std_list), axis=0)
            print(f"{stats_mean} +- {stats_std}")
            if phase == "valid":
                # BUG FIX: ReduceLROnPlateau.step() requires the monitored
                # metric; calling it with no argument raises TypeError.
                scheduler.step(loss_avg)
                if loss_avg < best_valid_loss:
                    best_epoch = epoch
                    best_valid_loss = loss_avg
                    best_valid_accuracy = accuracy
                    best_model_path = f"{dump_path}/net_best_v11.pth"
                    torch.save(model.state_dict(), best_model_path)
                    plateau_epoch = 0
                else:
                    plateau_epoch += 1
        if plateau_epoch >= 10:
            print(f"Breaks after not improving for {plateau_epoch} epoches!")
            break
    print(
        f"Best epoch {best_epoch}: valid loss: {best_valid_loss:.6f} valid accuracy: {best_valid_accuracy:.6f}!"
    )
    plot_train_loss(train_stats, path=os.path.join(dump_path, "training_loss.png"))
def main():
    # Fine-tune from the checkpoint, then evaluate with visualizations.
    train()
    test(visualize=True)
if __name__ == "__main__":
    main()
| hshi74/robocook | planning/image_tool_classifier/train.py | train.py | py | 9,556 | python | en | code | 43 | github-code | 13 |
74718305936 | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional, Type, Generic
from persisty.batch_edit import BatchEdit, C, U
@dataclass
class BatchEditResult(Generic[C, U]):
    """Outcome of applying one BatchEdit: success flag plus error info."""

    edit: BatchEdit[C, U]
    success: bool = False
    code: Optional[str] = None
    details: Optional[str] = None

    def copy_from(self, result: BatchEditResult):
        """Overwrite every field of this result with those of another."""
        for attr in ("edit", "success", "code", "details"):
            setattr(self, attr, getattr(result, attr))
def batch_edit_result_dataclass_for(batch_edit_type: Type) -> Type:
    """Build a result dataclass mirroring BatchEditResult for an edit type.

    The generated class has a required `edit` field of `batch_edit_type`
    plus `success`/`code`/`details` with the usual defaults.
    """
    annotations = {
        "edit": batch_edit_type,
        "success": bool,
        "code": Optional[str],
        "details": Optional[str],
    }
    namespace = {
        "__annotations__": annotations,
        "__doc__": f"Batch edit result of {batch_edit_type.__name__}",
        # Defaults for every field except the required edit payload.
        "success": False,
        "code": None,
        "details": None,
    }
    return dataclass(type(f"{batch_edit_type.__name__}Result", (), namespace))
| tofarr/persisty | persisty/batch_edit_result.py | batch_edit_result.py | py | 1,054 | python | en | code | 1 | github-code | 13 |
3918601038 |
class Person:
    """A participant record built from a raw data row.

    The row layout is: [name, github name, semester count,
    github commits (should come from the GitHub API), attend count,
    win count].
    """
    def __init__(self, data):
        fields = ("name", "githubname", "semestercount",
                  "githubcommits", "attendcount", "wincount")
        # Positional assignment mirrors the row layout; raises IndexError
        # for short rows and ignores any extra trailing columns.
        for position, attr in enumerate(fields):
            setattr(self, attr, data[position])
    def str(self):
        """Return the display name: real name followed by GitHub name."""
        return self.name + " " + self.githubname
22028937464 | #!/usr/bin/env python
# encoding: utf-8
"""
mh.py - A single myosin head
Created by Dave Williams on 2010-01-04.
"""
import numpy.random as random
random.seed() # Ensure proper seeding
from numpy import pi, sqrt, log, radians
import math as m
import warnings
class Spring:
    """A generic spring, from which we make the myosin heads"""
    def __init__(self, config):
        # Rest values and stiffnesses for the weak and strong states.
        self.r_w = config['rest_weak']
        self.r_s = config['rest_strong']
        self.k_w = config['konstant_weak']
        self.k_s = config['konstant_strong']
        # Diffusion governors
        # k_T = Boltzmann constant * temperature = (1.381E-23 J/K * 288 K)
        k_t = 1.381*10**-23 * 288 * 10**21 #10**21 converts J to pN*nM
        # Normalization factor for the PDF of the segment values.
        self.normalize = sqrt(2*pi*k_t/self.k_w)
        # Standard deviation of the unbound (weak) segment values.
        self.stand_dev = sqrt(k_t/self.k_w)
    def to_dict(self):
        """Create a JSON compatible representation of the spring"""
        return dict(self.__dict__)
    def from_dict(self, sd):
        """Restore spring values from a dict produced by to_dict."""
        for key in ('r_w', 'r_s', 'k_w', 'k_s', 'normalize', 'stand_dev'):
            setattr(self, key, sd[key])
    def rest(self, state):
        """Rest length/angle of the spring for a kinetic state.

        Takes:
            state: the state of the spring, ['free'|'loose'|'tight']
        Returns:
            the strong rest value for 'tight', the weak one otherwise
        """
        if state == "tight":
            return self.r_s
        if state in ("free", "loose"):
            return self.r_w
        warnings.warn("Improper value for spring state")
    def constant(self, state):
        """Spring constant for a kinetic state.

        Takes:
            state: the state of the spring, ['free'|'loose'|'tight']
        Returns:
            the strong constant for 'tight', the weak one otherwise
        """
        if state == "tight":
            return self.k_s
        if state in ("free", "loose"):
            return self.k_w
        warnings.warn("Improper value for spring state")
    def energy(self, spring_val, state):
        """Elastic energy stored at a given length/angle.

        Takes:
            spring_val: a spring length or angle
            state: a spring state, ['free'|'loose'|'tight']
        Returns:
            0.5 * k * (value - rest)**2 for the state's k and rest value
        """
        if state == "tight":
            return 0.5 * self.k_s * (spring_val - self.r_s)**2
        if state in ("free", "loose"):
            return 0.5 * self.k_w * (spring_val - self.r_w)**2
        warnings.warn("Improper value for spring state")
    def bop(self):
        """Diffuse to a new unbound value.

        Draws from a normal distribution centered on the weak rest value
        with the thermally-derived standard deviation.
        """
        return random.normal(self.r_w, self.stand_dev)
class SingleSpringHead:
    """A single-spring myosin head, as in days of yore"""
    def __init__(self):
        """Create the spring that makes up the head and set energy values"""
        self.state = "free"
        self.g = Spring({
            'rest_weak': 5,
            'rest_strong': 0,
            'konstant_weak': 5 / 3.976,
            'konstant_strong': 5 / 3.976})
        # Free energy calculation helpers
        g_atp = 13 # In units of RT
        atp = 5 * 10**-3
        adp = 30 * 10**-6
        phos = 3 * 10**-3
        # Magnitude of the ATP hydrolysis free energy at these concentrations.
        self.deltaG = abs(-g_atp - log(atp / (adp * phos)))
        # Fractions of deltaG released at the loose / tight transitions.
        self.alpha = 0.28
        self.eta = 0.68
        # The time-step, master of all time
        self.timestep = 1 # ms
    def transition(self, bs):
        """Transition to a new state (or not)
        Takes:
            bs: relative Crown to Actin distance (x,y)
        Returns:
            boolean: transition that occurred (as string) or None
        """
        ## Transitions rates are checked against a random number
        check = random.rand()
        ## Check for transitions depending on the current state
        if self.state == "free":
            if self._r12(bs) > check:
                self.state = "loose"
                return '12'
        elif self.state == "loose":
            if self._r23(bs) > check:
                self.state = "tight"
                return '23'
            elif (1-self._r21(bs)) < check:
                self.state = "free"
                return '21'
        elif self.state == "tight":
            if self._r31(bs) > check:
                self.state = "free"
                return '31'
            elif (1-self._r32(bs)) < check:
                self.state = "loose"
                return '32'
        # Got this far? Than no transition occurred!
        return None
    def axialforce(self, tip_location):
        """Find the axial force a Head generates at a given location
        Takes:
            tip_location: relative Crown to Actin distance (x,y)
        Returns:
            f_x: the axial force generated by the Head
        """
        ## Get the Head length
        g_len = tip_location[0]
        ## Write all needed values to local variables
        g_s = self.g.rest(self.state)
        g_k = self.g.constant(self.state)
        ## Find and return force: linear spring, axial component only
        f_x = g_k * (g_len - g_s)
        return f_x
    def radialforce(self, tip_location):
        """Find the radial force a Head generates at a given location
        Takes:
            tip_location: relative Crown to Actin distance (x,y)
        Returns:
            f_y: the radial force generated by the Head
        """
        # A single axial spring exerts no radial force by construction.
        return 0.0
    def energy(self, tip_location, state=None):
        """Return the energy in the xb with the given parameters
        Takes:
            tip_location: relative Crown to Actin distance (x,y)
            state: kinetic state of the cross-bridge, ['free'|'loose'|'tight']
        Returns:
            xb_energy: the energy stored in the cross-bridge"""
        if state is None:
            state = self.state
        return self.g.energy(tip_location[0], state)
    @property
    def numeric_state(self):
        """Return the numeric state (0, 1, or 2) of the head"""
        lookup_state = {"free":0, "loose":1, "tight":2}
        return lookup_state[self.state]
    def _set_timestep(self, timestep):
        """Set the length of time step used to calculate transitions"""
        self.timestep = timestep
    def _r12(self, bs):
        """Binding rate, based on the distance from the Head tip to a Actin
        Takes:
            bs: relative Crown to Actin distance (x,y)
        Returns:
            probability: chance of binding occurring
        """
        ## Get needed values
        k_xb = self.g.constant("free")
        xb_0 = self.g.rest("free")
        A = 2000  # From Tanner, 2008 Pg 1209
        ## Calculate the binding probability: Gaussian in axial offset
        rate = (A * sqrt(k_xb / (2 * pi)) *
                m.exp(-.5 * k_xb * (bs[0] - xb_0)**2)) * self.timestep
        return float(rate)
    def _r21(self, bs):
        """The reverse transition, from loosely bound to unbound
        Takes:
            bs: relative Crown to Actin distance (x,y)
        Returns:
            rate: probability of transition occurring this timestep
        """
        ## The rate depends on the states' free energies
        g_1 = self._free_energy(bs, "free")
        g_2 = self._free_energy(bs, "loose")
        ## Rate, as in pg 1209 of Tanner et al, 2007
        # Detailed balance: reverse rate = forward rate / exp(dG).
        try:
            rate = self._r12(bs) / m.exp(g_1 - g_2)
        except ZeroDivisionError:
            # exp underflowed to zero; cap the rate at certainty.
            rate = 1
        return float(rate)
    def _r23(self, bs):
        """Probability of becoming tightly bound if loosely bound
        Takes:
            bs: relative Crown to Actin distance (x,y)
        Returns:
            rate: probability of becoming tightly bound
        """
        ## Get other needed values
        k_xb = self.g.constant("loose")
        xb_0 = self.g.rest("loose")
        B = 100  # From Tanner, 2008 Pg 1209
        C = 1
        D = 1
        ## Rate taken from single cross-bridge work
        rate = (B / sqrt(k_xb) * (1 - m.tanh(C * sqrt(k_xb) *
                                             (bs[0] - xb_0))) + D) * self.timestep
        return float(rate)
    def _r32(self, bs):
        """The reverse transition, from tightly to loosely bound
        Takes:
            bs: relative Crown to Actin distance (x,y)
        Returns:
            rate: probability of becoming loosely bound
        """
        ## Governed as in self_r21
        g_2 = self._free_energy(bs, "loose")
        g_3 = self._free_energy(bs, "tight")
        try:
            rate = self._r23(bs) / m.exp(g_2 - g_3)
        except ZeroDivisionError:
            rate = 1
        return float(rate)
    def _r31(self, bs):
        """Probability of unbinding if tightly bound
        Takes:
            bs: relative Crown to Actin distance (x,y)
        Returns:
            rate: probability of detaching from the binding site
        """
        ## Get needed values
        k_xb = self.g.constant("tight")
        M = 3600  # From Tanner, 2008 Pg 1209
        N = 40
        P = 20
        ## Based on the energy in the tight state
        # NOTE(review): the 4.76 nm offset is hard-coded -- confirm its
        # source before changing rest lengths.
        rate = (sqrt(k_xb) * (sqrt(M * (bs[0]-4.76)**2) -
                              N * (bs[0]-4.76)) + P) * self.timestep
        return float(rate)
    def _free_energy(self, tip_location, state):
        """Free energy of the Head
        Takes:
            tip_location: relative Crown to Actin distance (x,y)
            state: kinetic state of the cross-bridge, ['free'|'loose'|'tight']
        Returns:
            energy: free energy of the head in the given state
        """
        if state == "free":
            return 0
        elif state == "loose":
            k_xb = self.g.constant(state)
            xb_0 = self.g.rest(state)
            x = tip_location[0]
            return self.alpha * -self.deltaG + k_xb * (x - xb_0)**2
        elif state == "tight":
            k_xb = self.g.constant(state)
            x = tip_location[0]
            return self.eta * -self.deltaG + k_xb * x**2
class Head:
    """Head implements a single myosin head"""
    def __init__(self):
        """Create the springs that make up the head and set energy values
        Values are choosen for consistancy with single spring rest lengths
        and rest lattice spacings. More documentaion in the single spring
        code. All numerical values referenced are discussed in single
        crossbridge PLOS paper.
        """
        # Remember thine kinetic state
        self.state = "free"
        # Create the springs which make up the head
        self.c = Spring({  # the converter domain
            'rest_weak': radians(47.16),
            'rest_strong': radians(73.20),
            'konstant_weak': 40,
            'konstant_strong': 40})
        self.g = Spring({  # the globular domain
            'rest_weak': 19.93,
            'rest_strong': 16.47,
            'konstant_weak': 2,
            'konstant_strong': 2})
        # Free energy calculation helpers
        g_atp = 13 # In units of RT
        atp = 5 * 10**-3
        adp = 30 * 10**-6
        phos = 3 * 10**-3
        deltaG = abs(-g_atp - log(atp / (adp * phos)))
        # Fractions of the hydrolysis energy assigned to the loose/tight states.
        self.alphaDG = 0.28 * -deltaG
        self.etaDG = 0.68 * -deltaG
        # The time-step, master of all time
        self._timestep = 1 # ms
    def transition(self, bs, ap):
        """Transition to a new state (or not)
        Takes:
            bs: relative Crown to Actin distance (x,y)
            ap: Actin binding permissiveness, from 0 to 1
        Returns:
            boolean: transition that occurred (as string) or None
        """
        ## Transitions rates are checked against a random number
        check = random.rand()
        ## Check for transitions depending on the current state
        if self.state == "free":
            # Binding is scaled by the actin site's permissiveness ap.
            if self._prob(self._bind(bs))*ap > check:
                self.state = "loose"
                return '12'
        elif self.state == "loose":
            if self._prob(self._r23(bs)) > check:
                self.state = "tight"
                return '23'
            elif (1 - self._prob(self._r21(bs))) < check:
                self.state = "free"
                return '21'
        elif self.state == "tight":
            if self._prob(self._r31(bs)) > check:
                self.state = "free"
                return '31'
            elif (1 - self._prob(self._r32(bs))) < check:
                self.state = "loose"
                return '32'
        # Got this far? Than no transition occurred!
        return None
    def axialforce(self, tip_location):
        """Find the axial force a Head generates at a given location
        Takes:
            tip_location: relative Crown to Actin distance (x,y)
        Returns:
            f_x: the axial force generated by the Head
        """
        ## Get the Head length and angle
        (c_ang, g_len) = self._seg_values(tip_location)
        ## Write all needed values to local variables
        c_s = self.c.rest(self.state)
        g_s = self.g.rest(self.state)
        c_k = self.c.constant(self.state)
        g_k = self.g.constant(self.state)
        ## Find and return force: linear spring term plus torsional term
        f_x = (g_k * (g_len - g_s) * m.cos(c_ang) +
               1/g_len * c_k * (c_ang - c_s) * m.sin(c_ang))
        return f_x
    def radialforce(self, tip_location):
        """Find the radial force a Head generates at a given location
        Takes:
            tip_location: relative Crown to Actin distance (x,y)
        Returns:
            f_y: the radial force generated by the Head
        """
        ## Get the Head length and angle
        (c_ang, g_len) = self._seg_values(tip_location)
        ## Write all needed values to local variables
        c_s = self.c.rest(self.state)
        g_s = self.g.rest(self.state)
        c_k = self.c.constant(self.state)
        g_k = self.g.constant(self.state)
        ## Find and return force
        f_y = (g_k * (g_len - g_s) * m.sin(c_ang) +
               1/g_len * c_k * (c_ang - c_s) * m.cos(c_ang))
        return f_y
    def energy(self, tip_location, state=None):
        """Return the energy in the xb with the given parameters
        Takes:
            tip_location: relative Crown to Actin distance (x,y)
            state: kinetic state of the cross-bridge, ['free'|'loose'|'tight']
        Returns:
            xb_energy: the energy stored in the cross-bridge"""
        # NOTE(review): `state == None` would more idiomatically be
        # `state is None`; behaviour is the same here.
        if state == None:
            state = self.state
        (ang, dist) = self._seg_values(tip_location)
        # Total energy is converter (angular) plus globular (linear) energy.
        xb_energy = self.c.energy(ang, state) + self.g.energy(dist, state)
        return xb_energy
    @property
    def numeric_state(self):
        """Return the numeric state (0, 1, or 2) of the head"""
        lookup_state = {"free":0, "loose":1, "tight":2}
        return lookup_state[self.state]
    @property
    def timestep(self):
        # Length (ms) of one simulation step.
        return self._timestep
    @timestep.setter
    def timestep(self, timestep):
        """Set the length of time step used to calculate transitions"""
        self._timestep = timestep
    def _prob(self, rate):
        """Convert a rate to a probability, based on the current timestep
        length and the assumption that the rate is for a Poisson process.
        We are asking, what is the probability that at least one Poisson
        distributed value would occur during the timestep.
        Takes:
            rate: a per ms rate to convert to probability
        Returns:
            probability: the probability the event occurs during a timestep
                of length determined by self.timestep
        """
        return 1 - m.exp(-rate*self.timestep)
    def _bind(self, bs):
        """Bind (or don't) based on the distance from the Head tip to a Actin
        Takes:
            bs: relative Crown to Actin distance (x,y)
        Returns:
            probability: chance of binding occurring during a timestep
        """
        ## Flag indicates successful diffusion
        # Rejection sampling: re-bop until the tip lands short of the thin
        # filament (tip y must not exceed the binding site y).
        bop_right = False
        while bop_right is False:
            ## Bop the springs to get new values
            c_ang = self.c.bop()
            g_len = self.g.bop()
            ## Translate those values to an (x,y) position
            tip = (g_len * m.cos(c_ang), g_len * m.sin(c_ang))
            ## Only a bop that lands short of the thin fil is valid
            bop_right = bs[1] >= tip[1]
        ## Find the distance to the binding site
        distance = m.hypot(bs[0]-tip[0], bs[1]-tip[1])
        ## The binding rate is dependent on the exp of the dist
        # Rate = \tau * \exp^{-dist^2}
        rate = 72 * m.exp(-distance**2)
        ## Return the rate
        return rate
    def _r21(self, bs):
        """The reverse transition, from loosely bound to unbound
        This depends on the prob r12, the binding prob, which is given
        in a stochastic manner. Thus _p21 is returning not the prob of
        going from loosely bound to tightly bound, but the change that
        occurs in one particular timestep, the stochastic probability.
        Takes:
            bs: relative Crown to Actin distance (x,y)
            ap: Actin binding permissiveness, from 0 to 1
        Returns:
            prob: probability of transition
        """
        ## The rate depends on the states' free energies
        unbound_free_energy = self._free_energy(bs, "free")
        loose_free_energy = self._free_energy(bs, "loose")
        ## Rate, as in pg 1209 of Tanner et al, 2007
        ## With added reduced-detachment factor, increases dwell time
        try:
            rate = self._bind(bs) / m.exp(
                unbound_free_energy - loose_free_energy)
        except ZeroDivisionError:
            # exp underflowed; cap the rate at certainty.
            rate = 1
        return float(rate)
    def _r23(self, bs):
        """Rate of becoming tightly bound if loosely bound
        Takes:
            bs: relative Crown to Actin distance (x,y)
        Returns:
            rate: per ms rate of becoming tightly bound
        """
        ## The transition rate depends on state energies
        loose_energy = self.energy(bs, "loose")
        tight_energy = self.energy(bs, "tight")
        ## Powerstroke rate, per ms
        rate = (0.6 *  # reduce overall rate
                (1 +  # shift rate up to avoid negative rate
                 m.tanh(6 +  # move center of transition to right
                        0.2 * (loose_energy - tight_energy))))
        return float(rate)
    def _r32(self, bs):
        """The reverse transition, from tightly to loosely bound
        Takes:
            bs: relative Crown to Actin distance (x,y)
        Returns:
            rate: per ms rate of transition
        """
        ## Governed as in self_p21
        loose_free_energy = self._free_energy(bs, "loose")
        tight_free_energy = self._free_energy(bs, "tight")
        try:
            rate = self._r23(bs)/ m.exp(loose_free_energy - tight_free_energy)
        except ZeroDivisionError:
            rate = 1
        return float(rate)
    def _r31(self, bs):
        """Per ms rate of unbinding if tightly bound
        Takes:
            bs: relative Crown to Actin distance (x,y)
        Returns
            rate: per ms rate of detaching from the binding site
        """
        ## Based on the energy in the tight state
        # NOTE(review): loose_energy is computed but never used here --
        # presumably a leftover; confirm before removing.
        loose_energy = self.energy(bs, "loose")
        tight_energy = self.energy(bs, "tight")
        rate = m.sqrt(0.01*tight_energy) + 0.02
        return float(rate)
    def _free_energy(self, tip_location, state):
        """Free energy of the Head
        Takes:
            tip_location: relative Crown to Actin distance (x,y)
            state: kinetic state of the cross-bridge, ['free'|'loose'|'tight']
        Returns:
            energy: free energy of the head in the given state
        """
        if state == "free":
            return 0
        elif state == "loose":
            return self.alphaDG + self.energy(tip_location, state)
        elif state == "tight":
            return self.etaDG + self.energy(tip_location, state)
    @staticmethod
    def _seg_values(tip_location):
        """Return the length and angle to the Head tip
        Takes:
            tip_location: relative Crown to Actin distance (x,y)
        Returns:
            (c_ang, g_len): the angle and length of the Head's springs
        """
        c_ang = m.atan2(tip_location[1], tip_location[0])
        g_len = m.hypot(tip_location[1], tip_location[0])
        return (c_ang, g_len)
class Crossbridge(Head):
    """A cross-bridge, including status of links to actin sites.

    Extends Head with knowledge of where it sits on a thick filament face,
    which thin-filament face it opposes, and which binding site (if any)
    it is currently attached to.
    """
    def __init__(self, index, parent_face, thin_face):
        """Set up the cross-bridge
        Parameters:
            index: the cross-bridge's index on the parent face
            parent_face: the associated thick filament face
            thin_face: the face instance opposite this cross-bridge
        """
        # Do that super() voodoo that instantiates the parent Head
        super(Crossbridge, self).__init__()
        # What is your name, where do you sit on the parent face?
        self.index = index
        # What log are you a bump upon?
        self.parent_face = parent_face
        # Remember who thou art squaring off against
        self.thin_face = thin_face
        # Address tuple used for lattice-wide lookup:
        # ('xb', thick filament index, face index, cross-bridge index)
        self.address = ('xb', self.parent_face.parent_filament.index,
                        self.parent_face.index, self.index)
        # Remember if thou art bound unto an actin
        self.bound_to = None # None if unbound, BindingSite object otherwise
    def __str__(self):
        """String representation of the cross-bridge"""
        # Note: forces are recomputed here, so printing is not free of cost.
        out = '__XB_%02d__State_%s__Forces_%d_%d__'%(
            self.index, self.state,
            self.axialforce(), self.radialforce())
        return out
    def to_dict(self):
        """Create a JSON compatible representation of the crown
        Example usage: json.dumps(crown.to_dict(), indent=1)
        Current output includes:
            address: largest to most local, indices for finding this
            state: the free, loose, strong state of binding
            thin_face: the address of the opposing thin face
            bound_to: None or the address of the bound binding site
        """
        xbd = self.__dict__.copy()
        # Drop keys that are derived or stored higher up ('c' and 'g' are
        # presumably the Head's spring objects set by the parent class —
        # TODO confirm against Head.__init__, which is defined above.
        xbd.pop('_timestep')
        xbd.pop('index')
        xbd.pop('c')
        xbd.pop('g')
        xbd.pop('parent_face')
        # Linked objects are replaced by their addresses so the result is
        # JSON-serializable.
        if xbd['bound_to'] is not None:
            xbd['bound_to'] = xbd['bound_to'].address
        xbd['thin_face'] = xbd['thin_face'].address
        return xbd
    def from_dict(self, xbd):
        """ Load values from a crossbridge dict. Values read in correspond
        to the current output documented in to_dict.
        """
        # Check for index mismatch
        read, current = tuple(xbd['address']), self.address
        assert read==current, "index mismatch at %s/%s"%(read, current)
        # Local keys
        self.state = xbd['state']
        self.etaDG = xbd['etaDG']
        self.alphaDG = xbd['alphaDG']
        # Sub-structure and remote keys: addresses are resolved back to
        # live objects through the parent lattice.
        self.thin_face = self.parent_face.parent_filament.parent_lattice.\
                resolve_address(xbd['thin_face'])
        if xbd['bound_to'] is None:
            self.bound_to = None
        else:
            self.bound_to = self.parent_face.parent_filament.parent_lattice.\
                    resolve_address(xbd['bound_to'])
    @property
    def timestep(self):
        """Timestep size is stored at the half-sarcomere level"""
        return self.parent_face.parent_filament.parent_lattice.timestep_len
    def transition(self):
        """Gather the needed information and try a transition
        Parameters:
            None
        Returns:
            transition: string of transition ('12', '32', etc.) or None
        """
        # When unbound, try to bind, otherwise just try a transition
        if self.bound_to is None:
            # Find the lattice spacing
            lattice_spacing = self._get_lattice_spacing()
            # Find this cross-bridge's axial location
            xb_axial_loc = self.axial_location
            # Find the potential binding site
            actin_site = self.thin_face.nearest(xb_axial_loc)
            actin_axial_loc = actin_site.axial_location
            actin_state = actin_site.permissiveness
            # Find the axial separation
            axial_sep = actin_axial_loc - xb_axial_loc
            # Combine the two distances
            distance_to_site = (axial_sep, lattice_spacing)
            # Allow the myosin head to take it from here
            trans = super(Crossbridge, self).transition(distance_to_site,
                                                        actin_state)
            # Process changes to bound state: '12' means we just bound, so
            # link this cross-bridge and the binding site to each other.
            if trans == '12':
                self.bound_to = actin_site
                actin_site.bind_to(self)
            else:
                assert (trans is None), 'Bound state mismatch'
        else:
            # Get the distance to the actin site
            distance_to_site = self._dist_to_bound_actin()
            actin_state = self.bound_to.permissiveness
            # Allow the myosin head to take it from here
            trans = super(Crossbridge, self).transition(distance_to_site,
                                                        actin_state)
            # Process changes to the bound state: '21'/'31' are unbinding
            # transitions, so clear the link on both ends.
            if trans in set(('21', '31')):
                self.bound_to.bind_to(None)
                self.bound_to = None
            else:
                assert (trans in set(('23', '32', None))) , 'State mismatch'
        return trans
    def axialforce(self, base_axial_loc=None, tip_axial_loc = None):
        """Gather needed information and return the axial force
        Parameters:
            base_axial_location: location of the crown (optional)
            tip_axial_loc: location of an attached actin node (optional)
        Returns:
            f_x: the axial force generated by the cross-bridge
        """
        # Unbound? No force!
        if self.bound_to is None:
            return 0.0
        # Else, get the distance to the bound site and run with it
        distance = self._dist_to_bound_actin(base_axial_loc, tip_axial_loc)
        # Allow the myosin head to take it from here
        return super(Crossbridge, self).axialforce(distance)
    def radialforce(self):
        """Gather needed information and return the radial force
        Parameters:
            None
        Returns:
            f_y: the radial force generated by the cross-bridge
        """
        # Unbound? No force!
        if self.bound_to is None:
            return 0.0
        # Else, get the distance to the bound site and run with it
        distance_to_site = self._dist_to_bound_actin()
        # Allow the myosin head to take it from here
        return super(Crossbridge, self).radialforce(distance_to_site)
    @property
    def axial_location(self):
        """Find the axial location of the thick filament attachment point
        Parameters:
            None
        Returns:
            axial: the axial location of the cross-bridge base
        """
        axial = self.parent_face.get_axial_location(self.index)
        return axial
    def _dist_to_bound_actin(self, xb_axial_loc=None, tip_axial_loc=None):
        """Find the (x,y) distance to the bound actin
        This is the distance format used by the myosin head.
        Parameters:
            xb_axial_loc: current axial location of the crown (optional)
            tip_axial_loc: location of an attached actin node (optional)
        Returns:
            (x,y): the axial distance between the cross-bridge base and
                the actin site (x), and the lattice spacing (y)
        """
        # Are you really bound?
        assert (self.bound_to is not None) , "Lies, you're unbound!"
        # Find the lattice spacing
        lattice_spacing = self._get_lattice_spacing()
        # Find this cross-bridge's axial location if need be
        if xb_axial_loc is None:
            xb_axial_loc = self.axial_location
        # Find the distance to the bound actin site if need be
        if tip_axial_loc is None:
            tip_axial_loc = self.bound_to.axial_location
        # Combine the two distances
        return (tip_axial_loc - xb_axial_loc, lattice_spacing)
    def _get_lattice_spacing(self):
        """Ask our superiors for lattice spacing data"""
        return self.parent_face.lattice_spacing
# This module is a library; running it directly only prints a reminder.
if __name__ == '__main__':
    print("mh.py is really meant to be called as a supporting module")
| cdw/multifil | multifil/mh.py | mh.py | py | 29,117 | python | en | code | 1 | github-code | 13 |
3279970939 | import numpy as np
import random as random
from random import randrange
# Number of points generated per class by each generator below.
SAMPLES = 100
# Offset pushing class A up/right and class B down/left so they separate.
SEPARATION = 0.33
def linearly_separable_data(mA, sigmaA, mB, sigmaB):
    """Draw two linearly separable 2-D Gaussian classes.

    Class A is centered at mA (std sigmaA) shifted by +SEPARATION, class B
    at mB (std sigmaB) shifted by -SEPARATION. Returns (X, t) where X is
    (3, 2*SAMPLES) with a bias row of ones and t is (1, 2*SAMPLES) holding
    +1 for class A and -1 for class B, columns shuffled.
    """
    a_x1 = np.zeros(SAMPLES)
    a_x2 = np.zeros(SAMPLES)
    b_x1 = np.zeros(SAMPLES)
    b_x2 = np.zeros(SAMPLES)
    # Draw in the same interleaved order as before (A-x1, A-x2, B-x1, B-x2).
    for i in range(SAMPLES):
        a_x1[i] = np.random.normal() * sigmaA + mA[0] + SEPARATION
        a_x2[i] = np.random.normal() * sigmaA + mA[1] + SEPARATION
        b_x1[i] = np.random.normal() * sigmaB + mB[0] - SEPARATION
        b_x2[i] = np.random.normal() * sigmaB + mB[1] - SEPARATION
    data = np.hstack([np.vstack([a_x1, a_x2]), np.vstack([b_x1, b_x2])])
    targets = np.hstack([np.ones([1, SAMPLES]), -np.ones([1, SAMPLES])])
    X = np.zeros([3, 2 * SAMPLES])
    t = np.zeros([1, 2 * SAMPLES])
    shuffle = np.random.permutation(2 * SAMPLES)
    for dest in shuffle:
        # NOTE: indexes the permutation through itself, exactly as the
        # original did — the result is still a uniform shuffle.
        X[:2, dest] = data[:2, shuffle[dest]]
        X[2, dest] = 1  # bias term
        t[0, dest] = targets[0, shuffle[dest]]
    return X, t
def new_data_generation(m_a, m_b, sigma_a, sigma_b):
    """Draw a bimodal class A (modes at -m_a[0] and +m_a[0] in x1) and a
    unimodal class B, then return shuffled (X, t) with a bias row, exactly
    as linearly_separable_data does.
    """
    half = round(SAMPLES / 2)
    a_x1 = np.zeros(SAMPLES)
    a_x2 = np.zeros(SAMPLES)
    b_x1 = np.zeros(SAMPLES)
    b_x2 = np.zeros(SAMPLES)
    # First half of class A sits around -m_a[0], second half around +m_a[0].
    for i in range(half):
        a_x1[i] = np.random.normal() * sigma_a - m_a[0]
    for i in range(half):
        a_x1[i + half] = np.random.normal() * sigma_a + m_a[0]
    for i in range(SAMPLES):
        a_x2[i] = np.random.normal() * sigma_a + m_a[1]
        b_x1[i] = np.random.normal() * sigma_b + m_b[0]
        b_x2[i] = np.random.normal() * sigma_b + m_b[1]
    data = np.hstack([np.vstack([a_x1, a_x2]), np.vstack([b_x1, b_x2])])
    targets = np.hstack([np.ones([1, SAMPLES]), -np.ones([1, SAMPLES])])
    X = np.zeros([3, 2 * SAMPLES])
    t = np.zeros([1, 2 * SAMPLES])
    shuffle = np.random.permutation(2 * SAMPLES)
    for dest in shuffle:
        X[:2, dest] = data[:2, shuffle[dest]]  # same double-permutation as before
        X[2, dest] = 1  # bias term
        t[0, dest] = targets[0, shuffle[dest]]
    return X, t
def non_linearly_separable_data(m, sigma):
    """Draw two fully overlapping Gaussian classes (same mean m and std
    sigma), so no linear boundary can separate them. Returns shuffled
    (X, t) with a bias row, in the same layout as the other generators.
    """
    a_x1 = np.zeros(SAMPLES)
    a_x2 = np.zeros(SAMPLES)
    b_x1 = np.zeros(SAMPLES)
    b_x2 = np.zeros(SAMPLES)
    for i in range(SAMPLES):
        a_x1[i] = np.random.normal() * sigma + m[0]
        a_x2[i] = np.random.normal() * sigma + m[1]
        b_x1[i] = np.random.normal() * sigma + m[0]
        b_x2[i] = np.random.normal() * sigma + m[1]
    data = np.hstack([np.vstack([a_x1, a_x2]), np.vstack([b_x1, b_x2])])
    targets = np.hstack([np.ones([1, SAMPLES]), -np.ones([1, SAMPLES])])
    X = np.zeros([3, 2 * SAMPLES])
    t = np.zeros([1, 2 * SAMPLES])
    shuffle = np.random.permutation(2 * SAMPLES)
    for dest in shuffle:
        X[:2, dest] = data[:2, shuffle[dest]]  # same double-permutation as before
        X[2, dest] = 1  # bias term
        t[0, dest] = targets[0, shuffle[dest]]
    return X, t
"""
Data removal condition:
- random 50% from classA
"""
def generate_training_a(x, t, percentage):
    """Move a random `percentage` of columns — class A (target > 0) only —
    from the training data into a held-out test set.

    Takes:
        x: (features, samples) data matrix (bias row included)
        t: (1, samples) targets, +1 for class A and -1 for class B
        percentage: fraction of ALL columns to move; must not exceed the
            share of class-A columns or the sampling loop cannot finish
    Returns:
        x_training, t_training: inputs/targets with the sampled columns removed
        x_test, t_test: the removed columns, stacked column-wise
    """
    x_training = x
    t_training = t
    x_test = list()
    t_test = list()
    n_remove = round(x_training.shape[1] * percentage)
    removal_pos_a = list()
    while len(removal_pos_a) < n_remove:
        # BUG FIX: np.random.randint's `high` bound is exclusive, so the
        # original high=len(positions)-1 could never sample the last column.
        aux = np.random.randint(low=0, high=x_training.shape[1])
        # BUG FIX: skip already-chosen columns — the original could append
        # duplicates and so delete fewer columns than requested. This also
        # makes the function consistent with generate_training_b.
        if t_training[0, aux] > 0 and aux not in removal_pos_a:
            removal_pos_a.append(aux)
            x_test.append(x_training[:, aux])
            t_test.append(t_training[:, aux])
    x_training = np.delete(x_training, removal_pos_a, axis=1)
    t_training = np.delete(t_training, removal_pos_a, axis=1)
    return x_training, t_training, np.transpose(np.array(x_test)), np.transpose(np.array(t_test))
"""
Data removal condition:
- random 50% from classB
"""
def generate_training_b(x, t, percentage):
    """Move a random `percentage` of columns — class B (target < 0) only —
    from the training data into a held-out test set.

    Takes:
        x: (features, samples) data matrix (bias row included)
        t: (1, samples) targets, +1 for class A and -1 for class B
        percentage: fraction of ALL columns to move; must not exceed the
            share of class-B columns or the sampling loop cannot finish
    Returns:
        x_training, t_training: inputs/targets with the sampled columns removed
        x_test, t_test: the removed columns, stacked column-wise
    """
    x_training = x
    t_training = t
    x_test = list()
    t_test = list()
    n_remove = round(x_training.shape[1] * percentage)
    removal_pos_b = list()
    while len(removal_pos_b) < n_remove:
        # BUG FIX: np.random.randint's `high` bound is exclusive, so the
        # original high=shape[1]-1 could never sample the last column.
        aux = np.random.randint(low=0, high=x_training.shape[1])
        if (t_training[0, aux] < 0) and (aux not in removal_pos_b):
            removal_pos_b.append(aux)
            x_test.append(x_training[:, aux])
            t_test.append(t_training[:, aux])
    x_training = np.delete(x_training, removal_pos_b, axis=1)
    t_training = np.delete(t_training, removal_pos_b, axis=1)
    return x_training, t_training, np.transpose(np.array(x_test)), np.transpose(np.array(t_test))
"""
Data removal condition:
- random 25% from each class
"""
def generate_training_a_b(x, t, percentage):
    """Hold out `percentage` of the columns from class A and, separately,
    `percentage` from class B; the two held-out sets are concatenated
    column-wise into a single test set.
    """
    x_train, t_train, x_test_a, t_test_a = generate_training_a(x, t, percentage)
    x_train, t_train, x_test_b, t_test_b = generate_training_b(x_train, t_train, percentage)
    x_test = np.concatenate((x_test_a, x_test_b), axis=1)
    t_test = np.concatenate((t_test_a, t_test_b), axis=1)
    return x_train, t_train, x_test, t_test
"""
Data removal condition:
- 20% from a subset of classA for which classA(1,:)<0
and 80% from a subset of classA for which classA(1,:)>0
"""
def generate_training_a_subsets(x, t, percentage1, percentage2):
    """Hold out two fractions of class A (target > 0) based on the sign of
    the row-1 coordinate: `percentage1` of the columns with x[1] < 0 and
    `percentage2` of the columns with x[1] > 0.

    NOTE(review): the original docstring spoke of classA(1,:) — MATLAB's
    first row — while the code tests numpy row 1 (the second coordinate);
    the coded behavior is kept as-is.

    Returns:
        x_training, t_training: data with the sampled columns removed
        x_test, t_test: the removed columns, stacked column-wise
    """
    x_training = x
    t_training = t
    x_test = list()
    t_test = list()
    # The negative-x2 and positive-x2 passes share identical logic, so a
    # single helper performs both removals in sequence.
    x_training, t_training = _remove_class_a_fraction(
        x_training, t_training, x_test, t_test, percentage1, below_zero=True)
    x_training, t_training = _remove_class_a_fraction(
        x_training, t_training, x_test, t_test, percentage2, below_zero=False)
    return x_training, t_training, np.transpose(np.array(x_test)), np.transpose(np.array(t_test))

def _remove_class_a_fraction(x_training, t_training, x_test, t_test,
                             fraction, below_zero):
    """Move `fraction` of the class-A columns whose row-1 value is negative
    (below_zero=True) or positive (below_zero=False) into x_test/t_test.
    Returns the reduced (x_training, t_training)."""
    candidates = list()
    for i in range(x_training.shape[1]):
        if below_zero:
            sign_ok = x_training[1, i] < 0
        else:
            sign_ok = x_training[1, i] > 0
        if sign_ok and t_training[0, i] > 0:
            candidates.append(i)
    n_remove = round(len(candidates) * fraction)
    removal = list()
    for _ in range(n_remove):
        pos = randrange(len(candidates))  # pick among the not-yet-chosen
        removal.append(candidates[pos])
        x_test.append(x_training[:, candidates[pos]])
        t_test.append(t_training[:, candidates[pos]])
        candidates = np.delete(candidates, pos)
    x_training = np.delete(x_training, removal, axis=1)
    t_training = np.delete(t_training, removal, axis=1)
    return x_training, t_training
# ---------------------------- DATA QUESTION 3.2.2. ---------------------------- #
"""
hour-glass shaped topology
simple autoencoder with 8–3–8 feed-forward architecture
only one input variable is active (=1)
"""
def enconder_data():
    """Build an 8-3-8 autoencoder training set.

    Returns (X, t): t is an 8x8 matrix of -1s with exactly one +1 per
    column, placed in a random row (each row used once); X is t with an
    extra row of ones (bias) stacked underneath, shape (9, 8).
    """
    patterns = -np.ones((8, 8))
    order = random.sample(range(patterns.shape[0]), 8)  # random row permutation
    for col, row in enumerate(order):
        patterns[row, col] = 1
    targets = patterns.copy()
    inputs = np.vstack((patterns, np.ones((patterns.shape[1]))))
    return inputs, targets
# ---------------------------- DATA QUESTION 3.2.3. ---------------------------- #
"""
Bell-shaped gaussian
"""
def gaussian_data(percentage):
    """Sample a 2-D Gaussian bell surface on a grid and hold out points.

    Takes:
        percentage: fraction of the 20 per-axis grid coordinates moved into
            the test set
    Returns:
        X_train, t_train: (3, n^2) inputs (with bias row) and (1, n^2)
            targets for the n remaining coordinates
        xx, yy: the training meshgrid, for surface plotting
        X_test, t_test: same layout for the held-out coordinates
    The surface is exp(-0.1*x^2) * exp(-0.1*y^2) - 0.5; x and y always share
    the same coordinate set since deletions use the same random index.
    """
    data = np.arange(-5, 5, 0.5)
    x = np.transpose(data.reshape((1, len(data))))
    y = np.transpose(data.reshape((1, len(data))))
    elements = round(x.shape[0] * percentage)
    # Work on flat 1-D copies so element indexing/deletion yields scalars
    # (assigning a size-1 array into a scalar slot errors on modern numpy).
    x_training = x.copy().ravel()
    y_training = y.copy().ravel()
    x_testing = np.zeros(elements)
    y_testing = np.zeros(elements)
    for i in range(elements):
        # BUG FIX: np.random.randint's `high` is exclusive, so the original
        # high=shape[0]-1 could never draw the last remaining coordinate and
        # crashed (low >= high) once a single element was left.
        aux = np.random.randint(low=0, high=x_training.shape[0])
        x_testing[i] = x_training[aux]
        x_training = np.delete(x_training, aux)
        y_testing[i] = y_training[aux]
        y_training = np.delete(y_training, aux)
    # Training
    x_training = np.transpose(np.reshape(x_training, (1, len(x_training))))
    y_training = np.transpose(np.reshape(y_training, (1, len(y_training))))
    z_train = np.dot(np.exp(-x_training*x_training*0.1), np.transpose(np.exp(-y_training*y_training*0.1))) - 0.5
    xx, yy = np.meshgrid(x_training, y_training)
    size = len(x_training)*len(y_training)
    xx_ = np.reshape(xx, (1, size))
    yy_ = np.reshape(yy, (1, size))
    X_train = np.vstack((xx_, yy_, np.ones((size))))
    t_train = np.reshape(z_train, (1, size))
    # Testing
    x_testing = np.array(np.transpose(np.reshape(x_testing, (1, len(x_testing)))))
    y_testing = np.array(np.transpose(np.reshape(y_testing, (1, len(y_testing)))))
    z_test = np.dot(np.exp(-x_testing*x_testing*0.1), np.transpose(np.exp(-y_testing*y_testing*0.1))) - 0.5
    xx_test, yy_test = np.meshgrid(x_testing, y_testing)
    size = len(x_testing)*len(y_testing)
    xx_ = np.reshape(xx_test, (1, size))
    yy_ = np.reshape(yy_test, (1, size))
    X_test = np.vstack((xx_, yy_, np.ones((size))))
    t_test = np.reshape(z_test, (1, size))
    return X_train, t_train, xx, yy, X_test, t_test
41813688213 | """find and write the intercept of the Iris versicolor class, label 1"""
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
# Fit on all four features and all three classes, then report the intercept
# of class index 1 (Iris versicolor).
iris = load_iris()
classifier = LogisticRegression()
classifier.fit(iris.data, iris.target)
print(classifier.intercept_[1])
| ElenaErratic/samplePythonTasks | sklearn_logistic_regression/logistic_regression_intercept.py | logistic_regression_intercept.py | py | 289 | python | en | code | 0 | github-code | 13 |
16332856890 | '''
STUFF LEARNED:
- I really wanted to try to solve this with numpy, but was not able to learn the basics in time
- Trouble with finding the right "Frame" to view the code "ZAEZRLZG"
earlier solutions that appended coordinates to the pixel list were
abandoned bc of printing in the console
*********************
- print("@", end="")
*********************
- This solution finds coordinates within boundary ranges ymin to ymax, then xmin to xmax
and prints an "@"
- Probably too many sources to cite on boundary management tips, but github.com/klaa97
was the most clear and used the print("@", end="")
NEXT:
- Plot this in Numpy... animate if possible?
'''
import string
import sys
import re
class PT(object):
    """A moving point of light: position (x, y) plus a per-step velocity."""
    def __init__(self, x, y, vx, vy):
        # Keep the raw inputs around in case a caller wants to reset later.
        self.initial_settings = [x, y, vx, vy]
        self.x = x
        self.y = y
        self.vx = vx
        self.vy = vy
    def update(self):
        """Advance the point by one time step."""
        self.x += self.vx
        self.y += self.vy
    def getx(self):
        """Current x coordinate."""
        return self.x
    def gety(self):
        """Current y coordinate."""
        return self.y
    def out(self):
        """Current position as an [x, y] list."""
        return [self.x, self.y]
def openInput(fname, testing=False):
    """Parse the puzzle input: one point per line -> [x, y, vx, vy] ints.

    Takes:
        fname: path to the input file
        testing: when True, additionally print a bounding-box size proxy
            for the first 20000 steps (used to eyeball which frame to view)
    Returns:
        list of [x, y, vx, vy] integer lists, one per input line
    """
    # `with` closes the file on exit; the original's extra f.close() inside
    # the block was redundant and has been dropped. The regex pulls all
    # signed integers from each line, so stripping newlines is unnecessary.
    with open(fname) as f:
        lines = [[int(i) for i in re.findall(r'-?\d+', l)] for l in f]
    if testing:
        # 19999 197,948
        for i in range(20000):
            minx = min(x + i * vx for (x, y, vx, vy) in lines)
            maxx = max(x + i * vx for (x, y, vx, vy) in lines)
            miny = min(y + i * vy for (x, y, vx, vy) in lines)
            maxy = max(y + i * vy for (x, y, vx, vy) in lines)
            print(i, maxx - minx + maxy - miny)
        print(len(lines))
    return lines
def part1(in_f):
    """Advance all points step by step and print the message frame(s).

    Takes:
        in_f: list of [x, y, vx, vy] integer lists (see openInput)
    Prints the step number and an ASCII rendering ('@' on lit pixels)
    whenever the points' bounding box is at most `width` wide and tall.
    """
    width = 100  # bounding-box threshold that counts as "converged enough"
    pts = [PT(row[0], row[1], row[2], row[3]) for row in in_f]
    for s in range(10105):
        for pt in pts:
            pt.update()
        minx = min(pt.getx() for pt in pts)
        miny = min(pt.gety() for pt in pts)
        maxx = max(pt.getx() for pt in pts)
        maxy = max(pt.gety() for pt in pts)
        if minx + width >= maxx and miny + width >= maxy:
            print(s)
            # Set of tuples gives O(1) membership per pixel instead of the
            # original O(n) list scan for every grid cell.
            pixels = {tuple(pt.out()) for pt in pts}
            for y in range(miny, maxy + 1):
                for x in range(minx, maxx + 1):
                    if (x, y) in pixels:
                        print("@", end="")
                    else:
                        print(" ", end="")
                print("")
part1(openInput('d10.txt')) | toastedyeti/AdventOfCode2018 | D10/D10.py | D10.py | py | 2,677 | python | en | code | 0 | github-code | 13 |
29507314912 | class Entry:
    def __init__(self, id, n_values, values):
        """One contestant record: its id, declared value count, and values."""
        self.id = id
        self.n_values = n_values
        self.values = values
def execute(n, entries):
    """Format the contest answer for one case.

    Finds the smallest value across all entries, then lists (ascending) the
    ids of the first `n` entries that contain it. Returns e.g. "3 {1,4}".
    """
    smallest_min = min(min(entry.values) for entry in entries)
    matching = [entries[i] for i in range(n) if smallest_min in entries[i].values]
    matching.sort(key=lambda entry: entry.id)
    ids = ",".join("{0}".format(entry.id) for entry in matching)
    return "{0} ".format(smallest_min) + "{" + ids + "}"
# Read ten cases from stdin. Each case: a count line, then one line per
# entry of the form "<id> <n_values> <v1> ... <vk>". Answers are buffered
# and printed together at the end.
outputs = []
for _ in range(10):
    n = int(input())
    entries = []
    for i in range(n):
        line = input().split()
        n_values = int(line[1])
        # Only the declared number of values is kept; extra tokens ignored.
        values = line[2:n_values + 2]
        entry = Entry(int(line[0]), n_values, [int(value) for value in values])
        entries.append(entry)
    outputs.append(execute(n, entries))
for output in outputs:
    print(output)
13313294337 | # This file is basically just a dumb way to get around the Django settings issue for ANSIString...
class AnsiSettings:
    """Stand-in for Django settings so ANSIString can run without Django.

    Each attribute mirrors the Evennia/Django color setting of the same
    name; all defaults leave the stock `|`-style markup untouched.
    """
    def __init__(self):
        # Extra exact-tag -> ANSI-sequence pairs, e.g. (r"%c%r", ansi.ANSI_RED).
        self.COLOR_ANSI_EXTRA_MAP = []
        # Extra XTERM256 foreground regexes; each needs three groups holding
        # 0-5 for the red, green and blue components.
        self.COLOR_XTERM256_EXTRA_FG = []
        # Extra XTERM256 background regexes (default form: \|\[([0-5])([0-5])([0-5])).
        self.COLOR_XTERM256_EXTRA_BG = []
        # Extra XTERM256 grayscale foreground regexes; one group with a
        # letter a-z marking the level from white to black.
        self.COLOR_XTERM256_EXTRA_GFG = []
        # Extra XTERM256 grayscale background regexes (default: \|\[=([a-z])).
        self.COLOR_XTERM256_EXTRA_GBG = []
        # ANSI lacks bright backgrounds, so these tags map onto XTERM256
        # background tags, e.g. (r'{[r', r'{[500').
        self.COLOR_ANSI_XTERM256_BRIGHT_BG_EXTRA_MAP = []
        # True would make the settings above *replace* the default |-style
        # markup instead of extending it.
        self.COLOR_NO_DEFAULT = False
        # Assumed client screen width, in characters.
        self.CLIENT_DEFAULT_WIDTH = 78
settings = AnsiSettings()
| volundmush/shinma | shinma/modules/net/ansi_settings.py | ansi_settings.py | py | 2,159 | python | en | code | 0 | github-code | 13 |
31692673610 | percent_fat = int(input())
# Remaining dietary inputs (percent_fat is read on the preceding line).
percent_protein = int(input())
percent_carbohydrates = int(input())
total_amount_kcal = int(input())
percent_water = int(input())
# Grams of each macronutrient: fat provides 9 kcal/g, protein and carbs 4 kcal/g.
total_grams_fat = ((percent_fat / 100) * total_amount_kcal) / 9
total_grams_protein = ((percent_protein / 100) * total_amount_kcal) / 4
total_grams_carbs = ((percent_carbohydrates / 100) * total_amount_kcal) / 4
total_weight_food = total_grams_fat + total_grams_protein + total_grams_carbs
# Caloric density of the dry matter: kcal per gram, scaled by the non-water share.
kcal_per_gram = total_amount_kcal / total_weight_food
sum_water = 100 - percent_water
final_amount = (sum_water / 100) * kcal_per_gram
print(f"{final_amount:.4f}")
18091395969 | """
python实现链式栈模型
思路:
1.目标:栈(LIFO)
2.设计
栈顶:链表头部作为栈顶
入栈:添加链表头节点
出栈:删除链表头节点
栈底:链表尾部作为栈底,入栈和出栈操作
"""
class Node:
    """Singly linked list node holding one stack value."""
    def __init__(self, value):
        self.value = value  # payload stored in this node
        self.next = None  # next node toward the stack bottom (None at the end)
class LinkStack:
    """Linked-list stack (LIFO): the list head serves as the stack top.

    Push adds a new head node, pop removes it; both operations are O(1).
    """
    def __init__(self):
        """Create an empty stack."""
        self.head = None  # top of the stack; None means empty
    def enstack(self, item):
        """Push: wrap `item` in a Node and make it the new list head."""
        node = Node(item)
        node.next = self.head
        self.head = node
    def destack(self):
        """Pop: remove the head node and return its value.

        Raises:
            IndexError: if the stack is empty. (Changed from the bare
                Exception for consistency with list.pop(); IndexError is a
                subclass of Exception, so existing handlers still work.)
        """
        if not self.head:
            raise IndexError('destack from empty stack')
        value = self.head.value
        self.head = self.head.next
        return value
if __name__ == '__main__':
    # Quick demo: push three values, then pop four times.
    s = LinkStack()
    s.enstack(100)
    s.enstack(200)
    s.enstack(300)
    # Prints 300, 200, 100 (LIFO order); the fourth pop deliberately raises
    # to demonstrate the empty-stack error.
    print(s.destack())
    print(s.destack())
    print(s.destack())
    print(s.destack())
| zmj90/document | tech/technical/数据结构/datastructure/day02_course/day02_code/05_linkStack.py | 05_linkStack.py | py | 1,065 | python | en | code | 0 | github-code | 13 |
38869136392 | #!/usr/bin/env python3
"""
Unit test ReviseAnno.
"""
__author__ = "Scott Teresi"
import logging
import os
import pytest
import coloredlogs
import numpy as np
import pandas as pd
import logging
from transposon.transposon_data import TransposonData
from transposon.revise_annotation import ReviseAnno
# -------------------------------------------------------------
# TEST VALUES
# Expected revised `Length` columns (one value per surviving annotation row)
# for each input TSV exercised by the parametrized tests below, grouped by
# revision mode: superfamily, order, and nameless.
TRUE_SingleC_SingleE_SUPERFAM = [
    784,
    1021,
    912,
    1694,
    634,
    212,
    150,
    94,
    1432,
    947,
    623,
    177,
    385,
    229,
    2382,
    131,
    189,
    1170,
    501,
    351,
]
TRUE_SingleC_MultiE_SUPERFAM = [
    1412,
    560,
    1219,
    2209,
    212,
    150,
    94,
    2452,
    623,
    177,
    385,
    2611,
    131,
    189,
    1170,
    501,
    351,
]
TRUE_SingleC_ConcOverlap_SUPERFAM = [608, 792, 201]
# Order-level expectations.
TRUE_SingleC_SingleE_ORDER = [
    784,
    1021,
    912,
    1694,
    1170,
    501,
    351,
]
TRUE_SingleC_MultiE_ORDER = [1880, 1219, 2209, 301, 212, 150, 94, 2452, 2611]
TRUE_SingleC_ConcOverlap_ORDER = [608, 792, 201]
# Nameless (identity-agnostic) expectations.
TRUE_SingleC_SingleE_NAMELESS = [
    784,
    1021,
    912,
    1694,
    1170,
    551,
]
TRUE_SingleC_MultiE_NAMELESS = [1880, 1219, 2209, 212, 150, 2545, 2611]
TRUE_SingleC_ConcOverlap_NAMELESS = [608, 792, 201]
# -------------------------------------------------------------
@pytest.fixture
def logger_obj():
    """
    Dummy logging object for the ReviseAnno constructor.
    """
    logger = logging.getLogger(__name__)
    return logger
@pytest.fixture
def h5_cache_loc():
    """
    Location for outputting h5 revised annotation files.
    """
    path_main = os.path.abspath(__file__)
    h5_cache_loc = "tests/test_h5_cache_loc/"
    # NOTE(review): path_main is this *file's* path, so joining a directory
    # onto it makes the isdir() check effectively always False; makedirs
    # then always runs, which is harmless thanks to exist_ok=True — confirm.
    if not os.path.isdir(os.path.abspath(os.path.join(path_main, h5_cache_loc))):
        os.makedirs(h5_cache_loc, exist_ok=True)
    return h5_cache_loc
@pytest.fixture
def revised_te_annotation_loc():
    """
    Location for outputing revised annotation files for visual inspection.
    """
    path_main = os.path.abspath(__file__)
    data_output = "tests/output_data/"
    # NOTE(review): same file-path/dir-path join quirk as h5_cache_loc;
    # the makedirs call is what actually guarantees the directory exists.
    if not os.path.isdir(os.path.abspath(os.path.join(path_main, data_output))):
        os.makedirs(data_output, exist_ok=True)
    return data_output
@pytest.fixture
def superfam_name():
    """
    Dummy dataset name passed to the ReviseAnno constructor (superfamily runs).
    """
    return "test_superfam_set"
@pytest.fixture
def order_name():
    """
    Dummy dataset name passed to the ReviseAnno constructor (order runs).
    """
    return "test_order_set"
@pytest.fixture
def nameless_name():
    """
    Dummy dataset name passed to the ReviseAnno constructor (nameless runs).
    """
    return "test_nameless_set"
# Input Data
# NOTE(review): as with the fixtures above, the isdir() check joins a
# directory onto this file's own path, so makedirs effectively always runs
# (harmless with exist_ok=True) — confirm intent.
path_main = os.path.abspath(__file__)
data_input = "tests/input_data/"
if not os.path.isdir(os.path.abspath(os.path.join(path_main, data_input))):
    os.makedirs(data_input, exist_ok=True)
# Hand-made TSV annotation files that drive the parametrized tests below.
# Supers
SingleC_SingleElongate_Super = (
    "tests/input_data/Test_SingleC_SingleElongate_Superfam_Revision.tsv"
)
SingleC_MultiElongate_Super = (
    "tests/input_data/Test_SingleC_MultiElongate_Superfam_Revision.tsv"
)
SingleC_Conc_Super = (
    "tests/input_data/Test_SingleC_ConcurrentOverlap_Superfam_Revision.tsv"
)
# Orders
SingleC_SingleElongate_Order = (
    "tests/input_data/Test_SingleC_SingleElongate_Order_Revision.tsv"
)
SingleC_MultiElongate_Order = (
    "tests/input_data/Test_SingleC_MultiElongate_Order_Revision.tsv"
)
SingleC_Conc_Order = (
    "tests/input_data/Test_SingleC_ConcurrentOverlap_Order_Revision.tsv"
)
# Nameless
SingleC_SingleElongate_Nameless = (
    "tests/input_data/Test_SingleC_SingleElongate_Nameless_Revision.tsv"
)
SingleC_MultiElongate_Nameless = (
    "tests/input_data/Test_SingleC_MultiElongate_Nameless_Revision.tsv"
)
SingleC_Conc_Nameless = (
    "tests/input_data/Test_SingleC_ConcurrentOverlap_Nameless_Revision.tsv"
)
@pytest.fixture
def TEData_TestObj(request):
    """
    Give a TransposonData object built from the TSV path supplied via
    indirect parametrization.
    """
    # With indirect parametrization the parameter arrives through the
    # built-in `request` fixture, so the argument must be named `request`.
    transposon_input_dataframe = pd.read_csv(
        request.param,
        header="infer",
        dtype={"Start": "float32", "Stop": "float32", "Length": "float32"},
        sep="\t",
    )
    te_data = TransposonData(transposon_input_dataframe, "Mock_Camarosa")
    return te_data
@pytest.mark.parametrize(
    "TEData_TestObj, true_values, output_filenames",
    [
        (
            SingleC_SingleElongate_Super,
            TRUE_SingleC_SingleE_SUPERFAM,
            "SingleC_SingleE_Super.tsv",
        ),
        (
            SingleC_MultiElongate_Super,
            TRUE_SingleC_MultiE_SUPERFAM,
            "SingleC_MultiE_Super.tsv",
        ),
        (
            SingleC_Conc_Super,
            TRUE_SingleC_ConcOverlap_SUPERFAM,
            "SingleC_Conc_Super.tsv",
        ),
    ],
    indirect=["TEData_TestObj"],
)
def test_superfam(
    TEData_TestObj,
    true_values,
    output_filenames,
    h5_cache_loc,
    superfam_name,
    revised_te_annotation_loc,
):
    """Create superfamily revisions and check the revised Length column
    matches the expected vector for each input annotation."""
    revise_anno_obj = ReviseAnno(
        TEData_TestObj.data_frame, "test.tsv", h5_cache_loc, superfam_name
    )
    revise_anno_obj.create_superfam()
    observed = revise_anno_obj.updated_te_annotation.Length.to_numpy(copy=False)
    # NB this is just for manual inspection
    revise_anno_obj._write(
        revise_anno_obj.updated_te_annotation,
        os.path.join(revised_te_annotation_loc, output_filenames),
    )
    assert np.array_equal(observed, true_values)
@pytest.mark.parametrize(
    "TEData_TestObj, true_values, output_filenames",
    [
        (
            SingleC_SingleElongate_Order,
            TRUE_SingleC_SingleE_ORDER,
            "SingleC_SingleE_Order.tsv",
        ),
        (
            SingleC_MultiElongate_Order,
            TRUE_SingleC_MultiE_ORDER,
            "SingleC_MultiE_Order.tsv",
        ),
        (
            SingleC_Conc_Order,
            TRUE_SingleC_ConcOverlap_ORDER,
            "SingleC_Conc_Order.tsv",
        ),
    ],
    indirect=["TEData_TestObj"],
)
def test_order(
    TEData_TestObj,
    true_values,
    output_filenames,
    h5_cache_loc,
    order_name,
    revised_te_annotation_loc,
):
    """Create order revisions and check the revised Length column matches
    the expected vector for each input annotation."""
    revise_anno_obj = ReviseAnno(
        TEData_TestObj.data_frame, "test.tsv", h5_cache_loc, order_name
    )
    revise_anno_obj.create_order()
    observed = revise_anno_obj.updated_te_annotation.Length.to_numpy(copy=False)
    # NB this is just for manual inspection
    revise_anno_obj._write(
        revise_anno_obj.updated_te_annotation,
        os.path.join(revised_te_annotation_loc, output_filenames),
    )
    assert np.array_equal(observed, true_values)
@pytest.mark.parametrize(
    "TEData_TestObj, true_values, output_filenames",
    [
        (
            SingleC_SingleElongate_Nameless,
            TRUE_SingleC_SingleE_NAMELESS,
            "SingleC_SingleE_Nameless.tsv",
        ),
        (
            SingleC_MultiElongate_Nameless,
            TRUE_SingleC_MultiE_NAMELESS,
            "SingleC_MultiE_Nameless.tsv",
        ),
        (
            SingleC_Conc_Nameless,
            TRUE_SingleC_ConcOverlap_NAMELESS,
            "SingleC_Conc_Nameless.tsv",
        ),
    ],
    indirect=["TEData_TestObj"],
)
def test_nameless(
    TEData_TestObj,
    true_values,
    output_filenames,
    h5_cache_loc,
    nameless_name,
    revised_te_annotation_loc,
):
    """Create nameless revisions and check the revised Length column
    matches the expected vector for each input annotation."""
    revise_anno_obj = ReviseAnno(
        TEData_TestObj.data_frame, "test.tsv", h5_cache_loc, nameless_name
    )
    revise_anno_obj.create_nameless()
    observed = revise_anno_obj.updated_te_annotation.Length.to_numpy(copy=False)
    # NB this is just for manual inspection
    revise_anno_obj._write(
        revise_anno_obj.updated_te_annotation,
        os.path.join(revised_te_annotation_loc, output_filenames),
    )
    assert np.array_equal(observed, true_values)
# Allow running this test module directly without invoking pytest yourself.
if __name__ == "__main__":
    pytest.main(["-s", __file__])  # for convenience
| sjteresi/TE_Density | tests/unit/test_ReviseAnno.py | test_ReviseAnno.py | py | 8,109 | python | en | code | 25 | github-code | 13 |
32635790297 | import wave
import numpy as np
import pyaudio
import time
import librosa
import matplotlib.pyplot as plt
import librosa.display
from backend.models import NoteResult, Note
RECORDING_PATH = "backend/output.wav"
VOICED_PROB_THRESHOLD = 0.2
#TODO: COMPLETE
BEATS_AND_NOTE_NAME = {
0.25: "16",
0.5: "8",
0.75: "8d",
1: "q",
# 1.25: "4~16",
1.5: "qd",
1.75: "qdd",
2: "h",
# 2.25: "2~16",
# 2.5: "2~8",
# 2.75: "2~8~16",
3: "hd",
# 3.25: "2.~8",
3.5: "hdd",
4: "1"
}
# BEATS_AND_NOTE_NAME = {
# 0.25: "16",
# 0.5: "8",
# 0.75: "8d",
# 1: "q",
# 1.25: "4~16",
# 1.5: "qd",
# 1.75: "qdd",
# 2: "h",
# 2.25: "2~16",
# 2.5: "2~8",
# 2.75: "2~8~16",
# 3: "hd",
# 3.25: "2.~8",
# 3.5: "hdd",
# 4: "1"
# }
def index_of_closest_in_array(array, value):
    """Index of the entry of `array` nearest to `value` (first one on ties)."""
    distances = np.abs(array - value)
    return distances.argmin()
def beat_name(beat):
    """VexFlow duration code for a segment of the form [[start, end], voiced]."""
    span = beat[0]
    return BEATS_AND_NOTE_NAME[span[1] - span[0]]
def most_common(lst):
    """Most frequent element of `lst` (arbitrary choice on ties, as before)."""
    best = None
    best_count = -1
    for candidate in set(lst):
        count = lst.count(candidate)
        if count > best_count:
            best, best_count = candidate, count
    return best
def transform_note(note_name, note_duration, note_tie):
    """Build a VexFlow-ready Note from a pitch name, beat count and tie flag.

    "R" marks a rest: it is rendered at pitch b/4 with an "r"-suffixed
    duration code. Otherwise "C#4" becomes "c#/4", etc.
    """
    if note_name == "R":
        pitch = "b/4"
        duration = BEATS_AND_NOTE_NAME[note_duration] + "r"
    else:
        pitch = note_name[0:-1].lower() + "/" + note_name[-1]
        duration = BEATS_AND_NOTE_NAME[note_duration]
    return Note(pitch, duration, note_tie)
def split_note(note):
    """Express one note as a list of VexFlow Notes with nameable durations.

    If the duration maps directly to a VexFlow code, a single Note comes
    back. Otherwise the duration is decomposed greedily, using each known
    duration at most once (largest first); all pieces except the last are
    tied forward when the source note carries a tie.
    """
    if note.duration in BEATS_AND_NOTE_NAME:
        return [transform_note(note.name, note.duration, note.tie)]
    remaining = note.duration
    pieces = []
    for size in sorted(BEATS_AND_NOTE_NAME, reverse=True):
        if remaining <= 0:
            break
        if size <= remaining:
            remaining -= size
            pieces.append(transform_note(
                note.name, size, remaining > 0 and note.tie))
    return pieces
def get_track(notes):
    """Pack a sequence of notes into 4-beat bars (4/4 time).

    Takes:
        notes: list of note records with .name (pitch or "R" for a rest),
            .duration (in beats) and .tie attributes
    Returns:
        track: list of bars, each a list of VexFlow-ready Notes (durations
            split via split_note); a note crossing a barline is cut at the
            boundary and tied onward (unless it is a rest)
    A leading and a trailing rest are dropped before packing.
    """
    if not notes:
        # Nothing recorded: avoid IndexError on notes[0]/notes[-1] below.
        return []
    if notes[0].name == "R":
        notes = notes[1:]
    if notes and notes[-1].name == "R":
        notes = notes[:-1]
    old_count = 0  # beats already placed in the current bar
    new_count = 0  # beats in the current bar including the current note
    track = []
    bar = []
    for i in notes:
        new_count += i.duration
        if new_count < 4:
            # Fits with room to spare: keep filling the bar.
            bar += split_note(i)
            old_count += i.duration
        elif new_count == 4:
            # Exactly completes the bar.
            bar += split_note(i)
            track.append(bar)
            bar = []
            old_count = 0
            new_count = 0
        else:
            # Crosses the barline: close the bar with the first part (tied
            # onward for pitched notes) and open the next with the rest.
            bar += split_note(Note(i.name, 4-old_count, i.name != "R"))
            track.append(bar)
            bar = split_note(Note(i.name, new_count-4, i.tie))
            new_count = new_count-4
            old_count = new_count
    if bar:
        # Pad the final partial bar out to 4 beats.
        # BUG FIX: the original computed this padding but discarded it
        # (`bar + split_note(...)` had no effect); += makes it land.
        bar += split_note(Note(i.name, 4-new_count, i.tie))
        track.append(bar)
    return track
class NoteAnalyzer():
    def __init__(self, BPM, MUSIC_GRANULARITY, DURATION):
        """Configure the analyzer.
        BPM: tempo in beats per minute, used by the time<->beat converters.
        MUSIC_GRANULARITY: subdivisions per beat that durations snap to
            (e.g. 4 quantizes to sixteenth notes).
        DURATION: recording length in seconds (stored; not used by the
            conversion helpers below).
        """
        self.BPM = BPM # beats per min
        # TIME_SIGNATURE="4/4"
        # BEATS_PER_BAR=request.args.get('beats per bar')
        # BARS=request.args.get('number of bars')
        self.MUSIC_GRANULARITY = MUSIC_GRANULARITY
        self.DURATION = DURATION
def seconds_to_beats(self, seconds):
return round(seconds * self.BPM / 60*self.MUSIC_GRANULARITY)/self.MUSIC_GRANULARITY
def beats_to_seconds(self, beats):
return beats/self.BPM*60
    def find_beat_times(self, voiced_probs, times):
        """Segment the recording into alternating voiced/rest time spans.
        Takes:
            voiced_probs: per-frame voicing probabilities (librosa.pyin)
            times: per-frame timestamps, same length as voiced_probs
        Returns:
            list of [[start_time, end_time], voiced] entries covering the
            recording: sub-grid-length segments are dropped, adjacent rests
            merged, and gaps split evenly between neighbouring segments.
        """
        beat_indices = []
        voiced = False
        start = 0
        # TODO add deque?
        for (i, prob) in enumerate(voiced_probs):
            #voiced is before, prob is now
            # A frame exactly at the threshold continues the current state.
            if (prob > VOICED_PROB_THRESHOLD and not voiced) or (prob < VOICED_PROB_THRESHOLD and voiced):
                beat_indices.append([[start, i], voiced])
                start = i
                voiced = prob > VOICED_PROB_THRESHOLD
            if i == len(voiced_probs)-1:
                beat_indices.append([[start, i], voiced])
        beat_times = [[[times[j] for j in i[0]], i[1]] for i in beat_indices]
        # Drop segments shorter than one quantization grid unit.
        cleaned_beat_times = [i for i in beat_times if (
            i[0][1]-i[0][0]) > self.beats_to_seconds(1/self.MUSIC_GRANULARITY)]
        # join rests
        i = 0
        while i < len(cleaned_beat_times)-1:
            if (not cleaned_beat_times[i][1]) and not (cleaned_beat_times[i+1][1]):
                cleaned_beat_times[i][0][1] = cleaned_beat_times[i+1][0][1]
                cleaned_beat_times.pop(i+1)
                # NOTE(review): after a merge the index advances by two in
                # total, so a run of three rests is not fully merged here —
                # confirm whether that is intended.
                i += 1
            i += 1
        cleaned_beat_times[0][0][0] = 0
        # NOTE(review): this resets the *start* of the last segment to the
        # raw last segment's start; it looks like it may have been meant for
        # the end time — confirm.
        cleaned_beat_times[-1][0][0] = beat_times[-1][0][0]
        # Shallow copy: the inner [start, end] lists stay shared with
        # cleaned_beat_times, so the mid-point fixes below mutate both.
        filled_beat_times = cleaned_beat_times.copy()
        for i in range(len(filled_beat_times)-1):
            # if end of prev!=start of next
            if filled_beat_times[i][0][1] != filled_beat_times[i+1][0][0]:
                mid = (filled_beat_times[i][0][1] +
                       filled_beat_times[i+1][0][0])/2
                filled_beat_times[i][0][1] = mid
                filled_beat_times[i + 1][0][0] = mid
        return filled_beat_times # [[start, end], voiced]
def analyze(self):
y, sr = librosa.load(RECORDING_PATH)
f0, voiced_flag, voiced_probs = librosa.pyin(
y, fmin=librosa.note_to_hz('C2'), fmax=librosa.note_to_hz('C7'))
times = librosa.times_like(f0, sr=sr)
i=0
while i<len(times) and voiced_probs[i]<VOICED_PROB_THRESHOLD:
i+=1
f0=f0[i:]
voiced_probs=voiced_probs[i:]
times=times[0:-i]
beat_times = self.find_beat_times(voiced_probs, times)
beat_indices = [[[index_of_closest_in_array(times, i[0][0]),
index_of_closest_in_array(times, i[0][1])],
i[1]] for i in beat_times]
pitches = [librosa.hz_to_note(i) if not np.isnan(i)
else np.nan
for i in f0] # get most common pitch
clean_pitches = [most_common(pitches[i[0][0]:i[0][1]]) if i[1] == True
else "R"
for i in beat_indices] # get most common pitch
# beat_bars = [[[self.seconds_to_beats(j) for j in i[0]], i[1]]
# for i in beat_times]
beat_lengths = [self.seconds_to_beats(i[0][1] - i[0][0]) for i in beat_times]
# note in english
return [Note(clean_pitches[i], beat_lengths[i]) for i in range(len(beat_lengths))]
def record(self):
chunk = 1024 # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16 # 16 bits per sample
channels = 2
fs = 44100 # Record at 44100 samples per second
seconds = self.DURATION
filename = "backend/output.wav"
p = pyaudio.PyAudio() # Create an interface to PortAudio
print('Recording')
stream = p.open(format=sample_format,
channels=channels,
rate=fs,
frames_per_buffer=chunk,
input=True)
frames = [] # Initialize array to store frames
# Store data in chunks for 3 seconds
num_chunks = int(fs / chunk * seconds)
for i in range(4):
print(i+1)
time.sleep(60/self.BPM)
beat_per_chunk = 1/self.BPM*60*fs/chunk
metronome_chunks = [round(i*beat_per_chunk)
for i in range(0, round(num_chunks*beat_per_chunk))]
for i in range(0, num_chunks):
data = stream.read(chunk)
if i in metronome_chunks:
print(i//(round(1/self.BPM*60*fs/chunk)))
frames.append(data)
# Stop and close the stream
stream.stop_stream()
stream.close()
# Terminate the PortAudio interface
p.terminate()
print('Finished recording')
# Save the recorded data as a WAV file
wf = wave.open(filename, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(sample_format))
wf.setframerate(fs)
wf.writeframes(b''.join(frames))
wf.close()
def run(self):
self.record()
return get_track(self.analyze()) # list of bars of notes
| TheHong/VoiceIt | backend/note_analyzer.py | note_analyzer.py | py | 8,560 | python | en | code | 0 | github-code | 13 |
16015907747 | from inputdata import *
def interface():
    """Show the text menu once and dispatch the chosen action."""
    print("""1 - добавление персоны,
    2 - поиск,
    3 - вывод на экран,
    4 - импорт в файл
    5 - удаление персоны
    6 - изменить запись\n""")
    ask = int(input())
    if ask == 1:
        input_data()
    elif ask == 2:
        search()
    elif ask == 3:
        print_data()
    elif ask == 4:
        load()
    elif ask == 5:
        delete_line()
    elif ask == 6:
        # BUG FIX: this branch previously tested `ask == 5` a second time,
        # so menu item 6 ("изменить запись") was unreachable.
        change_line()
17054029944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KbIsvMaCode(object):
def __init__(self):
self._code = None
self._num = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def num(self):
return self._num
@num.setter
def num(self, value):
self._num = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.num:
if hasattr(self.num, 'to_alipay_dict'):
params['num'] = self.num.to_alipay_dict()
else:
params['num'] = self.num
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KbIsvMaCode()
if 'code' in d:
o.code = d['code']
if 'num' in d:
o.num = d['num']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/KbIsvMaCode.py | KbIsvMaCode.py | py | 1,183 | python | en | code | 241 | github-code | 13 |
11098829849 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 25 17:27:28 2018
@author: tao
"""
"""
已知两点,求过两点直线的方程。已知第三点,求第三点到直线的距离,过第三点与此点的垂线、平行线方程。以及垂足
已知两点为(x1,y1),(x2,y2)第三点为(x0,y0)
输出以上所有结果
k为斜率
"""
import csv
import math
fo=open('C:\work\直线距离垂足问题.txt','w')
varys=[]
i=0
#数据格式要求,前四个数据为确定直线的数据,最后两个数据为直线外一点
with open('C:\work\data\pdata2.txt','r',encoding='utf-8') as foo:
datas=csv.reader(foo,dialect="excel-tab")
for data in datas:
for da in data:
varys.append(float(da))
i+=1
x1=varys[0]
y1=varys[1]
x2=varys[2]
y2=varys[3]
x0=varys[4]
y0=varys[5]
if abs(y2-y1)<=1E-5:
A=0
B=1
C=-y1
k=0
if -y1>=0:
fo.write('直线方程为:\n y'+'+'+str(-y1)+'=0\n')
else:
fo.write('直线方程为: \ny'+'-'+str(y1)+'=0\n')
elif abs(x2-x1)<=1E-5:
A=1
B=0
C=-x1
if -x1<=0:
fo.write('直线方程为: \nx'+'-'+str(x1)+'=0\n')
else:
fo.write('直线方程为: \nx'+'+'+str(-x1)+'=0\n')
else:
k=(y2-y1)/(x2-x1)
A=-k
B=1
C=-y1+k*x1
print('直线方程为:'+str(A)+'x'+'+'+str(B)+'y'+str(C)+'=0')
fo.write('直线方程为:\n'+str(A)+'x'+'+'+str(B)+'y'+'+'+str(C)+'=0\n')
#定点到此直线的距离
d=abs(A*x0+B*y0+C)/math.sqrt(A*A+B*B)
print('此点到直线的距离为',d)
#垂线方程
if k==0:
A0=1
B0=0
C0=-x0
else:
A0=1/k
B0=1
C0=-y0-x0/k
print('垂线方程为:\n',str(A0)+'x+'+str(B0)+'y+'+str(C0)+'=0\n')
fo.write('垂线方程为:\n'+str(A0)+'x+'+str(B0)+'y+'+str(C0)+'=0\n')
#平行线
fo.write('平行线方程为:\n'+str(A)+'x'+'+'+str(B)+'y'+'+'+str(k*x0-y0)+'=0\n')
#求垂足
x3=(B*B*x0-A*B*y0-A*C)/(A*A+B*B)
y3=(-A*B*x0+A*A*y0-B*C)/(A*A+B*B)
print('垂足坐标为:\n(',x3,',',y3,')')
fo.write('此点到直线的距离为:\n'+str(d)+'\n')
fo.write('垂足坐标为:\n('+str(x3)+','+str(y3)+')\n')
#求到左端点距离,x2和y2都要改为x1和y1
s1=math.sqrt(math.pow((x3-x2),2)+math.pow((y3-y2),2))
print(s1)
fo.write('左端点到垂足的距离为:\n'+str(s1)+'\n')
fo.close()
| sirztao/caculate-lines-and-points | Points and lines caculations.py | Points and lines caculations.py | py | 2,292 | python | zh | code | 2 | github-code | 13 |
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

# Extract a region of interest (ROI) from the image using a binary mask.
img = cv.imread('D:/PycharmProjects/pythonProject1/Opencv 4.5/images/1.jpg')
# OpenCV loads BGR; convert to RGB so matplotlib displays true colors.
img_RGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
# eyes = img[420:570, 420:860]
# create a mask: zeros everywhere, 255 inside the rectangular ROI
# (rows 420:570, columns 420:860).
mask = np.zeros(img.shape[:2], np.uint8)
mask[420:570, 420:860] = 255
# bitwise AND keeps only the pixels where the mask is non-zero.
masked_img = cv.bitwise_and(img_RGB,img_RGB,mask=mask)
# Show original, mask, and masked result side by side.
plt.subplot(131), plt.imshow(img_RGB), plt.title('Image')
plt.subplot(132), plt.imshow(mask,'gray'), plt.title('Mask')
plt.subplot(133), plt.imshow(masked_img), plt.title('Mask_Image')
plt.show()
| Darling1116/Greeting_1116 | Opencv/lesson_4/ROI_2.py | ROI_2.py | py | 601 | python | en | code | 0 | github-code | 13 |
# BOJ 15686 "Chicken Delivery": keep M chicken stores out of all stores so
# that the summed city-block distance from every house to its nearest kept
# store is minimized.  Brute-forces all C(stores, M) subsets.
import sys; sys.stdin = open('15686_치킨배달_G5.txt', 'r')
from itertools import combinations
input = sys.stdin.readline

# N: grid size, M: number of stores to keep.
N, M = map(int, input().split())
stores = []
houses = []
dis = []
# Grid cells: 2 = chicken store, 1 = house, 0 = empty.
for i in range(N):
    tmp = list(map(int, input().split()))
    for j in range(N):
        if tmp[j] == 2:
            stores.append((i, j))
        elif tmp[j] == 1:
            houses.append((i, j))
comb = combinations(stores, M)
for com in comb:
    tmp_total = 0
    for house in houses:
        # Manhattan distance to the closest kept store; 0xffffff is a
        # sentinel larger than any possible distance.
        tmp = 0xffffff
        for can in com:
            tmp = min(tmp, abs(can[0] - house[0]) + abs(can[1] - house[1]))
        tmp_total += tmp
    dis.append(tmp_total)
print(min(dis))
| KimSoomae/Algoshipda | week14/G5/김병완_15686_치킨배달_G5.py | 김병완_15686_치킨배달_G5.py | py | 682 | python | en | code | 0 | github-code | 13 |
# install pip for python 3.9 if not installed
# check version using "$ pip3.9 --version"
# if pip installed for python3.9 then run "pip3.9 install camelcase"
# Demo of the third-party `camelcase` package: capitalizes each word.
import camelcase
c = camelcase.CamelCase()
txt = "hello world"
# Prints "Hello World" (hump() title-cases the string).
print(c.hump(txt))
| atulkrishnathakur/mypython3_9 | camel_case_package.py | camel_case_package.py | py | 245 | python | en | code | 0 | github-code | 13 |
27503805503 | from lxml import etree
import re
import asyncio
import aiohttp
import psycopg2
import time
from pymongo import MongoClient
class Themes(object):
    """A forum topic: title, URL, author, plus the price parsed from its body."""

    def __init__(self, title, url, author, text='', price=0, currency='грн'):
        self.title = title
        self.url = url
        self.author = author
        self.text = text
        self.price = price
        self.currency = currency

    @asyncio.coroutine
    def parsetheme(self):
        """Download the topic page and extract its text and asking price.

        Scans the body for "<number> <currency-word>" pairs (see the
        module-level ``reexpr``) and keeps the largest match.
        """
        currency = ["usd", "грн", "гривен", "гривень", "uah", "eur", "євро", "евро"]
        url = self.url
        page = yield from get(url)
        root = etree.HTML(page)
        text = root.xpath('//div[@class="content"]')[0].xpath('./descendant-or-self::text()')
        text = " ".join(text)
        prices = re.findall(reexpr, text)
        prices = list(filter(lambda x: True if x[1] in currency else False, prices))
        if len(prices) == 1:
            self.price = prices[0][0]
            self.currency = prices[0][1]
        elif len(prices) > 1:
            # NOTE(review): prices are strings, so this comparison is
            # lexicographic ('9' beats '10') — confirm whether a numeric
            # comparison was intended before changing it.
            maxprice = ('0', "")
            for price in prices:
                if price[0] > maxprice[0]:
                    maxprice = price
            self.price = maxprice[0]
            self.currency = maxprice[1]
        self.text = text

    def todatabase(self, cur):
        """Insert this theme via cursor *cur* unless (title, author) exists.

        BUG FIX: psycopg2 placeholders must be lowercase %s; the original
        used %S, which raises "unsupported format character".
        """
        cur.execute("INSERT INTO theme (title, url, author, text, price, currency) SELECT %s, %s, %s, %s, %s, %s "
                    "WHERE NOT EXISTS (SELECT * FROM theme WHERE title = %s AND author = %s)",
                    (self.title, self.url, self.author, self.text, self.price, self.currency, self.title, self.author))

    def todict(self):
        """Return the theme as a plain dict (used for the MongoDB sink)."""
        return {'title': self.title, 'url': self.url, 'author': self.author,
                'text': self.text, 'price': self.price, 'currency': self.currency}
@asyncio.coroutine
def get(url):
    """Issue an HTTP GET for *url* and return the raw response body."""
    pending = aiohttp.request(method='GET', url=url)
    resp = yield from asyncio.wait_for(pending, timeout=60)
    data = yield from resp.read()
    return data
@asyncio.coroutine
def parsepage(url, announces=False):
    """Fetch one forum listing page and build Themes for its topics.

    With announces=True only announce topics are returned; otherwise only
    the regular (non-announce) topics.
    """
    page = yield from get(url)
    root = etree.HTML(page)
    if announces:
        selector = '//li[contains(@class, "row bg")]/dl[contains(@class, "announce")]'
    else:
        selector = '//li[contains(@class, "row bg")]/dl[not(contains(@class, "announce"))]'
    instances = []
    for entry in root.xpath(selector):
        link = entry.xpath('.//dt/div/a[@class="topictitle"]')[0]
        # Strip the leading "./" from the relative href, then drop the
        # session id ("&sid=...") from the query string.
        topic_url = "http://forum.overclockers.ua/" + link.attrib['href'][2:]
        topic_url = topic_url.split('&sid')[0]
        author = entry.xpath('.//dd[@class="author"]/a/text()')[0]
        instances.append(Themes(link.text, topic_url, author))
    return instances
def dump_to_postgre(instances):
    """Persist parsed themes into the local PostgreSQL database.

    Creates the table and the (title, author) index on first use, then
    delegates each row insert to Themes.todatabase.
    """
    conn_params = "dbname='postgres' user='postgres' host='localhost' password='1'"
    with psycopg2.connect(conn_params) as conn:
        cur = conn.cursor()
        cur.execute("""CREATE TABLE IF NOT EXISTS theme(
                    id SERIAL PRIMARY KEY,
                    title TEXT NOT NULL,
                    url TEXT NOT NULL,
                    author TEXT NOT NULL,
                    text TEXT NOT NULL,
                    price TEXT NOT NULL,
                    currency TEXT NOT NULL)""")
        cur.execute("""CREATE INDEX IF NOT EXISTS title_author_index ON theme(title, author);""")
        for instance in instances:
            instance.todatabase(cur)
        conn.commit()
def dump_to_mongo(instances):
    """Upsert parsed themes (keyed by url) into the test MongoDB database."""
    db = MongoClient().test_database
    for instance in instances:
        doc = instance.todict()
        # Upsert so re-running the scraper refreshes existing topics.
        db.themes.update({'url': doc['url']}, doc, upsert=True)
if __name__ == '__main__':
    # Matches "<number><optional space><currency word>", e.g. "1500 грн".
    reexpr = re.compile(r'([\d,.]+)\s*([a-zA-Zа-яА-Я]+)')
    loop = asyncio.get_event_loop()
    url = "http://forum.overclockers.ua/viewforum.php?f=26&start="
    try:
        pages = int(input("How many pages to parse? "))
    except (ValueError, EOFError):
        # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only bad/missing input should fall back to one page.
        pages = 1
    t0 = time.time()
    # Announce topics are listed once; regular topics page 40 at a time.
    tasks = [parsepage(url, announces=True)]
    for i in range(pages):
        tasks.append(parsepage(url + str(i * 40)))
    results = loop.run_until_complete(asyncio.wait(tasks))[0]
    instances = []
    for i in results:
        instances += i.result()
    print("{0} results".format(len(instances)))
    # Fetch and parse every topic page concurrently.
    tasks = []
    for i in instances:
        tasks.append(i.parsetheme())
    results = loop.run_until_complete(asyncio.wait(tasks))
    # dump_to_postgre(instances)  # alternative: PostgreSQL sink
    dump_to_mongo(instances)
    print("Done in {0} seconds".format(time.time() - t0))
    loop.close()
| illusi0n455/SoftGroup-test | homework6/3.py | 3.py | py | 4,702 | python | en | code | 0 | github-code | 13 |
11578031720 | from django.contrib.auth import get_user_model
from django.db import models
from django.utils.translation import ugettext as _
from django_mailbox.models import Mailbox
__all__ = ["UserMailbox"]
class UserMailbox(models.Model):
    """Associates a django_mailbox Mailbox with its owning user.

    A user may own many mailboxes; each mailbox belongs to exactly one user.
    NOTE(review): `ugettext` (imported as `_` above) was removed in
    Django 4.0 — confirm the project's Django version.
    """
    # One-to-one with the underlying Mailbox; doubles as this row's
    # primary key, so there is at most one UserMailbox per Mailbox.
    mailbox = models.OneToOneField(
        Mailbox, on_delete=models.CASCADE, related_name="user", primary_key=True
    )
    # Owning user; reverse accessor is user.mailboxes.
    user = models.ForeignKey(
        get_user_model(), on_delete=models.CASCADE, related_name="mailboxes"
    )
    # Set once on insert; excluded from forms/admin edits.
    created_on = models.DateTimeField(auto_now_add=True, editable=False)

    class Meta:
        verbose_name = _("User Mailbox")
        verbose_name_plural = _("User Mailboxes")
        ordering = ("user",)

    def __str__(self):
        # Display the mailbox by its owner's string representation.
        return self.user.__str__()
| Maible/maible | web/models.py | models.py | py | 738 | python | en | code | 1 | github-code | 13 |
35622061408 | import pygame
from config import Config
class Dino:
    """The player dinosaur: tracks its center position and draws its sprite."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.image = "dinoRun.png"

    def dinoDraw(self, screen, duck, size):
        """Render the dino on *screen* centered at (self.x, self.y).

        *size* is the (width, height) to scale the sprite to; while
        ducking a different sprite is used and the drawn height is
        reduced by 20 pixels.
        """
        width, height = size
        if duck:
            self.image = ".\images\dinoDuck.png"
            x_shrink, y_shrink = 0, 20
        else:
            self.image = ".\images\dinoRun.png"
            x_shrink, y_shrink = 0, 0
        sprite = pygame.image.load(self.image)
        sprite = pygame.transform.scale(sprite, (width - x_shrink, height - y_shrink))
        self.dino = sprite
        # Shrink the hitbox so collisions feel fair, then center it.
        rect = sprite.get_rect().inflate(-40, -20)
        rect.center = (self.x, self.y)
        self.dinoRect = rect
        screen.blit(sprite, rect)
screen.blit(self.dino, self.dinoRect) | hariskhawja/Dino-Game | dino.py | dino.py | py | 767 | python | en | code | 0 | github-code | 13 |
23030631592 | #!/usr/bin/env python3
import os
import sys
import torch
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
import webdataset as wds
from glob import glob
import io
import torchaudio
logger = logging.getLogger(__name__)
# Brain class for speech recognition training
# Brain class for speech recognition training
class ShallowFusion(sb.Brain):
    """Evaluation-only Brain that decodes with the hparams beam search.

    No parameters are trained here: compute_objectives only accumulates
    WER/CER metrics, and on_evaluate_start restores pretrained LM and ASR
    checkpoints before decoding.
    """
    def compute_forward(self, batch, stage):
        """Extract features, encode, and beam-search; returns hypotheses."""
        # We first move the batch to the appropriate device.
        batch = batch.to(self.device)
        feats = self.hparams.compute_features(batch.wav.data)
        feats = self.modules.normalize(feats, batch.wav.lengths)
        # Running the encoder (prevent propagation to feature extraction)
        encoded_signal = self.modules.encoder(feats.detach())
        predictions, _ = self.hparams.test_search(
            encoded_signal, batch.wav.lengths
        )
        return predictions
    def compute_objectives(self, predictions, batch, stage):
        """Accumulate WER/CER for the batch; returns a placeholder zero loss."""
        # Strip BOS/EOS/UNK tokens before detokenizing the hypotheses.
        specials = [self.hparams.bos_index, self.hparams.eos_index, self.hparams.unk_index]
        predictions = [
            [token for token in pred if token not in specials]
            for pred in predictions
        ]
        predicted_words = [
            self.hparams.tokenizer.decode_ids(prediction).split(" ")
            for prediction in predictions
        ]
        target_words = [words.split(" ") for words in batch.trn]
        # Monitor word error rate and character error rate at
        # valid and test time.
        self.wer_metric.append(batch.__key__, predicted_words, target_words)
        self.cer_metric.append(batch.__key__, predicted_words, target_words)
        # Nothing is optimized during evaluation, so return a dummy loss.
        return torch.tensor([0.])
    def on_stage_start(self, stage, epoch):
        # Fresh metric accumulators for every evaluation stage.
        self.cer_metric = self.hparams.cer_computer()
        self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Summarize WER/CER and write per-utterance stats and decodes.

        Arguments
        ---------
        stage : sb.Stage
            One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST
        stage_loss : float
            The average loss for all of the data processed in this stage
            (unused here — evaluation produces no real loss).
        epoch : int
            The currently-starting epoch. This is passed
            `None` during the test stage.
        """
        stage_stats = {}
        stage_stats["CER"] = self.cer_metric.summarize("error_rate")
        stage_stats["WER"] = self.wer_metric.summarize("error_rate")
        # Full alignment details go to the WER file ...
        with open(self.hparams.wer_file, "w") as w:
            self.wer_metric.write_stats(w)
        # ... and a plain "utt-id hypothesis" transcript to the decode file.
        with open(self.hparams.decode_text_file, "w") as fo:
            for utt_details in self.wer_metric.scores:
                print(utt_details["key"], " ".join(utt_details["hyp_tokens"]), file=fo)
    def on_evaluate_start(self, max_key=None, min_key=None):
        """Restore the best LM (by loss) and ASR (by WER) checkpoints."""
        lm_ckpt = self.hparams.lm_ckpt_finder.find_checkpoint(min_key="loss")
        self.hparams.lm_pretrainer.collect_files(lm_ckpt.path)
        self.hparams.lm_pretrainer.load_collected(self.device)
        asr_ckpt = self.hparams.asr_ckpt_finder.find_checkpoint(min_key="WER")
        self.hparams.asr_pretrainer.collect_files(asr_ckpt.path)
        self.hparams.asr_pretrainer.load_collected(self.device)
def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.

    It also defines the data processing pipeline through user-defined
    functions.

    Arguments
    ---------
    hparams : dict
        This dictionary is loaded from the `train.yaml` file, and it includes
        all the hyperparameters needed for dataset construction and loading.

    Returns
    -------
    datasets : dict
        Dictionary with "valid" and the various "test-*" keys mapping to
        batched WebDataset pipelines.
    """
    def tokenize(sample):
        """Attach BOS/EOS-decorated token tensors for the sample transcript."""
        text = sample["trn"]
        # quick hack for one sample in text of test2021:
        text = text.replace(" <NOISE>", "")
        fulltokens = torch.LongTensor(
            [hparams["bos_index"]] + hparams["tokenizer"].encode(text) + [hparams["eos_index"]]
        )
        sample["tokens"] = fulltokens[1:-1]
        sample["tokens_bos"] = fulltokens[:-1]
        sample["tokens_eos"] = fulltokens[1:]
        return sample

    def make_dataset(shards_key, with_meta=False, extra_map=None):
        """Build one shard-backed, batched WebDataset pipeline.

        All evaluation sets share the same structure; they differ only in
        the shard list, whether a meta.json accompanies each sample, and
        an optional extra per-sample map (used for audio normalization).
        """
        rename_kwargs = {"trn": "transcript.txt", "wav": "audio.pth"}
        if with_meta:
            rename_kwargs["meta"] = "meta.json"
        dataset = (
            wds.WebDataset(hparams[shards_key])
            .decode()
            .rename(**rename_kwargs)
            .map(tokenize)
        )
        if extra_map is not None:
            dataset = dataset.map(extra_map)
        return dataset.batched(
            batchsize=hparams["validbatchsize"],
            collation_fn=sb.dataio.batch.PaddedBatch,
            partial=True
        )

    normalizer = sb.dataio.preprocess.AudioNormalizer()

    def normalize_audio(sample):
        """Resample the raw signal to the normalizer's target sample rate."""
        signal = sample["wav"]
        samplerate = sample["meta"]["samplerate"]
        sample["wav"] = normalizer(signal, samplerate)
        sample["meta"]["samplerate"] = normalizer.sample_rate
        return sample

    return {
        "valid": make_dataset("validshards"),
        "test-seen": make_dataset("test_seen_shards"),
        "test-unseen": make_dataset("test_unseen_shards"),
        "test2021": make_dataset("test_2021_shards"),
        "test-speecon": make_dataset("test_speecon_shards", with_meta=True),
        "test-yle": make_dataset("test_yle_shards", with_meta=True),
        # test-lp audio arrives at varying rates and is resampled first.
        "test-lp": make_dataset("test_lp_shards", with_meta=True,
                                extra_map=normalize_audio),
    }
if __name__ == "__main__":
    # Reading command line arguments
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    # Load hyperparameters file with command-line overrides
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    datasets = dataio_prepare(hparams)
    # Trainer initialization
    asr_brain = ShallowFusion(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
    )
    # batch_size=None because the WebDataset pipelines are pre-batched.
    test_loader_kwargs = hparams.get("test_loader_kwargs", {})
    test_loader_kwargs.setdefault("batch_size", None)
    # Run evaluation on the configured test set; the pretrained LM/ASR
    # checkpoints are restored in ShallowFusion.on_evaluate_start.
    test_stats = asr_brain.evaluate(
        test_set=datasets[hparams["test_data_id"]],
        test_loader_kwargs = test_loader_kwargs
    )
| Gastron/sb-2015-2020-kevat_e2e | eval_with_shallow_fusion.py | eval_with_shallow_fusion.py | py | 8,567 | python | en | code | 0 | github-code | 13 |
25811072084 | """
TCP Window scan
nmap flag: -sW
"""
import socket
from impacket import ImpactPacket, ImpactDecoder
from impacket.ImpactPacket import TCP
'''
This technique is same as the ACK scan but this goes a bit further to figure out
if the port is open or closed by checking the window size.
If window is positive, port is open. If it's zero, port is closed.
This works only on a few systems and is not very reliable.
'''
# NOTE: raw sockets require root/administrator privileges to create.
src = '10.0.2.15'
dst = '10.0.2.4'
sport = 12345  # Random source port
dport = 80  # Port that we want to probe
# Create a new IP packet and set its source and destination addresses.
# Construct the IP Packet
ip = ImpactPacket.IP()
ip.set_ip_src(src)
ip.set_ip_dst(dst)
# Construct the TCP Segment
tcp = ImpactPacket.TCP()
tcp.set_th_sport(sport)  # Set the source port in TCP header
tcp.set_th_dport(dport)  # Set the destination in TCP header
tcp.auto_checksum = 1
# Window scans (like ACK scans) send a lone ACK segment.
tcp.set_ACK()
# Put the TCP Segment into the IP Packet
ip.contains(tcp)
# Create a Raw Socket to send the above constructed Packet
# socket(<domain>, <type>, <protocol>)
s = socket.socket(socket.AF_INET, socket.SOCK_RAW,
                  6)  # protocol value can also be fetched like this: socket.getprotobyname('tcp')
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
# calls sendto() syscall
s.sendto(ip.get_packet(), (dst, 0))
# No reply within 3 seconds means a firewall dropped the probe.
s.settimeout(3)
# UNCOMMENT BELOW LINE IF src AND dst ARE 127.0.0.1
# packet = s.recvfrom(4096)[0] # This will show the packet sent from sender to receiver.
try:
    packet = s.recvfrom(4096)[0]  # This is the packet we're interested in. Receiver to sender packet
except socket.timeout:
    print('target is filtered')
    exit(0)
# Decode the received Packet
res_ip = ImpactDecoder.IPDecoder().decode(packet)
res_tcp: TCP = res_ip.child()  # Get the response TCP Segment from the IP Packet
print("Pretty print the IP Packet:")
print(res_ip)
print("Flag bit format: URG-ACK-PSH-RST-SYN-FIN")
print("Request Flag bits: " + bin(tcp.get_th_flags())[2:].zfill(6))
flag_bits = bin(res_tcp.get_th_flags())[2:].zfill(6)
print("Response Flag bits: " + flag_bits)
# Flag format: URG-ACK-PSH-RST-SYN-FIN
# if RST is set (and nothing else): the port answered, so it is unfiltered;
# the TCP window size of the RST then hints open (>0) vs closed (0).
if flag_bits == '000100':
    print('target is unfiltered')
    window_size = res_tcp.get_th_win()
    if window_size == 0:
        print('%d is closed' % dport)
    elif window_size > 0:
        print('%d is open' % dport)
s.close()
'''
This technique didn't work when I tested it with
Linux (5.10.0-6parrot1-amd64 #1 SMP Debian 5.10.28-6parrot1 (2021-04-12) x86_64 GNU/Linux).
Window size was always zero.
''' | nandan-desai-extras/nmap-port-scan-works | tcp_window.py | tcp_window.py | py | 2,563 | python | en | code | 0 | github-code | 13 |
22153153510 | ############################## Load package ##############################
import os
import cv2
import sys
import glob
import math
import json
import time
import random
import shutil
import argparse
import requests
import functools
import numpy as np
from numpy import asarray
from numpy import moveaxis
from numpy import expand_dims
from PIL import Image
from imutils import paths
from scipy.linalg import sqrtm
import tensorflow as tf
from tensorflow.python.ops import array_ops
import keras
from keras import initializers
from keras import backend as K
from keras.optimizers import Adam
from keras.utils import plot_model
from keras.preprocessing import image
from keras.models import Sequential, Model
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, Conv2DTranspose, Reshape, Flatten
from keras.layers import Input, Dense, LeakyReLU, BatchNormalization, Dropout
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.inception_v3 import InceptionV3, preprocess_input
############################## Define parse ##############################
# Command-line arguments.  NOTE: the boolean-like flags (training,
# horizontal_flip, vertical_flip) arrive as the strings 'True'/'False',
# not real booleans.
parser = argparse.ArgumentParser()
parser.add_argument("-base_dir", "--base_dir", type=str, default=None, help='Image path')
parser.add_argument("-training", "--training", type=str, default='False', help='Selection for Generate image')
parser.add_argument("-rotation_range", "--rotation_range", type=int, default=90, help='Rotation range 0-90')
parser.add_argument("-shear_range", "--shear_range", type=float, default=0.1, help='Shear range 0.1-0.2')
parser.add_argument("-horizontal_flip", "--horizontal_flip", type=str, default='True', help='Select True or False for Horizontal flip')
parser.add_argument("-vertical_flip", "--vertical_flip", type=str, default='True', help='Select True or False for Vertical flip')
parser.add_argument("-jobid", "--jobid", type=str, default=None, help='Write anything')
parser.add_argument("-url_prefix", "--url_prefix", type=str, default=None, help='URL address')
args = parser.parse_args()
#################### Define funcion for image Generation ####################
# Target number of augmented images to produce before GAN training.
multiply_number_for_2_increaing = 400
# The whole pipeline works on 48x48 single-channel (grayscale) images.
height = 48
width = 48
channels = 1
def resize_down_size_image(base_directory, original_images_dir):
    """Downscale every source image to the pipeline size (width x height)
    and write the results into <base_directory>/1_1_down_size_images.

    The output directory is recreated from scratch on every call.
    """
    list_image_paths = list(paths.list_images(original_images_dir))
    save_path = os.path.join(base_directory, '1_1_down_size_images')
    # Start from an empty output directory.
    if os.path.isdir(save_path):
        shutil.rmtree(save_path)
    os.makedirs(save_path)
    for path in list_image_paths:
        image = Image.open(path)
        # Use the module-wide target size instead of repeating the 48
        # literal, so the constants stay the single source of truth.
        resize_img = image.resize((width, height))
        image_label = path.split(os.path.sep)[-1]
        resize_img.save(save_path + '/' + image_label)
def increasing_down_size_image(base_directory):
    """Augment the downscaled images (rotation + flips) until roughly
    multiply_number_for_2_increaing images exist, saving the augmented
    PNGs into <base_directory>/1_2_increasing_down_size_images.
    """
    image_path_for_increasing = os.path.join(base_directory, '1_1_down_size_images')
    list_image_paths_for_increasing = list(paths.list_images(image_path_for_increasing))
    save_path = os.path.join(base_directory, '1_2_increasing_down_size_images')
    # Recreate the output directory from scratch.
    if os.path.isdir(save_path):
        shutil.rmtree(save_path)
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    # NOTE(review): rescale=1./255 here combines with the manual
    # "/ 128. - 1" normalization below, scaling the data twice — confirm
    # this double scaling is intentional.
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        rotation_range=90,
        horizontal_flip=True,
        vertical_flip=True,
        fill_mode='nearest'
    )
    image_array = []
    for index, img in enumerate(list_image_paths_for_increasing):
        image = cv2.imread(img, cv2.IMREAD_GRAYSCALE)
        image = img_to_array(image)
        image_array.append(image)
    # Normalize pixel values to roughly [-1, 1).
    image_array = np.array(image_array, dtype="float") / 128. - 1
    train_datagen.fit(image_array)
    # Number of augmentation passes needed to reach the target count.
    multiply_number = math.ceil(multiply_number_for_2_increaing / len(list_image_paths_for_increasing))
    i = 0
    # flow() loops forever, so break after multiply_number batches.
    for batch in train_datagen.flow(image_array,
                                    batch_size = len(list_image_paths_for_increasing),
                                    save_to_dir = save_path,
                                    save_prefix='bw',
                                    save_format='png'):
        i += 1
        if i > (multiply_number - 1):
            break
def generate_image(base_directory):
    """Train a DCGAN on the augmented images and save generated samples.

    Reads 48x48 grayscale images from 1_2_increasing_down_size_images,
    trains for 150 epochs, and during the final epoch writes generated
    images into <base_directory>/1_3_generated_images.
    """
    image_path_for_generating = os.path.join(base_directory, '1_2_increasing_down_size_images')
    list_image_paths_for_generating = list(paths.list_images(image_path_for_generating))
    save_path = os.path.join(base_directory, '1_3_generated_images')
    # Recreate the output directory from scratch.
    if os.path.isdir(save_path):
        shutil.rmtree(save_path)
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    random.shuffle(list_image_paths_for_generating)
    train_datas = []
    for index, img_path in enumerate(list_image_paths_for_generating):
        image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        image = img_to_array(image)
        train_datas.append(image)
    train_datas = np.array(train_datas)
    x_train = train_datas.reshape((train_datas.shape[0],) + (height, width, channels)).astype('float32')
    # Scale pixels to [-1, 1] to match the generator's tanh output.
    X_train = (x_train - 127.5) / 127.5
    ## define model
    # latent space dimension
    latent_dim = 100
    # Image demension
    init = initializers.RandomNormal(mean=0.0, stddev=0.02, seed=None)
    # Generator network: latent vector -> 12x12 feature map -> two 2x
    # upsamplings -> 48x48x1 image in [-1, 1].
    generator = Sequential()
    # FC:
    generator.add(Dense(144, input_shape=(latent_dim,), kernel_initializer=init))
    # FC:
    generator.add(Dense(12*12*128))
    generator.add(Reshape((12, 12, 128)))
    generator.add(Dropout(0.5))
    # Conv 1:
    generator.add(Conv2DTranspose(128, kernel_size=2, strides=2, padding='same'))
    generator.add(BatchNormalization(momentum=0.8))
    generator.add(LeakyReLU(0.2))
    # Conv 2:
    generator.add(Conv2DTranspose(128, kernel_size=2, strides=2, padding='same'))
    generator.add(BatchNormalization(momentum=0.8))
    generator.add(LeakyReLU(0.2))
    # Conv 3:
    generator.add(Conv2DTranspose(64, kernel_size=2, strides=1, padding='same'))
    generator.add(BatchNormalization(momentum=0.8))
    generator.add(LeakyReLU(0.2))
    # Conv 4:
    generator.add(Conv2DTranspose(1, kernel_size=2, strides=1, padding='same', activation='tanh'))
    # Discriminator network: 48x48x1 image -> real/fake probability.
    discriminator = Sequential()
    # Conv 1:
    discriminator.add(Conv2D(64, kernel_size=1, strides=1, padding='same', input_shape=(48, 48, 1), kernel_initializer=init))
    discriminator.add(LeakyReLU(0.2))
    # Conv 2:
    discriminator.add(Conv2D(64, kernel_size=2, strides=1, padding='same'))
    discriminator.add(BatchNormalization(momentum=0.8))
    discriminator.add(LeakyReLU(0.2))
    # Conv 3:
    discriminator.add(Conv2D(128, kernel_size=2, strides=2, padding='same'))
    discriminator.add(BatchNormalization(momentum=0.8))
    discriminator.add(LeakyReLU(0.2))
    # Conv 4:
    discriminator.add(Conv2D(256, kernel_size=2, strides=2, padding='same'))
    discriminator.add(BatchNormalization(momentum=0.8))
    discriminator.add(LeakyReLU(0.2))
    # FC
    discriminator.add(Flatten())
    discriminator.add(Dense(256))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dense(512))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.5))
    # Output
    discriminator.add(Dense(1, activation='sigmoid'))
    # Optimizer
    optimizer = Adam(lr=0.0002, beta_1=0.5)
    discriminator.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy'])
    # Freeze the discriminator inside the combined model so generator
    # updates do not touch its weights.
    discriminator.trainable = False
    z = Input(shape=(latent_dim,))
    img = generator(z)
    decision = discriminator(img)
    d_g = Model(inputs=z, outputs=decision)
    d_g.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy'])
    epochs = 150
    batch_size = 32
    # One-sided label smoothing: real labels become 0.9 instead of 1.
    smooth = 0.1
    real = np.ones(shape=(batch_size, 1))
    fake = np.zeros(shape=(batch_size, 1))
    d_loss = []
    d_g_loss = []
    for e in range(epochs):
        print(e, file=sys.stderr, flush=True)
        for i in range(len(X_train) // batch_size):
            # Train Discriminator weights
            discriminator.trainable = True
            # Real samples
            X_batch = X_train[i*batch_size:(i+1)*batch_size]
            d_loss_real = discriminator.train_on_batch(x=X_batch, y=real*(1-smooth))
            # Fake samples
            z = np.random.normal(loc=0, scale=1, size=(batch_size, latent_dim))
            X_fake = generator.predict_on_batch(z)
            d_loss_fake = discriminator.train_on_batch(x=X_fake, y=fake)
            # Discriminator loss
            d_loss_batch = 0.5 * (d_loss_real[0] + d_loss_fake[0])
            # Train Generator weights
            discriminator.trainable = False
            d_g_loss_batch = d_g.train_on_batch(x=z, y=real)
            samples = batch_size
            # Only the final epoch's generations are saved to disk.
            if e == (epochs-1):
                for k in range(len(X_batch)):
                    x_fake = generator.predict(np.random.normal(loc=0, scale=1, size=(samples, latent_dim)))
                    img = keras.preprocessing.image.array_to_img(x_fake[k] * 255., scale=False)
                    img.save(os.path.join(save_path, 'generated_wafer' + str(e) + '_' + str(i) + '_' + str(k) +'.png'))
            d_loss.append(d_loss_batch)
            d_g_loss.append(d_g_loss_batch[0])
def save_binarized_image(base_directory):
    """Binarize the generated wafer images and save the results.

    Reads every image under <base_directory>/1_3_generated_images, applies a
    fixed threshold of 140 (pixels above become 255, the rest 0) and writes
    the black/white copies into a freshly created
    <base_directory>/1_4_binarized_images directory.
    """
    source_dir = os.path.join(base_directory, '1_3_generated_images')
    output_dir = os.path.join(base_directory, '1_4_binarized_images')
    # Always start from an empty output directory.
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    for idx, image_path in enumerate(paths.list_images(source_dir)):
        gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        _, binarized = cv2.threshold(gray, 140, 255, cv2.THRESH_BINARY)
        out_name = f'binarization_wafer_{idx}(140).png'
        Image.fromarray(binarized).save(os.path.join(output_dir, out_name))
def resize_upsize_image(base_directory, image_name):
    """Upscale the binarized wafer images to 120x120 and save them.

    Images are read from <base_directory>/1_4_binarized_images and written to
    an output directory named after *image_name*.  If that directory already
    exists, numbered fallbacks "(2)" .. "(9)" are tried so previous results
    are not overwritten.

    Returns the directory the resized images were written to.
    """
    source_dir = os.path.join(base_directory, '1_4_binarized_images')
    # First choice keeps the historical capitalisation ("Images"); the
    # numbered fallbacks use "images" — preserved byte-for-byte so this stays
    # compatible with directories created by earlier runs.
    save_path = os.path.join(base_directory, 'AI_Generated_Images_' + image_name)
    if os.path.isdir(save_path):
        # Replaces eight copy-pasted nested "if isdir" blocks with a loop;
        # if all of (2)..(9) exist, (9) is reused, exactly as before.
        for suffix_number in range(2, 10):
            save_path = os.path.join(
                base_directory,
                'AI_Generated_images_' + image_name + '(' + str(suffix_number) + ')')
            if not os.path.isdir(save_path):
                break
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    for image_path in paths.list_images(source_dir):
        image = Image.open(image_path)
        resized = image.resize((120, 120))
        file_name = image_path.split(os.path.sep)[-1]
        resized.save(save_path + '/' + file_name)
    return save_path
########## Incressing image for compare ##########
def increasing_image_for_compare(base_directory, original_images_dir):
    """Augment the original wafer images to build the FID/IS comparison set.

    Reads every image under *original_images_dir*, applies random
    rotation/flip augmentation and writes the augmented copies into
    <base_directory>/3_1_original_size_images_for_compare.  The total number
    of batches is derived from the module-level constant
    ``multiply_number_for_2_increaing`` (defined elsewhere in this file).
    """
    # image_path_for_increasing = os.path.join(base_directory, 'original_images')
    image_path_for_increasing = original_images_dir
    list_image_paths_for_increasing = list(paths.list_images(image_path_for_increasing))
    save_path = os.path.join(base_directory, '3_1_original_size_images_for_compare')
    # Recreate the output directory from scratch.
    if os.path.isdir(save_path):
        shutil.rmtree(save_path)
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        rotation_range=90,
        horizontal_flip=True,
        vertical_flip=True,
        fill_mode='nearest'
    )
    image_array = []
    for index, img in enumerate(list_image_paths_for_increasing):
        image = cv2.imread(img, cv2.IMREAD_GRAYSCALE)
        image = img_to_array(image)
        image_array.append(image)
    # NOTE(review): pixels are scaled to roughly [-1, 1] here even though the
    # generator above also applies rescale=1./255 — double scaling; confirm
    # this is intentional.
    image_array = np.array(image_array, dtype="float") / 128. - 1
    train_datagen.fit(image_array)
    multiply_number = math.ceil(multiply_number_for_2_increaing / len(list_image_paths_for_increasing))
    i = 0
    # flow() yields batches forever; stop after multiply_number batches.
    for batch in train_datagen.flow(image_array,
                                    batch_size = len(list_image_paths_for_increasing),
                                    save_to_dir = save_path,
                                    save_prefix='bw',
                                    save_format='png'):
        i += 1
        if i > (multiply_number - 1):
            break
############################## if training == "False" ##############################
def training_false_increasing_image(base_directory, original_images_dir, rotation_range, shear_range, horizontal_flip, vertical_flip, image_name):
    """Augment the user's images with the requested transforms (training == "False" path).

    Reads every image under *original_images_dir*, fits an
    ``ImageDataGenerator`` with the caller-supplied augmentation parameters
    and writes the augmented copies into a "User_Generated_Images_<name>"
    directory under *base_directory*.  Numbered fallbacks "(2)" .. "(9)" are
    tried when the directory already exists.

    Returns the directory the augmented images were written to.
    """
    image_path_for_increasing = original_images_dir
    list_image_paths_for_increasing = list(paths.list_images(image_path_for_increasing))
    # First choice keeps the historical capitalisation ("Images"); numbered
    # fallbacks use "images" — preserved byte-for-byte for compatibility with
    # directories created by earlier runs.
    save_path = os.path.join(base_directory, 'User_Generated_Images_' + image_name)
    if os.path.isdir(save_path):
        # Replaces eight copy-pasted nested "if isdir" blocks with a loop;
        # if all of (2)..(9) exist, (9) is reused, exactly as before.
        for suffix_number in range(2, 10):
            save_path = os.path.join(
                base_directory,
                'User_Generated_images_' + image_name + '(' + str(suffix_number) + ')')
            if not os.path.isdir(save_path):
                break
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        rotation_range=rotation_range,
        shear_range=shear_range,
        horizontal_flip=horizontal_flip,
        vertical_flip=vertical_flip,
        fill_mode='nearest'
    )
    image_array = []
    for image_path in list_image_paths_for_increasing:
        image_array.append(img_to_array(cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)))
    image_array = np.array(image_array, dtype="float") / 128. - 1
    train_datagen.fit(image_array)
    # Number of batches needed to reach the module-level target count.
    multiply_number = math.ceil(multiply_number_for_2_increaing / len(list_image_paths_for_increasing))
    i = 0
    # flow() yields batches forever; stop after multiply_number batches.
    for batch in train_datagen.flow(image_array,
                                    batch_size=len(list_image_paths_for_increasing),
                                    save_to_dir=save_path,
                                    save_prefix='bw',
                                    save_format='png'):
        i += 1
        if i > (multiply_number - 1):
            break
    return save_path
############################## function related to IS ##############################
def image_array_for_is(path_for_is):
    """Load every image under *path_for_is* as a channels-first array.

    Returns a numpy array of shape (n_images, 3, H, W), the NCHW layout
    expected by the Inception Score helpers below.
    """
    image_array = []
    # The original loop used enumerate() with an unused counter; dropped.
    for image_path in paths.list_images(path_for_is):
        image = img_to_array(cv2.imread(image_path, cv2.IMREAD_COLOR))
        # HWC -> CHW (channels first) for the Inception graph.
        image_array.append(moveaxis(image, 2, 0))
    return np.array(image_array)
# TF-GAN utilities and a global interactive session shared by the Inception
# Score helpers below.
tfgan = tf.contrib.gan
#session=tf.compat.v1.InteractiveSession()
session=tf.InteractiveSession()
# A smaller BATCH_SIZE reduces GPU memory usage, but at the cost of a slight slowdown
BATCH_SIZE = 64
# Frozen Inception-v1 checkpoint used to classify generated images.
INCEPTION_URL = 'http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05.tar.gz'
INCEPTION_FROZEN_GRAPH = 'inceptionv1_for_inception_score.pb'
# Run images through Inception.
# Placeholder for NCHW float image batches, fed in get_inception_probs().
#inception_images = tf.compat.v1.placeholder(tf.float32, [None, 3, None, None])
inception_images = tf.placeholder(tf.float32, [None, 3, None, None])
def inception_logits(images = inception_images, num_splits = 1):
    """Build the TF graph mapping NCHW images to Inception-v1 logits.

    Images are transposed to NHWC, resized to 299x299 and run through the
    frozen Inception graph downloaded from INCEPTION_URL.  Returns the logits
    tensor; it is evaluated later through the module-level `session`.
    """
    # NCHW -> NHWC, the layout the Inception graph expects.
    images = tf.transpose(images, [0, 2, 3, 1])
    size = 299
    # images = tf.compat.v1.image.resize_bilinear(images, [size, size])
    images = tf.image.resize_bilinear(images, [size, size])
    generated_images_list = array_ops.split(images, num_or_size_splits = num_splits)
    logits = tf.map_fn(
        fn = functools.partial(
            tfgan.eval.run_inception,
            default_graph_def_fn = functools.partial(
                tfgan.eval.get_graph_def_from_url_tarball,
                INCEPTION_URL,
                INCEPTION_FROZEN_GRAPH,
                os.path.basename(INCEPTION_URL)),
            output_tensor = 'logits:0'),
        elems = array_ops.stack(generated_images_list),
        parallel_iterations = 1,
        back_prop = False,
        swap_memory = True,
        name = 'RunClassifier')
    logits = array_ops.concat(array_ops.unstack(logits), 0)
    return logits
# Graph node built once at import and reused by every get_inception_probs() call.
logits=inception_logits()
def get_inception_probs(inps):
    """Run *inps* (NCHW array, values in [0, 255]) through Inception in batches.

    Returns softmax class probabilities of shape (n_images, 1000).
    """
    n_batches = int(np.ceil(float(inps.shape[0]) / BATCH_SIZE))
    preds = np.zeros([inps.shape[0], 1000], dtype = np.float32)
    for i in range(n_batches):
        # Rescale [0, 255] -> [-1, 1], the range the Inception graph expects.
        inp = inps[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] / 255. * 2 - 1
        preds[i * BATCH_SIZE : i * BATCH_SIZE + min(BATCH_SIZE, inp.shape[0])] = session.run(logits,{inception_images: inp})[:, :1000]
    # Softmax over the 1000 class logits.
    preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)
    return preds
def preds2score(preds, splits=10):
    """Compute the Inception Score from class probabilities.

    For each of *splits* consecutive chunks of *preds*, computes
    exp(E[KL(p(y|x) || p(y))]) with p(y) estimated from that chunk, and
    returns (mean, std) over the chunks.

    Note: the original file defined preds2score twice; the first definition
    (a per-class binary-KL variant) was immediately shadowed by this one and
    has been removed as dead code.
    """
    scores = []
    for i in range(splits):
        part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
        # KL(p(y|x) || p(y)) summed over classes, averaged over the chunk.
        kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)
def get_inception_score(images, splits=10):
    """Compute the Inception Score of *images*.

    *images* must be an NCHW numpy array (n, 3, H, W) with values in
    [0, 255].  Returns (mean, std) over *splits* chunks.
    """
    assert(type(images) == np.ndarray)
    assert(len(images.shape) == 4)
    assert(images.shape[1] == 3)
    # Cheap sanity check that values look like [0, 255] rather than [0, 1].
    assert(np.min(images[0]) >= 0 and np.max(images[0]) > 10), 'Image values should be in the range [0, 255]'
    print('Calculating Inception Score with %i images in %i splits' % (images.shape[0], splits), file=sys.stderr, flush=True)
    start_time=time.time()
    preds = get_inception_probs(images)
    mean, std = preds2score(preds, splits)
    print('Inception Score calculation time: %f s' % (time.time() - start_time), file=sys.stderr, flush=True)
    return mean, std
############################## function related to FID ##############################
def resize_wafer(x):
    """Resize a batch of wafer images to 299x299 for InceptionV3 (FID).

    *x* is an (n, H, W, C) array.  The source size depends on the pipeline:
    48x48 when the module-level `training` flag (set in __main__) is "True",
    120x120 otherwise.
    """
    x_list = []
    for i in range(x.shape[0]):
        # `training` is a module-level string flag assigned in __main__.
        if training == "True":
            img = image.array_to_img(x[i, :, :, :].reshape(48, 48, -1))
        else:
            img = image.array_to_img(x[i, :, :, :].reshape(120, 120, -1))
        img = img.resize(size=(299, 299), resample=Image.LANCZOS)
        x_list.append(image.img_to_array(img))
    return np.array(x_list)
def cal_h(x, resizer, batch_size=64):
    """Extract InceptionV3 "avg_pool" features for FID, batch by batch.

    *resizer* maps a batch of images to the 299x299 input InceptionV3 needs.
    Returns an (n, 2048) feature array (None when x is empty).

    NOTE(review): the InceptionV3 model is rebuilt on every call; hoisting or
    caching it would avoid reloading the weights each time.
    """
    model = InceptionV3()
    # Cut the network at the global-average-pool layer (2048-d features).
    model4fid = Model(inputs=model.input, outputs=model.get_layer("avg_pool").output)
    r = None
    n_batch = (x.shape[0]+batch_size-1) // batch_size
    for j in range(n_batch):
        x_batch = resizer(x[j*batch_size:(j+1)*batch_size, :, :, :])
        r_batch = model4fid.predict(preprocess_input(x_batch))
        r = r_batch if r is None else np.concatenate([r, r_batch], axis=0)
    return r
def _load_grayscale_stack(image_paths):
    """Load grayscale images, scale to [0, 1] and tile to 3 channels (NHWC)."""
    data = []
    for image_path in image_paths:
        data.append(img_to_array(cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)))
    data = np.array(data, dtype="float") / 255.0
    return np.tile(data, (1, 1, 1, 3))


def wafer_h(n_train, n_val, list_real_wafer, list_generated_wafer):
    """Return InceptionV3 FID features for the real and generated wafers.

    Loads both image path lists (the previously duplicated load loops are
    factored into _load_grayscale_stack), truncates them to n_train / n_val
    images respectively, and runs them through cal_h().

    Returns (h_real, h_fake).
    """
    x = [_load_grayscale_stack(list_real_wafer),
         _load_grayscale_stack(list_generated_wafer)]
    n = [n_train, n_val]
    h = [0, 0]
    for i in range(2):
        h[i] = cal_h(x[i][0:n[i], :, :, :], resize_wafer)
    return h[0], h[1]
def mean_cov(x):
    """Return the column-wise mean vector and covariance matrix of *x* (rows = samples)."""
    return np.mean(x, axis=0), np.cov(x, rowvar=False)
def frechet_distance(m1, c1, m2, c2):
    # Frechet distance between two Gaussians N(m1, c1) and N(m2, c2):
    # |m1 - m2|^2 + Tr(c1 + c2 - 2*sqrt(c1 @ c2)).
    # NOTE(review): scipy's sqrtm can return a complex-valued matrix for a
    # real input, so this function may return a complex number; the __main__
    # block compensates with a string-slicing hack (str(fid_score)[1:10])
    # instead of taking the real part.  Fixing the return type here would
    # break that hack, so it is documented rather than changed.
    return np.sum((m1 - m2)**2) + np.trace(c1 + c2 -2*(sqrtm(np.dot(c1, c2))))
def calculate_fid_score(h1, h2):
    """Frechet Inception Distance between two feature sets (rows = samples)."""
    mean_a, cov_a = mean_cov(h1)
    mean_b, cov_b = mean_cov(h2)
    return frechet_distance(mean_a, cov_a, mean_b, cov_b)
############################## Execute ##############################
if __name__ == "__main__":
    # Pipeline driver.  When training == "True" it runs the full GAN pipeline
    # (downsize -> augment -> GAN generation -> binarize -> upsize) and scores
    # the output; when "False" it only augments the user's images with the
    # requested transforms.  Progress is reported to the web backend via a
    # POST before each step, and the final IS/FID scores are written both to
    # result.txt and to stdout as JSON.
    original_images_dir = args.base_dir
    # __file__ = base_dir_for_split
    base_dir = os.path.realpath(original_images_dir).rsplit('/', 1)[0] + '/'
    print(original_images_dir, file=sys.stderr, flush=True)
    print(base_dir, file=sys.stderr, flush=True)
    image_name = os.path.basename(os.path.normpath(original_images_dir))
    print(image_name, file=sys.stderr, flush=True)
    training = args.training
    jobid = args.jobid
    URL = args.url_prefix
    URL = URL + '/generator/updateStatus'
    headers={'Content-type':'application/json', 'Accept':'application/json'}
    if training == "True":
        ### False increaing image
        data = {'step': 'step-1', 'status': 'RUNNING', 'jobid': jobid, 'training': 'True'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        resize_down_size_image(base_dir, original_images_dir)
        # with open(os.path.join(base_dir,"ai_status.txt"), "w") as f:
        #     f.write("Finished 1/7 ")
        data = {'step': 'step-2', 'status': 'RUNNING', 'jobid': jobid, 'training': 'True'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        increasing_down_size_image(base_dir)
        # with open(os.path.join(base_dir,"ai_status.txt"), "w") as f:
        #     f.write("Finished 2/7")
        data = {'step': 'step-3', 'status': 'RUNNING', 'jobid': jobid, 'training': 'True'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        generate_image(base_dir)
        # with open(os.path.join(base_dir,"ai_status.txt"), "w") as f:
        #     f.write("Finished 3/7")
        data = {'step': 'step-4', 'status': 'RUNNING', 'jobid': jobid, 'training': 'True'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        save_binarized_image(base_dir)
        # with open(os.path.join(base_dir,"ai_status.txt"), "w") as f:
        #     f.write("Finished 4/7")
        data = {'step': 'step-5', 'status': 'RUNNING', 'jobid': jobid, 'training': 'True'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        save_path = resize_upsize_image(base_dir, image_name)
        # with open(os.path.join(base_dir,"ai_status.txt"), "w") as f:
        #     f.write("Finished 5/7")
        ### Increasing image for compare
        # increasing_image_for_compare(base_dir)
        ### Calculate IS
        # path_for_is = os.path.join(base_dir, '1_5_up_size_images')
        # path_for_is = os.path.join(base_dir, 'AI_Gerated_Images_' + image_name)
        path_for_is = save_path
        image_array = image_array_for_is(path_for_is)
        data = {'step': 'step-6', 'status': 'RUNNING', 'jobid': jobid, 'training': 'True'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        inception_score_mean, inception_score_standard = get_inception_score(image_array, splits=10)
        # with open(os.path.join(base_dir,"ai_status.txt"), "w") as f:
        #     f.write("Finished 6/7")
        print(inception_score_mean, file=sys.stderr, flush=True)
        ### Calculate FID
        # real_wafer_path = os.path.join(base_dir, '3_1_original_size_images_for_compare')
        # generated_wafer_path = os.path.join(base_dir, '1_5_up_size_images')
        # generated_wafer_path = os.path.join(base_dir, 'AI_Gerated_Images_' + image_name)
        generated_wafer_path = save_path
        # FID is computed on the 48x48 intermediates, not the final 120x120 set.
        real_wafer_path_for_fid = os.path.join(base_dir, '1_1_down_size_images')
        generated_wafer_path_for_fid = os.path.join(base_dir, '1_4_binarized_images')
        list_real_wafer = list(paths.list_images(real_wafer_path_for_fid))
        n_train = len(list_real_wafer)
        list_generated_wafer = list(paths.list_images(generated_wafer_path_for_fid)) #416
        n_val = len(list_generated_wafer)
        h_real, h_fake = wafer_h(n_train, n_val, list_real_wafer, list_generated_wafer)
        data = {'step': 'step-7', 'status': 'RUNNING', 'jobid': jobid, 'training': 'True'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        fid_score = calculate_fid_score(h_real, h_fake)
        data = {'step': 'FINISH', 'status': 'FINISH', 'jobid': jobid, 'training': 'True'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        # Clean up all intermediate pipeline directories.
        shutil.rmtree(os.path.join(base_dir, '1_1_down_size_images'))
        shutil.rmtree(os.path.join(base_dir, '1_2_increasing_down_size_images'))
        shutil.rmtree(os.path.join(base_dir, '1_3_generated_images'))
        shutil.rmtree(os.path.join(base_dir, '1_4_binarized_images'))
        # NOTE(review): frechet_distance can return a complex value (scipy
        # sqrtm); this string-slicing hack assumes a "(x+yj)" repr and strips
        # the leading "(" plus extra digits — fragile if the value is real.
        fid_score = str(fid_score)[1:10]
        fid_score = float(fid_score)
        # with open(os.path.join(base_dir,"ai_status.txt"), "w") as f:
        #     f.write("Finished 7/7")
        print(fid_score, file=sys.stderr, flush=True)
        score_dict = {}
        score_dict['FID'] = fid_score
        score_dict['IS'] = inception_score_mean
        score_dict = str(score_dict)
        with open(os.path.join(save_path,"result.txt"), "w") as f:
            f.write(score_dict)
    else:
        rotation_range = args.rotation_range
        shear_range = args.shear_range
        horizontal_flip = args.horizontal_flip
        vertical_flip = args.vertical_flip
        ### Auto increaing image
        data = {'step': 'step-1', 'status': 'RUNNING', 'jobid': jobid, 'training': 'False'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        # training_false_increasing_image(base_dir, original_images_dir, rotation_range, shear_range, horizontal_flip, vertical_flip, image_name)
        save_path = training_false_increasing_image(base_dir, original_images_dir, rotation_range, shear_range, horizontal_flip, vertical_flip, image_name)
        # with open(os.path.join(base_dir,"user_status.txt"), "w") as f:
        #     f.write("Finished 1/4")
        ### Increasing image for compare
        data = {'step': 'step-2', 'status': 'RUNNING', 'jobid': jobid, 'training': 'False'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        increasing_image_for_compare(base_dir, original_images_dir)
        # with open(os.path.join(base_dir,"user_status.txt"), "w") as f:
        #     f.write("Finished 2/4")
        ### Calculate IS
        data = {'step': 'step-3', 'status': 'RUNNING', 'jobid': jobid, 'training': 'False'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        # path_for_is = os.path.join(base_dir, '2_1_increasing_original_size_images')
        # path_for_is = os.path.join(base_dir, 'User_Generated_Images_'+image_name)
        path_for_is = save_path
        image_array = image_array_for_is(path_for_is)
        inception_score_mean, inception_score_standard = get_inception_score(image_array, splits=10)
        # with open(os.path.join(base_dir,"user_status.txt"), "w") as f:
        #     f.write("Finished 3/4")
        print(inception_score_mean, file=sys.stderr, flush=True)
        ### Calculate FID
        real_wafer_path = os.path.join(base_dir, '3_1_original_size_images_for_compare')
        # generated_wafer_path = os.path.join(base_dir, '2_1_increasing_original_size_images')
        # generated_wafer_path = os.path.join(base_dir, 'User_Generated_Images_'+image_name)
        generated_wafer_path = save_path
        list_real_wafer = list(paths.list_images(real_wafer_path))
        n_train = len(list_real_wafer)
        list_generated_wafer = list(paths.list_images(generated_wafer_path))
        n_val = len(list_generated_wafer)
        h_real, h_fake = wafer_h(n_train, n_val, list_real_wafer, list_generated_wafer)
        data = {'step': 'step-4', 'status': 'RUNNING', 'jobid': jobid, 'training': 'False'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        fid_score = calculate_fid_score(h_real, h_fake)
        data = {'step': 'FINISH', 'status': 'FINISH', 'jobid': jobid, 'training': 'False'}
        res = requests.post(URL, json=data, headers=headers)
        print(res, file=sys.stderr, flush=True)
        ### Delete real_wafer_path
        if os.path.isdir(real_wafer_path):
            shutil.rmtree(real_wafer_path)
        # NOTE(review): same string-slicing hack as the training branch; see
        # the comment on frechet_distance about complex sqrtm results.
        fid_score = str(fid_score)[1:10]
        fid_score = float(fid_score)
        # with open(os.path.join(base_dir,"user_status.txt"), "w") as f:
        #     f.write("Finished 4/4")
        print(fid_score, file=sys.stderr, flush=True)
        score_dict = {}
        score_dict['FID'] = fid_score
        score_dict['IS'] = inception_score_mean
        score_dict = str(score_dict)
        with open(os.path.join(save_path,"result.txt"), "w") as f:
            f.write(score_dict)
    # result_output = {"generated_image_path": generated_wafer_path, "is_score": inception_score_mean, "fid_score": fid_score}
    # print(result_output, file=sys.stderr, flush=True)
    # result_json = json.dumps(result_output)
    # sys.stdout.write(result_json)
    # Emit the final result as JSON on stdout; .item() converts the numpy
    # scalars to plain Python floats so json.dumps accepts them.
    inception_score_mean = np.float32(inception_score_mean)
    fid_score = np.float32(fid_score)
    result_output = {}
    result_output["generated_image_path"] = generated_wafer_path
    result_output["is_score"] = inception_score_mean.item()
    result_output["fid_score"] = fid_score.item()
    #print(result_output, file=sys.stdout, flush=True)
    result_json = json.dumps(result_output)
    print(result_json, file=sys.stdout, flush=True)
| joyoon1110/generator | generator.py | generator.py | py | 34,686 | python | en | code | 0 | github-code | 13 |
73708411216 | from typing import List
class MetricProvider:
    """Static collection of simple regression error metrics."""

    @staticmethod
    def mean_absolute_error(y_true: List[float], y_pred: List[float]) -> float:
        """
        Returns the average absolute error between each y value.
        :param y_true: List of true values.
        :param y_pred: List of predicted values.
        :return: The average error.
        """
        # Generator sum replaces the manual accumulate-into-a-list loop.
        return sum(abs(t - p) for t, p in zip(y_true, y_pred)) / len(y_true)

    @staticmethod
    def error_of_average(y_true: List[float], y_pred: List[float]) -> float:
        """
        Returns the absolute difference between the average of y_true and y_pred.
        :param y_true: List of true values.
        :param y_pred: List of predicted values.
        :return: The absolute value of the difference between means.
        """
        return abs(sum(y_true) / len(y_true) - sum(y_pred) / len(y_pred))
| thearod5/calorie-predictor | src/experiment/metric_provider.py | metric_provider.py | py | 1,015 | python | en | code | 1 | github-code | 13 |
31955914214 | import numpy as np
import pandas as pd
from imblearn.over_sampling import SMOTENC
def augment_dataset(
    data: pd.DataFrame,
    categorical_features: list,
    target_feature: str = 'target',
    k_parameter: int = 3
) -> pd.DataFrame:
    """Over-Sampling dataset augmentation

    The main function will augment the given dataset
    with synthetic instances associated to the less
    representative target class, through a variant of
    SMOTE that allows categorical features

    Args:
        data: Dataset to resample
        categorical_features: Names of the categorical features
        target_feature: Name of the target class. By default, 'target'
        k_parameter: Number of nearest neighbours to be used for the synthetic model

    Returns:
        Augmented dataset
    """
    # Boolean mask marking which columns SMOTENC must treat as categorical.
    categorical_mask = [feature in categorical_features for feature in data.columns]
    # np.setdiff1d also sorts the names; compute once so the feature frame
    # and the resampled output columns stay aligned.
    feature_columns = np.setdiff1d(data.columns, [target_feature])
    smote_obj = SMOTENC(
        categorical_features=categorical_mask,
        sampling_strategy='minority',
        k_neighbors=k_parameter
    )
    # fit_resample replaces the deprecated fit_sample
    # (renamed in imbalanced-learn 0.4, removed in 0.6).
    features_resampled, target_resampled = smote_obj.fit_resample(
        data[feature_columns], data[target_feature]
    )
    resampled_data = pd.DataFrame(
        data=features_resampled,
        columns=feature_columns
    )
    resampled_data[target_feature] = target_resampled
    return resampled_data
| MAGomes95/Heart_Failure | src/features/augmention.py | augmention.py | py | 1,439 | python | en | code | 0 | github-code | 13 |
25205856964 | from psqldb import GetGameInfo
from dealsdb import GetDeals
from stringtolist import stolist2
# formats source for deal
def find_source(link):
if "gamestop" in link:
return "GameStop"
elif "razer" in link:
return "Razer Gamestore"
elif "humblebundle" in link:
return "Humble Bundle"
elif "amazon" in link:
return "Amazon"
elif "bestbuy" in link:
return "Best Buy"
elif "cdkeys" in link:
return "CD Keys"
def format_plat(plat):
    """Normalize a raw platform string to its display name.

    Matching is case-insensitive, checked in the original priority order
    (ps4, pc, xbox, nintendo); anything unrecognized defaults to "PC".
    """
    plat = plat.lower()
    platform_names = (
        ("ps4", "Playstation 4"),
        ("pc", "PC"),
        ("xbox", "Xbox One"),
        ("nintendo", "Nintendo Switch"),
    )
    for fragment, display_name in platform_names:
        if fragment in plat:
            return display_name
    return "PC"
# Pitch template per vendor; every template takes (game, price) in that
# order.  The original wording — including the "their" typo in the Amazon
# line and the missing final period for CD Keys — is preserved byte-for-byte
# because this text is emitted to users.
_VENDOR_PITCHES = (
    ("Amazon", "Amazon is the largest online retailer in the world and their currently offering {} at the discounted price of {}."),
    ("Humble Bundle", "Humble Bundle is a trusted digital video game distributor and is beloved by the gaming community for their charity work. Humble Bundle is currently offering {} for {}."),
    ("GameStop", "GameStop is a well-known retail video game vendor, and is currently offering {} for {}."),
    ("Best Buy", "Best Buy is a popular electronics seller, and it’s currently offering {} for {}."),
    ("CD Keys", "CD Keys is a trusted video game seller and does thousands of sales everyday. The site manages to get the lowest price for games by using currency fluctuations. CD Keys is offering {} for {}"),
    ("Razer Gamestore", "Razer is a popular gaming gear seller, but what a lot of people don’t know is that Razer also sells games on the Razer Gamestore. The Razer Gamestore is currently offering {} for {}."),
)


def vendor_desc(price, vendor, game):
    """Return a one-paragraph sales pitch for *game* at *price* from *vendor*.

    *vendor* is matched by substring; returns None for unknown vendors
    (matching the original if/elif chain's implicit fall-through).
    """
    for vendor_key, template in _VENDOR_PITCHES:
        if vendor_key in vendor:
            return template.format(game, price)
    return None
def generate_text(game_name):
    """Build the content sections for a game's deals page.

    Queries the deals database for *game_name* and returns a dict with:
    section_1 (deal summary text), section_2 (review/storyline snippets),
    section_3 (per-vendor pitch paragraphs), section_stock (schema.org
    availability URL) and section_price (lowest price found, 0 if none).
    """
    final_response = {}
    deal_from_psql = GetDeals()
    deals = deal_from_psql.deal_info(game_name)
    deal_from_psql.close_session()
    reviews = []  # NOTE(review): never used in this function — dead variable.
    if deals:
        num_deals = len(deals)
        # Stores unique vendors
        vendors = []
        # Stores unique plats
        plats = []
        # vendor support
        support_vend = []
        # stores lowest price
        # NOTE(review): 100 doubles as "no price found" sentinel, so prices
        # of $100 or more are never recorded as the lowest price.
        low_price = 100
        for li in deals:
            vend = find_source(li.buy_link)
            if vend not in vendors:
                vend_rep = vendor_desc(li.price, vend, game_name)
                support_vend.append(vend_rep)
                vendors.append(vend)
            plat = format_plat(li.platform)
            if plat not in plats:
                plats.append(plat)
            try:
                pri = float(li.price.replace("$", ""))
                if pri < low_price:
                    low_price = pri
            # Deliberately ignore unparsable price strings (e.g. "10% off").
            except Exception:
                pass
        # SECTION 1 BASIC INFO
        # Number of deals
        if num_deals == 1:
            dis_info = "There is one deal available for {}, from {}. ".format(game_name, vendors[0])
        elif num_deals > 1:
            dis_info = "There are {} deals available for {}, from {}. ".format(num_deals, game_name, ", ".join(vendors))
        else:
            dis_info = ""
        # price for deals
        if low_price != 100:
            dis_info2 = "The current lowest price for {} is ${}. ".format(game_name, low_price)
        else:
            # Sentinel untouched -> no numeric price could be parsed.
            dis_info2 = "The current lowest price for {} is 10% off. ".format(game_name)
        # Platforms for deals
        # plats always has >= 1 entry here: every deal adds a platform and
        # format_plat always returns a string.
        if len(plats) == 1:
            dis_info3 = "This deal is available for the {} version of the game. ".format(plats[0])
        elif len(plats) > 1:
            dis_info3 = "These deals are available for the {} versions of the game. ".format(", ".join(plats))
        final_response["section_1"] = dis_info + dis_info2 + dis_info3
        # SECTION 2 REVIEWS
        deep_deals_info = GetGameInfo()
        deep_info = deep_deals_info.game_info(game_name)
        deep_deals_info.close_session()
        revs = stolist2(deep_info.storyline)
        revs1 = revs[2:]
        final_response["section_2"] = revs1
        # SECTION 3 VENDOR REVIEWS
        final_response["section_3"] = support_vend
        final_response["section_stock"] = "http://schema.org/InStock"
        final_response["section_price"] = low_price
        return final_response
    else:
        # SECTION 2 REVIEWS
        deep_deals_info = GetGameInfo()
        deep_info = deep_deals_info.game_info(game_name)
        deep_deals_info.close_session()
        revs = stolist2(deep_info.storyline)
        revs1 = revs[2:]
        return {"section_1": "There are no current deals for {}.".format(game_name), "section_2": revs1, "section_3":"", "section_stock":"http://schema.org/OutOfStock", "section_price": 0}
| HussanKhan/Gamescout.io | GameScout/content_gen.py | content_gen.py | py | 5,177 | python | en | code | 0 | github-code | 13 |
43364355039 | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 2 21:51:47 2019
@author: aguec
"""
import math
# Sieve all primes below one million by trial division of odd candidates.
primes = [2, 3]
for i in range(5, 1000000, 2):
    primality = True
    for j in range(3, int(math.sqrt(i)) + 1, 2):
        if i % j == 0:
            primality = False
            break
    if primality:
        primes.append(i)
# A set gives O(1) membership tests in the candidate loop below, instead of
# an O(n) linear scan of the list for every consecutive-prime sum.
prime_set = set(primes)
# Collect sums of more than 500 consecutive primes that are themselves prime
# (the count > 500 cut keeps the candidate list small / "less entropic",
# per the original author's note).
candidates = []
for m in range(len(primes)):
    count = 0
    summ = 0
    for k in range(m, len(primes)):
        count = count + 1
        summ = summ + primes[k]
        if summ > primes[-1]:
            break
        elif count > 500 and summ in prime_set:
            candidates.append([summ, count])
def my_max(x):
    """Return (value, length) of the entry in *x* with the largest length.

    *x* is a list of [prime_sum, run_length] pairs; ties keep the earliest
    entry, and an empty list yields (0, 0).
    """
    best_prime, best_length = 0, 0
    for prime_sum, run_length in x:
        if run_length > best_length:
            best_prime, best_length = prime_sum, run_length
    return best_prime, best_length
# Report the best candidate: the prime below one million expressible as the
# longest run of consecutive primes (Project Euler problem 50).
print('Prime and consecutive prime sums respectively\
 ',my_max(candidates))
13129446185 | import argparse
import numpy as np
from chainer import serializers
from yolov2 import YOLOv2
# Command-line tool: reads a darknet YOLOv2 weights file (flat float32 blob
# after a 4-int header) and copies the parameters layer by layer into the
# chainer YOLOv2 model, then saves it as yolov2_darknet.model.
parser = argparse.ArgumentParser(description="Convert darknet weights into chainer weights")
parser.add_argument('path', help="path of darknet yolo_v2 weights")
args = parser.parse_args()

print("loading", args.path)
file = open(args.path, "rb")
dat = np.fromfile(file, dtype=np.float32)[4:] # skip header(4xint)

# load model
print("loading initial model...")
n_classes = 80
n_boxes = 5
# Each box predicts (x, y, w, h, objectness) + class scores.
last_out = (n_classes + 5) * n_boxes

yolov2 = YOLOv2(n_classes=n_classes, n_boxes=n_boxes)
yolov2.train = True
yolov2.finetune = False

# One entry per convolution layer: [in_channels, out_channels, kernel_size].
layers = [
    [3, 32, 3],
    [32, 64, 3],
    [64, 128, 3],
    [128, 64, 1],
    [64, 128, 3],
    [128, 256, 3],
    [256, 128, 1],
    [128, 256, 3],
    [256, 512, 3],
    [512, 256, 1],
    [256, 512, 3],
    [512, 256, 1],
    [256, 512, 3],
    [512, 1024, 3],
    [1024, 512, 1],
    [512, 1024, 3],
    [1024, 512, 1],
    [512, 1024, 3],
    [1024, 1024, 3],
    [1024, 1024, 3],
    [3072, 1024, 3],
]

# Walk the flat weight blob; exec() is used to address the numbered model
# attributes (bias1, bn1, conv1, ...) built from the loop index.
offset = 0
for i, l in enumerate(layers):
    in_ch = l[0]
    out_ch = l[1]
    ksize = l[2]

    # load bias (Bias.b has the same size as out_ch)
    txt = "yolov2.bias%d.b.data = dat[%d:%d]" % (i + 1, offset, offset + out_ch)
    offset += out_ch
    exec(txt)

    # load bn (BatchNormalization.gamma has the same size as out_ch)
    txt = "yolov2.bn%d.gamma.data = dat[%d:%d]" % (i + 1, offset, offset + out_ch)
    offset += out_ch
    exec(txt)

    # (BatchNormalization.avg_mean has the same size as out_ch)
    txt = "yolov2.bn%d.avg_mean = dat[%d:%d]" % (i + 1, offset, offset + out_ch)
    offset += out_ch
    exec(txt)

    # (BatchNormalization.avg_var has the same size as out_ch)
    txt = "yolov2.bn%d.avg_var = dat[%d:%d]" % (i + 1, offset, offset + out_ch)
    offset += out_ch
    exec(txt)

    # load convolution weight (Convolution2D.W holds out_ch * in_ch * filter
    # size values; reshape them to (out_ch, in_ch, ksize, ksize))
    txt = "yolov2.conv%d.W.data = dat[%d:%d].reshape(%d, %d, %d, %d)" % (
        i + 1, offset, offset + (out_ch * in_ch * ksize * ksize), out_ch, in_ch, ksize, ksize)
    offset += (out_ch * in_ch * ksize * ksize)
    exec(txt)
    print(i + 1, offset)

# load last convolution weight (only Bias and Convolution2D are loaded —
# the detection head has no batch normalization)
in_ch = 1024
out_ch = last_out
ksize = 1

txt = "yolov2.bias%d.b.data = dat[%d:%d]" % (i + 2, offset, offset + out_ch)
offset += out_ch
exec(txt)

txt = "yolov2.conv%d.W.data = dat[%d:%d].reshape(%d, %d, %d, %d)" % (
    i + 2, offset, offset + (out_ch * in_ch * ksize * ksize), out_ch, in_ch, ksize, ksize)
offset += out_ch * in_ch * ksize * ksize
exec(txt)
print(i + 2, offset)

print("save weights file to yolov2_darknet.model")
serializers.save_hdf5("yolov2_darknet.model", yolov2)
| Kiikurage/webdnn-yolo_v2 | convert_model/convert_darknet_weight.py | convert_darknet_weight.py | py | 2,800 | python | en | code | 1 | github-code | 13 |
23169682879 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os
from subprocess import Popen
from shutil import which
from PyQt5 import QtCore
from PyQt5.QtGui import ( QPainter, QColor, QPixmap, QImage, QIcon, QStandardItem,
QIntValidator, QStandardItemModel
)
from PyQt5.QtWidgets import (
QApplication, QMainWindow, QWidget, QFrame, QVBoxLayout, QLabel,
QFileDialog, QInputDialog, QAction, QLineEdit,
QComboBox, QMessageBox,
QDialog, QSystemTrayIcon
)
from popplerqt5 import Poppler
sys.path.append(os.path.dirname(__file__)) # for enabling python 2 like import
import resources_rc
from __init__ import __version__
from ui_mainwindow import Ui_window
from dialogs import ExportToImageDialog, DocInfoDialog
DEBUG = False
def debug(*args):
    """ Print *args* only when the module-level DEBUG flag is True. """
    if DEBUG: print(*args)
SCREEN_DPI = 100
HOMEDIR = os.path.expanduser("~")
#pt2pixel = lambda point, dpi : dpi*point/72.0
class Renderer(QtCore.QObject):
    """ Off-main-thread page renderer.

    Two instances live in separate QThreads: one renders odd page numbers,
    the other even ones (chosen by page_set), so two pages can be rendered
    concurrently. Results are reported back to the GUI thread via signals. """
    rendered = QtCore.pyqtSignal(int, QImage)   # (page_no, rendered page image)
    textFound = QtCore.pyqtSignal(int, list)    # (page_no, list of hit rectangles)
    def __init__(self, page_set=1):
        # page_set = 1 for odd, and 0 for even
        QtCore.QObject.__init__(self)
        self.doc = None
        self.page_set = page_set
        self.painter = QPainter()
        # translucent blue used to mark clickable link areas
        self.link_color = QColor(0,0,127, 40)
    def render(self, page_no, dpi):
        """ render(int, float)
        This slot takes page no. and dpi and renders that page, then emits a signal with QImage"""
        # Only render pages matching this renderer's parity (odd/even split)
        if page_no%2 != self.page_set:
            return
        page = self.doc.page(page_no-1)
        if not page :
            return
        img = page.renderToImage(dpi, dpi)
        # Add highlight over link annotations (boundary() is in 0..1 page fractions)
        self.painter.begin(img)
        annots = page.annotations()
        for annot in annots:
            if annot.subType() == Poppler.Annotation.ALink:
                x, y = annot.boundary().left()*img.width(), annot.boundary().top()*img.height()
                w, h = annot.boundary().width()*img.width()+1, annot.boundary().height()*img.height()+1
                self.painter.fillRect(x, y, w, h, self.link_color)
        self.painter.end()
        self.rendered.emit(page_no, img)
    def loadDocument(self, filename, password=''):
        """ loadDocument(str, str)
        Main thread uses this slot to load document for rendering.
        Opens its own Poppler document: Poppler docs are not thread-safe,
        so each renderer thread needs a private instance. """
        self.doc = Poppler.Document.load(filename, password.encode(), password.encode())
        self.doc.setRenderHint(Poppler.Document.TextAntialiasing | Poppler.Document.TextHinting |
                            Poppler.Document.Antialiasing | Poppler.Document.ThinLineSolid )
    def findText(self, text, page_num, find_reverse):
        """ Case-insensitive search starting at page_num; scans backward when
        find_reverse is True. Emits textFound for the first page with a match;
        emits nothing when no match exists in the scanned direction. """
        if find_reverse:
            pages = [i for i in range(1,page_num+1)]
            pages.reverse()
        else:
            pages = [i for i in range(page_num, self.doc.numPages()+1)]
        for page_no in pages:
            page = self.doc.page(page_no-1)
            textareas = page.search(text,Poppler.Page.CaseInsensitive,0)
            if textareas != []:
                self.textFound.emit(page_no, textareas)
                break
class Window(QMainWindow, Ui_window):
    """ Main application window.

    Hosts the toolbar/menu UI, a scroll area full of PageWidgets, the outline
    tree and the search dock. Rendering is delegated to two Renderer objects
    running in background QThreads (odd/even page split), driven through the
    signals below. """
    renderRequested = QtCore.pyqtSignal(int, float)     # (page_no, dpi) -> renderers
    loadFileRequested = QtCore.pyqtSignal(str, str)     # (filename, password) -> renderers
    findTextRequested = QtCore.pyqtSignal(str, int, bool)   # (text, start page, reverse)
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.dockSearch.hide()
        self.dockWidget.hide()
        self.dockWidget.setMinimumWidth(310)
        self.findTextEdit.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.treeView.setAlternatingRowColors(True)
        self.treeView.clicked.connect(self.onOutlineClick)
        # resizing pages requires some time to take effect
        self.resize_page_timer = QtCore.QTimer(self)
        self.resize_page_timer.setSingleShot(True)
        self.resize_page_timer.timeout.connect(self.onWindowResize)
        # Add shortcut actions
        self.gotoPageAction = QAction(QIcon(":/goto.png"), "GoTo Page", self)
        self.gotoPageAction.triggered.connect(self.gotoPage)
        self.copyTextAction = QAction(QIcon(":/copy.png"), "Copy Text", self)
        self.copyTextAction.setCheckable(True)
        self.copyTextAction.triggered.connect(self.toggleCopyText)
        self.findTextAction = QAction(QIcon(":/search.png"), "Find Text", self)
        self.findTextAction.setShortcut('Ctrl+F')
        self.findTextAction.triggered.connect(self.dockSearch.show)
        # connect menu actions signals
        self.openFileAction.triggered.connect(self.openFile)
        self.lockUnlockAction.triggered.connect(self.lockUnlock)
        self.printAction.triggered.connect(self.printFile)
        self.quitAction.triggered.connect(self.close)
        self.toPSAction.triggered.connect(self.exportToPS)
        self.pageToImageAction.triggered.connect(self.exportPageToImage)
        self.docInfoAction.triggered.connect(self.docInfo)
        self.zoominAction.triggered.connect(self.zoomIn)
        self.zoomoutAction.triggered.connect(self.zoomOut)
        self.undoJumpAction.triggered.connect(self.undoJump)
        self.prevPageAction.triggered.connect(self.goPrevPage)
        self.nextPageAction.triggered.connect(self.goNextPage)
        self.firstPageAction.triggered.connect(self.goFirstPage)
        self.lastPageAction.triggered.connect(self.goLastPage)
        # Create widgets for menubar / toolbar
        self.gotoPageEdit = QLineEdit(self)
        self.gotoPageEdit.setPlaceholderText("Jump to page...")
        self.gotoPageEdit.setMaximumWidth(120)
        self.gotoPageEdit.returnPressed.connect(self.gotoPage)
        # validator top limit is updated later, once the page count is known
        self.gotoPageValidator = QIntValidator(1,1, self.gotoPageEdit)
        self.gotoPageEdit.setValidator(self.gotoPageValidator)
        self.pageNoLabel = QLabel(self)
        self.pageNoLabel.setFrameShape(QFrame.StyledPanel)
        spacer = QWidget(self)
        spacer.setSizePolicy(1|2|4,1|4)
        self.zoomLevelCombo = QComboBox(self)
        self.zoomLevelCombo.addItems(["Fixed Width", "75%", "90%","100%","110%","121%","133%","146%", "175%", "200%"])
        self.zoomLevelCombo.activated.connect(self.setZoom)
        # parallel to the combo items; 0 means "Fixed Width" (dpi computed from window)
        self.zoom_levels = [0, 75, 90, 100, 110 , 121, 133, 146, 175, 200]
        # Add toolbar actions
        self.toolBar.addAction(self.openFileAction)
        self.toolBar.addAction(self.printAction)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.docInfoAction)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.zoomoutAction)
        self.toolBar.addWidget(self.zoomLevelCombo)
        self.toolBar.addAction(self.zoominAction)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.firstPageAction)
        self.toolBar.addAction(self.prevPageAction)
        self.toolBar.addWidget(self.pageNoLabel)
        self.toolBar.addAction(self.nextPageAction)
        self.toolBar.addAction(self.lastPageAction)
        self.toolBar.addAction(self.undoJumpAction)
        self.toolBar.addWidget(self.gotoPageEdit)
        self.toolBar.addAction(self.gotoPageAction)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.copyTextAction)
        self.toolBar.addAction(self.findTextAction)
        #self.toolBar.addAction(self.saveUnlockedAction)
        self.toolBar.addWidget(spacer)
        self.attachAction = self.toolBar.addAction(QIcon(":/attachment.png"), "A")
        self.attachAction.setVisible(False)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.quitAction)
        # Add widgets
        self.statusbar = QLabel(self)
        self.statusbar.setStyleSheet("QLabel { font-size: 12px; border-radius: 2px; padding: 2px; background: palette(highlight); color: palette(highlighted-text); }")
        self.statusbar.setMaximumHeight(16)
        self.statusbar.hide()
        # Import settings
        desktop = QApplication.desktop()
        self.settings = QtCore.QSettings("gospel-pdf", "main", self)
        self.recent_files = self.settings.value("RecentFiles", [])
        # history_filenames[i] maps to history_page_no[i] (last viewed page)
        self.history_filenames = self.settings.value("HistoryFileNameList", [])
        self.history_page_no = self.settings.value("HistoryPageNoList", [])
        self.offset_x = int(self.settings.value("OffsetX", 4))
        self.offset_y = int(self.settings.value("OffsetY", 26))
        self.available_area = [desktop.availableGeometry().width(), desktop.availableGeometry().height()]
        self.zoomLevelCombo.setCurrentIndex(int(self.settings.value("ZoomLevel", 2)))
        # Connect Signals
        self.scrollArea.verticalScrollBar().valueChanged.connect(self.onMouseScroll)
        self.scrollArea.verticalScrollBar().sliderReleased.connect(self.onSliderRelease)
        self.findTextEdit.returnPressed.connect(self.findNext)
        self.findNextButton.clicked.connect(self.findNext)
        self.findBackButton.clicked.connect(self.findBack)
        self.findCloseButton.clicked.connect(self.dockSearch.hide)
        self.dockSearch.visibilityChanged.connect(self.toggleFindMode)
        # Create separate thread and move renderer to it
        self.thread1 = QtCore.QThread(self)
        self.renderer1 = Renderer(0)
        self.renderer1.moveToThread(self.thread1) # this must be moved before connecting signals
        self.renderRequested.connect(self.renderer1.render)
        self.loadFileRequested.connect(self.renderer1.loadDocument)
        self.findTextRequested.connect(self.renderer1.findText)
        self.renderer1.rendered.connect(self.setRenderedImage)
        self.renderer1.textFound.connect(self.onTextFound)
        self.thread1.start()
        # second renderer handles the other page-number parity
        self.thread2 = QtCore.QThread(self)
        self.renderer2 = Renderer(1)
        self.renderer2.moveToThread(self.thread2)
        self.renderRequested.connect(self.renderer2.render)
        self.loadFileRequested.connect(self.renderer2.loadDocument)
        self.renderer2.rendered.connect(self.setRenderedImage)
        self.thread2.start()
        # Initialize Variables
        self.doc = None
        self.filename = ''
        self.passwd = ''
        self.pages = []
        self.jumped_from = None     # page we last jumped away from (for undoJump)
        self.max_preload = 1        # how many pages ahead to render
        self.recent_files_actions = []
        self.addRecentFiles()
        # Show Window
        width = int(self.settings.value("WindowWidth", 1040))
        height = int(self.settings.value("WindowHeight", 717))
        self.resize(width, height)
        self.show()
    def addRecentFiles(self):
        """ Rebuild the Recent Files menu from self.recent_files. """
        self.recent_files_actions[:] = [] # pythonic way to clear list
        self.menuRecentFiles.clear()
        for each in self.recent_files:
            name = elideMiddle(os.path.basename(each), 60)
            action = self.menuRecentFiles.addAction(name, self.openRecentFile)
            self.recent_files_actions.append(action)
        self.menuRecentFiles.addSeparator()
        self.menuRecentFiles.addAction(QIcon(':/edit-clear.png'), 'Clear Recents', self.clearRecents)
    def openRecentFile(self):
        """ Open the recent file whose menu action triggered this slot. """
        action = self.sender()
        index = self.recent_files_actions.index(action)
        self.loadPDFfile(self.recent_files[index])
    def clearRecents(self):
        """ Empty the Recent Files menu and the underlying list. """
        self.recent_files_actions[:] = []
        self.menuRecentFiles.clear()
        self.recent_files[:] = []
    def removeOldDoc(self):
        """ Tear down widgets/state of the currently open document (if any)
        before a new one is loaded. """
        if not self.doc:
            return
        # Save current page number
        self.saveFileData()
        # Remove old document
        for i in range(len(self.pages)):
            self.verticalLayout.removeWidget(self.pages[-1])
        for i in range(len(self.pages)):
            self.pages.pop().deleteLater()
        self.frame.deleteLater()
        self.attachAction.setVisible(False)
        self.jumped_from = None
        self.addRecentFiles()
    def loadPDFfile(self, filename):
        """ Loads pdf document in all threads """
        filename = os.path.expanduser(filename)
        doc = Poppler.Document.load(filename)
        if not doc : return
        password = ''
        if doc.isLocked() :
            password = QInputDialog.getText(self, 'This PDF is locked', 'Enter Password :', 2)[0]
            if password == '' :
                if self.doc == None: sys.exit(1) # exit if this is the first document
                else : return
            locked = doc.unlock(password.encode(), password.encode())
            if locked:
                return QMessageBox.critical(self, "Failed !","Incorrect Password")
            self.passwd = password
            self.lockUnlockAction.setText("Save Unlocked")
        else:
            self.lockUnlockAction.setText("Encrypt PDF")
        self.removeOldDoc()
        doc.setRenderHint(Poppler.Document.TextAntialiasing | Poppler.Document.TextHinting |
                            Poppler.Document.Antialiasing | Poppler.Document.ThinLineSolid )
        self.doc = doc
        self.filename = filename
        self.pages_count = self.doc.numPages()
        self.current_page = 1
        self.rendered_pages = []
        self.getOutlines(self.doc)
        # Load Document in other threads
        self.loadFileRequested.emit(self.filename, password)
        # restore last-viewed page from history, clamped to the page count
        if collapseUser(self.filename) in self.history_filenames:
            self.current_page = int(self.history_page_no[self.history_filenames.index(collapseUser(self.filename))])
            self.current_page = min(self.current_page, self.pages_count)
        self.scroll_render_lock = False
        # Show/Add widgets
        if self.doc.hasEmbeddedFiles():
            self.attachAction.setVisible(True)
        self.frame = Frame(self.scrollAreaWidgetContents, self.scrollArea)
        self.verticalLayout = QVBoxLayout(self.frame)
        self.horizontalLayout_2.addWidget(self.frame)
        self.scrollArea.verticalScrollBar().setValue(0)
        self.frame.jumpToRequested.connect(self.jumpToPage)
        self.frame.copyTextRequested.connect(self.copyText)
        self.frame.showStatusRequested.connect(self.showStatus)
        # Render 4 pages, (Preload 3 pages)
        self.max_preload = min(4, self.pages_count)
        # Add pages
        for i in range(self.pages_count):
            page = PageWidget(i+1, self.frame)
            self.verticalLayout.addWidget(page, 0, QtCore.Qt.AlignCenter)
            self.pages.append(page)
        self.resizePages()
        self.pageNoLabel.setText('<b>%i/%i</b>' % (self.current_page, self.pages_count) )
        self.gotoPageValidator.setTop(self.pages_count)
        self.setWindowTitle(os.path.basename(self.filename)+ " - Gospel PDF " + __version__)
        # delay the jump so the layout has settled before scrolling
        if self.current_page != 1 :
            QtCore.QTimer.singleShot(150+self.pages_count//3, self.jumpToCurrentPage)
    def setRenderedImage(self, page_no, image):
        """ takes a QImage and sets pixmap of the specified page
            when number of rendered pages exceeds a certain number, old page image is
            deleted to save memory """
        debug("Set Rendered Image :", page_no)
        self.pages[page_no-1].setPageData(page_no, QPixmap.fromImage(image), self.doc.page(page_no-1))
        # Request to render next page
        if self.current_page <= page_no < (self.current_page + self.max_preload - 2):
            if (page_no+2 not in self.rendered_pages) and (page_no+2 <= self.pages_count):
                self.rendered_pages.append(page_no+2)
                self.renderRequested.emit(page_no+2, self.pages[page_no+1].dpi)
        # Replace old rendered pages with blank image
        if len(self.rendered_pages)>10:
            cleared_page_no = self.rendered_pages.pop(0)
            debug("Clear Page :", cleared_page_no)
            self.pages[cleared_page_no-1].clear()
        debug("Rendered Pages :", self.rendered_pages)
    def renderCurrentPage(self):
        """ Requests to render current page. if it is already rendered, then request
            to render next unrendered page """
        requested = 0
        for page_no in range(self.current_page, self.current_page+self.max_preload):
            if (page_no not in self.rendered_pages) and (page_no <= self.pages_count):
                self.rendered_pages.append(page_no)
                self.renderRequested.emit(page_no, self.pages[page_no-1].dpi)
                requested += 1
                debug("Render Requested :", page_no)
                if requested == 2: return
    def onMouseScroll(self, pos):
        """ It is called when vertical scrollbar value is changed.
            Get the current page number on scrolling, then requests to render"""
        index = self.verticalLayout.indexOf(self.frame.childAt(self.frame.width()/2, pos))
        if index == -1: return
        self.pageNoLabel.setText('<b>%i/%i</b>' % (index+1, self.pages_count) )
        # don't render while the slider is being dragged or during a zoom
        if self.scrollArea.verticalScrollBar().isSliderDown() or self.scroll_render_lock : return
        self.current_page = index+1
        self.renderCurrentPage()
    def onSliderRelease(self):
        """ Render the page under the cursor once the user releases the slider. """
        self.onMouseScroll(self.scrollArea.verticalScrollBar().value())
    def openFile(self):
        """ Show a file dialog and load the chosen PDF. """
        filename, sel_filter = QFileDialog.getOpenFileName(self,
                                      "Select Document to Open", self.filename,
                                      "Portable Document Format (*.pdf);;All Files (*)" )
        if filename != "":
            self.loadPDFfile(filename)
    def lockUnlock(self):
        """ Save a decrypted copy of a locked PDF, or (for unlocked docs)
        delegate to encryptPDF(). Requires the external qpdf program. """
        if which("qpdf")==None :
            self.lockUnlockAction.setEnabled(False)
            QMessageBox.warning(self, "qpdf Required","qpdf command not found.\nInstall qpdf program.")
            return
        if self.lockUnlockAction.text()=="Encrypt PDF":
            self.encryptPDF()
            return
        filename, ext = os.path.splitext(self.filename)
        new_name = filename + "-unlocked.pdf"
        proc = Popen(["qpdf", "--decrypt", "--password="+self.passwd, self.filename, new_name])
        stdout, stderr = proc.communicate()
        if proc.returncode==0:
            notifier = Notifier(self)
            notifier.showNotification("Successful !", "File saved as\n"+os.path.basename(new_name))
        else:
            QMessageBox.warning(self, "Failed !", "Failed to save as unlocked")
    def encryptPDF(self):
        """ Ask for a password and save an encrypted copy using qpdf. """
        password, ok = QInputDialog.getText(self, "Lock PDF", "Enter Password :",
                                        QLineEdit.PasswordEchoOnEdit)
        if not ok or password=="":
            return
        filename, ext = os.path.splitext(self.filename)
        new_name = filename + "-locked.pdf"
        proc = Popen(["qpdf", "--encrypt", password, password, '128', '--', self.filename, new_name])
        stdout, stderr = proc.communicate()
        if proc.returncode == 0:
            basename = os.path.basename(new_name)
            notifier = Notifier(self)
            notifier.showNotification("Successful !", "File saved as\n"+basename)
        else:
            QMessageBox.warning(self, "Failed !", "Failed to save as Encrypted")
    def printFile(self):
        """ Hand the file over to the external quikprint program. """
        if which("quikprint")==None :
            QMessageBox.warning(self, "QuikPrint Required","Install QuikPrint program.")
            return
        Popen(["quikprint", self.filename])
    def exportToPS(self):
        """ Export the whole document to a PostScript file, using the current
        page's size as the paper size. """
        width = self.doc.page(self.current_page-1).pageSizeF().width()
        height = self.doc.page(self.current_page-1).pageSizeF().height()
        filename, sel_filter = QFileDialog.getSaveFileName(self, "Select File to Save",
                                       os.path.splitext(self.filename)[0]+'.ps',
                                      "Adobe Postscript Format (*.ps)" )
        if filename == '' : return
        conv = self.doc.psConverter()
        conv.setPaperWidth(width)
        conv.setPaperHeight(height)
        conv.setOutputFileName(filename)
        conv.setPageList([i+1 for i in range(self.pages_count)])
        ok = conv.convert()
        if ok:
            notifier = Notifier(self)
            notifier.showNotification("Successful !", "File has been successfully saved")
        else:
            QMessageBox.warning(self, "Failed !","Failed to export to Postscript")
    def exportPageToImage(self):
        """ Export a user-chosen page range as JPEG images at a chosen dpi. """
        dialog = ExportToImageDialog(self.current_page, self.pages_count, self)
        if dialog.exec_() == QDialog.Accepted:
            try:
                dpi = int(dialog.dpiEdit.text())
                for page_no in range(dialog.pageNoSpin.value(), dialog.toPageNoSpin.value()+1):
                    filename = os.path.splitext(self.filename)[0]+'-'+str(page_no)+'.jpg'
                    page = self.doc.page(page_no-1)
                    if not page : return
                    img = page.renderToImage(dpi, dpi)
                    img.save(filename)
                notifier = Notifier(self)
                notifier.showNotification("Successful !","Image(s) has been saved")
            except:
                QMessageBox.warning(self, "Failed !","Failed to export to Image")
    def docInfo(self):
        """ Show document metadata (plus current page size) in a dialog. """
        info_keys = list(self.doc.infoKeys())
        values = [self.doc.info(key) for key in info_keys]
        page_size = self.doc.page(self.current_page-1).pageSizeF()
        page_size = "%s x %s pts"%(page_size.width(), page_size.height())
        info_keys += ['Page Size']
        values += [page_size]
        dialog = DocInfoDialog(self)
        dialog.setInfo(info_keys, values)
        dialog.exec_()
    def jumpToCurrentPage(self):
        """ this is used as a slot, to connect with a timer"""
        self.jumpToPage(self.current_page)
    def jumpToPage(self, page_num, top=0.0):
        """ scrolls to a particular page and position """
        # page_num is clamped to 1..pages_count; top is a 0..1 page fraction
        if page_num < 1: page_num = 1
        elif page_num > self.pages_count: page_num = self.pages_count
        if not (0 < top < 1.0) : top = 0
        self.jumped_from = self.current_page
        self.current_page = page_num
        scrollbar_pos = self.pages[page_num-1].pos().y()
        scrollbar_pos += top*self.pages[page_num-1].height()
        self.scrollArea.verticalScrollBar().setValue(scrollbar_pos)
    def undoJump(self):
        """ Return to the page we were on before the last jump. """
        if self.jumped_from == None: return
        self.jumpToPage(self.jumped_from)
    def goNextPage(self):
        if self.current_page == self.pages_count : return
        self.jumpToPage(self.current_page + 1)
    def goPrevPage(self):
        if self.current_page == 1 : return
        self.jumpToPage(self.current_page - 1)
    def goFirstPage(self):
        self.jumpToPage(1)
    def goLastPage(self):
        self.jumpToPage(self.pages_count)
    def gotoPage(self):
        """ Jump to the page number typed in the toolbar line edit. """
        text = self.gotoPageEdit.text()
        if text=="" : return
        self.jumpToPage(int(text))
        self.gotoPageEdit.clear()
        self.gotoPageEdit.clearFocus()
    ###################### Zoom and Size Management ##########################
    def availableWidth(self):
        """ Returns available width for rendering a page """
        dock_width = 0 if self.dockWidget.isHidden() else self.dockWidget.width()
        return self.width() - dock_width - 50
    def resizePages(self):
        '''Resize all pages according to zoom level '''
        page_dpi = self.zoom_levels[self.zoomLevelCombo.currentIndex()]*SCREEN_DPI/100
        fixed_width = self.availableWidth()
        for i in range(self.pages_count):
            pg_width = self.doc.page(i).pageSizeF().width() # width in points
            pg_height = self.doc.page(i).pageSizeF().height()
            if self.zoomLevelCombo.currentIndex() == 0: # if fixed width
                dpi = 72.0*fixed_width/pg_width
            else: dpi = page_dpi
            self.pages[i].dpi = dpi
            self.pages[i].setFixedSize(pg_width*dpi/72.0, pg_height*dpi/72.0)
        # all cached renders are now the wrong size; drop them and re-render
        for page_no in self.rendered_pages:
            self.pages[page_no-1].clear()
        self.rendered_pages = []
        self.renderCurrentPage()
    def setZoom(self, index):
        """ Gets called when zoom level is changed"""
        self.scroll_render_lock = True  # rendering on scroll is locked while scroll pos is restored
        self.resizePages()
        QtCore.QTimer.singleShot(300, self.afterZoom)
    def zoomIn(self):
        index = self.zoomLevelCombo.currentIndex()
        if index == len(self.zoom_levels) - 1 : return
        # from "Fixed Width" (0), step into the percentage range
        if index == 0 : index = 3
        self.zoomLevelCombo.setCurrentIndex(index+1)
        self.setZoom(index+1)
    def zoomOut(self):
        index = self.zoomLevelCombo.currentIndex()
        if index == 1 : return
        if index == 0: index = 4
        self.zoomLevelCombo.setCurrentIndex(index-1)
        self.setZoom(index-1)
    def afterZoom(self):
        """ Restore scroll position onto the current page, then re-enable
        render-on-scroll. """
        scrolbar_pos = self.pages[self.current_page-1].pos().y()
        self.scrollArea.verticalScrollBar().setValue(scrolbar_pos)
        self.scroll_render_lock = False
    ######### Search Text #########
    def toggleFindMode(self, enable):
        """ Reset search state when the search dock is shown/hidden, clearing
        any leftover highlight on hide. """
        if enable:
            self.findTextEdit.setText('')
            self.findTextEdit.setFocus()
            self.search_text = ''
            self.search_result_page = 0
        elif self.search_result_page != 0:
            self.pages[self.search_result_page-1].highlight_area = None
            self.pages[self.search_result_page-1].updateImage()
    def findNext(self):
        """ search text in current page and next pages """
        text = self.findTextEdit.text()
        if text == "" : return
        # search from current page when text changed
        if self.search_text != text or self.search_result_page == 0:
            search_from_page = self.current_page
        else:
            search_from_page = self.search_result_page + 1
        self.findTextRequested.emit(text, search_from_page, False)
        if self.search_result_page != 0:    # clear previous highlights
            self.pages[self.search_result_page-1].highlight_area = None
            self.pages[self.search_result_page-1].updateImage()
        self.search_result_page = 0
        self.search_text = text
    def findBack(self):
        """ search text in pages before current page """
        text = self.findTextEdit.text()
        if text == "" : return
        if self.search_text != text or self.search_result_page == 0:
            search_from_page = self.current_page
        else:
            search_from_page = self.search_result_page - 1
        self.findTextRequested.emit(text, search_from_page, True)
        if self.search_result_page != 0:
            self.pages[self.search_result_page-1].highlight_area = None
            self.pages[self.search_result_page-1].updateImage()
        self.search_result_page = 0
        self.search_text = text
    def onTextFound(self, page_no, areas):
        """ Slot for Renderer.textFound: highlight the hits and scroll so the
        first one is visible. """
        self.pages[page_no-1].highlight_area = areas
        self.search_result_page = page_no
        if self.pages[page_no-1].pixmap():
            self.pages[page_no-1].updateImage()
        first_result_pos = areas[0].y()/self.doc.page(page_no-1).pageSize().height()
        self.jumpToPage(page_no, first_result_pos)
    ######### Copy Text to Clipboard #########
    def toggleCopyText(self, checked):
        self.frame.enableCopyTextMode(checked)
    def copyText(self, page_no, top_left, bottom_right):
        """ Copy the text inside the selected rectangle (widget pixels) to the
        clipboard, converting to page points first. """
        zoom = self.pages[page_no-1].height()/self.doc.page(page_no-1).pageSize().height()
        # Copy text to clipboard
        text = self.doc.page(page_no-1).text(QtCore.QRectF(top_left/zoom, bottom_right/zoom))
        QApplication.clipboard().setText(text)
        self.copyTextAction.setChecked(False)
        self.toggleCopyText(False)
    ########## Other Functions ##########
    def getOutlines(self, doc):
        """ Build the outline (table of contents) tree for the dock widget;
        hides the dock when the document has no TOC. """
        toc = doc.toc()
        if not toc:
            self.dockWidget.hide()
            return
        self.dockWidget.show()
        outline_model = QStandardItemModel(self)
        parent_item = outline_model.invisibleRootItem()
        node = toc.firstChild()
        loadOutline(doc, node, parent_item)
        self.treeView.setModel(outline_model)
        if parent_item.rowCount() < 4:
            self.treeView.expandToDepth(0)
        self.treeView.setHeaderHidden(True)
        self.treeView.header().setSectionResizeMode(0, 1)
        self.treeView.header().setSectionResizeMode(1, 3)
        self.treeView.header().setStretchLastSection(False)
    def onOutlineClick(self, m_index):
        """ Jump to the destination stored on the clicked outline item. """
        page_num = self.treeView.model().data(m_index, QtCore.Qt.UserRole+1)
        top = self.treeView.model().data(m_index, QtCore.Qt.UserRole+2)
        if not page_num: return
        self.jumpToPage(page_num, top)
    def showStatus(self, url):
        """ Show url/text in a floating label at the window bottom; empty text
        hides the label. """
        if url=="":
            self.statusbar.hide()
            return
        self.statusbar.setText(url)
        self.statusbar.adjustSize()
        self.statusbar.move(0, self.height()-self.statusbar.height())
        self.statusbar.show()
    def resizeEvent(self, ev):
        QMainWindow.resizeEvent(self, ev)
        if self.filename == '' : return
        # in Fixed Width mode pages must be re-laid-out; debounce via timer
        if self.zoomLevelCombo.currentIndex() == 0:
            self.resize_page_timer.start(200)
    def onWindowResize(self):
        """ Re-layout pages after a debounced window resize and remember the
        new window size. """
        for i in range(self.pages_count):
            self.pages[i].annots_listed = False  # Clears prev link annotation positions
        self.resizePages()
        wait(300)
        self.jumpToCurrentPage()
        if not self.isMaximized():
            self.settings.setValue("WindowWidth", self.width())
            self.settings.setValue("WindowHeight", self.height())
    def saveFileData(self):
        """ Record the current file's last-viewed page in history and move it
        to the front of the recent-files list. """
        if self.filename != '':
            filename = collapseUser(self.filename)
            if filename in self.history_filenames:
                index = self.history_filenames.index(filename)
                self.history_page_no[index] = self.current_page
            else:
                self.history_filenames.insert(0, filename)
                self.history_page_no.insert(0, self.current_page)
            if filename in self.recent_files:
                self.recent_files.remove(filename)
            self.recent_files.insert(0, filename)
    def closeEvent(self, ev):
        """ Save all settings on window close """
        self.saveFileData()
        self.settings.setValue("OffsetX", self.geometry().x()-self.x())
        self.settings.setValue("OffsetY", self.geometry().y()-self.y())
        self.settings.setValue("ZoomLevel", self.zoomLevelCombo.currentIndex())
        self.settings.setValue("HistoryFileNameList", self.history_filenames[:100])
        self.settings.setValue("HistoryPageNoList", self.history_page_no[:100])
        self.settings.setValue("RecentFiles", self.recent_files[:10])
        return QMainWindow.closeEvent(self, ev)
    def onAppQuit(self):
        """ Close running threads """
        # quit each render thread and block (via a local event loop) until it
        # has actually finished
        loop1 = QtCore.QEventLoop()
        loop2 = QtCore.QEventLoop()
        self.thread1.finished.connect(loop1.quit)
        self.thread2.finished.connect(loop2.quit)
        self.thread1.quit()
        loop1.exec_()
        self.thread2.quit()
        loop2.exec_()
def loadOutline(doc, node, parent_item):
    """loadOutline(Poppler::Document* doc, const QDomNode& node, QStandardItem* parent_item)

    Recursively converts the document's TOC (a QDomDocument tree) into
    QStandardItems: one row per entry, with a second right-aligned column
    showing the page number. Destination page/top are stashed in UserRole+1
    and UserRole+2 for onOutlineClick(). Recurses over both siblings (same
    level, appended to parent_item) and children (appended to this item). """
    element = node.toElement()
    item = QStandardItem(element.tagName())
    linkDestination = None
    if element.hasAttribute("Destination"):
        linkDestination = Poppler.LinkDestination(element.attribute("Destination"))
    elif element.hasAttribute("DestinationName"):
        linkDestination = doc.linkDestination(element.attribute("DestinationName"))
    if linkDestination:
        # NOTE: in some files page_num may be in range 1 -> pages_count,
        # also, top may be not in range 0.0->1.0, we have to take care of that
        page_num = linkDestination.pageNumber()
        top = linkDestination.top() if linkDestination.isChangeTop() else 0
        item.setData(page_num, QtCore.Qt.UserRole + 1)
        item.setData(top, QtCore.Qt.UserRole + 2)
        pageItem = item.clone()
        pageItem.setText(str(page_num))
        pageItem.setTextAlignment(QtCore.Qt.AlignRight)
        parent_item.appendRow([item, pageItem])
    else:
        parent_item.appendRow(item)
    # Load next sibling
    siblingNode = node.nextSibling()
    if not siblingNode.isNull():
        loadOutline(doc, siblingNode, parent_item)
    # Load its child
    childNode = node.firstChild()
    if not childNode.isNull():
        loadOutline(doc, childNode, item)
class Frame(QFrame):
    """ This widget is a container of PageWidgets. PageWidget communicates
        Window through this widget.

        Also implements drag-to-scroll: dragging anywhere on the frame moves
        the scroll area's scrollbars by the drag delta. """
    jumpToRequested = QtCore.pyqtSignal(int, float)     # (page_num, top fraction)
    copyTextRequested = QtCore.pyqtSignal(int, QtCore.QPoint, QtCore.QPoint)
    showStatusRequested = QtCore.pyqtSignal(str)
    # parent is scrollAreaWidgetContents
    def __init__(self, parent, scrollArea):
        QFrame.__init__(self, parent)
        self.setFrameShape(QFrame.StyledPanel)
        self.setFrameShadow(QFrame.Raised)
        self.vScrollbar = scrollArea.verticalScrollBar()
        self.hScrollbar = scrollArea.horizontalScrollBar()
        self.setMouseTracking(True)
        self.clicked = False            # True while a drag is in progress
        self.copy_text_mode = False     # mirrored by child PageWidgets
    def mousePressEvent(self, ev):
        # remember where the drag started, in global coords, plus the
        # scrollbar positions at that moment
        self.click_pos = ev.globalPos()
        self.v_scrollbar_pos = self.vScrollbar.value()
        self.h_scrollbar_pos = self.hScrollbar.value()
        self.clicked = True
    def mouseReleaseEvent(self, ev):
        self.clicked = False
    def mouseMoveEvent(self, ev):
        # scroll by the distance dragged since the press
        if not self.clicked : return
        self.vScrollbar.setValue(self.v_scrollbar_pos + self.click_pos.y() - ev.globalY())
        self.hScrollbar.setValue(self.h_scrollbar_pos + self.click_pos.x() - ev.globalX())
    def jumpTo(self, page_num, top):
        self.jumpToRequested.emit(page_num, top)
    def enableCopyTextMode(self, enable):
        self.copy_text_mode = enable
    def copyText(self, page_num, top_left, bottom_right):
        self.copyTextRequested.emit(page_num, top_left, bottom_right)
    def showStatus(self, msg):
        self.showStatusRequested.emit(msg)
class PageWidget(QLabel):
    """ This widget shows a rendered page.

    It also handles per-page mouse interaction: hovering/clicking link
    annotations, and rubber-band selection when the parent Frame is in
    copy-text mode. Unhandled mouse events are passed up to the Frame
    (which implements drag-to-scroll). """
    def __init__(self, page_num, frame=None):
        QLabel.__init__(self, frame)
        self.manager = frame            # parent Frame, used as mediator to Window
        self.setMouseTracking(True)
        self.setSizePolicy(0,0)
        self.link_areas = []            # QRectF hit areas, parallel to link_annots
        self.link_annots = []
        self.annots_listed, self.copy_text_mode = False, False
        self.click_point, self.highlight_area = None, None
        self.page_num = page_num
        self.image = QPixmap()          # last rendered image (may be null)
    def setPageData(self, page_no, pixmap, page):
        """ Store the rendered pixmap and (once) collect the page's link
        annotation rectangles in widget-pixel coordinates. """
        self.image = pixmap
        self.updateImage()
        if self.annots_listed : return
        annots = page.annotations()
        for annot in annots:
            if annot.subType() == Poppler.Annotation.ALink:
                x, y = annot.boundary().left()*pixmap.width(), annot.boundary().top()*pixmap.height()
                w, h = annot.boundary().width()*pixmap.width()+1, annot.boundary().height()*pixmap.height()+1
                self.link_areas.append(QtCore.QRectF(x,y, w, h))
                self.link_annots.append(annot)
        self.annots_listed = True
    def clear(self):
        """ Drop the pixmap to free memory (page can be re-rendered later). """
        QLabel.clear(self)
        self.image = QPixmap()
    def mouseMoveEvent(self, ev):
        # Draw rectangle when mouse is clicked and dragged in copy text mode.
        if self.manager.copy_text_mode:
            if self.click_point:
                pm = self.pm.copy()
                painter = QPainter()
                painter.begin(pm)
                painter.drawRect(QtCore.QRect(self.click_point, ev.pos()))
                painter.end()
                self.setPixmap(pm)
            return
        # Change cursor if cursor is over link annotation
        for i, area in enumerate(self.link_areas):
            if area.contains(ev.pos()):
                linkDest = self.link_annots[i].linkDestination()
                if not linkDest : continue
                # For jump to page link
                if linkDest.linkType() == Poppler.Link.Goto:
                    p = linkDest.destination().pageNumber()
                    self.manager.showStatus("Jump To Page : %i" % p)
                    self.setCursor(QtCore.Qt.PointingHandCursor)
                # For URL link
                elif linkDest.linkType() == Poppler.Link.Browse:
                    self.manager.showStatus("URL : %s" % linkDest.url())
                    self.setCursor(QtCore.Qt.PointingHandCursor)
                return
        self.manager.showStatus("")
        self.unsetCursor()
        ev.ignore()         # pass to underlying frame if not over link or copy text mode
    def mousePressEvent(self, ev):
        # In text copy mode
        if self.manager.copy_text_mode:
            self.click_point = ev.pos()
            self.pm = self.pixmap().copy()      # snapshot to draw the rubber band over
            return
        # In normal mode
        for i, area in enumerate(self.link_areas):
            if not area.contains(ev.pos()): continue
            link_dest = self.link_annots[i].linkDestination()
            if not link_dest : continue
            # For jump to page link
            if link_dest.linkType() == Poppler.Link.Goto:
                page_num = link_dest.destination().pageNumber()
                top = 0.0
                if link_dest.destination().isChangeTop():
                    top = link_dest.destination().top()
                self.manager.jumpTo(page_num, top)
            # For URL link
            elif link_dest.linkType() == Poppler.Link.Browse:
                url = link_dest.url()
                if url.startswith("http"):
                    confirm = QMessageBox.question(self, "Open Url in Browser",
                        "Do you want to open browser to open...\n%s" %url, QMessageBox.Yes|QMessageBox.Cancel)
                    if confirm == QMessageBox.Yes:
                        Popen(["x-www-browser", url])
            return
        ev.ignore()
    def mouseReleaseEvent(self, ev):
        # finish a copy-text selection: report the rectangle, restore the pixmap
        if self.manager.copy_text_mode:
            self.manager.copyText(self.page_num, self.click_point, ev.pos())
            self.setPixmap(self.pm)
            self.click_point = None
            self.pm = None
            return
        ev.ignore()
    def updateImage(self):
        """ repaint page widget, and draw highlight areas """
        if self.highlight_area:
            img = self.image.copy()
            painter = QPainter(img)
            # search rects are in page points; scale to rendered pixels
            zoom = self.dpi/72.0
            for area in self.highlight_area:
                box = QtCore.QRectF(area.left()*zoom, area.top()*zoom,
                                    area.width()*zoom, area.height()*zoom)
                painter.fillRect(box, QColor(0,255,0, 127))
            painter.end()
            self.setPixmap(img)
        else:
            self.setPixmap(self.image)
class Notifier(QSystemTrayIcon):
    """ One-shot tray icon used to show a balloon notification; it deletes
    itself when clicked, when the message is clicked, or after 4 seconds. """
    def __init__(self, parent):
        QSystemTrayIcon.__init__(self, QIcon(':/adobe.png'), parent)
        self.messageClicked.connect(self.deleteLater)
        self.activated.connect(self.deleteLater)
    def showNotification(self, title, message):
        self.show()
        # Wait for 200ms, otherwise notification bubble will showup in wrong position.
        wait(200)
        self.showMessage(title, message)
        QtCore.QTimer.singleShot(4000, self.deleteLater)
def wait(millisec):
    """ Block for the given number of milliseconds while keeping the Qt
    event loop running (unlike time.sleep, the GUI stays responsive). """
    loop = QtCore.QEventLoop()
    QtCore.QTimer.singleShot(millisec, loop.quit)
    loop.exec_()
def collapseUser(path):
    ''' Converts /home/user/file.ext to ~/file.ext .

    Only a true path-component match is collapsed: the previous
    startswith(HOMEDIR) check also matched sibling directories such as
    /home/username2/file, turning them into the wrong "~2/file".
    Returns *path* unchanged when it is not inside the home directory.
    '''
    home = os.path.expanduser('~')
    if path == home:
        return '~'
    if path.startswith(home + os.sep):
        return '~' + path[len(home):]
    return path
def elideMiddle(text, length):
if len(text) <= length: return text
return text[:length//2] + '...' + text[len(text)-length+length//2:]
def main():
app = QApplication(sys.argv)
win = Window()
if len(sys.argv)>1 and os.path.exists(os.path.abspath(sys.argv[-1])):
win.loadPDFfile(os.path.abspath(sys.argv[-1]))
app.aboutToQuit.connect(win.onAppQuit)
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| ksharindam/gospel-pdf-viewer | gospel_pdf/main.py | main.py | py | 40,027 | python | en | code | 7 | github-code | 13 |
13051186120 | from Encryption import encrypt
from Decryption import decrypt
choice=int(input("Enter Your Option\n 1. Encryption\n 2. Decryption\n"))
if choice==1:
encrypt()
elif choice==2:
decrypt()
else:
print("Please Enter Correct Option") | aaryanrlondhe/Encryption-Decryption | encrypt-decrypt.py | encrypt-decrypt.py | py | 249 | python | en | code | 0 | github-code | 13 |
73614030739 | from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/results', methods=['POST'])
def create_user():
print("submitted info")
print(request.form)
name_from_form = request.form['name']
dojo_location_from_form = request.form['dojo_location']
favoriate_language_from_form=request.form['favorite_language']
comment_from_form=request.form['comment']
return render_template("page.html", name_on_template=name_from_form,dojo_location_on_template=dojo_location_from_form,favoriate_language_on_template=favoriate_language_from_form,comment_on_template=comment_from_form)
if __name__=="__main__":
app.run(debug=True) | karmel-yacoub/python | python-stack/flask/flask_fundamentals/Dojo_Survey/survey.py | survey.py | py | 740 | python | en | code | 0 | github-code | 13 |
297879979 | #test_7_10.py
#调查问卷开始
print("------Polling-----")
active = True
responses = {}
while active:
name = input("\nWhat's your name?")
place = input("If you could visit one place in the world,where would you go? ")
responses[name] = place
repeat = input("Would you like to let another person respond? (Y/N)")
if repeat == 'N':
active = False
print("\n-----Poll Result-----")
for name,place in responses.items():
print(name + " would like to travel to " + place + ".")
| ZYC0515/LearnPython | Python编程从入门到实践/第七章学习/代码实现/test_7_10.py | test_7_10.py | py | 517 | python | en | code | 0 | github-code | 13 |
23322544929 | #!/usr/bin/env python
import time
from samplebase import SampleBase
from rgbmatrix import graphics
from datetime import datetime
from PIL import Image
import subprocess
class ImageScroller(SampleBase):
def __init__(self, *args, **kwargs):
super(ImageScroller, self).__init__(*args, **kwargs)
self.parser.add_argument("-i", "--image", help="The image to display", default="../../../examples-api-use/runtext.ppm")
def run(self):
if not 'image' in self.__dict__:
self.image = Image.open(self.args.image).convert('RGB')
self.image.resize((self.matrix.width, self.matrix.height), Image.ANTIALIAS)
double_buffer = self.matrix.CreateFrameCanvas()
img_width, img_height = self.image.size
font = graphics.Font()
font_time = graphics.Font()
# font.LoadFont("../../../fonts/mplus_h12r.bdf")
font_time.LoadFont("./fonts/21-Adobe-Helvetica.bdf")
# font.LoadFont("../../../fonts/15-Adobe-Helvetica.bdf")
font.LoadFont("./fonts/16-Adobe-Helvetica-Bold.bdf")
textColor = graphics.Color(245, 0, 111)
timeColor = graphics.Color(61, 147, 215)
pos = double_buffer.width
# let's scroll
xpos = -128
while True:
xpos += 2
if (xpos > img_width):
exit()
d = datetime.now()
h = (" " + str(d.hour))[-2:]
time_text = d.strftime("%H:%M:%S")
double_buffer.Clear()
# len = graphics.DrawText(offscreen_canvas, font, 4, 12, textColor, date_text)
#len1 = graphics.DrawText(double_buffer, font_time, 14, 30, timeColor, time_text)
double_buffer.SetImage(self.image, -xpos)
#double_buffer.SetImage(self.image, -xpos + img_width)
double_buffer = self.matrix.SwapOnVSync(double_buffer)
time.sleep(0.02)
# Main function
# e.g. call with
# sudo ./image-scroller.py --chain=4
# if you have a chain of four
if __name__ == "__main__":
out = "Welcome to ヨォドォバァシィカァメェラ。亲爱的顾客朋友、你们好。衷心欢迎您光临友都八喜。友都八喜是日本著名的大型购物中心。精明商品将近一百万种、数码相机、摄像机、名牌手表、化妆品、电子游戏、名牌箱包等应有尽有。最新的款式、最优惠的价格、最优质的服务。"
subprocess.call('sudo convert -background black -fill "#e8ad35" -font /usr/share/fonts/truetype/jfdot/JF-Dot-jiskan24.ttf -pointsize 30 label:"{0}" /home/pi/rpi-clock/sign.png'.format(out),shell=True)
subprocess.call('sudo python3 image-scroller.py --led-chain=1 --led-cols=64 -i sign.png -b 80 --led-no-hardware-pulse 1',shell=True)
image_scroller = ImageScroller()
if (not image_scroller.process()):
image_scroller.print_help()
| Sw-Saturn/rpi-clock | sign.py | sign.py | py | 2,878 | python | en | code | 0 | github-code | 13 |
33629296067 | fname = input("Enter file name: ")
fh = open(fname)
count = 0.0
nume = 0.0
for line in fh:
if not line.startswith("X-DSPAM-Confidence:") : continue
num = float(line[19:])
count = count + 1.0
nume = nume + num
result = nume/count
print("Average spam confidence: "+str(result))
| Marlon-Camacho/Python-Curses | secondtwoexercise.py | secondtwoexercise.py | py | 298 | python | en | code | 0 | github-code | 13 |
72995668179 | #!/usr/bin/python3
""" module that prins new lines after some characters
"""
def text_indentation(text):
"""function that prints a text with 2 new lines
after each of these characters: ., ? and :
Args:
text (str): string to print
"""
if type(text) is not str:
raise TypeError('text must be a string')
aux = text.replace(".", ".\n\n")
aux2 = aux.replace("?", "?\n\n")
aux = aux2.replace(":", ":\n\n")
aux2 = aux.rstrip(" ")
aux = aux2.lstrip(" ")
aux3 = aux.split("\n")
for i in range(len(aux3)):
aux3[i] = aux3[i].rstrip(" ")
aux3[i] = aux3[i].lstrip(" ")
print('\n'.join(aux3), end='')
| HeimerR/holbertonschool-higher_level_programming | 0x07-python-test_driven_development/5-text_indentation.py | 5-text_indentation.py | py | 676 | python | en | code | 1 | github-code | 13 |
41519585319 | # Cetvrta
# Solution by Hasan Kalzi 08-10-2020
# Link to problem in Kattis: https://open.kattis.com/problems/cetvrta
from sys import stdin, stdout
values = []
for i in range(3):
values.append(stdin.readline().strip().split())
if values[0][0] == values[1][0]:
x = values[2][0]
elif values[0][0] == values[2][0]:
x = values[1][0]
else:
x = values[0][0]
if values[0][1] == values[1][1]:
y = values[2][1]
elif values[0][1] == values[2][1]:
y = values[1][1]
else:
y = values[0][1]
stdout.write(x + ' ' + y)
| Hasan-Kalzi/Kattis-Python3 | src/Py2/Cetvrta.py | Cetvrta.py | py | 531 | python | en | code | 0 | github-code | 13 |
10861483075 | """
geometry.py
Contains a number of utility functions to expediate
3D geometric programming, with points specified as numpy arrays.
"""
import numpy as np
import math
from operator import add
from multithreading_help import *
def dist(p, q):
"""Returns the L2 distance between points p and q."""
return np.linalg.norm((p - q), 2)
def unit_vector(v):
"""Provided a vector v, returns the unit vector pointing
in the same direction as v."""
return v / np.linalg.norm(v, 2)
def center_of_mass(cloud):
"""Returns the center of mass, or average point,
in the point cloud 'cloud'."""
if type(cloud) is np.array:
return cloud.sum() / len(cloud)
else:
return reduce(add, cloud) / float(len(cloud))
def bounding_box(cloud):
"""Returns two points, opposing corners of the minimum
bounding box (orthotope) that contains the point cloud 'cloud'."""
minx = 0
maxx = 0
miny = 0
maxy = 0
minz = 0
maxz = 0
for p in cloud:
if p[0] < minx:
minx = p[0]
if p[0] > maxx:
maxx = p[0]
if p[0] < miny:
miny = p[0]
if p[0] > maxy:
maxy = p[0]
if p[0] < minz:
minz = p[0]
if p[0] > maxz:
maxz = p[0]
return np.array([[minx, miny, minz], [maxx, maxy, maxz]])
def estimate_max_diagonal(cloud):
"""Estimates the longest distance between two points in the point cloud
'cloud' in O(n), by computing the center of mass, then finding the point
farthest away from the COM, then finding the point farthest away from that
point, and returning the distance between these two points."""
com = center_of_mass(cloud)
p0 = farthest_from(com, cloud)
p1 = farthest_from(p0, cloud)
return np.linalg.norm(p1 - p0, 2)
def farthest_from(point, cloud):
"""Returns the point in cloud which is farthest from the point 'point'."""
furthest_from_pt = None
furthest_from_pt_distance = 0
for p in cloud:
p_distance = np.linalg.norm(point - p, 2)
if p_distance > furthest_from_pt_distance:
furthest_from_pt = p
furthest_from_pt_distance = p_distance
return furthest_from_pt
def single_rotation_matrix(theta, axis = 0):
"""Returns a 3x3 rotation matrix of theta radians about the provided axis;
0 = x, 1 = y, 2 = z."""
M = np.identity(3)
M[(axis + 1) % 3, (axis + 1) % 3] = math.cos(theta)
M[(axis + 2) % 3, (axis + 2) % 3] = math.cos(theta)
M[(axis + 2) % 3, (axis + 1) % 3] = math.sin(theta)
M[(axis + 1) % 3, (axis + 2) % 3] = -math.sin(theta)
return M
def rotation_matrix(theta_x, theta_y, theta_z):
"""Returns a 3x3 rotation matrix which rotates theta_x about the x-axis,
then theta_y about the y-axis, and then theta_z about the z-axis."""
return compose(single_rotation_matrix(theta_x, 0),
single_rotation_matrix(theta_y, 1),
single_rotation_matrix(theta_z, 2))
def arbitrary_axis_rotation(axis, theta):
"""Returns a 4x4 rotation matrix which rotates input vectors by theta
radians about the provided axis, using a right-handed coordinate system.
axis should be a numpy.ndarray., theta is a float. Uses the derivation
found at http://science.kennesaw.edu/~plaval/math4490/rotgen.pdf"""
import math
assert(not np.allclose(axis, np.zeros(3)))
x, y, z = unit_vector(axis)
C = math.cos(theta)
S = math.sin(theta)
t = 1.0 - C
result = np.array([[t*x**2 + C, t*x*y - S*z, t*x*z + S*y, 0],
[t*x*y + S*z, t*y**2 + C, t*y*z - S*x, 0],
[t*x*z - S*y, t*y*z + S*x, t*z**2 + C, 0],
[0, 0, 0, 1]])
assert(abs(np.linalg.det(result)) - 1.0 < 1e-3)
return result
def arbitrary_axis_rotation_at_arbitrary_origin(axis, origin, theta):
"""Returns a 4x4 affine transformation which rotates input vectors
by theta radians about the provided axis, centered at the provided
origin, using a right-handed coordinate system."""
return compose(translation_matrix(origin),
arbitrary_axis_rotation(axis, theta),
translation_matrix(-origin))
def unitary_matrix(M):
# must be square
assert(M.shape[0] == M.shape[1])
return M / (np.linalg.det(M) ** (1./M.shape[0]))
def get_unify_segments_matrix(a, b, c):
"""Returns a matrix which rotates the line segment bc to put it
on the line defined by segment ab."""
import math
# get first 3 components, if these are affine 4x1 vectors,
# so that cross is well defined.
a = vector_from_affine(a)
b = vector_from_affine(b)
c = vector_from_affine(c)
# unit_vector(cross) fails if they are identical,
# so in that case return the identity.
if np.allclose(np.array([0,0,0]), (a-b) - (c-b)):
return np.identity(4)
axis = unit_vector(np.cross(c-b, a-b))
origin = b
theta = math.acos(min(1.0, np.dot(unit_vector(a-b), unit_vector(c-b))))
return unitary_matrix(arbitrary_axis_rotation_at_arbitrary_origin(axis,
origin, theta))
def vector_to_affine(v):
result = np.ones(4)
result[0:3] = v.copy()
return result
def vector_from_affine(v):
return v[0:3].copy()
def to_affine(A,b):
"""Turns a 3x3 ndarray and 3x1 ndarray pair (A,b) into an
equivalent affine 4x4 matrix."""
result = np.identity(4)
result[0:3, 0:3] = A
result[0:3, 3] = b
return result
def from_affine(affine):
"""Given an affine transformation T, return the pair (A,b) such that the
action Tx on a 4d vector x is equivalent to the action Ax' + b on the 3d vector
x' produced by ignoring x's w component."""
return (affine[0:3, 0:3], affine[0:3, 3])
def translation_matrix(dv):
"""Returns a 4x4 affine translation matrix which translates each point by
the 3x1 vector dv."""
M = np.identity(4)
M[0:3, 3] = dv[0:3]
return M
def scaling_matrix(dv):
"""Returns a 4x4 affine scaling matrix which stretches the x axis by
dv[0], y axis by dv[1], and z axis by dv[2]."""
M = np.identity(4)
M[0, 0] = dv[0]
M[1, 1] = dv[1]
M[2, 2] = dv[2]
return M
def max_eigenvector(eigenvalues, eigenvectors):
"""Sorts the eigenvectors, then picks the eigenvector with the maximum eigenvalue."""
return eigenvectors[np.argsort(eigenvalues)[-1]]
def principal_axis(cloud):
"""Returns the principal axis of point cloud cloud, the eigenvectors
of its covariant matrix."""
center = center_of_mass(cloud)
centered = cloud - center
A = np.sum(np.outer(dif, dif) for dif in centered)
eigenvalues, eigenvectors = np.linalg.eig(A)
return max_eigenvector(eigenvalues, eigenvectors)
def principal_axes(cloud, n_components = 3):
"""Returns the principal components of the point cloud cloud
as an np.array.
Relies on the PCA decomposition implementation in sklearn."""
from sklearn.decomposition import PCA
pca = PCA(n_components = n_components)
pca.fit(cloud)
return pca.components_
# Tried to implement my own here, but
#def principal_axes(cloud):
# dimensions = len(cloud[0])
#
# def remove_component(cloud, component):
# return cloud - np.outer(np.dot(cloud, component), component)
#
# components = [np.array([0,0,0])]
#
# center = center_of_mass(cloud)
# copy = cloud.copy()
# copy = copy - center
#
# for d in range(dimensions):
# copy = remove_component(copy, components[-1])
#
# A = np.sum(np.outer(v, v) for v in copy)
# eigenvalues, eigenvectors = np.linalg.eig(A)
#
# principal = max_eigenvector(eigenvalues, eigenvectors)
# components.append(principal)
#
# return components[1:]
def promote(M, w = 1):
"""Promotes the 3x3 matrix M to a 4x4 matrix by just adding zeros. Entry
4,4 will be equal to w (useful for translation matrices)."""
A = np.zeros((4, 4))
A[0:3, 0:3] = M
A[3, 3] = w
return A
def apply_transform(M, X, cols = 4):
"""Apply the matrix M to all the vectors in X, which are row vectors.
Conveniently, it will apply M even if M is affine and X is comprised
of 3x1 vectors, using decompose_affine()."""
if cols == 3:
A, b = decompose_affine(M)
return np.dot(A, X.T).T + b
elif cols == 4:
return np.dot(M, X.T).T
def quaternion_to_rotation_matrix(q):
"""Returns a 3x3 matrix for rotation in R3 corresponding to the quaternion
vector q=[q0 q1 q2]."""
q0 = q[0]
q1 = q[1]
q2 = q[2]
q3 = q[3]
return np.array([
[q0**2 + q1**2 - q2**2 - q3**2, 2*(q1*q2 - q0*q3), 2*(q1*q3 + q0*q2)],
[2*(q1*q2 + q0*q3), q0**2 + q2**2 - q1**2 - q3**2, 2*(q2*q3 - q0*q1)],
[2*(q1*q3 - q0*q2), 2*(q2*q3 + q0*q1), q0**2 + q3**2 - q1**2 - q2**2]])
def compose(*args):
"""Composes input matrices into a single matrix by multiplying them
together from left to right."""
return reduce(np.dot, args)
def decompose_affine(M):
"""Returns a tuple, 3x3 rotation matrix A and 3x1 vector b, such that
Ax+b for 3x1 vector x will result in the same vector as M*[x0 x1 x2 1]."""
return M[0:3, 0:3].copy(), M[0:3, 3].copy()
def affine_transform_trimesh(mesh, M):
return transform_trimesh(mesh, lambda p: apply_transform(M, p, 3))
def transform_trimesh(mesh, func):
for i, vertex in enumerate(mesh.vs):
mesh.vs[i] = func(vertex)
mesh.positions_changed()
def mean_square_error(P, X):
"""Returns the sum of the L2 norms between equal-indiced points on P and
X, approximating the difference between the two point clouds."""
error = 0.0
for i in range(min(len(P), len(X))):
error += dist(P[i], X[i]) # L2 norm, see geometry.py
return error / len(P)
def nearest_neighbor_sampling_error(P, X, P_nearest_neighbors, sample_size = 1000):
"""Returns the sum of the L2 norms between a subset of closest points on P and X,
estimating the difference between the two point clouds."""
import random
sample_size = min(len(X), sample_size)
sample = np.array(random.sample(X, sample_size))
distances, indices = P_nearest_neighbors.kneighbors(sample)
return reduce(lambda arr, y: arr[0] + y, distances)[0] / sample_size
def nearest_neighbor_distance(point, P_nearest_neighbors):
return P_nearest_neighbors.kneighbors(np.array(point))[0][0][0]
def nearest_neighbor_index(point, P_nearest_neighbors):
return P_nearest_neighbors.kneighbors(np.array(point))[1][0][0]
def estimate_grasp_point(vs, sample_size = 5):
"""Estimates the grasp point on mesh by taking an average of the topmost
(highest Y) sample_size points on mesh."""
actual_sample_size = min(len(vs), sample_size)
import trimesh
vsarray = trimesh.asarray(vs)
return center_of_mass(vsarray[vsarray[:,1].argsort()][-actual_sample_size:])
def curvature_of_edge(he, mesh):
"""Computes the curvature at the provided halfedge of the TriMesh mesh."""
edge_indices = mesh.edges[he.edge]
if edge_indices[1] == he.to_vertex:
# flip the halfedge
he = mesh.halfedges[he.opposite_he]
return he.get_curvature(mesh)
def curvature_at_point(tup):
i, mesh = tup
"""Returns the curvature at the point at index i on mesh.
NOTE: If the point is on a boundary, returns +inf."""
if mesh.vertex_is_boundary(i):
return i, float("+inf")
vertex_neighbors = mesh.vertex_vertex_neighbors(i)
# some halfedges are directed outward, some inward. we want all of them.
he_neighbors = []
for vi in vertex_neighbors:
outgoing = mesh.directed_edge2he_index((i, vi))
ingoing = mesh.directed_edge2he_index((vi, i))
if outgoing:
he_neighbors.append(mesh.halfedges[outgoing])
if ingoing:
he_neighbors.append(mesh.halfedges[ingoing])
return i, np.linalg.norm(sum(map(lambda he: curvature_of_edge(he, mesh), he_neighbors))) / 2.0
def get_indices_of_high_curvature(mesh, cutoff_percentile = 0.25):
"""Returns an array of indices of the points on mesh whose absolute value
of mean curvature is above the cutoff_percentile, by default, this means
its curvature is higher than 25% of points on the mesh."""
work = [(i, mesh) for i in range(len(mesh.vs))]
curvature_of_points = map(curvature_at_point, work)
curvature_of_points.sort(key=lambda i: i[1])
cutoff_index = int(math.floor((1 - cutoff_percentile) * len(curvature_of_points)))
return map(lambda tup: tup[0], curvature_of_points[:cutoff_index])
def get_mesh_of_high_curvature(mesh, cutoff_percentile = 0.25):
"""Returns a copy of mesh which consists only of points whose absolute
value of mean curvature is above the cutoff_percentile, by default, this
means its curvature is higher than 25% of points on the mesh."""
from multiprocessing import Pool, cpu_count
copy = mesh.copy()
work = [(i, mesh) for i in range(len(mesh.vs))]
curvature_of_points = map(curvature_at_point, work)
curvature_of_points.sort(key=lambda i: i[1])
cutoff_index = int(math.floor((1 - cutoff_percentile) * len(curvature_of_points)))
copy.remove_vertex_indices(map(lambda tup: tup[0], curvature_of_points[:cutoff_index]))
return copy
| augustjd/identify | geometry.py | geometry.py | py | 13,475 | python | en | code | 1 | github-code | 13 |
9644852458 | # importamos la conexion a la base de datos
from conexion import db_leyes
# importamos las funciones necesarias para el CRUD
import funciones
# definimos el menu principal con las opciones del CRUD y la opcion de salir del sistema
def menuPrincipal():
continuar = True
while continuar:
opcionCorrecta = False
while not opcionCorrecta:
print("==================== MENÚ PRINCIPAL ====================")
print("1.- Listar leyes")
print("2.- Registrar ley")
print("3.- Actualizar ley")
print("4.- Eliminar ley")
print("5.- Salir")
print("========================================================")
opcion = int(input("Seleccione una opción: "))
if opcion < 1 or opcion > 5:
print("Opción incorrecta, ingrese nuevamente")
elif opcion == 5:
continuar = False
print("¡Gracias por usar este sistema!")
break
else:
opcionCorrecta = True
ejecutarOpcion(opcion)
# definimos el comportamiento segun la opcion elegida
def ejecutarOpcion(opcion):
DB_Leyes = db_leyes()
# opcion R del CRUD: READ(leer)
if opcion == 1:
try:
leyes = DB_Leyes.listarLeyes()
if len(leyes) > 0:
funciones.listarLeyes(leyes)
else:
print("No se encontraron leyes")
except:
print("Ocurrió un error")
# opcion C del CRUD: CREATE(crear)
elif opcion == 2:
ley = funciones.pedirDatosRegistro()
try:
DB_Leyes.registrarLey(ley)
except:
print("Ocurrió un error")
# opcion U del CRUD: UPDATE(modificar)
elif opcion == 3:
try:
ley = DB_Leyes.listarLeyes()
if len(ley) > 0:
ley = funciones.pedirDatosActualizacion(leyes)
if ley:
DB_Leyes.actualizarLey(ley)
else:
print("Número de ley a actualizar no encontrado\n")
else:
print("No se encontró la ley")
except:
print("Ocurrió un error")
# opcion D del CRUD: DELETE(eliminar)
elif opcion == 4:
try:
ley = DB_Leyes.listarLeyes()
if len(ley) > 0:
numeroEliminar = funciones.pedirDatosEliminacion(ley)
if not (numeroEliminar == ""):
DB_Leyes.eliminarLey(numeroEliminar)
else:
print("Número de ley no encontrado\n")
else:
print("No se encontró la ley")
except:
print("Ocurrió un error")
else:
print("Opción no válida")
menuPrincipal()
| KaybaMgk/CodeSquad | main.py | main.py | py | 2,835 | python | es | code | 0 | github-code | 13 |
19561279328 | # open one csv file
import pandas as pd
import easygui as eg
import numpy as np
import matplotlib.pyplot as plt
import os
def OpenCSVFiles(dirName):
directory = eg.diropenbox(default=dirName)
filenames = os.listdir(directory)
return directory, filenames
if __name__ == "__main__":
home_path = r'C:\Users\wahlm\Documents\School\Research\Allison'
directory, filenames = OpenCSVFiles(home_path)
# curve_labels = ['4x 1.15A', '4x 1.2A', '4x 1.25A', '4x 1.175A', '4x 1.2A', '4x 1.225A', '4x 1.25A']
curve_labels = ['11111111111111111110', '10101010101010101010',
'10000000000000000000', '10100100010000100000',
'11111111110000000000', '10001000100010001000']
# curve_powers_mW = [237, 248, 253, 250, 250, 250, 250, 250]
# curve_powers_mW = [240, 240, 240, 240, 240, 240, 240]
# curve_powers_nJ = [4, 4, 4, 4, 4, 4]
while directory is not None:
title, pulse_energy_nj = eg.multenterbox(fields = ["Title", "Pulse energy (nJ)"])
pulse_energy_nj = float(pulse_energy_nj)
# loop through all files in the directory
dfs = []
for filename in filenames:
# check if file ends with .csv or .txt
if filename.endswith('.csv') or filename.endswith('.txt') or filename.endswith('.CSV'):
df = pd.read_csv(os.path.join(directory, filename), skiprows=4)
dfs.append(df)
if filename.endswith('.xls') or filename.endswith('.xlsx'):
df = pd.read_excel(os.path.join(directory, filename), sheet_name=1)
dfs.append(df)
# Create a figure and axis object using matplotlib
fig, ax = plt.subplots(figsize=(20, 8))
spectra = []
top = 0
for i in range(len(dfs)):
df_new = dfs[i].iloc[:, :2] # select first two column
df_new_numeric = df_new.applymap(
lambda x: pd.to_numeric(x, errors='coerce')).dropna() # select only numerical rows
WaveLength = np.array(df_new_numeric.iloc[:, 0].values, dtype='float64')
SpectrumIntensity = np.array(df_new_numeric.iloc[:, 1].values, dtype='float64')
if SpectrumIntensity[int(len(SpectrumIntensity) / 2)] < 0:
SpectrumIntensity = np.power(10, SpectrumIntensity / 10)
# Normalize, assume even sample spacing
delta_lambda = WaveLength[1] - WaveLength[0]
integral = np.sum(SpectrumIntensity)
# SpectrumIntensity = SpectrumIntensity / integral * curve_powers_mW[i] / delta_lambda
SpectrumIntensity = SpectrumIntensity / integral * pulse_energy_nj / delta_lambda
if max(SpectrumIntensity) > top:
top = max(SpectrumIntensity)
spectra.append(np.array([WaveLength, SpectrumIntensity]))
ax.plot(WaveLength, SpectrumIntensity, label=curve_labels[i])
# Add axis labels and a legend
ax.set_xlabel('wavelength (nm)')
ax.set_ylabel('Spectral Energy per Pulse (nJ/nm)')
ax.legend()
ax.set_title(title)
plt.semilogy()
plt.ylim(top * 10 ** (-4), top)
# Display the plot
plt.show(block=False)
plt.tight_layout()
# response = eg.multenterbox(fields = ['Plot # (0 indexed)',
# 'Lower bound [nm]',
# 'Upper bound [nm]',
# 'Total power [mW]'])
# while response is not None:
# spectrum = spectra[int(response[0])]
# lower = int(response[1])
# upper = int(response[2])
# power = int(response[3])
# integrate_power(spectrum, lower, upper, power)
# response = eg.multenterbox(fields=['Plot # (0 indexed)',
# 'Lower bound [nm]',
# 'Upper bound [nm]',
# 'Total power [mW]'])
directory, filenames = OpenCSVFiles(os.path.split(home_path))
| Seabear-attack/AllisonLab | Plotting/spectrum_multiplotter.py | spectrum_multiplotter.py | py | 4,110 | python | en | code | 0 | github-code | 13 |
5941475856 | import sublime
import sublime_plugin
import os
GIT_BASE_MASTER_BRANCH_URL = "https://github.com/freshdesk/helpkit/tree/rails3-phase2/"
GIT_BASE_STAGING_BRANCH_URL = "https://github.com/freshdesk/helpkit/tree/staging/"
class LookAtMasterCommand(sublime_plugin.TextCommand):
def run(self, edit):
Utils.open_on_git(self.view, GIT_BASE_MASTER_BRANCH_URL)
class LookAtStagingCommand(sublime_plugin.TextCommand):
def run(self, edit):
Utils.open_on_git(self.view, GIT_BASE_STAGING_BRANCH_URL)
class Utils:
@staticmethod
def open_on_git(from_view, branch_url):
splitted_path = from_view.file_name().split("/helpkit/")
if(len(splitted_path) == 2):
(row,col) = from_view.rowcol(from_view.sel()[0].begin())
os.system('open '+branch_url+splitted_path[1]+"#L"+str(row+1)) | SmartChimp/GitCheck | GitCheck.py | GitCheck.py | py | 785 | python | en | code | 1 | github-code | 13 |
19563146189 | print("Consulta1")
alquiler = abrir_tabla(tablas,"alquiler")
cliente = abrir_tabla(tablas,"cliente")
pais = abrir_tabla(tablas,"pais")
print(pais)
VC = pd.merge (alquiler,cliente,left_on ='id_cliente',right_on='id_cliente',how='inner')
VC = pd.merge (VC,pais,left_on ='id_pais',right_on='id',how='inner')
df= pd.read_excel(tablas)
print(df.head())
resultado1 = df[['fecha_compra','monto','id_cliente','nombre_cliente','nombre_pais']]
print(VC[resultado1].to_string(index = False))
ax = resultado1.plot.bar(x ="id_cliente",y="nombre_pais", rot=0)
plt.show()
#df= pd.read_excel(tablas)
#print(df.head())
#valores = df[["id_cliente","edad"]]
#print(valores)
#ax =
valores.plot.bar(x="id_cliente", y="edad",rot = 0)
#
plt.show
()
#print("tabla 2")
#df1 = pd.read_excel(tablas, sheet_name='anime')
#print(df) | richardparra99/Programacion-IV | opencv/practica/proyecto.py | proyecto.py | py | 813 | python | pt | code | 0 | github-code | 13 |
15184213548 | from PyPDF2 import PdfFileWriter, PdfFileReader
import sys
from collections import Counter
pdfFile = sys.argv[1]
input = PdfFileReader(open(pdfFile, "rb"))
numOfPages = input.getNumPages()
for i in range(numOfPages):
page = input.getPage(i)
data = page.extractText()
data = data.split()
data = Counter(data)
print (data)
| HakubJozak/book-prereader | pdf.py | pdf.py | py | 342 | python | en | code | 2 | github-code | 13 |
41706720462 | # Brooke Czerwinski
# Homework 1
# Natural Language Processing - CS 410
# References:
# https://scikit-learn.org/stable/auto_examples/model_selection/gridSearch_text_feature_extraction.html
# https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
import numpy as np
import pandas as pd
import itertools
import time
import os
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from nltk.tokenize import word_tokenize
def parseJSON(fname, start_idx, stop_idx):
with open(fname, 'r') as text_file:
for line in itertools.islice(text_file, start_idx, stop_idx):
yield eval(line)
def countLines(fname):
count = 0
for line in open(fname, 'r'):
count += 1
return count
def _build_pipeline(classifier, tokenizer=None):
    """Return a CountVectorizer -> TfidfTransformer -> classifier Pipeline.

    Passing nltk's ``word_tokenize`` as *tokenizer* keeps punctuation as
    tokens; the default CountVectorizer tokenizer discards it.
    """
    return Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenizer)),
        ('tfidf', TfidfTransformer()),
        ('clf', classifier),
    ])


def _report_best_params(grid_search, parameters, model_label):
    """Print the grid-search winner for each searched parameter of one model."""
    best_parameters = grid_search.best_estimator_.get_params()
    print(model_label + ":")
    for parameter_name in sorted(parameters.keys()):
        print("\t%s: %r" % (parameter_name, best_parameters[parameter_name]))


def main():
    """Train and evaluate sarcasm-detection models on news headlines.

    Two classifier families (Multinomial Naive Bayes and Logistic Regression)
    are each trained twice: once with the default vectorizer (punctuation
    stripped) and once with nltk's ``word_tokenize`` (punctuation kept).
    An n-gram grid search is run for every variant, and accuracies plus
    classification reports are printed.
    """
    # Location of data (expected as a sibling checkout of the dataset repo).
    cwd = os.getcwd()
    data_file = cwd + '/News-Headlines-Dataset-For-Sarcasm-Detection/Sarcasm_Headlines_Dataset.json'

    # Split the dataset for training, testing, and validation (8:1:1 split, respectively)
    data_size = countLines(data_file)
    cutoff_1 = int(.80 * data_size)
    cutoff_2 = int(.90 * data_size)
    raw_training_data = parseJSON(data_file, 0, cutoff_1)
    raw_test_data = parseJSON(data_file, cutoff_1, cutoff_2)
    # NOTE(review): the validation slice is parsed but never used below;
    # kept so the 8:1:1 split stays documented in code.
    raw_validation_data = parseJSON(data_file, cutoff_2, data_size)

    # Materialize the generators into DataFrames for the sklearn pipelines.
    training_data = pd.DataFrame.from_dict(raw_training_data, orient='columns')
    test_data = pd.DataFrame.from_dict(raw_test_data, orient='columns')

    # Baseline models: default tokenization (punctuation stripped).
    baseline_NB = _build_pipeline(MultinomialNB())
    baseline_LR = _build_pipeline(LogisticRegression())
    baseline_NB.fit(training_data.headline, training_data.is_sarcastic)
    baseline_LR.fit(training_data.headline, training_data.is_sarcastic)

    # Get predictions with the set of test headlines.
    test_headlines = test_data.headline
    baseline_prediction_NB = baseline_NB.predict(test_headlines)
    baseline_prediction_LR = baseline_LR.predict(test_headlines)

    # Accuracy = mean agreement between predictions and labels.
    baseline_accuracy_NB = np.mean(baseline_prediction_NB == test_data.is_sarcastic)
    baseline_accuracy_LR = np.mean(baseline_prediction_LR == test_data.is_sarcastic)
    print("Baseline accuracy:")
    print('Naive Bayes: ', baseline_accuracy_NB)
    print('Logistic Regression: ', baseline_accuracy_LR)

    # Grid-search space: every contiguous n-gram range up to 5-grams.
    parameters = {
        "vect__ngram_range": ((1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
                              (2, 2), (2, 3), (2, 4), (2, 5),
                              (3, 3), (3, 4), (3, 5),
                              (4, 4), (4, 5),
                              (5, 5)
                              ),
    }

    print('\n\nPerforming Gridsearch to find best n-gram parameters for baseline models')
    # 10-fold cross-validated search over the parameter grid, all cores.
    grid_search_NB = GridSearchCV(baseline_NB, parameters, n_jobs=-1, verbose=1, cv=10)
    grid_search_NB.fit(training_data.headline, training_data.is_sarcastic)
    grid_search_LR = GridSearchCV(baseline_LR, parameters, n_jobs=-1, verbose=1, cv=10)
    grid_search_LR.fit(training_data.headline, training_data.is_sarcastic)

    # Report the results of the grid search.
    print("\n\nBest parameters for baselines:")
    _report_best_params(grid_search_NB, parameters, "Naive Bayes")
    _report_best_params(grid_search_LR, parameters, "Logistic Regression")

    ##########################
    # Models that include punctuation (nltk word_tokenize keeps punctuation tokens).
    punctuation_NB = _build_pipeline(MultinomialNB(), tokenizer=word_tokenize)
    punctuation_LR = _build_pipeline(LogisticRegression(), tokenizer=word_tokenize)
    punctuation_NB.fit(training_data.headline, training_data.is_sarcastic)
    punctuation_LR.fit(training_data.headline, training_data.is_sarcastic)

    # BUG FIX: these predictions previously came from the *baseline* models,
    # so the punctuation-included accuracies and reports silently duplicated
    # the baseline numbers.  Predict with the punctuation models instead.
    punctuation_prediction_NB = punctuation_NB.predict(test_headlines)
    punctuation_prediction_LR = punctuation_LR.predict(test_headlines)

    punctuation_accuracy_NB = np.mean(punctuation_prediction_NB == test_data.is_sarcastic)
    punctuation_accuracy_LR = np.mean(punctuation_prediction_LR == test_data.is_sarcastic)
    print("Punctuation-included accuracy:")
    print('Naive Bayes: ', punctuation_accuracy_NB)
    print('Logistic Regression: ', punctuation_accuracy_LR)

    # Grid searches for the punctuation-included models.
    print('\n\nPerforming Gridsearch to find best n-gram parameters for punctuation-included models')
    grid_search_NB = GridSearchCV(punctuation_NB, parameters, n_jobs=-1, verbose=1, cv=10)
    grid_search_NB.fit(training_data.headline, training_data.is_sarcastic)
    grid_search_LR = GridSearchCV(punctuation_LR, parameters, n_jobs=-1, verbose=1, cv=10)
    grid_search_LR.fit(training_data.headline, training_data.is_sarcastic)

    print("\n\nBest parameters for punctuation-included:")
    _report_best_params(grid_search_NB, parameters, "Naive Bayes")
    _report_best_params(grid_search_LR, parameters, "Logistic Regression")

    # Precision/recall/F1 breakdowns for all four models.
    print('\n\nNaive Bayes (baseline)')
    print(metrics.classification_report(test_data.is_sarcastic, baseline_prediction_NB))
    print('\n\nLogistic Regression (baseline)')
    print(metrics.classification_report(test_data.is_sarcastic, baseline_prediction_LR))
    print('\n\nNaive Bayes (WITH punctuation)')
    print(metrics.classification_report(test_data.is_sarcastic, punctuation_prediction_NB))
    print('\n\nLogistic Regression (WITH punctuation)')
    print(metrics.classification_report(test_data.is_sarcastic, punctuation_prediction_LR))
# Run the full training/evaluation pipeline only when executed as a script
# (not when imported as a module).
if __name__ == "__main__":
    main()
# wordMap[token].prevList[prev] = WordLink(prev)
# wordMap Hash table
# Key: token
# prevList Hash table
# Key: prevWord token | CzerPDX/cs410-nlp | hw1/hw1.py | hw1.py | py | 8,028 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.