blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bcc8ffc76dece06c64fe25ee405465282e64aca1 | Python | priyanshusankhala/Research-paper | /retrievewebtitles.py | UTF-8 | 8,811 | 2.9375 | 3 | [] | no_license | # ****************************************************************************************************************************************************************
# Batch Retrieve Web Titles From URLs
#
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------
# The code that gets the Web page titles is based on code from:
#
# Extract the title from a webpage using the python 3 standard lib - Code Review Stack Exchange
# https://codereview.stackexchange.com/questions/183160/extract-the-title-from-a-webpage-using-the-python-3-standard-lib
#!/usr/bin/python3
#-*-coding:utf8;-*-
#qpy:3
#qpy:console
# ^^^ NO IDEA WHAT THESE 3 LINES ARE??
import os
import re
import urllib
from urllib.request import urlopen
from html.parser import HTMLParser
from pathlib import Path
from urllib.request import Request
from urllib.error import URLError, HTTPError
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Time out process code from:
# Python 101: How to timeout a subprocess | The Mouse Vs. The Python
# https://www.blog.pythonlibrary.org/2016/05/17/python-101-how-to-timeout-a-subprocess/
import subprocess
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Continuation of code from
# Extract the title from a webpage using the python 3 standard lib - Code Review Stack Exchange
# https://codereview.stackexchange.com/questions/183160/extract-the-title-from-a-webpage-using-the-python-3-standard-lib
def error_callback(*_args, **_kwargs):
    """No-op hook: swallows any positional/keyword arguments.

    Assigned to ``Parser.error`` to silence HTMLParser error reporting.
    """
    return None
def is_string(data):
    """Return True when *data* is a text (``str``) object."""
    return isinstance(data, str)
def is_bytes(data):
    """Return True when *data* is a binary (``bytes``) object."""
    return isinstance(data, bytes)
def to_ascii(data):
    """Best-effort ASCII conversion that never raises.

    str input   -> ASCII-encoded bytes (non-ASCII characters dropped).
    bytes input -> ASCII-decoded str (non-ASCII bytes dropped).
    other input -> ``str()`` of the value, ASCII-encoded to bytes.

    On any failure a descriptive placeholder string is returned instead.
    (Fix: the original used bare ``except:`` clauses, which also swallow
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception``.)
    """
    if isinstance(data, str):
        try:
            data = data.encode('ascii', errors='ignore')
        except Exception:
            try:
                data = str(data).encode('ascii', errors='ignore')
            except Exception:
                try:
                    data = str(data)
                except Exception:
                    data = "(could not encode data string)"
    elif isinstance(data, bytes):
        try:
            data = data.decode('ascii', errors='ignore')
        except Exception:
            try:
                data = str(data).encode('ascii', errors='ignore')
            except Exception:
                try:
                    data = str(data)
                except Exception:
                    data = "(could not encode data bytes)"
    else:
        # Anything else (ints, None, objects...) becomes ASCII bytes of its str().
        try:
            data = str(data).encode('ascii', errors='ignore')
        except Exception:
            data = "(could not encode data)"
    return data
class Parser(HTMLParser):
    """HTMLParser subclass that fetches a URL and captures its <title> text.

    After construction, ``self.title`` holds the title text, or None if the
    page had no <title> element.  On fetch errors the error message itself is
    fed through the parser, so ``title`` stays None unless the message happens
    to contain markup.
    """
    def __init__(self, url):
        """Fetch *url* (5-second timeout) and parse it immediately.

        url: address passed straight to urllib.request.urlopen.
        """
        self.title = None
        # rec flags that we are currently inside a <title> element.
        self.rec = False
        HTMLParser.__init__(self)
        try:
            # Added urlopen Timeout parameter so script doesn't freeze up:
            #self.feed(to_ascii(urlopen(url).read()))
            self.feed(to_ascii(urlopen(url, None, 5).read()))
        except Exception as err:
            # Fallback: feed the error text so construction never raises here.
            # Not sure if I am handling exception right, script sometimes dies here:
            try:
                self.feed(str(err))
            except:
                self.feed("(unknown error in urlopen)")
        self.rec = False
        # Silence HTMLParser's error hook with a no-op.
        # NOTE(review): HTMLParser.error was removed in Python 3.10 — confirm
        # this assignment is still needed on the target interpreter.
        self.error = error_callback
    def handle_starttag(self, tag, attrs):
        # Start recording when the <title> tag opens.
        if tag == 'title':
            self.rec = True
    def handle_data(self, data):
        # While inside <title>, remember the (last) text chunk seen.
        if self.rec:
            self.title = data
    def handle_endtag(self, tag):
        # Stop recording once </title> closes.
        if tag == 'title':
            self.rec = False
def get_title(url):
    """Fetch *url* and return its page title.

    Returns None when the page has no <title> element, or the literal
    string "(unknown error in Parser)" when parsing itself blows up.
    (Fix: bare ``except:`` narrowed to ``except Exception`` so
    KeyboardInterrupt still aborts the batch run.)
    """
    try:
        return Parser(url).title
    except Exception:
        return "(unknown error in Parser)"
def fileLen(sFilePath):
    """Count the number of lines in a text file.

    Tries the platform-default encoding first, then UTF-8.
    Returns -1 when the file cannot be decoded either way.
    Raises OSError (e.g. FileNotFoundError) if the file cannot be
    opened at all — same as the original behaviour.
    (Fix: files are now closed deterministically via ``with`` instead of
    being leaked to the garbage collector.)
    """
    try:
        with open(sFilePath) as f:
            return sum(1 for _ in f)
    except UnicodeDecodeError:
        try:
            with open(sFilePath, encoding="utf8") as f:
                return sum(1 for _ in f)
        except Exception:
            return -1
def getFileEncoding(sFilePath):
    """Crudely classify a file's encoding by attempting to read it.

    Returns "ascii" when the platform-default decoding succeeds (note:
    the default codec is locale-dependent, not strictly ASCII — name kept
    for compatibility), "utf8" when only UTF-8 succeeds, else "other".
    (Fix: files are now closed deterministically via ``with`` instead of
    being leaked to the garbage collector.)
    """
    try:
        with open(sFilePath) as f:
            for _ in f:
                pass
        return "ascii"
    except UnicodeDecodeError:
        try:
            with open(sFilePath, encoding="utf8") as f:
                for _ in f:
                    pass
            return "utf8"
        except Exception:
            return "other"
def getTitles(sInputFile, sStatus):
    """Read URLs (one per line) from *sInputFile*, fetch each page title,
    and write "url<TAB>title" lines to "<input>.out.txt".

    sInputFile: path to a text file of URLs (str or Path).
    sStatus: progress prefix printed before each line counter.
    Returns a one-line summary string describing the run.
    """
    sResult = ""
    iLineNum = 0
    iCount = 0
    iTitle = 0
    iNull = 0
    iTimeouts = 0  # NOTE(review): never incremented anywhere — confirm intent
    if Path(sInputFile).is_file():
        sInputFile = str(sInputFile)
        sOutputFile = sInputFile.replace(".txt", ".out.txt")
        iLineCount = fileLen(sInputFile)
        print("File \"" + sInputFile + "\" has " + str(iLineCount) + " lines.")
        sEncoding = getFileEncoding(sInputFile)
        if (sEncoding == "ascii"):
            print("File encoding = ASCII")
            fIn = open(sInputFile, "r")
        elif (sEncoding == "utf8"):
            print("File encoding = UTF8")
            fIn = open(sInputFile, "r", encoding="utf8")
        else:
            print("*** File encoding unknown ***")
            # Bug fix: fIn was previously left unbound on this branch,
            # causing a NameError below.  Fall back to a lossy read so
            # processing can continue.
            fIn = open(sInputFile, "r", errors="replace")
        fOut = open(sOutputFile, "w+", encoding="utf-8")
        try:
            fLines = fIn.readlines()
            for sLine in fLines:
                iLineNum += 1
                # repr() + strip dance: show escapes, then peel quotes and
                # literal \n / \r sequences from either end of the URL.
                sLine = repr(str(sLine))
                sLine = sLine.lstrip('\'')
                sLine = sLine.rstrip('\'')
                sLine = sLine.strip('\\n')
                sLine = sLine.strip('\\r')
                sLine = sLine.strip('\\n')
                if sLine != "":
                    iCount += 1
                    sTitle = get_title(sLine)
                    if sTitle is None:
                        iNull += 1
                        sTitle = ''
                    else:
                        iTitle += 1
                    # If title is blank then just use the URL as the description for now.
                    if str(sTitle) == "":
                        sTitle = sLine
                    # Collapse newlines/whitespace runs so output stays one line per URL.
                    sTitle = sTitle.replace('\n', ' ').replace('\r', ' ')
                    sTitle = re.sub(r'\s+', ' ', sTitle).strip()
                    print(sStatus + "Line " + str(iLineNum) + " of " + str(iLineCount))
                    fOut.write(sLine + '\t' + sTitle + '\n')
                else:
                    print(str(iLineNum) + " of " + str(iLineCount) + ": (Skipping blank line.)")
        finally:
            # Close both handles even if a fetch/parse blows up mid-loop.
            fIn.close()
            fOut.close()
        sResult = "Retrieved " + str(iTitle) + " titles, " + str(iNull) + " empty, " + str(iTimeouts) + " timeouts, " + "from \"" + sInputFile + "\", output to \"" + sOutputFile + "\"."
    else:
        sResult = "File \"" + sInputFile + "\" not found."
    return sResult
# END getTitles
# ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
def main():
    """Fetch web titles for each hardcoded link list in the "url" subfolder."""
    script_dir = os.path.dirname(__file__)  # absolute dir the script is in
    subfolder = "url"
    # For now the file names are hardcoded.
    # TODO: automatically process all *.txt files in "url" folder that
    # don't end in ".out.txt".
    file_names = ["links1.txt", "links2.txt", "links3.txt"]
    total = str(len(file_names))
    for index, name in enumerate(file_names, start=1):
        status = "File " + str(index) + " of " + total + ", "
        # Build the absolute path; Path() normalises the mixed forward/back
        # slashes that os.path.join produces on Windows.
        full_path = str(Path(os.path.join(script_dir, subfolder, name)))
        # Get the web titles for all the urls in the file:
        summary = getTitles(full_path, status)
        # Output a summary of results for the current file.
        print(str(index) + ". " + summary)
    print("Done.")
if __name__== "__main__":
main() | true |
308c3666fc7aa99b75ca34435381fb61a8992f10 | Python | kuroneko-hornet/ToStudy-Atcorder | /abc125_0427/code.py | UTF-8 | 1,445 | 3.265625 | 3 | [] | no_license |
# f = open('input.txt')
# args = f.readlines()
# args = [i.strip('\n') for i in args]
# a = int(args[0])
# b, c = [int(i) for i in args[1].split()]
# s = int(args[2])
# print(f'{a+b+c} {s}')
# A
'''
a, b, t = map(int, input().split())
print(b*(t//a))
'''
# B
# n = int(input())
# v = [int(i) for i in input().split()]
# c = [int(i) for i in input().split()]
# r = 0
# for i,val in enumerate(v):
# if val > c[i]:
# r += val - c[i]
# print(r)
# C
from time import time
def fermat(n):
    """Fermat probable-prime test with base 2.

    Returns 1 when n is a base-2 Fermat probable prime, otherwise 0.
    Evens (including 2) and 1 report 0, matching the original behaviour
    relied on by the caller's counter.

    Bug fix: the original fell through and implicitly returned None for
    odd composites that fail the test, which made the caller's
    ``prime_n += fermat(...)`` raise TypeError.
    """
    if n % 2 == 0 or n == 1:
        return 0
    if pow(2, n - 1, n) == 1:
        return 1
    return 0
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a
def get_gcd(x, n):
    """Fold the GCD over the first *n* entries of *x*.

    Mirrors the original exactly: x[0] and x[1] are always combined
    first, so n >= 2 (and len(x) >= 2) is effectively required.
    """
    def _euclid(a, b):
        # local Euclidean helper (iterative form of the module's gcd)
        while b:
            a, b = b, a % b
        return a

    element = _euclid(x[0], x[1])
    for idx in range(2, n):
        element = _euclid(element, x[idx])
    return element
# Read the element count and the array itself from stdin.
n = int(input())
a = [int(i) for i in input().split()]
prime_n = 0
# With fewer than 3 elements, just print the largest value and quit.
# (exit(print(...)) prints, then exits with code None.)
if n < 3:
    exit(print(max(a)))
interim_max = 0
for i in range(n-1):
    # Running count of base-2 Fermat probable primes seen so far;
    # presumably >2 primes means the best achievable gcd is 1 — TODO confirm.
    prime_n += fermat(a[i])
    if prime_n > 2:
        exit(print(1))
    # GCD of all elements except a[i] (element i removed).
    max_gcd = get_gcd(a[:i]+a[i+1:], n-1)
    if interim_max < max_gcd:
        interim_max = max_gcd
print(interim_max)
# index = i
# a[index] = interim_max
# m = float('inf')
# for i in range(n-1):
# diff = sum([abs(a[i]-j) for j in a])
# if m > diff:
# index = i
# m = diff
# if False: #ๅคใใชใๆ
# print()
# else:
# a[index] = a[index+1]
# print(a)
# print(reduce(math.gcd, a))
# D
| true |
6ee34ce917eaac3a545b1d3a309a03ea857611c4 | Python | neiloconnor/pyspark-runjob | /example_job.py | UTF-8 | 265 | 2.734375 | 3 | [] | no_license | from pyspark import SparkContext
sc = SparkContext()
# Create the initial RDD from file hosted on GCP Storage
initial_rdd = sc.textFile('gs://neiloconnor-pyspark-bucket/example_data.txt')
# Simply print the contents of the file
print(initial_rdd.collect()) | true |
d9eaf9252e6974c1d380629dd176fb68db7bd474 | Python | atosystem/midi2Tiles | /pianoTileCreator.py | UTF-8 | 11,041 | 3.046875 | 3 | [
"MIT"
] | permissive | """pianoTileCreator
Author: Ian Shih
Email: yjshih23@gmail.com
Date: 2021/09/19
"""
from tqdm import tqdm, trange
import matplotlib.animation as manimation
import matplotlib.pyplot as plt
from miditoolkit.midi import parser as midi_parser
import numpy as np
import matplotlib
matplotlib.use("Agg")
class KB_key():
    """One piano key plus the falling tiles for all of its MIDI notes.

    Holds the key's on-screen rectangle and, for every note assigned to the
    key, a falling-tile rectangle whose vertical position is derived from the
    note's start/end ticks and the tile velocity.
    """
    def __init__(self, midi_num, rect, video_height, kb_top, tile_velocity, ticks_per_sec, key_color="green", showKeyVelocity=False, isSharp=False, notes=None):
        """Initialize Key objects
        Args:
            midi_num (int): the corressponding midi number of the piano key
            rect (matplotlib patch): the rectangle for representing piano key
            video_height (int): the height of the output video
            kb_top (int): the upper boundary of the piano keyboard
            tile_velocity (int): the velocity of the falling tiles
            ticks_per_sec (float): ticks per second
            key_color (str, optional): the color of tiles. Defaults to "green".
            showKeyVelocity (bool, optional): if True, the velocity of midi notes will affect the opacity of tiles. Defaults to False.
            isSharp (bool, optional): whether this key is sharp. Defaults to False.
            notes (list, optional): list of miditoolkit notes. Defaults to None.
        """
        self._midi_num = midi_num
        self._rect = rect
        self._notes = notes
        self._current_tick = 0  # not read anywhere in this class
        self._isSharp = isSharp
        self._tiles = []
        self._kbtop = kb_top
        self._key_color = key_color
        self._showKeyVelocity = showKeyVelocity
        self._tile_velocity = tile_velocity
        self._ticks_per_sec = ticks_per_sec
        self._video_height = video_height
        self.createTiles()
    def createTiles(self):
        """create tiles
        """
        self._tiles = []
        for n in self._notes:
            # Map each note's start/end ticks to y positions above the keyboard.
            start_y_pos = n.start / self._ticks_per_sec * self._tile_velocity + self._kbtop
            end_y_pos = n.end / self._ticks_per_sec * self._tile_velocity + self._kbtop
            if self._showKeyVelocity:
                # MIDI velocity (0-127) mapped to tile opacity.
                alpha = n.velocity / 127
            else:
                alpha = 1
            _rect = plt.Rectangle((self._rect.get_x(), start_y_pos),
                                  self._rect.get_width(), end_y_pos-start_y_pos,
                                  facecolor=self._key_color, alpha=alpha)
            # state 0(not appeared)->1(present)->2(done)
            self._tiles.append(
                {"state": 0, "rect": _rect, "start_y_pos": start_y_pos, "initial_w": end_y_pos-start_y_pos})
    def update(self, tick):
        """update the animation
        Called every frame
        Args:
            tick (int): the current midi tick
        """
        # update piano tiles
        for t in range(len(self._tiles)):
            # move position
            if self._tiles[t]["state"] <= 1:
                if self._tiles[t]["rect"].get_y() <= self._kbtop:
                    # Tile reached the keyboard: shrink it from the bottom
                    # instead of moving it further down.
                    self._tiles[t]["rect"].set_height(max(
                        0, self._tiles[t]["initial_w"] - self._kbtop + self._tiles[t]["start_y_pos"] - self._tile_velocity * tick / self._ticks_per_sec))
                else:
                    self._tiles[t]["rect"].set_y(
                        self._tiles[t]["start_y_pos"] - self._tile_velocity * tick / self._ticks_per_sec)
            # state transitiion
            if self._tiles[t]["state"] == 1:
                # Fully consumed: remove the patch from the axes.
                if self._tiles[t]["rect"].get_height() <= 0:
                    self._tiles[t]["rect"].remove()
                    self._tiles[t]["state"] = 2
            elif self._tiles[t]["state"] == 0:
                # Entered the visible frame: add the patch to the axes.
                if self._tiles[t]["rect"].get_y() <= self._video_height:
                    plt.gca().add_patch(self._tiles[t]["rect"])
                    self._tiles[t]["state"] = 1
        # update piano display
        # A note is "sounding" when its [start, end) interval covers this tick.
        current_note = list(
            filter(lambda n: n.start <= tick and n.end > tick, self._notes))
        if len(current_note):
            self._rect.set_facecolor(self._key_color)
            if self._showKeyVelocity:
                self._rect.set_alpha(current_note[0].velocity / 127)
        else:
            # No note sounding: restore the key's natural colour.
            if self._showKeyVelocity:
                self._rect.set_alpha(1)
            if self._isSharp:
                self._rect.set_facecolor("black")
            else:
                self._rect.set_facecolor("white")
class PianoTileCreator():
    """Renders a MIDI file as a falling-tiles piano video via matplotlib + ffmpeg.

    Usage: construct, then loadMidiFile(), then render().
    """
    def __init__(self, video_width, video_height, video_dpi, video_fps, KB_ratio, tile_velocity, key_color, showKeyVelocity):
        """piano tile creator
        Args:
            video_width (int): the width of the output video (px)
            video_height (int): the height of the output video (px)
            video_dpi (int): the dpi of the output video (px)
            video_fps (int): the fps of the output video (px)
            KB_ratio (float): the proportion of the piano keyboard display. This value should be in [0,1)
            tile_velocity (int): the velocity of the falling tiles
            key_color (string): the color of the tiles
            showKeyVelocity (bool): if True, the velocity of midi notes will affect the opacity of the tiles
        Raises:
            ValueError: KB_ratio not in [0,1)
        """
        self.FFMpegWriter = manimation.writers['ffmpeg']
        metadata = dict(title='', artist='',
                        comment='By piano tile creator')
        # NOTE(review): `bitrate` is fed the dpi value here — looks unintended;
        # confirm whether a real bitrate was meant.
        self.writer = self.FFMpegWriter(
            fps=video_fps, metadata=metadata, bitrate=video_dpi)
        # Figure size is specified in inches = pixels / dpi.
        self.fig = plt.figure(
            figsize=(video_width/video_dpi, video_height/video_dpi), dpi=video_dpi)
        self.vid_width = video_width
        self.vid_height = video_height
        self.vid_dpi = video_dpi
        self.vid_fps = video_fps
        self.key_color = key_color
        if KB_ratio >= 0 and KB_ratio < 1:
            # Top edge of the keyboard strip, in video pixels.
            self.kb_top = KB_ratio * self.vid_height
        else:
            raise ValueError(
                "KB_ratio is expected to be in [0,1), {} found".format(KB_ratio))
        self.tile_velocity = tile_velocity
        self.showKeyVelocity = showKeyVelocity
        self.all_key_objs = []
    def init_fig(self):
        """initialize figure settings

        Called from loadMidiFile(); init_keys() needs self.ticks_per_sec,
        which is only computed there.
        """
        # remove margins
        self.fig.subplots_adjust(
            left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
        # set up position
        self.ax = plt.axes(xlim=(0, self.vid_width), ylim=(0, self.vid_height))
        # hide axis
        self.ax.axis('off')
        self.init_keys()
    def init_keys(self):
        """initialize keyboard

        Draws the 88-key layout (52 white + 36 black keys) and builds one
        KB_key object per key, initially with no notes.
        """
        # draw keyboard
        # draw top line
        kb_top_line = plt.Line2D(
            (0, self.vid_width), (self.kb_top, self.kb_top), lw=1)
        plt.gca().add_line(kb_top_line)
        all_key_rects = []
        # whitekeys
        white_key_width = float(self.vid_width) / 52
        self.white_key_width = white_key_width
        for x in range(52):
            _rect = plt.Rectangle((x * white_key_width, 0), white_key_width,
                                  self.kb_top, facecolor="white", edgecolor="black", lw=0.1)
            plt.gca().add_patch(_rect)
            all_key_rects.append(_rect)
        # black keys
        # Offsets within each white key (zeros mark gaps between B/C and E/F);
        # presumably measured from a real keyboard in 22.15 mm units — TODO confirm.
        black_key_pattern = [16.69, 0, 13.97, 16.79, 0, 12.83, 14.76]
        black_key_pattern = [x / 22.15 *
                             white_key_width for x in black_key_pattern]
        black_key_height = self.kb_top * 80 / 126.27
        black_key_width = white_key_width * 11 / 22.15
        for x in range(51):
            if black_key_pattern[x % len(black_key_pattern)]:
                black_key_offset = black_key_pattern[x % len(
                    black_key_pattern)]
                _rect = plt.Rectangle((x * white_key_width + black_key_offset, self.kb_top -
                                       black_key_height), black_key_width, black_key_height, facecolor="black", lw=None)
                plt.gca().add_patch(_rect)
                all_key_rects.append(_rect)
        # Left-to-right order == ascending MIDI number, starting at A0 (21).
        all_key_rects = sorted(all_key_rects, key=lambda k: k.get_x())
        self.all_key_objs = [KB_key(midi_num=21+i,
                                    rect=k,
                                    video_height=self.vid_height,
                                    kb_top=self.kb_top,
                                    tile_velocity=self.tile_velocity,
                                    ticks_per_sec=self.ticks_per_sec,
                                    key_color=self.key_color,
                                    isSharp=k.get_width() < white_key_width,
                                    showKeyVelocity=self.showKeyVelocity,
                                    notes=[]
                                    )
                             for i, k in enumerate(all_key_rects)]
    def loadMidiFile(self, midiFilePath, verbose=False):
        """load midi file
        Args:
            midiFilePath (string): midi file path
            verbose (bool, optional): show message. Defaults to False.
        """
        # parse midi
        self.midi_obj = midi_parser.MidiFile(midiFilePath)
        if verbose:
            print("Midi file loaded : {}".format(midiFilePath))
        self.all_notes = []
        for ins in self.midi_obj.instruments:
            self.all_notes.extend(ins.notes)
        # sort notes
        self.all_notes = sorted(
            self.all_notes, key=lambda n: (n.start, -n.pitch))
        # total_duration (secs)
        # Only the first tempo change is used; tempo changes mid-piece are ignored.
        self.ticks_per_sec = self.midi_obj.ticks_per_beat * \
            self.midi_obj.tempo_changes[0].tempo / 60
        self.total_duration = self.all_notes[-1].end / self.ticks_per_sec
        self.ticks_per_frame = self.ticks_per_sec / self.vid_fps
        if verbose:
            print("Estimated Video Total Duration {:.2f} secs".format(
                self.total_duration))
        self.init_fig()
        # distribute notes into keyboard keys
        for n in self.all_notes:
            # check pitch range (A0..C8 = the 88 drawn keys)
            if n.pitch >= 21 and n.pitch <= 108:
                self.all_key_objs[n.pitch-21]._notes.append(n)
        for x in self.all_key_objs:
            x.createTiles()
    def render(self, outputFilePath, verbose=False):
        """render piano tiles video to disk
        Args:
            outputFilePath (string): output video file path
            verbose (bool, optional): show message. Defaults to False.
        """
        if verbose:
            print("Start Rendering (total {} frames)".format(
                int(self.total_duration * self.vid_fps) + 2))
        with self.writer.saving(self.fig, outputFilePath, self.vid_dpi):
            # One frame per video frame; advance the MIDI clock accordingly.
            for i in trange(0, int(self.total_duration * self.vid_fps) + 2):
                tick = i * self.ticks_per_frame
                for k in self.all_key_objs:
                    k.update(tick)
                self.writer.grab_frame()
        if verbose:
            print("Done rendering")
            print("File saved {}".format(outputFilePath))
| true |
a31a2b9c12f20a2194f4f02236947134fb86f856 | Python | peanut996/Leetcode | /Python/Leetcode_Contest_0418.py | UTF-8 | 1,969 | 3.1875 | 3 | [] | no_license | #!/usr/bin/python3.7
from typing import List
class Solution:
    def minCount(self, coins: List[int]) -> int:
        """LCP 06: each pile may lose 1 or 2 coins per move; return the
        minimum number of moves to empty all piles.

        A pile of size c needs ceil(c / 2) moves.
        Bug fix: the original used true division (``i/2``) for even piles,
        so the result was a float; this always returns an int.
        """
        return sum((c + 1) // 2 for c in coins)

    def numWays(self, n: int, relation: List[List[int]], k: int) -> int:
        """LCP 07: count the paths of exactly k edges from node 0 to
        node n-1 in the directed graph described by *relation*.
        """
        # Adjacency list: des[u] holds every v with an edge u -> v.
        des = [[] for _ in range(n)]
        for src, dst in relation:
            des[src].append(dst)

        def dfs(step: int, candidates: List[int]) -> int:
            # candidates = nodes reachable after `step` edges along one prefix.
            if step > k:
                return 0
            if step == k and n - 1 in candidates:
                return 1
            total = 0
            for node in candidates:
                total += dfs(step + 1, des[node])
            return total

        return dfs(1, des[0])

    def getTriggerTime(self, increase: List[List[int]], requirements: List[List[int]]) -> List[int]:
        """LCP 08: for each requirement, return the earliest day whose
        cumulative (C, R, H) stats all meet it, or -1 if never.

        The running totals are monotone non-decreasing (daily increases
        are non-negative), so each requirement is answered with a
        binary search for the first satisfying day.
        """
        c = r = h = 0
        status = [[0, 0, 0]]  # status[d] = cumulative stats after day d
        for dc, dr, dh in increase:
            c, r, h = c + dc, r + dr, h + dh
            status.append([c, r, h])
        res = [-1] * len(requirements)
        length = len(status)
        for i, req in enumerate(requirements):
            # Find the smallest day index satisfying all three stats.
            left, right = 0, length - 1
            while left <= right:
                mid = left + (right - left) // 2
                if (status[mid][0] >= req[0] and status[mid][1] >= req[1]
                        and status[mid][2] >= req[2]):
                    res[i] = mid
                    right = mid - 1
                else:
                    left = mid + 1
        return res
def main():
    """Smoke-test Solution.getTriggerTime against the sample case."""
    increase = [[0, 4, 5], [4, 8, 8], [8, 6, 1], [10, 10, 0]]
    requirements = [[12, 11, 16], [20, 2, 6], [9, 2, 6], [10, 18, 3], [8, 14, 9]]
    expected = [-1, 4, 3, 3, 3]
    result = Solution().getTriggerTime(increase, requirements)
    try:
        assert result == expected
    except AssertionError:
        # Wrong answer: show what was actually produced.
        print(result)
        print('่งฃ็ญ้่ฏฏ')
if __name__ == '__main__':
main()
| true |
a913fdccf45c3162400e8e44b598e83bb94eafb4 | Python | speknet/pyspy | /pyspy-client.py | UTF-8 | 3,625 | 2.6875 | 3 | [
"MIT"
] | permissive | if __name__ == '__main__':
import socket
import threading
import Queue
import time
import win32gui, win32ui, win32con, win32api, Image, time, base64, socket
import hashlib
import sys
def rightclickmouse(x,y):
print "Right click here: "+str(x),str(y)
win32api.SetCursorPos((x,y))
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN,x,y,0,0)
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP,x,y,0,0)
def clickmouse(x,y):
win32api.SetCursorPos((x,y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)
def doscreenshot():
hwin = win32gui.GetDesktopWindow()
width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
hwindc = win32gui.GetWindowDC(hwin)
srcdc = win32ui.CreateDCFromHandle(hwindc)
memdc = srcdc.CreateCompatibleDC()
bmp = win32ui.CreateBitmap()
bmp.CreateCompatibleBitmap(srcdc, width, height)
memdc.SelectObject(bmp)
memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)
bmp.SaveBitmapFile(memdc, "bitmap.bmp")
im = Image.open("bitmap.bmp")
win32gui.DeleteObject(bmp.GetHandle())
memdc.DeleteDC()
srcdc.DeleteDC()
win32gui.ReleaseDC(hwin, hwindc)
im.save("screenshot.jpg","JPEG",quality=30)
with open("screenshot.jpg","rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
encoded_string = encoded_string + '@'
return encoded_string
def mouseChannel():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.56.1', 4444))
s.send("MOUSETRANSFER")
while True:
incomingData = s.recv(1024)
if '@' in incomingData:
coord = incomingData[:incomingData.find('@')]
xy = coord.split(',')
if(xy[2]=='1'):
clickmouse(int(xy[0]),int(xy[1]))
elif(xy[2]=='2'):
rightclickmouse(int(xy[0]),int(xy[1]))
else:
print 'Mouse action error: '+incomingData
def keyChannel():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.56.1', 4444))
s.send("KEYTRANSFER")
while True:
incomingData = s.recv(10)
if '!+!' in incomingData:
keypress = incomingData[:incomingData.find('!+!')]
print 'Got key: '+keypress
def streamChannel():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.56.1', 4444))
s.send("SCREENTRANSFER")
while True:
x = doscreenshot()
s.send(x)
time.sleep(0.1)
threads = []
keyThread = threading.Thread(target=keyChannel)
keyThread.setDaemon(True)
threads.append(keyThread)
keyThread.start()
mouseThread = threading.Thread(target=mouseChannel)
mouseThread.setDaemon(True)
threads.append(mouseThread)
mouseThread.start()
streamThread = threading.Thread(target=streamChannel)
streamThread.setDaemon(True)
threads.append(streamThread)
streamThread.start()
keyThread.join()
mouseThread.join()
streamThread.join()
| true |
7a39a5336fb69465797234d913aa93aa4dde32ec | Python | bright-night-sky/algorithm_study | /CodeUp/ํ์ด์ฌ ํ์ด/1116๋ฒ ; ์ฌ์น์ฐ์ฐ ๊ณ์ฐ๊ธฐ.py | UTF-8 | 523 | 3.640625 | 4 | [] | no_license | # https://codeup.kr/problem.php?id=1116
# Import stdin so input can be read with readline.
from sys import stdin
# Read the two integers, separated by a space.
# Convert each token to an integer.
a, b = map(int, stdin.readline().split())
# Print each arithmetic result in the required output format.
print(f'{a}+{b}={a + b}')
print(f'{a}-{b}={a - b}')
print(f'{a}*{b}={a * b}')
# For division, print only the integer quotient (no decimal point).
print(f'{a}/{b}={a // b}') | true |
5c089b69d8475af53f1018d1f2c14c85353c34fe | Python | Jarvis-K/MSc_Curiosity_MARL | /intrinsic_rewards/intrinsic_reward.py | UTF-8 | 1,383 | 2.859375 | 3 | [] | no_license | import random
import numpy as np
import torch
class IntrinsicReward:
    """Base class for intrinsic-reward (exploration bonus) modules.

    Subclasses must implement :meth:`compute_intrinsic_reward`; the
    default :meth:`get_losses` reports no training losses.
    """
    def __init__(self, state_size, action_size, eta=2, discrete_actions=False):
        """Store the configuration shared by all intrinsic-reward modules.

        :param state_size: dimension of the state input
        :param action_size: dimension of the action input
        :param eta: weighting factor applied to the curiosity loss
        :param discrete_actions: True when actions are one-hot encoded
        """
        self.state_size = state_size
        self.action_size = action_size
        self.eta = eta
        self.discrete_actions = discrete_actions

    def compute_intrinsic_reward(self, state, action, next_state, use_cuda, train=False):
        """Compute the intrinsic reward for a transition (or batch of them).

        :param state: (batch of) current state(s)
        :param action: (batch of) applied action(s)
        :param next_state: (batch of) next/reached state(s)
        :param use_cuda: True to compute with CUDA tensors
        :param train: True to also train the underlying model
        :return: (batch of) intrinsic reward(s)
        """
        raise NotImplementedError

    def get_losses(self):
        """Return the losses of the last computation (none for the base class)."""
        return []
| true |
06ee397df7fce5b3b1281c121cba5c735efab85a | Python | pnmfonseca/RotorHazard | /src/server/ANSIPixel.py | UTF-8 | 1,608 | 3.0625 | 3 | [
"MIT"
] | permissive | '''Dummy LED layer.'''
from colorama import init, Fore, Cursor
def Color(red, green, blue):
    """Pack 8-bit red, green and blue components into one 24-bit value.

    Each component should be 0-255, where 0 is the lowest intensity and
    255 is the highest intensity.
    """
    packed = red << 16
    packed |= green << 8
    packed |= blue
    return packed
class ANSIPixel:
    """Terminal stand-in for an LED pixel strip: each pixel is rendered as a
    coloured '*' using colorama ANSI codes.

    The constructor takes the same argument list as a hardware strip driver
    but only `count` is actually used — presumably so this class can be
    swapped in as a drop-in replacement; confirm against the caller.
    NOTE: this module is Python 2 (print statement in show()).
    """
    def __init__(self, count, pin, freq, dma, invert, brightness, channel, strip):
        '''Constructor'''
        # Only the pixel count matters for terminal rendering; the other
        # hardware parameters are accepted and ignored.
        self.pixels = [0 for i in range(count)]
    def begin(self):
        # colorama autoreset restores the default colour after each print.
        init(autoreset=True)
    def numPixels(self):
        return len(self.pixels)
    def setPixelColor(self, i, color):
        # Unpack the 24-bit colour into 8-bit channels.
        r = color >> 16 & 0xff
        g = color >> 8 & 0xff
        b = color & 0xff
        # Map the RGB value onto one of colorama's 16 colours; colours that
        # match no rule leave the pixel unchanged (early return below).
        if color == 0:
            c = Fore.BLACK
        elif r == 255 and g == 255 and b == 255:
            c = Fore.WHITE
        elif r == 255 and g == 0 and b == 0:
            c = Fore.LIGHTRED_EX
        elif r == 0 and g == 255 and b == 0:
            c = Fore.LIGHTGREEN_EX
        elif r == 0 and g == 0 and b == 255:
            c = Fore.LIGHTBLUE_EX
        elif r > 128 and g > 128 and b < 128:
            c = Fore.LIGHTYELLOW_EX
        elif r > 128 and g < 128 and b > 128:
            c = Fore.LIGHTMAGENTA_EX
        elif r < 128 and g > 128 and b > 128:
            c = Fore.LIGHTCYAN_EX
        elif r > 0 and g == 0 and b == 0:
            c = Fore.RED
        elif r == 0 and g > 0 and b == 0:
            c = Fore.GREEN
        elif r == 0 and g == 0 and b > 0:
            c = Fore.BLUE
        elif r > 0 and g > 0 and b == 0:
            c = Fore.YELLOW
        elif r > 0 and g == 0 and b > 0:
            c = Fore.MAGENTA
        elif r == 0 and g > 0 and b > 0:
            c = Fore.CYAN
        else:
            return
        self.pixels[i] = c
    def show(self):
        # Move the cursor home and redraw the whole strip on one line.
        print Cursor.POS() + ''.join(p+'*' for p in self.pixels)
| true |
63311dd2261a28571d51249694a5a465995c2675 | Python | nemanja1995/Linear-Regression-and-Logistic-Regression-Visualization | /Logistic_regression/create_2_class_rand_dataset.py | UTF-8 | 2,949 | 3.546875 | 4 | [] | no_license | """
Creating random dastaset (x1, x2) pairs for Logistic regression. It creates data for two classes,
with 2 features (x1 feature, and x2 feature).
It is used to create simple, easily visualised dataset for visualising Logistic regression.
"""
import numpy as np
import matplotlib.pyplot as plt
def create_dataset(num_data_per_class=100, plot_data=False, plot_features=False):
    """
    Create 2-feature data for 2 separable classes, suitable for training and
    visualising a Logistic regression that separates the two clusters.

    Bug fix: the label array was previously reshaped with a hardcoded
    ``(200, 1)``, which crashed for any num_data_per_class != 100; the shape
    is now derived from the requested size.

    :param num_data_per_class: number of (x1, x2) pairs generated per class.
        Total number of examples is num_data_per_class * 2.
    :param plot_data: if True, plot the (x1, x2) feature pairs per class.
    :param plot_features: if True, plot each feature against its class label.
    :return:
        data - shuffled (num_data_per_class * 2, 2) array of feature pairs.
        classes - matching (num_data_per_class * 2, 1) array of labels.
        class1_pairs, class2_pairs - the unshuffled [x1, x2] arrays per class.
    """
    # Class 0: noisy sine + quadratic trend, shifted up.
    class_1_x1 = np.linspace(1.0, 10.0, num_data_per_class)[:, np.newaxis]
    class_1_x2 = np.sin(class_1_x1) + 0.1*np.power(class_1_x1, 2) + 0.5*np.random.randn(num_data_per_class, 1) + 2
    class_1_x1 /= np.max(class_1_x1)
    y1 = np.zeros(shape=(num_data_per_class, 1))
    # Class 1: noisy cosine + quadratic trend, shifted down.
    class_2_x1 = np.linspace(1.0, 10.0, num_data_per_class)[:, np.newaxis]
    class_2_x2 = np.cos(class_2_x1) + 0.1*np.power(class_2_x1, 2) + 0.8*np.random.randn(num_data_per_class, 1) - 2
    class_2_x1 /= np.max(class_2_x1)
    y2 = np.ones(shape=(num_data_per_class, 1))
    # Plot x1, x2 pairs.
    if plot_data:
        plt.plot(class_1_x1, class_1_x2, 'o', label='class 1')
        plt.plot(class_2_x1, class_2_x2, 'x', label='class 2')
        plt.legend()
        plt.show()
    # Plot each feature against the class label.
    if plot_features:
        plt.close()
        plt.plot(class_1_x1, y1, 'o', label='class 1')
        plt.plot(class_2_x1, y2, 'x', label='class 2')
        plt.legend()
        plt.show()
        plt.close()
        plt.plot(class_1_x2, y1, 'o', label='class 1')
        plt.plot(class_2_x2, y2, 'x', label='class 2')
        plt.legend()
        plt.show()
    y = np.append(y1, y2)
    x1 = np.append(class_1_x1, class_2_x1)
    x2 = np.append(class_1_x2, class_2_x2)
    x = np.zeros(shape=(num_data_per_class*2, 2))
    x[:, 0] = x1
    x[:, 1] = x2
    # Shuffle features and labels with the same permutation.
    idx = np.random.permutation(x.shape[0])
    data, classes = x[idx, :], y[idx]
    classes = np.reshape(classes, newshape=(num_data_per_class * 2, 1))
    class1_pairs = [class_1_x1, class_1_x2]
    class2_pairs = [class_2_x1, class_2_x2]
    return data, classes, class1_pairs, class2_pairs
if __name__ == "__main__":
create_dataset(100, True, True)
| true |
5dd09e3956999a682aad1cba5908600cb879b6ce | Python | bastienboutonnet/AccPred | /devExp/testFilePresence.py | UTF-8 | 2,082 | 2.546875 | 3 | [] | no_license | import os, re, time
import pandas as pd
#go through trial list sent IDs
#for each ID, combined with each speaker and relatedness is there a file with that name.
#if not print the combination in a text file.
# missing=open('missingSentences.txt','w')
#
# def writeToFile(fileHandle,trial,sync=True):
# """Writes a trial (array of lists) to a fileHandle"""
# line = '\t'.join([str(i) for i in trial]) #TABify
# line += '\n' #add a newline
# fileHandle.write(line)
# if sync:
# fileHandle.flush()
# os.fsync(fileHandle)
t=time.strftime("%m%d%H%M")
def findFiles(folder, fileList, expOrCont='exp'):
    """Report stimulus .wav files missing from *folder*.

    For every sentence ID in the CSV *fileList*, builds the expected file
    names — the 'control' condition when expOrCont == 'cont', otherwise
    'related'/'unrelated' — for both speakers ('Nat' and 'nonNat'), and
    appends each name not found in *folder* to missingSentences_<t>.txt
    (t is the module-level timestamp).

    Improvements: the four near-identical branches are collapsed into one
    loop (same output order as before), directory membership uses a set
    instead of repeated list scans, and the report handle no longer
    shadows the builtin ``file``.
    """
    # Set gives O(1) membership tests instead of scanning a list per name.
    filenames = set(os.listdir(folder))
    if expOrCont == 'cont':
        sentList = pd.read_csv(fileList)
        suffixes = ['control']
    else:
        # Experimental lists are stored as UTF-16 CSV exports.
        sentList = pd.read_csv(fileList, encoding='utf-16')
        suffixes = ['related', 'unrelated']
    with open('missingSentences_' + t + '.txt', 'a') as report:
        for curID in sentList['sentID']:
            # Same order as the original: Nat first, then nonNat,
            # each suffix in turn.
            for speaker in ('Nat', 'nonNat'):
                for suffix in suffixes:
                    name = speaker + '_' + str(curID) + '_' + suffix + '.wav'
                    if name not in filenames:
                        report.write(name + '\n')
if __name__ == "__main__":
findFiles('/Users/boutonnetbpa/Dropbox/3.CurrentProjects/AThEME/accentedSpeech/AccPred/devExp/stimuli','../database/120allAbove70.csv',expOrCont='exp')
| true |
04ed0f0198bcf6d08c4759001c7a94adfc70b2b5 | Python | Party4Bread/SunrinWellProblem-Approach | /0128Approach/compcomp.py | UTF-8 | 3,516 | 2.671875 | 3 | [] | no_license | import numpy as np
from scipy.spatial.distance import cdist, euclidean
from scipy.spatial import ConvexHull
import random
import timeit
from time import gmtime, strftime
import matplotlib.pyplot as plt
def ternary_search_X0(p,s,a,e):
    """Ternary-search the x0 coordinate (x1 held fixed at s[1]) that
    minimises the summed distance to the points in p.

    Args (hedged where not evident from this file):
        p: (n, 2) array of fixed points.
        s: current candidate point (x0, x1).
        a: absolute interval width at which the search stops.
        e: initial search half-width around s[0].
    Returns:
        np.array([x0*, s[1]]) with the refined first coordinate.
    """
    # s: point(x0,x1), idx: variable argument
    l = s[0]-e
    r = s[0]+e
    xi = s[1]
    while True:
        # Probe the two interior third-points of [l, r].
        lp = l+(r-l)/3
        rp = r-(r-l)/3
        lv = sum_distances(np.array([[lp, xi]]),p)
        rv = sum_distances(np.array([[rp, xi]]),p)
        # Shrink toward whichever side has the smaller objective value.
        if lv <= rv:
            r = rp
        if lv >= rv:
            l = lp
        if r-l < a:
            break
    return np.array([l,xi])
def ternary_search_X1(p,s,a,e):
    """Ternary-search the x1 coordinate (x0 held fixed at s[0]) that
    minimises the summed distance to the points in p.

    Mirror of ternary_search_X0 for the second coordinate.
    Returns np.array([s[0], x1*]) with the refined second coordinate.
    """
    # s: point(x0,x1), idx: variable argument
    l = s[1]-e
    r = s[1]+e
    xi = s[0]
    while True:
        # Probe the two interior third-points of [l, r].
        lp = l+(r-l)/3
        rp = r-(r-l)/3
        lv = sum_distances(np.array([[xi, lp]]),p)
        rv = sum_distances(np.array([[xi, rp]]),p)
        # Shrink toward whichever side has the smaller objective value.
        if lv <= rv:
            r = rp
        if lv >= rv:
            l = lp
        if r-l < a:
            break
    return np.array([xi,l])
def cordinate_descent(p,a,e):
    """Coordinate descent for the point minimising the summed distance to
    p (the geometric median): alternately refine x0 and x1 by ternary
    search until the iterate moves less than tolerance a.

    (Name keeps the original spelling for API compatibility.)
    """
    # Start from the centroid of the points.
    X = np.sum(p,0) / p.shape[0]
    while True:
        Xt = ternary_search_X0(p,X,a,e)
        Y = ternary_search_X1(p,Xt,a,e)
        # How far the iterate moved this round.
        diff = cdist([X],[Y])
        X = Y
        if diff < a:
            break
    return X
def sum_distances(x, p):
    """Total Euclidean distance from every point of p to each candidate in x.

    x: (m, d) array of candidate points; p: (n, d) array of fixed points.
    Returns a 1-D array of length m with one distance total per candidate.
    """
    return cdist(p, x).sum(axis=0)
def k_objfunc(x, k, func, maxiter=1e5):
    """Generic k-means-style clustering with a pluggable centre function.

    x: (n, d) array of points; k: number of clusters; func: callable that
    maps one cluster's member points to its new centre; maxiter caps the
    number of reassignment rounds.
    Returns (evpoints, labels): the (k, d) centres and each point's
    cluster index.
    """
    curiter = 0
    Tm = np.min(x, 0)
    TM = np.max(x, 0)
    # Initialise centres uniformly at random inside the data's bounding box.
    # (x.shape[1] generalises the original hard-coded dimensionality of 2.)
    evpoints = ((TM - Tm) * np.random.sample((k, x.shape[1]))) + Tm
    labels = np.argmin(cdist(x, evpoints), axis=1)
    while curiter < maxiter:
        curiter += 1
        # Move each centre to func() of its members; empty clusters keep
        # their previous centre.
        for i in range(k):
            iassoc = x[labels == i]
            if len(iassoc) > 0:
                evpoints[i] = func(iassoc)
        newlabels = np.argmin(cdist(x, evpoints), axis=1)
        if (labels == newlabels).all():
            break  # converged: no point changed cluster
        labels = np.copy(newlabels)
    return evpoints, labels
def k_objfunc2(x, k, func, maxiter=1e5):
    """Variant of k_objfunc that seeds the centres with k randomly chosen
    input points (Forgy initialisation) instead of uniform random points.

    x: (n, d) array of points; k: number of clusters; func: callable that
    maps one cluster's member points to its new centre; maxiter caps the
    number of reassignment rounds.
    Returns (evpoints, labels).
    """
    curiter = 0
    # Pick k rows of x as the starting centres.  (The bounding-box locals
    # Tm/TM computed by the original were dead code and are removed.)
    # NOTE(review): evpoints inherits x's dtype, so integer input would
    # silently truncate fractional centres -- presumably x is float; confirm.
    evpoints = np.random.permutation(x)[:k]
    labels = np.argmin(cdist(x, evpoints), axis=1)
    while curiter < maxiter:
        curiter += 1
        # Move each centre to func() of its members; empty clusters keep
        # their previous centre.
        for i in range(k):
            iassoc = x[labels == i]
            if len(iassoc) > 0:
                evpoints[i] = func(iassoc)
        newlabels = np.argmin(cdist(x, evpoints), axis=1)
        if (labels == newlabels).all():
            break  # converged: no point changed cluster
        labels = np.copy(newlabels)
    return evpoints, labels
def main():
    # Benchmark the two initialisation strategies: run each clustering
    # four times on the same random point cloud and print the total
    # point-to-centre distance (lower is better).
    from functools import partial
    p=np.random.sample((2000,2))*1000
    Tm = np.min(p,0)
    TM = np.max(p,0)
    e = max(TM[0] -Tm[0], TM[1]-Tm[1])
    # Freeze the tolerance and search window so cordinate_descent can serve
    # as the per-cluster centre function expected by k_objfunc/k_objfunc2.
    cdobj=partial(cordinate_descent,a=1e-5,e=e)
    for i in range(4):
        ev,l=k_objfunc(p,10,cdobj)
        dist=np.sum(sum_distances(ev,p))
        print(dist)
    for i in range(4):
        ev,l=k_objfunc2(p,10,cdobj)
        dist=np.sum(sum_distances(ev,p))
        print(dist)
if __name__=="__main__":
    main()
77d44d64c181176f11dc56b9970cf014f0df17c0 | Python | Dohwee-Kim/image_merge_tool | /gui_basic/reference_pys/6_checkbox.py | UTF-8 | 613 | 3.109375 | 3 | [] | no_license | from tkinter import *
# Demo window: two independent "do not show again" checkboxes whose 0/1
# state lives in IntVar holders, plus (below) a button that reads them back.
root = Tk()
root.title("My GUI")
root.geometry("640x480")
chkvar = IntVar() # holds the first checkbox state as an int (0 or 1)
chkbox = Checkbutton(root, text="Do not show again today", variable=chkvar)
#chkbox.select() #auto selected
#chkbox.deselect() #de select
chkbox.pack()
chkvar2 = IntVar() # holds the second checkbox state as an int (0 or 1)
chkbox2 = Checkbutton(root, text="Do not show again this week", variable=chkvar2)
chkbox2.pack()
def btncmd():
    # Report the current 0/1 state of both checkboxes.  The original body
    # was entirely commented out, leaving `def` with no statement at all
    # (an IndentationError at import time), so the prints are restored.
    print(chkvar.get())
    print(chkvar2.get())
# Wire the button to the callback and start the Tk event loop (blocks
# until the window is closed).
btn = Button(root,text= "Click", command=btncmd)
btn.pack()
root.mainloop()
cc0709fc25c195dc2bdf9c624c61cd16278b0742 | Python | SpaceTeam/TXV_ECUI | /SimulatedHedgehog.py | UTF-8 | 474 | 2.78125 | 3 | [] | no_license | class SimulatedHedgehog(object):
    def __init__(self):
        # Stateless stand-in for the real Hedgehog controller hardware;
        # nothing to initialise.
        pass
def get_analog(self, port):
return 700
def set_servo_raw(self, port, us):
if us is False:
print('SimulatedHedgehog Servo %d disabled' % port)
else:
print('SimulatedHedgehog Servo %d set to %dus' % (port, us))
    def move_motor(self, port, power):
        # Simulation only: log the requested motor power instead of
        # driving hardware.
        print('SimulatedHedgehog Motor %d set to %d' % (port, power))
    def motor_off(self, port):
        # Simulation only: log that the motor was switched off.
        print('SimulatedHedgehog Motor %d switched off' % port)
| true |
702e2e7ed17aee6515e187b0da1d452c7bfd337b | Python | y43560681/y43560681-270201054 | /lab4/example1.py | UTF-8 | 321 | 3.578125 | 4 | [] | no_license | num = int(input("Please enter number :"))
# Sum the last two digits of the entered number and print the result
# zero-padded to two characters; a single-digit input is echoed with a
# leading zero.
digits = list(str(num))
if len(digits) == 1:
    result = digits[0]
else:
    result = str(int(digits[-1]) + int(digits[-2]))
print(result.zfill(2))
| true |
226bd60082fcc2f726beb4393f62f28ca42f1358 | Python | luisnarvaez19/Proyectos_Python | /edu/cursoLN/graficos/myplot2.py | UTF-8 | 2,512 | 3.5 | 4 | [] | no_license | '''
Created on Oct 6, 2018
@author: luis
'''
import math
import matplotlib.pyplot as plt
import numpy as np
# --- Figure 1: sine and cosine on the same axes, saved to disk ------------
# Create sinewaves with sine and cosine
xs = [i / 5.0 for i in range(0, 50)]
print(xs)
y1s = [math.sin(x) for x in xs]
y2s = [math.cos(x) for x in xs]
# Plot both sinewaves on the same graph
plt.plot(xs, y1s, 'r^', label='sin(x)')
plt.plot(xs, y2s, 'b--', label='cos(x)')
# Adjust the axes' limits: [xmin, xmax, ymin, ymax]
plt.axis([-1, 11, -1.5, 1.5])
# Give the graph a title and axis labels
plt.title('My Sinewaves')
plt.xlabel('Radians')
plt.ylabel('Value')
# Show a legend
plt.legend()
# Save the image
plt.savefig('sinewaves.png')
# Draw to the screen
plt.show()
# --- Figure 2: two stacked subplots ---------------------------------------
# NOTE(review): pyplot was already imported at the top of the file; this
# re-import is redundant but harmless.
import matplotlib.pyplot as plt
plt.figure(1) # the first figure
plt.subplot(211) # the first subplot in the first figure
plt.plot([1, 2, 3])
plt.subplot(212) # the second subplot in the first figure
plt.plot([4, 5, 6])
plt.show()
# --- Figure 3: normalised histogram of a synthetic IQ distribution --------
mu, sigma = 100, 15
x = mu + sigma * np.random.randn(10000)
# the histogram of the data
n, bins, patches = plt.hist(x, 50, density=1, facecolor='g', alpha=0.75)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title('Histogram of IQ')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
# NOTE(review): no artists were created with a label, so this legend call
# emits a "no handles" warning -- confirm whether it was meant to be kept.
plt.legend()
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
# --- Figure 4: the same data under four different y-axis scales -----------
from matplotlib.ticker import NullFormatter # useful for `logit` scale
# Fixing random state for reproducibility
np.random.seed(19680801)
# make up some data in the interval ]0, 1[
y = np.random.normal(loc=0.5, scale=0.4, size=1000)
y = y[(y > 0) & (y < 1)]
y.sort()
x = np.arange(len(y))
# plot with various axes scales
plt.figure()
# linear
plt.subplot(221)
plt.plot(x, y)
plt.yscale('linear')
plt.title('linear')
plt.grid(True)
# log
plt.subplot(222)
plt.plot(x, y)
plt.yscale('log')
plt.title('log')
plt.grid(True)
# symmetric log
plt.subplot(223)
plt.plot(x, y - y.mean())
# NOTE(review): `linthreshy` was renamed `linthresh` in matplotlib 3.3 and
# removed in 3.5 -- confirm the matplotlib version this script is pinned to.
plt.yscale('symlog', linthreshy=0.01)
plt.title('symlog')
plt.grid(True)
# logit
plt.subplot(224)
plt.plot(x, y)
plt.yscale('logit')
plt.title('logit')
plt.grid(True)
# Format the minor tick labels of the y-axis into empty strings with
# `NullFormatter`, to avoid cumbering the axis with too many labels.
plt.gca().yaxis.set_minor_formatter(NullFormatter())
# Adjust the subplot layout, because the logit one may take more space
# than usual, due to y-tick labels like "1 - 10^{-3}"
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,
                    wspace=0.35)
plt.show()
29c487264dce1f44e026747560885fc10dfba5bf | Python | vmaksimovaptl/trafic | /table_with_trafic_every_domen.py | UTF-8 | 3,330 | 3.21875 | 3 | [] | no_license | import pandas as pd
def take_domen_name_second_level(table):
    """Extract the unique second-level domains (*.com / *.ru) from a table.

    table: iterable of rows whose second column (row[1]) holds a URL string.
    Returns a de-duplicated list of 'name.tld' strings; order is
    unspecified because it comes from a set.
    """
    domen_name = []
    for row in table:
        link = row[1]
        # Normalise the URL: treat '/' like '.' so path segments split the
        # same way as host labels, drop quote characters, trim whitespace.
        string = link.replace('/', '.').replace('"', '').strip()
        new_row = string.split('.')
        for i, label in enumerate(new_row):
            # Guard i > 0: if the very first label is already the TLD there
            # is no second-level part (the old `new_row[i-1]` wrapped around
            # to new_row[-1] and fabricated a bogus domain).
            if i > 0 and label in ('com', 'ru'):
                domen_name.append(new_row[i - 1] + '.' + label)
                break
    return list(set(domen_name))
def count_of_trafic_for_domen(data, domen):
    """Per-domain traffic totals.

    For every domain name in `domen`, sums the third column (row[2]) of
    the rows in `data` whose URL column (row[1]) contains that domain,
    scales the total by 1 / (1024*3) and rounds to 5 decimals.
    Returns a list of [domain, scaled_total] pairs, one per input domain.
    NOTE(review): the divisor 1024*3 looks like it may have been meant as
    1024**3 (bytes -> GB) -- confirm against the spreadsheet units.
    """
    totals = []
    for name_domen in domen:
        raw_total = sum(int(row[2]) for row in data if name_domen in row[1])
        scaled = round(raw_total / (1024 * 3), 5)
        totals.append([name_domen, scaled])
    return totals
def procent_trafic(data, list_for_count_trafic):
    """Percentage of the channel's total traffic taken by each domain entry.

    data: raw rows whose third column (row[2]) is a traffic amount;
    list_for_count_trafic: [domain, scaled_total] pairs as produced by
    count_of_trafic_for_domen (already divided by the same 1024*3 factor,
    so the scaling cancels out of the ratio).
    Returns a list of percentages rounded to 5 decimals, one per entry.
    """
    total = sum(int(row[2]) for row in data) / (1024 * 3)
    return [round(entry[1] / total * 100, 5) for entry in list_for_count_trafic]
def date(data):
    """Return the measurement-period label 'Month/YYYY' for the dataset.

    data[0][0] is expected to be a quoted ISO-ish date string such as
    '"2017-3-15"' or '"2017-03-15"'.
    """
    mounth = {'01': 'ะฏะฝะฒะฐัั',
              '02': 'ะคะตะฒัะฐะปั',
              '03': 'ะะฐัั',
              '04': 'ะะฟัะตะปั',
              '05': 'ะะฐะน',
              '06': 'ะัะฝั',
              '07': 'ะัะปั',
              '08': 'ะะฒะณััั',
              '09': 'ะกะตะฝััะฑัั',
              '10': 'ะะบััะฑัั',
              '11': 'ะะพัะฑัั',
              '12': 'ะะตะบะฐะฑัั'}
    date_str = data[0][0].replace('"', '')
    key_mount = date_str.split('-')
    # zfill(2) accepts both '3' and '03' month tokens.  The old code built
    # the key as '0' + month_text for months <= 9, which produced '003'
    # (a KeyError) whenever the month was already zero-padded.
    return mounth[key_mount[1].zfill(2)] + '/' + key_mount[0]
def make_dict(domen, count_of_trafic, procent, mounth):
    """Assemble the column dict consumed by pandas.DataFrame in mk_table.

    domen: list of domain names; count_of_trafic: [domain, volume] pairs;
    procent: list of percentages; mounth: the 'Month/YYYY' period label,
    repeated once per row.
    """
    volumes = [pair[1] for pair in count_of_trafic]
    return {
        'Domen name': domen,
        'Trafic volume': volumes,
        'Procent': procent,
        'Date': [mounth] * len(count_of_trafic),
    }
def mk_table(domen, count_of_trafic, procent, mounth):
    # Build the report spreadsheet: one row per domain with volume,
    # percentage and measurement period, ranked by traffic volume
    # (heaviest first), then save it under doc_for_send/.
    table_for_csv = make_dict(domen, count_of_trafic, procent, mounth)
    # NOTE(review): the Russian column headers below contain raw line
    # breaks inside single-quoted literals -- a plain '...' string cannot
    # span lines, so this block as stored cannot parse; the text also looks
    # mojibake-encoded.  Confirm against the original source file.
    end_table = pd.DataFrame(table_for_csv,
    columns = ['ะะฐะธะผะตะฝะพะฒะฐะฝะธะต ะดะพะผะตะฝะฐ ะฒัะพัะพะณะพ ััะพะฒะฝั', 'ะะฑัะตะผ ะดะฐะฝะฝัั
(ััะฐัะธะบะฐ), ะะฑะฐะนั', 'ะัะฟะพะปัะทะพะฒะฐะฝะธะต ะพั ะพะฑัะตะณะพ ะพะฑัะตะผะฐ ะดะฐะฝะฝัั
ะฝะฐ ะบะฐะฝะฐะปะต (ะฟะพ ัะฑัะฒะฐะฝะธั), %', 'ะะตัะธะพะด ะธะทะผะตัะตะฝะธั ะฟะพะบะฐะทะฐัะตะปั: ะะตััั/ะณะพะด'])
    # Rank rows by the volume column and sort descending so the heaviest
    # domain comes first.
    end_table['โ'] = end_table['ะะฑัะตะผ ะดะฐะฝะฝัั
(ััะฐัะธะบะฐ), ะะฑะฐะนั'].rank(ascending = 1)
    end_table = end_table.set_index('โ')
    end_table = end_table.sort_index(ascending=False)
    writer = pd.ExcelWriter('doc_for_send/table_with_data_volume.xlsx')
    end_table.to_excel(writer)
    writer.save()
    #here need box with send this file to mail
| true |
79c223f0e19bb01645b8e7c6253ac4ddd5c342d0 | Python | pjoz/whatpolliessay | /apps/getHansards.py | UTF-8 | 902 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python3
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import requests
def DownloadFile(url):
    """Download `url` into the current working directory.

    The local file name is the last path segment with any ';...' suffix
    (e.g. ';fileType=application/xml') stripped.  Returns None.
    """
    local_filename = url.split('/')[-1].split(';')[0]
    # stream=True avoids buffering the whole response in memory before
    # writing; the with-block guarantees the handle is closed even if a
    # chunk write raises (the old code leaked the handle on error).
    r = requests.get(url, stream=True)
    with open(local_filename, 'wb') as f:
        for chunk in r.iter_content(chunk_size=512 * 1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    return
# Walk the Hansard index week by week, from 09/05/2011 up to today,
# downloading every transcript advertised with an 'XML format' link.
url = 'http://www.aph.gov.au/Parliamentary_Business/Hansard?wc=%(q)s'
payload = {
    'q': '09/05/2011',
}
date = datetime.strptime(payload['q'], "%d/%m/%Y")
while date < datetime.today():
    print(payload['q'])
    r = requests.get(url % payload)
    soup = BeautifulSoup(r.text)
    for link in soup.find_all(title='XML format'):
        print(link.get('href'))
        DownloadFile(link.get('href'))
    # Hansard pages are indexed by sitting week, so advance seven days.
    date = date + timedelta(days=7)
    payload['q'] = date.strftime("%d/%m/%Y")
| true |
c9a9f2c6acd728b7b00bf9983ac78168b9d0500b | Python | chensivan/aol_data_parsing | /detail_processing.py | UTF-8 | 1,522 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 28 16:04:17 2017
@author: yanchenm
"""
import csv
import string
import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import re
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
# Build 'top100_perfect.csv': the 100 most frequent queries found in
# 'perfect.csv', together with how many distinct anonymous users issued
# each one.
top100_non_perfect = open('top100_perfect.csv','w')
top100_perfect_writer = csv.writer(top100_non_perfect)
import csv
# word_fre maps query string -> {'freq': occurrence count,
#                                'ids': set of user ids that issued it}
word_fre = {}
with open('perfect.csv', 'r') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    for row in spamreader:
        # Skip blank rows and the header row (first column 'AnonID').
        if len(row) > 0 and row[0] != 'AnonID':
            if row[1] not in word_fre:
                word_fre[row[1]] = {}
                word_fre[row[1]]['freq'] = 0
                word_fre[row[1]]['ids'] = set()
            word_fre[row[1]]['freq'] += 1
            word_fre[row[1]]['ids'].add(row[0])
top100_perfect_writer.writerow(['Rank', 'Query', 'Frequency', '# of user ID'])
for key, value in word_fre.items():
    value['num_user'] = len(value['ids'])
    # NOTE(review): 'freqPerUser' is computed as users/freq -- the inverse
    # of what the name suggests; it is never used below.  Confirm intent.
    value['freqPerUser'] = len(value['ids']) / value['freq']
# row = [i, key, value['freq'], value['num_user']]
# print(row)
# top100_perfect_writer.writerow(row)
i = 1
# Rank by frequency (ties broken by distinct-user count), keep the top 100.
for key, value in sorted(word_fre.items(), key=lambda x: (x[1]['freq'], x[1]['num_user']), reverse=True)[:100]:
# print (i, key, value)
    row = [i, key, value['freq'], value['num_user']]
    print(row)
    top100_perfect_writer.writerow(row)
    i += 1
| true |
86ff5328ef8941fe1783f2399d1dd740be1db12e | Python | NickMathworld/clases | /clase1.py | UTF-8 | 480 | 3.984375 | 4 | [] | no_license | #Parametros
# Parameters: variables that receive a new name and must be filled in order.
# Basic types demonstrated below: integers, floats, strings, booleans.
a = 5.14
a = 12
a = 5
b = a+5
c = "HOLA MUNDO"
d = c+" NICK"
d = True
# NOTE(review): c is a str and d is now a bool, so this line raises
# TypeError when the module runs -- presumably it predates the
# reassignment of d above; confirm intent.
e = c+d
# b = 10
def operaciones(a, b):
    """Return a raised to the power b (exponentiation demo)."""
    result = a ** b
    return result
def quitaChar(param1, ch1):
    """Return param1 with every occurrence of ch1 removed.

    The original body was a stub that returned param1 unchanged despite
    the name ('quitaChar' = remove character); this implements the stated
    intent.
    """
    return param1.replace(ch1, "")
# Demo calls: print 2**6, then read two integers interactively and print
# their exponentiation.
p = operaciones(2,6)
print(p)
param1 = int(input("Introduce param1 "))
param2 = int(input("Introduce param2 "))
d = operaciones(param1,param2)
print(d)
# 100
# 011
# 111
6727cdb9daecef615413f3ea2250166ae70ba3ff | Python | massinat/ML | /resultHelper.py | UTF-8 | 2,044 | 3.328125 | 3 | [] | no_license | """
Helper class to visualize nicely the classification results.
@Author: Massimiliano Natale
"""
import os
import csv
import matplotlib.pyplot as plt
class ResultHelper:
    """Writes per-instance classification results to a comma-separated text
    file and renders them with matplotlib."""

    def __init__(self, outputFile):
        # Path of the results file used by write/draw/drawRSquared.
        self.outputFile = outputFile

    def write(self, experimentData):
        """Replace the output file with the given iterable of text rows.

        Each item is written verbatim, so rows must already contain their
        own commas and newlines.
        """
        if os.path.exists(self.outputFile):
            os.remove(self.outputFile)
        with open(self.outputFile, "a+") as txtFile:
            for item in experimentData:
                txtFile.write(item)

    def draw(self, title):
        """Plot correct/wrong counts (top panel) and accuracy percentage
        (bottom panel) against the number of classified instances.

        Reads rows whose first three fields are, presumably,
        (total_classified, correct, wrong) -- confirm against write()'s
        callers.
        """
        x = []
        yCorrect = []
        yWrong = []
        yAccuracy = []
        with open(self.outputFile, "r") as csvFile:
            plots = csv.reader(csvFile, delimiter=",")
            for row in plots:
                x.append(int(row[0]))
                yCorrect.append(float(row[1]))
                yWrong.append(float(row[2]))
                # Accuracy = row[1] / row[0], expressed as a percentage.
                yAccuracy.append(float(row[1]) * 100 / float(row[0]))
        plt.subplot(2, 1, 1)
        plt.plot(x, yCorrect)
        plt.plot(x, yWrong)
        # Annotate the final counts just past the last data point.
        plt.annotate(int(yCorrect[-1]), xy=(x[-1] + 3, yCorrect[-1]))
        plt.annotate(int(yWrong[-1]), xy=(x[-1] + 3, yWrong[-1]))
        plt.xlabel("Classified instances")
        plt.ylabel("#")
        plt.title(title)
        plt.legend(["y = Correct", "y = wrong"], loc="upper left")
        plt.subplot(2, 1, 2)
        plt.plot(x, yAccuracy)
        plt.annotate(f"{yAccuracy[-1]}%", xy=(x[-1] + 3, yAccuracy[-1]))
        plt.xlabel("Classified instances")
        plt.ylabel("% Accuracy")
        plt.show()

    def drawRSquared(self, title):
        """Plot the R-squared value (last field of each row) against the
        instance count (first field)."""
        x = []
        y = []
        with open(self.outputFile, "r") as csvFile:
            plots = csv.reader(csvFile, delimiter=",")
            for row in plots:
                x.append(int(row[0]))
                y.append(float(row[-1]))
        plt.plot(x, y)
        plt.annotate("%.2f" % (y[-1] * 100) + "%", xy=(x[-1] + 3, y[-1]))
        plt.xlabel("Total instances")
        plt.ylabel("R Squared")
        plt.title(title)
        plt.show()
| true |
6647cb298bc8c8034586b9395e7c77eebbd7bea3 | Python | AdityaShidlyali/Python | /Learn_Python/Chapter_2/14_assignment_operator.py | UTF-8 | 384 | 3.96875 | 4 | [] | no_license | name = "Aditya" # this the simple assignment operator which assogns the value left value to right value
name += " Shidlyali" # augmented assignment: shorthand for name = name + " Shidlyali"
# Augmented assignment is not limited to addition; it also exists for -, /, *, %.
name *= 3 # repeats the string three times and assigns it back to name
print(name)
6833e40dd1d919f8569a6064d1a12d86eef54d07 | Python | matpang99/homeworks | /3-1/homework2.py | UTF-8 | 1,575 | 2.90625 | 3 | [] | no_license | while True:
    # One round of the guessing game (Python 2 code: raw_input / print
    # statements).  The computer binary-searches for the user's number in
    # [0, 100].  First input: 1 quits the program, 0 starts a round, after
    # which the next input is the target answer.
    guess = 50
    num = 101  # size of the remaining search interval
    t = 1      # number of guesses taken so far
    input = int(raw_input())  # NOTE(review): shadows the builtin `input`
    if input == 1:
        break
    if input == 0:
        answer = int(raw_input())
        while guess != answer:
            t = t + 1
            # The even/odd cases below keep the integer halving of the
            # interval exact while stepping guess up or down.
            if num % 2 == 0:
                if answer < guess:
                    num = num / 2 - 1
                    if num % 2 == 0:
                        guess = guess - (num / 2 + 1)
                        continue
                    if num % 2 == 1:
                        guess = guess - ((num + 1) / 2)
                        continue
                if guess < answer:
                    num = num / 2
                    if num % 2 == 0:
                        guess = guess + (num / 2)
                        continue
                    if num % 2 == 1:
                        guess = guess + ((num + 1) / 2)
                        continue
            if num % 2 == 1:
                num = (num - 1) / 2
                if answer < guess:
                    if num % 2 == 0:
                        guess = guess - (num / 2 + 1)
                        continue
                    if num % 2 == 1:
                        guess = guess - ((num + 1) / 2)
                        continue
                if guess < answer:
                    if num % 2 == 0:
                        guess = guess + (num / 2)
                        continue
                    if num % 2 == 1:
                        guess = guess + ((num + 1) / 2)
                        continue
        else:
            # while/else: runs when the search loop exits normally, i.e.
            # guess converged to the answer; report the guess and the
            # number of steps taken.
            # NOTE(review): indentation reconstructed from a flattened
            # source -- confirm this `else` pairs with the `while` and not
            # with `if input == 0`.
            print guess
            print t
| true |
6bff5e2dfa2c53bd8bfe0336a613f2a0f4f6d273 | Python | kamil559/Pomodorr_backend_v2 | /pomodoro_system/web_app/commands.py | UTF-8 | 1,847 | 2.59375 | 3 | [
"MIT"
] | permissive | from datetime import datetime
import click
from flask import current_app
from flask.cli import AppGroup
from flask_security import UserDatastore, hash_password
from foundation.utils import to_utc
from marshmallow import Schema, ValidationError, fields, validates_schema
from pony.orm import db_session
user_cli = AppGroup("users")
class CreateAdminSchema(Schema):
    """Validates create-admin input and -- on success -- creates the admin
    user as a side effect of schema validation.

    NOTE(review): save_admin() is invoked from inside the @validates_schema
    hook, so .load() both validates and persists; confirm this coupling is
    intentional.
    """
    email = fields.Email(required=True, allow_none=False)
    password = fields.String(required=True, allow_none=False)

    def __init__(self, datastore: UserDatastore, *args, **kwargs) -> None:
        # Flask-Security datastore used for user lookup and creation.
        self.datastore = datastore
        super(CreateAdminSchema, self).__init__(*args, **kwargs)

    @validates_schema
    def validate(self, data: dict, **_kwargs) -> None:
        # Reject duplicate e-mail addresses, then create the admin user.
        email = data["email"]
        password = data["password"]
        existing_user = self.datastore.find_user(email=email)
        if existing_user is not None:
            raise ValidationError(f"Email {email} already exists.")
        self.save_admin(email, password)

    @db_session
    def save_admin(self, email: str, password: str) -> None:
        # Runs inside a Pony ORM db_session; the user is created active and
        # already confirmed, then given the pre-existing "admin" role.
        user = self.datastore.create_user(
            email=email, password=hash_password(password), confirmed_at=to_utc(datetime.now()), active=True
        )
        admin_role = self.datastore.find_role("admin")
        self.datastore.add_role_to_user(user, admin_role)
@user_cli.command("create_admin")
@click.argument("email")
@click.argument("password")
def create_admin(email: str, password: str) -> None:
    """CLI command: create an admin user with the given credentials.

    Echoes the validation error messages on failure, a success message
    otherwise.
    """
    datastore = current_app.extensions["security"].datastore
    try:
        CreateAdminSchema(datastore=datastore).load({"email": email, "password": password})
    except ValidationError as error:
        click.echo(error.messages)
        return None
    else:
        click.echo(f"Admin {email} has been created successfully.")
7608b015cb6d20d30d0071e45396b76ff73dfd85 | Python | efabens/nfl_college | /nfl_college_processor.py | UTF-8 | 3,618 | 2.859375 | 3 | [] | no_license | import json
import xlsxwriter as xlsx
from collections import defaultdict
def longest_current_t(team_dict):
    """Longest *current* streak: the colleges present in every season from
    the most recent one backwards, plus the first (oldest) season of that
    unbroken run.

    team_dict maps season -> iterable of college names; seasons are walked
    newest first.  Returns (set_of_colleges, starting_season).
    """
    seasons = sorted(team_dict.keys(), reverse=True)
    streak_start = seasons[0]
    shared = set()
    for season in seasons:
        colleges = set(team_dict[season])
        if not shared:
            shared = colleges
            continue
        colleges &= shared
        if not colleges:
            # Streak broken: report the last non-empty intersection.
            return shared, streak_start
        shared = colleges
        streak_start = season
    return shared, streak_start
def xl_longest_current_t(all_teams, c_team):
    # Write one worksheet row per NFL team: team name, first year of its
    # current streak, then every college in that streak (one per column).
    c_team.write(0, 0, 'Data Retrieved 3/13/2015 from footballdb.com')
    a, b = 1, 0
    for i, j in enumerate(['Team', 'First Year', 'College -->']):
        c_team.write(1, i, j)
    # a = current row, b = current column within that row.
    a, b = a + 1, 0
    for i in all_teams.keys():
        j, k = longest_current_t(all_teams[i])
        c_team.write(a, b, i)
        b += 1
        c_team.write(a, b, int(k))
        b += 1
        for m, n in enumerate(j):
            c_team.write(a, b + m, n)
        a, b = a + 1, 0
def longest_ever_t(team_dict):
    """Longest consecutive run of seasons any college appears for a team.

    Returns a list of [college, run_length, season] records -- one per
    college tying the best streak, tagged with the season at which that
    streak reached its full length (seasons are walked newest -> oldest).
    """
    best_len = 0
    best = []
    running = defaultdict(int)
    for season in sorted(team_dict.keys(), reverse=True):
        present = set(team_dict[season])
        for college in set(running.keys()).union(present):
            if college not in present:
                running[college] = 0  # streak broken
                continue
            running[college] += 1
            if running[college] > best_len:
                best_len = running[college]
                best = [[college, running[college], season]]
            elif running[college] == best_len:
                best.append([college, running[college], season])
    return best
def xl_longest_ever_t(all_teams, sheet, func=longest_ever_t):
    # Write one worksheet row per team: name, streak length (once), then
    # alternating (college, start-year) pairs, as produced by `func`
    # (longest_ever_t by default, or most_years_t for the totals sheet).
    sheet.write(0, 0, 'Data Retrieved 3/13/2015 from footballdb.com')
    a, b = 1, 0
    for i, j in enumerate(['Team', 'Years', 'College', 'Start -->']):
        sheet.write(a, i, j)
    a+=1
    # a = current row, b = current column within that row.
    for i in all_teams:
        m = func(all_teams[i])
        sheet.write(a, b, i)
        b += 1
        for j in m:
            # The year count (j[1]) is written once, before the first pair.
            if b == 1:
                sheet.write(a, b, j[1])
                b+=1
            sheet.write(a, b, j[0])
            b+=1
            sheet.write(a, b, int(j[2]))
            b += 1
        b = 0
        a += 1
def most_years_t(team_dict):
    """Colleges with the most total (not necessarily consecutive) seasons
    for one team.

    Returns a list of [college, season_count, season] records -- one per
    college tying the best total, tagged with the season at which that
    count was reached (seasons are walked newest -> oldest).
    """
    best_len = 0
    best = []
    season_counts = defaultdict(int)
    for season in sorted(team_dict.keys(), reverse=True):
        for college in set(team_dict[season]):
            season_counts[college] += 1
            count = season_counts[college]
            if count > best_len:
                best_len = count
                best = [[college, count, season]]
            elif count == best_len:
                best.append([college, count, season])
    return best
def college_years(all_teams, sheet, uni=True):
    """Count, per college, how many players it supplied in each season,
    aggregated across every NFL team.

    all_teams maps team -> {season -> [college, ...]}.  `sheet` and `uni`
    are accepted for interface compatibility but are currently unused.
    Returns {college: {season: count}} as nested defaultdicts.
    """
    p = defaultdict(lambda: defaultdict(int))
    # .items() replaces the Python-2-only .iteritems(), which raises
    # AttributeError on Python 3; iteration behaviour is identical.
    for i, j in all_teams.items():
        for k, l in j.items():
            for m in l:
                p[m][k] += 1
    return p
if __name__ == "__main__":
    # Load the scraped roster data and emit one worksheet per statistic.
    with open("all_teams3.json", 'r') as outfile:
        all_teams = json.load(outfile)
    book = xlsx.Workbook('NFL Colleges.xlsx')
    c_team = book.add_worksheet('Longest Current')
    xl_longest_current_t(all_teams, c_team)
    l_ever_team = book.add_worksheet("Longest Tenure ever")
    xl_longest_ever_t(all_teams, l_ever_team)
    most_years_sheet = book.add_worksheet("Most Seasons on Team")
    xl_longest_ever_t(all_teams, most_years_sheet, func=most_years_t)
    # NOTE(review): r is computed but never written to the workbook.
    r = college_years(all_teams, [])
    book.close()
| true |
4a1543910dbf334185992913f1cdd31fb6deb9b2 | Python | thaReal/MasterChef | /leetcode/square_submatrices.py | UTF-8 | 2,714 | 3.671875 | 4 | [] | no_license | #!/usr/bin/python3
#-*- coding: utf-8 -*-
'''
Given a m * n matrix of ones and zeros, return how
many square submatrices have all ones.
'''
def countSquares(matrix):
    """Count the square submatrices consisting entirely of 1s.

    Brute force: for every size from min(h, w) down to 1, slide a
    size x size window over the matrix and inspect each window directly.
    Correct but slow, kept as a reference for countSquares2.  (The old
    docstring wrongly said it returned the *largest* square region; it
    returns the total count.)
    """
    h = len(matrix)
    w = len(matrix[0])
    sz = min(h, w)  # maximum size of a potentially full square
    cnt = 0
    while sz > 0:
        for i in range(h - sz + 1):
            submat = matrix[i:i + sz]
            for j in range(w - sz + 1):
                # The window is all ones iff no row slice contains a 0.
                if all(0 not in row[j:j + sz] for row in submat):
                    cnt += 1
        sz -= 1
    return cnt
def additiveTable(matrix):
    """Build the 2-D prefix-sum (summed-area) table of `matrix`.

    table[i][j] holds the sum of matrix[0..i][0..j]; countSquares2 uses it
    to evaluate any rectangular sum in O(1).
    """
    rows = len(matrix)
    cols = len(matrix[0])
    table = [[None] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            total = matrix[r][c]
            if r > 0:
                total += table[r - 1][c]
            if c > 0:
                total += table[r][c - 1]
            if r > 0 and c > 0:
                total -= table[r - 1][c - 1]  # counted twice above
            table[r][c] = total
    return table
def countSquares2(matrix):
    """Count all-ones square submatrices using the prefix-sum table.

    For each size sz >= 2, a window whose bottom-right corner is (i, j) is
    all ones iff its rectangular sum equals sz**2; that sum is derived
    from the summed-area table in O(1).  Size-1 squares are counted
    separately with row.count(1).
    """
    table = additiveTable(matrix)
    # first get height and width of rectangular matrix
    h = len(table)
    w = len(table[0])
    sz = min(h, w)
    cntr = 0
    while sz > 1:
        # our starting index is the outer corner of the submatrix
        # and then our calculations subtract two submatrix values
        # in order to check if array is valid
        for i in range(sz-1, h):
            for j in range(sz-1, w):
                # c1/c2 remove the prefix-sum contribution of the rows
                # above and the columns left of the sz x sz window; each
                # branch handles a window flush with an edge of the matrix.
                if j-sz >= 0:
                    if i-sz >= 0:
                        # subtract prior rows
                        c1 = table[i-sz][j] - table[i-sz][j-sz]
                        # subtract prior columns
                        c2 = table[i][j-sz]
                    else:
                        # just subtract prior columns
                        c1 = 0
                        c2 = table[i][j-sz]
                else:
                    if i-sz >= 0:
                        # just subtract prior rows
                        c1 = table[i-sz][j]
                        c2 = 0
                    else:
                        c1 = c2 = 0
                # finally check if the value at our current index is
                # equal to sz**2 - c1 - c2
                val = table[i][j] - c1 - c2
                if val == sz**2:
                    cntr += 1
        sz -= 1
    # Last, use list.count() rather than additive table method to
    # speed up case where sz=1
    for row in matrix:
        cntr += row.count(1)
    return cntr
'''
Notes:
sz = 3
[ 0 1 2 3 ] [ 0 1 ]
[ 1 3 5 7 ]
[ 1 4 7 10 ]
sz = 2
[ 0 1 2 3 ] [ 0 1 1 ]
[ 1 3 5 7 ]
sz = 2,
[ 1 3 5 7 ] [ 0 1 1 ]
[ 1 4 7 10 ]
'''
if __name__=='__main__':
    # Smoke test against the LeetCode 1277 examples; the expected printed
    # output is 15 for matrix1 and 7 for matrix2.
    matrix1 = [
        [0,1,1,1],
        [1,1,1,1],
        [0,1,1,1]
    ]
    matrix2 = [
        [1,0,1],
        [1,1,0],
        [1,1,0]
    ]
    # NOTE(review): matrix3 is defined but never passed to countSquares2.
    matrix3 = [
        [0, 0, 0],
        [0, 0, 0]
    ]
    cnt = countSquares2(matrix1)
    print (cnt)
    cnt = countSquares2(matrix2)
    print (cnt)
| true |
26b30bd5fb4d644ec4b2413358308bc06de8f3fe | Python | mzulqarnain1/PythonScrapingSimplest | /Main.py | UTF-8 | 1,967 | 3.96875 | 4 | [] | no_license | """
This is the file where we read data from 4 different web pages using
urllib library and we do it using threads
"""
import urllib
from threading import Thread
from bs4 import BeautifulSoup
def fetch_and_display(url, keys, thread):
"""
this function is real performer which will get all the data and convert it
to readable form using beautiful soup and give us matching headings
according to our keywords
:param url: page to read data from
:param keys: keywords that we want to match headings with
:param thread: page to show from where we got this heading
"""
response = urllib.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
# iterating through all the anchor tags (headings)
for link in soup.find_all('a'):
# checking headings that have matching keywords as of input
if any(word.lower() in link.get_text().lower() for word in keys):
print (link.get_text()) + " [From %s] \n " % thread
if __name__ == "__main__":
print "Enter Space Separated Keywords To Search"
KEYWORDS = raw_input('>')
KEYWORDS = KEYWORDS.split(" ")
# Making thread objects to read data from multiple links parallel
REDDIT = Thread(target=fetch_and_display,
args=('https://www.reddit.com/r/programming',
KEYWORDS, 'Reddit'))
REDDIT.start()
YCOMB = Thread(target=fetch_and_display,
args=('https://news.ycombinator.com/',
KEYWORDS, 'Y Combinator'))
YCOMB.start()
GUARDIAN = Thread(target=fetch_and_display,
args=('https://www.theguardian.com/us/technology',
KEYWORDS, 'Guardian'))
GUARDIAN.start()
NYTIMES = Thread(target=fetch_and_display,
args=('http://www.nytimes.com/pages/technology/index.html',
KEYWORDS, 'NY Times'))
NYTIMES.start()
| true |
0105385ae5b96420dcefe551457f620504182aa3 | Python | aksarkar/frea-work | /enr/generic/cutoffs/cutoffs.py | UTF-8 | 1,867 | 2.65625 | 3 | [] | no_license | """Compute enrichment at selected cutoffs
Usage: python cutoffs.py PHENOTYPE FEATURE CELLTYPE [EXCLUDE]
Reference: Maurano et al. "Systematic Localization of Common Disease-Associated
Variation in Regulatory DNA." Science. 2012. doi:10.1126/science.1222794
Expects whitespace-separated (score, binary annotation) sorted by decreasing
score on stdin. Writes space-separated (phenotype, feature, cell type, cutoff,
fold enrichment) on stdout.
If EXCLUDE is non-nil, exclude the SNPs meeting the previous cutoff (compute
enrichment for disjoint intervals of SNP ranks).
Author: Abhishek Sarkar <aksarkar@mit.edu>
"""
import math
import collections
import itertools
import sys
import scipy.stats
phenotype = sys.argv[1]
feature = sys.argv[2]
celltype = sys.argv[3]
exclude = len(sys.argv) > 4
data = (line.split() for line in sys.stdin)
parsed = ((float(s), int(a)) for s, a in data)
overlaps = [0]
totals = []
cutoffs = []
breaks = (100 << i for i in itertools.count())
current_bin = next(breaks)
total_snps = 0
total_overlaps = 0
for i, (s, a) in enumerate(parsed):
if i >= current_bin:
current_bin = next(breaks)
overlaps.append(0)
totals.append(current_bin - i)
cutoffs.append(s)
if a:
overlaps[-1] += 1
total_overlaps += 1
total_snps += 1
if not exclude:
overlaps = itertools.accumulate(overlaps)
totals = itertools.accumulate(totals)
for overlap_count, total_count, cutoff in zip(overlaps, totals, cutoffs):
contingency = [[overlap_count, min(total_snps, total_count)],
[total_overlaps, total_snps]]
odds_ratio, p = scipy.stats.fisher_exact(contingency, alternative='greater')
logp = -math.log(p, 10) if p > 0 else 1000
print('{} {} {} {:.3f} {:.3f} {:.3f}'.format(phenotype, feature, celltype,
cutoff, logp, odds_ratio))
| true |
a18194378c2feed674076a00fae3b28e5db1b1b3 | Python | AmeliaMaier/Data_Science_Notes | /dev_basics/Python/unit_testing[dev Python UnitTesting].py | UTF-8 | 726 | 3.046875 | 3 | [] | no_license | import time
import unittest
import src.class_being_tested as name_of_class_being_tested
SLOW_TEST_THRESHOLD = 0.1
'''make sure to add the name of the file to your makefile'''
'''https://jeffknupp.com/blog/2013/12/09/improve-your-python-understanding-unit-testing/'''
class Test_NameOfMethodBeingTested(unittest.TestCase):
def setUp(self):
self._started_at = time.time()
def tearDown(self):
elapsed = time.time() - self._started_at
if elapsed > SLOW_TEST_THRESHOLD:
print(f'{self.id()}: {round(elapsed,2)}s')
def test_description_of_test_being_run(self):
# write in the code to set up expected and found values
self.assertEqual(expected_value, found_value)
| true |
faf346072ebd9a4ded2069f5dce74a5a45aca002 | Python | Dzeiberg/ClassPriorEstimation | /dataProcessing/yangDistance.py | UTF-8 | 2,392 | 3.125 | 3 | [] | no_license | try:
import cupy as xp
except ImportError:
import numpy as xp
from scipy.stats import beta
def calcDifference(sample, aNeg, bNeg, aPos, bPos):
negPDF = beta.pdf(sample, aNeg, bNeg)
posPDF = beta.pdf(sample, aPos, bPos)
pdfDiff = negPDF - posPDF
pdfDiffNeg = xp.maximum(pdfDiff, xp.zeros_like(pdfDiff))
pdfDiffPos = xp.maximum(-1 * pdfDiff, xp.zeros_like(pdfDiff))
pdfMax = xp.maximum(negPDF, posPDF)
return negPDF, posPDF, pdfDiffPos, pdfDiffNeg, pdfMax
def yangDistributionDifference(aNeg, bNeg, aPos, bPos, p=1):
"""
Eq. (7) from :
Yang, R., Jiang, Y., Mathews, S. et al.
Data Min Knowl Disc (2019) 33: 995.
https://doi.org/10.1007/s10618-019-00622-6
"""
sampleSize = 1000
negSample = xp.random.beta(aNeg, bNeg, sampleSize)
posSample = xp.random.beta(aPos, bPos, sampleSize)
negPDF_NEG, posPDF_NEG, pdfDiffPos_NEG, pdfDiffNeg_NEG, pdfMax_NEG = calcDifference(negSample, aNeg, bNeg, aPos, bPos)
negPDF_POS, posPDF_POS, pdfDiffPos_POS, pdfDiffPOS_POS, pdfMax_POS = calcDifference(posSample, aNeg, bNeg, aPos, bPos)
numerator1 = xp.mean(pdfDiffNeg_NEG / negPDF_NEG)
numerator2 = xp.mean(pdfDiffPos_POS / posPDF_POS)
sumVecs = xp.power(numerator1, xp.ones_like(numerator1) * p) + xp.power(numerator2, xp.ones_like(numerator2) * p)
dPHat = xp.power(sumVecs, xp.ones_like(sumVecs) * (1/p))
dTermNeg = (posPDF_NEG * 0.5) + (negPDF_NEG * 0.5)
dTermPos = (posPDF_POS * 0.5) + (negPDF_POS * 0.5)
denominator = (xp.sum(pdfMax_NEG / dTermNeg) + xp.sum(pdfMax_POS / dTermPos)) / (2 * sampleSize)
return dPHat / denominator
def vectorPDistance(x, y, pExp):
# binary vector selecting indices in which xi >= yi
gtMask = (x >= y)
# binary vector for selecting indices in which xi < yi
ltMask = (x < y)
gtSum = xp.sum(x[gtMask] - y[gtMask])**pExp
ltSum = xp.sum(y[ltMask] - x[ltMask])**pExp
return (gtSum + ltSum)**(1/pExp)
def yangVectorDistance(negativeVector, positiveVector, p=1):
x = xp.array(negativeVector).reshape((-1,1))
y = xp.array(positiveVector).reshape((-1,1))
pExp = int(p)
assert x.shape == y.shape, "x ({}) and y ({}) must be of the same shape".format(x.shape, y.shape)
assert pExp > 0, "p must be an integer greater than 0"
numerator = vectorPDistance(x,y,pExp)
max_X_Y = xp.maximum(xp.absolute(x), xp.absolute(y))
maxes = xp.maximum(max_X_Y, xp.absolute(x-y))
return numerator / xp.sum(maxes) | true |
793c496cb58f2adea29ebad5838c38d2b25b631c | Python | suryak24/python-code | /86.py | UTF-8 | 174 | 3.546875 | 4 | [] | no_license | n=input("Enter the string:")
a=[]
flag=0
for i in n:
if i not in a:
a.append(i)
else:
flag=1
if(flag==1):
print("no")
else:
print("yes")
| true |
9fec7dbf2626de699ae1c9239c8d22c39a7cae4f | Python | tony10101105/AIOT4th | /fall detection/dataset.py | UTF-8 | 2,348 | 2.75 | 3 | [] | no_license | import csv
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from PIL import Image
from sklearn.decomposition import PCA
class ACC(Dataset):
def __init__(self, transform=None, mode = 'train'):
self.transform = transform
if mode == 'train':
DATA_PATH = './data/fall_train.txt'
else:
DATA_PATH = './data/fall_test.txt'
self.final = []
temp = []
with open(DATA_PATH, newline='\n') as file:
for row in file.readlines():
row = row.split(' ')
try:
row = [float(i) for i in row]
except:
print('row:', row)
raise Exception('format error')
if row == [1,1,1,1,1,1] or row == [0,0,0,0,0,0]:
self.final.append((temp, row))
temp = []
else:
temp.append(row)
input_data = []
for a, b in self.final:
list_data = []
for row in a:
for element in row:
list_data.append(element)
input_data.append(list_data)
'''
print(len(input_data[1]))
pca = PCA(2, copy = True)
pca_data = pca.fit_transform(input_data)
plt.figure()
plt.plot(pca_data)
plt.show()
'''
def __getitem__(self, index):
data, label = self.final[index]
if label == [1,1,1,1,1,1]:
label = torch.tensor(1.0)
elif label == [0,0,0,0,0,0]:
label = torch.tensor(0.0)
else:
raise Exception('label assignment error')
for i in range(len(data)):
if len(data[i]) != 6:
assert i == 0, 'defective data does not happen at the first row: {}'.format(data[i])
del data[i]
break
if len(data) == 100:
del data[0]
assert len(data) == 99, 'data length incorrect: {}'.format(len(data))
data = np.array(data)
data = self.transform(data)
return data, label
def __len__(self):
return len(self.final)
| true |
6e5550e0f8b1e4a09ce7eaf74314e8f5b465aaa6 | Python | nikneural/MlRecipe1 | /ะกะพะทะดะฐะฝะธะต ัะฐะทัะตะถะตะฝะฝะพะน ะผะฐััะธัั.py | UTF-8 | 467 | 3 | 3 | [] | no_license | # ะะผะตัััั ะดะฐะฝะฝัะต ั ะพัะตะฝั ะผะฐะปัะผ ะบะพะปะธัะตััะฒะพะผ ะฝะตะฝัะปะตะฒัั
ะทะฝะฐัะตะฝะธะน,
# ะบะพัะพััะต ััะตะฑัะตััั ัััะตะบัะธะฒะฝะพ ะฟัะตะดััะฐะฒะธัั
import numpy as np
from scipy import sparse
matrix = np.array([[0, 0],
[0, 1],
[3, 0]])
# ะกะพะทะดะฐัั ัะถะฐััั ัะฐะทัะตะถะตะฝะฝัั ะผะฐััะธัั-ัััะพะบั
matrix_sparse = sparse.csr_matrix(matrix)
print(matrix_sparse) | true |
92ab21b755a61acae2a0f23b646932ae1825778d | Python | mdk-klm/btc_game | /main.py | UTF-8 | 2,097 | 3.09375 | 3 | [] | no_license | import pygame
from game import Game
import math
pygame.init()
# generer la fenรชtre du jeu
pygame.display.set_caption("Comet fall game")
screen = pygame.display.set_mode((1080, 720))
# importer l'arriere plan
background = pygame.image.load('assets/bg.jpg')
# importer la banniรจre
banner = pygame.image.load('assets/banner.png')
banner = pygame.transform.scale(banner, (500, 500))
banner_rect = banner.get_rect()
banner_rect.x = math.ceil(screen.get_width() / 4)
# bouton pour lancer la partie
play_button = pygame.image.load('assets/button.png')
play_button = pygame.transform.scale(play_button, (400, 150))
play_button_rect = play_button.get_rect()
play_button_rect.x = math.ceil(screen.get_width() / 3.33)
play_button_rect.y = math.ceil(screen.get_height() / 2)
# charger le jeu
game = Game()
running = True
while running:
# appliquer arriere plan
screen.blit(background, (0, -200))
#vรฉrifier si le jeu a commencรฉ ou non
if game.is_playing:
# declencher les instructions
game.update(screen)
# vรฉrifier si le jeu n'a pas commencencรฉ
else:
# ajouter ecran accueil
screen.blit(play_button, play_button_rect)
screen.blit(banner, banner_rect)
# mettre a jour l'รฉcran
pygame.display.flip()
# si fenetre fermรฉe
for event in pygame.event.get():
# que l'evenement est fermeture de fenetre
if event.type == pygame.QUIT:
running = False
pygame.quit()
# detecter si un joueur lache une touche du clavier
elif event.type == pygame.KEYDOWN:
game.pressed[event.key] = True
# detecter touche espace
if event.key == pygame.K_SPACE:
game.player.launch_eth()
elif event.type == pygame.KEYUP:
game.pressed[event.key] = False
elif event.type == pygame.MOUSEBUTTONDOWN:
# vรฉrification pour savoir si la souris est sur le bouton
if play_button_rect.collidepoint(event.pos):
# mettre le jeu en mode "lancรฉ"
game.start()
| true |
9f184f12dafae4c666143bda63e77787e0f2815f | Python | pjz987/2019-10-28-fullstack-night | /Assignments/andrew/django/library/libapp/models.py | UTF-8 | 846 | 2.578125 | 3 | [] | no_license | from django.db import models
class Author(models.Model):
name = models.CharField(max_length=150)
def __str__(self):
return self.name
class Book(models.Model):
title = models.CharField(max_length=140)
pub_date = models.DateField()
author = models.ForeignKey(Author, on_delete=models.CASCADE)
available = models.BooleanField(default=True)
def __str__(self):
return f"Title: {self.title}; Publication Date: {self.pub_date}; Author: {self.author}"
class Checkout(models.Model):
book = models.ForeignKey(
book, related_name='checkouts', on_delete=models.PROTECT)
user = models.CharField(max_length=140)
out = models.BooleanField()
def __str__(self):
return f"{self.book}; {self.user}; ("out" if self.out else "in") + ("out" if self.out else "in")"
| true |
9b6fcc6efb4e5d2f3a46c869d5bdfcc0ea5276a9 | Python | salokr/jigsaw | /Jigsaw_Predict.py | UTF-8 | 733 | 2.578125 | 3 | [] | no_license | import pandas as pd
import numpy as np
from keras.preprocessing.sequence import pad_sequences
labels=['toxic','severe_toxic','obscene','threat','insult','identity_hate']
def predict_save_df(test_file_address,model,df_name,tokenizer=None,maxlen=None):
test_df=pd.read_csv(test_file_address)
X_test=test_df['comment_text'].fillna('<UNK>')
X_test=tokenizer.texts_to_sequences(X_test)
X_test=pad_sequences(X_test,maxlen=maxlen)
probabilities=model.predict(X_test)
submission_df = pd.DataFrame(columns=['id'] + labels)
submission_df['id'] = test_df['id'].values
submission_df[labels] = probabilities
submission_df.to_csv("./" + raw_input('Enter Prediction File Name(Don\'t append .csv at end) : ') + '_jigsaw.csv',index=False) | true |
fc980031c41ff097c0c71114d956471a69d80e30 | Python | watanany/20200716--BrushFontGenerator | /src/lib/mybrush.py | UTF-8 | 1,578 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import sqrt
from pyglet.gl import *
from droplet.brush import Brush
from droplet.simplebrush import SimpleBrush
class MyBrush(Brush):
def __init__(self, *args, **kwds):
super(MyBrush, self).__init__(*args, **kwds)
#self.N = 9
#self.rate = [1.0 / self.N] * self.N
def draw(self):
u'็พๅจใฎๅบงๆจใซใใญใใใฌใใใขใใซ(ๆฏ็ญ)ใๆใ'
# ใใญใใใฌใใใขใใซใๆใ
c = 0.1
glColor4d(c, c, c, 1.0)
for model in self:
for droplet in model:
glBegin(GL_POLYGON)
#glBegin(GL_LINES)
for x, y in droplet:
glVertex2d(x, y)
glEnd()
# glBegin(GL_POLYGON)
# for x, y in self.hull():
# glVertex2d(x, y)
# glEnd()
@classmethod
def get_brush(cls, width, height, a=1.0):
b = sqrt(width * width + height * height) / 1.2
brush = cls({
'length': 0.15 * a * b,
'D': 0.1 * a * b,
'humidity': 0.5,
'k': 0.5,
'hair_number': 30,
'pigment': 0.8,
'threshold': {
((1, 2), 3): 0.8,
((3, 4), 5): 0.8,
},
'p_tip': [0.8, 0.2],
})
return brush
# class MyBrush(ShinBrush):
# @classmethod
# def get_brush(cls, width, height):
# brush = cls()
# return brush
| true |
e4e2014ed212fe70bf0820b3251814a446ddc1fa | Python | PrincetonUniversity/faas-profiler | /functions/microbenchmarks/primes/primes-python.py | UTF-8 | 1,017 | 3.171875 | 3 | [
"MIT"
] | permissive | # Copyright (c) 2019 Princeton University
# Copyright (c) 2016 Ivan Zahariev
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
def main(params):
"""
standard optimized sieve algorithm to get a list of prime numbers
"""
n = 10000000
if n < 2:
return {'Number of primes found': 0}
if n == 2:
return {'Number of primes found': 2}
# do only odd numbers starting at 3
if sys.version_info.major <= 2:
s = range(3, n + 1, 2)
else: # Python 3
s = list(range(3, n + 1, 2))
# n**0.5 simpler than math.sqr(n)
mroot = n ** 0.5
half = len(s)
i = 0
m = 3
while m <= mroot:
if s[i]:
j = (m * m - 3) // 2 # int div
s[j] = 0
while j < half:
s[j] = 0
j += m
i = i + 1
m = 2 * i + 3
res = [2] + [x for x in s if x]
return {'Number of primes found': len(res)}
| true |
d80e87e3c8818f2342c65956585a3c522d62812a | Python | DreamingLi/Blog-back-end | /user/models.py | UTF-8 | 467 | 2.578125 | 3 | [] | no_license | # Create your models here.
from django.db import models
class User(models.Model):
class Meta:
db_table = 'user'
id = models.AutoField(primary_key=True);
name = models.CharField(max_length=60, null=False);
email = models.CharField(max_length=60, null=False);
password = models.CharField(max_length=60, null=False);
def __repr__(self):
return "USER : {} {} {}".format(self.id, self.name, self.email)
__str__ = __repr__
| true |
ea323363e3d8f589c9011df29c08334ce97c35e2 | Python | Lafcadio79/Multi-Access-Edge-Fog-Crowd-Sensing-Simulator | /users.py | UTF-8 | 5,715 | 2.90625 | 3 | [] | no_license | ##################################################################################
# Module: users.py
# Description: The module provides definitions for creating and reading of user
# movements list of events files
#
# Created on 24/feb/2019
# @author: Dimitri Belli
# License: GPLv3
# Web: https://github.com/Lafcadio79/Multi-Access-Edge-Fog-Crowd-Sensing-Simulator
##################################################################################
# This program is free software; you can redistribuite it and/or modify it under
# the terms of the GNU/General Pubblic License as published the Free software
# Foundation; either version 3 of the License, or (at your opinion) any later
# version
##################################################################################
import networkx as nx
import random as rd
import osmnx as ox
import os.path
import time
import os
from haversine import haversine as hv
from random import randrange as rdg
from tqdm import tqdm
ox.config(log_console=True, use_cache=True)
def walking_time(g, p, min_s, max_s):
"""
Computing the walking time from point a to point b (in seconds)
for all user's movements through the haversine distance
(fair method currently in disuse because it increases the complexity
of the user movements list of events generator algorithm from O(n^2) to O(n^3))
:param g: networkx multidigraph
:param p: networkx shortest path
:param min_s: walker's minimum speed
:param max_s: walker's maximum speed
:return: dictionary made up of {[(a coordinates), (b coordinates)] : time}
"""
seconds = {}
# we assume different speed for each sub-path
for i in range(1, len(p)):
speed = rd.uniform(min_s, max_s)
t1 = (g.node[p[i-1]]['y'], g.node[p[i-1]]['x'])
t2 = (g.node[p[i]]['y'], g.node[p[i]]['x'])
dist = hv(t1, t2, 'm')
seconds[dist / speed] = [t1, t2]
return seconds
def time_update(tm, et):
"""
Update the simuation timestamp
:param tm: current simulation timestamp
:param et: elapsed walking time (in seconds)
:return: list made up of [new_timestamp, [day, hour, minute, second]]
"""
nt = tm + et
return nt
def list_of_events_generator(g, u, d, k, st, et):
"""
User movements list of events generator
:param g: networkx multidigraph
:param u: number of users
:param d: simulation duration (days)
:param k: kind of network
:param st: simulation starting time
:param et: simulation ending time
:return: file - all users' movements with the following information
[user_id latitude longitude day hour minute second]
"""
path = []
if(k == 1):
# between 3.6 Km/h and 5.4 Km/h (walk)
min_speed = 1 # m/s
max_speed = 1.5 # m/s
elif(k == 2):
# between 10 Km/h and 20 Km/h (bike)
min_speed = 2.7 # m/s
max_speed = 5.5 # m/s
elif(k == 3):
# between 20 Km/h and 50 Km/h (drive)
min_speed = 5.5 # m/s
max_speed = 13.8 # m/s
else:
# between 3.6 Km/h and 50 Km/h (all)
min_speed = 1 # m/s
max_speed = 13.8 # m/s
file_number = 0
record_counter = 0
file = open("./Inputs/Mobility/Users/UserMovementsListEvents_{}.txt".format(file_number), 'w')
file.write("user_id lat lon timestamp\n")
print("Generating user movements list of events")
#################################################
# Main Cycle #
#################################################
for i in tqdm(range(u)):
lst = list(g.nodes())
rorg = rd.randint(0, len(lst)-1)
rdst = rd.randint(0, len(lst)-1)
org = lst[rorg]
dst = lst[rdst]
path = nx.shortest_path(g, org, dst, weight='length')
r_start = rd.uniform(st,et)
wt = walking_time(g, path, min_speed, max_speed)
w = wt.keys()
nl = []
et0 = 0
nl.append(time_update(r_start, et0))
for m in w:
ct = nl[len(nl)-1]
nl.append(time_update(ct, m))
for j in range(len(path)):
file.write("{} {} {} {}\n".format(i+1, g.node[path[j]]['y'], g.node[path[j]]['x'], nl[j]))
record_counter += 1
if(record_counter % 99999 == 0):
file_number += 1
file.close()
file = open("./Inputs/Mobility/Users/UserMovementsListEvents_{}.txt".format(file_number), 'w')
file.write("user_id lat lon timestamp\n")
time.sleep(0.01)
print("Records generated:", record_counter, "\nFiles generated:", file_number+1)
file.close()
def read_user_movements_list_events(path):
"""
Read data from file
:return: list of lists made up of user movements (time and space information)
[user_id latitude longitude day hour minute second]
"""
n_files = len([name for name in os.listdir(path) if os.path.isfile(os.path.join(path, name))])
setup_data = []
l = []
for i in range(n_files):
with open("{}/UserMovementsListEvents_{}.txt".format(path,i)) as f:
data = f.readlines()
setup_data = setup_data + data[1:len(data)]
for k in range(1, len(setup_data)):
try:
sd = setup_data[k].split()
l.append([int(sd[0]), float(sd[1]), float(sd[2]), float(sd[3])])
except:
print("header ", end=" ")
return l
| true |
9786097ada591e94f099c464e4e71c36a7f5af85 | Python | reachtoakhtar/data-structure | /tree/problems/binary_tree/leaf_nodes.py | UTF-8 | 642 | 3.53125 | 4 | [] | no_license | __author__ = "akhtar"
from collections import deque
def find_leaf_nodes(root):
"""
Find the number of leaf nodes in a binary tree.
:param BTreeNode root: The root of the tree.
:return: the count of leaf nodes.
:rtype: int
"""
if root is None:
return
q = deque([])
q.append(root)
count = 0
while len(q):
node = q.popleft()
if node.left is None and node.right is None:
count += 1
if node.left is not None:
q.append(node.left)
if node.right is not None:
q.append(node.right)
return count
| true |
c9e3b0dbdd6cb8ec9d02fa8e8c13a8b83abdb6df | Python | Dashermankiller/Flask-APP | /app2.py | UTF-8 | 2,529 | 2.859375 | 3 | [] | no_license | #Imports
from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from flask_wtf import Form
from wtforms.validators import InputRequired
from wtforms import StringField, PasswordField
import os
import BeautifulSoup
from googlevoice import Voice
import base64
key = 'abcdefghijklmnopqrstuvwxyz'
key2 = '123456789'
# Encryption Fucntion:
def encrypt(n, plaintext):
"""Encrypt the string and return the ciphertext"""
result = ''
for l in plaintext.lower():
try:
i = (key.index(l) + n) % 26
result += key[i]
except ValueError:
result += l
return result.lower()
# Decryption Funcion:
def decrypt(n, ciphertext):
"""Decrypt the string and return the plaintext"""
result = ''
for l in ciphertext:
try:
i = (key.index(l) - n) % 26
result += key[i]
except ValueError:
result += l
return result
app = Flask(__name__)
# Change the SECRET_KEY to cometing else
app.config['SECRET_KEY'] = 'Test'
#"""
#class Encryption(Form):
# encode = StringField('encode', validators=[InputRequired()])
# decode = StringField('decode', validators=[InputRequired()]
#""""
from googlevoice import Voice
#from googlevoice.util import input
Email="Your Gmail "
passwd="Your Gmail Password"
# Route http request to /sms page then POST Data from sms form
@app.route('/sms', methods=['GET','POST'])
def SMS():
#form = Encryption()
#offset = 5
#Varify http Method is POST and if it is then get the From data
if request.method == 'POST':
text = request.form['text']
text2 = request.form['number']
voice = Voice()
voice.login(Email,passwd)
phoneNumber = text2
text = text
print phoneNumber
print text
voice.send_sms(phoneNumber, text)
return render_template('sms.html')
# Home route Accepts POST AND GET Methods:
@app.route('/', methods=['GET','POST'])
def index():
offset = 5
if request.method == 'POST':
text = request.form['encode']
text2 = request.form['decode'] # Decrypt data
encode3 = base64.b64encode(encrypt(offset, text)) #Decode base64 encoded string
decode2 = base64.b64decode(text2)
decode3 = decrypt(offset, decode2)
return render_template ('encode.html', encode3=encode3,decode3=decode3) # response with decoded and Decrypted data
#return render_template ('decode.html', decode3=decode3)
return render_template('index.html')
#Start Server
if __name__ == '__main__':
app.run(host='0.0.0.0',port=5000, debug=False)
| true |
3bf35f578be8d51c8ed5ab758a3e17a765668bce | Python | fbdesignpro/sweetviz | /sweetviz/type_detection.py | UTF-8 | 5,644 | 2.96875 | 3 | [
"MIT"
] | permissive | import pandas as pd
from sweetviz.sv_types import FeatureType
from sweetviz.from_profiling_pandas import is_boolean, is_numeric, is_categorical, could_be_numeric
def determine_feature_type(series: pd.Series, counts: dict,
must_be_this_type: FeatureType, which_dataframe: str) -> object:
# Replace infinite values with NaNs to avoid issues with histograms
# TODO: INFINITE VALUE HANDLING/WARNING
# series.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan,
# inplace=True)
if counts["value_counts_without_nan"].index.inferred_type.startswith("mixed"):
raise TypeError(f"\n\nColumn [{series.name}] has a 'mixed' inferred_type (as determined by Pandas).\n"
f"This is is not currently supported; column types should not contain mixed data.\n"
f"e.g. only floats or strings, but not a combination.\n\n"
f"POSSIBLE RESOLUTIONS:\n"
f"BEST -> Make sure series [{series.name}] only contains a certain type of data (numerical OR string).\n"
f"OR -> Convert series [{series.name}] to a string (if makes sense) so it will be picked up as CATEGORICAL or TEXT.\n"
f" One way to do this is:\n"
f" df['{series.name}'] = df['{series.name}'].astype(str)\n"
f"OR -> Convert series [{series.name}] to a numerical value (if makes sense):\n"
f" One way to do this is:\n"
f" df['{series.name}'] = pd.to_numeric(df['{series.name}'], errors='coerce')\n"
f" # (errors='coerce' will transform string values to NaN, that can then be replaced if desired;"
f" consult Pandas manual pages for more details)\n"
)
try:
# TODO: must_be_this_type ENFORCING
if counts["distinct_count_without_nan"] == 0:
# Empty
var_type = FeatureType.TYPE_ALL_NAN
# var_type = FeatureType.TYPE_UNSUPPORTED
elif is_boolean(series, counts):
var_type = FeatureType.TYPE_BOOL
elif is_numeric(series, counts):
var_type = FeatureType.TYPE_NUM
elif is_categorical(series, counts):
var_type = FeatureType.TYPE_CAT
else:
var_type = FeatureType.TYPE_TEXT
except TypeError:
var_type = FeatureType.TYPE_UNSUPPORTED
# COERCE: only supporting the following for now:
# TEXT -> CAT
# CAT/BOOL -> TEXT
# CAT/BOOL -> NUM
# NUM -> CAT
# NUM -> TEXT
if must_be_this_type != FeatureType.TYPE_UNKNOWN and \
must_be_this_type != var_type and \
must_be_this_type != FeatureType.TYPE_ALL_NAN and \
var_type != FeatureType.TYPE_ALL_NAN:
if var_type == FeatureType.TYPE_TEXT and must_be_this_type == FeatureType.TYPE_CAT:
var_type = FeatureType.TYPE_CAT
elif (var_type == FeatureType.TYPE_CAT or var_type == FeatureType.TYPE_BOOL ) and \
must_be_this_type == FeatureType.TYPE_TEXT:
var_type = FeatureType.TYPE_TEXT
elif (var_type == FeatureType.TYPE_CAT or var_type == FeatureType.TYPE_BOOL) and \
must_be_this_type == FeatureType.TYPE_NUM:
# Trickiest: Coerce into numerical
if could_be_numeric(series):
var_type = FeatureType.TYPE_NUM
else:
raise TypeError(f"\n\nCannot force series '{series.name}' in {which_dataframe} to be converted from its {var_type} to\n"
f"DESIRED type {must_be_this_type}. Check documentation for the possible coercion possibilities.\n"
f"POSSIBLE RESOLUTIONS:\n"
f" -> Use the feat_cfg parameter (see docs on git) to force the column to be a specific type (may or may not help depending on the type)\n"
f" -> Modify the source data to be more explicitly of a single specific type\n"
f" -> This could also be caused by a feature type mismatch between source and compare dataframes:\n"
f" In that case, make sure the source and compared dataframes are compatible.\n")
elif var_type == FeatureType.TYPE_NUM and must_be_this_type == FeatureType.TYPE_CAT:
var_type = FeatureType.TYPE_CAT
elif var_type == FeatureType.TYPE_BOOL and must_be_this_type == FeatureType.TYPE_CAT:
var_type = FeatureType.TYPE_CAT
elif var_type == FeatureType.TYPE_NUM and must_be_this_type == FeatureType.TYPE_TEXT:
var_type = FeatureType.TYPE_TEXT
else:
raise TypeError(f"\n\nCannot convert series '{series.name}' in {which_dataframe} from its {var_type}\n"
f"to the desired type {must_be_this_type}.\nCheck documentation for the possible coercion possibilities.\n"
f"POSSIBLE RESOLUTIONS:\n"
f" -> Use the feat_cfg parameter (see docs on git) to force the column to be a specific type (may or may not help depending on the type)\n"
f" -> Modify the source data to be more explicitly of a single specific type\n"
f" -> This could also be caused by a feature type mismatch between source and compare dataframes:\n"
f" In that case, make sure the source and compared dataframes are compatible.\n")
return var_type
| true |
5f7753c0187b296d7aea7aa06ca06a0a19ec3c61 | Python | Baidaly/datacamp-samples | /14 - Data manipulation with Pandas/chapter 4 - Creating and Visualizing DataFrames/7 - Replacing missing values.py | UTF-8 | 1,064 | 4.34375 | 4 | [
"MIT"
] | permissive | '''
Another way of handling missing values is to replace them all with the same value. For numerical variables, one option is to replace values with 0โ you'll do this here. However, when you replace missing values, you make assumptions about what a missing value means. In this case, you will assume that a missing number sold means that no sales for that avocado type were made that week.
In this exercise, you'll see how replacing missing values can affect the distribution of a variable using histograms. You can plot histograms for multiple variables at a time as follows:
dogs[["height_cm", "weight_kg"]].hist()
pandas has been imported as pd and matplotlib.pyplot has been imported as plt. The avocados_2016 dataset is available.
'''
# From previous step
cols_with_missing = ["small_sold", "large_sold", "xl_sold"]
avocados_2016[cols_with_missing].hist()
plt.show()
# Fill in missing values with 0
avocados_filled = avocados_2016.fillna(0)
# Create histograms of the filled columns
avocados_filled[cols_with_missing].hist()
# Show the plot
plt.show() | true |
2186a52278d92e7f9a0869de7c48df843eb40202 | Python | Harshit4199/Auto_Attendance_System | /main.py | UTF-8 | 913 | 2.828125 | 3 | [] | no_license | import database
import datasetcreator
import detector
import trainning
import datetime
print "press 1 to add student..."
print "press 2 to take attendance..."
number = input()
if number==1:
Id = input("enter id :")
name = raw_input("enter name : ")
data = [(Id,name,0)]
database.add_user(data)
datasetcreator.create_data(name,Id)
trainning.train()
elif number==2:
subject = ['SE','WT','TOC','AJAVA']
print ("select subject...")
print "1 SOFTWARE ENGINEERING"
print "2 WEB TECHNOLOGY"
print "3 TOC"
print "4 ADVANCED JAVA"
sub = input()-1
date = datetime.datetime.today().strftime('_%d_%b_%y')
time = datetime.datetime.today().strftime("%H:%M")
print subject[sub]
print date
print time
column_name = subject[sub]+date
database.update(column_name)
else:
print "enter valid number !"
| true |
029140a455d7c82734d4e5f602b9f9826b73f6f1 | Python | ferdivanderspoel/pythonProject | /Basic track/week 4/exercise 4.9.1.py | UTF-8 | 398 | 4.125 | 4 | [] | no_license | import turtle
def draw_square(animal, size):
for _ in range(4):
animal.color('pink')
animal.forward(size)
animal.left(90)
animal.penup()
animal.forward(size * 2)
animal.pendown()
window = turtle.Screen()
window.bgcolor("lightgreen")
tess = turtle.Turtle()
tess.pensize(3)
for _ in range(5):
draw_square(tess, 20)
tess.speed(10)
window.mainloop() | true |
6dd315e3feb7734bc6dbe6eb64dee4ef745e21a7 | Python | LavinaVRovine/pipe_data | /helpers.py | UTF-8 | 1,612 | 2.9375 | 3 | [] | no_license | import pandas as pd
from config import PIPE_DATE_FORMAT
import json
from datetime import datetime
from typing import Optional
class GetNotSucceedException(Exception):
pass
def extract_id(df: pd.DataFrame, column_name: str) -> Optional[pd.Series]:
try:
if column_name == 'org_id':
return df[column_name].apply(lambda x: None if x is None else x['value'])
else:
return df[column_name].apply(lambda x: None if x is None else x['id'])
except KeyError:
return
def reformat_dates(df: pd.DataFrame) -> pd.DataFrame:
"""
Formats str:dates in specific format to date. If does not match format, do nothing. Can't use
pd to_datetime, as it reformats even ints and floats
:param df: dataframe
:return: formatted dataframe
"""
for col in list(df.columns):
try:
df[col] = df[col].apply(lambda x: datetime.strptime(x, PIPE_DATE_FORMAT) if x is not None else None)
except TypeError or ValueError:
pass
except:
pass
return df
def add_writetime_column(df: pd.DataFrame) -> None:
"""
adds column with current time inplace
:rtype: None
"""
df.loc[:, 'db_write_time'] = datetime.now()
return
def dict2json(dictionary):
return json.dumps(dictionary, ensure_ascii=False)
def handle_jsons(df):
for col in list(df.columns):
if df[col].apply(lambda x: type(x) is dict or type(x) is list).any():
# map twice...dict to str, str to str squared
df.loc[:, col] = df[col].map(dict2json).map(dict2json)
return df
| true |
bb7969242bc1795218db73646800124ad1badf65 | Python | innuy/python_unitest_example | /test_arithmetic_example.py | UTF-8 | 403 | 3.34375 | 3 | [] | no_license | from unittest import TestCase, main
class TestBasicArithmeticMethods(TestCase):
def test_add(self):
self.assertEqual(2 + 2, 4)
def test_subtract(self):
self.assertEqual(3 - 2, 1)
def test_multiply(self):
self.assertEqual(3 * 3, 9)
def test_equal(self):
self.assertTrue(3 == 3)
self.assertFalse(2 == 9)
if __name__ == '__main__':
main()
| true |
d7a9b20fe2bf663daab124a88c312cef8423fa37 | Python | tombh/high-fidelity-generative-compression | /src/compression/reversed_coders.py | UTF-8 | 5,889 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | def ans_index_encoder_reversed(symbols, indices, cdf, cdf_length, cdf_offset, precision,
overflow_width=OVERFLOW_WIDTH, **kwargs):
message = vrans.empty_message(())
coding_shape = symbols.shape[1:]
symbols = symbols.astype(np.int32).flatten()
indices = indices.astype(np.int32).flatten()
max_overflow = (1 << overflow_width) - 1
overflow_cdf_size = (1 << overflow_width) + 1
overflow_cdf = np.arange(overflow_cdf_size, dtype=np.uint64)
enc_statfun_overflow = _indexed_cdf_to_enc_statfun(overflow_cdf)
dec_statfun_overflow = _indexed_cdf_to_dec_statfun(overflow_cdf,
len(overflow_cdf))
overflow_push, overflow_pop = base_codec(enc_statfun_overflow,
dec_statfun_overflow, overflow_width)
# LIFO - last item compressed is first item decompressed
for i in reversed(range(len(indices))): # loop over flattened axis
cdf_index = indices[i]
cdf_i = cdf[cdf_index]
cdf_length_i = cdf_length[cdf_index]
assert (cdf_index >= 0 and cdf_index < cdf.shape[0]), (
f"Invalid index {cdf_index} for symbol {i}")
max_value = cdf_length_i - 2
assert max_value >= 0 and max_value < cdf.shape[1] - 1, (
f"Invalid max length {max_value} for symbol {i}")
# Data in range [offset[cdf_index], offset[cdf_index] + m - 2] is ANS-encoded
# Map values with tracked probabilities to range [0, ..., max_value]
value = symbols[i]
value -= cdf_offset[cdf_index]
# If outside of this range, map value to non-negative integer overflow.
overflow = 0
if (value < 0):
overflow = -2 * value - 1
value = max_value
elif (value >= max_value):
overflow = 2 * (value - max_value)
value = max_value
assert value >= 0 and value < cdf_length_i - 1, (
f"Invalid shifted value {value} for symbol {i} w/ "
f"cdf_length {cdf_length[cdf_index]}")
# Bin of discrete CDF that value belongs to
enc_statfun = _indexed_cdf_to_enc_statfun(cdf_i)
dec_statfun = _indexed_cdf_to_dec_statfun(cdf_i, cdf_length_i)
symbol_push, symbol_pop = base_codec(enc_statfun, dec_statfun, precision)
message = symbol_push(message, value)
if value == max_value:
pass
encoded = vrans.flatten(message)
message_length = len(encoded)
return encoded, coding_shape
def vec_ans_index_encoder_reversed(symbols, indices, cdf, cdf_length, cdf_offset, precision,
coding_shape, overflow_width=OVERFLOW_WIDTH, **kwargs):
"""
Vectorized version of `ans_index_encoder`. Incurs constant bit overhead,
but is faster.
ANS-encodes unbounded integer data using an indexed probability table.
"""
symbols_shape = symbols.shape
B, n_channels = symbols_shape[:2]
symbols = symbols.astype(np.int32)
indices = indices.astype(np.int32)
cdf_index = indices
assert bool(np.all(cdf_index >= 0)) and bool(np.all(cdf_index < cdf.shape[0])), (
"Invalid index.")
max_value = cdf_length[cdf_index] - 2
assert bool(np.all(max_value >= 0)) and bool(np.all(max_value < cdf.shape[1] - 1)), (
"Invalid max length.")
# Map values with tracked probabilities to range [0, ..., max_value]
values = symbols - cdf_offset[cdf_index]
# If outside of this range, map value to non-negative integer overflow.
overflow = np.zeros_like(values)
of_mask = values < 0
overflow = np.where(of_mask, -2 * values - 1, overflow)
values = np.where(of_mask, max_value, values)
of_mask = values >= max_value
overflow = np.where(of_mask, 2 * (values - max_value), overflow)
values = np.where(of_mask, max_value, values)
assert bool(np.all(values >= 0)), (
"Invalid shifted value for current symbol - values must be non-negative.")
assert bool(np.all(values < cdf_length[cdf_index] - 1)), (
"Invalid shifted value for current symbol - outside cdf index bounds.")
if B == 1:
# Vectorize on patches - there's probably a way to interlace patches with
# batch elements for B > 1 ...
if ((symbols_shape[2] % PATCH_SIZE[0] == 0) and (symbols_shape[3] % PATCH_SIZE[1] == 0)) is False:
values = utils.pad_factor(torch.Tensor(values), symbols_shape[2:],
factor=PATCH_SIZE).cpu().numpy().astype(np.int32)
indices = utils.pad_factor(torch.Tensor(indices), symbols_shape[2:],
factor=PATCH_SIZE).cpu().numpy().astype(np.int32)
assert (values.shape[2] % PATCH_SIZE[0] == 0) and (values.shape[3] % PATCH_SIZE[1] == 0)
assert (indices.shape[2] % PATCH_SIZE[0] == 0) and (indices.shape[3] % PATCH_SIZE[1] == 0)
values, _ = compression_utils.decompose(values, n_channels)
cdf_index, unfolded_shape = compression_utils.decompose(indices, n_channels)
coding_shape = values.shape[1:]
message = vrans.empty_message(coding_shape)
# LIFO - last item compressed is first item decompressed
for i in reversed(range(len(cdf_index))): # loop over batch dimension
# Bin of discrete CDF that value belongs to
value_i = values[i]
cdf_index_i = cdf_index[i]
cdf_i = cdf[cdf_index_i]
cdf_i_length = cdf_length[cdf_index_i]
enc_statfun = _vec_indexed_cdf_to_enc_statfun(cdf_i)
dec_statfun = _vec_indexed_cdf_to_dec_statfun(cdf_i, cdf_i_length)
symbol_push, symbol_pop = base_codec(enc_statfun, dec_statfun, precision)
message = symbol_push(message, value_i)
"""
Encode overflows here
"""
encoded = vrans.flatten(message)
message_length = len(encoded)
# print('{} symbols compressed to {:.3f} bits.'.format(B, 32 * message_length))
return encoded, coding_shape
| true |
405d13c039da57dd2ab52b724c0672cf2a81f905 | Python | Cherrycold/fantastic-rotary-phone | /src/painter.py | UTF-8 | 4,426 | 2.671875 | 3 | [] | no_license | from tkinter import *
import tkinter.scrolledtext as scrolledtext
import time
import threading
import queue
from video import Video
class Painter(object):
def __init__(self):
self.window = Tk()
self.window.title("่ง้ข")
self.buttonlist = {}
self.toplist = {}
self.textlist = {}
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 640
wh = 480
self.allnum = 0
self.num = 0
self.window.geometry("%dx%d+%d+%d" %(ww,wh,(sw-ww) / 2,(sh-wh) / 2))
self.window.resizable(0,0)
self.nf_queue = queue.Queue()
def process_msg(self):
while True:
time.sleep(0.3)
if not self.nf_queue.empty():
msg = self.nf_queue.get()
if msg == 1:
self.edit()
elif msg == 2:
self.delprogressing()
def start(self):
start = Button(self.window, text='ๅฏๅจ่ฟๅบฆๆก', command=self.progressing)
start.place(x=400, y=105)
over = Button(self.window, text='ๅๆญข่ฟๅบฆๆก', command=self.delprogressing)
over.place(x=200, y=105)
self.buttonlist["start"] = start
self.buttonlist["over"] = over
#ๅฏๅจไธไธช็บฟ็จ๏ผๅป็ๅฌๆฐๆฎ
t1 = threading.Thread(target=self.process_msg)
t1.setDaemon(True)
t1.start()
self.window.mainloop()
def sync_windows(self,event=None):
#้่ฆไธไธช็ฎก็ๅจ๏ผ็ปไธๆชไฝ็ฝฎ
width = event.width
height = event.height
x = event.x
y = event.y
for _,top in self.toplist.items():
top.geometry("320x110+"+str(int(x+width/2))+"+"+str(int(y+height/2)))
def edit(self):
self.num = self.num + 1
top = self.toplist["progressing"]
T = self.textlist["progressing"]
fill_line = top.canvas.create_rectangle(1.5, 1.5, 0, 23, width=0, fill="green")
# for n in range (self.num, self.allnum+1) :
text = str(int(self.num)) + "/" +str(int(self.allnum))
T.configure(text=text)
s = self.num
if self.num != 0:
s = 265 / 100 * self.num
top.canvas.coords(fill_line, (0, 0, s, 60))
self.window.update()
# time.sleep(0.02) # ๆงๅถ่ฟๅบฆๆกๆตๅจ็้ๅบฆ
def test(self):
self.window.attributes("-disabled", 1) #็ฆๆญขไธป็ชๅฃ
# time.sleep(1)
# self.delprogressing()
# # ๆธ
็ฉบ่ฟๅบฆๆก
# fill_line = top.canvas.create_rectangle(1.5, 1.5, 0, 23, width=0, fill="white")
# x = 500 # ๆช็ฅๅ้๏ผๅฏๆดๆน
# n = 465 / x # 465ๆฏ็ฉๅฝขๅกซๅ
ๆปก็ๆฌกๆฐ
# for t in range(x):
# n = n + 465 / x
# # ไปฅ็ฉๅฝข็้ฟๅบฆไฝไธบๅ้ๅผๆดๆฐ
# top.canvas.coords(fill_line, (0, 0, n, 60))
# self.window.update()
# time.sleep(0) # ๆถ้ดไธบ0๏ผๅณ้ฃ้ๆธ
็ฉบ่ฟๅบฆๆก
def progressing(self):
    """Open the progress dialog, disable the main window, and launch the
    video-processing worker thread that feeds the notification queue."""
    top = Toplevel()
    self.toplist["progressing"] = top
    top.resizable(0,0)
    top.overrideredirect(True)  # borderless dialog
    curWidth = top.winfo_width()    # NOTE(review): unused
    curHeight = top.winfo_height()  # NOTE(review): unused
    x = self.window.winfo_x()
    y = self.window.winfo_y()
    # scnWidth, scnHeight = self.window.winfo_geometry()
    # place the dialog roughly over the centre of the main window
    top.geometry("320x110+"+str(int(x+320-50))+"+"+str(int(y+240)))
    text = str(int(self.num)) + "/" +str(int(self.allnum))
    T = Label(top, text=text)  # "done/total" counter label
    self.textlist["progressing"] = T
    T.place(x=50, y=60)
    top.canvas = Canvas(top, width=565, height=22, bg="white")
    top.canvas.place(x=110, y=60)
    # keep the dialog glued to the main window when it moves/resizes
    self.window.bind("<Configure>", self.sync_windows)
    self.test()  # disable the main window
    self.buttonlist["start"].place_forget()
    self.buttonlist["over"].place_forget()
    # worker posts 1 (tick) / 2 (done) into nf_queue; see process_msg()
    vd = Video(self.nf_queue)
    self.allnum = vd.get_num()
    t1 = threading.Thread(target=vd.start)
    t1.setDaemon(True)
    t1.start()
    top.mainloop()
def delprogressing(self):
    """Tear down the progress dialog, re-enable the main window, and
    restore the start/stop buttons to their original positions."""
    self.toplist["progressing"].destroy()
    self.window.attributes("-disabled", 0)
    self.buttonlist["start"].place(x=400, y=105)
    self.buttonlist["over"].place(x=200, y=105)
# Script entry point: build the GUI and block in the Tk main loop.
if __name__ =='__main__':
    painter = Painter()
    painter.start()
0899bfe5ea2296a92466cfefa678d12a1bf279b4 | Python | PythonistasBR/bot | /autonomia/features/basic.py | UTF-8 | 3,054 | 2.59375 | 3 | [
"MIT"
] | permissive | import json
from urllib import parse, request
from telegram.ext import CallbackContext, CommandHandler, Filters, MessageHandler
from telegram.update import Update
from autonomia.core import bot_handler
def cmd_all(update: Update, context: CallbackContext):
    """
    Mention every administrator of the chat in one markdown message.

    NOTE(review): despite the name, only chat *admins* are mentioned —
    the Bot API offers no way to enumerate all members.

    :param update: incoming telegram update (provides the chat id)
    :param context: handler context (provides the bot instance)
    """
    chat_id = update.message.chat_id
    admins = context.bot.get_chat_administrators(chat_id)
    admins = [item.user.mention_markdown() for item in admins]
    update.message.reply_markdown(" ".join(admins))
@bot_handler
def all_factory():
    """
    /all - mention all admins on the room
    """
    return CommandHandler("all", cmd_all)
def cmd_me(update: Update, context: CallbackContext):
    """
    IRC-style /me: reply with "<first_name> <text>" in italics.

    telegram's `from` field is exposed as `from_user` because `from`
    is a reserved word in Python.
    """
    sender = update.message.from_user.first_name
    action = " ".join(context.args)
    update.message.reply_markdown(f"_{sender} {action}_")
@bot_handler
def me_factory():
    """
    /me <text> - clone /me from IRC
    """
    return CommandHandler("me", cmd_me, pass_args=True)
@bot_handler
def au_factory():
    """
    Reply with the "au" sticker whenever a message contains the word "au"
    (case-insensitive, whole word).
    """
    return MessageHandler(Filters.regex(r".*\b([aA][uU])\b.*"), cmd_au)
def cmd_au(update: Update, context: CallbackContext):
    """
    Send the "au" sticker to the chat (triggered by au_factory's regex).
    """
    chat = update.message.chat
    context.bot.send_sticker(chat.id, "CAADAQAD0gIAAhwh_Q0qq24fquUvQRYE")
def cmd_larissa(update: Update, context: CallbackContext):
    # Send the "larissa" sticker (triggered by larissa_factory's regex).
    chat = update.message.chat
    context.bot.send_sticker(chat.id, "CAADAQADCwADgGntCPaKda9GXFZ3Ag")
@bot_handler
def larissa_factory():
    # Trigger cmd_larissa on "HB" or birthday-like phrases.
    # NOTE(review): the pattern looks corrupted — '[\[hH\].nr.qu.' opens a
    # character class that only closes at the later ']'; verify it still
    # matches what was intended before relying on it.
    return MessageHandler(
        Filters.regex(r".*\b([Hh][Bb]|[\[hH\].nr.qu.[\s]*[bB].st.s)\b.*"), cmd_larissa
    )
def cmd_aurelio(update: Update, context: CallbackContext):
    """
    Teach you how to find something on the internet: reply with an
    lmgtfy link for the url-encoded argument text.
    """
    message = parse.quote(" ".join(context.args))
    update.message.reply_markdown(f"Tenta ai, http://lmgtfy.com/?q={message}")
@bot_handler
def aurelio_factory():
    """
    /aurelio - teach James how to use google
    """
    return CommandHandler("aurelio", cmd_aurelio, pass_args=True)
def cmd_joke(update: Update, context: CallbackContext):
    """
    Fetch a random Chuck Norris joke from icndb.com and reply with it.

    Any network or parsing failure falls back to a canned reply
    (deliberate best-effort behaviour, preserved from the original).
    """
    try:
        # Fix: close the HTTP response deterministically — the original
        # never closed the urlopen handle.
        with request.urlopen("http://api.icndb.com/jokes/random") as req:
            joke = parse.unquote(json.loads(req.read())["value"]["joke"])
        update.message.reply_text(joke)
    except Exception:
        update.message.reply_text("To sem saco!")
@bot_handler
def joke_factory():
    """
    /joke - send a random joke about Chuck Norris
    """
    return CommandHandler("joke", cmd_joke)
def cmd_clear(update: Update, context: CallbackContext):
    # Push 50 near-blank lines so previous messages scroll off screen.
    update.message.reply_text(".\n" * 50)
@bot_handler
def clear_factory():
    """
    /clear - save your ass at work
    """
    return CommandHandler("clear", cmd_clear)
| true |
77df635a3a2217d920d3027043340767072248d0 | Python | codi21/inteligencia_artifical | /Agente_racional/Agente_reactivo_simple.py | UTF-8 | 2,717 | 2.921875 | 3 | [] | no_license | import random
from Enviroment import Enviroment
# n = random.randint(min ,max)
##--------------------------------------
##-----------AGENTE---------------------
##--------------------------------------
class Agente_reactivo_simple:
    """Simple reflex vacuum-cleaner agent.

    Sweeps the grid along a serpentine (boustrophedon) path — even rows
    left-to-right, odd rows right-to-left — sucking up dirt whenever the
    current cell is dirty, and marking visited cells with "*".
    """

    def __init__(self,env):
        self.puntos=0  # dirt cells cleaned so far (the agent's score)
        self.env = env
        print("--------------MAPA GENERADO---------------")
        self.env.print_enviroment()
        self.think()  # run the whole cleaning episode immediately

    def up(self):
        # move one row up (not used by the serpentine policy in think())
        self.env.init_posX -= 1
        return

    def down(self):
        # move one row down
        self.env.init_posX += 1
        return

    def left(self):
        # move one column left
        self.env.init_posY -= 1
        return

    def right(self):
        # move one column right
        self.env.init_posY += 1
        return

    def suck(self):
        # clean the current cell and score one point
        self.env.map[self.env.init_posX][self.env.init_posY] = "-"
        self.puntos+=1
        return

    def idle(self):
        return

    #def prespective(self,env):
    def think(self):
        """Main control loop: sense, clean, then move along the serpentine
        path until the bottom-right corner is reached or the environment
        refuses further actions."""
        print("cantidad de suciedad",self.env.getAmount())
        while(self.env.accept_action(self.puntos)):
            if (self.env.if_dirty()):
                print("clean")
                self.suck()
            # finished: the agent stands on the bottom-right corner
            if(self.env.init_posX == self.env.sizeX -1 and self.env.init_posY == self.env.sizeY - 1 ):
                print("Limpieza finalizada")
                print("Se limpio ",self.puntos," de ",self.env.getAmount() , " de suciedad")
                print("Se utilizaron" , self.env.vidas," vidas")
                break
            if(self.env.init_posX % 2 == 0 and self.env.init_posY != self.env.sizeY - 1 ):
                # even row, not yet at the right edge: keep moving right
                self.env.map[self.env.init_posX][self.env.init_posY] = "*"
                print("derecha")
                self.right()
            elif(self.env.init_posX % 2 == 0 and self.env.init_posY == self.env.sizeY - 1):
                # even row, right edge reached: drop down one row
                self.env.map[self.env.init_posX][self.env.init_posY] = "*"
                print("down")
                self.down()
            elif(self.env.init_posX % 2 != 0 and self.env.init_posY == 0 and self.env.init_posX != self.env.sizeX - 1):
                # odd row, left edge reached (and not the last row): drop down
                self.env.map[self.env.init_posX][self.env.init_posY] = "*"
                print("down")
                self.down()
            elif(self.env.init_posX %2 != 0 and self.env.init_posX == self.env.sizeX -1 ):
                # odd-indexed last row: head right toward the terminal
                # bottom-right cell
                self.env.map[self.env.init_posX][self.env.init_posY] = "*"
                print("right")
                self.right()
            else:
                # odd row, right of the left edge: keep moving left
                self.env.map[self.env.init_posX][self.env.init_posY] = "*"
                print("left")
                self.left()
            self.env.print_enviroment()
# Demo run: 128x128 grid, agent starting at (0, 0), 0.8 dirt density.
e = Enviroment(128,128,0,0,0.8)
a = Agente_reactivo_simple(e)
18be542956214c8e6e2aedc912fd4f414ed85c6a | Python | oscarsura/sudoku | /test_driver.py | UTF-8 | 1,696 | 2.6875 | 3 | [
"MIT"
] | permissive | import sys, time
import hashlib
from os import system
output_dir = 'stats/'
unop = 'python solution.py'
reop = 'python optimized_solution.py'
rounds = 10
unopt_array = []
optim_array = []
console = False
raw = True
dump = True
def clear():
    """Wipe the terminal by shelling out to the `clear` command (POSIX)."""
    system('clear')
def init():
    """Show a 3-second console countdown before benchmarking starts."""
    for x in range(3):
        clear()
        print('Starting algorithm testing in ' + str(3-x))
        time.sleep(1)
def test(s):
    """Run command string `s` `rounds` times, appending each wall-clock
    duration (seconds) to the matching global results list.

    NOTE(review): dispatch relies on the substring 'opt' appearing only in
    the optimized command line — fragile if the commands are renamed.
    """
    for x in range(rounds):
        start = time.time()
        _ = system(s)
        end = time.time()
        diff = end - start
        arr = unopt_array
        if 'opt' in s:
            arr = optim_array
        arr.append(diff)
def print_results():
    """Print the timing results to stdout according to the module flags:
    nothing when `console` is False, interleaved raw numbers when `raw`
    is True, otherwise a formatted per-round comparison."""
    clear()
    if not console:
        return
    if raw:
        # raw mode: alternate unoptimized / optimized durations, one per line
        for x in range(rounds):
            sys.stdout.write(str(unopt_array[x]) + '\n')
            sys.stdout.write(str(optim_array[x]) + '\n')
        return
    print('Sudoku Algorithm Test Driver v0.0.1\n')
    for x in range(rounds):
        sys.stdout.write('unoptimized-time ' + str(x) + ': ' + str(unopt_array[x]) + '\n')
        sys.stdout.write('  optimized-time ' + str(x) + ': ' + str(optim_array[x]) + '\n')
        # positive advantage means the optimized variant was slower
        sys.stdout.write('algorithmadvantage ' + str(optim_array[x] - unopt_array[x]) + '\n')
        sys.stdout.write('----------------------------------\n')
def dump():
    """Persist the timing arrays to a uniquely named file under output_dir.

    Fixes vs. the original:
    - `bytes(curr_time)` raises TypeError on Python 3 (str needs an
      explicit encoding); encode before hashing.
    - the output file was never closed; use a context manager.
    - the `if not dump: return` guard was dead code: this function shadows
      the module-level `dump = True` flag, so the name always referred to
      the (truthy) function object; the guard was removed.
    """
    curr_time = str(time.time())
    # md5 of the timestamp gives a collision-unlikely, filesystem-safe name
    filename_md5 = hashlib.md5(curr_time.encode('utf-8'))
    filename = output_dir + filename_md5.hexdigest()
    with open(filename, 'w+') as out:
        for x in range(rounds):
            out.write(str(unopt_array[x]) + '\n')
            out.write(str(optim_array[x]) + '\n')
# Benchmark driver: countdown, time both solver variants, report, persist.
init()
test(unop)
test(reop)
print_results()
dump()
| true |
9db2f833352c070df73fe0e515ef0894ba66e336 | Python | Seventeen-coding/python_study | /byte_of_ptyhon/05module/09from_import.py | UTF-8 | 86 | 2.90625 | 3 | [] | no_license |
# Demonstrates `from ... import ...`: bring only `sqrt` into the namespace
# instead of importing the whole `math` module.
#from sys import argv
from math import sqrt
print('Square root of 16 is', sqrt(16))
fe0acc2c3d4f23d43a36883c6a2c455310a961e0 | Python | petrihei/DH_ennenjanyt | /data_parser.py | UTF-8 | 674 | 2.71875 | 3 | [] | no_license | import re
# data_parser.py parses the articles that deal with digital topics
# Input data is ennenjanyt.net articles 2001-2016 (https://korp.csc.fi/download/lehdet90-00/) from Kielipankki
fileIn = 'teksti.txt'

# Read the whole corpus.  Fix: use context managers so both file handles
# are closed even on error — the original leaked the input and output files.
with open(fileIn, 'r', encoding="utf-8") as inputfile:
    document = inputfile.read()

# Split the corpus into individual articles on the <text title= marker.
profilesList = re.split(r'<text title=', document)

word = "digita"

# Write out only the articles that mention the search word
# (case-insensitive substring match).
with open("Ennenjanyt_output_data.txt", "w") as text_file:
    for line in profilesList:
        line_str = str(line)
        line_lower = line_str.lower()
        if word in line_lower:
            text_file.write(line)
edd8dc0e522fe6f5e3d7b7014c89163ee08ad3d8 | Python | xzguy/LeetCode | /Problem 1 - 100/P5.py | UTF-8 | 3,294 | 3.578125 | 4 | [] | no_license | def expandPalindrome(s: str, left: int, right: int) -> int:
L, R = left, right
while (L >= 0 and R < len(s) and s[L] == s[R]):
L -= 1
R += 1
return R - L - 1
def longestPalindrome(s: str) -> str:
if (s == None or len(s) < 1):
return ""
start, end = 0, 0
for i in range(len(s)):
len1 = expandPalindrome(s, i, i)
len2 = expandPalindrome(s, i, i+1)
len_max = max(len1, len2)
if len_max > end - start:
start = i - (len_max-1) // 2
end = i + len_max // 2
return s[start: end+1]
s = "abadabca"
print(longestPalindrome(s))
class Solution:
    # time complexity O(len(s)^2), center-expand method
    def longestPalindrome(self, s: str) -> str:
        """Longest palindromic substring by expanding around every center."""
        max_palindrome = ""
        for c in range(len(s)):
            # case 1, palindrome center is one character (odd length 2*(r-1)+1)
            r = 1
            while c-r >=0 and c+r <len(s) and s[c-r] == s[c+r]:
                r += 1
            if 2*(r-1) + 1 > len(max_palindrome):
                max_palindrome = s[c-r+1:c+r]
            # case 2, palindrome center is two characters (even length 2*(r-1))
            r = 1
            while c-r+1 >=0 and c+r <len(s) and s[c-r+1] == s[c+r]:
                r += 1
            if 2*(r-1) > len(max_palindrome):
                max_palindrome = s[c-r+2:c+r]
        return max_palindrome

    # time complexity O(len(s)), Manacher's method
    def longestPalindrome_Manacher(self, s: str) -> str:
        """Longest palindromic substring via Manacher's algorithm.

        `s` is interleaved with '#' so every palindrome in the padded
        string has odd length; palindrome_radius[i] is the radius around
        center i, and radii already computed are mirrored across the
        current palindrome to skip redundant comparisons.
        """
        bogus_char = '#'
        odd_s = bogus_char + bogus_char.join([c for c in s]) + bogus_char
        palindrome_radius = [0] * len(odd_s)
        center = 0
        radius = 0
        while center < len(odd_s):
            # determine the longest palindrome starting at
            # center-radius and going to center + radius
            while center - radius - 1 >= 0 and \
                center + radius + 1 < len(odd_s) and \
                odd_s[center-radius-1] == odd_s[center+radius+1]:
                radius += 1
            palindrome_radius[center] = radius
            old_center = center
            old_radius = radius
            center += 1
            radius = 0
            # reuse mirrored radii inside the palindrome just found
            while center <= old_center + old_radius:
                mirror_center = 2*old_center - center
                max_mirror_radius = old_center + old_radius - center
                if palindrome_radius[mirror_center] < max_mirror_radius:
                    # mirror palindrome fits entirely inside: copy it
                    palindrome_radius[center] = palindrome_radius[mirror_center]
                    center += 1
                elif palindrome_radius[mirror_center] > max_mirror_radius:
                    # mirror palindrome sticks out: clamp to the boundary
                    palindrome_radius[center] = max_mirror_radius
                    center += 1
                else:
                    # palindrome_radius[mirror_center] == max_mirror_radius:
                    # exact fit — this center may still expand further
                    radius = max_mirror_radius
                    break
        max_radius = max(palindrome_radius)
        center_idx = palindrome_radius.index(max_radius)
        longest_palindrome_str = odd_s[center_idx-max_radius : center_idx+max_radius+1]
        # strip the '#' padding (every other character, skipping the first)
        longest_palindrome_str = longest_palindrome_str[1:-1:2]
        return longest_palindrome_str
s = "cbbd"
s = "abadabca"
sol = Solution()
print(sol.longestPalindrome_Manacher(s)) | true |
8182badcfdbd97968cdbbb95de5b8c5bc893a2ad | Python | israel-dryer/PyJobScraper | /acct_job_scrape/dhg.py | UTF-8 | 2,917 | 2.796875 | 3 | [] | no_license | """
JOB SCRAPER for DHG (Dixon Hughes Goodman)
Created: 2020-11-30
Modified: 2021-03-22
Author: Israel Dryer
2021-03-22 > Site changed; adjusted get request and json parsing logic.
"""
import wsl.webscraper as ws
from wsl.datatools import DataTools, ConnectionString
CONN_STRING = ConnectionString.build()
INSERT_QUERY = "INSERT INTO jobs.RawJobData VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?)"
URL = ("https://jobs.dhg.com/?method=cappservicesPortal.getPortalWidgetListData&_dc=1616424559756" +
"&listId=opportunitylist-5&columnList=id,positiontitletext,url,location&localKeywords=" +
"&categoryID=2&portalModelID=1&page=1&start=1&limit=500")
def parse_json_to_dict(soup):
    """Extract the ld+json <script> payload from `soup` and decode it.

    Returns the decoded dict, or None when the tag is missing, empty,
    or its payload is not valid JSON.
    """
    script = soup.find('script', {'type': 'application/ld+json'})
    try:
        payload = script.contents[0]
    except (IndexError, AttributeError):
        # tag absent (find returned None) or tag has no content
        return
    try:
        return ws.json.loads(payload)
    except ws.json.JSONDecodeError:
        return
class JobScraper(ws.WebScraper):
    """Web scraper for DHG jobs"""

    def __init__(self):
        super().__init__(name='DHG')

    def extract_card_data(self, card):
        # not used for this site: job data comes from per-posting pages
        pass

    def extract_page_urls(self, page):
        """Extract urls from the page for further scraping; return to `urls_to_scrape`"""
        # the jobs endpoint returns JSON; each record carries a posting URL
        json_data = self.get_request(URL, out_format='json')['query']['data']
        for record in json_data:
            self.urls_to_scrape.add(record['url'])

    def extract_page_data(self, page):
        """Extract data from page; return should reflect final form and return to `scraped_data`"""
        job_id = page['identifier']
        req_id = job_id.split('-')[-1]  # requisition id: last dash-separated token
        title = page['title']
        try:
            # jobLocation may be a single object or a list; take the first
            location = page['jobLocation'][0] if isinstance(page['jobLocation'], list) else page['jobLocation']
            city = location['address']['addressLocality']
            state = location['address']['addressRegion']
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # prefer `except (KeyError, IndexError, TypeError)`.
            city = state = ''
        # the description field is HTML; strip the tags
        description = ws.BeautifulSoup(page['description'], 'lxml').text
        # '185-' is this scraper's source prefix in the composite record id
        record_id = '185-' + self.today + str(job_id) + str(req_id)
        return record_id, self.today, job_id, req_id, self.name, title, "", "", city, state, "", description

    def run(self):
        """Run the scraper"""
        self.extract_page_urls(None)
        for url in self.urls_to_scrape:
            soup = self.get_request(url, out_format='soup')
            page = parse_json_to_dict(soup)
            if page:
                record = self.extract_page_data(page)
                self.data_scraped.append(record + (url,))
        if self.data_scraped:
            DataTools.save_to_database(self.data_scraped, CONN_STRING, INSERT_QUERY)
        print(f"{self.name} >> {len(self.data_scraped)} records")
# Script entry point: scrape all DHG postings and persist them.
if __name__ == '__main__':
    print("Starting...")
    scraper = JobScraper()
    scraper.run()
    print("Finished.")
| true |
2103ab1c5cfdddc1c4204bbebf0345827f8be998 | Python | CodeUs-Coders/Back-End | /Back-End/crawling/sports.py | UTF-8 | 1,740 | 2.578125 | 3 | [] | no_license | from selenium import webdriver
from urllib.request import urlopen
from bs4 import BeautifulSoup
#import pandas as pd
import pymysql
# Launch a local Chrome via Selenium and load the EPL January-2020 schedule.
driver = webdriver.Chrome('C:/Users/multicampus/Downloads/chromedriver_win32/chromedriver')
driver.implicitly_wait(3)
driver.get('https://sports.news.naver.com/wfootball/schedule/index.nhn?year=2020&month=01&category=epl')
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
# SECURITY NOTE(review): database credentials are hard-coded in source.
db=pymysql.connect("localhost","ssafy","ssafy","crawling",charset="utf8")
cursor = db.cursor()
#print(soup)
# Parallel lists, one entry per fixture row.
day=[]
time=[]
left_team=[]
right_team=[]
place=[]
data_result=soup.find('tbody',{'id':'_monthlyScheduleList'}).findAll('tr')
for tr_data in data_result:
    # a row containing <em> starts a new calendar day; remember it for
    # the fixture rows that follow
    if tr_data.find('em')!=None:
        day_tmp=tr_data.find('em')
    # rows without a time/place cell are headers or separators: skip
    if tr_data.find('td',{'class':'time_place'})==None:
        continue
    # NOTE(review): extracting text via str(...).replace('<tag>', '') is
    # fragile; BeautifulSoup's .get_text() would be more robust.
    day.append(str(day_tmp).replace('<em>', '').replace('</em>',''))
    time.append(str(tr_data.find('span',{'class':'time'})).replace('<span class="time">', '').replace('</span>',''))
    place.append(str(tr_data.find('span',{'class':'place'})).replace('<span class="place">', '').replace('</span>',''))
    left_team.append(str(tr_data.find('span',{'class':'team_left'}).find('span',{'class':'name'})).replace('<span class="name">', '').replace('</span>',''))
    right_team.append(str(tr_data.find('span',{'class':'team_right'}).find('span',{'class':'name'})).replace('<span class="name">', '').replace('</span>',''))
# Zip the parallel lists into DB rows: (day, time, "home vs away", place).
data=[]
for i in range(len(day)):
    data.append((day[i],time[i],left_team[i]+" vs "+right_team[i],place[i]))
print(data)
# Parameterized bulk insert (safe against SQL injection).
query="""insert into soccer(day,time,content,place) values (%s,%s,%s,%s)"""
cursor.executemany(query, tuple(data))
db.commit()
d8b8a3869f2f523232194a2ff3bd6fe6aaa0daa2 | Python | team-data-science/apache-kafka | /code/producer.py | UTF-8 | 801 | 2.890625 | 3 | [] | no_license |
from kafka import KafkaProducer
# The message payload to publish.
msg = 'Hello this is a test message'
# Create a producer connected to the local broker.
producer = KafkaProducer(bootstrap_servers='localhost:9092')
# Send `msg` to 'mytesttopic' asynchronously, wiring success/error
# callbacks, then flush so the send completes before the script exits.
def kafka_python_producer_async(producer, msg):
    producer.send('mytesttopic', msg).add_callback(success).add_errback(error)
    producer.flush()
def success(metadata):
    # called by kafka-python on acknowledged delivery
    print(metadata.topic)
def error(exception):
    # called by kafka-python when the send fails
    print(exception)
print("start producing")
# Produce the message; the payload must be serialized to bytes.
kafka_python_producer_async(producer,bytes(msg, 'utf-8'))
print("done")
'''
def kafka_python_producer_sync(producer, size):
    for _ in range(size):
        future = producer.send('topic', msg)
        result = future.get(timeout=60)
    producer.flush()
'''
76431a7ee6e0dd5587a0b8750e083c1a0aabdbf6 | Python | prosis369/Data-Structures | /subtract_diff.py | UTF-8 | 607 | 3.75 | 4 | [] | no_license | '''
1281. Subtract the Product and Sum of Digits of an Integer
Given an integer number n, return the difference between the product of its digits and the sum of its digits.
Example 1:
Input: n = 234
Output: 15
Explanation:
Product of digits = 2 * 3 * 4 = 24
Sum of digits = 2 + 3 + 4 = 9
Result = 24 - 9 = 15
'''
class Solution:
    def subtractProductAndSum(self, n: int) -> int:
        """Return (product of n's digits) - (sum of n's digits).

        Assumes n >= 1 per the problem constraints.

        Fixes two defects in the original:
        - it returned a hard-coded 0 for every n <= 10, which is wrong
          for n == 10 (0*1 - (0+1) == -1);
        - it relied on functools.reduce without importing it, raising
          NameError at runtime for n > 10.
        """
        product, total = 1, 0
        for ch in str(n):
            d = int(ch)
            product *= d
            total += d
        return product - total
440fda362aa0cd87d6d110686e50687fd1a8d2c9 | Python | alee86/Informatorio | /Practic/Estructuras de control/Complementarios/Complementarios5.py | UTF-8 | 893 | 4.78125 | 5 | [] | no_license | """
Diseรฑar un programa que lea las longitudes de los tres lados de un triรกngulo (L1,L2,L3) y determine quรฉ tipo
de triรกngulo es, de acuerdo a los siguientes casos.
Suponiendo que A determina el mayor de los tres lados y B y C corresponden con los otros dos, entonces:
Si A>=B + C No se trata de un triรกngulo
Si A2 = B2 + C2 Es un triรกngulo rectรกngulo
Si A2 > B2 + C2 Es un triรกngulo obtusรกngulo
Si A2 < B2 + C2 Es un triรกngulo acutรกngulo
"""
# Read the three side lengths; A must be entered as the largest side.
# NOTE(review): the "A is the largest" assumption is not validated against B, C.
A = int(input("Ingrese el valor del primer (el mayor) lado: "))
B = int(input("Ingrese el valor del segundo lado: "))
C = int(input("Ingrese el valor del tercero lado: "))
if A>= (B + C):
    # triangle inequality violated: the sides cannot form a triangle
    print("No se trata de un triรกngulo")
elif A**2 == (B**2 + C**2):
    # Pythagorean equality: right triangle
    print ("Es un triรกngulo rectรกngulo")
elif A**2 > (B**2 + C**2):
    # obtuse triangle
    print("Es un triรกngulo obtusรกngulo")
elif A**2 < (B**2 + C**2):
    # acute triangle
    print("Es un triรกngulo acutรกngulo")
251922e509ad129bd8893565584ebf2d5cd9715c | Python | haydenshively/Fluid-Simulation | /main.py | UTF-8 | 3,484 | 2.859375 | 3 | [] | no_license | """
https://thecodingtrain.com/CodingChallenges/132-fluid-simulation.html
http://www.dgp.toronto.edu/people/stam/reality/Research/pdf/GDC03.pdf
https://mikeash.com/pyblog/fluid-simulation-for-dummies.html
"""
import numpy as np
import cv2
from random import random
from fluid import Fluid
ix = 0
iy = 0
def grayscale():
    """Single-channel dye demo: two square emitters push dye toward each
    other while mouse movement injects velocity into the field."""
    fluid = Fluid([128, 128], 0.002, diff = 0.0, visc = 0.0)
    # fluid = Fluid(128, 0.002, diff = 0.0001, visc = 0.000005)
    cx = fluid.size[1]//2   # grid center column
    cy = fluid.size[0]//2   # grid center row
    q1x = cx//2             # first-quarter column (left emitter)
    q3x = cx + q1x          # third-quarter column (right emitter)
    w = 4                   # half-width of each emitter square
    cv2.namedWindow('dye')
    def add_dye(event, x, y, flags, param):
        # mouse-move callback: window is shown 4x upscaled, so divide the
        # cursor coords back to grid coords and inject velocity
        # proportional to the cursor's displacement since the last event
        global ix, iy
        if event == cv2.EVENT_MOUSEMOVE:
            x = x//4
            y = y//4
            c = 5
            fluid.v[y - w:y + w, x - w:x + w] = [c*(y - iy), c*(x - ix)]
            ix = x
            iy = y
    cv2.setMouseCallback('dye', add_dye)
    # t = 0
    fluid.d[:,:] = 127      # start from a uniform mid-gray density
    while True:
        # fluid.d[cx - w:cx + w, cx - w:cx + w] = 200 + 55*random()
        # fluid.v[cx - w:cx + w, cx - w:cx + w] = np.sin(np.array([t, t + 3.14/2]))*20.0
        # fluid.d[cx - w:cx + w, q1x - w:q1x + w] = 200 + 55*random()
        # fluid.v[cx - w:cx + w, q1x - w:q1x + w] = np.sin(np.array([t, t + 3.14/2]))*20.0
        # fluid.d[cx - w:cx + w, q3x - w:q3x + w] = 200 + 55*random()
        # fluid.v[cx - w:cx + w, q3x - w:q3x + w] = np.sin(np.array([-t, -t - 3.14/2]))*20.0
        # left emitter: bright dye pushed to the right
        fluid.d[cy - w:cy + w, q1x - w:q1x + w] = 200 + 55*random()
        fluid.v[cy - w:cy + w, q1x - w:q1x + w] = [0, 15*random()]
        # right emitter: dark dye pushed to the left
        fluid.d[cy - w:cy + w, q3x - w:q3x + w] = 55*random()
        fluid.v[cy - w:cy + w, q3x - w:q3x + w] = [0, -15*random()]
        fluid.step()
        # fluid.d = np.clip(fluid.d - 1.0, 0, 255)
        # t += random()*0.03
        # show the density field 4x upscaled
        cv2.imshow('dye', cv2.pyrUp(cv2.pyrUp(fluid.d.astype('uint8'))))
        ch = cv2.waitKey(1)
        if ch == 27: break  # Esc quits
    cv2.destroyAllWindows()
def color():
    """Three-channel demo: independent fluids for R, G and B dye, driven
    by the same shared velocity injections and composited for display."""
    fluid_r = Fluid([128, 128], 0.002, diff = 0.0, visc = 0.0)
    fluid_g = Fluid([128, 128], 0.002, diff = 0.0, visc = 0.0)
    fluid_b = Fluid([128, 128], 0.002, diff = 0.0, visc = 0.0)
    cx = fluid_r.size[1]//2  # grid center column
    cy = fluid_r.size[0]//2  # grid center row
    q1x = cx//2              # left emitter column
    q3x = cx + q1x           # right emitter column
    w = 4                    # half-width of each emitter square
    while True:
        # one random speed per side, shared across all three channels so
        # the colored dyes advect through the same flow
        velocity_right = -15*random()
        velocity_left = 15*random()
        # red dye emitted on the left, decaying slowly everywhere
        fluid_r.d[cy - w:cy + w, q1x - w:q1x + w] = 200 + 55*random()
        fluid_r.d = np.clip(fluid_r.d - 0.5, 0, 255)
        fluid_r.v[cy - w:cy + w, q1x - w:q1x + w] = [0, velocity_left]
        fluid_r.v[cy - w:cy + w, q3x - w:q3x + w] = [0, velocity_right]
        # blue dye emitted on the right
        fluid_b.d[cy - w:cy + w, q3x - w:q3x + w] = 200 + 55*random()
        fluid_b.d = np.clip(fluid_b.d - 0.5, 0, 255)
        fluid_b.v[cy - w:cy + w, q1x - w:q1x + w] = [0, velocity_left]
        fluid_b.v[cy - w:cy + w, q3x - w:q3x + w] = [0, velocity_right]
        # green dye emitted in the center
        fluid_g.d[cy - w:cy + w, cx - w:cx + w] = 200 + 55*random()
        fluid_g.d = np.clip(fluid_g.d - 0.5, 0, 255)
        fluid_g.v[cy - w:cy + w, q1x - w:q1x + w] = [0, velocity_left]
        fluid_g.v[cy - w:cy + w, q3x - w:q3x + w] = [0, velocity_right]
        fluid_r.step()
        fluid_g.step()
        fluid_b.step()
        # composite the three densities into a BGR image (OpenCV order)
        full_color = np.dstack((fluid_b.d, fluid_g.d, fluid_r.d))
        cv2.imshow('dye', cv2.pyrUp(cv2.pyrUp(full_color.astype('uint8'))))
        ch = cv2.waitKey(1)
        if ch == 27: break  # Esc quits
    cv2.destroyAllWindows()
# Entry point: run the grayscale demo (switch to color() for RGB).
if __name__ == '__main__':
    grayscale()
058aa7b7bb8a3fbe8bbdc1e8dc509e4323e24de3 | Python | Satvik782/OpenCV | /transformations.py | UTF-8 | 1,077 | 3.265625 | 3 | [] | no_license | import cv2 as cv
import numpy as np
# Load the demo image and show the original.
img=cv.imread('Photos/podium.jpeg')
cv.imshow('Podium',img)
#Translation (Move image)
def translate(img, x, y):
    """Shift `img` by (x, y) pixels: -x left / +x right, -y up / +y down."""
    rows, cols = img.shape[0], img.shape[1]
    # 2x3 affine matrix for a pure translation
    shift = np.float32([[1, 0, x], [0, 1, y]])
    return cv.warpAffine(img, shift, (cols, rows))
# Direction convention: -x -> left, +x -> right, -y -> up, +y -> down.
translated=translate(img,-100,-100)
# cv.imshow('Translated',translated)
#Rotation
def rotate(img, angle, rotPoint=None):
    """Rotate `img` by `angle` degrees (counter-clockwise) about `rotPoint`.

    rotPoint defaults to the image centre when not given.

    Fix: compare against None with `is`, not `==` — equality can be
    overridden, and for array-like arguments `rotPoint == None` would be
    evaluated elementwise instead of as a None check.
    """
    (height, width) = img.shape[:2]
    if rotPoint is None:
        rotPoint = (width // 2, height // 2)
    rotMat = cv.getRotationMatrix2D(rotPoint, angle, 1.0)
    dimensions = (width, height)
    return cv.warpAffine(img, rotMat, dimensions)
rotated=rotate(img,-30)
# cv.imshow('Rotated',rotated)
# Resizing: INTER_CUBIC is slower but higher quality than the default.
resized= cv.resize(img, (500,500), interpolation=cv.INTER_CUBIC)
# cv.imshow('Resized', resized)
# Flipping: flip code 0 -> around x-axis, 1 -> around y-axis, -1 -> both
# (thinking of the image as sitting in the first quadrant).
flip=cv.flip(img, 1)
cv.imshow('Flipped', flip)
# Cropping is plain numpy slicing: rows 200:300, columns 400:500.
cropped=img[200:300,400:500]
cv.imshow('Cropped',cropped)
# Block until a key is pressed so the windows stay open.
cv.waitKey(0)
0f17371a15ba58a1e01fa11dca85a751a40d7248 | Python | SoumaiaBK/intelligent-multiAgent-system-based-on-machine-learning-algoritms--covid19 | /Api_Flask/Scarping_nouvelles.py | UTF-8 | 887 | 2.875 | 3 | [] | no_license | from bs4 import BeautifulSoup
import requests
import pandas as pd
def Scarping_nouvelles(numOfPages):
    """Scrape article titles from moroccoworldnews.com news pages
    1..numOfPages and dump them to NewsScrapped.csv (column 'Titre')."""
    formats='html.parser'   # BeautifulSoup parser name
    tags='h3'               # tag that carries each article title
    news_dict=[]            # accumulates {'Titre': title} dicts
    numOfPages=int(numOfPages)
    numOfPages=numOfPages+1  # make the range below inclusive of the last page
    for i in range(1, numOfPages):
        url="https://www.moroccoworldnews.com/news-2/page/{}".format(i)
        response = requests.get(url)
        soup = BeautifulSoup(response.content,formats)
        # drop sidebar and sub-category header blocks so their links
        # don't pollute the title list
        for div in soup.findAll("div", {'class':'td-ss-main-sidebar'}):
            div.decompose()
        for div1 in soup.findAll("div", {'class':'td-subcategory-header'}):
            div1.decompose()
        # each title lives in <h3 class="entry-title ..."><a title="...">
        for head in soup.find_all(tags, {'class':'entry-title td-module-title'}):
            titre=head.find('a').get('title')
            news_dict.append({'Titre': titre})
    news_df=pd.DataFrame(news_dict)
    news_df.to_csv("NewsScrapped.csv" ,index=False, encoding='utf8')
| true |
918fa0210bf8acf7f891f06c9272bc367a171407 | Python | chungheng/neural | /neural/network/network.py | UTF-8 | 16,382 | 2.53125 | 3 | [] | no_license | """
Network module for constructing an abitrary circuit of neurons and synapses.
Examples:
>>> nn = Network()
>>> iaf = nn.add(IAF, bias=0., c=10., record=['v'])
>>> syn = nn.add(AlphaSynapse, ar=1e-1, stimulus=iaf.spike)
>>> hhn = nn.add(HodgkinHuxley, stimulus=syn.I)
>>> nn.input(s=iaf.stimulus)
>>>
>>> nn.compile(dtype=dtype)
>>>
>>> nn.run(dt, s=numpy.random.rand(10000))
"""
import sys
from collections import OrderedDict
from functools import reduce
from numbers import Number
from inspect import isclass
import numpy as np
import pycuda.gpuarray as garray
from tqdm.auto import tqdm
from ..basemodel import Model
from ..future import SimpleNamespace
from ..recorder import Recorder
from ..codegen.symbolic import SympyGenerator
from ..utils import MINIMUM_PNG
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
raise Exception("neural.network does not support Python 2.")
class Symbol(object):
    """Lightweight handle pairing a Container with one of its variable
    names; indexing a Symbol reads the recorded trace of that variable."""

    def __init__(self, container, key):
        self.container = container
        self.key = key

    def __getitem__(self, given):
        recorded = getattr(self.container.recorder, self.key)
        return recorded[given]
class Input(object):
    """External stimulus source feeding a Network.

    Configured with data via ``__call__`` and consumed one time step at a
    time with ``next()`` during ``Network.run``.
    """

    def __init__(self, num=None, name=None):
        self.num = num        # expected vector width; None for scalar input
        self.name = name
        self.data = None      # full stimulus, attached by __call__
        self.steps = 0        # number of time steps the stimulus covers
        self.iter = None      # per-run iterator over self.data
        self.latex_src = "External stimulus"
        self.graph_src = MINIMUM_PNG

    def __call__(self, data):
        """Attach stimulus `data`; returns self so calls can be chained.

        Fix: the original called ``iter(self.data)`` unconditionally
        *before* the type dispatch below, so any non-iterable raised
        TypeError before the GPUArray branch (and the explicit
        ``raise TypeError()``) could ever run.
        """
        self.data = data
        self.steps = len(data) if hasattr(data, "__len__") else 0
        if hasattr(data, "__iter__"):
            self.iter = iter(self.data)
        elif isinstance(data, garray.GPUArray):
            # GPUArray supports indexing but not the iterator protocol
            self.iter = (x for x in self.data)
        else:
            raise TypeError()
        return self

    def __next__(self):
        return next(self.iter)

    def reset(self):
        """Rewind the stimulus to its first time step."""
        self.iter = iter(self.data)
class Container(object):
    """
    A wrapper that holds a Model instance with symbolic references to its
    variables.

    Examples:
    >>> hhn = Container(HodgkinHuxley)
    >>> hhn.v  # reference to hhn.states['v']
    """

    def __init__(self, obj, num, name=None):
        self.obj = obj              # wrapped model / updatable object
        self.num = num              # population size (None for scalar)
        self.name = name or ""
        self.vars = {}              # attribute name -> cached Symbol
        self.inputs = dict()        # input name -> Symbol | Number | Input
        self.recorder = None        # lazily (re)built by set_recorder()
        self.latex_src = self._get_latex()
        self.graph_src = self._get_graph()
        self._rec = []              # variable names requested for recording

    def __call__(self, **kwargs):
        """Configure the wrapped object: Model variables are set directly,
        everything else is registered as an input connection."""
        for key, val in kwargs.items():
            if isinstance(self.obj, Model):
                if key in self.obj.Variables:
                    setattr(self.obj, key, val)
                elif key in self.obj.Inputs:
                    assert isinstance(val, (Symbol, Number, Input))
                    self.inputs[key] = val
                else:
                    raise KeyError("Unexpected variable '{}'".format(key))
            else:
                # non-Model objects: every kwarg is treated as an input
                assert isinstance(val, (Symbol, Number, Input))
                self.inputs[key] = val
        return self

    def __getattr__(self, key):
        # expose each attribute of the wrapped object as a (cached) Symbol;
        # unknown names fall back to normal attribute lookup
        if key in self.vars:
            return self.vars[key]
        try:
            _ = getattr(self.obj, key)
            self.vars[key] = Symbol(self, key)
            return self.vars[key]
        except Exception as e:
            return super(Container, self).__getattribute__(key)

    def record(self, *args):
        """Mark variables (by name) for recording; names must exist on the
        wrapped object (the getattr below raises otherwise)."""
        for arg in args:
            _ = getattr(self.obj, arg)
            if arg not in self._rec:
                self._rec.append(arg)

    def set_recorder(self, steps, rate=1):
        """(Re)build the Recorder when needed and return it (None when
        nothing is marked for recording).  Rebuilds only when the step
        count or the set of recorded variables changed."""
        if not self._rec:
            self.recorder = None
        elif (
            (self.recorder is None)
            or (self.recorder.total_steps != steps)
            or (set(self.recorder.dct.keys()) != set(self._rec))
        ):
            self.recorder = Recorder(
                self.obj, self._rec, steps, gpu_buffer=500, rate=rate
            )
        return self.recorder

    def _get_latex(self):
        # HTML/LaTeX summary of the wrapped model's equations, inputs and
        # free variables (used by graphical front-ends)
        latex_src = "{}:<br><br>".format(self.obj.__class__.__name__)
        if isinstance(self.obj, Model):
            sg = SympyGenerator(self.obj)
            latex_src += sg.latex_src
            vars = ["\({}\)".format(x) for x in sg.signature]
            latex_src += "<br>Input: " + ", ".join(vars)
            vars = []
            for _k, _v in sg.variables.items():
                if (_v.type == "state" or _v.type == "intermediate") and (
                    _v.integral == None
                ):
                    vars.append("\({}\)".format(_k))
            latex_src += "<br>Variables: " + ", ".join(vars)
        return latex_src

    def _get_graph(self):
        # PNG representation of the wrapped model; 1x1 placeholder otherwise
        if isinstance(self.obj, Model):
            return self.obj.to_graph()
        return MINIMUM_PNG

    @classmethod
    def isacceptable(cls, obj):
        # anything with a callable update() can be wrapped
        return hasattr(obj, "update") and callable(obj.update)
class Network(object):
""""""
def __init__(self, solver="euler", backend="cuda"):
    """Create an empty network with default solver/backend settings."""
    self.solver = solver
    self.backend = backend
    self.containers = OrderedDict()  # name -> Container, insertion-ordered
    self.inputs = OrderedDict()      # name -> Input
    self._iscompiled = False         # set by compile(); required by run()
def input(self, num=None, name=None):
    """Create and register an external Input node; returns the node."""
    key = name or "input{}".format(len(self.inputs))
    node = Input(num=num, name=key)
    self.inputs[key] = node
    # adding a node invalidates any previous compilation
    self._iscompiled = False
    return node
def add(self, module, num=None, name=None, record=None, **kwargs):
    """Instantiate `module`, wrap it in a Container, and register it.

    `module` may be a Model instance, a Model subclass, or any class
    exposing a callable update().  `record` names variables to capture
    during run(); remaining kwargs are forwarded to the constructor.
    Returns the new Container.
    """
    backend = kwargs.pop("backend", self.backend)
    solver = kwargs.pop("solver", self.solver)
    num = num
    name = name or "obj{}".format(len(self.containers))
    record = record or []
    if isinstance(module, Model):
        # already-built model instance: use as-is
        obj = module
    elif issubclass(module, Model):
        obj = module(solver=solver, **kwargs)
    elif isclass(module):
        # generic updatable class: size is passed explicitly
        assert Container.isacceptable(module)
        kwargs["size"] = num
        obj = module(**kwargs, backend=backend)
    else:
        msg = "{} is not a submodule nor an instance of {}"
        raise ValueError(msg.format(module, Model))
    container = Container(obj, num, name)
    if record is not None:
        if isinstance(record, (tuple, list)):
            container.record(*record)
        else:
            container.record(record)
    self.containers[name] = container
    self._iscompiled = False
    return container
def run(self, dt, steps=0, rate=1, verbose=False, **kwargs):
    """Simulate the compiled network.

    dt: integration time step.  steps: minimum step count (extended to
    the longest attached Input).  rate: recorder down-sampling rate.
    verbose: show a tqdm progress bar.  Raises when compile() has not
    been called since the network last changed.
    """
    solver = kwargs.pop("solver", self.solver)
    if not self._iscompiled:
        raise Exception("Please compile before running the network.")
    # calculate number of steps (longest input wins)
    steps = reduce(max, [input.steps for input in self.inputs.values()], steps)
    # reset recorders
    recorders = []
    for c in self.containers.values():
        recorder = c.set_recorder(steps, rate)
        if recorder is not None:
            recorders.append(recorder)
    # reset Model variables
    for c in self.containers.values():
        if isinstance(c.obj, Model):
            c.obj.reset()
    # reset inputs
    for input in self.inputs.values():
        input.reset()
    iterator = range(steps)
    if verbose:
        iterator = tqdm(iterator)
    for i in iterator:
        for c in self.containers.values():
            # resolve this container's inputs for the current step:
            # Symbols read the live value from the upstream model,
            # Inputs advance one step, Numbers are constants
            args = {}
            for key, val in c.inputs.items():
                if isinstance(val, Symbol):
                    args[key] = getattr(val.container.obj, val.key)
                elif isinstance(val, Input):
                    args[key] = next(val)
                elif isinstance(val, Number):
                    args[key] = val
                else:
                    raise Exception()
            if isinstance(c.obj, Model):
                c.obj.update(dt, **args)
            else:
                c.obj.update(**args)
        for recorder in recorders:
            recorder.update(i)
def compile(self, dtype=None, debug=False, backend="cuda"):
    """Allocate per-connection buffers and compile every container.

    For each registered input connection a zero-filled numpy buffer (or a
    scalar zero) of the upstream size is created, then forwarded to the
    wrapped object's own compile().  Marks the network runnable.
    """
    dtype = dtype or np.float64
    for c in self.containers.values():
        dct = {}
        for key, val in c.inputs.items():
            if isinstance(val, Symbol):
                if val.container.num is not None:
                    # if c.num is not None and val.container.num != c.num:
                    #     raise Error("Size mismatches: {} {}".format(
                    #         c.name, val.container.name))
                    dct[key] = np.zeros(val.container.num)
                else:
                    dct[key] = dtype(0.0)
            elif isinstance(val, Input):
                if val.num is not None:
                    # vector input must match the consumer's population size
                    if c.num is not None and val.num != c.num:
                        raise Exception(
                            "Size mismatches: {} {}".format(c.name, val.name)
                        )
                    dct[key] = np.zeros(val.num)
                else:
                    dct[key] = dtype(0.0)
            elif isinstance(val, Number):
                dct[key] = dtype(val)
            else:
                raise Exception()
        if hasattr(c.obj, "compile"):
            if isinstance(c.obj, Model):
                c.obj.compile(backend=backend, dtype=dtype, num=c.num, **dct)
            else:
                c.obj.compile(**dct)
            if debug:
                # echo the equivalent compile call for troubleshooting
                s = "".join([", {}={}".format(*k) for k in dct.items()])
                print(
                    "{}.cuda_compile(dtype=dtype, num={}{})".format(
                        c.name, c.num, s
                    )
                )
    self._iscompiled = True
def record(self, *args):
    """Mark the given Symbols' variables for recording during run()."""
    for sym in args:
        assert isinstance(sym, Symbol)
        sym.container.record(sym.key)
def to_graph(self, png=False, svg=False):
import pydot
graph = pydot.Dot(
graph_type="digraph", rankdir="LR", splines="ortho", decorate=True
)
nodes = {}
for c in list(self.containers.values()) + list(self.inputs.values()):
node = pydot.Node(c.name, shape="rect")
nodes[c.name] = node
graph.add_node(node)
edges = []
for c in self.containers.values():
target = c.name
v = nodes[target]
for key, val in c.inputs.items():
if isinstance(val, Symbol):
source = val.container.name
label = val.key
elif isinstance(val, Input):
source = val.name
label = ""
else:
raise Exception()
u = nodes[source]
graph.add_edge(pydot.Edge(u, v, label=label))
edges.append((source, target, label))
if png:
png_str = graph.create_png(prog="dot")
return png_str
else:
D_bytes = graph.create_dot(prog="dot")
D = str(D_bytes, encoding="utf-8")
if D == "": # no data returned
print("Graphviz layout with %s failed" % (prog))
print()
print("To debug what happened try:")
print("P = nx.nx_pydot.to_pydot(G)")
print('P.write_dot("file.dot")')
print("And then run %s on file.dot" % (prog))
# List of "pydot.Dot" instances deserialized from this string.
Q_list = pydot.graph_from_dot_data(D)
assert len(Q_list) == 1
Q = Q_list[0]
# return Q
def get_node(Q, n):
node = Q.get_node(n)
if isinstance(node, list) and len(node) == 0:
node = Q.get_node('"{}"'.format(n))
assert node
return node[0]
def get_label_xy(x, y, ex, ey):
min_dist = np.inf
min_ex, min_ey = [0, 0], [0, 0]
for _ex, _ey in zip(zip(ex, ex[1:]), zip(ey, ey[1:])):
dist = (np.mean(_ex) - x) ** 2 + (np.mean(_ey) - y) ** 2
if dist < min_dist:
min_dist = dist
min_ex[:] = _ex[:]
min_ey[:] = _ey[:]
if min_ex[0] == min_ex[1]:
_x = min_ex[0]
_x = np.sign(x - _x) * 10 + _x
_y = y
else:
_x = x
_y = min_ey[0]
_y = np.sign(y - _y) * 10 + _y
return _x, _y - 3
elements = []
bb = Q.get_bb()
viewbox = bb[1:-1].replace(",", " ")
for n in nodes.keys():
node = get_node(Q, n)
# strip leading and trailing double quotes
pos = node.get_pos()[1:-1]
if pos is not None:
obj = self.get_obj(n)
w = float(node.get_width())
h = float(node.get_height())
x, y = map(float, pos.split(","))
attrs = {
"width": w,
"height": h,
"rx": 5,
"ry": 5,
"x": x,
"y": y,
"stroke-width": 1.5,
"fill": "none",
"stroke": "#48caf9",
}
elements.append(
{
"label": [n, x, y],
"shape": "rect",
"attrs": attrs,
"latex": obj.latex_src,
"graph": obj.graph_src,
}
)
min_x, min_y, scale_w, scale_h = np.inf, np.inf, 0, 0
for el in elements:
if min_x > el["attrs"]["x"]:
min_x = el["attrs"]["x"]
scale_w = 2 * min_x / el["attrs"]["width"]
if min_y > el["attrs"]["y"]:
min_y = el["attrs"]["y"]
scale_h = 2 * min_y / el["attrs"]["height"]
for el in elements:
w = scale_w * el["attrs"]["width"]
h = scale_h * el["attrs"]["height"]
el["attrs"]["x"] = el["attrs"]["x"] - w / 2
el["attrs"]["y"] = el["attrs"]["y"] - h / 2
el["attrs"]["width"] = w
el["attrs"]["height"] = h
for e in Q.get_edge_list():
pos = (e.get_pos()[1:-1]).split(" ")
ax, ay = [float(v) for v in pos[0].split(",")[1:]]
pos = [v.split(",") for v in pos[1:]]
xx = [float(v[0]) for v in pos] + [ax]
yy = [float(v[1]) for v in pos] + [ay]
x, y, _x, _y = [], [], 0, 0
for __x, __y in zip(xx, yy):
if not (__x == _x and __y == _y):
x.append(__x)
y.append(__y)
_x = __x
_y = __y
path = ["{} {}".format(_x, _y) for _x, _y in zip(x, y)]
p = "M" + " L".join(path)
attrs = {"d": p, "stroke-width": 1.5, "fill": "none", "stroke": "black"}
lp = e.get_lp()
if lp:
lx, ly = [float(v) for v in lp[1:-1].split(",")]
lx, ly = get_label_xy(lx, ly, x, y)
label = [e.get_label() or "", lx, ly]
elements.append({"label": label, "shape": "path", "attrs": attrs})
output = {"elements": elements, "viewbox": viewbox}
return output
def get_obj(self, name):
if name in self.containers:
return self.containers[name]
elif name in self.inputs:
return self.inputs[name]
else:
raise TypeError("Unexpected name: '{}'".format(name))
| true |
fb810d1818bac2065a00a1d0ed25133af40c6579 | Python | whut-leslie/PyLearning | /03_ๅพช็ฏ/Ex_07_่ฝฌไนๅญ็ฌฆ.py | UTF-8 | 298 | 4 | 4 | [] | no_license | # \t ๅจๆงๅถๅฐ่พๅบไธไธชๅถ่กจ็ฌฆ๏ผๅๅฉๅจ่พๅบๆๆฌๆถ๏ผๅ็ดๆนๅ ไฟๆๅฏน้ฝ
# \t prints a tab stop, which keeps columns vertically aligned.
print("1\t2\t3\t")
print("10\t20\t30\t")
# \n prints a newline in the console
print("hello\npython")
# \" prints a literal double quote
# print("hello"hello") -- trying to print hello"hello this way is a syntax error
print("hello\"hello")
| true |
6d1808ecc5758b31ddced75ac445bbc80246dd91 | Python | ThayaFluss/candle | /tests/io/test_matplotlib.py | UTF-8 | 519 | 2.53125 | 3 | [
"MIT"
] | permissive | import unittest
from candle.io.matplotlib import *
from candle.io.util import touch
class TestMatplotlib(unittest.TestCase):
    """Smoke test for candle.io.matplotlib.plotter."""

    def test_plotter(self):
        """plotter() should render a three-point log file to a PNG without error."""
        filename = "log/temp/test_matplotlib_plotter.log"
        figfile = "plot/temp/test_matplotlib_plotter.png"
        # touch() presumably creates the files (and parent dirs) before use
        # -- see candle.io.util.touch; TODO confirm.
        touch(filename)
        touch(figfile)
        with open(filename, "w") as f:
            f.write("0\n")
            f.write("1\n")
            f.write("2\n")
        plotter(filename, figfile, xlabel="this is x", ylabel="this is y")
| true |
2995f2e83c4f9ad64891ff14b8b6935b434359a4 | Python | dmitryg-ops/phyton | /lesson01/1_hw.py | UTF-8 | 1,068 | 4.09375 | 4 | [] | no_license | """
1. ะะพัะฐะฑะพัะฐะนัะต ั ะฟะตัะตะผะตะฝะฝัะผะธ, ัะพะทะดะฐะนัะต ะฝะตัะบะพะปัะบะพ, ะฒัะฒะตะดะธัะต ะฝะฐ ัะบัะฐะฝ,
ะทะฐะฟัะพัะธัะต ั ะฟะพะปัะทะพะฒะฐัะตะปั ะฝะตัะบะพะปัะบะพ ัะธัะตะป ะธ ัััะพะบ ะธ ัะพั
ัะฐะฝะธัะต ะฒ ะฟะตัะตะผะตะฝะฝัะต, ะฒัะฒะตะดะธัะต ะฝะฐ ัะบัะฐะฝ.
"""
# Create a few variables and print them.
pervaja = 1
vtoraja = 2
tretja = 3
print(pervaja, vtoraja, tretja)
# Ask the user for a few numbers and strings, store them in variables.
# Bug fix: the original inline comment here had been split across two
# physical lines by an encoding artifact, leaving a stray bare line that
# made the whole file a SyntaxError; it is folded back into this comment.
year = input('ะะฒะตะดะธัะต ะณะพะด ัะพะถะดะตะฝะธั: ')
month = input('ะะฒะตะดะธัะต ะผะตััั ัะพะถะดะตะฝะธั: ')
day = input('ะะฒะตะดะธัะต ะดะตะฝั ัะพะถะดะตะฝะธั: ')
name = input('ะะฒะตะดะธัะต ะธะผั: ')
surname = input('ะะฒะตะดะธัะต ะคะฐะผะธะปะธั: ')
# Print everything back to the screen.
print(f'{year}- ะณะพะดะฐ, {month} {day} {name} {surname} - ัั ัะตััะพะฒัะบะธ ััะฐััะปะธะฒัะน!!')
| true |
5a98b0d00143fc8dd82b3de00835fa3af44b7705 | Python | xuwangliao/AlgorithmQIUZHAO | /Week_06/242. ๆๆ็ๅญๆฏๅผไฝ่ฏ.py | UTF-8 | 495 | 2.875 | 3 | [] | no_license | class Solution:
    def isAnagram(self, s: str, t: str) -> bool:
        """Return True if t is an anagram of s (same characters, same counts)."""
        # Different lengths can never be anagrams.
        if len(s) != len(t):
            return False
        # Count each character of s.
        dic = {}
        for c in s:
            if c not in dic:
                dic[c] = 1
            else:
                dic[c] += 1
        # Consume the counts with t; a missing character or an exhausted
        # count means t is not an anagram of s.
        for c in t:
            if c not in dic:
                return False
            else:
                if dic[c] == 0:
                    return False
                else:
                    dic[c] -= 1
return True | true |
501c9df18c6576660720b00bff59447ac119169a | Python | vt-dataengineer/leetcode | /leetcode_problems/file6/team.py | UTF-8 | 479 | 3.078125 | 3 | [] | no_license | # 3
# 4 3
# 3 1 9 100
# 6 2
# 5 5 1 2 3 4
# 5 5
# 7 7 1 7 7
n = input()
tc = input()
skill = input()
length = len(skill.split())
# if length > int(tc.split()[0]):
# print("Error in values")
# else:
print(n)
print(tc)
print(skill)
total_player = tc.split()[0]
player_to_choose = tc.split()[1]
print('Total: '+ total_player)
print('Player to choose : '+ player_to_choose)
ss = sorted(skill.split())
# print(ss)
ll = []
for x in ss:
ll.append(int(x))
print(sorted(ll))
| true |
af9015b88407c2b518831fcd9b3e3005d94efc62 | Python | shadimsaleh/pink-python | /scripts/functions4.py | UTF-8 | 275 | 3.296875 | 3 | [] | no_license | x=1
y=2
def xx():
    global x
    x=3 # rebinds the module-level x (declared global above)
    y=4 # y stays local to xx and is discarded when xx returns
    print('local x = %s'%x)  # x is the global here, so this prints 3
    print('local y = %s'%y)  # prints the local y (4)
print('global x = %s'%x)
print('global y = %s'%y)
xx()
print('again global x = %s'%x)  # 3: xx() changed the global x
print('again global y = %s'%y)  # 2: the local y never escaped xx
| true |
86170dfb26071f51f22733122af8dd879b0937cc | Python | coderlubo/python_base | /02_้ขๅๅฏน่ฑกๅบ็ก/10_ๅผๅธธ/ๅผๅธธๅค็.py | UTF-8 | 788 | 4.09375 | 4 | [] | no_license | # ๆ็ฆๆณข
# 2021/1/28 14:49
# ๆ็คบ็จๆท่พๅ
ฅไธไธชๆดๆฐ
# ไฝฟ็จ 8 ้คไปฅ็จๆท่พๅ
ฅ็ๆดๆฐๅนถไธ่พๅบ
try:
num = int(input("่ฏท่พๅ
ฅไธไธชๆดๆฐ:"))
print("8 / %d = %.2f" % (num, 8 / num))
# ๅฏน้คๆฐไธบ 0 ่ฟ่กๅค็
except (ZeroDivisionError, ValueError):
print("่ฏทไธ่ฆ่พๅ
ฅ 0")
# ๅฏน่พๅ
ฅ้ๆดๆฐ่ฟ่กๅค็
except ValueError:
print("่ฏท่พๅ
ฅๆญฃ็กฎ็ๆดๆฐ")
# ๅฏนๆช็ฅ้่ฏฏ่ฟ่กๅค็
except Exception as result:
print("ๆช็ฅ้่ฏฏ %s" % result)
# ๆๅฐ่ฏๆง่ก็ไปฃ็ ๆฒกๆๅบ็ฐๅผๅธธๆไผๆง่ก็่ฏญๆณ
else:
print("ๆๆฏ else,ๆๆง่ก่ฏดๆไฝ ๆๅฐ่ฏๆง่ก็ไปฃ็ ๆฒกๆๅบ็ฐๅผๅธธ")
# ๆ ่ฎบๆฏๅฆๅบ็ฐๅผๅธธ้ฝไผๆง่ก็ไปฃ็
finally:
print("ๆๆฏ finally,ๆ ่ฎบๆฏๅฆๅบ็ฐๅผๅธธๆ้ฝไผๆง่ก")
| true |
3a4abab1fe5d1b6b7ba7a5d72556aa6b44ae4801 | Python | CodeR57/DeepSentiment | /SVMSentimentAnalysis.py | UTF-8 | 3,706 | 2.578125 | 3 | [] | no_license | import cPickle,os
import preProcessor as pp
#from Preprocessor import transformData
import numpy
import re
from keras.models import Sequential
from keras.layers.core import Dense, Activation,Dropout,TimeDistributedDense
from keras.layers.recurrent import LSTM
from keras.layers.embeddings import Embedding
from sklearn import svm
from sklearn import cross_validation
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
class SVMSentiment:
def __init__(self):
self.max_length = 500
self.batch_size=50
self.model = OneVsRestClassifier(svm.SVC(kernel='rbf',gamma=1,C = 1,tol=0.0001,cache_size=5000) )
def configureSVMModel(self,TrainX,TrainY,validX,validY):
print('Configuring the SVM Model')
currPath = os.getcwd()
currFiles = os.listdir(currPath)
print('################### Test #####################')
print(currFiles.count('SVMScores.pkl'))
if(currFiles.count('SVMScores.pkl')==0):
self.model.fit(TrainX, TrainY)
# Saving model scores
joblib.dump(self.model,currPath+'/SVMScores.pkl')
else:
print('Loading already existing Model')
self.model = joblib.load(currPath+'/SVMScores.pkl')
def evaluateSVMModel(self,TestX,TestY):
print self.model.score(TestX, TestY)
predicted_data=[]
for i in range(len(TestX)):
predicted_data.append(list([self.model.predict (TestX[i].reshape(1,-1)) ,TestY[i]]) )
print "Predicted Data"
print predicted_data
#print TestY
def predictSentiment(self,dataX,dataY):
print('@@@@@@@@@@@@@@@@ Length of test data : ',len(dataX))
for i in range(len(dataX)):
predicted_data = self.model.predict(dataX[i].reshape(1,-1))
expected_out = dataY[i]
print('############### Predicted data :',predicted_data,' ; ; ',expected_out)
return predicted_data
def getTrainTestData(self):
print('Loading Training and Test data')
trainX=[]
trainY=[]
testX=[]
testY = []
f= open('trainingdata.pkl','rb')
(trainX,trainY) = cPickle.load(f)
f= open('testingdata.pkl','rb')
(testX,testY) = cPickle.load(f)
return ((trainX,trainY),(testX,testY))
def getValidationData(self,dataX,dataY):
return dataX[0:self.batch_size,:],dataY[0:self.batch_size,:]
# arg represents the path of the file containing the text converted from the speech signal.
def main(arg):
    """Train/evaluate the SVM pipeline; optionally classify the file at *arg*.

    With arg == '' only training/evaluation runs; otherwise the file's text
    is transformed with the pickled dictionary and its sentiment predicted.
    """
    print('Initializing the LSTM Model')
    cwd = os.getcwd()
    svm = SVMSentiment()
    print('Retrieving the Training and Test Data')
    # NOTE(review): `path` duplicates `cwd` and is never used.
    path = os.getcwd()
    ((TrainX,TrainY),(TestX,TestY)) = svm.getTrainTestData()
    print('Getting the Validation Data')
    validX, validY = svm.getValidationData(TrainX,TrainY)
    print('Configuring the SVM Model')
    svm.configureSVMModel(TrainX,TrainY,validX,validY)
    #print('Evaluating the Model')
    svm.evaluateSVMModel(TestX,TestY)
    if arg=='':
        return
    else:
        # NOTE(review): emotfile and the dictionary handle are never closed.
        emotfile = open(arg,"rb");
        dataX = []
        dataY =[]
        dataX.append(emotfile.read())
        dataY.append('0') #Random output so as to call the pp.transformData function. This is not to be used anywhere
        worddict = cPickle.load(open(cwd+'/dictionary.pkl','rb'))
        (DataX,DataY) = pp.transformData(dataX,dataY,worddict)
        prediction = svm.predictSentiment(DataX,DataY)
        return prediction
if __name__ =='__main__':
    # NOTE(review): hard-coded, machine-specific input path.
    #arg ='/home/smeera380/spark-1.6.0/SpeechProject/SpeechSentimentAnalysis/aclImdb/emotion.txt'
    arg = "/home/vyassu/Downloads/Telegram Desktop/aclImdb/test/neg1/0_2.txt"
    main(arg)
| true |
5626adcf97369df7c993b665827400dcb60cc249 | Python | ssaporito/kmst | /kmst_main.py | UTF-8 | 2,438 | 2.65625 | 3 | [] | no_license | from kmst_guess import *
from kmst_branchbound import *
import time
import random
def test_kmst(n_nodes,k,tries,draw):
    """Benchmark the heuristic vs branch-and-bound k-MST solvers.

    Builds a complete graph on n_nodes with uniform random edge weights in
    [1, 100], runs each solver `tries` times, prints timings and the relative
    gap between the heuristic and exact objective, and optionally draws the
    best solution with networkx/matplotlib when `draw` is True.
    """
    solutions=[]
    solution_weight=float("inf")
    G=nx.Graph()
    #e=[(0,1,3.0),(1,2,1.0),(0,2,2.0),(0,3,1.0),(3,1,0.5),(1,4,5.0),(2,4,3.0)]
    #e=[(0,1,1),(0,2,1),(0,3,2),(1,2,10),(2,3,1),(3,1,10),(2,4,1),(3,5,1),(6,2,2),(3,7,1)]
    #e=[(0,1,1),(0,2,1),(1,3,10),(2,3,10),(3,4,2),(3,5,2),(4,6,3),(5,6,2)]
    #G.add_weighted_edges_from(e)
    G=nx.complete_graph(n_nodes)
    i=1
    for e in G.edges():
        # G[e[0]][e[1]]['weight']=i
        G[e[0]][e[1]]['weight']=random.uniform(1,100)
        i+=1
    n=G.number_of_nodes()
    m=G.number_of_edges()
    #tries=10
    print("n="+str(n)+",k="+str(k)+",m="+str(m))
    T=[]
    V=dict([])
    E=G.edges()
    start=time.time()
    # Run the heuristic `tries` times, clearing shared state between runs.
    for i in range(0,tries):
        #pass
        #global solutions
        solutions.clear()
        global P_memo
        P_memo.clear()
        kmst_guess(G,k,solutions)
    end=time.time()
    print("Heuristics")
    print_results(solutions,start,end,tries,G)
    val_guess=minimum_solution(get_weighted_solutions(G,solutions))[1]
    T=[]
    V=dict([])
    E=G.edges()
    start=time.time()
    # Run the exact branch-and-bound solver the same number of times.
    for i in range(0,tries):
        solutions.clear()
        kmst_branchbound(G,k,T,V,E,solutions,[solution_weight])
    end=time.time()
    print("Branch and Bound")
    print_results(solutions,start,end,tries,G)
    val_branch=minimum_solution(get_weighted_solutions(G,solutions))[1]
    print("Diff: "+str((val_guess-val_branch)/(val_branch)))
    #print_solutions(G,get_weighted_solutions(G,solutions))
    try:
        #print(solutions)
        min_solution=minimum_solution(get_weighted_solutions(G,solutions))
        #print(min_solution)
        #print(solution_weight)
        if draw:
            # Highlight the best solution's edges in red on a spring layout.
            selected=min_solution[0]
            pos=nx.spring_layout(G)
            nx.draw_networkx_nodes(G,pos)
            nx.draw_networkx_edges(G,pos,edge_color='b')
            nx.draw_networkx_edges(G,pos,edgelist=selected,edge_color='r')
            nx.draw_networkx_labels(G,pos)
            #nx.draw_networkx_edge_labels(G,pos)
            plt.show()
        #print(sorted(solutions,key=lambda el:el[1])) #print asc sorted solutions
    except(TypeError,IndexError):
        print("An error occurred or there is no solution.")
test_kmst(8,5,1,False)
| true |
260f494256573e187473f7403f180ad1ec2d46ef | Python | huytrinhx/Reinforcement-Learning | /DQN-Food-Collector/agent.py | UTF-8 | 6,566 | 2.8125 | 3 | [] | no_license | import numpy as np
import random
from model import QNetwork
from replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5)
BATCH_SIZE = 128
GAMMA = 0.95
TAU = 1e-3
LR = 5e-4
UPDATE_EVERY = 10
STACK_SIZE = 4
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class DQNAgent():
    """DQN agent with a local/target Q-network pair and uniform replay."""

    def __init__(self, state_size, action_size, seed):
        """Build local/target networks, the optimizer, and the replay buffer."""
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)
        #Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
        #Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        #Initialize time step ( for updating every UPDATE_EVERY steps)
        self.t_step = 0
        #Implement stack image buffer
        self.stack_size = STACK_SIZE
    def preprocess_state(self, img):
        # Weighted channel mix via matrix product; assumes the last axis of
        # img has size 3 -- TODO confirm against the environment's obs shape.
        return img @ (0.3,0.1,0.7)
    def stack_images(self, img1, img2):
        # https://danieltakeshi.github.io/2016/11/25/frame-skipping-and-preprocessing-for-deep-q-networks-on-atari-2600-games/
        # if image is in greyscale and img1 is fully-stacked
        # https://github.com/PacktPublishing/Deep-Learning-with-TensorFlow-2-and-Keras/blob/master/Chapter%2011/DQN_Atari_v2.ipynb
        # print(img1.shape)
        # NOTE(review): `img1.shape == 3` compares a tuple to an int and is
        # always False, so this branch never runs -- likely meant
        # `len(img1.shape) == 3`.  Also the append axis (2) does not match
        # the expand_dims axis (0); confirm intended layout before fixing.
        if img1.shape == 3 and img1.shape[0] == self.stack_size:
            im = np.append(img1[1:,:,:], np.expand_dims(img2,0), axis=2)
            im = np.expand_dims(im,axis=0)
            # print(im.shape)
            return im
        else: #otherwise, clone img1 to the size of the stack hyperparams
            im = np.vstack([img1]*self.stack_size)
            im = np.squeeze(im,axis=None)
            # print(im.shape)
            return im
    def step(self, state, action, reward, next_state, done):
        """Store one transition and learn every UPDATE_EVERY steps."""
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)
        # Learn every UPDATE_EVERY time steps
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            #if enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)
    def act(self, state, eps=0.):
        """Return an epsilon-greedy action for *state* (eps = explore rate)."""
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()
        #epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))
    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        # Get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Get expected Q values from local model
        Q_expected = self.qnetwork_local(states).gather(1, actions)
        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
    def soft_update(self, local_model, target_model, tau):
        """Polyak update: target <- tau*local + (1-tau)*target."""
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
    def save_checkpoints(self, model_dir="./model_dir/", mean_score=0.0):
        """Save the local network weights, tagging the file with the score."""
        torch.save(self.qnetwork_local.state_dict(), "{0}agent_checkpoint_{1}.pth".format(model_dir,int(mean_score)))
    def load_checkpoints(self, model_dir="./model_dir/", mean_score=0.0):
        """Load local network weights saved by save_checkpoints()."""
        self.qnetwork_local.load_state_dict(torch.load("{0}agent_checkpoint_{1}.pth".format(model_dir,int(mean_score))))
class DQNAgent_PER(DQNAgent):
    """DQN agent variant using a prioritized experience replay buffer."""

    def __init__(self, state_size, action_size, seed):
        super(DQNAgent_PER, self).__init__(state_size, action_size, seed)
        # Replace the uniform buffer installed by the base class.
        self.memory = PrioritizedReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        # PER batches also carry the sampled indices and importance weights.
        states, actions, rewards, next_states, dones, idxs, weights = experiences
        # Get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Get expected Q values from local model
        Q_expected = self.qnetwork_local(states).gather(1, actions)
        # Compute loss MSE
        loss = (Q_expected - Q_targets.detach()).pow(2)
        # Add weights to loss
        loss = loss * weights
        # Add noise to loss to arrive at prior weights
        prios = loss + 1e-6
        # Take mean
        loss = loss.mean()
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # Update buffer priorities
        self.memory.update_priorities(zip(idxs, prios.data.cpu().numpy()))
        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
b48bc8a51365fe1f267c4b68d79250a71bd91ff6 | Python | UW-GRID/PV_sizing | /data_parser.py | UTF-8 | 324 | 2.71875 | 3 | [] | no_license | import pandas as pd
print('Input path to load profile')
filename = input("> ")
# Skip the 12-line header and read a single row of load values.
# NOTE(review): `raw_input` shadows the Python 2 builtin of the same name.
raw_input = pd.read_csv(filename, header=None, skiprows = 12, nrows=1)
raw_input = raw_input.dropna(axis='columns')
# Columns 6, 7 and 57 are dropped; presumably non-load columns -- TODO confirm.
raw_input = raw_input.drop([6, 7, 57], axis='columns')
hourly_load = []
for i in raw_input.iloc[0]:
    hourly_load.append(i)
| true |
15c7abbd4664e88e1117774cfb683b99653206d2 | Python | Putheareak/ReakPython | /Stop_gninnipS_My_sdroW!.py | UTF-8 | 282 | 3.15625 | 3 | [] | no_license | def test(x):
    # Split into words, reversing every word longer than four characters.
    x = list(x.split())
    new_arr = []
    for i in x:
        if len(i) > 4:
            i=i[::-1]
            new_arr.append(i)
        else:
            new_arr.append(i)
    # Join back with single spaces; [0] unwraps the one-element list.
    new_arr = [" ".join(new_arr)]
    return new_arr[0]
print(test("This is another test"))
| true |
3084870ae9e6413c2af8eb9a40018b07e64a6ede | Python | safaabukharmeh1/AposHealth | /Sites/Studio/Pages/CoachManagerPage.py | UTF-8 | 3,932 | 2.5625 | 3 | [] | no_license | import time
from Sites.Studio.Locators.locators import Locators
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import random as r
class CoachManager:
@classmethod
def __init__(cls, driver):
cls.driver = driver
cls.dashboard = Locators.rmdy_studio_logo_xpath
cls.random_no = []
cls.password = "123456Qa"
cls.location = "East Street"
cls.new_coach_name = ""
def generate_us_phone_number(self):
self.random_no.append(r.randint(6, 9))
for i in range(1, 10):
self.random_no.append(r.randint(0, 9))
strings = [str(integer) for integer in self.random_no]
random_no_str = "".join(strings)
return random_no_str
def showdashboard(self):
self.driver.find_element_by_xpath(Locators.rmdy_studio_logo_xpath).click()
def create_new_coach(self):
self.driver.find_element_by_xpath(Locators.coach_manager_button_xpath).click()
time.sleep(5)
random_phone = self.generate_us_phone_number()
self.new_coach_name = "AuC" + random_phone
self.driver.find_element_by_id(Locators.coach_manager_new_coach_button_id).click()
self.driver.find_element_by_id(Locators.coach_manager_first_name_field_id).send_keys(self.new_coach_name)
self.driver.find_element_by_id(Locators.coach_manager_last_name_field_id).send_keys(self.new_coach_name)
self.driver.find_element_by_id(Locators.coach_manager_title_field_id).send_keys("Mr")
self.driver.find_element_by_id(Locators.coach_manager_user_name_id).send_keys(self.new_coach_name)
self.driver.find_element_by_id(Locators.coach_manager_password_id).send_keys(self.password)
self.driver.find_element_by_id(Locators.coach_manager_password_confirm_id).send_keys(self.password)
self.driver.find_element_by_id(Locators.coach_manager_location_field_id).send_keys(self.location)
self.driver.find_element_by_id(Locators.coach_manager_birthdate_field_id).send_keys("05/18/1950")
self.driver.find_element_by_id(Locators.coach_manager_gender_list_id).click()
self.driver.find_element_by_xpath(Locators.coach_manager_gender_male_xpath).click()
self.driver.find_element_by_id(Locators.coach_manager_email_field_id).send_keys(self.new_coach_name+"@mailinator.com")
self.driver.find_element_by_xpath(Locators.coach_manager_test_site_xpath).click()
self.driver.find_element_by_id(Locators.coach_manager_about_me_field_id).send_keys("About me")
self.driver.find_element_by_id(Locators.coach_manager_external_id_field_id).send_keys(random_phone)
f = open("coach.txt", "w+")
f.write(self.new_coach_name)
f.close()
self.driver.find_element_by_id(Locators.coach_manager_save_button_id).click()
def find_coach(self):
new_coach_name = self.new_coach_name
self.driver.find_element_by_id(Locators.coach_manager_coach_user_name_id).send_keys(new_coach_name)
self.driver.find_element_by_id(Locators.coach_manager_search_coach_button_id).click()
WebDriverWait(self.driver, 10).until(EC.presence_of_all_elements_located((By.TAG_NAME, 'td')))
table = self.driver.find_element_by_xpath('//*[@id="pagedListContainer0"]/table')
trs = table.find_elements_by_tag_name('tr')
row_count = 0
matched_row_count = 0
try:
for tr in trs:
tds = tr.find_elements_by_tag_name('td')
for i, td in enumerate(tds):
if i == 3 and td.text == new_coach_name:
print("Coach " + new_coach_name + " was added Successfully!, at row no: " + str(matched_row_count))
row_count = row_count + 1
except ValueError:
print("could not find the coach") | true |
0ec8914bb678758c09e8c45bb1940ab6e367fc50 | Python | dashu1999/Roscosmos | /src/utils.py | UTF-8 | 1,541 | 2.796875 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
def decode_mask(mask):
    """Run-length encode a binary mask into a "start length start length ..." string.

    Pixels are scanned in column-major order with 1-based start positions,
    the Kaggle segmentation-submission convention.  (Despite the name, this
    produces the RLE string rather than decoding one.)
    """
    flat = np.concatenate(([0], mask.T.flatten(), [0]))
    # 1-based indices where the pixel value changes: alternating run starts and ends.
    boundaries = np.flatnonzero(flat[1:] != flat[:-1]) + 1
    boundaries[1::2] -= boundaries[::2]  # convert each end index into a run length
    return ' '.join(str(v) for v in boundaries)
def visualize(**images):
    """Plot the given keyword images side by side in a single row.

    Each keyword becomes the subplot title (underscores replaced with
    spaces, title-cased); axis ticks are hidden.  Blocks on plt.show().
    """
    n = len(images)
    plt.figure(figsize=(30, 20))
    for i, (name, image) in enumerate(images.items()):
        plt.subplot(1, n, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.title(' '.join(name.split('_')).title())
        plt.imshow(image)
    plt.show()
def normalize(img1, img2, num_channels):
    """Standardize an image pair to zero mean / unit variance.

    For num_channels == 1 the channels-first RGB input is first collapsed to
    grayscale with luma-style weights; for num_channels == 3 the result is
    transposed from channels-first to channels-last.  Values are divided by
    1024 before standardization.
    """
    def _standardize(img):
        if num_channels == 1:
            # Collapse channels-first RGB to a single grayscale plane.
            img = img[0] * 0.299 + img[1] * 0.587 + img[2] * 0.114
        img = img / 1024
        img = (img - np.mean(img)) / np.std(img)
        if num_channels == 3:
            img = np.moveaxis(img, 0, -1)
        return img

    return _standardize(img1), _standardize(img2)
def generate_new_shape(img, img_size, num_channels):
    """Round each spatial dimension of *img* up to a multiple of *img_size*.

    Returns an (H, W) tuple for single-channel input, or (H, W, 3) otherwise.
    """
    if num_channels == 1:
        new_shape = (
            int(np.ceil(img.shape[0] / img_size) * img_size),
            int(np.ceil(img.shape[1] / img_size) * img_size)
        )
    else:
        new_shape = (
            int(np.ceil(img.shape[0] / img_size) * img_size),
            int(np.ceil(img.shape[1] / img_size) * img_size),
            3
        )
return new_shape | true |
dc6e47e577afb8e45a676641d9ba13956e90f2ff | Python | PSG1995/psg_1 | /web_data/09_movie_01.py | UTF-8 | 2,123 | 2.9375 | 3 | [] | no_license | import pandas as pd
from bs4 import BeautifulSoup
from urllib.request import urlopen
url = "https://movie.naver.com/movie/running/current.naver"
page = urlopen(url)
soup = BeautifulSoup(page, 'lxml')
## ์์์/์์ ์ ์ ๋ชฉ๋ง ๋ฝ๊ธฐ
movie_info = soup.find("ul", class_='lst_detail_t1').find_all("li")
print( len(movie_info) )
print( movie_info[122].find("dt", class_="tit").a.text )
## ํ์ ๋ง ๋ฝ๊ธฐ
print( movie_info[0].find("span", class_="num").text)
## ์ฐธ์ฌ ๋ช
์
print( movie_info[0].find("em").text)
## ์๋งค์จ
temp = movie_info[122].find("dl", class_="info_exp")
if temp is not None:
t=temp.span.text
print("๊ฐ์ด ์์", t)
else:
t=0
print("๊ฐ์ด ์์", t)
# print(one.find("dt", class_="tit").a.text)
## ๊ฐ์
txt = movie_info[0].find("span", class_="link_txt").text
txt_last = txt.replace("\n", "")
txt_last = txt_last.replace("\t", "")
txt_last = txt_last.replace("\r", "")
print( txt_last )
# ## ๊ฐ๋
# director = soup.find(("dl", class_="info_txt1")."span", class_="link_txt")
# print(director)
# ์ ๋ชฉ, ํ์ , ์ฐธ์ฌ์, ๊ฐ์
all_title = []
all_score = []
all_people = []
all_category = []
all_rate = []
for one in movie_info:
title = one.find("dt", class_="tit").text
score = one.find("span", class_="num").text
num = one.find("em").text
#์๋งค์จ
tmp = one.find("dl", class_="info_exp")
if tmp is not None:
rate = tmp.span.text
else:
rate = 0
category = one.find("span", class_="link_txt").text
txt_last = txt.replace("\n", "")
txt_last = txt_last.replace("\t", "")
txt_last = txt_last.replace("\r", "")
all_title.append(title)
all_score.append(score)
all_people.append(num)
all_rate.append(rate)
all_category.append(txt_last)
print(len(all_title), len(all_score), len(all_people), len(all_category), len(all_rate))
dat_dict = {
"์ ๋ชฉ":all_title, "ํ์ ":all_score, "์ฐธ์ฌ๋ช
์":all_people, "์๋งค์จ":all_rate, "๊ฐ์": all_category}
dat = pd.DataFrame(dat_dict)
dat.to_csv("๋ค์ด๋ฒ์ํ.csv", index = False)
dat.to_excel("๋ค์ด๋ฒ์ํ.xlsx", index = False) | true |
edb5c321d1eaad4b5985913e8267fea7cba3d5bd | Python | sobyandrew/languageNotesAndExamples | /DataTypes/BuiltInTypes/Python/dataTypes.py | UTF-8 | 3,457 | 3.328125 | 3 | [] | no_license | import math
# Names of the built-in types demonstrated below.
# NOTE: despite the name, this is a list, not a dict.
dictOfBuiltInTypes = [
    "boolean", "int", "float", "complex",
    "iterator", "list", "tuple", "range",
    # Bug fix: a missing comma after "memoryview" used to fuse it with
    # "set" into the bogus entry "memoryviewset" via implicit string
    # concatenation.
    "string", "bytes", "bytearray", "memoryview",
    "set", "frozenset", "dict",
    ]
def showBooleanUsage():
    """Demonstrate boolean truth testing; prints which branches execute."""
    # Bug fix: was `Boolean()`, which raises NameError (no such builtin).
    exampleBool = bool()
    exampleBool = True
    if exampleBool:
        print("exampleBool is True")
    if not exampleBool:
        print("This won't print out")
    exampleBool = False
    if not exampleBool:
        print("exampleBool is False")
    if exampleBool:
        print("This won't print out")
def showIntUsage():
    """Demonstrate integer construction, arithmetic, divmod and pow by printing."""
    print("Int Usage: ")
    exampleInt = int()
    exampleInt = 1
    print(exampleInt)
    exampleInt = 1 + 2
    print(exampleInt)
    exampleInt = 1 - 2
    print(exampleInt)
    exampleInt = 2 * 3
    print(exampleInt)
    exampleInt = 4 / 2 #Returns floating point
    print(exampleInt)
    exampleInt = 5 // 2 #Floored Quotient
    print(exampleInt)
    exampleInt = 5 % 2
    print(exampleInt)
    exampleInt = -exampleInt
    print(exampleInt)
    exampleInt = +exampleInt
    print(exampleInt)
    exampleInt = abs(exampleInt)
    print(exampleInt)
    exampleInt = int(3.0)
    print(exampleInt)
    # divmod returns (floored quotient, remainder) in one call.
    floorQuotient, remainder = divmod(5, 2)
    print(floorQuotient)
    print(remainder)
    print(exampleInt)
    # pow(a, b) and a ** b are equivalent for integer exponentiation.
    exampleInt = pow(3, 2)
    print(exampleInt)
    exampleInt = 3 ** 2
    print(exampleInt)
def showFloatUsage():
    """Demonstrate float arithmetic, rounding, divmod and pow by printing."""
    print("Float Usage: ")
    # NOTE(review): likely meant float() -- harmless, since it is rebound below.
    exampleFloat = int()
    exampleFloat = 1.
    print(exampleFloat)
    exampleFloat = 1. + 2.5
    print(exampleFloat)
    exampleFloat = 1. - 2.5
    print(exampleFloat)
    exampleFloat = 2.3 * 3
    print(exampleFloat)
    print(math.ceil(exampleFloat))
    print(round(exampleFloat, 1))
    exampleFloat = 4 / 2 #Returns floating point
    print(exampleFloat)
    exampleFloat = 5 // 2 #Floored Quotient
    print(exampleFloat)
    exampleFloat = 5 % 2
    print(exampleFloat)
    exampleFloat = 2.1
    exampleFloat = -exampleFloat
    print(exampleFloat)
    exampleFloat = +exampleFloat
    print(exampleFloat)
    exampleFloat = abs(exampleFloat)
    print(exampleFloat)
    exampleFloat = float(1)
    print(exampleFloat)
    # divmod also works with a float operand.
    floorQuotient, remainder = divmod(5.5, 2)
    print(floorQuotient)
    print(remainder)
    print(exampleFloat)
    exampleFloat = pow(3.3, 2)
    print(exampleFloat)
    print(math.ceil(exampleFloat))
    print(round(exampleFloat, 1))
    exampleFloat = 3.3 ** 2
    print(exampleFloat)
    print(math.ceil(exampleFloat))
    print(round(exampleFloat, 1))
def showComplexUsage():
    """Print the real part, imaginary part, and conjugate of a sample complex."""
    z = complex(1, -2)
    for value in (z.real, z.imag, z.conjugate()):
        print(value)
def showIteratorUsage():
    """Pull the first two items off a list iterator and print them."""
    source = iter(["1", "2", "3"])
    print(next(source))
    print(next(source))
def showListUsage():
    """Demonstrate membership, concatenation, repetition, indexing and slicing."""
    digits = ["1", "2", "3", "4"]
    print("1" in digits)
    print("5" not in digits)
    more_digits = ["5", "6", "7"]
    print(digits + more_digits)
    print(digits * 3)
    print(digits[1])
    print(digits[1:3])
# def showTupleUsage():
# f
# def showRangeUsage():
# f
# def showStringUsage():
# f
# def showBytesUsage():
# f
# def showByteArrayUsage():
# f
# def showMemoryviewsage():
# f
# def showSetUsage():
# f
# def showFroaenSetUsage():
# f
# def showDictUsage():
# f
# showIntUsage()
# showFloatUsage()
showIteratorUsage()
| true |
39b0ffb71ca8b118cda12e37e764df31c8d9144d | Python | BenDavidAaron/dob-bylan-songwriter | /train_model.py | UTF-8 | 1,170 | 2.671875 | 3 | [] | no_license | import os
# Pin CUDA device selection before Keras/TensorFlow is imported.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = ""
from dylan_functions import get_lookup_table, tokenize_per_character
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint

# Read the cleaned corpus.  Bug fix: the original open(...).read() leaked
# the file handle; the context manager guarantees it is closed.
with open('cleaned text.txt', 'r') as corpus_file:
    doc = corpus_file.read()
looker = get_lookup_table(doc)
data = tokenize_per_character(doc, lookup_table=looker, sequence_length=250)
X = data['x']
y = data['y']
# define a basic model: two stacked LSTMs with dropout, softmax over characters
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.35))
model.add(LSTM(256))
model.add(Dropout(0.35))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# checkpoint every improvement, since training takes a long time
filepath="models/dylan-net_epoch-{epoch:02d}-loss-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model.fit(X, y, epochs=100, batch_size=1, callbacks=callbacks_list)
| true |
5b310bf6403e4674214c21f465dd223512cc7ed5 | Python | lilyjamie/OCRTool | /OCREditTools/Func/FiledictObserver.py | UTF-8 | 836 | 2.828125 | 3 | [] | no_license | from .ObserverSubject import Subject
class File(Subject):
    """Observable wrapper around a dict; every mutation notifies observers."""

    def __init__(self, file_dict):
        super(File, self).__init__()
        self._file_dict = file_dict
    @property
    def file_dict(self):
        return self._file_dict
    @file_dict.setter
    def file_dict(self, dic):
        # Replacing the whole dict also counts as a change worth notifying.
        self._file_dict = dic
        self.notify()
    def add(self, key, value):
        """Insert (or overwrite) key -> value and notify observers."""
        self.file_dict[key] = value
        self.notify()
    def delete(self, key):
        """Remove *key* (KeyError if absent) and notify observers."""
        self.file_dict.pop(key)
        self.notify()
    # Rename a key: copy its value under the new name, then drop the old key.
    def change_key(self, key, new_key):
        self.file_dict[new_key] = self.file_dict[key]
        self.file_dict.pop(key)
        self.notify()
    # Replace the value stored under an existing key.
    def change_value(self, key, value):
        self.file_dict[key] = value
| true |
764f89b005f541f83882d9040a090d1b343c7cad | Python | danielharada/mastermind | /mastermindUI.py | UTF-8 | 4,248 | 3.828125 | 4 | [
"MIT"
] | permissive | """Provides methods for getting user input for Mastermind game."""
import random
class MastermindUI:
    """Console UI for Mastermind: collects settings and guesses from the player.

    Bug fix: ``validateGuess`` was declared without ``self`` yet referenced
    ``self.color_options`` and was invoked as a bare name inside
    ``userInputsGuess`` — that raised NameError at runtime.  It is now a
    proper method.  ``checkForKeywords``/``validateGuess`` also return an
    explicit bool instead of an implicit None on the negative path.
    """

    def __init__(self):
        # Keyword -> helpMe topic mapping; the 'help' topic is swapped per prompt.
        self.keywords = {'help' : 'help', 'info' : 'info'}
        self.color_options = ('black', 'blue', 'green', 'red', 'white', 'yellow')
        print('Welcome to Mastermind in Python, written by Daniel Harada')
        self.info ='\nIn this game you are trying to break a 4 item code. Each slot in the code will'\
            ' be filled by a colored peg. If you guess a color in the code in the correct position'\
            ' you will recieve a black result peg. If you guess the correct color but in an incorrect'\
            ' location, you will recieve a white result peg. For each peg you guess which is not in the'\
            ' code, you recieve no result pegs.\n'
        print(self.info)
        print('If you have any questions, please type "help" or "info" at any time. Good luck!\n')

    def helpMe(self, keyword):
        """Print the help text registered for *keyword*."""
        help_with_spaces = '\nIn the base game there are 6 color options for code pegs: black, blue, green, red, '\
            'white, and yellow, which gives 1,296 possible 4 peg patterns. Adding space as an'\
            ' option(no peg placed) increases the number of possibilities to 2401. Please enter'\
            ' "yes" to allow spaces in the codes or "no" to disallow spaces.\n'
        help_with_guesses = '\nPlease pick a four color code as your guess. The possible colors are: {}.'\
            ' Each color in your code should be separated by a space.\n'.format(', '.join(self.color_options))
        help_generic = '\nThis is equal to the generic help message. It should not be reached\n'
        help_keywords = {'help' : help_generic, 'help with spaces' : help_with_spaces, 'help with guesses' : help_with_guesses, 'info' : self.info}
        print(help_keywords[keyword])

    def userDecidesIfWithSpaces(self):
        """Ask until the player answers yes/no; store the answer in self.use_spaces."""
        self.keywords['help'] = 'help with spaces'
        while True:
            user_spaces_decision = input('Play with spaces? Yes or No: ').lower()
            self.checkForKeywords(user_spaces_decision)
            if (user_spaces_decision == 'yes') or (user_spaces_decision == 'no'):
                break
        self.use_spaces = user_spaces_decision

    def checkForKeywords(self, user_input):
        """Return True (after printing help) when the input is a help keyword."""
        if user_input in self.keywords:
            self.helpMe(self.keywords[user_input])
            return True
        # Explicit False instead of the old implicit None.
        return False

    def setColorOptions(self):
        """Build the tuple of legal peg colors, adding 'space' when enabled."""
        color_options = ['black', 'blue', 'green', 'red', 'white', 'yellow']
        if self.use_spaces == 'yes':
            color_options += ['space']
        self.color_options = tuple(color_options)

    def generateSolution(self):
        """Pick a random 4-peg secret code from the current color options."""
        self.solution_pegs = tuple(random.choice(self.color_options) for x in range(4))

    def userInputsGuess(self):
        """Prompt until a valid 4-color guess is entered; store it in self.guess_pegs."""
        self.keywords['help'] = 'help with guesses'
        valid_guess = False
        while not valid_guess:
            user_input = input('Please enter your guess: ').lower()
            if not self.checkForKeywords(user_input):
                user_guess = tuple(user_input.split())
                # Bug fix: was a bare call to a self-less function.
                valid_guess = self.validateGuess(user_guess)
        self.guess_pegs = user_guess

    def validateGuess(self, user_guess):
        """Return True when the guess uses only legal colors and has exactly 4 pegs."""
        if not (set(user_guess) < set(self.color_options)):
            print('Guess needs to only include colors from: ', ', '.join(self.color_options))
            return False
        elif (len(user_guess) != 4):
            print('Please enter a 4 color guess, each color separated by a space')
            return False
        else:
            return True

    def userDecidesPlayAgain(self):
        """Ask until the player answers yes/no; return the answer as a bool."""
        play_again_TF = {'yes' : True, 'no' : False}
        while True:
            play_again = input('Would you like to play again? Yes or No: ').lower()
            if (play_again == 'yes') or (play_again == 'no'):
                break
        return play_again_TF[play_again]
if __name__ == '__main__':
    # Interactive smoke test: run one round of setup + one guess, then dump state.
    thisUI = MastermindUI()
    thisUI.userDecidesIfWithSpaces()
    thisUI.setColorOptions()
    thisUI.generateSolution()
    thisUI.userInputsGuess()
    play_again = thisUI.userDecidesPlayAgain()
    # Debug output — reveals the secret code, so this is not a playable game loop yet.
    print(thisUI.color_options)
    print(thisUI.solution_pegs)
    print(thisUI.guess_pegs)
    print(play_again)
| true |
dba12dfaee7b12859396d967c7fc77255531902c | Python | eischaire/my_homeworks | /prog.py | UTF-8 | 2,160 | 3.21875 | 3 | [] | no_license | import os, re
def textprocessing(name):
    """Read the file *name* decoded as windows-1251 and return its full text."""
    # Context manager closes the handle even if read() raises.
    with open(name, 'r', encoding='windows-1251') as f:
        return f.read()
def countwords():
    """Tally <w>-tagged lines per .xhtml file under the current directory
    and write 'filename<TAB>count' rows, sorted by name, to countwords.txt."""
    totals = {}
    for root, dirs, files in os.walk('.'):
        for fname in files:
            if fname.endswith('.xhtml'):
                # NOTE(review): fname is opened without joining root, so this
                # only works for files reachable from the CWD — preserved as-is.
                lines = textprocessing(fname).split('\n')
                totals[fname] = sum(1 for line in lines if line.startswith('<w>'))
    with open('countwords.txt', 'w', encoding='utf-8') as out:
        for fname in sorted(totals):
            out.write(fname + '\t' + str(totals[fname]) + '\n')
#ะะพะถะฝะพ ะปะธ ะฑัะปะพ ะฟัะพััะพ ัะตะณัะปััะฝัะผ ะฒััะฐะถะตะฝะธะตะผ ะฒััะฐัะธัั ัะธัะปะพ ะธะท ัะตะณะฐ words ะฒ ัะฐะทะผะตัะบะต? ะฏ ะฟะพะฑะพัะปะฐัั ััะพ ะดะตะปะฐัั, ะฒะดััะณ ัะฐะผ ะฝะตะฟัะฐะฒะธะปัะฝัะต ัะธัะปะฐ
def authors():
    """Extract author / creation-date metadata from every .xhtml file under
    the current directory and write them as a tab-separated table to authors.csv.

    Bug fix: the old code only filled the author/date dicts when the matching
    <meta> tag was present but wrote both columns unconditionally, raising
    KeyError for any file missing one of the tags. Missing values now become ''.
    """
    with open('authors.csv', 'w', encoding='utf-8') as b:
        # Header row (same bytes as the original sequence of writes).
        b.write('ะะฐะทะฒะฐะฝะธะต ัะฐะนะปะฐ\tะะฒัะพั\tะะฐัะฐ ัะพะทะดะฐะฝะธั\n')
        for root, dirs, files in os.walk('.'):
            for name in files:
                if name.endswith('.xhtml'):
                    file = textprocessing(name)
                    # Capture the content="" value directly instead of re-searching
                    # the matched tag for a quoted substring.
                    author_match = re.search('<meta content="(.+?)" name="author">', file)
                    author = author_match.group(1) if author_match else ''
                    date_match = re.search('<meta content="(.+?)" name="created">', file)
                    date = date_match.group(1) if date_match else ''
                    b.write(name + '\t' + author + '\t' + date + '\n')
def main():
    """Run both reports: word counts first, then author metadata."""
    countwords()
    authors()
# Executed on import/run — no __main__ guard in the original.
main()
| true |
7a706c981e61ef53454c6161914a9cf0ddc0fa07 | Python | xwzhong/classical-machine-learning-algorithm | /hidden-markov-model/forward.py | UTF-8 | 2,617 | 3.296875 | 3 | [] | no_license | #coding: utf-8
#date: 2016-05-29
#mail: artorius.mailbox@qq.com
#author: xinwangzhong -version 1.0
class HMMForward():
    """Forward algorithm for a fixed weather HMM.

    Hidden states: sun / cloud / rain.  Observations: dry / damp / soggy.
    The model parameters (pi, A, B) are hard-coded in __init__.
    """

    def __init__(self):
        # Three hidden states.
        self.hidden = ['sun', 'cloud', 'rain']
        self.len_hidden = len(self.hidden)
        # Three observable states.
        self.observation = ['dry', 'damp', 'soggy']
        self.len_obs = len(self.observation)
        # Initial distribution: P(sun), P(cloud), P(rain) on day 1.
        self.pi = (0.3, 0.3, 0.4)
        # Transition matrix A: P(hidden_j tomorrow | hidden_i today).
        self.A = ((0.2, 0.3, 0.5), (0.1, 0.5, 0.4), (0.6, 0.1, 0.3))
        # Emission matrix B: P(observation_k | hidden_j).
        self.B = ((0.1, 0.5, 0.4), (0.2, 0.4, 0.4), (0.3, 0.6, 0.1))

    def forward(self, observed):
        """Return P(observed sequence | model) via the forward algorithm."""
        steps = len(observed)
        # alpha[t][j]: probability of the first t+1 observations AND being in
        # hidden state j at time t.
        alpha = [[0.0] * self.len_hidden for _ in range(steps)]
        # Initialisation: prior times emission probability of the first symbol.
        first = self.observation.index(observed[0])
        for j in range(self.len_hidden):
            alpha[0][j] = self.pi[j] * self.B[j][first]
        # Induction: sum over all predecessor states, then apply the emission.
        for t in range(1, steps):
            obs_idx = self.observation.index(observed[t])
            for j in range(self.len_hidden):
                incoming = sum(alpha[t - 1][k] * self.A[k][j]
                               for k in range(self.len_hidden))
                alpha[t][j] = incoming * self.B[j][obs_idx]
        # Termination: total probability over all final hidden states.
        return sum(alpha[steps - 1])
if __name__ == '__main__':
    # Assuming a sequence `observed` is seen, print the probability that the
    # HMM (len_hidden, len_obs, A, B, pi) generated it.
    # NOTE(review): Python 2 print statements — this file targets Python 2.
    observed = ['dry']
    hmm_forword = HMMForward()
    print hmm_forword.forward(observed)
    observed = ['damp']
    print hmm_forword.forward(observed)
    observed = ['dry','damp']
    print hmm_forword.forward(observed)
    observed = ['dry','damp','soggy']
    print hmm_forword.forward(observed)
# Recorded output of a previous run:
#
# 0.21
# 0.51
# 0.1074
# 0.030162
# [Finished in 0.2s]
| true |
6c4790372a97ab98c5121cb3d6925bbb7245cbce | Python | MilicaPerisic/Data-Preprocessing-Task | /dictionary_processing.py | UTF-8 | 2,018 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 10:34:11 2020
@author: Milica
"""
import pandas as pd
import json
# Input CSV of devices and output path for the processed JSON.
file_path = 'data/devices.csv'
file_name_json = 'data/results/devices_dictionary.json'
# Canonical key order used by sort_keys() when rebuilding each device dict.
key_order = ['MODELNAME', 'IP', 'SYSDESC', 'COMMUNITY', 'CLASSNAME', 'HOSTNAME', 'SERIALNUMBER' ]
def read_data_dict(path=None):
    ''' Reads data from a file.
        NaN values filled with '' as in example.
        :param path: CSV file to read; defaults to the module-level file_path.
        Returns a list of dictionaries created from the data frame rows.'''
    if path is None:
        # Resolved at call time so the module-level default stays authoritative.
        path = file_path
    df = pandas.read_csv(path)
    df = df.fillna('')
    devices = df.to_dict('records')
    return devices
def sort_keys(list_of_devices, order=None):
    ''' Sorts keys in the given order (defaults to the module-level key_order).
        Returns a new list of new dicts; raises KeyError if a device lacks a key. '''
    if order is None:
        order = key_order
    # Dicts preserve insertion order, so building each dict in `order`
    # fixes the key order.
    return [{k: device[k] for k in order} for device in list_of_devices]
def add_key_vendor(list_of_devices):
    ''' Creates a new key - VENDOR - derived from substrings of CLASSNAME. '''
    # Rule table replaces the original chain of independent if-statements;
    # later rules win when several substrings match, exactly as before.
    rules = (
        (('Cisco',), 'Cisco'),
        (('Alcatel', 'Nokia'), 'Nokia'),
        (('Huawei',), 'Huawei'),
        (('Juniper',), 'Juniper'),
    )
    for device in list_of_devices:
        classname = device['CLASSNAME']
        for needles, vendor in rules:
            if any(needle in classname for needle in needles):
                device['VENDOR'] = vendor
def remove_devices_without_vendors_dict(list_of_devices):
    ''' Removes devices without a vendor, in place.

    Bug fix: the original removed items from the list while iterating the
    same list, which skips the element immediately after every removal, so
    consecutive vendor-less devices were left behind.
    '''
    # Slice assignment keeps the caller's list object (in-place semantics).
    list_of_devices[:] = [d for d in list_of_devices if 'VENDOR' in d]
def remove_key(list_of_devices, key):
    ''' Removes *key* from every device (KeyError if a device lacks it). '''
    for device in list_of_devices:
        device.pop(key)
def create_json_dict(list_of_devices, path=None):
    ''' Creates a json file from a list of dictionaries.
        :param path: output file; defaults to the module-level file_name_json.
        Returns the path that was written. '''
    if path is None:
        path = file_name_json
    with open(path, 'w') as file_out:
        json.dump(list_of_devices, file_out, indent=4)
    return path
| true |
33a1dc555cb321485f3b2c837f19c86c70d7f737 | Python | GabStoelting/CalciumImagingHelpers | /ReduceConcatFolderVideo.py | UTF-8 | 4,539 | 2.90625 | 3 | [] | no_license |
import argparse
import os
import numpy as np
import tifffile as tf
from natsort import natsorted
from cv2 import VideoWriter, VideoWriter_fourcc
import time
def array_binning(a, n: int = 2):
    """Downsample a 2-D array by repeatedly summing adjacent column/row pairs.

    Each pass halves both axes; n // 2 passes run, so the overall reduction
    factor per axis is 2 ** (n // 2).
    """
    if n % 2 != 0:
        raise ValueError("The binning coefficient must be a multiple of 2.")
    for _ in range(n // 2):
        a = a[:, ::2] + a[:, 1::2]
        a = a[::2, :] + a[1::2, :]
    return a
def gray(im):
    """Expand a 2-D intensity array into an 8-bit RGB image (all channels equal).

    Values are rescaled so the array maximum maps to 255, then the float
    result is truncated into uint8 on assignment.
    NOTE(review): the w/h names follow the original; shape[0] is actually rows.
    """
    scaled = 255 * (im / im.max())
    w, h = scaled.shape
    rgb = np.empty((w, h, 3), dtype=np.uint8)
    rgb[:, :, 0] = rgb[:, :, 1] = rgb[:, :, 2] = scaled
    return rgb
# Get information from commandline arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help="input TIFF file directory",
                    required=True)
parser.add_argument('-b', '--binning', help="bxb pixels are averaged",
                    required=True)
parser.add_argument('-c', '--codec', help="set the four letter video codec")
args = parser.parse_args()
# Read directory and check if it really is one
directory = args.input
if(os.path.isdir(directory) is False):
    print(directory, "is not a directory.")
    quit()
# Import binning parameter
binning = int(args.binning)
# Check if a codec is specified, otherwise use "MP42"
if(args.codec):
    fourcc = VideoWriter_fourcc(*str(args.codec))
else:
    fourcc = VideoWriter_fourcc(*'MP42')
# Search through the specified directory: for each subdirectory, collect the
# TIFF files, then concatenate all their frames (binned) into one AVI + TIFF.
for subdir, dirs, files in os.walk(directory):
    filelist = []
    for file in files:
        # print os.path.join(subdir, file)
        filepath = subdir + os.sep + file
        if filepath.endswith(".tif") or filepath.endswith(".tiff"):
            filelist.append(filepath) # append to filelist
    if(len(filelist) > 1):
        filelist = natsorted(filelist) # sort if more than one
    # Probe the first TIFF for the frame geometry.
    try:
        image = tf.imread(filelist[0], key=0)
        height, width = image.shape
    except:
        # NOTE(review): bare except also swallows IndexError for empty dirs
        # and any other error — consider narrowing.
        print("Couldn't open"+filelist[0])
        quit()
    print(width, height)
    running = True
    d = 1
    file_i = 1
    # Iterate through all files
    #
    # Make sure this works when spanning over multiple files!
    # Save video into this file
    outavi = VideoWriter(filelist[0]+"_concat.avi", fourcc, 10.0,
                         (int(width/binning), int(height/binning)))
    # Save ratio images into this file
    # runtime: [frames, imread s, binning s, avi s, tiff s] accumulators.
    runtime = [0, 0, 0, 0, 0]
    with tf.TiffWriter(filelist[0]+"_concat.tif", bigtiff=True) as outtif:
        # Iterate through all files
        for filename in filelist:
            t_start = time.time()
            print(filename)
            i = 0
            # Read frames until tifffile raises IndexError past the last key.
            while running:
                try:
                    t_0 = time.time()
                    image = tf.imread(filename, key=i)
                    t_1 = time.time()
                    # Reduce dimensions of images
                    image = array_binning(image, binning)
                    t_2 = time.time()
                    # Write frame to video
                    outavi.write(gray(image))
                    t_3 = time.time()
                    # Convert frame to 16bit integer for TIF
                    image = image.astype("uint16")
                    outtif.save(image, compress=0, photometric='minisblack')
                    t_4 = time.time()
                    i = i+d
                except IndexError:
                    # Past the last frame of this file.
                    break
                except Exception as e:
                    print(e)
                    break
                # Print status message
                print(f"File {file_i}/{len(filelist)}, converting frame {i}")
                t_5 = time.time()
                print(f"imread: {t_1-t_0}, binning: {t_2-t_1}, avi: {t_3-t_2}, tiff: {t_4-t_3}, total {t_5-t_0}")
                runtime[0] += 1
                runtime[1] += t_1-t_0
                runtime[2] += t_2-t_1
                runtime[3] += t_3-t_2
                runtime[4] += t_4-t_3
            print(f"File total: {time.time()-t_start}")
            file_i += 1
        # Timing summary over all files in this subdirectory.
        print(runtime)
        print(f"i: {runtime[0]} imread: {runtime[1]}, binning: {runtime[2]}, avi: {runtime[3]}, "
              f"tiff: {runtime[4]}, total {runtime[1]+runtime[2]+runtime[3]+runtime[4]}")
        # Redundant: the with-block closes the TIFF writer anyway.
        outtif.close()
    outavi.release()
| true |
257909dba9154a4f6ccbc230756af565bc31637c | Python | ajloinformatico/Brawl-Start-Simulate | /functions.py | UTF-8 | 3,336 | 3.59375 | 4 | [] | no_license | import os
import time
def show_players():
    """
    Builds a newline-separated list of player names.
    Each player is a file inside the ./players directory; info.txt is skipped.
    :return (str): one name per line, newline-terminated ('' when no players)
    """
    names = []
    for entry in os.scandir("./players"):
        # Bug fix: the old code sliced str(DirEntry) ("<DirEntry 'name'>"[11:-2])
        # to recover the name; entry.name is the supported, robust way.
        if entry.name != "info.txt":  # skip info file
            names.append(entry.name + "\n")
    return "".join(names)
def check_exists_players():
    """
    Ensures if players exist
    :return (bool): True if at least one player file exists, False otherwise
    """
    # show_players() returns '' exactly when there are no players.
    return show_players() != ""
def check_only_one_player_exists(player: str):
    """
    Ensures if a specific player exists
    :param player (str): name of the player to look for
    :return (bool): True if the player exists, False otherwise
    """
    # show_players() yields whitespace-separated names; membership test
    # replaces the original manual scan loop.
    return player in show_players().split()
def not_number(n):
    """
    Check whether *n* contains any character that does not parse as an int
    (the check is per character, so signs and spaces count as non-numeric).
    :param n: value to check, iterated character by character
    :return (bool): True if some character is not numeric, False otherwise
                    (an empty string yields False)
    """
    for ch in n:
        try:
            int(ch)
        except ValueError:
            # First non-numeric character decides the answer — same as the
            # original's single try around the whole loop.
            return True
    return False
def create_new_user(ruta: str):
    """
    Create a new (empty) save file for a new player
    :param ruta (str): path of the player file
    :return (void): just creates/truncates the file
    """
    # 'with' guarantees the handle is closed even if printing fails.
    with open(ruta, "w"):
        print("create")
def count_number_of_lines(ruta: str):
    """
    Count the lines of a file
    :param ruta (str): route of the file to read
    :return (int): number of lines in the file
    """
    with open(ruta, 'r') as file:
        # Stream line by line instead of materialising the whole file
        # with readlines().
        return sum(1 for _ in file)
def load_dic_from_file(ruta: str):
    """
    Load a dict from a file of alternating lines: a value line (character
    type) followed by a key line (character name).
    :param ruta (str): path of the file to load
    :return (dict): mapping of key -> value

    Bug fix: the old loop counter was count_number_of_lines(ruta) / 2 — a
    float — decremented until `c != 0` became false; for a file with an odd
    number of lines the counter skipped 0 entirely and the loop never ended.
    Reading the pairs directly avoids that failure mode.
    """
    dic = {}
    with open(ruta, "r") as manf:
        lines = [line.rstrip() for line in manf]
    # zip pairs (value, key); an unpaired trailing line is ignored.
    for value, key in zip(lines[::2], lines[1::2]):
        dic[key] = value
    return dic
def time_sleep():
    """
    Print a short "thinking" animation: three dots two seconds apart,
    then a final newline
    :return (void):
    """
    for _ in range(3):
        print(".", end=" ")
        time.sleep(2)
    print()
def delete_game(name:str):
    """
    Remove the current player's save file from the players directory,
    then terminate the game.
    :param name (str): player name whose file is removed
    :return (void): never returns normally — calls exit(0) after the messages
    """
    print("Removing your file")
    # Short animation so the removal doesn't feel instantaneous.
    time_sleep()
    os.remove("players/"+name)
    print("Current player was removed\nGood bye")
    exit(0)
def count(dic):
    """
    Count the elements of an iterable (the keys, for a dictionary)
    :param dic (dict): standard dictionary (any iterable also works)
    :return (int): number of elements
    """
    # Generator sum mirrors the original manual counter for any iterable,
    # including ones without len().
    return sum(1 for _ in dic)
def rotate_array(arr, rotate_by):
    """Return a new list: arr rotated right by (len(arr) % rotate_by) positions.

    NOTE(review): the effective shift really is len(arr) % rotate_by — the
    original author flagged this formula as unclear; right_rotate in this
    file is the straightforward rotate-by-k variant.
    """
    size = len(arr)
    # Element landing at position j came from (j - shift) mod size.
    return [arr[(j - size % rotate_by) % size] for j in range(size)]
def right_rotate(arr, k):
    """Return a new list equal to arr rotated right by k positions."""
    length = len(arr)
    # The element that ends up at position j started at (j - k) mod length;
    # building the result directly replaces the pre-allocated [None] buffer.
    return [arr[(j - k) % length] for j in range(length)]
def right_rotate_in_place(arr, k):
    """Rotate *arr* right by k positions in place and return the same list.

    Bug fix: the original (marked as unfinished by its author) stored the
    displaced element in a temp variable but never used it, so it merely
    overwrote elements instead of rotating them.
    """
    length = len(arr)
    if length:
        shift = k % length
        if shift:
            # Slice assignment mutates the caller's list object.
            arr[:] = arr[-shift:] + arr[:-shift]
    return arr
if __name__ == '__main__':
    # Quick manual check: print the input, then the two rotate variants.
    # print(rotate_array([1, 2, 3, 4, 5, 6, 7], 3))
    print([1, 2, 3, 4, 5, 6, 7])
    print("-----------------")
    print(right_rotate([1, 2, 3, 4, 5, 6, 7], 2))
    print("-----------------")
    print(right_rotate_in_place([1, 2, 3, 4, 5, 6, 7], 2))
| true |
eedb9dceb8cd1688b79ee1901bd0ca786c78280a | Python | co4uhutejib/parsedoc | /get_methods.py | UTF-8 | 367 | 2.640625 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
from utils import *
def print_get_methods(data, competences):
    """Print the deduplicated set of methods attached to the given competences.

    :param data: competence catalogue, passed through to find_competence
    :param competences: iterable of competence names; duplicates are ignored
    """
    methods = set()
    # Dedup up front, as in the original; iteration order is unspecified.
    for competence in set(competences):
        comp_descr = find_competence(data, competence)
        # Identity test is the idiomatic None check (was: assert None != comp_descr).
        assert comp_descr is not None
        methods.add(comp_descr['methods'])
    for method in methods:
        print(method)
0f3a00a92892ddc9173cd761833c459357c7a823 | Python | prodrigues07/paulo-victor-sor1-p7-info | /Aula_22_03/exercicio1.py | UTF-8 | 198 | 4.21875 | 4 | [
"Apache-2.0"
] | permissive | # Escrever um programa para somar todos os elementos de uma lista de nรบmeros.
# Sum every element of the list with an explicit while loop
# (teaching exercise; equivalent to sum(l)).
l = [10, 20, 30]
x = 0
y = 0
while y < len(l):
    x = x + l[y]
    y += 1
print (f'A soma total da Lista รฉ: {x}')
4bf095f1bb3af64fa76c373d3a536de2073b8ee5 | Python | nightwatch2019/base | /connect_remote_db.py | UTF-8 | 1,104 | 2.875 | 3 | [] | no_license | # connect remote mysql server
import pymysql
# SECURITY NOTE(review): real-looking database credentials are hard-coded in
# source. They should be moved to environment variables or a secrets store
# and rotated, since this file may be shared or published.
my_host = "111.230.244.189"
my_user = "nightwatch"
my_password = "3.1415926535@@"
class TestRemoteDB(object):
    """Thin helper for running SELECT statements against a remote MySQL server."""
    # Class-level defaults taken from the module-level credentials.
    db_host = my_host
    db_user = my_user
    db_password = my_password

    def __init__(self, db_db):
        """db_db: database name to connect to."""
        self.db_host = TestRemoteDB.db_host
        self.db_user = TestRemoteDB.db_user
        self.db_password = TestRemoteDB.db_password
        self.db_db = db_db

    def __connect(self):
        """Open the pymysql connection and create a cursor."""
        self.db = pymysql.connect(self.db_host, self.db_user, self.db_password, self.db_db)
        self.cursor = self.db.cursor()

    def __close(self):
        """Close the connection if one is open."""
        if self.db:
            self.db.close()

    def select(self, sql):
        """Run *sql* (a SELECT statement) and return all rows.

        Bug fix: the connection is now closed in a finally block, so a
        failing execute()/fetchall() no longer leaks the connection.
        """
        self.__connect()
        try:
            self.cursor.execute(sql)
            data = self.cursor.fetchall()
        finally:
            self.__close()
        return data
def test():
    """Smoke test: query the employees table and print the rows (needs network access)."""
    test_db = TestRemoteDB("nightwatch")
    results = test_db.select("select * from employees")
    print(results)
if __name__ == "__main__":
    test()
239f81592f5c85411d53ced66e13b7ec94218b97 | Python | Isaac-D-Dawson/Homework-Uploads | /PyCheckIO/MedianOfThree.py | UTF-8 | 1,536 | 4.4375 | 4 | [] | no_license | # Given an iterable of ints , create and return a new iterable whose first two elements are the same as in items, after which each element equals the median of the three elements in the original list ending in that position.
# Wait...You don't know what the "median" is? Go check out the separate "Median" mission on CheckiO.
# Input: Iterable of ints.
# Output: Iterable of ints.
# The mission was taken from Python CCPS 109 Fall 2018. Itโs being taught for Ryerson Chang School of Continuing Education by Ilkka Kokkarinen
from typing import Iterable
def median_three(els: Iterable[int]) -> Iterable[int]:
    """Return a list whose first two items match *els* and whose i-th item
    (i >= 2) is the median of els[i-2:i+1].

    Generalization: the result buffer is now built with list(), so tuples
    and other sequences work too (the old code called .append on a slice of
    the input, which crashed for tuple inputs). Inputs of length <= 2 are
    returned unchanged, as before.
    NOTE(review): despite the Iterable annotation, the input must support
    len() and indexing, i.e. be a sequence.
    """
    if len(els) <= 2:
        # Too short for any 3-item median window.
        return els
    result = list(els[0:2])
    for i in range(2, len(els)):
        # Median of the 3-item window ending at position i.
        window = sorted([els[i - 2], els[i - 1], els[i]])
        result.append(window[1])
    return result
# if __name__ == '__main__':
# print("Example:")
# print(list(median_three([1, 2, 3, 4, 5, 6, 7])))
# # These "asserts" are used for self-checking and not for an auto-testing
# assert list(median_three([1, 2, 3, 4, 5, 6, 7])) == [1, 2, 2, 3, 4, 5, 6]
# assert list(median_three([1])) == [1]
# print("Coding complete? Click 'Check' to earn cool rewards!") | true |
414972ea9eccf7bb7fa13b3a180417a5157f1f97 | Python | CuriosityLabTAU/physical_curiosity_big_analysis | /merge_row_data.py | UTF-8 | 639 | 2.546875 | 3 | [] | no_license | ###Imports:
import pickle
data1 = pickle.load(open('data/old_data12.7/raw_data_all_merged_11_7', 'rb'))
data2 = pickle.load(open('data/raw_data_10_10', 'rb'))
subject_id_data1 = []
subject_id_data2 = []
for subject_id, step in data1.items():
subject_id_data1.append(subject_id)
for subject_id, step in data2.items():
subject_id_data2.append(subject_id)
intersection = [x for x in subject_id_data1 if x in subject_id_data2]
map(data2.__delitem__, filter(data2.__contains__,intersection)) #delete form data2 the intersection
data1.update(data2) #merge
pickle.dump(obj=data1, file=open('raw_data_all_merged_8_11', 'wb'))
| true |
4c0e170c07c49b27f2f3fae3e74fc1c71207b0c2 | Python | hi-zhengcheng/tensorflow-study | /linear_regression/simple_linear_regression.py | UTF-8 | 1,864 | 3.0625 | 3 | [] | no_license | #!/usr/bin/python
import tensorflow as tf
import numpy as np
# tmp dir to store tmp data (TensorBoard summaries and the graph)
TEMP_DIR = "./tmp"
# super parameters (hyperparameters)
learning_rate = 0.1
training_epoches = 100
# training data: roughly y = x with noise
train_X = np.asarray([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
train_Y = np.asarray([0.1, 1.2, 1.9, 2.8, 4.6, 4.9, 6.1, 6.9, 7.9, 9.0])
# training samples number
n_samples = train_X.shape[0]
# input placeholder (TF1 graph-mode API)
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
# trainable model params, randomly initialised
w = tf.Variable(np.random.randn(), name='weight')
tf.summary.scalar('weight', w)
b = tf.Variable(np.random.randn(), name='bias')
tf.summary.scalar('bias', b)
# simple linear model: pred = X * w + b
pred = tf.add(tf.multiply(X, w), b)
tf.summary.scalar('pred', pred)
# cost function: mean square error
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / n_samples
tf.summary.scalar('cost', cost)
# optimizer: gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# create op to initialize variables
init = tf.global_variables_initializer()
# merge all summary op
merged_summary = tf.summary.merge_all()
# start train
with tf.Session() as sess:
    # run init
    sess.run(init)
    # create writer
    writer = tf.summary.FileWriter(TEMP_DIR, sess.graph)
    # NOTE(review): xrange is Python 2 only, while the print() calls below
    # use Python 3 style — confirm which interpreter this targets.
    for epoch in xrange(training_epoches):
        # One SGD step per (x, y) sample.
        for (x, y) in zip(train_X, train_Y):
            summary, _ = sess.run([merged_summary, optimizer], feed_dict={X: x, Y: y})
        # save summary result into file each epoch
        summary = sess.run(merged_summary, feed_dict={X: train_X[0], Y: train_Y[0]})
        writer.add_summary(summary, epoch)
        c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
        print("Epoch:", "%04d" % (epoch + 1), "cost=%.9f" % (c), "w :", sess.run(w), "b :", sess.run(b))
    print("Optimization finished!")
| true |
20c45c04435c6163f3d209fc55c31e571e5a581f | Python | mala23/radar-data | /data/merge_wind.py | UTF-8 | 771 | 2.71875 | 3 | [] | no_license | import numpy as numpy
import pandas as pandas
# import data csv, rename and rearrange headers
data = pandas.read_csv('./datasets/wind_tidy.csv')
data.columns = ['Site', 'Startup', 'Production', 'Location', 'Canton', 'ZE-Coordinates']
data = data[['Location','Production','Startup']]
print(data)
# import coordinates csv
coordinates = pandas.read_csv('./coordinates/coordinates_wind.csv')
print(coordinates)
# merge
# Outer merge on Location keeps rows present in only one of the frames.
merged = pandas.merge(data, coordinates, how='outer', right_on='Location', left_on='Location')
#merged = merged[['Location','Production','Startup','Lat','Lng']]
# NOTE(review): dropping column index 5 assumes a fixed post-merge column
# order — selecting by name would be safer.
merged.drop(merged.columns[[5]], axis=1, inplace=True)
merged.drop_duplicates(inplace=True)
print(merged)
# save to csv
merged.to_csv(path_or_buf='./wind.csv', encoding='utf-8', index=False)
| true |
68e6d190e4b929f0797ae2f13dd5afcf8245f7c2 | Python | Pavithra692001/Python | /sum.py | UTF-8 | 174 | 3.21875 | 3 | [] | no_license | print("additional of two number")
# Read two integers from the user and print their sum.
# NOTE(review): int() raises ValueError on non-numeric input — no validation here.
a=(int(input("enter the number1")))
b=(int(input("enter the number2")))
c=a+b
print("sum of two number",c)
| true |
bce29f4fba8d561c0408d0671df6b92b08e41c60 | Python | siminino/Scheduling-Algorithms | /process/test_process.py | UTF-8 | 910 | 3 | 3 | [] | no_license | import unittest
from process import Process
class ProcessTestCase(unittest.TestCase):
    """Unit tests for Process: each test starts from a fresh Process with 20 tasks."""
    def setUp(self):
        self.process = Process(20)
    def test_create_process_object(self):
        self.assertTrue(self.process)
    def test_process_should_have_task(self):
        self.assertEqual(self.process.task, 20)
    def test_run_process_should_work_one_task(self):
        # run() consumes exactly one task.
        self.process.run()
        self.assertEqual(self.process.task, 19)
    def test_run_process_with_time_should_work_tasks_to_timeout(self):
        # run_time(t) consumes up to t tasks.
        self.process.run_time(10)
        self.assertEqual(self.process.task, 10)
    def test_run_process_with_time_greater_then_tasks_should_close_process(self):
        # A time slice longer than the remaining tasks drains them to zero.
        self.process.run_time(30)
        self.assertEqual(self.process.task, 0)
    def test_run_all_process_should_work_all_taks(self):
        self.process.run_all_process()
        self.assertEqual(self.process.task, 0)
| true |
b4382cc77ba4514b31a02e393e8eb84c0634105f | Python | ethanlow23/codingDojoCoursework | /01_Python/01_python_oop/group_cards.py | UTF-8 | 1,298 | 3.9375 | 4 | [] | no_license | import random
class Card(object):
    """A single playing card: a suit name plus a numeric value (2-14)."""
    def __init__(self, suit, value):
        self.suit = suit
        self.value = value
    def __str__(self):
        # %-formatting produces the same text as the original str.format call.
        return 'SUIT: %s VALUE: %s' % (self.suit, self.value)
class Deck(object):
    """A standard 52-card deck (values 2-14 in each of four suits)."""

    def __init__(self):
        # Bug fix: 'deck' used to be a class attribute shared by every Deck
        # instance, so creating a second Deck appended 52 more cards to the
        # first one's list. It is now per-instance state.
        self.deck = []
        self.createDeck()

    def createDeck(self):
        """Fill self.deck with the 52 suit/value combinations."""
        suits = ['spade', 'heart', 'club', 'diamond']
        for suit in suits:
            for value in range(2,15):
                self.deck.append(Card(suit, value))

    def __str__(self):
        return 'cards in deck: {}'.format(len(self.deck))

    def Deal(self, numOfCards):
        """Remove numOfCards random cards from the deck and return them."""
        dealt = []
        for i in range(numOfCards):
            index = random.randint(0,len(self.deck) - 1)
            dealt.append(self.deck.pop(index))
        return dealt
class Player(object):
    """A named player holding a hand of cards drawn from a deck."""

    def __init__(self, name, deckObject):
        self.name = name
        self.deckObject = deckObject
        # Bug fix: 'hand' used to be a class attribute, so every Player
        # shared one hand. It is now per-instance state.
        self.hand = []

    def add(self, numOfCards):
        """Deal numOfCards from the deck into this player's hand, echoing each card."""
        addCards = self.deckObject.Deal(numOfCards)
        for card in addCards:
            # print() with a single argument behaves identically on Py2 and Py3.
            print(card)
            self.hand.append(card)

    def __str__(self):
        # Bug fix: __str__ must return a string; the old version printed the
        # cards and implicitly returned None, so str(player) raised TypeError.
        return '\n'.join(str(card) for card in self.hand)
# Manual smoke test (Python 2 print statements — this file targets Python 2):
# build a deck, deal three cards, then deal three more into a player's hand.
ourDeck = Deck()
print ourDeck
ourDeck.Deal(3)
print ourDeck
jim = Player('jim', ourDeck)
jim.add(3)
print jim