seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
72964692179 | import json
class ConfigMaker:
    """Interactive builder for the remote-control solenoid config.json file."""

    def __init__(self):
        print("Remote Control Solenoid Config Maker!\n")

    def write_config(self, data_dict):
        """Serialise data_dict as JSON into ./config.json (overwrites)."""
        with open("config.json", "w") as config_file:
            config_file.write(json.dumps(data_dict))

    def create_config(self):
        """Prompt for connection settings, write them, and confirm.

        Fix: the original ended with self.logger.log_config_success(),
        but no `logger` attribute is ever created anywhere in the class,
        so every run crashed with AttributeError right after writing.
        """
        host = input("host:")
        port = input("port:")
        name = input("name:")
        self.write_config({"host": host, "port": port, "name": name})
        print("Config written to config.json")
if __name__ == "__main__":
ConfigMaker().create_config() | HOI-Devices/RemoteControlRelayController | config.py | config.py | py | 606 | python | en | code | 0 | github-code | 13 |
34927188785 | from ftplib import FTP
import ftplib
import os
"""
If the FTP server that you’re connecting to requires TLS security,
then you will want to import the FTP_TLS class instead of the FTP class.
The FTP_TLS class supports a keyfile and a certfile.
"""
# =================== CONNECTING TO FTP SERVERS ===================
# Anonymous login (no user/password): login() defaults to user 'anonymous'.
ftp = FTP('ftp.debian.org')
print (ftp.login())
# 230 Login successful.
# Rebinding `ftp` drops the first connection object without quit()/close();
# the old socket is only reclaimed by garbage collection.
ftp = FTP('ftp.cse.buffalo.edu')
print (ftp.login())
# 230 Guest login ok, access restrictions apply.
# # =================== NAVIGATING DIRECTORIES ===================
# # We called LIST which retrieves a list of files and/or folders
# # along with their respective informations and prints them out.
# ftp.retrlines('LIST') # The retrlines method prints out the result of the command we called.
# # total 28
# # drwxrwxrwx 2 0 0 4096 Sep 6 2015 .snapshot
# # drwxr-xr-x 2 202019 5564 4096 Sep 6 2015 CSE421
# # drwxr-xr-x 2 0 0 4096 Jul 23 2008 bin
# # drwxr-xr-x 2 0 0 4096 Mar 15 2007 etc
# # drwxr-xr-x 6 89987 546 4096 Sep 6 2015 mirror
# # drwxrwxr-x 7 6980 546 4096 Jul 3 2014 pub
# # drwxr-xr-x 26 0 11 4096 Apr 29 2016 users
# print (ftp.cwd('mirror')) # cwd command to change our working directory to a different folder.
# # 250 CWD command successful.
# ftp.retrlines('LIST') # The retrlines method prints out the result of the command we called.
# # total 16
# # drwxr-xr-x 3 89987 546 4096 Sep 6 2015 BSD
# # drwxr-xr-x 5 89987 546 4096 Sep 6 2015 Linux
# # drwxr-xr-x 4 89987 546 4096 Sep 6 2015 Network
# # drwxr-xr-x 4 89987 546 4096 Sep 6 2015 X11
# # =================== DOWNLOADING VIA FTP ===================
# ftp = FTP('ftp.debian.org')
# print (ftp.login())
# # 230 Login successful.
# print (ftp.cwd('debian'))
# # 250 Directory successfully changed.
# out = 'README'
# with open(out, 'wb') as f: # We create the name of the file we want to save to and open it in write-binary mode.
# # We use the ftp object’s retrbinary to call RETR to retrieve the file and write it to our local disk.
# ftp.retrbinary('RETR ' + 'README.html', f.write)
# filenames = ftp.nlst() # We call nlst which gives us a list of filenames and directories.
# print (filenames)
# # ['README', 'README.CD-manufacture', 'README.html', 'README.mirrors.html', 'README.mirrors.txt',
# # 'dists', 'doc', 'extrafiles', 'indices', 'ls-lR.gz', 'pool', 'project', 'tools', 'zzz-dists']
# for filename in filenames:
# host_file = os.path.join('ftp_test', filename)
# try:
# with open(host_file, 'wb') as local_file:
# ftp.retrbinary('RETR ' + filename, local_file.write)
# # Traceback (most recent call last):
# # File "ftplib_sample.py", line 66, in <module>
# # with open(host_file, 'wb') as local_file:
# # FileNotFoundError: [Errno 2] No such file or directory: 'ftp_test/README'
# except ftplib.error_perm:
# pass
# ftp.quit()
# =================== UPLOADING TO FTP SERVER ===================
def ftp_upload(ftp_obj, path, ftype='TXT'):
    """
    A function for uploading files to an FTP server
    @param ftp_obj: The file transfer protocol object
    @param path: The path to the file to upload
    @param ftype: 'TXT' uploads in line (ASCII) mode; any other value
                  uploads in binary mode
    """
    # Fixes: the original body used the module-level global `ftp` instead of
    # the ftp_obj parameter, and used the full local path as the remote file
    # name (STOR '~/Downloads/x.pdf' is rejected by most servers) — upload
    # under the file's base name instead.
    remote_name = os.path.basename(os.path.expanduser(path))
    if ftype == 'TXT':
        with open(os.path.expanduser(path)) as fobj:
            ftp_obj.storlines('STOR ' + remote_name, fobj)
    else:
        with open(os.path.expanduser(path), 'rb') as fobj:
            ftp_obj.storbinary('STOR ' + remote_name, fobj, 1024)
if __name__ == '__main__':
    # Demo driver: anonymous login, then attempt an upload.
    ftp = FTP('ftp.debian.org')
    ftp.login()
    # NOTE(review): hard-coded user-specific path; will raise FileNotFoundError
    # on other machines — parameterise before reuse.
    path = '~/Downloads/340944751-Yellowjackets-Freda-pdf.pdf'
    ftp_upload(ftp, path, ftype='PDF')
    ftp.quit()
# We don't have access to upload to the chosen FTP server, hence the following error.
# ftplib.error_perm: 550 Permission denied.
| joaofguerreiro/python3-course-advanced | 21_working_with_FTP/ftplib_sample.py | ftplib_sample.py | py | 3,748 | python | en | code | 2 | github-code | 13 |
12313206028 | #!/bin/python3
from typing import List
import array
def arrayManipulation(n: int, queries: List[List[int]]):
    """Return the maximum element of an n-length zero array after applying
    every (a, b, k) query, where each query adds k to positions a..b
    (1-indexed, inclusive).

    Uses a difference array: O(n + q) instead of touching each cell.
    """
    deltas = array.array("i", [0] * (n + 1))
    for start, end, amount in queries:
        deltas[start - 1] += amount
        deltas[end] -= amount
    best = 0
    running = 0
    for delta in deltas:
        running += delta
        best = max(best, running)
    return best
if __name__ == "__main__":
    # Sample from the problem statement; evaluates to 200 but the return
    # value is discarded (nothing is printed).
    arrayManipulation(5, [[1, 2, 100], [2, 5, 100], [3, 4, 100]])
| uztbt/cp | HackerRank/ArrayManipulation/ArrayManipulation.py | ArrayManipulation.py | py | 457 | python | en | code | 0 | github-code | 13 |
72376556819 |
def hasDuplicates(array1, array2):
    """Return True when any element of array1 also appears in array2.

    Elements must be hashable; lookup is O(1) per element of array2.
    """
    seen = set(array1)
    return any(element in seen for element in array2)
print(hasDuplicates([1, 2, 3], [4, 5, 8]))
def hasDuplicates2(array1, array2):
    """Return True when the concatenation of the two lists contains any
    repeated value (a value duplicated within one list also counts).

    Cleanup: removed the dead commented-out draft and the
    `True if ... else False` anti-idiom — the comparison already
    yields a bool.
    """
    combined = array1 + array2
    return len(combined) > len(set(combined))
print(hasDuplicates2([1, 2, "a"], [4, "b", 8]))
array = [1, 3, 9, 8]
array2 = [1, 5, 4, 6]
array3 = [0, 0, 0, 0]
num = 10
def hasSumOfTwo(array, num):
    """Return True when two elements of `array` (at distinct positions)
    sum to `num`.

    Improvement: complements are kept in a set instead of a list, making
    each membership test O(1) (the original rescanned a list, O(n^2) total).
    """
    compliments = set()
    for value in array:
        if value in compliments:
            return True
        compliments.add(num - value)
    return False
print(hasSumOfTwo(array2, num))
def reverseStr(string):
    """Return `string` reversed.

    Improvement: a slice is O(n); the original built the result with
    repeated `+=` concatenation, which is O(n^2) in the worst case.
    """
    return string[::-1]
print(reverseStr("hello"))
def mergeSortedArrays(arr1, arr2):
    """Return one ascending-sorted list containing every element of both inputs."""
    combined = list(arr1) + list(arr2)
    combined.sort()
    return combined
print(mergeSortedArrays([1,3,9,4,7], [5, 3, 7]))
nums = [1,2,3,4,5,6,7, 8, 9]
k = 6
def rotateList(k, nums):
    """Rotate `nums` right by k positions, in place. Returns None.

    Fixes: the original crashed on an empty list (nums[-1] on []), and did
    k separate insert/pop passes (O(k*n)); a single slice assignment is
    O(n) and handles k > len(nums) via the modulo.
    """
    if not nums:
        return
    k %= len(nums)
    if k:
        nums[:] = nums[-k:] + nums[:-k]
nums = [0,1,0,3,12]
def moveZeros(nums):
    """Move every zero in nums to the end, in place, preserving the
    relative order of the non-zero values."""
    zero_count = nums.count(0)
    nums[:] = [value for value in nums if value != 0]
    nums.extend([0] * zero_count)
nums = [-2,-1,-3, -4,-1,-2,-1,-5,-4]
def maxSubArray(nums):
    """Kadane's algorithm: largest sum over all non-empty contiguous
    subarrays of nums (handles all-negative input correctly)."""
    best = nums[0]
    running = 0
    for value in nums:
        running = max(running, 0) + value
        best = max(best, running)
    return best
print(maxSubArray(nums))
a=[-1, 0, 1,3,4,6,20]
b=[4,20]
def mergeSortedArrays(a, b):
    """Merge two already-sorted lists and print the merged list.

    Returns None (the result is printed), matching the original contract.

    Fix: the original appended a `None` sentinel directly onto both caller
    lists, permanently mutating them; this version only reads its inputs
    and merges with index bounds instead of sentinels.
    """
    merged = []
    i = j = 0
    while i < len(a) or j < len(b):
        # Take from `a` while it has elements and its head is <= b's head
        # (ties taken from `a` first, as in the original).
        if i < len(a) and (j >= len(b) or a[i] <= b[j]):
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    print(merged)
mergeSortedArrays(a, b)
def func(lst):
    """Return the first element whose second occurrence is reached while
    scanning lst; None when every element is unique.

    Improvement: membership is checked against a set (O(1)) instead of a
    growing list (O(n) per check). Elements must be hashable.
    """
    seen = set()
    for item in lst:
        if item in seen:
            return item
        seen.add(item)
print(func([2,3,4,5]))
def func(lst):
    """Return the first value seen twice while scanning lst; None when no
    value repeats."""
    visited = dict()
    for element in lst:
        if visited.get(element):
            return element
        visited[element] = True
    return None
print(func([2,2,3,3,2,4,5]))
class hashTable():
    """Separate-chaining hash table: `data` is a list of buckets, each
    bucket being None or a list of [key, value] pairs."""

    def __init__(self, size):
        self.data = [None] * size

    def __hash__(self, key, length = None):
        """Deterministic bucket index for `key` (str()-ed first).

        Called explicitly with the key (nonstandard use of __hash__,
        kept for interface compatibility).
        """
        hash = 0
        key = str(key)
        if not length:
            length = len(self.data)
        for i in range(len(key)):
            hash = (hash + ord(key[i]) * i) % length
        return hash

    def __keyIndexLink__(self, address, key):
        """Index of `key` inside bucket `address`, or None if absent."""
        for i in range(len(self.data[address])):
            if self.data[address][i][0] == key:
                return i
        return None

    def set(self, key, value):
        """Insert or update key -> value; collisions chain in the bucket."""
        address = self.__hash__(key)
        if not self.data[address]:
            self.data[address] = [[key, value]]
        else:
            index = self.__keyIndexLink__(address, key)
            if index is not None:
                self.data[address][index][1] = value
            else:
                self.data[address].append([key, value])

    def get(self, key):
        """Return the value stored for `key`, or None if absent.

        Fixes over the original:
        - an empty (None) bucket crashed with len(None);
        - the `index != False` check let a None index through, crashing
          on bucket[None];
        - a single-entry bucket returned its value without verifying the
          key actually matched.
        """
        address = self.__hash__(key)
        bucket = self.data[address]
        if bucket is None:
            return None
        index = self.__keyIndexLink__(address, key)
        if index is not None:
            return bucket[index][1]
        return None

    def keys(self):
        """Return every stored key, in bucket order."""
        lst = []
        for bucket in self.data:
            if bucket is None:
                continue
            for entry in bucket:
                lst.append(entry[0])
        return lst
# Demo: exercise the hash table with a single key.
tbl = hashTable(50)
tbl.set("hello", "hey there")
print(tbl.keys())
print(tbl.get("hello"))
# Explicit call to the (nonstandard) keyed __hash__ helper.
print(tbl.__hash__("hello"))
import math
class node():
    """A linked-list cell holding one value.

    `next` always exists; `prev` is only created when doubly=True, so
    singly-linked nodes have no prev attribute at all.
    """
    def __init__(self, value, doubly = False):
        self.value, self.next = value, None
        if doubly:
            self.prev = None
class linkedList():
    """Singly- or doubly-linked list (doubly=True) with head/tail pointers
    and a cached length. Indexes are 0-based."""
    def __init__(self, value, doubly = False):
        self.doubly = doubly
        self.head = node(value, doubly)
        self.tail = self.head
        self.length = 1
    def append(self, value):
        """Add value at the tail. O(1)."""
        newNode = node(value, self.doubly)
        if self.doubly:
            newNode.prev = self.tail
        self.tail.next = newNode
        self.tail = newNode
        self.length += 1
    def prepend(self, value):
        """Add value at the head. O(1)."""
        newNode = node(value, self.doubly)
        if self.doubly:
            self.head.prev = newNode
        newNode.next = self.head
        self.head = newNode
        self.length += 1
    def values(self):
        """Return all stored values, head to tail, as a list."""
        values = []
        currentNode = self.head
        for _ in range(self.length):
            values.append(currentNode.value)
            currentNode = currentNode.next
        return values
    def insert(self, value, index):
        """Insert value so it ends up at position `index` (0..length)."""
        if index > self.length :
            raise Exception(f"Index out of range. Last index is {self.length}")
        elif index < 0:
            raise Exception("Can't have negative numbers as an index")
        elif index == 0:
            self.prepend(value)
            return
        elif index == self.length:
            self.append(value)
            return
        currentNode = self.head
        for i in range(self.length):
            if i == index - 1:
                # NOTE(review): node(value) is created without the doubly
                # flag, so in a doubly list the inserted node has no prev
                # attribute of its own until assigned below — confirm.
                newNode = node(value)
                if self.doubly:
                    newNode.prev = currentNode
                    currentNode.next.prev = newNode
                newNode.next = currentNode.next
                currentNode.next = newNode
                self.length += 1
                break
            currentNode = currentNode.next
    def find(self, value, all = False):
        """Return a list of indexes holding `value` (first match only
        unless all=True). Note: `all` shadows the builtin."""
        indexes = []
        currentNode = self.head
        for i in range(self.length):
            if currentNode.value == value:
                indexes.append(i)
                if not all:
                    break
            currentNode = currentNode.next
        return indexes
    def remove(self, index):
        """Remove the node at `index`.

        NOTE(review): after handling index == 0 the code falls through to
        the scan below (harmless: index - 1 == -1 never matches), and
        removing the last node does not update self.tail — confirm whether
        stale-tail behaviour is acceptable.
        """
        if index > self.length - 1:
            raise Exception(f"Index out of range. Last index is {self.length - 1}")
        elif index == 0:
            self.head = self.head.next
            if self.doubly:
                self.head.prev = None
            self.length -= 1
        elif index < 0:
            raise Exception("Can't have negative numbers as an index")
        # Pick the cheaper traversal direction for doubly lists.
        end = False
        if index >= math.floor(self.length / 2):
            end = True
        if end and self.doubly:
            currentNode = self.tail
            for i in range (self.length -1, 1, -1):
                if index + 1 == i:
                    currentNode.prev.prev.next = currentNode
                    currentNode.prev = currentNode.prev.prev
                    self.length -= 1
                    break
                currentNode = currentNode.prev
        else:
            currentNode = self.head
            for i in range(self.length - 1):
                if index - 1 == i:
                    currentNode.next = currentNode.next.next
                    self.length -= 1
                    break
                currentNode = currentNode.next
    def removeValue(self, value, all = False):
        """Remove the first node holding `value` (every match if all=True)."""
        lastNode = None
        currentNode = self.head
        valueRemoved = False
        while currentNode != None:
            if self.head.value == value:
                self.head = self.head.next
                if self.doubly:
                    self.head.prev = None
                self.length -= 1
                valueRemoved = True
            elif currentNode.value == value:
                lastNode.next = currentNode.next
                if self.doubly:
                    currentNode.next.prev = lastNode
                self.length -= 1
                valueRemoved = True
            if valueRemoved and all == False:
                break
            lastNode = currentNode
            currentNode = currentNode.next
    def reverse(self):
        """Reverse the list in place, swapping head and tail."""
        if self.head.next == None:
            return
        lastNode = self.head
        currentNode = lastNode.next
        for _ in range(self.length):
            if currentNode == None:
                break
            nextNode = currentNode.next
            if self.doubly:
                currentNode.prev = nextNode
                lastNode.prev = currentNode
            currentNode.next = lastNode
            lastNode = currentNode
            currentNode = nextNode
            if nextNode == None:
                break
            # NOTE(review): this assignment repeats the one five lines up;
            # it looks redundant — confirm before removing.
            currentNode = nextNode
        self.head.next = None
        self.tail = self.head
        self.head = lastNode
# Demo: exercise the doubly linked list.
lst = linkedList(10, True)
lst.append(5)
lst.append(4)
lst.append(3)
lst.append(2)
lst.append(1)
lst.append(0)
lst.prepend(11)
print(lst.values())
lst.insert(19, 3)
lst.remove(7)
print(lst.values())
class node():
    """A singly-linked cell used as stack storage."""
    def __init__(self, value):
        self.value = value
        self.next = None


class stack():
    """LIFO stack over a singly linked list.

    An empty stack is represented by a single node whose value is None,
    which means None itself cannot be stored as an element.
    """
    def __init__(self, value = None):
        self.top = node(value)
        self.length = 0 if self.top.value is None else 1

    def push(self, value):
        """Place value on top of the stack."""
        if self.top.value is None:
            self.top.value = value
        else:
            fresh = node(value)
            fresh.next = self.top
            self.top = fresh
        self.length += 1

    def peek(self):
        """Return the top value without removing it (None when empty)."""
        return self.top.value

    def pop(self):
        """Remove and return the top value; raise when the stack is empty."""
        if self.top.value is None:
            raise Exception("Stack is empty")
        value = self.top.value
        if self.top.next is None:
            self.top.value = None
        else:
            self.top = self.top.next
        self.length -= 1
        return value
class node():
    """A singly-linked cell used as queue storage."""
    def __init__(self, value):
        self.value = value
        self.next = None


class queue():
    """FIFO queue over a singly linked list with first/last pointers.

    An empty queue is a single node whose value is None, so None itself
    cannot be stored as an element.
    """
    def __init__(self, value = None):
        self.first = node(value)
        self.last = self.first
        self.length = 0 if value is None else 1

    def peek(self):
        """Return the front value without removing it (None when empty)."""
        return self.first.value

    def enqueue(self, value):
        """Append value at the back of the queue."""
        if self.first.value is None:
            self.first.value = value
        else:
            fresh = node(value)
            self.last.next = fresh
            self.last = fresh
        self.length += 1

    def dequeue(self):
        """Remove and return the front value; raise when the queue is empty.

        Fix: the original removed the element but never returned it, so
        every dequeue() yielded None.
        """
        if self.first.value is None:
            raise Exception("Queue empty")
        value = self.first.value
        if self.first.next is None:
            self.first.value = None
        else:
            self.first = self.first.next
        self.length -= 1
        return value
class queueStack:
    """A FIFO queue built from two LIFO stacks: stack1 always keeps the
    oldest element on top, so pop/peek are O(1) and push is O(n)."""
    def __init__(self):
        self.stack1 = stack()
        self.stack2 = stack()

    def stackToStack(self, fromStack, toStack):
        """Pour every element of fromStack onto toStack (reverses order)."""
        for _ in range(fromStack.length):
            toStack.push(fromStack.pop())

    def push(self, value):
        """Enqueue value: empty stack1 into stack2, push, then pour back."""
        self.stackToStack(self.stack1, self.stack2)
        self.stack1.push(value)
        self.stackToStack(self.stack2, self.stack1)

    def pop(self):
        """Dequeue and return the oldest element."""
        return self.stack1.pop()

    def peek(self):
        """Return the oldest element without removing it."""
        return self.stack1.peek()

    def empty(self):
        """True when the queue holds no elements."""
        return self.stack1.length == 0
class node():
    """Binary-tree node; value defaults to None so an empty tree can be
    represented by a valueless root."""
    def __init__(self, value = None):
        self.value = value
        self.left = None
        self.right = None


class binaryTree():
    """Binary search tree (duplicates ignored). An empty tree is a root
    node whose value is None."""
    def __init__(self):
        self.root = node()

    def insert(self, value):
        """Insert value in BST position; duplicates are silently ignored."""
        if self.root.value is None:
            self.root.value = value
            return
        currentNode = self.root
        while currentNode:
            if currentNode.value == value:
                break  # duplicate: nothing to do
            elif value < currentNode.value:
                if currentNode.left is not None:
                    currentNode = currentNode.left
                else:
                    currentNode.left = node(value)
                    break
            else:
                if currentNode.right is not None:
                    currentNode = currentNode.right
                else:
                    currentNode.right = node(value)
                    break

    def lookup(self, value):
        """Return True when value is in the tree, else False."""
        currentNode = self.root
        if currentNode.value is None:
            return False
        while currentNode:
            if currentNode.value == value:
                return True
            currentNode = currentNode.left if value < currentNode.value else currentNode.right
        return False

    def remove(self, value):
        """Delete value from the tree if present.

        Fixes over the original:
        - removing the root when it had no right child only rebound a
          local variable, so the delete was a silent no-op;
        - a deleted node that was its parent's *left* child was
          re-attached to the parent's *right* pointer, corrupting the
          tree (e.g. removing 5 from 10 -> 5 -> 3).
        """
        if self.root.value is None:
            return
        parent = None
        currentNode = self.root
        while currentNode:
            if currentNode.value == value:
                if currentNode.right is None:
                    # Promote the left subtree (possibly empty).
                    if parent is None:
                        self.root = currentNode.left if currentNode.left else node()
                    elif parent.left is currentNode:
                        parent.left = currentNode.left
                    else:
                        parent.right = currentNode.left
                elif currentNode.right.left is None:
                    # The right child is the in-order successor.
                    currentNode.value = currentNode.right.value
                    currentNode.right = currentNode.right.right
                else:
                    # Successor is the leftmost node of the right subtree.
                    successorParent = currentNode.right
                    successor = currentNode.right.left
                    while successor.left:
                        successorParent = successor
                        successor = successor.left
                    currentNode.value = successor.value
                    successorParent.left = successor.right
                return
            elif value < currentNode.value:
                parent = currentNode
                currentNode = currentNode.left
            else:
                parent = currentNode
                currentNode = currentNode.right
class graph():
    """Undirected graph stored as an adjacency list {vertex: [neighbours]}."""
    def __init__(self):
        self.nodes = 0
        self.adjList = {}

    def addVertex(self, node):
        """Register a new vertex; raise if it already exists."""
        if node in self.adjList:
            raise Exception("Node already in graph")
        self.adjList[node] = []
        self.nodes += 1

    def addEdge(self, node1, node2):
        """Connect node1 and node2 in both directions, auto-creating any
        missing vertex.

        Fix: the original checked `node1 not in self.adjList` but then
        called addVertex(node2), so any edge between two new vertices
        raised "Node already in graph" and node1 was never registered.
        """
        if node1 not in self.adjList:
            self.addVertex(node1)
        if node2 not in self.adjList:
            self.addVertex(node2)
        self.adjList[node1].append(node2)
        self.adjList[node2].append(node1)

    def showConnections(self):
        """Return a printable summary, one 'X is connected to - a,b' line
        per vertex (trailing comma trimmed)."""
        keys = self.adjList.keys()
        out = ""
        for i in keys:
            txt = f"{i} is connected to - "
            for j in self.adjList[i]:
                txt += f"{j},"
            out += (f"{txt[:len(txt) - 1]}\n")
        return out
def factorialRecursive(num):
    """Return num! recursively.

    Fix: the base case was `num == 2`, so factorialRecursive(1) and
    factorialRecursive(0) recursed until RecursionError; 0! and 1! are 1.
    """
    if num <= 1:
        return 1
    return num * factorialRecursive(num - 1)
def factorialIterative(num):
    """Return num! iteratively.

    Fix: the original seeded `total = num * (num - 1)`, which returns 0
    for num == 0 and num == 1 instead of 1.
    """
    total = 1
    for i in range(2, num + 1):
        total *= i
    return total
def fibb(n):
    """n-th Fibonacci number (fibb(0) == 0, fibb(1) == 1), by naive
    exponential-time recursion."""
    return n if n <= 1 else fibb(n - 1) + fibb(n - 2)
def fibbIter(n):
    """n-th Fibonacci number iteratively (fibbIter(0) == 0).

    Fixes: the original returned 1 for n == 0, and grew a full list of
    every Fibonacci number; two rolling variables are O(1) memory.
    """
    if n == 0:
        return 0
    previous, current = 0, 1
    for _ in range(2, n + 1):
        previous, current = current, previous + current
    return current
print(fibbIter(5))
[0,1,2,3,4,5,6,7,8]
[0,1,1,2,3,5,8,13,21]
def fibb(n, nums = None):
    """Advance the Fibonacci pair `nums` n steps and return its second
    entry; with the default start [0, 1], fibb(5) == 8 (matching the
    original's first-call output).

    Fixes: the original used a mutable default list that kept its state
    between calls (every call returned a different value), and the
    recursive call dropped the `nums` argument entirely, relying on that
    shared state by accident.
    """
    if nums is None:
        nums = [0, 1]
    if n == 0:
        return nums[1]
    return fibb(n - 1, [nums[1], nums[0] + nums[1]])
print(fibb(5))
snt = "Hello there"
def revString(snt):
    """Return snt reversed, via head-chopping recursion."""
    if not snt:
        return ""
    return revString(snt[1:]) + snt[0]
nums = [99, 44, 6, 2, 1, 5, 63, 87, 283, 4, 0]
def bubble(arr):
    """Sort arr ascending, in place (bubble sort). Returns None.

    Fixes: the original indexed arr[y + 1] unconditionally, crashing with
    IndexError on single-element lists; this version also stops early
    once a full pass makes no swap.
    """
    length = len(arr)
    if length < 2:
        return
    for _ in range(length):
        swapped = False
        for j in range(length - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break
nums = [99, 44, 6, 2, 1, 5, 63, 87, 283, 4, 0]
def selection(arr):
    """Sort arr ascending, in place (selection sort). Returns None.

    Fix: the original never reset the running-minimum index when a new
    pass started, so a stale index could swap an already-placed element
    back out of position (e.g. selection([2, 1, 3]) produced [1, 3, 2]).
    """
    for i in range(len(arr)):
        smallest = i  # restart the minimum search for this pass
        for j in range(i + 1, len(arr)):
            if arr[j] < arr[smallest]:
                smallest = j
        arr[i], arr[smallest] = arr[smallest], arr[i]
nums = [99, 44, 6, 2, 1, 5, 63, 87, 283, 4, 0]
def insertion(arr):
    """Sort arr ascending, in place (insertion sort). Returns None.

    Fix: rewritten — the original's index bookkeeping never placed the
    first element (insertion([2, 1]) returned with the list unsorted)
    and could shuffle elements past their slot on longer inputs.
    """
    for i in range(1, len(arr)):
        current = arr[i]
        j = i - 1
        # Shift everything larger than `current` one slot right.
        while j >= 0 and arr[j] > current:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = current
nums = [99, 44, 6, 2, 1, 5, 63, 87, 283, 4, 0]
def mergeSort(arr):
    """Return a new ascending-sorted list with the elements of arr.

    Fix: removed a leftover debug print of the sublist length that fired
    on every recursive call.
    """
    if len(arr) <= 1:
        return arr
    middle = len(arr) // 2
    return merge(mergeSort(arr[:middle]), mergeSort(arr[middle:]))


def merge(left, right):
    """Merge two already-sorted lists into one sorted list.

    Fix: the original appended a None sentinel onto both inputs, mutating
    the caller's lists; this version only reads them, using index bounds.
    """
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One of the two is exhausted; the other's tail is already sorted.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
| Kra1ven/Algorithms-Structures-Dump | test.py | test.py | py | 14,607 | python | en | code | 0 | github-code | 13 |
7790960949 | import yfinance as yf
import pandas as pd
import numpy as np
import datetime as dt
import tensorflow as tf
from tensorflow.keras.models import load_model
import requests
def Zero_One_Scale(df):
    """Min-max scale df (Series or column-wise DataFrame) into [0, 1]."""
    lowest = df.min()
    return (df - lowest) / (df.max() - lowest)
def One_One_Scale(df):
    """Min-max scale df (Series or column-wise DataFrame) into [-1, 1]."""
    zero_one = (df - df.min()) / (df.max() - df.min())
    return 2 * zero_one - 1
def Normalize(df):
    """Z-score normalise df column-wise (zero mean, unit std, sample std).

    Fix: the original assigned the result to a local and fell off the end
    of the function, so every call returned None.
    """
    return (df - df.mean(axis=0)) / df.std(axis=0)
def RSI(x):
    """Relative Strength Index (0-100) for a window of price changes.

    All-gain windows return 100, all-loss windows return 0; otherwise
    100 * avg_gain / (avg_gain + avg_loss).
    """
    gains = [change for change in x if change > 0]
    losses = [change for change in x if change <= 0]
    if not losses:
        return 100
    if not gains:
        return 0
    avg_gain = sum(gains) / len(gains)
    avg_loss = -sum(losses) / len(losses)
    return 100 * avg_gain / (avg_gain + avg_loss)
def SlowK(x):
    """Stochastic %K for a price window: where the last price sits within
    the window's min-max range (0.0 at the low, 1.0 at the high)."""
    lowest, highest = min(x), max(x)
    return (x[-1] - lowest) / (highest - lowest)
def make_prediction(df, model):
    """Build a 24-feature technical-indicator matrix from an OHLCV frame
    and run `model` over sliding 30-row windows.

    @param df: price DataFrame with Open/High/Low/Close/Adj Close/Volume
               columns  # assumes yfinance daily layout -- confirm
    @param model: model whose predict() accepts input shaped (1, 30, 24)
    @return: list of per-row predictions, front-padded with np.nan for the
             30-row warm-up plus every row dropped by the indicator dropna
    """
    df = df.copy()
    before_len = len(df)
    # Targets (next-day log return / direction); not fed to the model below.
    df['return'] = np.log(df['Adj Close'] / df['Adj Close'].shift(1))
    df['direction'] = np.where(df['return'] > 0, 1, -1)
    df['direction'] = df['direction'].shift(-1)
    df['return'] = One_One_Scale(df['return'])
    df['return'] = df['return'].shift(-1)
    # feature calculation
    # basic information
    df['price-change'] = df['Adj Close'] - df['Adj Close'].shift(1)
    df['price-change-percentage'] = df['Adj Close'] / df['Adj Close'].shift(1)
    df['volume'] = Zero_One_Scale(df['Volume'])
    df['amount'] = df['Adj Close'] * df['Volume']
    df['amount'] = Zero_One_Scale(df['amount'])
    # simple moving average
    # NOTE(review): named sma10 but computed over a 30-day window -- confirm intent.
    df['sma10'] = df['Adj Close'].rolling(30).mean()
    df['sma10-FP'] = (df['sma10'] - df['sma10'].shift(1)) / df['sma10'].shift(1)
    df['sma10-FP'] = One_One_Scale(df['sma10-FP'])
    df['sma10'] = Zero_One_Scale(df['sma10'])
    # Moving Average Convergence Divergence
    df['macd'] = df['Adj Close'].rolling(12).mean() - df['Adj Close'].rolling(26).mean()
    df['macd-SG'] = df['macd'].rolling(9).mean()
    df['macd-histogram'] = df['macd'] - df['macd-SG']
    df['macd-histogram'] = np.where(df['macd-histogram'] > 0, 1, -1)
    df['macd-SG'] = np.where(df['macd-SG'] > 0, 1, -1)
    df['macd'] = np.where(df['macd'] > 0, 1, -1)
    # Commodity Channel Index in 24 days
    df['typical-price'] = (df['High'] + df['Low'] + df['Close']) / 3
    df['sma-cci'] = df['typical-price'].rolling(24).mean()
    df['mean-deviation'] = np.abs(df['typical-price'] - df['sma-cci'])
    df['mean-deviation'] = df['mean-deviation'].rolling(24).mean()
    df['cci'] = (df['typical-price'] - df['sma-cci']) / (0.015 * df['mean-deviation'])
    df['cci-SG'] = np.where(df['cci'] > 0, 1, -1)
    # MTM 10
    df['mtm10'] = df['Adj Close'] - df['Adj Close'].shift(10)
    df['mtm10'] = np.where(df['mtm10'] > 0, 1, -1)
    # Rate of Change in 10 days
    df['roc'] = (df['Adj Close'] - df['Adj Close'].shift(10)) / df['Adj Close'].shift(10)
    df['roc-SG'] = np.where(df['roc'] > 0, 1, -1)
    df['roc-FP'] = (df['roc'] - df['roc'].shift(1))
    df['roc-FP'] = One_One_Scale(df['roc-FP'])
    # Relative Strength Index in 5 days
    df['rsi'] = df['price-change'].rolling(5).apply(RSI) / 100
    df['rsi-FP'] = (df['rsi'] - df['rsi'].shift(1))
    df['rsi-FP'] = One_One_Scale(df['rsi-FP'])
    # Slow K and Slow D
    df['slow-k'] = df['Adj Close'].rolling(14).apply(SlowK)
    df['slow-d'] = df['slow-k'].rolling(14).mean()
    df['slow-k-FP'] = df['slow-k'] - df['slow-k'].shift(1)
    df['slow-d-FP'] = df['slow-d'] - df['slow-d'].shift(1)
    df['slow-k'] = Zero_One_Scale(df['slow-k'])
    df['slow-d'] = Zero_One_Scale(df['slow-d'])
    df['slow-k-FP'] = One_One_Scale(df['slow-k-FP'])
    df['slow-d-FP'] = One_One_Scale(df['slow-d-FP'])
    # ADOSC
    df['adosc'] = ((2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low'])) * df['Volume']
    df['adosc'] = df['adosc'].cumsum()
    df['adosc-ema3'] = df['adosc'].ewm(span=3, adjust=False).mean()
    df['adosc-ema10'] = df['adosc'].ewm(span=10, adjust=False).mean()
    df['adosc-SG'] = np.where((df['adosc-ema3'] - df['adosc-ema10']) > 0, 1, -1)
    df['adosc'] = Zero_One_Scale(df['adosc'])
    # AR 26
    hp_op = (df['High'] - df['Open']).rolling(26).sum()
    op_lp = (df['Open'] - df['Low']).rolling(26).sum()
    df['ar26'] = hp_op / op_lp
    df['ar26'] = Zero_One_Scale(df['ar26'])
    # BR 26
    hp_cp = (df['High'] - df['Close']).rolling(26).sum()
    cp_lp = (df['Close'] - df['Low']).rolling(26).sum()
    df['br26'] = hp_cp / cp_lp
    df['br26'] = Zero_One_Scale(df['br26'])
    # VR 26
    # NOTE(review): VR 26 is announced above but never computed -- confirm.
    # BIAS 20
    sma20 = df['Adj Close'].rolling(20).mean()
    df['bias20'] = (df['Adj Close'] - sma20) / sma20
    df['bias20'] = np.where(df['bias20'] > 0, 1, -1)
    df['price-change'] = One_One_Scale(df['price-change'])
    df['price-change-percentage'] = One_One_Scale(df['price-change-percentage'])
    # drop row contains NaN
    df.dropna(inplace=True)
    after_len = len(df)
    cols = ['return', 'price-change', 'price-change-percentage', 'volume', 'amount', 'sma10', 'sma10-FP',
    'macd', 'macd-SG', 'macd-histogram', 'cci-SG', 'mtm10', 'roc-SG', 'roc-FP', 'rsi', 'rsi-FP', 'slow-k', 'slow-d',
    'slow-k-FP', 'slow-d-FP', 'adosc', 'adosc-SG', 'ar26', 'br26', 'bias20']
    # Normalization
    X = df.copy()[cols]
    y = X.pop('return')
    float_cols = [i for i in X.columns if X[i].dtype == float]
    int_cols = [i for i in X.columns if X[i].dtype == int]
    X[float_cols] = (X[float_cols] - X[float_cols].mean(axis=0)) / X[float_cols].std(axis=0)
    # make prediction
    # Pad for the 30-row warm-up plus the rows removed by dropna above.
    count = 0
    result = [np.nan for i in range(30 + (before_len - after_len))]
    for i in range(30,len(df)):
        data = X.iloc[count:i].values
        data = np.reshape(data, (1, 30, 24))
        # Note: `y` is rebound here from the target series to the model output.
        y = model.predict(data)
        count += 1
        result.append(y[0][-1])
    return result
def lstm(symbol):
    """Download ~120 days of daily prices for `symbol`, run the saved LSTM,
    and return the latest prediction as a string.

    Returns the string 'None' when no prediction is available (str(None));
    network and model-file access make this unusable offline.
    """
    start_day = dt.datetime.now() - dt.timedelta(days=120)
    df = yf.download(symbol, start=start_day, end=dt.datetime.now(), interval='1d')
    # Model file is loaded from the working directory on every call.
    model = load_model('./model.h5')
    prediction = make_prediction(df, model)
    response = prediction[-1] if len(prediction) > 0 else None
    return str(response)
12984632912 | import pandas as pd
import matplotlib.pyplot as plt
import yfinance as yf
import csv
# Date range for the yfinance history download.
start = '2016-01-01'
end = '2019-01-01'

# Load ticker symbols: every CSV row read overwrites stocklist, so the
# last row of stock_Name.csv is the list actually used (original behaviour).
stocklist = []
with open('stock_Name.csv','r') as f:
    reader = csv.reader(f)
    for name in reader:
        stocklist = name
print(stocklist)

for x in stocklist:  # fix: the original iterated the leaked loop variable `name`
    try:
        data = yf.download(x,start,end)
        data.Close.plot()
        data.to_csv('data/'+x+'.csv',encoding='utf-8', index=False)
    except Exception:  # fix: a bare except also swallowed KeyboardInterrupt/SystemExit
        print('no data for ' + x + ' found!')  # fix: message was missing spaces around the ticker
plt.show()
| TheGamlion/Stock_RNN | getData.py | getData.py | py | 784 | python | en | code | 0 | github-code | 13 |
3324350556 | import sys,getopt
sys.path.insert(0, "../")
from core.preprocessing.data_loader import load_dataset,DATASET
from core.preprocessing import DataUtil
from core.model.rule_models.invariant_model import InvariantRuleModel,PredicateMode
from sklearn.metrics import roc_auc_score
from core.preprocessing.signals import ContinuousSignal
import json
import numpy as np
class NpEncoder(json.JSONEncoder):
    """JSON encoder that maps numpy scalars and arrays onto native Python
    int / float / list so they serialise cleanly."""
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else: defer to the base class (raises TypeError).
        return super().default(obj)
if __name__ == "__main__":
    # CLI entry: parse dataset/mode/theta/gamma flags, train (or reload) the
    # invariant-rule model, and report AUC / partial AUC on the test split.
    argv = sys.argv[1:]
    try:
        opts, args = getopt.getopt(argv, "d:m:t:g:r",["dataset=","mode=","theta=","gamma=","reproduce"])
    except:
        # NOTE(review): bare except — intended to catch getopt.GetoptError;
        # it also swallows SystemExit/KeyboardInterrupt. Confirm narrowing.
        print(u'python main_ir.py --dataset <dataset> --mode <mode> --theta <theta> --gamma <gamma> --reproduce')
        sys.exit(2)
    # Defaults: DTImpurity predicates, train from scratch.
    ds = None
    gamma = None
    theta = None
    load_saved = False
    mode = PredicateMode.DTImpurity
    for opt, arg in opts:
        if opt in ['-d','--dataset']:
            # Unknown dataset strings fall through silently, leaving ds None
            # (caught by the "please specify dataset!" check below).
            if arg.lower() == 'swat':
                ds = DATASET.SWAT
            elif arg.lower() == 'batadal':
                ds = DATASET.BATADAL
            elif arg.lower() == 'kddcup99':
                ds = DATASET.KDDCup99
            elif arg.lower() == 'gaspipeline':
                ds = DATASET.GasPipeline
            elif arg.lower() == 'annthyroid':
                ds = DATASET.Annthyroid
            elif arg.lower() == 'cardio':
                ds = DATASET.Cardio
        elif opt in ['-m','--mode']:
            if arg.lower() == 'uniformbins':
                mode = PredicateMode.UniformBins
            elif arg.lower() == 'kmeansbins':
                mode = PredicateMode.KMeansBins
            elif arg.lower() == 'dtimpurity':
                mode = PredicateMode.DTImpurity
            else:
                print(u'please specify right mode')
                sys.exit(2)
        elif opt in ['-t','--theta']:
            theta = float(arg)
        elif opt in ['-g','--gamma']:
            gamma = float(arg)
        elif opt in ['-r','--reproduce']:
            load_saved = True
        else:
            print(u'python main_ir.py --dataset <dataset> --mode <mode> --theta <theta> --gamma <gamma> --reproduce')
            sys.exit(2)
    if ds is None:
        print(u'please specify dataset!')
        sys.exit(2)
    # theta/gamma are only required when training (not when reproducing).
    if load_saved == False:
        if theta is None or gamma is None:
            print(u'please specify theta and gamma!')
            sys.exit(2)
    print(ds)
    train_df,test_df,test_labels,signals = load_dataset(ds)
    print('train size',len(train_df))
    print('test size',len(test_df))
    # Count continuous vs categorical signals for the summary printout.
    num_cont, num_cate = 0, 0
    for signal in signals:
        if isinstance(signal, ContinuousSignal):
            num_cont += 1
        else:
            num_cate += 1
    print(f'number of continuous signals: {num_cont}')
    print(f'number of categorical signals: {num_cate}')
    print('anomaly ratio:',list(test_labels).count(1)/len(test_labels))
    du = DataUtil(signals,scaling_method=None)
    train_df = du.normalize_and_encode(train_df)
    test_df = du.normalize_and_encode(test_df)
    irm = InvariantRuleModel(signals,mode)
    if load_saved == False:
        irm.train(train_df, max_predicates_per_rule=5, gamma=gamma, theta=theta,use_multicore=True)
        # irm.save_model('../results/'+str(mode), str(ds))
    else:
        irm.load_model('../results/'+str(mode), str(ds))
    num_rules = irm.get_num_rules()
    print('num rules',num_rules)
    anomaly_scores = irm.predict(test_df,use_boundary_rules=True,use_cores=1)
    auc = roc_auc_score(test_labels[:len(anomaly_scores)],anomaly_scores)
    pauc = roc_auc_score(test_labels[:len(anomaly_scores)],anomaly_scores,max_fpr=0.1)
    print('AUC',auc)
    print('pAUC',pauc)
    print()
    # Optional SWaT-only anomaly-explanation dump (kept disabled).
    # if ds == DATASET.SWAT:
    #     print('---check violated rules for anomaly segments in SWAT---')
    #     segments = []
    #     for i in range(1,len(test_labels)):
    #         if test_labels[i] == 1 and test_labels[i-1] == 0:
    #             sidx = i
    #         if test_labels[i] == 0 and test_labels[i-1] == 1:
    #             segments.append((sidx,i-1))
    #     print('num segs',len(segments))
    #     print(segments)
    #
    #     ret_json = {}
    #     ret_json['abnormal_segments'] = []
    #     for seg in segments:
    #         print(seg)
    #         seg_dict = {'start_index':seg[0], 'end_index':seg[1]}
    #         anomaly_df = test_df.loc[seg[0]:seg[1]+1,:]
    #         exp = irm.get_anomaly_explanation(anomaly_df, use_boundary_rules=True)
    #         causes = []
    #         for exp_item in exp.summary():
    #             causes.append( {'feature':exp_item[0],'probability':exp_item[1],'violated_rule':exp_item[2],'involved_features':exp_item[4],'violated_locations':exp_item[3]} )
    #         seg_dict['causes'] = causes
    #         ret_json['abnormal_segments'].append(seg_dict)
    #         # print(exp.summary())
    #         # print()
    #     with open("../results/abnormal_segments.json", "w") as outfile:
    #         json.dump(ret_json, outfile,cls=NpEncoder)
    #     print('violated rules saved in ../results/abnormal_segments.json')
| NSIBF/InvariantRuleAD | experiments/main_ir.py | main_ir.py | py | 5,500 | python | en | code | 1 | github-code | 13 |
9063390233 | import sys
# Fast stdin reads: shadow the builtin input with sys.stdin.readline
# (int()/split() tolerate the trailing newline).
input = sys.stdin.readline
# Read n (count of numbers), m (number of updates), k (number of range-sum queries)
n, m, k = map(int, input().split())
number = [0] * 1000001
s = [0] * 1000001
# Read the n numbers and build prefix sums: s[i] = number[1] + ... + number[i]
for i in range(1, n + 1):
    number[i] = int(input())
    s[i] = s[i - 1] + number[i]
# Updates applied after the prefix sums were built; (x, y) means the
# x-th number changed by a delta of y.
# NOTE(review): each query rescans this whole list, O(m) per query — a
# Fenwick/segment tree is the usual O(log n) solution for this problem.
changed = []
for _ in range(m + k):
    # Read one operation: a == 1 is an update, a == 2 is a range-sum query
    a, b, c = map(int, input().split())
    # Update operation
    if a == 1:
        # Record the delta: the b-th number changes by c - number[b]
        changed.append((b, c - number[b]))
        # Keep number[b] current so later deltas are computed correctly
        number[b] = c
    else:
        # Without any updates the range sum would be s[c] - s[b - 1]
        right = s[c]
        left = s[b - 1]
        # Re-apply every recorded delta that falls inside each prefix
        for ch in changed:
            # A delta at position ch[0] affects every prefix sum s[i] with i >= ch[0]
            if c >= ch[0]:
                right += ch[1]
            # Same adjustment for the left prefix
            if b - 1 >= ch[0]:
                left += ch[1]
        # Print the corrected range sum
        print(right - left)
| yudh1232/Baekjoon-Online-Judge-Algorithm | 2042 구간 합 구하기.py | 2042 구간 합 구하기.py | py | 1,301 | python | ko | code | 0 | github-code | 13 |
def solution(name):
    """Programmers 'Joystick': minimum moves to type `name` starting from
    all-'A's. Up/down cost per letter is the shorter alphabet distance;
    left/right cost is cursor movement, chosen greedily toward the
    nearest letter still needing presses.

    NOTE(review): nearest-first greedy is not obviously optimal for every
    input (runs like B...AAA...B may need a back-and-forth plan), and the
    rightward scan `after[index+right]` can run past the end of the list
    for some states (the leftward scan wraps via negative indexing, the
    rightward one does not) — confirm against the full test set.
    """
    # Up/down presses needed per position: min(distance from 'A' up,
    # distance wrapping past 'Z' down).
    after=[]
    for i in name:
        after.append(min(ord(i)-ord('A'),ord('Z')-ord(i)+1))
    index=0
    ans=0
    while True:
        # Pay the presses at the cursor, mark the position done.
        ans+=after[index]
        after[index]=0
        if sum(after)==0:
            break
        # Distance to the nearest unfinished position on each side.
        left,right=1,1
        while after[index-left]==0:
            left+=1
        while after[index+right]==0:
            right+=1
        # Move toward the closer side (negative index emulates wrap-around).
        if left<right:
            ans+=left
            index-=left
        else:
            ans+=right
            index+=right
    return ans
print(solution("JEROEN")) | BlueScreenMaker/333_Algorithm | 백업/220604~230628/Programmers/조이스틱.py | 조이스틱.py | py | 557 | python | en | code | 0 | github-code | 13 |
71139731537 | """Added relationship
Revision ID: 10faf4b216d0
Revises: 85a6bc37561d
Create Date: 2020-12-16 13:15:41.124953
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '10faf4b216d0'
down_revision = '85a6bc37561d'
branch_labels = None
depends_on = None
def upgrade():
    """Create the plural `events` table, drop the old singular `event`
    table, and point donors at events via a new FK column.

    NOTE(review): autogenerated migration — data in the old `event` table
    is NOT copied into `events`; confirm that is acceptable.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('events',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=False),
    sa.Column('event_pic_path', sa.String(), nullable=True),
    sa.Column('description', sa.Text(), nullable=False),
    sa.Column('category', sa.String(length=255), nullable=False),
    sa.Column('value', sa.String(length=255), nullable=False),
    sa.Column('time', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_events_category'), 'events', ['category'], unique=False)
    op.drop_index('ix_event_category', table_name='event')
    op.drop_table('event')
    op.add_column('donors', sa.Column('event_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'donors', 'events', ['event_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``donors.event_id`` link, recreate the
    original singular ``event`` table, and drop ``events``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint(None, ...) relies on an unnamed FK; some
    # backends cannot resolve it -- consider giving the constraint a name in
    # upgrade(). TODO confirm on the target database.
    op.drop_constraint(None, 'donors', type_='foreignkey')
    op.drop_column('donors', 'event_id')
    op.create_table('event',
    sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
    sa.Column('title', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
    sa.Column('event_pic_path', sa.VARCHAR(), autoincrement=False, nullable=True),
    sa.Column('description', sa.TEXT(), autoincrement=False, nullable=False),
    sa.Column('category', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
    sa.Column('value', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
    sa.Column('time', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
    sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='event_user_id_fkey'),
    sa.PrimaryKeyConstraint('id', name='event_pkey')
    )
    op.create_index('ix_event_category', 'event', ['category'], unique=False)
    op.drop_index(op.f('ix_events_category'), table_name='events')
    op.drop_table('events')
    # ### end Alembic commands ###
| Ketsia-a/Charity-online | migrations/versions/10faf4b216d0_added_relationship.py | 10faf4b216d0_added_relationship.py | py | 2,598 | python | en | code | 0 | github-code | 13 |
40313023295 |
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
os.chdir(r'C:\Users\Goo\Desktop\Personal Learning\OpenCV_Tutorial')
from utils.mytool import stackImages,getContours
#%% 1 - Displaying image
img = cv2.imread('resources/lena.png')
cv2.imshow('output', img)
cv2.waitKey(0)
#%% 1 - Displaying video
cap = cv2.VideoCapture("resources/Wind turbine.mp4")
while True:
success, img = cap.read()
cv2.imshow("Video",img)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
#%% 1 - Displaying webcam
cap = cv2.VideoCapture(0)
cap.set(3,640) # cv2.CAP_PROP_FRAME_WIDTH == 3
cap.set(4,480)
cap.set(10,100)
while True:
success, img = cap.read()
cv2.imshow("Video",img)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
#%% 2 - Converting to Gray Image/ Blurring / Edge detector(Canny)/ Dilation/ Erosion
img = cv2.imread("resources/lena.png")
kernel = np.ones((5,5),dtype=np.uint8)
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (7,7), 0)
imgCanny = cv2.Canny(img,150,200)
imgDilate = cv2.dilate(imgCanny, kernel, iterations=1)
imgEroded = cv2.erode(imgDilate, kernel, iterations=1)
cv2.imshow('Gray',imgGray)
cv2.imshow('Blur',imgBlur)
cv2.imshow('Canny',imgCanny)
cv2.imshow('Dilation',imgDilate)
cv2.imshow('Erosion',imgEroded)
cv2.waitKey(0)
#%% 3 - Resizing and Cropping
img = cv2.imread('resources/house.jpg')
print(img.shape)
imgResize = cv2.resize(img, (400,300))
cv2.imshow('resized',imgResize)
imgCropped = imgResize[0:200,0:300]
cv2.imshow('Cropped',imgCropped)
cv2.waitKey(0)
#%% 4 - Shape and Texts
img = np.zeros([512,512,3], dtype=np.uint8)
img = img.astype(dtype=np.float32)
cv2.imshow('Black',img)
# img[:] = 255,0,0
# cv2.imshow('Blue',img)
cv2.line(img, (0,0), (300,300), color= (0,255,0), thickness=3)
# cv2.line(img, (0,0), (img.shape[1],img.shape[0]), color= (0,255,0), thickness=3)
cv2.imshow('line',img)
cv2.rectangle(img, (0,0), (250,350), color = (0,0,255), thickness=2)
# cv2.rectangle(img, (0,0), (250,350), (0,0,255), cv2.FILLED)
cv2.imshow('Rectangle',img)
cv2.circle(img, (400,50), 30, (255,255,0), 5)
cv2.imshow('Circle',img)
cv2.putText(img, "OpenCV", (300,100), cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 1, (0,150,0), 1)
cv2.imshow('text added',img)
cv2.waitKey(0)
#%% 5 - Warp perspective
img = cv2.imread('resources/house.jpg')
width, height = 150, 300
# Points were manually acquired by opening image with paint app
lt = [582, 820]
rt = [803, 773]
lb = [581, 1136]
rb = [871, 1087]
pts1 = np.float32([lt, rt, lb, rb])
pts2 = np.float32([[0,0], [width,0], [0,height], [width,height]])
matrix = cv2.getPerspectiveTransform(pts1,pts2) # transformation matrix
imgOutput = cv2.warpPerspective(img, matrix, (width,height))
cv2.imshow('Image',img)
cv2.imshow('Wrap Perspective',imgOutput)
cv2.waitKey(0)
#%% 6 - Joining Images
img = cv2.imread('resources/lena.png')
imgHor = np.hstack((img,img))
imgVer = np.vstack((img,img))
cv2.imshow("Horizontal", imgHor)
cv2.imshow("Vertical", imgVer)
if cv2.waitKey(0)==27:
cv2.destroyAllWindows()
#%% 7 - Color detection
def empty(a):
    """No-op trackbar callback; cv2.createTrackbar requires a callable,
    but the script polls the trackbar values itself."""
    return None
cv2.namedWindow(winname = "TrackBars")
cv2.resizeWindow(winname = "TrackBars",
width = 640,
height = 240)
cv2.createTrackbar("Hue Min",
"TrackBars",
85, # initial value
179, # maximum hue value (For OpenCV, it is 0-179)
empty) # need to define a function which gets called on
cv2.createTrackbar("Hue Max", "TrackBars", 102, 179, empty)
cv2.createTrackbar("Sat Min", "TrackBars", 0, 255, empty)
cv2.createTrackbar("Sat Max", "TrackBars", 35, 255, empty)
cv2.createTrackbar("Val Min", "TrackBars", 142, 255, empty)
cv2.createTrackbar("Val Max", "TrackBars", 255, 255, empty)
while True:
img = cv2.imread('resources/house.jpg')
img = cv2.resize(img, (600,400))
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
s_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
print(h_min,h_max,s_min,s_max,v_min,v_max)
lower = np.array([h_min,s_min,v_min])
upper = np.array([h_max,s_max,v_max])
mask = cv2.inRange(imgHSV, lower, upper)
imgRes = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('Image',img)
cv2.imshow('HSV',imgHSV)
cv2.imshow('Mast',mask)
cv2.imshow('Result',imgRes)
cv2.waitKey(1)
#%% 8 - Contours & Shape detection
img = cv2.imread('resources/shapes.png')
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (7,7), 1)
imgCanny = cv2.Canny(imgBlur, 50,50)
imgBlank = np.zeros_like(img)
imgContour = getContours(img, imgCanny)
imgStack = stackImages(0.6, [[img, imgGray, imgBlur],
[imgCanny, imgContour, imgBlank]])
cv2.imshow('Stack',imgStack)
if cv2.waitKey(0)==27:
cv2.destroyAllWindows()
#%% 9 - Face detection
faceCascade = cv2.CascadeClassifier("resources/haarcascade_frontalface_default.xml")
img = cv2.imread('resources/lena.png')
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)
for (x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
cv2.imshow('Result',img)
if cv2.waitKey(0)==27:
cv2.destroyAllWindows() | junseokkim93/OpenCV | OpenCV_Tutorial/OpenCV_Tutorial.py | OpenCV_Tutorial.py | py | 5,782 | python | en | code | 0 | github-code | 13 |
28341398977 | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 3 18:18:18 2021
@author: blah
"""
# originalsTest
# we write out all of the originals
# assuming we are in a landscale A4 paper
# note you have to scale by .15 here
from pathlib import Path
import pickle
# font Proliferate script
from axidrawinternal import axidraw
import os
import svgutils
import sys
import re
import string
import random
import math
from pathlib import Path
from addDim import *
import fixedLists # code for fixed lists of char/paths for project
import createProfile # code for accessing, modifying profiles
import spacingHandler # code for profile spacing load and write
from delimSys import *
def writeSVG(xStart, yStart, letter, profile, lineList):
    """Compose ``lineList`` (a list of text lines) into an SVG using the
    per-character glyph files of ``profile``, then plot it on the AxiDraw.

    :param xStart: x origin of the first character (profile units)
    :param yStart: y origin of the first line
    :param letter: paper format -- one of 'Index', 'Envelope', 'Paper' (A4)
    :param profile: profile directory name under <master>/Profiles/
    :param lineList: iterable of strings, one per output line
    """
    scaleFactor = 1 # using the proliferated ones, already scaled (I think...)
    # takes starting position and set of lines
    # you can think of chunking blocks of texts here.
    # passes the profile character set and spacings to use. Will load from
    # the generation process.
    # letter parameter checks for 3 types. Lets width and height appropriately
    # set the length width params
    if letter == 'Index':
        letterWidth = "15.24cm"
        letterHeight = "10.16cm"
    elif letter == 'Envelope':
        letterWidth = "24.13cm"
        letterHeight = "10.47cm"
    elif letter == 'Paper':
        # assumed to be A4 here
        letterWidth = "29.7cm"
        letterHeight = "21.0cm"
    # NOTE(review): if letter is none of the three values above,
    # letterWidth/letterHeight are never bound and the SVGFigure call
    # below raises NameError -- consider validating the argument.
    #
    ### Build some mappings, and profile paths ###
    # char map
    charMap = fixedLists.charMapping()
    # build some directories
    masterDir = Path(__file__).parents[1]
    profileDir = str(masterDir) + delim + 'Profiles' + delim + profile
    if os.path.isdir(profileDir) == False:
        print('Profile does not exist')
        return None
    GenDir = profileDir + delim + 'Generation'
    # build the spacing dict
    spacingDict = spacingHandler.loadSpacings(profile)
    # print('spacingDict: ')
    # print(spacingDict)
    # folder list gen
    folderList = fixedLists.folderList()  # NOTE(review): appears unused here
    ad = axidraw.AxiDraw()
    figure = svgutils.transform.SVGFigure(width=letterWidth, height = letterHeight)
    # use start as origin points
    xPos = xStart
    yPos = yStart
    for line in lineList:
        charList = list(line)
        for char in charList:
            # check if exist?
            if char in spacingDict:
                charDir = GenDir + delim + charMap[char] + '-' + char + '-'
                if os.path.isdir(charDir):
                    fileList = os.listdir(charDir)
                    if len(fileList) == 0:
                        print('no original files for character: {0} in profile {1}'.format(char, profile))
                        return None
                    # supposing we do have files, get the list of files. random into one of them
                    # NOTE(review): charFileList duplicates fileList (second listdir).
                    charFileList = os.listdir(charDir)
                    charPath = charDir + delim + charFileList[random.randint(0, len(charFileList)-1)]
                    svg = svgutils.transform.fromfile(charPath).getroot()
                    # small random jitter so repeated glyphs look hand-drawn
                    svg.moveto(xPos + random.randint(-20, 0)/40, yPos + random.randint(-20, 20)/40, scaleFactor)
                    figure.append(svg)
                xPos = xPos + spacingDict[char] ##### ADD NOISE HERE #####
            else:
                # unknown character: advance a default width (acts as a space)
                xPos = xPos + 8 + random.randint(-20, 0)/20 ##### ADD NOISE HERE #####
        xPos = xStart + random.randint(-20, 20)/10
        yPos = yPos + 30 + random.randint(-20, 0)/20 # new line ###### ADD NOISE HERE ####
    figure.save('printTemp.svg')
    addDim('printTemp.svg', letter)
    ad.plot_setup('printTemp.svg')
    ad.options.speed_pendown = 115
    ad.options.speed_penup = 115
    ad.options.accel = 100
    ad.options.pen_rate_lower = 100
    ad.options.pen_rate_raise = 100
    ad.plot_run()
# tempDict = spacingHandler.loadSpacings('Zain')
# print(tempDict)
# changes = {'e': 12,
# 'w' : 13,
# 'f': 10,
# 'n': 10,
# 'u': 10,
# 'q': 9,
# 'i': 7,
# 'r': 9,
# 'l': 7,
# 't': 7,
# 'm': 12,
# 'o': 10,
# 'v': 9,
# 'g': 11,
# 'a': 12,
# 'k': 10,
# '0': 12,
# '1': 11,
# '2': 14,
# '3': 12,
# '6': 12,
# '7': 13,
# '5': 12,
# 'J': 14}
# spacingHandler.changeSpacings('Zain', changes)
# writeSVG(0, 0, 'Paper', 'Zain',
# ['quick brown fox jumped over the lazy dog',
# 'quick brown fox jumped over the lazy dog',
# 'quick brown fox jumped over the lazy dog',
# 'Jenny now that I got your number',
# 'I got to make you mine',
# '8675309',
# '8675309']) | ottrp0p/axidraw-python | python/writeSVG.py | writeSVG.py | py | 5,014 | python | en | code | 0 | github-code | 13 |
3014572930 | from skbuild import setup
import os.path as osp
with open('VERSION', 'r') as f:
version = f.read().strip()
with open('README.md', 'r') as f:
long_description = f.read().strip()
setup( name='external_arrow',
version=version,
long_description=long_description,
long_description_content_type="text/markdown",
packages=[ 'external_arrow',
'external_arrow.arrow',
],
platforms=[
'linux',
'Unix'
],
setup_requires=[
'setuptools',
'cmake',
'scikit-build'
],
install_requires=[
'kwiver>=1.4.2',
'diva-framework>=0.0.3'
],
cmake_args=[
'-DCMAKE_BUILD_TYPE=Release',
'-DENABLE_CPP_ARROW=ON',
'-DKWIVER_PYTHON_MAJOR_VERSION=3',
],
cmake_install_dir='external_arrow',
entry_points={
'kwiver.python_plugin_registration':
[
'simple_detector=external_arrow.arrow.test_object_detector',
],
'kwiver.cpp_search_paths':
[
'simple_detector=external_arrow.register_cpp_arrow:get_cpp_path',
]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: Unix",
"License :: OSI Approved :: BSD License"
],
python_requires=">=3.5",
)
| as6520/external_arrow | setup.py | setup.py | py | 2,050 | python | en | code | 1 | github-code | 13 |
29218142691 | import base64
import http.server
import json
import msgpack
import socketserver
import threading
"""
These mock severs are for the v2 indexer/algod path and response tests.
I was unable to get 'before_all' and 'after_all' working anywhere else besides
a file named 'environment.py in this directory. Otherwise all of this would
be in the v2_steps.py file.
"""
def encode_bytes(d):
    """Recursively replace every ``bytes`` value inside nested dicts/lists
    with its base64 text encoding, mutating the containers in place.

    Non-container, non-bytes leaves are left untouched, and a bare
    ``bytes`` argument is returned unchanged (only values *inside*
    containers are rewritten, matching the original behavior).

    :param d: arbitrarily nested dict/list structure (or any other value)
    :return: the same object ``d``, for chaining
    """
    # The dict and list branches previously duplicated the whole
    # dispatch logic; a single per-value helper and enumerate() keep
    # the traversal identical while removing the index-loop idiom.
    if isinstance(d, dict):
        for k, v in d.items():
            if isinstance(v, (dict, list)):
                encode_bytes(v)
            elif isinstance(v, bytes):
                d[k] = base64.b64encode(v).decode()
    elif isinstance(d, list):
        for i, v in enumerate(d):
            if isinstance(v, (dict, list)):
                encode_bytes(v)
            elif isinstance(v, bytes):
                d[i] = base64.b64encode(v).decode()
    return d
class PathsHandler(http.server.SimpleHTTPRequestHandler):
    """Mock HTTP handler that echoes the request path back as JSON.

    Used by the path tests: the client under test hits this server and
    the test asserts on the ``path`` field of the response. All verbs
    behave identically, so the three do_* methods (previously three
    copy-pasted bodies) share one helper.
    """

    def _echo_path(self):
        # Respond 200 with {"path": <request path>} regardless of verb.
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        m = json.dumps({"path": self.path})
        self.wfile.write(bytes(m, "ascii"))

    def do_GET(self):
        self._echo_path()

    def do_POST(self):
        self._echo_path()

    def do_DELETE(self):
        self._echo_path()
def get_status_to_use():
    """Read the HTTP status code the mock server should respond with next.

    The status lives in a scratch file that individual tests may
    overwrite; after reading, the file is reset to "200" so tests that
    never touch it get a normal response.

    :return: the status code read from the file, as an int
    """
    # Both file accesses now use context managers; the read previously
    # used open/close and leaked the handle if read() raised.
    status_path = "tests/features/resources/mock_response_status"
    with open(status_path, "r") as f:
        status = f.read()
    # overwrite to default 200 so that tests that don't write above file operate properly
    with open(status_path, "w") as f:
        f.write("200")
    return int(status)
class FileHandler(http.server.SimpleHTTPRequestHandler):
    """Mock HTTP handler that serves canned fixture responses.

    A request whose path contains "mock" records which fixture file the
    *next* request should be answered with; any other request replies with
    the contents of that fixture (msgpack fixtures, whose names contain
    "base64", are decoded and re-encoded as JSON first).
    """
    def do_GET(self):
        if "mock" in self.path:
            # Record the fixture name for subsequent requests.
            # NOTE(review): path[6:] presumably strips a "/mock/" prefix
            # (6 chars) -- confirm against the callers.
            f = open("tests/features/resources/mock_response_path", "w")
            f.write(self.path[6:])
            f.close()
            self.send_response(200)
            self.send_header("Content-type", "application/json")
            self.end_headers()
            self.wfile.write(bytes("done", "ascii"))
        else:
            # Serve the previously recorded fixture with the configured status.
            self.send_response(get_status_to_use())
            self.send_header("Content-type", "application/json")
            self.end_headers()
            f = open("tests/features/resources/mock_response_path", "r")
            mock_response_path = f.read()
            f.close()
            f = open("tests/features/resources/" + mock_response_path, "r")
            s = f.read()
            f.close()
            if "base64" in mock_response_path:
                # Fixture is base64-wrapped msgpack: unpack it, then
                # base64-encode any embedded bytes so it is JSON-serializable.
                s = encode_bytes(
                    msgpack.unpackb(base64.b64decode(s), raw=False)
                )
                self.wfile.write(bytes(json.dumps(s), "ascii"))
            else:
                s = bytes(s, "ascii")
                self.wfile.write(s)
    def do_POST(self):
        # POST always answers 200 with the raw fixture body (no msgpack
        # handling, unlike do_GET).
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        f = open("tests/features/resources/mock_response_path", "r")
        mock_response_path = f.read()
        f.close()
        f = open("tests/features/resources/" + mock_response_path, "r")
        s = f.read()
        f.close()
        s = bytes(s, "ascii")
        self.wfile.write(s)
def before_all(context):
    """behave hook: start the two mock TCP servers (path echo + canned
    responses) in daemon threads before any feature runs.

    Port 0 asks the OS for a free ephemeral port; the chosen port is
    stored on ``context`` so steps can build URLs against it.
    """
    # Install path server
    socketserver.TCPServer.allow_reuse_address = True
    context.path_server = socketserver.TCPServer(("", 0), PathsHandler)
    _, context.path_server_port = context.path_server.server_address
    context.path_thread = threading.Thread(
        target=context.path_server.serve_forever
    )
    context.path_thread.start()
    # Install route server
    socketserver.TCPServer.allow_reuse_address = True
    context.response_server = socketserver.TCPServer(("", 0), FileHandler)
    _, context.response_server_port = context.response_server.server_address
    context.response_thread = threading.Thread(
        target=context.response_server.serve_forever
    )
    context.response_thread.start()
def after_all(context):
    """behave hook: stop both mock servers and join their threads so the
    test process can exit cleanly."""
    # Shutdown path server
    context.path_server.shutdown()
    context.path_thread.join()
    # Shutdown route server
    context.response_server.shutdown()
    context.response_thread.join()
| algorand/py-algorand-sdk | tests/environment.py | environment.py | py | 4,770 | python | en | code | 242 | github-code | 13 |
38631216651 | import time
import os
import drive_helpers
logfile = os.path.join(drive_helpers.get_unturned_path(), "Logs", "Client.log")
print(f"Extracting logs from: {logfile}")
# Keep track of the last line number that was read
last_line = 0
# if a log line has any of these strings in it, ignore it
IGNORE_SENTENCES = ["Look rotation viewing vector is zero"]
while True:
# Get the size of the log file
size = os.path.getsize(logfile)
# If the size of the file has decreased (e.g., due to log rotation),
# reset the last_line variable to the beginning of the file
if size < last_line:
last_line = 0
# Open the file and seek to the last line that was read
with open(logfile, encoding="utf-8") as f:
f.seek(last_line)
# Read any new lines and print them
for line in f:
# Avoid dumping entire log contents on startup
if last_line > 0:
if not any(s in line for s in IGNORE_SENTENCES):
print(line.strip())
# Update the last_line variable to the current position
if last_line == 0:
print("Logs extracted, monitoring for updates...")
last_line = f.tell()
# Wait for a few seconds before checking again
time.sleep(0.1)
| downj05/SuiteBanEv | log_reader.py | log_reader.py | py | 1,271 | python | en | code | 0 | github-code | 13 |
72206360019 | from nrclex import NRCLex
import pandas as pd
import matplotlib.pyplot as plt
def main():
    """Plot a bar chart of NRC emotion counts aggregated over all tweets."""
    tweets = pd.read_csv('Tweets.csv')["text"]
    corpus = ",".join(tweets)
    emotion_scores = NRCLex(corpus).raw_emotion_scores
    names = list(emotion_scores.keys())
    counts = list(emotion_scores.values())
    plt.figure(figsize=(10, 3))
    plt.bar(range(len(emotion_scores)), counts, tick_label=names)
    plt.show()
if __name__ == '__main__':
main() | Ifas87/nltkstuff | emotions.py | emotions.py | py | 523 | python | en | code | 0 | github-code | 13 |
40324017424 | import pygame, torch
import numpy as np
from alphagomoku.game import GomokuGame
from alphagomoku.players.basic_player import NaivePlayer
from alphagomoku.players.mcts_player import MCTS, NaiveMCTSPlayer, NaiveMCTS, get_valid_moves, Player
from alphagomoku.players.mcts_policy_player import PolicyMCTSPlayer, PolicyNet
#####################
pygame.init()
computer_side = 0
width, height = 800, 800
font = pygame.font.SysFont('monospace', 20)
# constant vals
xy_min = 100; xy_max = 700
l = (xy_max-xy_min)/15
# define computer player
model = PolicyNet()
model.load_state_dict(torch.load('server/models/model11.pth', map_location=torch.device('cpu')))
model.eval()
player = PolicyMCTSPlayer(20, 1000, model, True)
# draw the board
def draw_board(window):
    """Draw the Gomoku grid: 16 vertical and 16 horizontal black lines."""
    for coord in np.linspace(xy_min, xy_max, 16):
        # vertical line at x=coord, then horizontal line at y=coord
        pygame.draw.line(window, '#000000', (coord, xy_min), (coord, xy_max), width=2)
        pygame.draw.line(window, '#000000', (xy_min, coord), (xy_max, coord), width=2)
# draw a move
def draw_move(window, move, side):
    """Render one stone at board cell ``move``: black for side 0, white for side 1."""
    col, row = move
    center = (xy_min + (col + 0.5) * l + 1, xy_min + (row + 0.5) * l + 1)
    stone = 'black' if side == 0 else 'white'
    pygame.draw.circle(window, stone, center, 0.5 * l - 2)
# get row/column number with mouse click
def get_index(x):
    """Map a pixel coordinate to the nearest of the 15 board rows/columns."""
    centers = np.linspace(xy_min + 0.5 * l, xy_max - 0.5 * l, 15)
    return np.argmin(np.abs(centers - x))
def main():
    """Run the interactive human-vs-computer Gomoku game loop."""
    window = pygame.display.set_mode((width, height))
    window.fill('#996600')
    game = GomokuGame()
    pygame.display.set_caption('Gomoku')
    clock = pygame.time.Clock()
    draw_board(window)
    run = True
    side = 0  # 0 = black (moves first); alternates after each move
    while run:
        clock.tick(10)
        # NOTE(review): the computer's move only happens inside the event
        # loop, so the AI only plays when some pygame event fires (mouse
        # motion, etc.) -- confirm this stall is acceptable.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            elif not game.winner is None:
                # game over: ignore further input, keep window responsive
                pass
            elif computer_side==side:
                player.play(game, side)
                move = game.hist[-1][:2]
                draw_move(window, move, side)
                # draw win rate
                window.fill('#996600', rect = (0, 0, width, xy_min))
                text_surface = font.render(f'Computer: {player.trees[-1].root.Q :.1%}', 0, (0, 0, 0))
                window.blit(text_surface, (xy_min, xy_min/2))
                side = 1-side
            elif event.type == pygame.MOUSEBUTTONDOWN:
                x, y = pygame.mouse.get_pos()
                # only accept clicks inside the board area
                if xy_min<x<xy_max and xy_min<y<xy_max:
                    move = (get_index(x), get_index(y))
                    game.play(move, side)
                    draw_move(window, move, side)
                    side = 1-side
        pygame.display.update()
    pygame.display.update()
    pygame.quit()
if __name__ == "__main__":
main()
| BangyaoZhao/gomoku | play_with_computer.py | play_with_computer.py | py | 2,775 | python | en | code | 0 | github-code | 13 |
16007361510 | import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import nimblephysics as nimble
from solver.envs.rigidbody3d.r3d_grasp import GraspBox
from solver.envs.rigidbody3d.utils import arr_to_str
class TestSim(GraspBox):
    """Grasp-and-pull probe environment used to study how simulator
    constants (restitution, friction, gripper forces) affect slip."""

    def __init__(self, cfg=None, **kwargs):
        # Forward extra keyword args (e.g. restitution_coeff / friction_coeff,
        # as passed by test() below) to GraspBox; the previous signature
        # accepted only cfg, so TestSim(restitution_coeff=...) raised
        # TypeError. TODO(review): confirm GraspBox.__init__ accepts these.
        super().__init__(cfg, A_ACT_MUL=1., **kwargs)

    def init_simulator(self):
        """Build the scene: gripper arm, one low-mass sphere ("box"), ground."""
        self.sim.world.setGravity([0, -1, 0])
        # arm
        self.sim.arm = self.sim.load_urdf(
            "gripper_v2.urdf",
            restitution_coeff=self.restitution_coeff,
            friction_coeff=self.friction_coeff,
            mass=1)
        # boxes
        self.sim.box = [
            self.sim.load_urdf(
                "sphere.urdf",
                restitution_coeff=self.restitution_coeff,
                friction_coeff=self.friction_coeff,
                mass=0.1, inertia=[0.1, 0.1, 0.1])
        ]
        # ground
        self.sim.load_urdf(
            "ground.urdf",
            restitution_coeff=self.restitution_coeff,
            friction_coeff=self.friction_coeff)
        # action only control arm, not other objects
        for i in range(self.sim.arm.sapien_actor.dof, self.sim.world.getActionSize()):
            self.sim.world.removeDofFromActionSpace(i)
        # nimble arm forward kinematics
        self.sim.arm_fk = nimble.neural.IKMapping(self.sim.world)
        self.sim.arm_fk.addLinearBodyNode(self.sim.arm.nimble_actor.getBodyNode("end_effector"))

    def sample_state_goal(self, batch_size=1):
        """Return the fixed initial state (arm + box qpos/velocities) and goal.

        NOTE(review): the original file defined sample_state_goal twice;
        the first definition was dead code, silently shadowed by this one,
        and has been removed.
        """
        state = torch.tensor(
            [
                # arm qpos
                -1.57 / 2 - 0.2, 0.7, -0.7, 0.2822, 2.22, 0.0, 0.0,
                0, 0, 0, 0.058, -0.404, 1.652,
                # velocities
                0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0,
            ], dtype=torch.float32
        )
        goals = torch.tensor([0, 1, -1.5], dtype=torch.float32)
        return state, goals

    def get_reward(self, s, a, s_next):
        # Reward is intentionally disabled for this probe environment.
        return 0
def forward(env, a, T, render, render_cycle=10):
    """Roll the environment forward for T steps under action sequence ``a``.

    Records rendered frames every ``render_cycle`` steps (when ``render``)
    and, after t > 70, the per-step "slip": how much the arm's pull joint
    moved minus how much the ball followed.

    Returns locals() -- i.e. a dict exposing imgs, slip_diffs, s, etc.
    """
    env.reset()
    imgs = []
    slip_diffs = []
    s = env.sim.state
    for t in range(T):
        assist = env.sim.get_assist()
        # From t >= 85 the last two assist entries (presumably the gripper
        # pads -- TODO confirm) are zeroed so the caller's forces take over.
        if t >= 85:
            assist[-2:] = 0
        env.step(a[t] + assist)
        s_next = env.sim.state
        if render and t % render_cycle == 0:
            imgs.append(env.render())
        # when arm is pulling
        # if (s[3] - s_next[3]).abs() > 1e-5:
        # if a[t, 3] != 0:
        if t > 70:
            # how much did ball slipped
            # s[3]: arm pull joint; s[11]: ball height -- per the indices
            # used here; confirm against the state layout in TestSim.
            slip_diff = (s[3] - s_next[3]).abs() - (s[11] - s_next[11]).abs()
            # slip_diff = -s[11]
            # print(t, slip_diff)
            slip_diffs.append(slip_diff.item())
        if t == 90:
            # debug: print the assist force on the pull joint mid-pull
            print((env.sim.get_assist())[3].item())
        s = s_next
    return locals()
def test(f_pull=-10, f_hold=0.1, T=100, rest=0.1, fric=1., render=True, gui=True):
    """Run one grasp-then-pull episode and return forward()'s locals() dict.

    :param f_pull: pulling force applied on joint 3 from t=85 on
    :param f_hold: holding force on the last two action dims from t=85 on
    :param T: number of simulation steps
    :param rest: restitution coefficient for all bodies
    :param fric: friction coefficient for all bodies
    :param render: capture frames during the rollout
    :param gui: open the interactive viewer window
    """
    env = TestSim(restitution_coeff=rest, friction_coeff=fric)
    if gui: env.sim.viewer.create_window()
    a = torch.zeros((T, env.action_space.shape[0]))
    # phase 1 (t < 85): squeeze -- large force on the last two action dims
    a[:85, -2:] = 50
    # phase 2 (t >= 85): pull on joint 3 while holding with f_hold
    a[85:, 3] = f_pull; a[85:, -2:] = f_hold
    traj = forward(env, a, T, render=render)
    # print(traj['slip_diffs'])
    return traj
if __name__ == "__main__":
import tqdm
output_dir = "test_sim_constants"
os.makedirs(output_dir, exist_ok=True)
# N = 100
# f_pull_interval = (-100, -10)
# plt.figure(); plt.title(f"how much pulling force {f_pull_interval} influence slip")
# plt.xlabel("simulator timestep"); plt.ylabel("delta height of (arm - box)")
# for f_pull in np.linspace(*f_pull_interval, N):
# traj = test(f_pull=f_pull, render=False, gui=False)
# print(f_pull, traj['slip_diffs'][-1])
# plt.plot(traj["slip_diffs"])
# plt.savefig(f"{output_dir}/pull_slip_{N}.png")
# N = 20
# f_pull_interval = (-100, -10)
# plt.figure(); plt.title(f"how much pulling force {f_pull_interval} influence slip")
# plt.xlabel("simulator timestep"); plt.ylabel("slip = delta height of (arm - box)")
# for f_pull in np.linspace(*f_pull_interval, N):
# traj = test(f_pull=f_pull, render=False, gui=False)
# # print(f_pull, traj['slip_diffs'][-1])
# plt.plot(traj["slip_diffs"])
# plt.savefig(f"{output_dir}/pull_slip_{N}.png")
N = 20
f_hold_interval = (0, 1000)
plt.figure(); plt.title(f"how gripper pad force {f_hold_interval} influence slip")
plt.xlabel("timestep"); plt.ylabel("delta height of (arm - box)")
for i, f_hold in zip(tqdm.trange(N), np.linspace(*f_hold_interval, N)):
traj = test(f_hold=f_hold, render=False, gui=False)
# print(f_hold, traj['slip_diffs'][-1])
plt.plot(traj["slip_diffs"])
plt.savefig(f"{output_dir}/grip_slip_{N}.png")
# test(f_hold=0, render=True, gui=True) | haosulab/RPG | solver/envs/rigidbody3d/tests/test_sim_constants.py | test_sim_constants.py | py | 6,430 | python | en | code | 18 | github-code | 13 |
72926957778 | # pylint: disable=C0103, missing-docstring
def detailed_movies(db):
    """Return every movie as a (title, genres, director name) tuple."""
    sql = """
    SELECT movies.title, movies.genres, directors.name
    FROM movies
    JOIN directors ON movies.director_id = directors.id
    """
    db.execute(sql)
    return db.fetchall()
def late_released_movies(db):
    """Return titles (sorted) of movies released after their director's death."""
    sql = """
    SELECT movies.title
    FROM directors
    JOIN movies ON directors.id = movies.director_id
    WHERE (movies.start_year - directors.death_year) > 0
    ORDER BY movies.title
    """
    db.execute(sql)
    return [row[0] for row in db.fetchall()]
def stats_on(db, genre_name):
    """Return a dict of stats for a given genre.

    :param db: DB-API cursor over the movies/directors schema
    :param genre_name: exact genre string to filter on
    :return: {'genre', 'number_of_movies', 'avg_length'} (avg_length is 0
             when the genre has no movies with a known duration)
    """
    query= """
    SELECT COUNT(m.title), AVG(m.minutes)
    FROM movies m
    WHERE m.genres = ?
    """
    db.execute(query, (genre_name,))
    count, avg_minutes = db.fetchone()  # an aggregate query always yields one row
    # AVG() is NULL (-> None) for an empty/duration-less genre; the previous
    # round(results[0][1], 2) raised TypeError in that case.
    avg_length = round(avg_minutes, 2) if avg_minutes is not None else 0
    return {'genre': genre_name,
            'number_of_movies': count,
            'avg_length': avg_length}
def top_five_directors_for(db, genre_name):
    """Return up to 5 (name, movie_count) rows for directors with the most
    movies in ``genre_name``; ties are broken alphabetically."""
    sql = """
    SELECT d.name ,COUNT(*) movie_count
    FROM movies m
    JOIN directors d ON m.director_id = d.id
    WHERE m.genres = ?
    GROUP BY d.name
    ORDER BY movie_count DESC , d.name
    LIMIT 5
    """
    db.execute(sql, (genre_name,))
    return db.fetchall()
def movie_duration_buckets(db):
    """Return (bucket_upper_bound, count) rows grouping movie durations
    into 30-minute buckets; movies with unknown duration are skipped."""
    sql = """SELECT (minutes / 30 + 1)*30 time_range,
            COUNT(*)
            FROM movies
            WHERE minutes IS NOT NULL
            GROUP BY time_range
            """
    db.execute(sql)
    return db.fetchall()
def top_five_youngest_newly_directors(db):
    """Return up to 5 (name, age) rows for the directors who were youngest
    when directing their *first* movie (directors without a birth year
    are excluded).
    """
    # The first movie is the one with the smallest start_year, so the age
    # must be aggregated with MIN(). The previous query selected a bare,
    # non-aggregated movies.start_year under GROUP BY, which lets SQLite
    # pick an arbitrary movie per director rather than the first one.
    query = """SELECT directors.name, MIN(movies.start_year - directors.birth_year) age
            FROM directors
            JOIN movies ON directors.id = movies.director_id
            GROUP BY directors.name
            HAVING age IS NOT NULL
            ORDER BY age
            LIMIT 5
            """
    db.execute(query)
    return db.fetchall()
| arielimaa/data-sql-queries | queries.py | queries.py | py | 2,332 | python | en | code | 1 | github-code | 13 |
34911080964 | from list_items import MessageItem, ContactItem, FileTransferItem, InlineImageItem
from PySide import QtCore, QtGui
from tox import Tox
import os
from messages import *
from settings import *
from toxcore_enums_and_consts import *
from ctypes import *
from util import curr_time, log, Singleton, curr_directory, convert_time
from tox_dns import tox_dns
from history import *
from file_transfers import *
import time
import calls
import avwidgets
class Contact(object):
    """
    Class encapsulating TOX contact
    Properties: name (alias of contact or name), status_message, status (connection status)
    widget - widget for update
    """
    def __init__(self, name, status_message, widget, tox_id):
        """
        :param name: name, example: 'Toxygen user'
        :param status_message: status message, example: 'Toxing on toxygen'
        :param widget: ContactItem instance
        :param tox_id: tox id of contact
        """
        self._name, self._status_message = name, status_message
        self._status, self._widget = None, widget
        self._widget.name.setText(name)
        self._widget.status_message.setText(status_message)
        self._tox_id = tox_id
        self.load_avatar()
    # -----------------------------------------------------------------------------------------------------------------
    # name - current name or alias of user
    # -----------------------------------------------------------------------------------------------------------------
    def get_name(self):
        return self._name
    def set_name(self, value):
        # value arrives as utf-8 encoded bytes (presumably from toxcore
        # callbacks -- confirm); store it as unicode and refresh the widget.
        self._name = value.decode('utf-8')
        self._widget.name.setText(self._name)
        self._widget.name.repaint()
    name = property(get_name, set_name)
    # -----------------------------------------------------------------------------------------------------------------
    # Status message
    # -----------------------------------------------------------------------------------------------------------------
    def get_status_message(self):
        return self._status_message
    def set_status_message(self, value):
        # Same decoding convention as set_name.
        self._status_message = value.decode('utf-8')
        self._widget.status_message.setText(self._status_message)
        self._widget.status_message.repaint()
    status_message = property(get_status_message, set_status_message)
    # -----------------------------------------------------------------------------------------------------------------
    # Status
    # -----------------------------------------------------------------------------------------------------------------
    def get_status(self):
        return self._status
    def set_status(self, value):
        # Status doubles as the data of the connection indicator widget.
        self._widget.connection_status.data = self._status = value
        self._widget.connection_status.repaint()
    status = property(get_status, set_status)
    # -----------------------------------------------------------------------------------------------------------------
    # TOX ID. WARNING: for friend it will return public key, for profile - full address
    # -----------------------------------------------------------------------------------------------------------------
    def get_tox_id(self):
        return self._tox_id
    tox_id = property(get_tox_id)
    # -----------------------------------------------------------------------------------------------------------------
    # Avatars
    # -----------------------------------------------------------------------------------------------------------------
    def load_avatar(self):
        """
        Tries to load avatar of contact or uses default avatar
        """
        # Avatar files are keyed by the public-key prefix of the tox id.
        # NOTE(review): this changes the process-wide CWD (os.chdir) as a
        # side effect -- consider using absolute paths instead.
        avatar_path = '{}.png'.format(self._tox_id[:TOX_PUBLIC_KEY_SIZE * 2])
        os.chdir(ProfileHelper.get_path() + 'avatars/')
        if not os.path.isfile(avatar_path):  # load default image
            avatar_path = 'avatar.png'
            os.chdir(curr_directory() + '/images/')
        pixmap = QtGui.QPixmap(QtCore.QSize(64, 64))
        pixmap.load(avatar_path)
        self._widget.avatar_label.setScaledContents(False)
        self._widget.avatar_label.setPixmap(pixmap.scaled(64, 64, QtCore.Qt.KeepAspectRatio))
        self._widget.avatar_label.repaint()
    def reset_avatar(self):
        # Delete the custom avatar file (if any) and fall back to the default.
        avatar_path = (ProfileHelper.get_path() + 'avatars/{}.png').format(self._tox_id[:TOX_PUBLIC_KEY_SIZE * 2])
        if os.path.isfile(avatar_path):
            os.remove(avatar_path)
        self.load_avatar()
    def set_avatar(self, avatar):
        # :param avatar: raw PNG bytes to persist as this contact's avatar
        avatar_path = (ProfileHelper.get_path() + 'avatars/{}.png').format(self._tox_id[:TOX_PUBLIC_KEY_SIZE * 2])
        with open(avatar_path, 'wb') as f:
            f.write(avatar)
        self.load_avatar()
    def get_pixmap(self):
        # Currently displayed avatar pixmap (as shown in the contact widget).
        return self._widget.avatar_label.pixmap()
class Friend(Contact):
    """
    Friend in list of friends. Can be hidden; adds unread-message flag,
    visibility, alias support and an in-memory message history on top of Contact.
    """
    def __init__(self, message_getter, number, *args):
        """
        :param message_getter: gets messages from db
        :param number: number of friend.
        """
        super(Friend, self).__init__(*args)
        self._number = number
        self._new_messages = False  # True when this friend has unread messages
        self._visible = True  # shown in the friends list
        self._alias = False  # True while a user-defined alias is in effect
        self._message_getter = message_getter  # paged reader for messages stored in db
        self._corr = []  # in-memory correspondence: text messages and transfers
        self._unsaved_messages = 0  # number of trailing messages not yet flushed to db
        self._history_loaded = False  # first page of history already pulled from db
    def __del__(self):
        # Hide the widget and release the db getter when the friend object dies.
        self.set_visibility(False)
        del self._widget
        if hasattr(self, '_message_getter'):
            del self._message_getter
    # -----------------------------------------------------------------------------------------------------------------
    # History support
    # -----------------------------------------------------------------------------------------------------------------
    def load_corr(self, first_time=True):
        """
        Load the next PAGE_SIZE messages from db into the in-memory list.
        :param first_time: friend became active, load first part of messages
        """
        if (first_time and self._history_loaded) or (not hasattr(self, '_message_getter')):
            return
        data = self._message_getter.get(PAGE_SIZE)
        if data is not None and len(data):
            data.reverse()  # db returns newest first; we prepend in chronological order
        else:
            return
        data = map(lambda tupl: TextMessage(*tupl), data)  # NOTE: Python 2 map() returns a list here
        self._corr = data + self._corr
        self._history_loaded = True
    def get_corr_for_saving(self):
        """
        Get data to save in db
        :return: list of unsaved messages or []
        """
        if hasattr(self, '_message_getter'):
            del self._message_getter
        messages = filter(lambda x: x.get_type() <= 1, self._corr)  # text messages only (types 0/1)
        return map(lambda x: x.get_data(), messages[-self._unsaved_messages:]) if self._unsaved_messages else []
    def get_corr(self):
        # Shallow copy so callers cannot mutate the internal list.
        return self._corr[:]
    def append_message(self, message):
        """
        :param message: tuple (message, owner, unix_time, message_type)
        """
        self._corr.append(message)
        if message.get_type() <= 1:  # only text messages count as unsaved; transfers are not persisted here
            self._unsaved_messages += 1
    def get_last_message_text(self):
        # Text of the newest message authored by the current user, or '' if none.
        messages = filter(lambda x: x.get_type() <= 1 and not x.get_owner(), self._corr)
        if messages:
            return messages[-1].get_data()[0]
        else:
            return ''
    def last_message_owner(self):
        # Owner of the newest text message, or -1 when the list has none.
        messages = filter(lambda x: x.get_type() <= 1, self._corr)
        if messages:
            return messages[-1].get_owner()
        else:
            return -1
    def clear_corr(self):
        """
        Clear messages list
        """
        if hasattr(self, '_message_getter'):
            del self._message_getter
        # Keep only active file transfers (type 2, status 2 or 4); drop everything else.
        self._corr = filter(lambda x: x.get_type() == 2 and x.get_status() in (2, 4), self._corr)
        self._unsaved_messages = 0
    def update_transfer_data(self, file_number, status, inline=None):
        """
        Update status of active transfer and load inline if needed
        """
        try:
            tr = filter(lambda x: x.get_type() == 2 and x.is_active(file_number), self._corr)[0]
            tr.set_status(status)
            if inline:  # inline was loaded
                i = self._corr.index(tr)
                self._corr.insert(i, inline)
                return i - len(self._corr)  # negative offset of the inserted inline item
        except Exception as ex:
            log('Update transfer data failed: ' + str(ex))
    # -----------------------------------------------------------------------------------------------------------------
    # Alias support
    # -----------------------------------------------------------------------------------------------------------------
    def set_name(self, value):
        """
        Set new name or ignore if alias exists
        :param value: new name
        """
        # NOTE(review): super(self.__class__, ...) recurses if Friend is ever subclassed;
        # fine while Friend stays a leaf class.
        if not self._alias:
            super(self.__class__, self).set_name(value)
    def set_alias(self, alias):
        # Remember whether a non-empty alias is in effect (blocks set_name above).
        self._alias = bool(alias)
    # -----------------------------------------------------------------------------------------------------------------
    # Visibility in friends' list
    # -----------------------------------------------------------------------------------------------------------------
    def get_visibility(self):
        return self._visible
    def set_visibility(self, value):
        self._visible = value
    visibility = property(get_visibility, set_visibility)
    # -----------------------------------------------------------------------------------------------------------------
    # Unread messages from friend
    # -----------------------------------------------------------------------------------------------------------------
    def get_messages(self):
        return self._new_messages
    def set_messages(self, value):
        # Update the unread flag and repaint the status icon in the friends list.
        self._widget.connection_status.messages = self._new_messages = value
        self._widget.connection_status.repaint()
    messages = property(get_messages, set_messages)
    # -----------------------------------------------------------------------------------------------------------------
    # Friend's number (can be used in toxcore)
    # -----------------------------------------------------------------------------------------------------------------
    def get_number(self):
        return self._number
    def set_number(self, value):
        self._number = value
    number = property(get_number, set_number)
class Profile(Contact, Singleton):
"""
Profile of current toxygen user. Contains friends list, tox instance
"""
def __init__(self, tox, screen):
"""
:param tox: tox instance
:param screen: ref to main screen
"""
super(Profile, self).__init__(tox.self_get_name(),
tox.self_get_status_message(),
screen.user_info,
tox.self_get_address())
self._screen = screen
self._messages = screen.messages
self._tox = tox
self._file_transfers = {} # dict of file transfers. key - tuple (friend_number, file_number)
self._call = calls.AV(tox.AV) # object with data about calls
self._incoming_calls = set()
settings = Settings.get_instance()
self._show_online = settings['show_online_friends']
screen.online_contacts.setChecked(self._show_online)
aliases = settings['friends_aliases']
data = tox.self_get_friend_list()
self._history = History(tox.self_get_public_key()) # connection to db
self._friends, self._active_friend = [], -1
for i in data: # creates list of friends
tox_id = tox.friend_get_public_key(i)
if not self._history.friend_exists_in_db(tox_id):
self._history.add_friend_to_db(tox_id)
try:
alias = filter(lambda x: x[0] == tox_id, aliases)[0][1]
except:
alias = ''
item = self.create_friend_item()
name = alias or tox.friend_get_name(i) or tox_id
status_message = tox.friend_get_status_message(i)
message_getter = self._history.messages_getter(tox_id)
friend = Friend(message_getter, i, name, status_message, item, tox_id)
friend.set_alias(alias)
self._friends.append(friend)
self.filtration(self._show_online)
# -----------------------------------------------------------------------------------------------------------------
# Edit current user's data
# -----------------------------------------------------------------------------------------------------------------
def change_status(self):
"""
Changes status of user (online, away, busy)
"""
if self._status is not None:
status = (self._status + 1) % 3
super(self.__class__, self).set_status(status)
self._tox.self_set_status(status)
def set_name(self, value):
super(self.__class__, self).set_name(value)
self._tox.self_set_name(self._name.encode('utf-8'))
def set_status_message(self, value):
super(self.__class__, self).set_status_message(value)
self._tox.self_set_status_message(self._status_message.encode('utf-8'))
# -----------------------------------------------------------------------------------------------------------------
# Filtration
# -----------------------------------------------------------------------------------------------------------------
def filtration(self, show_online=True, filter_str=''):
"""
Filtration of friends list
:param show_online: show online only contacts
:param filter_str: show contacts which name contains this substring
"""
filter_str = filter_str.lower()
for index, friend in enumerate(self._friends):
friend.visibility = (friend.status is not None or not show_online) and (filter_str in friend.name.lower())
if friend.visibility:
self._screen.friends_list.item(index).setSizeHint(QtCore.QSize(250, 70))
else:
self._screen.friends_list.item(index).setSizeHint(QtCore.QSize(250, 0))
self._show_online, self._filter_string = show_online, filter_str
settings = Settings.get_instance()
settings['show_online_friends'] = self._show_online
settings.save()
def update_filtration(self):
"""
Update list of contacts when 1 of friends change connection status
"""
self.filtration(self._show_online, self._filter_string)
def get_friend_by_number(self, num):
return filter(lambda x: x.number == num, self._friends)[0]
# -----------------------------------------------------------------------------------------------------------------
# Work with active friend
# -----------------------------------------------------------------------------------------------------------------
def get_active(self):
return self._active_friend
def set_active(self, value=None):
"""
:param value: number of new active friend in friend's list or None to update active user's data
"""
if value is None and self._active_friend == -1: # nothing to update
return
if value == -1: # all friends were deleted
self._screen.account_name.setText('')
self._screen.account_status.setText('')
self._active_friend = -1
self._screen.account_avatar.setHidden(True)
self._messages.clear()
self._screen.messageEdit.clear()
return
try:
self.send_typing(False)
self._screen.typing.setVisible(False)
if value is not None:
self._active_friend = value
friend = self._friends[value]
self._friends[value].set_messages(False)
self._screen.messageEdit.clear()
self._messages.clear()
friend.load_corr()
messages = friend.get_corr()[-PAGE_SIZE:]
for message in messages:
if message.get_type() <= 1:
data = message.get_data()
self.create_message_item(data[0],
convert_time(data[2]),
friend.name if data[1] else self._name,
data[3])
elif message.get_type() == 2:
item = self.create_file_transfer_item(message)
if message.get_status() in (2, 4): # active file transfer
ft = self._file_transfers[(message.get_friend_number(), message.get_file_number())]
ft.set_state_changed_handler(item.update)
else: # inline
self.create_inline_item(message.get_data())
self._messages.scrollToBottom()
if value in self._call:
self._screen.active_call()
elif value in self._incoming_calls:
self._screen.incoming_call()
else:
self._screen.call_finished()
else:
friend = self._friends[self._active_friend]
self._screen.account_name.setText(friend.name)
self._screen.account_status.setText(friend.status_message)
avatar_path = (ProfileHelper.get_path() + 'avatars/{}.png').format(friend.tox_id[:TOX_PUBLIC_KEY_SIZE * 2])
if not os.path.isfile(avatar_path): # load default image
avatar_path = curr_directory() + '/images/avatar.png'
pixmap = QtGui.QPixmap(QtCore.QSize(64, 64))
pixmap.load(avatar_path)
self._screen.account_avatar.setScaledContents(False)
self._screen.account_avatar.setPixmap(pixmap.scaled(64, 64, QtCore.Qt.KeepAspectRatio))
self._screen.account_avatar.repaint() # comment?
except: # no friend found. ignore
log('Incorrect friend value: ' + str(value))
raise
active_friend = property(get_active, set_active)
def get_last_message(self):
return self._friends[self._active_friend].get_last_message_text()
def get_active_number(self):
return self._friends[self._active_friend].number if self._active_friend + 1 else -1
def get_active_name(self):
return self._friends[self._active_friend].name if self._active_friend + 1 else ''
def is_active_online(self):
return self._active_friend + 1 and self._friends[self._active_friend].status is not None
# -----------------------------------------------------------------------------------------------------------------
# Typing notifications
# -----------------------------------------------------------------------------------------------------------------
def send_typing(self, typing):
if Settings.get_instance()['typing_notifications']:
friend = self._friends[self._active_friend]
if friend.status is not None:
self._tox.self_set_typing(friend.number, typing)
def friend_typing(self, friend_number, typing):
if friend_number == self.get_active_number():
self._screen.typing.setVisible(typing)
# -----------------------------------------------------------------------------------------------------------------
# Private messages
# -----------------------------------------------------------------------------------------------------------------
def split_and_send(self, number, message_type, message):
"""
Message splitting
:param number: friend's number
:param message_type: type of message
:param message: message text
"""
while len(message) > TOX_MAX_MESSAGE_LENGTH:
size = TOX_MAX_MESSAGE_LENGTH * 4 / 5
last_part = message[size:TOX_MAX_MESSAGE_LENGTH]
if ' ' in last_part:
index = last_part.index(' ')
elif ',' in last_part:
index = last_part.index(',')
elif '.' in last_part:
index = last_part.index('.')
else:
index = TOX_MAX_MESSAGE_LENGTH - size
index += size + 1
self._tox.friend_send_message(number, message_type, message[:index])
message = message[index:]
self._tox.friend_send_message(number, message_type, message)
def new_message(self, friend_num, message_type, message):
"""
Current user gets new message
:param friend_num: friend_num of friend who sent message
:param message_type: message type - plain text or action message (/me)
:param message: text of message
"""
if friend_num == self.get_active_number(): # add message to list
user_name = Profile.get_instance().get_active_name()
self.create_message_item(message.decode('utf-8'), curr_time(), user_name, message_type)
self._messages.scrollToBottom()
self._friends[self._active_friend].append_message(
TextMessage(message.decode('utf-8'), MESSAGE_OWNER['FRIEND'], time.time(), message_type))
else:
friend = self.get_friend_by_number(friend_num)
friend.set_messages(True)
friend.append_message(
TextMessage(message.decode('utf-8'), MESSAGE_OWNER['FRIEND'], time.time(), message_type))
def send_message(self, text):
"""
Send message to active friend
:param text: message text
"""
if self.is_active_online() and text:
if text.startswith('/me '):
message_type = TOX_MESSAGE_TYPE['ACTION']
text = text[4:]
else:
message_type = TOX_MESSAGE_TYPE['NORMAL']
friend = self._friends[self._active_friend]
self.split_and_send(friend.number, message_type, text.encode('utf-8'))
self.create_message_item(text, curr_time(), self._name, message_type)
self._screen.messageEdit.clear()
self._messages.scrollToBottom()
friend.append_message(TextMessage(text, MESSAGE_OWNER['ME'], time.time(), message_type))
# -----------------------------------------------------------------------------------------------------------------
# History support
# -----------------------------------------------------------------------------------------------------------------
def save_history(self):
"""
Save history to db
"""
if hasattr(self, '_history'):
if Settings.get_instance()['save_history']:
for friend in self._friends:
messages = friend.get_corr_for_saving()
if not self._history.friend_exists_in_db(friend.tox_id):
self._history.add_friend_to_db(friend.tox_id)
self._history.save_messages_to_db(friend.tox_id, messages)
del self._history
def clear_history(self, num=None):
if num is not None:
friend = self._friends[num]
friend.clear_corr()
if self._history.friend_exists_in_db(friend.tox_id):
self._history.delete_messages(friend.tox_id)
self._history.delete_friend_from_db(friend.tox_id)
else: # clear all history
for number in xrange(len(self._friends)):
self.clear_history(number)
if num is None or num == self.get_active_number():
self._messages.clear()
self._messages.repaint()
def load_history(self):
"""
Tries to load next part of messages
"""
friend = self._friends[self._active_friend]
friend.load_corr(False)
data = friend.get_corr()
if not data:
return
data.reverse()
data = data[self._messages.count():self._messages.count() + PAGE_SIZE]
for message in data:
if message.get_type() <= 1:
data = message.get_data()
self.create_message_item(data[0],
convert_time(data[2]),
friend.name if data[1] else self._name,
data[3],
False)
elif message.get_type() == 2:
item = self.create_file_transfer_item(message, False)
if message.get_status() in (2, 4):
ft = self._file_transfers[(message.get_friend_number(), message.get_file_number())]
ft.set_state_changed_handler(item.update)
def export_history(self, directory):
self._history.export(directory)
# -----------------------------------------------------------------------------------------------------------------
# Factories for friend, message and file transfer items
# -----------------------------------------------------------------------------------------------------------------
def create_friend_item(self):
"""
Method-factory
:return: new widget for friend instance
"""
item = ContactItem()
elem = QtGui.QListWidgetItem(self._screen.friends_list)
elem.setSizeHint(QtCore.QSize(250, 70))
self._screen.friends_list.addItem(elem)
self._screen.friends_list.setItemWidget(elem, item)
return item
def create_message_item(self, text, time, name, message_type, append=True):
item = MessageItem(text, time, name, message_type, self._messages)
elem = QtGui.QListWidgetItem()
elem.setSizeHint(QtCore.QSize(self._messages.width(), item.height()))
if append:
self._messages.addItem(elem)
else:
self._messages.insertItem(0, elem)
self._messages.setItemWidget(elem, item)
def create_file_transfer_item(self, tm, append=True):
data = list(tm.get_data())
data[3] = self.get_friend_by_number(data[4]).name if data[3] else self._name
item = FileTransferItem(*data)
elem = QtGui.QListWidgetItem()
elem.setSizeHint(QtCore.QSize(600, 50))
if append:
self._messages.addItem(elem)
else:
self._messages.insertItem(0, elem)
self._messages.setItemWidget(elem, item)
return item
def create_inline_item(self, data, append=True):
item = InlineImageItem(data)
elem = QtGui.QListWidgetItem()
elem.setSizeHint(QtCore.QSize(600, item.height()))
if append:
self._messages.addItem(elem)
else:
self._messages.insertItem(0, elem)
self._messages.setItemWidget(elem, item)
# -----------------------------------------------------------------------------------------------------------------
# Work with friends (remove, block, set alias, get public key)
# -----------------------------------------------------------------------------------------------------------------
def set_alias(self, num):
friend = self._friends[num]
name = friend.name.encode('utf-8')
dialog = QtGui.QApplication.translate('MainWindow',
"Enter new alias for friend {} or leave empty to use friend's name:",
None, QtGui.QApplication.UnicodeUTF8)
dialog = dialog.format(name.decode('utf-8'))
title = QtGui.QApplication.translate('MainWindow',
'Set alias',
None, QtGui.QApplication.UnicodeUTF8)
text, ok = QtGui.QInputDialog.getText(None,
title,
dialog)
if ok:
settings = Settings.get_instance()
aliases = settings['friends_aliases']
if text:
friend.name = text.encode('utf-8')
try:
index = map(lambda x: x[0], aliases).index(friend.tox_id)
aliases[index] = (friend.tox_id, text)
except:
aliases.append((friend.tox_id, text))
friend.set_alias(text)
else: # use default name
friend.name = self._tox.friend_get_name(friend.number).encode('utf-8')
friend.set_alias('')
try:
index = map(lambda x: x[0], aliases).index(friend.tox_id)
del aliases[index]
except:
pass
settings.save()
self.set_active()
def friend_public_key(self, num):
return self._friends[num].tox_id
def delete_friend(self, num):
"""
Removes friend from contact list
:param num: number of friend in list
"""
friend = self._friends[num]
self.clear_history(num)
if self._history.friend_exists_in_db(friend.tox_id):
self._history.delete_friend_from_db(friend.tox_id)
self._tox.friend_delete(friend.number)
del self._friends[num]
self._screen.friends_list.takeItem(num)
if num == self._active_friend: # active friend was deleted
if not len(self._friends): # last friend was deleted
self.set_active(-1)
else:
self.set_active(0)
def add_friend(self, tox_id):
num = self._tox.friend_add_norequest(tox_id) # num - friend number
item = self.create_friend_item()
try:
if not self._history.friend_exists_in_db(tox_id):
self._history.add_friend_to_db(tox_id)
message_getter = self._history.messages_getter(tox_id)
except Exception as ex: # something is wrong
log('Accept friend request failed! ' + str(ex))
message_getter = None
friend = Friend(message_getter, num, tox_id, '', item, tox_id)
self._friends.append(friend)
def block_user(self, tox_id):
tox_id = tox_id[:TOX_PUBLIC_KEY_SIZE * 2]
if tox_id == self.tox_id[:TOX_PUBLIC_KEY_SIZE * 2]:
return
settings = Settings.get_instance()
if tox_id not in settings['blocked']:
settings['blocked'].append(tox_id)
settings.save()
try:
num = self._tox.friend_by_public_key(tox_id)
self.delete_friend(num)
except: # not in friend list
pass
def unblock_user(self, tox_id, add_to_friend_list):
s = Settings.get_instance()
s['blocked'].remove(tox_id)
s.save()
if add_to_friend_list:
self.add_friend(tox_id)
# -----------------------------------------------------------------------------------------------------------------
# Friend requests
# -----------------------------------------------------------------------------------------------------------------
def send_friend_request(self, tox_id, message):
"""
Function tries to send request to contact with specified id
:param tox_id: id of new contact or tox dns 4 value
:param message: additional message
:return: True on success else error string
"""
try:
message = message or 'Add me to your contact list'
if '@' in tox_id: # value like groupbot@toxme.io
tox_id = tox_dns(tox_id)
if tox_id is None:
raise Exception('TOX DNS lookup failed')
result = self._tox.friend_add(tox_id, message.encode('utf-8'))
tox_id = tox_id[:TOX_PUBLIC_KEY_SIZE * 2]
item = self.create_friend_item()
if not self._history.friend_exists_in_db(tox_id):
self._history.add_friend_to_db(tox_id)
message_getter = self._history.messages_getter(tox_id)
friend = Friend(message_getter, result, tox_id, '', item, tox_id)
self._friends.append(friend)
return True
except Exception as ex: # wrong data
log('Friend request failed with ' + str(ex))
return str(ex)
def process_friend_request(self, tox_id, message):
"""
Accept or ignore friend request
:param tox_id: tox id of contact
:param message: message
"""
try:
text = QtGui.QApplication.translate('MainWindow', 'User {} wants to add you to contact list. Message:\n{}', None, QtGui.QApplication.UnicodeUTF8)
info = text.format(tox_id, message)
fr_req = QtGui.QApplication.translate('MainWindow', 'Friend request', None, QtGui.QApplication.UnicodeUTF8)
reply = QtGui.QMessageBox.question(None, fr_req, info, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes: # accepted
self.add_friend(tox_id)
except Exception as ex: # something is wrong
log('Accept friend request failed! ' + str(ex))
# -----------------------------------------------------------------------------------------------------------------
# Reset
# -----------------------------------------------------------------------------------------------------------------
def reset(self, restart):
"""
Recreate tox instance
:param restart: method which calls restart and returns new tox instance
"""
for key in self._file_transfers.keys():
self._file_transfers[key].cancel()
del self._file_transfers[key]
del self._tox
self._tox = restart()
self.status = None
for friend in self._friends:
friend.status = None
def close(self):
self._call.stop()
del self._call
# -----------------------------------------------------------------------------------------------------------------
# File transfers support
# -----------------------------------------------------------------------------------------------------------------
def incoming_file_transfer(self, friend_number, file_number, size, file_name):
"""
New transfer
:param friend_number: number of friend who sent file
:param file_number: file number
:param size: file size in bytes
:param file_name: file name without path
"""
settings = Settings.get_instance()
friend = self.get_friend_by_number(friend_number)
auto = settings['allow_auto_accept'] and friend.tox_id in settings['auto_accept_from_friends']
inline = (file_name == 'toxygen_inline.png' or file_name == 'utox-inline.png') and settings['allow_inline']
if inline and size < 1024 * 1024:
self.accept_transfer(None, '', friend_number, file_number, size, True)
tm = TransferMessage(MESSAGE_OWNER['FRIEND'],
time.time(),
FILE_TRANSFER_MESSAGE_STATUS['INCOMING_STARTED'],
size,
file_name,
friend_number,
file_number)
elif auto:
path = settings['auto_accept_path'] or curr_directory()
if not os.path.isdir(path):
path = curr_directory()
new_file_name, i = file_name, 1
while os.path.isfile(path + '/' + new_file_name): # file with same name already exists
if '.' in file_name: # has extension
d = file_name.rindex('.')
else: # no extension
d = len(file_name)
new_file_name = file_name[:d] + ' ({})'.format(i) + file_name[d:]
i += 1
self.accept_transfer(None, path + '/' + new_file_name, friend_number, file_number, size)
tm = TransferMessage(MESSAGE_OWNER['FRIEND'],
time.time(),
FILE_TRANSFER_MESSAGE_STATUS['INCOMING_STARTED'],
size,
new_file_name,
friend_number,
file_number)
else:
tm = TransferMessage(MESSAGE_OWNER['FRIEND'],
time.time(),
FILE_TRANSFER_MESSAGE_STATUS['INCOMING_NOT_STARTED'],
size,
file_name,
friend_number,
file_number)
if friend_number == self.get_active_number():
item = self.create_file_transfer_item(tm)
if (inline and size < 1024 * 1024) or auto:
self._file_transfers[(friend_number, file_number)].set_state_changed_handler(item.update)
self._messages.scrollToBottom()
else:
friend.set_messages(True)
friend.append_message(tm)
def cancel_transfer(self, friend_number, file_number, already_cancelled=False):
"""
Stop transfer
:param friend_number: number of friend
:param file_number: file number
:param already_cancelled: was cancelled by friend
"""
if (friend_number, file_number) in self._file_transfers:
tr = self._file_transfers[(friend_number, file_number)]
if not already_cancelled:
tr.cancel()
else:
tr.cancelled()
del self._file_transfers[(friend_number, file_number)]
else:
self._tox.file_control(friend_number, file_number, TOX_FILE_CONTROL['CANCEL'])
self.get_friend_by_number(friend_number).update_transfer_data(file_number,
FILE_TRANSFER_MESSAGE_STATUS['CANCELLED'])
def accept_transfer(self, item, path, friend_number, file_number, size, inline=False):
"""
:param item: transfer item.
:param path: path for saving
:param friend_number: friend number
:param file_number: file number
:param size: file size
:param inline: is inline image
"""
if not inline:
rt = ReceiveTransfer(path, self._tox, friend_number, size, file_number)
else:
rt = ReceiveToBuffer(self._tox, friend_number, size, file_number)
self._file_transfers[(friend_number, file_number)] = rt
self._tox.file_control(friend_number, file_number, TOX_FILE_CONTROL['RESUME'])
if item is not None:
rt.set_state_changed_handler(item.update)
self.get_friend_by_number(friend_number).update_transfer_data(file_number,
FILE_TRANSFER_MESSAGE_STATUS['INCOMING_STARTED'])
def send_screenshot(self, data):
"""
Send screenshot to current active friend
:param data: raw data - png
"""
friend = self._friends[self._active_friend]
st = SendFromBuffer(self._tox, friend.number, data, 'toxygen_inline.png')
self._file_transfers[(friend.number, st.get_file_number())] = st
tm = TransferMessage(MESSAGE_OWNER['ME'],
time.time(),
FILE_TRANSFER_MESSAGE_STATUS['OUTGOING'],
len(data),
'toxygen_inline.png',
friend.number,
st.get_file_number())
item = self.create_file_transfer_item(tm)
friend.append_message(tm)
st.set_state_changed_handler(item.update)
self._messages.scrollToBottom()
def send_file(self, path):
"""
Send file to current active friend
:param path: file path
"""
friend_number = self.get_active_number()
st = SendTransfer(path, self._tox, friend_number)
self._file_transfers[(friend_number, st.get_file_number())] = st
tm = TransferMessage(MESSAGE_OWNER['ME'],
time.time(),
FILE_TRANSFER_MESSAGE_STATUS['OUTGOING'],
os.path.getsize(path),
os.path.basename(path),
friend_number,
st.get_file_number())
item = self.create_file_transfer_item(tm)
st.set_state_changed_handler(item.update)
self._friends[self._active_friend].append_message(tm)
self._messages.scrollToBottom()
def incoming_chunk(self, friend_number, file_number, position, data):
if (friend_number, file_number) in self._file_transfers:
transfer = self._file_transfers[(friend_number, file_number)]
transfer.write_chunk(position, data)
if transfer.state:
if type(transfer) is ReceiveAvatar:
self.get_friend_by_number(friend_number).load_avatar()
self.set_active(None)
elif type(transfer) is ReceiveToBuffer:
inline = InlineImage(transfer.get_data())
i = self.get_friend_by_number(friend_number).update_transfer_data(file_number,
FILE_TRANSFER_MESSAGE_STATUS['FINISHED'],
inline)
if friend_number == self.get_active_number():
count = self._messages.count()
item = InlineImageItem(transfer.get_data())
elem = QtGui.QListWidgetItem()
elem.setSizeHint(QtCore.QSize(600, item.height()))
self._messages.insertItem(count + i + 1, elem)
self._messages.setItemWidget(elem, item)
else:
self.get_friend_by_number(friend_number).update_transfer_data(file_number,
FILE_TRANSFER_MESSAGE_STATUS['FINISHED'])
del self._file_transfers[(friend_number, file_number)]
def outgoing_chunk(self, friend_number, file_number, position, size):
if (friend_number, file_number) in self._file_transfers:
transfer = self._file_transfers[(friend_number, file_number)]
transfer.send_chunk(position, size)
if transfer.state:
del self._file_transfers[(friend_number, file_number)]
if type(transfer) is not SendAvatar:
if type(transfer) is SendFromBuffer and Settings.get_instance()['allow_inline']: # inline
inline = InlineImage(transfer.get_data())
self.get_friend_by_number(friend_number).update_transfer_data(file_number,
FILE_TRANSFER_MESSAGE_STATUS['FINISHED'],
inline)
self.set_active(self._active_friend)
else:
self.get_friend_by_number(friend_number).update_transfer_data(file_number,
FILE_TRANSFER_MESSAGE_STATUS['FINISHED'])
# -----------------------------------------------------------------------------------------------------------------
# Avatars support
# -----------------------------------------------------------------------------------------------------------------
def send_avatar(self, friend_number):
"""
:param friend_number: number of friend who should get new avatar
"""
avatar_path = (ProfileHelper.get_path() + 'avatars/{}.png').format(self._tox_id[:TOX_PUBLIC_KEY_SIZE * 2])
if not os.path.isfile(avatar_path): # reset image
avatar_path = None
sa = SendAvatar(avatar_path, self._tox, friend_number)
self._file_transfers[(friend_number, sa.get_file_number())] = sa
def incoming_avatar(self, friend_number, file_number, size):
"""
Friend changed avatar
:param friend_number: friend number
:param file_number: file number
:param size: size of avatar or 0 (default avatar)
"""
ra = ReceiveAvatar(self._tox, friend_number, size, file_number)
if ra.state != TOX_FILE_TRANSFER_STATE['CANCELED']:
self._file_transfers[(friend_number, file_number)] = ra
else:
self.get_friend_by_number(friend_number).load_avatar()
if self.get_active_number() == friend_number:
self.set_active(None)
def reset_avatar(self):
super(Profile, self).reset_avatar()
for friend in filter(lambda x: x.status is not None, self._friends):
self.send_avatar(friend.number)
def set_avatar(self, data):
super(Profile, self).set_avatar(data)
for friend in filter(lambda x: x.status is not None, self._friends):
self.send_avatar(friend.number)
# -----------------------------------------------------------------------------------------------------------------
# AV support
# -----------------------------------------------------------------------------------------------------------------
def get_call(self):
return self._call
call = property(get_call)
def call_click(self, audio=True, video=False):
"""User clicked audio button in main window"""
num = self.get_active_number()
if num not in self._call and self.is_active_online(): # start call
self._call(num, audio, video)
self._screen.active_call()
elif num in self._call: # finish or cancel call if you call with active friend
self.stop_call(num, False)
def incoming_call(self, audio, video, friend_number):
"""
Incoming call from friend. Only audio is supported now
"""
friend = self.get_friend_by_number(friend_number)
self._incoming_calls.add(friend_number)
if friend_number == self.get_active_number():
self._screen.incoming_call()
else:
friend.set_messages(True)
if video:
text = QtGui.QApplication.translate("incoming_call", "Incoming video call", None, QtGui.QApplication.UnicodeUTF8)
else:
text = QtGui.QApplication.translate("incoming_call", "Incoming audio call", None, QtGui.QApplication.UnicodeUTF8)
self._call_widget = avwidgets.IncomingCallWidget(friend_number, text, friend.name)
self._call_widget.set_pixmap(friend.get_pixmap())
self._call_widget.show()
    def accept_call(self, friend_number, audio, video):
        """Accept an incoming call with audio and/or video enabled.

        :param friend_number: number of the calling friend
        :param audio: accept with audio
        :param video: accept with video
        """
        self._call.accept_call(friend_number, audio, video)
        self._screen.active_call()
        self._incoming_calls.remove(friend_number)
        if hasattr(self, '_call_widget'):  # drop the incoming-call popup if shown
            del self._call_widget
    def stop_call(self, friend_number, by_friend):
        """Stop (finish or decline) the call with a friend.

        :param friend_number: number of the friend involved in the call
        :param by_friend: True when the friend ended the call, False when we did
        """
        if friend_number in self._incoming_calls:
            self._incoming_calls.remove(friend_number)
            self._screen.call_finished()
        self._call.finish_call(friend_number, by_friend)  # finish or decline call
        if hasattr(self, '_call_widget'):  # drop the incoming-call popup if shown
            del self._call_widget
def tox_factory(data=None, settings=None):
    """Build a new Tox instance.

    :param data: user data from a .tox file; None creates a fresh profile
    :param settings: profile settings dict; None falls back to the defaults
    :return: a new Tox instance configured from ``settings`` (and ``data``)
    """
    if settings is None:
        settings = Settings.get_default_settings()
    options = Tox.options_new()
    # Copy the plain network options straight from the settings dict.
    for option in ('udp_enabled', 'proxy_type', 'proxy_host', 'proxy_port',
                   'start_port', 'end_port', 'tcp_port'):
        setattr(options.contents, option, settings[option])
    if data:  # load existing profile
        options.contents.savedata_type = TOX_SAVEDATA_TYPE['TOX_SAVE']
        options.contents.savedata_data = c_char_p(data)
        options.contents.savedata_length = len(data)
    else:  # create new profile
        options.contents.savedata_type = TOX_SAVEDATA_TYPE['NONE']
        options.contents.savedata_data = None
        options.contents.savedata_length = 0
    return Tox(options)
| SergeyDjam/toxygen | src/profile.py | profile.py | py | 49,373 | python | en | code | null | github-code | 13 |
21766353022 | class Integer:
import math
    def __init__(self, value):
        """Store the wrapped integer value."""
        self.value = value
def roman_to_int(self, s):
rom_val = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
int_val = 0
for i in range(len(s)):
if i > 0 and rom_val[s[i]] > rom_val[s[i - 1]]:
int_val += rom_val[s[i]] - 2 * rom_val[s[i - 1]]
else:
int_val += rom_val[s[i]]
return int_val
@classmethod
def from_float(cls, value):
if not type(value) == float:
return f"value is not a float"
value = cls.math.floor(value)
return cls(value)
@classmethod
def from_roman(cls, value):
value = cls.roman_to_int(cls, value)
return cls(value)
@classmethod
def from_string(cls, value):
if not type(value) == str:
return "wrong type"
return cls(int(value))
def add(self, integer):
if not type(integer) == Integer:
return "number should be an Integer instance"
return self.value + integer.value
# Test code
# first_num = Integer(10)
# second_num = Integer.from_roman("IV")
#
# print(Integer.from_float("2.6"))
# print(Integer.from_string(2.6))
# print(first_num.add(second_num))
| DavidStoilkovski/python-oop | attributes-and-methods-python-oop/integer.py | integer.py | py | 1,288 | python | en | code | 0 | github-code | 13 |
21473492619 | counter_bake = int(input())
# Cooking-competition scoring: for each of `counter_bake` chefs, read grades
# until "Stop", print the chef's total, and track the running champion.
max_grade = 0
for i in range(0, counter_bake):
    chef = input()
    count_grade = 0  # NOTE(review): counted but never used afterwards
    sum_grade = 0
    is_stop = False
    while not is_stop:
        command = input()
        if command == 'Stop':
            is_stop = True
            break
        grade = int(command)
        count_grade += 1
        sum_grade += grade
    print(f'{chef} has {sum_grade} points.')
    if sum_grade > max_grade:
        champion = chef
        max_grade = sum_grade
        print(f'{chef} is the new number 1!')
# NOTE(review): if no chef ever scores more than 0 points, `champion` is never
# assigned and this line raises NameError — confirm the intended input domain.
print(f'{champion} won competition with {max_grade} points!')
21886119984 | import random
# Simulate n coin flips (0 or 1) and count how many came up 0
# (presumably 0 is the tracked side, e.g. heads — confirm with the author).
n = int(input('Введите кол монеток '))
m = []
result = 0
for i in range(0,n):
    random_num = round(random.randint(0,1))  # round() is redundant: randint already returns an int
    m.append(random_num)
    if random_num == 0 : result += 1
print (m)
print (result)
| KOMOKlazz/TEST | Homework/10.py | 10.py | py | 243 | python | en | code | 0 | github-code | 13 |
22862283323 | """
One useful package for web scraping in Python’s standard library is urllib, which contains tools for working with URLs.
The urllib.request module contains a function called urlopen() that can be used to open a URL within a program.
"""
import re
from urllib.request import urlopen
# Walkthrough: fetch a page and extract its <title> two different ways.
# Requires network access to olympus.realpython.org.
# Method 1 - Extract Text From HTML With String Methods
url = "http://olympus.realpython.org/profiles/aphrodite"
page = urlopen(url)
#print(f'pageObject: {page}')
# prints something like "<http.client.HTTPResponse object at 0x0000027959495F70>"
html_bytes = page.read()
#print(f'html_bytes: {html_bytes}')
# prints a html source code of the webpage at url, in one single line
html = html_bytes.decode("utf-8")
#print(f'HTML: {html}')
# prints a html source code of the webpage at url, in a better format
# NOTE(review): str.find returns -1 when the tag is absent, which would
# silently yield a wrong slice — fine for this known page, fragile otherwise.
start_index = html.find("<title>") + len("<title>")
end_index = html.find("</title>")
title = html[start_index:end_index]
print(title)
# Method 2 - Extract Text From HTML With Regular Expressions
url = "http://olympus.realpython.org/profiles/dionysus"
page = urlopen(url)
html = page.read().decode("utf-8")
pattern = "<title.*?>.*?</title.*?>"
match_results = re.search(pattern, html, re.IGNORECASE)
title = match_results.group()
title = re.sub("<.*?>", "", title) # Remove HTML tags
print(title)
72723201299 | import sqlite3
import os
def departments():
    """Return every row of the ``departments`` table as a list of tuples.

    Opens ``./task_3/ledger.db`` for the duration of the call and always
    closes the connection afterwards (the original leaked it).
    """
    database = sqlite3.connect('./task_3/ledger.db')
    try:
        cursor = database.cursor()
        cursor.execute("SELECT * FROM departments")
        # fetchall() already returns a list of row tuples — no copy loop needed
        return cursor.fetchall()
    finally:
        database.close()
def transactions():
    """Return every row of the ``operations`` table as a list of tuples.

    Opens ``./task_3/ledger.db`` for the duration of the call and always
    closes the connection afterwards (the original leaked it).
    """
    database = sqlite3.connect('./task_3/ledger.db')
    try:
        cursor = database.cursor()
        cursor.execute("SELECT * FROM operations")
        # fetchall() already returns a list of row tuples — no copy loop needed
        return cursor.fetchall()
    finally:
        database.close()
def calculator(*args):
    """Aggregate monthly income per department and print one row per month.

    Args:
        args[0]: department rows ``(id, start_year, end_year, name)``.
        args[1]: operation rows ``(id, year, month, day, department_id, income)``.

    Prints rows of the form ``[year, month, department_name, total]``.
    Records with an impossible date (day 0 or > 31, month > 12) or dated
    before the department's start year are skipped.
    """
    deps = args[0]
    trans = args[1]
    out_list = []
    for department in deps:
        dep_id, start_year, end_year, name = department
        month_total = []  # running [year, month, name, total] for the current month
        for transact in trans:
            tr_id, year, month, day, department_id, income = transact
            # Skip invalid dates (fix: the original's `month > 13` accepted month 13).
            if int(day) == 0 or int(day) > 31 or int(month) > 12 or int(year) < int(start_year):
                continue
            # Fix: the original compared ids with `is` (identity), which only
            # happens to work for CPython's small-int cache.
            if dep_id != department_id:
                continue
            # A new month (or year — fix: the original merged December into the
            # following January) closes out the running total.
            if month_total and (int(year) != int(month_total[0]) or int(month) > int(month_total[1])):
                out_list.append(month_total)
                month_total = []
            if month_total:
                month_total = [year, month, name, int(month_total[3]) + int(income)]
            else:
                month_total = [year, month, name, int(income)]
        # Fix: flush the final month of each department; the original silently
        # dropped it (and leaked the partial total into the next department).
        if month_total:
            out_list.append(month_total)
    for row in out_list:
        print(row)
if __name__ == "__main__":
    pass  # nothing to run directly; the functions above are imported elsewhere
| KonstantinLjapin/samples_and_tests | tests/korus_consulting/task_3/data_base_function.py | data_base_function.py | py | 1,479 | python | en | code | 0 | github-code | 13 |
8298273626 | # Definition for singly-linked list.
class ListNode:
    """A singly linked list node: ``val`` payload plus a ``next`` pointer."""
    def __init__(self, x):
        self.val, self.next = x, None
class Solution:
    """LeetCode 19 — Remove Nth Node From End of List.

    Given a linked list, delete the n-th node counted from the end and
    return the head. ``n`` is guaranteed to be valid, and the removal is
    done in a single pass.
    """
    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
        """Two-pointer one-pass removal using a sentinel before the head.

        ``ahead`` runs to the end while ``behind`` starts moving only once
        the gap between them exceeds ``n``; when ``ahead`` falls off the
        list, ``behind`` sits just before the node to delete.
        """
        sentinel = ListNode(0)
        sentinel.next = head
        ahead = sentinel
        behind = sentinel
        gap = n
        while ahead:
            if gap < 0:
                behind = behind.next
            ahead = ahead.next
            gap -= 1
        if behind.next:
            behind.next = behind.next.next
        return sentinel.next
| mniqxh/letcode | Linked List/lc19.py | lc19.py | py | 1,225 | python | en | code | 3 | github-code | 13 |
36582066112 | from selenium import webdriver
# Interactive WhatsApp Web bulk-message sender driven by Selenium.
driver = webdriver.Chrome()
driver.get('https://web.whatsapp.com/')
a=1
while a!=0:
    input('Scan QR code first and hit enter')
    name = input('Enter the name of user or group : ')
    msg = input('Enter your message : ')
    count = int(input('Enter the count : '))
    # Locate the chat by its title attribute and open it.
    user = driver.find_element_by_xpath('//span[@title = "{}"]'.format(name))
    user.click()
    # NOTE(review): '_3u328' / '_3M-N-' are obfuscated WhatsApp CSS class names
    # that change with every WhatsApp build — confirm they are still current.
    msg_box = driver.find_element_by_class_name('_3u328')
    for i in range(count):
        msg_box.send_keys(msg)
        button = driver.find_element_by_class_name('_3M-N-')
        button.click()
    a=int(input("Wanna text any other guy?(0/1):"))
    if a==0:
        print("Bye!\nSee you soon.")
| hastagAB/Awesome-Python-Scripts | send_whatsapp_message/whatsapp-message.py | whatsapp-message.py | py | 692 | python | en | code | 1,776 | github-code | 13 |
39240996616 | import os
from PySide6.QtWidgets import QDialog
from ide.logs import logger
try:
from data_ui.plugins import Ui_Dialog
except ImportError:
from ide.utils.ui_converter import ConvertationRecursive, ScriptOutput # pylint: disable=ungrouped-imports
ScriptOutput.logging_mode = True
ScriptOutput.print("File \"plugins.py\" not found in data_ui, starting .ui files conversion "
"(probably first application launch)")
ConvertationRecursive().run()
from data_ui.plugins import Ui_Dialog # pylint: disable=ungrouped-imports
from ide.expansion.plugins import PluginLoader # pylint: disable=ungrouped-imports
class PluginsDialog(QDialog):
"""
Dialog window which is opened when saving file has conflicts from outside
"""
def __init__(self, editor):
super().__init__()
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.editor = editor
self.selected = None
self.selected_enabled = False
self.do_enable = True
self.ui.close_btn.clicked.connect(self.trigger_closed)
self.ui.available_list.itemClicked.connect(self.trigger_available_clicked)
self.ui.installed_list.itemClicked.connect(self.trigger_installed_clicked)
self.ui.install_btn.clicked.connect(self.trigger_install_plugin)
logger.info("Opened plugin installation dialog")
self.refill_list()
self.update_sidebar()
def refill_list(self):
self.ui.installed_list.clear()
self.ui.available_list.clear()
for plugin in self.editor.app.config.plugins.enabled:
self.ui.installed_list.addItem(plugin)
for plugin in os.listdir("plugins"):
if os.path.exists(os.path.join("plugins", plugin, "__init__.py")):
if plugin not in self.editor.app.config.plugins.enabled:
self.ui.available_list.addItem(plugin)
def update_sidebar(self):
if self.selected is None:
self.ui.sidebar.setVisible(False)
elif self.selected_enabled:
self.ui.sidebar.setVisible(True)
self.ui.install_btn.setText("Disable")
self.do_enable = False
plugin_adapter = PluginLoader.load_plugin(self.selected, os.path.join("plugins", self.selected))
self.ui.plugin_name_lbl.setText(plugin_adapter.manifest["name"])
self.ui.plugin_desc_lbl.setText(plugin_adapter.manifest["description"])
elif not self.selected_enabled:
self.ui.sidebar.setVisible(True)
self.ui.install_btn.setText("Enable")
self.do_enable = True
plugin_adapter = PluginLoader.load_plugin(self.selected, os.path.join("plugins", self.selected))
self.ui.plugin_name_lbl.setText(plugin_adapter.manifest["name"])
self.ui.plugin_desc_lbl.setText(plugin_adapter.manifest["description"])
def trigger_closed(self) -> None:
"""Utility method. Bound to signal"""
self.close()
def trigger_installed_clicked(self) -> None:
"""Utility method. Bound to signal"""
if len(self.ui.installed_list.selectedItems()) == 1:
self.selected_enabled = True
self.selected = self.ui.installed_list.selectedItems()[0].text()
self.update_sidebar()
def trigger_available_clicked(self) -> None:
"""Utility method. Bound to signal"""
if len(self.ui.available_list.selectedItems()) == 1:
self.selected_enabled = False
self.selected = self.ui.available_list.selectedItems()[0].text()
self.update_sidebar()
def trigger_install_plugin(self) -> None:
"""Utility method. Bound to signal"""
if self.do_enable:
self.editor.app.config.plugins.enabled.append(self.selected)
else:
self.editor.app.config.plugins.enabled.remove(self.selected)
self.selected = None
self.refill_list()
self.update_sidebar()
| n00-name/12345 | ide/frames/dialogs/plugins/dialog.py | dialog.py | py | 3,999 | python | en | code | 0 | github-code | 13 |
2193097850 | import random
import pygame
class Apple:
def __init__(self):
self.apple = pygame.Surface((10, 10))
self.x = 0
self.y = 0
self.position = self.x, self.y
def onGrid_Random_Spawn(self):
x = random.randint(0, 590) // 10 * 10
y = random.randint(0, 590) // 10 * 10
self.x = (x // 10 * 10)
self.y = (y // 10 * 10)
self.position = self.x, self.y
| ThalesHenri/Snake_Game | apple.py | apple.py | py | 421 | python | en | code | 1 | github-code | 13 |
7100473025 | from django.shortcuts import get_object_or_404
from django.http import Http404
import django.template.context
from tagging.models import Tag, TaggedItem
from models import Post, Series, Category
def context_processor(target):
"""
Decorator that allows context processors with parameters to
be assigned (and executed properly) in a RequestContext
Example::
return render_to_response(
template_name,
context_instance=RequestContext(
request,
processors=[
test_processor1,
test_processor2(val1=test_val1, val2=test_val2),
]
)
)
"""
def cp_wrapper(*args, **kwargs):
if (
len(args) == 1 and len(kwargs) == 0) \
or (len(args) == 0 and len(kwargs) == 1 and 'request' in kwargs):
return target(*args, **kwargs)
else:
def get_processor(request):
return target(request, *args, **kwargs)
return get_processor
return cp_wrapper
@context_processor
def blog_posts_processor(request, year=None, month=None, category_slug=None,
series_slug=None, tag=None, require_featured=False, start_post=1,
max_posts=None,
posts_context_name='posts',
year_context_name='year', month_context_name='month',
category_context_name='category', series_context_name='series',
tag_context_name='tag'):
"""
Return a dictionary containing:
posts
archive year (if supplied)
archive month (if year and month supplied)
category (if a slug was supplied)
series (if a slug was supplied)
tag (if a tag was supplied)
"""
# is this user staff? Determines published post display
is_staff = request.user.is_staff
posts = Post.objects.build_query(require_published = not is_staff,
year=year, month=month, category_slug=category_slug,
series_slug=series_slug, tag=tag, require_featured=require_featured)
if max_posts != None:
posts = posts[start_post-1:max_posts]
elif start_post != None:
posts = posts[start_post-1:]
c = {
posts_context_name: posts,
}
if year:
c[year_context_name] = year
if month:
c[month_context_name] = month
if category_slug:
c[category_context_name] = get_object_or_404(
Category, slug=category_slug)
if series_slug:
c[series_context_name] = get_object_or_404(Series, slug=series_slug)
if tag:
c[tag_context_name] = tag
return c
@context_processor
def blog_post_processor(request, year, month, day, slug, context_name='post'):
"""
Return a dictionary containing a post
"""
# is this user staff? Determines published post display
is_staff = request.user.is_staff
try:
if is_staff:
post = Post.objects.get(publish_date__year=year,
publish_date__month=month, publish_date__day=day, slug=slug)
else:
post = Post.objects.get(is_published=True, publish_date__year=year,
publish_date__month=month, publish_date__day=day, slug=slug)
return {context_name: post}
except Post.DoesNotExist:
raise Http404
@context_processor
def blog_categories_processor(request, context_name='categories'):
"""
Return a dictionary containing categories
"""
return {context_name: Category.objects.all()}
@context_processor
def blog_category_processor(request, slug, context_name='category'):
"""
Return a dictionary containing a category
"""
# is this user staff? Determines published post display
is_staff = request.user.is_staff
posts = Post.objects.build_query(
require_published = not is_staff, category_slug=slug)
try:
category = Category.objects.get(slug=slug)
return{context_name: category}
except Category.DoesNotExist:
raise Http404
@context_processor
def blog_seriess_processor(request, context_name='seriess'):
"""
Return a dictionary containing seriess
"""
return {context_name: Series.objects.all()}
@context_processor
def blog_series_processor(request, slug, context_name='series'):
"""
Return a dictionary containing a series
"""
# is this user staff? Determines published post display
is_staff = request.user.is_staff
posts = Post.objects.build_query(
require_published = not is_staff, series_slug=slug)
try:
series = Series.objects.get(slug=slug)
return {context_name: series}
except Series.DoesNotExist:
raise Http404
@context_processor
def blog_tags_processor(request, context_name='tags'):
"""
Return a dictionary containing tags
"""
return {
context_name: Tag.objects.usage_for_model(Post)
}
@context_processor
def blog_tag_processor(request, tag, context_name='tag'):
"""
Return a dictionary containing a tag
"""
return {
context_name: get_object_or_404(Tag, name=tag),
}
| davisd/django-blogyall | blog/context_processors.py | context_processors.py | py | 5,097 | python | en | code | 2 | github-code | 13 |
5131172344 | import math
def solution(fees, records) :
intime = {}
result = {}
for r in records :
time, num, inout = r.split()
if inout == "IN" :
intime[num] = convert(time)
if num not in result :
result[num] = 0
else :
result[num] += convert(time) - intime[num]
del intime[num]
for key, val in intime.items() :
result[key] += 23 * 60 + 59 - val
answer = []
for key, val in sorted(result.items()) :
if val <= fees[0] :
answer.append(fees[1])
else :
answer.append(fees[1] + math.ceil((val - fees[0]) / fees[2]) * fees[3])
return answer
def convert(time) :
hh, mm = time.split(':')
return int(hh) * 60 + int(mm)
| jeongminllee/ProgrammersCodeTest | 프로그래머스/2/92341. 주차 요금 계산/주차 요금 계산.py | 주차 요금 계산.py | py | 774 | python | en | code | 0 | github-code | 13 |
14424006185 | from graph import Vertex, Edge, Graph
def dfs(g):
globals()['time'] = 0
for u in g.get_v():
u.color = "white"
u.parent = None
for u in g.get_v():
if u.color == "white":
dfs_visit(g, u)
def dfs_visit(g, u):
globals()['time'] += 1
u.d = time
u.color = "gray"
for v in g.adj(u):
if v.color == "white":
v.parent = u
dfs_visit(g, v)
u.color = "black"
globals()['time'] += 1
u.f = time
if __name__ == "__main__":
v = []
for i in range(8):
temp = Vertex(i,i,i)
temp.name = i
v.append(temp)
e = [ Edge(v[0], v[1]), Edge(v[0], v[4]), Edge(v[1], v[5]), Edge(v[2], v[3]),
Edge(v[2], v[5]), Edge(v[2], v[6]), Edge(v[3], v[6]), Edge(v[3], v[7]),
Edge(v[5], v[6]), Edge(v[6], v[7]) ]
| lancecopper/clrs | C22_C26/dfs.py | dfs.py | py | 843 | python | en | code | 1 | github-code | 13 |
74852841936 | import base64
import json
from django import template
from django.core.urlresolvers import reverse
from entity.models import KnowledgeServer
register = template.Library()
@register.simple_tag
def ks_info(ks, *args, **kwargs):
    """Render a short HTML summary of a KnowledgeServer: name, maintainer, URI."""
    parts = ["<p>" + ks.name]
    if hasattr(ks, "organization"):
        org = ks.organization
        parts.append(f'<br>Maintained by "<a href="{org.website}" target="_blank">{org.name}</a>"')
    parts.append(f'<br><a href="{ks.uri()}/" target="_blank">{ks.uri()}</a>')
    return "".join(parts)
@register.simple_tag
def version_instance_info(dataset, instances, *args, **kwargs):
    """Render HTML version/license info for each instance of a DataSet.

    Links to the browse/XML/JSON API views (keyed by the base64-encoded
    URIInstance) and, for unreleased versions, to the release view.
    NOTE(review): ``base64.encodestring`` is the Python 2 name (removed in
    Python 3.9; the Py3 spelling is ``encodebytes``) — confirm target runtime.
    """
    base64_DataSet_URIInstance = base64.encodestring(dataset.URIInstance).replace('\n','')
    ret_string = ''
    for instance in instances:
        ret_string += '<p>"' + instance.name + '" (<a href="' + reverse('api_dataset', args=(base64_DataSet_URIInstance,"html")) + '">browse the data</a> or'
        ret_string += ' get it in <a href="' + reverse('api_dataset', args=(base64_DataSet_URIInstance,"XML")) + '">XML</a> or '
        ret_string += '<a href="' + reverse('api_dataset', args=(base64_DataSet_URIInstance,"JSON")) + '">JSON</a>)<br>'
        ret_string += 'Version ' + ('' if dataset.version_released else '(<font color="red">not released</font>) ') + str(dataset.version_major) + '.' + str(dataset.version_minor) + '.' + str(dataset.version_patch) + ' - ' + str(dataset.version_date)
        if not dataset.licenses is None:
            ret_string += '<br>Licenses: '
            for l in dataset.licenses.all():
                ret_string += '<br> ' + l.name
        if dataset.version_released:
            ret_string += '</p>'
        else:
            ret_string += '<br>Click <a href="' + reverse('release_dataset', args=(base64_DataSet_URIInstance,)) + '" target="_blank">here</a> to release it.</p>'
    return ret_string
@register.simple_tag
def browse_json_data(actual_instance, exported_json, esn, *args, **kwargs):
    """Render the exported JSON of a model instance as browsable HTML.

    Unwraps the top-level entity object from ``exported_json`` (keyed by the
    entity name from ``esn``) and delegates to ``json_to_html``.
    """
    json_data = json.loads(exported_json)[esn.simple_entity.name]
    return json_to_html(actual_instance, json_data, esn)
def json_to_html(actual_instance, json_data, esn, indent_level=0):
    """Recursively render an entity's JSON export as an indented HTML tree.

    NOTE(review): every recursive call below passes only THREE positional
    arguments into this FOUR-parameter function, so each argument shifts one
    slot left (``json_data`` receives an esn node, ``esn`` receives the indent
    level, ...). This looks like a leftover from a signature change; the broad
    ``except`` clauses mask the resulting failures by returning "" or skipping
    children. Confirm intent before fixing.
    NOTE(review): ``esn.attribute = ""`` mutates the caller's esn node.
    """
    try:
        ret_html = ""
        if esn.attribute == "":
            # no attribute, I am at the entry point
            ret_html = (indent_level * "-- ") + " " + esn.simple_entity.name + ': "<a href="' + json_data["URIInstance"] + '">' + json_data[esn.simple_entity.name_field] +'</a>"<br>'
            ret_html += actual_instance.serialized_attributes(format = 'HTML')
        else:
            if esn.is_many:
                json_children = json_data[esn.attribute]
                esn.attribute = ""
                for json_child in json_children:
                    ext_json_child = {}
                    ext_json_child[esn.simple_entity.name] = json_child
                    ret_html += json_to_html(ext_json_child, esn, indent_level)
                return ret_html
            else:
                # there exist simple entities with no name
                try:
                    name = json_data[esn.simple_entity.name_field]
                except:
                    name = ""
                if name == "":
                    name = esn.attribute
                ret_html = (indent_level * "-- ") + " " + esn.simple_entity.name + ': "<a href="' + json_data["URIInstance"] + '">' + name +'</a>"<br>'
        indent_level+=1
        for esn_child_node in esn.child_nodes.all():
            try:
                if esn_child_node.attribute == "":
                    ret_html += json_to_html(json_data, esn_child_node, indent_level)
                else:
                    ret_html += json_to_html(json_data[esn_child_node.attribute], esn_child_node, indent_level)
            except:
                pass
        return ret_html
    except Exception as e:
        return ""
10488312724 | import datetime
import re
from discord.ext import commands
def get_datetime_obj(st: str) -> datetime.timedelta:
    """Parse a duration string like ``"1d2h3m4s"`` into a timedelta.

    Units may appear in any order and may repeat; repeated units are summed.
    (The original collapsed duplicates through ``dict(zip(...))``, silently
    keeping only the last occurrence of each unit, and its substring-based
    unit filter let junk tokens such as ``"sm"`` slip through and be ignored.)

    :param st: duration spec made of ``<int><unit>`` pairs, units in ``smhd``
    :raises commands.BadArgument: if the string is malformed
    :return: the accumulated ``datetime.timedelta``
    """
    amounts = [e for e in re.split(r"\D+", st) if e != ""]  # the numbers
    # Single-character unit letters only (fix: `e in "smhd"` matched substrings).
    units = [e for e in re.split(r"\d+", st) if len(e) == 1 and e in "smhd"]
    if " " in units or " " in amounts:
        raise commands.BadArgument("Please input the Rem correctly")
    if len(units) != len(amounts) or not units or not amounts:
        raise commands.BadArgument("Please input the date correctly -> Example:`15h2m` = 15 hours and 2 minutes")
    keyword = {"s": "seconds", "m": "minutes", "h": "hours", "d": "days"}
    res = datetime.timedelta()
    for unit, amount in zip(units, amounts):
        res += datetime.timedelta(**{keyword[unit]: int(amount)})
    return res
| tfkdn/OnyxVot | Cogs/reminderRewrite/get_datetime_obj.py | get_datetime_obj.py | py | 1,440 | python | en | code | 0 | github-code | 13 |
38804541423 | #-----flightRadar24-----#
from selenium import webdriver
import json
from selenium.webdriver.common.keys import Keys
import time
# Scrapes flightradar24.com for a user-supplied flight number and prints the
# flight status as JSON. Accumulator lists for the scraped text blocks:
lst = []
lst1 = []
lst2 = []
lst3 = []
#########---------------- Setting the path for chrome Driver---------------------------########
driver = webdriver.Chrome(executable_path="D:\\chromedriver_win32\\chromedriver.exe")
########------------------- opening url ---------------------------------------#########
driver.get("https://www.flightradar24.com/")
flightNo = input("Enter flight number")
#----------to fetch data---------#
input_element = driver.find_element_by_id("searchBox")
input_element.send_keys(flightNo)
time.sleep(2)
try:
    # Pick the second autocomplete suggestion and open the flight page.
    input_element.send_keys(Keys.ARROW_DOWN)
    input_element.send_keys(Keys.ARROW_DOWN)
    time.sleep(2)
    element1 = driver.find_element_by_class_name("rowButton.goto").click()
    time.sleep(3)
    for flightName in driver.find_elements_by_class_name("pnl-component.airline-info.appear"):
        lst2.append(flightName.text)
    lst3 = lst2[0].splitlines()
    for elements in driver.find_elements_by_class_name("scroll-wrapper"):
        lst.append(elements.text)
    lst1 = lst[0].splitlines()
    # NOTE(review): the line indices below assume a fixed panel layout on the
    # site; any flightradar24 redesign silently breaks them — confirm current.
    data = {
        "flightNumber": lst3[0],
        "flightName": lst3[1],
        "from": {
            "fromAirport": lst1[0],
            "fromCity": lst1[1],
            "scheduled_DepartureTime": lst1[4],
            "actual_DepartureTime": lst1[6]
        },
        "to": {
            "toAirport": lst1[7],
            "toCity": lst1[8],
            "scheduled_ArrivalTime": lst1[11],
            "actual_ArrivalTime": lst1[13]
        },
        "distanceTravelled": lst1[14],
        "distanceLeft": lst1[15]
    }
    json_format = json.dumps(data)
    print(json_format)
#------if flight not found-------#
except:
    # NOTE(review): bare except treats ANY failure (timeouts, layout changes)
    # as "flight not found".
    data1 = {
        "status": "Flight not found"
    }
    print(json.dumps(data1))
time.sleep(15)
driver.quit()
| kushalbajje/FlightStatusTracking | flightRadar24/main.py | main.py | py | 1,992 | python | en | code | 0 | github-code | 13 |
4364576139 | from pypyodbc import Connection
from exceptions import CreateTeamExceptions
from model.Error import Error
from model.Player import Player
from model.Game import Game
from model.PlayerDB import PlayerDB
from model.Team import Team
from model.TeamList import TeamList
from model.TeamListDB import TeamListDB
from model.TeamPlayerDB import TeamPlayerDB
from repository.PlayerRepository import PlayerRepository
from repository.TeamRepository import TeamRepository
from services.GameService import GameService
from services.ITeamService import ITeamService
from repository.TeamPlayerRepository import TeamPlayerRepository
class TeamService(ITeamService):
    """Creates, persists and retrieves teams built from a game's players.

    NOTE(review): the builder state (__single_team, __counter, __team_id,
    __team_suffix) lives on the instance and is never reset between calls,
    so one service instance appears intended to build teams for a single
    game only — confirm before reusing an instance.
    """
    def __init__(self):
        # Team-builder state.
        self.__team_id = 0
        self.__team_name = "Team-"
        self.__team_suffix = 64  # incremented before use; chr(65) == 'A'
        self.__single_team = []
        self.__team_list = []
        self.__counter = 1
        # Collaborating services / repositories.
        self.__game_service = GameService()
        self.__team_repository = TeamRepository()
        self.__player_repository = PlayerRepository()
        self.__team_player_repository = TeamPlayerRepository()
    def create_teams(self, game: Game):
        """Split ``game.players`` into equally sized teams.

        Players left over after the last full team are returned as
        ``additional_players``. Returns a TeamList on success, or an Error
        object when validation fails.
        """
        try:
            if not self.__is_player_count_valid(game):
                raise CreateTeamExceptions.CreateTeamExceptions.InvalidData(description="Total number of players are "
                                                                                        "less for team creation.",
                                                                            error_code=11)
            else:
                number_of_players_needed = self.__fetch_total_players_needed(game)
                number_of_players_registered = self.__fetch_total_players_registered(game)
                additional_players = []
                team_counter = 0
                for player in game.players:
                    self.__validate_player_data_boundary_conditions(player)
                    if self.__counter == number_of_players_needed:
                        # Current player completes a team.
                        self.__counter = 1
                        self.__single_team.append(player)
                        team_object = self.__create_team_object(self.__single_team, game)
                        self.__add_team(team_object)
                        team_counter += 1
                        self.__single_team = []
                    else:
                        if team_counter >= number_of_players_registered // number_of_players_needed:
                            # All full teams are made; the rest are extras.
                            additional_players.append(player)
                        else:
                            self.__single_team.append(player)
                            self.__counter += 1
                return TeamList(self.__team_list, len(self.__team_list), additional_players)
        except CreateTeamExceptions.CreateTeamExceptions.InvalidData as e:
            return Error(e.error_type, e.message, e.description)
    def save_teams(self, teams: TeamList, connection: Connection):
        """Persist teams, their players and the team/player links; extras are
        saved as players without a team."""
        team_db = self.__convert_team_obj_to_team_db_obj(teams)
        for team in team_db.teams:
            temp_team = Team.from_dict(team)
            team_id = self.__team_repository.insertIntoTeam(temp_team, connection)
            for player in temp_team.players:
                temp_player = PlayerDB.from_dict(player)
                player_id = self.__player_repository.insertIntoPlayer(temp_player, connection)
                team_player_db = TeamPlayerDB(player_id, team_id)
                self.__team_player_repository.insertIntoTeamPlayer(team_player_db, connection)
        for player in team_db.additional_players:
            temp_player = PlayerDB.from_dict(player)
            self.__player_repository.insertIntoPlayer(temp_player, connection)
    def get_teams(self, connection):
        """Return all persisted teams."""
        return self.__team_repository.getTeams(connection)
    def __is_player_count_valid(self, game) -> bool:
        """True when at least one full team can be formed from the game."""
        number_of_players_needed = self.__fetch_total_players_needed(game)
        total_players_registered = self.__fetch_total_players_registered(game)
        try:
            if total_players_registered / number_of_players_needed >= 1:
                return True
            else:
                return False
        except ZeroDivisionError:
            # A game type requiring 0 players means the type itself is invalid.
            raise CreateTeamExceptions.CreateTeamExceptions.InvalidData(description="Invalid Game Type",
                                                                        error_code=12)
    def __fetch_total_players_needed(self, game: Game):
        """Players required per team for this game type."""
        return self.__game_service.get_players_required_in_a_team(game.game_type)
    def __fetch_total_players_registered(self, game: Game) -> int:
        """Number of registered players; 0 when none are registered."""
        if game.players is None:
            return 0
        return self.__game_service.get_participants_count(game.players)
    def __create_team_object(self, team, game: Game):
        """Build the next Team ("Team-A", "Team-B", ...) from a player list.

        NOTE(review): after 26 teams the suffix walks past 'Z' into
        punctuation characters — confirm the expected team-count bound.
        """
        self.__team_id += 1
        self.__team_suffix = self.__team_suffix + 1
        return Team(self.__team_id, (self.__team_name + chr(self.__team_suffix)), game.game_type, team)
    def __add_team(self, team_object: Team):
        """Append a freshly built team to the accumulated team list."""
        self.__team_list.append(team_object)
    @staticmethod
    def __convert_team_obj_to_team_db_obj(teams: TeamList):
        """Convert the domain TeamList into its DB representation."""
        return TeamListDB(teams.teams, teams.total, teams.additional_players)
    @staticmethod
    def __validate_player_data_boundary_conditions(player: Player):
        """Reject players with missing or mistyped id/name fields."""
        if type(player.player_id) == str:
            raise CreateTeamExceptions.CreateTeamExceptions.InvalidData(message="Incorrect Data Format",
                                                                        description="Player id can not be "
                                                                                    "in string",
                                                                        error_code=21)
        elif player.player_name == "":
            raise CreateTeamExceptions.CreateTeamExceptions.InvalidData(message="Incorrect Data Format",
                                                                        description="Player name can not be "
                                                                                    "empty",
                                                                        error_code=22)
        elif player.player_id is None:
            raise CreateTeamExceptions.CreateTeamExceptions.InvalidData(message="Incorrect Data Format",
                                                                        description="Player id can not be "
                                                                                    "null",
                                                                        error_code=23)
        elif player.player_name is None:
            raise CreateTeamExceptions.CreateTeamExceptions.InvalidData(message="Incorrect Data Format",
                                                                        description="Player name can not be "
                                                                                    "null",
                                                                        error_code=24)
7829759010 | import os
import sys
from celery import Celery
from cl.lib.celery_utils import throttle_task
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cl.settings")
app = Celery("cl")
# Bump the recursion limit to 10× normal to account for really big chains. See:
# https://github.com/celery/celery/issues/1078
sys.setrecursionlimit(10000)
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
@app.task(bind=True)
@throttle_task("2/4s")
def debug_task(self) -> None:
    """Celery smoke-test task; rate-limited via ``throttle_task("2/4s")``."""
    print(f"Request: {self.request!r}")
@app.task(bind=True)
def fail_task(self) -> float:
    """Deliberately failing task — always raises ZeroDivisionError.

    Useful for verifying error reporting pipelines such as Sentry.
    """
    return 1 / 0
| freelawproject/courtlistener | cl/celery_init.py | celery_init.py | py | 815 | python | en | code | 435 | github-code | 13 |
599254027 | #!/usr/bin/python
import psycopg2
from config import config
import datetime
conn = None
def connect():
    """Connect to the PostgreSQL server and store the connection in the
    module-global ``conn`` (credentials come from ``config()``)."""
    params = config()
    print('Connecting to the PostgreSQL database...')
    global conn
    conn = psycopg2.connect(**params)
def disconnect():
    """Close the module-global connection opened by ``connect()``."""
    global conn
    conn.close()
    print('Database connection closed.')
def getdeminfo():
    """Fetch every ``body`` value from the ``democratcom2020`` table.

    Uses the module-global connection established by ``connect()``.
    """
    cursor = conn.cursor()
    cursor.execute("SELECT body from democratcom2020")
    rows = cursor.fetchall()
    cursor.close()
    return rows
def main():
    """Smoke test: open and immediately close a database connection."""
    connect()
    disconnect()
main()
| ting20000119/Most_frequent_words | r:Democrats_comments_counts/dbconnect.py | dbconnect.py | py | 703 | python | en | code | 0 | github-code | 13 |
6395555924 | import sqlite3
from utils import fmt
def create_connexion(db_file):
    """Open a connection to the SQLite database given by ``db_file``.

    Foreign-key enforcement is switched on for the returned connection.

    Args:
        db_file (str): path to the database file

    Returns:
        sqlite3.Connection | None: the connection, or None after printing
        the error when opening fails.
    """
    try:
        connexion = sqlite3.connect(db_file)
        # Enforce foreign-key constraints (off by default in SQLite).
        connexion.execute("PRAGMA foreign_keys = 1")
        return connexion
    except sqlite3.Error as error:
        print(error)
def init_db(use_test_data=None):
    """Initialise the database. If `use_test_data` is `True`, add test
    data. If `use_test_data` is `None`, ask the user interactively.
    Args:
        use_test_data (bool | None, optional): Whether to add test data. Defaults to None.
    Returns:
        sqlite3.Connection: Connection to the database
    """
    # Name of the DB file to create
    db_file = "data/hydrogen.db"
    # Open a connection to the DB
    conn = create_connexion(db_file)
    # Create the tables and add the default types
    fmt.pitalic("Initialisation de la DB...")
    exec_script(conn, "data/init_tables.sql")
    exec_script(conn, "data/default_types.sql")
    if use_test_data is None:
        # Prompt the user (O/N = yes/no in French)
        use_test_data = fmt.bool_input(
            "Voulez-vous ajouter des données de test ? (O/N) "
        )
    if use_test_data is True:
        # Add the test data
        exec_script(conn, "data/default_inserts.sql")
    return conn
def exec_script(conn: sqlite3.Connection, file: str):
    """Execute every SQL statement found in ``file`` against the database.

    Statements in ``file`` must be separated by semicolons.

    Args:
        conn (sqlite3.Connection): Open database connection.
        file (str): Path to the file holding the SQL statements.
    """
    # Load the whole script, then split it on ';' into single statements.
    with open(file, "r") as script:
        statements = script.read().split(";")
    # Run each statement, then persist the changes.
    cursor = conn.cursor()
    for statement in statements:
        cursor.execute(statement)
    conn.commit()
def exec_query(
    conn: sqlite3.Connection, query: str, args: tuple = None
) -> sqlite3.Cursor:
    """Run ``query`` on the database and hand back the cursor.

    No commit is issued, so this never persists modifications.

    Args:
        conn (sqlite3.Connection): Open database connection.
        query (str): SQL to execute.
        args (tuple, optional): Query parameters. Defaults to None.

    Returns:
        sqlite3.Cursor: Cursor positioned on the results.
    """
    cursor = conn.cursor()
    # An empty parameter tuple is equivalent to calling without parameters.
    parameters = args if args is not None else ()
    cursor.execute(query, parameters)
    return cursor
def drop_table(conn: sqlite3.Connection, table: str):
    """Remove ``table`` from the database if it exists.

    Args:
        conn (sqlite3.Connection): Open database connection.
        table (str): Name of the table to drop.
    """
    # NOTE: the table name is interpolated directly into the SQL;
    # callers must only pass trusted identifiers.
    conn.cursor().execute("DROP TABLE IF EXISTS " + table)
    conn.commit()
def drop_all_tables(conn: sqlite3.Connection):
    """Drop every application table, then the Commandes view.

    Tables are removed children-first so foreign-key references never
    block a drop.

    Args:
        conn (sqlite3.Connection): Open database connection.
    """
    fmt.pitalic("Suppression des tables...")
    for table in (
        "CommandesClients",
        "Navires",
        "Transporteurs",
        "Clients",
        "Commandes_base",
        "Usines",
        "Types",
    ):
        drop_table(conn, table)
    conn.cursor().execute("DROP VIEW IF EXISTS Commandes")
def show_results(cursor: sqlite3.Cursor) -> None:
    """Display the results of a database query in a formatted table,
    then wait for the user to press Enter.
    Args:
        cursor (sqlite3.Cursor): Cursor holding the results to display
    """
    # Column names come from the cursor's description metadata.
    headers = [desc[0] for desc in cursor.description]
    fmt.clear()
    results = cursor.fetchall()
    fmt.print_table(results, headers)
    if not results:
        fmt.pwarn("Aucune données trouvées, vérifiez votre requête !")
    # Wait for the user
    fmt.pblink("Appuyez sur Entrée pour continuer...", end="")
    input()
def insert_data(conn: sqlite3.Connection, table: str, data: dict) -> Exception:
    """Insert ``data`` as one row of ``table``.

    Values are bound positionally, in the dict's iteration order.

    Args:
        conn (sqlite3.Connection): Open database connection.
        table (str): Target table name.
        data (dict): Column values for the new row.

    Returns:
        Exception: The sqlite3 error when the insert failed, None otherwise.
    """
    placeholders = ", ".join("?" for _ in data)
    try:
        conn.cursor().execute(
            f"INSERT INTO {table} VALUES ({placeholders})", tuple(data.values())
        )
    except (sqlite3.OperationalError, sqlite3.IntegrityError) as error:
        return error
    conn.commit()
    return None
def update_data(
    conn: sqlite3.Connection, table: str, filters: dict, data: dict
) -> Exception:
    """Update rows of ``table`` matching ``filters`` with the values in
    ``data``.

    A filter value may start with '<' or '>' to express an inequality;
    otherwise equality is used. Entries whose value is None are dropped.
    NOTE(review): both ``filters`` and ``data`` are mutated in place
    (None entries deleted, numeric strings converted to int).

    Args:
        conn (sqlite3.Connection): Open database connection.
        table (str): Name of the table to update.
        filters (dict): Column -> value conditions selecting the rows.
        data (dict): Column -> new value assignments.

    Returns:
        Exception: sqlite3 error on failure, None on success.
    """
    cursor = conn.cursor()
    # Drop None-valued filters and assignments.
    for k, v in filters.copy().items():
        if v is None:
            del filters[k]
    for k, v in data.copy().items():
        if v is None:
            del data[k]
    q_mark = []
    taille_data = len(data)
    # Build the SET clause with numbered placeholders ?1..?N.
    for i, key in enumerate(data.keys()):
        if data[key] is not None:
            q_mark.append(f"{key} = ?{i + 1}")
            try:
                data[key] = int(data[key])
            except ValueError:
                pass
    q_mark = ", ".join(q_mark)
    # Build and execute the WHERE clause (placeholders continue after
    # the SET clause's numbering).
    args = []
    for i, key in enumerate(filters.keys()):
        if filters[key] is not None:
            first_char = filters[key][0]
            if first_char in ["<", ">"]:
                # Leading '<' or '>' turns the condition into an inequality.
                args.append(f"{key} {first_char} ?{i + 1 + taille_data}")
                filters[key] = filters[key][1:]
            else:
                args.append(f"{key} = ?{i + 1 + taille_data}")
            try:
                filters[key] = int(filters[key])
            except ValueError:
                pass
    args = " AND ".join(args)
    try:
        cursor.execute(
            f"UPDATE {table} SET {q_mark} WHERE {args}",
            tuple(list(data.values()) + list(filters.values())),
        )
    except (sqlite3.OperationalError, sqlite3.IntegrityError) as e:
        return e
    conn.commit()
    return None
def delete_data(conn: sqlite3.Connection, table: str, filters: dict) -> Exception:
    """Delete the rows of ``table`` that match every condition in
    ``filters``.

    NOTE(review): ``filters`` is mutated in place (None entries deleted,
    numeric strings converted to int).

    Args:
        conn (sqlite3.Connection): Open database connection
        table (str): Name of the table
        filters (dict): Column -> value search conditions

    Returns:
        Exception: sqlite3 error (or "no matching data" Exception) on
        failure, None on success.
    """
    cursor = conn.cursor()
    # Drop None-valued filters.
    for k, v in filters.copy().items():
        if v is None:
            del filters[k]
    # Build and execute the query with numbered placeholders.
    args = []
    for i, key in enumerate(filters.keys()):
        if filters[key] is not None:
            args.append(f"{key} = ?{i + 1}")
            try:
                filters[key] = int(filters[key])
            except ValueError:
                pass
    args = " AND ".join(args)
    try:
        cursor.execute(f"DELETE FROM {table} WHERE {args}", tuple(filters.values()))
    except (sqlite3.OperationalError, sqlite3.IntegrityError) as e:
        return e
    if cursor.rowcount == 0:
        # Nothing matched, so nothing was deleted; report it as an error.
        return Exception("Aucune données correspondante trouvée")
    conn.commit()
def check_exists(conn: sqlite3.Connection, table: str, attr: tuple) -> bool:
    """Tell whether ``table`` holds a row whose column ``attr[0]`` equals
    ``attr[1]``.

    Args:
        conn (sqlite3.Connection): Open database connection
        table (str): Table to search
        attr (tuple): (column name, expected value) pair

    Returns:
        bool: True when a matching row exists, False otherwise
    """
    column, value = attr
    row = conn.cursor().execute(
        f"SELECT * FROM {table} WHERE {column} = ?", (value,)
    ).fetchone()
    return row is not None
| comejv/uni-projects | INF403/utils/db.py | db.py | py | 8,276 | python | fr | code | 2 | github-code | 13 |
22757225375 | from collections import namedtuple
# One spreadsheet-style cell: position (x, y), category, emptiness flag
# and a key used for sorting rows.
Cell = namedtuple('Cell', ['key', 'x', 'y', 'type', 'is_empty', 'sortkey'])
def cells():
    """Yield one Cell per comma-separated field found in matrix.txt.

    Each line of matrix.txt must have exactly three whitespace-separated
    sections (dims, flags, metrics); each section is itself a
    comma-separated list of fields. A field is considered empty when it
    is '' or '0'.

    NOTE(review): the file handle is never closed explicitly; it is only
    released by garbage collection.
    """
    types = ['dim', 'flag', 'metric']
    for i, row in enumerate(open('matrix.txt')):
        # Unpack requires exactly three sections per row.
        dims, flags, metrics = sections = row.split()
        j = 0
        sortkey = '%s %s' % (flags, dims)
        for stype, sect in zip(types, sections):
            for field in sect.split(','):
                yield Cell('e-%d-%d' % (j, i),
                           j,
                           i,
                           stype,
                           field == '' or field == '0',
                           sortkey)
                j += 1
| tuulos/sf-python-meetup-sep-2013 | data/utils.py | utils.py | py | 670 | python | en | code | 17 | github-code | 13 |
21571699333 | import math
import copy
# Price multipliers per outdoor-temperature bracket for each heating mode.
eco = {10: 1.6, 20:0.8, 21:0.4}
comfort = {0:11.4, 10:2.4, 20:1.6, 21:0.8}

# max 8 hours of "off"
def recursive(temp, prices, curr_hour, curr_price, curr_comfort, curr_sol : list, best):
    """Exhaustively search (with pruning) for the cheapest 24h heating plan.

    Each hour is assigned "off", "eco" or "comf"; an hour's cost is the
    hourly price times a multiplier chosen from the mode's temperature
    bracket. A plan is feasible when it accumulates at least 124 comfort
    points while using "off" at most 8 hours.

    Args:
        temp (list): Hourly outdoor temperatures (24 entries).
        prices (list): Hourly energy prices (24 entries).
        curr_hour (int): Hour currently being decided (0-23).
        curr_price (float): Cost accumulated so far.
        curr_comfort (int): Comfort points accumulated so far.
        curr_sol (list): Partial assignment, one entry per hour.
        best (tuple): Best complete (price, comfort, plan) found so far.

    Returns:
        tuple: Best (price, comfort, plan) found in this subtree.
    """
    print(" "*curr_hour, curr_hour, curr_sol)  # debug trace of the search
    if curr_price > best[0]:  # branch-and-bound: already worse than best
        return (0, 0, curr_sol)
    if curr_sol.count("off") > 8: # discard solution
        return (0, 0, curr_sol)
    if curr_comfort >= 124: # solution found
        return (curr_price, curr_comfort, curr_sol)
    if curr_hour == 23:
        if curr_comfort < 124:
            return (0, 0, curr_sol)
        else:
            return (curr_price, curr_comfort, curr_sol)
    off_price = 1*prices[curr_hour]
    eco_price = eco[10]*prices[curr_hour] if temp[curr_hour] < 10 else eco[20]*prices[curr_hour] if temp[curr_hour] < 20 else eco[21]*prices[curr_hour]
    # BUG FIX: the warmest bracket previously read the module-level `price`
    # list instead of the `prices` parameter.
    comf_price = comfort[0]*prices[curr_hour] if temp[curr_hour] < 0 else comfort[10]*prices[curr_hour] if temp[curr_hour] < 10 else comfort[20]*prices[curr_hour] if temp[curr_hour] < 20 else comfort[21]*prices[curr_hour]
    # choose "off" for this hour (only explore if off_price < eco_price)
    if off_price < eco_price:
        off_sol = copy.deepcopy(curr_sol)
        off_sol[curr_hour] = "off"
        # BUG FIX: recurse with off_sol — previously the unmodified
        # curr_sol was passed, so "off" was never actually tried.
        off_ret = recursive(temp, prices, curr_hour+1, curr_price+off_price, curr_comfort, off_sol, best)
    else: off_ret = (curr_price, curr_comfort, curr_sol)
    # choose "eco" for this hour (only explore if eco price < comf_price)
    if eco_price < comf_price:
        eco_sol = copy.deepcopy(curr_sol)
        eco_sol[curr_hour] = "eco"
        eco_ret = recursive(temp, prices, curr_hour+1, curr_price+eco_price, curr_comfort+4, eco_sol, best)
    else: eco_ret = (curr_price, curr_comfort, curr_sol)
    # choose "comfort" for this hour
    comf_sol = copy.deepcopy(curr_sol)
    comf_sol[curr_hour] = "comf"
    comf_ret = recursive(temp, prices, curr_hour+1, curr_price+comf_price, curr_comfort+8, comf_sol, best)
    best_sol = min(comf_ret, eco_ret, off_ret, key=lambda x: x[0])
    if best_sol[1] >= 124 and best_sol[0] < best[0]:
        best = best_sol
    return best
# Hourly energy prices for the day (24 values, hour 0 first).
price = [185.90
        ,150.00
        ,127.50
        ,114.60
        ,114.10
        ,111.10
        ,112.60
        ,114.50
        ,125.02
        ,131.97
        ,141.80
        ,138.00
        ,115.60
        ,124.99
        ,125.02
        ,143.00
        ,141.00
        ,156.17
        ,186.03
        ,193.15
        ,187.95
        ,182.30
        ,175.12
        ,140.38
        ]
temp = [5,4,2,-1,3,6,7,8,8,9,11,13,15,17,19,21,19,16,16,13,11,9,7,6] # day's temperatures
# Run the exhaustive search starting from an empty 24-hour plan with no
# best solution yet (infinite price).
recursive(temp, price, 0, 0, 0, [' ' for _ in range(24)], (math.inf, 0, []))
| luciusvinicius/personal-sauna | test_exhaustive_pruning.py | test_exhaustive_pruning.py | py | 2,642 | python | en | code | 0 | github-code | 13 |
19214732090 | # <table class="userInfo"><tbody><tr><td>ФИО:</td><td><strong>Азаров Дмитрий Викторович</strong></td></tr><tr><td>Пол:</td><td><strong>муж</strong></td></tr><tr><td>Дата рождения:</td><td><strong>03/05/1986</strong></td></tr><tr><td>Место рождения:</td><td><strong>с.Яр-Сале Ямальского р-на Тюменьской области</strong></td></tr><tr><td>Табельный номер:</td><td><strong>119221</strong></td></tr></tbody></table>
import requests
import dict_users
import time
from bs4 import BeautifulSoup
def parser(user_id): # this all had to be wrapped in a function so module.function() can be called after importing
    """Log in to the airline education portal and print the scraped
    user-info table and user drop-down menu.

    SECURITY NOTE(review): the username and password are hardcoded below
    (the dict_users lookup by ``user_id`` is commented out) — confirm
    this is intentional and remove the credentials from source control.
    """
    url = 'https://edu.rossiya-airlines.com/nalet/'
    s = requests.Session()
    # Login form fields; 'войти' is the submit button label ("log in").
    data = {
        'refer': 'https://edu.rossiya-airlines.com//',
        'login': '1',
        'user_id': '',
        'backend_url': 'https://sup.rossiya-airlines.com:8080',
        'username': '119221', # dict_users.users[user_id]['tab_number'], # '119229',
        'userpass': '2DH64rf2', # dict_users.users[user_id]['password'], # 'Parshina15',
        'domain': 'stc.local',
        'submit': 'войти'
    }
    response = s.post(url, data=data, headers=dict(Referer=url)) # work_plan = response 200
    soup_html = BeautifulSoup(response.content, 'html.parser') # .find_all('div', {'class': ['dhx_cal_data']})
    # First table with class 'userInfo' holds the profile fields.
    user_info = soup_html.select('.userInfo')[0]
    for tr in user_info:
        for td in tr:
            print(td.text)
            # for i in td:
            # print(i.text)
    # Second dropdown-user menu holds further user entries.
    menu = soup_html.select('.dropdown-menu.dropdown-user')[1]
    for tr in menu:
        for td in tr:
            for i in td:
                for t in i:
                    if str(t).isalpha():
                        print(t)
                        continue
                    for m in t:
                        # Skip image tags; print everything else.
                        if 'src' in str(m):
                            continue
                        else:
                            print(m)
                    # if not str(t).isalpha():
                    # continue
                    # else:
                    # print(t)
parser(157758328)
| azarovdimka/python | telebot/userinfo.py | userinfo.py | py | 2,282 | python | ru | code | 1 | github-code | 13 |
70166213459 | from framework.print.buffer import PrintBuffer
class ScanView(object):
    """
    Wrapper around clang's 'scan-view' utility, which displays
    'scan-build' results nicely in a browser.
    """
    def __init__(self, binary):
        # 'binary' is a dict describing the scan-view executable.
        self.binary_path = binary['path']
        self.binary_version = binary['version']

    def launch_instructions(self, result_dir):
        """Return a banner telling the user how to browse the results."""
        buf = PrintBuffer()
        buf.separator()
        buf.add("Full details can be seen in a browser by running:\n")
        buf.add(" $ %s %s\n" % (self.binary_path, result_dir))
        buf.separator()
        return str(buf)
| jarret/bitcoin_helpers | framework/clang/scan_view.py | scan_view.py | py | 590 | python | en | code | 0 | github-code | 13 |
3883157707 | import requests
# OpenWeatherMap One Call query parameters.
# NOTE(review): 'appid' is empty — a valid API key must be supplied or
# the request below will fail authorization.
param = {
    "lat": -0.1,
    "lon": 0.51,
    "appid": "",
    "exclude": "current,minutely,daily"
}
# Forecast hours to inspect (next 13 hours).
hours = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
response = requests.get(url="https://api.openweathermap.org/data/2.5/onecall", params=param)
response.raise_for_status()
data = (response.json())
will_rain = False
for x in hours:
    hourly_waether = data["hourly"][x]["weather"]
    weather_id = hourly_waether[0]["id"]
    print(weather_id)
    # Condition codes below 700 indicate precipitation of some kind.
    if weather_id < 700:
        will_rain = True
    else:
        print(hourly_waether[0]["description"])
if will_rain:
    print("Bring an umbrella")
| Sidakveer/Intermediate_projects_2 | weather_tracking/main.py | main.py | py | 625 | python | en | code | 0 | github-code | 13 |
34278637751 | from datetime import date, datetime, timedelta
from decorators import catch_json_parse_errors
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User as AuthUser
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.shortcuts import render_to_response
from scheduler.httpadapters import PeriodHttpAdapter
from scheduler.utilities import ScheduleTools
from tools import CurrentObsXML
from models import *
from utilities import *
from scheduler.models import User as NellUser
from nell.tools import IcalMap
from nell.utilities import TimeAccounting, TimeAgent
from users.utilities import get_requestor
from nell.utilities.notifiers import SchedulingNotifier, Notifier, Email as EmailMessage
from nell.utilities.FormatExceptionInfo import formatExceptionInfo, printException, JSONExceptionInfo
from reversion import revision
from settings import DATABASES, DEBUG
import simplejson as json
import twitter
def current_obs_xml(request, *args, **kws):
    """Serve the current-observation report as an XML document."""
    generator = CurrentObsXML.CurrentObsXML()
    ok, xml = generator.getXMLString()
    if not ok:
        xml = "Error"
    return HttpResponse(xml, mimetype="text/xml")
@login_required
def load_nubbles(request):
    """Serve the Nubbles scheduling UI to administrators.

    Non-admin users are redirected to their profile page.
    """
    requestor = get_requestor(request)
    if requestor.isAdmin():
        return render_to_response("war/Nubbles.html", {})
    else:
        # BUG FIX: the redirect was created but not returned, so Django
        # received None from this view for non-admin users.
        return HttpResponseRedirect('/profile')
@revision.create_on_success
@catch_json_parse_errors
def receivers_schedule(request, *args, **kws):
    """
    For a given period, specified by a start date and duration, show
    all the receiver changes. Receiver changes are aligned with maintenance
    days.

    GET params: startdate ('%Y-%m-%d %H:%M:%S'), duration (days, int).
    Returns JSON with the schedule, its diff, maintenance dates, and the
    unavailable / known receiver lists.
    """
    # interpret the inputs
    startdate = request.GET.get("startdate", None)
    startdate = datetime.strptime(startdate, '%Y-%m-%d %H:%M:%S') if startdate else None
    duration = request.GET.get("duration", None)
    duration = int(duration) if duration else duration
    # use the input to get the basic rx schedule
    schedule = Receiver_Schedule.extract_schedule(startdate, duration)
    jsonschd = Receiver_Schedule.jsondict(schedule)
    # some clients also need the diff schedule
    diff = Receiver_Schedule.diff_schedule(schedule)
    jsondiff = Receiver_Schedule.jsondict_diff(diff).get("diff_schedule", None)
    # get the dates for maintenace that cover from the start of this
    # rcvr schedule.
    maintenance = [TimeAgent.dt2str(p.start) for p in Period.objects.filter(
                       session__observing_type__type = "maintenance"
                     , start__gte = startdate).order_by("start")]
    # which receivers are temporarily unavailable?
    unavailable = [r.jsondict() for r in Receiver.objects.filter(available = False).order_by("freq_low")]
    # clients want to also know all the latest rcvrs
    rcvrs = [r.jsondict() for r in Receiver.objects.all().order_by("freq_low") \
        if r.abbreviation != "NS"]
    return HttpResponse(
        json.dumps({"schedule" : jsonschd
                  , "diff": jsondiff
                  , "maintenance": maintenance
                  , "unavailable": unavailable
                  , "receivers" : rcvrs})
      , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def rcvr_available_toggle(request, *args, **kws):
    """Toggles the state of the given receiver's availability.

    POST param: rcvr (receiver abbreviation). Returns JSON success or an
    error payload when the receiver name is invalid.
    """
    # NOTE(review): the bare except deliberately maps any lookup failure
    # to an 'Invalid Input' response.
    try:
        rcvr = Receiver.get_rcvr(request.POST.get("rcvr", None))
    except:
        error = "Invalid Input."
        msg = "Invalid input: %s" % request.POST.get("rcvr", None)
        return HttpResponse(json.dumps({'error': error, 'message': msg})
                          , mimetype = "text/plain")
    # here comes the giant hit to the database; ready?
    rcvr.available = not rcvr.available
    rcvr.save()
    # how was that?
    revision.comment = get_rev_comment(request, None, "rcvr_available_toggle")
    return HttpResponse(json.dumps({'success':'ok'})
                      , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def rcvr_schedule_toggle_rcvr(request, *args, **kws):
    """
    Toggles a rcvr on all the dates in the given date range.
    For a given date, if a rcvr is up, it goes down and vice versa.

    POST params: from, to ('%m/%d/%Y %H:%M:%S'), rcvr (abbreviation).
    """
    try:
        fromDt = datetime.strptime(request.POST.get("from", None)
                                 , "%m/%d/%Y %H:%M:%S")
        toDt = datetime.strptime(request.POST.get("to", None)
                                 , "%m/%d/%Y %H:%M:%S")
        rcvr = Receiver.get_rcvr(request.POST.get("rcvr", None))
    except:
        error = "Invalid Inputs."
        msg = "One of the following are invalid inputs: %s, %s, %s" % \
            (request.POST.get("from", None)
           , request.POST.get("to", None)
           , request.POST.get("rcvr", None))
        return HttpResponse(json.dumps({'error': error, 'message': msg})
                          , mimetype = "text/plain")
    success, msg = Receiver_Schedule.toggle_rcvr(fromDt, rcvr, endDt=toDt)
    revision.comment = get_rev_comment(request, None, "shift_rcvr_schedule")
    if success:
        return HttpResponse(json.dumps({'success':'ok'})
                          , mimetype = "text/plain")
    else:
        error = "Error Toggling Receiver."
        return HttpResponse(json.dumps({'error': error, 'message': msg})
                          , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def rcvr_schedule_shift_date(request, *args, **kws):
    """
    Moves an existing receiver change to another date.

    POST params: from, to ('%m/%d/%Y %H:%M:%S').
    """
    try:
        fromDt = datetime.strptime(request.POST.get("from", None)
                                 , "%m/%d/%Y %H:%M:%S")
        toDt = datetime.strptime(request.POST.get("to", None)
                                 , "%m/%d/%Y %H:%M:%S")
    except:
        error = "Invalid Inputs."
        msg = "One of the following are invalid inputs: %s, %s" % \
            (request.POST.get("from", None)
           , request.POST.get("to", None))
        return HttpResponse(json.dumps({'error': error, 'message': msg})
                          , mimetype = "text/plain")
    success, msg = Receiver_Schedule.shift_date(fromDt, toDt)
    revision.comment = get_rev_comment(request, None, "shift_rcvr_schedule")
    if success:
        return HttpResponse(json.dumps({'success':'ok'})
                          , mimetype = "text/plain")
    else:
        error = "Error shifting date of Receiver Change."
        return HttpResponse(json.dumps({'error': error, 'message': msg})
                          , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def rcvr_schedule_delete_date(request, *args, **kws):
    """
    Removes an existing receiver change from the receiver schedule.

    POST param: startdate ('%m/%d/%Y %H:%M:%S').
    """
    try:
        dateDt = datetime.strptime(request.POST.get("startdate", None)
                                 , "%m/%d/%Y %H:%M:%S")
    except:
        error = "Invalid Inputs."
        msg = "Invalid date: %s" % request.POST.get("startdate", None)
        return HttpResponse(json.dumps({'error': error, 'message': msg})
                          , mimetype = "text/plain")
    success, msg = Receiver_Schedule.delete_date(dateDt)
    revision.comment = get_rev_comment(request, None, "delete_rcvr_schedule")
    if success:
        return HttpResponse(json.dumps({'success':'ok'})
                          , mimetype = "text/plain")
    else:
        error = "Error deleting date of Receiver Change."
        return HttpResponse(json.dumps({'error': error, 'message': msg})
                          , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def rcvr_schedule_add_date(request, *args, **kws):
    """
    Adss a receiver change date to the receiver schedule.

    POST param: startdate ('%m/%d/%Y %H:%M:%S').
    """
    try:
        dateDt = datetime.strptime(request.POST.get("startdate", None)
                                 , "%m/%d/%Y %H:%M:%S")
    except:
        error = "Invalid Inputs."
        msg = "Invalid start date: %s" % request.POST.get("startdate", None)
        return HttpResponse(json.dumps({'error': error, 'message': msg})
                          , mimetype = "text/plain")
    success, msg = Receiver_Schedule.add_date(dateDt)
    revision.comment = get_rev_comment(request, None, "add_rcvr_schedule")
    if success:
        return HttpResponse(json.dumps({'success':'ok'})
                          , mimetype = "text/plain")
    else:
        error = "Error adding date of Receiver Change."
        return HttpResponse(json.dumps({'error': error, 'message': msg})
                          , mimetype = "text/plain")
def isFriend(user):
    """True when the user has a staff auth account and is not 'dss'."""
    account = user.auth_user
    staff = account.is_staff if account is not None else False
    return staff and user.username != "dss"
@revision.create_on_success
@catch_json_parse_errors
def get_options(request, *args, **kws):
    """Serve JSON option lists for UI drop-downs, keyed by ?mode=.

    Supported modes: project_codes, users, friends, session_handles,
    windowed_session_handles, session_names, periods, semesters,
    receivers. Unknown modes return an empty response.

    SECURITY NOTE(review): the semester filters below are assembled as
    strings and passed to eval(); semesters come from the request, so
    this should be replaced with Q-object composition.
    """
    mode = request.GET.get("mode", None)
    if mode == "project_codes":
        semesters = request.GET.get("semesters")
        notcomplete = request.GET.get("notcomplete")
        sponsor = request.GET.get("sponsor")
        if semesters is not None and notcomplete is not None and sponsor is not None:
            notcompleteFlt = notcomplete == 'Not Complete'
            # '[A, B]' -> ['A', 'B']
            semesters = semesters.replace('[', '').replace(']', '').split(', ')
            filter = " | " .join(["Q(semester__semester = '%s')" % s for s in semesters])
            projects = Project.objects.filter(eval(filter)).order_by('pcode')
            if notcomplete != 'All':
                projects = projects.filter(complete = not notcompleteFlt).order_by('pcode')
            if sponsor != 'All' and sponsor != '':
                projects = projects.filter(sponsor__abbreviation = sponsor).order_by('pcode')
        else:
            projects = Project.objects.order_by('pcode')
        return HttpResponse(
            json.dumps({'project codes': [p.pcode for p in projects]
                      , 'project ids': [p.id for p in projects]})
          , mimetype = "text/plain")
    elif mode == "users":
        users = User.objects.order_by('last_name')
        return HttpResponse(
            json.dumps({'users': ["%s, %s" % (u.last_name, u.first_name) \
                                  for u in users]
                      , 'ids': [u.id for u in users]})
          , mimetype = "text/plain")
    elif mode == "friends":
        # Friends are staff accounts other than the 'dss' user.
        users = [u for u in User.objects.all().order_by('last_name')
                   if isFriend(u)]
        return HttpResponse(
            json.dumps({'friends': ["%s, %s" % (u.last_name, u.first_name) \
                                    for u in users]
                      , 'ids': [u.id for u in users]})
          , mimetype = "text/plain")
    elif mode == "session_handles":
        semesters = request.GET.get("semesters")
        enabled = request.GET.get("enabled")
        notcomplete = request.GET.get("notcomplete")
        sponsor = request.GET.get("sponsor")
        if semesters is not None and enabled is not None and notcomplete is not None and sponsor is not None:
            notcompleteFlt = notcomplete == 'Not Complete'
            enabledFlt = enabled == 'Enabled'
            semesters = semesters.replace('[', '').replace(']', '').split(', ')
            filter = " | " .join(["Q(project__semester__semester = '%s')" % s for s in semesters])
            ss = Sesshun.objects.filter(eval(filter))
            if notcomplete != 'All':
                ss = ss.filter(status__complete = not notcompleteFlt)
            if enabled != 'All':
                ss = ss.filter(status__enabled = enabledFlt)
            if sponsor != 'All':
                ss = ss.filter(project__sponsor__abbreviation = sponsor)
            ss = ss.order_by('name')
        else:
            ss = Sesshun.objects.all().order_by('name')
        return HttpResponse(
            json.dumps({
                'session handles': ["%s (%s)" % (s.name, s.project.pcode) \
                                    for s in ss]
              , 'ids' : [s.id for s in ss]})
          , mimetype = "text/plain")
    elif mode == "windowed_session_handles":
        ss = Sesshun.objects.filter(session_type__type = "windowed").order_by('name')
        return HttpResponse(
            json.dumps({
                'session handles': ["%s (%s)" % (s.name, s.project.pcode) \
                                    for s in ss]
              , 'ids' : [s.id for s in ss]})
          , mimetype = "text/plain")
    elif mode == "session_names":
        ss = Sesshun.objects.order_by('name')
        pcode = request.GET.get("pcode", None)
        if pcode:
            ss = [s for s in ss if s.project.pcode == pcode]
        return HttpResponse(
            json.dumps({'session names': ["%s" % s.name for s in ss]})
          , mimetype = "text/plain")
    elif mode == "periods":
        # return period descriptions for unique combo: pcode + sess name
        pcode = request.GET.get("pcode", None)
        name = request.GET.get("session_name", None)
        s = Sesshun.objects.get(name = name, project__pcode = pcode)
        periods = Period.objects.filter(session = s).order_by('start')
        return HttpResponse(
            json.dumps({'periods': ["%s" % p.__str__() for p in periods]
                      , 'period ids': ["%s" % p.id for p in periods]})
          , mimetype = "text/plain")
    elif mode == "semesters":
        # return all the semester names
        semesters = Semester.objects.all().order_by("semester")
        return HttpResponse(
            json.dumps({'semesters': ["%s" % s.semester for s in semesters]})
          , mimetype = "text/plain")
    elif mode == "receivers":
        # return all the receiver names
        rx = Receiver.objects.all().order_by("freq_low")
        return HttpResponse(
            json.dumps({'receivers': ["%s" % r.abbreviation for r in rx]})
          , mimetype = "text/plain")
    else:
        return HttpResponse("")
@catch_json_parse_errors
def get_ical(request, *args, **kws):
    """
    Returns the entire GBT calendar in iCalendar format.
    """
    response = HttpResponse(IcalMap().getSchedule(), mimetype='text/calendar')
    response['Content-Type'] = 'text/calendar'
    # Force a download with a stable filename.
    response['Content-Disposition'] = 'attachment; filename=GBTschedule.ics'
    return response
@revision.create_on_success
@catch_json_parse_errors
def change_schedule(request, *args, **kws):
    """
    Replaces time period w/ new session, handling time accounting.
    Duration is in hours.

    POST params: start ('%Y-%m-%d %H:%M:%S'), duration (hours, float),
    session ('name (pcode)'), reason, description.
    """
    startdate = request.POST.get("start", None)
    startdate = datetime.strptime(startdate, '%Y-%m-%d %H:%M:%S') if startdate else None
    duration = request.POST.get("duration", None)
    duration = float(duration) if duration else duration
    # Session handle is 'name (pcode)'; keep only the name part.
    sess_name = request.POST.get("session", "").split("(")[0].strip()
    try:
        s = Sesshun.objects.get(name = sess_name)
    except Sesshun.DoesNotExist:
        return HttpResponse(json.dumps({'error' : "Session not found."})
                          , mimetype = "text/plain")
    reason = request.POST.get("reason", "other_session_other")
    desc = request.POST.get("description", "")
    success, msg = ScheduleTools().changeSchedule(startdate, duration, s, reason, desc)
    if success:
        revision.comment = get_rev_comment(request, None, "change_schedule")
        return HttpResponse(json.dumps({'success':'ok'})
                          , mimetype = "text/plain")
    else:
        return HttpResponse(
            json.dumps({'error':'Error Inserting Period', 'message':msg})
          , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def shift_period_boundaries(request, *args, **kws):
    """
    Moves boundary between two or more periods, handling time accounting.
    Note: When performing the shift, this function finds all periods within
    15 mins of the original boundary time. This will always give our specified
    period plus any neighbor to it.

    POST params: time, start_boundary (0/1), reason, description, period_id.
    """
    time = request.POST.get("time", None)
    time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S') if time else None
    start_boundary = bool(int(request.POST.get("start_boundary", 1)))
    reason = request.POST.get("reason", "other_session_other")
    desc = request.POST.get("description", "")
    period_id = int(request.POST.get("period_id", None))
    period = Period.objects.get(id = period_id)
    original_time = period.start if start_boundary else period.end()
    # NOTE(review): if no other period lies in the 15-min window,
    # 'neighbor' is never bound and the call below raises NameError —
    # confirm whether a neighborless shift should be rejected gracefully.
    for p in Period.get_periods(original_time - timedelta(minutes = 1), 15.0):
        if p.id != period_id:
            neighbor = p
            break
    success, msg = ScheduleTools().shiftPeriodBoundaries(period, start_boundary, time, neighbor, reason, desc)
    if success:
        revision.comment = get_rev_comment(request, None, "shift_period_boundaries")
        return HttpResponse(json.dumps({'success':'ok'})
                          , mimetype = "text/plain")
    else:
        return HttpResponse(
            json.dumps({'error': 'Error Shifting Period Boundary'
                      , 'message': msg})
          , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def time_accounting(request, *args, **kws):
    """
    POST: Sets Project time accounting.
    GET: Serves up json for time accounting from periods up to the project

    args[0] is the project code; POST params: grade, total_time, description.
    """
    project = Project.objects.get(pcode = args[0])
    if request.method == 'POST':
        # The allotment is looked up by grade, then its total time updated.
        a = project.get_allotment(float(request.POST.get("grade", None)))
        a.total_time = float(request.POST.get("total_time", None))
        a.save()
        project.accounting_notes = request.POST.get("description", None)
        project.save()
        revision.comment = get_rev_comment(request, None, "time_accounting")
    return HttpResponse(json.dumps(TimeAccounting().jsondict(project))
                      , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def session_time_accounting(request, *args, **kws):
    """
    Sets some time accounting variables for given period.

    args[0] is the session name; POST params: total_time, description.
    Returns the project-level time-accounting JSON in both GET and POST.
    """
    s = Sesshun.objects.get(name = args[0])
    if request.method == 'POST':
        s.allotment.total_time = request.POST.get("total_time", None)
        s.allotment.save()
        s.accounting_notes = request.POST.get("description", None)
        s.save()
        revision.comment = get_rev_comment(request, None, "session_time_accounting")
    return HttpResponse(json.dumps(TimeAccounting().jsondict(s.project))
                      , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def period_time_accounting(request, *args, **kws):
    """Sets some time accounting variables for given period

    args[0] is the period id; POST params: description plus the
    accounting fields consumed by update_from_post. Invalid accounting
    returns an error payload without saving.
    """
    period = Period.objects.get(id = args[0])
    if request.method == 'POST':
        a = period.accounting
        a.description = request.POST.get("description", None)
        a.update_from_post(request.POST)
        valid, msg = a.validate()
        if not valid:
            title = "Error setting Period Time Accounting"
            return HttpResponse(json.dumps({'error': title, 'message': msg})
                              , mimetype = "text/plain")
        a.save()
        revision.comment = get_rev_comment(request, None, "period_time_accounting")
    return HttpResponse(
        json.dumps(TimeAccounting().jsondict(period.session.project))
      , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def publish_periods(request, *args, **kwds):
    """
    Publishes pending periods within a time range specified by a start time
    and duration.
    We tweet to let the world know we published.  However, the default is to
    tweet unless we are using our sandboxes.
    Note: Supports publishing periods by time range, or a single one by id.

    POST params (range mode): start, tz, duration (days), tweet.
    """
    if len(args) == 1:
        # publish a single period specified in args by its ID
        p = Period.objects.get(id = int(args[0]))
        p.publish()
        p.save()
    else:
        # publish periods identified by time range
        startPeriods = request.POST.get("start", datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        startPeriods = datetime.strptime(startPeriods, '%Y-%m-%d %H:%M:%S')
        start, duration = ScheduleTools().getSchedulingRange(
                                        startPeriods
                                      , request.POST.get("tz", "UTC")
                                      , int(request.POST.get("duration", "1")) - 1)
        Period.publish_periods(start, duration)
    revision.comment = get_rev_comment(request, None, "publish_periods")
    # Only announce from the production DB, and only when the client
    # didn't opt out of tweeting.
    if DATABASES['default']['NAME'] == 'dss' and request.POST.get("tweet", "True") == "True":
        update = 'GBT schedule updated. See https://dss.gb.nrao.edu/schedule/public for details.'
        try:
            twitter.Api(
                consumer_key        = settings.TWITTER['consumer_key']
              , consumer_secret     = settings.TWITTER['consumer_secret']
              , access_token_key    = settings.TWITTER['access_token_key']
              , access_token_secret = settings.TWITTER['access_token_secret']
                       ).PostUpdate(update)
        except: # That's ok, the world doesn't HAVE to know.
            formatExceptionInfo()
    return HttpResponse(json.dumps({'success':'ok'}), mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def restore_schedule(request, *args, **kwds):
    """
    Removes pending periods of open sessions for the specified time range,
    given by a start time and duration.

    POST params: start, tz, duration (days).
    """
    startPeriods = request.POST.get("start"
                                  , datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    startPeriods = datetime.strptime(startPeriods, '%Y-%m-%d %H:%M:%S')
    start, duration = ScheduleTools().getSchedulingRange(
                                    startPeriods
                                  , request.POST.get("tz", "UTC")
                                  , int(request.POST.get("duration", "1")) - 1)
    # here's the steps we take to restore the schedule:
    # 1. get rid of most periods in the pending stat
    Period.delete_pending(start, duration)
    # 2. bring back any elective periods that may have been deleted
    Period.restore_electives(start, duration)
    # 3. bring back certain windowed periods that may have been deleted
    Period.restore_windows(start, duration)
    revision.comment = get_rev_comment(request, None, "restore_schedule")
    return HttpResponse(json.dumps({'success':'ok'}), mimetype = "text/plain")
######################################################################
# Declaring 'notifier' as a global allows it to keep state between
# 'GET' and 'POST' calls of scheduling_email.
######################################################################
try:
    notifier = SchedulingNotifier()
except:
    # NOTE(review): if construction fails, 'notifier' stays undefined and
    # scheduling_email will raise NameError later; the failure is only
    # surfaced when DEBUG is on.
    if DEBUG:
        printException(formatExceptionInfo())
@catch_json_parse_errors
def scheduling_email(request, *args, **kwds):
    """
    GET: builds draft observer/changed/staff emails for the scheduling
    range (duration days from now) and returns them as JSON.
    POST: sends the (possibly client-edited) emails and time-stamps the
    affected periods so they are not re-notified.
    The module-global 'notifier' carries state from the GET to the POST.
    """
    # Parallel key lists: index 0 = observer, 1 = changed, 2 = staff.
    address_key = ["observer_address", "changed_address", "staff_address"]
    subject_key = ["observer_subject", "changed_subject", "staff_subject"]
    body_key    = ["observer_body", "changed_body", "staff_body"]
    email_key   = ["observer", "changed", "staff"]
    if request.method == 'GET':
        # Show the schedule from now until 8am eastern 'duration' days from now.
        start    = datetime.utcnow()
        duration = int(request.GET.get("duration"))
        end      = TimeAgent.est2utc(TimeAgent.utc2est(start + timedelta(days = duration - 1))
                                     .replace(hour = 8, minute = 0, second = 0,
                                              microsecond = 0))
        # The class that sets up the emails needs the periods in the
        # scheduling range, and all the periods in the future.
        currentPs = list(Period.objects.filter(start__gt = start
                                             , start__lt = end))
        futurePs  = list(Period.objects.filter(start__gte = start).order_by("start"))
        notifier.setPeriods(currentPs, futurePs)
        return HttpResponse(
            json.dumps({
                'observer_address' : notifier.getAddresses("observer"),
                'observer_subject' : notifier.getSubject("observer"),
                'observer_body'    : notifier.getBody("observer"),
                'changed_address'  : notifier.getAddresses("changed"),
                'changed_subject'  : notifier.getSubject("changed"),
                'changed_body'     : notifier.getBody("changed"),
                'staff_address'    : notifier.getAddresses("staff"),
                'staff_subject'    : notifier.getSubject("staff"),
                'staff_body'       : notifier.getBody("staff"),
                'obs_periods'      : [p.id for p in notifier.observingPeriods],
                'changed_periods'  : [p.id for p in notifier.changedPeriods]
                       })
          , mimetype = "text/plain")
    elif request.method == 'POST':
        # here we are overriding what/who gets sent for the first round
        # of emails
        for i in xrange(3):
            addr = str(request.POST.get(address_key[i], "")).replace(" ", "").split(",")
            notifier.setAddresses(email_key[i], addr)
            notifier.setSubject(email_key[i], request.POST.get(subject_key[i], ""))
            notifier.setBody(email_key[i], request.POST.get(body_key[i], ""))
        notifier.notify()
        # Remember when we did this to allow time-tagging of the schedule
        sn = Schedule_Notification(date = datetime.utcnow())
        sn.save()
        # Emails for a given period shouldn't be sent more then is
        # necessary, so here we set the last_notification timestamp.
        # However, the client can change the recipients and text of the
        # 'changes' email - this ignores those changes.
        # See Story: https://www.pivotaltracker.com/story/show/14550249
        now = datetime.utcnow()
        set_periods_last_notification(now, request, "changed_periods")
        set_periods_last_notification(now, request, "obs_periods")
        return HttpResponse(json.dumps({'success':'ok'})
                          , mimetype = "text/plain")
    else:
        return HttpResponse(
            json.dumps({'error': 'request.method is neither GET or POST!'})
          , mimetype = "text/plain")
def set_periods_last_notification(dt, request, key):
    """Stamp last_notification = dt on every Period whose id appears in the
    comma-separated id list found at request.POST[key].

    Blank or malformed entries (e.g. from an empty POST value) are skipped.
    """
    pidsStr = request.POST.get(key, "")
    for pidStr in pidsStr.split(","):
        try:
            pid = int(pidStr.strip())
        except ValueError:
            # int() on a string only raises ValueError; the old bare
            # 'except' could hide unrelated errors.
            continue
        p = Period.objects.get(id = pid)
        p.last_notification = dt
        p.save()
@catch_json_parse_errors
def projects_email(request, *args, **kwds):
    """
    GET: returns investigator/observer/friend email addresses and email
    templates for the requested project codes (or for the current filter).
    POST: sends a single email (from helpdesk-dss) with the given address
    list, subject and body.
    """
    if request.method == 'GET':
        # 'pcodes' is a space-separated list; fall back to the filter UI.
        pcodes = request.GET.get("pcodes", None)
        pcode_list = pcodes.split(" ") if pcodes is not None else getPcodesFromFilter(request)
        pi_list, pc_list, ci_list, ob_list, fs_list = getInvestigatorEmails(pcode_list)
        templates = EmailTemplate.get_templates(pcode_list)
        return HttpResponse(json.dumps({'PI-Addresses':     pi_list
                                      , 'PC-Addresses':     pc_list
                                      , 'CO-I-Addresses':   ci_list
                                      , 'OBS-Addresses':    ob_list
                                      , 'Friend-Addresses': fs_list
                                      , 'PCODES':           pcode_list
                                      , 'Templates':        templates})
                          , mimetype = "text/plain")
    elif request.method == 'POST':
        email_key   = "projects_email"
        pe_notifier = Notifier()
        em_templ    = EmailMessage("helpdesk-dss@gb.nrao.edu", "", "", "")
        pe_notifier.registerTemplate(email_key, em_templ)
        # Addresses arrive as a comma-separated string; strip any spaces.
        addr    = str(request.POST.get("address", "")).replace(" ", "").split(",")
        subject = request.POST.get("subject", "")
        body    = request.POST.get("body", "")
        email = pe_notifier.cloneTemplate(email_key)
        email.SetRecipients(addr)
        email.SetSubject(subject)
        email.SetBody(body)
        pe_notifier.post(email)
        pe_notifier.notify()
        return HttpResponse(json.dumps({'success':'ok'}),
                            mimetype = "text/plain")
    else:
        return HttpResponse(
            json.dumps({'error': 'request.method is neither GET or POST!'})
          , mimetype = "text/plain")
@revision.create_on_success
@catch_json_parse_errors
def window_assign_period(request, *args, **kwds):
    """Assign period args[1] to window args[0].

    POST 'default' ("true"/"false") selects whether the period becomes the
    window's default period (defaults to true).
    """
    if len(args) != 2:
        return HttpResponse(json.dumps({'success':'error'})
                          , mimetype = "text/plain")
    # Get the window & assign the period
    try:
        win = Window.objects.get(id = int(args[0]))
    except Window.DoesNotExist:
        return HttpResponse(json.dumps({'success': 'error'})
                          , mimetype = "text/plain")
    # BUG FIX: POST values are strings, so the old code passed "true"/"false"
    # (both truthy) straight through as the boolean; compare to the string
    # instead (same pattern as publish_periods' "tweet" handling above).
    default = str(request.POST.get("default", "true")).lower() == "true"
    win.assignPeriod(int(args[1]), default)
    revision.comment = get_rev_comment(request, None, "window_assign_period")
    return HttpResponse(json.dumps({'success':'ok'})
                      , mimetype = "text/plain")
@catch_json_parse_errors
def toggle_moc(request, *args, **kwds):
    """Flip the 'moc_ack' flag on the period whose id is args[0] and
    return a JSON success/error response."""
    if len(args) != 1:
        return HttpResponse(json.dumps({'success':'error'})
                          , mimetype = "text/plain")
    period = Period.objects.get(id = args[0])
    period.moc_ack = not period.moc_ack
    period.save()
    # NOTE(review): revision.comment is set here although, unlike the sibling
    # views, this function is not decorated with @revision.create_on_success
    # -- confirm whether the decorator was omitted intentionally.
    revision.comment = get_rev_comment(request, None, "toggle_moc")
    return HttpResponse(json.dumps({'success':'ok'})
                      , mimetype = "text/plain")
def reservations(request, *args, **kws):
    """Return reservations for GET 'start' (m/d/Y) through 'days' later,
    as JSON with the reservation list and its length."""
    start = request.GET.get('start')
    days = int(request.GET.get('days'))
    end = (datetime.strptime(start, "%m/%d/%Y") + timedelta(days = days)).strftime("%m/%d/%Y")
    useBos = True  # debugging toggle: flip to read reservations from the DB
    fetch = getReservationsFromBOS if useBos else getReservationsFromDB
    reservations = fetch(start, end)
    return HttpResponse(json.dumps({'reservations' : reservations
                                  , 'total'        : len(reservations)
                                   }))
# Maps an explorer URL path to the tab/model name it browses; used by the
# explorer-configuration views below to scope saved configurations.
tab_map = {
    '/scheduler/investigators' : 'Investigator'
  , '/scheduler/periods'       : 'Period'
  , '/scheduler/projects'      : 'Project'
  , '/scheduler/sessions'      : 'Session'
  , '/scheduler/users'         : 'User'
  , '/scheduler/windows'       : 'Window'
}
def updateExplorerConfig(name, type, tab):
    """Fetch (or create) the named ExplorerConfiguration for the given tab
    and wipe its saved columns and filters so the caller can re-save them."""
    ec, _ = ExplorerConfiguration.objects.get_or_create(name = name, type = type, tab = tab)
    # Clear out old values if we're updating an existing config
    for c in ec.column_set.all():
        c.delete()
    for f in ec.filter_set.all():
        f.delete()
    return ec
def deleteExplorerConfig(id):
    """Delete the ExplorerConfiguration with the given id (a no-op when it
    does not exist) and report JSON success either way."""
    try:
        ExplorerConfiguration.objects.get(id = id).delete()
    except ExplorerConfiguration.DoesNotExist:
        pass
    return HttpResponse(json.dumps({'success':'ok'})
                      , mimetype = "text/plain")
def column_configurations_explorer(request, *args, **kws):
    """Save (POST), delete (POST w/ method_=DELETE) or fetch (GET) named
    column configurations for an explorer tab."""
    if request.method == 'POST':
        if request.POST.get("method_") == "DELETE":
            id, = args
            return deleteExplorerConfig(id)
        ec = updateExplorerConfig(request.POST.get('name')
                                , EXPLORER_CONFIG_TYPE_COLUMN
                                , tab_map.get(request.POST.get('explorer'), None))
        # Get all values that look like they might be true, hidden
        columns = [k for k, v in request.POST.iteritems() if v in ('true', ['true'] )]
        # Save the columns that belong to this configuration
        for name in columns:
            c = Column(name = name, explorer_configuration = ec)
            c.save()
        return HttpResponse(json.dumps({'success':'ok', 'id' : ec.id})
                          , mimetype = "text/plain")
    else:
        try:
            id, = args
        except ValueError:
            # If the id isn't there then get all configurations.
            tab = tab_map.get(request.GET.get('explorer'))
            configs = [(ec.name, ec.id)
                for ec in ExplorerConfiguration.objects.filter(tab = tab, type = EXPLORER_CONFIG_TYPE_COLUMN)]
            return HttpResponse(json.dumps({'configs' : configs})
                              , mimetype = "text/plain")
        # BUG FIX: .get() raises DoesNotExist rather than returning None, so
        # the old 'if config is not None' test was dead code and a missing
        # config produced a 500 (and no HttpResponse was returned at all).
        try:
            config = ExplorerConfiguration.objects.get(id = id
                                                     , type = EXPLORER_CONFIG_TYPE_COLUMN
                                                       )
        except ExplorerConfiguration.DoesNotExist:
            return HttpResponse(json.dumps({'error': 'configuration not found'})
                              , mimetype = "text/plain")
        return HttpResponse(json.dumps({'columns' : [c.name for c in config.column_set.all()]})
                          , mimetype = "text/plain")
def filter_combinations_explorer(request, *args, **kws):
    """Save (POST), delete (POST w/ method_=DELETE) or fetch (GET) named
    filter combinations for an explorer tab."""
    if request.method == 'POST':
        if request.POST.get("method_") == "DELETE":
            id, = args
            return deleteExplorerConfig(id)
        ec = updateExplorerConfig(name = request.POST.get('name')
                                , type = EXPLORER_CONFIG_TYPE_FILTER
                                , tab  = tab_map.get(request.POST.get('explorer'), None))
        # Save the filters that belong to this configuration
        for k, v in request.POST.iteritems():
            if k not in ('name', 'explorer'):
                f = Filter(name = k, value = v, explorer_configuration = ec)
                f.save()
        return HttpResponse(json.dumps({'success':'ok', 'id' : ec.id})
                          , mimetype = "text/plain")
    else:
        try:
            id, = args
        except ValueError:
            # If the id isn't there then get all configurations.
            tab = tab_map.get(request.GET.get('explorer'))
            configs = [(ec.name, ec.id)
                for ec in ExplorerConfiguration.objects.filter(tab = tab, type = EXPLORER_CONFIG_TYPE_FILTER)]
            return HttpResponse(json.dumps({'configs' : configs})
                              , mimetype = "text/plain")
        # BUG FIX: .get() raises DoesNotExist rather than returning None, so
        # the old 'if config is not None' test was dead code and a missing
        # config produced a 500 (and no HttpResponse was returned at all).
        try:
            config = ExplorerConfiguration.objects.get(id = id
                                                     , type = EXPLORER_CONFIG_TYPE_FILTER
                                                       )
        except ExplorerConfiguration.DoesNotExist:
            return HttpResponse(json.dumps({'error': 'configuration not found'})
                              , mimetype = "text/plain")
        filters = {}
        for f in config.filter_set.all():
            filters.update({f.name : f.value})
        return HttpResponse(json.dumps({'filters' : filters})
                          , mimetype = "text/plain")
@catch_json_parse_errors
def window_copy(request, *args, **kwds):
    """Copy window args[0] POST['number'] times and record the revision."""
    if len(args) != 1:
        return HttpResponse(json.dumps({'success':'error'})
                          , mimetype = "text/plain")
    # parse variables, then copy the window
    window_id = int(args[0])
    count = int(request.POST.get("number", 1))
    copy_window(window_id, count)
    revision.comment = get_rev_comment(request, None, "window_copy")
    return HttpResponse(json.dumps({'success':'ok'})
                      , mimetype = "text/plain")
@catch_json_parse_errors
def elective_copy(request, *args, **kwds):
    """Copy elective args[0] POST['number'] times and record the revision."""
    if len(args) != 1:
        return HttpResponse(json.dumps({'success':'error'})
                          , mimetype = "text/plain")
    # parse variables, then copy the elective
    elective_id = int(args[0])
    count = int(request.POST.get("number", 1))
    copy_elective(elective_id, count)
    revision.comment = get_rev_comment(request, None, "elective_copy")
    return HttpResponse(json.dumps({'success':'ok'})
                      , mimetype = "text/plain")
| nrao/nell | scheduler/views.py | views.py | py | 36,425 | python | en | code | 0 | github-code | 13 |
74884806417 | import datetime
class Employee:
    """A person employed by a Company.

    Attributes:
        name: the employee's full name.
        job_title: the employee's position.
        employment_start_date: when the employment began.
    """

    def __init__(self, name, title, start):
        self.name = name
        self.job_title = title
        self.employment_start_date = start

    def __repr__(self):
        # A readable repr makes debugging lists of employees much easier.
        return f"Employee(name={self.name!r}, job_title={self.job_title!r})"
class Company:
    """A business with a name, address, industry and a list of Employees.

    Attributes:
        business_name: the company's name.
        address: where the company is located.
        industry_type: the industry the company operates in.
        employees: list of Employee objects (starts empty, one per instance).
    """

    def __init__(self, name, address, industry):
        self.business_name = name
        self.address = address
        self.industry_type = industry
        self.employees = []  # literal is the idiomatic form of list()
# Demo: build two companies, staff them, and print a roster for each.
tony = Employee("Tony Stark", "Owner", datetime.datetime.now())
pepper = Employee("Pepper Potts", "CEO", datetime.datetime.now())
peter = Employee("Peter Parker", "Spiderman", datetime.datetime.now())
thor = Employee("Thor, Son of Odin", "God of Thunder", datetime.datetime.now())
steve = Employee("Steve Rogers", "Captain America", datetime.datetime.now())
stark_industries = Company("Stark Industries", " New York City", "Defense")
stark_industries.employees.append(tony)
stark_industries.employees.append(pepper)
avengers = Company("Avengers", "Upstate New York", "Defense")
avengers.employees.append(peter)
avengers.employees.append(thor)
avengers.employees.append(steve)
# print(avengers)
# print(stark_industries)
companies = []
companies.append(stark_industries)
companies.append(avengers)
# Print each company's header line followed by its bulleted employee list.
for company in companies:
    print(f'{company.business_name} is in the {company.industry_type} industry and has the following employees:')
    for employee in company.employees:
        print(f'    * {employee.name}')
| bparker12/python_class_practice | employees_departments.py | employees_departments.py | py | 1,401 | python | en | code | 0 | github-code | 13 |
38300884653 | from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy
from django.views.generic import CreateView, DetailView, ListView, UpdateView
from django_summernote.fields import SummernoteTextFormField
import astu_inventory.apps.core.signals as signals
from astu_inventory.apps.core.forms import ReasonForm
from astu_inventory.apps.core.models import BorrowRequest
from astu_inventory.apps.inventory.models import Product
class InitiateBorrowRequestView(PermissionRequiredMixin, SuccessMessageMixin, CreateView):
    """Let a user request to borrow a quantity of a product for a date range.

    Validates quantity against availability and that the end date follows
    the start date, then fires the borrow_request_initialized signal.
    """

    model = BorrowRequest
    fields = ("quantity", "start_date", "end_date", "reason")
    permission_required = "core.can_initiate_borrow_request"
    template_name = "core/borrow_request/add.html"
    extra_context = {"title": "Initiate Borrow Request"}
    success_url = reverse_lazy("core:dashboard")

    def get_success_message(self, *args, **kwargs):
        # FIX: corrected "successfuly request" typo in the user-facing message.
        return (
            f"You have successfully requested {self.object.quantity}"
            f" {self.object.product.measurment} of {self.object.product}."
        )

    def form_valid(self, form):
        # Only accept the form when both custom validations pass; they add
        # field errors themselves on failure.
        if self.is_quantify_valid(form) and self.is_dates_valid(form):
            form.instance.product = self.product
            form.instance.user = self.request.user
            response = super().form_valid(form)
            # Signal fires only after the object is saved.
            signals.borrow_request_initialized.send(sender=self.model, instance=self.object)
            return response
        return super().form_invalid(form)

    def is_dates_valid(self, form):
        """Add a field error and return False when end_date precedes start_date."""
        start_date = form.cleaned_data["start_date"]
        end_date = form.cleaned_data["end_date"]
        if end_date < start_date:
            form.add_error("end_date", "End date must be after start date.")
            return False
        return True

    def is_quantify_valid(self, form):
        """Add a field error and return False when more than the available
        quantity is requested."""
        quantity = form.cleaned_data["quantity"]
        if quantity > self.product.availables:
            form.add_error(
                "quantity",
                f"There is only {self.product.availables} items, please consider lowering your quantity.",
            )
            return False
        return True

    def get_initial(self):
        # Pre-fill the form with everything currently available.
        return {"quantity": self.product.availables}

    def get_form(self, form_class=None):
        form = super().get_form(form_class=form_class)
        form["reason"].field = SummernoteTextFormField()
        # Constrain the HTML number input to the available range.
        form["quantity"].field.widget.attrs.update({"max": self.product.availables, "min": 0})
        return form

    def setup(self, *args, **kwargs):
        super().setup(*args, **kwargs)
        # Resolve the product from the URL before any request handling.
        self.product = get_object_or_404(
            Product, slug=self.kwargs["slug"], department__short_name__iexact=self.kwargs["short_name"]
        )

    def get_context_data(self, **kwargs):
        context_data = super().get_context_data(**kwargs)
        # Show existing pending requests so the user can plan around them.
        context_data.update(
            {
                "previous_borrow_requests": BorrowRequest.objects.filter(product=self.product, status=0)
                .order_by("date_requested")
                .values("start_date", "end_date", "quantity")
            }
        )
        return context_data
class ListActiveBorrowRequestView(PermissionRequiredMixin, ListView):
    """List borrow requests that are still in flight (status 0 or 4)."""

    model = BorrowRequest
    permission_required = "core.can_list_active_borrow_request"
    context_object_name = "borrow_requests"
    extra_context = {"title": "Active Borrow Requests List"}
    template_name = "core/borrow_request/active/list.html"

    def get_queryset(self):
        active = super().get_queryset().filter(Q(status=0) | Q(status=4))
        requester = self.request.user
        if requester.is_superuser or requester.is_college_dean:
            return active
        # Department staff see requests from their own department, plus
        # cross-department requests (status 4) for products they own.
        same_department = Q(user__department=requester.department)
        forwarded_here = (
            ~Q(user__department=requester.department)
            & Q(product__department=requester.department)
            & Q(status=4)
        )
        return active.filter(same_department | forwarded_here)
class ActiveBorrowRequestDetailView(PermissionRequiredMixin, DetailView):
    """Show one in-flight borrow request (status 0 or 4)."""

    model = BorrowRequest
    context_object_name = "borrow_request"
    template_name = "core/borrow_request/active/detail.html"
    permission_required = "core.can_view_active_borrow_request"
    extra_context = {"title": "Active Borrow Request "}

    def get_queryset(self):
        active = super().get_queryset().filter(Q(status=0) | Q(status=4))
        requester = self.request.user
        if requester.is_superuser or requester.is_college_dean:
            return active
        return active.filter(product__department=requester.department)
class ApproveBorrowRequestView(PermissionRequiredMixin, SuccessMessageMixin, UpdateView):
    """Approve an active borrow request (POST only).

    Re-checks availability, decrements the product stock, and either
    forwards the request to the next approver (status 4) or approves it
    outright (status 1), firing the matching signal.
    """

    model = BorrowRequest
    fields = ("status",)
    permission_required = "auser.can_approve_borrow_request"
    success_message = "Borrow request has been approved successfully."
    http_method_names = ["post"]
    success_url = reverse_lazy("core:active_borrow_requests_list")

    def get_queryset(self):
        qs = super().get_queryset().filter(Q(status=0) | Q(status=4))
        user = self.request.user
        if user.is_superuser or user.is_college_dean:
            return qs
        return qs.filter(product__department=user.department)

    def has_next_process(self):
        """True when another department still has to approve this request."""
        if self.object.status == 4 or self.object.product.department == self.object.user.department:
            return False
        return True

    def is_quantify_valid(self, quantity, availables):
        """True when the requested quantity can be satisfied from stock."""
        if quantity > availables:
            return False
        return True

    def form_valid(self, form):
        self.object = form.save(commit=False)
        if self.is_quantify_valid(self.object.quantity, self.object.product.availables):
            response = super().form_valid(form)
            # Reserve the stock only after the status change is saved.
            self.object.product.availables -= self.object.quantity
            self.object.product.save()
            if self.has_next_process():
                signals.borrow_request_proccessed.send(sender=self.model, instance=self.object)
            else:
                signals.borrow_request_approved.send(sender=self.model, instance=self.object)
            return response
        # FIX: corrected "cann't" typo in the user-facing error message.
        messages.error(
            self.request,
            "This request can't be approved, since currently there is no enough quantity item available.",
        )
        return HttpResponseRedirect(reverse_lazy("core:active_borrow_requests_detail", args=[self.object.pk]))

    def get_form_kwargs(self):
        # Force the status transition server-side; the client never chooses it.
        form_kwargs = super().get_form_kwargs()
        form_data = form_kwargs.get("data", {}).copy()
        new_status = 4 if self.has_next_process() else 1
        form_data.update({"status": new_status})
        form_kwargs.update({"data": form_data})
        return form_kwargs
class DeclineBorrowRequestView(PermissionRequiredMixin, SuccessMessageMixin, UpdateView):
    """Decline an active borrow request (POST only).

    Requires a non-empty 'reason' in the POST; sets status to 2 and fires
    the borrow_request_declined signal.
    """
    model = BorrowRequest
    fields = ("status",)
    permission_required = "auser.can_declined_borrow_request"
    success_message = "Borrow request has been declined."
    http_method_names = ["post"]
    success_url = reverse_lazy("core:active_borrow_requests_list")
    def get_queryset(self):
        # Only in-flight requests (status 0 or 4) scoped to the user's role.
        qs = super().get_queryset().filter(Q(status=0) | Q(status=4))
        user = self.request.user
        if user.is_superuser or user.is_college_dean:
            return qs
        return qs.filter(product__department=user.department)
    def form_valid(self, form):
        reason = self.request.POST.get("reason")
        reason_form = ReasonForm(data={"description": reason})
        if reason_form.is_valid():
            # Save the status change first so the Reason can reference it.
            response = super().form_valid(form)
            reason_obj = reason_form.save(commit=False)
            reason_obj.borrow_request = self.object
            reason_obj.save()
            signals.borrow_request_declined.send(sender=self.model, instance=self.object)
            return response
        messages.error(
            self.request,
            "You have to give a reason before declining a request. Please try again.",
        )
        return HttpResponseRedirect(reverse_lazy("core:active_borrow_requests_detail", args=[self.object.pk]))
    def get_form_kwargs(self):
        # Force status=2 (declined) server-side regardless of client input.
        form_kwargs = super().get_form_kwargs()
        form_data = form_kwargs.get("data", {}).copy()
        form_data.update({"status": 2})
        form_kwargs.update({"data": form_data})
        return form_kwargs
class ListApprovedBorrowRequestView(PermissionRequiredMixin, ListView):
    """List approved (status 1) borrow requests."""

    model = BorrowRequest
    permission_required = "core.can_list_approved_borrow_request"
    context_object_name = "borrow_requests"
    extra_context = {"title": "Approved Borrow Requests List"}
    template_name = "core/borrow_request/approved/list.html"

    def get_queryset(self):
        approved = super().get_queryset().filter(Q(status=1))
        requester = self.request.user
        if requester.is_superuser or requester.is_college_dean:
            return approved
        return approved.filter(product__department=requester.department)
class ApprovedBorrowRequestDetailView(PermissionRequiredMixin, DetailView):
    """Show one approved (status 1) borrow request."""

    model = BorrowRequest
    context_object_name = "borrow_request"
    template_name = "core/borrow_request/approved/detail.html"
    permission_required = "core.can_view_approved_borrow_request"
    extra_context = {"title": "Borrow Request "}

    def get_queryset(self):
        approved = super().get_queryset().filter(Q(status=1))
        requester = self.request.user
        if requester.is_superuser or requester.is_college_dean:
            return approved
        return approved.filter(product__department=requester.department)
class CompleteBorrowRequestView(PermissionRequiredMixin, SuccessMessageMixin, UpdateView):
    """Mark an approved borrow request as completed -- i.e. the product was
    handed over (POST only; sets status to 6 and fires
    borrow_request_completed)."""
    model = BorrowRequest
    fields = ("status",)
    permission_required = "auser.can_complete_borrow_request"
    success_message = "Borrow request has been completed successfully."
    http_method_names = ["post"]
    success_url = reverse_lazy("core:approved_borrow_requests_list")
    def form_valid(self, form):
        # Fire the signal only after the status change is saved.
        response = super().form_valid(form)
        signals.borrow_request_completed.send(sender=self.model, instance=self.object)
        return response
    def get_queryset(self):
        # Only approved (status 1) requests, scoped to the user's role.
        qs = super().get_queryset().filter(Q(status=1))
        user = self.request.user
        if user.is_superuser or user.is_college_dean:
            return qs
        return qs.filter(product__department=user.department)
    def get_form_kwargs(self):
        # Force status=6 (completed) server-side regardless of client input.
        form_kwargs = super().get_form_kwargs()
        form_data = form_kwargs.get("data", {}).copy()
        form_data.update({"status": 6})
        form_kwargs.update({"data": form_data})
        return form_kwargs
class RevokeBorrowRequestView(PermissionRequiredMixin, SuccessMessageMixin, UpdateView):
    """Revoke an approved borrow request before hand-over (POST only).

    Requires a non-empty 'reason'; sets status to 5 and fires
    borrow_request_revoked.
    """
    model = BorrowRequest
    fields = ("status",)
    permission_required = "auser.can_revoke_borrow_request"
    success_message = "Borrow request has been revoked."
    http_method_names = ["post"]
    success_url = reverse_lazy("core:approved_borrow_requests_list")
    def get_queryset(self):
        # Only approved (status 1) requests, scoped to the user's role.
        qs = super().get_queryset().filter(Q(status=1))
        user = self.request.user
        if user.is_superuser or user.is_college_dean:
            return qs
        return qs.filter(product__department=user.department)
    def form_valid(self, form):
        reason = self.request.POST.get("reason")
        reason_form = ReasonForm(data={"description": reason})
        if reason_form.is_valid():
            # Save the status change first so the Reason can reference it.
            response = super().form_valid(form)
            reason_obj = reason_form.save(commit=False)
            reason_obj.borrow_request = self.object
            reason_obj.save()
            signals.borrow_request_revoked.send(sender=self.model, instance=self.object)
            return response
        messages.error(
            self.request,
            "You have to give a reason before revoking a request. Please try again.",
        )
        return HttpResponseRedirect(reverse_lazy("core:approved_borrow_requests_detail", args=[self.object.pk]))
    def get_form_kwargs(self):
        # Force status=5 (revoked) server-side regardless of client input.
        form_kwargs = super().get_form_kwargs()
        form_data = form_kwargs.get("data", {}).copy()
        form_data.update({"status": 5})
        form_kwargs.update({"data": form_data})
        return form_kwargs
class ListCompletedBorrowRequestView(PermissionRequiredMixin, ListView):
    """List completed (status 6) borrow requests."""

    model = BorrowRequest
    permission_required = "core.can_list_completed_borrow_request"
    context_object_name = "borrow_requests"
    extra_context = {"title": "Completed Borrow Requests List"}
    template_name = "core/borrow_request/completed/list.html"

    def get_queryset(self):
        completed = super().get_queryset().filter(Q(status=6))
        requester = self.request.user
        if requester.is_superuser or requester.is_college_dean:
            return completed
        return completed.filter(product__department=requester.department)
class CompletedBorrowRequestDetailView(PermissionRequiredMixin, DetailView):
    """Show one completed (status 6) borrow request."""

    model = BorrowRequest
    context_object_name = "borrow_request"
    template_name = "core/borrow_request/completed/detail.html"
    permission_required = "core.can_view_completed_borrow_request"
    extra_context = {"title": "Completed Borrow Request "}

    def get_queryset(self):
        completed = super().get_queryset().filter(Q(status=6))
        requester = self.request.user
        if requester.is_superuser or requester.is_college_dean:
            return completed
        return completed.filter(product__department=requester.department)
class ReturnedBorrowRequestView(PermissionRequiredMixin, SuccessMessageMixin, UpdateView):
    """Record that a borrowed product came back (POST only).

    Sets status to 7, restores the product's available stock, and fires
    borrow_request_returned.
    """
    model = BorrowRequest
    fields = ("status",)
    permission_required = "auser.can_return_borrow_request"
    success_message = "Borrowed product has been returned successfully."
    http_method_names = ["post"]
    success_url = reverse_lazy("core:completed_borrow_requests_list")
    def get_queryset(self):
        # Only completed (status 6) requests, scoped to the user's role.
        qs = super().get_queryset().filter(Q(status=6))
        user = self.request.user
        if user.is_superuser or user.is_college_dean:
            return qs
        return qs.filter(product__department=user.department)
    def get_form_kwargs(self):
        # Force status=7 (returned) server-side regardless of client input.
        form_kwargs = super().get_form_kwargs()
        form_data = form_kwargs.get("data", {}).copy()
        form_data.update({"status": 7})
        form_kwargs.update({"data": form_data})
        return form_kwargs
    def form_valid(self, form):
        response = super().form_valid(form)
        # Return the reserved quantity to stock (counterpart of the
        # decrement in ApproveBorrowRequestView.form_valid).
        self.object.product.availables += self.object.quantity
        self.object.product.save()
        signals.borrow_request_returned.send(sender=self.model, instance=self.object)
        return response
class ListBorrowRequestHistoryView(PermissionRequiredMixin, SuccessMessageMixin, ListView):
    """List the logged-in user's own borrow requests, every status."""

    model = BorrowRequest
    permission_required = "core.can_list_borrow_request_history"
    context_object_name = "borrow_requests"
    extra_context = {"title": "Borrow Requests History List"}
    template_name = "core/borrow_request/history_list.html"

    def get_queryset(self):
        requester = self.request.user
        own_requests = super().get_queryset().filter(user=requester)
        return own_requests.values("product__name", "quantity", "start_date", "end_date", "status")
| rebunitech/astu-store | astu_inventory/apps/core/views/borrow_request.py | borrow_request.py | py | 15,294 | python | en | code | 0 | github-code | 13 |
16132269163 | import boto3
def fetch_secret_from_aws(secret_name, region_name="us-east-1"):
    """Return the SecretString for *secret_name* from AWS Secrets Manager,
    or None when the lookup fails (the error is printed).

    Args:
        secret_name: name or ARN of the secret.
        region_name: AWS region to query; the default preserves the old
            hard-coded us-east-1 behaviour.
    """
    try:
        session = boto3.session.Session()
        client = session.client(service_name="secretsmanager", region_name=region_name)
        get_secret_value_response = client.get_secret_value(SecretId=secret_name)
        return get_secret_value_response["SecretString"]
    except Exception as e:
        print(e)
        return None
def create_secret_in_aws(secret_name, secret_value, region_name="us-east-1"):
    """Create a secret in AWS Secrets Manager; return True on success,
    False on any failure (the error is printed).

    Args:
        secret_name: name for the new secret.
        secret_value: the SecretString payload.
        region_name: AWS region to use; the default preserves the old
            hard-coded us-east-1 behaviour.
    """
    try:
        session = boto3.session.Session()
        client = session.client(service_name="secretsmanager", region_name=region_name)
        client.create_secret(Name=secret_name, SecretString=secret_value)
        return True
    except Exception as e:
        print(e)
        return False
| udhayprakash/PythonMaterial | python3/18_aws_cloud/a_AWS_Lambdas/e_boto3_usage/e_secretsmanager.py | e_secretsmanager.py | py | 750 | python | en | code | 7 | github-code | 13 |
26005042600 | import bgheatmaps as bgh
"""
This example shows how to use visualize a heatmap in 2D
"""
values = dict( # scalar values for each region
TH=1,
RSP=0.2,
AI=0.4,
SS=-3,
MO=2.6,
PVZ=-4,
LZ=-3,
VIS=2,
AUD=0.3,
RHP=-0.2,
STR=0.5,
CB=0.5,
FRP=-1.7,
HIP=3,
PA=-4,
)
f = bgh.heatmap(
values,
position=5000, # when using a named orientation you can pass a single value!
orientation="frontal", # 'frontal' or 'sagittal', or 'horizontal' or a tuple (x,y,z)
title="horizontal view",
vmin=-5,
vmax=3,
format="2D",
).show()
| brainglobe/bg-heatmaps | examples/heatmap_2d.py | heatmap_2d.py | py | 608 | python | en | code | 20 | github-code | 13 |
22103762415 | # OSPF_Dijsktras: The program implements an OSPF protocol using Dijkstra's algorithm to find the
# shortest path between vertices. It also calculates the reachable vertices from any given vertex in the graph.
# Arguments: The program needs 1 argument: the name of the text file which has the initial condition of the graph.
# Author: Sumant Sanjay Tapas
# UNCC ID: 800905142
# Email ID: stapas@uncc.edu
import sys
from sys import argv
# ---------------------------------------------------------------------------
# Binary min-heap priority-queue helpers.
# The heap is a plain Python list of (priority, payload) tuples; only
# element[0] (the priority) is ever compared.
# ---------------------------------------------------------------------------

def minHeapify(A, i, n):
    """Sift A[i] down so the subtree rooted at i satisfies the min-heap
    property.  'n' is the index of the LAST element (not the length)."""
    l = left(i)
    r = right(i)
    # Pick the smallest of A[i] and its (at most two) children.
    if l <= n and A[l][0] < A[i][0]:
        smallest = l
    else:
        smallest = i
    if r <= n and A[r][0] < A[smallest][0]:
        smallest = r
    if smallest != i:
        A[i], A[smallest] = A[smallest], A[i]
        minHeapify(A, smallest, n)


def buildMinHeap(A, n):
    """Turn list A of length n into a min heap, in place.

    Leaves are trivially heaps, so sifting starts at the last internal node.
    """
    # n // 2 - 1 is the idiomatic spelling of the old int(n/2) - 1.
    for i in range(n // 2 - 1, -1, -1):
        minHeapify(A, i, n - 1)


def insert(A, ele):
    """Insert 'ele' (a (priority, payload) tuple) into the priority queue."""
    A.insert(len(A), ele)
    buildMinHeap(A, len(A))


def decreasePriority(A, i, key):
    """Replace A[i] with 'key' (a lower-priority tuple) and bubble it up."""
    A[i] = key
    while i > 0 and A[parent(i)][0] > A[i][0]:
        A[i], A[parent(i)] = A[parent(i)], A[i]
        i = parent(i)


def extractMin(A):
    """Pop the minimum-priority element and return its payload (element[1]).

    Callers must ensure A is non-empty (the program guards with 'while q:').
    """
    minimum = A[0]
    A[0] = A[len(A) - 1]
    del A[len(A) - 1]
    minHeapify(A, 0, len(A) - 1)
    return minimum[1]


def left(i):
    """Index of the left child of node i."""
    return 2 * i + 1


def parent(i):
    """Index of the parent of node i (0 for the root itself).

    Equivalent to the original even/odd case analysis, collapsed to the
    standard (i - 1) // 2 formula.
    """
    return (i - 1) // 2 if i > 0 else 0


def right(i):
    """Index of the right child of node i."""
    return 2 * i + 2
# A graph vertex together with its Dijkstra bookkeeping fields.
class Vertex:
    def __init__(self, name):
        self.name = name           # vertex label
        self.adj = []              # outgoing Edge objects
        self.dist = sys.maxsize    # tentative distance from the source
        self.prev = None           # previous vertex on the shortest path
        self.status = 1            # 1 = up, 0 = down

    def reset(self):
        """Forget the results of a previous shortest-path run."""
        self.dist, self.prev = sys.maxsize, None
# A weighted, directed link pointing at a destination Vertex.
class Edge:
    def __init__(self, destination, weight):
        self.vertex = destination  # the Vertex this edge points at
        self.weight = weight       # link cost
        self.status = 1            # 1 = up, 0 = down
# Holds all the properties of a graph. Most complicated class in the program.
# Major functions are: shortestPath()- to find the shortest path between the two given vertices.
# reachable()- Prints all the vertices reachable from any vertex in the graph
# printGraph()- Prints every vertex in the graph with its corresponding edges and weights.
class Graph:
    # NOTE(review): 'vertexMap' is a CLASS attribute, so every Graph instance
    # shares the same dictionary -- confirm this is intended (it is harmless
    # only while the program creates a single Graph).
    vertexMap = {} # dictionary which holds all the vertex objects.
# returns a vertex object using an input argument as name of the vertex.
# If the vertex is not present a vertex object is created
def getVertex(self, vertexName):
if vertexName not in self.vertexMap:
v = Vertex(vertexName)
self.vertexMap.update({vertexName: v})
return v
else:
return self.vertexMap[vertexName]
def getAddedVertex(self, vertexName):
if vertexName in self.vertexMap:
return self.vertexMap[vertexName]
else:
return -1
def addEdgeIni(self, source, destination, weight):
v = self.getVertex(source)
w = self.getVertex(destination)
edgeVtoW = Edge(w, float(weight))
edgeWtoV = Edge(v, float(weight))
v.adj.insert(len(v.adj), edgeVtoW)
w.adj.insert(len(w.adj), edgeWtoV)
# Adds an edge from source to destination with the weight as passed in the input arguments.
def addEdge(self, source, destination, weight):
v = self.getVertex(source)
w = self.getVertex(destination)
for ver in v.adj:
if ver.vertex == w:
ver.weight = float(weight)
ver.status = 1
return
edge = Edge(w, float(weight))
v.adj.insert(len(v.adj), edge)
# Deletes an edge from source to destination.
def deleteEdge(self, source, destination):
v = self.getAddedVertex(source)
w = self.getAddedVertex(destination)
if v != -1 and w != -1:
for ver in v.adj:
if ver.vertex == w:
v.adj.remove(ver)
# self.vertexMap.update({v.name: v})
return
else:
if v != -1:
print("Source Vertex not found")
elif w != -1:
print("Destination Vertex not found")
# Makes the edge from source to destination down.
def edgeDown(self, source, destination):
v = self.getAddedVertex(source)
w = self.getAddedVertex(destination)
if v != -1 and w != -1:
for edge in v.adj:
if edge.vertex == w:
edge.status = 0
return
else:
if v != -1:
print("Source Vertex not found")
elif w != -1:
print("Destination Vertex not found")
# Makes the edge from source to destination up.
def edgeUp(self, source, destination):
v = self.getAddedVertex(source)
w = self.getAddedVertex(destination)
if v != -1 and w != -1:
for edge in v.adj:
if edge.vertex == w:
edge.status = 1
return
else:
if v != -1:
print("Source Vertex not found")
elif w != -1:
print("Destination Vertex not found")
# Makes the vertex passed as input parameter 'vertex' down.
def vertexDown(self, vertex):
v = self.getAddedVertex(vertex)
if v != -1:
v.status = 0
else:
print("Vertex not found")
# Makes the vertex passed as input parameter 'vertex' down.
def vertexUp(self, vertex):
v = self.getAddedVertex(vertex)
if v != -1:
v.status = 1
else:
print("Vertex not found")
# Function prints the path to a destination passed as an input argument. This function is called when the shortest
# algorithm is completed.
def printPath(self, destination):
v = self.getVertex(destination)
if v.dist == sys.maxsize:
print("Destination is not reachable..!!")
else:
# print("Distance is:", v.dist)
self.printPathV(v)
print(" ", end="")
print("%.2f" % v.dist)
# Recursive helper function of printPath function.
def printPathV(self, vertex):
if vertex.prev != None:
self.printPathV(vertex.prev)
print(" ", end="")
print(vertex.name, end="")
def getWeight(self, vertexA, vertexB):
for element in vertexA.adj:
if element.vertex == vertexB:
return element.weight
# Prints the current position/condition of the graph
def printGraph(self):
    """Print every vertex (alphabetically) and its adjacency list.

    A down vertex is printed as "<name> DOWN"; a down edge gets the
    weight followed by " DOWN".
    """
    ver = list(self.vertexMap.values())
    listver = []
    for item in ver:
        listver.insert(len(listver), item.name)  # check if only active vertices needs to printed or not
    listver.sort()  # alphabetical vertex order for stable output
    for item in listver:
        v = self.getAddedVertex(item)
        if v.status == 0:
            print(v.name, "DOWN")
        else:
            print(v.name)
        listver1 = []
        weightList = {}  # neighbour name -> edge weight, for printing below
        for item1 in v.adj:
            listver1.insert(len(listver1), (item1.vertex.name, item1))
            weightList.update({item1.vertex.name: item1.weight})
        # listver1.sort()
        for item2 in listver1:
            w = self.getAddedVertex(item2[0])
            print(" ", w.name, end="")
            print(" ", end="")
            if item2[1].status == 0:
                print(weightList[w.name], end="")
                print(" DOWN")
            else:
                print(weightList[w.name])
def clearAll(self):
    """Reset the path-finding state of every vertex before a new search."""
    for node in self.vertexMap.values():
        node.reset()
# Implements Dijkstras algorithm to calculate shortest path. Input argument is the source vertex's name.
def shortestPath(self, source):
    """Dijkstra's algorithm from *source*; fills vertex.dist / vertex.prev.

    Uses the external buildMinHeap/extractMin/decreasePriority helpers on a
    list of (dist, vertex) tuples. Down vertices and down edges are skipped.
    """
    self.clearAll()
    v = self.getVertex(source)  # get the vertex object of the source.
    v.dist = 0  # set the distance of the vertex as 0.
    q = []
    for w in self.vertexMap:
        q.insert(len(q), (self.vertexMap[w].dist, self.vertexMap[w]))
    buildMinHeap(q, len(q))  # build a binary min heap for the priority queue
    s = []
    while q:
        v = extractMin(q)  # extract the minimum distance element from the priority queue
        if v.status == 0:  # if the vertex is not active, skip the vertex
            continue
        else:
            s.insert(len(s), v)  # marked the vertex as visited
            for element in v.adj:  # for each element in the adjacency list of vertex v,
                if element.vertex not in s and element.status == 1 and element.vertex.status == 1:  # if the edge and vertex is active
                    # Remember the old distance: the (dist, vertex) tuple is
                    # the key used to locate the entry in the heap below.
                    prevDistance = element.vertex.dist
                    if element.vertex.dist > v.dist + self.getWeight(v, element.vertex):  # relax: found a shorter route through v
                        element.vertex.dist = v.dist + self.getWeight(v, element.vertex)
                        element.vertex.prev = v
                        index = q.index((prevDistance, element.vertex))
                        decreasePriority(q, index, (element.vertex.dist, element.vertex))  # change the priority of the vertex as the distance is now changed.
# Prints all reachable vertices from all the vertices of the graph. It implements a DFS like algorithm for
# each vertex. The time complexity of the algorithm in O(V*(V+E)).
def reachable(self):
    """Print, for every active vertex, all vertices reachable from it.

    DFS from each vertex in alphabetical order; reachable names are
    printed alphabetically and indented under their start vertex.
    Overall cost is O(V * (V + E)).
    """
    ver = list(self.vertexMap.values())
    listver = []
    for item in ver:
        if item.status == 1:
            listver.insert(len(listver), item.name)
    listver.sort()  # step is to only arrange the vertices in an alphabetical order.
    listver2 = []
    for item1 in listver:
        w = self.getAddedVertex(item1)
        listver2.insert(len(listver2), w.name)  # seed "visited" with the start vertex itself
        print(w.name)  # print the current vertex name
        marked = []
        validPath = self.printValidPath(w, listver2, marked)  # recursively call this function until all the vertices are visited
        listver2.clear()
        validPath.sort()  # sort to print in alphabetical order.
        for name in validPath:
            print(" ", name)
        validPath.clear()  # clear this list to make it ready for the next vertex
# Helper function for reachable
def printValidPath(self, vertex, visited, marked1):
    """DFS helper for reachable(): collect names of active vertices
    reachable from *vertex* through active edges into *marked1*."""
    for edge in vertex.adj:
        if edge.status != 1:
            continue
        neighbour = edge.vertex
        if neighbour.status == 1 and neighbour.name not in visited:
            visited.append(neighbour.name)
            marked1.append(neighbour.name)
            self.printValidPath(neighbour, visited, marked1)
    return marked1
# Main script starts from here.
# Reads the file given as the first command-line argument (one
# "<source> <destination> <weight>" triple per line) and builds the graph,
# then loops asking the user for queries and dispatches on token count.
g = Graph()
fileobj = open(argv[1], "r")  # Opens the file which contains the initial graph.
for line in fileobj:
    node = line.split()
    if len(node) != 3:
        # Anything other than exactly 3 whitespace-separated tokens aborts.
        print("Ill formatted Line ", end="")
        print(node)
        exit()
    else:
        g.addEdgeIni(node[0], node[1], float(node[2]))  # Adds an edge from each vertex of the graph.
fileobj.close()
print("File read...")
print(len(g.vertexMap), "vertices present in the graph")  # Prints the number of vertices in the graph.
# Interactive query loop; commands are grouped by their argument count.
while True:
    inputstr = input("\nQuery: ")  # Get the input query from the user.
    query = inputstr.split()
    if len(query) == 3:
        if query[0] == "path":
            if query[1] not in g.vertexMap.keys():
                print("Source Vertex Not found")
            else:
                g.shortestPath(query[1])
                if query[2] not in g.vertexMap.keys():
                    print("Destination Vertex Not found")
                else:
                    g.printPath(query[2])
        elif query[0] == "deleteedge":
            g.deleteEdge(query[1], query[2])
        elif query[0] == "edgedown":
            g.edgeDown(query[1], query[2])
        elif query[0] == "edgeup":
            g.edgeUp(query[1], query[2])
        else:
            print("Invalid Command, Please try again")
    elif len(query) == 2:
        if query[0] == "vertexdown":
            g.vertexDown(query[1])
        elif query[0] == "vertexup":
            g.vertexUp(query[1])
        else:
            print("Invalid Command, Please try again")
    elif len(query) == 1:
        if query[0] == "print":
            g.printGraph()
        elif query[0] == "quit":
            break
        elif query[0] == "reachable":
            g.reachable()
        else:
            print("Invalid Command, Please try again")
    elif len(query) == 4:
        if query[0] == "addedge":
            g.addEdge(query[1], query[2], query[3])
        else:
            print("Invalid Command, Please try again")
    else:
        print("Wrong Command. Please try again")
| sumanttapas/Shortest-Paths-in-a-Network | OSPF_Dijkstras.py | OSPF_Dijkstras.py | py | 14,243 | python | en | code | 0 | github-code | 13 |
33763823049 | import pystan
import pickle
import argparse
import os
import pandas as pd
import numpy as np
import json
# Project-wide paths (data locations, checkpoint dir) loaded once at import.
with open('SETTINGS.json', 'r') as f:
    SETTINGS = json.load(f)
def create_stan_model():
    '''
    Compile model for stan.

    Pairwise model: each team gets a level (alpha) and a score-difference
    effect (beta). Regular-season outcomes enter the likelihood; the
    transformed parameter `pi` is the win probability of team 1 for each
    tournament game. Compilation takes on the order of a minute.
    '''
    model_code = '''
    /*
    pairwise logistic regression model of winning the game
    */
    data {
        int<lower=0> N_teams;
        int<lower=0> N; // number of games in regular season
        int<lower=0> N_tourney; // number of games in tournament
        int<lower=1, upper=N_teams> j_team[N + N_tourney]; // index for team 1
        int<lower=1, upper=N_teams> k_team[N + N_tourney]; // index for team 2
        // features
        real x1[N + N_tourney]; // score_mean_team1 - score_opp_team2
        real x2[N + N_tourney]; // score_opp_team1 - score_mean_team2
        // response variables
        int<lower=0, upper=1> team1win[N];
        real y[N]; // scorediff
    }
    transformed data {
    }
    parameters {
        real alpha[N_teams]; // team effect
        real beta[N_teams]; // team scorediff effect
        real<lower=0> sigma_y; // std for point difference
        real<lower=0> sigma_alpha; // std for team levels
        real<lower=0> sigma_beta; // std for scorediff effect
        real<lower=0> nu; // degrees of freedom for T distr
        real<lower=0, upper=20> mult; // effect multiplier between two models
    }
    transformed parameters {
        real<lower=0, upper=1> pi[N_tourney];
        vector[N_tourney] eta_tourney; // linear predictor
        for(n in 1:N_tourney)
            eta_tourney[n] = alpha[j_team[N+n]] - alpha[k_team[N+n]]
                + beta[j_team[N+n]] * x1[N+n]
                + beta[k_team[N+n]] * x2[N+n];
        // probability that team1 wins
        for(n in 1:N_tourney) {
            pi[n] = inv_logit(eta_tourney[n]);
        }
    }
    model {
        vector[N] eta; // linear predictor
        for(n in 1:N)
            eta[n] = alpha[j_team[n]] - alpha[k_team[n]]
                + beta[j_team[n]] * x1[n]
                + beta[k_team[n]] * x2[n];
        sigma_alpha ~ normal(0, 20);
        sigma_beta ~ normal(0, 20);
        alpha ~ normal(0, sigma_alpha); // team levels
        beta ~ normal(0, sigma_beta); // team levels
        nu ~ gamma(2, 0.1);
        // likelihoods
        team1win ~ bernoulli_logit(eta);
        y ~ student_t(nu, mult * eta, sigma_y);
    }
    generated quantities {
    }
    '''
    sm = pystan.StanModel(model_code=model_code)
    return sm
def create_stan_data(train_data_file, test_data_file):
    """Load the regular-season and tournament CSVs and assemble the data
    dictionary expected by the Stan model.

    Returns (stan_data, data) where *data* is the concatenated frame with
    regular-season rows first.
    """
    regular = pd.read_csv(train_data_file)
    tournament = pd.read_csv(test_data_file)
    data = pd.concat([regular, tournament]).reset_index()
    # Build one-based team ids (Stan uses one-based indexing).
    teams = set(data['team1'].unique()).union(data['team2'].unique())
    team_id2f = {team: idx for idx, team in enumerate(teams, 1)}
    regular_mask = data.tourney == 0
    stan_data = {
        'N_teams': len(teams),
        'N': regular_mask.sum(),
        'N_tourney': (data.tourney == 1).sum(),
        'j_team': data['team1'].map(team_id2f).values,
        'k_team': data['team2'].map(team_id2f).values,
        'x1': data['score_team_mean1'] - data['score_opp_mean2'],
        'x2': data['score_opp_mean1'] - data['score_team_mean2'],
        'team1win': data.loc[regular_mask, 'team1win'].values,
        'y': data.loc[regular_mask, 'score1'] - data.loc[regular_mask, 'score2'],
    }
    return stan_data, data
if __name__ == '__main__':
    ##################################################
    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='logistic')
    parser.add_argument('--num_iter', type=int, default=500)
    parser.add_argument('--num_chains', type=int, default=1)
    args = parser.parse_args()
    ##################################################
    # create model and fit directories
    model_directory = os.path.join(SETTINGS['MODEL_CHECKPOINT_DIR'],
                                   args.model_name)
    os.makedirs(model_directory, exist_ok=True)
    ##################################################
    # file names for serialized model and prediction
    model_fname = os.path.join(model_directory, 'model.pkl')
    fit_fname = os.path.join(model_directory, 'fit.pkl')
    prediction_fname = os.path.join(model_directory, 'prediction.csv')
    ##################################################
    # build the Stan data dict, then reuse a cached compiled model if one
    # was pickled on a previous run (compilation is slow)
    stan_data, data = create_stan_data(SETTINGS['TRAIN_DATA_CLEAN_PATH'],
                                       SETTINGS['TEST_DATA_CLEAN_PATH'])
    if os.path.exists(model_fname):
        # load pickled model
        with open(model_fname, 'rb') as f:
            pickle_data = pickle.load(f)
        sm = pickle_data['sm']
    else:
        # compile model - takes a minute
        sm = create_stan_model()
        with open(model_fname, "wb") as f:
            pickle.dump({'sm': sm}, f, protocol=-1)
    ##################################################
    # sample from posterior (fit model)
    fit = sm.sampling(data=stan_data, iter=args.num_iter, chains=args.num_chains)
    with open(fit_fname, "wb") as f:
        pickle.dump({'sm': sm, 'fit': fit}, f, protocol=-1)
    ##################################################
    # extract model parameters and predictions
    la = fit.extract()
    alpha = la['alpha']
    pi = la['pi']
    # prediction: posterior median win probability per tournament game
    y_pred = np.median(pi, axis=0)
    ID = data.loc[data['tourney'] == 1, 'ID'].values
    df_pred = pd.DataFrame({'ID': ID, 'Pred': y_pred})
    ##################################################
    # save prediction csv
    df_pred[['ID', 'Pred']].to_csv(prediction_fname, index=False)
| YouHoo0521/kaggle-madtown-machine-learning-madness-2019 | train.py | train.py | py | 5,746 | python | en | code | 4 | github-code | 13 |
34195737434 | import pygame
import math
from enum import Enum
from random import randint
from Sound import Sound, Sounds
from GameObject import GameObject
from DuckHuntSprites import DuckAnimationState
class Duck(GameObject):
    """A duck game object driven by a small state machine (see DuckState).

    tick() dispatches on duckState; sprites come from *spriteMap* keyed by
    DuckAnimationState, and all movement is scaled by the frame delta from
    *stoper* (the game clock/stopwatch).
    """
    # TODO: make it use sprites instead of image

    def __init__(self, display, stoper, positionVector, spriteMap, level):
        super().__init__(display, stoper, positionVector)
        self.gameDisplay = display
        self.duckState = DuckState.FLYING
        self.spriteMap = spriteMap
        self.currentImageSet = self.spriteMap[DuckAnimationState.HORIZONTAL]
        self.duckAnimationState = DuckAnimationState.HORIZONTAL
        # TODO: calculate it somehow (half the 32x32 sprite, hard-coded)
        self.imageCenterY = 16
        self.imageCenterX = 16
        self.image = self.currentImageSet.getFrame()
        # Animation stuff
        self.animationIntervalInMs = 100
        self.lastAnimationUpdate = 0
        # Movement: base speed grows a little with each level.
        self.movementSpeed = 0.1 + (0.005 * (level - 1))
        self.angle = 0
        self.setRandomAngle()
        self.lastDirectionChange = 0
        self.flyingDirectionChangeThreshold = randint(1000, 5000)
        # Quacking (random interval between quacks, in ms)
        self.quackingThreshold = randint(1000, 15000)
        self.lastQuacked = 0
        # State
        self.lastStateChanged = 0

    def render(self):
        """Blit the current frame, flipped to match the flight direction."""
        if (self.renderable):
            horizontalFlip = self.directionVector.x < 0
            verticalFlip = self.directionVector.y > 0 and (self.duckAnimationState == DuckAnimationState.UP)
            self.gameDisplay.blit(pygame.transform.flip(self.image, horizontalFlip, verticalFlip), self.positionVector)
        return None

    def tick(self):
        """Advance one frame: dispatch to the handler of the current state."""
        super().tick()
        # TODO: create some "AI" logic
        # TODO: duck can "bounce" of the vertical walls of the screen. I think they might also bounce of, of the other ducks
        if self.duckState == DuckState.FLYING:
            self.flying()
            return None
        if self.duckState == DuckState.DEAD:
            self.dead()
            return None
        if self.duckState == DuckState.SHOT:
            self.shot()
            return None
        if self.duckState == DuckState.ESCAPING:
            self.escaping()
            return None
        if self.duckState == DuckState.ESCAPED:
            self.escaped()
            return None
        if self.duckState == DuckState.FALLING:
            self.falling()
            return None
        return None

    def flying(self):
        """Normal flight: move, bounce off walls, randomly re-aim and quack."""
        self.performTimeSynchronizedMove()
        self.checkForCollisionWithWall()
        # make duck flying pattern more random
        if self.stoper.getCurrentTicks() - self.lastDirectionChange > self.flyingDirectionChangeThreshold:
            self.setRandomAngle()
            self.lastDirectionChange = self.stoper.getCurrentTicks()
            self.flyingDirectionChangeThreshold = randint(1000, 5000)
        if self.stoper.getCurrentTicks() - self.lastQuacked > self.quackingThreshold:
            Sound.play(Sounds.Quack)
            self.quackingThreshold = randint(1000, 8000)
            self.lastQuacked = self.stoper.getCurrentTicks()

    def dead(self):
        # Terminal state: nothing left to do.
        return None

    def shot(self):
        """After a short hit-freeze (500 ms), switch to FALLING straight down."""
        if self.stoper.getCurrentTicks() - self.lastStateChanged > 500:
            self.duckState = DuckState.FALLING
            self.currentImageSet = self.spriteMap[DuckAnimationState.FALLING]
            self.setDirectionFromAngle(90)
            self.movementSpeed = 0.15
            Sound.play(Sounds.Falling)
        return None

    def escaping(self):
        """Fly off screen; once well outside the bounds, become ESCAPED."""
        self.performTimeSynchronizedMove()
        w, h = self.gameDisplay.get_size()
        if (self.positionVector.x - self.imageCenterX) < -self.imageCenterX*3 or (self.positionVector.x + self.imageCenterX) > w + (self.imageCenterX*3)\
                or (self.positionVector.y - self.imageCenterY) < -self.imageCenterY*3 or (self.positionVector.y + self.imageCenterY) > h + (self.imageCenterY*3):
            self.duckState = DuckState.ESCAPED
        # TODO: this is hardcoded and looks like p of s
        # NOTE(review): the unconditional assignment below makes the bounds
        # check above pointless — the duck escapes on the very next tick.
        # Confirm whether this is intentional before removing it.
        self.duckState = DuckState.ESCAPED
        return None

    def escaped(self):
        # Terminal status nothing to see here - removes tries
        return None

    def falling(self):
        """Accelerate downwards; mark DEAD once below the bottom edge."""
        self.performTimeSynchronizedMove()
        self.movementSpeed = self.movementSpeed + (0.001 * self.stoper.getDetla())
        w, h = pygame.display.get_surface().get_size()
        if self.positionVector.y > h:
            self.duckState = DuckState.DEAD
            self.renderable = False
        return None

    def checkIfShot(self, x, y):
        """Return True (and enter SHOT state) if (x, y) hits a flying duck."""
        if (self.duckState != DuckState.FLYING):
            return False
        spriteRect = self.image.get_rect().move(self.positionVector)
        if spriteRect.collidepoint(x, y):
            self.duckState = DuckState.SHOT
            self.lastStateChanged = self.stoper.getCurrentTicks()
            self.currentImageSet = self.spriteMap[DuckAnimationState.HIT]
            return True
        return False

    def performTimeSynchronizedMove(self):
        """Move along directionVector, scaled by speed and frame delta."""
        self.positionVector.x += self.movementSpeed * self.stoper.getDetla() * self.directionVector.x
        self.positionVector.y += self.movementSpeed * self.stoper.getDetla() * self.directionVector.y
        return None

    def checkForCollisionWithWall(self):
        """Reflect the direction vector when touching a screen edge."""
        w, h = self.gameDisplay.get_size()
        if (self.positionVector.x - self.imageCenterX) < 0 or (self.positionVector.x + self.imageCenterX) > w:
            self.directionVector.x = -self.directionVector.x
        # TODO: this is hardcoded and looks like p of s
        # Bottom boundary is 80% of the screen height (the ground strip).
        if (self.positionVector.y - self.imageCenterY) < 0 or (self.positionVector.y + self.imageCenterY) > h * 0.8:
            self.directionVector.y = -self.directionVector.y

    def setRandomAngle(self):
        self.setDirectionFromAngle(randint(0, 360))

    def setDirectionFromAngle(self, angle):
        """Point directionVector at *angle* degrees; while flying/escaping
        also pick the sprite set matching the new heading."""
        if self.duckState == DuckState.FLYING or self.duckState == DuckState.ESCAPING:
            if 80 <= angle <= 100 or 260 <= angle <= 280:
                self.currentImageSet = self.spriteMap[DuckAnimationState.UP]
                self.duckAnimationState = DuckAnimationState.UP
            # NOTE(review): "330 <= angle <= 0" can never be true, so angles
            # in (330, 360) keep the previous sprite set — confirm intent.
            if 0 <= angle <= 30 or 160 <= angle <= 210 or 330 <= angle <= 0:
                self.currentImageSet = self.spriteMap[DuckAnimationState.HORIZONTAL]
                self.duckAnimationState = DuckAnimationState.HORIZONTAL
            if 30 <= angle <= 80 or 110 <= angle <= 150 or 210 <= angle <= 250 or 270 <= angle <= 330:
                self.currentImageSet = self.spriteMap[DuckAnimationState.DIAGONAL]
                self.duckAnimationState = DuckAnimationState.DIAGONAL
        rads = math.radians(angle)
        x = math.cos(rads)
        y = math.sin(rads)
        self.directionVector = pygame.Vector2(x, y)
        return None

    def flyAway(self):
        """Force the duck to escape: speed up and head roughly upward."""
        self.duckState = DuckState.ESCAPING
        self.movementSpeed = 0.5
        self.setDirectionFromAngle(randint(250, 290))
class DuckState(Enum):
    """Lifecycle states of a Duck; Duck.tick() dispatches on this value."""
    FLYING = 0    # normal flight; the only state in which it can be shot
    DEAD = 1      # terminal: off screen after falling
    SHOT = 2      # brief hit-freeze before FALLING
    ESCAPING = 3  # flying off screen
    ESCAPED = 4   # terminal: left the screen
    FALLING = 5   # dropping straight down after being shot
| UcMarlo/DoocHunt | Duck.py | Duck.py | py | 7,126 | python | en | code | 2 | github-code | 13 |
73556714256 |
class Viajero_Frecuente:
    """Frequent-flyer record: traveller number, personal data and miles.

    User-facing messages stay in Spanish, matching the rest of the program.
    """
    __numviajero = 0
    __dni = " "
    __nombre = " "
    __apellido = " "
    __millasacum = 0

    def __init__(self, num_viajero: int = 0, DNI="", nombre="", apellido="", millas_acum: int = 0):
        self.__numviajero = num_viajero
        self.__dni = DNI
        self.__nombre = nombre
        self.__apellido = apellido
        self.__millasacum = millas_acum

    def __str__(self):
        fields = (self.__numviajero, self.__dni, self.__nombre,
                  self.__apellido, self.__millasacum)
        return ' '.join(str(field) for field in fields)

    def getviajero(self):
        """Return the traveller number."""
        return self.__numviajero

    def cantidadTotaldeMillas(self):
        """Return the miles accumulated so far."""
        return self.__millasacum

    def acumularMillas(self, newmillas: int):
        """Add *newmillas* to the balance; 0 is treated as 'not found'.

        Returns the (possibly updated) balance either way.
        """
        if newmillas == 0:
            print("no se ha podido encontrado al viajero solicitado")
        else:
            self.__millasacum += newmillas
            print("el nuevo total de millas acumuladas es:{}".format(self.__millasacum))
        return self.__millasacum

    def canjearmillas(self, cant):
        """Redeem *cant* miles when the balance allows it."""
        if cant > self.__millasacum:
            print("No es posible realizar el canje")
        else:
            self.__millasacum -= cant
            print("El canje ha sido realizado con éxito, millas restantes{}".format(self.__millasacum))
| Merypi/POO | Ejercicio 2/viajero.py | viajero.py | py | 1,364 | python | es | code | 0 | github-code | 13 |
73447779537 | from node.domain.config.models import TemplateKeyName
# Template-key lookup constants, grouped by DCC/application. Each
# TemplateKeyName pairs a work-area template name with its publish-area
# counterpart; single-argument entries have no work/publish split.
# NOTE(review): the names must match the templates declared in the project's
# template configuration — verify there before renaming any of them.
# Root templates - used by most of the read/write fields
sequence_root = TemplateKeyName('sequence_root')
shot_root = TemplateKeyName('shot_root')
step_root = TemplateKeyName('shot_task_root')
asset_root = TemplateKeyName('asset_root')
asset_step_root = TemplateKeyName('asset_task_root')
# Maya Shot templates - used by "3D - MtoA Shot" and "Matte - Clarisse Shot"
maya_shot_render_folder = TemplateKeyName(work='Shot_MayaRender_Work_Generic_Name',
                                          publish='Shot_MayaRender_Publish_Generic_Name')
maya_shot_render_layer = TemplateKeyName(work='Shot_MayaRender_Work_Layer', publish='Shot_MayaRender_Publish_Layer')
maya_shot_render_aov = TemplateKeyName(work='Shot_MayaRender_Work_Aov', publish='Shot_MayaRender_Publish_Aov')
maya_shot_render_sequence = TemplateKeyName(work='Shot_MayaRender_Work_Sequence',
                                            publish='Shot_MayaRender_Publish_Sequence')
# Maya Asset templates - used by "3D - MtoA Asset" read fields
maya_asset_render_folder = TemplateKeyName(work='Asset_MayaRender_Work_Generic_Name',
                                           publish='Asset_MayaRender_Publish_Generic_Name')
maya_asset_render_layer = TemplateKeyName(work='Asset_MayaRender_Work_Layer', publish='Asset_MayaRender_Publish_Layer')
maya_asset_render_aov = TemplateKeyName(work='Asset_MayaRender_Work_Aov', publish='Asset_MayaRender_Publish_Aov')
maya_asset_render_sequence = TemplateKeyName(work='Asset_MayaRender_Work_Sequence',
                                             publish='Asset_MayaRender_Publish_Sequence')
# Houdini Shot templates - used by "3D - HtoA Shot" and "Matte - Clarisse Shot" read fields
htoa_shot_render_folder = TemplateKeyName(work='Shot_HoudiniRender_Work_Generic_Name',
                                          publish='Shot_HoudiniRender_Publish_Generic_Name')
htoa_asset_render_folder = TemplateKeyName(work='Asset_HoudiniRender_Work_Generic_Name',
                                           publish='Asset_HoudiniRender_Publish_Generic_Name')
htoa_shot_render_aov = TemplateKeyName(work='Shot_HoudiniRender_Work_Aov', publish='Shot_HoudiniRender_Publish_Aov')
htoa_asset_render_aov = TemplateKeyName(work='Asset_HoudiniRender_Work_Aov', publish='Asset_HoudiniRender_Publish_Aov')
htoa_shot_render_sequence = TemplateKeyName(work='Shot_HoudiniRender_Work_Sequence',
                                            publish='Shot_HoudiniRender_Publish_Sequence')
htoa_asset_render_sequence = TemplateKeyName(work='Asset_HoudiniRender_Work_Sequence',
                                             publish='Asset_HoudiniRender_Publish_Sequence')
# Clarisse Shot templates - unused by fields
clarisse_shot_render_folder = TemplateKeyName(work='Shot_ClarisseRender_Work_Generic_Name',
                                              publish='Shot_ClarisseRender_Publish_Generic_Name')
clarisse_shot_render_image = TemplateKeyName(work='Shot_ClarisseRender_Work_Image',
                                             publish='Shot_ClarisseRender_Publish_Image')
clarisse_shot_render_layer = TemplateKeyName(work='Shot_ClarisseRender_Work_Layer',
                                             publish='Shot_ClarisseRender_Publish_Layer')
clarisse_shot_render_sequence = TemplateKeyName(work='Shot_ClarisseRender_Work_Sequence',
                                                publish='Shot_ClarisseRender_Publish_Sequence')
# Maya Playblast templates - used by "3D - Maya Blast Shot" read fields
mayablast_shot_render_folder = TemplateKeyName(work='Shot_MayaBlast_Work_Generic_Name',
                                               publish='Shot_MayaBlast_Publish_Generic_Name')
mayablast_shot_camera = TemplateKeyName(work='Shot_MayaBlast_Work_Camera', publish='Shot_MayaBlast_Publish_Camera')
mayablast_shot_render_sequence = TemplateKeyName(work='Shot_MayaBlast_Work_Sequence',
                                                 publish='Shot_MayaBlast_Publish_Sequence')
# Hiero templates - used by "Footage - Plate" read fields
footage_root = TemplateKeyName('Hiero_Footage_Root')
footage_render_sequence = TemplateKeyName('Hiero_Footage_Sequence')
# Nuke templates - used by various Nuke read/write fields
nuke_shot_render_folder = TemplateKeyName(work='Shot_NukeRender_Work_Generic_Name',
                                          publish='Shot_NukeRender_Publish_Generic_Name')
nuke_shot_render_sequence = TemplateKeyName(work='Shot_NukeRender_Work_Sequence',
                                            publish='Shot_NukeRender_Publish_Sequence')
nuke_asset_render_folder = TemplateKeyName(work='Asset_NukeRender_Work_Generic_Name',
                                           publish='Asset_NukeRender_Publish_Generic_Name')
nuke_asset_render_sequence = TemplateKeyName(work='Asset_NukeRender_Work_Sequence',
                                             publish='Asset_NukeRender_Publish_Sequence')
nuke_shot_element_render_root = TemplateKeyName('Shot_Element_NukeRender_Root')
nuke_asset_element_render_root = TemplateKeyName('Asset_Element_NukeRender_Root')
nuke_shot_element_render_folder = TemplateKeyName('Shot_Element_NukeRender_Generic_Name')
nuke_shot_element_render_sequence = TemplateKeyName('Shot_Element_NukeRender_Sequence')
nuke_asset_element_render_folder = TemplateKeyName('Asset_Element_NukeRender_Generic_Name')
nuke_asset_element_render_sequence = TemplateKeyName('Asset_Element_NukeRender_Sequence')
# Nuke Scene templates - unused (except part of them as constant string)
nuke_shot_scene = TemplateKeyName(work='Shot_NukeScene_Work', publish='Shot_NukeScene_Publish')
nuke_asset_scene = TemplateKeyName(work='Asset_NukeScene_Work', publish='Asset_NukeScene_PublishArea')
nuke_shot_proxy = TemplateKeyName('Shot_NukeProxy_Work_Sequence')
nuke_asset_proxy = TemplateKeyName('Asset_NukeProxy_Work_Sequence')
# Flame templates - used by "Finish - Flame Shot" read fields
flame_shot_render_folder = TemplateKeyName('Shot_FlameRender_Work_Generic_Name')
flame_shot_render_sequence = TemplateKeyName('Shot_FlameRender_Work_Sequence')
# Photoshop Shot templates - used by "2D - Photoshop Shot" read fields
photoshop_shot_render_sequence = TemplateKeyName(work='Shot_PhotoshopRender_Work_Image',
                                                 publish='Shot_PhotoshopRender_Publish_Image')
# Photoshop Asset templates - used by "2D - Photoshop Asset" read fields
photoshop_asset_render_sequence = TemplateKeyName(work='Asset_PhotoshopRender_Work_Image',
                                                  publish='Asset_PhotoshopRender_Publish_Image')
# Harmony Shot templates - used by "2D - Harmony Shot" read fields
harmony_shot_render_sequence = TemplateKeyName(work='Shot_HarmonyRender_Work_Image',
                                               publish='Shot_HarmonyRender_Publish_Image')
# Harmony Asset templates - used by "2D - Harmony Asset" read fields
harmony_asset_render_sequence = TemplateKeyName(work='Asset_HarmonyRender_Work_Image',
                                                publish='Asset_HarmonyRender_Publish_Image')
| Vincannes/vfxWrite | node/domain/config/fields.py | fields.py | py | 7,123 | python | en | code | 0 | github-code | 13 |
15186198662 | import torch
import torch.nn.functional as F
class FeatureMatchLoss(torch.nn.Module):
    """Feature matching loss module (L1 between fake and real features)."""

    def __init__(
        self,
        average_by_layers=True,
        average_by_discriminators=True,
        include_final_outputs=False,
    ):
        """Initialize FeatureMatchLoss module.

        Args:
            average_by_layers (bool): Average the loss over each
                discriminator's layers.
            average_by_discriminators (bool): Average the loss over the
                discriminators.
            include_final_outputs (bool): Also match each discriminator's
                last output (its prediction), not only hidden features.
        """
        super().__init__()
        self.average_by_layers = average_by_layers
        self.average_by_discriminators = average_by_discriminators
        self.include_final_outputs = include_final_outputs

    def forward(self, feats_hat, feats):
        """Calculate feature matching loss.

        Args:
            feats_hat (list): List of lists of discriminator outputs
                calculated from generator outputs.
            feats (list): List of lists of discriminator outputs
                calculated from groundtruth.

        Returns:
            Tensor: Feature matching loss value.
        """
        feat_match_loss = 0.0
        n_discriminators = 0
        for feats_hat_, feats_ in zip(feats_hat, feats):
            n_discriminators += 1
            feat_match_loss_ = 0.0
            if not self.include_final_outputs:
                feats_hat_ = feats_hat_[:-1]
                feats_ = feats_[:-1]
            n_layers = 0
            for feat_hat_, feat_ in zip(feats_hat_, feats_):
                # Ground-truth features are targets only; never backprop
                # through the real branch.
                feat_match_loss_ += F.l1_loss(feat_hat_, feat_.detach())
                n_layers += 1
            # Explicit counters instead of the loop variables: the original
            # divided by `j + 1` / `i + 1`, which raised NameError (and could
            # divide by a stale value) when a feature list was empty.
            if self.average_by_layers and n_layers > 0:
                feat_match_loss_ /= n_layers
            feat_match_loss += feat_match_loss_
        if self.average_by_discriminators and n_discriminators > 0:
            feat_match_loss /= n_discriminators
        return feat_match_loss
| kan-bayashi/ParallelWaveGAN | parallel_wavegan/losses/feat_match_loss.py | feat_match_loss.py | py | 1,597 | python | en | code | 1,427 | github-code | 13 |
2866587768 | from evoflow.engine import OP
from evoflow import backend as B
class Shuffle(OP):
    """Evolutionary op that shuffles genes within part of the population."""

    O_AUTOGRAPH = True
    O_XLA = True

    def __init__(self, population_fraction, **kwargs):
        """Shuffle genes within the chromosome.

        Args:
            population_fraction (float): Fraction of the population whose
                chromosomes get their genes shuffled. Must be in (0, 1].
            debug (bool, optional): print debug information and function
                returns additional data.

        Returns:
            tensor: population with shuffled genes.

        See:
            https://en.wikipedia.org/wiki/Crossover_(genetic_algorithm)
        """
        if not (0 < population_fraction <= 1.0):
            # Fixed: the original message named a different parameter
            # ("num_crossover_fraction") and stated the wrong range.
            raise ValueError("population_fraction must be in (0, 1]")
        self.population_fraction = population_fraction
        super(Shuffle, self).__init__(**kwargs)

    def call(self, population):
        # Shuffle chromosome order first so a different subset is picked
        # each call (skipped in debug mode for reproducibility).
        if not self.debug:
            population = B.shuffle(population)

        # how many chromosomes to shuffle
        num_shuffle = int(population.shape[0] * self.population_fraction)
        shuffled_population = population[:num_shuffle]
        shuffled_population = B.full_shuffle(shuffled_population)
        self.print_debug("shuffled population", shuffled_population.shape)

        # recompose with non shuffled population
        shuffled_population = B.concatenate(
            [shuffled_population, population[num_shuffle:]])
        return shuffled_population
if __name__ == '__main__':
    # Ad-hoc benchmark harness for the Shuffle op.
    from copy import copy
    from evoflow.utils import op_optimization_benchmark

    NUM_RUNS = 10
    pop_shape = (1000, 1000, 100)
    population = B.randint(0, 256, pop_shape)
    population_fraction = 0.5
    OP = Shuffle(population_fraction)  # NOTE: shadows the imported OP base class
    op_optimization_benchmark(population, OP, NUM_RUNS).report()
    quit()
    # NOTE(review): everything below is unreachable dead demo code (after
    # quit()); kept as-is — delete or move behind a flag if still wanted.
    GENOME_SHAPE = (6, 4, 4)
    population = B.randint(0, 256, GENOME_SHAPE)
    population_fraction = 0.5
    max_crossover_size_fraction = (0.5, 0.5)
    print(population.shape)
    original_population = copy(population)
    population = Shuffle(population_fraction, debug=True)(population)
    # diff matrix
    diff = B.clip(abs(population - original_population), 0, 1)
    print(diff)
| google-research/evoflow | evoflow/ops/shuffle.py | shuffle.py | py | 2,270 | python | en | code | 31 | github-code | 13 |
7600495606 | #!/usr/bin/env python3
import sys, getopt, fileinput
def main(argv):
    """Parse -v/--version from *argv* and rewrite the `version =` line
    in ./setup.py (a .bak backup of the original is kept).

    Exits with status 2 on bad/missing arguments.
    """
    help_string = 'USAGE: update_pip_version.py -v <versionstring>'
    new_version = ''
    try:
        # Fixed: the long option was declared as "version =" (stray space),
        # which made --version unparseable; also catch only GetoptError
        # instead of a bare except that swallowed everything.
        opts, args = getopt.getopt(argv, "hv:", ["version="])
    except getopt.GetoptError:
        print(help_string)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(help_string)
            sys.exit()
        elif opt in ("-v", "--version"):
            new_version = arg
    if new_version == "":
        print(help_string)
        sys.exit(2)
    print("NEW VERSION: " + new_version)

    # Rewrite the file in place; fileinput redirects stdout into the file.
    with fileinput.FileInput("./setup.py", inplace=True, backup='.bak') as file:
        for line in file:
            if "version =" in line:
                line = "\tversion = \"" + new_version + "\","
            print(line.rstrip())
if __name__ == "__main__":
    # Skip the script name so main() only sees the option arguments.
    main(sys.argv[1:])
| mhulden/pyfoma | .github/workflows/update_pip_version.py | update_pip_version.py | py | 779 | python | en | code | 25 | github-code | 13 |
36021849318 | from Zadanie import bubble_sort
import time
import random
def faster_bubble_sort(x):
    """Bubble sort with the early-exit optimisation.

    Sorts *x* in place (ascending) and returns it. The pass loop stops as
    soon as a full sweep makes no swap; additionally each sweep now skips
    the already-sorted tail, which the original version re-scanned.
    """
    for done in range(len(x)):
        swapped = False
        # After `done` completed passes, the last `done` items are final.
        for j in range(0, len(x) - 1 - done):
            if x[j] > x[j + 1]:
                swapped = True
                x[j], x[j + 1] = x[j + 1], x[j]
        if not swapped:
            return x
    return x
def slower_bubble_sort(x):
    """Deliberately naive bubble sort used as the benchmark baseline.

    Same result as the other sorts, but it always performs the full n
    passes with no early exit — kept slow on purpose for the timing
    comparison in the script below.
    """
    for _ in range(len(x)):
        j = 0
        while j < len(x) - 1:
            if x[j] > x[j + 1]:
                x[j], x[j + 1] = x[j + 1], x[j]
            j += 1
    return x
# Benchmark: time the imported bubble_sort against the two local variants
# on identical copies of one random 500-element dataset.
test = []
for x in range(500):
    test.append(random.randint(-10000, 10000))
normal_bubble = test.copy()
extended_bubble = test.copy()
slower_bubble = test.copy()
n_start = time.time()
bubble_sort(normal_bubble)
n_end = time.time()
f_start = time.time()
faster_bubble_sort(extended_bubble)
f_end = time.time()
s_start = time.time()
slower_bubble_sort(slower_bubble)
s_end = time.time()
print(f"Normal bubble sort's time equals {n_end - n_start} seconds.\n"
      f"Faster bubble sort's time equals {f_end - f_start} seconds.\n"
      f"Slower bubble sort's time equals {s_end - s_start} seconds for this dataset,"
      f" so the second is the most effective.")
| MatPatCarry/Algorithms_univerity_classes | WDA_List_2/Extended bubble sorts.py | Extended bubble sorts.py | py | 1,416 | python | en | code | 0 | github-code | 13 |
5014100850 | from django.contrib.auth.models import User
from django.http import HttpRequest, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from feedbacks.models import Feedback
# Create your views here.
@csrf_exempt
def post_detail(request: HttpRequest) -> HttpResponse:
    """Create a feedback entry (POST) or list feedback per user (GET).

    POST expects a JSON body with "username", "issues" and "description"
    and answers with the new post id. GET answers with a JSON object
    mapping each username to a list of their feedback entries.
    """
    if request.method == 'POST':
        import json
        post_data: dict = json.loads(request.body.decode())
        issues = post_data['issues']
        username = User.objects.get(username=post_data['username'])
        description = post_data['description']
        message = Feedback.objects.create(username=username, issues=issues, description=description)
        data = {
            'post_id': message.id,
            'issues': issues,
        }
        return HttpResponse(json.dumps(data), headers={
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Credentials": True,
        })
    if request.method == 'GET':
        import json
        messages = Feedback.objects.all()
        result: dict = {}
        for message in messages:
            key = str(message.username)
            # NOTE(review): POST stores "description" but this reads
            # message.comment — confirm the Feedback model field name.
            if key in result:
                # Fixed: the original did `result[key] += message.comment`,
                # which extends the list with the string's individual
                # characters instead of appending the entry.
                result[key].append(message.comment)
            else:
                result[key] = [message.comment]
        return HttpResponse(json.dumps(result, default=str), headers={
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Credentials": True,
        })
| AbhiShake1/music-app-backend | feedbacks/views.py | views.py | py | 1,429 | python | en | code | 1 | github-code | 13 |
72097642577 | from pyradioconfig.parts.ocelot.calculators.calc_fec import CALC_FEC_Ocelot
class Calc_FEC_Sol(CALC_FEC_Ocelot):
    """Sol-specific FEC register calculations (extends the Ocelot part)."""

    def calc_postamble_regs(self, model):
        """Program the FRC_TRAILTXDATACTRL register fields.

        For the soft demod the FCS bytes are sent as forced trailing TX
        data (postamble disabled); every other demod selection falls back
        to the Ocelot behaviour via super().
        """
        demod_select = model.vars.demod_select.value
        fcs_type_802154 = model.vars.fcs_type_802154.value

        if demod_select == model.vars.demod_select.var_enum.SOFT_DEMOD:
            trailtxdata = 0
            trailtxdataforce = 1
            postambleen = 0
            trailtxdatacnt = 7
            # FCS length in bytes depends on the 802.15.4 FCS type.
            if fcs_type_802154 == model.vars.fcs_type_802154.var_enum.TWO_BYTE:
                fcs_len_byte = 2
            else:
                fcs_len_byte = 4
            trailtxreplen = fcs_len_byte - 1  # field encodes byte count - 1

            self._reg_write(model.vars.FRC_TRAILTXDATACTRL_TRAILTXDATA, trailtxdata)
            self._reg_write(model.vars.FRC_TRAILTXDATACTRL_TRAILTXDATAFORCE, trailtxdataforce)
            self._reg_write(model.vars.FRC_TRAILTXDATACTRL_POSTAMBLEEN, postambleen)
            self._reg_write(model.vars.FRC_TRAILTXDATACTRL_TRAILTXDATACNT, trailtxdatacnt)
            self._reg_write(model.vars.FRC_TRAILTXDATACTRL_TRAILTXREPLEN, trailtxreplen)
        else:
            super(Calc_FEC_Sol, self).calc_postamble_regs(model)
| jc-plhm/Z3GatewayHost_Sectronic | src/platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/calculators/calc_fec.py | calc_fec.py | py | 1,199 | python | en | code | 1 | github-code | 13 |
11080909152 | import time
def time_checker(func):
    """Decorator that prints how long each call to *func* takes.

    Fixed vs. the original: accepts positional as well as keyword
    arguments, and propagates the wrapped function's return value
    (the original wrapper always returned None).
    """
    def wrapper(*args, **kwargs):
        t0 = time.time()
        result = func(*args, **kwargs)
        t1 = time.time()
        print("Time elapsed:", t1 - t0, "seconds")
        return result
    return wrapper
@time_checker
def useful_function(number):
    """Busy-work function whose only purpose is to take measurable time."""
    total = 0
    for left in range(number + 1):
        for right in range(left + 1, number + 1):
            total += left * right
    # The accumulated value is discarded — the loops exist purely to burn
    # CPU time for the decorator to measure.
    total = 1000000000
    print("Very useful function finished its very useful work! Bravo!")
useful_function(number=int(input()))  # read the problem size from stdin and run the timed demo
| osakhsa/decorators | 2.py | 2.py | py | 534 | python | en | code | 0 | github-code | 13 |
22869018992 | import pytest
from aiogram import Bot
from tests.factories.chat import ChatFactory
from tests.mocked_bot import MockedBot
@pytest.fixture()
def bot():
    """Yield a MockedBot installed as aiogram's current Bot.

    On teardown the previous current-bot context is restored and the
    cached ``me`` value of the mock is invalidated.
    """
    mocked_bot = MockedBot()
    context_token = Bot.set_current(mocked_bot)
    yield mocked_bot
    Bot.reset_current(context_token)
    mocked_bot.me.invalidate(mocked_bot)
@pytest.fixture()
def private_chat():
    """Provide a freshly built chat object for each test."""
    chat = ChatFactory()
    return chat
| Abdo-Asil/abogram | tests/conftest.py | conftest.py | py | 344 | python | en | code | 0 | github-code | 13 |
19776156418 | #! /c/Users/HP/AppData/Local/Programs/Python/Python310/python
import re
import sys
from bisect import bisect_left
from pathlib import Path
# Path of the text file to spell-check, taken from the command line.
filePath = sys.argv[1]
# Word list loaded once at import time; wordExists() binary-searches it.
# NOTE(review): assumes ./dictionary.txt exists in the CWD and is already
# sorted (a requirement of the binary search) — confirm.
myDictionaryList = Path('./dictionary.txt').read_text().lower().split("\n")
def getInputWordList(filePath):
    """Read *filePath* and return its lowercased words as a list.

    Words may contain letters, digits and internal hyphens (e.g. "ice-cream");
    every other character is treated as a separator.

    Bug fixes vs. the original:
    * the character class ``[^(a-z0-9)-?(a-z0-9)$]`` accidentally kept the
      literal characters ( ) ? $ — replaced with the intended ``[^a-z0-9-]``;
    * ``split(" ")`` produced empty tokens wherever two separators were
      adjacent — ``split()`` drops them (matching the list that is printed).
    """
    # One long lowercased string; newlines become ordinary separators.
    theString = Path(filePath).read_text().replace("\n", " ").lower()

    # Keep only letters, digits and hyphens; everything else becomes a space.
    sanitizedWordList = re.sub(r'[^a-z0-9-]', ' ', theString)

    print("Your input contained the following words: ")
    print(sanitizedWordList.split())
    print("")
    return sanitizedWordList.split()
def wordExists(testWord, dictionaryWordList=None):
    """Return True iff *testWord* occurs in the sorted *dictionaryWordList*.

    When no list is given, the module-level dictionary is used.

    Improvements vs. the original:
    * the default argument was ``myDictionaryList`` itself, which baked the
      module global into the signature at import time; it is now resolved
      lazily at call time;
    * the hand-rolled binary search is replaced by ``bisect.bisect_left``.
    """
    if dictionaryWordList is None:
        dictionaryWordList = myDictionaryList
    i = bisect_left(dictionaryWordList, testWord)
    return i < len(dictionaryWordList) and dictionaryWordList[i] == testWord
def CheckGrammarUsingBinarySearch(testWordList):
    """Print every word of *testWordList* missing from the dictionary.

    Missing words (as judged by wordExists) are listed numbered from 1.
    """
    print("The following words were not found in the dictionary;")
    missing = (word for word in testWordList if not wordExists(word))
    for index, word in enumerate(missing, start=1):
        print(index, ". ", word)
CheckGrammarUsingBinarySearch(getInputWordList(filePath)) | VinceXIV/binary-search | grammar-checker.py | grammar-checker.py | py | 1,786 | python | en | code | 0 | github-code | 13 |
69976113298 | import numpy as np
import matplotlib.pylab as plt
import sklearn.datasets as skdata
import sklearn
# Load the 8x8 handwritten-digit images and flatten each one into a
# 64-element feature row (one row per image).
numeros = skdata.load_digits()
target = numeros['target']
imagenes = numeros['images']
n_imagenes = len(target)
data = imagenes.reshape((n_imagenes, -1))
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# NOTE(review): `scaler` is created but never applied to the data — confirm
# whether standardization before the split was intended.
scaler = StandardScaler()
# 70/30 train/test split of the flattened images and their labels.
x_train, x_test, y_train, y_test = train_test_split(data, target, train_size=0.7)
def distancia(x_train, y_train):
    """Characterize the digit-1 class via PCA and return ``[vect, dist]``.

    vect -- the 7th principal direction (eigenvector of the class covariance,
            eigenvalues sorted in descending order, real parts only);
    dist -- the mean absolute projection of the class samples onto vect,
            later used as a classification threshold.
    """
    digit = 1
    mask = y_train == digit
    ones = x_train[mask]
    # Eigen-decomposition of the class covariance, largest eigenvalue first.
    eigvals, eigvecs = np.linalg.eig(np.cov(ones.T))
    eigvals = np.real(eigvals)
    eigvecs = np.real(eigvecs)
    order = np.argsort(-eigvals)
    eigvals = eigvals[order]
    eigvecs = eigvecs[:, order]
    # Prediction threshold is built from the 7th principal component.
    vect = eigvecs[:, 6]
    dist = np.array([np.linalg.norm(np.dot(vect, sample)) for sample in ones])
    dist = np.mean(dist)
    return [vect, dist]
def predict(imagen, x_train, y_train):
    """Return 1 if *imagen* is classified as the digit one, else 0.

    The sample counts as a "one" when its absolute projection onto the
    learned principal direction is below the class mean projection.

    Fix: the original invoked ``distancia()`` twice (two full covariance
    eigendecompositions per prediction); a single call yields both values.
    """
    vect, d = distancia(x_train, y_train)
    d1 = np.linalg.norm(np.dot(vect, imagen))
    return 1 if d1 < d else 0
# Binarize the labels: digit 1 stays 1, every other digit becomes 0.
# NOTE(review): y_train1 is an alias of y_train (no copy), so the in-place
# assignment below also mutates y_train — confirm that is intended.
y_train1=y_train
index=np.where(y_train1!=1)
y_train1[index]=0
# Same aliasing applies to the test labels.
y_test1=y_test
index=np.where(y_test1!=1)
y_test1[index]=0
# Classify every test sample, then every train sample.
# NOTE(review): predict() re-runs the full eigendecomposition for each
# sample; the fit could be hoisted out of both loops for a large speedup.
predict1=[]
for i in x_test:
    predict1.append(predict(i,x_train,y_train1))
predict1=np.array(predict1)
predict2=[]
for i in x_train:
    predict2.append(predict(i,x_train,y_train1))
predict2=np.array(predict2)
# F1 scores on the binarized labels.
F1=sklearn.metrics.f1_score(y_test1, predict1)
F12=sklearn.metrics.f1_score(y_train1, predict2)
# Side-by-side confusion matrices with the F1 score in each title.
plt.figure()
plt.subplot(121)
plt.imshow(sklearn.metrics.confusion_matrix(y_test1, predict1))
plt.title("F1 para Test = {:.2f}".format(F1))
plt.subplot(122)
plt.imshow(sklearn.metrics.confusion_matrix(y_train1, predict2))
plt.title("F1 para Train = {:.2f}".format(F12))
plt.savefig('matriz_de confusión.png') | CharlesCo12/CordobaCarlos_Ejercicio10 | predice_uno.py | predice_uno.py | py | 1,976 | python | en | code | 0 | github-code | 13 |
22912263328 | #Python libraries for math and graphics
import numpy as np
import matplotlib.pyplot as plt
import cvxpy as cp
import sys, os #for path to external scripts
script_dir = os.path.dirname(__file__)
lib_relative = '../../../CoordGeo'
fig_relative = '../figs/fig1.pdf'
sys.path.insert(0,os.path.join(script_dir, lib_relative))
#if using termux
import subprocess
import shlex
#end if
# Open-box problem: maximize V = l*b*h subject to l + 2h = 8 and b + 2h = 3
# (cut squares of side h from a sheet), solved as an iterated geometric
# program where the posynomial equalities are approximated around (lk, bk, hk).
#Declaring variables
l = cp.Variable(pos="true", name="l")
b = cp.Variable(pos="true", name="b")
h = cp.Variable(pos="true", name="h")
#GP 1: Use 3 variables, approximate equality constraints
#Objective function
V =l*b*h
#End point for iteration
delta = 0.001
error = 0.005
max_iter = 100
#Initial guess
h.value = 1; l.value = 8 - 2*h.value; b.value = 3 - 2*h.value
hk = h.value - 10*delta; lk = 8 - 2*hk; bk = 3 - 2*hk
num_iter = 0
# Iterate until h stops moving by more than delta (or max_iter is hit).
while(np.abs(h.value - hk) > delta and num_iter < max_iter):
    hk = h.value; lk = l.value; bk = b.value
    # Monomial approximation of l + 2h == 8 around the current iterate.
    f_val = lk + 2*hk
    ceq1 = f_val * (l/lk) ** (lk/f_val) * (h/hk) ** (2*hk/f_val)
    # Monomial approximation of b + 2h == 3 around the current iterate.
    f_val = bk + 2*hk
    ceq2 = f_val * (b/bk) ** (bk/f_val) * (h/hk) ** (2*hk/f_val)
    # Trust region: keep h within a (1+error) band of hk, capped at 1.5.
    constraints = [ceq1 == 8, ceq2 == 3, h >= hk/(1+error) , h <= min([(1+error)*hk, 1.5])]
    prob = cp.Problem(cp.Maximize(V), constraints)
    if(prob.is_dgp() == False):
        print("Not DGP")
        break
    prob.solve(gp = True)
    num_iter += 1
print("Number iterations:", num_iter)
print("Max volume:", prob.value)
print("Square side:", h.value)
#GP 2: Use 1 variable, approximate objective function
# Here V(x) = 4x^3 - 22x^2 + 24x is approximated by a monomial around xk.
x = cp.Variable(pos="true", name="x")
#End point for iteration
delta = 0.001
error = 0.005
max_iter = 100
#Initial guess
x.value = 1
xk = x.value - 10*delta
num_iter = 0
while(np.abs(x.value - xk) > delta and num_iter < 100):
    xk = x.value
    f_val = 4*xk**3 - 22*xk**2 + 24*xk
    # Monomial fit: exponent is x * f'(x) / f(x) evaluated at xk.
    V = f_val * (x/xk) ** ((xk/f_val)*(12*xk**2 - 44*xk + 24))
    constraints = [x >= xk/(1+error) , x <= min([(1+error)*xk, 1.5])]
    prob = cp.Problem(cp.Maximize(V), constraints)
    if(prob.is_dgp() == False):
        print("Not DGP")
        break
    prob.solve(gp = True)
    num_iter += 1
print("Number iterations:", num_iter)
print("Max volume:", prob.value)
print("Square side:", x.value)
#Gradient ascent
#Defining f(x)
def f(x,a,b,c,d):
    """Evaluate the cubic polynomial a*x**3 + b*x**2 + c*x + d.

    Works elementwise when *x* is a NumPy array (used for plotting below).
    """
    return a * x**3 + b * x**2 + c * x + d
# Coefficients of the cubic being maximized: 4x^3 - 22x^2 + 24x.
a = 4
b = -22
c = 24
d = 0
label_str = "$4x^3 - 22x^2 + 24x$"
#For maxima using gradient ascent
cur_x = 0.5
alpha = 0.001
precision = 0.00000001
previous_step_size = 1
max_iters = 100000000
iters = 0
#Defining derivative of f(x)
df = lambda x: 3*a*x**2 + 2*b*x + c
#Gradient ascent calculation
# NOTE(review): `&` is the bitwise operator; it works here only because both
# operands are parenthesized booleans — `and` would be the idiomatic choice.
while (previous_step_size > precision) & (iters < max_iters) :
    prev_x = cur_x
    cur_x += alpha * df(prev_x)
    previous_step_size = abs(cur_x - prev_x)
    iters+=1
max_val = f(cur_x,a,b,c,d)
print("Maximum value of f(x) is ", max_val, "at","x =",cur_x)
#Plotting f(x)
# NOTE(review): `x` rebinds the cvxpy variable from the GP section above;
# harmless here since the solver has already run, but worth renaming.
x=np.linspace(-1,5,100)
y=f(x,a,b,c,d)
plt.plot(x,y,label=label_str)
#Labelling points
plt.plot(cur_x,max_val,'o')
plt.text(cur_x, max_val,f'P({cur_x:.4f},{max_val:.4f})')
plt.xlabel("x-axis")
plt.ylabel("y-axis")
plt.grid()
plt.legend()
#if using termux
plt.savefig(os.path.join(script_dir, fig_relative))
#subprocess.run(shlex.split("termux-open "+os.path.join(script_dir, fig_relative)))
#else
#plt.show()
| Muhammed-Hamdan/iith-fwc-2022-23 | optimization/advanced_assignment/codes/main.py | main.py | py | 3,361 | python | en | code | 3 | github-code | 13 |
5480576547 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
from sql_modules_utils import *
from typing import Union, List
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Stores documentation for CREATE {TABLE|VIEW} with comment split into
# segments.
class TableViewDocs:
  """Documentation comment of a CREATE {TABLE|VIEW}, split into a
  description segment and a column segment."""

  def __init__(self, name: str, desc: List[str], columns: List[str], path: str):
    self.name = name
    self.desc = desc
    self.columns = columns
    self.path = path

  # Constructs a new TableViewDocs from a whole comment, by splitting it on
  # typed lines. Returns (None, errors) for internal or improperly
  # structured schemas.
  @staticmethod
  def create_from_comment(path: str, comment: List[str], module: str,
                          matches: tuple) -> tuple["TableViewDocs", List[str]]:
    obj_type, name = matches[:2]

    # Ignore internal tables and views.
    if re.match(r"^internal_.*", name):
      return None, []

    errors = validate_name(name, module)
    col_start = None

    # Split the comment by finding the beginning of the column segment.
    for i, line in enumerate(comment):
      # Ignore only '--' line.
      if line == "--":
        continue

      m = re.match(typed_comment_pattern(), line)

      # Ignore untyped lines
      if not m:
        continue

      line_type = m.group(1)
      # Fix: compare against None explicitly — `not col_start` was also true
      # for a column segment legitimately starting at index 0, which made
      # the "No columns" check below misfire.
      if line_type == "column" and col_start is None:
        col_start = i
        continue

    if col_start is None:
      errors.append(f"No columns for {obj_type}.\n'{name}' in {path}'\n")
      return None, errors

    return (
        TableViewDocs(name, comment[:col_start], comment[col_start:], path),
        errors,
    )

  def check_comment(self) -> List[str]:
    """Validate description and column segments; return error strings."""
    errors = validate_desc(self)
    errors += validate_columns(self)
    return errors
# Stores documentation for CREATE_FUNCTION with comment split into segments.
class FunctionDocs:
  """Documentation comment of a CREATE_FUNCTION, split into description,
  args and ret segments."""

  def __init__(
      self,
      path: str,
      data_from_sql: dict,
      module: str,
      name: str,
      desc: str,
      args: List[str],
      ret: List[str],
  ):
    self.path = path
    self.data_from_sql = data_from_sql
    self.module = module
    self.name = name
    self.desc = desc
    self.args = args
    self.ret = ret

  # Constructs a new FunctionDocs from a whole comment, by splitting it on
  # typed lines. Returns (None, errors) for internal or improperly
  # structured schemas.
  @staticmethod
  def create_from_comment(path: str, comment: List[str], module: str,
                          matches: tuple) -> tuple["FunctionDocs", List[str]]:
    name, args, ret, sql = matches

    # Ignore internal functions.
    if re.match(r"^INTERNAL_.*", name):
      return None, []

    errors = validate_name(name, module, upper=True)
    start_args, start_ret = None, None

    # Split the comment by finding the beginning of the args and ret segments.
    for i, line in enumerate(comment):
      # Ignore only '--' line.
      if line == "--":
        continue

      m = re.match(typed_comment_pattern(), line)

      # Ignore untyped lines
      if not m:
        continue

      line_type = m.group(1)
      # Fix: compare against None — `not start_args` also fired for a
      # segment legitimately starting at index 0.
      if line_type == "arg" and start_args is None:
        start_args = i
        continue

      if line_type == "ret" and start_ret is None:
        start_ret = i
        continue

    if start_ret is None or start_args is None:
      errors.append(f"Function requires 'arg' and 'ret' comments.\n"
                    f"'{name}' in {path}")
      return None, errors

    args_dict, parse_errors = parse_args(args)
    data_from_sql = {'name': name, 'args': args_dict, 'ret': ret, 'sql': sql}
    return (
        FunctionDocs(
            path,
            data_from_sql,
            module,
            name,
            comment[:start_args],
            comment[start_args:start_ret],
            comment[start_ret:],
        ),
        errors + parse_errors,
    )

  def check_comment(self) -> List[str]:
    """Validate description, args and ret segments; return error strings."""
    errors = validate_desc(self)
    errors += validate_args(self)
    errors += validate_ret(self)
    return errors
# Stores documentation for CREATE_VIEW_FUNCTION with comment split into
# segments.
class ViewFunctionDocs:
  """Documentation comment of a CREATE_VIEW_FUNCTION, split into
  description, args and columns segments."""

  def __init__(
      self,
      path: str,
      data_from_sql: str,
      module: str,
      name: str,
      desc: List[str],
      args: List[str],
      columns: List[str],
  ):
    self.path = path
    self.data_from_sql = data_from_sql
    self.module = module
    self.name = name
    self.desc = desc
    self.args = args
    self.columns = columns

  # Constructs a new ViewFunctionDocs from a whole comment, by splitting it
  # on typed lines. Returns (None, errors) for internal or improperly
  # structured schemas.
  @staticmethod
  def create_from_comment(path: str, comment: List[str], module: str,
                          matches: tuple[str]
                          ) -> tuple["ViewFunctionDocs", List[str]]:
    name, args, columns, sql = matches

    # Ignore internal functions.
    if re.match(r"^INTERNAL_.*", name):
      return None, []

    errors = validate_name(name, module, upper=True)
    start_args, start_cols = None, None

    # Split the comment by finding the beginning of the args/cols segments.
    for i, line in enumerate(comment):
      # Ignore only '--' line.
      if line == "--":
        continue

      m = re.match(typed_comment_pattern(), line)

      # Ignore untyped lines
      if not m:
        continue

      line_type = m.group(1)
      # Fix: compare against None — `not start_args` also fired for a
      # segment legitimately starting at index 0.
      if line_type == "arg" and start_args is None:
        start_args = i
        continue

      if line_type == "column" and start_cols is None:
        start_cols = i
        continue

    if start_cols is None or start_args is None:
      errors.append(f"Function requires 'arg' and 'column' comments.\n"
                    f"'{name}' in {path}")
      return None, errors

    args_dict, parse_errors = parse_args(args)
    errors += parse_errors
    cols_dict, parse_errors = parse_args(columns)
    errors += parse_errors
    data_from_sql = dict(name=name, args=args_dict, columns=cols_dict, sql=sql)
    return (
        ViewFunctionDocs(
            path,
            data_from_sql,
            module,
            name,
            comment[:start_args],
            comment[start_args:start_cols],
            comment[start_cols:],
        ),
        errors,
    )

  def check_comment(self) -> List[str]:
    """Validate description, args and columns segments; return errors."""
    errors = validate_desc(self)
    errors += validate_args(self)
    errors += validate_columns(self, use_data_from_sql=True)
    return errors
# Whether the name starts with module_name.
def validate_name(name: str, module: str, upper: bool = False) -> List[str]:
  """Check that *name* follows the module prefix convention.

  Objects outside 'common' must start with '<module>_' (uppercased when
  *upper* is set); objects inside 'common' must NOT carry such a prefix.
  Returns a single-element error list, or an empty list when valid.
  """
  module_pattern = f"^{module}_.*"
  if upper:
    module_pattern = module_pattern.upper()

  has_prefix = re.match(module_pattern, name) is not None

  if module == "common" and has_prefix:
    return [(f"Invalid name in module {name}. "
             f"In module 'common' the name shouldn't "
             f"start with '{module_pattern}'.\n")]
  if module != "common" and not has_prefix:
    return [(f"Invalid name in module {name}. "
             f"Name has to begin with '{module_pattern}'.\n")]
  return []
# Whether the only typed comment in provided comment segment is of type
# `comment_type`.
def validate_typed_comment(
    comment_segment: str,
    comment_type: str,
    docs: Union["TableViewDocs", "FunctionDocs", "ViewFunctionDocs"],
) -> List[str]:
  """Ensure every typed line in *comment_segment* is of *comment_type*.

  Bare '--' separator lines and untyped lines are skipped. The first
  mismatching typed line produces a single-element error list.
  """
  for line in comment_segment:
    if line == "--":  # bare separator line
      continue

    match = re.match(typed_comment_pattern(), line)
    if match is None:  # untyped line
      continue

    found_type = match.group(1)
    if found_type == comment_type:
      continue

    return [(
        f"Wrong comment type. Expected '{comment_type}', got '{found_type}'.\n"
        f"'{docs.name}' in {docs.path}:\n'{line}'\n")]
  return []
# Whether comment segment with description of the object contains content.
def validate_desc(
    docs: Union["TableViewDocs", "FunctionDocs", "ViewFunctionDocs"]
) -> List[str]:
  """Check that the description segment contains at least one content line.

  Fix: the original error message interpolated the loop variable ``line``,
  which is unbound (NameError) when ``docs.desc`` is empty and meaningless
  otherwise (it could only ever hold a bare '--' separator at that point).
  """
  for line in docs.desc:
    if line == "--":
      continue
    # Any non-separator line counts as documentation.
    return []
  return [(f"Missing documentation for {docs.name}\n"
           f"'{docs.name}' in {docs.path}\n")]
# Whether comment segment about columns contain proper schema. Can be matched
# against parsed SQL data by setting `use_data_from_sql`.
def validate_columns(docs: Union["TableViewDocs", "ViewFunctionDocs"],
                     use_data_from_sql=False) -> List[str]:
  """Validate the '-- @column' lines of *docs*.

  Without use_data_from_sql, only checks that at least one well-formed
  column description exists; with it, every documented column is matched
  against the columns parsed from the SQL.
  """
  errors = validate_typed_comment(docs.columns, "column", docs)

  if errors:
    return errors

  if use_data_from_sql:
    # NOTE(review): entries are pop()ed below, so this mutates
    # docs.data_from_sql["columns"] in place — confirm callers don't reuse it.
    cols_from_sql = docs.data_from_sql["columns"]

  for line in docs.columns:
    # Ignore only '--' line.
    if line == "--" or not line.startswith("-- @column"):
      continue

    # Look for '-- @column' line as a column description
    m = re.match(column_pattern(), line)
    if not m:
      errors.append(f"Wrong column description.\n"
                    f"'{docs.name}' in {docs.path}:\n'{line}'\n")
      continue

    # Without SQL data, one well-formed column is enough: return immediately
    # (remaining lines are not inspected).
    if not use_data_from_sql:
      return errors

    col_name = m.group(1)
    if col_name not in cols_from_sql:
      errors.append(f"There is no argument '{col_name}' specified in code.\n"
                    f"'{docs.name}' in {docs.path}:\n'{line}'\n")
      continue

    # Documented and present in code: remove so leftovers mean "undocumented".
    cols_from_sql.pop(col_name)

  if not use_data_from_sql:
    # Loop ended without finding a single well-formed column description.
    errors.append(f"Missing columns for {docs.name}\n{docs.path}\n")
    return errors

  if not cols_from_sql:
    return errors

  errors.append(
      f"Missing documentation of columns: {list(cols_from_sql.keys())}.\n"
      f"'{docs.name}' in {docs.path}:\n")
  return errors
# Whether comment segment about columns contain proper schema. Matches against
# parsed SQL data.
def validate_args(docs: "FunctionDocs") -> List[str]:
  """Validate the '-- @arg' lines of *docs* against the parsed SQL args.

  Checks formatting, that every documented arg exists in code with the
  same type, and that no code arg is left undocumented.
  """
  errors = validate_typed_comment(docs.args, "arg", docs)

  if errors:
    return errors

  # NOTE(review): entries are pop()ed below, mutating
  # docs.data_from_sql["args"] in place — confirm callers don't reuse it.
  args_from_sql = docs.data_from_sql["args"]
  for line in docs.args:
    # Ignore only '--' line.
    if line == "--" or not line.startswith("-- @"):
      continue

    m = re.match(args_pattern(), line)
    if m is None:
      # Malformed arg doc: stop early and report.
      errors.append("The arg docs formatting is wrong. It should be:\n"
                    "-- @arg [a-z_]* [A-Z]* {desc}\n"
                    f"'{docs.name}' in {docs.path}:\n'{line}'\n")
      return errors

    arg_name, arg_type = m.group(1), m.group(2)
    if arg_name not in args_from_sql:
      errors.append(f"There is not argument '{arg_name}' specified in code.\n"
                    f"'{docs.name}' in {docs.path}:\n'{line}'\n")
      continue

    # Documented and present: remove so leftovers mean "undocumented",
    # then cross-check the declared type.
    arg_type_from_sql = args_from_sql.pop(arg_name)
    if arg_type != arg_type_from_sql:
      errors.append(f"In the code, the type of '{arg_name}' is "
                    f"'{arg_type_from_sql}', but according to the docs "
                    f"it is '{arg_type}'.\n"
                    f"'{docs.name}' in {docs.path}:\n'{line}'\n")

  if not args_from_sql:
    return errors

  errors.append(
      f"Missing documentation of args: {list(args_from_sql.keys())}.\n"
      f"{docs.path}\n")
  return errors
# Whether comment segment about return contain proper schema. Matches against
# parsed SQL data.
def validate_ret(docs: "FunctionDocs") -> List[str]:
  """Validate the '-- @ret' line of *docs* against the parsed SQL return type."""
  errors = validate_typed_comment(docs.ret, "ret", docs)
  if errors:
    return errors

  ret_type_from_sql = docs.data_from_sql["ret"]
  for line in docs.ret:
    # Ignore only '--' line.
    if line == "--" or not line.startswith("-- @ret"):
      continue

    m = re.match(function_return_pattern(), line)
    if m is None:
      # Malformed ret doc: report immediately.
      return [("The return docs formatting is wrong. It should be:\n"
               "-- @ret [A-Z]* {desc}\n"
               f"'{docs.name}' in {docs.path}:\n'{line}'\n")]
    docs_ret_type = m.group(1)
    if ret_type_from_sql != docs_ret_type:
      # Documented return type disagrees with the SQL declaration.
      return [(f"The return type in docs is '{docs_ret_type}', "
               f"but it is {ret_type_from_sql} in code.\n"
               f"'{docs.name}' in {docs.path}:\n'{line}'\n")]
  return []
# Parses string with multiple arguments with type separated by comma into dict.
def parse_args(args_str: str) -> tuple[dict, List[str]]:
  """Parse a comma-separated 'name TYPE' argument string.

  Returns (args, errors): *args* maps argument name -> type, *errors*
  collects one message per piece that failed to parse.
  """
  parsed = {}
  problems = []
  for piece in args_str.split(","):
    match = re.match(arg_str_pattern(), piece)
    if match:
      parsed[match.group(1)] = match.group(2)
    else:
      problems.append(f"Wrong arguments formatting for '{piece}'\n")
  return parsed, problems
# After matching file to pattern, fetches and validates related documentation.
def validate_docs_for_sql_object_type(path: str, module: str, sql: str,
                                      pattern: str, docs_object: type):
  """Validate documentation of every *pattern* match inside *sql*.

  For each match the comment immediately above it is collected (scanning
  upwards from the match), parsed into *docs_object*, and all parse and
  content errors are accumulated into the returned list.
  """
  collected_errors = []
  sql_lines = sql.split("\n")
  for line_id, matches in match_pattern(pattern, sql).items():
    # Lines above the match, closest first, so fetch_comment can walk the
    # comment block upwards.
    preceding_in_reverse = sql_lines[line_id - 1::-1]
    docs, creation_errors = docs_object.create_from_comment(
        path, fetch_comment(preceding_in_reverse), module, matches)
    collected_errors += creation_errors
    if docs:
      collected_errors += docs.check_comment()
  return collected_errors
def check(path: str):
  """Run all documentation validators over one .sql file.

  Returns the accumulated list of error strings.
  """
  errors = []

  # Module name is the first path component under stdlib/.
  module_name = path.split("/stdlib/")[-1].split("/")[0]

  # Fix: read explicitly as UTF-8 — the platform default encoding (e.g.
  # cp1252 on Windows) can fail on non-ASCII characters in the SQL sources.
  with open(path, encoding="utf-8") as f:
    sql = f.read()

  errors += validate_docs_for_sql_object_type(path, module_name, sql,
                                              create_table_view_pattern(),
                                              TableViewDocs)
  errors += validate_docs_for_sql_object_type(path, module_name, sql,
                                              create_function_pattern(),
                                              FunctionDocs)
  errors += validate_docs_for_sql_object_type(path, module_name, sql,
                                              create_view_function_pattern(),
                                              ViewFunctionDocs)
  return errors
def main():
  """Check every stdlib .sql file; print errors to stderr.

  Returns 0 when all files are clean, 1 otherwise.
  """
  stdlib_dir = os.path.join(ROOT_DIR, "src", "trace_processor", "stdlib")
  all_errors = []
  for root, _, files in os.walk(stdlib_dir, topdown=True):
    for file_name in files:
      full_path = os.path.join(root, file_name)
      if full_path.endswith(".sql"):
        all_errors.extend(check(full_path))
  sys.stderr.write("\n\n".join(all_errors))
  return 0 if not all_errors else 1
return 0 if not errors else 1
if __name__ == "__main__":
sys.exit(main())
| fred231084g/perfetto | tools/check_sql_modules.py | check_sql_modules.py | py | 14,349 | python | en | code | null | github-code | 13 |
1067889680 | from setuptools import setup, find_packages
import os
# Package metadata for the UCS CLI component.
description = "CLI component for UCS"
author = "Intel Corporation"
# NOTE(review): `license` shadows the `license` builtin; harmless in a
# setup script, but a rename would be cleaner.
license = "Apache"
# Install location for the config file, taken from the environment.
etc = os.environ.get("DAIETC")
# Version also comes from the environment (DAIVER) — both must be set by
# the build driver before running this script.
setup(name='ucs_cli',
      version=os.environ.get("DAIVER"),
      description=description,
      author=author,
      license=license,
      packages=find_packages(exclude=['cli.src.tests']),
      scripts=['ucs'],
      data_files=[(etc, ['cli_config.json']), ('/etc/bash_completion.d', ['ucs_completion.sh'])],
      install_requires=['requests',
                        'clustershell',
                        'python-dateutil',
                        'progress',
                        'texttable'],
      test_suite='tests',
      tests_require=['pytest',
                     'pytest-cov',
                     'pylint',
                     'mock'])
| unifiedcontrolsystem/dai-ds | cli/setup2.py | setup2.py | py | 834 | python | en | code | 5 | github-code | 13 |
36925807573 | #!/usr/bin/env python3
import csv
import os
import re
import subprocess
from asyncore import write
def getPath():
    """Return the absolute path of the 'Desktop/week 6' working folder.

    Fix: the original shelled out to ``pwd`` just to read the current
    directory; ``os.getcwd()`` gives the same answer without spawning a
    process (and also works where no ``pwd`` binary exists).
    """
    return os.getcwd() + "/Desktop/week 6"
def get_user_statistics():
    """Tally INFO and ERROR syslog lines per user.

    Returns a dict: username -> {"INFO": count, "ERROR": count}, where the
    username is taken from the trailing "(username)" of each line.

    Fix: the original wrapped the whole loop in a single
    ``try/except TypeError``, so the first line without a trailing
    "(username)" silently aborted parsing of every remaining line.
    Malformed lines are now skipped individually.
    """
    user_dict = {}
    with open(getPath() + "/syslog.log", "r") as file:
        for line in file:
            line = line.strip()
            match = re.search(r"\((.*)\)$", line)
            if match is None:
                continue  # no "(username)" suffix on this line — skip it
            user_name = match[1]
            if user_name not in user_dict:
                user_dict[user_name] = {"INFO": 0, "ERROR": 0}
            if "INFO" in line:
                user_dict[user_name]["INFO"] += 1
            elif "ERROR" in line:
                user_dict[user_name]["ERROR"] += 1
    return user_dict
def get_error_stats():
    """Count occurrences of each distinct ERROR message in syslog.log."""
    counts = {}
    log_path = getPath() + "/syslog.log"
    with open(log_path, "r") as log_file:
        for raw_line in log_file:
            stripped = raw_line.strip()
            if "ERROR" not in stripped:
                continue
            # Message text sits between "ERROR " and the " (username)" suffix.
            message = stripped.split("ERROR ")[1].split(" (")[0].strip()
            counts[message] = counts.get(message, 0) + 1
    return counts
def user_stats_header():
    """Column headers for the per-user statistics CSV."""
    return ("Username", "INFO", "ERROR")
def error_stats_header():
    """Column headers for the error-count CSV."""
    return ["Error", "Count"]
def write_error_stats(error_stat_dict):
    """Write error counts to error_message.csv, most frequent first.

    Fixes: the CSV file is opened with ``newline=""`` (required by the csv
    module; without it Windows output gets blank rows), and a single
    ``csv.writer`` is built instead of a new one per row.
    """
    with open(getPath() + "/error_message.csv", "w", newline="") as file:
        writer = csv.writer(file)
        writer.writerow(error_stats_header())
        for key in sorted(error_stat_dict, key=error_stat_dict.get, reverse=True):
            writer.writerow([key, error_stat_dict[key]])
    return
def write_user_stats(user_stat_dict):
    """Write per-user INFO/ERROR counts to user_statistics.csv, sorted by username.

    Fixes: the CSV file is opened with ``newline=""`` (required by the csv
    module; without it Windows output gets blank rows), and a single
    ``csv.writer`` is built instead of a new one per row.
    """
    with open(getPath() + "/user_statistics.csv", "w", newline="") as file:
        writer = csv.writer(file)
        writer.writerow(user_stats_header())
        for key in sorted(user_stat_dict):
            writer.writerow([key, user_stat_dict[key]["INFO"], user_stat_dict[key]["ERROR"]])
    return
if __name__ == "__main__":
user_stat_dict = get_user_statistics()
error_stat_dict = get_error_stats()
write_error_stats(error_stat_dict)
write_user_stats(user_stat_dict) | imhariprakash/Courses | Google IT Automation with Python Professional Certificate/Using Python to Interact with the Operating System/week 6/ticky_check.py | ticky_check.py | py | 2,253 | python | en | code | 4 | github-code | 13 |
10669317546 | from backend.Utils import Utils
class User:
    """In-memory session manager for registered devices.

    Tracks active sessions (session_id <-> user_id plus per-session state
    such as the last reported position, current activity and geofence
    trigger flags), persists positions via `postgres` and sends geofence
    notifications via `firebase_sdk`.
    """
    postgres = None
    firebase_sdk = None
    utils = Utils()
    # Holds the currently active sessions.
    # NOTE(review): this is a class attribute, shared by every User
    # instance (and so is the Utils() above) — confirm that is intended.
    live_sessions = []
    # Inactivity threshold (minutes) per activity before a stale session is
    # replaced by a fresh one.
    minutes_to_wait_before_generate_new_session = {
        "walk": 6,
        "bike": 5,
        "car": 4
    }
    def __init__(self, postgres, firebase_sdk):
        self.postgres = postgres
        self.firebase_sdk = firebase_sdk
    # Checks whether a session_id is valid (i.e. belongs to a live session).
    # Returns True when valid, otherwise an error dict.
    # NOTE(review): the error message is NOT an f-string — "{session_id}"
    # is sent literally (and "provived" is a typo); confirm and fix upstream.
    def validate_session_id(self, session_id):
        print("live_sessions - BEFORE validate_session_id():")
        self.utils.print_array_of_json("Live sessions", self.live_sessions)
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                return True
        return {
            "result": False,
            "type": "Error",
            "message": "The {session_id} provived is not valid. Please register again."
        }
    # Computes the freshness of a session: if the last update is older than
    # the per-activity threshold, destroys the session and generates a new
    # session_id for the same user. Returns the new session_id, or False
    # when the session is still fresh (or unknown).
    def check_freshness_session(self, session_id, activity):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                date_from = session["date_time"]
                date_to = self.utils.get_current_datetime()
                diff_in_minutes = self.utils.get_datetime_difference_in_minutes(date_from, date_to)
                if diff_in_minutes > self.minutes_to_wait_before_generate_new_session[activity]: # Old session -> destroy and create new session for the same user_id
                    user_id = session["user_id"]
                    self.remove_session_by_user_id(user_id)
                    new_session_id = self.register_new_session(user_id)
                    print("NEW session_id GENERATED: " + new_session_id)
                    return new_session_id
        return False
    # Returns the user_id of a specific session_id (None when unknown).
    def get_user_id_by_session_id(self, session_id):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                return session["user_id"]
        return None
    # Returns the last position reported for a specific session_id
    # (None when unknown).
    def get_previous_position_by_session_id(self, session_id):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                return session["previous_position"]
        return None
    # Removes the existing session (if any) for user_id; True when removed.
    def remove_session_by_user_id(self, user_id):
        for session in self.live_sessions:
            if session["user_id"] == user_id:
                self.live_sessions.remove(session)
                return True
        return False
    # Refreshes the last-used datetime of a session; True when found.
    def update_session_datetime(self, session_id):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                session["date_time"] = self.utils.get_current_datetime()
                return True
        return False
    # Stores the last reported position message; True when found.
    def save_last_message_in_session(self, session_id, last_message):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                session["previous_position"] = last_message
                return True
        return False
    # Stores the current activity; True when found.
    def save_current_activity_in_session(self, session_id, current_activity):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                session["current_activity"] = current_activity
                return True
        return False
    # Returns the activity stored in the session.
    # NOTE(review): returns False for an unknown session while the getters
    # above return None — inconsistent sentinel values; confirm.
    def get_activity_in_session(self, session_id):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                return session["current_activity"]
        return False
    # Stores the id of the triggered geofence (if any); True when found.
    def save_current_geofence_triggered_in_session(self, session_id, id_geofence_triggered):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                session["current_id_geofence_triggered"] = id_geofence_triggered
                return True
        return False
    # Returns the id of the triggered geofence (if any) stored in the session.
    def get_id_geofence_triggered_in_session(self, session_id):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                return session["current_id_geofence_triggered"]
        return False
    # Returns whether the current geofence has already been triggered.
    def get_current_geofence_is_already_triggered_in_session(self, session_id):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                return session["current_geofence_is_already_triggered"]
        return False
    # Returns the id of the group that already triggered the geofence.
    def get_current_geofence_triggered_by_group_id_in_session(self, session_id):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                return session["current_geofence_triggered_by_group_id"]
        return False
    # Records whether the current geofence has been triggered, and by which
    # group; True when the session was found.
    def save_current_geofence_is_already_triggered_in_session(self, session_id, group_id, is_already_triggered):
        for session in self.live_sessions:
            if session["session_id"] == session_id:
                session["current_geofence_triggered_by_group_id"] = group_id
                session["current_geofence_is_already_triggered"] = is_already_triggered
                return True
        return False
    # Registers a new session for a specific user, replacing any existing one.
    # NOTE(review): mutable default argument (old_position=[]); it is only
    # read here, but the usual None-sentinel pattern would be safer.
    def register_new_session(self, user_id, old_position=[]):
        self.remove_session_by_user_id(user_id)
        session_id = self.utils.generate_new_session_id(self.live_sessions)
        now = self.utils.get_current_datetime()
        session = {
            "session_id": session_id,
            "user_id": user_id,
            "date_time": now,
            "current_activity": None,
            "current_id_geofence_triggered": None,
            "current_geofence_is_already_triggered": False,
            "current_geofence_triggered_by_group_id": None,
            "previous_position": old_position,
        }
        self.live_sessions.append(session)
        print("live_sessions - AFTER register_new_session():")
        self.utils.print_array_of_json("Live sessions", self.live_sessions)
        return session_id
    # Receives the registration_token and checks whether it already exists
    # in the DB; if not, creates the new user. In either case a new
    # session_id is returned.
    def register(self, registration_token):
        # Check if the user is already registered, in this case return the user_id
        user_id = self.postgres.get_user_id_by_registration_token(registration_token)
        if user_id is None:
            new_user_id = self.postgres.create_new_user(registration_token)
            if new_user_id is not None:
                new_session_id = self.register_new_session(new_user_id)
                return {
                    "result": True,
                    "message": "User successfully registered.",
                    "session_id": new_session_id
                }
            else:
                return {
                    "result": False,
                    "type": "Error",
                    "message": "User registration has failed."
                }
        else:
            new_session_id = self.register_new_session(user_id)
            return {
                "result": True,
                "message": "A user is already registered for the same registration_token.",
                "session_id": new_session_id
            }
    # Inserts the new position sent by the device, handling activity
    # changes, geofence triggering and push notifications.
    # NOTE(review): when user_id is None (unknown session) the method falls
    # through and implicitly returns None — confirm callers handle that.
    def communicate_position(self, message):
        # Flatten the GeoJSON-style message for convenience; note the
        # incoming dict is mutated in place.
        message["session_id"] = message["properties"]["session_id"]
        message["activity"] = message["properties"]["activity"]
        message["position_id_device"] = message["properties"]["position_id_device"]
        message["position_id_group"] = message["properties"]["position_id_group"]
        message["latitude"] = message["geometry"]["coordinates"][0]
        message["longitude"] = message["geometry"]["coordinates"][1]
        user_id = self.get_user_id_by_session_id(message["session_id"])
        session_id = message["session_id"]
        if user_id is not None:
            # Activity changed
            last_position_changed = self.check_changed_activity(message["activity"], message["session_id"])
            if last_position_changed is not None:
                print("ACTIVITY CHANGED!")
                session_id = self.register_new_session(user_id, last_position_changed)
                # Insert a synthetic point carrying the NEW coordinates but
                # the OLD activity and session_id, to close the old track.
                self.postgres.insert_new_position(user_id, message["longitude"], message["latitude"],
                                                  last_position_changed["activity"],
                                                  last_position_changed["session_id"],
                                                  is_auto_generated=True)
            # New position
            position_id = self.postgres.insert_new_position(user_id, message["longitude"], message["latitude"],
                                                            message["activity"],
                                                            session_id,
                                                            is_auto_generated=None)
            if position_id is not None:
                # Notification
                geofence_triggered = self.postgres.position_inside_geofence(position_id, message["activity"])
                print("User is inside geofence:")
                print(geofence_triggered)
                if geofence_triggered is not None:
                    geofence_triggered_id = geofence_triggered[0]
                    previous_activity = self.get_activity_in_session(session_id)
                    previous_id_geofence_triggered = self.get_id_geofence_triggered_in_session(session_id)
                    last_position = self.get_previous_position_by_session_id(session_id)
                    print(last_position)
                    if last_position:
                        previous_position_id_group = last_position["properties"]["position_id_group"]
                    else:
                        previous_position_id_group = None
                    current_geofence_is_already_triggered = self.get_current_geofence_is_already_triggered_in_session(session_id)
                    current_geofence_triggered_by_group_id = self.get_current_geofence_triggered_by_group_id_in_session(session_id)
                    # Notify when (a) the activity or geofence changed since
                    # the last report, or (b) the same group re-confirms a
                    # geofence it already triggered.
                    if (previous_activity is None or message["activity"] != previous_activity or geofence_triggered_id != previous_id_geofence_triggered) or ((current_geofence_is_already_triggered and current_geofence_triggered_by_group_id == message["position_id_group"]) and (previous_position_id_group == message["position_id_group"] or previous_position_id_group is None)):
                        self.save_current_geofence_is_already_triggered_in_session(session_id, message["position_id_group"], True)
                        geofence_triggered_message = geofence_triggered[1]
                        registration_token = self.postgres.get_registration_token_by_user_id(user_id)
                        response = self.firebase_sdk.send_notification("ios", registration_token,
                                                                       geofence_triggered_message, message["position_id_device"])
                        self.utils.print_json(response, "send_notification()")
                        self.save_current_geofence_triggered_in_session(session_id, geofence_triggered_id)
                        self.postgres.update_id_geofence_triggered_position(position_id, geofence_triggered_id)
                else:
                    # Outside every geofence: clear the trigger state.
                    self.save_current_geofence_triggered_in_session(session_id, None)
                    self.save_current_geofence_is_already_triggered_in_session(session_id, message["position_id_group"], False)
                self.save_current_activity_in_session(session_id, message["activity"])
                self.save_last_message_in_session(session_id, message)
                self.update_session_datetime(message["session_id"])
                return {
                    "result": True,
                    "message": "Position successfully inserted.",
                    "session_id": session_id,
                    "position_id_device": message["position_id_device"]
                }
            else:
                return {
                    "result": False,
                    "type": "Error",
                    "message": "Inserting position has failed."
                }
    # Returns the previously stored position when its activity differs from
    # *current_activity*, otherwise None.
    def check_changed_activity(self, current_activity, session_id):
        last_position = self.get_previous_position_by_session_id(session_id)
        if last_position is not None and len(last_position) > 0:
            last_activity = last_position["activity"]
            if last_activity != current_activity:
                return last_position
        return None
| Krystian95/Context-Aware-Systems---Backend | backend/User.py | User.py | py | 13,620 | python | en | code | 0 | github-code | 13 |
43155016550 | import random
import string
import requests
import MySQLdb
import re
from cfg import *
from bs4 import BeautifulSoup
import urllib.parse
def python_web_crawler(url):
    """Crawl a single page: save its body to disk and record its links in MySQL.

    Returns 1 when the global limit of found links (5000) is reached,
    0 otherwise.
    """
    db = MySQLdb.connect(HOST, USERNAME, PASSWORD, DATABASE)
    cursor = db.cursor()  # used to execute SQL commands
    request = requests.get(url)
    status = int(request.status_code)  # HTTP status of the fetch
    content_type = request.headers['Content-Type']
    content_length = len(request.content)
    path_name = None
    limit_reached = 0  # renamed from `bool`, which shadowed the builtin
    links = []  # bug fix: previously undefined for non-HTML 200 responses
    latest_date = 'SELECT MAX(created_at) INTO @last_crawl_date FROM webscrapper WHERE source_link ="{}" AND is_crawled IS TRUE ORDER BY created_at DESC LIMIT 1'.format(url)
    cursor.execute(latest_date)
    if status == 200:
        if re.search("text/html(.*)", content_type):
            # HTML page: prettify it and collect its anchors.
            soup = BeautifulSoup(request.text, "html.parser")
            to_write = soup.prettify()
            links = soup.find_all('a')
            extension = '.html'
            mode = 'w'
            en = 'not_bin'
        else:
            # Non-HTML payload: store the raw bytes.
            to_write = request.content
            extension = '.' + content_type
            mode = 'wb'
            en = 'bin'
        name = "".join(random.choice(char) for n in range(11))
        path_name = "files/" + name + extension
        # NOTE(review): `encod` comes from cfg; for binary mode ('wb') its value
        # must be None, otherwise open() rejects the encoding argument -- confirm.
        file = open(path_name, mode, encoding=encod[en])
        file.write(to_write)
        file.close()
    else:
        # Failed fetch: record it and stop, there is nothing to parse.
        # NOTE(review): SQL is built by string interpolation from crawled,
        # untrusted data -- should be migrated to parameterized queries.
        sql_a = "INSERT INTO webscrapper(source_link, is_crawled, last_crawl_date, response_status, content_type, content_length, created_at) VALUES ('{}', {}, {}, '{}', '{}', '{}', {})".format(url, 'TRUE', '@last_crawl_date', status, content_type, content_length, 'NOW()')
        cursor.execute(sql_a)
        db.commit()
        db.close()
        return limit_reached
    for link in links:
        link_string = link.get('href')
        if link_string is None:
            continue
        # Skip javascript pseudo-links and degenerate hrefs.
        if re.search('javascript:;', link_string) or len(link_string) <= 2:
            continue
        link_string = urllib.parse.urljoin(url, link_string)
        result = urllib.parse.urlparse(link_string)
        # Keep only absolute http(s) URLs with both scheme and host.
        if not (all([result.scheme, result.netloc]) and re.search("http(.*)", link_string)):
            continue
        sql_b = 'SELECT "{}" INTO @link_to_check'.format(link_string)
        cursor.execute(sql_b)
        sql_c = "INSERT INTO webscrapper(link, source_link, is_crawled, last_crawl_date, response_status, content_type, content_length, filepath, created_at) VALUES ('{}', '{}', {}, {}, '{}', '{}', '{}', '{}', {})".format(link_string, url, 'TRUE', '@last_crawl_date', status, content_type, content_length, path_name, 'NOW()')
        cursor.execute(sql_c)
        db.commit()
        # status is always 200 here (non-200 returned earlier).
        list_of_found_links.append(link_string)
        print(link_string)
        if len(list_of_found_links) >= 5000:
            print('Maximum limit reached')
            limit_reached = 1
            db.rollback()
            db.close()
            return limit_reached
    db.close()
    return limit_reached
| AYUSH-TRIPATHI786/Python-web-crawler | utils.py | utils.py | py | 3,275 | python | en | code | 0 | github-code | 13 |
"""Selenium demo: exercising dropdown handling through the Select helper."""
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
# Start Chrome through a locally installed chromedriver binary.
driver_service=Service(executable_path="C:\Python Notes\Python Selenium\drivers\chromedriver.exe")
driver=webdriver.Chrome(service=driver_service)
# Load the sample page from disk (file:// URL with percent-encoded spaces).
url="file:///C:/Python%20Notes/Python%20Selenium/Sample%20Webpages/dropdown1.html"
driver.get(url)
# Wrap the <select id="lang"> element in Selenium's Select helper.
dropdown=driver.find_element(By.XPATH,"//select[@id='lang']")
sel=Select(dropdown)
# Demonstrate the three selection strategies; sleeps are only for viewing.
sel.select_by_index(1)
time.sleep(1)
sel.select_by_value("golang")
time.sleep(1)
sel.select_by_visible_text("JavaScript")
time.sleep(1)
# is_multiple is truthy when the <select> carries the "multiple" attribute.
if sel.is_multiple:
    print("it is a multiselect dd")
else:
    print("it is a single select dd")
# Enumerate every <option> inside the dropdown.
allOptions=sel.options
print("The no of options present in the dd is",len(allOptions))
for option in allOptions:
    print(option.text)
| jeevanxyz/TestDemo | Demo1/SelectDemo.py | SelectDemo.py | py | 897 | python | en | code | 0 | github-code | 13 |
16847332845 | import pandas as pd
import os
import numpy as np
from tqdm import tqdm
import pickle
import string
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
nlp = spacy.load('en_core_web_sm')
import copy
ADJ_LIST = '../../data/adjacency_list.pickle'
W2I = '../../data/wordlist.pickle'
I2W = '../../data/index2word.pickle'
class Data():
    """Builds a word-graph from a CSV dictionary.

    Pipeline: tokenize/lemmatize each definition with spaCy, build a word
    list and word<->index maps, then an adjacency list mapping every head
    word to the indices of the words in its definition. Intermediate
    results are pickled to the paths in ADJ_LIST / W2I / I2W.
    """
    def __init__(self,
                 random_state = 1,
                 path = '../../data/wordlist.csv'):
        # random_state is currently unused.
        self.path = path
        self.data = pd.read_csv(self.path, encoding = 'latin1', header = None, engine = 'c' )
        self.data = self.data.iloc[:, 1:]
        # NOTE(review): fillna('') is not in-place and its result is
        # discarded, so NaNs survive -- probably should be reassigned.
        self.data.fillna('')
        self.data.columns = [col-1 for col in self.data.columns]
        #print(self.data.columns)
        self.wordlist = set()       # all distinct tokens (later a list)
        self.mapping_wi = {}        # word -> index
        self.mapping_iw = {}        # index -> word
        self.adjacency_lst = {}     # word index -> indices of definition words
    def get_definition(self, definition):
        """Tokenize one definition: drop punctuation/stop words, lemmatize,
        and return the unique lemmas as a list ([] for non-string input)."""
        tokens = set()
        try:
            definition = nlp(definition)
            for token in definition:
                if token.text in string.punctuation or token.text == '\'s' or token.text == '' or token.text == ' ':
                    continue
                if token.is_stop:
                    continue
                token = token.lemma_
                tokens.add(token)
            return list(tokens)
        except TypeError:
            # Raised by nlp() on NaN/non-string cells.
            return []
    def compile_dictionary(self):
        """
        Applies get_definition() to every definition column cell,
        replacing the raw text with its token list (with progress output).
        """
        #print(self.data.head().iloc[:,1])
        self.data.iloc[:,1] = self.logged_apply(
            self.data.iloc[:,1],
            self.get_definition
        )
    def get_processed_data(self, compile_dict = True, compile_map = True):
        """Run the full pipeline; each flag toggles recomputing vs. loading
        the cached pickles for the dictionary and the word/index maps."""
        if compile_dict:
            print('Compiling Dictionary')
            self.compile_dictionary()
            # NOTE(review): 'dictonary.pickle' is a typo -- this existence
            # check never matches the 'dictionary.pickle' file removed and
            # written below, so the os.remove branch is effectively dead.
            if os.path.exists('../../data/dictonary.pickle'):
                os.remove('../../data/dictionary.pickle')
            self.data.to_pickle('../../data/dictionary.pickle')
        else:
            # NOTE(review): the loaded frame is discarded; presumably this
            # should assign to self.data -- confirm before relying on it.
            pd.read_pickle('../../data/dictionary.pickle')
        if compile_map:
            print('Creating Wordlist')
            # Collect head words and every token from each definition.
            for index, row in tqdm(self.data.iterrows()):
                self.wordlist.add(row[0])
                try:
                    for word in row[1]:
                        self.wordlist.add(word)
                except TypeError:
                    pass
            self.wordlist = list(self.wordlist)
            print('Creating Word to Index and Index to Word Maps')
            for i, word in tqdm(enumerate(self.wordlist)):
                self.mapping_wi[word] = i
                self.mapping_iw[i] = word
            # Persist both maps, replacing any stale pickles.
            if os.path.exists(W2I):
                os.remove(W2I)
            pkl = open(W2I, 'wb')
            pickle.dump(self.mapping_wi, pkl)
            pkl.close()
            if os.path.exists(I2W):
                os.remove(I2W)
            pkl = open(I2W, 'wb')
            pickle.dump(self.mapping_iw, pkl)
            pkl.close()
        else:
            # Load previously computed maps instead of rebuilding them.
            pkl = open(W2I, 'rb')
            self.mapping_wi = pickle.load(pkl)
            pkl.close()
            pkl = open(I2W, 'rb')
            self.mapping_iw = pickle.load(pkl)
            pkl.close()
        print('Creating Empty Adjacency List')
        for index in tqdm(self.mapping_iw.keys()):
            self.adjacency_lst[index] = set()
        print('Populating Adjacency List')
        # For each head word, union in the indices of its definition tokens.
        for index, row in tqdm(self.data.iterrows()):
            lst = set()
            try:
                for word in row[1]:
                    lst.add(self.mapping_wi[word])
            except TypeError:
                print(row[1])
                pass
            try:
                self.adjacency_lst[self.mapping_wi[row[0]]] = self.adjacency_lst[self.mapping_wi[row[0]]].union(lst)
            except KeyError:
                self.adjacency_lst[self.mapping_wi[row[0]]] = lst
        # Convert the sets to lists so the structure pickles compactly.
        for word in self.adjacency_lst.keys():
            self.adjacency_lst[word] = list(self.adjacency_lst[word])
        if os.path.exists(ADJ_LIST):
            os.remove(ADJ_LIST)
        pkl = open(ADJ_LIST, 'wb')
        pickle.dump(self.adjacency_lst, pkl)
        pkl.close()
        print('done')
    def logged_apply(self, g, func, *args, **kwargs):
        """
        func - function to apply to the dataframe
        *args, **kwargs are the arguments to func
        The method applies the function to all the elements of the dataframe
        and shows progress on stdout via carriage-return updates.
        """
        step_percentage = 100. / len(g)
        import sys
        print('\rApply progress: 0%', end = "")
        sys.stdout.flush()
        def logging_decorator(func):
            # Wraps func so each call also prints the running percentage.
            def wrapper(*args, **kwargs):
                progress = wrapper.count * step_percentage
                #sys.stdout.write('\033[D \033[D' * 4 + format(progress, '3.0f') + '%')
                print('\rApply progress: {prog}%'.format(prog=progress), end = "")
                sys.stdout.flush()
                wrapper.count += 1
                return func(*args, **kwargs)
            wrapper.count = 0
            return wrapper
        logged_func = logging_decorator(func)
        res = g.apply(logged_func, *args, **kwargs)
        print('\rApply progress: 1 00%', end = "")
        sys.stdout.flush()
        print('\n')
        return res
data = Data()
data.get_processed_data(True, True)
| shandilya1998/CS6251-project | source/dictionary_net/raw_data.py | raw_data.py | py | 5,561 | python | en | code | 0 | github-code | 13 |
35216272329 | import math
class Triangle:
    """A triangle defined by its three side lengths."""

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

    def area(self):
        """Print and return the area computed with Heron's formula.

        Bug fix: the semi-perimeter is no longer truncated with int(),
        which gave wrong results for non-integer side lengths. The value
        is now also returned (previously only printed) so callers can use
        it programmatically; printing is kept for backward compatibility.
        """
        s = (self.a + self.b + self.c) / 2  # semi-perimeter
        result = math.sqrt(s * (s - self.a) * (s - self.b) * (s - self.c))
        print(result)
        return result
trg1=Triangle(2, 3, 4)
trg1.area()
input()
| Egor2725/Lesson6 | 4.py | 4.py | py | 295 | python | en | code | 0 | github-code | 13 |
43101931902 | import os
import sys
import yaml
import json
from confluent_kafka import Producer, KafkaError
from pb.spug_kafka_format import Timestamp, Sample, Samples
get_forecast_config = os.environ.get('GET_FORECAST_CONFIG', '/apps/config/settings.yaml')
with open(get_forecast_config) as f:
settings = yaml.safe_load(f)
def acked(err, msg):
    """Kafka delivery callback: report failed deliveries, ignore successes."""
    if err is None:
        return
    print("Failed to deliver message: %s: %s" % (str(msg), str(err)))
def publish_messages(df, kafka_topic, metric_name, tags, callback=acked):
    """Serialize *df* into protobuf samples and publish them to *kafka_topic*.

    The producer is always flushed, even if packing/producing fails.
    """
    producer = Producer(settings.get('kafka'))
    try:
        payload = pack_message(df, metric_name, tags)
        producer.produce(kafka_topic, payload)
    finally:
        producer.flush()
def pack_message(df, metric_name, tags):
    '''
    Serialize a dataframe into a protobuf Samples message (bytes).

    df: assumed to be indexed by a timestamp-like index with one value
        column, e.g.            y
             ds
             1654528132     12345
        -- TODO confirm: float(y) below only works for a single column.
    metric_name: name stamped on every Sample.
    tags: dict of key/value pairs attached to every Sample.
    '''
    # df1 = df.reset_index().rename(columns={'index': 'ts'})
    s_list = Samples()
    for ts, y in df.iterrows():
        s = Sample()
        s.Name = metric_name
        # Index entries must support .timestamp() (datetime-like).
        s.Timestamp.seconds = int(ts.timestamp())
        s.Value = float(y)
        for k, v in tags.items():
            tag = Sample.TagsEntry()
            tag.key = k
            tag.value = v
            s.Tags.append(tag)
        s_list.Samples.append(s)
    return s_list.encode_to_bytes()
| guo-tt/get-ai-dvc-deploy | src/utils/kafka_sink.py | kafka_sink.py | py | 1,346 | python | en | code | 0 | github-code | 13 |
35340690295 |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision
from torchvision import *
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import numpy as np
import time
import copy
import os
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
best_f1_match = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
running_true_pos_match = 0
running_false_neg_match = 0
running_false_pos_match = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
# For the match minority class
for i in range(len(preds)):
if preds[i] == 0 and labels.data[i] == 0:
running_true_pos_match += 1
if preds[i] == 1 and labels.data[i] == 0:
running_false_neg_match += 1
if preds[i] == 0 and labels.data[i] == 1:
running_false_pos_match += 1
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
# For the match minority class
eps = 0.001
epoch_precision_match = running_true_pos_match/(running_true_pos_match + running_false_pos_match + eps)
epoch_recall_match = running_true_pos_match/(running_true_pos_match + running_false_neg_match + eps)
epoch_f1_match = 2 * epoch_precision_match * epoch_recall_match/(epoch_precision_match + epoch_recall_match + eps)
print('{} Loss: {:.4f} Acc: {:.4f} Precision: {:4f} Recall: {:.4f} F1: {:.4f}'.format(
phase, epoch_loss, epoch_acc, epoch_precision_match, epoch_recall_match, epoch_f1_match))
# deep copy the model
#if phase == 'val' and epoch_acc > best_acc:
# best_acc = epoch_acc
# best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'val' and epoch_f1_match > best_f1_match:
best_f1_match = epoch_f1_match
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def visualize_model(model, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure()
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images//2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'ProcessedData'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
#imshow(out, title=[class_names[x] for x in classes])
model_ft = models.resnet18(pretrained=True)
#Freezes the other layers
freeze_layers = False
if freeze_layers:
for param in model_ft.parameters():
param.requires_grad = False
num_ftrs = model_ft.fc.in_features
# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
#Weight because of imblanaced classes
num_match = len(os.listdir('ProcessedData/Train/Match'))
num_nomatch = len(os.listdir('ProcessedData/Train/NoMatch'))
weights = [1/num_match, 1/num_nomatch] # [ 1 / number of instances for each class]
class_weights = torch.FloatTensor(weights).cuda()
criterion = nn.CrossEntropyLoss(weight=class_weights)
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25) | coolseraj/231_Tinder | ClassifierNetwork.py | ClassifierNetwork.py | py | 7,849 | python | en | code | 0 | github-code | 13 |
25923640423 | loc = open("ship-loc.txt", "w")
com = open("ship-com.txt", "w")
classes = ["corvette", "destroyer", "cruiser", "battleship", "titan", "colossus", "juggernaut", "science", "colonizer", "constructor", "transport", "military_station_small", "ion_cannon"]
prefixes = [["Lord", "Lady"],["Baron", "Baroness"],["Count", "Countess"],["Duke", "Duchess"],["King", "Queen"],["Prince", "Princess"],["Emperor", "Empress"],["Sire", "Madame"],["Laird"],["Sir", "Madam"],["Thane"],["Marquess", "Marchioness"],["Marcher Lord", "Marcher Lady"]]
suffixes = ["Copper", "Pewter", "Iron", "Nickel", "Tungsten", "Zinc", "Titanium", "Cobalt", "Silver", "Platinum", "Gold", "Tin", "Lead", "Aluminum"]
for shipClass in classes:
com.write("\t\t" + shipClass + " = {\n")
com.write("\t\t\t")
for prefix in prefixes[classes.index(shipClass)]:
for suffix in suffixes:
var = "MYSTICAL1_SHIP_" + prefix.replace(" ", "") + "OF" + suffix
str = prefix + " of " + suffix
loc.write(" "+ var + ":0 \"" + str + "\"\n")
com.write(var + " ")
com.write("\n")
com.write("\t\t}\n")
com.write("\n")
loc.close()
com.close() | ThatLilSandi/mystical-name-lists | Mystical 1/Ship Names/shipnames-gen.py | shipnames-gen.py | py | 1,116 | python | en | code | 0 | github-code | 13 |
42628096665 | from pokedex import Pokedex
from database import Database
from helper.writeAJson import writeAJson
db = Database(database="pokedex", collection="pokemons")
db.resetDatabase()
Pokedex.acha_pokemon("Kakuna")
Pokedex.semMultipliers()
Pokedex.pokemon_com_2_fraquezas()
Pokedex.pokemon_fogo_ou_fraco_fogo()
Pokedex.pokemon_agua_e_grama() | coelhalice/Banco-de-Dados-II | relatorio_3_bd2/main.py | main.py | py | 353 | python | en | code | 0 | github-code | 13 |
43076524852 | import tensorflow as tf
from utils import conv2layer, pool2layer
from tensorflow.contrib.framework import arg_scope
from ops import *
from utils import *
import utils
from tensorflow.contrib.layers.python.layers import layers
########################################
############hyper parameters############
########################################
#init lr=0.01, wach 30 epochs *0.1
########################################
def make_png(att, scale):
    """Turn a feature map into a 3-channel 128x128 attention image.

    `scale` is kept for interface compatibility (the commented-out
    up-sampling path used it); the map is always resized to 128x128.
    """
    #att_current = up_sample_bilinear1(att, scale_factor=scale)
    resized = tf.nn.relu(tf.image.resize_bilinear(att, size=[128, 128]))
    heat = tf.reduce_mean(resized, axis=-1)
    # Replicate the single-channel map into 3 channels, back to NHWC layout.
    return tf.transpose(tf.stack([heat, heat, heat]), perm=[1, 2, 3, 0])
def attention_cross(x, channels, sn=False, de=4, scope='attention_cross'):
    """Self-attention block with two parallel key/query branches.

    One branch uses 1x1 convolutions, the other dilated (atrous) 3x3
    convolutions; each produces its own attention map over the flattened
    spatial positions. Both attended outputs are blended into the input
    through learned scalars (gamma/gamma1) initialized to 0, so the block
    starts as an identity mapping. `de` is the channel-reduction factor
    for keys/queries; `sn` toggles spectral normalization in the convs.
    """
    with tf.variable_scope(scope):
        f = conv(x, channels // de, kernel=1, stride=1, sn=sn, scope='f_conv')  # [bs, h, w, c']
        g = conv(x, channels // de, kernel=1, stride=1, sn=sn, scope='g_conv')  # [bs, h, w, c']
        h = conv(x, channels, kernel=1, stride=1, sn=sn, scope='h_conv')  # [bs, h, w, c]
        f1 = atrous_conv2d(x, channels // de, kernel=3, rate=2, sn=sn, scope='f1_conv')  # [bs, h, w, c']
        g1 = atrous_conv2d(x, channels // de, kernel=3, rate=2, sn=sn, scope='g1_conv')  # [bs, h, w, c']
        # N = h * w
        s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True)  # # [bs, N, N]
        s1 = tf.matmul(hw_flatten(g1), hw_flatten(f1), transpose_b=True)  # # [bs, N, N]
        beta_a = tf.nn.softmax(s, dim=-1)  # attention map
        beta_a1 = tf.nn.softmax(s1, dim=-1)  # attention map
        # Both branches attend over the same value projection h.
        o = tf.matmul(beta_a, hw_flatten(h))  # [bs, N, C]
        o1 = tf.matmul(beta_a1, hw_flatten(h))  # [bs, N, C]
        gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))
        gamma1 = tf.get_variable("gamma1", [1], initializer=tf.constant_initializer(0.0))
        o = tf.reshape(o, shape=x.shape)  # [bs, h, w, C]
        o1 = tf.reshape(o1, shape=x.shape)  # [bs, h, w, C]
        att = gamma * o + gamma1 * o1
        # Residual connection back onto the input.
        x = att + x
        return x
def vgg16(images, n_classes, trainable=False, reuse=None, scope=None):
    """Slimmed-down VGG-style classifier (TF1 graph mode).

    Returns (logits, end_points) where end_points carries two intermediate
    attention visualizations ('attention0'/'attention1', see make_png) and
    the penultimate feature vector ('feature'). Many original VGG16 layers
    are intentionally commented out -- this is a reduced-width variant.
    `trainable` is forwarded to batch norm (arg name 'trainning' follows
    the conv2layer helper's spelling).
    """
    keep_prob = 1.0  # dropout disabled; kept for easy re-enabling
    end_points = {}
    with tf.variable_scope(scope, reuse=reuse) as sc:
        with arg_scope([conv2layer,pool2layer], stride=1, padding_mode='SAME'):
            with arg_scope([conv2layer], activation='relu', bn=True, trainning=trainable):
                net = conv2layer(images=images, kernel=[3, 3], output_channel=16, scope='conv1')
                net = conv2layer(images=net, kernel=[3, 3], output_channel=16, scope='conv2')
                net = pool2layer(images=net, kernel=[3, 3], stride=2, scope='pooling1')
                net = conv2layer(images=net, kernel=[3, 3], output_channel=32, scope='conv3')
                net = conv2layer(images=net, kernel=[3, 3], output_channel=32, scope='conv4')
                net = pool2layer(images=net, kernel=[3, 3], stride=2, scope='pooling2')
                #net = attention_cross(net, int(net.shape[-1]), sn=False, de=4, scope="attention0")
                end_points['attention0'] = make_png(net, 4)
                print('attention0', net)
                net = conv2layer(images=net, kernel=[3, 3], output_channel=64, scope='conv5')
                net = conv2layer(images=net, kernel=[3, 3], output_channel=64, scope='conv6')
                #net = conv2layer(images=net, kernel=[3, 3], output_channel=256, scope='conv7')
                net = pool2layer(images=net, kernel=[3, 3], stride=2, scope='pooling3')
                #net = attention_cross(net, int(net.shape[-1]), sn=False, de=4, scope="attention1")
                end_points['attention1'] = make_png(net, 8)
                print('attention1', net)
                net = conv2layer(images=net, kernel=[3, 3], output_channel=128, scope='conv8')
                net = conv2layer(images=net, kernel=[3, 3], output_channel=128, scope='conv9')
                #net = conv2layer(images=net, kernel=[3, 3], output_channel=512, scope='conv10')
                net = pool2layer(images=net, kernel=[3, 3], stride=2, scope='pooling4')
                #net = conv2layer(images=net, kernel=[3, 3], output_channel=512, scope='conv11')
                #net = conv2layer(images=net, kernel=[3, 3], output_channel=512, scope='conv12')
                #net = conv2layer(images=net, kernel=[3, 3], output_channel=512, scope='conv13')
                #net = pool2layer(images=net, kernel=[3, 3], stride=2, scope='pooling5')
                # 8x8 VALID conv acts as the first fully connected layer
                # (assumes the spatial size is 8x8 here -- TODO confirm).
                net = conv2layer(images=net, kernel=[8, 8], output_channel=512, padding_mode='VALID', scope='fc1')
                net = tf.nn.dropout(net, keep_prob=keep_prob)
                net = conv2layer(images=net, kernel=[1, 1], output_channel=512, scope='fc2')
                net = tf.nn.dropout(net, keep_prob=keep_prob)
                feature = tf.squeeze(net)
                print(feature)
                end_points['feature'] = feature
                net = conv2layer(images=net, kernel=[1, 1], output_channel=n_classes, activation=None, scope='logits')
                net = tf.squeeze(net, axis=[1, 2], name='squeeze_logits')
    return net, end_points
19939681527 |
def as_str(i, positions, recipes):
    """Render the scoreboard: elf 0's recipe in (), elf 1's in []."""
    pieces = ["{:5d}: ".format(i)]
    for idx, score in enumerate(recipes):
        if idx == positions[0]:
            pieces.append("({}) ".format(score))
        elif idx == positions[1]:
            pieces.append("[{}] ".format(score))
        else:
            pieces.append(" {} ".format(score))
    return "".join(pieces)
def generate(positions, recipes):
    """Append the digits of the two current recipes' sum to *recipes*.

    Returns the number of digits appended. Bug fix: the original returned
    `len(c)` where `c` was the last digit character from the loop, so it
    always returned 1 even when two digits were appended.
    """
    total = recipes[positions[0]] + recipes[positions[1]]
    digits = str(total)
    for ch in digits:
        recipes.append(int(ch))
    return len(digits)
def shift(positions, recipes):
    """Advance each elf by 1 + its current recipe score, wrapping around."""
    size = len(recipes)
    for idx, pos in enumerate(positions):
        positions[idx] = (pos + 1 + recipes[pos]) % size
def rstr(arr):
    """Concatenate the str() form of every item into one string."""
    return "".join(map(str, arr))
def process(target):
    """Scan the hot-chocolate recipe stream for *target* (a digit string)
    and return the offset where it first appears (0 if never found)."""
    recipes = [3,7]
    positions = [0,1]
    # NOTE(review): i starts at -1, so the first slice below indexes from
    # the *end* of the list -- harmless here, but confirm intent.
    i = -1
    while i < 1000000000 :
        generate(positions, recipes)
        shift(positions, recipes)
        # print(as_str(i, positions, recipes))
        # i lags behind len(recipes), so every offset is eventually checked.
        candidate = rstr(recipes[i:i+len(target)])
        if candidate == target :
            print(i, "found", target)
            return i
        # NOTE(review): progress dot prints unless i is a multiple of
        # 10000 -- the condition looks inverted (should be `i%10000 == 0`?).
        if i%10000 : print(".", )
        # print(i, rstr(recipes[0:i]), candidate)
        i += 1
    return 0
assert( 5 == process("01245"))
assert( 9 == process("51589"))
assert( 18 == process("92510"))
assert( 2018 == process("59414"))
print(process("084601"))
| pconley/advent2018 | p14b.py | p14b.py | py | 1,516 | python | en | code | 0 | github-code | 13 |
31390567671 | #!/bin/env python3
import random
import math
from dataclasses import dataclass
import sys
from matplotlib import pyplot as plt
def Roll():
    """Roll an exploding d6: a 6 adds another roll, repeated until a non-6."""
    total = 0
    while True:
        face = random.randint(1, 6)
        total += face
        if face != 6:
            return total
class State:
    """Fighter condition codes used throughout the combat simulation."""
    Okay, Low, Eliminated = 0, 1, 2
class Fighter:
    """A combatant with health, attack, defense and bonus-damage stats."""

    def __init__(self, name, hp, atk, deff, bon):
        self.Name = name
        self.HealthMax = hp
        self.HealthCurrent = self.HealthMax
        # Below one third of max health the fighter is "Low" and rolls worse.
        # TODO: extra max HP should not move this threshold (original note).
        self.Thresh = math.ceil(self.HealthMax / 3)
        self.DefenseVal = deff
        self.AttackStr = atk
        self.BonusDmg = bon

    def __repr__(self):
        # Bug fix: the original ended the f-string concatenation with a
        # dangling line continuation before `return`, which made this
        # method a SyntaxError. The rendered text is unchanged.
        return (
            f"Bojovník {self.Name}:\n"
            f"Životy: {self.HealthCurrent}/{self.HealthMax}\n"
            f"Útok: {self.AttackStr}/{self.BonusDmg}\n"
            f"Obrana: {self.DefenseVal}\n"
        )

    def GetState(self):
        """Classify current health as Okay / Low / Eliminated."""
        if self.HealthCurrent <= 1:
            return State.Eliminated
        if self.HealthCurrent < self.Thresh:
            return State.Low
        return State.Okay

    def CalcAtk(self):
        """Roll an attack value; low health costs 1, strength adds on top."""
        atk = Roll()
        if self.HealthCurrent < self.Thresh:
            atk -= 1
        return atk + max(self.AttackStr, 0)

    def CalcDef(self):
        """Roll a defense value; low health costs 1, defense adds on top."""
        deff = Roll()
        if self.HealthCurrent < self.Thresh:
            deff -= 1
        return deff + max(self.DefenseVal, 0)

    def Attack(self, enemy: 'Fighter'):
        """Resolve one attack; return (damage dealt, enemy's new state)."""
        atk_value = self.CalcAtk()
        def_value = enemy.CalcDef()
        damage = 0
        if atk_value > def_value:
            # A successful hit always deals at least 1 damage.
            damage = max(atk_value - def_value + self.BonusDmg, 1)
            # Health never drops below 1; GetState treats 1 as eliminated.
            enemy.HealthCurrent = max(enemy.HealthCurrent - damage, 1)
        return damage, enemy.GetState()

    def GetPriority(self):
        """Target-selection priority: healthier targets + a random tiebreak."""
        return self.HealthCurrent / self.HealthMax + random.random()
#Záznam soubojů jednoho kola
class Round():
    """One combat round: who fights, in what order, and who hit whom."""

    def __init__(self, previous: 'Round' = None, fighters: list = None):
        if previous:
            # Carry over who attacked last and drop eliminated fighters.
            self.lastFP = previous.GetLast()
            self.fighters = [f for f in previous.fighters
                             if f.GetState() != State.Eliminated]
        else:
            self.lastFP = None
            self.fighters = fighters
            assert (fighters != None)
        self.attacks = []  # list of (attacker name, defender name, damage)

    def GetLast(self):
        """Name of the last fighter who attacked in this round."""
        return self.attacks[-1][0]

    def SortAttackers(self):
        """Shuffle attack order; last round's final attacker never goes first."""
        fighters = self.fighters
        random.shuffle(fighters)
        if fighters and fighters[0].Name == self.lastFP:
            # Bug fix: pop(0) removes the offending fighter; the original
            # called pop(), which removed the *last* element and left the
            # previous round's final attacker in first position anyway.
            first = fighters.pop(0)
            if len(fighters) <= 1:
                position = 0
            else:
                position = random.randint(0, len(fighters) - 1)
            if position == 0:
                fighters.append(first)  # put at the very end
            else:
                fighters.insert(position, first)

    # Pick the most attractive target for this fighter.
    def PickTarget(self, fighter):
        """Return (index, fighter) of the living opponent with the highest
        GetPriority(); (sentinel, None) when nobody is left to attack."""
        assert (fighter.GetState() != State.Eliminated)
        best_prio = -2 ** 10
        chosen = None
        cid = -100
        for idx, cand in enumerate(self.fighters):
            if cand.Name == fighter.Name:
                continue  # never target yourself
            if cand.GetState() == State.Eliminated:
                continue
            prio = cand.GetPriority()
            if prio > best_prio:
                chosen, cid, best_prio = cand, idx, prio
        return cid, chosen

    # Attackers act in random order; each attack resolves immediately.
    def Simulate(self):
        """Run the round: every still-standing fighter attacks its target."""
        self.SortAttackers()
        for attacker in self.fighters:
            if attacker.GetState() == State.Eliminated:
                continue  # knocked out earlier this round
            _, defender = self.PickTarget(attacker)
            if not defender:  # nobody left to fight
                return
            dmg, _ = attacker.Attack(defender)
            self.attacks.append((attacker.Name, defender.Name, dmg))
        return
#Simulace celého souboje, momentálně mají všichni stejně zdraví (parametr "verbose" vypisuje stav po jednotlivých kolech)
def OneFight(FighterCount, Health, Attack, Defense, Bonus, Names, verbose=False):
    """Simulate one full fight until a single fighter remains.

    All fighters start with identical stats; returns (winning Fighter,
    number of rounds played). `verbose` prints the roster each round.
    """
    assert len(Names) >= FighterCount
    Fighters = [Fighter(Names[i], Health, Attack, Defense, Bonus)
                for i in range(FighterCount)]
    if verbose:
        for f in Fighters:
            print(f)
        # Bug fix: this separator was unreachable dead code behind a
        # `continue` inside the loop above; print it once after the
        # roster, matching the per-round report below.
        print("-" * 20 + "\n")
    LastRound = None
    rounds = []
    # Keep playing rounds until at most one fighter survives.
    while (not LastRound) or (len(LastRound.fighters) > 1):
        NewRound = Round(None, Fighters) if not LastRound else Round(LastRound)
        NewRound.Simulate()
        rounds.append(NewRound)
        LastRound = NewRound
        if verbose:
            for f in sorted(LastRound.fighters, key=lambda x: x.Name):
                print(f)
            print("-" * 20 + "\n")
    if verbose:
        print("Počet kol:", len(rounds))
    return LastRound.fighters[0], len(rounds)
#Spočítá více soubojů, počet výher pro jednotlivé hráče a průměrnou délku
def EvalSims(FighterCount, Health, Attack, Defense, Bonus):
    """Run 1000 fights with the given stats and report wins per name plus
    the average fight length. Returns (sorted win counts, average rounds)."""
    Names = "ABCDEFGHIJKLMN"
    print("Počet:", FighterCount)
    print("Zdraví:", Health)
    print("Síla:", Attack)
    print("Obrana:", Defense)
    print("Útočnost:", Bonus)
    simulations = 1000
    total = 0
    winners = {}
    for _ in range(simulations):
        champion, rounds = OneFight(FighterCount, Health, Attack, Defense,
                                    Bonus, Names, False)
        winners[champion.Name] = winners.get(champion.Name, 0) + 1
        total += rounds
    print("Průměr:", total / simulations)
    print("---")
    winners = sorted(winners.items())
    for name, wins in winners:
        print(f"{name}:{wins}")
    print("---\n")
    return winners, total / simulations
def LeastSquareDiff(f1, f2):  # assuming the x values are the same
    """Sum of squared differences between two equal-length sequences."""
    assert len(f1) == len(f2)
    return sum((a - b) ** 2 for a, b in zip(f1, f2))
def CalcVar(lst):
    """Total squared deviation from the mean (i.e. variance * len(lst))."""
    mean = sum(lst) / len(lst)
    return sum((item - mean) ** 2 for item in lst)
#Běží souboje s různými parametry, vabere ten s nejmeněím rozptylem
def CompareFights():
    """Grid-search fight parameters; keep the combination whose normalized
    win counts have the smallest total variance (i.e. the fairest setup).

    Ctrl-C aborts the search early and reports the best result so far.
    """
    counts = list(range(2, 10 + 1))
    bestTotalVariance = 2 ** 30
    bestVals = (None, None, None, None)
    bestAvgs = []
    bestWinCounts = None
    try:
        for Health in range(10, 30 + 1):
            for Attack in range(0, 4 + 1):
                # Without extra bonuses only Attack - Defense matters, so
                # Defense stays fixed at 0 to keep the search space small.
                for Defense in range(0, 1):
                    for Bonus in range(0, 1 + 1):
                        avgs = []
                        winning = []
                        normalized = []
                        for FighterCount in counts:
                            winners, avg = EvalSims(FighterCount, Health,
                                                    Attack, Defense, Bonus)
                            avgs.append(avg)
                            winning.append([w[1] for w in winners])
                            # Normalize wins relative to the player count.
                            normalized.append([w[1] * FighterCount / counts[-1]
                                               for w in winners])
                        vars = [CalcVar(nw) for nw in normalized]
                        totalVariance = sum(v * v for v in vars)
                        if totalVariance < bestTotalVariance:
                            bestTotalVariance = totalVariance
                            bestVals = (Health, Attack, Defense, Bonus)
                            bestWinCounts = winning
                            # Bug fix: bestAvgs was printed below but never
                            # recorded, so it was always empty.
                            bestAvgs = avgs
    except KeyboardInterrupt:
        # Allow aborting a long search; the finally block still reports.
        pass
    finally:
        print(bestVals)
        print(bestWinCounts)
        print(bestAvgs)
        print(bestTotalVariance)
def main():
    """Entry point: evaluate one hand-picked parameter set."""
    # CompareFights()
    winners, average = EvalSims(FighterCount=5, Health=17, Attack=4,
                                Defense=3, Bonus=1)
    print(winners, average)


if __name__ == "__main__":
    main()
| Daar543/IF_Deadland_Simulace | IF_Deadland_Simulace/IF_Deadland_Simulace.py | IF_Deadland_Simulace.py | py | 8,400 | python | en | code | 0 | github-code | 13 |
11553523323 | #!usr/bin/env python
"""Pygame demo: draw a background and track the mouse with a sprite."""
background_image_filename = 'sushiplate.jpg'
mouse_image_filename = 'fugu.png'
import pygame
#import pygame frame
from pygame.locals import *
#import some useful functions from pygame
from sys import exit
#borrow an exit function from sys frame
pygame.init()
#initial pygame, prepare for some hardware device
screen = pygame.display.set_mode((640,480), 0, 32)
pygame.display.set_caption('Hello, World!')
# convert() matches the images to the display's pixel format for fast blits.
background = pygame.image.load(background_image_filename).convert()
mouse_cursor = pygame.image.load(mouse_image_filename).convert()
while True:
    #main loop for our game
    for event in pygame.event.get():
        if event.type == QUIT:
            #quit the game
            exit()
    screen.blit(background, (0,0))
    x, y = pygame.mouse.get_pos()
    x-=mouse_cursor.get_width()/2
    y-=mouse_cursor.get_height()/2
    #calculate the mouse left above corne position
    screen.blit(mouse_cursor,(x,y))
    pygame.display.update()
| ginlee/pygames | game1.py | game1.py | py | 967 | python | en | code | 0 | github-code | 13 |
37175493564 | import numpy as np
import kurucz_inten as ki
import scipy.constants as sc
import scipy.interpolate as si
"""
WINE: Waveband INtegrated Emission module
This set of routines calculate the integrated emission spectrum of
a signal over specified filter wavebands.
"""
def readfilter(filt):
"""
Load a filter bandpass from file.
Notes:
------
- The file can contains empty lines and comments (with '#' character)
before the data. No comments or empty lines after the data.
- The data must come in two columns. The first column must contain
the wavelength in microns, the second the filter response, other
columns will be ignored.
Parameters:
-----------
filt: String
Filter file name.
Return:
-------
wavenumber: 1D ndarray
The filter pass band wavenumber in cm^-1.
transmission: 1D ndarray
The filter spectral response. No specific units.
Modification History:
---------------------
2013-01-23 patricio Initial implementation. pcubillos@fulbrightmail.org
2014-03-26 patricio Changed input to the file name.
"""
# Open and read the filter file:
data = open(filt, "r")
lines = data.readlines()
data.close()
# Remove header comments and empty lines:
while lines[0].startswith("#") or not lines[0].strip():
comment = lines.pop(0)
# Allocate arrays for the wavelength and response:
nlines = len(lines)
wavel = np.zeros(nlines, np.double) # filter's wavelengths (in microns)
transm = np.zeros(nlines, np.double) # filter's pass bands
# Read the data and store in reverse order:
for i in np.arange(nlines):
wavel[nlines-1-i], transm[nlines-1-i] = lines[i].strip().split()[0:2]
m2cm = 1e-4 # Microns to cm conversion factor
# Get wavenumber in cm-1:
waven = 1.0 / (wavel*m2cm)
# Return statement:
return waven, transm
def readkurucz(kfile, temperature, logg):
"""
Load a the Kurucz stellar spectrum with parameters closest to requested
temperature and log(g).
Parameters:
-----------
kfile: String
Path to the kurucz file.
temperature: Scalar
Surface temperature in K.
logg: Scalar
log10 of surface gravity (g in cgs units).
Returns:
--------
starfl: 1D ndarray
Kurucz stellar flux in ergs s-1 cm-2 cm.
starwn: 1D ndarray
Array with wavenumber values in cm^-1.
tmodel: Scalar
Surface temperature of the model in K.
gmodel: Scalar
log10 of the surface gravity for the model (g in cgs units).
Modification History:
---------------------
2013-01-23 patricio Initial implementation. pcubillos@fulbrightmail.org
"""
inten, freq, grav, temp, nainten, head = ki.read(kfile, freq=True)
# Wavenumber in cm^-1
starwn = freq / sc.c * 1e-2
# Find the model index with the nearest temp and log(g):
# Nearest sampled temperature:
tmodel = temp[np.argmin(np.abs(temp-temperature))]
# Nearest sampled log(g):
gmodel = grav[np.argmin(np.abs(grav-logg))]
imodel = np.where((temp == tmodel) & (grav >= gmodel))[0][0]
# Get the stellar flux:
starfl = inten[imodel] # W m^-2 sr^-1 Hz^-1
# Convert F_freq to F_wavenumber (Hz-1 --> m):
# multiply by c.
# Convert units MKS to cgs:
# W m-2 = 1e3 ergs s-1 cm-2
# Convert intensity (astrophysical flux) to flux:
# sr-1 = pi
# Flux per wavenumber: ergs s-1 cm-2 cm
starfl = starfl * 1e3 * np.pi * (1e2 * sc.c)
return starfl, starwn, tmodel, gmodel
def resample(specwn, filterwn, filtertr, starwn, starfl):
"""
Resample the filtertr curve from the filterwn sampling into specwn
Parameters:
-----------
specwn: 1D ndarray
A wavenumber sampling array (in cm-1).
filterwn: 1D ndarray
Filter wavenumber sampling array (in cm-1).
filtertr: 1D ndarray
Filter transmission curve sampled as filterwn.
starwn: 1D ndarray
Stellar model wavenumber sampling array (in cm-1).
starfl: 1D ndarray
Stellar flux.
Returns:
--------
ifilter: 1D ndarray
The interpolated filter transmission curve.
istarfl: 1D ndarray
The interpolated stellar flux.
indices: 1D ndarray
The indices of specwn where the filter was interpolated into.
Modification History:
---------------------
2013-01-23 patricio Initial implementation.
2014-03-26 patricio Adapted for output converter as resample.
"""
# Indices in the spectrum wavenumber array included in the band
# wavenumber range:
wnindices = np.where((specwn < filterwn[-1]) & (filterwn[0] < specwn))
# Make function to spline-interpolate the filter and stellar flux:
finterp = si.interp1d(filterwn, filtertr)
sinterp = si.interp1d(starwn, starfl)
# Evaluate the stellar spectrum on specwn:
istarfl = sinterp(specwn[wnindices])
# Evaluate over the spectrum wavenumber array:
ifilter = finterp(specwn[wnindices])
# Normalize to integrate to 1.0:
nifilter = ifilter/np.trapz(ifilter, specwn[wnindices])
# Return the normalized interpolated filter and the indices:
return nifilter, istarfl, wnindices
def bandintegrate(spectrum, specwn, nifilter, wnindices):
"""
Integrate a spectrum over the band transmission.
Parameters:
-----------
spectrum: 1D ndarray
Spectral signal to be integrated
specwn: 1D ndarray
Wavenumber of spectrum in cm^-1
nifilter: 1D ndarray
The normalized interpolated filter transmission curve.
wnindices: 1D ndarray
Indices of specwn where bandtr is evaluated.
Modification History:
---------------------
2014-03-26 patricio Initial implementation.
"""
# Flux ratio:
# fratio = Fplanet / Fstar * rprs**2.0
return np.trapz(spectrum*nifilter, specwn[wnindices])
| exosports/BART | code/wine.py | wine.py | py | 5,665 | python | en | code | 31 | github-code | 13 |
19253736902 | # Вызывается tkinter и messagebox для дальнейшего использования
import tkinter as tk
from tkinter import messagebox
import area_data
root = tk.Tk()
min_game_area_size = 4
max_game_area_size = 10
game_area_size = 0
restart_button = None
game_area_data = []
buttons_list = []
# Функция для запуска игры с полем ввода размера и кнопкой для запуска
def init_game(start_button, input_element):
global game_area_data, game_area_size
game_area_size = input_element.get()
if game_area_size == '':
return
game_area_size = int(game_area_size)
game_area_size = get_correct_size(game_area_size)
game_area_data = area_data.init_game_area_data(game_area_size)
game_mines_count = area_data.get_mines_count(game_area_size)
game_area_data = area_data.set_mines_data(game_area_data, game_mines_count)
input_element.destroy()
start_button.destroy()
start_game(game_area_size)
# Указываются минимальные и максимальные размеры игрового поля
def get_correct_size(n):
if n < min_game_area_size:
return min_game_area_size
elif n > max_game_area_size:
return max_game_area_size
return n
# Функция по перезапуску игры
def restart_game():
global restart_buttond
# После нажатия кнопки перезапуска, игровое поле и кнопка пропадют
if restart_button:
restart_button.destroy()
for button in buttons_list:
button.destroy()
buttons_list.clear()
start()
# Проверка выигрыша с увеломлением об этом
def check_winnig():
for el in game_area_data:
if not el:
return
show_all_mines()
messagebox.showinfo('Уведомление', 'Победа!')
restart_game()
# Показывает все мины на игровом поле после победы или поражения
def show_all_mines():
for i in range(len(game_area_data)):
buttons_list[i].config(text=game_area_data[i])
# Функция в который описывается проигрыш после нажатия на 'мину'
def on_cell_click(number):
if game_area_data[number] == '☠':
show_all_mines()
messagebox.showinfo('Уведомление', 'Вы проиграли!')
restart_game()
return
count = area_data.get_count_near_mines(game_area_data, game_area_size, number)
game_area_data[number] = count
buttons_list[number].config(state='disabled', text=count)
check_winnig()
def start_game(size):
global game_area_data, restart_button
for i in range(size ** 2):
button = tk.Button(root, width=4, height=2, command=lambda number=i: on_cell_click(number))
buttons_list.append(button)
for g in range(len(buttons_list)):
row_num = g // size + 1
column_num = g % size
button = buttons_list[g]
button.grid(row=row_num, column=column_num)
restart_button = tk.Button(root, width=4, height=2, text='R', command=lambda: restart_game())
restart_button.grid()
def start():
input_element = tk.Entry(root)
start_button = tk.Button(root, width=5, height=1, text='start', command=lambda: init_game(start_button, input_element))
input_element.grid(row=0, column=0)
start_button.grid(row=0, column=1)
root.title('Сапёр')
root.minsize(480, 360)
root.resizable(width=True, height=True)
start()
root.mainloop()
| Egor-123/Sapper | main.py | main.py | py | 3,637 | python | ru | code | 0 | github-code | 13 |
8441271144 | import board
from digitalio import DigitalInOut, Direction, Pull
from oled import oled_display, oled_text
import countio
import time
import pwmio
# Count rising edges only.
pin_counter = countio.Counter(board.GP21, edge=countio.Edge.RISE, pull=Pull.DOWN)
# pump_pwm = pwmio.PWMOut(board.GP14, frequency=30, duty_cycle=int(65535 / 2))
flow_k = 23
oled_text("Oil Flowrate")
while True:
tn = time.monotonic()
ic = pin_counter.count
time.sleep(5)
ac = pin_counter.count
et = time.monotonic() - tn
f = (ac - ic) / et
print(ic, ac)
flow = f / flow_k
oled_text(f"LPM: {flow}")
if pin_counter.count >= 10000:
pin_counter.reset()
| greyliedtke/PyExplore | CircuitPython/OLD/Oil_flowrate/code.py | code.py | py | 674 | python | en | code | 0 | github-code | 13 |
70915540179 | import tensorflow as tf
from tensorflow import keras
class TensorRing_Based(keras.layers.Layer):
def __init__(self, units=1, activation=None, rank=10, local_dim=2,initializer=keras.initializers.glorot_normal(seed=None),regularizer=keras.regularizers.l2(0.0), **kwargs):
super().__init__(**kwargs)
self.units = units
self.activation = keras.activations.get(activation)
self.rank = rank
self.local_dim = local_dim
self.initializer=initializer
self.kernel_regularizer = regularizer
def build(self, batch_input_shape):
self.kernel = self.add_weight(name="kernel", shape=[self.local_dim, self.rank, self.rank, batch_input_shape[-2], self.units],
initializer=self.initializer,regularizer=self.kernel_regularizer)
super().build(batch_input_shape)
def call(self, X):
feat_tensor=X # NtxNxd
output_list=[]
for unit in range(0,self.units):
feat_tensor_reshaped=tf.transpose(feat_tensor,perm=[1,0,2]) # NxNtxd
weights=self.kernel[:,:,:,:,unit] # initial shape of weights is dxmxmxN
weights=tf.transpose(self.kernel[:,:,:,:,unit],perm=[3,0,1,2]) # Nxdxmxm
weights=tf.reshape(weights,shape=(-1,self.local_dim,self.rank**2)) # Nxdxm^2
test=tf.matmul(feat_tensor_reshaped,weights) # NxNtxm^2
test=tf.reshape(test,shape=(tf.shape(self.kernel)[3],-1,self.rank,self.rank)) #NxNtxmxm
# N mxm matrix multiplications
output=test[0,:,:,:]
for i in range(1,8):
output=tf.matmul(output,test[i,:,:,:])
# output now is Ntxmxm
output=tf.linalg.trace(output) # Ntx1
output_list.append(output)
to_return=tf.stack(output_list, axis=1)
return self.activation(to_return)
def compute_output_shape(self, batch_input_shape):
return tf.TensorShape(batch_input_shape.as_list()[:-1] + [self.units])
def get_config(self):
base_config = super().get_config()
return {**base_config, "units": self.units,
"activation": keras.activations.serialize(self.activation)}
class OurModel(keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.layer1 = AllOrder()
def call(self, inputs):
return self.layer1(inputs)
| KritonKonstantinidis/CPD_Supervised_Learning | Regression Tasks/TensorRing_Model.py | TensorRing_Model.py | py | 2,593 | python | en | code | 1 | github-code | 13 |
14316459980 | # -*- coding: utf8 -*-
bl_info = {
"name": "Import XYZ to Mesh",
"author": "europrimus@free.fr",
"version": (0, 6),
"blender": (2, 7, 0),
"location": "File > Import > Import XYZ to Mesh",
"description": "Import text point file to new Mesh object",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Import"}
import bpy
from mathutils import Vector
from math import sqrt
from bpy_extras.object_utils import AddObjectHelper, object_data_add
from bpy_extras.io_utils import ImportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty, FloatProperty, FloatVectorProperty
from bpy.types import Operator
from time import time
#GeoRef : gestion du géoréférencement
def GeoRef_get(context):
print("-"*5,"GeoRef_get","-"*5)
try:
DeltaX=context.scene["Georef X"]
DeltaY=context.scene["Georef Y"]
DeltaZ=context.scene["Georef Z"]
EPSG=context.scene["Georef EPSG"]
ScaleXY=context.scene["Georef ScaleXY"]
ScaleZ=context.scene["Georef ScaleZ"]
except KeyError:
return "NoGeoRef"
return {"EPSG":EPSG,"ScaleXY":ScaleXY,"ScaleZ":ScaleZ,"DeltaX":DeltaX,"DeltaY":DeltaY,"DeltaZ":DeltaZ}
def GeoRef_set(context,EPSG,X,Y,Z,ScaleXY=1,ScaleZ=1):
print("-"*5,"GeoRef_set","-"*5)
r=10
try:
DeltaX=context.scene["Georef X"]
DeltaY=context.scene["Georef Y"]
except KeyError:
DeltaX=round(X/r,0)*r
context.scene["Georef X"]=DeltaX
DeltaY=round(Y/r,0)*r
context.scene["Georef Y"]=DeltaY
else:
context.scene["Georef Z"]=0
try:
DeltaZ=context.scene["Georef Z"]
except KeyError:
DeltaZ=round(Z/r,0)*r
context.scene["Georef Z"]=DeltaZ
try:
EPSG=context.scene["Georef EPSG"]
except KeyError:
context.scene["Georef EPSG"]=EPSG
try:
ScaleXY=context.scene["Georef ScaleXY"]
except KeyError:
context.scene["Georef ScaleXY"]=ScaleXY
try:
ScaleZ=context.scene["Georef ScaleZ"]
except KeyError:
context.scene["Georef ScaleZ"]=ScaleZ
return {"EPSG":EPSG,"ScaleXY":ScaleXY,"ScaleZ":ScaleZ,"DeltaX":DeltaX,"DeltaY":DeltaY,"DeltaZ":DeltaZ}
def read_line(File,Config):
Line=File.readline()
if Config["Debug"]:
print("-"*3,"read_line","-"*3)
print("Line:",Line)
Line=Line.rstrip("\n")
Temp=Line.split(Config["Sep"])
if Config["Debug"]:print("Temp",Temp)
try :
X=round(float(Temp[Config["X"]]),Config["Round"])
Y=round(float(Temp[Config["Y"]]),Config["Round"])
Z=round(float(Temp[Config["Z"]]),Config["Round"])
if Config["Debug"]:print("XYZ",X,Y,Z)
except :
print("Error: X Y or Z are not a number")
return "ERROR"
return [X,Y,Z]
def subtract(A,B):
result=[]
n=0
for n in range(0,len(A)):
result.append(A[n]-B[n])
n=n+1
return result
def read_PointFile(context,FileName,Config):
print("-"*5,"read_PointFile","-"*5)
if Config["Debug"]:print("Config:",Config)
if Config["Debug"] : print("FileName:",FileName)
GeoRef=GeoRef_get(context)
if GeoRef == "NoGeoRef" :
File=open(FileName, 'rt',errors='surrogateescape')
coord=read_line(File,Config)
File.close()
print("coord:",coord)
# GeoRef={"EPSG":Config["EPSG"],"ScaleXY":1,"ScaleZ":1,"DeltaX":coord[0],"DeltaY":coord[1],"DeltaZ":coord[2]}
# GeoRef_set(context,GeoRef["EPSG"],GeoRef["DeltaX"],GeoRef["DeltaY"],GeoRef["DeltaZ"],GeoRef["ScaleXY"],GeoRef["ScaleZ"])
GeoRef=GeoRef_set(context,Config["EPSG"],coord[0],coord[1],coord[2])
# print("GeoRef:",GeoRef["EPSG"],GeoRef["ScaleXY"],GeoRef["ScaleZ"],GeoRef["DeltaX"],GeoRef["DeltaY"],GeoRef["DeltaZ"])
if Config["EPSG"] != GeoRef["EPSG"]:
print("-"*3,"Warning: EPSG code different than the scene. File to be loaded:",Config["EPSG"],"scene:",GeoRef["EPSG"])
if Config["Debug"]:print("GeoRef:",GeoRef)
Delta=[GeoRef["DeltaX"],GeoRef["DeltaY"],GeoRef["DeltaZ"]]
Config["Min"] = Vector((Config["Min"][0]-Delta[0], Config["Min"][1]-Delta[1],Config["Min"][2]-Delta[2]))
Config["Max"] = Vector((Config["Max"][0]-Delta[0], Config["Max"][1]-Delta[1],Config["Max"][2]-Delta[2]))
print ("-"*3,"Load_File","-"*3)
Verts=[] #Vecteurs des sommets
Edges=[] #Aretes
Faces=[]
NbPoints=0 #initialisation du nombre de point chargé
NbEdges=0 #initialisation du d'arrétes créé
NbLine=0
OldPoint=Vector()
NbPointsDisp=False
#lecture du fichier de points
File=open(FileName, 'rt',errors='surrogateescape')
coord=""
while coord != "ERROR" and NbLine < Config["MaxLine"]:
coord=read_line(File,Config)
if coord =="ERROR" : break
NbLine+=1
if NbPoints % 10**3 ==0 and NbPointsDisp :
print(NbPoints,"points loaded")
NbPointsDisp=False
if NbLine % Config["Decimate"] == 0:
NewPoint=Vector(subtract(coord,Delta))
if Config["Debug"]:print("NewPoint",NewPoint)
if Config["Min"][0] <= NewPoint[0] <= Config["Max"][0] and Config["Min"][1] <= NewPoint[1] <= Config["Max"][1] and Config["Min"][2] <= NewPoint[2] <= Config["Max"][2]:
if NewPoint==OldPoint:
if Config["Debug"]:print("point déja chargé",NewPoint,"=",OldPoint)
else:
Verts.append(NewPoint)
NbPoints+=1
NbPointsDisp=True
OldPoint=NewPoint
if NbPoints >= Config["MaxPoints"]:
if Config["Debug"]:print("Nombre de point dépassé",NbPoints,">",Config["MaxPoints"])
break
else:
if Config["Debug"]:print("hors selection",Config["Min"]," <=",NewPoint," <= ",Config["Max"])
File.close()
#Création des sommets
#print("Verts",Verts)
MeshName=FileName.split("/")[-1]
print("MeshName",MeshName)
Mesh = bpy.data.meshes.new(name=MeshName)
Mesh.from_pydata(Verts, Edges, Faces)
Mesh.validate(verbose=Config["Debug"])
if Config["Debug"]: print("Mesh:",Mesh)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
Object=object_data_add(context, Mesh)
return (NbPoints,NbLine)
class ImportPointFile(Operator, ImportHelper):
"""This appears in the tooltip of the operator and in the generated docs"""
bl_idname = "import_test.point_file" # important since its how bpy.ops.import_test.some_data is constructed
bl_label = "Import XYZ to Mesh"
bl_options = {'UNDO'}
# ImportHelper mixin class uses this
filename_ext = ".txt"
filter_glob = StringProperty(name="Filtre", default="*.txt;*.xyz;*.csv", options={'HIDDEN'} )
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
#use_setting = BoolProperty(name="Example Boolean",description="Example Tooltip",default=True,)
#Configuration
Config_sep = StringProperty(name="separator",description="separator between column",default=" ",)
Config_StartLine = FloatProperty(name="Start line",description="Start at line n",default=1,min=0,max=10,soft_min=0,soft_max=10,step=1, precision=0,subtype="NONE",unit="NONE")
Config_MaxLine = FloatProperty(name="Max line",description="max of line to read",default=10**6,min=10,max=10**7,soft_min=10,soft_max=10**7,step=100, precision=0,subtype="NONE",unit="NONE")
Config_X = FloatProperty(name="column X",description="X are in column n",default=1,min=0,max=10,soft_min=0,soft_max=10,step=1, precision=0,subtype="NONE",unit="NONE")
Config_Y = FloatProperty(name="column Y",description="Y are in column n",default=2,min=0,max=10,soft_min=0,soft_max=10,step=1, precision=0,subtype="NONE",unit="NONE")
Config_Z = FloatProperty(name="column Z",description="Z are in column n",default=3,min=0,max=10,soft_min=0,soft_max=10,step=1, precision=0,subtype="NONE",unit="NONE")
Config_min=FloatVectorProperty(name="min", description="minimum bouding box corner", default=(0, 0, 0), step=10, precision=0, options={'ANIMATABLE'}, subtype='NONE', size=3, )
Config_max=FloatVectorProperty(name="max", description="maximum bouding box corner", default=(10**7, 10**7, 1000), step=10, precision=0, options={'ANIMATABLE'}, subtype='NONE', size=3, )
Config_maxpoints=FloatProperty(name="Max point",description="maximum points to be loaded",default=10**6,min=100,max=10**7,step=100, precision=0,subtype="NONE",unit="NONE")
Config_round=FloatProperty(name="round",description="rounding at n",default=3,min=0,max=6,step=1, precision=0,subtype="NONE",unit="NONE")
Config_decimate=FloatProperty(name="decimate",description="load 1 point each n",default=1,min=1,max=10**6,step=1, precision=0,subtype="NONE",unit="NONE")
# Config_MaxDist = FloatProperty(name="Distance max",description="Distance maximal pour créer des edges (0: ne pas créer les edges",default=0,min=0,max=10,soft_min=0,soft_max=10,step=0.1, precision=3,subtype="NONE",unit="NONE")
Config_EPSG = FloatProperty(name="EPSG code",description="Code EPSG (coordinate system)",default=3947,min=0,max=10000,soft_min=0,soft_max=10000,step=1, precision=0,subtype="NONE",unit="NONE")
Config_debug= BoolProperty(name="Debug",description="see debuging info in systeme console",default=False,)
def execute(self, context):
debut=time()
result = read_PointFile(context,self.filepath,{"Sep" : str(self.Config_sep), "StartLine" : int(self.Config_StartLine), "Debug" : self.Config_debug, "X" : int(self.Config_X)-1, "Y" : int(self.Config_Y)-1, "Z" : int(self.Config_Z)-1, "Min":self.Config_min, "Max":self.Config_max, "MaxPoints":int(self.Config_maxpoints), "Round":int(self.Config_round), "EPSG":self.Config_EPSG, "Decimate":self.Config_decimate, "MaxLine":self.Config_MaxLine})
duree=round(time()-debut,3)
print(result[1],"Lines read and",result[0],"points loaded in",duree,"s")
if result[1] > 1:
result={'FINISHED'}
else:
result={'ERROR'}
return result
# Only needed if you want to add into a dynamic menu
def menu_func_import(self, context):
self.layout.operator(ImportPointFile.bl_idname, text="Import XYZ to Mesh")
def register():
bpy.utils.register_class(ImportPointFile)
bpy.types.INFO_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_class(ImportPointFile)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
if __name__ == "__main__":
print("-"*10,"register","-"*10)
register()
# test call
bpy.ops.import_test.point_file('INVOKE_DEFAULT')
#unregister()
#print("unregister") | europrimus/Blender_Script | io_xyz2mesh.py | io_xyz2mesh.py | py | 10,450 | python | en | code | 0 | github-code | 13 |
43803196075 | """Module containing :class:`~song_match.song.songs.rain_rain_go_away.RainRainGoAway`."""
from typing import List
from cozmo.lights import Light
from song_match.cube.lights import BLUE_LIGHT
from song_match.cube.lights import CYAN_LIGHT
from song_match.cube.lights import PINK_LIGHT
from song_match.song import Song
from song_match.song.instrument import Instrument
from song_match.song.note import HALF_NOTE
from song_match.song.note import Note
from song_match.song.note import QUARTER_NOTE
MEDIUM = 8
LONG = 16
class RainRainGoAway(Song):
"""Rain Rain Go Away"""
@property
def _song_id(self) -> str:
return 'rrga'
@property
def _notes(self) -> List[Note]:
instrument = self.get_instrument().get_instrument_str()
return [
Note('E5', instrument),
Note('G5', instrument),
Note('A5', instrument)
]
@property
def _sequence(self) -> List[Note]:
instrument = self.get_instrument().get_instrument_str()
g_quarter = Note('G5', instrument, QUARTER_NOTE)
e_quarter = Note('E5', instrument, QUARTER_NOTE)
a_quarter = Note('A5', instrument, QUARTER_NOTE)
g_half = Note('G5', instrument, HALF_NOTE)
e_half = Note('E5', instrument, HALF_NOTE)
return [g_half, e_half,
g_quarter, g_quarter, e_half,
g_quarter, g_quarter, e_quarter, a_quarter, g_quarter, g_quarter, e_half,
g_quarter, e_half,
g_quarter, g_quarter, e_half,
g_quarter, g_quarter, e_quarter, a_quarter, g_quarter, g_quarter, e_half]
@property
def _cube_lights(self) -> List[Light]:
return [
PINK_LIGHT,
BLUE_LIGHT,
CYAN_LIGHT
]
@property
def _difficulty_markers(self) -> List[int]:
return [
MEDIUM,
LONG
]
| samuelschuler/CS4500 | cozmo-song-match-master/song_match/song/songs/rain_rain_go_away.py | rain_rain_go_away.py | py | 1,906 | python | en | code | 0 | github-code | 13 |
30310844710 | from xivo_bus.resources.common.event import TenantEvent, UserEvent
class _BaseUserEvent(TenantEvent):
def __init__(self, user_id, user_uuid, subscription_type, created_at, tenant_uuid):
content = {
'id': int(user_id),
'uuid': str(user_uuid),
'subscription_type': subscription_type,
'created_at': str(created_at) if created_at is not None else None,
'tenant_uuid': str(tenant_uuid),
}
super().__init__(content, tenant_uuid)
class UserCreatedEvent(_BaseUserEvent):
service = 'confd'
name = 'user_created'
routing_key_fmt = 'config.user.created'
class UserDeletedEvent(_BaseUserEvent):
service = 'confd'
name = 'user_deleted'
routing_key_fmt = 'config.user.deleted'
class UserEditedEvent(_BaseUserEvent):
service = 'confd'
name = 'user_edited'
routing_key_fmt = 'config.user.edited'
class UserFallbackEditedEvent(UserEvent):
service = 'confd'
name = 'user_fallback_edited'
routing_key_fmt = 'config.users.fallbacks.edited'
def __init__(self, user_id, tenant_uuid, user_uuid):
content = {
'id': int(user_id),
'uuid': str(user_uuid),
'subscription_type': None,
'created_at': None,
'tenant_uuid': str(tenant_uuid),
}
super().__init__(content, tenant_uuid, user_uuid)
class UserServiceEditedEvent(UserEvent):
service = 'confd'
name = 'users_services_{service_name}_updated'
routing_key_fmt = 'config.users.{user_uuid}.services.{service_name}.updated'
def __init__(self, user_id, service_name, service_enabled, tenant_uuid, user_uuid):
self.name = type(self).name.format(service_name=service_name)
content = {
'user_id': int(user_id),
'user_uuid': str(user_uuid),
'tenant_uuid': str(tenant_uuid),
'enabled': service_enabled,
}
super().__init__(content, tenant_uuid, user_uuid)
self.service_name = service_name
class UserForwardEditedEvent(UserEvent):
service = 'confd'
name = 'users_forwards_{forward_name}_updated'
routing_key_fmt = 'config.users.{user_uuid}.forwards.{forward_name}.updated'
def __init__(
self,
user_id,
forward_name,
forward_enabled,
forward_dest,
tenant_uuid,
user_uuid,
):
self.name = type(self).name.format(forward_name=forward_name)
content = {
'user_id': int(user_id),
'user_uuid': str(user_uuid),
'tenant_uuid': str(tenant_uuid),
'enabled': forward_enabled,
'destination': forward_dest,
}
super().__init__(content, tenant_uuid, user_uuid)
self.forward_name = forward_name
| wazo-platform/xivo-bus | xivo_bus/resources/user/event.py | event.py | py | 2,810 | python | en | code | 1 | github-code | 13 |
38916394313 | import matplotlib.pyplot as plt
import numpy as np
actual_steering = list()
computed_steering = list()
# get the standard deviation of the steering angle
std_steering = np.std(computed_steering)
# check whether the computed steering angle is within the threshold +/- 0.05
# and calculate the number of correct predictions
correct_predictions = 0
for i in range(len(computed_steering)):
if abs(computed_steering[i] - actual_steering[i]) < 0.05:
correct_predictions += 1
# print the results
print('The standard deviation of the steering angle is: ' + str(std_steering))
print('The number of correct predictions is: ' + str(correct_predictions))
print('The percentage of correct predictions is: ' + str(correct_predictions / len(computed_steering)))
# print the total number of data points
print('The total number of data points is: ' + str(len(computed_steering)))
| DrakeAxelrod/Cyber-Physical-Systems-and-Systems-of-Systems | scripts/domagic.py | domagic.py | py | 876 | python | en | code | 24 | github-code | 13 |
73990908177 |
import re
import json
from decimal import *
from docpart import DocPart
from conditionparser import ConditionParser
class ProseMaker(object):
def __init__(self):
self._data = {}
self._json = ''
self._source = ''
## data property ------------------------------------------------
@property
def data(self):
""" The 'data' property """
return self._data
@data.setter
def data(self, value):
self._data = value
self._json = json.dumps(value, indent=4)
return self._data
@data.deleter
def data(self):
del self._data
## dataJSON property --------------------------------------------
@property
def dataJSON(self):
""" 'dataJSON' property, data as a JSON string """
return self._json
@dataJSON.setter
def dataJSON(self, value):
self._json = value
self._data = json.loads(value, parse_float=Decimal)
return self._json
@dataJSON.deleter
def dataJSON(self):
del self._json
del self._data
## source property ----------------------------------------------
@property
def source(self):
""" The 'source' property """
return self._source
@source.setter
def source(self, value):
self._source = value
raw_parts = self._source.split('[[')
self._parts = [DocPart(raw_part) for raw_part in raw_parts]
return self._source
@source.deleter
def source(self):
del self._source
## doc property -------------------------------------------------
@property
def doc(self):
""" The 'doc' property """
resolved_parts = [self.resolve_document_part(part) for part in self._parts]
return(''.join(resolved_parts))
# ---------------------------------------------------------------
def resolve_document_part(self, doc_part):
if self.resolve_condition(doc_part.condition):
return self.resolve_content(doc_part.content)
else:
return ''
# ---------------------------------------------------------------
def resolve_condition(self, condition):
# force the condition to be stringy
condition = str(condition)
# use a condition parser to get the answer
cp = ConditionParser(condition, self._data)
return cp.result
# ---------------------------------------------------------------
def resolve_content(self, content):
content = str(content) # force it into a string form (probably it's already a string)
if len(self.data) > 0: # no need to insert vars if there aren't any vars
replacements = 1
# repeatedly perform variable replacement until we didn't do any
# replacements. That means you can have a replacement inside
# a varname. For example, if you have this source:
#
# "Today you have to wake up at {{alarm_{{daytype}}_time}} sharp!"
#
# And this data:
#
# { daytype: 'weekday',
# alarm_weekday_time: '6am',
# alarm_weekend_time: '9am' }
#
# Your result will be:
#
# "Today you have to wake up at 6am sharp!"
#
# But if you change the daytype to "weekend", you'll get 9am in the
# result. Cool hey.
#
def var_lookup(match):
if self.data[match.group(1)]:
return self.data[match.group(1)]
while (replacements > 0):
replacements = 0
# this regex will catch {{placeholders}} that have no inner
# placeholders, so the most nested {{curlies}} get resolved
# first.
content, replacements = re.subn(
r'{{\s*([^\{\}]+?)\s*}}',
self.resolve_replacement,
content
)
return content
# ---------------------------------------------------------------
def resolve_replacement(self, match):
# match.group(1) is a comma-separated list of stuff. the 1st
# thing is the varname. That's optionally followed by
# a list of transformations.
transforms = re.split('\s*,\s*', match.group(1))
start_value = transforms.pop(0)
if self.data[start_value]:
# if it's a var we know, we can do something with it
val = self.data[start_value]
try:
# Decimal(1.1) gives 1.100000000000000088817841970012523233890533447265625
# Decimal(repr(1.1)) gives 1.1
val = Decimal(repr(val))
except InvalidOperation:
# that's okay, it doesn't want to be a Decimal
pass
# function for doing rounding
def round(val, unit, method):
unit = Decimal(unit)
val = val / unit
val = val.quantize(Decimal('1'), context=Context(rounding=method))
val = val * unit
# turn -0 into 0
if val.is_zero():
val = Decimal('0')
return val
for transform in transforms:
trans_args = transform.split() # strips whitespace too
trans_name = trans_args.pop(0).lower()
if trans_name == 'absolute':
val = abs(val)
continue
if trans_name == 'round':
unit = Decimal(trans_args[0]) if len(trans_args) > 0 else Decimal('1')
val = round(val, unit, ROUND_HALF_EVEN)
# val = val / unit
# val = val.quantize(Decimal('1'), context=Context(rounding=ROUND_HALF_EVEN))
# val = val * unit
# # turn -0 into 0 and 1.00 into 1
# val = Decimal('0') if val.is_zero() else val.normalize()
continue
if trans_name == 'roundup':
unit = Decimal(trans_args[0]) if len(trans_args) > 0 else Decimal('1')
val = round(val, unit, ROUND_UP)
continue
if trans_name == 'rounddown':
unit = Decimal(trans_args[0]) if len(trans_args) > 0 else Decimal('1')
val = val = round(val, unit, ROUND_DOWN)
continue
if trans_name == 'plural':
plural_part = trans_args[0] if len(trans_args) > 0 else 's'
single_part = trans_args[1] if len(trans_args) > 1 else ''
if val == 1:
val = single_part
else:
val = plural_part
continue
raise Exception('transformation "%s" is not implemented.' % trans_name)
# loop repeats for each transform
if isinstance(val, Decimal):
val = val.normalize() # turns 1.00 into 1
val = '{0:f}'.format(val) # turns 1E+1 into 10
return str(val)
else:
# if we didn't recognise the start value, just leave the entire placeholder as is
return str(match.group(0))
#
| DanielBaird/CliMAS-Next-Generation | climas-ng/climasng/parsing/prosemaker.py | prosemaker.py | py | 7,458 | python | en | code | 0 | github-code | 13 |
41906442102 | import os
import requests
from typing import Dict, List, Tuple
from pathlib import Path
from definitions import (TOKEN, HYPLAG_USER, HYPLAG_PASSWORD, HYPLAG_BACKEND_AUTH_TOKEN, HYPLAG_ID,
HYPLAG_BACKEND_POST_DOCUMENT, HYPLAG_BACKEND_GET_DOCUMENT, XML_FILES,
PDF_FILES)
from glob import glob
from datetime import datetime, timedelta
from grobid_client.grobid_client import GrobidClient
import concurrent.futures
def get_current_token() -> str:
    """Return a cached JWT for the Hyplag backend, refreshing it when needed.

    The token cache file (TOKEN) stores the UTC creation timestamp on the
    first line and the token on the second.  A cached token is reused while
    it is younger than two hours; otherwise a fresh one is requested from
    the backend and the cache file rewritten.

    Returns:
        str: JWT for authentication.
    """
    token = None
    if os.path.exists(TOKEN):
        # Open read-only: the original code opened with "w+", which truncates
        # the file on open and destroyed the cache before it could be read.
        with open(TOKEN, "r") as file:
            token_time_raw = file.readline().rstrip("\n")
            cached_token = file.readline()
        if token_time_raw and cached_token:
            token_time = datetime.strptime(token_time_raw, "%Y-%m-%d %H:%M:%S.%f")
            # Tokens are assumed valid for two hours after issuance.
            if datetime.utcnow() - token_time < timedelta(hours=2):
                token = cached_token
    if token is None:
        # Cache missing, unparsable or expired: fetch and persist a new token.
        token, token_time = get_auth_token()
        with open(TOKEN, "w") as file:
            file.writelines([str(token_time) + "\n", token])
    return token
def get_auth_token(
    username: str = HYPLAG_USER, password: str = HYPLAG_PASSWORD
) -> Tuple[str, str]:
    """Request a fresh JWT from the Hyplag backend.

    Args:
        username: Hyplag account name. Defaults to HYPLAG_USER.
        password: Hyplag account password. Defaults to HYPLAG_PASSWORD.

    Returns:
        Tuple[str, str]: (JWT, UTC timestamp string taken just before the request).
    """
    requested_at = str(datetime.utcnow())
    response = requests.post(
        url=HYPLAG_BACKEND_AUTH_TOKEN,
        data=f'{{"name": "{username}", "password": "{password}"}}',
        headers={"Content-Type": "application/json", "Accept": "application/json"},
    )
    return response.json()["token"], requested_at
def post_document(path_to_document: Path, token: str, external_id: str = HYPLAG_ID) -> int:
    """Upload a PDF document to the Hyplag backend.

    Args:
        path_to_document (Path): Path to the PDF document.
        token (str): JWT.
        external_id (str, optional): External ID for Hyplag. Defaults to HYPLAG_ID.

    Returns:
        int: Document id assigned by the backend.
    """
    headers: Dict = {"Authorization": f"Bearer {token}"}
    # Context manager guarantees the handle is closed; the original version
    # leaked the file object opened inline in the `files` dict.
    with open(path_to_document, "rb") as pdf_file:
        files = {
            "external_id": (None, f"{external_id}"),
            "multipartFile": (path_to_document.name, pdf_file),
        }
        response = requests.post(url=HYPLAG_BACKEND_POST_DOCUMENT, headers=headers, files=files)
    return response.json()
def post_documents_from_list(
    list_of_documents: List[Path], token: str, external_id: str = HYPLAG_ID, num_threads: int = 10
) -> List[int]:
    """Upload several PDF documents to the Hyplag backend in parallel.

    Args:
        list_of_documents (List[Path]): Paths of the PDF documents to upload.
        token (str): JWT.
        external_id (str, optional): External ID for Hyplag. Defaults to HYPLAG_ID.
        num_threads (int, optional): Maximum number of worker threads. Defaults to 10.

    Returns:
        List[int]: Document ids in completion order (not input order).
    """
    if not list_of_documents:
        # ThreadPoolExecutor(max_workers=0) raises ValueError, so bail out early.
        return []
    document_id_list = []
    with concurrent.futures.ThreadPoolExecutor(
        max_workers=min(num_threads, len(list_of_documents))
    ) as executor:
        futures = [
            executor.submit(post_document, str(path), token, external_id)
            for path in list_of_documents
        ]
        for future in concurrent.futures.as_completed(futures):
            document_id_list.append(future.result())
    return document_id_list
def post_documents_from_folder(
    folder: Path, token: str, external_id: str = HYPLAG_ID
) -> List[int]:
    """Upload every PDF found directly inside *folder* to the Hyplag backend.

    Args:
        folder (Path): Folder containing the PDF documents.
        token (str): JWT.
        external_id (str, optional): External ID for Hyplag. Defaults to HYPLAG_ID.

    Returns:
        List[int]: Document ids assigned by the backend.
    """
    pdf_paths = glob(str(folder) + "/*.pdf")
    return post_documents_from_list(pdf_paths, token, external_id)
def get_document(document_id: int, token: str) -> str:
    """Fetch the TEI/XML representation of a stored document.

    Args:
        document_id (int): Hyplag document id.
        token (str): JWT.

    Returns:
        str: XML document as a string.
    """
    headers = {"Authorization": f"Bearer {token}", "Accept": "application/json"}
    response = requests.get(
        url=HYPLAG_BACKEND_GET_DOCUMENT + str(document_id) + "/tei",
        headers=headers,
    )
    return response.text
def get_documents_from_list(list_document_ids: List[int], token: str, num_threads: int = 10) -> List[str]:
    """Fetch TEI/XML documents for the given ids from the Hyplag backend in parallel.

    Args:
        list_document_ids (List[int]): Hyplag document ids.
        token (str): JWT.
        num_threads (int, optional): Maximum number of worker threads. Defaults to 10.

    Returns:
        List[str]: XML strings in completion order (not input order).

    Raises:
        ValueError: If num_threads is smaller than 1.
    """
    if num_threads < 1:
        # Raise instead of assert: asserts are stripped under "python -O".
        raise ValueError("Number of threads has to be equal or greater than 1.")
    if not list_document_ids:
        # ThreadPoolExecutor(max_workers=0) raises ValueError, so bail out early.
        return []
    xml_document_list = []
    with concurrent.futures.ThreadPoolExecutor(
        max_workers=min(num_threads, len(list_document_ids))
    ) as executor:
        futures = [
            executor.submit(get_document, doc_id, token) for doc_id in list_document_ids
        ]
        for future in concurrent.futures.as_completed(futures):
            xml_document_list.append(future.result())
    return xml_document_list
def save_xml_doc(file_name: str, xml_doc: str):
    """Persist an XML document string under the XML_FILES directory.

    Args:
        file_name (str): Name of the file to create.
        xml_doc (str): XML content to write.
    """
    target = XML_FILES + str(file_name)
    with open(target, "w") as outfile:
        outfile.write(xml_doc)
def process_documents_grobid(path_to_pdf: Path = Path(PDF_FILES), num_threads: int = 10):
    """Convert the PDFs under *path_to_pdf* to TEI/XML files in XML_FILES via GROBID.

    Args:
        path_to_pdf (Path, optional): Folder with the input PDFs. Defaults to Path(PDF_FILES).
        num_threads (int, optional): Number of parallel GROBID requests. Defaults to 10.
    """
    grobid = GrobidClient(config_path="./grobid_config.json")
    grobid.process(
        service="processFulltextDocument",
        input_path=str(path_to_pdf),
        output=str(XML_FILES),
        n=num_threads,
        consolidate_header=False,
    )
if __name__ == "__main__":
    # Earlier manual end-to-end experiment, kept for reference:
    # token = get_current_token()
    # doc_id = post_document(Path(PDF_FILES + "acssuschemeng.7b03870.pdf"), token)
    # doc = get_document(doc_id, token)
    # save_xml_doc(path_to_save=Path("test2.xml"), xml_doc=doc)
    # Default action: run GROBID over the PDF folder and emit TEI/XML files.
    process_documents_grobid()
| gipplab/chem_formula_extractor | src/hyplag_backend.py | hyplag_backend.py | py | 7,134 | python | en | code | 2 | github-code | 13 |
17046629754 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayTradePaygrowthPayabilityQueryModel(object):
    """Request model for the trade paygrowth payability query API.

    All five fields are optional; to_alipay_dict() emits only the fields
    currently holding a truthy value, recursing into values that are
    themselves models (anything exposing to_alipay_dict).
    """

    # Serializable field names, in the order the gateway expects.
    _FIELDS = ('biz_identity', 'open_id', 'real_pay_amount', 'request_from', 'user_id')

    def __init__(self):
        self._biz_identity = None
        self._open_id = None
        self._real_pay_amount = None
        self._request_from = None
        self._user_id = None

    # Plain pass-through accessors for each backing attribute.
    biz_identity = property(
        lambda self: self._biz_identity,
        lambda self, value: setattr(self, '_biz_identity', value))
    open_id = property(
        lambda self: self._open_id,
        lambda self, value: setattr(self, '_open_id', value))
    real_pay_amount = property(
        lambda self: self._real_pay_amount,
        lambda self, value: setattr(self, '_real_pay_amount', value))
    request_from = property(
        lambda self: self._request_from,
        lambda self, value: setattr(self, '_request_from', value))
    user_id = property(
        lambda self: self._user_id,
        lambda self, value: setattr(self, '_user_id', value))

    def to_alipay_dict(self):
        """Serialize the truthy fields into a plain dict."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayTradePaygrowthPayabilityQueryModel()
        for name in AlipayTradePaygrowthPayabilityQueryModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayTradePaygrowthPayabilityQueryModel.py | AlipayTradePaygrowthPayabilityQueryModel.py | py | 2,978 | python | en | code | 241 | github-code | 13 |
class Solution:
    def canConstruct(self, s, k):
        """Return True if all characters of s can be split into exactly k
        non-empty palindromes (LeetCode 1400).

        Feasibility: each character with an odd count needs its own palindrome
        center, so at least `odd` palindromes are required; at most len(s)
        palindromes are possible (one character each).  Any k in between works
        by splitting even pairs into single-character palindromes.
        """
        if k > len(s):
            # Not enough characters to populate k non-empty strings.
            return False
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        odd = sum(1 for c in counts.values() if c % 2 == 1)
        # Fixes the original upper bound (odd + even_pairs), which wrongly
        # rejected cases like s="aaaa", k=3 -> "aa","a","a".
        return odd <= k <= len(s)
| gsy/leetcode | dp/canConstruct.py | canConstruct.py | py | 535 | python | en | code | 1 | github-code | 13 |
74083524176 | from numpy import sqrt, pi, linspace
import matplotlib.pyplot as plt
from ODESolver import ODESolver, ForwardEuler, RungeKutta4
def f(u, t):
    """Right-hand side of the Newtonian two-body problem (Sun fixed at origin).

    State u = [x, y, vx, vy] in AU and AU/yr; returns du/dt.
    """
    M = 1        # SolarMasses
    G = 4*pi**2  # AU^3/(yr^2*SM)
    x, y, vx, vy = u
    dx = vx
    dy = vy
    radius = sqrt(x**2 + y**2)
    # Vector form of the inverse-square law: a = -G*M*r_vec/|r|**3.
    # (The original divided by radius**2, i.e. a 1/r force law; it only looked
    # right because the demo orbit has radius exactly 1 AU.)
    dvx = -G*M*x/radius**3
    dvy = -G*M*y/radius**3
    return [dx, dy, dvx, dvy]
# Integrate ten years of motion for a planet started on a circular
# 1-AU orbit, then plot the trajectory in the orbital plane.
solver = RungeKutta4(f)
initial_state = [1, 0, 0, 2*pi]  # x [AU], y [AU], vx [AU/yr], vy [AU/yr]
solver.set_initial_condition(initial_state)
times = linspace(0, 10, 1001)
u, t = solver.solve(times)
# Equal axis scaling so the circular orbit is not distorted.
plt.plot(u[:, 0], u[:, 1])
plt.axis('equal')
plt.show()
| jgslunde/PythonPhysicsExercises | Python_solutions/ChapterE/orbits.py | orbits.py | py | 644 | python | en | code | 0 | github-code | 13 |
20976283975 | from torch.optim.lr_scheduler import _LRScheduler
class WarmupScheduler(_LRScheduler):
    """Linear learning-rate warmup: ramps every param group's lr from
    warmup_factor * lr at iteration 0 up to lr at num_warmup_iters."""

    def __init__(self, optimizer, lr: float, num_warmup_iters: int, warmup_factor: float):
        # Attributes must exist before the base __init__ runs, because
        # _LRScheduler.__init__ performs an initial step()/get_lr() call.
        self.lr = lr
        self.optimizer = optimizer
        self.num_warmup_iters = num_warmup_iters
        self.warmup_factor = warmup_factor
        super().__init__(optimizer)

    def get_lr(self):
        """Return the warmed-up learning rate for every param group."""
        self._check_state()
        # Linear interpolation from [self.warmup_factor * self.lr] to [self.lr].
        progress = self.last_epoch / self.num_warmup_iters
        current_lr = self.lr * (self.warmup_factor * (1 - progress) + progress)
        return [current_lr for _ in self.base_lrs]

    def _check_state(self):
        # Guard against stepping outside the configured warmup window.
        assert self.last_epoch >= 0
        assert self.last_epoch <= self.num_warmup_iters, \
            f'You are trying to use warmup after warmup period:' \
            f'Last epoch: {self.last_epoch}. Num warmup iters: {self.num_warmup_iters}'
| universome/human-pose | src/optims/warmup_scheduler.py | warmup_scheduler.py | py | 958 | python | en | code | 3 | github-code | 13 |
38538088767 | import cgi
import urllib
import re
import uuid
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import mail
from google.appengine.ext.webapp import template
class Phone(db.Model):
    """Datastore record mapping a phone number to the owner's e-mail address."""
    number = db.StringProperty(required = True)       # phone number; also used in the key name ('p_<number>')
    email = db.StringProperty(required = True)        # destination address for reminder mails
    status = db.StringProperty(required = True)       # lifecycle state; set to 'signup' on registration
    verify_code = db.StringProperty(required = True)  # random UUID issued at signup
    updated = db.DateTimeProperty(auto_now_add=True)  # set once, when the record is first stored
class ProcessSMS(webapp.RequestHandler):
    """Handles inbound SMS webhooks: 'signup <email>' registers the sender's
    phone number; any other text is mailed to the registered address."""
    def get(self):
        # 'p' = sender phone number, 't' = message text (as posted by the SMS gateway).
        number = cgi.escape(self.request.get('p').strip())
        text = cgi.escape(self.request.get('t').strip())
        signup = re.compile('signup (.*)')
        #signup = s.match(text)
        if number == '' or text == '':
            # Malformed webhook call: nothing to do.
            return
        else:
            if signup.match(text):
                # "signup <email>": register the phone/email pair.
                r = self.send_signup(signup.match(text).group(1), number)
            else:
                # Anything else: forward it as a reminder mail.
                r = self.send_reminder(text, number)
            self.response.out.write(r)
    def send_reminder(self, text, number):
        """Mail a Google-search link for *text* to the address registered for *number*."""
        body = "http://google.com/search?q=" + urllib.quote(text)
        subject = '[rmmbr] ' + text
        # Phone records are keyed by 'p_<number>' (see send_signup).
        email = Phone.get_by_key_name('p_' + number).email
        mail.send_mail('rmmbr.mail@gmail.com', email, subject, body)
    def send_signup(self, email, number):
        """Create (or overwrite) the Phone record for *number* and mail a welcome message."""
        p = Phone(
            key_name = 'p_'+number,
            number = number,
            email = email,
            status = 'signup',
            verify_code = str(uuid.uuid1())
            )
        Phone.put(p)
        body = '''
Welcome to rmmbr!
You're signed up now for phone# ''' + number + '''
Whenever you want to send yourself a reminder, just send the following text to 41411: "rmmbr <message>".
Enjoy!
'''
        subject = '[rmmbr] Welcome to rmmbr!'
        mail.send_mail('rmmbr.mail@gmail.com', email, subject, body)
class MainPage(webapp.RequestHandler):
    """Serves the landing page, canonicalising the legacy 'rmbr' host first."""

    def get(self):
        """Redirect rmbr.appspot.com requests to rmmbr.appspot.com; otherwise
        render the index template."""
        # handle rmbr -> rmmbr (TODO: something cleaner, with WSGI framework)
        requested_url = self.request.url
        canonical_url = requested_url.replace('http://rmbr.appspot.com', 'http://rmmbr.appspot.com')
        if canonical_url != requested_url:
            self.redirect(canonical_url)
            return
        template_path = os.path.join(os.path.dirname(__file__), 'views/index.html')
        self.response.out.write(template.render(template_path, {'sitename': 'rmmbr'}))
# URL routing table for the WSGI application.
application = webapp.WSGIApplication(
    [('/send', ProcessSMS), ('/', MainPage)],
    debug=True)


def main():
    """Entry point: hand the WSGI application to the App Engine runtime."""
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
| niryariv/rmmbr | main.py | main.py | py | 2,787 | python | en | code | 1 | github-code | 13 |
35216277709 |
class Square():
    """A square whose side length can be changed after construction."""

    def __init__(self, s):
        # Current side length.
        self.size = s

    def change_size(self, n):
        """Grow (or shrink, for negative n) the side by n, then print the new area."""
        self.size += n
        print(self.size ** 2)
# Demo: grow a 10-unit square by 1 — change_size prints the new area, 121.
square1 = Square(10)
square1.change_size(1)
input()  # keep the console window open until the user presses Enter
| Egor2725/Lesson6 | 6.py | 6.py | py | 242 | python | en | code | 0 | github-code | 13 |
10402122535 | # -*- coding: utf-8 -*-
"""Collect machine data with the Windows-only ``systeminfo`` command and
register the machine through the inventory application's API."""
import platform as pf
import urllib3
import json
import os

# Inventory application endpoints.
URL_BASE = 'http://127.0.0.1:8080'  # base URL of the inventory API
URL_LOC = '/api/location'           # location listing endpoint
URL_DEV = '/api/device'             # device registration endpoint
def device(loc, desc, marca, modelo):
    """Build the device payload dict for the inventory API.

    Args:
        loc: Location id the device belongs to.
        desc: HTML description assembled from ``systeminfo`` output.
        marca: Manufacturer name.
        modelo: Model name.

    Returns:
        dict: Payload ready to be JSON-encoded.  The 'system' key is only
        present on Windows ("w" + release) and Linux ("ld").
    """
    info = {'name': pf.node()}
    os_name = pf.system()
    if os_name == 'Windows':
        info['system'] = "w" + pf.release()
    elif os_name == 'Linux':
        info['system'] = "ld"
    info['marca'] = marca
    info['model'] = modelo
    info['location'] = loc
    info['description'] = desc
    return info
# Shared HTTP connection pool for every call to the inventory API.
http = urllib3.PoolManager()
# Location-list retrieval (feature currently disabled):
#response = http.request('GET', URL_BASE + URL_LOC)
#locations = json.loads(response.data.decode('utf-8'))
# Build the list of valid location ids for input validation (disabled):
#loc_ids = [l.get('id') for l in locations]
# Capture the output of the Windows "systeminfo" command.
command = 'systeminfo'
result = os.popen(command)
datos = result.readlines()  # one string per output line
lista = []   # raw lines that matched one of the labels below
desc = ''    # HTML description assembled from the matched lines
marca = ''   # system manufacturer
modelo = ''  # system model
# Build the HTML description from selected "systeminfo" fields.  The labels
# are Spanish-locale systeminfo output; the '¢'/'¡' characters are console
# code-page mojibake for 'ó'/'í' — presumably intentional, matching the raw
# command output (TODO confirm on a target machine).
for d in datos:
    if d.find('Nombre de host:') == 0:  # host name
        desc += d.replace('\n', '<BR>')
        lista.append(d)
    if d.find('Nombre del sistema operativo:') == 0:  # OS name
        desc += d.replace('\n', '<BR>')
        lista.append(d)
    if d.find('Versi¢n del sistema operativo:') == 0:  # OS version
        desc += d.replace('\n', '<BR>').replace('¢', 'o')
        lista.append(d)
    if d.find('Fecha de instalaci¢n original:') == 0:  # original install date
        desc += d.replace('\n', '<BR>').replace('¢', 'o')
        lista.append(d)
    if d.find('Fabricante del sistema:') == 0:  # system manufacturer
        desc += d.replace('\n', '<BR>')
        marca = d.split(" ")[-1].split("\n")[0]  # last word of the line
        lista.append(d)
    if d.find('Modelo el sistema:') == 0:  # system model
        desc += d.replace('\n', '<BR>')
        lista.append(d)
        modelo = d.split(" ")[-1].split("\n")[0]
    if d.find('Tipo de sistema:') == 0:  # system type
        desc += d.replace('\n', '<BR>')
        lista.append(d)
    if d.find('Cantidad total de memoria f¡sica:') == 0:  # total physical memory
        desc += d.replace('\n', '<BR>').replace('¡', 'i')
        lista.append(d)
    if d.find('Dominio:') == 0:  # domain
        desc += d.replace('\n', '<BR>')
        lista.append(d)
# Interactive selection and validation of the location id (disabled).
# NOTE(review): if re-enabled, prefer int(input(...)) — eval on user input is unsafe.
#print(locations)
#lid = eval(input('Ingrese el id de la localidad: '))
#isApple = False if int(lid) in loc_ids else True
#while isApple:
#    print('Seleccione un id correcto')
#    print(locations)
#    lid = eval(input('Ingrese el id de la localidad: '))
#    isApple = False if int(lid) in loc_ids else True
# Build the payload dict, encode it as JSON and POST it to the device
# endpoint (location id hard-coded to 1 while the selection above is disabled).
r = device(1, desc, marca, modelo)
encoded_data = json.dumps(r).encode('utf-8')
response = http.request('POST', URL_BASE + URL_DEV,
                        body=encoded_data, headers={'Content-Type': 'application/json'})
# Print the HTTP status code and the JSON document returned by the API.
print('*=========RESPONSE===========*')
print(response.status)
print(json.loads(response.data.decode('utf-8')))
| Wolksvidia/flask_inventory | get_machine_data.py | get_machine_data.py | py | 3,360 | python | es | code | 1 | github-code | 13 |
1553093051 | ####### Import Statements ############
import os
import sys
from PyInquirer import style_from_dict, prompt, Separator
from examples import custom_style_2
####### Global Variables ############
class DirectoryList:
    """
    Interactive browser over the user's home directory: repeatedly asks the
    user to pick a subfolder until they choose 'Exit', then prints the path.
    """

    def __init__(self):
        """Start browsing at the user's home directory."""
        self.path = os.path.expanduser("~")

    def get_directory_list(self, path=None):
        """
        Return the visible (non-hidden) entries of *path* — defaulting to the
        current path — followed by an 'Exit' sentinel choice.
        """
        target = path or self.path
        entries = [entry for entry in os.listdir(target)
                   if not entry.startswith('.')]
        entries.append('Exit')
        return entries

    def ask_directory_name(self, path=None):
        """
        Prompt the user with the folder choices; descend into the selection
        and recurse until 'Exit' is picked, then print the final path.
        """
        question = [{
            'type': 'list',
            'name': 'folder',
            'message': 'Please select the folder path to search.',
            'choices': self.get_directory_list(path)
        }]
        choice = prompt(question, style=custom_style_2).get('folder')
        if choice == 'Exit':
            print(self.path)
        else:
            self.path = os.path.join(self.path, choice)
            self.ask_directory_name(self.path)
class WordSearcher:
    """
    Placeholder for the word-search feature: grep a word within the chosen
    folder, restricted to a given file extension.

    Intended to return the matching file and line number. Not implemented yet.
    """
    pass
# Launch the interactive directory picker when the module is executed.
# NOTE(review): this also runs on import — consider an `if __name__ == "__main__":` guard.
d = DirectoryList()
d.ask_directory_name()
| sridharselvan/wordSearcher | base.py | base.py | py | 1,472 | python | en | code | 0 | github-code | 13 |
6369756051 | from flask import Flask
from flask_restful import Api, Resource, fields, marshal_with, marshal
import re
from flask import make_response, current_app
from flask_restful.utils import PY3
import json
app = Flask(__name__)
api = Api(app)  # flask_restful Api wrapper that owns resource routing for the app
# Plain data holder used to simulate the object returned to the client.
class User(object):
    """Demo model whose attributes get filtered by the marshal field map."""

    def __init__(self, user_id, name, age):
        self.user_id, self.name, self.age = user_id, name, age
# Declare the fields to expose during serialization; `age` is deliberately
# absent so it gets stripped from marshalled responses.
resoure_fields = {
    'user_id': fields.Integer,
    'name': fields.String
}
class DemoResource(Resource):
    """Demonstrates both flask_restful serialization styles on the same model."""

    def get(self):
        """Explicit style: call marshal() on the model object directly.

        marshal(data, fields, envelope) serializes the model into a dict
        restricted to the declared fields; a non-None envelope would nest the
        result under a key of that name.
        """
        demo_user = User(1, 'huang', 22)
        payload = marshal(data=demo_user, fields=resoure_fields, envelope=None)
        return payload, 200, {}

    # Decorator style: marshal_with wraps post() (it is a thin wrapper around
    # marshal — see its source) and serializes the intercepted return value,
    # nesting the dict under the 'data' key.
    @marshal_with(resoure_fields, envelope='data')
    def post(self):
        """Return the raw model; the decorator handles serialization."""
        return User(1, 'huang', 22)
api.add_resource(DemoResource, '/marshal')


# flask_restful view methods may return plain dicts because they are converted
# to JSON automatically; the stock converter lives in
# flask_restful.representations.json.output_json.  The @api.representation
# decorator registers a custom serializer for a given Content-Type.
@api.representation('application/json')
def output_json(data, code, headers=None):
    """Custom 'application/json' representation: wrap plain payloads in a
    {code, msg, data} envelope before serializing them."""
    # ****************************custom envelope*******************************
    # Payloads that already carry a 'msg' key are assumed to be pre-enveloped.
    if 'msg' not in data:
        data = {
            'code': 0,
            'msg': 'success',
            'data': data
        }
    # {"code": 0, "msg": "success", "data": {"user_id": 1, "name": "huang"}}
    # ***********************************************************************
    settings = current_app.config.get('RESTFUL_JSON', {})
    # In debug mode pretty-print the output (and sort keys on Python 2),
    # without overriding any value that was configured explicitly.
    if current_app.debug:
        settings.setdefault('indent', 4)
        settings.setdefault('sort_keys', not PY3)
    # Always terminate the JSON body with a newline
    # (see https://github.com/mitsuhiko/flask/pull/1262).
    serialized = json.dumps(data, **settings) + "\n"
    response = make_response(serialized, code)
    response.headers.extend(headers or {})
    return response
| HZreal/flask-learn | 14_app_restful_response_marshal.py | 14_app_restful_response_marshal.py | py | 2,961 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.