seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
70062472105 | import socket
from _thread import start_new_thread as s
from threading import Thread
from multiprocessing import Process
import threading
from random import randint as r
from random import choice
def eratosthenes2(n):
    """Yield every prime p with 2 <= p <= n, using an incremental sieve.

    Each prime found marks its multiples (starting at p*p) as composite,
    so later candidates found in the composite set are skipped.
    """
    composites = set()
    for candidate in range(2, n + 1):
        if candidate in composites:
            continue
        yield candidate
        composites.update(range(candidate * candidate, n + 1, candidate))
##eratosthenes2 code taken from https://stackoverflow.com/questions/33395903/efficient-method-for-generating-lists-of-large-prime-numbers
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm).

    Iterative instead of the original recursive form: behavior is identical
    (gcd(0, b) == b, gcd(a, 0) == a) but very large inputs can no longer
    hit Python's recursion limit.
    """
    while b:
        a, b = b, a % b
    return a
# --- RSA key generation at import time ---
# Pick a random sieve bound, list all primes up to it, and choose the two
# RSA primes p and q at random from that list.
# NOTE(review): p and q may be equal (choice with replacement) and the primes
# are small — this is toy/demo crypto, not secure.
li=r(300,350)
l=list(eratosthenes2(li))
p=choice(l)
q=choice(l)
n=p*q  # public modulus
# NOTE(review): 'global' at module level is a no-op; kept as-is.
global e
e=2
f=(p-1)*(q-1)  # Euler's totient of n
# Find the smallest public exponent e coprime with f.
while(e<f):
    if(gcd(e,f)==1):
        break
    else:
        e+=1
# Find the private exponent d: smallest d with (d*e) % f == 1,
# by scanning k until (k*f + 1) is divisible by e.
k=0
while(1):
    z=(k*f)+1
    if(z%e):
        k+=1
        continue
    else:
        d=((k*f)+1)//e
        break
# Shared server state:
dic={}        # maps client socket -> (public modulus, public exponent)
clients = set()             # connected client sockets
clients_lock = threading.Lock()  # guards 'clients' and broadcast loops
def encrypt(b, c):
    """Encrypt plaintext string *b* for client socket *c*.

    Looks up the client's registered RSA public key (modulus, exponent) in
    the module-level ``dic`` and maps each character through modular
    exponentiation of its code point.
    """
    modulus, exponent = dic[c]
    return "".join(chr(pow(ord(ch), exponent, modulus)) for ch in b)
def decrypt(s):
    """Decrypt ciphertext string *s* with this server's private RSA key.

    Uses the module-level modulus ``n`` and private exponent ``d``.
    """
    global n
    global d
    return "".join(chr(pow(ord(ch), d, n)) for ch in s)
def client_thread(conn):
    """Per-client handler: performs the key exchange, then relays chat
    messages (re-encrypted per recipient) until disconnect.
    """
    global e
    #conn.send((str(n)+" "+str(e)).encode('utf-8'))
    # First message from the client is its public key: "<modulus> <exponent>".
    publicn,publice=conn.recv(1024).decode('utf-8').split()
    if conn not in dic:
        dic[conn]=(int(publicn),int(publice))
    # Second message is the user's display name.
    user=conn.recv(1024).decode('utf-8')
    with clients_lock:
        # Announce the newcomer to everyone already connected.
        if(len(clients)):
            for i in clients:
                m=user+" ha entrado al chat"
                m=encrypt(m,i)
                i.sendall((m).encode('utf-8'))
        clients.add(conn)
    try:
        while True:
            data = conn.recv(1024)
            b=data.decode('utf-8')
            #print("Encrypted: "+b)
            b=decrypt(b)
            #print(b)
            aux=b  # keep the decrypted plaintext; b is reused per recipient below
            # NOTE(review): decode/decrypt run before the empty-data check;
            # harmless for b"" but ordering looks accidental — confirm.
            if not data:
                break
            if b=="disconnect":
                with clients_lock:
                    for c in clients:
                        if c!=conn:
                            m=user+" se ha desconectado."
                            m=encrypt(m,c)
                            c.sendall((m).encode('utf-8'))
                break
            # Relay the message, re-encrypted with each recipient's own key.
            with clients_lock:
                for c in clients:
                    if c!=conn:
                        b=aux
                        b=user+":"+b
                        b=encrypt(b,c)
                        #print("Encrypted for user: "+b)
                        c.send((b).encode('utf-8'))
    finally:
        # Always deregister the client and close its socket on exit.
        with clients_lock:
            clients.remove(conn)
        conn.close()
# --- Server bootstrap: listen on localhost:8081 and spawn one handler
# thread per accepted connection. ---
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("localhost",8081)) #Using IP address
#sock.bind(("localhost", 8081)) #Using localhost. also works with loopback 127.0.0.1, Assigning the server port and host
sock.listen(10)
print("Server on")
while True:
    # blocking call, waits to accept a connection
    conn, addr = sock.accept()
    # Send this server's RSA public key "<n> <e>" before handing off.
    conn.send((str(n)+" "+str(e)).encode('utf-8'))
    print("[-] Connected to " + addr[0] + ":" + str(addr[1]))
    # 's' is _thread.start_new_thread (aliased at import).
    s(client_thread, (conn,))
    #conn, addr = sock.accept() #returns tupple
    #print("Connection from: "+str(addr))
# NOTE(review): unreachable — the loop above never exits normally.
sock.close()
| Brian-RG/RSA-client-server-chat | server.py | server.py | py | 3,312 | python | en | code | 0 | github-code | 36 |
8114541825 | """
Defines a function wrapper that can be used to group calls to a 'listable'
function into batches.
"""
import sys
import traceback
import asyncio
from fsc.export import export
from . import wrap_to_coroutine
@export
class BatchSubmitter:
    """
    Function wrapper that collects calls to a function of one parameter, and submits
    it in batches to a function which can take a list of parameters.
    Arguments
    ---------
    func: Callable
        Function or coroutine which is "listable", i.e. given a list of input
        parameters it will return a list of results.
    loop: EventLoop
        The event loop on which the batch submitter runs. Uses
        ``asyncio.get_event_loop()`` by default.
    timeout: float
        Maximum time after which the batch submitter will submit all current
        tasks, even if the minimum batch size is not reached.
    sleep_time : float
        Time the batch submitter will sleep between checking if the minimum
        batch size has been reached.
    wait_batch_size : int
        Minimum batch size that will be submitted before the timeout has been
        reached. Is set to the same value as ``max_batch_size`` unless
        specified explicitly.
    max_batch_size : int
        The maximum size of a batch that will be submitted.
    """
    def __init__(
        self,
        func,
        *,
        loop=None,
        timeout=0.1,
        sleep_time=0.,
        wait_batch_size=None,
        max_batch_size=1000
    ):
        # Wrap plain functions so everything can be awaited uniformly.
        self._func = wrap_to_coroutine(func)
        self._loop = loop or asyncio.get_event_loop()
        self._timeout = timeout
        self._sleep_time = sleep_time
        if max_batch_size <= 0:
            raise ValueError('max_batch_size must be positive')
        self._max_batch_size = max_batch_size
        if wait_batch_size is None:
            wait_batch_size = self._max_batch_size
        if wait_batch_size <= 0:
            raise ValueError('wait_batch_size must be positive')
        self._wait_batch_size = wait_batch_size
        # Pending (input, future) pairs that are not yet part of a batch.
        self._tasks = asyncio.Queue()
        # Maps a running batch task -> the per-call futures it will resolve.
        self._batches = dict()
        self._submit_loop_task = None
        self._last_call_time = None
    async def __call__(self, x):
        """
        Adds a task for the given input, and starts the submission loop if needed.
        """
        fut = self._loop.create_future()
        self._tasks.put_nowait((x, fut))
        self._last_call_time = self._loop.time()
        # (Re)start the background submit loop if it is not currently running.
        if self._submit_loop_task is None or self._submit_loop_task.done():
            self._submit_loop_task = asyncio.Task(
                self._submit_loop(), loop=self._loop
            )
            self._submit_loop_task.add_done_callback(self._abort_on_exception)
        return await fut
    async def _submit_loop(self):
        """
        Waits for tasks and then creates the batches which evaluate the function.
        """
        # Exits (and is restarted by __call__) once the queue drains.
        while self._tasks.qsize() > 0:
            await self._wait_for_tasks()
            self._launch_batch()
    @staticmethod
    def _abort_on_exception(fut):
        """
        Callback that forces a SystemExit when there is an exception in the submit loop.
        """
        try:
            fut.result()
        except Exception:  # pylint: disable=broad-except
            sys.exit(''.join(traceback.format_exception(*sys.exc_info())))
    async def _wait_for_tasks(self):
        """
        Waits until either the timeout has passed or the queue size is big enough.
        """
        assert self._tasks.qsize() > 0
        while self._loop.time() - self._last_call_time < self._timeout:
            if self._tasks.qsize() >= self._wait_batch_size:
                return
            await asyncio.sleep(self._sleep_time)
    def _launch_batch(self):
        """
        Launch a calculation batch.
        """
        inputs = []
        futures = []
        # Drain up to max_batch_size pending calls from the queue.
        for _ in range(self._max_batch_size):
            try:
                key, fut = self._tasks.get_nowait()
                inputs.append(key)
                futures.append(fut)
            except asyncio.QueueEmpty:
                break
        task = asyncio.ensure_future(self._func(inputs))
        task.add_done_callback(self._process_finished_batch)
        self._batches[task] = futures
    def _process_finished_batch(self, batch_future):
        """
        Assign the results / exceptions to the futures of all finished batches.
        """
        task_futures = self._batches.pop(batch_future)
        try:
            results = batch_future.result()
            assert len(results) == len(task_futures)
            for fut, res in zip(task_futures, results):
                fut.set_result(res)
        except Exception as exc:  # pylint: disable=broad-except
            # A failed batch fails every call that was folded into it.
            for fut in task_futures:
                fut.set_exception(exc)
| FrescolinoGroup/pyasynctools | fsc/async_tools/_batch_submit.py | _batch_submit.py | py | 4,827 | python | en | code | 0 | github-code | 36 |
5275367948 | import os
import json
import psycopg2
import psycopg2.extras
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
# Sentence embedding model used for goal similarity (loaded at import time).
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
# Cosine-similarity threshold above which two goals count as a potential match.
alpha = 0.9
#Function that will take a list of goals and goal ids and output the similar goals
def my_calc_similarity(list_of_goals, list_of_ids):
sentence_embeddings = model.encode(list_of_goals)
#I want to turn this into a matrix of cosine similarity scores
sim_scores = cosine_similarity(sentence_embeddings)
#Set up a list to hold the potential matches goal ids
match_goal_ids = [[] for x in range(len(list_of_goals))]
for j in range(len(list_of_goals)):
#ignore the diagnol entry
cur_sim_scores = sim_scores[j]
cur_sim_scores[j] = 0
#index where greater than alpha
cur_potential_idx = [i for i,v in enumerate(cur_sim_scores) if v > alpha]
for k in range(len(cur_potential_idx)):
#Need to append the goal ID to the list
match_goal_ids[j].append(list_of_ids[cur_potential_idx[k]])
if j > cur_potential_idx[k]:
continue
#Print the current goals as potential matches
print("Potential Match: Goals ", j," and ", cur_potential_idx[k])
print("Potential Match: Goal ", j, ": ", list_of_goals[j],". and Goal ", cur_potential_idx[k],": ", list_of_goals[cur_potential_idx[k]])
return(match_goal_ids)
# Fetch all goals from Postgres and run the similarity pass.
# NOTE(review): connect_to_db is neither defined nor imported in this file —
# running it raises NameError; confirm which helper module was intended.
conn = connect_to_db()
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute("SELECT * FROM public.goals;")
rows = cur.fetchall()
cur_goals_list = [row['goal'] for row in rows]
cur_goal_ids = [row['id'] for row in rows]
match_ids = my_calc_similarity(cur_goals_list,cur_goal_ids)
| kcirtapfromspace/cloudfoundry_circleci | src/bert/cleaned_bert_similarity.py | cleaned_bert_similarity.py | py | 1,797 | python | en | code | 1 | github-code | 36 |
28523528787 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.model_coordinators.model_system import ModelSystem as CoreModelSystem
from urbansim.model_coordinators.model_system import ModelSystem as UrbanSimModelSystem
from opus_core.model_coordinators.model_system import main
class ModelSystem(UrbanSimModelSystem):
    """
    Uses the information in configuration to run/estimate a set of models.
    """
    #def _run_each_year_as_separate_process(self, start_year, end_year, seed_array, resources, log_file_name='run_multiprocess.log'):
    def _run_each_year_as_separate_process(self, iyear, year,
                                           seed=None,
                                           resources=None,
                                           profiler_name=None,
                                           log_file=None):
        """Run one simulation year: the core (non-travel) models first, then
        the travel models, honoring skip flags in *resources*.

        Returns True on success (also when the first year is skipped).
        """
        skip_first_year_of_urbansim = resources.get('skip_urbansim', False)
        if iyear == 0 and skip_first_year_of_urbansim:
            return True
        success = True
        #run urbansim
        # First year may optionally skip the non-travel models.
        if iyear > 0 or not resources.get('skip_non_travel_models_first_year', False):
            success = success and CoreModelSystem._run_each_year_as_separate_process(self, iyear, year,
                                                                                    seed=seed,
                                                                                    resources=resources,
                                                                                    profiler_name=profiler_name,
                                                                                    log_file=log_file
                                                                                    )
        # Travel models run every (non-skipped) year, after the core models.
        success = success and self._run_travel_models_from_resources_in_separate_processes(year, resources)
        return success
if __name__ == "__main__":
    # Best-effort: enable the Wing IDE debugger when available, else ignore.
    try: import wingdbstub
    except: pass
    main(ModelSystem)
9084026058 | import requests
import json
import re
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import NoSuchElementException
# Create a new Firefox session
# Launch Firefox via Selenium; marionette disabled (legacy driver mode).
capabilities = DesiredCapabilities.FIREFOX.copy()
capabilities['marionette'] = False
driver = webdriver.Firefox(capabilities=capabilities)
driver.implicitly_wait(10)  # implicit wait for element lookups, in seconds
#--------------------------------------- LOOKTIME -------------------------------------------------------#
# DESCRIPTION: Get the time (as a string) that a person spend in a place through Selenium
# PARAMETERS:
# INPUT: name: Name from a place with vicinity as a result from the request to the Google Places API
# OUTPUT: tiempo: Average time that a person spends in a place (Google's query)
#--------------------------------------------------------------------------------------------------------#
def looktime(name):
    """Google-search *name* and scrape the 'time spent' snippet text.

    Returns the snippet string when found and non-empty, False when the
    element is missing.
    NOTE(review): when the element exists but its text is empty, the function
    falls through and returns None — confirm callers treat None as falsy.
    """
    # Navigate to the application home page
    driver.get('https://www.google.com')
    search_field = driver.find_element_by_name('q')
    search_field.clear()
    print('looktime')
    print(name)
    # Enter search keyword and submit
    search_field.send_keys(name)
    search_field.submit()
    # Currently on result page using find_elements_by_xpath method
    # Exception in case don't find the time
    try:
        # '_B1k' is the (fragile) class of Google's popular-times widget.
        tiempo = driver.find_element_by_xpath("//div[@class='_B1k']/b").text
        if len(tiempo) != 0:
            return tiempo
    except NoSuchElementException:
        return False
#--------------------------------------- GET_TIME -------------------------------------------------------#
# DESCRIPTION: Tranform the time as a string into an integer (through regular expressions) and get
# the average time (in minutes)
# PARAMETERS:
# INPUT: tiempo: Average time as a string that a person spends in a place (Google's query)
# This is an output from the LOOKTIME function
# OUTPUT: fminutos: Average time in minutes as an integer
#--------------------------------------------------------------------------------------------------------#
def get_time(tiempo):
    """Parse a Google 'time spent' string and return the average in minutes.

    Handles Spanish minute forms ("15 min", "15minutos", ranges "10-20")
    and hour forms ("1 h", "1.5 horas"); when both a minute and an hour
    figure (or a range) are found, the values are averaged.
    """
    total_minutes = 0
    parts_found = 0
    # Minutes: "NNminutos", a range "NN-MM", or "NN min".
    minute_hits = re.findall(r'\b\d+\bminutos|\d+-\d+\b|\d+\b min', tiempo)
    if minute_hits:
        first_hit = str(minute_hits[0])
        if first_hit.find('-') >= 1:
            # Range: sum both ends and count them as two samples.
            low, high = first_hit.split("-")
            total_minutes = int(low) + int(high)
            parts_found += 2
        else:
            numeric_tokens = [int(tok) for tok in first_hit.split() if tok.isdigit()]
            total_minutes += int(numeric_tokens[0])
            parts_found += 1
    # Hours: normalize decimal comma first, then match "N h"/"N.N horas" etc.
    tiempo = tiempo.replace(",", ".")
    hour_hits = re.findall(r'\b\d+.\d+\b horas|\b\d+.\d+\b h|\b\d+\b horas|\b\d+\b h', tiempo)
    hour_values = []
    if hour_hits:
        for tok in hour_hits[0].split():
            try:
                hour_values.append(float(tok))
            except ValueError:
                pass
        total_minutes += int(hour_values[0] * 60)
        parts_found += 1
    # Average when two samples were collected (range, or minutes + hours).
    if parts_found >= 2:
        total_minutes = total_minutes / 2
    hour_values.clear()
    return total_minutes
#------------------------------------------ MAIN --------------------------------------------------------#
# DESCRIPTION: Add to the puntos_de_interes.json file the name and the average time in minutes that
# a person spends in a point of interest
#
# INPUT FILE: puntos_de_interes.json
# This file was generated by data_processing.py program and already contains the
# Valencia's points of interest with population, traffic and tweets
#
# OUTPUT FILE: puntos_de_interes.json
# Update of the input file
#--------------------------------------------------------------------------------------------------------#
def main():
    """Annotate every point of interest in puntos_de_interes.JSON with a name
    and an average visit time (minutes), via Google Places + a Google scrape.
    """
    # Read the original JSON
    path_input_file = 'puntos_de_interes.JSON'
    with open(path_input_file, "r") as input_file:
        data = json.load(input_file)
    result = {}
    result['type'] = data['type']
    result['crs'] = data['crs']
    result['features'] = []
    # Keys to associate with each search
    # Google Place API gives a list of types, on the other hand the accuracy of the results depends of
    # the choice from the types
    clave = {
        'PID': ("train_station"),
        'GTR': ("bus_station"),
        'GSP': ("hospital"),
        'TER': ("shopping_mall"), # Other type: department_store
        'EDA': ("park"), # Other type: gym, stadium
    }
    cont=0
    for feature in data['features']:
        y = str(feature['geometry']['coordinates'][0])
        x = str(feature['geometry']['coordinates'][1])
        cal = clave.get(feature['properties']['califi'], ("")) # Make the relation with the key
        # Request to Google Places API with a distance of 20 meters from the point of interest
        # NOTE(review): API key is hardcoded in the URL — should come from config.
        r = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?location='+x+','+y+'&rankby=distance&distance=20&type='+str(cal)+'&key=AIzaSyBydM3PpubE1x3_Et1e_ApoFRujEvbUer8')
        # Other possible request, focusing on the radio
        # 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location='+x+','+y+'&radius=500&type='+str(cal)+'&key=AIzaSyCAJQUnW6GpmM5PmDa22kJuNFtOrwJTHhI'
        # A JSON generated by the API request
        data2 = json.loads(r.content.decode("utf8"))
        for out in data2['results']:
            # Get the first 5 results for each request to the Google Places API
            if cont <= 4 :
                # We only need the name and the vicinity to get the time with LOOKTIME function
                z = str(out['name'] + ' ' + out['vicinity'])
                print(z)
                time = looktime(z)
                if time:
                    time = get_time(time)
                    break
                else:
                    cont = cont +1
                    continue
            else:
                z = str(data2['results'][0]['name'] + ' ' + data2['results'][0]['vicinity']) #In case thata any of the firts five results have time, by default sets the first one
                time=0
        # NOTE(review): if data2['results'] is empty, z/time keep the previous
        # iteration's reset values ("" / unset) — confirm intended behavior.
        # Update the puntos_de_interes.json file
        feature['properties']['nombre'] = z # Add the 'nombre' property
        feature['properties']['tiempo_medio'] = time # Add the 'tiempo_medio' property
        # Screen output to view program execution
        print("\n Name: " + z)
        print("\n Tiempo: " + str(time))
        result['features'].append(feature)
        z = ""
        time=""
        cont=0
    # Write the output file
    path_output_file = 'puntos_de_interes.JSON'
    with open(path_output_file, "w") as output_file:
        json.dump((result), output_file, indent=3)
    driver.quit()
| Axelflg/get_info_JSON | Find_info.py | Find_info.py | py | 7,050 | python | en | code | 0 | github-code | 36 |
7507981867 | import os, sys, random
from datetime import datetime
# Maps sys.platform values to human-readable OS names for the "os" command.
platforms = {
    "darwin": "MacOS",
    "win32": "Windows32",
    "win64": "Windows64",
    "linux": "Linux",
}
def run_tool():
    """Interactive REPL: supports 'os', 'clear', 'a+b+...' addition,
    'games' (three mini-games), and 'quit'. Loops until sys.exit().
    """
    while True:
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S")
        command = input(f"[{current_time}] > ")
        if command == "os":
            print(f"Detected operating system: {platforms.get(sys.platform, 'Unknown OS')}")
        elif command == "clear":
            # NOTE(review): 'clear' is a Unix command; won't clear on Windows (cls).
            os.system("clear")
        elif "+" in command:
            # Sum all '+'-separated integers, e.g. "1+2+3" -> 6.
            nums = command.split("+")
            result = 0
            for x in nums:
                result += int(x)
            print(result)
        elif command == "games":
            print("""1. Guess the word
2. Rock paper scissor
3. Mad libs generator""")
            which = input("\nWhich game would you like to play? ")
            if which == "1":
                # Word-guessing: pick a random word from words.txt and hint
                # its length plus first/last letters.
                try:
                    with open("words.txt", "r") as file:
                        words = [word.strip() for word in file]
                    word_to_guess = random.choice(words)
                    guess = input(f"What word has a length of {len(word_to_guess)} characters, starts with {word_to_guess[0]} and ends with {word_to_guess[-1]}? ")
                    if guess != word_to_guess:
                        print(f"Incorrect. The word was {word_to_guess}.")
                    else:
                        print("Correct!")
                except FileNotFoundError:
                    print("Can't find words.txt. Make sure it's in the same directory and not named something else.")
            elif which == "2":
                # Rock-paper-scissors against a random computer pick.
                choices = ["rock", "paper", "scissor"]
                user_choice = input("Enter your choice: ").lower()
                computer_choice = random.choice(choices)
                if user_choice == computer_choice:
                    print("Tie.")
                elif user_choice == "rock" and computer_choice == "scissor":
                    print("Rock beats scissor. You win!")
                elif user_choice == "rock" and computer_choice == "paper":
                    print("Paper beats rock. Computer wins!")
                elif user_choice == "scissor" and computer_choice == "rock":
                    print("Rock beats scissor. Computer wins!")
                elif user_choice == "scissor" and computer_choice == "paper":
                    print("Scissor beats paper. You win!")
                elif user_choice == "paper" and computer_choice == "scissor":
                    print("Scissor beats paper. Computer wins!")
                elif user_choice == "paper" and computer_choice == "rock":
                    print("Paper beats rock. You win!")
                else:
                    print("Invalid choice.")
            elif which == "3":
                # Mad libs: substitute five user words into a template.
                give_me_words = input("Enter 5 words: ").split()
                print(f"The {give_me_words[0]} was slimy, it tasted like {give_me_words[1]}. I stayed at {give_me_words[2]} place. He made me {give_me_words[3]} in the morning. We packed up at {give_me_words[4]}.")
        elif command == "quit":
            sys.exit()
        else:
            print(f'Unknown command: "{command}" ')

run_tool()
| Vincent2212/CommandLine | main.py | main.py | py | 3,462 | python | en | code | 0 | github-code | 36 |
2039679921 | #!/usr/bin/env python3
import requests, json, dewiki, sys
class ArgvError(Exception):
    """Raised when the script is not given exactly one command-line argument."""

    def __init__(self):
        # super() instead of calling Exception.__init__ directly — identical
        # behavior, idiomatic cooperative initialization.
        super().__init__("Put in only '1' Value")
class Search:
    """Fetch a Wikipedia article's wikitext, keep only its bullet-list lines,
    and write them to '<keyword>.wiki'.

    The fetch runs from __init__, so constructing an instance performs the
    whole download-and-write cycle.
    """

    ep = 'https://en.wikipedia.org/w/api.php'  # MediaWiki API endpoint
    session = requests.Session()  # shared across all instances
    #endpoint
    text = ''

    def __init__(self, keyword):
        self.keyword = keyword
        self.params = {
            "action": "parse",
            "format": "json",
            "page": keyword,
            "redirects": 1,
            "prop": "wikitext",
            "contentmodel" : "wikitext",
            "formatversion": "2",
        }
        self.result()

    def result(self):
        """Download, filter and persist the article; return the filtered text.

        Exits the process when the API reports an error for the keyword.
        """
        data = self.session.get(url = self.ep, params = self.params).json()
        try:
            if 'error' in data.keys():
                raise Exception("%s is not Exist!" % self.keyword)
        except Exception as e:
            print(e)
            exit()
        # Title header, then every wikitext line that is a bullet ('*'),
        # stripped of trailing <ref> markup.
        text = '\t\t' + dewiki.from_string(data['parse']['title']) + '\n\n'
        for item in dewiki.from_string(data['parse']['wikitext']).split('\n'):
            if (len(item) != 0 and item[0] == '*'):
                text += '\t' + item.split('<ref>')[0] + '\n'
        try:
            # NOTE(review): text is always a str here, so this guard is dead code.
            if (text == None):
                raise Exception("Failed Search")
        except Exception as e:  # FIX: was 'Exciption' — a NameError if reached
            print (e)
            exit()
        with open(self.keyword + '.wiki', 'w+') as fd:
            fd.write(text)
        return text

    def __str__(self):
        # NOTE(review): re-runs the full fetch + file write on every str() call.
        return self.result()
if (__name__ == '__main__'):
    # Require exactly one argument: the Wikipedia page title to fetch.
    try:
        if (len(sys.argv) != 2):
            raise ArgvError()
    except ArgvError as e:
        print(e)
        exit()
    search = Search(sys.argv[1])
| youngmoneee/django_piscine | day03/ex02/request_wikipedia.py | request_wikipedia.py | py | 1,638 | python | en | code | 0 | github-code | 36 |
18903389392 | from uuid import uuid4
from logging import getLogger
from uchicagoldrtoolsuite import log_aware
from uchicagoldrtoolsuite.core.lib.convenience import log_init_attempt, \
log_init_success
from .abc.transformer import Transformer
from ..structures.archive import Archive
from ..structures.stage import Stage
__author__ = "Tyler Danstrom, Brian Balsamo"
__email__ = " tdanstrom@uchicago.edu, balsamo@uchicago.edu"
__company__ = "The University of Chicago Library"
__copyright__ = "Copyright University of Chicago, 2016"
__publication__ = ""
__version__ = "0.0.1dev"
log = getLogger(__name__)
class ArchiveToStageTransformer(Transformer):
    """The ArchiveToStageTransformer takes an instance of an Archive structure
    and copies its contents into an instance of a Stage structure
    """
    @log_aware(log)
    def __init__(self, origin_structure):
        """instantiates an instance of an ArchiveToStageTransformer
        It starts with the origin structure passed as a parameter
        and sets an empty destination structure.
        ___Args__
        1. origin_structure (Archive) : a fully realized instance of a
        Archive structure
        """
        log_init_attempt(self, log, locals())
        self.origin_structure = origin_structure
        self.destination_structure = None
        log_init_success(self, log)
    @log_aware(log)
    def transform(self, stage_identifier=None):
        """returns a fully realized Stage structure containing the contents
        of the origin Archive structure.
        It copies the contents of the Archive structure into the new Stage
        structure and sets the data attribute destination_structure before
        returning said destination structure data attribute value.
        """
        log.info("Transforming an Archive into a Stage")
        # A transformer instance is single-use: refuse to run twice.
        if self.destination_structure is not None:
            raise TypeError("a transformation already occured.")
        if stage_identifier is None:
            log.debug("No stage identifier provided, setting to a uuid")
            stage_identifier = uuid4().hex
        self.destination_structure = Stage(stage_identifier)
        log.debug("Moving materialsuites into the Stage")
        for n_materialsuite in self.origin_structure.materialsuite_list:
            self.destination_structure.add_materialsuite(
                n_materialsuite
            )
        log.debug("Moving accession records into the Stage")
        for n_accessionrecord in self.origin_structure.accessionrecord_list:
            self.destination_structure.add_accessionrecord(
                n_accessionrecord
            )
        log.debug("Moving legalnotes into the Stage")
        for n_legalnote in self.origin_structure.legalnote_list:
            self.destination_structure.add_legalnote(
                n_legalnote
            )
        log.debug("Moving adminnotes into the Stage")
        for n_adminnote in self.origin_structure.adminnote_list:
            self.destination_structure.add_adminnote(
                n_adminnote
            )
        log.debug("Transformation complete, returning result")
        return self.destination_structure
    @log_aware(log)
    def get_origin_structure(self):
        """returns the origin structure, in this case a fully-realized Archive
        structure
        """
        return self._origin_structure
    @log_aware(log)
    def set_origin_structure(self, value):
        """sets the origin structure: it will only accept an Archive structure
        """
        if isinstance(value, Archive):
            self._origin_structure = value
        else:
            raise ValueError("ArchiveToStageTransformerr must have an " +
                             "instace of an Archive in origin_structure")
    @log_aware(log)
    def get_destination_structure(self):
        """returns the destination structure, or the structure created from
        transform method
        """
        return self._destination_structure
    @log_aware(log)
    def set_destination_structure(self, value):
        """sets the destination structure, a Stage structure
        """
        self._destination_structure = value
    @log_aware(log)
    def __repr__(self):
        # NOTE(review): the repr string is missing its closing '>'.
        return "< transform from archive {} to stage {}".\
            format(id(self.origin_structure),
                   id(self.destination_structure))
    destination_structure = property(get_destination_structure,
                                     set_destination_structure)
    origin_structure = property(get_origin_structure, set_origin_structure)
| uchicago-library/uchicagoldr-toolsuite | uchicagoldrtoolsuite/bit_level/lib/transformers/archivetostagetransformer.py | archivetostagetransformer.py | py | 4,571 | python | en | code | 0 | github-code | 36 |
31180051801 | from dataclasses import dataclass
from datetime import datetime
from sqlalchemy_serializer import SerializerMixin
from app import db
@dataclass
class Result(db.Model, SerializerMixin):
    """Persisted build-result row for a Jenkins job.

    NOTE(review): combining @dataclass with a SQLAlchemy declarative model is
    unusual — the bare annotations drive dataclass/serializer field discovery,
    while the db.Column assignments below define the actual table schema.
    """
    __tablename__ = 'results'
    name: str
    last_build: int
    last_result: str
    last_exception: str
    traceback: str
    url: str
    last_update: datetime
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String())           # job name
    last_build = db.Column(db.Integer)      # last build number seen
    last_result = db.Column(db.String())    # e.g. SUCCESS / FAILURE
    last_exception = db.Column(db.Text())
    traceback = db.Column(db.Text())
    url = db.Column(db.String())            # link to the job/build
    last_update = db.Column(db.DateTime, nullable=False,
                            default=datetime.utcnow)
| TomerCohen95/JenkinsViewer | models.py | models.py | py | 740 | python | en | code | 0 | github-code | 36 |
74039762662 | from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework import status
from rest_framework import serializers
from .models import *
from .permissions import ValidApiKey
import random
#Supporting Functions
def random_string(range_max, string=None, unique=False):
    """Return *range_max* random characters drawn from *string*.

    Falls back to a default alphanumeric-plus-symbols alphabet when *string*
    is empty or None. With unique=True, each picked character is removed from
    the pool (one occurrence per pick), so no character repeats more often
    than it appears in the pool.
    """
    pool = string if string else 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789?><.:;@#()'
    picked_chars = []
    for _ in range(range_max):
        picked = random.choice(pool)
        if unique:
            # Drop one occurrence of the picked character from the pool.
            before, _sep, after = pool.rpartition(picked)
            pool = before + after
        picked_chars.append(picked)
    return ''.join(picked_chars)
def get_results_for_word_length(word_length, words, special, combos):
    """Score one word-length bucket of submitted words against valid combos.

    Duplicates are scored once. Scoring: 9-letter hit is a flat 150;
    otherwise each valid word is worth word_length, doubled when it contains
    the special letter.
    """
    # Get only one instance of each word
    unique_words = list(set(words))
    matched = [w for w in unique_words if w in combos]
    score = 0
    for word in matched:
        if word_length == 9:
            score = 150
        elif special in word:
            score += word_length * 2
        else:
            score += word_length
    return {
        'result': {
            'score': score,
            'scoredWords': matched,
            'unscoredWords': [w for w in unique_words if w not in combos],
            'wordsChecked': words,
        }
    }
#Returns a dictionary object with the scores for the actual solution
def score_solution(word_list,special):
response_data = {
'result' : {},
'totalScore' : 0,
'allPossibleWords' : word_list,
}
for i in range(3,10):
result = get_results_for_word_length(i,[word for word in word_list if len(word) == i],special,word_list)
response_data['result'][str(i)+'letter'] = result['result']
response_data['totalScore'] += result['result']['score']
return response_data
#Serializers
class NonogramSerializer(serializers.ModelSerializer):
    """Serializes a Nonogram row, exposing the solution word and all combos."""
    class Meta:
        model = Nonogram
        fields = ('id','word','combos')
#VIEWS
@api_view(['GET'])
def test(request):
    """Smoke-test endpoint: logs a random string and returns "hello"."""
    print('I am a test', random_string(128))
    return Response("hello", status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([ValidApiKey])
def get_nonogram(request):
    """Return a random puzzle: its id plus the 9-letter word with its
    letters shuffled (unique=True permutes without repeats)."""
    words = Nonogram.objects.all().values('word','id')
    random_word = random.choice(words)
    #randomize the letters in the word
    random_word['word'] = random_string(9,random_word['word'],True)
    return Response(random_word, status=status.HTTP_200_OK)
############# SCORING RULES ##################
# must have 3 or more letters
# 1 point for letter
# If contains special character multiply by number of letters
# If nine letter words score 150
@api_view(['GET','POST'])
@permission_classes([ValidApiKey])
def score_word(request):
    """Score a submitted word list for puzzle `id` against its valid combos.

    Expects request.data: id, word, special, word_list. Responds with the
    per-length breakdown, total score, and the full perfect-game solution.
    """
    #Get the nonogram
    try:
        nonogram = Nonogram.objects.get(id=request.data['id'])
    except Exception as e:
        print(e)
        return Response({'message':'Word does not exist'}, status=status.HTTP_404_NOT_FOUND)
    #Collect from payload
    word_list = request.data.get('word_list')
    special_letter = request.data.get('special')
    if not special_letter:
        return Response({'message':'Please provide the special character'}, status=status.HTTP_400_BAD_REQUEST)
    if word_list:
        #Setup dictionary result object
        payload = {
            'id' : nonogram.id,
            'solvedWord' : nonogram.word,
            'sentWord' : request.data['word'],
            'specialLetter' : special_letter,
            'result' : {},
            'totalScore' : 0,
            'scoredWords' : [word for word in word_list if word in nonogram.combos],
            'unscoredWords' : [word for word in word_list if word not in nonogram.combos],
            'sentWords' : word_list,
            'solution' : score_solution(nonogram.combos,special_letter),
        }
        # Score each word-length bucket (3..9 letters) and accumulate.
        for i in range(3,10):
            result = get_results_for_word_length(i,[word for word in word_list if len(word) == i],special_letter,nonogram.combos)
            payload['result'][str(i)+'letter'] = result['result']
            payload['totalScore'] += result['result']['score']
    else:
        return Response({'message':'Please provide a word list'}, status=status.HTTP_400_BAD_REQUEST)
    #words = Nonogram.objects.all().values('word')
    return Response(payload, status=status.HTTP_200_OK)
@api_view(['GET','POST'])
@permission_classes([ValidApiKey])
def get_solution(request):
    """Return the serialized puzzle (word + all valid combos) for `id`."""
    try:
        nonogram = Nonogram.objects.get(id=request.data['id'])
    except Exception as e:
        print(e)
        return Response({'message':'Word does not exist'}, status=status.HTTP_404_NOT_FOUND)
    return Response(NonogramSerializer(nonogram).data, status=status.HTTP_200_OK)
@api_view(['GET','POST'])
@permission_classes([ValidApiKey])
def get_solution_with_score(request):
    """Return the puzzle's word plus a fully scored perfect-game breakdown.

    Requires request.data: id and special (the special letter).
    """
    try:
        nonogram = Nonogram.objects.get(id=request.data['id'])
    except Exception as e:
        print(e)
        return Response({'message':'Word does not exist'}, status=status.HTTP_404_NOT_FOUND)
    if request.data.get('special'):
        special_letter = request.data['special']
    else:
        return Response({'message':'No speical letter has been sent.'}, status=status.HTTP_404_NOT_FOUND)
    response_data = {
        'id' : nonogram.id,
        'word' : nonogram.word,
        'solution' : score_solution(nonogram.combos,special_letter)
    }
    return Response(response_data, status=status.HTTP_200_OK)
| ChrisBriant/nonogram_backend | api/views.py | views.py | py | 5,695 | python | en | code | 0 | github-code | 36 |
11580214091 | import os
import time
import random
import sys
import time
import math
from datetime import datetime
from getpass import getpass
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
Hour = 3600           # seconds per hour
Minute = 60           # seconds per minute
TotalNum=1            # running count of pages visited across all rounds
NumForOnePage = 16    # profiles per search-result page; index wraps after this
def IntTryParse(val):
    """Return int(val) when the conversion succeeds, otherwise val unchanged.

    Only ValueError is caught — e.g. int(None) (TypeError) still propagates,
    matching the original behavior.
    """
    try:
        parsed = int(val)
    except ValueError:
        return val
    return parsed
def LeaveFootPrint(repeatStr):
    """Visit 500-700 profile pages, cycling page index 1..NumForOnePage,
    pausing 3-7 s between visits. *repeatStr* is only used in the log line.
    """
    global TotalNum
    num=1          # page index within the site's result list (wraps)
    currentNum = 1 # visits made in this round
    maxNum = random.randint(500,700)
    print("Leave %s footprints"%str(maxNum))
    while currentNum < maxNum:
        src= "https://pairs.lv/#/search/one/%s"%str(num)
        driver.get(src)
        print("Current: {0}/{1}, Total: {2}, {3}".format(str(currentNum), maxNum, str(TotalNum), repeatStr))
        # Wrap back to the first index after reaching the last page slot.
        if num == NumForOnePage:
            num = 0
        num += 1
        currentNum += 1
        TotalNum += 1
        time.sleep(random.randint(3,7))
def ShowElapsedTime(startTime):
    """Print the elapsed time since *startTime* as hours/minutes/seconds
    (Japanese format). Returns nothing.
    """
    elapsed_time = time.time() - startTime
    hour = math.floor(elapsed_time / Hour)
    elapsedHour = hour * Hour
    minite = math.floor((elapsed_time - elapsedHour) / Minute)
    # NOTE(review): [:2] truncates the seconds string, so e.g. 9.73 -> "9."
    # — crude formatting, kept as-is.
    sec = str(elapsed_time - elapsedHour - minite * Minute)[:2]
    print("所要時間は「%s時間%s分%s秒」"%(str(hour), str(minite), str(sec)))
def TakeRest():
    """Sleep 15-30 minutes on the search grid page between rounds."""
    minutesToRest = random.randint(15,30)
    print("Take a rest for {0} minutes".format(str(minutesToRest)))
    nowTime = datetime.now()
    # NOTE(review): minute + minutesToRest can exceed 59, printing e.g. "12:75".
    print("will end %s:%s"%(str(nowTime.hour), str(nowTime.minute + minutesToRest)))
    driver.get("https://pairs.lv/#/search/grid/1")
    time.sleep(minutesToRest * 60)
def GetRepeatString(counter, maxRepeatNum):
    """Build the "Repeat: ..." progress label.

    maxRepeatNum == 0 -> "なし" (none); > 0 -> "counter/max"; < 0 -> "無限" (infinite).
    """
    if maxRepeatNum == 0:
        suffix = "なし"
    elif maxRepeatNum > 0:
        suffix = "{0}/{1}".format(str(counter), str(maxRepeatNum))
    else:
        suffix = "無限"
    return "Repeat: " + suffix
driver = webdriver.Chrome(r'./chromedriver.exe')
driver.get("https://pairs.lv/#/login")
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "registerBtn1"))
)
if element is not None:
print(element)
driver.execute_script("arguments[0].click();", element)
key = input('pairsのトップページが出たらrepeat回数を指定してください(マイナスの値は無限ループ)')
while not isinstance(IntTryParse(key), int):
print("数字を入力してください")
key = input()
print("Start!")
maxRepeatNum = int(key)
counter = 1
while True:
startTime = time.time()
print("%s回目"%str(counter))
LeaveFootPrint(GetRepeatString(counter, maxRepeatNum))
ShowElapsedTime(startTime)
if (maxRepeatNum > -1 and counter > maxRepeatNum):
print("End")
break
TakeRest()
counter += 1 | tomo-jp/pairsBot | pairs_automation.py | pairs_automation.py | py | 2,928 | python | en | code | 0 | github-code | 36 |
71789959143 | AIRFLOW_API_URL = 'localhost:8080'
AIRFLOW_API_AUTH = ('airflow', 'airflow')
AIRFLOW_API_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
DAG_ID_TO_CLONE = 'test_base_dag'
CLONED_DAG_ID = 'test_base_8431_dag'
| dmitriidavs/practice | AWS/lambda/creds.py | creds.py | py | 250 | python | en | code | 0 | github-code | 36 |
19633985319 | from pydub import AudioSegment
from pydub.silence import split_on_silence
import os
import multiprocessing
def split_audio(filename, audio_folder='audio_folder', output_folder='segments'):
    """Split one mp3 into silence-delimited segments written to *output_folder*.

    filename: mp3 file name (with extension) located inside *audio_folder*.
    Segments shorter than 2 seconds are merged into the preceding segment so
    no tiny fragments are emitted. Output files are named
    "<stem>_segment<i>.mp3".
    """
    # Check if output folder exists and if not, create it
    if not os.path.isdir(output_folder):
        os.mkdir(output_folder)
    filepath = os.path.join(audio_folder, filename)
    audio_file = AudioSegment.from_mp3(filepath)
    # Split track where the silence is 300 milliseconds or more and get chunks
    chunks = split_on_silence(
        audio_file,
        # Must be silent for at least 300 milliseconds
        min_silence_len=300,
        # Consider it silent if quieter than -36 dBFS
        silence_thresh=-36
    )
    # If chunks shorter than 2 seconds, append to the previous chunk
    min_len = 2 * 1000 # 2 seconds in ms
    chunks_corrected = []
    for chunk in chunks:
        # merge short chunk into the previous one (pydub segments support +)
        if len(chunk) < min_len and chunks_corrected:
            chunks_corrected[-1] += chunk
        else:
            chunks_corrected.append(chunk)
    # Export all of the individual chunks as .mp3 files
    for i, chunk in enumerate(chunks_corrected):
        # Remove the last 4 characters of filename (.mp3)
        out_file = os.path.join(output_folder, f"{filename[:-4]}_segment{i}.mp3")
        chunk.export(out_file, format="mp3")
        print(f"Finished splitting{out_file}")
def main(audio_folder):
    """Split every .mp3 in *audio_folder* in parallel, one worker per CPU core."""
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    audio_files = [f for f in os.listdir(audio_folder) if f.endswith('.mp3')]
    # each task gets (filename, audio_folder); output goes to the default 'segments' dir
    pool.starmap(split_audio, [(f, audio_folder) for f in audio_files])
if __name__ == "__main__":
main(r"C:\Users\Harsh\Documents\gap\gapvoice\audio_preprocessing\mp3")
| harshbhatia66/BallsDeepLearning | DataPipeline/segment_audio.py | segment_audio.py | py | 1,714 | python | en | code | 0 | github-code | 36 |
6795419791 | from django.conf import settings
from rest_framework import viewsets, exceptions
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from utils.certification_helpers import CertificationWorkshopWrapper
from utils.drf.authentication import UsernameAuthentication
from ...models import Event
from ...helpers import EventPermissionHelper
from ...tasks import SummitRequestTask
from ..serializers import (
EventAvailableSerializer,
EventTypesSerializer,
EventChangeStatusSerializer,
EventDetailSerializer,
EventSerializer,
EventListSerializer,
RequestSummitSerializer,
)
from .custom_pagination import StandardResultsSetPagination
class EventViewSet(viewsets.ModelViewSet):
model = Event
permission_classes = (IsAuthenticated, )
pagination_class = StandardResultsSetPagination
authentication_classes = (JSONWebTokenAuthentication, UsernameAuthentication, )
lookup_field = 'uuid'
serializers = {
'default': EventListSerializer,
'create': EventSerializer,
'update': EventSerializer,
'change_status': EventChangeStatusSerializer,
'retrieve': EventDetailSerializer,
'permissions': EventAvailableSerializer,
'events_types': EventTypesSerializer,
'request_summit': RequestSummitSerializer,
}
def get_serializer_class(self):
return self.serializers.get(
self.action,
self.serializers['default'],
)
def get_queryset(self):
return self.model.objects.filter_by_user(self.request.user).distinct()
def check_edit_permissions(self):
event = self.get_object()
can_edit = self.request.user.has_perm(
settings.EVENT_PERMS_EDIT_EVENT,
event,
)
if not can_edit and self.request.user.uuid != event.created_by.uuid:
raise exceptions.PermissionDenied
def create(self, request, *args, **kwargs):
helper = EventPermissionHelper()
can_create = helper.has_perm(
request.user,
'create_{}'.format(request.data.get('category')),
)
if not can_create:
raise exceptions.PermissionDenied
return super().create(request, *args, **kwargs)
def perform_create(self, serializer):
serializer.save(user_from=self.request.user)
@action(methods=['get'], detail=False)
def permissions(self, request):
helper = EventPermissionHelper()
user_objects = helper.get_events_available(request.user)
serializer = EventAvailableSerializer(
list(filter(lambda x: x[0] in user_objects, settings.EVENT_TYPE_CHOICES)),
many=True,
)
return Response(serializer.data)
@action(methods=['get'], detail=False)
def events_types(self, request):
event_available_data = []
helper = EventPermissionHelper()
for event_type in settings.EVENT_TYPE_CHOICES:
event_available_data.append([
event_type[0],
event_type[1],
helper.has_perm(request.user, 'create_{}'.format(event_type[0])),
])
serializer = EventTypesSerializer(event_available_data, many=True)
return Response(serializer.data)
@action(methods=['put'], detail=True)
def change_status(self, request, uuid):
self.check_edit_permissions()
serializer = self.get_serializer(
instance=self.get_object(),
data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(user_from=request.user)
return Response(serializer.data)
@action(methods=['post'], detail=False)
def request_summit(self, request):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
SummitRequestTask().s(
user_uuid=self.request.user.uuid.__str__(),
**serializer.validated_data,
).apply_async()
return Response(serializer.data)
def perform_destroy(self, instance):
self.check_edit_permissions()
instance.status = (self.request.user, settings.EVENT_CH_STATUS_DELETED)
@action(methods=['post'], detail=True, url_path='send-certificates')
def send_certificates(self, request, uuid):
event = self.get_object()
certification_wrapper = CertificationWorkshopWrapper(event)
certification_wrapper.release_group_credential(request.user, event)
return Response()
| tomasgarzon/exo-services | service-exo-events/event/api/views/event.py | event.py | py | 4,675 | python | en | code | 0 | github-code | 36 |
# Lists: ordered, mutable collections (may mix types, even nest other lists)
nombre = ['Edu', 'Joel', 'Richard', 'Mike', 'Rick']
otros = [1, 'Agosto', False, [1,3,4], 10.5]
# Tuples: like lists, but immutable (cannot be modified after creation)
notas = (1, 10, 20, 13, 14, 'Eduardo')
# Dictionaries: ordered by insertion, looked up by key rather than by position
usuarios = {
    'nombre':'Eduardo',
    'apellido':'de Rivero',
    'edad':30,
    'soltero': True
}
# Sets: the only unordered collection here; mutable, and duplicates are dropped
datos = { 10, 80, 'juan','arequipa'}
print(datos) | blasdch18/Code10 | Backend/backend10/testFlask/colecciondata.py | colecciondata.py | py | 494 | python | es | code | 0 | github-code | 36 |
74144129703 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =======================================
# Author : NareN
# git : https://github.com/DEVELByte
# =======================================
import logging
import os
import re
import argparse
from develbyte import create_app
logger = logging.getLogger("default")
def purge(directory, pattern):
    """Delete every file in *directory* whose name matches the regex *pattern*."""
    matcher = re.compile(pattern)
    for entry in os.listdir(directory):
        if matcher.search(entry):
            os.remove(os.path.join(directory, entry))
def arguments():
    """Parse command-line options; --configName/-c selects the app config to load."""
    # NOTE(review): the parser description ("Process some integers.") is leftover
    # boilerplate, and the help text says "absolute path" while main() passes the
    # value to create_app as a config *name* — confirm which is intended.
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--configName', '-c', help='pass a absolute path to the config file')
    return parser.parse_args()
def print_config(config):
    """Log every key/value of a dict-like config, left-padding keys to 50 chars."""
    for _config in config.keys():
        logger.info("{key: <50}: {value}".format(key=_config, value=config[_config]))
if __name__ == '__main__':
purge(".", "nohup.out")
args = arguments()
app = create_app(args.configName)
print_config(app.config)
app.run(port=app.config.PORT, threaded=True)
| im-naren/flask-starter-kit | runserver.py | runserver.py | py | 1,027 | python | en | code | 0 | github-code | 36 |
11642826592 | from django.shortcuts import render
from django.views.generic import CreateView, UpdateView, ListView
from .models import Empresa
from .forms import EmpresaForm
from notafiscal.models import NotaFiscal
def empresa_list(request):
template_name ='empresa_list.html'
objects = Empresa.objects.all()
context ={'object_list' : objects}
return render(request, template_name,context)
def notafiscal_list2(request, empresa):
    """Render the invoice-list template for one company.

    NOTE(review): this filters *Empresa* on a field named `empresa` and passes
    the queryset as `object`, yet the template is notafiscal_list.html and
    NotaFiscal is imported above — it looks like the intent was to query
    NotaFiscal for the given company. Verify against the template and models.
    """
    template_name ='notafiscal_list.html'
    obj = Empresa.objects.filter(empresa = empresa)
    context ={'object' : obj}
    return render(request, template_name,context)
def empresa_add(request):
template_name='empresa_form.html'
return render(request,template_name)
class EmpresaCreate(CreateView):
model = Empresa
template_name='empresa_form.html'
form_class=EmpresaForm
class EmpresaList(ListView):
model = Empresa
template_name = 'empresa_list.html'
paginate_by = 10
class EmpresaUpdate(UpdateView):
model = Empresa
template_name='empresa_form.html'
form_class=EmpresaForm | vvalcristina/notafiscal | nfe/empresa/views.py | views.py | py | 1,063 | python | en | code | 2 | github-code | 36 |
31292738689 | import os
import configparser
def num_cpus():
    """Return the number of CPUs available to this process.

    Prefers the scheduler affinity mask (Linux) and falls back to the total
    CPU count on platforms without os.sched_getaffinity.
    """
    getaffinity = getattr(os, "sched_getaffinity", None)
    if getaffinity is None:
        return os.cpu_count()
    return len(getaffinity(0))
def max_workers():
    """Gunicorn worker-count heuristic: (2 x CPUs) + 1."""
    return 2 * num_cpus() + 1
# Read configuration file
config = configparser.ConfigParser()
config.read('config.ini')
# Set bind variable from configuration file
port = config['SETTINGS']['port']
bind = f"0.0.0.0:{port}"
logfile = "gunicorn.log"
workers = max_workers()
timeout = 300
| SaltisRS/ChatEndpoint-osrs | gunicorn_config.py | gunicorn_config.py | py | 467 | python | en | code | 0 | github-code | 36 |
12896234781 | """
Provides RESTful URLs for Route objects
"""
from flask import Blueprint, request
from back_end.api import jsonify_decorator, token_decorator
from back_end.db import routes
from back_end.exceptions import InvalidContent
ROUTES = Blueprint('route', __name__)
@ROUTES.route('/<routeid>', methods=['GET'])
@jsonify_decorator
@token_decorator
def get_route(routeid, userid):
"""
Returns the route in JSON format with the `routeid` provided in URL.
"""
return routes.get_from_id(routeid, userid), 200
@ROUTES.route('/<routeid>/events', methods=['GET'])
@jsonify_decorator
@token_decorator
def get_events_from_plan(routeid, userid):
"""
Returns the list of events in JSON format containing all events
belonging to a route with the `routeid` provided in URL.
"""
return routes.get_from_id(routeid, userid).events, 200
@ROUTES.route('/<routeid>/vote', methods=['POST'])
@jsonify_decorator
@token_decorator
def vote_route(routeid, userid):
"""
Updates user's vote on the route specified by `routeid` provided in URL.
Vote is extracted in a JSON object received in request.
Returns the updated route in JSON format.
"""
json = request.get_json()
if json is None:
raise InvalidContent("A problem occurred when voting on the route")
return routes.vote(routeid, userid, json.get('vote')), 201
@ROUTES.route('', methods=['POST'])
@jsonify_decorator
@token_decorator
def create_route(userid):
"""
Creates a route with the properties specified in JSON object recieved in request.
Returns the created route in JSON format.
"""
json = request.get_json()
if json is None:
raise InvalidContent("A problem occurred when creating the route")
return routes.create(json.get('planid'), json.get('name'), json.get('eventidList'), userid), 201
| vedantchokshi/various-plans | back_end/api/routes.py | routes.py | py | 1,842 | python | en | code | 1 | github-code | 36 |
42243204047 | import numpy as np
import argparse
import sys
import matplotlib.pyplot as plt
from datetime import datetime
max_value = 6
def classify(w, sample):
    """Sign of the inner product <w, sample>: +1, -1, or 0 exactly on the boundary."""
    activation = np.dot(w, sample)
    return np.sign(activation)
def generate_dataset(num_data_points, dimension, max_value=6):
    """Generate random samples uniformly in [-max_value, max_value)^dimension.

    Each sample is prefixed with a constant bias feature x0 = 1, so the result
    has shape (num_data_points, dimension + 1).

    Generalization: the coordinate bound used to be the hard-coded module
    global `max_value` (6); it is now a parameter defaulting to that value,
    so existing two-argument callers behave identically.
    """
    # generate x0 of each data point (always 1)
    x0 = np.ones(shape=(num_data_points, 1))
    # generate x1..xN uniformly in [-max_value, max_value)
    data_points = 2 * max_value * np.random.random(size=(num_data_points, dimension)) - max_value
    # concatenate bias column with coordinates
    return np.concatenate((x0, data_points), axis=1)
def plot_data(f, data_points, labels, w):
    """Plot the target boundary f (and learned boundary g, when given) with the data.

    Positive examples are green dots, negative are red crosses; both axes are
    clamped to [-max_value, max_value]. Blocks on plt.show().
    """
    x = np.array([-max_value, max_value])
    # compute the f (target) boundary: solve f0 + f1*x + f2*y = 0 for y
    f_line = - (f[0] + x * f[1]) / f[2]
    plt.plot(x, f_line, label="f")
    # compute the g (learned) boundary, when a weight vector is supplied
    if w is not None:
        w_line = - (w[0] + x * w[1]) / w[2]
        plt.plot(x, w_line, label="g")
    plt.legend()
    # find the positive examples (label = 1) and negative examples (label = -1)
    positive_examples = [idx for idx, label in enumerate(labels) if label == 1.0]
    negative_examples = [idx for idx, label in enumerate(labels) if label == -1.0]
    # plot them (columns 1 and 2 are the spatial coordinates; column 0 is bias)
    plt.plot(data_points[positive_examples, 1], data_points[positive_examples, 2], "go")
    plt.plot(data_points[negative_examples, 1], data_points[negative_examples, 2], "rx")
    # change the plot max values (x and y)
    plt.axis([-max_value, max_value, -max_value, max_value])
    plt.show()
def generate_random_f(data_points, dimension):
    """Draw a random target hyperplane f and label data_points by its sign.

    Candidates whose boundary crosses x=0 outside the visible
    [-max_value, max_value] window are rejected, so the line is always
    on-screen. Returns (f, labels).
    """
    # generate a boundary plane and check that it's inside our zone of interest
    while True:
        f = np.random.random(dimension+1) - 0.5
        y_value = - (f[0] + 0 * f[1]) / f[2]
        # if the value at 0 is inside the range (-max_value, max_value), it's good enough
        if (abs(y_value) <= max_value):
            break
    # generate the labels for the given f
    labels = [classify(f, sample) for sample in data_points]
    # plot only in 2-D and when not batch-running (bitwise & works on bools here)
    if plot_data_flag & (dimension == 2):
        plot_data(f, data_points, labels, None)
    return f, labels
def train_perceptron(data_points, labels, dimension):
    """Train a perceptron with the classic PLA update rule.

    Returns (w, elapsed_ms, update_steps). NOTE: this loop only terminates
    when the data are linearly separable — which holds here because the
    labels come from a linear target f; on non-separable data it would spin
    forever.
    """
    start = datetime.now()
    # random initialization
    w = np.random.random(dimension + 1) - 0.5
    steps = 0
    while True:
        correction = False
        for idx, data in enumerate(data_points):
            # if there's a mistake, try to correct it
            if classify(w, data) != labels[idx]:
                steps += 1
                # PLA update: nudge w toward/away from the misclassified point
                w += labels[idx] * data
                correction = True
        # if there are no more errors, break
        if correction == False:
            break
    time_diff = datetime.now() - start
    time_diff_ms = time_diff.total_seconds() * 1000
    print("Finished training in " + "{0:.5f}".format(time_diff_ms) + " milliseconds " + str(steps) + " training steps.")
    return w, time_diff_ms, steps
def run(num_data_points, dimension=2):
    """One full experiment: sample data, draw a random target f, train, plot.

    Returns (training_time_ms, update_steps).
    """
    data_points = generate_dataset(num_data_points, dimension)
    f, labels = generate_random_f(data_points, dimension)
    w, train_time, steps = train_perceptron(data_points, labels, dimension)
    # plots only make sense in 2-D and in single-run mode (global plot_data_flag)
    if plot_data_flag & (dimension == 2):
        plot_data(f, data_points, labels, w)
    return train_time, steps
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Play with a perceptron.')
parser.add_argument("num_data_points", type=int,
help='num of data points to be generated')
parser.add_argument("--D", '--dimension', dest='dimension', type=int,
help='space dimension')
parser.add_argument("--I", '--iterations', dest='iterations', type=int,
help='iterations', default=1)
args = parser.parse_args()
if args.iterations > 1:
plot_data_flag = False
else:
plot_data_flag = True
time_list = np.zeros(shape=args.iterations)
steps_list = np.zeros(shape=args.iterations)
for iteration in range(args.iterations):
if args.dimension:
train_time, steps = run(args.num_data_points, args.dimension)
else:
train_time, steps = run(args.num_data_points)
time_list[iteration] = train_time
steps_list[iteration] = steps
print()
print("Average training time: " + str(time_list.mean()) + " and variance: " + str(time_list.var()))
print("Average steps: " + str(steps_list.mean()) + " and variance: " + str(steps_list.var())) | mjuvilla/ML-UPF-Homework | H1/ml_h1.py | ml_h1.py | py | 4,452 | python | en | code | 1 | github-code | 36 |
31888132367 | # -*- coding: utf-8 -*-
import argparse
import os
import schedule
import logging
import logging.config
import sys
import time
from redtrics.core.runner import Runner
logging.config.fileConfig(os.path.join(os.path.dirname(__file__), '..', 'etc', 'logging.ini'))
logger = logging.getLogger(__name__)
def main():
    """CLI entry point: run GitHub metrics once (--run-now) or weekly on Mondays 00:30."""
    parser = argparse.ArgumentParser(prog='redtrics-generate', description='RedMart Github Metrics')
    parser.add_argument('--base', help='base/branch to run the metrics on', default='master')
    parser.add_argument('--run-now', help='run only one time. Otherwise will run as scheduler', action="store_true")
    args = parser.parse_args()
    try:
        runner = Runner(args.base)
        if args.run_now:
            runner.run()
        else:
            # schedule library: register the weekly job, then poll once a second forever
            schedule.every().monday.at("00:30").do(runner.run)
            while True:
                schedule.run_pending()
                time.sleep(1)
    except Exception as e:
        # any failure aborts the process with a non-zero exit code
        logger.error(e)
        sys.exit(1)
| tuananh-nguyen/redtrics | redtrics/cli/app.py | app.py | py | 994 | python | en | code | 0 | github-code | 36 |
2744613256 | from django import forms
from events.models import Event
from datetime import datetime
from django.contrib.admin import widgets
class EventInputForm(forms.ModelForm):
    """ModelForm for creating/editing an Event."""
    class Meta:
        model = Event
        fields = ['organization', 'name', 'start_time','end_time', 'description', 'mask_profile', 'contact_name', 'url']
        # NOTE(review): the four assignments below declare form *fields*, but
        # they sit inside Meta, where Django's ModelForm machinery ignores
        # unknown attributes. To take effect they would have to be class
        # attributes of EventInputForm itself — confirm intent before relying
        # on these overrides.
        mask_profile = forms.Select(choices=("Yes", "No"))
        start_time = forms.DateTimeField(input_formats=["%Y/%m/%d %H:%M"])
        end_time = forms.DateTimeField(input_formats=["%Y/%m/%d %H:%M"])
        url = forms.URLField(required=False)
        # Human-readable labels for the auto-generated model fields.
        labels = {
            'organization': 'Organization',
            'name': 'Name',
            'start_time': 'Event Start Time',
            'end_time': 'Event End Time',
            'description': 'Description',
            'mask_profile': 'Mask Profile?',
            'contact_name': 'Contact Name',
            'url': 'Event URL'
        }
        # Widget overrides; commented-out entries preserved from the author.
        widgets = {
            'organization': forms.Select(attrs={'placeholder': 'Select Organization'}),
            'name': forms.TextInput(attrs={'placeholder': 'Event Name'}),
            # 'start_time': forms.DateTimeInput(format="%Y/%m/%d %H:%M"),
            # 'end_time': forms.DateTimeInput(format="%Y/%m/%d %H:%M"),
            # 'time': widgets.AdminSplitDateTime(),
            'description': forms.Textarea(attrs={'placeholder': 'Event Description'}),
            'contact_name': forms.TextInput(attrs={'placeholder': 'Name of contact'}),
            # 'url': forms.URLInput()
        }
| DigitalEmpowermentATX/DECAwebsite | digital_inclusion/events/forms.py | forms.py | py | 1,532 | python | en | code | 0 | github-code | 36 |
70096834663 | #!/usr/local/bin/python3
import os
# Part 1
print("------- Part 1 -------")
data = {}
script_dir = os.path.dirname(__file__)
with open(os.path.join(script_dir, "data.txt")) as file:
for line in file:
line = line.split()
data[int(line[1])] = int(line[4])
turn = 0
det_die = 0
players = {k: [v] for k, v in data.items()}
while True:
turn += 1
count = 0
for x in range(3):
det_die = det_die + 1 if det_die < 100 else 1
count += det_die
players[(turn - 1) % 2 + 1].append(
(players[(turn - 1) % 2 + 1][-1] + count - 1) % 10 + 1
)
if sum(players[(turn - 1) % 2 + 1][1:]) >= 1000:
break
print(f"The resulting score is {min([sum(x[1:]) for x in players.values()])*turn*3}.")
# Part 2
print("------- Part 2 -------")
dice = {3: 1, 4: 3, 5: 6, 6: 7, 7: 6, 8: 3, 9: 1}
current = [0, 0, {k: [[1, 0, v]] for k, v in data.items()}]
turn = 0
while sum([len(x) for x in current[2].values()]) > 0:
turn += 1
situations = []
for situation in current[2][(turn - 1) % 2 + 1]:
for k, v in dice.items():
new_field = (situation[2] + k - 1) % 10 + 1
if situation[1] + new_field >= 21:
current[(turn - 1) % 2] += situation[0] * v
else:
situations.append(
[situation[0] * v, situation[1] + new_field, new_field]
)
try:
new_situations = sum([x[0] for x in situations]) / sum(
[x[0] for x in current[2][(turn - 1) % 2 + 1]]
)
except ZeroDivisionError:
new_sitations = 1
current[2][(turn - 1) % 2 + 1] = situations
current[2][turn % 2 + 1] = [
[x[0] * new_situations, x[1], x[2]] for x in current[2][turn % 2 + 1]
]
print(f"The better player wins in {int(max(current[:2]))} universes.")
| derBanz/AdventOfCode | AoC2021/day21/script.py | script.py | py | 1,830 | python | en | code | 0 | github-code | 36 |
10978100631 | from gensim import corpora, models, similarities
import logging
from Preprocess.py import MyCorpus
processed_dir = 'Users/mlinegar/Data/LDA/BoW'
_num_topics = 10
dictionary = corpora.Dictionary.load(processed_dir + "firsttry.dict")
corpus = corpora.MmCorpus(processed_dir + "firsttry.mm")
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf(corpus)
lda = models.LdaModel(corpus_tfidf, id2word=dictionary, num_topics=_num_topics) | mlinegar/RedditScraping | Analysis/Tranformations.py | Tranformations.py | py | 435 | python | en | code | 0 | github-code | 36 |
35377030158 | from clean_doc import clean_doc
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
import gensim.models.doc2vec
import gensim
import multiprocessing
cores = multiprocessing.cpu_count()
assert gensim.models.doc2vec.FAST_VERSION > -1, "this will be painfully slow otherwise"
class LabeledLineSentence(object):
    """Iterable adapter: yields a gensim LabeledSentence per (text, url) pair.

    Being a class (rather than a generator function) lets gensim iterate the
    corpus multiple times (vocab build + each training epoch).
    """
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for words, tags in self.data:
            yield LabeledSentence(words=words, tags=tags)
def doc2vec(data , alpha , window , min_alpha , min_count , epoch , model_path):
    """Train a gensim Doc2Vec model on (text, url) pairs and save it to model_path.

    The learning rate is decayed manually by 0.002 per epoch; gensim's own
    decay is disabled by pinning min_alpha to the current alpha each pass.
    Uses the module-level `cores` for worker threads.
    """
    #Labeled Sentences
    sentences = LabeledLineSentence(data)
    model = Doc2Vec(alpha = alpha , window = window , min_alpha = min_alpha , min_count = min_count , workers=cores) # use fixed learning rate
    model.build_vocab(sentences)
    for epoch_count in range(epoch):# Change for good performance
        model.train(sentences)
        model.alpha -= 0.002 # decrease the learning rate
        model.min_alpha = model.alpha # fix the learning rate, no decay
    # store the model to mmap-able files
    model.save(model_path)
| wrat/Semantic-Relationship-Between-News | Doc2vec_model/doc2vec.py | doc2vec.py | py | 1,124 | python | en | code | 0 | github-code | 36 |
21477557993 | import sys
n, b = map(int, input().split())
arr = [list(map(int, sys.stdin.readline().split())) for _ in range(n)]
c = 1000
def matrixmult(A, B, mod=1000):
    """Multiply square matrices A and B, reducing each entry modulo *mod*.

    Generalization: the modulus used to be the module global `c` (1000); it is
    now a parameter defaulting to 1000, so existing two-argument callers are
    unchanged and the function no longer depends on module state.
    """
    size = len(A)
    C = [[0] * size for _ in range(size)]
    for i in range(size):
        for j in range(size):
            total = 0
            for k in range(size):
                total += A[i][k] * B[k][j]
            # reduce once per entry, after the full dot product
            C[i][j] = total % mod
    return C
def g(a: list ,b: int):
    """Return a^b modulo c (module global, 1000) via recursive fast exponentiation.

    Base case b == 1 reduces every entry of `a` in place (mutates the
    argument); otherwise the half power is squared, with one extra factor of
    `a` multiplied in for odd exponents. Uses the module globals n (size)
    and c (modulus).
    """
    temp = [[0]*n for _ in range(n)]
    if b == 1:
        # reduce the base matrix itself mod c (in-place mutation)
        for i in range(n):
            for j in range(n):
                a[i][j] %= c
        return a
    else:
        matrix =g(a, b//2)
        if b % 2 == 0:
            temp = matrixmult(matrix, matrix)
            return temp
        else:
            # odd exponent: square the half power, then multiply by the base
            temp = matrixmult(matrixmult(matrix, matrix), a)
            return temp
for ans in g(arr, b):
print(*ans)
| Minsoo-Shin/jungle | week02/10830_행렬제곱.py | 10830_행렬제곱.py | py | 820 | python | en | code | 0 | github-code | 36 |
32274499758 | #!/opt/csw/bin/python
# coding=utf-8
import re
import fileinput
from time import time
from datetime import datetime
urlRe = re.compile('(http://www\.|https://www\.|http://|https://|www\.)(?P<link>\S+)')
youtubeUrlRe = re.compile('(youtube\.com/watch\?v=|youtube\.com/watch\?.*&v=|youtu.be/)(?P<id>[A-Za-z0-9_-]{11})')
def getResponseType():
    """Interceptor type marker: this plugin handles plain chat messages ("MSG")."""
    return "MSG"
def get(msg, author, folder):
    """Track links seen in chat and call out re-posts.

    Extracts URLs from *msg*, normalizes them (prepareUrl), and checks each
    against the persistent log at <folder>/links.txt. Known links bump their
    repost counter and add an "old!!!" line (Estonian: "Algselt linkis" =
    "originally linked by") to the response; new links are appended to the
    log with *author* and the current timestamp. Returns the list of
    response lines, or None when the message contains no URLs.
    """
    urls = re.findall(urlRe, msg)
    if (not urls):
        return
    # drop 4chan CDN links, canonicalize, and dedupe within this message
    urls = [prepareUrl(url) for url in urls if not is4chan(url)]
    urls = list(set(urls))
    f = open(folder + "/links.txt","r")
    lines = f.readlines()
    f.close()
    response = []
    for index, line in enumerate(lines):
        # every URL already matched: nothing left to look for
        if not urls:
            break;
        # record format: "<url> <count> <nick> <timestamp>"
        data = line.rstrip().split(" ")
        found = None
        for url in urls:
            if (data[0] != url):
                continue
            count = int(data[1])
            countStr = "(x" + str(count) + ")" if count > 1 else ""
            nick = "<" + data[2] + ">"
            firstTime = datetime.fromtimestamp(int(data[3])).strftime("%d/%m/%Y %H:%M:%S")
            response.append("old!!! " + countStr + " Algselt linkis " + nick + " " + firstTime)
            # bump the repost counter, keep original nick and timestamp
            lines[index] = buildLine(data[0], count + 1, data[2], data[3])
            found = url
        if found is not None:
            urls.remove(found)
    # rewrite the whole log: updated existing lines plus any brand-new links
    f = open(folder + "/links.txt","w")
    for line in lines:
        f.write(line)
    for url in urls:
        timestamp = str(int(time()))
        line = buildLine(url, 1, author, timestamp)
        f.write(line)
    f.close()
    return response
def buildLine(url, count, nick, timestamp):
    """Serialize one link record as a space-separated, newline-terminated line."""
    parts = (url, str(count), nick, timestamp)
    return " ".join(parts) + "\n"
def is4chan(url):
    """True when the regex match tuple points at a 4chan CDN link (these are ignored)."""
    return url[1].find("4cdn.org") != -1
def prepareUrl(url):
    """Normalize a urlRe match tuple into a canonical link key.

    YouTube links collapse to their 11-character video id so all URL variants
    of the same video dedupe together; otherwise one trailing slash is
    stripped so "x.com/" and "x.com" compare equal.
    """
    # url is a (prefix, link) tuple from urlRe; keep only the link part
    url = url[1]
    youtubeUrl = re.findall(youtubeUrlRe, url)
    if (youtubeUrl):
        return youtubeUrl[0][1]
    if url[-1:] == "/":
        url = url[:-1]
return url | sviik/marju | plugin/interceptor/old/__init__.py | __init__.py | py | 2,001 | python | en | code | 1 | github-code | 36 |
def print_board(board):
    """Render the 3x3 board to stdout with separator rules between rows."""
    rows = (board[0:3], board[3:6], board[6:9])
    for idx, row in enumerate(rows):
        print(" {} | {} | {} ".format(*row))
        if idx < 2:
            print("---+---+---")
def check_win(board, player):
    """Return True if *player* occupies any full row, column, or diagonal."""
    lines = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6))
    return any(all(board[i] == player for i in line) for line in lines)
def get_move(board, player):
    """Prompt *player* until they enter an index 0-8 of an empty cell; return it.

    Cleanup: the original carried a `valid_move` flag that was never set —
    the loop only ever exited via `return` — so the dead flag is replaced
    with an explicit `while True`.
    """
    while True:
        move = input(f"Player {player}, enter a move (0-8): ")
        # isdigit() already rejects negatives and non-numbers
        if move.isdigit() and 0 <= int(move) <= 8 and board[int(move)] == "":
            return int(move)
        print("Invalid move. Try again.")
def tic_tac_toe():
    """Run a two-player console tic-tac-toe game.

    BUG FIX: the original toggled current_player *before* re-testing the loop
    condition, so after a winning move the win was checked against the other
    player — the game kept running past a win and the final message named the
    wrong player. The win is now checked immediately after each move, for the
    player who just moved.
    """
    board = [""] * 9
    players = ["X", "O"]
    current_player = 0
    winner = None
    while "" in board:
        print_board(board)
        move = get_move(board, players[current_player])
        board[move] = players[current_player]
        if check_win(board, players[current_player]):
            winner = players[current_player]
            break
        current_player = (current_player + 1) % 2
    print_board(board)
    if winner is not None:
        print(f"Player {winner} wins!")
    else:
        print("Tie game.")
tic_tac_toe()
| bolojutsu/tic-tac-toe | main.py | main.py | py | 1,382 | python | en | code | 0 | github-code | 36 |
70808020583 | from flask import Flask, request, jsonify
from a_entities.bank_account import BankAccount
from a_entities.customer import Customer
from b_data_access_layer.postgres_bank_account_dao import BankAccountPostgresDAO
from b_data_access_layer.postgres_customer_dao import CustomerPostgresDAO
from c_service_layer.postgres_bank_account_service import BankAccountPostgresService
from c_service_layer.postgres_customer_service import CustomerPostgresService
from c_service_layer.custom_exceptions import *
import logging
logging.basicConfig(filename="records.log", level=logging.DEBUG, format=f"%(asctime)s %(levelname)s %(message)s")
# Created the Flask object to use flask environment. Also created the DAO and the Service layer instances so that all
# of the information for both layers are available here.
app = Flask(__name__)
customer_dao = CustomerPostgresDAO()
customer_service = CustomerPostgresService(customer_dao)
bank_account_dao = BankAccountPostgresDAO()
bank_account_service = BankAccountPostgresService(bank_account_dao)
@app.post("/customer")
def create_customer():
try:
# We retrieve the request that the API sent to this server.
customer_data = request.get_json()
# We format the data so that it is read correctly by the server. The API user is passing their information to us
# so we need to give the database a way to read it.
new_customer = Customer(customer_data["firstName"], customer_data["lastName"], customer_data["customerId"])
# We pass this retrieved and formatted data into our service layer.
customer_to_return = customer_service.service_create_customer(new_customer)
# The objects crunched by the DAO and service layers are passed back to the server and turned into a dictionary.
customer_as_dictionary = customer_to_return.customer_dictionary()
# Converting the dictionary into a JSON.
customer_as_json = jsonify(customer_as_dictionary)
# Sending the jsonified dictionary to the user (Postman).
return customer_as_json
except WrongInformationException as w:
exception_dictionary = {"Message" : str(w)}
jsonify_exception = jsonify(exception_dictionary)
return jsonify_exception
@app.post("/account")
def create_bank_account():
account_data = request.get_json()
new_account = BankAccount(account_data["accountId"], account_data["customerId"], account_data["balance"])
account_to_return = bank_account_service.service_create_bank_account(new_account)
account_as_dictionary = account_to_return.bank_account_dictionary()
account_as_json = jsonify(account_as_dictionary)
return account_as_json
@app.get("/customer/<customer_id>")
def get_customer_information(customer_id: str):
# There is no body returned to the server with this verb there is only the request to send information back out to
# the API.
result = customer_service.service_get_customer_information(int(customer_id))
result_as_dictionary = result.customer_dictionary()
result_as_json = jsonify(result_as_dictionary)
return result_as_json
@app.get("/account/<account_id>")
def get_account_information(account_id: str):
account_info = bank_account_service.service_view_bank_account(int(account_id))
info_as_dictionary = account_info.bank_account_dictionary()
info_as_json = jsonify(info_as_dictionary)
return info_as_json
@app.patch("/customer/<customer_id>")
def update_customer_information(customer_id: str):
customer_data = request.get_json()
new_customer = Customer(customer_data["firstName"],
customer_data["lastName"],
int(customer_id))
customer_service.service_update_customer_information(new_customer)
return "Hooray! Customer with id {} updated successfully.".format(customer_id)
@app.patch("/account/deposit/<account_id>/<balance>")
def deposit(account_id: str, balance: str):
money_data = request.get_json()
new_balance = BankAccount(int(account_id), money_data["customerId"], money_data["balance"])
bank_account_service.service_deposit(int(balance), new_balance)
return "The balance in account {} has been updated.".format(account_id)
# Database, Postman not catching the insufficient funds exception!!!!
@app.patch("/account/withdraw/<account_id>/<balance>")
def withdraw(account_id: str, balance: str):
try:
# The request from the API comes in as string information so the account id and balance has to be converted back
# to the proper data types into the method.
# The front end is not sending us a body of information so we don't need to do the request.get_json function.
bank_account_service.service_withdraw(int(account_id), float(balance))
return "The balance in account {} has been updated.".format(account_id)
except InsufficientFundsException as i:
exception_dictionary = {"Message": str(i)}
jsonify_exception = jsonify(exception_dictionary)
return jsonify_exception
@app.patch("/account/<account_one>/<account_two>/<balance>")
def transfer_funds(account_one: str, account_two: str, balance: str):
try:
transfer_data = request.get_json()
transfer_one = BankAccount(int(account_one), transfer_data["customerId"], transfer_data["balance"])
transfer_two = BankAccount(int(account_two), transfer_data["customerId"], transfer_data["balance"])
bank_account_service.service_transfer_funds(int(balance), transfer_one, transfer_two)
return "The transfer of ${} has been completed.".format(balance)
except InsufficientFundsException as i:
exception_dictionary = {"Message" : str(i)}
jsonify_exception = jsonify(exception_dictionary)
return jsonify_exception
@app.get("/customer")
def view_all_customers():
# The front end is not sending us a body of information so we don't need to do the request.get_json function.
all_customers = customer_service.service_view_all_customers()
customers_as_dictionaries = []
for cust in all_customers:
dictionary_customers = cust.customer_dictionary()
customers_as_dictionaries.append(dictionary_customers)
return jsonify(customers_as_dictionaries)
@app.get("/account/<customer_id>")
def view_accounts_per_customer(customer_id: str):
    """Return all bank accounts of one customer as a JSON array."""
    accounts = bank_account_service.service_view_accounts_per_customer(int(customer_id))
    return jsonify([acct.bank_account_dictionary() for acct in accounts])
@app.get("/account")
def view_all_bank_accounts():
    """Return every bank account as a JSON array of dictionaries."""
    every_account = bank_account_service.service_view_all_bank_accounts()
    return jsonify([acct.bank_account_dictionary() for acct in every_account])
@app.delete("/customer/<customer_id>")
def delete_customer(customer_id: str):
    """Delete one customer by id; report a JSON error if deletion fails."""
    try:
        customer_service.service_delete_customer(int(customer_id))
    except DeletionErrorException as err:
        return jsonify({"Message" : str(err)})
    return "Customer with id {} has been deleted.".format(customer_id)
@app.delete("/account/<account_id>")
def delete_bank_account(account_id: str):
    """Delete one bank account by id and confirm with a plain-text message."""
    bank_account_service.service_delete_bank_account(int(account_id))
    return "Bank account with id {} has been deleted.".format(account_id)
app.run() | bluedragonscales/project0_banking | main.py | main.py | py | 7,753 | python | en | code | 0 | github-code | 36 |
69829306344 | from torchvision import transforms, datasets
import h5py
import os
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
#参考:https://blog.csdn.net/shwan_ma/article/details/100012808
#https://github.com/pbizopoulos/signal2image-modules-in-deep-neural-networks-for-eeg-classification/blob/master/dataset.py
class DataFromMat(Dataset):
    """EEG dataset loaded from per-subject .mat files (A01T..A09T).

    Loads all nine subjects, keeps the first 22 electrode channels, drops
    trials containing NaNs, pools everything, and exposes an 80/20
    train/test split of the pooled data.
    """
    def __init__(self, filepath, training_test , standardize=True):
        electrodes = 22  # keep only the 22 EEG electrode channels
        X, y = [], []
        # ---- load each subject's .mat file ----
        for i in range(9):
            A01T = h5py.File(filepath +'A0'+ str(i + 1) + 'T_slice.mat', 'r')
            X1 = np.copy(A01T['image'])
            X1 = X1[:, :electrodes, :]
            X.append(np.asarray(X1,dtype=np.float32))
            y1 = np.copy(A01T['type'])
            y1 = y1[0, 0:X1.shape[0]:1]  # one label per trial for this subject
            y.append(np.asarray(y1, dtype=np.int32))
        # ---- drop any trial that contains NaN samples ----
        for subject in range(9):
            delete_list = []  # indices of trials to remove for this subject
            for trial in range(288):
                if np.isnan(X[subject][trial, :, :]).sum() > 0:
                    delete_list.append(trial)
            # print('delete_list',delete_list)
            X[subject] = np.delete(X[subject], delete_list, 0)
            y[subject] = np.delete(y[subject], delete_list)
        y = [y[i] - np.min(y[i]) for i in range(len(y))]  # shift labels to start at 0 (0..3)
        # Pool every subject's trials into one big array.
        signals_all = np.concatenate((X[0], X[1], X[2], X[3], X[4], X[5], X[6], X[7], X[8]))  # signals
        labels_all = np.concatenate((y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7], y[8]))  # labels
        # print('signals_all.shape',signals_all.shape)
        # print('labels_all.shape',labels_all.shape)
        last_training_index = int(signals_all.shape[0]*0.8)
        # ---- split 80% train / 20% test along the trial axis ----
        if training_test == 'train':
            self.data = torch.tensor(signals_all[:last_training_index, :], dtype=torch.float)
            self.labels = torch.tensor(labels_all[:last_training_index])
        elif training_test == 'test':
            self.data = torch.tensor(signals_all[last_training_index:, :], dtype=torch.float)
            self.labels = torch.tensor(labels_all[last_training_index:])
        # Standardize: subtract the mean and divide by the std over trials.
        # NOTE(review): stats are computed on each split separately, not on
        # the training split only — confirm this is intended.
        if standardize:
            data_mean = self.data.mean(0)
            data_var = np.sqrt(self.data.var(0))
            self.data = (self.data -data_mean)/data_var

    def __getitem__(self, idx):
        # Return one (signal, label) pair.
        data = self.data[idx]
        label = self.labels[idx]
        return data,label

    def __len__(self):
        # Number of trials in this split.
        return self.data.shape[0]
def get_data(filepath, standardize=True):
    """Build DataLoaders over the pooled EEG dataset.

    Returns (train_loader, test_loader, train_size, test_size).
    """
    # Bug fix: *standardize* was accepted but never forwarded to the
    # datasets, so the flag had no effect.
    train_dataset = DataFromMat(filepath, 'train', standardize)
    test_dataset = DataFromMat(filepath, 'test', standardize)
    train_loaders = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=4)
    test_loaders = DataLoader(test_dataset, batch_size=64, shuffle=True, num_workers=4)
    train_sizes = len(train_dataset)
    test_sizes = len(test_dataset)
    return train_loaders, test_loaders, train_sizes, test_sizes
if __name__ == '__main__':
    filepath = "./data/"
    # Smoke test: load the data and print one batch's shapes.
    # Bug fix: get_data returns four values; unpacking only two raised
    # ValueError before any batch could be inspected.
    train_loader, test_loader, train_sizes, test_sizes = get_data(filepath)
    for signals, labels in test_loader:
        print('signals.shape', signals.shape)
        print('labels.shape', labels.shape)
| im-wll/EEG-process | dataset/dataloader.py | dataloader.py | py | 3,865 | python | en | code | 1 | github-code | 36 |
16178639967 | # -*- coding: utf-8 -*-
from high2low import Changer_low as ch_low
import utils as util
#from low2high import Changer_high as ch_high
import is_horl as is_horl
txt = input("Enter Korean Sentence: ")

ch = ch_low()
#ch_high = ch_high()
hi = is_horl.isHigh()

# Classify the politeness level of the input sentence:
# 1 = honorific (formal) speech, anything else = casual speech.
detect=hi.isThisHigh(txt)

# Honorific speech: convert down to the casual form.
if detect ==1:
    hi.getState(detect)
    output = ch.processText(txt)
    print("Converted Result:", output)

# Casual speech: convert up to the honorific form.
else:
    hi.getState(detect)
    output = util.tohigh(txt)
    print("Converted Result:", output)
| joowhan/Translation_Project | lab/highlow_factory/ver3_chari/src/test.py | test.py | py | 523 | python | en | code | 2 | github-code | 36 |
32153562900 | # From codereview.stackexchange.com
def partitions(set_):
    """Yield every partition of *set_* as a list of disjoint sets.

    Each bitmask over the elements (restricted to half the range so the
    last element always stays in the first block) splits the set in two;
    recursing on the second half enumerates each partition exactly once.
    """
    if not set_:
        yield []
        return
    for mask in range(2 ** len(set_) // 2):
        first_block, remainder = set(), set()
        bits = mask
        for elem in set_:
            (remainder if bits & 1 else first_block).add(elem)
            bits >>= 1
        for tail in partitions(remainder):
            yield [first_block] + tail
def get_partitions(set_):
    """Yield each partition of *set_* with its blocks converted to lists."""
    for part in partitions(set_):
        yield list(map(list, part))
#The above solution was creating set partitions which cannot contain duplicates, with the following conditions:
##The empty set has exactly one partition, namely the empty partition (a partition with no blocks).
##For any nonempty set X, P = {X} is a partition of X, called the trivial partition.
##Particularly, every singleton set {x} has exactly one partition, namely { {x} }.
##For any non-empty proper subset A of a set U, the set A together with its complement form a partition of U, namely, {A, U \ A}.
##The set { 1, 2, 3 } has these five partitions (one partition per item):
###{ {1}, {2}, {3} }, sometimes written 1|2|3.
###{ {1, 2}, {3} }, or 12|3.
###{ {1, 3}, {2} }, or 13|2.
###{ {1}, {2, 3} }, or 1|23.
###{ {1, 2, 3} }, or 123 (in contexts where there will be no confusion with the number).
##The following are not partitions of { 1, 2, 3 }:
###{ {}, {1, 3}, {2} } is not a partition (of any set) because one of its elements is the empty set.
###{ {1, 2}, {2, 3} } is not a partition (of any set) because the element 2 is contained in more than one block.
###{ {1}, {2} } is not a partition of {1, 2, 3} because none of its blocks contains 3; however, it is a partition of {1, 2}.
#this solution however creates 'list_partitions'
#allowing a list to contain duplicates.
def list_partitions(list_):
    """Yield every partition of *list_* as a list of lists.

    Requires a list sorted from smallest to largest, otherwise duplicate
    partitions are produced. Unlike the set version, the input may
    contain repeated values.
    """
    ordered = sorted(list_)
    if not ordered:
        yield []
        return
    for mask in range(2 ** len(ordered) // 2):
        first_block, remainder = [], []
        bits = mask
        for elem in ordered:
            (remainder if bits & 1 else first_block).append(elem)
            bits >>= 1
        for tail in list_partitions(remainder):
            yield [first_block] + tail
def get_list_partitions(list_):
    """Yield each partition of *list_* with its blocks as plain lists.

    NOTE(review): when *list_* contains duplicate values this fast path can
    yield the same partition more than once; the commented-out variant
    below de-duplicates at the cost of speed.
    """
    # Fast path: may produce redundant partitions for inputs with repeats.
    for partition in list_partitions(list_):
        yield [list(elt) for elt in partition]
    # Slower, duplicate-free variant kept for reference:
##    distinct_partitions = []
##    for partition in list_partitions(list_):
##        sorted_partition = sorted(partition)
##        if sorted_partition not in distinct_partitions:
##            distinct_partitions.append(sorted_partition)
##            yield [list(elt) for elt in sorted_partition]
| dawiditer/MIT6002-pset1 | ps1_partition.py | ps1_partition.py | py | 2,699 | python | en | code | 0 | github-code | 36 |
def binary_search(list, item):
    """Return the index of *item* in the sorted list *list*, or None.

    Classic iterative binary search: inspect the middle element and
    discard the half that cannot contain *item* until the search range
    is empty.
    """
    low = 0
    high = len(list) - 1
    while low <= high:
        # Bug fix: the original used round((low + high) / 2); round()
        # applies banker's rounding, which is not the conventional floor
        # midpoint. Integer floor division is exact and avoids floats.
        mid = (low + high) // 2
        guess = list[mid]
        if guess < item:
            low = mid + 1
        elif guess > item:
            high = mid - 1
        else:
            return mid
    # Item absent (the dangling while-else in the original was redundant).
    return None
def test_binary_search(f):
print("testcase #1: ", end=" ")
A = [1, 2, 3, 4, 5]
item = 5
number_test_item = 4
f(A, item)
print("Ok" if f(A, item) == number_test_item else "Fail")
print("testcase #2: ", end=" ")
A = [1, 2, 3, 4, 5]
item = -1
number_test_item = None
f(A, item)
print("Ok" if f(A, item) == number_test_item else "Fail")
print("testcase #3: ", end=" ")
A = [1, 2, 3, 4, 5]
item = 2
number_test_item = 1
f(A, item)
print("Ok" if f(A, item) == number_test_item else "Fail")
test_binary_search(binary_search)
| Ragocha/PyWork | 123.py | 123.py | py | 1,664 | python | ru | code | 0 | github-code | 36 |
39238016680 | import cv2
import numpy as np
def get_seg_size(images, threshold=23, pmapxy=None):
    """Return one segmentation-size value per image.

    With *pmapxy* given, the size is the total of the image weighted by
    the probability map; otherwise it is the count of non-zero pixels.
    NOTE(review): *threshold* is currently unused — confirm whether a
    thresholding step was intended before counting.
    """
    seg_sizes = []
    for image in images:
        if pmapxy is not None:
            weighted = np.multiply(image, pmapxy)
            seg_sizes.append(weighted.sum(1).sum(0))
            continue
        # Bug fix: the original referenced an undefined `thresh_image`
        # here, raising NameError whenever pmapxy was None.
        seg_sizes.append(cv2.countNonZero(image))
    return seg_sizes
def get_seg_perimeter(images):
    """Return one perimeter estimate per image.

    The perimeter is approximated as the number of edge pixels found by
    a Canny edge detector run over the segmentation image.
    """
    seg_perimeters = []
    index = 0
    for image in images:
        edge_image = cv2.Canny(image, threshold1=30, threshold2=40)
        # Debug visualisation, kept disabled:
        """
        if index % 10 == 0:
            cv2.imshow("edge", edge_image)
            cv2.waitKey(0)
            cv2.imshow("image", image)
            cv2.waitKey(0)
        """
        seg_perimeters.append(cv2.countNonZero(edge_image))
        index += 1
return seg_perimeters | Alice-OSENSE/feature_err_analysis | feature_func.py | feature_func.py | py | 989 | python | en | code | 0 | github-code | 36 |
class Solution(object):
    def findErrorNums(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int] -> [duplicate, missing]

        Marker trick: for every value v seen, flip nums[v-1] negative.
        Hitting an already-negative slot reveals the duplicate; the slot
        that is still positive afterwards reveals the missing number.
        nums is marked in place while being iterated, exactly as in the
        original implementation.
        """
        result = []
        for value in nums:
            slot = abs(value) - 1  # abs(): the value may already be negated
            if nums[slot] > 0:
                nums[slot] = -nums[slot]
            else:
                result.append(slot + 1)  # second visit -> duplicate
        # The one slot left positive corresponds to the missing number.
        result.extend(i + 1 for i, v in enumerate(nums) if v > 0)
        return result
if __name__ == "__main__":
x = Solution()
s = [2,2]
print(x.findErrorNums(s))
| dundunmao/LeetCode2019 | 645. Set Mismatch.py | 645. Set Mismatch.py | py | 810 | python | zh | code | 0 | github-code | 36 |
18282435233 | import glob, os
#class_NEs=set(['new york','islip', ' suffolk ', 'long island'])
#class_words= set(['rain', 'raining', 'water', 'inches', 'record', 'wednesday', 'weather', 'flood', 'damage', 'storm' ])
# Filter event paragraphs: keep only paragraphs that mention both a
# named entity and a topic keyword, writing the survivors to
# ../lib/ClassEventParas/ under the same file name.
# NOTE(review): fullText is lowercased below, but 'Adam Lanza' is
# capitalized and multi-word entries like 'sandy hook elementary school'
# can never equal a single whitespace token — confirm these sets are
# matched the way the author intended.
small_NEs = set(['newtown', 'connecticut', 'sandy hook elementary school', 'Adam Lanza'])
small_words=set(['shooting', 'victims', 'gunman', 'school', 'children', 'students', 'police', 'dead','wounded', 'weapon', 'firearm', 'motive'])

files = glob.glob('../lib/ClassEvent/*.txt')
for fileName in files:
    f= open(fileName)
    name= os.path.basename(fileName)
    f1= open('../lib/ClassEventParas/'+name, 'w')
    fullText= f.read().lower()
    paras= fullText.split('\n')
    for para in paras:
        wordsInPara= para.split()
        # Paragraph survives only if it contains an entity AND a keyword.
        if any(word in small_NEs for word in wordsInPara):
            if any(word in small_words for word in wordsInPara):
                f1.write(para)  # NOTE(review): no newline appended — paragraphs run together
    f1.close()
    f.close()
| macmania/Team-A-NLP | TeamJ/Unit8/para_cleaner.py | para_cleaner.py | py | 946 | python | en | code | 1 | github-code | 36 |
8195166573 | from PIL import Image, ImageOps, ImageDraw, ImageFont
from bot.config import PICS_DIRECTORY, QUOTATION_DATABASE
import textwrap
import sqlite3
import random
import os
import io
def get_random_quote():
    """Return the text of one uniformly random quote from the database.

    Assumes quote ids are contiguous 1..COUNT(*) — deleted rows would make
    fetchone() return None here (TODO confirm the table never has gaps).
    """
    conn = sqlite3.connect(QUOTATION_DATABASE)
    try:
        cursor = conn.cursor()
        count = cursor.execute('SELECT COUNT(*) FROM quotes;').fetchone()[0]
        random_id = random.randint(1, count)
        # Index [1] selects the quote column of the (author, quote) row.
        return cursor.execute(
            'SELECT author, quote FROM quotes WHERE id = ?', (random_id, )
        ).fetchone()[1]
    finally:
        # Bug fix: the original never closed the connection, leaking one
        # sqlite handle per call.
        conn.close()
def create_quote_photo():
    """Compose a quote card and return it as an in-memory PNG (BytesIO).

    Layout: an 850x400 black canvas with a white 2px border, a random
    picture from PICS_DIRECTORY pasted on the left (scaled to 350px
    height), the quote wrapped on the right, and an author signature
    near the bottom-right.
    """
    img_quote = Image.new(mode="RGB", size=(850, 400))
    img_quote = ImageOps.expand(img_quote, border=2, fill='white')
    # Pick a random picture and scale it to 350px height, keeping aspect.
    img_komaru = Image.open(os.path.join(PICS_DIRECTORY, random.choice(os.listdir(PICS_DIRECTORY))))
    img_komaru = img_komaru.resize((int(img_komaru.size[0] * (350 / img_komaru.size[1])), 350))
    img_quote.paste(img_komaru, (25, 25))
    quote = get_random_quote()
    font1 = ImageFont.truetype('times.ttf', size=20)  # quote body
    font2 = ImageFont.truetype('times.ttf', size=24)  # author line
    draw_text = ImageDraw.Draw(img_quote)
    margin = 420   # x offset: text starts to the right of the picture
    offset = 25    # y offset, advanced per wrapped line
    for line in textwrap.wrap(quote, width=45):
        draw_text.text((margin, offset), line, font=font1, fill="white")
        offset += font1.getsize(line)[1]
    author = '- Комару -'
    # Right-align the signature by subtracting its rendered width.
    draw_text.text((790 - font2.getsize(author)[0], 310), author, font=font2, fill="white")
    # Serialize to PNG in memory and rewind so callers can read from 0.
    byte_arr = io.BytesIO()
    img_quote.save(byte_arr, format='PNG')
    byte_arr.seek(0)
    return byte_arr
| Ku6iKRu6Ika/quote-bot | bot/utils.py | utils.py | py | 1,504 | python | en | code | 0 | github-code | 36 |
21247224214 | __author__ = 'yuerzx'
import csv
import pymongo
from pymongo import MongoClient
# Import supermarket locations from a CSV into MongoDB as GeoJSON points.
data_client = MongoClient()
data_base = data_client.Locations
#add authenticate for the MongoDB
# NOTE(review): credentials are hard-coded in source — move to config.
data_base.authenticate('EZYProperty', '8jshf7asd')
super_c = data_base.supermarket
counter = 0      # successful inserts
err_counter = 0  # failed inserts
with open("/home/yuerzx/Desktop/woolworth_geo.csv", 'r', newline = '') as market_list:
    reader = csv.reader(market_list, delimiter = ',', quoting = csv.QUOTE_MINIMAL)
    next(reader)  # skip the header row
    for row in reader:
        # GeoJSON requires [longitude, latitude] order; columns 8/7 are
        # presumably lon/lat from the geocoded export — TODO confirm.
        data = { "loc" :
                    { "type": "Point", "coordinates": [ float(row[8]), float(row[7]) ] },
                 "S_Type" : row[0],
                 "S_Id" : row[1],
                 "S_Name" : row[2],
                 "Suburb" : row[3],
                 "State" : row[4],
                 "PCode" : row[5],
                 "Phone" : row[6],
                 "F_Address": row[9],
                 }
        results = super_c.insert(data)
        if results:
            counter += 1
            print("Done with %s"%row[2])
        else:
            err_counter += 1
            print("Error")
            print(results)
print("Total result is %d with %d errors"%(err_counter+counter, err_counter)) | yuerzx/python_information | supermarket_location/import_into_mongodb.py | import_into_mongodb.py | py | 1,237 | python | en | code | 0 | github-code | 36 |
74753775465 | from prophepy import Mock
from .builtin_matchers import get_matcher
from .exceptions import CustomMatcherError
from .utils import map_for_dict, reveal_if_needed
class Subject:
    '''
    Wrapper around the object under specification.

    Attribute access is intercepted: `_should_*` names resolve to matcher
    checks (custom matchers from the spec take precedence over builtin
    ones), while any other attribute is forwarded to the wrapped object
    and the result is re-wrapped in a new Subject so calls can be chained.
    '''
    def __init__(self, value, object_behavior):
        '''
        Instantiated with the real object under test and the spec
        (behavior object) that may provide custom matchers.
        '''
        self.__value = value
        self.__object_behavior = object_behavior

    def _get_value(self):
        '''
        Return the real wrapped object.
        '''
        return self.__value

    def match_with_custom_matcher(self, matcher_name, matcher, *args):
        '''
        Run the wrapped value through a custom matcher; raise
        CustomMatcherError if the matcher returns a falsy result,
        otherwise return the wrapped value unchanged.
        '''
        if not matcher(self.__value, *args):
            raise CustomMatcherError(f'Custom matcher "{matcher_name}" failed.')

        return self.__value

    def __getattr__(self, attr_name):
        '''
        For a `_should_<matcher>` attribute, return a callable that runs
        the matcher (custom first, then builtin) and yields a new Subject.
        For anything else, return a callable that forwards the call to the
        wrapped object (revealing any mock arguments first) and wraps the
        result in a new Subject.
        '''
        if attr_name.startswith('_should_'):
            matcher_type = attr_name[len('_should_'):]

            # Custom matcher declared by the spec wins over builtins.
            if matcher_type in self.__object_behavior._matchers().keys():
                matcher = self.__object_behavior._matchers()[matcher_type]
                # matcher_type/matcher are bound per __getattr__ call, so
                # each wrapper closes over its own pair.
                def custom_matcher_wrapper(*args):
                    return Subject(
                        self.match_with_custom_matcher(matcher_type, matcher, *args),
                        self.__object_behavior
                    )
                return custom_matcher_wrapper

            # Fall back to a builtin matcher looked up by name.
            matcher = get_matcher(matcher_type)
            def checker_wrapper(expected_value):
                matcher(self.__value, expected_value)
                return Subject(
                    self.__value,
                    self.__object_behavior
                )
            return checker_wrapper

        # Plain attribute: forward the call to the wrapped object, first
        # replacing any prophecy/mock arguments with their revealed values.
        def action_wrapper(*args, **kwargs):
            args = map(reveal_if_needed, args)
            kwargs = map_for_dict(reveal_if_needed, kwargs)
            return Subject(
                getattr(self.__value, attr_name)(*args, **kwargs),
                self.__object_behavior
            )
        return action_wrapper
| Einenlum/specify | specify/subject.py | subject.py | py | 2,436 | python | en | code | 0 | github-code | 36 |
15648874127 | from __future__ import annotations
from copy import deepcopy
from math import prod
class Dot(object):
    """A 2-D point with float coordinates."""

    x: float
    y: float

    def __init__(self, x: float, y: float):
        self.x = x
        self.y = y

    def __str__(self):
        # Same "(x;y)" rendering as the original implementation.
        return f"({self.x};{self.y})"
class Polynomial(object):
    """Sum of callable terms: calling an instance evaluates every term at
    the argument and returns the total, i.e. p(x) = sum(term(x))."""

    def __init__(self, terms):
        # Each term maps a float argument to a float contribution.
        self.terms = terms

    def __call__(self, arg: float) -> float:
        return sum(term(arg) for term in self.terms)
class NewtonPolynomial(Polynomial):
    """Newton-form interpolating polynomial built from divided differences."""

    @staticmethod
    def build(points: list[Dot], arg: Dot, n: int) -> NewtonPolynomial:
        """Build a degree-n Newton polynomial from the n+1 points nearest
        to arg.x. Terms are the divided differences times the product of
        (x - x_j) factors.
        NOTE(review): the factor slice `table[0][:i - 1]` looks shifted by
        one versus the classic Newton form (`[:i - 1]` vs `[:i]` relative
        to the difference order) — confirm against a known example.
        """
        table = NewtonPolynomial._make_table(points, arg, n)
        return NewtonPolynomial(
            [lambda x:table[1][0]] +
            [NewtonPolynomial._term(table[i][0], table[0][:i - 1])
             for i in range(2, len(table))]
        )

    @staticmethod
    def _term(va: float, vl: list[float]) -> callable:
        # One Newton term: coefficient va times prod(x - a) over nodes vl.
        return lambda x: va * prod(map(lambda a: (x - a), vl))

    @staticmethod
    def _make_table(points: list[Dot], arg: Dot, n: int) -> list[list[Dot]]:
        """Build the divided-difference table.

        Row 0 holds the node x values, row 1 the y values, and row k >= 2
        the order-(k-1) divided differences.
        """
        # Pick the n+1 points closest to arg.x, then re-sort them by x.
        base = sorted(
            sorted(points, key=lambda p: abs(p.x - arg.x))[:n + 1],
            key=lambda p: p.x
        )
        t = [[None for i in range(len(base))] for j in range(n + 2)]
        for i in range(len(t[0])):
            t[0][i], t[1][i] = base[i].x, base[i].y
        for i in range(2, len(t)):
            for j in range(len(base) - i + 1):
                t[i][j] = (t[i - 1][j] - t[i - 1][j + 1]) / \
                    (t[0][j] - t[0][j + i - 1])
        return t
class BiNewtonPolynomial(Polynomial):
    """Two-variable Newton interpolation.

    Built in two passes: for each selected y node, interpolate along x
    (producing one NewtonPolynomial per row); calling the result then
    interpolates those row values along y.
    """
    # List of Dot(y_node, NewtonPolynomial-over-x) pairs.
    __second_interp_set: list
    # Interpolation degree along y.
    __ny: float

    def __init__(self, temp: list, ny: int):
        self.__second_interp_set = temp
        self.__ny = ny

    @staticmethod
    def build(dots: list[list[Dot]], arg: Dot, nx: int, ny: int) -> BiNewtonPolynomial:
        """Build the interpolant from a table of Dots.

        *dots* has x nodes in its first row and y nodes in its first
        column (see __split_data); nx/ny are the degrees along each axis.
        The input is deep-copied because the node Dots are mutated below.
        """
        points = deepcopy(dots)
        xrow, ycol, matrix = BiNewtonPolynomial.__split_data(points)
        baseX, baseY = BiNewtonPolynomial.__get_bases(xrow, ycol, arg, nx, ny)
        # Extract the (ny+1) x (nx+1) sub-table of values covered by the
        # chosen x/y base nodes; t is indexed [y][x].
        t = [[None for i in range(nx + 1)] for j in range(ny + 1)]
        k = 0
        for i in range(xrow.index(baseX[0]), xrow.index(baseX[0]) + nx + 1):
            l = 0
            for j in range(ycol.index(baseY[0]), ycol.index(baseY[0]) + ny + 1):
                t[l][k] = matrix[i][j]
                l += 1
            k += 1
        second_set = []
        for i in range(len(t)):
            # Rewrite each row's Dots so x holds the x node and y holds the
            # table value, then interpolate the row along x.
            for j in range(len(baseX)):
                t[i][j].y = baseX[j].x
                t[i][j].x, t[i][j].y = t[i][j].y, t[i][j].x
            second_set += [
                Dot(
                    baseY[i].x,
                    NewtonPolynomial.build(t[i], Dot(arg.y, arg.x), nx)
                )
            ]
        return BiNewtonPolynomial(
            second_set,
            ny
        )

    @staticmethod
    def __split_data(matrix: list[list[Dot]]) -> tuple[list[Dot], list[Dot], list[list[Dot]]]:
        """Split the raw table into (x header row, y header column, values)."""
        xrow = matrix[0]
        matrix = matrix[1:]
        ycol = [row[0] for row in matrix]
        matrix = [row[1:] for row in matrix]
        return xrow, ycol, matrix

    @staticmethod
    def __get_bases(x: list[Dot], y: list[Dot], arg: Dot, nx: int, ny: int):
        """Pick the nx+1 x nodes nearest arg.x and ny+1 y nodes nearest
        arg.y, each re-sorted in ascending node order."""
        baseX = sorted(
            sorted(x, key=lambda p: abs(p.x - arg.x))[:nx + 1],
            key=lambda p: p.x
        )
        baseY = sorted(
            sorted(y, key=lambda p: abs(p.x - arg.y))[:ny + 1],
            key=lambda p: p.x
        )
        return baseX, baseY

    def __call__(self, arg: Dot) -> float:
        # Evaluate each row polynomial at arg.y, then interpolate those
        # results along y and evaluate at arg.x.
        t = [Dot(i.x, i.y(arg.y)) for i in self.__second_interp_set]
        return NewtonPolynomial.build(t, arg, self.__ny)(arg.x)
| migregal/bmstu-iu7-ca | lab_02/src/polynomial.py | polynomial.py | py | 3,795 | python | en | code | 0 | github-code | 36 |
10346160454 | import pandas as pd
from pulp import *
import itertools
# Transportation LP: ship beer from 2 warehouses to 5 bars at minimum cost.
# Costs per (warehouse, bar) route come from the spreadsheet.
df = pd.read_excel('beers.xlsx')
costs = df.\
assign(Warehouse_to_Bar = lambda x: x.Warehouse_to_Bar.astype(str)).\
set_index('Warehouse_to_Bar').\
to_dict()

warehouses = ['A','B']
bars = [str(x) for x in range(1,6)]

supply_ = [1000,4000]
demand_ = [500,900,1800,200,700]

supply = dict(zip(warehouses,supply_))
demand = dict(zip(bars, demand_))

# Every warehouse-bar pair is a candidate shipping route.
routes = list(itertools.product(warehouses,bars))

'''
dv : how many beers for every warehouse - bar pair
obj : minimize dv * cost
1) supply : sum of beers leaving a warehouse <= supply
2) demand : sum of beers reaching a bar == demand
'''

# instantiate minimization problem
prob = LpProblem('Beer_Transportation', LpMinimize)

# define decision variables (integer crate counts, lower bound 0)
decision_vars = LpVariable.dicts('routes',(warehouses,bars),0,None,LpInteger)

# define objective function: total shipping cost over all routes
prob += lpSum([decision_vars[w][b] * costs[w][b] for w,b in routes])

# define supply constrains: shipments out of a warehouse within stock
for w in warehouses:
    prob += lpSum([decision_vars[w][b] for b in bars]) <= supply[w]

# define demand constrains: each bar receives at least its demand
for b in bars:
    prob += lpSum([decision_vars[w][b] for w in warehouses]) >= demand[b]

# solve problem
prob.solve()

# The status of the solution is printed to the screen
print("Status:", LpStatus[prob.status])

# print out solution: only routes actually used (non-zero shipment)
for w in warehouses:
    for b in bars:
        if value(decision_vars[w][b]):
            print(f'from {w} to {b} - {value(decision_vars[w][b])}')
| Takfes/notes-operations-research | src/A.misc/03.pulp-transportation/pulp-transportation.py | pulp-transportation.py | py | 1,493 | python | en | code | 0 | github-code | 36 |
42682908850 | import eventlet
from Skull import ServerSkull
from Ezgame import Ezgame
from Vote import Vote
class Player:
    """A named participant identified by their socket id; new players
    start in the connected state."""

    def __init__(self, name, sid):
        self.name, self.sid, self.connected = name, sid, True
class Room:
    """Game-lobby coordinator.

    Tracks seated players and watchers, relays socket.io traffic between
    clients and the active game, runs at most one Vote at a time, and
    starts/ends the currently selected game.
    """

    def __init__(self, sio):
        self.sio = sio
        self.ev = "room"           # socket.io event name for lobby traffic
        self.playing = []          # seated Players (kept across disconnects)
        self.watching = []         # observers (dropped on disconnect)
        self.game = None           # active game instance, or None
        self.current_vote = None   # at most one Vote in flight
        sio.on('connect', self.on_connect)
        sio.on('disconnect', self.on_disconnect)
        sio.on('message', self.io_handler)
        self.proposed_game = None  # game name awaiting a "play" vote
        # Registry of playable games: name -> game class.
        self.game_dict = {}
        self.game_dict["skull"] = ServerSkull
        self.game_dict["nines"] = Ezgame

    def on_connect(self, sid, environ):
        """Send the current room state to a newly connected socket."""
        print('connect ', sid)
        self.sio.emit(self.ev, ("display", self.get_emit_data()), room=sid)
        print("emit data sent")

    def on_disconnect(self, sid):
        """Mark a seated player as disconnected; drop watchers entirely."""
        print('disconnect ', sid)
        for pp in self.playing:
            if pp.sid == sid:
                pp.connected = False
        for i in reversed(range(len(self.watching))):
            if self.watching[i].sid == sid:
                self.watching.pop(i)

    def io_handler(self, sid, data):
        """Dispatch an incoming 'message' either to the lobby or the game."""
        print("room::io_handler: ", data)
        if "room" in data:
            rd = data["room"]
            if "name" in rd:
                self.set_name(sid, rd["name"])
                return
            if rd == "play":
                self.ready(sid)
                return
            if "kwalexadmin" in rd:
                # Admin backdoor: start a game without a vote.
                self.start_game(rd["kwalexadmin"])
            if "ping" in rd:
                self.notify_main("pong", sid)
            if rd == "watch":
                self.watch(sid)
            if "poll" in rd:
                self.poll(rd["poll"], sid)
            if "gamevote" in rd:
                self.gamevote(rd["gamevote"], sid)
            if "vote" in rd:
                self.vote(sid, rd["vote"])
        # Bug fix: self.game_ev does not exist until the first game has
        # started, so guard on self.game before reading it.
        elif self.game is not None and self.game_ev in data:
            self.game.io_handler(sid, data)

    def find_player(self, sid=None, name=None):
        """Return the seated Player matching sid (preferred) or name, else None."""
        l = self.playing
        if sid is not None:
            m = [p for p in l if p.sid == sid]
        elif name is not None:
            m = [p for p in l if p.name == name]
        else:
            m = []
        if len(m) > 0:
            return m[0]
        else:
            return None

    def poll(self, text=None, sid=None):
        """Start a free-text yes/no poll among the seated players."""
        if text is None:
            self.notify_main("garbage poll", sid)
            return
        if self.current_vote is not None:
            self.notify_main("There's already a vote happening", sid)
            return
        if self.find_player(sid=sid) is None:
            self.notify_main("You aren't even playing yet", sid)
            return
        st = str(text)
        plist = [p.name for p in self.playing]
        self.current_vote = Vote(plist, "poll", st, self.poll_result, 24)
        self.send_vote_data()

    def gamevote(self, g, sid=None):
        """Start a vote to play game *g*; if it passes the game launches."""
        gs = str(g)
        print("gamevote", gs)
        if self.current_vote is not None:
            self.notify_main("There's already a vote happening", sid)
            return
        if gs not in self.game_dict:
            self.notify_main("what game are *you* trying to play?", sid)
            return
        if self.find_player(sid=sid) is None:
            self.notify_main("You aren't even playing yet", sid)
            return
        self.proposed_game = gs
        plist = [p.name for p in self.playing]
        self.current_vote = Vote(plist, "game", "Vote to play: {}".format(gs), self.play_result, 24)
        self.send_vote_data()

    def vote(self, sid, v):
        """Record a yes/no vote from the player behind *sid*."""
        if self.current_vote is None:
            return
        if v == "yes":
            tf = True
        elif v == "no":
            tf = False
        else:
            print("bad vote: ", sid, v)
            self.notify_main("bad vote...", sid)
            return
        fp = self.find_player(sid=sid)
        if fp is None:
            self.notify_main("you aren't playing", sid)
            return
        r = self.current_vote.vote(fp.name, tf)
        if r == False:
            self.notify_main("you can't vote", sid)
        self.send_vote_data()

    def send_vote_data(self):
        """Broadcast the current vote's state to everyone in the room."""
        if self.current_vote is None:
            return
        dd = self.current_vote.data()
        self.sio.emit(self.ev, ("voteData", dd))

    def poll_result(self, res):
        """Vote callback for polls: publish the final tally, clear the vote."""
        self.send_vote_data()
        self.current_vote = None
        print("poll_result: ", res)

    def play_result(self, res):
        """Vote callback for game votes: start the proposed game if passed."""
        self.send_vote_data()
        self.current_vote = None
        if res == True:
            self.start_game()

    def set_name(self, sid, longname):
        """Register or change a client's display name (truncated to 8 chars).

        Handles four cases: brand-new client, rename of a watcher,
        reclaiming a disconnected player's seat, and duplicate names.
        """
        name = longname[0:8]
        mws = [cp for cp in self.watching if cp.sid == sid]
        mps = [cp for cp in self.playing if cp.sid == sid]
        mwn = [cp for cp in self.watching if cp.name == name]
        mpn = [cp for cp in self.playing if cp.name == name]
        matched_watcher = mws[0] if len(mws) > 0 else None
        matched_player = mps[0] if len(mps) > 0 else None
        if len(mwn) > 0:
            self.notify_main("someone already got that one", sid)
            return
        if matched_player:
            self.notify_main("can't change it now", sid)
            return
        elif len(mpn) > 0:
            if mpn[0].connected == False:
                # The name belongs to a disconnected player: let this
                # socket reclaim that seat.
                matched_player = mpn[0]
            else:
                self.notify_main("name taken you idiots", sid)
                return
        if not matched_player and not matched_watcher:
            self.notify_main("logging in as: {}".format(name), sid)
            self.watching.append(Player(name, sid))
            if self.game:
                self.emit_start_game(sid)
        elif not matched_player and matched_watcher:
            self.notify_main("name changed to: {}".format(name), sid)
            matched_watcher.name = name
        elif matched_player and not matched_watcher:
            matched_player.sid = sid
            matched_player.connected = True
            self.notify_main("logging back in as: {}".format(name), sid)
        self.emit_display()

    def ready(self, sid):
        """Seat a watcher at the table (or re-sync an already-seated player)."""
        print("ready: ", sid)
        if self.find_player(sid=sid) is not None:
            # Already seated: just (re)send the game state if one is running.
            if self.game:
                self.emit_start_game(sid)
            return
        for i in reversed(range(len(self.watching))):
            if self.watching[i].sid == sid:
                if self.game:
                    # Bug fix: this used to call self.notifyMain(sid, msg) —
                    # a method that does not exist, and with swapped args.
                    self.notify_main("Wait for the current game to end.", sid)
                    return
                else:
                    self.playing.append(self.watching[i])
                    self.watching.pop(i)
        self.emit_display()

    def watch(self, sid):
        """Move a seated player back to the watchers."""
        for i in reversed(range(len(self.playing))):
            if self.playing[i].sid == sid:
                cp = self.playing[i]
                self.playing.pop(i)
                self.watching.append(cp)
        self.emit_display()

    def start_game(self, gamename=None):
        """Instantiate and start the named (or last voted-for) game."""
        self.current_vote = None
        gamename = gamename if gamename else self.proposed_game
        if gamename == "skull":
            self.game_ev = gamename
            self.game = ServerSkull(self)
        elif gamename == "nines":
            self.game_ev = gamename
            self.game = Ezgame(self)
        if self.game:
            for cp in self.playing:
                self.emit_start_game(cp.sid)
            self.game.start()
        self.emit_display()

    def emit_start_game(self, sid):
        """Send one client the 'startGame' event plus its initial game data."""
        gamename = self.game_ev
        iData = self.game.initialData(sid)
        fData = ["startGame", gamename]
        fData.extend(iData)
        self.sio.emit(self.ev, tuple(fData), room=sid)

    def end_game(self):
        """Tear down the active game and tell every client it is over."""
        self.game = None
        self.sio.emit(self.ev, "endGame")

    def notify_main(self, msg, sid=None):
        """Show *msg* in a client's main area (everyone if sid is None)."""
        self.sio.emit(self.ev, ("notifyMain", msg), room=sid)

    def notify_vote(self, msg, sid=None):
        """Show *msg* in the vote panel."""
        self.sio.emit(self.ev, ("notifyVote", msg), room=sid)

    def notify_side(self, sid=None, msg=None):
        """Show *msg* in the side panel."""
        self.sio.emit(self.ev, ("notifySide", msg), room=sid)

    def get_emit_data(self):
        """Snapshot of who is playing/watching and which game is active."""
        output = {"playing": [cp.name for cp in self.playing],
                  "watching": [cp.name for cp in self.watching],
                  "game": self.game.ev if self.game else None}
        return output

    def emit_display(self):
        """Broadcast the room snapshot to every client."""
        self.sio.emit(self.ev, ("display", self.get_emit_data()))

    def emit_player(self, player, d):
        """Send game data to a single player."""
        self.sio.emit(self.game_ev, d, room=player.sid)

    def emit_game(self, d):
        """Broadcast game data to every client."""
        self.sio.emit(self.game_ev, d)
| marvqin/zoomhanabi | Room.py | Room.py | py | 10,755 | python | en | code | 0 | github-code | 36 |
8882346001 | import simplejson as json
from datetime import datetime
DEBUG = False
# Zigbee catch-all decoder, just adds the following properties:
# Only changes topic:
# csn-zigbee/acp_id -> acp/acp_id/csn-zigbee
class Decoder(object):
    """Catch-all decoder for Zigbee MQTT messages.

    test() claims any message whose topic mentions "csn-zigbee";
    decode() parses the payload JSON and returns it unchanged.
    """
    def __init__(self, settings=None):
        # settings is accepted for interface compatibility but unused.
        print(" zigbee_catchall init()")
        return

    def test(self, topic, message_bytes):
        """Return True if this decoder should handle the message (topic
        contains "csn-zigbee"); the payload itself is not inspected."""
        if DEBUG:
            print("zigbee_catchall test() {} {}".format(topic, message_bytes))
        #regular topic format:
        #cambridge-sensor-network/devices/zigbee_catchall-test-3/up
        if ("csn-zigbee" in topic): #check if application name appears in the topic
            if DEBUG:
                print("zigbee_catchall test() success")
            return True
        #elif ("dev_id" in msg): #dev_id for example, can be any other key
        #    msg=json.loads(message.payload)
        #    if (decoder_name in msg["dev_id"]):
        #        return True
        #    #elif...
        #    else:
        #        return False
        if DEBUG:
            print("zigbee_catchall test() fail")
        return False

    def decode(self, topic, message_bytes):
        """Parse the payload JSON and return it as a dict.

        NOTE(review): output_topic is computed below but never returned or
        used — presumably the caller derives the publish topic itself, or
        a later version of this decoder will return it. Confirm before
        removing.
        """
        inc_msg = str(message_bytes,'utf-8')
        if DEBUG:
            print("zigbee_catchall decode str {}".format(inc_msg))
        # Zigbee topic is "csn-zigbee/<acp_id>[/<other stuff>]"
        topic_parts = topic.split('/',2) # split into at most 3 parts
        # Rewrite "csn-zigbee/<acp_id>/..." as "acp/<acp_id>/csn-zigbee/...".
        output_topic = "acp/"+topic_parts[1]+"/"+topic_parts[0]
        if len(topic_parts) > 2:
            output_topic += "/" + topic_parts[2]
        # For this version of the decoder the original message from
        # deconz2acp will be published unchanged.
        msg_dict = json.loads(message_bytes)
        return msg_dict
# end zigbee_catchall
| AdaptiveCity/acp_local_mqtt | acp_decoders/decoders/zigbee_catchall.py | zigbee_catchall.py | py | 1,788 | python | en | code | 1 | github-code | 36 |
73819276585 | from transformers import BertForTokenClassification, BertJapaneseTokenizer, get_linear_schedule_with_warmup
from flask import Flask, render_template, request
import argparse
import json
from predict import predict
import torch
import sys
from pathlib import Path
base_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(base_dir))
from utils import iob2json
device = 'cuda' if torch.cuda.is_available() else 'cpu'
app = Flask(__name__)
@app.route("/", methods=['POST'])
def index():
    """Named-entity recognition endpoint.

    Expects form field "text" holding JSON of the shape
    {"0": sentence0, "1": sentence1, ...}. Each sentence is tokenized,
    tagged by the model, and the IOB tags are decoded into entity spans
    returned as JSON.
    """
    word = request.form["text"]
    input_json = json.loads(word)
    # Rebuild the sentence list in index order from the JSON object.
    input_x = [input_json[str(i)] for i in range(len(input_json))]
    input_x = [tokenizer.tokenize(t) for t in input_x]
    # Prepend the [CLS] token and map tokens to vocabulary ids.
    input_x = [tokenizer.convert_tokens_to_ids(['[CLS]'] + x) for x in input_x]
    tags = predict(model, input_x, device)
    labels = [[id2label[t] for t in tag] for tag in tags]
    # Drop the leading [CLS] token so tokens align with predicted labels.
    input_x = [tokenizer.convert_ids_to_tokens(t)[1:] for t in input_x]
    output = [zip(x, l) for x, l in zip(input_x, labels)]
    output = [iob2json.decode_iob(i) for i in output]
    return iob2json.create_json(output)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train BERT')
parser.add_argument('--model_dir', type=str, help='data path')
args = parser.parse_args()
tokenizer = BertJapaneseTokenizer.from_pretrained("bert-base-japanese-char")
with open(args.model_dir + '/label_vocab.json', 'r') as f:
label_vocab = json.load(f)
id2label = {v:k for k, v in label_vocab.items()}
model = BertForTokenClassification.from_pretrained('bert-base-japanese-char', num_labels=len(label_vocab))
model_path = args.model_dir + '/final.model'
model.load_state_dict(torch.load(model_path))
app.run(port='8000', host='0.0.0.0', debug=True)
| ujiuji1259/NER | BERT/api.py | api.py | py | 1,783 | python | en | code | 0 | github-code | 36 |
30800987858 | print('1')
#from src.__init__ import main
from flask import Flask, request
from flask_restx import Api, Namespace, fields, Resource
## local
from settings import model_path, vocab_path, cnn_path
from torch import nn
# from src.controller.analyzeController import Analyze
from src.controller.keywordController import Keyword
from src.controller.testController import Sample
from src.controller.divideHighlightController import Divide_Highlight
if __name__ == "__main__" :
from load_models import koBERT_CNN_Classifier
prediction = koBERT_CNN_Classifier(model_path=model_path, vocab_path=vocab_path, cnn_path=cnn_path)
# app.run(debug=True, host='0.0.0.0')
class Classifier(nn.Module):
def __init__(self,
hidden_size=768,
num_classes=8,
dr_rate=0.0):
super(Classifier, self).__init__()
# 16, 2848
# 32, 5696
# 1312
self.kernel_num = 16
self.conv1d_maxpooling1 = nn.Sequential(
nn.Conv1d(hidden_size, self.kernel_num, 4, stride=2),
nn.ReLU(),
nn.MaxPool1d(2, 1),
nn.Dropout(dr_rate)
)
self.conv1d_maxpooling2 = nn.Sequential(
nn.Conv1d(hidden_size, self.kernel_num, 8, stride=2),
nn.ReLU(),
nn.MaxPool1d(2, 1),
nn.Dropout(dr_rate)
)
self.conv1d_maxpooling3 = nn.Sequential(
nn.Conv1d(hidden_size, self.kernel_num, 16, stride=2),
nn.ReLU(),
nn.MaxPool1d(2, 1),
nn.Dropout(dr_rate)
)
self.classifier = nn.Linear(1312, num_classes)
def forward(self, x) :
out1 = self.conv1d_maxpooling1(x.transpose(1, 2))
out2 = self.conv1d_maxpooling2(x.transpose(1, 2))
out3 = self.conv1d_maxpooling3(x.transpose(1, 2))
out = torch.cat((out1, out2, out3), 2)
out = out.reshape(out.size(0), -1)
return self.classifier(out)
#from load_models import koBERT_CNN_Classifier
from settings import model_path, cnn_path, vocab_path
from torch import nn
import torch
from src.preprocessor.textPreprocessor import textPreprocessor
print('2')
app = Flask(__name__)
api = Api(
app,
version='0.1',
title="PS HELPER API Server",
description="PS HELPER API 문서입니다.",
terms_url="/",
contact_url="donghoon149@gmail.com / hmcck27@gmail.com",
license="MIT"
)
Analyze = Namespace(
name="Analyze Algorithm",
description='문제 지문을 받고 적절한 <strong>알고리즘 태그</strong>를 반환합니다.',
)
api.add_namespace(Divide_Highlight, '/api/v1/divide_highlight')
api.add_namespace(Keyword, '/api/v1/keyword')
api.add_namespace(Analyze, '/api/v1/analyze')
api.add_namespace(Sample, '/api/v1/test')
# Model 객체 생성
analyze_fields = Analyze.model('Problem', {
'problem_id': fields.Integer(description='문제 번호', required=True, example="1007"),
'content': fields.String(description='문제 지문', required=True,
example="평면 상에 N개의 점이 찍혀있고, 그 점을 집합 P라고 하자. 하지만 집합 P의 벡터 매칭은 벡터의 집합인데, 모든 벡터는 집합 P의 한 점에서 시작해서, 또 다른 점에서 끝나는 벡터의 집합이다. 또, P에 속하는 모든 점은 한 번씩 쓰여야 한다.V에 있는 벡터의 개수는 P에 있는 점의 절반이다.평면 상의 점이 주어졌을 때, 집합 P의 벡터 매칭에 있는 벡터의 합의 길이의 최솟값을 출력하는 프로그램을 작성하시오."),
'input': fields.String(description='문제 입력사항', required=False,
example="첫째 줄에 테스트 케이스의 개수 T가 주어진다. 각 테스트 케이스는 다음과 같이 구성되어있다. 테스트 케이스의 첫째 줄에 점의 개수 N이 주어진다. N은 짝수이다. 둘째 줄부터 N개의 줄에 점의 좌표가 주어진다. N은 20보다 작거나 같은 자연수이고, 좌표는 절댓값이 100,000보다 작거나 같은 정수다. 모든 점은 서로 다르다."),
})
algorithm_fields = fields.Wildcard(fields.String)
analyze_response = Analyze.model('Problem_response', {
'problem_id': fields.String(description='문제 번호', required=True, example="1007"),
'problem_url': fields.String(description="문제 url", required=True, example="www.psHelper.de"),
'algorithm_type': algorithm_fields
})
''' test '''
print('sdfsdfsdfsdf')
@Analyze.route('')
class AnalyzeController(Resource):
@Analyze.expect(analyze_fields)
@Analyze.response(201, "Success", analyze_response)
def post(self):
content = request.json.get('content')
text_preprocessor = textPreprocessor()
'''
TO-DO
0. preprocess text
1. analyze the description
'''
preprocessed_text = text_preprocessor.preprocessing(content)
# tag = TagAnalyzer.findTag(preprocessed_text)
tag,ratio = prediction.predict(preprocessed_text)
# print(content)
return {
'problem_id': request.json.get('problem_id'),
'problem_url': "https://www.acmicpc.net/problem/" + str(request.json.get('problem_id')),
'algorithm_type' : tag,
'algorithm_ratio' : ratio
}, 201
print('sdfsdfwerwer')
# class Classifier(nn.Module):
# def __init__(self,
# hidden_size=768,
# num_classes=8,
# dr_rate=0.0):
# super(Classifier, self).__init__()
# # 16, 2848
# # 32, 5696
# # 1312
# self.kernel_num = 16
# self.conv1d_maxpooling1 = nn.Sequential(
# nn.Conv1d(hidden_size, self.kernel_num, 4, stride=2),
# nn.ReLU(),
# nn.MaxPool1d(2, 1),
# nn.Dropout(dr_rate)
# )
# self.conv1d_maxpooling2 = nn.Sequential(
# nn.Conv1d(hidden_size, self.kernel_num, 8, stride=2),
# nn.ReLU(),
# nn.MaxPool1d(2, 1),
# nn.Dropout(dr_rate)
# )
# self.conv1d_maxpooling3 = nn.Sequential(
# nn.Conv1d(hidden_size, self.kernel_num, 16, stride=2),
# nn.ReLU(),
# nn.MaxPool1d(2, 1),
# nn.Dropout(dr_rate)
# )
#
# self.classifier = nn.Linear(1312, num_classes)
#
# def forward(self, x) :
# out1 = self.conv1d_maxpooling1(x.transpose(1, 2))
# out2 = self.conv1d_maxpooling2(x.transpose(1, 2))
# out3 = self.conv1d_maxpooling3(x.transpose(1, 2))
# out = torch.cat((out1, out2, out3), 2)
# out = out.reshape(out.size(0), -1)
# return self.classifier(out)
#if __name__ == "__main__":
# app.run(debug=True, host='0.0.0.0')
| hmcck27/pshelper-server | src/app_for_server.py | app_for_server.py | py | 6,957 | python | ko | code | null | github-code | 36 |
29391322952 | from collections import defaultdict
class Solution:
def longestStrChain(self, words: List[str]) -> int:
n = len(words)
words.sort(key=lambda word: len(word))
graph = defaultdict(set)
for i, word in enumerate(words):
for j in range(len(word)):
graph[word[:j] + word[j + 1:]].add(i)
dists = [1] * n
res = 1
for u in range(n):
for v in graph[words[u]]:
dists[v] = max(dists[v], dists[u] + 1)
res = max(res, dists[v])
return res
| AnotherPianist/LeetCode | 1129-longest-string-chain/1129-longest-string-chain.py | 1129-longest-string-chain.py | py | 587 | python | en | code | 1 | github-code | 36 |
21254500663 | import json
import boto3
from datetime import datetime
current_date_time = datetime.now()
sqs = boto3.resource('sqs', region_name='us-east-1')
def lambda_handler(event, context):
queue = sqs.get_queue_by_name (QueueName='CustomerOrders')
date_time = current_date_time.strftime("%d/%m/%Y %H:%M:%S")
message = ("The current date and time at point of trigger was " + str(date_time) + ".")
response = queue.send_message (MessageBody=message)
return {
'statusCode': 200,
'body': json.dumps(message)
} | tmachek98/python-boto3 | Lambda.py | Lambda.py | py | 563 | python | en | code | 0 | github-code | 36 |
32630908689 | import os
import subprocess
path = os.getcwd()
# train the new model
qu_train_data = path + "/data/quarterly.csv"
qu_model = "linear_quarterly"
exec_cmd1 = "python3 " + path + "/train_linear.py " + qu_train_data + " ./data/test.csv " + qu_model
exec_cmd2 = "python3 " + path + "/train_log.py " + qu_train_data + " ./data/test.csv " + qu_model
subprocess.call([exec_cmd1, exec_cmd2], shell=True) | chivesab/Vending-Machine-Management-System | ML/cronjob_quarterly.py | cronjob_quarterly.py | py | 396 | python | en | code | 0 | github-code | 36 |
36266836523 | # Create a class Car with attributes Model, year, and price
# and a method cost() for displaying price.
# Create two instances of the class and call the method for each instance
class car:
def __init__(self,mod,yr,prz):
self.Model=mod
self.year=yr
self.price=prz
def cost(self):
print("cost: ",self.price)
c1=car("jk4302",2022,10000000)
c2=car("san",2021,5000000)
c1.cost()
c2.cost() | JK432/Car-class | main.py | main.py | py | 409 | python | en | code | 0 | github-code | 36 |
2856409659 | from matplotlib import pyplot as plt
# 题目:某月气温变化表
# 知识点总结:
# 1.x,y的输入
# 2.绘制 plt.plot()
# 3.设置图片大小 plt.figure()
# 4,保存到本地 plt.savefig()
# 5.描述信息 xlabel() ylable() title()
# 6.调整x,y的间距 xticks(),yticks()
# 7.线条的样式 linestyle=
# 8.标记出特殊的点
# 9.添加水印
# 设置图片大小
plt.figure(figsize=(16, 8), dpi=80)
# 左闭右开
x = range(2, 26, 2)
y = [15, 13, 14.5, 17, 20, 25, 26, 26, 24, 22, 18, 15]
# 传入x,y通过plot绘制出折线图
plt.plot(x, y)
# 设置x,y轴的刻度
# plt.xticks(x)
# plt.xticks(range(2, 25))
num = []
for i in range(2, 25):
num.append(i)
num.append(i + 0.5)
# plt.xticks(num[::3])
num = [i / 2 for i in range(4, 50)]
plt.xticks(num)
plt.yticks(range(min(y), max(y) + 1))
# 保存
# plt.savefig("./SaveDate/p1.png")
# 输出
plt.show()
# 关于range:
# 在 Python 3 中,range() 函数返回的对象并不是列表,而是一个类型为 range 的可迭代对象(iterable),
# 它按需生成指定范围内的整数。这种方式称为“惰性求值”(lazy evaluation),它可以节省空间和时间。
# 因为 range 对象并不是列表,所以它并不占用与列表相同的内存空间。
# 相反,它只需要存储 start、stop 和 step 这三个参数,以及根据这些参数生成整数序列的算法。
# 当使用 range 对象时,Python 会根据需要逐个生成序列中的元素,而不是一次性生成所有元素并存储在内存中。
# 如果您需要将 range 对象转换为列表,可以使用 list() 函数将其转换为列表
| srguf/DataAnalysis | matplotlib/折线图plot/单线/折线图.py | 折线图.py | py | 1,705 | python | zh | code | 0 | github-code | 36 |
34796334569 | from django.urls import resolve, reverse
from .. import views
from .test_recipes_base import RecipesTestBase
class RecipesSearchViewTest(RecipesTestBase):
def test_search_view_function_is_correct(self):
view = resolve(reverse('recipes:search'))
self.assertIs(view.func, views.search)
def test_search_loads_correct_template(self):
response = self.client.get(reverse('recipes:search') + '?q=test')
self.assertTemplateUsed(response, 'recipes/pages/search.html')
def test_search_raises_404_if_no_search_term(self):
response = self.client.get(reverse('recipes:search'))
self.assertEqual(response.status_code, 404)
def test_search_term_is_on_page_title_and_escaped(self):
response = self.client.get(reverse('recipes:search') + '?q=<test>')
self.assertIn('Search for "<test>"',
response.content.decode('utf-8'))
def test_search_can_find_by_title(self):
title1 = 'This is recipe one'
title2 = 'This is recipe two'
recipe1 = self.create_recipe(
slug='one', title=title1, author={'username': 'one'})
recipe2 = self.create_recipe(
slug='two', title=title2, author={'username': 'two'})
search_url = reverse('recipes:search')
response1 = self.client.get(f'{search_url}?q={title1}')
response2 = self.client.get(f'{search_url}?q={title2}')
response3 = self.client.get(f'{search_url}?q=this')
self.assertIn(recipe1, response1.context['recipes'])
self.assertNotIn(recipe2, response1.context['recipes'])
self.assertIn(recipe2, response2.context['recipes'])
self.assertNotIn(recipe1, response2.context['recipes'])
self.assertIn(recipe1, response3.context['recipes'])
self.assertIn(recipe2, response3.context['recipes'])
| giovcandido/django-course-project1 | recipes/tests/test_recipes_search_view.py | test_recipes_search_view.py | py | 1,872 | python | en | code | 0 | github-code | 36 |
74644166183 | # Updated code for part 1 in order to get part 2
# and didn't save copy. Only have Part II code
with open('d8p1.txt', 'r') as f:
data = [p.split() for p in f.read().strip().split('\n')]
accumulator = 0
visited = {}
start = 0
next_code = 1
stop_code = False
last_index = len(data) - 1
occurrances = 0
while not stop_code:
if start == last_index:
stop_code = True
instruction, value = data[start][0], int(data[start][1])
if instruction in ["jmp", "nop"]:
occurrances += 1
if instruction == 'jmp' and occurrances == next_code:
instruction = 'nop'
elif instruction == "nop" and occurrances == next_code:
instruction = 'jmp'
if start in visited:
visited = {}
next_code += 1
accumulator = 0
start = 0
occurrances = 0
else:
visited[start] = 1
if instruction == 'acc':
start += 1
accumulator += value
elif instruction == 'jmp':
start += value
else:
start += 1
print(f'value of accumulator: {accumulator}')
| pdotneff/AdventOfCode | aoc_2020/day8/day8_part2.py | day8_part2.py | py | 1,134 | python | en | code | 0 | github-code | 36 |
1783613538 | import sqlite3
import time
from telethon import TelegramClient
from telethon import sync, events
import re
import json
db = sqlite3.connect('Account.db')
cur = db.cursor()
x = 1
m = 0
while(True):
if x == 23:
print("Всего добыто:")
print(m)
break
cur.execute(f"SELECT PHONE FROM Account WHERE ID = '{x}'")
time.sleep(0.4)
Phone = str(cur.fetchone()[0])
print("Входим в аккаунт: " + Phone)
cur.execute(f"SELECT API_ID FROM Account WHERE ID = '{x}'")
time.sleep(0.4)
api_id = str(cur.fetchone()[0])
cur.execute(f"SELECT API_HASH FROM Account WHERE ID = '{x}'")
time.sleep(0.4)
api_hash = str(cur.fetchone()[0])
session = str("anon" + str(x))
client = TelegramClient(session, api_id, api_hash)
client.start()
dlgs = client.get_dialogs()
for dlg in dlgs:
if dlg.title == 'LTC Click Bot':
tegmo = dlg
client.send_message('LTC Click Bot', "/balance")
time.sleep(3)
msgs = client.get_messages(tegmo, limit=1)
for mes in msgs:
str_a = str(mes.message)
zz = str_a.replace('Available balance: ', '')
qq = zz.replace(' LTC', '')
print(qq)
waitin = float(qq)
m = m + waitin
#print(m)
x = x + 1
time.sleep(1)
| Black-Triangle-code/Telegram_coin_bot | balance.py | balance.py | py | 1,310 | python | en | code | 123 | github-code | 36 |
71903174184 | #! /usr/bin/env python
import matplotlib.pyplot as plt
from scipy.fftpack import fft
from scipy.io import wavfile # get the api
from scipy.signal import fftconvolve, convolve, stft, butter
import numpy as np
from scipy import signal
import warnings
warnings.filterwarnings("ignore")
from numpy import array, diff, where, split
from scipy import arange
# fs, data = wavfile.read('Ding.wav') # load OG file
# a = data.T[0]
# fs2, data2 = wavfile.read('Long.wav') # load the data
# a2 = data2.T[0]
# d = fftconvolve(a, a2)
# print(d.shape)
# for i in range(len(d)):
# if d[i] > 0.85: #Tune this for the DING
# print('Do something')
# break
# plt.plot(d)
# plt.show()
#import keyboard
def findPeak(magnitude_values, noise_level=2000):
splitter = 0
# zero out low values in the magnitude array to remove noise (if any)
magnitude_values = np.asarray(magnitude_values)
low_values_indices = magnitude_values < noise_level # Where values are low
magnitude_values[low_values_indices] = 0 # All low values will be zero out
indices = []
flag_start_looking = False
both_ends_indices = []
length = len(magnitude_values)
for i in range(length):
if magnitude_values[i] != splitter:
if not flag_start_looking:
flag_start_looking = True
both_ends_indices = [0, 0]
both_ends_indices[0] = i
else:
if flag_start_looking:
flag_start_looking = False
both_ends_indices[1] = i
# add both_ends_indices in to indices
indices.append(both_ends_indices)
return indices
def extractFrequency(indices, freq_bins, freq_threshold=2):
extracted_freqs = []
for index in indices:
freqs_range = freq_bins[index[0]: index[1]]
avg_freq = round(np.average(freqs_range))
if avg_freq not in extracted_freqs:
extracted_freqs.append(avg_freq)
# group extracted frequency by nearby=freq_threshold (tolerate gaps=freq_threshold)
group_similar_values = split(extracted_freqs, where(diff(extracted_freqs) > freq_threshold)[0]+1 )
# calculate the average of similar value
extracted_freqs = []
for group in group_similar_values:
extracted_freqs.append(round(np.average(group)))
#print("freq_components", extracted_freqs)
return extracted_freqs
import pyaudio
ding_left = np.load('ding_select_floor2_left_mic.npy')
ding_right = np.load('ding_select_floor7_right_mic.npy')
CHUNK = 4096 # number of data points to read at a time
RATE = 48000 # time resolution of the recording device (Hz)
p=pyaudio.PyAudio() # start the PyAudio class
stream=p.open(format=pyaudio.paInt16,channels=1,rate=RATE,input=True,
frames_per_buffer=CHUNK) #uses default input device
while 1:
data_buffer = np.array([])
# create a numpy array holding a single read of audio data
for i in range(10): #to it a few times just to see
data = np.frombuffer(stream.read(CHUNK),dtype=np.int16)
data_buffer = np.concatenate([data_buffer, data])
fs = RATE
# f1, t1, ding_left2 = signal.stft(ding_left, fs, nperseg=1000)
# f2, t2, ding_right2 = signal.stft(ding_right, fs, nperseg=1000)
# f,t,data_buffer2= signal.stft(data_buffer, fs, nperseg=1000)
number_samples = len(data_buffer)
freq_bins = arange(number_samples) * RATE/number_samples
#ding_left2 = fft(ding_left)
#ding_right2 = fft(ding_right)
data_buffer_fft = fft(data_buffer)
#data_buffer_fft = np.fft.fftfreq(len(data_buffer), data_buffer)
#print(data_buffer2)
normalization_data = data_buffer_fft/number_samples
magnitude_values = normalization_data[range(len(data_buffer_fft)//2)]
magnitude_values = np.abs(magnitude_values)
indices = findPeak(magnitude_values=magnitude_values, noise_level=100)
frequencies = extractFrequency(indices, freq_bins)
#print(frequencies)
# amp = 2 * np.sqrt(2)
# plt.pcolormesh(t1, f1, np.abs(ding_left), vmin=0)
# plt.pcolormesh(t2, f2, np.abs(ding_right), vmin=0)
# plt.ylabel('Frequency [Hz]')
# plt.xlabel('Time [sec]')
# plt.show()
#x = np.abs(data_buffer2).mean()
x = max(frequencies)
#x = x/1000
print(x)
if x > 750 and x < 800:
print("RIGHT DING MAYBE")
# if x > 270 and x < 350:
# print("LEFT DING MAYBE")
# if x > 1300 and x < 1400:
# print("RIGHT DING MAYBE")
# if x > 500 and x < 550:
# print("LEFT DING MAYBE")
#print(np.abs(data_buffer).max())
# d_left = convolve(ding_left, data_buffer)
# d_right = convolve(ding_right, data_buffer)
# dlmax = d_left.mean()
# drmax = d_right.mean()
#print("left ding is:" +str(dlmax) + "right ding is:" +str(drmax))
#print("right new is:" + str(d_right_fft.mean()))
#FLOOR 7
# if dlmax > 20173224741.999992:
# print('Left DING')
# if drmax > 30888468567.000004:
# print('Right DING')
# if dlmax > 10008361056.999992:
# print('Left DING')
# if drmax > 2000511377.789566:
# print('Right DING')
# data_buffer = np.load('ding2.npy')[73000: 130000]
# np.save('ding_select.npy', data_buffer)
# plt.plot(data_buffer)
# plt.show()
# d = fftconvolve(a, data)
# plt.plot(d)
# print(d.max())
# plt.show()
# close the stream gracefully
stream.stop_stream()
stream.close()
p.terminate() | buoyancy99/BobaBot | voice_utils/src/old_detection.py | old_detection.py | py | 5,313 | python | en | code | 2 | github-code | 36 |
8446584828 |
import warnings
from cupy import testing
import cupyx.scipy.signal.windows as cu_windows
import pytest
from pytest import raises as assert_raises
try:
import scipy.signal.windows as cpu_windows # NOQA
import scipy.fft # NOQA
except ImportError:
pass
# (window name, extra shape parameters) pairs covering every window type
# exercised in this module; consumed by the ``window_info`` parametrization
# later in the file.
window_funcs = [
    ('boxcar', ()),
    ('triang', ()),
    ('parzen', ()),
    ('bohman', ()),
    ('blackman', ()),
    ('nuttall', ()),
    ('blackmanharris', ()),
    ('flattop', ()),
    ('bartlett', ()),
    ('barthann', ()),
    ('hamming', ()),
    ('kaiser', (1,)),
    ('gaussian', (0.5,)),
    ('general_gaussian', (1.5, 2)),
    ('chebwin', (1,)),
    ('cosine', ()),
    ('hann', ()),
    ('exponential', ()),
    ('taylor', ()),
    ('tukey', (0.5,)),
]
class TestBartHann:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
    def test_basic(self, xp, scp):
        """Bartlett-Hann window: symmetric, odd-length, and periodic forms."""
        barthann = scp.signal.windows.barthann
        return barthann(6, sym=True), barthann(7), barthann(6, False)
class TestBartlett:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
    def test_basic(self, xp, scp):
        """Bartlett window: even, odd, and periodic forms."""
        bartlett = scp.signal.windows.bartlett
        return bartlett(6), bartlett(7), bartlett(6, False)
class TestBlackman:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
    def test_basic(self, xp, scp):
        """Blackman window in periodic and symmetric variants."""
        blackman = scp.signal.windows.blackman
        periodic = (blackman(6, sym=False), blackman(7, sym=False))
        symmetric = (blackman(6), blackman(7, True))
        return periodic + symmetric
class TestBlackmanHarris:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
    def test_basic(self, xp, scp):
        """Blackman-Harris window in periodic and symmetric variants."""
        bh = scp.signal.windows.blackmanharris
        return bh(6, False), bh(7, sym=False), bh(6), bh(7, sym=True)
class TestTaylor:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
    def test_normalized(self, xp, scp):
        """Tests windows of small length that are normalized to 1. See the
        documentation for the Taylor window for more information on
        normalization.
        """
        w1 = scp.signal.windows.taylor(1, 2, 15)
        w2 = scp.signal.windows.taylor(6, 2, 15)
        return w1, w2

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
    def test_non_normalized(self, xp, scp):
        """Test windows of small length that are not normalized to 1. See
        the documentation for the Taylor window for more information on
        normalization.
        """
        return (scp.signal.windows.taylor(5, 2, 15, norm=False),
                scp.signal.windows.taylor(6, 2, 15, norm=False))

    @testing.numpy_cupy_allclose(scipy_name='scp')
    def test_correctness(self, xp, scp):
        """This test ensures the correctness of the implemented Taylor
        Windowing function. A Taylor Window of 1024 points is created, its FFT
        is taken, and the Peak Sidelobe Level (PSLL) and 3dB and 18dB bandwidth
        are found and checked.

        A publication from Sandia National Laboratories was used as reference
        for the correctness values [1]_.

        References
        -----
        .. [1] Armin Doerry, "Catalog of Window Taper Functions for
               Sidelobe Control", 2017.
               https://www.researchgate.net/profile/Armin_Doerry/publication/316281181_Catalog_of_Window_Taper_Functions_for_Sidelobe_Control/links/58f92cb2a6fdccb121c9d54d/Catalog-of-Window-Taper-Functions-for-Sidelobe-Control.pdf
        """
        # Window length and a much longer zero-padded FFT for fine
        # frequency resolution of the sidelobe structure.
        M_win = 1024
        N_fft = 131072
        # Set norm=False for correctness as the values obtained from the
        # scientific publication do not normalize the values. Normalizing
        # changes the sidelobe level from the desired value.
        w = scp.signal.windows.taylor(
            M_win, nbar=4, sll=35, norm=False, sym=False)
        f = scp.fft.fft(w, N_fft)
        # Magnitude spectrum in dB, normalized to the mainlobe peak.
        spec = 20 * xp.log10(xp.abs(f / xp.amax(f)))

        # First index where the spectrum turns back upward marks the edge
        # of the mainlobe; the peak sidelobe is searched outside it.
        first_zero = xp.argmax(xp.diff(spec) > 0)
        PSLL = xp.amax(spec[first_zero:-first_zero])

        # Bandwidths expressed in bins of the original window length; the
        # thresholds are the exact dB values of amplitude ratios 1/sqrt(2)
        # (-3.0103 dB) and 1/8 (-18.0618 dB).
        BW_3dB = 2 * xp.argmax(spec <= -3.0102999566398121) / N_fft * M_win
        BW_18dB = 2 * xp.argmax(spec <= -18.061799739838872) / N_fft * M_win

        return PSLL, BW_3dB, BW_18dB
class TestBohman:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Bohman window: even, odd-symmetric, and periodic forms."""
        bohman = scp.signal.windows.bohman
        return bohman(6), bohman(7, sym=True), bohman(6, False)
class TestBoxcar:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Boxcar (rectangular) window: even, odd, and periodic forms."""
        boxcar = scp.signal.windows.boxcar
        return boxcar(6), boxcar(7), boxcar(6, False)
class TestChebWin:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Dolph-Chebyshev windows across lengths and attenuation levels."""
        chebwin = scp.signal.windows.chebwin
        with warnings.catch_warnings():
            # The short windows below may emit a "not suitable" UserWarning.
            return (chebwin(6, 100),
                    chebwin(7, 100),
                    chebwin(6, 10),
                    chebwin(7, 10),
                    chebwin(6, 10, False))

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_cheb_odd_high_attenuation(self, xp, scp):
        """Odd length with high attenuation given as a negative value."""
        with warnings.catch_warnings():
            return scp.signal.windows.chebwin(53, at=-40)

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_cheb_even_high_attenuation(self, xp, scp):
        """Even length with high attenuation."""
        with warnings.catch_warnings():
            return scp.signal.windows.chebwin(54, at=40)

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_cheb_odd_low_attenuation(self, xp, scp):
        """Odd length with low attenuation."""
        with warnings.catch_warnings():
            return scp.signal.windows.chebwin(7, at=10)

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_cheb_even_low_attenuation(self, xp, scp):
        """Even length with low attenuation given as a negative value."""
        with warnings.catch_warnings():
            return scp.signal.windows.chebwin(8, at=-10)
# Maps ``(M, center, tau, sym)`` argument tuples for
# ``signal.windows.exponential`` to whether the call is valid.  Every
# invalid entry supplies an explicit ``center`` together with ``sym=True``:
# a user-specified center is rejected for symmetric windows.
exponential_data = {
    (4, None, 0.2, False): True,
    (4, None, 0.2, True): True,
    (4, None, 1.0, False): True,
    (4, None, 1.0, True): True,
    (4, 2, 0.2, False): True,
    (4, 2, 0.2, True): False,
    (4, 2, 1.0, False): True,
    (4, 2, 1.0, True): False,
    (5, None, 0.2, True): True,
    (5, None, 1.0, True): True,
    (5, 2, 0.2, True): False,
    (5, 2, 1.0, True): False
}
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_exponential(xp, scp):
    """Exponential window: valid argument tuples produce matching windows
    and invalid ones raise ``ValueError``.

    Bug fix: the original ``return win`` sat inside the loop, so only the
    first entry of ``exponential_data`` was ever exercised and none of the
    invalid combinations were checked.  All valid windows are now collected
    and returned together for the numpy/cupy comparison.
    """
    wins = []
    for args, valid in exponential_data.items():
        if not valid:
            # Invalid combination: must raise rather than build a window.
            assert_raises(ValueError, scp.signal.windows.exponential, *args)
        else:
            wins.append(scp.signal.windows.exponential(*args))
    return wins
class TestFlatTop:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Flat-top window in periodic and symmetric variants."""
        flattop = scp.signal.windows.flattop
        return (flattop(6, sym=False), flattop(7, sym=False),
                flattop(6), flattop(7, True))
class TestGaussian:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Gaussian windows with assorted standard deviations."""
        gaussian = scp.signal.windows.gaussian
        return (gaussian(6, 1.0), gaussian(7, 1.2),
                gaussian(7, 3), gaussian(6, 3, False))
class TestGeneralCosine:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """General-cosine window built from explicit coefficients."""
        coeffs = [0.5, 0.3, 0.2]
        general_cosine = scp.signal.windows.general_cosine
        return general_cosine(5, coeffs), general_cosine(4, coeffs, sym=False)
class TestGeneralHamming:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """General-Hamming window for a couple of alpha values."""
        general_hamming = scp.signal.windows.general_hamming
        return (general_hamming(5, 0.7),
                general_hamming(5, 0.75, sym=False),
                general_hamming(6, 0.75, sym=True))
class TestHamming:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Hamming window in periodic and symmetric variants."""
        hamming = scp.signal.windows.hamming
        return (hamming(6, False), hamming(7, sym=False),
                hamming(6), hamming(7, sym=True))
class TestHann:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Hann window in periodic and symmetric variants."""
        hann = scp.signal.windows.hann
        return (hann(6, sym=False), hann(7, sym=False),
                hann(6, True), hann(7))
class TestKaiser:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Kaiser window for small and moderate beta values."""
        kaiser = scp.signal.windows.kaiser
        return (kaiser(6, 0.5), kaiser(7, 0.5),
                kaiser(6, 2.7), kaiser(7, 2.7),
                kaiser(6, 2.7, False))
@pytest.mark.skip('This has not been implemented yet in CuPy')
class TestKaiserBesselDerived:
    # Entire class is skipped: kaiser_bessel_derived does not exist in
    # cupyx.scipy.signal.windows yet.

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        # Direct call, get_window dispatch, and several small even lengths.
        M = 100
        w = scp.signal.windows.kaiser_bessel_derived(M, beta=4.0)
        w2 = scp.signal.windows.get_window(
            ('kaiser bessel derived', 4.0), M, fftbins=False)
        w3 = scp.signal.windows.kaiser_bessel_derived(2, beta=xp.pi / 2)
        w4 = scp.signal.windows.kaiser_bessel_derived(4, beta=xp.pi / 2)
        w5 = scp.signal.windows.kaiser_bessel_derived(6, beta=xp.pi / 2)
        return w, w2, w3, w4, w5

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_exceptions(self, xp, scp):
        # NOTE(review): decorated with numpy_cupy_allclose but returns
        # nothing — confirm the decorator accepts a None return, or drop it.
        M = 100
        # Assert ValueError for odd window length
        msg = ("Kaiser-Bessel Derived windows are only defined for even "
               "number of points")
        with assert_raises(ValueError, match=msg):
            scp.signal.windows.kaiser_bessel_derived(M + 1, beta=4.)

        # Assert ValueError for non-symmetric setting
        msg = ("Kaiser-Bessel Derived windows are only defined for "
               "symmetric shapes")
        with assert_raises(ValueError, match=msg):
            scp.signal.windows.kaiser_bessel_derived(M + 1, beta=4., sym=False)
class TestNuttall:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Nuttall window in periodic and symmetric variants."""
        nuttall = scp.signal.windows.nuttall
        return (nuttall(6, sym=False), nuttall(7, sym=False),
                nuttall(6), nuttall(7, True))
class TestParzen:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Parzen window: even, odd-symmetric, and periodic forms."""
        parzen = scp.signal.windows.parzen
        return parzen(6), parzen(7, sym=True), parzen(6, False)
class TestTriang:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Triangular window: symmetric, odd, and periodic forms."""
        triang = scp.signal.windows.triang
        return triang(6, True), triang(7), triang(6, sym=False)
# Argument tuples for ``signal.windows.tukey``: ``(M[, alpha[, sym]])``.
# Shorter tuples exercise the parameter defaults; alpha spans the boxcar
# (0) to Hann (1) extremes.
tukey_data = [
    (4, 0.5, True),
    (4, 0.9, True),
    (4, 1.0, True),
    (4, 0.5, False),
    (4, 0.9, False),
    (4, 1.0, False),
    (5, 0.0, True),
    (5, 0.8, True),
    (5, 1.0, True),
    (6, 0),
    (7, 0),
    (6, .25),
    (7, .25),
    (6,),
    (7,),
    (6, .75),
    (7, .75),
    (6, 1),
    (7, 1),
]
class TestTukey:

    @pytest.mark.parametrize('args', tukey_data)
    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, args, xp, scp):
        """Tukey window matches SciPy for each parameter tuple."""
        return scp.signal.windows.tukey(*args)
# ``(M, NW, Kmax)`` argument triples for ``signal.windows.dpss``.
dpss_data = [
    (4, 0.1, 2),
    (3, 1.4, 3),
    (5, 1.5, 5),
    (100, 2, 4),
]
@pytest.mark.skip('This has not been implemented yet in CuPy')
class TestDPSS:
    """DPSS (Slepian) window tests; skipped until cupyx implements dpss."""

    @pytest.mark.parametrize('args', dpss_data)
    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, args, xp, scp):
        """Windows and concentration ratios for ``(M, NW, Kmax)`` triples.

        Bug fix: this was parametrized over ``tukey_data``, whose tuples
        (e.g. ``(6,)`` or alpha/sym values) are not valid ``dpss``
        arguments; ``dpss_data`` defined above is the intended fixture
        and was otherwise unused.
        """
        win, ratios = scp.signal.windows.dpss(*args, return_ratios=True)
        return win, ratios

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_unity(self, xp, scp):
        """Unity-value handling across the three norm modes (gh-2221)."""
        results = []
        for M in range(1, 21):
            # corrected w/approximation (default)
            win = scp.signal.windows.dpss(M, M / 2.1)
            results.append(win)
            # corrected w/subsample delay (slower)
            win_sub = scp.signal.windows.dpss(M, M / 2.1, norm='subsample')
            if M > 2:
                # @M=2 the subsample doesn't do anything
                results.append(win_sub)
            # not the same, l2-norm
            win_2 = scp.signal.windows.dpss(M, M / 2.1, norm=2)
            results.append(win_2)
        return results

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_extremes(self, xp, scp):
        """Concentration ratios at increasing NW for fixed M and Kmax."""
        lam1 = scp.signal.windows.dpss(31, 6, 4, return_ratios=True)[1]
        lam2 = scp.signal.windows.dpss(31, 7, 4, return_ratios=True)[1]
        lam3 = scp.signal.windows.dpss(31, 8, 4, return_ratios=True)[1]
        return lam1, lam2, lam3

    @pytest.mark.parametrize('windows', [cu_windows, cpu_windows])
    def test_degenerate(self, windows):
        """Invalid (M, NW, Kmax) combinations raise for both backends."""
        assert_raises(ValueError, windows.dpss, 4, 1.5, -1)  # Bad Kmax
        assert_raises(ValueError, windows.dpss, 4, 1.5, -5)
        assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1)
        assert_raises(ValueError, windows.dpss, 3, 1.5, 3)  # NW must be < N/2.
        assert_raises(ValueError, windows.dpss, 3, -1, 3)  # NW must be pos
        assert_raises(ValueError, windows.dpss, 3, 0, 3)
        assert_raises(ValueError, windows.dpss, -1, 1, 3)  # negative M
@pytest.mark.skip('This has not been implemented yet in CuPy')
class TestLanczos:

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_basic(self, xp, scp):
        """Lanczos (sinc) window in periodic and symmetric forms.

        Analytical anchors: sinc(x) = sinc(-x); sinc(pi) = 0, sinc(0) = 1.
        Hand computation on WolframAlpha:
        sinc(2 pi / 3) = 0.413496672, sinc(pi / 3) = 0.826993343,
        sinc(3 pi / 5) = 0.504551152, sinc(pi / 5) = 0.935489284.
        """
        lanczos = scp.signal.windows.lanczos
        return lanczos(6, sym=False), lanczos(6), lanczos(7, sym=True)

    @pytest.mark.parametrize('windows', [cu_windows, cpu_windows])
    def test_array_size(self, windows):
        """The requested length is honored for both sym settings."""
        for length in [0, 10, 11]:
            assert len(windows.lanczos(length, sym=False)) == length
            assert len(windows.lanczos(length, sym=True)) == length
class TestGetWindow:
    """Dispatch tests for ``get_window`` string/tuple/float specifiers."""

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_boxcar(self, xp, scp):
        """A window may be named by a plain string or a 1-tuple."""
        w1 = scp.signal.windows.get_window('boxcar', 12)
        # window is a tuple of len 1
        w2 = scp.signal.windows.get_window(('boxcar',), 16)
        return w1, w2

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_cheb_odd(self, xp, scp):
        """('chebwin', at) tuple dispatch with odd length."""
        with warnings.catch_warnings():
            # Short chebwin may emit a "not suitable" UserWarning.
            w = scp.signal.windows.get_window(
                ('chebwin', -40), 53, fftbins=False)
        return w

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_cheb_even(self, xp, scp):
        """('chebwin', at) tuple dispatch with even length."""
        with warnings.catch_warnings():
            # Short chebwin may emit a "not suitable" UserWarning.
            w = scp.signal.windows.get_window(
                ('chebwin', 40), 54, fftbins=False)
        return w

    @pytest.mark.skip('This has not been implemented yet in CuPy')
    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_dpss(self, xp, scp):
        """('dpss', NW) through get_window matches a direct dpss call."""
        win1 = scp.signal.windows.get_window(('dpss', 3), 64, fftbins=False)
        win2 = scp.signal.windows.dpss(64, 3)
        return win1, win2

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_kaiser_float(self, xp, scp):
        """A bare float specifier is interpreted as a Kaiser beta."""
        win1 = scp.signal.windows.get_window(7.2, 64)
        win2 = scp.signal.windows.kaiser(64, 7.2, False)
        return win1, win2

    @pytest.mark.parametrize('windows', [cu_windows, cpu_windows])
    def test_invalid_inputs(self, windows):
        """Non-string/tuple/float specifiers and unknown names raise."""
        # Window is not a float, tuple, or string
        assert_raises(ValueError, windows.get_window, set('hann'), 8)

        # Unknown window type error
        assert_raises(ValueError, windows.get_window, 'broken', 4)

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_array_as_window(self, xp, scp):
        """A precomputed window of the wrong length is rejected by resample."""
        # scipy github issue 3603
        osfactor = 128
        sig = xp.arange(128)

        win = scp.signal.windows.get_window(('kaiser', 8.0), osfactor // 2)
        if hasattr(scp.signal, 'resample'):
            with assert_raises(ValueError, match='must have the same length'):
                scp.signal.resample(sig, len(sig) * osfactor, window=win)
        return win

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_general_cosine(self, xp, scp):
        """('general_cosine', coeffs) tuple dispatch."""
        return (scp.signal.get_window(('general_cosine', [0.5, 0.3, 0.2]), 4),
                scp.signal.get_window(('general_cosine', [0.5, 0.3, 0.2]), 4,
                                      fftbins=False))

    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_general_hamming(self, xp, scp):
        """('general_hamming', alpha) tuple dispatch."""
        return (
            scp.signal.get_window(('general_hamming', 0.7), 5),
            scp.signal.get_window(('general_hamming', 0.7), 5, fftbins=False),)

    @pytest.mark.skip('This has not been implemented yet in CuPy')
    @testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
    def test_lanczos(self, xp, scp):
        """'lanczos' and its 'sinc' alias dispatch through get_window.

        Bug fix: this method takes the ``(xp, scp)`` pair but was missing
        the ``numpy_cupy_allclose`` decorator that injects those arguments,
        so it could never run even without the skip marker.
        """
        return (scp.signal.get_window('lanczos', 6),
                scp.signal.get_window('lanczos', 6, fftbins=False),
                scp.signal.get_window('lanczos', 6),
                scp.signal.get_window('sinc', 6))
@pytest.mark.parametrize('window_info', window_funcs)
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_windowfunc_basics(window_info, xp, scp):
    """Generic sanity checks shared by every window function.

    All intermediate values are appended to ``results`` in a fixed order;
    the allclose decorator compares the whole list between CuPy and SciPy,
    so the append order is part of the contract -- do not reorder.
    """
    window_name, params = window_info
    if window_name in {'parzen', 'tukey'}:
        pytest.skip()
    window = getattr(scp.signal.windows, window_name)
    results = []
    with warnings.catch_warnings():
        # Check symmetry for odd and even lengths
        w1 = window(8, *params, sym=True)
        w2 = window(7, *params, sym=False)
        results += [w1, w2]

        w1 = window(9, *params, sym=True)
        w2 = window(8, *params, sym=False)
        results += [w1, w2]

        # Check that functions run and output lengths are correct
        results.append(len(window(6, *params, sym=True)))
        results.append(len(window(6, *params, sym=False)))
        results.append(len(window(7, *params, sym=True)))
        results.append(len(window(7, *params, sym=False)))

        # Check invalid lengths
        assert_raises((ValueError, TypeError), window, 5.5, *params)
        assert_raises((ValueError, TypeError), window, -7, *params)

        # Check degenerate cases
        results.append(window(0, *params, sym=True))
        results.append(window(0, *params, sym=False))
        results.append(window(1, *params, sym=True))
        results.append(window(1, *params, sym=False))

        # Check normalization
        results.append(window(10, *params, sym=True))
        results.append(window(10, *params, sym=False))
        results.append(window(9, *params, sym=True))
        results.append(window(9, *params, sym=False))

        # Check that DFT-even spectrum is purely real for odd and even
        results.append(scp.fft.fft(window(10, *params, sym=False)).imag)
        results.append(scp.fft.fft(window(11, *params, sym=False)).imag)
    return results
@pytest.mark.parametrize('windows', [cu_windows, cpu_windows])
def test_needs_params(windows):
    """get_window() must reject parametrized window names given without params."""
    parametrized_names = (
        'kaiser', 'ksr', 'kaiser_bessel_derived', 'kbd',
        'gaussian', 'gauss', 'gss',
        'general gaussian', 'general_gaussian',
        'general gauss', 'general_gauss', 'ggs',
        'dss', 'dpss', 'general cosine', 'general_cosine',
        'chebwin', 'cheb', 'general hamming', 'general_hamming',
    )
    for name in parametrized_names:
        assert_raises(ValueError, windows.get_window, name, 7)
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_not_needs_params(xp, scp):
    """Plain window names must be accepted by get_window() with no parameters.

    Only the window produced by the final name is returned for comparison;
    the loop primarily verifies that none of the calls raise.
    """
    parameterless_names = (
        'barthann',
        'bartlett',
        'blackman',
        'blackmanharris',
        'bohman',
        'boxcar',
        'cosine',
        'flattop',
        'hamming',
        'nuttall',
        'parzen',
        'taylor',
        'exponential',
        'poisson',
        'tukey',
        'tuk',
        'triangle',
    )
    win = None
    for name in parameterless_names:
        win = scp.signal.get_window(name, 7)
    return win
| cupy/cupy | tests/cupyx_tests/scipy_tests/signal_tests/test_windows.py | test_windows.py | py | 22,594 | python | en | code | 7,341 | github-code | 36 |
import logging

logger = logging.getLogger(__name__)


def app(environ, start_response):
    """Minimal WSGI application used as a gunicorn demo.

    Echoes the request path in a plain-text response, logs each request with
    tag metadata, and raises deliberately when ``/exception`` is requested
    (to exercise error reporting).

    Fix: PEP 3333 requires the response body to be an iterable of *bytes*
    on Python 3; the original yielded a str and sized Content-Length from it.
    """
    path = environ.get('PATH_INFO', '')
    if path == '/exception':
        raise Exception('My exception!')
    data = "Request on %s \n" % path
    logger.info(data, extra={'tags': ['role:web', 'env:prod']})
    body = data.encode('utf-8')  # WSGI bodies must be bytestrings
    start_response("200 OK", [
        ("Content-Type", "text/plain"),
        ("Content-Length", str(len(body)))  # length of the encoded bytes
    ])
    return iter([body])
| sebest-blog/gunicorn-with-docker | myapp.py | myapp.py | py | 453 | python | en | code | 15 | github-code | 36 |
11141391697 | import numpy as np
import random as r
# In this module 2-point crossover and non-(uniform) mutation will be implemented
pc = r.uniform(0.4, 0.7) #probability of crossover
pm = r.uniform(0.001, 0.3) #Probability of mutation
def two_point_crossover(x1, x2):
    """Two-point crossover between two parent chromosomes.

    With probability ``pc`` (module-level crossover rate), swaps the gene
    segment between two distinct random cut points (in place); otherwise the
    parents are returned unchanged.

    Fixes over the original:
    - the ``p1 > p2`` branch referenced ``temp`` before assignment
      (NameError); points are now normalized so p1 < p2,
    - a single re-draw could still leave p1 == p2; now re-drawn until distinct,
    - debug prints removed.

    :param x1: first parent chromosome (mutable sequence)
    :param x2: second parent chromosome (same length as x1)
    :return: list ``[x1, x2]`` of the (possibly recombined) chromosomes
    """
    # Draw two distinct interior crossover points.
    p1 = int(r.uniform(1, len(x1) - 1))
    p2 = int(r.uniform(1, len(x1) - 1))
    while p2 == p1:
        p2 = int(r.uniform(1, len(x1) - 1))
    if p1 > p2:
        p1, p2 = p2, p1  # normalize so the slice below is always valid

    rc = r.uniform(0, 1)  # crossover indicator
    if rc <= pc:
        # Swap the segment between the points (inclusive of both ends).
        x1[p1:p2 + 1], x2[p1:p2 + 1] = x2[p1:p2 + 1], x1[p1:p2 + 1]
    return [x1, x2]
def uniform_mutation(chrom, LU_list):
    """Uniform mutation: perturb each gene, with probability ``pm``, by a
    uniform step that stays inside that gene's [lower, upper] bounds.

    Fix over the original: the upper-bound branch computed ``r2 - chrom[i]``,
    which can produce values outside the bounds (even negative); the gene is
    now moved *toward* the upper bound by adding the step.

    :param chrom: chromosome to mutate (mutated in place and returned)
    :param LU_list: per-gene (lower, upper) bound pairs, aligned with chrom
    :return: the mutated chromosome
    """
    for i in range(0, len(chrom)):
        rm = r.uniform(0, 1)
        if rm <= pm:
            lb, ub = LU_list[i]
            delta_lo = chrom[i] - lb   # room available below the gene
            delta_hi = ub - chrom[i]   # room available above the gene
            r1 = r.uniform(0, 1)
            if r1 <= 0.5:
                # Move toward the lower bound.
                step = r.uniform(0, delta_lo)
                chrom[i] = chrom[i] - step
            else:
                # Move toward the upper bound (was: r2 - chrom[i], a bug).
                step = r.uniform(0, delta_hi)
                chrom[i] = chrom[i] + step
    return chrom
def nonuniform_mutation(chrom, LU_list, t, T):
    """Michalewicz non-uniform mutation: like uniform mutation, but the step
    size shrinks as the generation count approaches the maximum, allowing
    fine-tuning late in the run.

    Fixes over the original:
    - step formula was ``Y * (1-ra)**((1-t/T)**b)``, which *grows* toward Y
      as t -> T; replaced with the standard Michalewicz form
      ``Y * (1 - ra**((1-t/T)**b))``, which decays to 0,
    - the upper-bound branch computed ``mut_fun - chrom[i]`` (out of bounds);
      the gene now moves toward the bound by adding the step.

    :param chrom: chromosome to mutate (mutated in place and returned)
    :param LU_list: per-gene (lower, upper) bound pairs
    :param t: current generation
    :param T: maximum number of generations
    :return: the mutated chromosome
    """
    for i in range(0, len(chrom)):
        rm = r.uniform(0, 1)
        if rm <= pm:
            lb, ub = LU_list[i]
            delta_lo = chrom[i] - lb
            delta_hi = ub - chrom[i]
            r1 = r.uniform(0, 1)
            ra = r.uniform(0, 1)
            b = r.uniform(0.5, 5.5)   # non-uniformity degree
            decay = (1 - t / T) ** b  # -> 0 as t approaches T
            if r1 <= 0.5:
                # Shrinking step toward the lower bound.
                step = delta_lo * (1 - ra ** decay)
                chrom[i] = chrom[i] - step
            else:
                # Shrinking step toward the upper bound.
                step = delta_hi * (1 - ra ** decay)
                chrom[i] = chrom[i] + step
    return chrom
#print(uniform_mutation([0.08, 0.12, 0.07, 0.11], [(2.7,58),(20.5,38),(5,18),(10,30)]))
#print(two_point_crossover([0.1, 0.5, 0.6, 0.3, 0.2, 0.56, 0.11]
# ,[1, 5, 6, 3, 2, 56, 11]))
#print(nonuniform_mutation([0.08, 0.12, 0.07, 0.11], [(2.7,58),(20.5,38),(5,18),(10,30)], 2, 7))
### Debugged AND TESTED ###
| Mohamedyasserhelmy/Marketing_Budget_Allocation | crossover.py | crossover.py | py | 3,651 | python | en | code | 0 | github-code | 36 |
23077134371 | from spa.clientside.asyncdbhandler import CAsyncDBHandler
from spa import BaseServiceID, tagBaseRequestID
class CPostgres(CAsyncDBHandler):
# Asynchronous and SQL streaming postgreSQL service id
sidPostgres = BaseServiceID.sidReserved + 0x6FFFFFF4
"""
An Open flag option, which is specific to PostgreSQL plugin.
It is noted that this flag option is not implemented within SocketPro plugin yet.
"""
ROWSET_META_FLAGS_REQUIRED = 0x40000000
"""
An Open flag option, which is specific to PostgreSQL plugin.
When the flag option is used with the method Open or open,
it forces fetching data from remote PostgreSQL server to SocketPro plugin row-by-row instead of all.
The flag option should be used if there is a large number of data within a rowset.
"""
USE_SINGLE_ROW_MODE = 0x20000000
# error codes for unexpected programming errors
ER_NO_DB_OPENED_YET = -1981
ER_BAD_END_TRANSTACTION_PLAN = -1982
ER_NO_PARAMETER_SPECIFIED = -1983
ER_BAD_PARAMETER_COLUMN_SIZE = -1984
ER_BAD_PARAMETER_DATA_ARRAY_SIZE = -1985
ER_DATA_TYPE_NOT_SUPPORTED = -1986
ER_BAD_TRANSTACTION_STAGE = -1987
def __init__(self, sid=sidPostgres):
super(CPostgres, self).__init__(sid)
| udaparts/socketpro | bin/spa/clientside/upostgres.py | upostgres.py | py | 1,257 | python | en | code | 27 | github-code | 36 |
14302599067 | import os
import cv2 as cv
import numpy as np
# Build the label list: each sub-folder of the training directory is one
# person, and the folder name is used as the class label.
people = []
for i in os.listdir(r'C:\Users\Atul\Downloads\OpenCV_course\opencv-course-master\Resources\Faces\train'):
    people.append(i) #each folder in the faces folder corresponds to one person ben affleck,mindy,etc and name of folder is person's name so we store target variables

DIR = r'C:\Users\Atul\Downloads\OpenCV_course\opencv-course-master\Resources\Faces\train'

haar_cascade = cv.CascadeClassifier('haar_face.xml') #calling haarcascade detector

# Creating the training set
features = []  # cropped grayscale face regions (one per detected face)
labels = []    # integer class index into `people`, aligned with features


def create_train(): #loop over all folders in the training folder and then loop over all images within and store in training set. Within each image detect only the face and crop it out using haarcascade face detector
    for person in people:
        path = os.path.join(DIR, person) #to get path for folder of each person
        label = people.index(person) #text classes need to be converted to numerical categories
        for img in os.listdir(path):
            img_path = os.path.join(path, img) #create path for each image in each person's folder
            img_array = cv.imread(img_path)
            gray = cv.cvtColor(img_array, cv.COLOR_BGR2GRAY)
            # Detect faces; several faces per image each become one sample.
            faces_rect = haar_cascade.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors=4)
            for (x,y,w,h) in faces_rect:
                faces_roi = gray[y:y+h ,x:x+w] #cropping out just the face from the image
                features.append(faces_roi)
                labels.append(label)


create_train()
print('Training data created -------------')

# dtype='object' because the cropped face arrays have varying shapes.
features = np.array(features, dtype='object')
labels = np.array(labels)

# Train the LBPH recognizer on the cropped faces and persist everything.
face_recognizer = cv.face.LBPHFaceRecognizer_create() #instantiating out in-built face recognizer model
# Train recognizer on features list and labels list
face_recognizer.train(features,labels)
face_recognizer.save('face_trained.yml') # OpenCv allows us to save our trained model as a yaml which can be reused in other files instead of going through the entire training process again
np.save('features.npy', features)
np.save('labels.npy', labels)
| ajinkeya17/OpenCV-Course | Codebase/face_recognition_training.py | face_recognition_training.py | py | 2,247 | python | en | code | 0 | github-code | 36 |
37290989369 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates RegistrationPath, makes
    UserProfile.mobile_number optional, and links UserProfile to
    RegistrationPath via a nullable foreign key. Do not edit by hand."""

    dependencies = [
        ('user_profile', '0003_remove_userprofile_title'),
    ]

    operations = [
        migrations.CreateModel(
            name='RegistrationPath',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=100)),
            ],
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='mobile_number',
            field=models.CharField(max_length=11, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='userprofile',
            name='registration_path',
            field=models.ForeignKey(blank=True, to='user_profile.RegistrationPath', null=True),
        ),
    ]
| bitapardaz/diabet | user_profile/migrations/0004_auto_20171023_0837.py | 0004_auto_20171023_0837.py | py | 952 | python | en | code | 0 | github-code | 36 |
8806299537 | from django.test import TestCase
from django.urls import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.utils.html import escape
from lists.views import home_page, view_list
from lists.models import Item, List
from lists.forms import STR_EMPYT_LIST_ERROR, ItemForm, ExistingListItemForm
# Create your tests here.
class SmokeTest(TestCase):
    """Sanity check that the test runner itself is wired up correctly."""

    def test_bad_maths(self):
        expected = 2
        self.assertEqual(1 + 1, expected)
class HomePageTest(TestCase):
    """Tests for the site root ('/') view."""

    def test_root_url_resolves_to_home_page_view(self):
        found = resolve("/")
        self.assertEqual(found.func, home_page)

    def test_0001_home_page_returns_correct_html(self):
        request = HttpRequest()
        response = home_page(request)
        # Decode as utf-8-sig first to strip a possible BOM, then re-encode
        # so the byte-level startswith/endswith checks below are stable.
        content = response.content.decode("utf-8-sig").encode('utf-8')
        # print(f'{type(content) =}, {content}')
        self.assertTrue(content.startswith(b"<html>"))
        self.assertIn(b'<title>To-Do Lists</title>', content)
        self.assertTrue(content.endswith(b"</html>"))
        # failed for csrf
        # self.assertEqual(response.content.decode(), render_to_string('home.html'))

    '''
    def test_0004_home_page_displays_all_list_item(self):
        list_ = List.objects.create()
        Item.objects.create(text='itemey 1', list=list_)
        Item.objects.create(text='itemey 2', list=list_)

        request = HttpRequest()
        response = view_list(request)

        self.assertIn('itemey 1', response.content.decode())
        self.assertIn('itemey 2', response.content.decode())
    '''
class ListViewTest(TestCase):
    """Tests for the per-list view at /lists/<id>/ (rendering, isolation
    between lists, and validation-error handling)."""

    '''
    def test_users_list_template(self):
        response = self.client.get('/lists/all/')
        self.assertTemplateUsed(response, 'list.html')
    '''

    def test_0002_users_list_template(self):
        list_ = List.objects.create()
        response = self.client.get(f'/lists/{list_.id}/')
        self.assertTemplateUsed(response, 'list.html')

    def test_0002a_users_list_template(self):
        list_ = List.objects.create()
        response = self.client.get(f'/lists/{list_.id}/')
        self.assertIsInstance(response.context['form'], ExistingListItemForm)
        self.assertContains(response, 'name="text"')

    def test_0003_display_only_items_for_that_list(self):
        # Items from list2 must not leak into list1's page.
        list1 = List.objects.create()
        Item.objects.create(text='itemey 1.1', list=list1)
        Item.objects.create(text='itemey 1.2', list=list1)
        list2 = List.objects.create()
        Item.objects.create(text='itemey 2.1', list=list2)
        Item.objects.create(text='itemey 2.2', list=list2)
        response = self.client.get(f"/lists/{list1.id}/")
        content = response.content.decode("utf-8-sig").encode('utf-8')
        # print(f'{type(content) =}, {content}')
        self.assertContains(response, 'itemey 1.1')
        self.assertContains(response, 'itemey 1.2')
        self.assertNotContains(response, 'itemey 2.1')
        self.assertNotContains(response, 'itemey 2.2')

    """
    def test_display_all_items(self):
        list_ = List.objects.create()
        Item.objects.create(text='itemey 1', list=list_)
        Item.objects.create(text='itemey 2', list=list_)

        response = self.client.get("/lists/all/")

        self.assertContains(response, 'itemey 1')
        self.assertContains(response, 'itemey 2')
    """

    def test_0005_passes_correst_list_to_templeate(self):
        list2 = List.objects.create()
        list1 = List.objects.create()
        response = self.client.get(f'/lists/{list1.id}/')
        self.assertEqual(response.context['list'], list1)

    def test_0006_validation_error_end_up_on_list_page(self):
        # Posting an empty item re-renders the list page with the error.
        list_ = List.objects.create()
        response = self.client.post(f'/lists/{list_.id}/', data={"text": ''})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'list.html')
        content = response.content.decode("utf-8-sig").encode('utf-8')
        # print(f'test_0006_validation_error_end_up_on_list_page {type(content) =}, {content}')
        self.assertContains(response, escape("You can't have an empty list item"))

    def post_invalid_input(self):
        # Helper (not itself a test): POST an empty item to a fresh list.
        list_ = List.objects.create()
        response = self.client.post(f'/lists/{list_.id}/', data={"text": ''})
        return response

    def test_0007_invalid_input_nothing_saved_to(self):
        self.post_invalid_input()
        self.assertEqual(Item.objects.count(), 0)

    def test_0008_invalid_input_renders_list_template(self):
        response = self.post_invalid_input()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'list.html')

    def test_0009_invalid_input_passes_form_to_template(self):
        response = self.post_invalid_input()
        self.assertIsInstance(response.context['form'], ExistingListItemForm)

    def test_0010_invalid_input_shows_error_on_page(self):
        response = self.post_invalid_input()
        self.assertContains(response, escape(STR_EMPYT_LIST_ERROR))
class NewListTest(TestCase):
    """Tests for creating a new list via POST /lists/new."""

    def test_0001_saving_a_POST_request(self):
        # print(f'Before post')
        self.client.post('/lists/new', data={'text': 'A new list item'})
        # print(f'{Item.objects.count()}, {Item.objects =}')
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new list item')

    def test_0003_home_page_redirect_after_post(self):
        # Successful POST must redirect (302) to the new list's page.
        response = self.client.post('/lists/new', data={'text': 'A new list item'})
        list_ = List.objects.first()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['location'], f'/lists/{list_.id}/')

    def test_0004a_validation_errors_are_sent_back_to_home_page_template(self):
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'home.html')

    def test_0004b_validation_errors_are_sent_back_to_home_page_template(self):
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertContains(response, escape(STR_EMPYT_LIST_ERROR))

    def test_0004c_validation_errors_are_sent_back_to_home_page_template(self):
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertIsInstance(response.context['form'], ItemForm)

    def test_0005_invalid_list_items_arent_saved(self):
        # NOTE(review): `response` is unused here; only the DB state matters.
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertEqual(List.objects.count(), 0)
        self.assertEqual(Item.objects.count(), 0)
class NewItemTest(TestCase):
    """Tests for adding an item to an existing list via POST /lists/<id>/."""

    def test_0001_can_save_a_POST_request_to_an_existing_list(self):
        # Two lists exist; the item must attach to the one POSTed to.
        list2 = List.objects.create()
        list1 = List.objects.create()
        response = self.client.post(f'/lists/{list1.id}/',
                                    data={'text': 'A new list item for existing list'})
        # print(f'test_0001_can_save_a_POST_request_to_an_existing_list: {response.status_code}, {response = }')
        # print(f'{list(Item.objects.all()) = }')
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new list item for existing list')
        self.assertEqual(new_item.list, list1)

    def test_0002_redirect_to_list_view(self):
        list2 = List.objects.create()
        list1 = List.objects.create()
        response = self.client.post(f'/lists/{list1.id}/',
                                    data={'text': 'A new list item for existing list'})
        self.assertRedirects(response, f'/lists/{list1.id}/')
| juewuer/python-web-dev | superlists/lists/tests/test_views.py | test_views.py | py | 7,695 | python | en | code | 0 | github-code | 36 |
23702247236 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__all__ = ["IterativeTwoDSearch"]
import os
import h5py
import numpy as np
from .two_d_search import TwoDSearch
from ._grid_search import grid_search
class IterativeTwoDSearch(TwoDSearch):
    """Iterative 2-d (period, phase) transit search over 1-d search output.

    Repeatedly runs the compiled ``grid_search``, records the strongest
    remaining peak, masks that peak's transits out of the 1-d arrays, and
    repeats up to ``npeaks`` times or until too few unmasked points remain.
    Results are cached as HDF5.
    """

    cache_ext = ".h5"

    # Pipeline query schema: name -> (default value, required?).
    query_parameters = dict(
        min_period=(None, True),
        max_period=(None, True),
        delta_log_period=(None, False),
        dt=(None, False),
        alpha=(None, False),
        npeaks=(3, False),
        mask_frac=(2.0, False),
        min_points=(500, False),
        min_transits=(3, False),
    )

    def get_alpha(self, query, parent_response):
        """Return the complexity-penalty strength.

        Uses the user-supplied ``alpha`` when given; otherwise a BIC-style
        penalty k*log(n) from the number of data points and basis size.
        """
        a = query.get("alpha", None)
        if a is not None:
            return float(a)
        lcs = parent_response.model_light_curves
        n = sum(len(lc.time) for lc in lcs)
        k = parent_response.nbasis
        return k * np.log(n)

    def get_result(self, query, parent_response):
        """Run the iterative peak search; returns ``dict(peaks=[...])``."""
        periods = self.get_period_grid(query, parent_response)
        dt = self.get_offset_spacing(query, parent_response)
        alpha = self.get_alpha(query, parent_response)

        # Get the parameters of the time grid from the 1-d search.
        time_spacing = parent_response.time_spacing
        mean_time = parent_response.mean_time_1d
        tmin = parent_response.min_time_1d - mean_time
        tmax = parent_response.max_time_1d - mean_time
        time_grid = np.arange(0, tmax-tmin, time_spacing)

        # Get the results of the 1-d search (copied: masked in place below).
        depth_1d = np.array(parent_response.depth_1d)
        depth_ivar_1d = np.array(parent_response.depth_ivar_1d)
        dll_1d = np.array(parent_response.dll_1d)

        # Find the peaks.
        peaks = []
        for _ in range(query["npeaks"]):
            # Run a 2D search.
            results = grid_search(query["min_transits"], alpha,
                                  tmin, tmax, time_spacing, depth_1d,
                                  depth_ivar_1d, dll_1d, periods, dt)
            (t0_2d, phic_same, phic_same_2, phic_variable, depth_2d,
             depth_ivar_2d) = results

            # Profile over duration: keep, per period, the duration column
            # that maximizes phic_same.
            inds = np.arange(len(phic_same)), np.argmax(phic_same, axis=1)
            t0_2d = t0_2d[inds]
            depth_2d = depth_2d[inds]
            depth_ivar_2d = depth_ivar_2d[inds]
            phic_same = phic_same[inds]
            phic_variable = phic_variable[inds]
            phic_same_2 = phic_same_2[inds]

            # Find the top peak (maximum depth signal-to-noise).
            s2n = depth_2d * np.sqrt(depth_ivar_2d)
            top_peak = np.argmax(s2n)
            p, t0 = periods[top_peak], t0_2d[top_peak]
            duration = query["durations"][inds[1][top_peak]]

            # Save the peak. t0 is shifted back to the absolute time frame
            # and wrapped into [0, period).
            peaks.append(dict(
                period=p, t0=(t0 + tmin + mean_time) % p,
                duration=duration,
                depth=depth_2d[top_peak],
                depth_ivar=depth_ivar_2d[top_peak],
                s2n=s2n[top_peak],
                phic_same=phic_same[top_peak],
                phic_same_second=phic_same_2[top_peak],
                phic_variable=phic_variable[top_peak],
                duty_cycle=np.sum(depth_ivar_1d > 0.0) / len(depth_ivar_1d),
                data_span=tmax - tmin,
            ))

            # Mask out these transits (within mask_frac durations of any
            # predicted transit center) so the next iteration finds a new peak.
            m = (np.abs((time_grid-t0+0.5*p) % p-0.5*p)
                 < query["mask_frac"]*duration)
            depth_1d[m] = 0.0
            depth_ivar_1d[m] = 0.0
            dll_1d[m] = 0.0

            # Stop early when too few usable time samples remain.
            if (np.sum(np.any(depth_ivar_1d > 0.0, axis=1))
                    < query["min_points"]):
                break

        return dict(
            peaks=peaks,
        )

    def save_to_cache(self, fn, response):
        """Serialize the peaks from *response* into HDF5 file *fn*."""
        try:
            os.makedirs(os.path.dirname(fn))
        except os.error:
            # Directory already exists; os.error is an alias of OSError.
            pass

        # Parse the peaks into a structured array.
        peaks = response["peaks"]
        if len(peaks):
            dtype = [(k, np.float64) for k in sorted(peaks[0].keys())]
            peaks = [tuple(peak[k] for k, _ in dtype) for peak in peaks]
            peaks = np.array(peaks, dtype=dtype)
        with h5py.File(fn, "w") as f:
            f.create_dataset("peaks", data=peaks, compression="gzip")

    def load_from_cache(self, fn):
        """Load cached peaks from *fn*; None when missing or unreadable."""
        if os.path.exists(fn):
            with h5py.File(fn, "r") as f:
                try:
                    peaks = [dict((k, peak[k]) for k in peak.dtype.names)
                             for peak in f["peaks"]]
                    return dict(
                        peaks=peaks,
                    )
                except KeyError:
                    pass
        return None
| dfm/ketu | ketu/iterative.py | iterative.py | py | 4,714 | python | en | code | 10 | github-code | 36 |
12420365719 | from django.urls import path
from . import views
# register app namespace which is going to be used in URL names
app_name = "my_app"
urlpatterns = [
path("", views.example_view, name="example"),
path("variable/", views.variable_view, name="variable")
] | felixdusengimana/python-django-web-development | 04 Templates/template_study/my_app/urls.py | urls.py | py | 262 | python | en | code | 0 | github-code | 36 |
4557856269 | __author__ = 'Ong See Hai'
__date__ = 'Nov 2020'
__copyright__ = '(C) 2020, Ong See Hai'
from qgis.core import (QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterMapLayer,
QgsProcessingParameterVectorDestination,
QgsProcessingParameterRasterLayer,
QgsProcessingException,
QgsProcessingUtils,
QgsExpressionContextUtils,
QgsCoordinateReferenceSystem,
QgsProject, QgsProperty)
import processing
class Platformx (QgsProcessingAlgorithm):
    """QGIS processing algorithm: build platform polygons extended to road
    center lines and attach DEM-derived elevation attributes.

    Pipeline: buffer road nodes -> take their boundaries -> merge with
    platform lines and road segments -> polygonize -> drop slivers ->
    zonal mean elevation from the DEM -> refactor fields -> set Z values.
    """

    # Parameter / output keys used by the processing framework.
    INPUT = 'INPUT'
    INPUT2 = 'INPUT2'
    INPUT3 = 'INPUT3'
    INPUT4 = 'INPUT4'
    OUTPUT = 'OUTPUT'

    def name(self):
        # Unique algorithm id within the provider.
        return 'platformx'

    def displayName(self):
        return 'Platform extended to road center line'

    def group(self):
        return 'IMP Tools'

    def groupId(self):
        return 'imp'

    def createInstance(self):
        return Platformx()

    def shortHelpString(self):
        return ('Create platform polygons extended to road center lines (platformx) '
                ' and calculate platform elevation '
                ' based on a slightly lower value than the mean z value of the Digital elevation model cells'
                '\n'
                'The Platform line input map layer can be an AutoCAD DXF file.'
                '\n'
                'The meanz field in the output map layer is the mean z value of DEM cells covered by the polygon. '
                )

    def initAlgorithm(self, config=None):
        # Inputs: road nodes, road segments, platform outlines, and a DEM.
        self.addParameter(QgsProcessingParameterVectorLayer(
            self.INPUT, 'INPUT: Road node',
            defaultValue='Node',types=[QgsProcessing.TypeVectorPoint]) )
        self.addParameter(QgsProcessingParameterVectorLayer(
            self.INPUT2, 'INPUT2: Road segment',
            defaultValue='Segment',types=[QgsProcessing.TypeVectorLine]) )
        self.addParameter(QgsProcessingParameterVectorLayer(
            self.INPUT3, 'INPUT3: Platform line',
            defaultValue='Platform_line',types=[QgsProcessing.TypeVectorLine]) )
        self.addParameter(QgsProcessingParameterRasterLayer(
            self.INPUT4, 'INPUT4: Digital elevation model',
            defaultValue='DEM_SRTM') )
        self.addParameter(QgsProcessingParameterVectorDestination(
            self.OUTPUT, 'Platformx') )

    def processAlgorithm(self, parameters, context, feedback):
        # Project variables
        project = QgsProject.instance()
        scope = QgsExpressionContextUtils.projectScope(project)
        projfold = scope.variable('project_folder')
        try:
            projcrs = QgsCoordinateReferenceSystem( scope.variable('project_crs') )
        except:
            raise QgsProcessingException ('Project coordinate reference system not set')

        # Buffer and boundary nodes to connect to segment lines
        layer = processing.run('native:buffer',
            {'DISSOLVE': False,
             'DISTANCE': 6,
             'END_CAP_STYLE': 0,
             'INPUT': parameters[self.INPUT],
             'JOIN_STYLE': 0,
             'MITER_LIMIT': 2,
             'SEGMENTS': 5,
             'OUTPUT': 'TEMPORARY_OUTPUT' },
            context=context, feedback=feedback, is_child_algorithm=True
            ) ['OUTPUT']

        # Boundary
        layer = processing.run('native:boundary',
            {'INPUT': layer,
             'OUTPUT': 'TEMPORARY_OUTPUT' },
            context=context, feedback=feedback, is_child_algorithm=True
            ) ['OUTPUT']

        # Merge vector layers (node rings + platform lines + road segments)
        layer = processing.run('native:mergevectorlayers',
            {'CRS': projcrs,
             'LAYERS': [ layer, parameters[self.INPUT3], parameters[self.INPUT2] ],
             'OUTPUT': 'TEMPORARY_OUTPUT' },
            context=context, feedback=feedback, is_child_algorithm=True
            ) ['OUTPUT']

        # Polygonize the merged line work into candidate platform polygons
        layer = processing.run('native:polygonize',
            {'INPUT': layer,
             'KEEP_FIELDS': False,
             'OUTPUT': 'TEMPORARY_OUTPUT' },
            context=context, feedback=feedback, is_child_algorithm=True
            ) ['OUTPUT']

        # Remove small areas with Extract by expression (drop slivers <= 200 m2)
        layer = processing.run('native:extractbyexpression',
            {'EXPRESSION': '$area>200',
             'INPUT': layer,
             'OUTPUT': 'TEMPORARY_OUTPUT' },
            context=context, feedback=feedback, is_child_algorithm=True
            ) ['OUTPUT']

        # Mean elevation with Zonal statistics (statistic 2 = mean -> "_mean")
        processing.run('native:zonalstatistics',
            {'INPUT_RASTER': parameters[self.INPUT4],
             'INPUT_VECTOR': layer,
             'COLUMN_PREFIX': '_',
             'RASTER_BAND': 1,
             'STATISTICS': [2] },
            context=context, feedback=feedback, is_child_algorithm=True
            )

        # Refactor fields: platid, platz (slightly below mean z) and meanz
        layer = processing.run('native:refactorfields',
            {'FIELDS_MAPPING': [
                {'expression': '$id','length': 0,'name': 'platid','precision': 0,'type': 2},
                {'expression': 'round( (\"_mean\" * .996 ) ,1)','length': 0,'name': 'platz','precision': 0,'type': 6},
                {'expression': 'round(\"_mean\",1)','length': 0,'name': 'meanz','precision': 0,'type': 6} ],
             'INPUT': layer,
             'OUTPUT': 'TEMPORARY_OUTPUT' },
            context=context, feedback=feedback, is_child_algorithm=True
            ) ['OUTPUT']

        # Set Z value of every vertex from the computed platform elevation
        algout = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        algoutlay = processing.run('native:setzvalue',
            {'INPUT': layer,
             'Z_VALUE': QgsProperty.fromExpression('"platz"'),
             'OUTPUT': algout },
            context=context, feedback=feedback, is_child_algorithm=True
            ) ['OUTPUT']

        # Necessary to work around a bug in runAndLoadResults in the
        # QGIS Python console: register the layer for loading explicitly.
        context.addLayerToLoadOnCompletion(algoutlay,context.LayerDetails(
            name='Platformx',project=context.project() ))
        layer = QgsProcessingUtils.mapLayerFromString(algoutlay, context)
        # Apply the project's platform style to the output layer.
        platqml = projfold + '\\qsettings\\Platform_rdm_z_downcen.qml'
        layer.loadNamedStyle(platqml)

        feedback.pushInfo( '\n\n ##################################\n')
        feedback.pushInfo( '\n\n{} PLATFORMX CREATED'.format(layer.featureCount() ) )
        feedback.pushInfo( '\n\nOshPlatformx.py v2.1\n'
            '##################################\n\n')

        return {self.OUTPUT: algoutlay}
| ebhoward/QgisIMPTools | scripts/OshPlatformx.py | OshPlatformx.py | py | 6,875 | python | en | code | 0 | github-code | 36 |
10073007087 | import numpy as np
import json
import timeit
import os
import argparse
from pathlib import Path
import sys
from shapely.geometry import Polygon
import numpy as np
import numba
from inspect import getmembers
sys.path.append(os.path.realpath('hausdorff'))
from hausdorff_dist import hausdorff_distance
sys.path.append(os.path.realpath('yolov4'))
from tool.utils import *
from config import config
from utils import *
def parse_args():
    """Parse the counting pipeline's CLI options and return them as a dict.

    Keys: json_dir, video_dir, track_dir, save_dir.
    """
    parser = argparse.ArgumentParser(
        description='Data preparation for vehicle counting')
    parser.add_argument('-j', '--json_dir', type=str,
                        default='../data/json/', help='Json directory')
    parser.add_argument('-v', '--video_dir', type=str,
                        default='../data/video/', help='Video directory')
    parser.add_argument('-t', '--track_dir', type=str,
                        default='data/track', help='Detection result directory')
    parser.add_argument('-s', '--save_dir', type=str,
                        default='data/count', help='Save result')
    return vars(parser.parse_args())
def load_zone_anno(json_filename):
    """Read a labelme-style zone annotation file.

    Returns (polygon, paths): ``polygon`` is the ROI from the first shape as
    a list of (x, y) int tuples; ``paths`` maps the numeric suffix of each
    remaining shape's label (last two characters) to its (x, y) point list.
    """
    with open(json_filename) as fh:
        annotation = json.load(fh)
    shapes = annotation['shapes']
    polygon = [(int(px), int(py)) for px, py in shapes[0]['points']]
    paths = {}
    for shape in shapes[1:]:
        key = str(int(shape['label'][-2:]))
        paths[key] = [(int(px), int(py)) for px, py in shape['points']]
    return polygon, paths
def check_bbox_overlap_with_roi(box, roi):
    """True when the axis-aligned box [x1, y1, x2, y2, ...] touches the ROI.

    ``roi`` is a sequence of (x, y) vertices; intersection is tested with
    shapely polygons (shared boundary counts as intersecting).
    """
    left, top, right, bottom = box[0], box[1], box[2], box[3]
    corners = [(left, top), (right, top), (right, bottom), (left, bottom)]
    return Polygon(corners).intersects(Polygon(roi))
def is_same_direction(traj1, traj2, angle_thr):
    """True when two trajectories point in roughly the same direction.

    A trajectory's direction is the vector from its first to its last point;
    the angle between the two vectors (in degrees) must be below
    ``angle_thr``. Degenerate (zero-length) trajectories never match.

    Fix: the cosine is clipped to [-1, 1] -- floating-point rounding can
    push it marginally outside, making arccos return NaN and the comparison
    silently False for perfectly aligned trajectories.
    """
    vec1 = np.array([traj1[-1][0] - traj1[0][0], traj1[-1][1] - traj1[0][1]],
                    dtype=float)
    vec2 = np.array([traj2[-1][0] - traj2[0][0], traj2[-1][1] - traj2[0][1]],
                    dtype=float)
    L1 = np.sqrt(vec1.dot(vec1))
    L2 = np.sqrt(vec2.dot(vec2))
    if L1 == 0 or L2 == 0:
        return False
    cos = np.clip(vec1.dot(vec2) / (L1 * L2), -1.0, 1.0)
    angle = np.degrees(np.arccos(cos))
    return angle < angle_thr
def count(json_dir, video_dir, track_dir, save_dir):
    """Assign vehicle tracks to movements and emit counting results.

    For every camera described in *json_dir*: load its tracker output from
    *track_dir* (<cam>.npy), rebuild per-track trajectories, match each track
    to the closest typical movement trajectory by Hausdorff distance (with a
    direction check), and write per-camera assignments plus a global
    result.txt into *save_dir*.
    """
    starttime = timeit.default_timer()

    # NOTE(review): remove_wrong_classes is read but never used below, and
    # min_track_len duplicates the config lookup done inline later -- confirm.
    remove_wrong_classes = config['remove_wrong_classes']
    min_track_len = config['tracker']['min_len']

    Path(save_dir).mkdir(parents=True, exist_ok=True)
    cam_datas = get_list_data(json_dir)

    results = []  # rows of [cam_name, frame_id, movement_id, class_id]
    for cam_data in cam_datas:
        cam_name = cam_data['camName']
        # NOTE(review): width/height are read but unused in this function.
        width = int(cam_data['imageWidth'])
        height = int(cam_data['imageHeight'])
        track_res_path = os.path.join(track_dir, cam_name + '.npy')
        tracks = np.load(track_res_path, allow_pickle=True)
        mm_track = {}

        # Typical (reference) trajectory per movement id, taken from the
        # annotation shapes after the ROI shape (index 0).
        tipical_trajs = {}
        for mm_id, mm in enumerate(cam_data['shapes'][1:]):
            if 'tracklets' in mm.keys():
                tipical_trajs[mm_id] = [mm['tracklets']]
            else:
                tipical_trajs[mm_id] = [mm['points']]

        # Rebuild per-class, per-track-id trajectories from the frame-wise
        # tracker output: track layout is [x1, y1, x2, y2, ?, track_id].
        track_dict = []
        for class_id, class_tracks in enumerate(tracks):
            track_dict.append({})
            for frame_id, vehicle_tracks in enumerate(class_tracks):
                for track in vehicle_tracks:
                    x1 = track[0]
                    y1 = track[1]
                    x2 = track[2]
                    y2 = track[3]
                    cx = int((x1 + x2) / 2)
                    cy = int((y1 + y2) / 2)
                    track_id = int(track[5])
                    if track_id in track_dict[class_id]:
                        track_dict[class_id][track_id]['endframe'] = frame_id
                        track_dict[class_id][track_id]['bbox'].append([frame_id, x1, y1, x2, y2, class_id])
                        track_dict[class_id][track_id]['tracklet'].append([cx, cy])
                    else:
                        track_dict[class_id][track_id] = {'startframe' : frame_id,
                                                          'endframe' : frame_id,
                                                          'bbox' : [[frame_id, x1, y1, x2, y2, class_id]],
                                                          'tracklet' : [[cx, cy]]}

        for class_id, _ in enumerate(track_dict):
            mm_track[class_id] = {}
            track_ids = sorted([k for k in track_dict[class_id].keys()])
            for track_id in track_ids:
                # Ignore tracks that are too short to classify reliably.
                if len(track_dict[class_id][track_id]['tracklet']) < config['tracker']['min_len']:
                    continue
                track_traj = track_dict[class_id][track_id]['tracklet']
                # calc hausdorff dist with tipical trajs, assign the movement with the min dist
                all_dists_dict = {k: float('inf') for k in tipical_trajs}
                for m_id, m_t in tipical_trajs.items():
                    for t in m_t:
                        tmp_dist = hausdorff_distance(np.array(track_traj), np.array(t), distance='euclidean')
                        if tmp_dist < all_dists_dict[m_id]:
                            all_dists_dict[m_id] = tmp_dist

                # check direction: walk candidates in ascending distance and
                # take the first whose direction also matches.
                all_dists = sorted(all_dists_dict.items(), key=lambda k: k[1])
                min_idx, min_dist = None, config['counter']['dist_thr']
                for i in range(0, len(all_dists)):
                    m_id = all_dists[i][0]
                    m_dist = all_dists[i][1]
                    if m_dist >= config['counter']['dist_thr']: #if min dist > dist_thr, will not assign to any movement
                        break
                    else:
                        if is_same_direction(track_traj, tipical_trajs[m_id][0], config['counter']['angle_thr']): #check direction
                            min_idx = m_id
                            min_dist = m_dist
                            break # if match, end
                        else:
                            continue # direction not matched, find next m_id
                if min_idx == None and min_dist >= config['counter']['dist_thr']:
                    continue

                #save counting results
                mv_idx = min_idx
                #get last frameid in roi: walk bboxes backwards until one
                #still overlaps the ROI polygon.
                bboxes = track_dict[class_id][track_id]['bbox']
                bboxes.sort(key=lambda x: x[0])
                dst_frame = bboxes[0][0]
                last_bbox = bboxes[-1]
                roi = cam_data['shapes'][0]['points']
                if check_bbox_overlap_with_roi(last_bbox, roi) == True:
                    dst_frame = last_bbox[0]
                else:
                    for i in range(len(bboxes) - 2, 0, -1):
                        bbox = bboxes[i]
                        if check_bbox_overlap_with_roi(bbox, roi) == True:
                            dst_frame = bbox[0]
                            break
                        else:
                            continue
                # Majority vote over per-frame class labels.
                # NOTE(review): track_type is computed but results below use
                # class_id instead -- confirm which label is intended.
                track_types = [k[5] for k in bboxes]
                track_type = max(track_types, key=track_types.count)
                mm_track[class_id][track_id] = mv_idx
                results.append([cam_name, dst_frame, mv_idx, class_id])

        # Persist per-camera track -> movement assignments.
        filepath = os.path.join(save_dir, cam_name + '.json')
        with open(filepath, 'w') as f:
            json.dump(mm_track, f)

    # Write the global submission file; ids are shifted to 1-based.
    results.sort(key=lambda x: ([x[0], x[1], x[2], x[3]]))
    result_filename = os.path.join(save_dir, 'result.txt')
    with open(result_filename, 'w') as result_file:
        for result in results:
            result_file.write('{} {} {} {}\n'.format(result[0], result[1] + 1, result[2] + 1, result[3] + 1))

    endtime = timeit.default_timer()
    print('Count time: {} seconds'.format(endtime - starttime))
if __name__ == '__main__':
    # Script entry point: pull the four directory paths out of the parsed
    # CLI arguments and run the counting pipeline.
    cli_args = parse_args()
    dirs = [cli_args[key] for key in ('json_dir', 'video_dir', 'track_dir', 'save_dir')]
    count(*dirs)
19782973821 | from datetime import datetime
from scipy import misc
import tensorflow as tf
import os
import src.facenet.detect_face
import cv2
import matplotlib.pyplot as plt
from helper import get_images_from_file_list, get_box_from_ellipse
import math
import pickle
import dlib
# ============================================
# Global variables
# ============================================
# Average face box dimensions -- presumably pixels measured over the dataset;
# provenance not visible here, TODO confirm before relying on them.
AVG_FACE_HEIGHT = 142.58539351061276
AVG_FACE_WIDTH = 94.11600875170973
# CNN global vars
gpu_memory_fraction = 1.0  # fraction of GPU memory TensorFlow may claim (see tf.GPUOptions below)
minsize = 50 # minimum size of face
threshold = [0.5, 0.6, 0.7] # three steps's threshold
factor = 0.800 # scale factor
# Haar and Dlib global vars
face_cascade = cv2.CascadeClassifier('src/haarcascades/haarcascade_frontalface_default.xml')
dlib_face_detector = dlib.get_frontal_face_detector()
# ============================================
# Face detection methods
# ============================================
# For a given image, uses the dlib face detection algorithm to predict
# all of the faces present in the image. The algorithm used is based on
# a 29-layer ResNet network architecture. Returns a list of dlib.rectangle
# objects
def dlib_face_detect(image, upscale=1):
    """Run dlib's frontal face detector on a BGR image.

    The image is converted to grayscale first; `upscale` controls how many
    times dlib upsamples the image before detecting.  Returns the list of
    dlib.rectangle detections.
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return dlib_face_detector(grayscale, upscale)
# For a given image, uses the FaceNet CNN detector to predict all of the faces
# present in the given image. Returns a list of bounding boxes (x,y,w,h) of the
# faces. This code was largely borrowed from the blog of Charles Jekel, found here:
# http://jekel.me/2017/How-to-detect-faces-using-facenet/
def cnn_face_detect(image):
    """Detect faces with the FaceNet MTCNN detector.

    Returns a list of (x, y, w, h) bounding boxes for detections with at
    least 60% confidence.  Largely borrowed from Charles Jekel's blog:
    http://jekel.me/2017/How-to-detect-faces-using-facenet/
    """
    # Build a fresh TF graph/session and the three MTCNN stage networks
    # (configuration mirrors facenet/src/compare.py).
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = src.facenet.detect_face.create_mtcnn(sess, None)
    # run detect_face from the facenet library
    bounding_boxes, _ = src.facenet.detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)
    # Keep only detections with >= 60% confidence, converted to (x, y, w, h).
    return [(x1, y1, x2 - x1, y2 - y1)
            for (x1, y1, x2, y2, acc) in bounding_boxes
            if acc >= .6]
# For a given image, use the Haar Cascade detector provided by OpenCV to detect
# all of the faces present in the given image. Uses the parameters scale_factor and
# min_neighbors. Returns a list of bounding boxes (x,y,w,h) of the faces
def haar_face_detect(image, scale_factor, min_neighbors, use_grayscale=True, cascade=None):
    """Detect faces with an OpenCV Haar cascade.

    Uses the module-level frontal-face cascade unless a different cascade
    object is supplied (cascades live in src/haarcascades).  Returns the
    (x, y, w, h) detections from detectMultiScale.
    """
    frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if use_grayscale else image
    detector = face_cascade if not cascade else cascade
    return detector.detectMultiScale(frame, scale_factor, min_neighbors)
# ============================================
# Helper functions
# ============================================
# For a given fold number [1-10], retrieve a nested list of bounding boxes for faces for each image
# in the fold. Ex data: [[img1_face1, img1_face2], [img2_face1], ...] where each face bounding box
# is a tuple of (x, y, width, height)
def retrieve_face_list(fold_num):
    """Return per-image face boxes for an FDDB fold (1-10).

    The result is a nested list with one entry per image, each a list of
    (x, y, width, height) boxes converted from the fold's ellipse
    annotations.  The converted list is cached as a pickle next to the
    fold file and reloaded on subsequent calls.
    """
    assert fold_num > 0 and fold_num <= 10
    fold_file = 'img/FDDB-folds/FDDB-fold-{:02}-ellipseList.txt'.format(fold_num)
    rectangle_file = 'img/FDDB-folds/FDDB-fold-{:02}-rectangleList.pkl'.format(fold_num)
    # Fast path: reuse the cached conversion from an earlier run.
    if os.path.exists(rectangle_file):
        with open(rectangle_file, 'rb') as f:
            return pickle.load(f)
    face_list = []
    image_total, face_total = 0, 0
    with open(fold_file, 'r') as f:
        line = f.readline().rstrip()
        while line:
            num_faces = int(f.readline().rstrip())
            image_total += 1
            face_total += num_faces
            # convert each ellipse annotation for this image to a box
            boxes = []
            for _ in range(num_faces):
                major, minor, angle, h, k, _ = map(float, f.readline().rstrip().split())
                boxes.append(get_box_from_ellipse(major, minor, angle, h, k))
            face_list.append(boxes)
            # advance to the next image entry
            line = f.readline().rstrip()
    print('num images: {}, total num faces: {}'.format(image_total, face_total))
    with open(rectangle_file, 'wb') as w:
        pickle.dump(face_list, w)
    return face_list
def retrieve_manual_face_labels(fold_num, file_names):
    """Return face labels for the manually selected image set.

    Each name in `file_names` is matched against the fold's file list and
    the corresponding rectangles are collected; the result is cached as a
    pickle under img/manual/.
    """
    file_list = 'img/FDDB-folds/FDDB-fold-{:02}.txt'.format(fold_num)
    rectangle_file = 'img/manual/face_labels.pkl'
    # Fast path: reuse the cached label list.
    if os.path.exists(rectangle_file):
        print("loading from pickle")
        with open(rectangle_file, 'rb') as f:
            return pickle.load(f)
    with open(file_list, 'r') as f:
        fold_names = [line.rstrip() for line in f.readlines()]
    rectangles = retrieve_face_list(fold_num)
    face_list = []
    # map each requested file name to its rectangles via the fold ordering
    for name in file_names:
        for idx, fold_name in enumerate(fold_names):
            if name == fold_name:
                face_list.append(rectangles[idx])
                break
    with open(rectangle_file, 'wb') as f:
        pickle.dump(face_list, f)
    return face_list
# ============================================
# Testing methods
# ============================================
# TODO: replace with a max flow?
def compute_accuracy(labels, predictions):
faces_found, false_pos = 0, 0
for prediction in predictions:
if type(prediction) == dlib.dlib.rectangle:
x_p, y_p, w_p, h_p = prediction.left(), prediction.top(), prediction.right()-prediction.left(), prediction.bottom()-prediction.top()
else:
x_p, y_p, w_p, h_p = prediction
center_px, center_py = x_p + w_p/2, y_p + h_p/2
found_one = False
for label in labels:
x_l, y_l, w_l, h_l = label
center_lx, center_ly = x_l + w_l/2, y_l + h_l/2
if (abs(center_lx - center_px) < .4*w_l and abs(center_ly - center_py) < .4*h_l
and .5*w_l < w_p and w_p < 1.5*w_l and .5*h_l < h_p and h_p < 1.5*h_l):
# num_correct += 1
faces_found += 1
found_one = True
break
if found_one is False:
false_pos += 1
if faces_found > len(labels):
diff = faces_found - len(labels)
false_pos += diff
faces_found = len(labels)
return faces_found, len(labels), false_pos
def write_detections(fold_num, file_names, face_images, face_labels):
    """Run the FaceNet detector over a fold and cache its predictions.

    Predictions are stored under a directory name encoding the current
    `factor` and `threshold` settings; an existing prediction file is
    never overwritten.
    NOTE(review): this writes under 'pred/facenet' while test_cnn_detection
    reads from 'predictions/facenet' -- confirm which path is intended.
    """
    directory = 'pred/facenet/{:03}-{}{}{}'.format(int(factor * 1000), int(threshold[0] * 10), int(threshold[1] * 10), int(threshold[2] * 10))
    file = directory + '/fold-{}.pkl'.format(fold_num)
    print(file)
    if os.path.exists(file):
        print('file {} already exists'.format(file))
        return
    if not os.path.exists(directory):
        os.makedirs(directory)
    all_predictions = [cnn_face_detect(image) for image in face_images]
    with open(file, 'wb') as f:
        pickle.dump(all_predictions, f)
def test_detection(fold_num, file_names, face_images, face_labels):
    """Score the active detector over one fold.

    Swap the detector by (un)commenting the calls below.  Returns
    (num_correct, num_faces, num_false_positives) summed over the fold.
    """
    total_correct, total_faces, total_false_pos = 0, 0, 0
    for image, label_set in zip(face_images, face_labels):
        # choose detector here:
        # predictions = haar_face_detect(image, 1.25, 5)
        # predictions = dlib_face_detect(image)
        predictions = cnn_face_detect(image)
        correct, faces, false_pos = compute_accuracy(label_set, predictions)
        total_correct += correct
        total_faces += faces
        total_false_pos += false_pos
    # print("found {} out of {} faces in ".format(total_correct, total_faces))
    return total_correct, total_faces, total_false_pos
def test_dlib_detection(fold_num, file_names, face_images, face_labels, upscale):
    """Score the dlib detector on one fold at the given upscale setting.

    Returns (num_correct, num_faces, num_false_positives).
    """
    total_correct = total_faces = total_false_pos = 0
    for image, label_set in zip(face_images, face_labels):
        detections = dlib_face_detect(image, upscale=upscale)
        correct, faces, false_pos = compute_accuracy(label_set, detections)
        total_correct += correct
        total_faces += faces
        total_false_pos += false_pos
    return total_correct, total_faces, total_false_pos
def test_haar_detection(fold_num, file_names, face_images, face_labels, scale_factor, min_neighbors):
    """Score the Haar cascade detector on one fold with the given parameters.

    Returns (num_correct, num_faces, num_false_positives).
    """
    total_correct = total_faces = total_false_pos = 0
    for image, label_set in zip(face_images, face_labels):
        detections = haar_face_detect(image, scale_factor, min_neighbors)
        correct, faces, false_pos = compute_accuracy(label_set, detections)
        total_correct += correct
        total_faces += faces
        total_false_pos += false_pos
    return total_correct, total_faces, total_false_pos
def test_cnn_detection(fold_num, file_names, face_images, face_labels):
    """Score the FaceNet detector on one fold, caching its predictions.

    If a pickle of predictions for the current factor/threshold settings
    exists it is scored directly; otherwise the detector is run on every
    image, scored, and the predictions are written for next time.

    Returns:
        (num_correct, num_faces, num_false_positives)
    """
    directory = 'predictions/facenet/{:03}-{}{}{}'.format(int(factor * 1000), int(threshold[0] * 10), int(threshold[1] * 10), int(threshold[2] * 10))
    pkl_file = directory + '/fold-{}.pkl'.format(fold_num)
    total_faces, total_num_correct, total_false_pos = 0, 0, 0
    if os.path.exists(pkl_file):
        print('found file, loading')
        with open(pkl_file, 'rb') as f:
            fold_predictions = pickle.load(f)
        # iterates over each image in the fold
        for face_detections, labels in zip(fold_predictions, face_labels):
            num_correct, num_faces, false_pos = compute_accuracy(labels, face_detections)
            total_num_correct += num_correct
            total_faces += num_faces
            total_false_pos += false_pos
        return total_num_correct, total_faces, total_false_pos
    # predictions do not already exist for the fold, so make them and then write them
    fold_predictions = []
    for image, label_set in zip(face_images, face_labels):
        predictions = cnn_face_detect(image)
        fold_predictions.append(predictions)
        num_correct, num_faces, false_pos = compute_accuracy(label_set, predictions)
        total_num_correct += num_correct
        total_faces += num_faces
        total_false_pos += false_pos
    # BUG FIX: the cache directory was never created before writing, so a
    # fresh run crashed with FileNotFoundError on the open() below.
    if not os.path.exists(directory):
        os.makedirs(directory)
    with open(pkl_file, 'wb') as f:
        pickle.dump(fold_predictions, f)
    return total_num_correct, total_faces, total_false_pos
def test_on_one_image(file_names, face_labels):
    """Debug helper: run a detector on one hard-coded image and visualise it.

    Prints per-prediction matching diagnostics against the image's labels
    and shows the image with prediction boxes drawn via matplotlib.
    """
    name = '2002/08/05/big/img_3688'
    img = cv2.imread('img/FDDB-pics/{}.jpg'.format(name))
    # locate the image's position in the fold file list
    index = -1
    for i, file in enumerate(file_names):
        if name in file:
            # BUG FIX: `index` was assigned -1 and then never updated; later
            # code reused the loop variable `i`, which a second loop below
            # silently shadowed.  Record and use `index` consistently.
            index = i
            break
    print('found file at index {}'.format(index))
    # faces = cnn_face_detect(img)
    faces = haar_face_detect(img, 1.3, 4)
    label_set = face_labels[index]
    print("detections: (x,y,w,h)")
    for pred_idx, prediction in enumerate(faces):
        print("*************** prediction {} *************".format(pred_idx))
        x_p, y_p, w_p, h_p = prediction
        print(x_p, y_p, w_p, h_p)
        cv2.rectangle(img, (int(x_p), int(y_p)), (int(x_p + w_p), int(y_p + h_p)), (255, 0, 0), 2)
        center_px, center_py = x_p + w_p / 2, y_p + h_p / 2
        found_one = False
        for label in label_set:
            x_l, y_l, w_l, h_l = label
            print(x_l, y_l, w_l, h_l)
            center_lx, center_ly = x_l + w_l / 2, y_l + h_l / 2
            print(abs(center_lx - center_px) < .3 * w_l)
            print(abs(center_ly - center_py) < .3 * h_l)
            print(.5 * w_l < w_p and w_p < 1.5 * w_l)
            print(.5 * h_l < h_p and h_p < 1.5 * h_l)
            print("//////////////////")
            if (abs(center_lx - center_px) < .3 * w_l and abs(center_ly - center_py) < .3 * h_l
                    and .5 * w_l < w_p and w_p < 1.5 * w_l and .5 * h_l < h_p and h_p < 1.5 * h_l):
                found_one = True
                break
        if found_one is False:
            print('false pos found for prediction {}'.format(pred_idx))
    print('labels:')
    # BUG FIX: previously indexed face_labels with the shadowed inner-loop
    # variable, printing labels for the wrong image.
    print(face_labels[index])
    plt.figure()
    plt.imshow(img)
    plt.show()
# The main method is used to compare the accuracies of the FaceNet detector and Haar Cascade detector
#
def test_accuracy():
    """Score the configured detector across FDDB folds 2-5 and print totals.

    Prints the aggregate hit count, false-positive count, accuracy and
    elapsed wall-clock time.
    """
    total_correct, total_faces, total_false_pos = 0, 0, 0
    start_time = datetime.now()
    for fold_num in [2, 3, 4, 5]:
        img_list_file = 'img/FDDB-folds/FDDB-fold-{:02}.txt'.format(fold_num)
        # FIX: the fold file list was opened and read twice per iteration;
        # one read is sufficient.
        with open(img_list_file, 'r') as f:
            file_names = [x.rstrip() for x in f.readlines()]
        face_images = get_images_from_file_list(file_names)
        face_labels = retrieve_face_list(fold_num)
        # num_correct, num_faces, false_pos = test_detection(fold_num, file_names, face_images, face_labels)
        num_correct, num_faces, false_pos = test_cnn_detection(fold_num, file_names, face_images, face_labels)
        total_correct += num_correct
        total_faces += num_faces
        total_false_pos += false_pos
    delta = datetime.now() - start_time
    print('******** TOTALS ***********')
    print('found {}/{} faces'.format(total_correct, total_faces))
    print('total false pos: {}'.format(total_false_pos))
    print('accuracy: {}'.format(total_correct / total_faces))
    print('Time elapsed (hh:mm:ss.ms) {}'.format(delta))
def test_one_image():
    """Load fold 5's file list, images and labels, then run the one-image debug test."""
    fold_num = 5
    img_list_file = 'img/FDDB-folds/FDDB-fold-{:02}.txt'.format(fold_num)
    with open(img_list_file, 'r') as f:
        file_names = [line.rstrip() for line in f.readlines()]
    face_images = get_images_from_file_list(file_names)
    face_labels = retrieve_face_list(fold_num)
    test_on_one_image(file_names, face_labels)
def test_on_manual_labels():
    """Score the detector against the manually labelled image set and print stats."""
    img_list_file = 'img/manual/image_list.txt'
    with open(img_list_file, 'r') as f:
        file_names = [line.rstrip() for line in f.readlines()]
    face_images = get_images_from_file_list(file_names)
    start_time = datetime.now()
    face_labels = retrieve_manual_face_labels(1, file_names)
    # num_correct, num_faces, false_pos = test_detection(1, file_names, face_images, face_labels)
    num_correct, num_faces, false_pos = test_cnn_detection(1, file_names, face_images, face_labels)
    elapsed = datetime.now() - start_time
    print('found {}/{} faces'.format(num_correct, num_faces))
    print('total false pos: {}'.format(false_pos))
    print('accuracy: {}'.format(num_correct / num_faces))
    print('Time elapsed (hh:mm:ss.ms) {}'.format(elapsed))
def test_haar():
    """Grid-search Haar cascade parameters over folds 2-5 and print stats per setting."""
    folds = [2, 3, 4, 5]
    # load each fold's file names, images and labels once up front
    fold_info = {}
    for fold_num in folds:
        img_list_file = 'img/FDDB-folds/FDDB-fold-{:02}.txt'.format(fold_num)
        with open(img_list_file, 'r') as f:
            names = [line.rstrip() for line in f.readlines()]
        fold_info[fold_num] = (names, get_images_from_file_list(names), retrieve_face_list(fold_num))
    for min_neighbors in [0, 1, 2, 3, 4, 5]:
        scale = 1.05
        # NOTE: float accumulation sweep kept as-is so the tested scale
        # values match the original exactly
        while scale < 1.5:
            start = datetime.now()
            total_correct, total_faces, total_false_pos = 0, 0, 0
            for fold_num in folds:
                names, images, labels = fold_info[fold_num]
                correct, faces, false_pos = test_haar_detection(fold_num, names, images, labels, scale, min_neighbors)
                total_correct += correct
                total_faces += faces
                total_false_pos += false_pos
            delta = datetime.now() - start
            print('minNeighbors={}, scale={}: accuracy={}, avgFalsePos={}, ttlFP={}, timing={}'.format(min_neighbors, scale, total_correct/total_faces, total_false_pos/len(folds), total_false_pos, delta))
            scale += .05
def test_dlib():
    """Sweep dlib upscale settings over folds 2-5 and print stats per setting."""
    folds = [2, 3, 4, 5]
    # load each fold's file names, images and labels once up front
    fold_info = {}
    for fold_num in folds:
        img_list_file = 'img/FDDB-folds/FDDB-fold-{:02}.txt'.format(fold_num)
        with open(img_list_file, 'r') as f:
            names = [line.rstrip() for line in f.readlines()]
        fold_info[fold_num] = (names, get_images_from_file_list(names), retrieve_face_list(fold_num))
    for upscale in [0, 1, 2, 3]:
        start = datetime.now()
        total_correct, total_faces, total_false_pos = 0, 0, 0
        for fold_num in folds:
            names, images, labels = fold_info[fold_num]
            correct, faces, false_pos = test_dlib_detection(fold_num, names, images, labels, upscale)
            total_correct += correct
            total_faces += faces
            total_false_pos += false_pos
        delta = datetime.now() - start
        print('upscale={}: accuracy={}, avgFalsePos={}, ttlFP={}, time: {}'.format(upscale, total_correct/total_faces, total_false_pos/len(folds), total_false_pos, delta))
if __name__ == "__main__":
# main()
test_haar()
# test_dlib()
# test_one_image()
# test_on_manual_labels()
| ryan-mccaffrey/glasses-for-everyone | detect_face.py | detect_face.py | py | 18,364 | python | en | code | 2 | github-code | 36 |
33825739371 | from domain.Errors import InvalidClassException
from domain.Rental import Rental
class RentalValidator():
    """Validates Rental objects before they enter the repository."""

    def validate(self, rental):
        """Check a rental's id, DVD name and client name.

        All problems are accumulated into a single message so the caller
        sees every invalid field at once.

        Raises:
            InvalidClassException: when any field is invalid.
        """
        errors = ""
        if type(rental.get_id()) != int:
            errors += "Invalid id! \n"
        if len(rental.get_dvd_name()) == 0:
            errors += "Invalid dvd name! \n "
        if len(rental.get_client_name()) == 0:
            errors += "Invalid client name! \n"
        # idiomatic truthiness check instead of comparing against ""
        if errors:
            raise InvalidClassException(errors)
"""
def test():
validator = RentalValidator()
newRental = Rental(5,"shg","kfkas")
validator.validate(newRental)
newRental = Rental("lala","shg","kfkas")
try:
validator.validate(newRental)
except InvalidClassException as ics:
print (ics)
newRental = Rental(5,"","kfkas")
try:
validator.validate(newRental)
except InvalidClassException as ics:
print (ics)
newRental = Rental(5,"shg","")
try:
validator.validate(newRental)
except InvalidClassException as ics:
print (ics)
test()
"""
| projectworldsofficial/Movie-rental-in-python | domain/RentalValidator.py | RentalValidator.py | py | 1,369 | python | en | code | 1 | github-code | 36 |
3237711712 | from django.db import models
from rest_framework import serializers
# Django "choices" tuples: (stored value, human-readable label) pairs used
# by the CharField declarations in the models below.
gender=(
    ("male","Male"),
    ("female","Female"),
)
status=(
    ("Done","Done"),
    ("Pending","Pending"),
)
Data=(
    ("share_all_data","Share All Data"),
    ("share_alerts_only","Share Alerts Only"),
)
communication=(
    ("email","Email"),
    ("sms","SMS"),
)
relationship=(
    ("parent","parent"),
    ("spouse","spouse"),
    ("children","children"),
)
Device = (
    ('ios','ios'),
    ('android','android'),
    ('web','web'),
)
# Create your models here.
class Patient_Account(models.Model):
    """Patient profile: identity, contact details, verification state,
    OHIP card info and an optional link to the registering clinic.

    NOTE(review): Password is stored in a plain TextField -- confirm it is
    hashed before save.  Boolean-like fields here are stored as the
    strings "True"/"False".
    """
    Patient_Account_Id = models.AutoField(primary_key=True)
    Full_Name=models.TextField(default="")
    First_Name=models.CharField(max_length=100, default="")
    Last_Name=models.CharField(max_length=100, default="")
    Email=models.TextField(default="")
    Username=models.TextField(default="")
    Gender=models.CharField(max_length=100, default="")
    Date_of_Birth=models.CharField(max_length=500, default="")
    Password=models.TextField(default="")
    Street_Address=models.CharField(max_length=500, default="")
    City=models.CharField(max_length=500, default="")
    State=models.CharField(max_length=500, default="")
    Country=models.CharField(max_length=500, default="")
    Role=models.CharField(max_length=100,default="patient")
    Patient_Account_Image=models.ImageField(upload_to='Patient/',default="dummyprofile.jpg")
    Mobile_Number = models.CharField(max_length=200, default="")
    # email / OTP verification bookkeeping (string-typed flags)
    Email_Verification_Code = models.CharField(max_length=200, default="")
    Email_Verification_Timestatus = models.CharField(max_length=200, default="False")
    Email_Verification_usestatus = models.CharField(max_length=200, default="False")
    OTP_Verification = models.CharField(max_length=200, default="12345")
    # OHIP (Ontario health card) details, stored as free text
    ohip_number = models.TextField(default="")
    date_of_issue = models.TextField(default="")
    date_of_expiry = models.TextField(default="")
    ohip_Status = models.CharField(max_length=500, default="")
    Email_Verificatication_Status = models.CharField(max_length=500, default="False")
    Sender_ID = models.TextField(default="")
    Device_type = models.CharField(max_length=100,choices=Device,default="android")
    Message_Count = models.CharField(max_length=20,default="0")
    # optional link to the clinic/hospital the patient registered through
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey('ear_health_professional.Clinics_Branch' , on_delete=models.CASCADE,blank=True,null=True)
    Clinic_Remove_Status = models.CharField(max_length=500, default="True")
    def __str__(self):
        return self.Full_Name
class Card_detail(models.Model):
    """Stored payment card for a patient.

    NOTE(review): the card number and CVC live in plain fields -- confirm
    encryption / PCI handling happens upstream.  Cvc has a *string* default
    on an IntegerField, which should probably be an int.
    """
    Card_detail_Id=models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Card_number=models.CharField(max_length=100,default="0")
    Cvc=models.IntegerField(default="12345")
    expiration_date=models.DateField(blank=True, null=True)
    created_at=models.DateTimeField(auto_now_add=True,blank=True, null=True)
    Charge_Day=models.DateTimeField(auto_now_add=True,blank=True, null=True)
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return self.Card_number
class Billing_Details(models.Model):
    """Billing address and contact email associated with a patient."""
    Billing_Details_id=models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Street_Address=models.TextField(default="")
    Country=models.TextField(default="")
    State=models.TextField(default="")
    City=models.TextField(default="")
    Postal_Code=models.TextField(default="")
    Email=models.TextField(default="")
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return self.Country
class Insurance(models.Model):
    """A patient's insurance policy details (all free-text).

    NOTE(review): 'insuarance_number' is misspelled but renaming would
    require a DB migration; left as-is.
    """
    Insurance_id = models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    insuarance_number =models.TextField(default="")
    date_of_issue = models.TextField(default="")
    date_of_expiry = models.TextField(default="")
    insurance_company_name = models.TextField(default="")
class Book_Appointment(models.Model):
    """An appointment booking linking a patient, practitioner, hospital and
    billing record, with status, payment-method and chat bookkeeping.

    NOTE(review): several text defaults look like base64 ciphertext --
    presumably these fields are encrypted elsewhere before storage; confirm
    with the crypto layer.  Boolean-like fields are "True"/"False" strings.
    """
    Book_Appointment_id= models.AutoField(primary_key=True)
    Problem=models.TextField(default="+4lISovpyV6DwPqRNcKmFvtDUyL3LLzPP9wCR3oIKMbT44gGXvC2F3EL1IvyY9MP3SmuuP5L69iN0ZJ8dJXEAQ==")
    Completion=models.TextField(default="akjMaPmdwYqc2btwftgMOLe5H1/7BQpJUJMTLVdnVZbfcEVgXZvf8W8njyEEotQF8Q1hq850qnBFDLA/FZ9c6Q==")
    Billing_Details_id=models.ForeignKey(Billing_Details,on_delete=models.CASCADE,blank=True,null=True)
    Health_Professional_id=models.ForeignKey('ear_health_professional.Health_Professional_Account',on_delete=models.CASCADE,blank=True,null=True)
    Date=models.CharField(max_length=500,default="")
    Time=models.CharField(max_length=500,default="")
    Date_of_origin=models.DateTimeField(auto_now_add=True,blank=True, null=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Status=models.CharField(default="Pending",max_length=20)
    Doctor_Online_Status=models.CharField(default="False",max_length=20)
    Hospital_id=models.ForeignKey('ear_health_professional.Hospital',on_delete=models.CASCADE,blank=True,null=True)
    Channel_id = models.CharField(max_length=500,default="")
    Appointment_Rating = models.IntegerField(default=0)
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    # payment-method flags ("True"/"False" strings)
    Cash_on_Arrival = models.CharField(max_length=500,default="False")
    Online_Payment = models.CharField(max_length=500,default="False")
    is_Paid = models.CharField(max_length=8,default="False")
    Paypal_Payment = models.CharField(max_length=500,default="False")
    Ohipe_Payment = models.CharField(max_length=500,default="False")
    Insurance_Payment = models.CharField(max_length=500,default="False")
    Accept_Reject_Status = models.CharField(max_length=500,default="Pending")
    Doctor_Slot_Timing = models.CharField(max_length=500,default="")
    Doctor_Notes = models.TextField(default="wt1lvNv9BdDP4iPKwsHoJwlWUg65Z3kIEGdEn4AZbEU/mRiiiz3TLZE5HZMCx7qWt8uJvAsH7WufJRhc+0OeeA==")
    Doctor_Prescription = models.TextField(default="wt1lvNv9BdDP4iPKwsHoJwlWUg65Z3kIEGdEn4AZbEU/mRiiiz3TLZE5HZMCx7qWt8uJvAsH7WufJRhc+0OeeA==")
    Medical_Diagnosis = models.TextField(default="yqrxWDqA9m4g/fkhdmp1jkBC1pXHyh60EwwBzdCLGGM=")
    # per-party unread-message counters (string-typed)
    Doctor_Read_Message = models.CharField(max_length=20,default="0")
    Patient_Read_Message = models.CharField(max_length=20,default="0")
    Patient_rating_Status = models.CharField(max_length=20,default="False")
    PDF_data = models.TextField(default="")
class General_Patient_Information(models.Model):
    """Intake questionnaire answers attached to an appointment: profile,
    medical history and lifestyle habits.

    NOTE(review): most defaults look like base64 ciphertext -- presumably
    values are encrypted before storage; confirm with the crypto layer.
    """
    General_Patient_Information_id = models.AutoField(primary_key=True)
    Book_Appointment_id=models.ForeignKey(Book_Appointment,on_delete=models.CASCADE,blank=True,null=True)
    Patient_Gender = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_Name = models.TextField(default="")
    Patient_First_Name = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_Last_Name = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_DOB = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_Height = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_Weight = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_Email = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_reason = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    # Patient Medical History
    Patient_drug_allergies = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_disease_list = models.TextField(default="")
    Patient_other_illness = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_List_any_operations = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_List_of_Current_Medications = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    # Healthy & Unhealthy Habits
    Exercise = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Eating_following_a_diet =models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Alcohol_Consumption = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Caffeine_Consumption = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Do_you_smoke = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Medical_History = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
class Ser_Appointment(serializers.ModelSerializer):
    """DRF serializer for Book_Appointment with denormalised read-only
    fields pulled from the related patient, practitioner and hospital.

    NOTE(review): the two models.ForeignKey declarations below are model
    fields, not serializer fields -- inside a serializer they are inert and
    look like a copy-paste from the model; confirm and remove if so.
    NOTE(review): 'Status' is redeclared from Hospital_id.Status, shadowing
    the appointment's own Status, and appears twice in Meta.fields.
    """
    Patient_Name=serializers.ReadOnlyField(source="Patient_id.Username")
    Patient_Username=serializers.ReadOnlyField(source="Patient_id.Username")
    Patient_Gender=serializers.ReadOnlyField(source="Patient_id.Gender")
    Patient_Country=serializers.ReadOnlyField(source="Patient_id.Country")
    Date_of_Birth=serializers.ReadOnlyField(source="Patient_id.Date_of_Birth")
    Health_Professional_id=serializers.ReadOnlyField(source="Health_Professional_id.Health_Professional_Id")
    Health_Professional_Username=serializers.ReadOnlyField(source="Health_Professional_id.Username")
    Health_Professional_Full_Name=serializers.ReadOnlyField(source="Health_Professional_id.Full_Name")
    Hospital_id = serializers.ReadOnlyField(source="Hospital_id.Hospital_id")
    Hospital_Name = serializers.ReadOnlyField(source="Hospital_id.Hospital_Name")
    About = serializers.ReadOnlyField(source="Hospital_id.About")
    Status = serializers.ReadOnlyField(source="Hospital_id.Status")
    More_Mapinfo = serializers.ReadOnlyField(source="Hospital_id.More_Mapinfo")
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    Appointment_Status = serializers.ReadOnlyField(source = "Status")
    class Meta:
        model = Book_Appointment
        fields = ('Patient_id','Patient_Name','Patient_Username','Patient_Gender','Patient_Country','Problem','Completion','Date','Time','Date_of_origin','Status','Book_Appointment_id','Date_of_Birth','Doctor_Notes','Doctor_Prescription','Health_Professional_id','Health_Professional_Username','Health_Professional_Full_Name','Hospital_id','Hospital_Name','About','Status','More_Mapinfo','Doctor_Online_Status','Channel_id','Accept_Reject_Status','Cash_on_Arrival','Online_Payment','Ohipe_Payment','Insurance_Payment','Appointment_Status')
class Messages(models.Model):
    """A single chat message tied to an appointment, its sender role and
    per-party read flags.

    NOTE(review): Date and Time default to the string "False", which looks
    like a copy-paste from the status flags -- confirm intended defaults.
    """
    Messages_id = models.AutoField(primary_key=True)
    Message = models.TextField(default="")
    Book_Appointment_id=models.ForeignKey(Book_Appointment , on_delete=models.CASCADE,blank=True,null=True)
    Health_Professional_id=models.ForeignKey('ear_health_professional.Health_Professional_Account',on_delete=models.CASCADE,blank=True,null=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Role = models.CharField(max_length=20,default="")
    Status = models.CharField(max_length=20,default="False")
    Doctor_Read_Status = models.CharField(max_length=20,default="False")
    Patient_Read_Status = models.CharField(max_length=20,default="False")
    Date = models.CharField(max_length=20,default="False")
    Time = models.CharField(max_length=20,default="False")
class SerMessage(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Messages model."""
    class Meta:
        model = Messages
        fields = '__all__'
class Doctor_Image(models.Model):
    """An image uploaded against an appointment (stored under Appointment/)."""
    Doctor_Image_id=models.AutoField(primary_key=True)
    Book_Appointment_id=models.ForeignKey(Book_Appointment, on_delete=models.CASCADE)
    img=models.ImageField(upload_to='Appointment/',default="dummy.jpg")
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return str(self.Doctor_Image_id)
class MultipleImages(models.Model):
    """Additional images attached to an appointment (same layout as Doctor_Image)."""
    MultipleImages_id=models.AutoField(primary_key=True)
    Book_Appointment_id=models.ForeignKey(Book_Appointment, on_delete=models.CASCADE)
    img=models.ImageField(upload_to='Appointment/',default="dummy.jpg")
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return str(self.MultipleImages_id)
class Add_Caregiver(models.Model):
    """A caregiver contact registered by a patient, with data-sharing and
    communication preferences (see the choices tuples at module top)."""
    Add_Caregiver_id=models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Name=models.CharField(max_length=500, default="")
    Email=models.EmailField(max_length=500, default="")
    Mobile_Number=models.CharField(max_length=500, default="")
    Relationship=models.CharField(max_length=500, default="",choices=relationship)
    Data=models.CharField(max_length=500, default="",choices=Data)
    Communication=models.CharField(max_length=500, default="",choices=communication)
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return self.Name
class Patient_Recent_visit(models.Model):
    """Join record linking a patient to a health professional they recently visited."""
    Patient_Recent_visit_id = models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Health_Professional_id=models.ForeignKey('ear_health_professional.Health_Professional_Account',on_delete=models.CASCADE,blank=True,null=True)
class Patient_Favorited(models.Model):
    """Join record marking a health professional as a patient's favorite."""
    Patient_Favorited_id = models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Health_Professional_id=models.ForeignKey('ear_health_professional.Health_Professional_Account',on_delete=models.CASCADE,blank=True,null=True)
| AdnanSiddiqui96/Projects-Backup | TestProject/Patient/models.py | models.py | py | 15,374 | python | en | code | 0 | github-code | 36 |
12486515502 | import cv2
from datetime import datetime, timedelta
import geojson
from geotiff import GeoTiff
from models import Model0
import netCDF4
import numpy as np
import pandas as pd
import os
from os import path
from scipy import interpolate
from scipy.io import loadmat, savemat
import torch
import wget
import tarfile
import data
def cosd(x):
    """Cosine of an angle given in degrees (PEP 8: named def instead of a lambda assignment)."""
    return np.cos(np.radians(x))

# Julian days sampled weekly: early season (day 0 .. 203) plus late season
# (day 308 .. 357).
jdays = list(range(0,183+21,7))+list(range(329-21,364,7)) #load Julian days
def getconstfeatures(workdir, uregions, awsurl, print=print):
    """Build time-invariant (constant) features for stations and grid cells.

    Loads grid-cell geometry and station metadata from ``workdir``, derives
    terrain (DEM), land-cover (GLOBCOVER) and soil-region features, caching
    the result in ``stmeta.csv`` / ``grid.csv`` next to the inputs.

    Parameters
    ----------
    workdir : str
        Directory holding grid_cells.geojson and ground_measures_metadata.csv;
        its parent is used as the data/download cache.
    uregions : iterable of str
        Region labels to one-hot encode on grid cells (then interpolated onto
        stations).
    awsurl : str
        Base URL for downloading the land-cover / soil archives.
    print : callable, optional
        Logging function; deliberately shadows the builtin so a logger can be
        injected.

    Returns
    -------
    (stmeta, grid, constfeatures)
        Station metadata DataFrame, grid-cell DataFrame, and the list of
        constant feature column names.
    """
    datadir = path.join(workdir,'..')
    print(f"getconstfeatures: datadir={datadir} list={os.listdir(datadir)}")
    file = path.join(workdir,'grid_cells.geojson')
    print(f"Loading {file}")
    with open(file) as f:
        grid0 = geojson.load(f)
    grid0 = pd.DataFrame([{'cell_id':g['properties']['cell_id'], 'region':g['properties']['region'],
        'corners': np.array(g['geometry']['coordinates'])} for g in grid0['features']]).set_index('cell_id')
    file = path.join(workdir,'ground_measures_metadata.csv')
    print(f"Loading {file}")
    stmeta0 = pd.read_csv(file).set_index('station_id')
    stmetafile = path.join(workdir,'stmeta.csv')
    gridfile = path.join(workdir,'grid.csv')
    # Reuse the cached feature tables when both exist and cover all ids.
    read = path.isfile(stmetafile) and path.isfile(gridfile)
    if read:
        print(f'Loading stmeta from {stmetafile} and grid from {gridfile}')
        stmeta = pd.read_csv(stmetafile).set_index('station_id')
        grid = pd.read_csv(gridfile).set_index('cell_id')
        noex = set(stmeta0.index).difference(set(stmeta.index)).union(set(grid0.index).difference(set(grid.index)))
        if len(noex) > 0:
            # BUG FIX: this print was missing the f-prefix and emitted the
            # literal text "{noex}".
            print(f'unvalid stmeta / grid for {noex}')
            read = False
        else:
            # Bounding box (with margin) snapped to the lon/lat grid steps.
            lonr = 1.5
            lon1 = np.floor(min(grid['longitude'].values.min(),stmeta['longitude'].values.min())/lonr-1.)*lonr
            lon2 = np.ceil(max(grid['longitude'].values.max(),stmeta['longitude'].values.max())/lonr+1.)*lonr
            latr = 1.
            lat1 = np.floor(min(grid['latitude'].values.min(),stmeta['latitude'].values.min())/latr-1.)*latr
            lat2 = np.ceil(max(grid['latitude'].values.max(),stmeta['latitude'].values.max())/latr+1.)*latr
    if not read:
        print('Creating stmeta and grid')
        grid = grid0
        stmeta = stmeta0
        # Cell centers = mean of the polygon corner coordinates.
        gll = np.vstack(grid['corners'].values)
        grid['latitude'] = gll[:,:,1].mean(1)
        grid['longitude'] = gll[:,:,0].mean(1)
        lonr = 1.5; latr = 1.
        lon1 = np.floor(min(gll[:,:,0].min(),stmeta['longitude'].values.min())/lonr-1.)*lonr
        lon2 = np.ceil(max(gll[:,:,0].max(),stmeta['longitude'].values.max())/lonr+1.)*lonr
        lat1 = np.floor(min(gll[:,:,1].min(),stmeta['latitude'].values.min())/latr-1.)*latr
        lat2 = np.ceil(max(gll[:,:,1].max(),stmeta['latitude'].values.max())/latr+1.)*latr
        # One-hot region tags on cells; station network tags (CDEC/SNOTEL)
        # derived from the station-id prefix.
        for lab in uregions:
            grid[lab] = np.array([grid['region'][k]==lab for k in range(grid.shape[0])]).astype(np.float32)
            stmeta[lab] = np.zeros(stmeta.shape[0])
        for lab in ['CDEC', 'SNOTEL']:
            stmeta[lab] = np.array([stmeta.index[k][:len(lab)]==lab for k in range(stmeta.shape[0])]).astype(np.float32)
            grid[lab] = np.zeros(grid.shape[0])
    rgauss = 2.0
    def getaver (lon,lat,elev,r):
        # Gaussian-smooth `elev` with radius r (km) and return an
        # interpolating function over (lons, lats) point arrays.
        # NOTE(review): scipy.interpolate.interp2d is removed in SciPy >= 1.14;
        # this needs migrating to RegularGridInterpolator.
        ry = r/(111.*(lat[1]-lat[0]))
        rx = r/(111.*(lon[1]-lon[0])*cosd((lat1+lat2)*0.5))
        av = elev.copy()
        cv2.GaussianBlur(elev, (2*int(rgauss*rx)+1, 2*int(rgauss*ry)+1), rx, av, ry)
        f = interpolate.interp2d(lon, lat, av, kind='linear')
        return lambda lons, lats: np.array([f(lons[k], lats[k])[0] for k in range(lons.shape[0])])
    demfile = f"dem_N{lat1}_{lat2}_W{-lon1}_{-lon2}.mat"
    fname = path.join(datadir, demfile)
    if not path.isfile(fname):
        print('Creating DEM features')
        dem = data.getdem(lat1,lat2,lon1,lon2,dir=path.join(datadir,'dem'), matfile=fname)
    else:
        print(f'Loading {demfile}')
        dem = loadmat(fname)
    demlon = dem.pop('lon').squeeze()
    demlat = dem.pop('lat').squeeze()
    print('Calculation DEM features')
    for key in dem:
        if key[:2] != '__':
            elev = dem[key]
            if key == 'elev':
                # Point elevation plus smoothed-elevation differences at
                # several radii (km).
                rads = [3, 10, 30, 100]
                f = getaver(demlon,demlat,elev,1.)
                grid['elevation_m'] = f(grid['longitude'], grid['latitude'])
                for r in rads:
                    f_av = getaver(demlon,demlat,elev,r)
                    name = 'elevation_'+str(r)
                    for d in [stmeta, grid]:
                        d[name] = f_av(d['longitude'], d['latitude']) - d['elevation_m']
            else:
                # Other DEM-derived layers (e.g. slope/aspect components)
                # smoothed at several radii.
                rads1 = [1, 3, 10, 30]
                for r in rads1:
                    f_av = getaver(demlon,demlat,elev,r)
                    name = key+str(r)
                    for d in [stmeta, grid]:
                        d[name] = f_av(d['longitude'], d['latitude'])
    # Sanity check: DEM elevation vs. the elevation recorded in station metadata.
    ev = getaver(demlon,demlat,dem['elev'],1.)(stmeta['longitude'], stmeta['latitude'])
    print(f"dem elevation/stmeta elevation = {ev/stmeta['elevation_m']}")
    del demlon,demlat,dem
    print('Loading GLOBCOVER')
    for d in [stmeta, grid]:
        for key in [key for key in d.keys() if key[:9]=='GLOBCOVER']:
            d.pop(key)
    ncname = path.join(datadir,'C3S-LC-L4-LCCS-Map-300m-P1Y-2020-v2.1.1.nc')
    if not path.isfile(ncname):
        arch = 'land_cover_map.tar.gz'
        fname = path.join(datadir,arch)
        if not path.isfile(fname):
            print('Downloading '+arch)
            wget.download(awsurl+arch, out=fname)
        tar = tarfile.open(fname, "r:gz").extractall(datadir)
        # ncname = path.join(datadir, tar.getmembers()[0].get_info()['name'])
        os.remove(fname)
    print(f'Loading GLOBCOVER from {ncname}')
    nc = netCDF4.Dataset(ncname)
    lon = np.array(nc.variables['lon'][:])
    lat = np.array(nc.variables['lat'][:])
    # Crop the global map to the bounding box computed above.
    ok = ((lat>=lat1)&(lat<=lat2)).nonzero()[0]
    ilat0 = ok[0]; ilat1 = ok[-1]+1
    ok = ((lon>=lon1)&(lon<=lon2)).nonzero()[0]
    ilon0 = ok[0]; ilon1 = ok[-1]+1
    arr = np.array(nc.variables['lccs_class'][0,ilat0:ilat1,ilon0:ilon1])
    lon = lon[ilon0:ilon1]
    lat = lat[ilat0:ilat1]
    nc.close()
    printvalstat = lambda arr: print ({t: (arr==t).sum()/arr.size*100. for t in np.unique(arr.reshape(-1))})
    printvalstat (arr)
    # Merge similar land-cover classes into a small set of types.
    arr[(arr>=10) & (arr<30)] = 30
    arr[arr==110] = 100; arr[arr==120] = 100
    arr[(arr>130)&(arr<160)] = 130
    arr[arr==72] = 70; arr[arr==71] = 70
    arr[arr==201] = 200
    types = [30,70,90,100,130,200,210,220]
    printvalstat (arr)
    gstep=1./360.
    # rads = [1, 3, 10, 30]
    rads = [3]
    print('Calculation GLOBCOVER features')
    def calcfeatures(arr,types,gstep,prefix):
        # Per-type occurrence fraction, Gaussian-smoothed at each radius in
        # the enclosing `rads`, sampled at station/cell positions.
        for t in types:
            eq = (arr==t).astype(np.float32)
            for r in rads:
                ry = r/(111.*gstep)
                rx = r/(111.*gstep*cosd((lat1+lat2)*0.5))
                av = eq.copy()
                cv2.GaussianBlur(eq, (2*int(rgauss*rx)+1, 2*int(rgauss*ry)+1), rx, av, ry)
                for d in [stmeta, grid]:
                    ilon = ((d['longitude'].values-lon1)/(lon2-lon1)*arr.shape[1]).astype(np.int64)
                    ilat = ((lat2-d['latitude'].values)/(lat2-lat1)*arr.shape[0]).astype(np.int64)
                    d[prefix+str(t)+'_'+str(r)] = np.array([av[ilat[i]:ilat[i]+2,ilon[i]:ilon[i]+2].mean() for i in range(ilon.shape[0])])
        del eq,av
    calcfeatures(arr,types,gstep,'GLOBCOVER')
    del arr
    print('Loading SOIL')
    for d in [stmeta, grid]:
        for key in [key for key in d.keys() if key[:4]=='SOIL']:
            d.pop(key)
    tiffile = 'global_soil_regions_geoTIFF/so2015v2.tif'
    tifname = path.join(datadir,tiffile)
    if not path.isfile(tifname):
        arch = 'soil_regions_map.tar.gz'
        fname = path.join(datadir,arch)
        if not path.isfile(fname):
            print('Downloading '+arch)
            wget.download(awsurl+arch, out=fname)
        tar = tarfile.open(fname, "r:gz").extract('./'+tiffile, datadir)
        os.remove(fname)
    print(f'Loading SOIL from {tifname}')
    arr = np.array(GeoTiff(tifname).read_box([(lon1,lat1),(lon2,lat2)]))
    printvalstat (arr)
    # types = [7,21,50,54,64,74,75,81,83,92]
    # Merge soil classes to their tens group; 5/6 folded into 7.
    arr[arr>10] = np.floor(arr[arr>10]/10)*10
    arr[arr==5] = 7; arr[arr==6] = 7
    printvalstat (arr)
    types = [7,20,50,60,70,80,90]
    # types = np.unique(arr.reshape(-1))
    gstep = 1./30.
    # rads = [3, 10, 30]
    rads = [10]
    print('Calculation SOIL features')
    calcfeatures(arr,types,gstep,'SOIL')
    del arr
    # clm = 'ba'
    # print('Loading '+clm)
    # badir = path.join(datadir, clm+'-nc')
    # if not path.isdir(badir):
    #     arch = 'burned_areas_occurrence_map.tar.gz'
    #     fname = path.join(datadir,arch)
    #     if not path.isfile(fname):
    #         print('Downloading '+arch)
    #         wget.download(awsurl+arch, out=fname)
    #     tar = tarfile.open(fname, "r:gz").extractall(datadir)
    #     os.remove(fname)
    # rads = [10, 30]
    # for jd in jdays:
    #     if all([clm+str(r)+'_'+str(jd) in grid for r in rads]):
    #         continue
    #     tday = (datetime(2001,1,1)+timedelta(days=jd)).strftime('%m%d')
    #     file = path.join(badir,'ESACCI-LC-L4-'+clm+'-Cond-500m-P13Y7D-2000'+tday+'-v2.0.nc')
    #     print(f'Loading {clm} {tday} from {file}')
    #     nc = netCDF4.Dataset(file)
    #     lon = np.array(nc.variables['lon'][:])
    #     lat = np.array(nc.variables['lat'][:])
    #     ok = ((lat>=lat1)&(lat<=lat2)).nonzero()[0]
    #     ilat0 = ok[0]; ilat1 = ok[-1]+1
    #     ok = ((lon>=lon1)&(lon<=lon2)).nonzero()[0]
    #     ilon0 = ok[0]; ilon1 = ok[-1]+1
    #     v = np.array(nc.variables[clm.lower()+'_occ'][ilat0:ilat1,ilon0:ilon1]).astype(np.float32)
    #     lon = lon[ilon0:ilon1]
    #     lat = lat[ilat0:ilat1]
    #     for r in rads:
    #         f = getaver(lon, lat, v, r)
    #         for d in [stmeta, grid]:
    #             d[clm+str(r)+'_'+str(jd)] = f (d['longitude'], d['latitude'])
    #     nc.close()
    stmeta = stmeta.copy()
    grid = grid.copy()
    # BUG FIX: this print was missing the f-prefix and emitted the literal
    # text "{stmetafile}" / "{gridfile}".
    print(f'Saving stmeta to {stmetafile} and grid to {gridfile}')
    stmeta.to_csv(stmetafile)
    grid.to_csv(gridfile)
    print({key: grid[key].mean() for key in grid.keys() if key not in ['region', 'corners']})
    print({key: stmeta[key].mean() for key in stmeta.keys() if key not in ['name','state']})
    # Interpolate the network tags from stations onto cells and the region
    # tags from cells onto stations using Model0.
    print('Interpolate regions tags')
    dtype = torch.float32
    x = {'xlo': stmeta['longitude'].values, 'xla': stmeta['latitude'].values,
         'ylo': grid['longitude'].values, 'yla': grid['latitude'].values}
    x = {key: torch.tensor(x[key], dtype=dtype)[None] for key in x}
    for lab in ['CDEC', 'SNOTEL']:
        x['xval'] = torch.tensor(stmeta[lab].values, dtype=dtype)[None,:,None]
        grid[lab] = Model0(x)[0,:,0].detach().numpy()
    x = {key: x[('y' if key[0]=='x' else 'x')+key[1:]] for key in x if key[1:] in ['lo','la']}
    for lab in uregions:
        x['xval'] = torch.tensor(grid[lab].values, dtype=dtype)[None,:,None]
        stmeta[lab] = Model0(x)[0,:,0].detach().numpy()
    # Assemble the final feature list; coarser elevation bands are made
    # incremental by subtracting the next-coarser band.
    constfeatures = ['CDEC', 'elevation_m']
    rads = [100, 30, 10, 3]
    # rads = [100, 10]
    # rads = [30, 10, 3]
    constfeatures += ['elevation_'+str(r) for r in rads]
    for d in [stmeta, grid]:
        for r,r2 in zip(rads[1:],rads[:-1]):
            d['elevation_'+str(r2)] -= d['elevation_'+str(r)]
    # rads = [1, 3, 10, 30]
    rads = [1, 3, 30]
    for key in ['south', 'east']:
        constfeatures += [key+str(r) for r in rads]
        for r,r2 in zip(rads[1:],rads[:-1]):
            for d in [stmeta, grid]:
                # print([key,r2,np.abs(d[key+str(r2)]).mean(), r,np.abs(d[key+str(r)]).mean(),np.abs(d[key+str(r2)] - d[key+str(r)]).mean()])
                d[key+str(r2)] -= d[key+str(r)]
    rads = [1, 3, 10, 30]
    for key in ['aspect']:
        constfeatures += [key+str(r) for r in rads]
        for r,r2 in zip(rads[1:],rads[:-1]):
            for d in [stmeta, grid]:
                d[key+str(r2)] -= d[key+str(r)]
    # constfeatures += [key for key in grid if key[:9]=='GLOBCOVER' and key[-2:] in ['_1','10']] # and key[9:12] != '220'
    # constfeatures += [key for key in grid if key[:4]=='SOIL' and key[-2:] in ['_3','30']]
    constfeatures += [key for key in grid if key[:9]=='GLOBCOVER' and key[-2:] in ['_3']]
    constfeatures += [key for key in grid if key[:4]=='SOIL' and key[-2:] in ['10']]
    # constfeatures += [key for key in grid if (key[:9]=='GLOBCOVER') or (key[:4]=='SOIL')]
    print(f"constfeatures : {constfeatures}")
    return stmeta,grid,constfeatures
40372645232 | import datetime
from django.utils import timezone
from django.test import TestCase
from django.urls import reverse
from .models import Question
def create_question(question_text, days):
    """Create a question with the given `question_text`, published the given number of `days`
    offset to now (negative for questions published in the past, positive for questions
    that have yet to be published).
    """
    time = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionIndexViewTests(TestCase):
    """Tests for the polls index view (latest question list)."""
    def test_not_questions(self):
        """
        If no questions exist, an appropriate message is displayed
        """
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No hay encuestas")
    def test_past_question(self):
        """
        Questions with a past pub_date are displayed
        """
        create_question(question_text="Past question", days=-30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question>']
        )
    def test_future_question(self):
        """
        Questions with a future pub_date are NOT displayed
        """
        create_question(question_text="Future question", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertContains(response, "No hay encuestas")
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            []
        )
    def test_future_and_past_question(self):
        """
        Even if both past and future questions exist, only
        past questions are displayed
        """
        create_question(question_text="Future question", days=30)
        create_question(question_text="Past question", days=-30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question>']
        )
    def test_two_past_questions(self):
        """
        The index page may display multiple past questions.
        """
        create_question(question_text="Q1", days=-30)
        create_question(question_text="Q2", days=-50)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Q1>', '<Question: Q2>']
        )
class QuestionDetailViewTests(TestCase):
    """Tests for the polls detail view."""
    def test_future_question(self):
        """
        The detail view of a question with a future pub_date returns a 404
        """
        future_q = create_question(question_text="Future question", days=5)
        url = reverse('polls:detail', args=(future_q.id,))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
    def test_past_question(self):
        """
        The detail view of a question with a past pub_date displays its text
        """
        past_q = create_question(question_text="Past question", days=-5)
        url = reverse('polls:detail', args=(past_q.id,))
        response = self.client.get(url)
        self.assertContains(response, past_q.question_text)
class QuestionModelTests(TestCase):
    """Tests for Question.was_published_recently()."""
    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently() returns False for questions whose pub_date is in the future.
        """
        time = timezone.now() + datetime.timedelta(days=30)
        future_question = Question(pub_date=time)
        self.assertIs(future_question.was_published_recently(), False)
    def test_was_published_recently_with_old_question(self):
        """
        was_published_recently() returns False for questions whose pub_date is older than 1 day.
        """
        time = timezone.now() - datetime.timedelta(days=1, seconds=1)
        old_question = Question(pub_date=time)
        self.assertIs(old_question.was_published_recently(), False)
    def test_was_published_recently_with_recent_question(self):
        """
        was_published_recently() returns True for questions whose pub_date is within the last day.
        """
        time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        recent_question = Question(pub_date=time)
        self.assertIs(recent_question.was_published_recently(), True)
| gpuma/phuyu | polls/tests.py | tests.py | py | 4,413 | python | en | code | 1 | github-code | 36 |
28713706413 | # ----encoding='utf-8'-----
# Chentian's code paradise
#
# Sends a test e-mail with an inline body, an image attachment, and a
# spreadsheet attachment through the 163.com SMTP server.
import smtplib
import email  # NOTE(review): unused, kept to preserve the file's imports
# Builds plain-text message parts
from email.mime.text import MIMEText
# Builds image attachments
from email.mime.image import MIMEImage
# Combines multiple parts into one message
from email.mime.multipart import MIMEMultipart
from email.header import Header
mail_host="smtp.163.com"
mail_sender="chentstudent@163.com"
# SMTP authorization code for the sender account.
# BUG FIX: was misspelled `mail_slience`, so `stp.login(..., mail_license)`
# below raised NameError.
mail_license=""
mail_receivers=["2627601379@qq.com"]
mm=MIMEMultipart('related')
subject_content="""python邮件测试"""
mm["From"]="sender_name<chentstudent@163.com>"
mm["To"]="receiver_name<2627601379@qq.com>"
# BUG FIX: header key was misspelled "subiect", so the mail had no Subject.
mm["Subject"]=Header(subject_content,"utf-8")
body_content="""你好,这是一个测试邮件,来自于陈添"""
# BUG FIX: the body must be a MIMEText part; it was built as
# MIMEMultipart(body_content, "plian", "utf=8") with two typos.
message_text=MIMEText(body_content,"plain","utf-8")
mm.attach(message_text)
# Attach the image.
imagine_data=open("FILE-20150725-1818LV48VS71BW2J.jpg","rb")
message_imagine=MIMEImage(imagine_data.read())
imagine_data.close()
mm.attach(message_imagine)
# Attach the spreadsheet as a base64-encoded part.
exc=MIMEText(open("大唐建设集团-2022年5月工资.xlsx","rb").read(),'base64','utf-8')
exc["Content-Disposition"]='attachment;filename="sample.xlex"'
mm.attach(exc)
stp=smtplib.SMTP()
stp.connect(mail_host,25)
stp.set_debuglevel(1)  # echo the SMTP conversation for debugging
stp.login(mail_sender,mail_license)
stp.sendmail(mail_sender, mail_receivers, mm.as_string())
print("邮件发送成功")
stp.quit()
| codefreshstudent/day8 | 测试文件夹/测试发邮件.py | 测试发邮件.py | py | 1,338 | python | en | code | 0 | github-code | 36 |
25845948589 | #!/usr/bin/python3
'''Unittest for class ``State``'''
import unittest
from models.review import Review
from models.base_model import BaseModel
class TestReview(unittest.TestCase):
    """Unit tests for the ``Review`` model."""

    def test_class(self):
        """Review must be a subclass of BaseModel."""
        with self.subTest(msg='Inheritance'):
            self.assertTrue(issubclass(Review, BaseModel))

    def test_attributes(self):
        """Each public class attribute of Review defaults to a string."""
        with self.subTest(msg='Attributes'):
            for attribute in ('place_id', 'user_id', 'text'):
                self.assertIsInstance(getattr(Review, attribute), str)


if __name__ == '__main__':
    unittest.main()
| darbumas/AirBnB_clone | tests/test_models/test_review.py | test_review.py | py | 715 | python | en | code | 1 | github-code | 36 |
27101659507 | '''
Test Code
Enter the radius of cylinder: 4
Enter the height of the radius: 5
Volume of cylinder is 251.3 cubic units
'''
import math

# Read the cylinder's dimensions from the user.
radius = float(input('Enter the radius of cylinder: '))
height = float(input('Enter the height of the radius: '))

# V = pi * r^2 * h
volume = math.pi * (radius ** 2) * height
print(f'Volume of cylinder is {round(volume, 1)} cubic units')
26211274731 | import sys
sys.stdin = open('input.txt')

# SWEA 1209: for each test case find the largest sum over all rows, all
# columns, and the two diagonals of a 100x100 grid.
T = 10
for tc in range(1, T + 1):
    # Test-case number line (part of the input format; value unused further).
    N = int(input())
    # Read the 100 grid rows.
    grid = [list(map(int, input().split())) for _ in range(100)]

    row_sums = [sum(row) for row in grid]
    col_sums = [sum(grid[r][c] for r in range(100)) for c in range(100)]

    # BUG FIX: the diagonal accumulators were reset to 0 on every loop
    # iteration, so only the last element of each diagonal was counted.
    diag_main = sum(grid[i][i] for i in range(100))
    diag_anti = sum(grid[99 - i][i] for i in range(100))

    print(f'#{tc} {max(max(row_sums), max(col_sums), diag_main, diag_anti)}')
| hong00009/algo | swea/1209/sol.py | sol.py | py | 728 | python | en | code | 0 | github-code | 36 |
15698017832 | from django.shortcuts import render
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from django.db.models import Q
from .serializer import GetCommentSerializer,AddCommentModelserializer,AddReplyCommentModelSerializer
from .models import Comment
from rest_framework.pagination import PageNumberPagination
from rest_framework import generics
# class GetComments (APIView):
# def get (self,request,id):
# comment = Comment.objects.filter(Q(product__id=id) & Q(reply=None) & Q(status=True))
# serializer = GetCommentSerializer(comment,many=True)
# return Response(serializer.data , status=status.HTTP_200_OK)
class StandardResultsSetPagination(PageNumberPagination):
    """Page-number pagination: 3 items per page; clients may override via ?page_size= up to 10."""
    page_size = 3
    page_size_query_param = 'page_size'
    max_page_size = 10
class GetComments (generics.ListAPIView):
    """Paginated list of approved top-level comments for one product."""
    # queryset = Product.objects.filter(category=8)
    serializer_class = GetCommentSerializer
    pagination_class = StandardResultsSetPagination
    # URL kwarg carrying the product id.
    lookup_url_kwarg = "id"
    def get_queryset(self):
        """Return visible (status=True) root comments (reply=None) for the product, newest first."""
        id_product = self.kwargs.get(self.lookup_url_kwarg)
        comment = Comment.objects.filter(Q(product__id=id_product) & Q(reply=None) & Q(status=True)).order_by("-id")
        return comment
class AddComment (APIView):
    """Create a new top-level comment authored by the authenticated user."""
    permission_classes=[IsAuthenticated]
    def post(self,request):
        """Validate the payload, attach the session user as author, and save."""
        serializer = AddCommentModelserializer(data = request.data)
        serializer.is_valid(raise_exception=True)
        # Author comes from the session, never from the request body.
        serializer.validated_data['user']=request.user
        serializer.save()
        return Response(status=status.HTTP_200_OK)
class AddReplyComment(APIView):
    """Create a reply to an existing comment, authored by the authenticated user."""
    permission_classes=[IsAuthenticated]
    def post (self , request):
        """Validate the payload, attach the session user as author, and save."""
        serializer = AddReplyCommentModelSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        # Author comes from the session, never from the request body.
        serializer.validated_data['user']=request.user
        serializer.save()
        return Response(status=status.HTTP_200_OK)
5663610357 | import methods
import matplotlib.pyplot as plt
def func(x, y):
    """Right-hand side of the IVP: y' = 1 + 1.8*y*sin(x) - y**2."""
    return 1 + 1.8*y*methods.np.sin(x) - y**2
# Initial condition y(0) = 0, integrated over [0, 6] with step h = 0.1.
x0 = 0
y0 = 0
x_end = 6
h = 0.1
# Solve with Runge-Kutta at step h and at h/2 (the halved run feeds the
# Runge-rule error estimate below).
x1, y1, h = methods.runge_kutta(x0, y0, x_end, func, h, True)
x1_halved, y1_halved = methods.runge_kutta(x0, y0, x_end, func, h/2, False)
# Adams method is bootstrapped from the first four Runge-Kutta points.
x2, y2 = methods.adams(x1[:4], y1[:4], x_end, func, h)
x2_halved, y2_halved = methods.adams(x1_halved[:4], y1_halved[:4], x_end, func, h/2)
# Error estimates (4th order) from the h vs h/2 solutions.
e1 = methods.evaluate_error_runge(y1, y1_halved, 4)
e2 = methods.evaluate_error_runge(y2, y2_halved, 4)
print(f"  N      x    Runge-Kutta     e1        Adams       e2")
for i in range(len(x1)):
    print(f" {i:>3}  {round(x1[i], 5):<6}   {y1[i]:8.6f}   {e1[i]:8.1e}   {y2[i]:8.6f}  {e2[i]:8.1e}")
# Plot the two solutions.
# NOTE(review): fig.canvas.set_window_title was deprecated/removed in recent
# Matplotlib; fig.canvas.manager.set_window_title is the replacement — confirm
# the pinned Matplotlib version.
fig = plt.gcf() # to be able to change window title
fig.canvas.set_window_title("Розв'язок")
plt.plot(x1, y1, 'b', label = "Метод Рунге-Кутта")
plt.plot(x2, y2, 'y', label = "Метод Адамса")
plt.legend(loc="best")
plt.show()
# Plot the error estimates.
fig = plt.gcf() # to be able to change window title
fig.canvas.set_window_title("Похибка")
plt.plot(x1, e1, 'b', label = "Похибка методу Рунге-Кутта")
plt.plot(x2, e2, 'y', label = "Похибка методу Адамса")
plt.legend(loc="best")
plt.show()
4430105383 | # Time complexity - O(n)
class Node():
def __init__(self, data):
self.data = data
self.next = None
def printLL(head):
while head is not None:
print(str(head.data)+"->", end="")
head = head.next
print("None")
return
def takeInput():
head = None
tail = None
input_list = [int(ele) for ele in input().split()]
for currdata in input_list:
if currdata == -1:
break
newNode = Node(currdata)
if head is None:
head = newNode
tail = newNode
else:
tail.next = newNode
tail = newNode
return head
head = takeInput()
printLL(head)
| Riyachauhan11/Python-learning-Concepts | linked lists/optimized_input_ll.py | optimized_input_ll.py | py | 728 | python | en | code | 0 | github-code | 36 |
17836371460 | """
A abordagem abaixo, utilizando RDD ao invés do Dataframe, começou a dar um erro de 'list index out of range', o qual não consegui corrigir.
Então como estava levando muito tempo nesta abordagem, fiz utilizando Dataframe (arquivo NasaFileAnalysis.py).
"""
from operator import add
# NOTE(review): `sc` is not defined in this file — presumably the SparkContext
# injected by the pyspark shell; confirm how this script is launched.
textFile = sc.textFile("teste_semantix/access_log_Jul95, teste_semantix/access_log_Aug95")
lines = textFile.flatMap(lambda line: line.split("\n"))
lines.count()
# Split each log line on spaces; field 0 is the host.
lines_splited = lines.map(lambda row: row.split(" "))
pre_hosts = lines_splited.map(lambda row: (row[0], 1))
hosts = pre_hosts.reduceByKey(add)
uniqueHostsCount = hosts.filter(lambda tup: tup[1] == 1)
# total unique hosts
uniqueHostsCount.count()
# total 404 errors
# NOTE(review): map() keeps every line (as booleans), so count() returns the
# total line count, not the number of 404s — this likely should be filter().
err404 = lines.map(lambda line: ' 404 ' in line)
err404.count()
# another approach to get the total of 404 errors
# NOTE(review): `rdd` is never defined in this file — this line cannot run as-is.
err = rdd.filter(lambda x: x[8] == '404').map(lambda x: (x[0], 1))
err = err.reduceByKey(add)
31524008748 | import fitz
from loguru import logger
def get_page_from_sheet(sheet: str, pdf_fpath=None, doc=None):
"""
Get the page number from the sheet.
"""
if (pdf_fpath is not None) and (doc is not None):
raise ValueError("Only one of pdf_fpath or doc can be specified.")
if pdf_fpath:
doc = fitz.open(pdf_fpath)
# check each page
for i in range(len(doc)-1, -1, -1): # iterate backwards over all pages
page = doc[i]
# define the rectangles representing the corners of the page
parts = 4
corners = [
fitz.Rect(0, 0, page.rect.width / parts, page.rect.height / parts), # top left
fitz.Rect(page.rect.width / parts, 0, page.rect.width, page.rect.height / parts), # top right
fitz.Rect(0, page.rect.height / parts, page.rect.width / parts, page.rect.height), # bottom left
fitz.Rect(page.rect.width / parts, page.rect.height / parts, page.rect.width, page.rect.height) # bottom right
]
# check each of the four corners of the page for the sheet number
for corner in corners:
matches = page.search_for(sheet, hit_max=1, area=corner)
if matches: # if the sheet number is found
logger.info(f"Sheet number {sheet} found on page {i} at location {matches[0]}")
return i, matches[0] # return the page number (0-indexed)
return None # if the sheet number is not found on any page | fuzzy-tribble/meche-copilot | meche_copilot/pdf_helpers/get_page_from_sheet.py | get_page_from_sheet.py | py | 1,478 | python | en | code | 1 | github-code | 36 |
2838526539 | from Action import Action
class Game:
    """A single round of rock/paper/scissors between two player objects."""
    def __init__(self, p1, p2):
        """Store the two players; winner and chosen actions start unset."""
        self.p1, self.p2 = p1, p2
        self.winner, self.a1, self.a2 = None, None, None

    # Runs a game
    def play_game(self):
        """Ask both players for an action, notify them of the round, and set self.winner (None on a draw)."""
        self.a1, self.a2 = self.p1.choose_action(), self.p2.choose_action()
        # Send the actions played to the players (for MostFrequent and Historian)
        self.p1.receive_result(self)
        self.p2.receive_result(self)
        # Make a1, a2 into comparable Action objects
        a1, a2 = Action(self.a1), Action(self.a2)
        # Choose the winner
        if a1 == a2:
            return
        elif a1 > a2:
            self.winner = self.p1
        else:
            self.winner = self.p2

    # print action
    @staticmethod
    def print_action(a):
        """Map an action code (0/1/2) to its display name, or "N/A" for anything else."""
        if a == 0:
            return "Rock"
        elif a == 1:
            return "Paper"
        elif a == 2:
            return "Scissor"
        else:
            return "N/A"

    # toString-method
    def __str__(self):
        # BUG FIX: the second line read '... used' without a trailing space,
        # producing e.g. "Player Bob usedRock".
        text = "Player "+self.p1.name+" used " + self.print_action(self.a1) + "\n" + "Player "+self.p2.name+" used " +\
               self.print_action(self.a2) + "\n"
        if self.winner is None:
            return text+"It was a draw"
        elif self.winner == self.p1:
            return text+"Player "+self.p1.name+" won!"
        else:
            return text+"Player "+self.p2.name+" won!"
7538069613 | # Aloysio
def solve(k, ns, m=None):
    """Count the ordered ways (compositions) to write k as a sum of values from ns.

    Args:
        k: target sum; k < 0 yields 0 and k == 0 yields 1 (the empty sum).
        ns: iterable of positive step sizes.
        m: optional memo list of length >= k + 1 pre-filled with -1.
           Created automatically when omitted (backward-compatible addition).

    Returns:
        The number of ordered sequences of elements of ns summing to k.
    """
    if m is None:
        m = [-1] * (k + 1)
    if k < 0:
        return 0
    if k == 0:
        return 1
    if m[k] >= 0:  # already computed
        return m[k]
    sol = 0
    for n in ns:
        sol += solve(k - n, ns, m)
    m[k] = sol  # memoise before returning
    return sol


if __name__ == "__main__":
    n = 2
    ns = [1, 2]
    m = [-1 for i in range(n + 1)]
    print(solve(n, ns, m))
| daily-coding-x-br/daily-coding | may/14/aloysio.py | aloysio.py | py | 342 | python | en | code | 2 | github-code | 36 |
36728617897 | from pwn import *
from time import sleep
# Exploit for the 'lol.bin' CTF challenge: authenticate, read the leaked
# address of main, derive the PIE base, then send a ROP chain that calls
# puts(puts@got) to leak libc and returns into the challenge loop.
# Challenge password/token expected at the first prompt.
pw= '8d16635db965bc4e0a97521e8105fad2'
p = process('./lol.bin')
#p = remote("auto-pwn.chal.csaw.io", 11001)
#pid = gdb.attach(p, gdbscript="""
#        b * runChallenge
#        """)
#parse it
p.sendlineafter('> ',pw)
#sleep(1)
p.recvuntil('\nMain is at ')
#sleep(2)
# The binary prints the runtime address of main; parse it as hex.
main = int(p.recvline().replace('\n',''),16)
print('main: ',hex(main))
# 0x1421 is presumably main's static offset in the PIE binary — confirm
# against the binary's symbol table.
base = main - 0x1421
print("base: ",hex(base))
run_chall = main -0x20
# Gadget/symbol offsets relative to the PIE base (from the binary).
pop_rdi_ret=0x00000000000012f2 + base
puts_got = 0x3fa0 + base
puts_plt = 0x0010f0 + base
ret = 0x000000000000101a + base
#rop leak libc
# 9 bytes of padding up to the saved return address, then:
# pop rdi; ret -> puts@got -> puts@plt (prints libc address) -> back to
# the challenge function for a second stage.
padd = "A"*9
payload = padd
payload += p64(pop_rdi_ret)
payload += p64(puts_got)
payload += p64(puts_plt)
payload += p64(run_chall)
#p.recv(100)
p.sendline(payload)
# Stage 2 (commented out): compute system/"/bin/sh" from the leaked puts
# address and send a system("/bin/sh") chain.
#libc_puts = u64(p.recv(6).ljust(8,'\0'))
#print('libc_puts: ',hex(libc_puts))
#system = libc_puts -0x29b10
#bin_sh = libc_puts +0xf8c89
#payload2 = padd
#payload2 += p64(pop_rdi_ret)
#payload2 += p64(bin_sh)
#payload2 += p64(ret)
#payload2 += p64(system)
#p.recv(100)
#p.sendline(payload2)
p.interactive()
75052351464 | import random
import socket, os
class RAT_SERVER:
    def __init__(self, host, port):
        """Remember the address the command server will listen on."""
        self.host = host
        self.port = port
    def build_connection(self):
        """Open a listening TCP socket and block until one client connects.

        The listening socket, client socket and address are stored in
        module-level globals (s, client, addr) so the other methods and the
        command loop can reach them.
        """
        global client, addr, s
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((self.host, self.port))
        s.listen(5)
        print("[*] Waiting for the client...")
        client, addr = s.accept()
        print()
        # The client's first message is expected to be its own IP address.
        ipcli = client.recv(1024).decode()
        print(f"[*] Connection is established successfully with {ipcli}")
        print()
def server(self):
try:
from vidstream import StreamingServer
global server
server = StreamingServer(self.host, 8080)
server.start_server()
except:
print("Module not found...")
    def stop_server(self):
        """Stop the streaming server (requires the global ``server`` set by server())."""
        server.stop_server()
    def result(self):
        """Send the pending command (global ``command`` set by execute()) and print the client's reply."""
        client.send(command.encode())
        result_output = client.recv(1024).decode()
        print(result_output)
    def banner(self):
        """Print the interactive help menu listing every supported command."""
        print("======================================================")
        print("                     Commands                        ")
        print("======================================================")
        print("System:   ")
        print("======================================================")
        print(f'''
        help                 all commands available
        writein <text>       write the text to current opened window
        browser              enter quiery to browser
        turnoffmon           turn off the monitor
        turnonmon            turn on the monitor
        reboot               reboot the system
        drivers              all drivers of PC
        kill                 kill the system task
        sendmessage          send messagebox with the text
        cpu_cores            number of CPU cores
        systeminfo           (extended) all basic info about system (via cmd)
        tasklist             all system tasks
        localtime            current system time
        curpid               PID of client's process
        sysinfo              (shrinked) basic info about system (Powers of Python)
        shutdown             shutdown client's PC
        isuseradmin          check if user is admin
        extendrights         extend system rights
        disabletaskmgr       disable Task Manager
        enabletaskmgr        enable Task Manager
        disableUAC           disable UAC
        monitors             get all used monitors
        geolocate            get location of computer
        volumeup             increase system volume to 100%
        volumedown           decrease system volume to 0%
        setvalue             set value in registry
        delkey               delete key in registry
        createkey            create key in registry
        setwallpaper         set wallpaper
        exit                 terminate the session of RAT
        ''')
        print("======================================================")
        print("Shell: ")
        print("======================================================")
        print(f'''
        pwd                  get current working directory
        shell                execute commands via cmd
        cd                   change directory
        [Driver]:            change current driver
        cd ..                change directory back
        dir                  get all files of current directory
        abspath              get absolute path of files
        ''')
        print("======================================================")
        print("Network: ")
        print("======================================================")
        print(f'''
        ipconfig             local ip
        portscan             port scanner
        profiles             network profiles
        profilepswd          password for profile
        ''')
        print("======================================================")
        print("Input devices: ")
        print("======================================================")
        print(f'''
        keyscan_start        start keylogger
        send_logs            send captured keystrokes
        stop_keylogger       stop keylogger
        disable(--keyboard/--mouse/--all)
        enable(--keyboard/--mouse/--all)
        ''')
        print("======================================================")
        print("Video: ")
        print("======================================================")
        print(f'''
        screenshare          overseing remote PC
        webcam               webcam video capture
        breakstream          break webcam/screenshare stream
        screenshot           capture screenshot
        webcam_snap          capture webcam photo
        ''')
        print("======================================================")
        print("Files:")
        print("======================================================")
        print(f'''
        delfile <file>           delete file
        editfile <file> <text>   edit file
        createfile <file>        create file
        download <file> <homedir>  download file
        upload                   upload file
        cp <file1> <file2>       copy file
        mv <file> <path>         move file
        searchfile <file> <dir>  search for file in mentioned directory
        mkdir <dirname>          make directory
        rmdir <dirname>          remove directory
        startfile <file>         start file
        readfile <file>          read from file
        ''')
        print("======================================================")
def execute(self):
self.banner()
while True:
global command
command = input('Command >> ')
if command == 'shell':
client.send(command.encode())
while 1:
command = str(input('>> '))
client.send(command.encode())
if command.lower() == 'exit':
break
result_output = client.recv(1024).decode()
print(result_output)
client.close()
s.close()
elif command == 'drivers':
self.result()
elif command == 'setvalue':
client.send(command.encode())
const = str(input("Enter the HKEY_* constant [HKEY_CLASSES_ROOT, HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, HKEY_USERS, HKEY_CURRENT_CONFIG]: "))
root = str(input('Enter the path to store key [ex. SOFTWARE\\test]: '))
key = str(input('Enter the key name: '))
value = str(input('Enter the value of key [None, 0, 1, 2 etc.]: '))
client.send(const.encode())
client.send(root.encode())
client.send(key.encode())
client.send(value.encode())
result_output = client.recv(1024).decode()
print(result_output)
elif command == 'delkey':
client.send(command.encode())
const = str(input("Enter the HKEY_* constant [HKEY_CLASSES_ROOT, HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, HKEY_USERS, HKEY_CURRENT_CONFIG]: "))
root = str(input('Enter the path to key: '))
client.send(const.encode())
client.send(root.encode())
result_output = client.recv(1024).decode()
print(result_output)
elif command == 'createkey':
client.send(command.encode())
const = str(input("Enter the HKEY_* constant [HKEY_CLASSES_ROOT, HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, HKEY_USERS, HKEY_CURRENT_CONFIG]: "))
root = str(input('Enter the path to key: '))
client.send(const.encode())
client.send(root.encode())
result_output = client.recv(1024).decode()
print(result_output)
elif command == 'disableUAC':
self.result()
elif command == 'reboot':
self.result()
elif command == 'usbdrivers':
self.result()
elif command == 'volumeup':
self.result()
elif command == 'volumedown':
self.result()
elif command == 'monitors':
self.result()
elif command[:4] == 'kill':
if not command[5:]:
print("No process mentioned to terminate")
else:
self.result()
elif command == 'extendrights':
self.result()
elif command == 'geolocate':
self.result()
elif command == 'turnoffmon':
self.result()
elif command == 'turnonmon':
self.result()
elif command == 'setwallpaper':
client.send(command.encode())
text = str(input("Enter the filename: "))
client.send(text.encode())
result_output = client.recv(1024).decode()
print(result_output)
elif command == 'keyscan_start':
client.send(command.encode())
result_output = client.recv(1024).decode()
print(result_output)
elif command == 'send_logs':
client.send(command.encode())
result_output = client.recv(1024).decode()
print(result_output)
elif command == 'stop_keylogger':
client.send(command.encode())
result_output = client.recv(1024).decode()
print(result_output)
elif command[:7] == 'delfile':
if not command[8:]:
print("No file to delete")
else:
self.result()
elif command[:10] == 'createfile':
if not command[11:]:
print("No file to create")
else:
self.result()
elif command == 'tasklist':
self.result()
elif command == 'ipconfig':
self.result()
elif command[:7] == 'writein':
if not command[8:]:
print("No text to output")
else:
self.result()
elif command == 'sendmessage':
client.send(command.encode())
text = str(input("Enter the text: "))
client.send(text.encode())
title = str(input("Enter the title: "))
client.send(title.encode())
result_output = client.recv(1024).decode()
print(result_output)
elif command == 'profilepswd':
client.send(command.encode())
profile = str(input("Enter the profile name: "))
client.send(profile.encode())
result_output = client.recv(2147483647).decode()
print(result_output)
elif command == 'profiles':
self.result()
elif command == 'cpu_cores':
self.result()
elif command[:2] == 'cd':
if not command[3:]:
print("No directory")
else:
self.result()
elif command == 'cd ..':
self.result()
elif command[1:2] == ':':
self.result()
elif command == 'dir':
self.result()
elif command == 'portscan':
self.result()
elif command == 'systeminfo':
self.result()
elif command == 'localtime':
self.result()
elif command[:7] == 'abspath':
if not command[8:]:
print("No file")
else:
self.result()
elif command[:8] == 'readfile':
if not command[9:]:
print("No file to read")
else:
client.send(command.encode())
result_output = client.recv(2147483647).decode()
print("===================================================")
print(result_output)
print("===================================================")
elif command.startswith("disable") and command.endswith("--keyboard"):
self.result()
elif command.startswith("disable") and command.endswith("--mouse"):
self.result()
elif command.startswith("disable") and command.endswith("--all"):
self.result()
elif command.startswith("enable") and command.endswith("--all"):
self.result()
elif command.startswith("enable") and command.endswith("--keyboard"):
self.result()
elif command.startswith("enable") and command.endswith("--mouse"):
self.result()
elif command[:7] == 'browser':
client.send(command.encode())
quiery = str(input("Enter the quiery: "))
client.send(quiery.encode())
result_output = client.recv(1024).decode()
print(result_output)
elif command[:2] == 'cp':
self.result()
elif command[:2] == 'mv':
self.result()
elif command[:8] == 'editfile':
self.result()
elif command[:5] == 'mkdir':
if not command[6:]:
print("No directory name")
else:
self.result()
elif command[:5] == 'rmdir':
if not command[6:]:
print("No directory name")
else:
self.result()
elif command[:10] == 'searchfile':
self.result()
elif command == 'curpid':
self.result()
elif command == 'sysinfo':
self.result()
elif command == 'pwd':
self.result()
elif command == 'screenshare':
client.send(command.encode("utf-8"))
self.server()
elif command == 'webcam':
client.send(command.encode("utf-8"))
self.server()
elif command == 'breakstream':
self.stop_server()
elif command[:9] == 'startfile':
if not command[10:]:
print("No file to launch")
else:
self.result()
elif command[:8] == 'download':
try:
client.send(command.encode())
file = client.recv(2147483647)
with open(f'{command.split(" ")[2]}', 'wb') as f:
f.write(file)
f.close()
print("File is downloaded")
except:
print("Not enough arguments")
elif command == 'upload':
client.send(command.encode())
file = str(input("Enter the filepath to the file: "))
filename = str(input("Enter the filepath to outcoming file (with filename and extension): "))
data = open(file, 'rb')
filedata = data.read(2147483647)
client.send(filename.encode())
print("File has been sent")
client.send(filedata)
elif command == 'disabletaskmgr':
self.result()
elif command == 'enabletaskmgr':
self.result()
elif command == 'isuseradmin':
self.result()
elif command == 'help':
self.banner()
elif command == 'screenshot':
client.send(command.encode())
file = client.recv(2147483647)
path = f'{os.getcwd()}\\{random.randint(11111,99999)}.png'
with open(path, 'wb') as f:
f.write(file)
f.close()
path1 = os.path.abspath(path)
print(f"File is stored at {path1}")
elif command == 'webcam_snap':
client.send(command.encode())
file = client.recv(2147483647)
with open(f'{os.getcwd()}\\{random.randint(11111,99999)}.png', 'wb') as f:
f.write(file)
f.close()
print("File is downloaded")
elif command == 'exit':
client.send(command.encode())
output = client.recv(1024)
output = output.decode()
print(output)
s.close()
client.close()
rat = RAT_SERVER('127.0.0.1', 4444)
if __name__ == '__main__':
rat.build_connection()
rat.execute() | FZGbzuw412/Python-RAT | server.py | server.py | py | 18,162 | python | en | code | 86 | github-code | 36 |
25917900708 | from __future__ import print_function
import argparse
import random
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from os import listdir
from os.path import join
from moviepy.editor import *
model = torch.hub.load('pytorch/vision', 'deeplabv3_resnet101', pretrained=True)
people_class = 15
model.eval()
print ("Model Loaded")
blur = torch.FloatTensor([[[[1.0, 2.0, 1.0],[2.0, 4.0, 2.0],[1.0, 2.0, 1.0]]]]) / 16.0
# move the input and model to GPU for speed if available
if torch.cuda.is_available():
model.to('cuda')
blur = blur.to('cuda')
import urllib
from torchvision import transforms
preprocess = transforms.Compose([
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def makeSegMask(img):
    """Return a 3-channel uint8 soft mask (0-255) of the people in one frame.

    Relies on the module-level DeepLabV3 ``model``, ``preprocess`` normalizer,
    ``blur`` kernel and ``people_class`` (15 = 'person' in the VOC label set).
    # assumes img is an HxWx3 RGB frame with values 0-255 — TODO confirm
    # against the moviepy fl_image caller.
    """
    frame_data = torch.FloatTensor( img ) / 255.0
    input_tensor = preprocess(frame_data.permute(2, 0, 1))
    input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
    # move the input and model to GPU for speed if available
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
    with torch.no_grad():
        output = model(input_batch)['out'][0]
    segmentation = output.argmax(0)
    # Channel 0 is the background score; squash it into a soft "not background" weight.
    bgOut = output[0:1][:][:]
    a = (1.0 - F.relu(torch.tanh(bgOut * 0.30 - 1.0))).pow(0.5) * 2.0
    # Hard 0/1 mask of pixels whose argmax class is "person".
    people = segmentation.eq( torch.ones_like(segmentation).long().fill_(people_class) ).float()
    people.unsqueeze_(0).unsqueeze_(0)
    # Three passes of the 3x3 Gaussian kernel to feather the mask edges.
    for i in range(3):
        people = F.conv2d(people, blur, stride=1, padding=1)
    # combined_mask = F.hardtanh(a * b)
    combined_mask = F.relu(F.hardtanh(a * (people.squeeze().pow(1.5)) ))
    combined_mask = combined_mask.expand(1, 3, -1, -1)
    res = (combined_mask * 255.0).cpu().squeeze().byte().permute(1, 2, 0).numpy()
    return res
def processMovie(args):
    """Run person segmentation over every frame of ``args.input`` and write
    the resulting mask video to ``args.output``.

    A non-zero ``args.width`` rescales frames to that width (height inferred).
    """
    print("Processing {}... This will take some time.".format(args.input))
    target = [args.width, None] if args.width != 0 else None
    source_clip = VideoFileClip(args.input, target_resolution=target)
    mask_clip = source_clip.fl_image(makeSegMask)
    mask_clip.write_videofile(args.output)
def main():
    """CLI entry point: parse --input/--output/--width, then process the movie."""
    parser = argparse.ArgumentParser(description='BGRemove')
    parser.add_argument('--input', metavar='N', required=True,
                        help='input movie path')
    parser.add_argument('--output', metavar='N', required=True,
                        help='output movie path')
    parser.add_argument('--width', metavar='N', type=int, default=0,
                        help='target width (optional, omit for full width)')
    processMovie(parser.parse_args())
if __name__ == '__main__':
main()
| WhiteNoise/deep-bgremove | createmask.py | createmask.py | py | 2,592 | python | en | code | 61 | github-code | 36 |
37777774456 | '''
Server端流程
1. 建立socket,socket是负责具体通信的一个实例(可接两端的插口)
2. 绑定,为创建的socket指派固定的端口和ip地址
3. 接受对方发送的内容
4. 给对方反馈, 此步骤为非必须
'''
# socket 模块负责socket编程
import socket
# 模拟服务器的函数
def serverFunc():
    """One-shot UDP demo server.

    Receives a single datagram on 127.0.0.1:7852, prints it, and replies
    with a fixed message.

    Bug fix: the original never closed the socket; it is now released in a
    ``finally`` block even if recv/send raises.
    """
    # AF_INET: IPv4 protocol family; SOCK_DGRAM: UDP transport.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # Bind to a fixed local address; the address is an (ip, port) tuple.
        addr = ('127.0.0.1', 7852)
        sock.bind(addr)
        # Block until one datagram arrives; 500 is the receive buffer size.
        # recvfrom returns (payload_bytes, sender_address).
        data, addr = sock.recvfrom(500)
        print(data)
        print(type(data))
        # The payload arrives as bytes; decode (UTF-8 by default) to str.
        text = data.decode()
        print(type(text))
        print(text)
        # Reply to the sender; outgoing data must be encoded back to bytes.
        rsp = 'i am not hungry'
        data = rsp.encode()
        sock.sendto(data, addr)
    finally:
        sock.close()
# Run the one-shot UDP demo server when executed as a script.
if __name__ == '__main__':
    print('Starting server.....')
    serverFunc()
    print('Ending server....')
31618419583 | import torch
from pathlib import Path
import copy
import time
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import pdb
import skimage
from distutils.version import LooseVersion
from skimage.transform import resize as sk_resize
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
           preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
    """Version-aware wrapper around ``skimage.transform.resize``.

    skimage 0.14 introduced the ``anti_aliasing`` arguments; passing them to
    an older skimage raises, so they are only forwarded when supported. This
    also provides one central place to control resizing defaults.
    """
    kwargs = dict(order=order, mode=mode, cval=cval, clip=clip,
                  preserve_range=preserve_range)
    if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
        # New in 0.14: anti_aliasing (defaulted False for 0.13 compatibility).
        kwargs.update(anti_aliasing=anti_aliasing,
                      anti_aliasing_sigma=anti_aliasing_sigma)
    return skimage.transform.resize(image, output_shape, **kwargs)
def minimize_mask(bbox, mask, mini_shape):
    """Resize masks to a smaller version to reduce memory load.

    bbox: [num_instances, (y1, x1, y2, x2)] crop box per instance.
    mask: [H, W, num_instances] full-size masks.
    mini_shape: (h, w) target size of each cropped mask.
    Returns a bool array of shape mini_shape + (num_instances,).
    Mini-masks can be resized back to image scale using expand_mask().

    Fix: ``np.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    """
    mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
    for i in range(mask.shape[-1]):
        # Pick slice and cast to bool in case load_mask() returned wrong dtype
        m = mask[:, :, i].astype(bool)
        y1, x1, y2, x2 = bbox[i][:4]
        m = m[y1:y2, x1:x2]
        if m.size == 0:
            raise Exception("Invalid bounding box with area of zero")
        # Resize with bilinear interpolation
        m = resize(m, mini_shape)
        mini_mask[:, :, i] = np.around(m).astype(bool)
    return mini_mask
def expand_mask(bbox, mini_mask, image_shape):
    """Resizes mini masks back to image size. Reverses the change
    of minimize_mask().

    mini_mask: [num_instances, h, w] cropped masks.
    bbox: [num_instances, (y1, x1, y2, x2)] placement box per instance.
    Returns a bool array of shape (num_instances,) + image_shape[:2].

    Fix: ``np.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    """
    mask = np.zeros((mini_mask.shape[0],) + image_shape[:2], dtype=bool)
    for i in range(mask.shape[0]):
        m = mini_mask[i, :, :]
        y1, x1, y2, x2 = bbox[i][:4]
        h = y2 - y1
        w = x2 - x1
        # Resize with bilinear interpolation
        m = resize(m, (h, w))
        mask[i, y1:y2, x1:x2] = np.around(m).astype(bool)
    return mask
def unmold_mask(mask, bbox, image_shape):
    """Converts a mask generated by the neural network to a format similar
    to its original shape.
    mask: [height, width] of type float. A small, typically 28x28 mask.
    bbox: [y1, x1, y2, x2]. The box to fit the mask in.
    Returns a binary mask with the same size as the original image.

    Fix: ``np.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    """
    threshold = 0.5
    y1, x1, y2, x2 = bbox
    mask = resize(mask, (y2 - y1, x2 - x1))
    # Binarize the resized float mask at 0.5.
    mask = np.where(mask >= threshold, 1, 0).astype(bool)
    # Put the mask in the right location.
    full_mask = np.zeros(image_shape, dtype=bool)
    full_mask[y1:y2, x1:x2] = mask
    return full_mask
def model_out_to_unmold(outputs28):
    """Expand a (batch, 1, 28, 28) mask batch to a (batch, 1, 224, 224)
    float32 torch tensor via unmold_mask."""
    n = outputs28.size(0)
    # (batch, 1, 28, 28) -> (28, 28, batch), as unmold_mask expects.
    small = outputs28.detach().cpu().numpy()[:, 0, :, :].transpose(1, 2, 0)
    full = unmold_mask(small, [0, 0, 223, 223], (224, 224, n))
    # (224, 224, batch) -> (batch, 1, 224, 224)
    full = full[np.newaxis, ...].transpose(3, 0, 1, 2).astype(np.float32)
    return torch.from_numpy(full)
def viz_prediction(track_sample, pred, epoch):
    """Save a 6-panel figure (4 scan modalities + label + prediction) for the
    tracked sample to sample_tracking/<epoch>.jpg.

    Bug fix: the figure is now closed after saving; the original leaked one
    matplotlib figure per epoch, growing memory over a long training run.
    """
    scans, label = track_sample
    # (C, H, W) -> (H, W, C); label/pred become single trailing channels.
    scans, label = scans.numpy().transpose((1, 2, 0)), label.numpy()[0][..., np.newaxis]
    pred = pred[0].numpy()[..., np.newaxis]
    scans_stack = np.concatenate([scans, label, pred], axis=-1)
    fig = plt.figure(figsize=(20, 6))
    fig.suptitle('TRACKING Sample')
    for slice_, scan in enumerate(['dwi', 'flair', 't1', 't2', 'label', 'predicted']):
        ax = plt.subplot(1, 6, slice_ + 1)
        # Masks render with the binary colormap, scans in grayscale.
        show_single_img(scans_stack[:, :, slice_], (scan == 'label' or scan == 'predicted'))
        plt.tight_layout()
        ax.set_title(scan)
        ax.axis('off')
    plt.savefig('sample_tracking/' + str(epoch) + '.jpg')
    plt.close(fig)
def actual_predicted(actual, predicted, save_path):
    """Save a side-by-side Actual/Predicted figure to ``save_path``.

    Bug fix: the figure is now closed after saving; the original leaked one
    matplotlib figure per call.
    """
    fig = plt.figure(figsize=(10, 5))
    fig.suptitle('Actual-Predicted')
    ax = plt.subplot(1, 2, 1)
    show_single_img(actual)
    plt.tight_layout()
    ax.set_title('Actual')
    ax.axis('off')
    ax = plt.subplot(1, 2, 2)
    show_single_img(predicted)
    plt.tight_layout()
    ax.set_title('Predicted')
    ax.axis('off')
    plt.savefig(save_path)
    plt.close(fig)
def show_single_img(image, label=False):
    """Draw one 2-D image on the current axes; ``label=True`` selects the
    binary colormap used for masks, otherwise grayscale."""
    plt.imshow(image, cmap='binary' if label else 'gray')
def get_prob_map28(outputs28):
    """From a (batch, 2, 28, 28) two-class score map, return the
    (batch, 1, 28, 28) probability of the argmax class per pixel, with
    channel 0 (background) re-expressed as 1 - p.

    Bug fixes vs. the original:
    - ``squeeze()`` collapsed the batch dimension when batch == 1, making the
      subsequent permute fail; ``squeeze(1)`` removes only the channel dim.
    - ``F.one_hot`` without ``num_classes`` mis-sizes the one-hot tensor when
      the highest class never wins the argmax; the class count is now pinned
      to ``outputs28.size(1)``.
    - the input tensor was mutated in place through ``.data``; we now work on
      a detached clone and leave the caller's tensor untouched.
    """
    # Argmax class per pixel, channel dim kept: (batch, 1, 28, 28).
    max_prob, pred28_argmax = torch.max(outputs28, dim=1, keepdim=True)
    pred28 = outputs28.detach().clone()
    pred28[:, 0, :, :] = 1 - outputs28[:, 0, :, :]
    # One-hot of the winning class: (batch, 2, 28, 28) bool selector.
    one_hot = F.one_hot(pred28_argmax.squeeze(1),
                        num_classes=outputs28.size(1)).permute(0, 3, 1, 2).bool()
    # Keep only the winning class's (re-expressed) probability per pixel.
    pred28_prob = torch.sum(pred28 * one_hot, dim=1, keepdim=True)
    return pred28_prob
def dice_loss(input, target):
    """Soft Dice loss between two tensors, flattened; 0 for a perfect match.

    The +1 smoothing term keeps the ratio defined for empty masks.
    """
    smooth = 1.
    flat_pred = input.view(-1)
    flat_true = target.view(-1)
    overlap = (flat_pred * flat_true).sum()
    denom = flat_pred.sum() + flat_true.sum() + smooth
    return 1 - (2. * overlap + smooth) / denom
# Adapted from
# https://github.com/Kaixhin/FCN-semantic-segmentation/blob/master/main.py
def iou(pred, target):
    """Per-class intersection-over-union for a 2-class segmentation.

    Returns [iou_class0, iou_class1]; a class with no ground truth and no
    prediction contributes NaN so it can be excluded from averages.
    """
    n_class = 2
    ious = []
    for cls in range(n_class):
        pred_mask = pred == cls
        true_mask = target == cls
        overlap = pred_mask[true_mask].sum()
        union = pred_mask.sum() + true_mask.sum() - overlap
        if union == 0:
            # No ground truth for this class: do not include in evaluation.
            ious.append(float('nan'))
        else:
            ious.append(float(overlap) / max(union, 1))
    return ious
def pixel_acc(pred, target):
    """Fraction of positions where prediction equals target."""
    matches = (pred == target).sum()
    total = (target == target).sum()
    return matches / total
def iou_boxes(box1, box2):
    """Intersection-over-union of two (x1, y1, x2, y2) boxes.

    Bug fix: the original computed box areas as (x2-x1)*(y2-y1) but the
    intersection with an extra +1 per side, mixing two area conventions.
    Identical boxes then scored 121/79 > 1. All areas now use the same
    (x2-x1)*(y2-y1) convention, so the result is always in [0, 1].
    """
    xa1, ya1, xa2, ya2 = box1
    xb1, yb1, xb2, yb2 = box2
    area_a = (ya2 - ya1) * (xa2 - xa1)
    area_b = (yb2 - yb1) * (xb2 - xb1)
    inter_x1 = max(xa1, xb1)
    inter_y1 = max(ya1, yb1)
    inter_x2 = min(xa2, xb2)
    inter_y2 = min(ya2, yb2)
    if (inter_x1 < inter_x2) and (inter_y1 < inter_y2):
        inter_area = (inter_y2 - inter_y1) * (inter_x2 - inter_x1)
        return inter_area / (area_a + area_b - inter_area)
    return 0.
def get_max_ious_boxes_labels(scans, label224, valid_boxes):
    """Select up to 10 anchor boxes from ``valid_boxes`` plus class labels.

    If ``label224`` contains any lesion pixels, the budget of 10 anchors is
    split evenly across the connected lesion components and the anchors with
    the highest IoU against each component's bounding box are chosen
    (labels all 1.0). Otherwise the 10 anchors best overlapping the brain
    region of the first scan channel are chosen (labels all 0.0), or None if
    no brain region is found.
    # assumes scans is (C, H, W) and label224 a 2-D binary mask — TODO confirm
    """
    max_boxes = 10
    mask = label224
    # If there is some lesion on the mask, that is, if
    if len(np.unique(mask)) != 1:
        masked_labels = skimage.measure.label(mask)
        # instances are encoded as different colors
        obj_ids = np.unique(masked_labels)
        # first id is the background, so remove it
        obj_ids = obj_ids[1:]
        # split the color-encoded mask into a set
        # of binary masks
        masks = masked_labels == obj_ids[:, None, None]
        # get bounding box coordinates for each mask
        num_objs = len(obj_ids)
        boxes = []
        for i in range(num_objs):
            pos = np.where(masks[i])
            xmin = np.min(pos[0])
            xmax = np.max(pos[0])
            ymin = np.min(pos[1])
            ymax = np.max(pos[1])
            boxes.append([xmin, ymin, xmax, ymax])
        # only choose the top 10 boxes from this.
        # IoU of every valid anchor against every lesion box.
        ious = np.empty((len(valid_boxes), len(boxes)), dtype=np.float32)
        ious.fill(0)
        for num1, i in enumerate(valid_boxes):
            for num2, j in enumerate(boxes):
                ious[num1, num2] = iou_boxes(i, j)
        # choose the highest valued bounding boxes
        patches_for_objs = max_boxes // num_objs
        maxarg_ious = np.argsort(ious, axis=0)[::-1]
        selected_ious_args = []
        # Greedily take each object's best anchors, removing them from the
        # ranking so later objects cannot reuse the same anchor rows.
        for obj in range(num_objs):
            obj_max_indices = maxarg_ious[:patches_for_objs, obj].tolist()
            maxarg_ious = np.delete(maxarg_ious, obj_max_indices, axis=0)
            selected_ious_args.extend(obj_max_indices)
        # Return, the selected anchor boxes coords and the class_labels
        sel_anchors = valid_boxes[selected_ious_args]
        # and the all ones class labels
        class_labels = [1.0] * max_boxes
        return sel_anchors, class_labels
    # so there's no lesion at all in any part of the mask
    else:
        # box_for_scan_area
        # The corner pixel value is taken as the background; everything that
        # differs from it is treated as brain tissue.
        cornerVal = scans[0, 0, 0]
        pos = np.where(scans[0, :, :] != cornerVal)
        if len(pos[0]):
            x1_scan = np.min(pos[0])
            x2_scan = np.max(pos[0])
            y1_scan = np.min(pos[1])
            y2_scan = np.max(pos[1])
        else:
            return None
        box = (x1_scan, y1_scan, x2_scan, y2_scan)
        iou_vals = np.empty((len(valid_boxes)), dtype=np.float32)
        for index, anchor_box in enumerate(valid_boxes):
            iou_vals[index] = iou_boxes(anchor_box, box)
        maxarg_ious = np.argsort(iou_vals, axis=0)[::-1][:max_boxes]
        # Wont work as there s no way an entire anchor box in filled in this brain region
        # filter valid bounding boxes
        # valid_anchor_boxes_indices = np.where(
        #     (self.valid_anchor_boxes[:, 0] >= x1_scan) &
        #     (self.valid_anchor_boxes[:, 1] >= y1_scan) &
        #     (self.valid_anchor_boxes[:, 2] <= x2_scan) &
        #     (self.valid_anchor_boxes[:, 3] <= y2_scan)
        # )[0]
        sel_anchors = valid_boxes[maxarg_ious]
        class_labels = [0.0] * max_boxes
        return sel_anchors, class_labels
| hariharan98m/ischemic-stroke-detection | fcn_roialign/master2/utils.py | utils.py | py | 10,688 | python | en | code | 0 | github-code | 36 |
38191743871 | import json
leer = []
for linea in open('202006_movements.json','r'):
leer.append(json.loads(linea))
#print (linea)
datos = []
def leer():
    """Append every JSON-per-line record of the movements file to the global
    ``datos`` list.

    Bug fix: the original iterated an ``open(...)`` call directly and never
    closed the file handle; a ``with`` block now guarantees closure, and the
    encoding is pinned to UTF-8 (JSON's standard encoding).
    """
    with open('202006_movements.json', 'r', encoding='utf-8') as fichero:
        for linea in fichero:
            datos.append(json.loads(linea))
def recogidasPorPunto():
    """Print pick-up counts per unplug station, then the counts ordered by
    station id.

    Bug fix: the original rebuilt the ordered list by indexing
    "Punto 0..N-1", inserting ``None`` entries whenever station ids are not
    contiguous from 0; only keys that actually exist are used now.
    """
    resultado = dict()
    for obj in datos:
        clave = "Punto " + str(obj['idunplug_station'])
        resultado[clave] = resultado.get(clave, 0) + 1
    print(resultado)
    resultadoSort = [resultado[clave]
                     for clave in sorted(resultado, key=lambda c: int(c.split()[1]))]
    print(resultadoSort)
    return
def recogidasPorEdad():
    """Print pick-up counts per age range, then the counts ordered by range id.

    Bug fix: the original rebuilt the ordered list by indexing
    "ageRange 0..N-1", inserting ``None`` entries for any missing range id;
    only keys that actually exist are used now.
    """
    resultado = dict()
    for obj in datos:
        clave = "ageRange " + str(obj['ageRange'])
        resultado[clave] = resultado.get(clave, 0) + 1
    print(resultado)
    resultadoSort = [resultado[clave]
                     for clave in sorted(resultado, key=lambda c: int(c.split()[1]))]
    print(resultadoSort)
    return
def puntoRecYDev():
    """Print the sorted list (and count) of stations where at least one trip
    both started and ended at the same station."""
    resultado = list()
    for obj in datos:
        same_station = obj["idplug_station"] == obj["idunplug_station"]
        if same_station and obj["idplug_station"] not in resultado:
            resultado.append(obj["idplug_station"])
    resultado.sort()
    print(resultado)
    print(len(resultado))
    return
leer()
recogidasPorEdad() | dalevale/GIW2020-21 | practica2.py | practica2.py | py | 1,371 | python | es | code | 0 | github-code | 36 |
42866864407 | """
3
/ \
9 20
/ \
15 7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import collections
class Solution:
    def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
        """BFS level-order traversal whose per-level output direction
        alternates: left-to-right, then right-to-left, and so on."""
        if not root:
            return []
        result = []
        frontier = collections.deque([root])
        left_to_right = True
        while frontier:
            row = collections.deque()
            for _ in range(len(frontier)):
                node = frontier.popleft()
                # Append on the right for even levels, on the left for odd
                # levels, which reverses the order without an explicit sort.
                if left_to_right:
                    row.append(node.val)
                else:
                    row.appendleft(node.val)
                if node.left:
                    frontier.append(node.left)
                if node.right:
                    frontier.append(node.right)
            result.append(row)
            left_to_right = not left_to_right
        return result
# Numeric base converter: read a menu choice and an integer, then print the
# value in binary, octal or hexadecimal.
print("""Digite 1 para Conversão em Binário;
Digite 2 para Conversão em Octal;
Digite 3 para Conversão em Hexádecimal;
""")
esc = int(input('Escolha:'))
num = int(input('Valor para Conversão:'))
# Renamed from bin/oc/hex: the original shadowed the built-ins bin() and hex().
num_bin = "{:b}".format(num)
num_oct = "{:o}".format(num)
num_hex = "{:x}".format(num)
if esc == 1:
    print(num_bin)
elif esc == 2:
    print(num_oct)
elif esc == 3:
    print(num_hex)
else:
    print('Função não disponível')
| SloBruno/Curso_Em_Video_Python_Exercicios | ex037.py | ex037.py | py | 446 | python | pt | code | 0 | github-code | 36 |
5049970089 | import copy
import json
import math
import struct
from functools import partial
from pathlib import Path, PosixPath
import numpy as np
# isort: off
import torch
import tensorrt as trt
# isort: on
# numpy doesn't know bfloat16, define abstract binary type instead
np_bfloat16 = np.dtype('V2', metadata={"dtype": "bfloat16"})
def torch_to_numpy(x: torch.Tensor):
    """Convert a torch tensor to numpy. bfloat16 tensors (which numpy cannot
    represent natively) are reinterpreted as int16 and tagged with the
    abstract ``np_bfloat16`` dtype."""
    assert isinstance(x, torch.Tensor), \
        f'x must be a torch.Tensor object, but got {type(x)}.'
    if x.dtype == torch.bfloat16:
        return x.view(torch.int16).detach().cpu().numpy().view(np_bfloat16)
    return x.detach().cpu().numpy()
def numpy_to_torch(x):
    """Inverse of torch_to_numpy: abstract-bfloat16 arrays round-trip through
    an int16 view; every other dtype converts directly."""
    if x.dtype == np_bfloat16:
        return torch.tensor(x.view(np.int16)).view(torch.bfloat16)
    return torch.tensor(x)
def numpy_to_dtype(x, dtype: str):
    """Cast a numpy array to the dtype named by ``dtype``, routing any
    bfloat16 endpoint through torch (numpy has no native bfloat16).

    Bug fix: the BF16 -> non-BF16 branch computed the conversion but never
    returned it, so the function silently returned None for that case.
    """
    if x.dtype == np_bfloat16:
        if dtype == 'bfloat16':
            # BF16 --> BF16: nothing to do.
            return x
        # BF16 --> non-BF16 (the original was missing this `return`).
        return torch_to_numpy(numpy_to_torch(x).to(str_dtype_to_torch(dtype)))
    if dtype == 'bfloat16':
        # non-BF16 --> BF16 via torch.
        return torch_to_numpy(torch.from_numpy(x).to(torch.bfloat16))
    # non-BF16 --> non-BF16: plain numpy cast.
    return x.astype(str_dtype_to_np(dtype))
fp32_array = partial(np.array, dtype=np.float32)
fp16_array = partial(np.array, dtype=np.float16)
int32_array = partial(np.array, dtype=np.int32)
def bf16_array(x):
    """Build a numpy array of the abstract bfloat16 type from ``x``."""
    tensor = torch.tensor(x, dtype=torch.bfloat16)
    return torch_to_numpy(tensor)
def trt_version():
    # Version string of the installed TensorRT python package.
    return trt.__version__
def torch_version():
    # Version string of the installed PyTorch package.
    return torch.__version__
# String dtype name -> numpy dtype; bfloat16 maps to the abstract V2 alias
# np_bfloat16 declared at module level (numpy has no native bfloat16).
_str_to_np_dict = dict(
    float16=np.float16,
    float32=np.float32,
    int32=np.int32,
    bfloat16=np_bfloat16,
)
def str_dtype_to_np(dtype):
    # Resolve a dtype name to a numpy dtype; unknown names fail loudly.
    ret = _str_to_np_dict.get(dtype)
    assert ret is not None, f'Unsupported dtype: {dtype}'
    return ret
# String dtype name -> torch dtype. int64 and bool added for parity with
# _str_to_trt_dtype_dict below; existing entries are unchanged.
_str_to_torch_dtype_dict = dict(
    bfloat16=torch.bfloat16,
    float16=torch.float16,
    float32=torch.float32,
    int64=torch.int64,
    int32=torch.int32,
    int8=torch.int8,
    bool=torch.bool,
)
def str_dtype_to_torch(dtype):
    """Resolve a dtype name ('float16', 'int32', ...) to the torch dtype."""
    ret = _str_to_torch_dtype_dict.get(dtype)
    assert ret is not None, f'Unsupported dtype: {dtype}'
    return ret
# String dtype name -> TensorRT dtype (TRT additionally exposes fp8).
_str_to_trt_dtype_dict = dict(float16=trt.float16,
                              float32=trt.float32,
                              int64=trt.int64,
                              int32=trt.int32,
                              int8=trt.int8,
                              bool=trt.bool,
                              bfloat16=trt.bfloat16,
                              fp8=trt.fp8)
def str_dtype_to_trt(dtype):
    # Resolve a dtype name to a TensorRT dtype; unknown names fail loudly.
    ret = _str_to_trt_dtype_dict.get(dtype)
    assert ret is not None, f'Unsupported dtype: {dtype}'
    return ret
# numpy dtype (scalar type or dtype instance) -> TensorRT dtype.
_np_to_trt_dtype_dict = {
    np.int8: trt.int8,
    np.int32: trt.int32,
    np.float16: trt.float16,
    np.float32: trt.float32,
    # hash of np.dtype('int32') != np.int32
    np.dtype('int8'): trt.int8,
    np.dtype('int32'): trt.int32,
    np.dtype('float16'): trt.float16,
    np.dtype('float32'): trt.float32,
    np_bfloat16: trt.bfloat16,
    np.bool_: trt.bool,
}
def np_dtype_to_trt(dtype):
    # Accepts both scalar types (np.int8) and dtype instances (np.dtype('int8')),
    # hence the duplicated entries in the table above.
    ret = _np_to_trt_dtype_dict.get(dtype)
    assert ret is not None, f'Unsupported dtype: {dtype}'
    return ret
# TensorRT dtype -> numpy dtype (bfloat16 maps to the abstract np_bfloat16).
_trt_to_np_dtype_dict = {
    trt.int8: np.int8,
    trt.int32: np.int32,
    trt.float16: np.float16,
    trt.float32: np.float32,
    trt.bool: np.bool_,
    trt.bfloat16: np_bfloat16,
}
def trt_dtype_to_np(dtype):
    # Resolve a TensorRT dtype to the matching numpy dtype.
    ret = _trt_to_np_dtype_dict.get(dtype)
    assert ret is not None, f'Unsupported dtype: {dtype}'
    return ret
# torch dtype -> numpy dtype. int32/int8 added for parity with the other
# conversion tables here. torch.bfloat16 is deliberately absent: numpy only
# has the abstract np_bfloat16 alias, which callers handle via torch_to_numpy.
_torch_to_np_dtype_dict = {
    torch.float16: np.float16,
    torch.float32: np.float32,
    torch.int32: np.int32,
    torch.int8: np.int8,
}
def torch_dtype_to_np(dtype):
    """Resolve a torch dtype to the matching numpy dtype."""
    ret = _torch_to_np_dtype_dict.get(dtype)
    assert ret is not None, f'Unsupported dtype: {dtype}'
    return ret
# TensorRT dtype -> torch dtype.
_trt_to_torch_dtype_dict = {
    trt.float16: torch.float16,
    trt.float32: torch.float32,
    trt.int32: torch.int32,
    trt.int8: torch.int8,
    trt.bfloat16: torch.bfloat16
}
def trt_dtype_to_torch(dtype):
    # Resolve a TensorRT dtype to the matching torch dtype.
    ret = _trt_to_torch_dtype_dict.get(dtype)
    assert ret is not None, f'Unsupported dtype: {dtype}'
    return ret
def dim_to_trt_axes(dim):
    """Convert a dim index (or tuple of dim indices) into the bitmask form
    TensorRT reduce layers expect: bit d is set for each dim d."""
    dims = dim if isinstance(dim, tuple) else (dim,)
    bitmask = 0
    for d in dims:
        bitmask |= 1 << d
    return bitmask
def dim_resolve_negative(dim, ndim):
    """Map a dim index (or tuple of indices), possibly negative, to the
    equivalent non-negative indices for a tensor of rank ``ndim``.
    Always returns a tuple."""
    dims = dim if isinstance(dim, tuple) else (dim,)
    return tuple(d + ndim if d < 0 else d for d in dims)
def mpi_comm():
    # Imported lazily so mpi4py is only required when MPI is actually used.
    from mpi4py import MPI
    return MPI.COMM_WORLD
def mpi_rank():
    # Rank of this process within COMM_WORLD.
    return mpi_comm().Get_rank()
def mpi_world_size():
    # Total number of MPI processes in COMM_WORLD.
    return mpi_comm().Get_size()
def pad_vocab_size(vocab_size, tp_size):
    """Round ``vocab_size`` up to the next multiple of ``tp_size`` so the
    vocabulary splits evenly across tensor-parallel ranks.

    Uses integer ceiling division: the original float-based
    ``math.ceil(vocab_size / tp_size)`` can round incorrectly once
    vocab_size exceeds 2**53 (float64 precision).
    """
    return (vocab_size + tp_size - 1) // tp_size * tp_size
def to_dict(obj):
    """Deep-copied attribute dictionary of ``obj``."""
    return copy.deepcopy(obj.__dict__)
def to_json_string(obj):
    """Pretty, key-sorted JSON for ``obj`` (dicts pass through unchanged;
    other objects are serialized via to_dict). Ends with a newline."""
    payload = obj if isinstance(obj, dict) else to_dict(obj)
    return json.dumps(payload, indent=2, sort_keys=True) + "\n"
def to_json_file(obj, json_file_path):
    """Write to_json_string(obj) to ``json_file_path`` as UTF-8."""
    with open(json_file_path, "w", encoding="utf-8") as writer:
        writer.write(to_json_string(obj))
def numpy_fp32_to_bf16(src):
    """Truncate a float32 array to the abstract bfloat16 numpy type.

    bfloat16 is the upper 16 bits of the IEEE-754 float32 bit pattern, so the
    conversion is a single vectorized right-shift instead of the original
    per-element struct pack/unpack loop (which was O(n) Python iterations).
    Matches the original's little-endian byte selection; like the original,
    this targets little-endian hosts.
    """
    assert src.dtype == np.float32
    bits = np.ascontiguousarray(src).view(np.uint32)
    # Keep the sign/exponent/top-7-mantissa bits; shape is preserved.
    dst = (bits >> 16).astype(np.uint16)
    return dst.view(np_bfloat16)
def fromfile(dir_path, name, shape=None, dtype=None):
    """Load the flat binary tensor file ``name`` under ``dir_path``.

    Returns the array (reshaped to ``shape`` when given) or None when the
    file does not exist. ``dtype`` defaults to float32.

    Bug fixes: the original referenced an undefined ``np_dtype`` when dtype
    was omitted (NameError), and its ``isinstance(p, PosixPath)`` check
    fails on Windows; ``Path(...)`` accepts both strings and Path objects.
    """
    dtype = np.float32 if dtype is None else dtype
    p = Path(dir_path) / name
    if p.exists():
        t = np.fromfile(p, dtype=dtype)
        if shape is not None:
            t = t.reshape(shape)
        return t
    return None
| NVIDIA/TensorRT-LLM | tensorrt_llm/_utils.py | _utils.py | py | 6,159 | python | en | code | 3,328 | github-code | 36 |
13239914664 | #
import os
import csv
csv_path = os.path.join('..', 'Resources', 'budget_data.csv')
# csv_path = 'D:\\UCIBootcamp\\Module3_challenge\\python_challenge\\PyBank\\Resources\\budget_data.csv'
#
total_month = 0
net_amount = 0
date = []
changes = []
profit_change_list = []
profit_change_list_2 =[]
# decrease = []
#
with open(csv_path) as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
csv_header = next(csvreader)
# find the total amount
for row in csvreader:
#calculate total row
total_month += 1
#calculate total change in budget
net_amount = net_amount + int(row[1])
date.append(row[0])
changes.append(int(row[1]))
for i in range(1, len(changes)):
profit_change_list.append(changes[i]-changes[i-1])
# calculate average change
avg_profit = round(sum(profit_change_list)/len(profit_change_list),2)
# point out max min in profit
max_profit = max(profit_change_list)
min_profit = min(profit_change_list)
# return date have max/min profit
great_profit_increase = profit_change_list.index(max(profit_change_list)) + 1
great_profit_decrease = profit_change_list.index(min(profit_change_list)) + 1
print("Financial Analysis")
print("--------------------")
print("Total months: ", total_month)
print("Total: $",net_amount)
print(f'Average Change: ${avg_profit}')
print(f'Greatest Increase in Profit: {date[great_profit_increase]} ${max_profit}')
print(f'Greatest Decrease in Profit: {date[great_profit_decrease]} ${min_profit}')
output = os.path.join('..', 'analysis', 'analysis_summary.txt')
with open(output, 'w') as txtfile:
txtfile.write("Financial Analysis")
txtfile.write("\n")
txtfile.write("-------------------")
txtfile.write("\n")
txtfile.write(f'Total months: {total_month}')
txtfile.write("\n")
txtfile.write(f'Total: ${net_amount}')
txtfile.write("\n")
txtfile.write(f'Average Change: ${avg_profit}')
txtfile.write("\n")
txtfile.write(f'Greatest Increase in Profit: {great_profit_increase} ${max_profit}')
txtfile.write("\n")
txtfile.write(f'Greatest Decrease in Profit: {great_profit_decrease} ${min_profit}') | dtnpht/python_challenge | PyBank/code/main.py | main.py | py | 2,251 | python | en | code | 0 | github-code | 36 |
3572315429 | import numpy
matrix = numpy.loadtxt("similarity.txt")
key_file = open("pair.txt", "r")
keys = {}
line = key_file.readline()
while (line!=""):
words = line.split(':', 1)
keys[int(words[0])] = words[1].replace("\n", "")
line = key_file.readline()
def my_cmp(x, y):
    """Python-2-style comparator ordering pairs by their second element
    (the similarity score): -1, 0 or 1."""
    if x[1] == y[1]:
        return 0
    return -1 if x[1] < y[1] else 1
# Rank every item against every other by similarity score and write, for each
# item, its 6 nearest neighbours. NOTE(review): 341 is hard-coded — presumably
# the dataset size; verify against similarity.txt / pair.txt.
output = open("output","w")
for i in range(0,341):
    # Build (index, score) pairs for row i of the similarity matrix.
    list = []
    for j in range(0,341):
        pair = []
        pair.append(j)
        pair.append(matrix[i][j])
        list.append(pair)
    # Python 2 cmp-function sort, descending by score (my_cmp compares scores).
    list.sort(my_cmp, reverse=True)
    # Take the 6 highest-ranked items, skipping rank 0 (the item itself).
    for k in range(1,7):
        output.write(keys[i] + " " + keys[list[k][0]] + " " + "\n")
| HenryZhou0333/CS411 | similar_courses/process.py | process.py | py | 632 | python | en | code | 2 | github-code | 36 |
19651817490 | from binance.client import Client
import pandas as pd
import matplotlib.pyplot as plt
import ta
# Pull daily BTC/USDT candles from Binance starting January 2018.
data = Client().get_historical_klines("BTCUSDT", Client.KLINE_INTERVAL_1DAY, "01 JANUARY 2018")
df = pd.DataFrame(data, columns = ['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'])
# Running totals for the backtest: cash spent on buys, and net profit (sells - buys).
somme_investi = 0
benef = 0
# Drop the kline columns the strategy never uses.
del df['ignore']
del df['close_time']
del df['quote_av']
del df['trades']
del df['tb_base_av']
del df['tb_quote_av']
# Binance returns prices as strings; convert to numbers for the indicators.
df['close'] = pd.to_numeric(df['close'])
df['high'] = pd.to_numeric(df['high'])
df['low'] = pd.to_numeric(df['low'])
df['open'] = pd.to_numeric(df['open'])
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
# 50- and 200-day simple moving averages drive the crossover signals.
df['SMA50'] = ta.trend.sma_indicator(df['close'], 50)
df['SMA200'] = ta.trend.sma_indicator(df['close'], 200)
for i in range(len(df['SMA200']) - 1):
    # Golden cross: SMA50 rises above SMA200 between day i and i+1 -> buy one unit at the next open.
    if df['SMA200'][i] > df['SMA50'][i] and df['SMA200'][i+1] < df['SMA50'][i+1]:
        plt.annotate('BUY',
                     ha = 'center', va = 'bottom',
                     xytext = (df['timestamp'][i+1], df['SMA200'][i+1] + 5000),xy = (df['timestamp'][i+1], df['SMA200'][i+1]),arrowprops = {'facecolor' : 'green'})
        benef -= df['open'][i+1]
        somme_investi += df['open'][i+1]
        print("ACHAT: " + str(df['open'][i+1]) + " USDT")
    # Death cross: SMA50 falls below SMA200 -> sell one unit at the next open.
    elif df['SMA200'][i] < df['SMA50'][i] and df['SMA200'][i+1] > df['SMA50'][i+1]:
        plt.annotate('SELL',
                     ha = 'center', va = 'bottom',
                     xytext = (df['timestamp'][i+1], df['SMA200'][i+1] + 5000),xy = (df['timestamp'][i+1], df['SMA200'][i+1]),arrowprops = {'facecolor' : 'red'})
        benef += df['open'][i+1]
        print("VENTE: " + str(df['open'][i+1]) + " USDT")
# NOTE(review): somme_investi - benef works out to 2*buys - sells, not the
# amount currently invested; presumably plain somme_investi was intended -- confirm.
print("SOMME INVESTIE: " + str(somme_investi - benef))
print("BENEFICE TOTAL: " + str(benef))
# Plot the open price plus both moving averages; the annotations above mark trades.
plt.plot(df['timestamp'], df['open'])
plt.plot(df['timestamp'], df['SMA50'], color='r')
plt.plot(df['timestamp'], df['SMA200'], color='g')
plt.show()
22420951914 | from django.shortcuts import render,redirect
from django.contrib import messages
from .models import Courses
def new(request):
    """Render the course listing page with every course in the database."""
    return render(request, 'new.html', {'course': Courses.objects.all()})
def create(request):
    """Validate the POSTed course form.

    On validation failure, flash each error message; otherwise create the
    course.  Either way, redirect back to the index page.
    """
    errors = Courses.objects.basic_validator(request.POST)
    if errors:
        for message_text in errors.values():
            messages.error(request, message_text)
    else:
        Courses.objects.create(
            name=request.POST['name'],
            description=request.POST['description'],
        )
    return redirect('/')
def destroy(request, course_id):
    """Show the delete-confirmation page for one course (no deletion happens here)."""
    selected = Courses.objects.get(id=course_id)
    return render(request, 'destroy.html', {'course': selected})
def delete(request, course_id):
    """Permanently remove the course with the given id, then return home."""
    Courses.objects.get(id=course_id).delete()
    return redirect('/')
| Wendy-Wu-Chiang/Python_stack | django/full_stack_django/courses_proj/courses_app/views.py | views.py | py | 952 | python | en | code | 0 | github-code | 36 |
22545958900 | class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
n=len(nums)
low=0
mid=0
high=n-1
while mid<=high:
if nums[mid]==0:
nums[mid],nums[low]=nums[low],nums[mid]
low+=1;mid+=1
elif nums[mid]==1:
mid+=1
else:
nums[mid],nums[high]=nums[high],nums[mid]
high-=1
| rudrasgithub/My-LeetCode-Submissions | 0075-sort-colors/0075-sort-colors.py | 0075-sort-colors.py | py | 524 | python | en | code | 1 | github-code | 36 |
10558928752 | def getPriority(a):
    # Advent of Code 2022 day 3 item priority:
    # lowercase 'a'-'z' (ord >= 97) map to 1-26, uppercase 'A'-'Z' to 27-52.
    if ord(a) >= 97:
        return ord(a)-96
    else:
        return ord(a)-38
ans1 = 0
ans2 = 0
lines = []
# Use a with-block so the input file is closed (the original leaked it), and
# strip the newline with rstrip: the original's line[:len(line)-1] chopped a
# real character off a final line that lacked a trailing '\n', and computed
# the two compartments from the raw line including the newline.
with open("input.txt", "r") as f:
    for line in f.readlines():
        rucksack = line.rstrip("\n")
        lines.append(rucksack)
        # Part 1: the one item type appearing in both halves of the rucksack.
        half = len(rucksack) // 2
        first, second = rucksack[:half], rucksack[half:]
        for item in first:
            if item in second:
                ans1 += getPriority(item)
                break
print(ans1)
# Part 2: for each group of three rucksacks, find the single common item.
g = 0
while g < len(lines):
    group = lines[g:g + 3]
    for item in group[0]:
        if (item in group[1]) and (item in group[2]):
            ans2 += getPriority(item)
            break
    g += 3
print(ans2)
| YuehuaYin/Advent-Of-Code-22 | Day-3/solution.py | solution.py | py | 597 | python | en | code | 0 | github-code | 36 |
11883990410 | from lcp.core.interfaces.module import Module
from lcp.modules.camerafeed.camera_feed import CameraFeed
import cv2 as cv
import _thread
class FaceDetector(Module):
    """Module that continuously detects faces on the camera feed.

    A background thread grabs frames from the CameraFeed module, runs a Haar
    cascade classifier over them, and publishes the latest detections via
    get_detected_faces().
    """

    __name = "Face Detector"
    __version = "1.0"
    __dependencies = [CameraFeed]

    def __init__(self, config):
        super().__init__(self.__name, self.__version, self.__dependencies)
        # Classifier XML file name, configurable; resolved relative to the
        # module's classifiers directory in install().
        self.__face_classifier_file = config.get('face_classifier', fallback='classifier.xml')
        # Placeholders populated later by install()/the detector thread.
        self.__face_classifier = []
        self.__camera_feed = []
        self.__detector_thread = []
        self.__tracked_faces = []
        # Minimum face size in pixels; 0 means "not yet derived from a frame".
        self.__absolute_face_size = 0
        self.__frame_width = 0
        self.__frame_height = 0

    def install(self, modules):
        """Resolve dependencies and load the cascade classifier from disk."""
        modules = super().install(modules)
        self.__camera_feed = modules['CameraFeed']
        self.__face_classifier = cv.CascadeClassifier('..\\modules\\facedetector\\classifiers\\' + self.__face_classifier_file)

    def start(self):
        """Launch the background detection loop."""
        self.__detector_thread = _thread.start_new_thread(self.__detect_faces, ())

    def get_detected_faces(self):
        """Return the most recent set of detected face rectangles."""
        return self.__tracked_faces

    def get_frame_dimensions(self):
        """Return (width, height) of the last processed frame."""
        return self.__frame_width, self.__frame_height

    def __detect_faces(self):
        """Worker loop: grab a frame, preprocess it, refresh tracked faces."""
        while True:
            frame = self.__camera_feed.get_frame()
            self.__frame_height, self.__frame_width, _ = frame.shape
            # Grayscale + histogram equalization before cascade detection.
            gray = cv.equalizeHist(cv.cvtColor(frame, cv.COLOR_BGR2GRAY))
            if self.__absolute_face_size == 0:
                # Derive the minimum face size (20% of frame height) once.
                frame_rows = gray.shape[0]
                if float(frame_rows) * 0.2 > 0:
                    self.__absolute_face_size = int(frame_rows * 0.2)
            min_dim = (self.__absolute_face_size, self.__absolute_face_size)
            self.__tracked_faces = self.__face_classifier.detectMultiScale(
                gray, scaleFactor=1.1, minNeighbors=2, minSize=min_dim)
| huybthomas/LCP-Core-Old | src/lcp/modules/facedetector/face_detector.py | face_detector.py | py | 1,900 | python | en | code | 0 | github-code | 36 |
39983009651 | # Find out if a given number is an "Armstrong Number".
# An n-digit number that is the sum of the nth powers of its digits
# is called an n-Armstrong number. Examples :
# 371 = 3^3 + 7^3 + 1^3;
# 9474 = 9^4 + 4^4 + 7^4 + 4^4;
# 93084 = 9^5 + 3^5 + 0^5 + 8^5 + 4^5.
# Write a Python program that;
# takes a positive integer number from the user,
# checks the entered number if it is Armstrong,
# consider the negative, float and any entries
# other than numeric values then display a warning message to the user.
# Examples
# Sample inputs Outputs
# 407 407 is an Armstrong number
# 5 5 is an Armstrong number
# -153 It is an invalid entry. Don't use non-numeric, float, or negative values!
# 153.87 or 153,87 It is an invalid entry. Don't use non-numeric, float, or negative values!
# one It is an invalid entry. Don't use non-numeric, float, or negative values!
# 121 121 is not an Armstrong number
while True:
    number = input("Enter a number: ")
    # Guard clause: reject anything that is not a positive integer string.
    if not (number.isdigit() and int(number) > 0):
        print("It is an invalid entry. Don't use non-numeric, float, or negative values!")
        continue
    # An n-digit number is Armstrong when the sum of its digits raised to the
    # nth power equals the number itself.
    power = len(number)
    digit_power_sum = sum(int(digit) ** power for digit in number)
    if digit_power_sum == int(number):
        print(f"{number} is an Armstrong number.")
    else:
        print(f"{number} is not an Armstrong number.")
    new_check = input("If would you like to check an other number enter 'yes': ")
    if new_check.lower() == "yes":
        continue
    print("See you next time, thank you")
    break
| MattCon70/mypython | assigments/armstrong_number.py | armstrong_number.py | py | 1,737 | python | en | code | 0 | github-code | 36 |
24537866239 | from operator import itemgetter, add
from pathlib import Path
# Memory-bank reallocation (AoC 2017 day 6): repeatedly empty the fullest bank
# and redistribute its blocks round-robin until a configuration repeats; then
# report how many steps that took and the length of the resulting cycle.
banks = list(map(int, Path("day6.txt").read_text().split()))
n, history = len(banks), {}
while tuple(banks) not in history:
    history[tuple(banks)] = len(history)
    # max(...) keeps the first maximum, i.e. the lowest-index fullest bank.
    i, mx = max(enumerate(banks), key=itemgetter(1))
    banks[i] = 0
    # Use a distinct loop variable (the original shadowed i); mx is small in
    # practice, so distributing one block at a time is fine.  A dead
    # `if False:` closed-form experiment was removed here.
    for j in range(i + 1, i + 1 + mx):
        banks[j % n] += 1
print(len(history), len(history) - history[tuple(banks)])
| AlexBlandin/Advent-of-Code | 2017/day6.py | day6.py | py | 675 | python | en | code | 0 | github-code | 36 |
70666328104 | import xml.etree.ElementTree as ET
# Build a tiny tree:
# <base><ventana name="ventana-consultas"><ventana-hide option-hide="false"/></ventana></base>
bd=ET.Element("base")
ventana=ET.SubElement(bd,"ventana", name="ventana-consultas")
ventana_hide=ET.SubElement(ventana,"ventana-hide",)
ventana_hide.set("option-hide","false")
# Dump the tree to stdout for inspection, then write it to disk.
ET.dump(bd)
tree = ET.ElementTree(bd)
tree.write("C:/Users/ricar/Desktop/pruebas v1/pruebasv1.xml")
# Re-parse the file that was just written.
estructura_xml = ET.parse("C:/Users/ricar/Desktop/pruebas v1/pruebasv1.xml")
# Get the root element:
raiz = estructura_xml.getroot()
'''for ventana in raiz.findall('ventana'):
    print(ventana)
    print("espacio1")
    print(ventana.get("option-hide"))
    print("nada")
'''
# NOTE(review): "option-hide" was set on the <ventana-hide> child above, yet
# this loop reads/writes it on the <ventana> elements themselves, so the first
# get() returns None before the attribute is created here -- confirm which
# element is meant to carry the attribute.
for ventana in raiz.iter('ventana'):
    print("get: "+str(ventana.get("option-hide")))
    ventana.set("option-hide","0")
    print(ventana.get("option-hide"))
# Persist the modified tree back to the same file.
estructura_xml.write("C:/Users/ricar/Desktop/pruebas v1/pruebasv1.xml")
| ColqueRicardo/v-version | pruebas/pruebas aisladas/archivos xml.py | archivos xml.py | py | 829 | python | es | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.