seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
43967300296 | # Cryptomath Module
# http://inventwithpython.com/hacking (BSD Licensed)
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    while a:
        b, a = a, b % a
    return b
def succesiveSquaring(base, exp, mod):
    """Return (base ** exp) % mod via recursive successive squaring.

    Equivalent to pow(base, exp, mod) for non-negative exponents; the name
    (including its historical misspelling) is kept for existing callers.
    """
    if exp == 0:
        return 1 % mod
    # Square the result for exp // 2, then multiply by base once for odd exp.
    half = succesiveSquaring(base, exp // 2, mod)
    squared = half * half
    if exp % 2:
        squared *= base
    return squared % mod
def gcdr(a, b):
    """Return the GCD of a and b (Euclid's algorithm), printing each division
    step after the first one as 'dividend = quotient x divisor + remainder'."""
    step = 0
    while a != 0:
        quotient, remainder = divmod(b, a)
        dividend = b
        a, b = remainder, a
        if step > 0:
            # After the swap, b is the divisor that was just used.
            print('{} = {} x {} + {}'.format(dividend, quotient, b, remainder))
        step += 1
    return b
def findModInverse(a, m):
    """Return the modular inverse of a modulo m, i.e. the x with a*x % m == 1,
    or None when a and m are not relatively prime (no inverse exists).

    Bug fix: the original used `assert "Not relatively prime!"`, which asserts
    a truthy string literal and can never fire; the real signal is the None
    return, so the dead assert has been removed.
    """
    if gcdr(a, m) != 1:
        return None  # no mod inverse if a & m aren't relatively prime
    # Calculate using the Extended Euclidean Algorithm:
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3  # // is the integer division operator
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
if __name__ == '__main__':
    # Smoke test: modular inverse of 1051 mod 17, then 2^1234 mod 789.
    print(findModInverse(1051,17))
    print(succesiveSquaring(2,1234,789))
| rugbyprof/CMPS-Cryptography | cryptomath.py | cryptomath.py | py | 1,424 | python | en | code | 4 | github-code | 36 |
42360576812 | """
__title__ = ''
__author__ = 'Thompson'
__mtime__ = '2018/5/23'
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import requests
# Pick a proxy by protocol scheme; only plain HTTP traffic is proxied here.
proxies = {
    "http": "http://118.190.95.35:9001",
}
# Spoof a desktop Chrome user agent so the target serves a normal page.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
# NOTE(review): no timeout is given, so this call can hang indefinitely — confirm intended.
response = requests.get("http://www.baidu.com", proxies = proxies,headers=headers)
print(response.content.decode())
2080670146 | # -*- coding : utf-8 -*-
import numpy as np
import torch
from torch import nn
class DNN(nn.Module):
    """Three-hidden-layer MLP: seqLen -> hiddenDim1 -> hiddenDim2 -> hiddenDim3 -> outDim.

    Each hidden layer is Linear -> BatchNorm1d -> RReLU; the output layer is a
    plain Linear.  ``args`` must provide outDim, seqLen, hiddenDim1,
    hiddenDim2 and hiddenDim3 as integers.
    """

    def __init__(self, args):
        super().__init__()
        self.outDim = args.outDim
        self.seqLen = args.seqLen
        self.hiddenDim1 = args.hiddenDim1
        self.hiddenDim2 = args.hiddenDim2
        self.hiddenDim3 = args.hiddenDim3
        self.fc1 = nn.Linear(self.seqLen, self.hiddenDim1)
        self.bn1 = nn.BatchNorm1d(self.hiddenDim1)
        # One RReLU module is shared by all three layers; RReLU has no learned
        # parameters, so sharing it is safe.
        self.relu = nn.RReLU()
        self.fc2 = nn.Linear(self.hiddenDim1, self.hiddenDim2)
        self.bn2 = nn.BatchNorm1d(self.hiddenDim2)
        self.fc3 = nn.Linear(self.hiddenDim2, self.hiddenDim3)
        self.bn3 = nn.BatchNorm1d(self.hiddenDim3)
        self.out = nn.Linear(self.hiddenDim3, self.outDim)
        self.dnn = nn.Sequential(
            self.fc1, self.bn1, self.relu,
            self.fc2, self.bn2, self.relu,
            self.fc3, self.bn3, self.relu,
            self.out,
        )

    def forward(self, seq):
        """Run the MLP on ``seq`` of shape (batch, seqLen); returns (batch, outDim).

        Bug fix: the original called ``seq.view(-1)`` and discarded the result.
        ``Tensor.view`` is not in-place, so the call was a no-op; it is removed
        rather than applied, since flattening the batch dimension would break
        the Linear/BatchNorm1d shape contract.
        """
        return self.dnn(seq)
| Ylizin/RWSim | ylSim/DNN.py | DNN.py | py | 1,213 | python | en | code | 2 | github-code | 36 |
# When newlist = oldlist, both names alias the SAME list object and any edit
# through one is visible through the other; .copy() below avoids that trap.
nums = [1,2,3,4,5,6,7]
k = 3  # rotate right by k positions
length = len(nums)
temp = nums.copy()  # frozen snapshot of the original order to read from
def indexRet(index, length, k):
    """Return the destination index of element `index` after rotating a list
    of the given length right by k positions.

    Bug fix: the original body read the global loop variable `i` instead of
    the `index` parameter, so the function only worked by accident when called
    from the loop below (and raised NameError anywhere else).
    """
    return (index + k) % length
# Rotate nums right by k in place, reading every value from the untouched
# snapshot so already-overwritten slots are never re-read.
for i in range(length):
    newIndex = indexRet(i, length, k)
    nums[newIndex] = temp[i]
print(nums)
29647725517 | import sqlite3
import pandas as pd
import time
import sys
from drugbank.drugbank_index_query import drugbank_search
from hpo.hpo_index_query import hpo_search
from omim.omim_index_query import omim_search
from stitch.stitch_chemical_sources_index_query import stitch_chemical_sources_search
from stitch.stitch_br08303_index_query import stitch_br08303_search
from python_requests import *
from usefull_temp import *
from link import *
'''
GLOBAL LISTS :
disease_list = {disease_name : [occurrence, source]}
curing_drug_list = {drugname : [occurrence, description, indication, toxicity, sources]}
side_effects_from_drug_list = {drugname : [occurrence, description, indication, toxicity, sources]}
'''
## SEARCH FROM HPO
def correction_hpo_disease_label(label):
    """Normalise an HPO/OMIM disease label.

    Drops a leading '#NNNN ' numeric-code prefix and keeps only the text
    before the first ',' or ';' separator.

    Robustness fix: the original did ``label.split(" ", 1)[1]`` and raised
    IndexError for a label that starts with '#' but contains no space; the
    prefix is now stripped only when a space is actually present.
    """
    if label.startswith('#'):
        parts = label.split(" ", 1)
        if len(parts) > 1:
            label = parts[1]
    if ',' in label:
        label = label.split(",", 1)[0]
    if ';' in label:
        label = label.split(";", 1)[0]
    return label
def get_diseases_from_hpo(hpo_id):
    """Return lower-cased, normalised disease labels annotated with the given
    HPO term id (underscores are accepted in place of colons, e.g. HP_0000001).

    Security fix: the term id was interpolated into the SQL with an f-string;
    it is now passed as a bound parameter.  The connection is also closed in a
    ``finally`` so it no longer leaks on error.
    """
    DATABASE = "../data/HPO/hpo_annotations.sqlite"
    conn = sqlite3.connect(DATABASE)
    try:
        curs = conn.cursor()
        hpo_id = hpo_id.replace('_', ':')
        curs.execute(
            "SELECT disease_label FROM phenotype_annotation WHERE sign_id = ?",
            (hpo_id,),
        )
        disease_list = [
            correction_hpo_disease_label(row[0].lower())
            for row in curs.fetchall()
        ]
        conn.commit()
        curs.close()
        return disease_list
    finally:
        conn.close()
## SEARCH FROM SIDER
SIDER_FILE = "../data/MEDDRAS/meddra_all_se.csv"
def get_sider_id(symptom):
    """Return (stitch_compound_id1, stitch_compound_id2) pairs for every SIDER
    row whose side-effect name contains `symptom` (case-insensitive substring).

    Performance fix: the original looped over every row in Python; this uses a
    vectorised ``str.contains`` mask instead.  ``na=False`` additionally keeps
    rows with a missing side_effect_name from crashing the scan.
    """
    df = pd.read_csv(SIDER_FILE, sep=',')
    mask = df['side_effect_name'].str.lower().str.contains(
        symptom, regex=False, na=False
    )
    return list(zip(df.loc[mask, 'stitch_compound_id1'],
                    df.loc[mask, 'stitch_compound_id2']))
## SEARCH FROM DRUGBANK
## GLOBAL SEARCH FUNCTION
def search_disease_from_symptom(symptom, disease_list):
    """Augment disease_list ({name: [occurrence, source]}) with diseases whose
    HPO phenotype annotations match `symptom`; returns the dict re-sorted by
    descending [occurrence, source] value (lists compare element-wise)."""
    ## get symptoms
    hpo_query = create_hpo_query(symptom)
    content_hpo = hpo_search(hpo_query)
    ## complete symptoms
    ## Count lost items
    Total_hpo_count = len(content_hpo)  # NOTE(review): computed but never used
    count = 0
    for elem in content_hpo:
        hpo_id = elem[0]  # assumes each hit is (hpo_id, ...) — TODO confirm against hpo_search
        disease_list_from_hpo = get_diseases_from_hpo(hpo_id)
        if disease_list_from_hpo == []:
            count += 1  # HPO terms with no annotated disease (tallied, never reported)
        else:
            for disease in disease_list_from_hpo:
                if disease in disease_list:
                    disease_list[disease][0] += 1
                else:
                    disease_list[disease] = [1, "hpo"]
    disease_list = dict(sorted(disease_list.items(), key=lambda item: item[1], reverse=True))
    return disease_list
def search_side_effects_drug_from_content_sider_id(content_sider_id, side_effects_from_drug_list):
    """Map SIDER compound-id pairs to drugs via STITCH ATC codes and DrugBank,
    accumulating occurrences in side_effects_from_drug_list
    ({name: [occurrence, description, indication, toxicity, sources]});
    returns the dict re-sorted by descending value."""
    ## link with stitch
    content_stitch_atc = []
    for elem in content_sider_id:
        id1 = elem[0]
        id2 = elem[1]
        content_stitch_atc += sider_to_stitch_compoundid1(id1, id2)
    # Hard cap to keep the downstream DrugBank lookups tractable.
    if len(content_stitch_atc) > 500:
        content_stitch_atc = content_stitch_atc[:500]
    ## link with drugbank
    content_drugbank = []
    for atc_code in content_stitch_atc:
        content_drugbank += stitch_atc_code_to_drugbank(atc_code)
    for item in content_drugbank:
        name = item[0]  # assumes (name, description, indication, toxicity) — TODO confirm
        if name in side_effects_from_drug_list:
            side_effects_from_drug_list[name][0] += 1
        else:
            description = item[1]
            indication = item[2]
            toxicity = item[3]
            bloc = [1, description, indication, toxicity, 'sider / stitch / drugbank']
            side_effects_from_drug_list[name] = bloc
    side_effects_from_drug_list = dict(sorted(side_effects_from_drug_list.items(), key=lambda item: item[1], reverse=True))
    return side_effects_from_drug_list
def search_side_effects_drug_from_drugbank(symptom, side_effects_from_drug_list):
    """Query DrugBank directly for drugs listing `symptom` as a side effect,
    accumulating occurrences in side_effects_from_drug_list; returns the dict
    re-sorted by descending value."""
    query = create_drugbank_query_side_effect(symptom)
    content_drugbank = drugbank_search(query)
    for item in content_drugbank:
        name = item[1]  # assumes item[1] is the drug name — TODO confirm against drugbank_search
        if name in side_effects_from_drug_list:
            side_effects_from_drug_list[name][0] +=1
        else:
            description = item[2]
            indication = item[3]
            toxicity = item[4]
            # NOTE(review): tag mentions sider/stitch although this path only
            # queries DrugBank — confirm intended.
            sources = 'sider / stitch / drugbank'
            bloc = [1, description, indication, toxicity, sources]
            side_effects_from_drug_list[name] = bloc
    side_effects_from_drug_list = dict(sorted(side_effects_from_drug_list.items(), key=lambda item: item[1], reverse=True))
    return side_effects_from_drug_list
def search_curing_drug_from_symtom(symptom, curing_drug_list):
    """Query DrugBank for drugs indicated for `symptom`, accumulating
    occurrences in curing_drug_list
    ({name: [occurrence, description, indication, toxicity, sources]});
    returns the dict re-sorted by descending value."""
    query = create_drugbank_query(symptom)
    content_drugbank = drugbank_search(query)
    for item in content_drugbank:
        name = item[1]  # assumes item[1] is the drug name — TODO confirm against drugbank_search
        if name in curing_drug_list:
            curing_drug_list[name][0] += 1
        else:
            description = item[2]
            indication = item[3]
            toxicity = item[4]
            sources = "drugbank"
            bloc = [1, description, indication, toxicity, sources]
            curing_drug_list[name] = bloc
    curing_drug_list = dict(sorted(curing_drug_list.items(), key=lambda item: item[1], reverse=True))
    return curing_drug_list
def main():
    """CLI entry point: look up diseases, curing drugs and/or side-effect
    drugs for a symptom.  Flags: -s <symptom>, -p <0|1|2|3> where 0=diseases,
    1=curing drugs, 2=side-effect drugs, 3=all (default)."""
    symptom = "abdominal"
    # correction of the input
    symptom = symptom.lower()
    ## THINGS TO PRINT : {0: disease_list, 1: curing_drug_list, 2: side_effects_from_drug_list, 3: All}
    print_value = 3
    ## CHECK ARGS
    args = sys.argv
    if "-s" in args:
        pos = args.index("-s")
        symptom = args[pos+1]
    if "-p" in args:
        pos = args.index("-p")
        print_value = int(args[pos+1])
    # initiation of global lists
    disease_list = {}
    curing_drug_list = {}
    content_sider_id = []
    side_effects_from_drug_list = {}

    def print_function(print_value, disease_list, curing_drug_list, side_effects_from_drug_list):
        # Run one of the three searches and print its size and contents.
        if print_value==0:
            disease_list = search_disease_from_symptom(symptom, disease_list)
            print(len(disease_list))
            printlist(disease_list)
        elif print_value==1:
            curing_drug_list = search_curing_drug_from_symtom(symptom, curing_drug_list)
            print(len(curing_drug_list))
            printlist(curing_drug_list)
        elif print_value==2:
            content_sider_id = get_sider_id(symptom)
            # Cap to 5 id pairs to bound the STITCH/DrugBank fan-out.
            content_sider_id = content_sider_id[:5]
            side_effects_from_drug_list = search_side_effects_drug_from_content_sider_id(content_sider_id, side_effects_from_drug_list)
            print(len(side_effects_from_drug_list))
            printlist(side_effects_from_drug_list)

    start = time.time()
    if print_value in [0, 1, 2]:
        print_function(print_value, disease_list, curing_drug_list, side_effects_from_drug_list)
    if print_value == 3:
        print_function(0, disease_list, curing_drug_list, side_effects_from_drug_list)
        print_function(1, disease_list, curing_drug_list, side_effects_from_drug_list)
        print_function(2, disease_list, curing_drug_list, side_effects_from_drug_list)
    end = time.time()
    print("#####")
    print()
    print(f"time : {end - start}")

if __name__ == '__main__':
    main()
5784254080 | from unittest import mock
import bson
import pytest
from test.tools import anything, in_any_order
from slivka import JobStatus
from slivka.db.documents import JobRequest, ServiceState
from slivka.db.helpers import delete_many, insert_many, pull_many
from slivka.scheduler import Runner, Scheduler
from slivka.scheduler.runners import Job, RunnerID
from slivka.scheduler.scheduler import (
ERROR,
REJECTED,
ExecutionDeferred,
ExecutionFailed,
)
def new_runner(service, name, command=None, args=None, env=None):
    """Build a minimal Runner for tests; unset options default to empty."""
    runner_id = RunnerID(service, name)
    return Runner(
        runner_id,
        command=[] if not command else command,
        args=[] if not args else args,
        outputs=[],
        env={} if not env else env,
    )
@pytest.fixture()
def mock_batch_start():
    """Patch Runner.batch_start for the duration of a test."""
    with mock.patch.object(Runner, "batch_start") as mock_method:
        yield mock_method
@pytest.fixture()
def mock_check_status():
    """Patch Runner.check_status for the duration of a test."""
    with mock.patch.object(Runner, "check_status") as mock_method:
        yield mock_method
@pytest.fixture()
def mock_submit():
    """Patch Runner.submit for the duration of a test."""
    with mock.patch.object(Runner, "submit") as mock_method:
        yield mock_method
def test_group_requests(job_directory):
    """Requests are routed to the runner named by the service's selector; a
    selector returning None sends the request to the REJECTED bucket."""
    scheduler = Scheduler(job_directory)
    runner1 = new_runner("example", "runner1")
    runner2 = new_runner("example", "runner2")
    scheduler.add_runner(runner1)
    scheduler.add_runner(runner2)
    # Selector picks the runner name straight out of the request inputs.
    scheduler.selectors["example"] = lambda inputs: inputs.get("use")
    requests = [
        JobRequest(service="example", inputs={"use": "runner1"}),
        JobRequest(service="example", inputs={"use": "runner2"}),
        JobRequest(service="example", inputs={"use": None}),
        JobRequest(service="example", inputs={"use": "runner1"}),
    ]
    grouped = scheduler.group_requests(requests)
    assert grouped == {
        runner1: in_any_order(requests[0], requests[3]),
        runner2: in_any_order(requests[1]),
        REJECTED: in_any_order(requests[2]),
    }
def test_group_requests_if_runner_does_not_exist(job_directory):
    """A selector naming an unregistered runner lands the request in ERROR."""
    scheduler = Scheduler(job_directory)
    runner1 = new_runner("example", "runner1")
    scheduler.add_runner(runner1)
    # "runner2" is never registered with the scheduler.
    scheduler.selectors["example"] = lambda inputs: "runner2"
    requests = [JobRequest(service="example", inputs={})]
    grouped = scheduler.group_requests(requests)
    assert grouped == {ERROR: in_any_order(*requests)}
def create_requests(count=1, service="example"):
    """Create `count` JobRequest objects with fresh ids and distinct inputs."""
    requests = []
    for i in range(count):
        requests.append(
            JobRequest(
                _id=bson.ObjectId(), service=service, inputs={"input": "val%d" % i}
            )
        )
    return requests
def test_start_requests_if_successful_start(job_directory, mock_batch_start):
    """_start_requests pairs each request with the Job its runner created."""
    scheduler = Scheduler(job_directory)
    runner = new_runner("example", "example")
    requests = [
        JobRequest(
            _id=bson.ObjectId(), service="example", inputs={"input": "val"}
        ),
        JobRequest(
            _id=bson.ObjectId(), service="example", inputs={"input": "val2"}
        ),
    ]
    # Fabricate one Job per working directory, with ids 0000, 0001, ...
    mock_batch_start.side_effect = lambda inputs, cwds: (
        [Job("%04x" % i, cwd) for i, cwd in enumerate(cwds)]
    )
    started = scheduler._start_requests(runner, requests)
    assert started == in_any_order(
        *((req, Job("%04x" % i, anything())) for i, req in enumerate(requests))
    )
def test_start_requests_deferred_execution_if_error_raised(
    job_directory, mock_batch_start
):
    """A transient start failure raises ExecutionDeferred (retry later)."""
    scheduler = Scheduler(job_directory)
    runner = new_runner("example", "example")
    requests = create_requests(2)
    mock_batch_start.side_effect = OSError
    with pytest.raises(ExecutionDeferred):
        scheduler._start_requests(runner, requests)
def test_start_request_failed_execution_if_too_many_errors_raised(
    job_directory, mock_batch_start
):
    """With a zero failure limit, a start failure is final: ExecutionFailed."""
    scheduler = Scheduler(job_directory)
    runner = new_runner("example", "example")
    requests = create_requests(3)
    scheduler.set_failure_limit(0)
    mock_batch_start.side_effect = OSError
    with pytest.raises(ExecutionFailed):
        scheduler._start_requests(runner, requests)
class TestJobStatusUpdates:
    """main_loop must propagate runner-reported job states onto the stored
    JobRequest documents."""

    @pytest.fixture()
    def requests(self, database):
        # Five persisted requests, removed from the database after each test.
        requests = create_requests(5)
        insert_many(database, requests)
        yield requests
        delete_many(database, requests)

    @pytest.fixture()
    def scheduler(self, job_directory):
        # Scheduler with one runner; the selector always picks it.
        scheduler = Scheduler(job_directory)
        runner = new_runner("example", "example")
        scheduler.add_runner(runner)
        scheduler.selectors["example"] = lambda inputs: "example"
        return scheduler

    @pytest.mark.parametrize("status", list(JobStatus))
    def test_check_status_updates_requests(
        self,
        scheduler,
        requests,
        database,
        mock_batch_start,
        mock_check_status,
        status,
    ):
        """Whatever status check_status reports ends up on every request."""
        # must start the job, before moving to status check stage
        mock_batch_start.side_effect = lambda inputs, cwds: (
            [Job("%04x" % i, cwd) for i, cwd in enumerate(cwds)]
        )
        mock_check_status.return_value = status
        scheduler.main_loop()
        pull_many(database, requests)
        assert all(req.state == status for req in requests)

    def test_submit_deferred_job_status_not_updated(
        self, scheduler, requests, database, mock_submit
    ):
        """A transient submit failure keeps requests ACCEPTED for a retry."""
        mock_submit.side_effect = OSError
        scheduler.main_loop()
        pull_many(database, requests)
        assert all(req.state == JobStatus.ACCEPTED for req in requests)

    def test_submit_failed_job_status_set_to_error(
        self, scheduler, requests, database, mock_submit
    ):
        """With a zero failure limit, a submit failure marks requests ERROR."""
        mock_submit.side_effect = OSError
        scheduler.set_failure_limit(0)
        scheduler.main_loop()
        pull_many(database, requests)
        assert all(req.state == JobStatus.ERROR for req in requests)
class TestServiceStatusUpdates:
    """main_loop must record per-runner ServiceState (OK/WARNING/DOWN) from
    submission and status-check outcomes."""

    @pytest.fixture(autouse=True)
    def requests(self, database):
        # Persisted requests so main_loop has work to do; cleaned up afterwards.
        requests = create_requests(5)
        insert_many(database, requests)
        yield requests
        delete_many(database, requests)

    @pytest.fixture()
    def scheduler(self, job_directory):
        scheduler = Scheduler(job_directory)
        runner = new_runner("example", "default")
        scheduler.add_runner(runner)
        return scheduler

    def test_service_start_successful(
        self, database, scheduler, mock_submit, mock_check_status
    ):
        """Successful submit + QUEUED status marks the service OK."""
        mock_submit.side_effect = lambda cmd: Job("0x00", cmd.cwd)
        mock_check_status.return_value = JobStatus.QUEUED
        scheduler.main_loop()
        state = ServiceState.find_one(
            database, service="example", runner="default"
        )
        assert state.state == ServiceState.OK

    def test_service_start_soft_fail(self, database, scheduler, mock_submit):
        """A recoverable submit failure degrades the service to WARNING."""
        mock_submit.side_effect = OSError
        scheduler.main_loop()
        state = ServiceState.find_one(
            database, service="example", runner="default"
        )
        assert state.state == ServiceState.WARNING

    def test_service_start_hard_fail(self, database, scheduler, mock_submit):
        """With a zero failure limit, a submit failure marks the service DOWN."""
        scheduler.set_failure_limit(0)
        mock_submit.side_effect = OSError
        scheduler.main_loop()
        state = ServiceState.find_one(
            database, service="example", runner="default"
        )
        assert state.state == ServiceState.DOWN

    @pytest.mark.xfail(reason="service status should not rely on erroneous jobs")
    def test_service_check_status_returned_all_errors(
        self, database, scheduler, mock_submit, mock_check_status
    ):
        """All jobs erroring currently implies DOWN (known-questionable)."""
        mock_submit.side_effect = lambda cmd: Job("0x00", cmd.cwd)
        mock_check_status.return_value = JobStatus.ERROR
        scheduler.main_loop()
        state = ServiceState.find_one(
            database, service="example", runner="default"
        )
        assert state.state == ServiceState.DOWN

    def test_service_check_status_throws_exception(
        self, database, scheduler, mock_submit, mock_check_status
    ):
        """A crash inside check_status degrades the service to WARNING."""
        mock_submit.side_effect = lambda cmd: Job("0x00", cmd.cwd)
        mock_check_status.side_effect = Exception
        scheduler.main_loop()
        state = ServiceState.find_one(
            database, service="example", runner="default"
        )
        assert state.state == ServiceState.WARNING
10914202370 | #!/usr/bin/env python3
import os
import mod_resource
import mod_something
if __name__ == "__main__":
    print("Hello, world! -> something returns: {}".format(mod_something.something()))
    # Locate resource.txt next to the installed mod_resource module.
    res_path = \
        os.path.join(os.path.dirname(mod_resource.__file__), 'resource.txt')
    with open(res_path) as f:
        text = f.read()
    print("From mod_resource:resource.txt -> {}".format(text))
| borntocodeRaj/sphinx_configuration | tests/roots/test-apidoc-toc/mypackage/main.py | main.py | py | 403 | python | en | code | 1 | github-code | 36 |
28121817798 | import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.options
import settings
from handlers import *
def make_app():
    """Build the Tornado Application with the project's routes and settings.

    Fix: removed the unused local ``db = None`` left over from an abandoned
    database hookup.
    """
    handlers = [
        (r"/", MainHandler),
        (r"/covert", CovertHandler)
    ]
    config = {
        "template_path": settings.TEMPLATE_PATH,
        "static_path": settings.ASSETS_PATH,
        "cookie_secret": settings.COOKIE_SECRET,
        "debug": True,
    }
    return tornado.web.Application(handlers, **config)
if __name__ == '__main__':
    # Parse --logging etc. from the command line, then serve forever.
    tornado.options.parse_command_line()
    app = make_app()
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(settings.SERVER_PORT)
    tornado.ioloop.IOLoop.instance().start()
| caroltc/lrc2srt | app.py | app.py | py | 672 | python | en | code | 2 | github-code | 36 |
41278650678 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 8 16:46:19 2019
@author: sanjain6
"""
if __name__ == '__main__':
    # Count (possibly overlapping) occurrences of c as a substring of s.
    s = "abcab"
    c = 'abc'
    q = 0
    for i in range(len(s) - len(c) + 1):
        # The slice has exactly len(c) characters, so the original membership
        # test (`c in slice`) was really an equality test; make it explicit.
        # Also removed the unused `p = len(s)`.
        if s[i: i + len(c)] == c:
            q += 1
    print(q)
| San0506/Use-Cases-Python-String-Manipulation | String_mutate.py | String_mutate.py | py | 339 | python | en | code | 0 | github-code | 36 |
12004997372 | import socket
import os
import sys
# 第三方库
from 网络 import 创建网络
if __name__ == '__main__':
    # Configure the local listening address/port on the helper module,
    # then run a minimal HTTP CONNECT-style proxy handshake demo.
    创建网络.本地服务器地址='127.0.0.1'
    创建网络.本地服务器端口=1082
    插座= 创建网络.申请插座()
    print("服务器的socket建立了")
    # Allow quick restart on the same port after a crash (SO_REUSEADDR).
    插座.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    创建网络.绑定插座(插座)
    print('服务器的socket绑定到%s:%d'%(创建网络.本地服务器地址,创建网络.本地服务器端口))
    # Listen with a backlog of 500 pending connections.
    创建网络.监听插座(插座,500)
    print('服务器开始监听端口%d,等待连接。'%创建网络.本地服务器端口)
    while True:
        连线,客户端地址 = 创建网络.插座接受的连接(插座)
        print("服务器sock连接到客户端地址:",客户端地址)
        try:
            # 5-second receive timeout per client connection.
            创建网络.连线超时(连线, 5)
            接收缓冲区=创建网络.连线接收的内容(连线, 2048)
            print('客户端发来数据:%s'%接收缓冲区)
            # if headers["Method"]=="CONNECT":
            # Reply as an HTTP CONNECT tunnel would, then read the client's
            # follow-up to observe the proxy handshake.
            发送的内容=b'HTTP/1.1 200 Connection Established\r\n\r\n'
            创建网络.连线发送(连线, 发送的内容)
            print("向客户端发送了HTTP/1.1 200 Connection Established\r\n\r\n")
            接收缓冲区=创建网络.连线接收的内容(连线, 2048)
            print('客户端再次发来数据:%s'%接收缓冲区)
            #else:
            #print("error")
        except socket.timeout:
            print ('time out')
        创建网络.关闭连接(连线)
    # NOTE(review): unreachable — the accept loop above never exits.
    创建网络.关闭插座(插座)
| littleguyy/python-practice | http代理原理.py | http代理原理.py | py | 1,671 | python | zh | code | 2 | github-code | 36 |
2964066145 | #!/bin/python3
import os
import sys
class Node(object):
    """A graph vertex: id `n`, neighbour-id -> Node mapping, and a `feet`
    counter tracking how many active edges touch this vertex."""

    def __init__(self, n):
        self.n = n
        self.neighbors = {}
        self.feet = 0

    def copy(self):
        """Shallow copy: the neighbour dict is duplicated, but the Node
        objects it references are shared with the original."""
        clone = Node(self.n)
        clone.neighbors = self.neighbors.copy()
        clone.feet = self.feet
        return clone
class Graph(object):
    """Undirected graph of Node objects with crab-graph bookkeeping.

    `t` is the maximum number of feet allowed per crab; `crabs` counts
    vertices whose feet counter is currently non-zero, maintained
    incrementally by turn_on/turn_off.
    """

    def __init__(self, edges=[], t=None):
        # NOTE(review): mutable default `edges=[]` is shared across calls;
        # harmless here because it is only iterated, never mutated.
        self.nodes = {}
        self.t = t
        self.crabs = 0
        for edge in edges:
            self.insert_edge(edge)

    def walk(self, edge=None):
        """Yield edges in DFS order starting from `edge[0]` (or vertex 1),
        visiting each undirected edge at most once."""
        vertex = edge[0] if edge else 1
        q = [vertex]
        visited = [edge]
        while q:
            node = q.pop()
            for neighbor in self.nodes[node].neighbors:
                # Skip edges already seen in either orientation.
                if [neighbor, node] in visited or [node, neighbor] in visited:
                    continue
                q.append(neighbor)
                visited.append([node, neighbor])
                yield [node, neighbor]

    def insert(self, node):
        # Create the vertex lazily if it is not present yet.
        if node not in self.nodes:
            self.nodes[node] = Node(node)

    def insert_edge(self, edge):
        # Insert both endpoints and link them symmetrically.
        v0, v1 = edge
        self.insert(v0)
        self.insert(v1)
        self.nodes[v0].neighbors[v1] = self.nodes[v1]
        self.nodes[v1].neighbors[v0] = self.nodes[v0]

    def violates_rules(self, edge):
        """Return False when one endpoint can act as a head (feet <= t) while
        the other is a single foot (feet == 1); True otherwise.
        NOTE(review): the print calls look like debug leftovers."""
        v0, v1 = edge
        if (self.nodes[v0].feet <= self.t) and (self.nodes[v1].feet == 1):
            print("False")
            return False
        if (self.nodes[v1].feet <= self.t) and (self.nodes[v0].feet == 1):
            print("False")
            return False
        print("True")
        return True

    def turn_on(self, edge):
        """Activate an edge: bump both feet counters and count newly
        non-zero vertices into `crabs` (bool arithmetic: True == 1)."""
        v0, v1 = edge
        self.insert_edge(edge)
        self.nodes[v0].feet += 1
        self.nodes[v1].feet += 1
        self.crabs += self.nodes[v0].feet==1
        self.crabs += self.nodes[v1].feet==1

    def turn_off(self, edge):
        """Deactivate an edge: drop both feet counters and remove vertices
        that reached zero from the `crabs` tally."""
        v0, v1 = edge
        self.nodes[v0].feet -= 1
        self.nodes[v1].feet -= 1
        self.crabs -= self.nodes[v0].feet==0
        self.crabs -= self.nodes[v1].feet==0

    def copy(self):
        # Copy each Node (shallow neighbour dicts); carry t and crabs over.
        g = Graph()
        g.nodes = {k: v.copy() for k,v in self.nodes.items()}
        g.t = self.t
        g.crabs = self.crabs
        return g

    def __str__(self):
        # "<vertex>: <feet>, " for every vertex, for quick debugging.
        s = ""
        for node in self.nodes:
            s += "{}: {}, ".format(node, self.nodes[node].feet)
        return s
#
# Complete the crabGraphs function below.
#
def crabGraphs(n, t, graph):
    """Greedy heuristic for the HackerRank 'crab graphs' problem: grow a crab
    graph edge by edge and return the number of covered vertices (crabs).
    NOTE(review): a greedy walk is not guaranteed optimal for this problem,
    and the prints are debug leftovers.
    """
    #
    # Write your code here.
    #
    # dp -
    # grow the graph edge by edge. for each edge keep the best graph with and without it.
    # for the next edge, check all neighboring visited edges, and take the best graph without it and the best graph with it (restriction - must connect to head)
    # try to grow the graph edge by edge,
    # for each edge:
    # check every other edge, if violates the rules turn of, else turn on
    # if the graph is better with the edge, keep it on, else turn off
    graph = Graph(graph)
    crab_graph = Graph(t=t)
    num_crabs = 0
    for edge in graph.walk():
        print(edge)
        # Tentatively add the edge on a copy, then prune rule violations.
        tmp_graph = crab_graph.copy()
        tmp_graph.turn_on(edge)
        for edge_ in tmp_graph.walk(edge):
            if tmp_graph.violates_rules(edge_):
                tmp_graph.turn_off(edge_)
        # Keep the candidate only if it covers more vertices.
        if tmp_graph.crabs > num_crabs:
            num_crabs = tmp_graph.crabs
            crab_graph = tmp_graph
    print(crab_graph)
    return num_crabs
if __name__ == '__main__':
    # Input format: c test cases; each gives "n t m" then m edge lines.
    c = int(input())
    for c_itr in range(c):
        ntm = input().split()
        n = int(ntm[0])
        t = int(ntm[1])
        m = int(ntm[2])
        graph = []
        for _ in range(m):
            graph.append(list(map(int, input().rstrip().split())))
        result = crabGraphs(n, t, graph)
        print(str(result) + '\n')
| jvalansi/interview_questions | crab.py | crab.py | py | 3,838 | python | en | code | 0 | github-code | 36 |
11526198960 | import asyncio
import json
import multiprocessing as mp
from importlib import import_module
from django import http
from django.conf import settings
from django.core.cache import caches
from django.core.handlers.asgi import ASGIRequest
from django.contrib import auth
from django.utils import timezone
from asgiref.sync import sync_to_async
from loguru import logger
from worlds.models import Job, StreamLog
def add_websocket(app):
    """Wrap an ASGI application so websocket scopes are handled by
    logging_socket while every other scope falls through to `app`."""
    async def websocket_app(scope, receive, send):
        if scope["type"] != "websocket":
            await app(scope, receive, send)
            return
        await logging_socket(scope, receive, send)
    return websocket_app
class AsyncWarpzoneRequest(ASGIRequest):
    """ASGIRequest variant for websocket scopes: forces method GET (websocket
    scopes carry no HTTP method) and exposes the handshake query string as
    the `WS` QueryDict."""

    def __init__(self, scope, body_file):
        scope['method'] = 'GET'
        super().__init__(scope, body_file)
        # Parsed ?key=value pairs from the websocket handshake URL.
        self.WS = http.QueryDict(scope.get('query_string', b'').decode())
def init_request(request):
    """Attach a Django session and the authenticated user to `request`, using
    the session cookie from the websocket handshake.  Intended to run via
    sync_to_async since session/auth access is synchronous."""
    engine = import_module(settings.SESSION_ENGINE)
    session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
    request.session = engine.SessionStore(session_key)
    request.user = auth.get_user(request)
def get_job(jid, obj=False):
    """Look up a Job by id; return the model when `obj` is true, its JSON
    dict otherwise, or {} when no such job exists."""
    job = Job.objects.filter(id=jid).first()
    if job is None:
        return {}
    return job if obj else job.to_json()
def get_log(job, pod, obj=False):
    # Return the StreamLog for (job, pod), or None when absent.
    # NOTE(review): `obj` is accepted for symmetry with get_job but ignored —
    # the model instance is always returned; confirm whether a JSON path was
    # intended.
    return StreamLog.objects.filter(job=job, pod=pod).first()
async def watch_log_data(job, pod, send, log_queue):
    """Poll the StreamLog for (job, pod), pushing newly cached log lines to
    the websocket until the log completes/fails or shutdown is requested via
    `log_queue`."""
    lines = 0   # number of log lines already forwarded to the client
    wait = 0.1  # first poll is quick; subsequent polls back off to 3 s
    while 1:
        try:
            await asyncio.sleep(wait)
            wait = 3.0
            log = await sync_to_async(get_log, thread_sensitive=True)(job, pod)
            if log:
                if log.lines != lines:
                    lines_send = ''  # NOTE(review): unused — looks like a leftover
                    line_array = []
                    # Log lines are cached under keys "<pod>-<line_no>".
                    for i in range(lines, log.lines):
                        line_array.append(f'{pod}-{i}')
                    if line_array:
                        line_dict = caches['default'].get_many(line_array)
                        msg_lines = ''
                        for l in line_array:
                            m = line_dict.get(l, None)
                            if m is not None:
                                msg_lines += m
                        if msg_lines:
                            msg = {'type': 'log', 'data': msg_lines}
                            await send({'type': 'websocket.send', 'text': json.dumps(msg)})
                    lines = log.lines
                if log.status in ['completed', 'failed']:
                    break
        except:
            # NOTE(review): bare except also re-raises CancelledError after
            # printing a traceback — confirm that noise is acceptable.
            import traceback
            traceback.print_exc()
            raise
        try:
            # Non-blocking shutdown check signalled by logging_socket.
            if log_queue.get_nowait():
                log_queue.task_done()
                caches['default'].set(f'shutdown-{pod}', 'shutdown', 60)
                return
        except asyncio.QueueEmpty:
            pass
async def watch_job_data(job, send, queue):
    """Poll the Job document roughly every 5 s and push a 'job' message
    whenever its modified timestamp changes, until shutdown is requested via
    `queue`."""
    jdata = await sync_to_async(get_job, thread_sensitive=True)(job)
    last = timezone.now()
    while 1:
        # Short sleep keeps the queue-based shutdown responsive.
        await asyncio.sleep(0.1)
        now = timezone.now()
        diff = now - last
        if diff.total_seconds() > 5:
            last = now
            new_data = await sync_to_async(get_job, thread_sensitive=True)(job)
            if new_data['modified'] != jdata['modified']:
                jdata = new_data
                msg = {'type': 'job', 'data': jdata}
                logger.info('Sending job update: {} {}', jdata['id'], jdata['status'])
                await send({'type': 'websocket.send', 'text': json.dumps(msg)})
        try:
            # Non-blocking shutdown check signalled by logging_socket.
            if queue.get_nowait():
                queue.task_done()
                return
        except asyncio.QueueEmpty:
            pass
async def logging_socket(scope, receive, send):
    """ASGI websocket handler streaming job updates (and pod logs when ?pod=
    is given) to an authenticated Django user; ?job= selects the job."""
    request = AsyncWarpzoneRequest(scope, None)
    await sync_to_async(init_request, thread_sensitive=True)(request)
    task = None
    log_task = None
    log_queue = None
    connected = False
    while 1:
        event = await receive()
        job = request.WS.get('job')
        pod = request.WS.get('pod')
        if event['type'] == 'websocket.connect':
            logger.info('Websocket Connected')
            if not request.user.is_authenticated:
                logger.info('User not authenticated, Closing Socket')
                await send({'type': 'websocket.close'})
                return
            # One watcher per connection for job state, plus an optional
            # log watcher when a pod was requested.
            job_queue = asyncio.Queue()
            task = asyncio.create_task(watch_job_data(job, send, job_queue))
            if pod:
                log_queue = asyncio.Queue()
                log_task = asyncio.create_task(watch_log_data(job, pod, send, log_queue))
            await send({'type': 'websocket.accept'})
            connected = True
        if connected and event['type'] == 'websocket.disconnect':
            logger.info('Websocket Disconnected')
            # Signal each watcher, wait for acknowledgement, then cancel it.
            await job_queue.put(True)
            await job_queue.join()
            task.cancel()
            await asyncio.gather(task, return_exceptions=True)
            if log_queue:
                await log_queue.put(True)
                await log_queue.join()
                log_task.cancel()
                await asyncio.gather(log_task, return_exceptions=True)
            return
        if connected and event['type'] == 'websocket.receive':
            # Inbound client messages are acknowledged but otherwise ignored.
            logger.info('Received Message')
| cognitive-space/warpzone | worlds/websocket.py | websocket.py | py | 5,411 | python | en | code | 1 | github-code | 36 |
class Solution:
    """LeetCode 54: return the elements of a matrix in spiral order."""

    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        """Walk the matrix cell by cell, turning right whenever the next
        step would leave the grid or revisit a cell."""
        rows, cols = len(matrix), len(matrix[0])
        total = rows * cols
        order = []
        seen = [[False] * cols for _ in range(rows)]
        # Direction cycle: right, down, left, up.
        deltas = ((0, 1), (1, 0), (0, -1), (-1, 0))
        r = c = di = 0
        while len(order) < total:
            order.append(matrix[r][c])
            seen[r][c] = True
            dr, dc = deltas[di]
            nr, nc = r + dr, c + dc
            if not (0 <= nr < rows and 0 <= nc < cols and not seen[nr][nc]):
                di = (di + 1) % 4
                dr, dc = deltas[di]
                nr, nc = r + dr, c + dc
            r, c = nr, nc
        return order
34088291435 | from abc import ABC, abstractmethod
def process_for_variable(var, table_info):
    """Qualify `var` against the table registry.

    If `var` contains a registered table name followed by a column
    ('table.col'), return 'alias.col' (or 'table.col' when no alias is set).
    A bare table reference expands its registered sub-query inline as
    '(SQL)'.  Anything else is returned unchanged.
    """
    for table_name, info in table_info.items():
        pieces = var.split(table_name)
        if len(pieces) < 2:
            continue
        column = pieces[-1][1:]
        if column == '':
            # Bare table reference: render the sub-query inline.
            return '({})'.format(info['subQuery'].export_sql())
        prefix = info['alias'] or table_name
        return '{}.{}'.format(prefix, column)
    return var
def divide_symbols_and_values(condition):
    """Split each {field: "lhs <op> rhs"} condition into (field, op, value)
    triples, trying longer operators first so '<=' wins over '<'.

    Like the original, the value is the text after the LAST occurrence of the
    matched operator, and fields with no recognised operator are skipped.
    """
    operators = ('<=', '>=', '>', '<', '=', 'in ', 'IN ')
    expressions = []
    for field, expr in condition.items():
        for op in operators:
            parts = expr.split(op)
            if len(parts) > 1:
                expressions.append((field, op.strip(), parts[-1].strip()))
                break
    return expressions
class Clause(ABC):
    """Base class for SQL clause builders.

    Subclasses receive the shared table registry via update_table() — a dict
    of table name -> {'alias': str|None, 'subQuery': query|None} — and an
    alias allocator via set_aliasState(), then render SQL with export_sql().
    """

    def update_table(self, table):
        # Shared registry consulted when qualifying column references.
        self.table = table

    def set_aliasState(self, aliasState):
        # Alias allocator (see class Alias) used when new aliases are needed.
        self.aliasState = aliasState

    @abstractmethod
    def export_sql(self, aliasState):
        """Return this clause's SQL fragment as a string."""
        pass
class Select(Clause):
    """SELECT clause: renders the chosen fields alias-qualified, or '*'
    when no fields were given."""

    def __init__(self, fields):
        self.fields = fields

    def export_sql(self, aliasState):
        self.set_aliasState(aliasState=aliasState)
        if not self.fields:
            return 'SELECT *'
        qualified = ', '.join(
            process_for_variable(field, self.table) for field in self.fields
        )
        return 'SELECT {}'.format(qualified)
class From(Clause):
    """FROM clause: renders a list of tables, expanding registered sub-queries
    into aliased inline views, and aliasing registered tables when more than
    one table is listed."""

    def __init__(self, tableName):
        # Despite the singular name, `tableName` is a list of table names.
        self.tableName_list = tableName

    def check_inlineView(self, tableName):
        # A table renders as an inline view when registered with a sub-query.
        if not tableName in self.table:
            return False
        if self.table[tableName]['subQuery'] :
            return True
        return False

    def export_sql(self, aliasState):
        self.set_aliasState(aliasState=aliasState)
        table_list = []
        for tableName in self.tableName_list:
            if self.check_inlineView(tableName=tableName):
                # Render the sub-query and record its fresh alias so later
                # clauses qualify columns against it.
                inleview_clause = self.table[tableName]['subQuery'].export_sql(aliasState=aliasState)
                self.table[tableName]['alias'] = self.aliasState.get_alias()
                alias = self.table[tableName]['alias']
                table_list.append(f'({inleview_clause}) {alias}')
            else:
                if len(self.tableName_list)>1:
                    # Multi-table FROM: alias registered tables to disambiguate.
                    if tableName in self.table:
                        self.table[tableName]['alias'] = self.aliasState.get_alias()
                        alias = self.table[tableName]['alias']
                        table_list.append(f'{tableName} {alias}')
                    else:
                        table_list.append(f'{tableName}')
                else:
                    table_list.append(tableName)
        from_part = ', '.join(table_list)
        return f'FROM {from_part}'
class Join(Clause):
    """JOIN clause: renders '<HOW> JOIN <table> [alias]', expanding a table
    registered with a sub-query into an aliased inline view."""

    def __init__(self, tableName, how):
        self.tableName = tableName
        self.how = how

    def check_inlineView(self, tableName):
        # True when the table is registered with a sub-query to inline.
        if self.table[tableName]['subQuery']:
            return True
        return False

    def export_sql(self, aliasState):
        # Unregistered table: emit it verbatim with no alias bookkeeping.
        if not self.tableName in self.table:
            return f'{self.how} JOIN {self.tableName}'
        self.set_aliasState(aliasState=aliasState)
        if self.check_inlineView(self.tableName):
            # Render the sub-query and record a fresh alias for later clauses.
            inleview_clause = self.table[self.tableName]['subQuery'].export_sql(aliasState=aliasState)
            self.table[self.tableName]['alias'] = self.aliasState.get_alias()
            alias = self.table[self.tableName]['alias']
            return f'{self.how} JOIN ({inleview_clause}) {alias}'
        # Registered plain table.  Fix: the early return above already
        # guarantees membership, so the original's re-check of
        # `self.tableName in self.table` (and its unreachable else branch)
        # has been removed.
        alias = self.table[self.tableName]['alias']
        return f'{self.how} JOIN {self.tableName} {alias}'
class On(Clause):
    """ON clause: AND-joins the given {field: "expr <op> value"} conditions,
    qualifying both sides of each comparison via the table registry."""

    def __init__(self, condition):
        self.condition = condition

    def export_sql(self, aliasState):
        self.set_aliasState(aliasState=aliasState)
        var_symbols_var_tuple_lst = divide_symbols_and_values(self.condition)
        processed = []
        for tuple_ in var_symbols_var_tuple_lst:
            # Qualify field, operator and value alike (non-columns pass through).
            processed.append([process_for_variable(i, self.table) for i in tuple_])
        condition_lst = [' '.join(tuple_) for tuple_ in processed]
        on_part = ' AND '.join(condition_lst)
        return f'ON {on_part}'
class Where(Clause):
    """WHERE clause: AND-joins the given {field: "expr <op> value"}
    conditions, qualifying each part via the table registry.
    NOTE(review): structurally identical to On apart from the keyword —
    a shared base would remove the duplication."""

    def __init__(self, condition):
        self.condition = condition

    def export_sql(self, aliasState):
        self.set_aliasState(aliasState=aliasState)
        var_symbols_var_tuple_lst = divide_symbols_and_values(self.condition)
        processed = []
        for tuple_ in var_symbols_var_tuple_lst:
            processed.append([process_for_variable(i, self.table) for i in tuple_])
        condition_lst = [' '.join(tuple_) for tuple_ in processed]
        where_part = ' AND '.join(condition_lst)
        return f'WHERE {where_part}'
class GroupBy(Clause):
    """Renders the GROUP BY clause for the given fields."""

    def __init__(self, fields):
        self.fields = fields

    def export_sql(self, aliasState):
        """Build the 'GROUP BY ...' fragment with alias-qualified fields."""
        self.set_aliasState(aliasState=aliasState)
        field_list = ', '.join(
            process_for_variable(field, self.table) for field in self.fields
        )
        return 'GROUP BY ' + field_list
class Calculation(Clause):
    """Renders one SQL aggregate/analytic call such as SUM(col) or AVG(col)."""

    def __init__(self, field, kind):
        self.field = field
        self.kind = kind
        # Maps the builder's kind keyword to the SQL function name.
        # NOTE(review): the attribute name `sql_fuction` is misspelled but
        # kept as-is for backward compatibility with any external access.
        self.sql_fuction = {
            'sum': 'SUM',
            'min': 'MIN',
            'max': 'MAX',
            'avg': 'AVG',
            'count': 'COUNT',
            'variance': 'VARIANCE',
            'stddev': 'STDDEV',
            'lag': 'LAG',
        }

    def export_sql(self, aliasState):
        """Build the '<FUNC>(<field>)' fragment with an alias-qualified field."""
        self.set_aliasState(aliasState=aliasState)
        func_name = self.sql_fuction[self.kind]
        operand = process_for_variable(self.field, self.table)
        return f'{func_name}({operand})'
class OrderBy(Clause):
    """Renders the ORDER BY clause for the given fields."""

    def __init__(self, fields):
        self.fields = fields

    def export_sql(self, aliasState):
        """Build the 'ORDER BY ...' fragment with alias-qualified fields."""
        self.set_aliasState(aliasState=aliasState)
        field_list = ', '.join(
            process_for_variable(field, self.table) for field in self.fields
        )
        return 'ORDER BY ' + field_list
class Alias:
    """Hands out short, unique table aliases in order: 'a', 'b', 'c', ...

    Generalization: the original implementation raised IndexError after the
    11 hard-coded candidates were exhausted; this version keeps producing
    aliases indefinitely ('l'..'z', then 'aa', 'ab', ...) while the first 11
    aliases are unchanged.
    """

    def __init__(self):
        # Kept for backward compatibility with code inspecting this list;
        # get_alias no longer fails once these candidates run out.
        self.alias_candidate = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
        self.alias_num = 0  # how many aliases have been handed out so far

    def get_alias(self):
        """Return the next unused alias and advance the internal counter."""
        n = self.alias_num
        if n < len(self.alias_candidate):
            alias = self.alias_candidate[n]
        else:
            # Bijective base-26 encoding over 'a'..'z' so every call yields
            # a fresh alias: 25 -> 'z', 26 -> 'aa', 27 -> 'ab', ...
            alias = ''
            remaining = n
            while True:
                remaining, digit = divmod(remaining, 26)
                alias = chr(ord('a') + digit) + alias
                if remaining == 0:
                    break
                remaining -= 1
        self.alias_num += 1
        return alias
class QuerySql:
    """Fluent builder that assembles a SQL SELECT statement clause by clause.

    Each builder method (select, from_, join, where, ...) stores a Clause
    object and returns ``self`` so calls can be chained; ``export_sql``
    renders the final statement.
    """

    def __init__(self, tableName, subQuery=None):
        self.table = tableName            # primary table name for this query
        self.table_info = dict()          # tableName -> {'subQuery': ..., 'alias': ...}
        self.subQuery = dict()
        self.select_part = None
        self.from_part = None
        self.join_part = None
        self.on_part = None
        self.where_part = None
        self.groupBy_part = None
        self.calculation_part = []        # list of Calculation clauses
        self.orderBy_part = None
        self.alias_state = None
        self.set_table(tableName=tableName, subQuery=subQuery)

    def _all_parts(self):
        """Yield every clause object currently attached to this query."""
        for part in (self.select_part, self.from_part, self.join_part,
                     self.on_part, self.where_part, self.groupBy_part,
                     self.orderBy_part):
            if part:
                yield part
        for part in self.calculation_part:
            yield part

    def set_table(self, tableName, subQuery=None):
        """Register a table (optionally backed by a sub-query) and refresh
        every existing clause with the new table info.

        Fix: the original refreshed only select/from/join/where and skipped
        on/groupBy/orderBy/calculation parts; all clauses share the same
        table_info, so they are now refreshed uniformly.
        """
        self.table_info[tableName] = {'subQuery': subQuery, 'alias': None}
        for part in self._all_parts():
            part.update_table(table=self.table_info)
        return self

    def select(self, *fields):
        """Set the SELECT clause; implicitly sets FROM to the primary table."""
        self.select_part = Select(fields=fields)
        self.select_part.update_table(table=self.table_info)
        if self.table:
            self.from_(self.table)
        return self

    def from_(self, *tableName):
        """Set the FROM clause for one or more tables."""
        self.from_part = From(tableName=tableName)
        self.from_part.update_table(table=self.table_info)
        return self

    def join(self, tableName, how='LEFT', **condition):
        """Attach a JOIN clause; keyword conditions become the ON clause."""
        self.join_part = Join(tableName=tableName, how=how.upper())
        self.join_part.update_table(table=self.table_info)
        self.on_part = On(condition=condition)
        self.on_part.update_table(table=self.table_info)
        return self

    def on(self, **condition):
        """Replace the ON clause with the given keyword conditions."""
        self.on_part = On(condition=condition)
        self.on_part.update_table(table=self.table_info)
        return self

    def where(self, **condition):
        """Set the WHERE clause from keyword conditions."""
        self.where_part = Where(condition=condition)
        self.where_part.update_table(table=self.table_info)
        return self

    def group_by(self, *fields):
        """Set the GROUP BY clause."""
        self.groupBy_part = GroupBy(fields)
        self.groupBy_part.update_table(table=self.table_info)
        return self

    def _add_calculation(self, fields, kind):
        """Shared implementation behind sum/min/max/...: append a Calculation
        clause and refresh table info on all calculation clauses."""
        self.calculation_part.append(Calculation(fields, kind=kind))
        for part in self.calculation_part:
            part.update_table(table=self.table_info)
        return self

    def sum(self, fields):
        return self._add_calculation(fields, 'sum')

    def min(self, fields):
        return self._add_calculation(fields, 'min')

    def max(self, fields):
        return self._add_calculation(fields, 'max')

    def avg(self, fields):
        return self._add_calculation(fields, 'avg')

    def variance(self, fields):
        return self._add_calculation(fields, 'variance')

    def stddev(self, fields):
        return self._add_calculation(fields, 'stddev')

    def count(self, fields):
        return self._add_calculation(fields, 'count')

    def lag(self, fields):
        return self._add_calculation(fields, 'lag')

    def order_by(self, *fields):
        """Set the ORDER BY clause."""
        self.orderBy_part = OrderBy(fields)
        self.orderBy_part.update_table(table=self.table_info)
        return self

    def export_sql(self, aliasState=None):
        """Render the complete SQL statement.

        :param aliasState: Alias generator shared across nested sub-queries;
                           a fresh one is created for the outermost call.
        :returns: The assembled SQL string with empty clauses omitted.
        """
        if not aliasState:
            self.alias_state = Alias()
            aliasState = self.alias_state
        # Rendering order matters: FROM/JOIN run first so table aliases are
        # assigned before the clauses that reference them.
        loop_order = {'from': self.from_part, 'join': self.join_part,
                      'on': self.on_part, 'groupBy': self.groupBy_part,
                      'select': self.select_part,
                      'calculation': self.calculation_part,
                      'where': self.where_part, 'orderBy': self.orderBy_part}
        # Output order of the clauses in the final statement.
        clause_order = {'select': '', 'from': '', 'join': '', 'on': '',
                        'where': '', 'groupBy': '', 'orderBy': ''}
        for clause, part in loop_order.items():
            if part:
                if clause == 'calculation':
                    # Calculations are folded into the SELECT clause,
                    # appended after any explicitly selected fields.
                    lst = [i.export_sql(aliasState) for i in part]
                    if self.select_part:
                        lst = [clause_order['select']] + lst
                        clause_order['select'] = ', '.join(lst)
                    else:
                        clause_order['select'] = 'SELECT ' + ', '.join(lst)
                else:
                    clause_order[clause] = part.export_sql(aliasState)
        part_list = list(v for v in clause_order.values() if v != '')
        return ' '.join(part_list)
| ajcltm/Isql_v3 | Isql/querySql.py | querySql.py | py | 12,110 | python | en | code | 0 | github-code | 36 |
74540116904 | loadModule('/Capella/Capella')
import sys
def capella_query(query_class, e_obj, cls = None):
"""Call a query from the semantic browser from the qualified class name of the query and the EObect to pass as parameter"""
res = []
for e in callQuery(query_class, e_obj.get_java_object()):
e_object_class = getattr(sys.modules["__main__"], "EObject")
specific_cls = e_object_class.get_class(e)
if specific_cls is not None:
res.append(specific_cls(e))
elif cls is not None:
res.append(cls(e))
return res
def capella_query_by_name(e_obj, query_name, cls = None):
"""Call a query from the semantic browser from the query name and the EObect to pass as parameter"""
res = []
for e in getSBQuery(e_obj.get_java_object(), query_name):
e_object_class = getattr(sys.modules["__main__"], "EObject")
specific_cls = e_object_class.get_class(e)
if specific_cls is not None:
res.append(specific_cls(e))
elif cls is not None:
res.append(cls(e))
return res
def available_query_names(e_obj):
"""List all available query names for the given EObject"""
return getAvailableSBQueries(e_obj.get_java_object())
def capella_version():
return getCapellaVersion()
def get_label(e_obj):
return getLabel(e_obj.get_java_object())
def is_system(component):
return isSystem(component)
def get_libraries(system_engineering):
res = []
if system_engineering is not None:
lib_cls = getattr(sys.modules["__main__"], "CapellaLibrary")
for value in getLibraries(system_engineering.get_java_object()):
lib = lib_cls()
lib.open(value)
res.append(lib)
return res
| kaynl/ROxE | Python4Capella/java_api/Capella_API.py | Capella_API.py | py | 1,766 | python | en | code | 3 | github-code | 36 |
71335904423 | from os import *
from sys import *
from collections import *
from math import *
def findInMatrix(x, arr):
# Write your code here
## We know that each column and each row is sorted.
## Let us begin from the Right Top most point
row, column = 0, len(arr[0])-1
while row<len(arr) and column>=0:
extremity_value = arr[row][column]
if extremity_value == x:
return True
elif extremity_value > x:
column -=1
else:
row +=1
return False | architjee/solutions | CodingNinjas/Search in a 2D matrix II.py | Search in a 2D matrix II.py | py | 521 | python | en | code | 0 | github-code | 36 |
70489044264 | import pytest
from unittest.mock import AsyncMock, patch
from api.exceptions import InvalidParameterError
from crawler.default.instances.second_instance import SecondInstance
# Mock para a resposta do ClientSession
mock_response = AsyncMock()
mock_response.text.return_value = 'Sample Text'
@pytest.mark.asyncio
async def test_capturar_numero_processo_codigo_invalid():
instance = SecondInstance("TJ", "http://example.com")
with pytest.raises(InvalidParameterError):
await instance._capturar_numero_processo_codigo("123456")
@pytest.mark.asyncio
@patch('crawler.default.instances.second_instance.ClientSession')
async def test_consultar_processo(mock_session):
mock_session.return_value.__aenter__.return_value.get.return_value = mock_response
instance = SecondInstance("TJ", "http://example.com")
result = await instance._consultar_processo("789")
assert result == "Sample Text"
| BrunoPisaneschi/JusBrasil | tests/unit/crawler/default/instances/test_second_instance.py | test_second_instance.py | py | 921 | python | en | code | 0 | github-code | 36 |
74949364585 | import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from RNN_torch.model import RNN
# Hyper parameters
BATCH_SIZE = 64
EPOCH = 1
TIME_STEP = 28 # 考虑多少个时间点的数据
INPUT_SIZE = 1 # 每个时间点给RNN多少个数据点
LR = 0.01
rnn = RNN(INPUT_SIZE)
print(rnn)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all cnn parameters
loss_func = nn.MSELoss()
h_state = None
plt.figure(1, figsize=(12, 5))
plt.ion()
for step in range(50):
start, end = step * np.pi, (step + 1) * np.pi
# use sin pre cos
steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
x_np = np.sin(steps)
y_np = np.cos(steps)
x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis]) # shape(batch, time_step, input_size)
y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
prediction, h_state = rnn(x, h_state)
h_state = h_state.data # !!! this step is important
loss = loss_func(prediction, y)
optimizer.zero_grad() # clear gradient for next train
loss.backward() # back propagation, compute gradient
optimizer.step()
# plot
plt.plot(steps, y_np.flatten(), 'r-')
plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
plt.draw()
plt.pause(0.5)
plt.ioff()
plt.show()
| xjtulyc/PKU_Weekly_Summary_repo | 20220719/cs231n assignment/assignment_3.py | assignment_3.py | py | 1,352 | python | en | code | 2 | github-code | 36 |
34343311578 | '''Write a Python program to count the number of strings where
the string length is 2 or more and the first and last character
are samefrom a given list of strings.'''
def give_str(words):
selected_words = []
for word in words:
if len(word) > 2 and word[0] == word[-1]:
selected_words.append(word)
return selected_words
words = ['abc','xyz','aba','1221']
print(give_str(words))
print("No of selected_words: "+str(len(give_str(words))))
| ABDULSABOOR1995/Python-List-Exercises | List Exercises/string_manipulation.py | string_manipulation.py | py | 479 | python | en | code | 2 | github-code | 36 |
73037104745 | import collections.abc
import copy
import typing
import enpheeph.injections.plugins.indexing.abc.indexingpluginabc
import enpheeph.utils.constants
import enpheeph.utils.dataclasses
import enpheeph.utils.enums
import enpheeph.utils.typings
class IndexingPlugin(
enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
):
# it is Optional so that we can use None
active_dimension_index: typing.Optional[
typing.List[enpheeph.utils.typings.ActiveDimensionIndexType]
]
dimension_dict: enpheeph.utils.typings.DimensionDictType
def __init__(
self, dimension_dict: enpheeph.utils.typings.DimensionDictType
) -> None:
self.dimension_dict = dimension_dict
self.reset_active_dimensions()
# to select a set of dimensions to be used as active when selecting tensor indices
# by default no dimension is considered active
def select_active_dimensions(
self,
dimensions: collections.abc.Container[enpheeph.utils.enums.DimensionType],
# if True, we will move all the indices so that the first index is 0
# and the last is -1
autoshift_to_boundaries: bool = False,
# if True we fill the empty indices with the filler
# if False we will skip them
fill_empty_index: bool = True,
# the filler to use, defaults to : for a single dimension,
# which is slice(None, None)
filler: typing.Any = slice(None, None),
) -> typing.List[enpheeph.utils.typings.ActiveDimensionIndexType]:
# we invert the dimension dict to easily look it up
# as we will be using the indices to look it up instead of the names
inverted_dimension_dict = {v: k for k, v in self.dimension_dict.items()}
# we get the highest index for both the positive and the negative indices
# in terms of absolute value
# we filter the Ellipsis to avoid mypy errors
# **NOTE**: improve the typing here
no_ellipsis_dimension_dict_values: typing.List[int] = typing.cast(
typing.List[int,],
[x for x in self.dimension_dict.values() if x != Ellipsis],
)
longest_positive_range: int = max(
(x for x in no_ellipsis_dimension_dict_values if x >= 0),
# we use -1 default so that range(-1 + 1) = []
default=-1,
)
longest_negative_range: int = min(
(x for x in no_ellipsis_dimension_dict_values if x < 0),
# we use the number right outside the range to get an empty list
default=0,
)
# this list contains all the possible indices including Ellipsis
total_indices: typing.List[enpheeph.utils.typings.DimensionIndexType] = list(
# we cover all the indices to the maximum,
# including the maximum itself,
# hence the + 1
range(longest_positive_range + 1),
)
# we need to split the list creation otherwise mypy complains of different types
total_indices += [Ellipsis]
total_indices += list(
# we create the list going from the most negative index to 0
# 0 is excluded
range(
longest_negative_range,
0,
),
)
# we save the filling and the valid indices in the following list
dimension_index: typing.List[
enpheeph.utils.typings.ActiveDimensionIndexType,
] = []
for index in total_indices:
# the index is saved if it is present in the dimensions to be selected
# here we still don't consider the autoshift
if (
index in inverted_dimension_dict
and inverted_dimension_dict[index] in dimensions
):
dimension_index.append(inverted_dimension_dict[index])
# if the index is not included, we then check if we need to fill it
# due to fill_empty_index
elif fill_empty_index:
dimension_index.append(filler)
if autoshift_to_boundaries:
# we remove all the elements at the beginning/end of the list
# that are fillers
i = 0
# infinite loop, but there is a break
# **NOTE**: probably it can be optimized further
while 1:
# we start from 0, and for each filler we match we remove it
if dimension_index[i] == filler:
del dimension_index[i]
# if the element is not a filler than the start is done and we check the
# end using -1
elif i == 0:
i = -1
# if both the element is not a filler and the index is at the end, it
# means we are done
else:
break
# we copy the dimensions and we return them
self.active_dimension_index = copy.deepcopy(dimension_index)
return copy.deepcopy(self.active_dimension_index)
# to reset the active dimensions to the empty dimension dict
def reset_active_dimensions(self) -> None:
self.active_dimension_index = None
# to join indices following the order provided by the active_dimension dict
def join_indices(
self,
dimension_indices: enpheeph.utils.typings.DimensionLocationIndexType,
) -> enpheeph.utils.typings.AnyIndexType:
if self.active_dimension_index is None:
raise ValueError(
"First select the active dimensions with select_active_dimensions"
)
index: typing.List[enpheeph.utils.typings.Index1DType] = []
for i in self.active_dimension_index:
# if we have an enum as index we check it from the given dimensions
if isinstance(i, enpheeph.utils.enums.DimensionType):
# to check if we have a sequence of sequence we want each element
# to be a sequence and have no elements which are integers, as
# the other allowed values represent sequences
sequence_of_sequence = isinstance(
dimension_indices[i], collections.abc.Sequence
) and not any(
isinstance(j, int)
# we use typing.cast to avoid mypy complaining
for j in typing.cast(
typing.Sequence[typing.Any],
dimension_indices[i],
)
)
# if it is a sequence of sequences we extend the index with all the
# sub-sequences, as it will cover multiple dimensions
if sequence_of_sequence:
index.extend(
typing.cast(
typing.Tuple[enpheeph.utils.typings.Index1DType, ...],
dimension_indices[i],
),
)
# otherwise it covers only 1 dimension so we append the element directly
else:
index.append(
typing.cast(
enpheeph.utils.typings.Index1DType,
dimension_indices[i],
),
)
# if the element is not an enum it will be a filler,
# so we append it directly
else:
index.append(i)
return copy.deepcopy(tuple(index))
# to filter a size/shape array depending on the active dimension index
# by selecting only the dimensions with the enum
def filter_dimensions(
self,
# a normal size/shape array
dimensions: typing.Sequence[int],
) -> typing.Tuple[int, ...]:
if self.active_dimension_index is None:
raise ValueError(
"First select the active dimensions with select_active_dimensions"
)
enum_types = [
e
for e in self.active_dimension_index
if isinstance(e, enpheeph.utils.enums.DimensionType)
]
active_dimension_index: typing.List[
enpheeph.utils.typings.ActiveDimensionIndexType
] = copy.deepcopy(self.active_dimension_index)
for e in enum_types:
if self.dimension_dict[e] == Ellipsis:
while len(dimensions) > len(active_dimension_index):
active_dimension_index.insert(active_dimension_index.index(e), e)
# this is executed if the loop exits normally
else:
if len(dimensions) != len(active_dimension_index):
raise ValueError(
"dimensions must be the same length of active_dimension_index "
"if no Ellipsis are used"
)
return_dimensions = []
for d, ind in zip(dimensions, active_dimension_index):
if isinstance(ind, enpheeph.utils.enums.DimensionType):
return_dimensions.append(d)
return tuple(return_dimensions)
| Alexei95/enpheeph | src/enpheeph/injections/plugins/indexing/indexingplugin.py | indexingplugin.py | py | 9,122 | python | en | code | 1 | github-code | 36 |
19509126438 | import pandas as pd
import requests
from datetime import datetime
DISCORD_URL = "https://discord.com/api/v9/invites/UQZpTQbCT4?with_counts=true"
STARTED_AT = datetime.now()
request = requests.get(DISCORD_URL)
data = request.json()
new_dataframe = pd.json_normalize(data, max_level=2)
new_dataframe["_started_at"] = STARTED_AT.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
old_dataframe = pd.read_parquet("../data/discord.parquet")
current_dataframe = pd.concat([new_dataframe, old_dataframe])
current_dataframe.to_parquet("../data/discord.parquet", compression="gzip")
| ndrluis/soberana-data-poc | extract/scripts/discord.py | discord.py | py | 566 | python | en | code | 2 | github-code | 36 |
8086245877 | #!/usr/bin/env python
"""
The file contains the class and methods for loading and aligning datasets
"""
from __future__ import print_function, division
import pickle
import numpy as np
from scipy.io import loadmat
import pandas as pd
from .utils import p2fa_phonemes
import warnings
from collections import OrderedDict
from copy import deepcopy
import sys
__author__ = "Prateek Vij"
__copyright__ = "Copyright 2017, Carnegie Mellon University"
__credits__ = ["Amir Zadeh", "Prateek Vij", "Soujanya Poria"]
__license__ = "GPL"
__version__ = "1.0.1"
__status__ = "Production"
if sys.version_info <= (3,):
FileNotFoundError = IOError # python 2 doesn't have FileNotFoundError
class Dataset(object):
"""Primary class for loading and aligning dataset"""
def __init__(self, dataset_file='', stored=False):
"""
Initialise the Dataset class. Support two loading mechanism -
from dataset files and from the pickle file, decided by the param
stored.
:param stored: True if loading from pickle, false if loading from
dataset feature files. Default False
:param dataset_file: Filepath to the file required to load dataset
features. CSV or pickle file depending upon the
loading mechanism
:timestamps: absolute or relative.
"""
self.feature_dict = None
self.timestamps = 'absolute' # this is fixed, we no longer support relative timestamps
self.stored = stored
self.dataset_file = dataset_file
self.phoneme_dict = p2fa_phonemes
self.loaded = False
def __getitem__(self, key):
"""Adding direct access of internal data"""
return self.feature_dict[key]
def keys(self):
"""Wrapper for .keys() for the feature_dict"""
return self.feature_dict.keys()
def items(self):
"""Wrapper for .items() for the feature_dict"""
return self.feature_dict.items()
def load(self):
"""
Loads feature dictionary for the input dataset
:returns: Dictionary of features for the dataset with each modality
as dictionary key
"""
# Load from the pickle file if stored is True
if self.stored:
self.dataset_pickle = self.dataset_file
self.feature_dict = pickle.load(open(self.dataset_pickle))
return self.feature_dict
# Load the feature dictionary from the dataset files
self.dataset_csv = self.dataset_file
self.feature_dict = self.controller()
self.loaded = True
return self.feature_dict
def controller(self):
"""
Validates the dataset csv file and loads the features for the dataset
from its feature files
"""
def validate_file(self):
data = pd.read_csv(self.dataset_csv, header=None)
data = np.asarray(data)
#data = data[:,:7]
self.dataset_info = {}
modality_count = len(data[0]) - 4
self.modalities = {}
for i in range(modality_count):
# key = 'modality_' + str(i)
key = str(data[0][i + 4])
info = {}
info["level"] = str(data[1][i + 4])
info["type"] = str(data[0][i + 4])
self.modalities[key] = info
for record in data[2:]:
video_id = str(record[0])
segment_id = str(record[1])
if video_id not in self.dataset_info:
self.dataset_info[video_id] = {}
if segment_id in self.dataset_info[video_id]:
raise NameError("Multiple instances of segment "
+ segment_id + " for video " + video_id)
segment_data = {}
segment_data["start"] = float(record[2])
segment_data["end"] = float(record[3])
for i in range(modality_count):
# key = 'modality_' + str(i)
key = str(data[0][i + 4])
segment_data[key] = str(record[i + 4])
self.dataset_info[video_id][segment_id] = segment_data
return
def load_features(self):
feat_dict = {}
data = self.dataset_info
modalities = self.modalities
# timestamps = self.timestamps
for key, value in modalities.items():
api = value['type']
level = value['level']
loader_method = Dataset.__dict__["load_" + api]
modality_feats = {}
print("Loading features for", api)
for video_id, video_data in data.items():
video_feats = {}
for segment_id, segment_data in video_data.items():
filepath = str(segment_data[key])
start = segment_data["start"]
end = segment_data["end"]
video_feats[segment_id] = loader_method(self,
filepath, start,
end, timestamps=self.timestamps,
level=level)
modality_feats[video_id] = video_feats
modality_feats = OrderedDict(sorted(modality_feats.items(), key=lambda x: x[0]))
feat_dict[key] = modality_feats
return feat_dict
validate_file(self)
feat_dict = load_features(self)
return feat_dict
def load_opensmile(self, filepath, start, end, timestamps='absolute', level='s'):
"""
Load OpenSmile Features from the file corresponding to the param
filepath
:param start: Start time of the segment
:param end: End time of the segment
:param filepath: Path to the opensmile feature files
:param level: 's' if the file contains features only for the segment,
i.e. interval (start, end), 'v' if for the entire video
:param timestamps: relative or absolute
:returns: List of tuples (feat_start, feat_end, feat_value)
corresponding to the features in the interval.
Note: Opensmile support features for entire segment or video only and
will return None if level is 'v' and start time is
"""
features = []
start_time, end_time = start, end
if timestamps == 'relative':
start_time = 0.0
end_time = end - start
if level == 's' or start == 0.0:
feats = open(filepath).readlines()[-1].strip().split(',')[1:]
feats = [float(feat_val) for feat_val in feats]
feat_val = np.asarray(feats, dtype=np.float32)
features.append((start_time, end_time, feat_val))
else:
print("Opensmile support features for the entire segment")
return None
return features
def load_covarep(self, filepath, start, end, timestamps='absolute', level='s'):
"""
Load COVAREP Features from the file corresponding to the param
filepath
:param start: Start time of the segment
:param end: End time of the segment
:param filepath: Path to the opensmile feature files
:param level: 's' if the file contains features only for the segment,
i.e. interval (start, end), 'v' if for the entire video
:param timestamps: relative or absolute
:returns: List of tuples (feat_start, feat_end, feat_value)
corresponding to the features in the interval.
"""
features = []
time_period = 0.01
try:
f_content = loadmat(filepath)
except (FileNotFoundError, TypeError, ValueError) as e: # depending on loadmat("XXX") or loadmat("XXX.mat"), error will be different
print(".mat file cannot load at {}!".format(filepath))
return [] # if no feature file present, return an empty list
feats = f_content['features']
start_time, end_time = start, end
if timestamps == "relative":
start_time, end_time = 0.0, end - start
if level == 's':
feat_start = start_time
for feat in feats:
feat_end = feat_start + time_period
feat_val = np.asarray(feat)
features.append((max(feat_start - start_time, 0), max(feat_end - start_time, 0), feat_val))
feat_start += time_period
else:
feat_count = feats.shape[0]
start_index = int(min((start / time_period), feat_count))
end_index = int(min((end / time_period), feat_count))
feat_start = start_time
for feat in feats[start_index:end_index]:
feat_end = feat_start + time_period
feat_val = np.asarray(feat)
features.append((max(feat_start - start_time, 0), max(feat_end - start_time, 0), feat_val))
feat_start += time_period
return features
def load_phonemes(self, filepath, start, end, timestamps='relative', level='s'):
"""
Load P2FA phonemes as Features from the file corresponding to the
param filepath
:param start: Start time of the segment
:param end: End time of the segment
:param filepath: Path to the opensmile feature files
:param level: 's' if the file contains features only for the segment,
i.e. interval (start, end), 'v' if for the entire video
:param timestamps: relative or absolute
:returns: List of tuples (feat_start, feat_end, feat_value)
corresponding to the features in the interval.
"""
features = []
start_time, end_time = start, end
if timestamps == "relative":
start_time, end_time = 0.0, end - start
if level == 's':
with open(filepath, 'r') as f_handle:
for line in f_handle.readlines():
line = line.strip()
if not line:
break
feat_start = float(line.split(",")[0])
feat_end = float(line.split(",")[1])
feat_val = [float(val) for val in line.split(",")[2:]]
feat_val = np.asarray(feat_val)
features.append((max(feat_start, 0), max(feat_end, 0), feat_val))
else:
with open(filepath, 'r') as f_handle:
for line in f_handle.readlines():
line = line.strip()
if not line:
break
feat_start = float(line.split(",")[0])
feat_end = float(line.split(",")[1])
feat_time = feat_end - feat_start
if ((feat_start <= start and feat_end > end)
or (feat_start >= start and feat_end < end)
or (feat_start <= start
and start - feat_start < feat_time / 2)
or (feat_start >= start
and end - feat_start > feat_time / 2)):
feat_start = feat_start - start
feat_end = feat_end - start
feat_val = [float(val) for val in line.split(",")[2:]]
feat_val = np.asarray(feat_val)
features.append((max(feat_start, 0), max(feat_end, 0), feat_val))
return features
def load_embeddings(self, filepath, start, end, timestamps='relative', level='s'):
"""
Load Word Embeddings from the file corresponding to the param
filepath
:param start: Start time of the segment
:param end: End time of the segment
:param filepath: Path to the opensmile feature files
:param level: 's' if the file contains features only for the segment,
i.e. interval (start, end), 'v' if for the entire video
:param timestamps: relative or absolute
:returns: List of tuples (feat_start, feat_end, feat_value)
corresponding to the features in the interval.
"""
features = []
start_time, end_time = start, end
if timestamps == "relative":
start_time, end_time = 0.0, end - start
if level == 's':
with open(filepath, 'r') as f_handle:
for line in f_handle.readlines():
line = line.strip()
if not line:
break
feat_start = float(line.split(",")[0])
feat_end = float(line.split(",")[1])
feat_val = [float(val) for val in line.split(",")[2:]]
feat_val = np.asarray(feat_val)
features.append((max(feat_start, 0), max(feat_end, 0), feat_val))
else:
with open(filepath, 'r') as f_handle:
for line in f_handle.readlines():
line = line.strip()
if not line:
break
feat_start = float(line.split(",")[0])
feat_end = float(line.split(",")[1])
feat_time = feat_end - feat_start
if ((feat_start <= start and feat_end > end)
or (feat_start >= start and feat_end < end)
or (feat_start <= start
and start - feat_start < feat_time / 2)
or (feat_start >= start
and end - feat_start > feat_time / 2)):
feat_start = feat_start - start
feat_end = feat_end - start
feat_val = [float(val) for val in line.split(",")[2:]]
feat_val = np.asarray(feat_val)
features.append((max(feat_start, 0), max(feat_end, 0), feat_val))
return features
def load_words(self, filepath, start, end, timestamps='relative', level='s'):
"""
Load one hot embeddings for words as features from the file
corresponding to the param filepath
:param start: Start time of the segment
:param end: End time of the segment
:param filepath: Path to the opensmile feature files
:param level: 's' if the file contains features only for the segment,
i.e. interval (start, end), 'v' if for the entire video
:param timestamps: relative or absolute
:returns: List of tuples (feat_start, feat_end, feat_value)
corresponding to the features in the interval.
"""
features = []
start_time, end_time = start, end
if timestamps == "relative":
start_time, end_time = 0.0, end - start
if level == 's':
with open(filepath, 'r') as f_handle:
for line in f_handle.readlines():
line = line.strip()
if not line:
break
feat_start = float(line.split(",")[0])
feat_end = float(line.split(",")[1])
try:
feat_val = int(line.split(",")[2])
except:
print(filepath, start, end)
# feat_val = [float(val) for val in line.split(",")[2:]]
# assert len(feat_val) == 1
# feat_val = np.asarray(feat_val)[0]
#print(feat_start, feat_end)
#assert False
features.append((max(feat_start, 0), max(feat_end, 0), feat_val))
else:
with open(filepath, 'r') as f_handle:
for line in f_handle.readlines():
line = line.strip()
if not line:
break
feat_start = float(line.split(",")[0])
feat_end = float(line.split(",")[1])
feat_time = feat_end - feat_start
if ((feat_start <= start and feat_end > end)
or (feat_start >= start and feat_end < end)
or (feat_start <= start
and start - feat_start < feat_time / 2)
or (feat_start >= start
and end - feat_start > feat_time / 2)):
feat_start = feat_start - start
feat_end = feat_end - start
feat_val = [float(val) for val in line.split(",")[2:]]
feat_val = np.asarray(feat_val)
features.append((max(feat_start, 0), max(feat_end, 0), feat_val))
return features
def load_openface(self, filepath, start, end, timestamps='absolute', level='s'):
"""
Load OpenFace features from the file corresponding to the param
filepath
:param start: Start time of the segment
:param end: End time of the segment
:param filepath: Path to the opensmile feature files
:param level: 's' if the file contains features only for the segment,
i.e. interval (start, end), 'v' if for the entire video
:param timestamps: relative or absolute
:returns: List of tuples (feat_start, feat_end, feat_value)
corresponding to the features in the interval.
"""
features = []
time_period = 0.0333333
start_time, end_time = start, end
if timestamps == "relative":
start_time, end_time = 0.0, end - start
if level == 's':
with open(filepath, 'r') as f_handle:
for line in f_handle.readlines()[1:]:
line = line.strip()
if not line:
break
feat_start = float(line.split(",")[1])
feat_end = feat_start + time_period
feat_val = [float(val) for val in line.split(",")[2:]]
feat_val = np.asarray(feat_val, dtype=np.float32)
features.append((max(feat_start, 0), max(feat_end, 0), feat_val))
else:
with open(filepath, 'r') as f_handle:
for line in f_handle.readlines()[1:]:
line = line.strip()
if not line:
break
feat_start = float(line.split(",")[1])
if (feat_start >= start and feat_start < end):
# To adjust the timestamps
feat_start = feat_start - start
feat_end = feat_start + time_period
feat_val = [float(val) for val in line.split(",")[2:]]
feat_val = np.asarray(feat_val, dtype=np.float32)
features.append((max(feat_start, 0), max(feat_end, 0), feat_val))
return features
# note that this is implicity new facet
def load_facet(self, filepath, start, end, timestamps='absolute', level='v'):
"""
Load FACET features from the file corresponding to the param
filepath
:param start: Start time of the segment
:param end: End time of the segment
:param filepath: Path to the opensmile feature files
:param level: 's' if the file contains features only for the segment,
i.e. interval (start, end), 'v' if for the entire video
:param timestamps: relative or absolute
:returns: List of tuples (feat_start, feat_end, feat_value)
corresponding to the features in the interval.
"""
features = []
# load a subset of current segment and infer its format
start_row = 0
start_col = 0
with open(filepath, 'r') as f_handle:
splitted = []
for line in f_handle.readlines()[0:10]:
splitted.append(line.split(","))
# check if the first row is a header by checking if the first field is a number
try:
float(splitted[start_row][start_col])
except:
start_row = 1
# check if the first column is a index column by checking if it increments by 1 everytime
for i in range(1, len(splitted) - 1):
if (float(splitted[i+1][0]) - float(splitted[i][0])) != 1:
start_col = 0
break
start_col = 1
time_period = float(splitted[start_row][start_col])
start_time, end_time = start, end
# if timestamps == "relative":
# start_time, end_time = 0.0, end - start
if level == 's':
with open(filepath, 'r') as f_handle:
for line in f_handle.readlines()[start_row:]:
line = line.strip()
if not line:
break
feat_start = float(line.split(",")[start_col])
feat_end = feat_start + time_period
feat_val = []
for val in line.split(",")[start_col + 1:-1]:
try:
feat_val.append(float(val))
except:
feat_val.append(0.0)
feat_val = np.asarray(feat_val, dtype=np.float32)
features.append((max(feat_start, 0), max(feat_end, 0), feat_val))
else:
with open(filepath, 'r') as f_handle:
for line in f_handle.readlines()[start_row:]:
line = line.strip()
if not line:
break
feat_start = float(line.split(",")[start_col])
if (feat_start >= start and feat_start < end):
# To adjust the timestamps
feat_start = feat_start - start
feat_end = feat_start + time_period
feat_val = []
for val in line.split(",")[start_col + 1:-1]:
try:
feat_val.append(float(val))
except:
feat_val.append(0.0)
feat_val = np.asarray(feat_val, dtype=np.float32)
features.append((max(feat_start, 0), max(feat_end, 0), feat_val))
return features
def load_facet1(self, filepath, start, end, timestamps='absolute', level='v'):
"""
Load FACET features from the file corresponding to the param
filepath
:param start: Start time of the segment
:param end: End time of the segment
:param filepath: Path to the opensmile feature files
:param level: 's' if the file contains features only for the segment,
i.e. interval (start, end), 'v' if for the entire video
:param timestamps: relative or absolute
:returns: List of tuples (feat_start, feat_end, feat_value)
corresponding to the features in the interval.
"""
return self.load_facet(filepath, start, end, timestamps=timestamps, level=level)
def load_facet2(self, filepath, start, end, timestamps='absolute', level='v'):
"""
Load FACET features from the file corresponding to the param
filepath
:param start: Start time of the segment
:param end: End time of the segment
:param filepath: Path to the opensmile feature files
:param level: 's' if the file contains features only for the segment,
i.e. interval (start, end), 'v' if for the entire video
:param timestamps: relative or absolute
:returns: List of tuples (feat_start, feat_end, feat_value)
corresponding to the features in the interval.
"""
return self.load_facet(filepath, start, end, timestamps=timestamps, level=level)
def align(self, align_modality):
aligned_feat_dict = {}
modalities = self.modalities
alignments = self.get_alignments(align_modality)
for modality in modalities:
if modality == align_modality:
aligned_feat_dict[modality] = OrderedDict(sorted(self.feature_dict[modality].items(), key=lambda x: x[0]))
continue
aligned_modality = self.align_modality(modality, alignments)
aligned_feat_dict[modality] = OrderedDict(sorted(aligned_modality.items(), key=lambda x: x[0]))
self.aligned_feature_dict = aligned_feat_dict
return aligned_feat_dict
def get_alignments(self, modality):
alignments = {}
aligned_feat_dict = self.feature_dict[modality]
for video_id, segments in aligned_feat_dict.items():
segment_alignments = {}
for segment_id, features in segments.items():
segment_alignments[segment_id] = []
for value in features:
timing = (value[0], value[1])
segment_alignments[segment_id].append(timing)
alignments[video_id] = segment_alignments
return alignments
    def align_modality(self, modality, alignments, merge_type="mean"):
        """
        Re-sample `modality`'s features onto the intervals in `alignments`
        by overlap-weighted averaging.

        :param modality: key into self.feature_dict
        :param alignments: {video_id: {segment_id: [(start, end), ...]}}
        :param merge_type: currently unused; only overlap-weighted mean is
            implemented
        :returns: {video_id: {segment_id: [(start, end, merged_value), ...]}}
        """
        aligned_feat_dict = {}
        modality_feat_dict = self.feature_dict[modality]
        warning_hist = set()  # (video_id, segment_id) pairs already warned about
        for video_id, segments in alignments.items():
            aligned_video_feats = {}
            for segment_id, feat_intervals in segments.items():
                aligned_segment_feat = []
                for start_interval, end_interval in feat_intervals:
                    time_interval = end_interval - start_interval
                    feats = modality_feat_dict[video_id][segment_id]
                    try:
                        if modality == "words":
                            # Zero of the same type as a word feature
                            # (works for both ints and arrays).
                            aligned_feat = feats[0][2] - feats[0][2]
                        else:
                            aligned_feat = np.zeros(len(feats[0][2]))
                    except:
                        # Segment has no features for this modality: warn once,
                        # then fall back to another segment of the same video
                        # just to recover the feature dimensionality.
                        if (video_id, segment_id) not in warning_hist:
                            print("\nModality {} for video {} segment {} is (partially) missing and is thus being replaced by zeros!\n".format(modality.split("_")[-1], video_id, segment_id))
                            warning_hist.add((video_id, segment_id))
                        # print(modality, video_id, segment_id, feats)
                        for sid, seg_data in modality_feat_dict[video_id].items():
                            if seg_data != []:
                                feats = seg_data
                                break
                        try:
                            if modality == "words":
                                aligned_feat = feats[0][2] - feats[0][2]
                            else:
                                aligned_feat = np.zeros(len(feats[0][2]))
                        except:
                            # Entire video is empty: degrade to a scalar 0 /
                            # empty array; the (0,)-shape check below lets the
                            # first overlapping feature define the shape.
                            if modality == "words":
                                aligned_feat = 0
                            else:
                                aligned_feat = np.zeros(0)
                    # Accumulate every feature tuple that overlaps this
                    # interval, weighted by its fractional overlap.
                    for feat_tuple in feats:
                        feat_start = feat_tuple[0]
                        feat_end = feat_tuple[1]
                        feat_val = feat_tuple[2]
                        if (feat_start < end_interval
                                and feat_end >= start_interval):
                            feat_weight = (min(end_interval, feat_end) -
                                           max(start_interval, feat_start)) / time_interval
                            weighted_feat = np.multiply(feat_val, feat_weight)
                            if np.shape(aligned_feat) == (0,):
                                aligned_feat = weighted_feat
                            else:
                                aligned_feat = np.add(aligned_feat, weighted_feat)
                    aligned_feat_tuple = (start_interval, end_interval,
                                          aligned_feat)
                    aligned_segment_feat.append(aligned_feat_tuple)
                aligned_video_feats[segment_id] = aligned_segment_feat
            aligned_feat_dict[video_id] = aligned_video_feats
        return aligned_feat_dict
@staticmethod
def merge(dataset1, dataset2):
# ensure the merged objects are indeed Datasets
assert isinstance(dataset1, Dataset)
assert isinstance(dataset2, Dataset)
# merge the feature_dict and modalities attributes
merged_modalities = Dataset.merge_dict(dataset1.modalities, dataset2.modalities)
merged_feat_dict = Dataset.merge_dict(dataset1.feature_dict, dataset2.feature_dict)
mergedDataset = Dataset()
mergedDataset.feature_dict = merged_feat_dict
mergedDataset.modalities = merged_modalities
return mergedDataset
@staticmethod
def merge_dict(dict1, dict2):
merged = deepcopy(dict1)
merged.update(dict2)
return merged
| codeislife99/Multimodal_Emotion_Analysis | mmdata/dataset.py | dataset.py | py | 29,605 | python | en | code | 1 | github-code | 36 |
6864404682 | import math, random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
import matplotlib.pyplot as plt
USE_CUDA = torch.cuda.is_available()
Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
from collections import deque
env_id = "CartPole-v0"
env = gym.make(env_id)
# env = env.unwrapped
path = "/Users/saumya/Desktop/CriticalStates_results/"
results_dir = "vanillaDQN"
'''
Double DQN code adapted and modified from https://github.com/higgsfield/RL-Adventure/blob/master/2.double%20dqn.ipynb
'''
class ReplayBuffer(object):
    """FIFO experience-replay memory with a fixed capacity."""

    def __init__(self, capacity):
        # deque silently drops the oldest transition once capacity is reached.
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Store one transition; states gain a leading batch axis of size 1."""
        self.buffer.append((np.expand_dims(state, 0), action, reward,
                            np.expand_dims(next_state, 0), done))

    def sample(self, batch_size):
        """Return a uniform random mini-batch as (states, actions, rewards,
        next_states, dones); the state arrays are stacked along axis 0."""
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        return (np.concatenate(states), actions, rewards,
                np.concatenate(next_states), dones)

    def __len__(self):
        return len(self.buffer)
class DQN(nn.Module):
    """Small MLP Q-network for CartPole.

    Fixes over the original: the constructor now actually uses its
    `num_inputs`/`num_actions` parameters instead of reading the module-level
    `env` object, and `act` no longer relies on the deprecated
    `autograd.Variable(..., volatile=True)` wrapper or the global `env`.
    Callers already pass `env.observation_space.shape[0]` and
    `env.action_space.n`, so behaviour is unchanged.
    """

    def __init__(self, num_inputs, num_actions):
        super(DQN, self).__init__()
        self.num_actions = num_actions
        # Function approximator for the Q function (two 32-unit hidden layers).
        self.layers = nn.Sequential(
            nn.Linear(num_inputs, 32),
            nn.ReLU(),
            nn.Linear(32, 32),
            nn.ReLU(),
            nn.Linear(32, num_actions),
        )

    def forward(self, x):
        return self.layers(x)

    def act(self, state, epsilon):
        """
        Choose an action with an epsilon-greedy strategy: greedy w.r.t. the
        Q-values with probability 1-epsilon, uniformly random otherwise.
        """
        if random.random() > epsilon:
            with torch.no_grad():  # inference only; replaces Variable(volatile=True)
                state_t = torch.FloatTensor(state).unsqueeze(0)
                # Keep the input on the same device as the model parameters.
                state_t = state_t.to(next(self.parameters()).device)
                q_value = self.forward(state_t)
            action = q_value.max(1)[1].item()
        else:
            action = random.randrange(self.num_actions)
        return action
def update_target(current_model, target_model):
    """Hard-update: copy all of current_model's weights into target_model."""
    snapshot = current_model.state_dict()
    target_model.load_state_dict(snapshot)
def compute_td_loss(batch_size):
    """
    Compute the TD loss after sampling transitions(of size - "batch_size") from the replay buffer

    Relies on the module-level globals `replay_buffer`, `current_model`,
    `target_model`, `gamma`, `optimizer` and the `Variable` wrapper; performs
    one optimizer step as a side effect and returns the loss tensor.
    """
    state, action, reward, next_state, done = replay_buffer.sample(batch_size)
    state = Variable(torch.FloatTensor(np.float32(state)))
    next_state = Variable(torch.FloatTensor(np.float32(next_state)))
    action = Variable(torch.LongTensor(action))
    reward = Variable(torch.FloatTensor(reward))
    done = Variable(torch.FloatTensor(done))
    q_values = current_model(state)
    next_q_values = current_model(next_state)
    next_q_state_values = target_model(next_state)
    q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
    # Double-DQN target: the *current* network picks the argmax action,
    # the *target* network evaluates it.
    next_q_value = next_q_state_values.gather(1, torch.max(next_q_values, 1)[1].unsqueeze(1)).squeeze(1)
    # (1 - done) zeroes the bootstrap term on terminal transitions.
    expected_q_value = reward + gamma * next_q_value * (1 - done)
    # Wrapping expected_q_value.data blocks gradients through the target.
    loss = (q_value - Variable(expected_q_value.data)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss
def plot(frame_idx, rewards, losses, iter):
    """
    Save a two-panel figure (episode rewards, TD losses) to the result
    directory built from the module-level `path` and `results_dir`.

    :param frame_idx: current frame count (used in the title)
    :param rewards: per-episode rewards collected so far
    :param losses: per-update TD losses collected so far
    :param iter: training-run index (part of the output file name)
    """
    # clear_output(True)
    plt.figure(figsize=(20,5))
    plt.subplot(131)
    # plt.title('frame %s. reward: %s' % (frame_idx, np.mean(rewards[-10:])))
    plt.title('frame %s' % (frame_idx))
    plt.plot(rewards)
    plt.subplot(132)
    plt.title('loss')
    plt.plot(losses)
    plt.savefig(path+results_dir+"/cartpole_dqn_plots_iter_"+str(iter))
def load_model(model_path):
    """
    Build a DQN sized from the module-level `env` and load the saved
    weights from `model_path` onto the CPU (works even if the checkpoint
    was written on a GPU machine).
    """
    current_model = DQN(env.observation_space.shape[0], env.action_space.n)
    current_model.load_state_dict(torch.load(model_path,map_location=torch.device('cpu')))
    return current_model
def play(model_path):
    """
    Play or rollout the learnt policy and observe the mean reward obtained over 1000 episodes

    Loads the checkpoint at `model_path` and acts greedily (epsilon=0) in the
    module-level `env`, printing the mean episode reward at the end.
    """
    current_model = load_model(model_path)
    avg_test_reward = []
    for t in range(1000):
        # print('play: ',t)
        state = env.reset()
        done = False
        reward_per_episode = 0
        while not done:
            # epsilon=0 -> purely greedy policy evaluation
            action = current_model.act(state, 0)
            next_state, reward, done, info = env.step(action)
            # env.render()
            reward_per_episode+=reward
            if done:
                # print('rewards: ',reward_per_episode)
                avg_test_reward.append(reward_per_episode)
                break
            else:
                state = next_state
    env.close()
    print(np.mean(avg_test_reward))
if __name__ == "__main__":
    ## Hyperparameters
    epsilon_start = 1.0
    epsilon_final = 0.01
    epsilon_decay = 500
    num_frames = 400000 # increased num of timesteps from 160000
    batch_size = 64
    gamma = 0.99
    update_target_net = 100  # hard-update period (in frames) for the target net
    learning_rate = 1e-4 # reduced learning rate from 1e-3
    # Exponentially decaying exploration rate: epsilon_start -> epsilon_final.
    epsilon_by_frame = lambda frame_idx: epsilon_final + (epsilon_start - epsilon_final) * math.exp(
        -1. * frame_idx / epsilon_decay)
    ## Running for 5 iteration to obtain a mean and std of the reward plots
    # NOTE(review): `iter` shadows the builtin of the same name.
    for iter in range(5):
        print("iteration: ",iter)
        current_model = DQN(env.observation_space.shape[0], env.action_space.n)
        target_model = DQN(env.observation_space.shape[0], env.action_space.n)
        if USE_CUDA:
            current_model = current_model.cuda()
            target_model = target_model.cuda()
        optimizer = optim.Adam(current_model.parameters(), lr = learning_rate)
        replay_buffer = ReplayBuffer(100000) # increased buffer size from 1000
        update_target(current_model, target_model)
        losses = []
        all_rewards = []
        episode_reward = 0
        ep_num = 0
        ## If the environment is solved is_win is set true
        is_win = False
        state = env.reset()
        for frame_idx in range(1, num_frames + 1):
            epsilon = epsilon_by_frame(frame_idx)
            action = current_model.act(state, epsilon)
            next_state, reward, done, _ = env.step(action)
            replay_buffer.push(state, action, reward, next_state, done)
            state = next_state
            episode_reward += reward
            if done:
                state = env.reset()
                all_rewards.append(episode_reward)
                episode_reward = 0
                ep_num+=1
                avg_reward = float(np.mean(all_rewards[-100:]))
                print('Best 100-episodes average reward', ep_num, avg_reward)
                ## Using the following "solving" criteria
                if len(all_rewards) >= 100 and avg_reward >= 198 and all_rewards[-1] > 198:
                    if not is_win:
                        # First time the criterion holds: save and announce.
                        is_win = True
                        torch.save(current_model.state_dict(), path+results_dir+'/CartPole_dqn_model_iter_'+str(iter))
                        print('Ran %d episodes best 100-episodes average reward is %3f. Solved after %d trials ✔' % (
                            ep_num, avg_reward, ep_num - 100))
                        last_saved = ep_num
                    # Keep overwriting the "Final" checkpoint with the latest
                    # model that satisfies the solving criterion.
                    torch.save(current_model.state_dict(),
                               path+results_dir+'/Final_CartPole_dqn_model_iter_' + str(
                                   iter))
            ## Update the loss
            if len(replay_buffer) > batch_size:
                loss = compute_td_loss(batch_size)
                losses.append(loss.item())
            if frame_idx % 200 == 0:
                plot(frame_idx, all_rewards, losses, iter)
            ## Update the target network
            if frame_idx % update_target_net == 0:
                update_target(current_model, target_model)
        ## Save the reward list - rewards obtained per episode
        np.save(path+results_dir+"/rewards_iter_"+str(iter),all_rewards)
        if not is_win:
            print('Did not solve after %d episodes' % ep_num)
            torch.save(current_model.state_dict(), path+results_dir+'/CartPole_dqn_model_iter_'+str(iter))
        # play(path+results_dir+'/CartPole_dqn_model_iter_'+str(iter))
        # play(path+results_dir+'/Final_CartPole_dqn_model_iter_' + str(iter))
# Iteration: 0
# 199.969
# 200.0
# iteration: 1
# 200.0
# 195.842
# iteration: 2
# 200.0
# 182.442
# iteration: 3
# 200.0
# 200.0
# iteration: 4
# 197.461
# 199.972
| saumyasinha/learning_better_policies_with_critical_states | Qlearning/dqn_for_CartPole.py | dqn_for_CartPole.py | py | 8,842 | python | en | code | 0 | github-code | 36 |
34338165702 | # https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/
from typing import List
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node (LeetCode definition)."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
        """Rebuild the binary tree from its preorder and inorder traversals.

        Assumes node values are distinct (the LeetCode 105 guarantee;
        duplicates were already ambiguous for the original `list.index`
        version). Runs in O(n) using a value -> inorder-index map instead of
        the original O(n^2) `inorder.index` + list-slicing recursion.
        """
        if not preorder:
            return None
        if len(preorder) == 1:
            # Preserve the original behaviour for a single-node input even
            # when `inorder` is empty or inconsistent.
            return TreeNode(preorder[0])

        inorder_pos = {val: i for i, val in enumerate(inorder)}
        next_pre = 0  # index of the next subtree root within `preorder`

        def build(lo: int, hi: int) -> TreeNode:
            # Build the subtree whose inorder values occupy inorder[lo:hi+1].
            nonlocal next_pre
            if lo > hi:
                return None
            root = TreeNode(preorder[next_pre])
            next_pre += 1
            mid = inorder_pos[root.val]
            root.left = build(lo, mid - 1)
            root.right = build(mid + 1, hi)
            return root

        return build(0, len(inorder) - 1)
| 0x0400/LeetCode | p105.py | p105.py | py | 788 | python | en | code | 0 | github-code | 36 |
70955291624 | """add admin flag to user
Revision ID: dd535b1f37a1
Revises: 4519159d3019
Create Date: 2019-01-06 13:39:21.042745
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dd535b1f37a1'
down_revision = '4519159d3019'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable boolean `is_admin` column to the `users` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    # batch_alter_table is used so the change also works on SQLite,
    # which cannot ALTER columns in place.
    with op.batch_alter_table('users', schema=None) as batch_op:
        batch_op.add_column(sa.Column('is_admin', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the upgrade by dropping the `is_admin` column from `users`."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('users', schema=None) as batch_op:
        batch_op.drop_column('is_admin')
    # ### end Alembic commands ###
| euphwes/cubers.io | migrations/versions/014_dd535b1f37a1_add_admin_flag_to_user.py | 014_dd535b1f37a1_add_admin_flag_to_user.py | py | 797 | python | en | code | 27 | github-code | 36 |
4084609951 | import customtkinter as ctk
class ConfirmDeleteOldestBackupDialog(ctk.CTkToplevel):
    """Confirmation dialog shown when saving a new backup would exceed the
    10-file limit, so the oldest backup file would have to be deleted.

    Yes/Cancel answers are forwarded to
    controller.save_backup_dialog_event(input=..., dialog=self).
    """

    def __init__(self, parent, controller, *args, **kwargs):
        super().__init__(parent, *args, **kwargs)
        # Configure variables
        self.controller = controller
        self.label_text = "You are only allowed 10 backup files. If you save\nthis backup the oldest backup file will be deleted.\n\nAre you sure you want to continue with the backup?"
        # Configure window
        self.geometry("400x180")
        # Bug fix: the original did `self.title = f"..."`, which overwrote the
        # CTkToplevel.title *method* with a string and never set the window
        # title. Call the method instead (the f-string had no placeholders).
        self.title("Confirm delete last backup.")
        # Configure grid layout
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure((0, 1), weight=1)
        # Create label
        self.label = ctk.CTkLabel(self, text=self.label_text)
        self.label.grid(row=0, column=0, columnspan=2, padx=20, pady=20,
                        sticky="nsew")
        # Create button YES
        self.yes_button = ctk.CTkButton(self, text="Yes", command=lambda: self.controller.save_backup_dialog_event(input=True, dialog=self))
        self.yes_button.grid(row=1, column=0, padx=20, pady=20, sticky="nsew")
        # Create button NO (labelled "Cancel")
        self.no_button = ctk.CTkButton(self, text="Cancel", command=lambda: self.controller.save_backup_dialog_event(input=False, dialog=self))
        self.no_button.grid(row=1, column=1, padx=20, pady=20, sticky="nsew")
| berndklare/flashcards | dialogs/confirm_delete_oldest_backup_dialog.py | confirm_delete_oldest_backup_dialog.py | py | 1,389 | python | en | code | 0 | github-code | 36 |
26425057259 | #coding: latin-1
#
# Exemple 5.5 dimensionnement approche suédoise
#
from geothermal_md import *
import numpy as np
from matplotlib.pyplot import *
from time import *

# fichier de fonction g (Eskilson) tabulées pour champ 2 x 2 pour b = 0.05,0.1,0.2,0.4,0.8

# Problem parameters (dimensionless quantities are scaled by rb).
zo = 0
rb = 0.25/2
b = 0.05
H = 4
z = 1.8
Ht = H/rb
bt = b/rb
t = np.arange(0,1500,50)   # time vector [minutes]
Cp = 800
k = 0.26
rho = 1397
alp = k/(rho*Cp)           # thermal diffusivity
alm = alp*60               # diffusivity per minute, to match t in minutes
Fov = alm*t/rb**2          # Fourier numbers
r = 0.25
rt = r/rb
zot = zo/rb
zt = z/rb
phi = 0
q = 400
qp = q/H                   # heat rate per unit length
nt = len(Fov)


def evaluate_g_profile(g_of_fo):
    """Evaluate DT(Fo) = qp*g(Fo)/k over every Fourier number in Fov,
    printing the elapsed wall-clock time (the original file repeated this
    loop five times verbatim)."""
    profile = np.zeros(nt)
    tic = time()
    for i in range(0, nt):
        profile[i] = qp * g_of_fo(Fov[i]) / k
    toc = time() - tic
    print('fin du calcul de la fonction g', toc)
    return profile


# Temperature responses for the five source models being compared.
g1 = evaluate_g_profile(lambda Fo: G_function_fsc(Fo, rt, zt, Ht, zot))
g2 = evaluate_g_profile(lambda Fo: G_function_ring(Fo, rt, zt, Ht, zot, bt))
g3 = evaluate_g_profile(lambda Fo: G_function_spiral(Fo, rt, zt, phi, Ht, zot, bt))
g4 = evaluate_g_profile(lambda Fo: G_function_ils(Fo, rt))
g5 = evaluate_g_profile(lambda Fo: G_function_ics(Fo, rt))

# Plot all five responses against log(Fo).
x = np.log(Fov)
#x = t
p1 = plot(x,g1,color = 'k',linewidth = 2,label = 'Man''s solid cylinder')
p2 = plot(x,g2,color = 'k',linestyle = 'None',marker = 'o',markersize=8,label = 'Cui ring''s model')
p3 = plot(x,g3,color = 'k',linestyle = 'None',marker = 'x',markersize=8,label = 'Park''s spiral model')
p4 = plot(x,g4,color = 'k',linestyle = 'None',marker = '+',markersize=8,label = 'ILS')
p5 = plot(x,g5,color = 'k',linestyle = 'None',marker = 's',markersize=8,label = 'ICS')
legend(fontsize = 14)
ax = gca()
grid(True,which='both')
fts = 16
ftst = 14
xlabel(' log(Fo)',fontname='Times new Roman',fontsize = fts)
ylabel(' $ \Delta T$',fontsize = fts,fontname='Times new Roman')
xticks(fontsize=ftst)
yticks(fontsize=ftst)
tight_layout()
show()
| LouisLamarche/Fundamentals-of-Geothermal-Heat-Pump-Systems | chapter13/Example13_5.py | Example13_5.py | py | 2,165 | python | en | code | 1 | github-code | 36 |
5987566968 | from django.urls import path
from .views import ListingsView, ListingView, SearchView
# URL routes for the listings app.
urlpatterns = [
    path('', ListingsView.as_view(),name="ListALL"),
    path('search', SearchView.as_view()),
    path('<slug>', ListingView.as_view()), # Lists a particular listing, looked up by its slug field rather than its PK (id).
]
4863814184 | import numpy as np
import pandas as pd
import itertools
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
# models that are being considered
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
"""
def grid_search_cross_validation(x_train, y_train, grid, model):
gridCV = GridSearchCV(model, grid, cv=10)
gridCV.fit(x_train, y_train.T.squeeze())
return gridCV.best_params_
def get_svc_best_params(x_train, y_train):
kernel = ['poly', 'sigmoid']
degree = [3, 4, 5]
tol = [ 10**(-3)]
grid = {
'kernel' : kernel,
'degree' : degree,
}
res = grid_search_cross_validation(x_train, y_train, grid, SVC())
print(res)
"""
def print_accuracy_scores(performance_data):
    """Print the accuracy of each model.

    :param performance_data: iterable of (model_name, predictions, ground_truth)
    """
    print('Accuracy scores:')
    for model_name, pred, test in performance_data:
        # Fix: ground truth goes to y_true and predictions to y_pred. The
        # original call had them swapped; accuracy happens to be symmetric,
        # but the corrected order matches the scikit-learn contract.
        acc = metrics.accuracy_score(y_true=test, y_pred=pred, normalize=True)
        print(model_name + ' accuracy: ', acc)
def print_f1_score(performance_data):
    """Print the macro-averaged F1 score of each model.

    :param performance_data: iterable of (model_name, predictions, ground_truth)
    """
    print('f1 scores:')
    for model_name, pred, test in performance_data:
        # Fix: ground truth goes to y_true and predictions to y_pred (the
        # original call had them swapped, contradicting the scikit-learn API).
        acc = metrics.f1_score(y_true=test, y_pred=pred, average='macro')
        print(model_name + ' f1 score: ', acc)
def cross_validation_acc_score(x, y, clf):
    """Print the mean accuracy of `clf` under 10-fold stratified CV on (x, y)."""
    folds = StratifiedKFold(n_splits=10).split(x, y)
    scores = cross_val_score(clf, x, y, cv=folds)
    print('Accuracy {}%'.format(scores.mean() * 100))
"""
def find_model():
label='Vote'
x_train = pd.read_csv("x_train.csv", header=0)
y_train = pd.read_csv("y_train.csv", squeeze=True, header=None)
x_valid = pd.read_csv("x_valid.csv", header=0)
y_valid = pd.read_csv("y_valid.csv", squeeze=True, header=None)
x_test = pd.read_csv("x_test.csv", header=0)
y_test = pd.read_csv("y_test.csv", squeeze=True, header=None)
#get_random_forest_best_params(x_train, y_train)
x = x_train
y = y_train
# Best parameters for Random Tree Forest: {'criterion': 'gini', 'max_depth': 30, 'max_features': 'auto', 'min_samples_split': 2, 'n_estimators': 50}
rand_forest_clf = RandomForestClassifier(criterion='gini', max_depth=50, min_samples_split=5, n_estimators=50)
cross_validation_acc_score(x, y, rand_forest_clf)
rand_forest_clf.fit(x, y)
prediction_rand_forest = rand_forest_clf.predict(x_valid)
# Best parameters for SVC {'degree': 4, 'kernel': 'poly'}
svm_poly_clf = SVC(kernel='poly', degree=4, probability=True)
svm_poly_clf.fit(x, y)
prediction_svm_poly = svm_poly_clf.predict(x_valid)
# Multi-layer perceptron classifier
perceptron_clf = MLPClassifier(activation="relu", alpha=0.1, hidden_layer_sizes=(10, 10, 10),
learning_rate="constant", max_iter=2000)
perceptron_clf.fit(x, y)
prediction_perceptron = perceptron_clf.predict(x_valid)
estimators = [
('Random Forest', RandomForestClassifier(criterion='gini', max_depth=50, min_samples_split=5, n_estimators=50)),
('SVC', SVC(kernel='poly', degree=4, probability=True)),
('Percepton', MLPClassifier(activation="relu", alpha=0.1, hidden_layer_sizes=(10, 10, 10),
learning_rate="constant", max_iter=2000))
]
blend_clf = StackingClassifier(estimators)
blend_clf.fit(x, y)
prediction_blend = blend_clf.predict(x_valid)
# evaluate and plot confusion matrices
performance_data = [('Random Forest', prediction_rand_forest, y_valid),
('SVM Polinomial Kernel', prediction_svm_poly, y_valid),
('Perceptron', prediction_perceptron, y_valid),
('Blending ', prediction_blend, y_valid)
]
print_accuracy_scores(performance_data)
print_f1_score(performance_data)
prediction = prediction_blend
parties = np.unique(prediction)
num_votes_for_party = lambda party: len([vote for vote in prediction if vote == party])
list_of_parties = [(party, num_votes_for_party(party)) for party in parties]
num_votes = len(y_test.index)
winner = max(list_of_parties, key=lambda item: item[1])
print('Party with most probable majority of votes')
print(winner[0], ':', winner[1], ',', winner[1] * 100 / num_votes, '%')
# 2. Division of voters between the parties
print('Amount of votes per party')
for party_votes in sorted(list_of_parties, key=lambda votes: votes[1], reverse=True):
print(party_votes[0], ':', party_votes[1], ',', party_votes[1] * 100 / num_votes, '%')
"""
if __name__ == '__main__':
    # NOTE(review): `label` is assigned but never used below.
    label = 'Vote'
    # Load the pre-split feature/label CSVs. NOTE(review): `squeeze=True` is
    # deprecated in newer pandas; `.squeeze("columns")` is the replacement.
    x_train = pd.read_csv("x_train.csv", header=0)
    y_train = pd.read_csv("y_train.csv", squeeze=True, header=None)
    x_valid = pd.read_csv("x_valid.csv", header=0)
    y_valid = pd.read_csv("y_valid.csv", squeeze=True, header=None)
    x_test = pd.read_csv("x_test.csv", header=0)
    y_test = pd.read_csv("y_test.csv", squeeze=True, header=None)
    # get_random_forest_best_params(x_train, y_train)
    x = x_train
    y = y_train
    # Stacking ensemble of the three tuned base models.
    estimators = [
        ('Random Forest', RandomForestClassifier(criterion='gini', max_depth=50, min_samples_split=5, n_estimators=50)),
        ('SVC', SVC(kernel='poly', degree=4, probability=True)),
        ('Percepton', MLPClassifier(activation="relu", alpha=0.1, hidden_layer_sizes=(10, 10, 10),
                                    learning_rate="constant", max_iter=2000))
    ]
    blend_clf = StackingClassifier(estimators)
    blend_clf.fit(x, y)
    prediction = blend_clf.predict(x_test)
    # evaluate and plot confusion matrices
    # Count predicted votes per party.
    parties = np.unique(prediction)
    num_votes_for_party = lambda party: len([vote for vote in prediction if vote == party])
    list_of_parties = [(party, num_votes_for_party(party)) for party in parties]
    num_votes = len(y_test.index)
    # 1. Predicted winner (party with the most predicted votes).
    winner = max(list_of_parties, key=lambda item: item[1])
    print('Party with most probable majority of votes')
    print(winner[0], ':', winner[1], ',', winner[1] * 100 / num_votes, '%')
    # 2. Division of voters between the parties
    print('Amount of votes per party')
    for party_votes in sorted(list_of_parties, key=lambda votes: votes[1], reverse=True):
        print(party_votes[0], ':', party_votes[1], ',', party_votes[1] * 100 / num_votes, '%')
    performance_data = [('Blending ', prediction, y_test)]
    print_accuracy_scores(performance_data)
    print_f1_score(performance_data)
| grikkaq/ml_hw5 | elections_results.py | elections_results.py | py | 7,045 | python | en | code | 0 | github-code | 36 |
22161452898 | #!/usr/bin/env python3
import glob
import os.path
import re
import statistics
import sys
from collections import defaultdict
from typing import List, Dict
"""
USAGE:
./simple_spec_summary.py # all files in /spec/result/
./simple_spec_summary.py 1 10 # result 1-10 from /spec/result/
./simple_Spec_summary.py <list> <of> <csv> <files>
"""
def draw_table(table: List[List[str]], hline_after=()):
    """Render `table` as text columns padded to equal width and separated
    by ' | '.

    :param table: rows of cell strings (rows may have different lengths)
    :param hline_after: row indices after which a '---|---' rule is inserted
    :returns: the rendered table as a single string, one '\\n' per row
    """
    # Column width = widest cell seen in that column across all rows.
    widths: Dict[int, int] = defaultdict(int)
    for row in table:
        for col, cell in enumerate(row):
            widths[col] = max(widths[col], len(cell))
    lines = []
    for row_idx, row in enumerate(table):
        lines.append(' | '.join(cell.ljust(widths[col]) for col, cell in enumerate(row)) + '\n')
        if row_idx in hline_after:
            # Rule spans every column ever seen, matching the data rows above.
            lines.append('-|-'.join('-' * widths[col] for col in sorted(widths)) + '\n')
    return ''.join(lines)
def load_spec_files(files: List[str]) -> Dict[str, Dict[str, List[float]]]:
    """
    Parse SPEC CPU2006 result CSV files.

    :param files: paths to CSV result files; missing paths are reported and skipped
    :return: {benchmark type: {benchmark name: [list, of, results]}}
    """
    results = {}
    for fname in files:
        if not os.path.exists(fname):
            print('MISSING FILE', fname)
            continue
        with open(fname, 'r') as f:
            text = f.read()
        # Test name line looks like: "test name: <name>" — strip the 12-char
        # prefix and the trailing quote.
        name = [l[12:-1] for l in text.split('\n') if l.startswith('"test name: ')][0]
        if name == 'llvm-o3-typegraph':
            name = 'llvm-o3-typro'  # normalize the old test name
        if name not in results:
            results[name] = {}
        # Only the section between these two markers contains the per-benchmark rows.
        table = text.split('"Selected Results Table"')[1].split('"Run number:"')[0]
        for l in table.split('\n'):
            # SPEC CPU2006 benchmark ids start with 4 (400.perlbench ... 483.xalancbmk).
            if l.startswith('4'):
                elements = l.split(',')
                if elements[2]:  # third column holds the runtime; skip empty cells
                    bench_name = elements[0]
                    # Drop the numeric "NNN." prefix from the benchmark id.
                    if re.match(r'\d{3}\.\w+', bench_name):
                        bench_name = bench_name.split('.', 1)[1]
                    if bench_name not in results[name]:
                        results[name][bench_name] = []
                    results[name][bench_name].append(float(elements[2]))
    return results
def summarize_spec_files(files: List[str]):
    """Print a table comparing typro runtimes against the reference build,
    including relative standard deviations and the per-benchmark overhead."""
    results = load_spec_files(files)
    assert 'llvm-o3-typro' in results, 'No typro runs!'
    assert 'llvm-o3-ref' in results, 'No reference runs!'

    def mean_and_relstdev(samples):
        # Mean runtime and stdev expressed as a fraction of the mean.
        avg = sum(samples) / len(samples)
        return avg, statistics.stdev(samples) / avg

    rows = [['Benchmark', 'Typro runtime (stdev)', 'Ref runtime (stdev)', 'Overhead']]
    for bench in sorted(results['llvm-o3-typro']):
        typro_avg, typro_rel = mean_and_relstdev(results['llvm-o3-typro'][bench])
        ref_avg, ref_rel = mean_and_relstdev(results['llvm-o3-ref'][bench])
        rows.append([
            bench,
            f'{typro_avg:5.1f} s (+-{typro_rel * 100:4.1f}%)',
            f'{ref_avg:5.1f} s (+-{ref_rel * 100:4.1f}%)',
            f'{(typro_avg / ref_avg - 1) * 100:5.2f}%',
        ])
    print(draw_table(rows, (0,)))
if __name__ == '__main__':
    # Two numeric args: summarize result numbers argv[1]..argv[2] from /spec/result/.
    if len(sys.argv) == 3 and re.match(r'\d+', sys.argv[1]) and re.match(r'\d+', sys.argv[2]):
        files = []
        for i in range(int(sys.argv[1]), int(sys.argv[2]) + 1):
            files.append(f'/spec/result/CINT2006.{i:03d}.ref.csv')
            files.append(f'/spec/result/CFP2006.{i:03d}.ref.csv')
        summarize_spec_files(files)
    # Otherwise: treat the arguments as explicit CSV paths...
    elif len(sys.argv) > 1:
        summarize_spec_files(sys.argv[1:])
    # ...or, with no arguments, summarize every result file found.
    else:
        summarize_spec_files(glob.glob('/spec/result/*.ref.csv'))
| typro-type-propagation/TyPro-CFI | scripts/simple_spec_summary.py | simple_spec_summary.py | py | 3,896 | python | en | code | 3 | github-code | 36 |
21107255277 | import sqlite3
# Open a connection to the database file.
# If the file does not exist, sqlite creates a new empty database.
con = sqlite3.connect('start.db')
print('Database åbnet')
try:
    con.execute("""CREATE TABLE personer (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        navn STRING,
        alder INTEGER)""")
    print('Tabel oprettet')
except Exception as e:
    # CREATE TABLE raises when the table already exists.
    print('Tabellen findes allerede')
c = con.cursor()
c.execute('INSERT INTO personer (navn,alder) VALUES (?,?)', ("Hans", 38))
c.execute('INSERT INTO personer (navn,alder) VALUES (?,?)', ("Kim", 37))
# After changing the database, commit must be called to persist the changes.
con.commit()
# This variable receives the user's command input.
inp = ''
print('')
print('Kommandoer: ')
print(' vis - Viser alle personer i databasen')
print(' ny - Opret ny person')
print(' q - Afslut program')
# Simple REPL: loop until the user enters a command starting with 'q'.
while not inp.startswith('q'):
    inp = input('> ')
    if inp == 'vis':
        c = con.cursor()
        c.execute('SELECT navn,alder FROM personer')
        for p in c:
            print('{} er {} år'.format(p[0], p[1]))
    elif inp == 'ny':
        n = input('Indtast navn: ')
        a = input('Indtast alder: ')
        c = con.cursor()
        # Parameterized query protects against SQL injection.
        c.execute('INSERT INTO personer (navn,alder) VALUES (?,?)', (n, a))
        con.commit()
| jonascj/learn-programming-with-python | ch-database/src/database_start.py | database_start.py | py | 1,314 | python | da | code | 2 | github-code | 36 |
43109381353 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 18 21:28:29 2021
@author: apolloseeds
"""
from dataset import *
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import loadmat
from sklearn import model_selection
from toolbox_02450 import train_neural_net, draw_neural_net, visualize_decision_boundary
import torch
from scipy import stats
from toolbox_02450 import feature_selector_lr, bmplot, rlr_validate, mcnemar
from toolbox_02450 import train_neural_net, draw_neural_net, visualize_decision_boundary
N2, M2 = contX.shape
def trainANN(X, y, h, K=10):
    """Select the best hidden-layer size via K-fold cross-validation.

    Parameters
    ----------
    X : ndarray, shape (N, M2) -- standardized feature matrix.
    y : ndarray, shape (N,)    -- binary targets.
    h : sequence of int        -- candidate hidden-layer sizes to evaluate.
    K : int                    -- number of CV folds (default 10).

    Returns
    -------
    int -- the candidate from ``h`` with the lowest mean error rate.

    Fix vs. the original: the original built one network with the whole
    ``h`` sequence as layer size, never used ``testedH`` inside the loop,
    and indexed the candidate list with the argmin of a flat fold-by-h
    error list, so the returned "optimal" h was meaningless.  Here a
    fresh network is trained per candidate per fold and errors are
    averaged per candidate.
    """
    CV = model_selection.KFold(K, shuffle=True)
    n_replicates = 1   # number of networks trained in each k-fold
    max_iter = 10000
    loss_fn = torch.nn.MSELoss()  # mean-squared-error loss

    # One error list per candidate h, accumulated over all folds.
    errors = {units: [] for units in h}
    for (k, (train_index, test_index)) in enumerate(CV.split(X, y)):
        print('\nCrossvalidation fold: {0}/{1}'.format(k + 1, K))
        # Extract training and test set for current CV fold, convert to tensors
        X_train = torch.Tensor(X[train_index, :])
        y_train = torch.Tensor(y[train_index])
        X_test = torch.Tensor(X[test_index, :])
        y_test = torch.Tensor(y[test_index])

        for units in h:
            # Build a fresh one-hidden-layer net for this candidate size.
            # (default-arg binding avoids the late-binding closure pitfall)
            model = lambda units=units: torch.nn.Sequential(
                torch.nn.Linear(M2, units),  # M2 features to `units` hidden units
                torch.nn.Tanh(),             # transfer function
                torch.nn.Linear(units, 1),   # hidden units to 1 output neuron
            )
            net, final_loss, learning_curve = train_neural_net(
                model, loss_fn, X=X_train, y=y_train,
                n_replicates=n_replicates, max_iter=max_iter)
            print('\n\tBest loss: {}\n'.format(final_loss))

            # Threshold the network output at 0.5 to get class labels,
            # then record this candidate's error rate on the fold.
            y_sigmoid = net(X_test)
            y_test_est = (y_sigmoid > .5).type(dtype=torch.uint8)
            e = y_test_est != y_test.type(dtype=torch.uint8)
            error_rate = (sum(e).type(torch.float) / len(y_test)).data.numpy()
            errors[units].append(error_rate)

    # Pick the candidate with the lowest mean error rate over the folds.
    mean_errors = {units: np.mean(errs) for units, errs in errors.items()}
    optimalH = min(mean_errors, key=mean_errors.get)
    print('\nEstimated generalization error, RMSE: {0}'.format(
        round(np.sqrt(mean_errors[optimalH]), 4)))
    return optimalH
def annRegression(X_train, X_test, y_train, y_test, hRange, K=10):
    """Fit an ANN regressor whose hidden-layer size is chosen by CV.

    Runs ``trainANN`` on the training split to pick the number of hidden
    units from ``hRange``, retrains one network on the full training
    split, and evaluates it on the test split.

    Returns
    -------
    (opt_hidden_unit, mse, y_test_est) -- chosen layer size, mean squared
    error on the test split, and the raw network predictions.
    """
    # Parameters for neural network training
    n_replicates = 1   # number of networks trained in each k-fold
    max_iter = 10000   # stop criterion 2 (max epochs in training)
    loss_fn = torch.nn.MSELoss()

    opt_hidden_unit = trainANN(X_train, y_train, hRange, K)

    # Fix: the input layer must match the M2 columns of the (continuous)
    # feature matrix actually passed in.  The original used the unrelated
    # global M here, which crashes at matmul time whenever M != M2.
    model = lambda: torch.nn.Sequential(
        torch.nn.Linear(M2, opt_hidden_unit),  # M2 features to H hidden units
        torch.nn.Tanh(),                       # 1st transfer function
        torch.nn.Linear(opt_hidden_unit, 1),   # H hidden units to 1 output neuron
    )

    X_train = torch.Tensor(X_train)
    y_train = torch.Tensor(y_train)
    X_test = torch.Tensor(X_test)
    y_test = torch.Tensor(y_test)

    # Train the net on training data
    net, final_loss, learning_curve = train_neural_net(model,
                                                       loss_fn,
                                                       X=X_train,
                                                       y=y_train,
                                                       n_replicates=n_replicates,
                                                       max_iter=max_iter)
    print('\n\tBest loss: {}\n'.format(final_loss))

    # Predictions and mean squared error on the held-out split
    y_test_est = net(X_test)
    se = (y_test_est.float() - y_test.float()) ** 2   # squared error
    mse = (sum(se).type(torch.float) / len(y_test)).data.numpy()
    return opt_hidden_unit, mse, y_test_est
# Number of classes for the classification task.
C = 2
# Normalize data
annX = stats.zscore(contX)
# Parameters for neural network classifier
h = 1 # number of hidden units, !!!!SELECT A RANGE BY TESTING
# Binarized target: column 7 of X (presumably serum cholesterol
# indicator -- TODO confirm against dataset.py).
serumC = np.array(np.asarray(X[:, 7]), dtype=int)
#y_rings = np.array(np.asarray(rings), dtype=np.int).reshape(-1, 1)
# Outer CV folds, regularization grid, and inner CV folds.
K = 5
lambdas = np.linspace(0.01, 10, 1000)
inner_cvf = 10
CV = model_selection.KFold(K, shuffle=True)
coefficient_norm = np.zeros(K)
# Parameters for neural network classifier
hRange = range(1, 8)
n_replicates = 2 # number of networks trained in each k-fold
max_iter = 10000 # stop criterion 2 (max epochs in training)
# Per-outer-fold result accumulators (regression and classification).
square_err_regression_base = np.empty(K)
square_err_regression_RLR = np.empty(K)
square_err_regression_ANN = np.empty(K)
regression_RLR_opt_lambdas = np.empty(K)
regression_opt_hidden_units = np.empty(K)
error_rate_classification_base = np.empty(K)
error_rate_classification_logistic = np.empty(K)
error_rate_classification_ANN = np.empty(K)
classification_opt_hidden_units = np.empty(K)
classification_opt_lambdas = np.empty(K)
w_est_logistic_arr = np.empty((K, X.shape[1]))
y_est_Reg_ANN = []
y_est_Reg_RLR = []
y_est_claf_ANN = []
y_est_claf_logistic = []
y_sex_real = []
y_rings_real = []
# Outer cross-validation loop: one ANN model per fold, inner CV inside
# annRegression selects the hidden-layer size.
for k, (train_index, test_index) in enumerate(CV.split(annX,serumC)):
    X_train = annX[train_index,:]
    X_test = annX[test_index,:]
    y_train = serumC[train_index]
    y_test = serumC[test_index]
    """
    y_rings_train = y_rings[train_index]
    y_rings_test = y_rings[test_index]
    y_sex_real.append(y_sex_test)
    y_rings_real.append(y_rings_test)
    """
    regression_opt_hidden_unit, ANN_mse, y_est_ANN_regression = annRegression(X_train, X_test, y_train, y_test, hRange, inner_cvf)
    regression_opt_hidden_units[k] = regression_opt_hidden_unit
    square_err_regression_ANN[k] = ANN_mse
    y_est_Reg_ANN.append(y_est_ANN_regression)
    # Progress report after each outer fold.
    print("square_err_regression_ANN: ", square_err_regression_ANN)
| ralph-elhaddad/02450-Intro-ML | Project2/2b.py | 2b.py | py | 7,248 | python | en | code | 0 | github-code | 36 |
840464409 | import random
# Pool of animal usernames to distribute into groups.
usernames_array = ["giraffe", "raccoon", "ant", "tiger", "sheep", "deer", "panda", "liger", "fox", "hippo", "alligator",
                   "dog", "dolphin", "eagle", "zebra", "rabbit", "bear", "monkey", "leopard", "frog", "squirrel",
                   "elephant", "bee", "duck", "kangaroo", "penguin"]
# Parallel list: the "family" ID of each animal above; animals sharing an
# ID may not end up in the same group.
username_groupID = ['1', '1', '2', '2', '2', '3', '3', '3', '4', '4', '4', '5', '5', '5', '6', '6', '6', '7', '7',
                    '7', '8', '8', '8', '9', '9', '9']
# Maps animal name -> family ID.
dictionary = dict(zip(usernames_array, username_groupID))
# print(dictionary)
# The six target groups, filled by randomGroupGenerator.creategroup().
# Keys are family IDs, values are animal names.
group1 = {}
group2 = {}
group3 = {}
group4 = {}
group5 = {}
group6 = {}
length = len(username_groupID)
#print(length)
def swapelement(group, id_rep, name_rep):
    """Make room in *group* for the pair (id_rep, name_rep).

    Moves the first entry of *group* whose family ID is not already
    present in the module-level ``group6`` over to ``group6``, then
    inserts the new pair into *group*.
    """
    for key in list(group):
        if key not in group6:
            group6[key] = group.pop(key)
            break
    group[id_rep] = name_rep
# Report the pool size, then shuffle so the group assignment is random.
print("Total number of animals: "+str(len(dictionary.keys())))
random.shuffle(usernames_array)
class randomGroupGenerator():
    """Distributes the shuffled usernames into six groups.

    Constraint: within a group every animal must come from a different
    family (no duplicate ``username_groupID`` per group).  Group 1 may
    hold up to 6 members, groups 2-6 up to 4.  Relies on the module-level
    ``usernames_array``, ``username_groupID``, ``dictionary``,
    ``swapelement`` and ``group1``..``group6``.
    """

    def creategroup(self):
        """Fill group1..group6 and return them as a list of name lists."""
        group_list = []
        length = len(username_groupID)
        while length > 0:
            name = usernames_array.pop()
            ID = dictionary[name]
            # Place the animal in the first group that has room and does
            # not yet contain its family ID.
            if len(group1) < 6 and ID not in group1:
                group1[ID] = name
            elif len(group2) < 4 and ID not in group2:
                group2[ID] = name
            elif len(group3) < 4 and ID not in group3:
                group3[ID] = name
            elif len(group4) < 4 and ID not in group4:
                group4[ID] = name
            elif len(group5) < 4 and ID not in group5:
                group5[ID] = name
            elif len(group6) < 4 and ID not in group6:
                group6[ID] = name
            else:
                # No group can take this animal directly: swap a member of
                # the first group lacking the ID out into group6 to make
                # space.  (Bug fix: the original dispatched the group3/4/5
                # cases to swapelement(group2, ...) by copy-paste mistake.)
                if ID not in group1:
                    swapelement(group1, ID, name)
                elif ID not in group2:
                    swapelement(group2, ID, name)
                elif ID not in group3:
                    swapelement(group3, ID, name)
                elif ID not in group4:
                    swapelement(group4, ID, name)
                elif ID not in group5:
                    swapelement(group5, ID, name)
            length -= 1
        # Convert the six dictionaries into plain lists of names.
        for grp in (group1, group2, group3, group4, group5, group6):
            group_list.append(list(grp.values()))
        return group_list
# creategroup()
# print(group1)
# print(group2)
# print(group3)
# print(group4)
# print(group5)
# print(group6)
| rifav/UbicosAppServer | textbook/app/randomGroupGenerator.py | randomGroupGenerator.py | py | 2,901 | python | en | code | null | github-code | 36 |
28512647903 | # Import libraries
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
from sqlalchemy import create_engine
from googlesearch import search
from tqdm import tqdm
tqdm.pandas()
# Read data
df = pd.read_csv('data/user-item-interactions.csv')
df_content = pd.read_csv('data/articles.csv')
# Drop the stray index columns written by a previous to_csv.
del df['Unnamed: 0']
del df_content['Unnamed: 0']
# <----- CLEAN DATA [start] ----->
# Remove duplicate articles
df_content = df_content.drop_duplicates(keep='first').reset_index(drop=True)
df_content = df_content.drop_duplicates(subset='article_id', keep='first').reset_index(drop=True)
# Format matching columns to same type
df = df.astype({'article_id': int})
# Make User-id column in df to identify users
# (assign a sequential integer id to each distinct email)
user_id_dict = dict()
i=0
for email in df.email:
    if email not in user_id_dict:
        user_id_dict[email] = i
        i+=1
df['user_id'] = df.email.apply(lambda x: user_id_dict[x])
df.drop('email', axis=1, inplace=True)
# Fill in missing document descriptions with empty strings
# NOTE(review): chained assignment -- relies on the mode setting at the
# top of the file; .loc would be the safe form.
df_content.doc_description[df_content.doc_description.isnull()] = ''
# <----- CLEAN DATA [finished] ----->
# Merge data-sets on article id
df_merged = df.drop('title', axis=1).merge(df_content[['article_id', 'doc_full_name', 'doc_description']], on='article_id', how='outer')
# Fill in missing document titles
# (articles only present in the interactions file keep their title from df)
no_title_ids = df_merged.article_id[df_merged.doc_full_name.isnull()].unique().tolist()
for id in no_title_ids:
    title = df.title[df.article_id == id].tolist()[0]
    df_merged.doc_full_name[df_merged.article_id == id] = title
# Fill in missing descriptions with empty string
df_merged.doc_description[df_merged.doc_description.isnull()] = ''
# Make subset of merged dataframe and drop all duplicates
df_subset = df_merged[['article_id', 'doc_full_name', 'doc_description']].drop_duplicates(keep='first').reset_index(drop=True)
# Extract article links through google searches for all articles in the subset dataframe
doc_identifier = df_subset.doc_full_name + ' ' + df_subset.doc_description
def extract_link(text):
    """Return the first Google search result URL for *text*.

    Falls back to ``https://www.google.com`` when the search yields no
    result or raises (network error, rate limiting, empty result set).
    """
    try:
        link = list(search(text, tld="com", num=1, stop=1))[0]
    except Exception:
        # Was a bare `except:` -- narrowed so KeyboardInterrupt/SystemExit
        # still propagate while network/parse errors get the default link.
        link = "https://www.google.com"
    return link
# Google each article once (slow: one network search per row, with a
# tqdm progress bar via progress_apply).
df_subset['link'] = doc_identifier.progress_apply(extract_link)
# Distribute links to all rows of the merged dataframe
df_merged['link'] = df_merged.article_id.apply(lambda x: df_subset.link[df_subset.article_id==x].tolist()[0])
# Save data to database
engine = create_engine('sqlite:///data/data.db')
df_merged.to_sql('user-article-interactions', engine, index=False, if_exists='replace') | sameedakber-ai/ibm-recommendations-2 | data/process_data.py | process_data.py | py | 2,578 | python | en | code | 0 | github-code | 36 |
35056823314 | import pickle
import json
import yaml
import numpy as np
import torch
import torch.optim as optim
import time
from data_manager import DataManager
from model import BiLSTMCRF
from utils import f1_score, get_tags, format_result
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter(log_dir='./tensorboard/bioes')
class ChineseNER(object):
    """BiLSTM-CRF named-entity tagger for Chinese medical text.

    ``entry`` selects the mode: "train" builds data managers and trains,
    "predict"/"evaluate" restore a saved model and its hyper-parameters.
    """
    def __init__(self, entry="train"):
        self.load_config()
        self.__init_model(entry)
    def __init_model(self, entry):
        # Build model + data managers according to the requested mode.
        if entry == "train":
            self.train_manager = DataManager(batch_size=self.batch_size, tags=self.tags)
            self.total_size = len(self.train_manager.batch_data)
            data = {
                "batch_size": self.train_manager.batch_size,
                "input_size": self.train_manager.input_size,
                "vocab": self.train_manager.vocab,
                "tag_map": self.train_manager.tag_map,
            }
            self.save_params(data)
            self.dev_manager = DataManager(batch_size=60, data_type="dev")
            # validation set
            # self.dev_batch = self.dev_manager.iteration()
            self.model = BiLSTMCRF(
                tag_map=self.train_manager.tag_map,
                batch_size=self.batch_size,
                vocab_size=len(self.train_manager.vocab),
                dropout=self.dropout,
                embedding_dim=self.embedding_size,
                hidden_dim=self.hidden_size,
            )
            self.model = self.model.cuda()
            self.restore_model()
        # NOTE(review): `entry == "predict" or "evaluate"` is always truthy
        # ("evaluate" is a non-empty string), so this branch runs for ANY
        # entry value other than "train" -- probably intended to be
        # `entry in ("predict", "evaluate")`.
        elif entry == "predict" or "evaluate":
            # python main.py predict
            data_map = self.load_params()
            input_size = data_map.get("input_size")
            self.tag_map = data_map.get("tag_map")
            self.vocab = data_map.get("vocab")
            print('input_size',input_size)
            print('tag_map',self.tag_map)
            self.model = BiLSTMCRF(
                tag_map=self.tag_map,
                vocab_size=input_size,
                embedding_dim=self.embedding_size,
                hidden_dim=self.hidden_size
            )
            self.model = self.model.cuda()
            self.test_manager = DataManager(batch_size=60, data_type="dev")
            self.restore_model()
    # Load configuration (falls back to a default config file on failure)
    def load_config(self):
        try:
            fopen = open("models/config.yml")
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted files and deprecated -- consider yaml.safe_load.
            config = yaml.load(fopen)
            fopen.close()
        except Exception as error:
            print("Load config failed, using default config {}".format(error))
            fopen = open("models/config.yml", "w")
            config = {
                "embedding_size": 300,
                "hidden_size": 128,
                "batch_size": 30,
                "dropout":0.5,
                "model_path": "models/",
                "tags": ["TREATMENT", "BODY","SIGNS","CHECK","DISEASE"]
            }
            yaml.dump(config, fopen)
            fopen.close()
        self.embedding_size = config.get("embedding_size")
        self.hidden_size = config.get("hidden_size")
        self.batch_size = config.get("batch_size")
        self.model_path = config.get("model_path")
        self.tags = config.get("tags")
        self.dropout = config.get("dropout")
    # Restore model weights from the checkpoint file (best-effort)
    def restore_model(self):
        try:
            self.model.load_state_dict(torch.load(self.model_path + "params_6all.pkl"))
            print("model restore success!")
        except Exception as error:
            print("model restore faild! {}".format(error))
    # Persist model hyper-parameters (vocab, tag map, sizes)
    def save_params(self, data):
        with open("models/data_6all.pkl", "wb") as fopen:
            pickle.dump(data, fopen)
    # Load model hyper-parameters saved by save_params
    def load_params(self):
        with open("models/data_6all.pkl", "rb") as fopen:
            data_map = pickle.load(fopen)
        return data_map
    def train(self):
        """Training loop: Adam + ReduceLROnPlateau, checkpoints the model
        whenever the epoch's mean loss improves, evaluates every 5 epochs."""
        optimizer = optim.Adam(self.model.parameters(),weight_decay=0.002,lr=0.0000004) # 0.000001
        # optimizer = optim.SGD(self.model.parameters(), lr=0.00000008,weight_decay=0.001,momentum=0.9) #4e-7
        scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.5,patience=2,cooldown=5,verbose=True,min_lr=1e-8,eps=1e-8)
        best_loss = 240
        lossList = [0] * self.total_size
        # NOTE(review): epoch range starts at 268 -- looks like a resumed
        # training run; confirm before reusing.
        for epoch in range(268,401):
            losses = []
            index = 0
            startTime = time.process_time()
            for batch in self.train_manager.get_batch():
                start = time.process_time()
                index += 1
                self.model.zero_grad()
                sentences, tags, length = zip(*batch)
                # `length` holds the original (unpadded) sentence lengths
                # sentences shape: (batch_size, max sentence length in the
                # batch), e.g. (20, 332)
                sentences_tensor = torch.tensor(sentences, dtype=torch.long).cuda()
                tags_tensor = torch.tensor(tags, dtype=torch.long).cuda()
                length_tensor = torch.tensor(length, dtype=torch.long).cuda()
                loss = self.model.neg_log_likelihood(sentences_tensor, tags_tensor, length_tensor)
                losses.append(loss.cpu().item())
                progress = ("█"*int(index * 60 / self.total_size)).ljust(60)
                loss.backward()
                optimizer.step()
                # torch.save(self.model.state_dict(), self.model_path + 'params_6all.pkl')
                end = time.process_time()
                dur = end - start
                print("""epoch [{}] |{}| {}/{}\n\tloss {:.3f}\t\tlast_loss {:.3f}\t\ttime {}\t\tbest_avg_loss {:.3f}""".format(
                    epoch, progress, index, self.total_size, loss.cpu().tolist()[0],lossList[index - 1],str(dur),best_loss
                    )
                )
                lossList[index - 1] = loss.cpu().item()
            print("-" * 90)
            endTime = time.process_time()
            totalTime = endTime - startTime
            avg_loss = np.mean(losses)
            # keep the best model so far (checkpoint on improved mean loss)
            if avg_loss < best_loss:
                best_loss = avg_loss
                torch.save(self.model.state_dict(), self.model_path + 'params_6all.pkl')
            writer.add_scalar('BiLstm_CRF:avg_loss-epoch', avg_loss, epoch)
            print('epoch ',epoch,' avg_loss ', avg_loss,' total_time ',totalTime)
            if epoch % 5 == 0:
                self.evaluate(epoch/5,manager=self.dev_manager)
            print("-"*100)
            scheduler_lr.step(avg_loss)
        writer.close()
    # train: BODY 7507, SIGNS 6355, CHECK 6965, DISEASE 474, TREATMENT 805
    # test:
    # Compute per-tag and overall F1 to evaluate the model
    def evaluate(self,epoch,manager,add_scalar = True):
        """Evaluate on the batches of *manager*; returns
        (all_recall, all_precision, all_f1).  Also logs to TensorBoard
        when add_scalar is True."""
        print('正在开始评估')
        all_origins = all_founds = all_rights = 0
        for tag in self.tags:
            origins = founds = rights = 0
            for batch in manager.get_batch():
                sentences, labels, length = zip(*batch)
                _, paths = self.model(sentences)
                origin, found, right = f1_score(labels, paths, tag, self.model.tag_map)
                origins += origin
                founds += found
                rights += right
            all_origins += origins
            all_founds += founds
            all_rights += rights
            # Guard against division by zero on empty tag counts.
            recall = 0. if origins == 0 else (rights / origins)
            precision = 0. if founds == 0 else (rights / founds)
            f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall)
            print("\t{}\torigins:{}\t\t\tfounds:{}\t\t\trights:{}".format(tag, origins, founds, rights))
            print("\t\t\trecall:{}\tprecision:{}\tf1:{}".format(recall, precision, f1))
            if add_scalar:
                tag_epoch = tag + '-5epoch'
                writer.add_scalars(tag_epoch,{
                    'recall':recall,
                    'precision':precision,
                    'f1':f1
                }, epoch)
        all_recall = 0. if all_origins == 0 else (all_rights / all_origins)
        all_precision = 0. if all_founds == 0 else (all_rights / all_founds)
        all_f1 = 0. if all_recall + all_precision == 0 else (2 * all_precision * all_recall) / (all_precision + all_recall)
        print("\tall_origins:{}\t\t\tall_founds:{}\t\t\tall_rights:{}".format(all_origins, all_founds, all_rights))
        print("\tall_recall:{}\tall_precision:{}\tall_f1:{}".format(all_recall, all_precision, all_f1))
        if add_scalar:
            writer.add_scalars("ALL-5epoch", {
                'all_recall': all_recall,
                'all_precision': all_precision,
                'all_f1': all_f1
            }, epoch)
        print('评估结束')
        return all_recall, all_precision, all_f1
    # Prediction: tag a raw input string and extract entities
    def predict(self, input_str=""):
        """Tag *input_str* (prompting interactively when empty) and return
        the extracted entities as a list of dicts."""
        if not input_str:
            input_str = input("请输入文本: ")
        # Map each character of the input sentence to its vocab index
        # (0 for out-of-vocabulary characters)
        input_vec = [self.vocab.get(i, 0) for i in input_str]
        # convert to tensor
        sentences = torch.tensor(input_vec,dtype=torch.long).view(1, -1)
        sentences = sentences.cuda()
        # `paths` holds the predicted tag indices, shape [1, seq_len]
        _, paths = self.model(sentences)
        entities = []
        # "tags": ["ORG", "PER"]
        for tag in self.tags:
            tags = get_tags(paths[0], tag, self.tag_map)
            entities += format_result(tags, input_str, tag)
        print(entities)
        print(json.dumps(entities,indent=4,ensure_ascii=False))
        return entities
if __name__ == "__main__":
    # Keep prompting until a recognised command is entered; an empty
    # input exits the loop immediately.
    entry = input('请输入train or predict or evaluate: ')
    while entry:
        if entry == 'train':
            ChineseNER("train").train()
        elif entry == 'predict':
            cn = ChineseNER("predict")
            # Interactive prediction loop; 'q' returns to exit.
            while True:
                inputText = input("请输入文本(q退出): ")
                if inputText == 'q':
                    break
                cn.predict(inputText)
        elif entry == 'evaluate':
            cn = ChineseNER("evaluate")
            cn.evaluate(epoch=0, manager=cn.test_manager, add_scalar=False)
        else:
            # Unrecognised command: re-prompt and try again.
            print("请输入正确的命令(train or predict or evaluate)")
            entry = input('请输入train or predict or evaluate: ')
            continue
        break
| ravesky/medical_ner_pytorch | main.py | main.py | py | 10,508 | python | en | code | 44 | github-code | 36 |
33380799983 | import requests
from bs4 import BeautifulSoup
import time
import plotly
import numpy as np
import pandas as pd
import datetime as dt
import cufflinks as cf
import subprocess
import traceback
from sys import exit
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import base64
import pickle
import os.path
from googleapiclient.discovery import build
# Set working directory to the project folder
os.chdir("<replace_with_path_to_project_folder>")
def extract_price(game_page):
    """Scrape the price from a PlayStation-store game page.

    Parses the response HTML and looks for the ``span.psw-h3`` element
    that carries the price.  Returns the price as a float, or ``np.nan``
    when no price element is present on the page.
    """
    tag = BeautifulSoup(game_page.text, features="html.parser").find(
        'span', class_='psw-h3')
    if tag is None:
        return np.nan
    # Strip the leading '£' before converting the text to a number.
    return float(tag.get_text().replace('£', ''))
def get_latest_non_null(row):
    """Return the most recent (right-most) non-null price in *row*.

    NaN never compares equal to itself, which is how null entries are
    detected here.  Returns -1 when every entry in the row is null
    (or the row is empty).
    """
    for value in reversed(row):
        if value == value:  # False only for NaN
            return value
    return -1
def create_message(sender, to, subject, price_drop, failures, nans):
    """Build a base64-encoded email for the Gmail API.

    The HTML body gets one section per non-empty input: a price-drop
    table (*price_drop* DataFrame), a <br>-joined list of URLs that
    failed to scrape (*failures*), and a table of games whose price
    could not be found (*nans* DataFrame).  Returns ``{'raw': <str>}``
    in the urlsafe-base64 form the Gmail API expects.
    """
    # Assemble the HTML body from optional sections.
    sections = ["""<html>
    <head></head>
    <body>"""]
    if price_drop.shape[0] > 0:
        sections.append('<p><b>Price Drops:</b></p>')
        sections.append(price_drop.to_html(escape=False, index = False, justify = 'center'))
    if len(failures) > 0:
        sections.append('<br><p><b>Failed to Scrape:</b></p>')
        sections.append('<br>'.join(failures))
    if nans.shape[0] > 0:
        sections.append('<br><p><b>Price Not Found:</b></p>')
        sections.append(nans.to_html())
    sections.append("""<br></body>
    </html>""")

    message = MIMEMultipart()
    message.attach(MIMEText(''.join(sections), 'html'))
    message['to'] = to
    message['from'] = sender
    message['subject'] = subject
    # The Gmail API wants the RFC-2822 message urlsafe-base64 encoded.
    raw_message = base64.urlsafe_b64encode(message.as_string().encode("utf-8"))
    return {'raw': raw_message.decode("utf-8")}
# Wait 10 seconds in case computer was asleep (give time for
# an internet connection to be established)
time.sleep(10)
# Attempt to retrieve google.com to confirm internet connection,
# wait 5 minutes and try again if there is a error (no connection).
# If an error occurs the second time, a Pop-up error message is
# displayed and script is terminated.
# NOTE(review): the bare `except:` clauses here also swallow
# KeyboardInterrupt; `except Exception:` would be safer.
try:
    requests.get('https://www.google.com/')
except:
    time_of_error = time.time()
    while time.time() - time_of_error < 300:
        time.sleep(1)
    try:
        requests.get('https://www.google.com/')
    except:
        # Create Mac OS popup error message
        applescript = """
        display dialog "Playstation_scraper could not connect to the internet." ¬
        with title "Internet Connection Error" ¬
        with icon caution ¬
        buttons {"OK"}
        """
        subprocess.call("osascript -e '{}'".format(applescript), shell=True)
        exit('exit')
# The game price data is stored in Game_prices.csv. Each row
# corresponds to a different game. The first column ('game')
# contains the name of the game. The second column ('game id')
# contains the unique ID for the game on the playstation store.
# The remaining columns contain the price of the game on
# each day the script was run. The header for each column is
# the date the price was found. When the script is run for the
# first time there will be no price data (there will only be
# the 'game' and 'game id' columns)
df = pd.read_csv('game_prices.csv', ',', index_col='game')
# Convert the date column headers to date-time format
category_headers = df.columns[:1].tolist()
date_headers = df.columns[1:]
converted_date_headers = pd.to_datetime(date_headers, format='%d/%m/%Y').date.tolist()
df.columns = category_headers + converted_date_headers
# The full url for a game is the base url with the game ID added at
# the end.
base_url = 'https://store.playstation.com/en-gb/product/'
# time_delay is the seconds waited between subsequent GET requests
time_delay = 10
# game_price records the price of each game today
game_price = []
time_last_request = time.time()
# failures records the game url's which result in an error when requested.
failures = []
# The game_id column of df defines the game_id for each game.
# The code loops through this and for each game id it makes a
# get request and scrapes the price of that game from its webpage.
for game_id in df.game_id:
    # Waits between subsequent GET requests.
    while time.time() - time_last_request < time_delay:
        time.sleep(1)
    try:
        # full game url is base_url + game id
        game_page = requests.get(base_url + game_id)
        time_last_request = time.time()
        game_price.append(extract_price(game_page))
    # If GET request or price extraction failed, wait 300 seconds
    # and try again
    except:
        time_error = time.time()
        while time.time() - time_error < 300:
            time.sleep(1)
        try:
            game_page = requests.get(base_url + game_id)
            time_last_request = time.time()
            game_price.append(extract_price(game_page))
        except:
            # both GET requests failed so record as failure
            failures.append(base_url + game_id)
            # Record game price today as null
            game_price.append(np.nan)
# Add todays game prices as new column in df
date = dt.date.today()
df[date] = game_price
# Below generates a separate plot of price over time for each game in df.
n_rows = df.shape[0]
# plotly layout used to define the layout of the plot.
layout1 = cf.Layout(xaxis=dict(autorange=True, dtick='M1'),
                    yaxis=dict(title=dict(standoff=0, text='')),
                    height=150 * n_rows,
                    width=1200,
                    margin=dict(pad=0, t=100, l=0.9, r=0.9, b=1),
                    showlegend=False,
                    title=dict(text='Price of Games on Playstation Store',
                               x=0.5, y=0.99, xanchor='center')
                    )
# df is transposed so each column is a game, with the price on
# each dates in the rows. The game_id column in excluded
# by .iloc[1:,]
plotting_df = df.T.iloc[1:, ]
# Sub-plots will be in 2 columns, this is defined by the shape
# paramater, which takes a tuple (rows, columns). To calculate
# the rows we divide the number of games (total rows in df) by 1.9 and
# round the answer. e.g. if there are 7 games, we divide by 1.9 and
# round up giving us 4 rows. We use 1.9 because if we divide by 2 Python
# sometimes rounds numbers ending in 0.5 down rather than up.
shape1 = (round(n_rows / 1.9), 2)
# Plot price variation over time for each game
fig = plotting_df.iplot(subplots=True, shape=shape1,
                        subplot_titles=True, vertical_spacing=0.08,
                        horizontal_spacing=0.1, layout=layout1,
                        asFigure=True, color='orange', width=2)
fig.update_layout(hovermode='x unified')
# Fixes the opacity of the lines so all lines are fully visible
# (by default cufflinks gave variable opacity to the lines).
for i in fig['data']:
    i['line']['color'] = "rgba(255, 153, 51, 1.0)"
# Sets color and style of the subplot titles
for i in fig['layout']['annotations']:
    i['font'] = dict(size=14, color='orange')
# Adds date selector buttons (e.g. 'last month') to plots
fig.update_xaxes(
    rangeselector = dict(
        yanchor='bottom',
        buttons=list([
            dict(count=1, label="1m", step="month", stepmode="backward"),
            dict(count=6, label="6m", step="month", stepmode="backward"),
            dict(count=1, label="YTD", step="year", stepmode="todate"),
            dict(count=1, label="1y", step="year", stepmode="backward"),
            dict(step="all")
        ])
    )
)
# Set y axis range
fig.update_yaxes(nticks=8, rangemode='tozero', range=[0,60])
fig.write_html("Game Prices.html")
# The next section identifies price drops and prices that couldn't
# be found (nan_prices)
price_drops = []
nan_prices = []
# Excludes dataframes with data from only 1 date and only runs if the latest
# data is from today
if (df.shape[1] > 2):
    # We want to find the latest price before todays data so
    # we exclude todays column and the game_id column
    # This is to account for any NAN values in the data.
    df_prices_before_today = df.iloc[:, 1:-1]
    # Most recent non-null price for each game is found. Note that if
    # no non-null old price exists, the most recent price will be -1
    most_recent_price = [get_latest_non_null(row)
                         for row in df_prices_before_today.to_numpy()]
    # Loops through the games and identifies any price drops
    for game, game_id, new_price, old_price in zip(df.index, df.game_id,
                                                   game_price, most_recent_price):
        # Price drops only calculated if there is a valid price for
        # today (the value is not null) and a valid last price to
        # compare it to (most_recent_price is not -1)
        if (new_price == new_price) & (old_price > 0):
            price_delta = old_price - new_price
            # Only notify price drops larger than £0.5
            if price_delta > 0.5:
                html_link = '<a href="' + base_url \
                            + game_id + '"><div style="height:100%;width:100%">' \
                            + game + '</div></a>'
                price_drops.append([html_link, old_price, new_price, price_delta])
        # Also tracks any prices today that have returned a nan value
        elif new_price != new_price:
            nan_prices.append([game, base_url + game_id])
# Replace nan prices today in df with the latest non-null value
# (assume price has stayed the same if no price was found today)
# NOTE(review): `most_recent_price` is only bound inside the
# `df.shape[1] > 2` branch above -- on a first run (single price
# column) this loop raises NameError.  Confirm intended behaviour.
for price_today, game, most_recent_price in zip(game_price,
                                                df.index.values.tolist(),
                                                most_recent_price):
    if (price_today != price_today) & (most_recent_price >0):
        df.loc[game,date] = most_recent_price
# Counts of each alert category for the subject line.
drops = len(price_drops)
fails = len(failures)
nans = len(nan_prices)
# Checks if there is anything to email (will email price drops,
# request failures and nan prices).
if drops + fails + nans > 0:
    # Builds subject line for email including number of drops. failures
    # or null prices
    subject = 'Rupe Playstation Price Drop Alerts: '
    if drops > 0:
        subject += str(drops) + ' Drops, '
    if fails > 0:
        subject += str(fails) + ' Failures, '
    if nans > 0:
        subject += str(nans) + ' Price Not Found'
    # Create dataframe of price drop info to be emailed as a table
    price_drop_df = pd.DataFrame(price_drops,
                                 columns=['Game', 'Old Price',
                                          'New Price', 'Price Drop']
                                 )
    price_drop_df = price_drop_df.sort_values(by=['Price Drop'],
                                              ascending = False)
    # Create dataframe of null prices (no price found) to be emailed
    # as a table
    nan_prices_df = pd.DataFrame(nan_prices, columns=['Game', 'Game_ID'])
    # Create email using Gmail API
    try:
        # Create email message
        mail = create_message('me', 'ruperthart92@gmail.com', subject,
                              price_drop_df, failures, nan_prices_df)
        # Check that a token.pickle exists containing the gmail
        # credentials and load them
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                creds = pickle.load(token)
        # Create the gmail service using credentials and send message
        service = build('gmail', 'v1', credentials=creds)
        message = (service.users().messages().send(userId='me', body=mail)
                   .execute())
        print('email sent')
    except:
        # Mac OS error alert in case gmail email fails to send
        applescript = """
        display dialog "Playstation_scraper email failed to send." ¬
        with title "Playstation_scraper: Email Failed" ¬
        with icon caution ¬
        buttons {"OK"}
        """
        subprocess.call("osascript -e '{}'".format(applescript), shell=True)
        print('email failed')
        traceback.print_exc()
# Convert date time headers to strings with the same format as the
# original csv (this is the format that excel uses when you save as csv)
dates = df.columns[1:].tolist()
dates_as_strings = [date_obj.strftime('%d/%m/%Y') for date_obj in dates]
df.columns = df.columns[:1].tolist() + dates_as_strings
# Persist today's prices back to the tracking CSV and log the run date.
df.to_csv('game_prices.csv')
print('ran on ', date)
| rhart-rup/Playstation-Store-Price-Drop-Alert | main.py | main.py | py | 13,550 | python | en | code | 1 | github-code | 36 |
def see_and_say_util(number_str):
    """Run-length encode one look-and-say step.

    For each maximal run of equal digits in *number_str*, append the
    run length followed by the digit, e.g. '1211' -> '111221'.
    Returns '' for an empty input.
    """
    pieces = []
    start = 0
    total = len(number_str)
    while start < total:
        end = start
        while end < total and number_str[end] == number_str[start]:
            end += 1
        pieces.append(str(end - start))
        pieces.append(number_str[start])
        start = end
    return ''.join(pieces)
def see_and_say(n):
    """Return the first n+1 terms of the look-and-say sequence.

    The sequence starts at 1; each later term "reads aloud" the previous
    one via see_and_say_util.  Note: the seed is stored as an int while
    the generated terms are strings (preserved original behaviour).
    """
    cur = 1
    result = [cur]
    for _ in range(n):
        # `term` rather than `next`: the original shadowed the builtin.
        term = see_and_say_util(str(cur))
        result.append(term)
        cur = term
    return result
if __name__ == "__main__":
    # Demo: print the look-and-say sequence with 4, 6 and 8 terms.
    print(see_and_say(3))
    print(see_and_say(5))
    print(see_and_say(7))
| stgleb/algorithms-and-datastructures | strings/see_and_say.py | see_and_say.py | py | 638 | python | en | code | 0 | github-code | 36 |
17523919887 | #This tool is useful whenever you get some rough data deliverables
#I used this whenever I had ~100 individually names folders, all containing a single shapefile with the same name, "Data.shp"
#The goal was to rename the shapes from Data.shp, to the parent folder's name.shp
import os, arcpy
from subprocess import Popen
import subprocess
from os import listdir
from os.path import isfile, join
# Walk myDir, collect every shapefile, and rename each one to match its
# parent folder's name (e.g. .../FolderA/Data.shp -> .../FolderA/FolderA.shp).
myDir = ("Path to Folder, containing all of the sub folders")
shpList = []
# (removed unused csvList / newshpList accumulators)
for root, dirs, files in os.walk(myDir):
    for fname in files:  # renamed from `file`, which shadows the builtin
        if fname.endswith(".shp"):
            shpList.append(os.path.join(root, fname))
for rawShapefile in shpList:
    # parent folder name becomes the new shapefile base name
    folderName = str(os.path.basename(os.path.dirname(rawShapefile)))
    # os.path.join instead of hand-built "\\" concatenation (portable)
    renameShapefile = os.path.join(myDir, folderName, folderName + ".shp")
    print(renameShapefile)
    arcpy.Rename_management(rawShapefile, renameShapefile)
| hotlikesauce/Public-Tools | Rename Shapefile to Parent Folder.py | Rename Shapefile to Parent Folder.py | py | 922 | python | en | code | 0 | github-code | 36 |
25714644305 | import numpy as np
import matplotlib.pyplot as plt
def plot_with_exponential_averaging(x, y, label, alpha):
    """Plot an exponentially smoothed curve for y over x, with the raw
    series drawn faintly in the same color behind it."""
    smoothed = [y[0]]
    for value in y[1:]:
        smoothed.append(alpha * smoothed[-1] + (1 - alpha) * value)
    line = plt.plot(x, smoothed, label=label)
    plt.plot(x, y, color=line[0].get_color(), alpha=0.2)
def plot_train_result(result, label="", alpha=0.95, save_path="./", threshold=None):
    """Plot smoothed episode rewards against cumulative training steps and
    save the figure to save_path.

    result: iterable of dicts with 'r' (episode reward) and 'l' (episode length).
    threshold: y-position of the reference line; defaults to 110% of the max reward.
    """
    episode_rewards = [entry['r'] for entry in result]
    cumulative_steps = np.cumsum([entry['l'] for entry in result])
    plot_with_exponential_averaging(cumulative_steps, episode_rewards, label, alpha)
    reference = threshold if threshold else int(max(episode_rewards) * 1.1)
    plt.axhline(y=reference, color='grey', linestyle='-')
    plt.xlabel("Training Steps")
    plt.ylabel("Episode Reward")
    plt.legend()
    plt.title(label)
    plt.savefig(save_path)
    plt.cla()
| olenmg/dopamine-rl | utils/plot.py | plot.py | py | 804 | python | en | code | 0 | github-code | 36 |
72287095464 | #!/usr/local/python3/bin/python3
import sys
sys.path.append("..")
import tushare as ts
import re
import datetime
import basicdata.basic_mgr as sk
import time
import os
import pandas as pd
g_update_newest=False #True|False
# Whether to re-download the newest concept lists; usually not needed.
# Module-level cache: concept code -> concept name, e.g.:
#g_ctcode_name['TS56']='电改'
g_ctcode_name=None
# Module-level cache: ts_code -> {'id': [...], 'name': [...]}, e.g.:
g_tscode_concept=None
#g_tscode_concept['000008.SZ']={'id':['TS56','TS59'],'name':['电改','特斯拉']}
def get_ctname_by_tscode(pro, tscode):
    """Return the list of concept names for a TS stock code.

    Lazily populates the module cache on first use.

    :param pro: tushare pro API client
    :param tscode: stock code, e.g. '600848.SH'
    """
    # No `global` needed: the cache is only read here; it is (re)bound
    # inside init_tscode_concept.
    if g_tscode_concept is None:
        init_tscode_concept(pro)
    return g_tscode_concept[tscode]['name']
def get_ctcode_by_tscode(pro, tscode):
    """Return the list of concept ids (e.g. 'TS56') for a TS stock code.

    Lazily populates the module cache on first use.

    :param pro: tushare pro API client
    :param tscode: stock code, e.g. '600848.SH'
    """
    # No `global` needed: the cache is only read here; it is (re)bound
    # inside init_tscode_concept.
    if g_tscode_concept is None:
        init_tscode_concept(pro)
    return g_tscode_concept[tscode]['id']
def init_tscode_concept(pro):
    """Build the ts_code -> concept cache (g_tscode_concept).

    Reads per-stock concept CSVs from ./concept-data/ when available;
    otherwise downloads them via the tushare API (rate-limited to one
    request per second) and caches them to disk.
    """
    global g_tscode_concept
    global g_update_newest
    if g_tscode_concept is None:
        g_tscode_concept = {}
        ts_codes=sk.get_tscodes(pro)
        for i in range(len(ts_codes)):
            ts_code=ts_codes[i]
            path='./concept-data/'+ts_code+'.concept.csv'
            # prefer the on-disk cache unless a refresh was requested
            if g_update_newest == False and os.path.exists(path) == True:
                conceptdf=pd.read_csv(path)
            else:
                conceptdf=pro.concept_detail(ts_code=ts_code)
                if conceptdf is not None:
                    conceptdf.to_csv(path)
                time.sleep(1)  # throttle API calls
                print("download", path)
            if conceptdf is not None:
                conceptids=conceptdf['id'].values.tolist()
                conceptnames=conceptdf['concept_name'].values.tolist()
                g_tscode_concept[ts_code]={'id':conceptids, 'name':conceptnames}
def get_concept_map(pro):
    """Return the full concept code -> name mapping, populating the
    module cache on first use.

    :param pro: tushare pro API client
    """
    # No `global` needed for a read-only access.
    if g_ctcode_name is None:
        init_ctcode_name(pro)
    return g_ctcode_name
def get_name(pro, code):
    """Return the concept name for a concept code (e.g. 'TS2'),
    populating the module cache on first use.

    :param pro: tushare pro API client
    :param code: concept code
    """
    # No `global` needed for a read-only access.
    if g_ctcode_name is None:
        init_ctcode_name(pro)
    return g_ctcode_name[code]
def init_ctcode_name(pro):
    """Populate the concept-code -> concept-name cache from tushare."""
    global g_ctcode_name
    if g_ctcode_name is None:
        g_ctcode_name = {}
    frame = pro.concept(src='ts')
    codes = frame['code'].values.tolist()
    names = frame['name'].values.tolist()
    for code, name in zip(codes, names):
        g_ctcode_name[code] = name
if __name__== '__main__':
    # Manual smoke test of the cache helpers.
    # NOTE(review): the tushare API token is hard-coded — move it to an
    # environment variable or config file before sharing this code.
    pro = ts.pro_api('08aedc1cc54171e54a64bbe834ec1cb45026fa2ab39e9e4cb8208cad')
    init_ctcode_name(pro)
    print(g_ctcode_name)
    print(get_name(pro, 'TS2'))
    print(get_ctcode_by_tscode(pro, '600848.SH'))
    #conceptdf.to_csv('./concept.csv')
| haianhua/stock | stock/conceptdata/concept_mgr.py | concept_mgr.py | py | 2,624 | python | en | code | 0 | github-code | 36 |
71961526185 | # -*- coding: utf-8 -*-
from numpy import zeros
from copy import deepcopy
from numpy import cast
from numpy import dot
from numpy import linalg
class SSMOperation:
    """Operations on self-similarity matrices (SSMs) stored as 2-D numpy
    arrays. All methods copy their input and return a new array.

    Fixed: the original assigned ``map`` objects to numpy rows, which only
    worked under Python 2 (where ``map`` returns a list); those paths are
    now vectorized and behave the same for float matrices.
    """
    def localNormalize(self, M):
        """Scale M so its maximum value becomes 1.0.

        Assumes M is a float matrix with a non-zero maximum — TODO confirm.
        """
        M = deepcopy(M)
        return M / M.max()
    def enhance(self, M, length):
        """Diagonal smoothing: each cell (i, j) becomes the mean of the
        diagonal of the `length` x `length` sub-matrix starting at (i, j).
        (The original's backward-smoothing branch was dead code and has
        been removed.)
        """
        M = deepcopy(M)
        forwardM = zeros(M.shape)
        for i in range(M.shape[0]):
            for j in range(M.shape[1]):
                # trace of the sub-block == sum along its main diagonal;
                # windows are clipped at the matrix edge by slicing
                forwardM[i, j] = (M[slice(i, i + length), slice(j, j + length)].trace()) / length
        return forwardM
    def secondOrder(self, M):
        """Second-order similarity: cell (i, j) is the cosine similarity
        between row i and column j of M."""
        M = deepcopy(M)
        secondM = zeros(M.shape)
        for i in range(secondM.shape[0]):
            for j in range(secondM.shape[1]):
                rowVec = M[i, :]
                colVec = M[:, j]
                secondM[i, j] = dot(rowVec, colVec) / (linalg.norm(rowVec) * linalg.norm(colVec))
        return secondM
    def masker(self, M, threshold):
        """Binary mask: 1 where similarity >= threshold, else 0.

        Uses astype(int) instead of numpy's deprecated ``cast`` table
        (removed in numpy 2.0).
        """
        M = deepcopy(M)
        return (M >= threshold).astype(int)
| fukuball/lyrics-match | p-library/lyrics_form_analysis/SSMOperation.py | SSMOperation.py | py | 1,578 | python | en | code | 19 | github-code | 36 |
35018437018 | import line
import cv2
import time
import serial
# Camera
# Camera: default capture device
vid = cv2.VideoCapture(0)
# Elegoo motor power levels (PWM duty, 0-255)
power_forward = 100
power_sideway_minimal = 130
power_sideway_maximal = 200
# frame counter / FPS (images-per-second) bookkeeping
compteur = 0
ips = 0
after = time.time() + 1
imprimer_taille_image = True  # print the frame size once on startup
# steering angle bands (degrees): [left_begin, left_end) steers left,
# (right_begin, right_end] steers right, in between drives straight
left_begin = 0
left_end = 85
right_begin = 95
right_end = 180
compteur_did_not_find_lines = 0  # consecutive frames with no detected line
def power_engine_from_angle(begin, end, angle):
    """Map an angle inside [begin, end] to a motor power, scaling linearly
    between power_sideway_minimal and power_sideway_maximal and clamping
    the result at 255 (max PWM duty)."""
    span = end - begin
    fraction = angle / span
    power = power_sideway_minimal + (power_sideway_maximal - power_sideway_minimal) * fraction
    return int(min(power, 255))
def send_command(left, right):
    """Write a "<left>,<right>," motor-power command to the Arduino.

    Relies on the module-global `arduino` serial handle bound in the main
    block. Errors are printed and swallowed so a transient serial failure
    does not stop the control loop.
    """
    try:
        cmd = str(left) + ',' + str(right) + ','
        arduino.write(cmd.encode())
        time.sleep(0.1) # wait for arduino to answer
        arduino.flushOutput()
        arduino.flushInput()
    except Exception as ex:
        print(ex)
if __name__ == '__main__':
    with serial.Serial("/dev/ttyACM0", 9600, timeout=1) as arduino:
        time.sleep(0.1)  # wait for serial to open
        # interactive configuration (French prompts kept as-is)
        video = input("Voulez vous la vidéo ? Y or N ")
        if video == "Y":
            video = True
        else:
            video = False
        suivi = input("Voulez vous le suivi de commande ? Y or N ")
        if suivi == "Y":
            suivi = True
        else:
            suivi = False
        hist_size = input("Quelle taille d'historique voulez vous ? > 0")
        angle_hist = line.Historique(hist_size=int(hist_size))
        if arduino.isOpen():
            print("{} connected!".format(arduino.port))
            # Line detection loop
            while True:
                ret, original = vid.read()
                ips, compteur, after = line.caclulate_ips(ips, compteur, after)
                # if ips == 0 the FPS counter is not displayed
                angle, size, img_line_plus_mean, did_not_find_lines = line.line_detection(hist=angle_hist, ips=ips,
                                                                                          display_image=False,
                                                                                          display_mean=video,
                                                                                          original_picture=original)
                # print image size once
                if imprimer_taille_image:
                    print(size)
                    imprimer_taille_image = False
                # stop the program by pressing q
                # NOTE(review): `&` binds tighter than `==`, so this evaluates
                # cv2.waitKey(1) & 0xFF == (ord('q') & video) — likely intended
                # as (cv2.waitKey(1) & 0xFF == ord('q')) and video; confirm.
                if cv2.waitKey(1) & 0xFF == ord('q') & video:
                    break
                if did_not_find_lines:
                    compteur_did_not_find_lines += 1
                # Reaction to angle
                # The motors are wired inverted
                # ENA, ENB
                if did_not_find_lines and compteur_did_not_find_lines > 10:
                    commande = "Backward"
                    send_command(10, 10) # sentinel code understood as "back up"
                    power = power_forward
                    compteur_did_not_find_lines = 0
                elif left_end > angle >= left_begin:
                    commande = "left"
                    power = power_engine_from_angle(left_begin, left_end, angle)
                    send_command(power, 0) # the robot turns right (less effective)
                elif right_end >= angle > right_begin:
                    commande = "right"
                    power = power_engine_from_angle(right_begin, right_end, angle)
                    send_command(0, power) # the robot turns left (very effective)
                elif right_begin >= angle >= left_end:
                    commande = "Forward"
                    send_command(power_forward, power_forward)
                    power = power_forward
                if suivi:
                    print("Commande = " + commande + " " * (10-len(commande)) + " Angle = " + str(angle) + " " * (10-len(str(angle))) + " Power_engine = " + str(power))
| GuillaumeCariou/I3S_Tutorship_Internship | Python/Line_Following/Line/main_rgb.py | main_rgb.py | py | 3,992 | python | en | code | 0 | github-code | 36 |
10660236943 | # coding=utf-8
import mysql.connector
from mysql.connector import Error
import requests
import json
import datetime
# Weekday names indexed 0..6 starting at Sunday (Portuguese).
dias_semana = ['Domingo', 'Segunda-feira', 'Terça-feira', 'Quarta-feira', 'Quinta-feira', 'Sexta-feira', 'Sábado']
try:
    # fetch the chat dataset (JSON) from GitHub
    url_json = "http://raw.githubusercontent.com/camilabianchi/graces_desafio/master/datasets/chatOnline.jsonl"
    req = requests.get(url_json)
    dicionario = json.loads(req.text)
    if len(dicionario) > 0:
        # open the database connection
        connection = mysql.connector.connect(host='localhost', port='3306', database='[db]', user='[user]',
                                             password='[pwd]')
        # iterate over the chat records
        for item in dicionario:
            # dates as strings (normalize "/" to "-")
            data_inicio_str = item["Data da conversa (Inicio)"].replace("/", "-")
            data_fim_str = item["Data da conversa (Fim)"].replace("/", "-")
            # parse start/end timestamps
            dt_inicio = datetime.datetime.strptime(data_inicio_str, '%d-%m-%Y %H:%M')
            dt_termino = datetime.datetime.strptime(data_fim_str, '%d-%m-%Y %H:%M')
            # values for the INSERT
            email = item["Visitor_Email"]
            nome = item["Visitor_Email"]  # NOTE(review): name set to the e-mail — confirm intended
            agente = item["Agente"]
            status = "Atendido" if item["Atendido"] == "Sim" else "Não atendido"
            origem = 'Chat'
            # Python weekday(): Monday=0..Sunday=6; shift so Sunday=0 to index dias_semana
            semana = 0 if dt_inicio.weekday() == 6 else dt_inicio.weekday() + 1
            semana_nome = dias_semana[semana]
            if connection.is_connected():
                cursor = connection.cursor()
                # parameterized insert (safe against SQL injection)
                sql_insert = """INSERT INTO contatos(email, nome, data_inicio, data_termino, agente, status, origem, semana, semana_nome)
                                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) """
                record = (email, nome, dt_inicio, dt_termino, agente, status, origem, semana, semana_nome)
                try:
                    cursor.execute(sql_insert, record)
                    connection.commit()
                except Error as e:
                    # record the failure in the error-log table instead of aborting the import
                    sql_insert = """INSERT INTO log_erros(log_mensagem) VALUES (%s) """
                    record = (e.msg.replace("'", ""),)
                    cursor.execute(sql_insert, record)
                    connection.commit()
                finally:
                    cursor.close()
        # close the database connection
        if connection.is_connected():
            connection.close()
except Error as e:
    print("Error while connecting to MySQL", e.msg)
| camilabianchi/graces_desafio | 2_importacao_python_airflow/importa_chat.py | importa_chat.py | py | 2,617 | python | pt | code | 0 | github-code | 36 |
20405590464 | import matplotlib.pyplot as plt
from tespy.networks import Network
from tespy.connections import Connection
from tespy.components import (Source, Sink, Condenser, Pump)
# Create a TESPy network
nw = Network(fluids=['water', 'NH3'])
# Add components and connections to the network
source = Source('source')
sink = Sink('sink')
condenser = Condenser('condenser')
pump = Pump('pump')
nw.add_conns(Connection(source, 'out1', condenser, 'in1'))
nw.add_conns(Connection(condenser, 'out1', sink, 'in1'))
nw.add_conns(Connection(condenser, 'out2', pump, 'in1'))
nw.add_conns(Connection(pump, 'out1', condenser, 'in2'))
# Solve the network
# NOTE(review): no boundary conditions (pressures, enthalpies, mass flows)
# are set before solving — confirm against a working TESPy example.
nw.solve('design')
# Extract the components and connections information
# NOTE(review): the .keys() access below and the .x/.y/.inl/.outl attributes
# used for plotting do not appear elsewhere in this file — verify they match
# the installed TESPy version's API.
components = nw.components.keys()
connections = nw.connections.keys()
# Create a figure and axis
fig, ax = plt.subplots()
# Plot the components
for component in components:
    x = nw.components[component].x
    y = nw.components[component].y
    ax.scatter(x, y, label=component)
# Plot the connections
for connection in connections:
    x = [nw.connections[connection].inl.x, nw.connections[connection].outl.x]
    y = [nw.connections[connection].inl.y, nw.connections[connection].outl.y]
    ax.plot(x, y, '-', label=connection)
# Add labels and legend
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.legend()
# Show the plot
plt.show()
| JubranKhattab/testing_tespy_projects | subsystems/ploting.py | ploting.py | py | 1,346 | python | en | code | 0 | github-code | 36 |
# Task: read a sequence of numbers and print the distinct values,
# preserving the order in which they first appeared.
numbers = []
unique_values = []
count = int(input('Введите количество чисел: '))
for index in range(count):
    value = int(input(f'Введите число № {index + 1}: '))
    numbers.append(value)
    if value not in unique_values:
        unique_values.append(value)
print(f'Уникальные элементы списка: {unique_values}')
| Minions-Wave/GB-Minions-Wave | The Big Brain Solutions/Personal Zone/Zhuravlevivan Solutions/Python/HW/04/Task_03.py | Task_03.py | py | 571 | python | ru | code | 2 | github-code | 36 |
26634573192 | import requests
def request_demo():
    """Request a WeChat Work (企业微信) access token through a local proxy
    and return the HTTP response (previously the response was discarded).

    NOTE(review): corpid/corpsecret are hard-coded — move them to
    configuration before sharing this code.
    WARNING: verify=False disables TLS certificate checks (needed for the
    intercepting proxy on 127.0.0.1:8080); never use in production.
    """
    url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
    param = {
        "corpid": "ww93348658d7c66ef4",
        "corpsecret": "T0TFrXmGYel167lnkzEydsjl6bcDDeXVmkUnEYugKIw"
    }
    proxy = {
        "http": "http://127.0.0.1:8080",
        "https": "http://127.0.0.1:8080"
    }
    res = requests.get(url=url, params=param, proxies=proxy, verify=False)
    return res
if __name__ == '__main__':
    # Manual smoke test: exercise the token request through the local proxy.
    request_demo()
12243691897 | from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.contrib.auth.models import User, Permission
from django.contrib import admin
from django_comment import models
from .test_app.models import TestModel
from django_comment.admin import CommentedItemAdmin, CommentedItemInline
class CommentedItemAdminTestCase(TestCase):
    """Tests for CommentedItemAdmin from django_comment.admin."""
    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: a commented model instance, a comment author,
        # a superuser, a RequestFactory, and the admin under test.
        cls.a_model = TestModel.objects.create()
        cls.author = User.objects.create(username='author')
        cls.superuser = User.objects.create(username='superuser',
                                            is_superuser=True)
        cls.request_factory = RequestFactory()
        url = reverse('admin:django_comment_commenteditem_add')
        cls.add_request = cls.request_factory.get(url)
        cls.commented_item_admin = CommentedItemAdmin(
            models.CommentedItem,
            admin.site
        )
    def test_item(self):
        # item() resolves a comment back to the object it annotates.
        comment = self.a_model.comments.create(comment='test comment',
                                               author=self.author)
        self.assertEqual(self.commented_item_admin.item(comment), self.a_model)
    def test_has_add_permission(self):
        # Adding comments directly through the admin "add" view is disallowed.
        self.assertFalse(self.commented_item_admin.has_add_permission(
            self.add_request
        ))
    def test_has_delete_permission_with_author(self):
        # A comment's own author may not delete it via the admin.
        comment = self.a_model.comments.create(comment='test comment',
                                               author=self.author)
        url = reverse('admin:django_comment_commenteditem_delete',
                      args=(comment.id,))
        request = self.request_factory.get(url)
        request.user = self.author
        self.assertFalse(self.commented_item_admin.has_delete_permission(
            request, obj=comment
        ))
    def test_has_delete_permission_with_superuser(self):
        # A superuser may delete any comment via the admin.
        comment = self.a_model.comments.create(comment='test comment',
                                               author=self.author)
        url = reverse('admin:django_comment_commenteditem_delete',
                      args=(comment.id,))
        request = self.request_factory.get(url)
        request.user = self.superuser
        self.assertTrue(self.commented_item_admin.has_delete_permission(
            request, obj=comment
        ))
class CommentedItemInlineTestCase(TestCase):
    """Tests for the CommentedItemInline admin inline."""
    @classmethod
    def setUpTestData(cls):
        cls.a_model = TestModel.objects.create()
        cls.author = User.objects.create(username='author')
        cls.request_factory = RequestFactory()
        cls.commented_item_inline = CommentedItemInline(
            TestModel,
            admin.site
        )
    def test_has_change_permission(self):
        # Even the comment's author may not change comments through the inline.
        comment = self.a_model.comments.create(comment='test comment',
                                               author=self.author)
        url = reverse('admin:test_app_testmodel_change',
                      args=(self.a_model.id,))
        request = self.request_factory.get(url)
        request.user = self.author
        self.assertFalse(self.commented_item_inline.has_change_permission(
            request, obj=self.a_model
        ))
class HasCommentsAdminTestCase(TestCase):
    """Integration test: posting the inline comment formset on a model's
    admin change page saves a comment with request.user as the author."""
    @classmethod
    def setUpTestData(cls):
        cls.a_model = TestModel.objects.create()
        cls.author = User.objects.create(username='author', is_staff=True)
        # Grant the model and comment permissions so the staff user can
        # access the change page and submit the inline formset.
        permissions = set(Permission.objects.filter(
            codename__contains='testmodel'
        )) | set(Permission.objects.filter(
            codename__contains='commenteditem'
        ))
        cls.author.user_permissions.add(*permissions)
    def test_save_formset(self):
        url = reverse('admin:test_app_testmodel_change',
                      args=(self.a_model.id,))
        self.client.force_login(user=self.author)
        # Formset field-name prefix used by the generic-relation inline.
        prefix = 'django_comment-commenteditem-content_type-object_id-'
        response = self.client.post(url, follow=True, data={
            prefix + 'TOTAL_FORMS': 1,
            prefix + 'INITIAL_FORMS': 0,
            prefix + '0-comment': 'test comment',
            '_continue': 'Save+and+continue+editing',
        })
        self.assertEqual(response.status_code, 200)
        # save_formset should have attached the logged-in user as author.
        comment = self.a_model.comments.first()
        self.assertEqual(comment.author, self.author)
| genosltd/django-comment | tests/test_admin.py | test_admin.py | py | 4,334 | python | en | code | 0 | github-code | 36 |
72425931625 | from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import pandas as pd
from selenium.webdriver.common.by import By
import re
from webdriver_manager.chrome import ChromeDriverManager
import json
import time
import os
# H&M men's-section category slugs to scrape.
# NOTE(review): 'cardigans-sweaters' and 'jackets-coats' appear twice; the
# duplicates are harmless (results keyed by category) — confirm intended.
products_categories = [
    't-shirts-tank-tops',
    'pants',
    'hoodies-sweatshirts',
    'shirts',
    'suits-blazers',
    'cardigans-sweaters',
    'jeans',
    'jackets-coats',
    'shorts',
    'swimwear',
    'sportswear',
    'underwear',
    'socks',
    'accessories',
    'shoes',
    'sleepwear-loungewear',
    'premium-selection',
    'cardigans-sweaters',
    'jackets-coats',
    'knitwear'
]
parent_link = 'https://www2.hm.com/en_us'
# query string that puts every product on a single listing page
additional_link = '?sort=stock&image-size=small&image=model&offset=0&page-size={}'
gender_spec_url = 'men/products'
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
driver = webdriver.Chrome(options=options)
products_links = {}
# BUG FIX: the original iterated over an undefined name `categories`
# (NameError); it clearly meant the products_categories list above, which
# also makes the old inner membership check redundant.
for category in products_categories:
    products_links[category] = []
    print(category)
    # build the category listing URL
    # NOTE(review): os.path.join on URLs only works with '/' separators
    # (POSIX); urllib.parse.urljoin would be more portable.
    cat_url = os.path.join(parent_link, gender_spec_url, category + '.html')
    print(cat_url)
    # open the listing and read the total product count for this category
    driver.get(cat_url)
    time.sleep(0.2)
    product_count_element = driver.find_element(By.CLASS_NAME, "filter-pagination")
    product_count_element_text = product_count_element.text
    product_count_str = product_count_element_text.split(' ')[0]
    if product_count_str == '':
        continue
    total_count = int(product_count_str)
    print(total_count)
    # re-request the page sized to show every product at once
    all_products_url = cat_url + additional_link.format(total_count)
    driver.get(all_products_url)
    element_by_class = driver.find_element(By.CLASS_NAME, "products-listing")
    products_elements = element_by_class.find_elements(By.CLASS_NAME, "product-item")
    for pe in products_elements:
        # BUG FIX: search within each product element; the original queried
        # the driver, which always resolved to the first product on the page.
        single_product = pe.find_element(By.CLASS_NAME, "item-link.remove-loading-spinner")
        href = single_product.get_attribute("href")
        title = single_product.get_attribute('title')
        products_links[category].append([title, href])
# BUG FIX: use a context manager so the output file handle is closed.
with open('product_links_men.json', 'w') as f:
    json.dump(products_links, f)
| umairahmad89/h-m-scraper | scraper.py | scraper.py | py | 2,718 | python | en | code | 0 | github-code | 36 |
10836961705 | import pandas as pd
from flask import Flask, jsonify, request,json
import pickle
# Load the trained model once at import time.
# BUG FIX: close the file handle (the original leaked an open file).
# WARNING: unpickling executes arbitrary code — only load trusted model files.
with open('model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
app = Flask(__name__)
@app.route('/', methods=['POST'])
def predict():
    """Score each row in the posted payload with the pickled model.

    Expects a JSON body shaped like {"0": {"<row_id>": [feature, ...], ...}}
    and returns a JSON list with one prediction per row.
    """
    body_dict = json.loads(request.get_data().decode('utf-8'))
    data = body_dict['0']
    # predict row-by-row; model.predict expects a 2-D array, hence [features]
    prediction = []
    for features in data.values():
        row_prediction = model.predict([features]).tolist()
        prediction.append(row_prediction[0])
    # (removed the unused `result` dict and commented-out debug code)
    return jsonify(prediction)
if __name__ == '__main__':
    # Run the development server locally (debug mode; not for production).
    app.run(port = 5000, debug=True)
import pygame
# start the program (initialize pygame)
pygame.init()
# color constants (RGB)
black=( 0, 0, 0)
white=( 255, 255, 255)
green=( 0, 255, 0)
red=( 255, 0, 0)
size = [700,700]
screen=pygame.display.set_mode(size)
pygame.display.set_caption("Professor Craven's Cool Game")
# NOTE(review): `done` is used as a "keep running" flag (True while running);
# the name is inverted relative to its meaning.
done = True
clock=pygame.time.Clock()
screen.fill(white)
pygame.display.flip()
# main event loop: exit when the window is closed
while done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done=False
    clock.tick(20)  # cap the loop at 20 frames per second
pygame.quit()
| AndreiTsukov/PythonFiles | Classwork/pygame/lesson1/snegovik.py | snegovik.py | py | 532 | python | en | code | 0 | github-code | 36 |
496475437 | from dagster_pandas import DataFrame
from google.cloud.bigquery.job import LoadJobConfig, QueryJobConfig
from google.cloud.bigquery.table import EncryptionConfiguration, TimePartitioning
from dagster import InputDefinition, List, Nothing, OutputDefinition, Path, check, solid
from .configs import (
define_bigquery_create_dataset_config,
define_bigquery_delete_dataset_config,
define_bigquery_load_config,
define_bigquery_query_config,
)
from .types import BigQueryLoadSource
_START = 'start'
def _preprocess_config(cfg):
destination_encryption_configuration = cfg.get('destination_encryption_configuration')
time_partitioning = cfg.get('time_partitioning')
if destination_encryption_configuration is not None:
cfg['destination_encryption_configuration'] = EncryptionConfiguration(
kms_key_name=destination_encryption_configuration
)
if time_partitioning is not None:
cfg['time_partitioning'] = TimePartitioning(**time_partitioning)
return cfg
def bq_solid_for_queries(sql_queries):
    """
    Factory returning a Dagster solid that executes the given BigQuery SQL
    queries and yields their results as a list of pandas DataFrames.
    Expects a BQ client to be provisioned in resources as context.resources.bigquery.
    """
    sql_queries = check.list_param(sql_queries, 'sql queries', of_type=str)
    @solid(
        input_defs=[InputDefinition(_START, Nothing)],
        output_defs=[OutputDefinition(List[DataFrame])],
        config=define_bigquery_query_config(),
        required_resource_keys={'bigquery'},
        metadata={'kind': 'sql', 'sql': '\n'.join(sql_queries)},
    )
    def bq_solid(context): # pylint: disable=unused-argument
        # Optional job config from solid config; serialized entries are
        # expanded into client objects by _preprocess_config.
        query_job_config = _preprocess_config(context.solid_config.get('query_job_config', {}))
        # Retrieve results as pandas DataFrames
        results = []
        for sql_query in sql_queries:
            # We need to construct a new QueryJobConfig for each query.
            # See: https://bit.ly/2VjD6sl
            cfg = QueryJobConfig(**query_job_config) if query_job_config else None
            context.log.info(
                'executing query %s with config: %s'
                % (sql_query, cfg.to_api_repr() if cfg else '(no config provided)')
            )
            results.append(
                context.resources.bigquery.query(sql_query, job_config=cfg).to_dataframe()
            )
        return results
    return bq_solid
# Shared config schema for all three BigQuery load solids below.
BIGQUERY_LOAD_CONFIG = define_bigquery_load_config()
@solid(
    input_defs=[InputDefinition('paths', List[Path])],
    output_defs=[OutputDefinition(Nothing)],
    config=BIGQUERY_LOAD_CONFIG,
    required_resource_keys={'bigquery'},
)
def import_gcs_paths_to_bq(context, paths):
    """Solid: load the files at the given GCS paths into BigQuery."""
    return _execute_load_in_source(context, paths, BigQueryLoadSource.GCS)
@solid(
    input_defs=[InputDefinition('df', DataFrame)],
    output_defs=[OutputDefinition(Nothing)],
    config=BIGQUERY_LOAD_CONFIG,
    required_resource_keys={'bigquery'},
)
def import_df_to_bq(context, df):
    """Solid: load a pandas DataFrame into BigQuery."""
    return _execute_load_in_source(context, df, BigQueryLoadSource.DataFrame)
@solid(
    input_defs=[InputDefinition('path', Path)],
    output_defs=[OutputDefinition(Nothing)],
    config=BIGQUERY_LOAD_CONFIG,
    required_resource_keys={'bigquery'},
)
def import_file_to_bq(context, path):
    """Solid: load a local file into BigQuery."""
    return _execute_load_in_source(context, path, BigQueryLoadSource.File)
def _execute_load_in_source(context, source, source_name):
    """Shared implementation for the load solids: build an optional
    LoadJobConfig from solid config and run a blocking load job into the
    configured destination table.
    """
    destination = context.solid_config.get('destination')
    load_job_config = _preprocess_config(context.solid_config.get('load_job_config', {}))
    cfg = LoadJobConfig(**load_job_config) if load_job_config else None
    context.log.info(
        'executing BQ load with config: %s for source %s'
        % (cfg.to_api_repr() if cfg else '(no config provided)', source)
    )
    # .result() blocks until the load job finishes (raises on failure)
    context.resources.bigquery.load_table_from_source(
        source_name, source, destination, job_config=cfg
    ).result()
@solid(
    input_defs=[InputDefinition(_START, Nothing)],
    config=define_bigquery_create_dataset_config(),
    required_resource_keys={'bigquery'},
)
def bq_create_dataset(context):
    '''BigQuery Create Dataset.
    This solid encapsulates creating a BigQuery dataset.
    Expects a BQ client to be provisioned in resources as context.resources.bigquery.
    '''
    # exists_ok: when True, succeed silently if the dataset already exists
    (dataset, exists_ok) = [context.solid_config.get(k) for k in ('dataset', 'exists_ok')]
    context.log.info('executing BQ create_dataset for dataset %s' % (dataset))
    context.resources.bigquery.create_dataset(dataset, exists_ok)
@solid(
    input_defs=[InputDefinition(_START, Nothing)],
    config=define_bigquery_delete_dataset_config(),
    required_resource_keys={'bigquery'},
)
def bq_delete_dataset(context):
    '''BigQuery Delete Dataset.
    This solid encapsulates deleting a BigQuery dataset.
    Expects a BQ client to be provisioned in resources as context.resources.bigquery.
    '''
    # delete_contents: also drop tables; not_found_ok: ignore a missing dataset
    (dataset, delete_contents, not_found_ok) = [
        context.solid_config.get(k) for k in ('dataset', 'delete_contents', 'not_found_ok')
    ]
    context.log.info('executing BQ delete_dataset for dataset %s' % dataset)
    context.resources.bigquery.delete_dataset(
        dataset, delete_contents=delete_contents, not_found_ok=not_found_ok
    )
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-gcp/dagster_gcp/bigquery/solids.py | solids.py | py | 5,243 | python | en | code | 2 | github-code | 36 |
21120272187 | import sys
import pickle
import torch as T
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
sys.path.append("../") # nopep8
from model.dialog_acts import Encoder
from DataLoader.bucket_and_batch import bucket_and_batch
import numpy as np
import string
import random
device = T.device('cuda' if T.cuda.is_available() else 'cpu')
max_grad_norm = 1
with open("../data/processed_data.pkl", "rb") as fp:
data = pickle.load(fp)
labels2idx = data["labels2idx"]
idx2labels = {i: v for v, i in labels2idx.items()}
train_queries_vec = data["train_queries_vec"]
train_acts_vec = data["train_acts_vec"]
test_queries_vec = data["test_queries_vec"]
test_acts_vec = data["test_acts_vec"]
model = Encoder(D=test_queries_vec.shape[-1], classes_num=len(labels2idx))
model = model.cuda()
parameter_count = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Parameter Count: ", parameter_count)
optimizer = T.optim.Adam(model.parameters(), lr=1e-3)
def loss_fn(logits, labels, l2=1e-6):
regularization = T.tensor(0.).to(device) # .to(device)
for name, param in model.named_parameters():
if 'bias' not in name and 'embedding' not in name:
regularization += T.norm(param).pow(2)
loss = nn.MSELoss()
output = loss(logits, labels) + l2*regularization
return output
batches_train_queries, batches_train_classes = bucket_and_batch(
train_queries_vec, train_acts_vec, 64, len(labels2idx))
batches_test_queries, batches_test_classes = bucket_and_batch(
test_queries_vec, test_acts_vec, 64, len(labels2idx))
def predict(queries, classes, train=True):
global model
if train:
model = model.train()
else:
model = model.eval()
logits = model(T.tensor(queries).to(device))
loss = loss_fn(logits, T.tensor(classes).float().to(device))
_, sorted_idx = T.sort(logits, dim=-1, descending=True)
sorted_idx = sorted_idx[:, 0:2]
# print(sorted_idx.size())
sorted_idx = sorted_idx.cpu().numpy().tolist()
_, gold_sorted_idx = T.sort(T.tensor(classes).to(device), dim=-1, descending=True)
gold_sorted_idx = gold_sorted_idx[:, 0:2]
# print(gold_sorted_idx.size())
gold_sorted_idx = gold_sorted_idx.cpu().numpy().tolist()
score = 0
total = 0
for sorted_id, gold_sorted_id in zip(sorted_idx, gold_sorted_idx):
for id in sorted_id:
if id in gold_sorted_id:
score += 1
total += 1
return loss, (score/total)
best_val_accuracy = 0
for epoch in range(100):
i = 0
for batch_X, batch_Y in zip(batches_train_queries, batches_train_classes):
loss, accuracy = predict(batch_X, batch_Y, train=True)
loss.backward()
T.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if i % 100 == 0:
print("Step {}, Loss: {}, Accuracy: {}".format(i, loss, accuracy))
i += 1
print("\n\nStarting Validation\n\n")
total_val_accuracy = 0
i = 0
for batch_X, batch_Y in zip(batches_test_queries, batches_test_classes):
with T.no_grad():
loss, accuracy = predict(batch_X, batch_Y, train=False)
total_val_accuracy += accuracy
if i % 100 == 0:
print("Step {}, Loss: {}, Accuracy: {}".format(i, loss, accuracy))
i += 1
mean_accuracy = total_val_accuracy/len(batches_test_queries)
print("\n\nEpoch {}, Validation Result: Accuracy: {}\n".format(epoch, mean_accuracy))
if mean_accuracy > best_val_accuracy:
best_val_accuracy = mean_accuracy
T.save({
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
}, "../Model_Backup/model.pt")
print("\nCheckpoint Saved\n")
| JRC1995/Chatbot | Classifier/train_and_test/train.py | train.py | py | 3,877 | python | en | code | 79 | github-code | 36 |
23435816279 | import unittest
import arcpy
import os
import UnitTestUtilities
import Configuration
class RadialLineOfSightTestCase(unittest.TestCase):
    ''' Test all tools and methods related to the Radial Line Of Sight tool
    in the Military Tools toolbox'''
    # NOTE(review): these two class attributes are never used in this test case
    inputTable = None
    outputPoints = None
    def setUp(self):
        # Prepare the scratch GDB, input/output paths, and the Spatial
        # Analyst extension license required by the tool.
        if Configuration.DEBUG == True: print("     RadialLineOfSightTestCase.setUp")
        UnitTestUtilities.checkArcPy()
        if(Configuration.militaryScratchGDB == None) or (not arcpy.Exists(Configuration.militaryScratchGDB)):
            Configuration.militaryScratchGDB = UnitTestUtilities.createScratch(Configuration.militaryDataPath)
        arcpy.env.scratchWorkspace = Configuration.militaryScratchGDB
        self.observers = os.path.join(Configuration.militaryInputDataGDB, "RLOS_Observers")
        self.inputSurface = os.path.join(Configuration.militaryInputDataGDB, "ElevationUTM_Zone10")
        self.outputRLOS = os.path.join(Configuration.militaryScratchGDB, "outputRadialLineOfSight")
        if arcpy.CheckExtension("Spatial") == "Available":
            arcpy.CheckOutExtension("Spatial")
            arcpy.AddMessage("Spatial checked out")
    def tearDown(self):
        # Return the extension license and remove the scratch workspace.
        if Configuration.DEBUG == True: print("     RadialLineOfSightTestCase.tearDown")
        arcpy.CheckInExtension("Spatial");
        UnitTestUtilities.deleteScratch(Configuration.militaryScratchGDB)
    def test_radial_line_of_sight_desktop(self):
        ''' Test Radial Line Of Sight in ArcGIS Desktop'''
        # Runs the Desktop toolbox variant and checks the output feature count.
        try:
            runToolMessage = ".....RadialLineOfSightTestCase.test_Radial_line_of_sight_desktop"
            arcpy.ImportToolbox(Configuration.military_DesktopToolboxPath, "mt")
            arcpy.AddMessage(runToolMessage)
            Configuration.Logger.info(runToolMessage)
            arcpy.RadialLineOfSight_mt(self.observers, self.inputSurface, self.outputRLOS)
            self.assertTrue(arcpy.Exists(self.outputRLOS), "Output dataset does not exist or was not created")
            featureCount = int(arcpy.GetCount_management(self.outputRLOS).getOutput(0))
            expectedFeatures = int(3501)
            self.assertEqual(featureCount, expectedFeatures, "Expected %s features, but got %s" % (str(expectedFeatures), str(featureCount)))
        except arcpy.ExecuteError:
            self.fail(runToolMessage + "\n" + arcpy.GetMessages())
            UnitTestUtilities.handleArcPyError()
    def test_radial_line_of_sight_pro(self):
        ''' Test Radial Line Of Sight in ArcGIS Pro'''
        # Mirrors the desktop test using the Pro toolbox (same expected count).
        try:
            runToolMessage = ".....RadialLineOfSightTestCase.test_Radial_line_of_sight_pro"
            arcpy.ImportToolbox(Configuration.military_ProToolboxPath, "mt")
            arcpy.AddMessage(runToolMessage)
            Configuration.Logger.info(runToolMessage)
            arcpy.RadialLineOfSight_mt(self.observers, self.inputSurface, self.outputRLOS)
            self.assertTrue(arcpy.Exists(self.outputRLOS), "Output dataset does not exist or was not created")
            featureCount = int(arcpy.GetCount_management(self.outputRLOS).getOutput(0))
            expectedFeatures = int(3501)
            self.assertEqual(featureCount, expectedFeatures, "Expected %s features, but got %s" % (str(expectedFeatures), str(featureCount)))
        except arcpy.ExecuteError:
            self.fail(runToolMessage + "\n" + arcpy.GetMessages())
            UnitTestUtilities.handleArcPyError()
| tomwuvip/military-tools-geoprocessing-toolbox | utils/test/visibility_tests/RadialLineOfSightTestCase.py | RadialLineOfSightTestCase.py | py | 3,458 | python | en | code | null | github-code | 36 |
477381730 | import re
class DbStructure:
    """ Mock class used when working with classes which read the database structure/rows

    This handles just a few simple queries: SHOW TABLES, SHOW CREATE TABLE,
    and SELECT * FROM.  The same object stands in for both the connection
    and its cursor.
    """
    def __init__(self, tables, table_rows):
        """
        :param tables: Map from table name to its CREATE TABLE statement
        :type tables: dict
        :param table_rows: Map from table name to a list of row dictionaries
        :type table_rows: dict
        """
        self.tables = tables
        self.table_rows = table_rows
        self.executed = False

    def cursor(self, cursor_type=None):
        """ Mock for the db `cursor` method. Returns the equivalent of the default cursor

        cursor_type is ignored but included for consistency with API standard

        :returns: self
        :rtype: self
        """
        return self

    def execute(self, query):
        """ Checks the query to see what is being executed and loads up the appropriate result set

        raises a ValueError if an unsupported query is found

        :returns: None
        """
        # You could imagine each query being handled by a separate object, but
        # there are only three queries we are supporting, and that is unlikely
        # to change anytime soon, so I'm going to keep this all in the same class
        normalized = re.sub(r'\s+', ' ', query).lower()
        if normalized == 'show tables':
            self._load_show_tables_result()
        elif normalized[:17] == 'show create table':
            self._load_show_create_result(normalized)
        else:
            self._load_select_all_result(normalized)
        self.executed = True
        self.iterator_index = -1

    def _load_show_tables_result(self):
        """ Loads up a result set internally to act as if a SHOW TABLES query was executed
        """
        # results needs to be a list with a dictionary of table names as values (the key is ignored)
        self.results = [{table: table for table in sorted(self.tables)}]

    def _load_show_create_result(self, query):
        """ Loads up a result set internally to act as if a SHOW CREATE TABLE query was executed

        :param query: The query being executed (already normalized/lowercased)
        :type query: string
        """
        table_name = query.split()[-1]
        if table_name not in self.tables:
            raise ValueError(
                "Cannot mock cursor.execute for query %s because table %s was not set" % (query, table_name)
            )
        self.results = [{'Create Table': self.tables[table_name]}]

    def _load_select_all_result(self, query):
        """ Loads up a result set internally to act as if a SELECT * FROM query was executed

        :param query: The query executed (already normalized/lowercased)
        :type query: string
        """
        # Raw string: '\*' and '\S' are regex escapes, not string escapes
        # (the old non-raw literal triggered invalid-escape warnings).
        m = re.match(r'select \* from (\S+)', query)
        if not m:
            raise ValueError(
                "Cannot mock cursor.execute for query %s because I was expecting a select query but cannot find the table name"
                % query
            )

        table_name = m.groups()[0].strip('`')
        if table_name not in self.table_rows:
            raise ValueError(
                "Cannot mock cursor.execute for query %s because table %s was not set to have any rows" %
                (query, table_name)
            )
        self.results = self.table_rows[table_name]

    def __iter__(self):
        """ Initializes iteration through the result set
        """
        return self

    def __next__(self):
        """ Returns the next result set from the iterator

        :returns: Dictionary from the result set
        :rtype: dict
        """
        return self.fetchone()

    def fetchone(self):
        """Returns the next result from the result set """
        if not self.executed:
            raise ValueError("Cannot fetch query results before executing")

        self.iterator_index += 1
        if self.iterator_index >= len(self.results):
            raise StopIteration

        return self.results[self.iterator_index]

    def fetchall(self):
        """ Returns the result set """
        return self.results

    def rowcount(self):
        """ Returns the number of results in the result set

        :returns: The number of records in the result set
        :rtype: integer
        """
        return len(self.results)

    def close(self):
        # Resetting the flag makes further fetches fail, mirroring a closed cursor.
        self.executed = False
| cmancone/mygrations | mygrations/tests/mocks/db/mysql/db_structure.py | db_structure.py | py | 4,255 | python | en | code | 10 | github-code | 36 |
36725320029 | # -*- coding: utf-8 -*-
from preprocess import Channel
from workflow.cf_workflow import run as user_cf
from workflow.if_workflow import run as user_if
from workflow.rsif_workflow import run as user_rsif
from workflow.lfm_workflow import run as lfm
from workflow.prank_workflow import run as prank
from flask import Flask, jsonify, abort, make_response, request
from workflow.turi_workflow import runByUser as tcUser
from workflow.turi_workflow import runByItems as tcItems
from workflow.turi_workflow import runPopular as tcPopular
from workflow.turi_workflow import runSaveUserData as tcSaveUserData
from workflow.turi_workflow import runGetUserData as tcGetUserData
app = Flask(__name__)
@app.route('/recommend/<method_name>', methods=['GET', 'POST'])
def methods(method_name):
    """Dispatch /recommend/<method_name> to the matching recommender handler."""
    handlers = {
        'cf': cfMed,
        'rsif': rsifMed,
        'if': ifMed,
        'lfm': lfmMed,
        'prank': prankMed,
        'tcUser': tcUserMed,
        'tcItems': tcItemsMed,
        'tcPopular': tcPopularMed,
        'setData': tcSetData,
        'getData': tcGetData,
    }
    if method_name == 'preprocess':
        # Runs the preprocessing step; like the original, returns no body.
        Channel().process()
    elif method_name in handlers:
        return handlers[method_name]()
    else:
        abort(404)
def cfMed():
    """User-based collaborative filtering; 404 when userId is missing."""
    user_id = request.args.get('userId', type=int)
    if user_id is None:
        abort(404)
    top_n = request.args.get('topN', 10, type=int)
    return jsonify(user_cf(user_id=user_id, topItems=top_n))
def ifMed():
    """Item-filtering recommendation; 404 when userId is missing."""
    user_id = request.args.get('userId', type=int)
    if user_id is None:
        abort(404)
    top_n = request.args.get('topN', 10, type=int)
    return jsonify(user_if(user_id=user_id, topItems=top_n))
def rsifMed():
    """RS item-filtering recommendation; 404 when userId is missing."""
    user_id = request.args.get('userId', type=int)
    if user_id is None:
        abort(404)
    top_n = request.args.get('topN', 10, type=int)
    return jsonify(user_rsif(user_id=user_id, topItems=top_n))
def lfmMed():
    """Latent-factor-model recommendation; 404 when userId is missing."""
    user_id = request.args.get('userId', type=int)
    if user_id is None:
        abort(404)
    top_n = request.args.get('topN', 10, type=int)
    return jsonify(lfm(userId=user_id, topItems=top_n))
def prankMed():
    """PersonalRank recommendation; 404 when userId is missing."""
    user_id = request.args.get('userId', type=int)
    if user_id is None:
        abort(404)
    top_n = request.args.get('topN', 10, type=int)
    return jsonify(prank(userId=user_id, topItems=top_n))
def tcUserMed():
    """Turi Create per-user recommendations; 404 when userId is missing."""
    user_id = request.args.get('userId', type=int)
    if user_id is None:
        abort(404)
    top_n = request.args.get('topN', 10, type=int)
    return tcUser(userId=user_id, topItems=top_n)
def tcItemsMed():
    """Turi Create similar-items recommendations; 404 when itemId is missing."""
    item_id = request.args.get('itemId', type=int)
    if item_id is None:
        abort(404)
    top_n = request.args.get('topN', 10, type=int)
    return tcItems(itemId=item_id, topItems=top_n)
def tcPopularMed():
    """Turi Create popularity recommendations; userId is optional here."""
    user_id = request.args.get('userId', type=int)
    top_n = request.args.get('topN', 10, type=int)
    return tcPopular(userId=user_id, topItems=top_n)
def tcSetData():
    """Store rating records posted as JSON; 415 for any other content type."""
    if request.headers['Content-Type'] != 'application/json':
        abort(415)
    info_array = request.json['info']
    for info in info_array:
        # each info record carries userId, itemId, rating
        tcSaveUserData(info)
    return jsonify(info_array)
def tcGetData():
    """Return the stored rating data for the requested userId."""
    return tcGetUserData(request.args.get('userId', type=int))
@app.errorhandler(404)
def not_found(error):
    # Return the 404 response as JSON instead of Flask's default HTML page.
    return make_response(jsonify({'error': 'Not found'}), 404)
@app.errorhandler(415)
def errorType_415(error):
    # JSON body for unsupported Content-Type (raised by tcSetData via abort(415)).
    return make_response(jsonify({'error': 'Unsupported Content Type'}), 415)
if __name__ == '__main__':
    #app.run(host='192.168.1.241', debug=True)
    # Development server only; host and debug flag should come from
    # configuration in production.
    app.run(host='127.0.0.1', debug=True)
| ang0410/recommend | manage.py | manage.py | py | 4,408 | python | en | code | 0 | github-code | 36 |
32844059000 | import xlrd
import product
def excel_reader(file_name):
    """Read product rows from the first sheet of an Excel workbook.

    Each spreadsheet row becomes one product.Product with columns mapped as
    date, desc, price, link (in that order).

    :param file_name: workbook file name, resolved against the hard-coded
        project folder
    :returns: list of product.Product, one per row
    """
    # NOTE(review): the base directory is hard-coded to one machine's path;
    # consider making it configurable.
    loc = "C:/Users/andym/PycharmProjects/FacebookScraper/" + file_name
    wb = xlrd.open_workbook(loc)
    sheet = wb.sheet_by_index(0)
    rows_total = sheet.nrows
    col_total = sheet.ncols

    # The old version also accumulated every cell into a temp_list that was
    # never read afterwards; that dead pass over the sheet has been removed.
    read_list = []
    for i in range(rows_total):
        temp_product = product.Product()
        for r in range(col_total):
            value = sheet.cell_value(i, r)
            if r == 0:
                temp_product.date = value
            elif r == 1:
                temp_product.desc = value
            elif r == 2:
                temp_product.price = value
            elif r == 3:
                temp_product.link = value
            else:
                print("Possible overflow detected in excelRead?")
        read_list.append(temp_product)
    return read_list
| andymangibbs/CraigslistScraper | excelRead.py | excelRead.py | py | 1,154 | python | en | code | 0 | github-code | 36 |
37228662161 | from .optimizer import optimizer
import numpy as np
from copy import copy
class DaviesSwannCampey(optimizer):
    def __init__(self, func,
                 x_0 = None,
                 initial_increment = None,
                 scaling_constant = 0.1,
                 interval = [-100, 100],
                 maxIter = 1e3,
                 xtol = 1e-6,
                 ftol = 1e-6):
        """Set up the Davies-Swann-Campey line search.

        :param func: objective function to minimise
        :param x_0: starting point; a random point inside ``interval`` when None
        :param initial_increment: first step size; 10% of ``|x_0|`` when None
        :param scaling_constant: shrink factor for the increment, strictly in (0, 1)
        :param interval: interval used for the random start
            (NOTE(review): mutable default argument - the list is shared
            across instances; confirm it is never mutated)
        :param maxIter: maximum number of outer iterations
        :param xtol: tolerance on x used as the stopping criterion
        :param ftol: tolerance on f (forwarded to the optimizer base class)
        """
        super().__init__(func = func, maxIter = maxIter, xtol = xtol, ftol = ftol, interval=interval)
        # Random start inside the interval unless an explicit x_0 is given.
        self.x = [np.random.uniform(low = self.interval[0], high = self.interval[1])] if x_0 == None else [x_0]
        self.increment = 0.1*np.abs(self.x[0]) if initial_increment == None else initial_increment
        assert (scaling_constant > 0) and (scaling_constant < 1), "Scaling constanst must be in the range 0 to 1."
        self.scaling_constant = scaling_constant
    def find_min(self):
        """Run the Davies-Swann-Campey search and return the estimated minimiser.

        Each outer iteration probes x0 +/- increment, expands with doubling
        steps in the downhill direction, then refines the best point with a
        quadratic interpolation, shrinking the increment until it drops
        below ``xtol``.

        NOTE(review): range() needs an int; assumes the base class stores
        maxIter as an int even though the default here is the float 1e3 -
        confirm.
        """
        for _ in range(1, self.maxIter):
            step7 = False
            # step 2: evaluate at x0 and its right neighbour (left neighbour
            # is only evaluated lazily in step 3).
            self.x = [self.x[0]]
            self.x_left = self.x[0] - self.increment
            self.x += [self.x[0] + self.increment]
            self.f = [self.objectiveFunction(self.x[0])]
            self.f += [self.objectiveFunction(self.x[1])]
            self.n = len(self.f) - 1
            # step 3: choose the downhill direction p (+1 right, -1 left).
            if self.f[0] > self.f[1]:
                self.p = 1
                # step 4
                self._compute_new_f()
            else: # f[0] <= f[1]
                self.f_left = self.objectiveFunction(self.x_left)
                if self.f_left < self.f[0]:
                    self.p = -1
                    # step 4
                    self._compute_new_f()
                else: # f[-1] >= f[0] <= f[1]
                    # step 7: x0 already brackets the minimum; interpolate
                    # around it and shrink the increment.
                    aux = self._compute_new_x0_based_on_x0()
                    if aux == False:
                        break
                    self.x[0] = aux
                    if self.increment <= self.xtol:
                        break
                    else:
                        self.increment = self.scaling_constant*self.increment
                    step7 = True
            if step7 == False:
                # step 5: one extra probe beyond the second-to-last point.
                self.x_m = self.x[self.n - 1] + 2**(self.n - 2)*self.p*self.increment
                self.f_m = self.objectiveFunction(self.x_m)
                # step 6: interpolate around whichever of x_m / x_{n-1} is lower.
                if self.f_m >= self.f[self.n-1]:
                    aux = self._compute_new_x0_based_on_xn()
                    if aux == False:
                        break
                    self.x[0] = aux
                else: # f_m < f[n-1]
                    aux = self._compute_new_x0_based_on_xm()
                    if aux == False:
                        break
                    self.x[0] = aux
                if 2**(self.n - 2)*self.increment <= self.xtol:
                    break
                else:
                    self.increment = self.scaling_constant*self.increment
        # step 8
        return self.x[0]
# step 4
    def _compute_new_f(self):
        """Step 4: keep stepping in direction ``p``, doubling the stride each
        time, until the objective increases; every trial point and value is
        appended to self.x / self.f and self.n tracks the last index."""
        while True:
            self.x += [self.x[self.n] + 2**(self.n)*self.p*self.increment]
            self.f += [self.objectiveFunction(self.x[self.n + 1])]
            self.n = self.n + 1
            if self.f[self.n] > self.f[self.n-1]:
                break
# step 7
def _compute_new_x0_based_on_x0(self):
numerator = self.increment*(self.f_left - self.f[1])
denominator = 2*(self.f_left - 2*self.f[0] + self.f[1])
if np.isclose(denominator, 0, atol=self.xtol):
return False
else:
return self.x[0] + numerator/denominator
# step 6
    def _compute_new_x0_based_on_xn(self):
        """Step 6 (no improvement at x_m): quadratic interpolation around
        x_{n-1}; returns False when the denominator is numerically zero."""
        numerator = 2**(self.n-2)*self.p*self.increment*(self.f[self.n-2] - self.f_m)
        denominator = 2*(self.f[self.n-2] - 2*self.f[self.n-1] + self.f_m)
        if np.isclose(denominator, 0, atol=self.xtol):
            return False
        else:
            return self.x[self.n-1] + numerator/denominator
# step 6
def _compute_new_x0_based_on_xm(self):
numerator = 2**(self.n-2)*self.p*self.increment*(self.f[self.n-1] - self.f[self.n])
denominator = 2*(self.f[self.n-1] - 2*self.f_m + self.f[self.n])
if np.isclose(denominator, 0, atol=self.xtol):
return False
else:
return self.x_m + numerator/denominator | crucis/ConvexOptimization | models/optimizers/DaviesSwannCampey.py | DaviesSwannCampey.py | py | 4,457 | python | en | code | 3 | github-code | 36 |
40961470159 | # coding: utf-8
import datetime
from simpleai.search import astar, SearchProblem
from simpleai.search.viewers import BaseViewer
class RobotProblem(SearchProblem):
    def __init__(self, pallets_a_entregar):
        '''
        The state must track the pallet positions, the robot position, which
        pallet (if any) is loaded, and the list of pallets still to deliver.
        The state is therefore a tuple with those elements: positions are
        coordinate tuples, and the pallets entry is a tuple holding one
        position per pallet.  When a pallet leaves the board its position
        becomes None.

        Coordinates start at (0, 0).  For example, the delivery cell
        is (2, 4).

        :param pallets_a_entregar: 1-based ids of the pallets to deliver
        '''
        self.posicion_entrega = (2, 4)
        # NOTE(review): pallets 1 and 5 both start at (0, 2) - confirm intended.
        pallets = ((0, 2), (1, 0), (3, 0), (2, 0), (0, 2),
                   (4, 0), (4, 1), (2, 2), (0, 4), (1, 1))
        robot = (1, 4)
        cargado = None
        inicial = (pallets, robot, cargado, tuple([p-1 for p in pallets_a_entregar]))
        super(RobotProblem, self).__init__(inicial)

    def is_goal(self, state):
        'The goal is reached once every requested pallet has been delivered'
        return len(state[3]) == 0

    def actions(self, state):
        '''
        The possible actions are moving to the 4 sides, dropping and picking
        up.  A move must not leave the board or enter the cell of a pallet we
        are not going to take.
        Picking up requires standing on the same cell as the pallet; when
        standing on a pallet's cell we are forced to take it.
        Dropping requires standing on the delivery cell with a pallet loaded.
        '''
        acciones = []
        pallets, robot, cargado, pendientes = state
        x, y = robot
        pallet_en_posicion = self.buscar_pallet_en_coordenadas(x, y, pallets)
        if pallet_en_posicion is not None:
            acciones.append(('Agarrar', None, None))
        else:
            acciones.extend(self.calcular_movimientos(state))
        if cargado is not None and robot == self.posicion_entrega:
            acciones.append(('Dejar', None, None))
        return acciones

    def calcular_movimientos(self, state):
        'Return the moves that stay on the 5x5 board and respect pallet cells.'
        posibles_movimientos = [
            ('Arriba', -1, 0),
            ('Abajo', 1, 0),
            ('Izquierda', 0, -1),
            ('Derecha', 0, 1),
        ]
        movimientos = []
        pallets, robot, cargado, pendientes = state
        cx, cy = robot
        for accion, dx, dy in posibles_movimientos:
            nx, ny = cx + dx, cy + dy
            if 0 <= nx <= 4 and 0 <= ny <= 4:
                p = self.buscar_pallet_en_coordenadas(nx, ny, pallets)
                # Entering an occupied cell is only allowed when that pallet
                # is pending delivery and the robot's load is free.
                if p is None or (p in pendientes and cargado is None):
                    movimientos.append((accion, dx, dy))
        return movimientos

    def buscar_pallet_en_coordenadas(self, x, y, pallets):
        'Return the index of the pallet at (x, y), or None when the cell is free.'
        for pallet, posicion in enumerate(pallets):
            if (x, y) == posicion:
                return pallet
        return None

    def result(self, state, action):
        'Apply a drop, a pick-up or a move and return the resulting state.'
        pallets, robot, cargado, pendientes = state
        x, y = robot
        accion, dx, dy = action
        if accion == 'Dejar':
            pendientes = tuple([w for w in pendientes if w != cargado])
            cargado = None
        elif accion == 'Agarrar':
            cargado = self.buscar_pallet_en_coordenadas(x, y, pallets)
            pallet_list = list(pallets)
            pallet_list[cargado] = None
            pallets = tuple(pallet_list)
        else:
            robot = (x + dx, y + dy)
        return (pallets, robot, cargado, pendientes)

    def cost(self, state1, action, state2):
        'Every action always costs 1'
        return 1

    def heuristic(self, state):
        '''
        A possible heuristic is the sum of the Manhattan distances from each
        pallet still to deliver to the delivery cell (a carried pallet is
        measured from the robot's position).
        '''
        pallets, robot, cargado, pendientes = state
        posiciones_pendientes = [pallets[x] for x in pendientes if x != cargado]
        if cargado is not None:
            posiciones_pendientes.append(robot)
        return sum([manhattan(x, self.posicion_entrega)
                    for x in posiciones_pendientes])

    def state_representation(self, state):
        'Render the board as text: digits for pallets, E for delivery, R[n] for the robot.'
        pallets, robot, cargado, pendientes = state
        template = [[' ']*5 for x in range(5)]
        for pallet, pos in enumerate(pallets):
            if pos is not None:
                fila, columna = pos
                template[fila][columna] = str(pallet+1)
        x, y = self.posicion_entrega
        template[x][y] = 'E'
        r = 'R'
        # Must compare against None: pallet index 0 is falsy, so the old
        # `if cargado:` hid the load whenever pallet 1 was being carried.
        if cargado is not None:
            r = 'R' + str(cargado+1)
        x, y = robot
        template[x][y] = r
        return '\n'.join([' | '.join(fila) for fila in template])
def manhattan(pos1, pos2):
    """Manhattan (taxicab) distance between two (row, col) coordinate pairs."""
    return sum(abs(a - b) for a, b in zip(pos1, pos2))
def main():
    # Deliver pallets 8, 3 and 9 (1-based ids; __init__ shifts them to 0-based).
    problema = RobotProblem([8, 3, 9])
    visor = BaseViewer()
    inicio = datetime.datetime.now()
    # Graph search avoids revisiting repeated states; the viewer records stats.
    resultado = astar(problema, graph_search=True, viewer=visor)
    tiempo = (datetime.datetime.now() - inicio).total_seconds()
    for i, (accion, estado) in enumerate(resultado.path()):
        print('Acción N: {} {} ## Estado: {}'.format(i, accion, estado))
    print("Costo: {}".format(resultado.cost))
    print("Nodos explorados: {}".format(visor.stats['visited_nodes']))
    print("Tamaño máximo frontera: {}".format(visor.stats['max_fringe_size']))
    print("Tiempo transcurrido: {} segundos".format(tiempo))
print("Tiempo transcurrido: {} segundos".format(tiempo))
if __name__ == '__main__':
    # Run the demo search only when executed as a script.
    main()
| ucse-ia/ucse_ia | practicas/robot_pallets.py | robot_pallets.py | py | 5,721 | python | es | code | 5 | github-code | 36 |
70807051303 | testCase = int(input())
# Greedy change-making per test case: count how many of each Korean won
# denomination fit, largest first.  The denomination loop replaces sixteen
# copy-pasted floor-divide/modulo lines; the printed output is identical.
for i in range(1, testCase+1):
    money = int(input())
    counts = []
    for unit in (50000, 10000, 5000, 1000, 500, 100, 50, 10):
        n, money = divmod(money, unit)
        counts.append(n)
    print('#{}\n{}'.format(i, ' '.join(str(c) for c in counts)))
23129694122 | import speech_recognition as sr
from state import State
from ClientThread import*
import threading
class VoiceRecognizer:
    # Executed once at class-creation time (i.e. on import), not per instance.
    # NOTE(review): confirm this module-load side effect is intended.
    State.event = 'create'

    def __init__(self):
        """Set up the network client, the recognizer and the command whitelist."""
        self.client = ClientThread()
        self.r = sr.Recognizer()
        self.speech = ''
        self.recognitionResult = ''
        # Only phrases in this whitelist are ever sent to the server.
        self.dictionary = ["draw","click","clear","delete","delete all","right","left","up","middle","down","red","white","green","pink","create","create here","create this here",
                           "create that here","create that shape","create shape here","create this shape",
                           "create that shape here","create the shape here","in the right","in the left","in the middle"]

    def recognize_voice(self):
        """Record up to 3 s from the microphone and store the Google STT
        transcription in self.recognitionResult."""
        with sr.Microphone() as source:
            self.r.adjust_for_ambient_noise(source)
            print("\n")
            print("Microphone activated...")
            print("Recognizing what's been said...")
            audio = self.r.listen(source,phrase_time_limit=3)
        try:
            self.recognitionResult = self.r.recognize_google(audio)
            print('You said : {}'.format(self.recognitionResult))
            print("\n")
        except (sr.UnknownValueError, sr.RequestError):
            # Narrowed from a bare `except:`: recognize_google reports
            # failures with UnknownValueError (unintelligible audio) and
            # RequestError (API unreachable); the bare except also swallowed
            # KeyboardInterrupt/SystemExit.
            print("please say it again !")

    def sendData(self):
        """Forward every whitelisted recognition result to the server.

        NOTE(review): this is a busy-wait loop pinning a CPU core; consider
        an event or queue.
        """
        while(True):
            if(self.recognitionResult in self.dictionary):
                self.client.send(self.recognitionResult)
                self.recognitionResult = ''

    def startVoiceReco(self):
        """Start the sender thread, then loop on microphone recognition forever."""
        new_thread = threading.Thread(target=self.sendData)
        new_thread.start()
        while(True):
            self.recognize_voice()
| Moufdi96/Projet_IHM_Multimodal | speecheRecognizer.py | speecheRecognizer.py | py | 1,682 | python | en | code | 0 | github-code | 36 |
22543357667 | import jwt
import json
import logging
import time
from jwt import ExpiredSignatureError
logger = logging.getLogger("handler_logger")
logger.setLevel(logging.DEBUG)
def jwt_encode(obj):
    """Sign *obj* (a dict payload) as an HS256 JWT and return it as a str.

    Returns '' when encoding fails.

    NOTE(review): the signing key is hard-coded and duplicated in jwt_decode;
    it should come from configuration/secrets, not source code.
    NOTE(review): only ValueError is caught - confirm that matches what the
    installed PyJWT version actually raises on bad payloads.
    """
    try:
        # .decode('utf-8') assumes PyJWT 1.x, where encode() returns bytes;
        # PyJWT 2.x already returns str and this would fail - confirm version.
        return jwt.encode(obj,
                          '#0wc-0-#@#14e8rbk#bke_9rg@nglfdc3&6z_r6nx!q6&3##l=',
                          algorithm='HS256').decode('utf-8')
    except ValueError:
        logger.debug("Failed: Unable to generate JWT")
        return ""
def jwt_decode(token):
    """Verify and decode an HS256 JWT.

    :returns: the payload dict; on an expired signature, a substitute payload
        whose ``exp`` is already in the past; '' when the token is otherwise
        invalid
    """
    try:
        # PyJWT expects `algorithms` as a list of allowed algorithm names.
        return jwt.decode(token,
                          "#0wc-0-#@#14e8rbk#bke_9rg@nglfdc3&6z_r6nx!q6&3##l=",
                          algorithms=["HS256"])
    except ExpiredSignatureError:
        # Hand back a payload that is guaranteed to look expired to callers.
        return {"exp": int(time.time()-1)}
    except jwt.InvalidTokenError:
        # PyJWT signals malformed/invalid tokens with InvalidTokenError
        # subclasses, not ValueError (which the old code caught and which
        # therefore never fired).
        logger.debug("Failed: Unable to decode JWT token")
        return ""
| gaurav3g/chat-sls-server | backend/utils/jwt_utils.py | jwt_utils.py | py | 842 | python | en | code | 0 | github-code | 36 |
31552686748 | import sys
import numpy as np
def transform_mat(h,V,T,N,C):
    """Similarity-transform h, T and N as C.T * M * C using coefficients C;
    the four-index tensor V is handled by transformV.

    :returns: (h, V, T, N) in the transformed ("converged") basis
    """
    from transformVMat import transformV
    print("\nTransforming h,v,T,N,C into converged basis...\n")
    return C.T * h * C, \
           transformV(V,C), \
           C.T * T * C, \
           C.T * N * C
def two_idx_mat_add_spin(Mat):
    """Expand a two-index matrix to the spin-orbital basis: the
    block-diagonal [[Mat, 0], [0, Mat]] (alpha block, then beta block)."""
    n = Mat.shape[0]
    zero = np.zeros((n, n))
    spin_blocked = np.block([[np.asarray(Mat), zero],
                             [zero, np.asarray(Mat)]])
    return np.matrix(spin_blocked)
def four_idx_mat_add_spin(Mat):
    """Expand a four-index (two-electron repulsion) tensor to spin orbitals,
    delegating the blocking to buildSpin.spin_rep_mat."""
    # for Electronic Repulsion Matrix
    from buildSpin import spin_rep_mat
    return spin_rep_mat(Mat)
def add_spin(S, h, V, T, N, C):
    """Expand every matrix to the spin-orbital basis (alpha/beta blocks).

    :returns: (S, h, V, T, N, C) in spin-orbital form, in that order; V is
        the only four-index quantity
    """
    print("\nAdding alpha, beta spins into matrices C, S, h, V, T, N.")
    return two_idx_mat_add_spin(S), \
           two_idx_mat_add_spin(h), \
           four_idx_mat_add_spin(V), \
           two_idx_mat_add_spin(T), \
           two_idx_mat_add_spin(N), \
           two_idx_mat_add_spin(C)
def save_ON_spin_mat(prefix, s,h,v,T,N,C):
    """Save the spin-orbital matrices to <prefix>_<name>_spin.npy files."""
    print("\nSaving C, S, h, V, T and N Info in SPIN ORBITAL BASIS for {}".format(prefix))
    np.save(prefix + "_S_spin.npy", s)
    np.save(prefix + "_h_spin.npy", h)
    np.save(prefix + "_V_spin.npy", v)
    np.save(prefix + "_T_spin.npy", T)
    np.save(prefix + "_N_spin.npy", N)
    np.save(prefix + "_C_HF_spin.npy", C)
    print("Info Saved...\n")
def transform_main(prefix, S_mat, h_mat, V_mat, T_mat, N_mat, C_mat):
    """Full pipeline: basis transform -> add spin blocks -> save .npy files."""
    h_on, V_on, T_on, N_on = transform_mat( h_mat, V_mat, T_mat, N_mat, C_mat )
    S_sp, h_sp, V_sp, T_sp, N_sp, C_sp = add_spin( S_mat, h_on , V_on , T_on , N_on , C_mat )
    save_ON_spin_mat(prefix, S_sp, h_sp, V_sp, T_sp, N_sp, C_sp)
if __name__ == "__main__":
    prefix_in = sys.argv[1]
    fields = prefix_in.split("_")
    # Be2 inputs: turn "Be2_<x>_rest..." into "Be2_<x>.rest..." for the output
    # prefix; any other prefix passes through unchanged.
    if fields[0]=="Be2": prefix_out = fields[0] +"_"+ fields[1] +"."+ "_".join(fields[2:])
    else: prefix_out = prefix_in
    # Load the matrices saved earlier as .npy files and run the full pipeline.
    transform_main( prefix_out, \
                    np.matrix(np.load(prefix_in + '_S.npy')), \
                    np.matrix(np.load(prefix_in + '_h.npy')), \
                    np.matrix(np.load(prefix_in + '_V.npy')), \
                    np.matrix(np.load(prefix_in + '_T.npy')), \
                    np.matrix(np.load(prefix_in + '_N.npy')), \
                    np.matrix(np.load(prefix_in + '_C_HF.npy'))
                    )
| sskhan67/GPGPU-Programming- | QODE/Applications/Be_n/dimer_H/run_template/pyquante_scf/pyquante_to_mine.py | pyquante_to_mine.py | py | 2,255 | python | en | code | 0 | github-code | 36 |
37349168777 | from ase.units import Ha
import numpy as np
from my_gpaw.xc.fxc import KernelWave, XCFlags, FXCCache
from my_gpaw.xc.rpa import GCut
from my_gpaw.response.pair_functions import SingleQPWDescriptor
from my_gpaw.pw.descriptor import PWMapping
class G0W0Kernel:
    """Exchange-correlation kernel factory for G0W0 calculations.

    For xc='RPA' the kernel is the identity in the plane-wave basis;
    otherwise the spin kernel is computed (and disk-cached) by
    calculate_spinkernel using the keyword arguments captured here.
    """
    def __init__(self, xc, context, **kwargs):
        self.xc = xc                # xc functional name, e.g. 'RPA'
        self.context = context
        self.xcflags = XCFlags(xc)
        self._kwargs = kwargs       # forwarded verbatim to calculate_spinkernel

    def calculate(self, qpd):
        # RPA needs no xc kernel: identity of size qpd.ngmax (plane-wave count).
        if self.xc == 'RPA':
            return np.eye(qpd.ngmax)

        return calculate_spinkernel(
            qpd=qpd,
            xcflags=self.xcflags,
            context=self.context,
            **self._kwargs)
def calculate_spinkernel(*, ecut, xcflags, gs, qd, ns, qpd, context):
    """Return (computing or loading from cache) the spin xc kernel fv for the
    q-point described by qpd.

    The kernel is cached on disk per q-point; on a cache miss a KernelWave
    calculation is run for *all* q-points and each result is written back.
    When qpd uses a smaller ecut than the cached kernel, fv is cut down to
    the reduced plane-wave basis.
    """
    assert xcflags.spin_kernel
    xc = xcflags.xc

    ibzq_qc = qd.ibzk_kc
    # Locate qpd's q-vector inside the irreducible BZ list (must match exactly).
    iq = np.argmin(np.linalg.norm(ibzq_qc - qpd.q_c[np.newaxis], axis=1))
    assert np.allclose(ibzq_qc[iq], qpd.q_c)

    ecut_max = ecut * Ha  # XXX very ugly this
    cache = FXCCache(comm=context.comm,
                     tag=gs.atoms.get_chemical_formula(mode='hill'),
                     xc=xc, ecut=ecut_max)
    handle = cache.handle(iq)

    if not handle.exists():
        # Somehow we calculated many q even though this function
        # only works on one q? Very confusing.
        kernel = KernelWave(
            q_empty=iq, ibzq_qc=qd.ibzk_kc,
            xc=xcflags.xc,
            ecut=ecut_max, gs=gs,
            context=context)

        # The first time we miss the cache, we calculate /all/ iq.
        # (Whether that's the best strategy can be discussed.)
        for iq_calculated, array in kernel.calculate_fhxc():
            cache.handle(iq_calculated).write(array)

    fv = handle.read()
    assert fv is not None

    # If we want a reduced plane-wave description, create qpd mapping
    if qpd.ecut < ecut:
        # Recreate nonreduced plane-wave description corresponding to ecut_max
        qpdnr = SingleQPWDescriptor.from_q(qpd.q_c, ecut, qpd.gd,
                                           gammacentered=qpd.gammacentered)
        pw_map = PWMapping(qpd, qpdnr)
        gcut = GCut(pw_map.G2_G1)

        fv = gcut.spin_cut(fv, ns=ns)

    return fv
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/response/g0w0_kernels.py | g0w0_kernels.py | py | 2,219 | python | en | code | 0 | github-code | 36 |
17392881824 | from sorter import Sorter
class QuickSort(Sorter):
    name = "Quick Sort"

    def __init__(self):
        super(QuickSort, self).__init__()

    def sort(self, L):
        """Sort list L in place (and return it), logging each step via Sorter."""
        self._log(L)
        return self.quick_sort(L, 0, len(L)-1)

    def partition(self, L, lo, hi):
        """Lomuto partition of L[lo..hi] around the last element; returns the
        pivot's final index."""
        # choose pivot
        pivot = L[hi]

        # partition list by values < and > pivot
        i = lo
        for j in range(lo, hi): # don't do index hi, because that's where the pivot is
            if L[j] < pivot:
                L[i], L[j] = L[j], L[i]
                self._log(L)
                i += 1
        L[i], L[hi] = L[hi], L[i]
        self._log(L)
        return i

    def quick_sort(self, L, lo, hi):
        """Recursive quicksort over the inclusive index range [lo, hi]."""
        if lo < hi:
            p = self.partition(L, lo, hi)
            # Pivot is in place at p; recurse on both sides.
            self.quick_sort(L, lo, p-1)
            self._log(L)
            self.quick_sort(L, p+1, hi)
            self._log(L)
        return L
if __name__ == "__main__":
    # Smoke run on a list of 50 items; S.test is presumably provided by the
    # Sorter base class - confirm.
    S = QuickSort()
    S.test(n=50)
21499220627 | 'Implementation of the Insertion Sort Algorithm.'
print("***** Implementation of Insertion Sort Algorithm *****")
def InsertionSort(theSeq):
    """Sort the mutable sequence *theSeq* in place, ascending; returns None.

    Classic insertion sort: grow a sorted prefix one element at a time by
    shifting larger items right and dropping each value into its slot.
    (The old version used bare string expressions as "comments" - those are
    no-op statements, replaced here with real comments.)
    """
    n = len(theSeq)
    for i in range(1, n):
        # Save the value being inserted into the sorted prefix [0, i).
        value = theSeq[i]
        # Walk left while items are larger, shifting each one right.
        pos = i
        while pos > 0 and value < theSeq[pos - 1]:
            theSeq[pos] = theSeq[pos - 1]
            pos -= 1
        # Drop the saved value into the open slot.
        theSeq[pos] = value
if __name__ == "__main__":
    # Demo run: show the sample list before and after the in-place sort.
    theSeq = [3, 5, 8, 4, 1, 9, -1, 6, 0, 2, 7]
    print("Before implementing insertion sort algorithm :", theSeq)
    InsertionSort(theSeq)
    print("After implementing insertion sort algorithm :", theSeq)
| amshrestha2020/ConsoleAppPython | InsertionSort.py | InsertionSort.py | py | 870 | python | en | code | 0 | github-code | 36 |
31061296375 |
from ..utils import Object
class GetBackgroundUrl(Object):
    """
    Constructs a persistent HTTP URL for a background

    Attributes:
        ID (:obj:`str`): ``GetBackgroundUrl``

    Args:
        name (:obj:`str`):
            Background name
        type (:class:`telegram.api.types.BackgroundType`):
            Background type

    Returns:
        HttpUrl

    Raises:
        :class:`telegram.Error`
    """
    ID = "getBackgroundUrl"

    def __init__(self, name, type, extra=None, **kwargs):
        self.extra = extra
        self.name = name  # str
        self.type = type  # BackgroundType

    @staticmethod
    def read(q: dict, *args) -> "GetBackgroundUrl":
        # Build the request object from a raw TDLib response dict.
        name = q.get('name')
        type = Object.read(q.get('type'))  # deserialize the nested BackgroundType
        return GetBackgroundUrl(name, type)
| iTeam-co/pytglib | pytglib/api/functions/get_background_url.py | get_background_url.py | py | 801 | python | en | code | 20 | github-code | 36 |
26166080106 | import argparse
import os
import cv2
import matplotlib.pyplot as plt
import maxflow
import networkx as nx
import numpy as np
class GraphCuts:
    def __init__(self, src, target, mask, save_graph=False):
        """
        Initialize the graph and compute the min-cut.
        :param src: image to be blended
        :param target: background image
        :param mask: manual mask with constrained pixels
            ([0, 255, 255] pixels are forced to the sink,
             [255, 128, 0] pixels are forced to the source)
        :param save_graph: if true, the graph is plotted
        """
        assert (src.shape == target.shape), \
            f"Source and target dimensions must be same: {str(src.shape)} != {str(target.shape)}"

        # Creating the graph and adding nodes
        graph = maxflow.Graph[float]()
        node_ids = graph.add_grid_nodes((src.shape[0], src.shape[1]))
        self.compute_edge_weights(src, target)  # fills self.edge_weights

        # Adding non-terminal edges
        patch_height = src.shape[0]
        patch_width = src.shape[1]
        for row_idx in range(patch_height):
            for col_idx in range(patch_width):
                # Horizontal edge
                if col_idx + 1 < patch_width:
                    weight = self.edge_weights[row_idx, col_idx, 0]
                    graph.add_edge(node_ids[row_idx][col_idx],
                                   node_ids[row_idx][col_idx + 1],
                                   weight,
                                   weight)

                # Vertical edge
                if row_idx + 1 < patch_height:
                    weight = self.edge_weights[row_idx, col_idx, 1]
                    graph.add_edge(node_ids[row_idx][col_idx],
                                   node_ids[row_idx + 1][col_idx],
                                   weight,
                                   weight)

                # Adding terminal edge capacities for the pixels constrained to belong to the source/sink.
                # http://pmneila.github.io/PyMaxflow/maxflow.html
                # TODO(review): is there an alternative to add_tedge?  Would
                # prefer not to pass np.inf capacities.
                if np.array_equal(mask[row_idx, col_idx, :], [0, 255, 255]):
                    graph.add_tedge(node_ids[row_idx][col_idx], 0, np.inf)
                elif np.array_equal(mask[row_idx, col_idx, :], [255, 128, 0]):
                    graph.add_tedge(node_ids[row_idx][col_idx], np.inf, 0)

        # Plot graph
        if save_graph:
            # plot_graph_2d expects the *maxflow* graph (it calls
            # get_nx_graph() itself); the old code passed the already
            # converted networkx graph, which would crash with AttributeError.
            self.plot_graph_2d(graph, (patch_height, patch_width))

        # Computing maxflow / mincut
        flow = graph.maxflow()
        self.sgm = graph.get_grid_segments(node_ids)

    def compute_edge_weights(self, src, target):
        """
        Compute edge weights based on matching quality cost.
        :param src: image to be blended (foreground)
        :param target: background image
        """
        self.edge_weights = np.zeros((src.shape[0], src.shape[1], 2))

        # Create shifted versions of the matrices for vectorized operations.
        src_left_shifted = np.roll(src, -1, axis=1)
        target_left_shifted = np.roll(target, -1, axis=1)
        src_up_shifted = np.roll(src, -1, axis=0)
        target_up_shifted = np.roll(target, -1, axis=0)

        eps = 1e-10  # Numerical stability

        # Horizontal weights.  `float` replaces the np.float alias, which was
        # deprecated in NumPy 1.20 and removed in 1.24.
        # NOTE(review): src - target on uint8 inputs wraps before squaring;
        # confirm the inputs are already a signed/float dtype.
        horizontal_weight = np.sum(np.square(src - target, dtype=float) +
                                   np.square(src_left_shifted - target_left_shifted, dtype=float),
                                   axis=2)
        horizontal_norm_factor = np.sum(np.square(src - src_left_shifted, dtype=float) +
                                        np.square(target - target_left_shifted, dtype=float),
                                        axis=2)
        self.edge_weights[:, :, 0] = horizontal_weight / (horizontal_norm_factor + eps)

        # Vertical weights
        vertical_weight = np.sum(np.square(src - target, dtype=float) +
                                 np.square(src_up_shifted - target_up_shifted, dtype=float),
                                 axis=2)
        vertical_norm_factor = np.sum(np.square(src - src_up_shifted, dtype=float) +
                                      np.square(target - target_up_shifted, dtype=float),
                                      axis=2)
        self.edge_weights[:, :, 1] = vertical_weight / (vertical_norm_factor + eps)

    def plot_graph_2d(self, graph, nodes_shape,
                      plot_weights=True,
                      plot_terminals=True,
                      font_size=7):
        """
        Plot the graph to be used in graph cuts
        :param graph: Maxflow graph
        :param nodes_shape: patch shape
        :param plot_weights: if true, edge weights are shown
        :param plot_terminals: if true, the terminal nodes are shown
        :param font_size: text font size
        """
        X, Y = np.mgrid[:nodes_shape[0], :nodes_shape[1]]
        aux = np.array([Y.ravel(), X[::-1].ravel()]).T
        positions = {i: v for i, v in enumerate(aux)}
        positions['s'] = (-1, nodes_shape[0] / 2.0 - 0.5)
        positions['t'] = (nodes_shape[1], nodes_shape[0] / 2.0 - 0.5)

        # nx.draw(graph, cmap=plt.get_cmap('jet'))  # alternative: draw with networkx directly, skipping maxflow
        plt.show()
        nxgraph = graph.get_nx_graph()
        print("nxgraph created")
        if not plot_terminals:
            nxgraph.remove_nodes_from(['s', 't'])

        plt.clf()
        nx.draw(nxgraph, pos=positions)

        if plot_weights:
            edge_labels = {}
            for u, v, d in nxgraph.edges(data=True):
                edge_labels[(u, v)] = d['weight']
            nx.draw_networkx_edge_labels(nxgraph,
                                         pos=positions,
                                         edge_labels=edge_labels,
                                         label_pos=0.3,
                                         font_size=font_size)
        plt.axis('equal')
        plt.show()

    def blend(self, src, target):
        """
        Blends the target image with the source image based on the graph cut.
        :param src: Source image
        :param target: Target image (modified in place)
        :return target : Blended image
        """
        target[self.sgm] = src[self.sgm]
        return target
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', dest='image_dir', required=True, help='Saved Path of Source & Target Images.')
    args = parser.parse_args()

    # Read the images and the mask.
    # NOTE(review): cv2.imread returns None for missing files; the inputs
    # src.jpg / target.jpg / mask.png are assumed present in image_dir.
    image_dir = args.image_dir
    src = cv2.imread(os.path.join(image_dir, 'src.jpg'))
    target = cv2.imread(os.path.join(image_dir, 'target.jpg'))
    mask = cv2.imread(os.path.join(image_dir, 'mask.png'))

    # Compute the min-cut.
    graphcuts = GraphCuts(src, target, mask)

    # Save the output.
    target = graphcuts.blend(src, target)
    cv2.imwrite(os.path.join(image_dir, "result.png"), target)
| c1a1o1/graphcut-textures | src/graphcut_textures.py | graphcut_textures.py | py | 7,197 | python | en | code | 0 | github-code | 36 |
534546914 | import pygame as p
from Chess import ChessEngine, SmartMoveFinder, DataToLearn, VisualizData
import time
import xml.etree.ElementTree as gfg
import os.path
from Chess.DataTree import TreeNode
WIDTH = HEIGHT = 512            # square board area in pixels
DIMENSION = 8                   # 8x8 chess board
SQ_SIZE = HEIGHT // DIMENSION   # pixel size of a single square
MAX_FPS = 15                    # animation frame cap
IMAGES = {}                     # piece symbol -> scaled pygame surface (filled by loadImages)
WHITE_PIECE_CAPTUED = []        # NOTE(review): "CAPTUED" looks like a typo for CAPTURED
BLACK_PIECE_CAPTUED = []
BLACK_EVALUATION = 0.5 # Win percentage at start
test = p.Rect(200 + HEIGHT * (1 - BLACK_EVALUATION), HEIGHT + 20 + 60,
              WIDTH * BLACK_EVALUATION, 50) # Show win percentage
# Here we load the image ones
def loadImages():
    """Populate the module-level IMAGES cache with one scaled sprite per piece.

    Board codes ('P', 'p', ...) map to pygame surfaces scaled to a single
    square; called once at start-up because image loading is expensive.
    """
    piece_codes = ['P', 'R', 'N', 'B', 'K', 'Q', 'p', 'r', 'n', 'b', 'k', 'q']
    file_names = ['wP', 'wR', 'wN', 'wB', 'wK', 'wQ', 'p', 'r', 'n', 'b', 'k', 'q']
    for code, name in zip(piece_codes, file_names):
        sprite = p.image.load("allData/images/" + name + ".png")
        IMAGES[code] = p.transform.scale(sprite, (SQ_SIZE, SQ_SIZE))
def main():
global MOVE_TIME_START
global TREE
global CURRENT_NODE
p.init()
# loading the history
if os.stat('Tree_history_next.xml').st_size == 0:
start = "<Level-0><State><FEN>rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR</FEN><ImportedEvaluation>0</ImportedEvaluation><Evaluation>0</Evaluation><Wins>0</Wins><Plays>0</Plays><Move /></State></Level-0>"
fileer = open("Tree_history_next.xml", "wb")
fileer.write(start)
fileer.close()
MOVE_TIME_START = time.time()
tree = gfg.parse('Tree_history_next.xml').getroot()
TREE = TreeNode([tree.getchildren()[0].getchildren()[0].text, tree.getchildren()[0].getchildren()[1].text,
tree.getchildren()[0].getchildren()[2].text, tree.getchildren()[0].getchildren()[3].text,
tree.getchildren()[0].getchildren()[4].text], "")
if len(tree.getchildren()[0].getchildren()) > 6:
TREE.read_tree(tree.getchildren()[0].getchildren()[6])
print("Time to load: ", time.time() - MOVE_TIME_START)
CURRENT_NODE = TREE
VisualizData.visualize_data_print(CURRENT_NODE)
loadImages()
moveSound = p.mixer.Sound('allData/sound/moveSound.wav')
conquerePieceSound = p.mixer.Sound('allData/sound/conquerePieceSound.mp3')
screen = p.display.set_mode((WIDTH + 400, HEIGHT + 80 + 60))
p.display.set_caption("K(ing) AI")
clock = p.time.Clock()
screen.fill(p.Color("beige"))
gs = ChessEngine.GameState()
gs.moveLogLong.append(CURRENT_NODE)
first_move = True
validMoves = gs.getValidMoves()
moveMade = False # flag variable for a valid move
animate = False # flag whether eh should animate
running = True
sqSelected = () # keep track of the last click of the user tuple (row,col)
playerClicks = [] # keeps track of player clicks (two tuples: [(row,col),(row,col)]
gameOver = False
playerOne = False # If human is playing white = true
playerTwo = False # If human is playing black = true
while running:
humanTurn = (gs.whiteToMove and playerOne) or (not gs.whiteToMove and playerTwo)
for e in p.event.get():
if e.type == p.QUIT:
running = False
# Mouse handler
elif e.type == p.MOUSEBUTTONDOWN:
if not gameOver and humanTurn:
location = p.mouse.get_pos() # (x,y) of mouse
col = (location[0] - 200) // SQ_SIZE
row = (location[1] - 10 - 60) // SQ_SIZE
if location[0] > 200 and location[0] < WIDTH + 200 and location[1] > 10 + 60 and location[
1] < HEIGHT + 10 + 60:
if sqSelected == (row, col): # check whether the user clicked the square twice
sqSelected = () # deselect
playerClicks = [] # clear player clicks
else:
sqSelected = (row, col)
playerClicks.append(sqSelected)
if len(playerClicks) == 2:
move = ChessEngine.Move(playerClicks[0], playerClicks[1], gs.board)
for i in range(len(validMoves)):
if move == validMoves[i]:
if len(CURRENT_NODE.children) < 1:
timer = time.time()
SmartMoveFinder.addToTree(gs, CURRENT_NODE)
print("time to add: ", time.time() - timer)
gs.makeMove(validMoves[i])
CURRENT_NODE = changeCurrentNode(gs)
gs.moveLogLong.append(CURRENT_NODE)
moveMade = True
animate = True
first_move = False
print("Player-time: ", time.time() - MOVE_TIME_START)
MOVE_TIME_START = time.time()
if move.pieceCaptured != "--":
conquerePieceSound.play()
else:
moveSound.play()
# recalculate the win percentage
BLACK_EVALUATION = 0.5 - float(CURRENT_NODE.evaluation) / 1000
if BLACK_EVALUATION > 0.99:
BLACK_EVALUATION = 0.99
test.update(200 + HEIGHT * (1 - BLACK_EVALUATION), HEIGHT + 20 + 60,
WIDTH * BLACK_EVALUATION, 50)
sqSelected = ()
playerClicks = []
if not moveMade:
playerClicks = [sqSelected]
# key handers
elif e.type == p.KEYDOWN:
if e.key == p.K_z: # undo if "z" is pressed
gs.undoMove()
moveMade = True
animate = False
gameOver = False
if e.key == p.K_r: # reset the board when pressing "r"
gs = ChessEngine.GameState()
validMoves = gs.getValidMoves()
sqSelected = ()
playerClicks = []
moveMade = False
animate = False
gameOver = False
if e.key == p.K_1: # save data
MOVE_TIME_START = time.time()
TREE.save_tree("Tree_history_next.xml")
print("Time to save: ", time.time() - MOVE_TIME_START)
if e.key == p.K_2:
MOVE_TIME_START = time.time()
# updatePlayAndWins(gs) # save?
gs.checkMate = True
gameOver = True
print("Time to update the data: ", time.time() - MOVE_TIME_START)
if e.key == p.K_3:
gs.staleMate = True
gameOver = True
# white lost (from surrender)
if e.key == p.K_4:
if gs.whiteToMove:
gs.checkMate = True
gameOver = True
else:
gs.whiteToMove = True
gs.checkMate = True
gameOver = True
# black lost (from surrender)
if e.key == p.K_5:
if not gs.whiteToMove:
gs.checkMate = True
gameOver = True
else:
gs.whiteToMove = False
gs.checkMate = True
gameOver = True
# AI Movefinder
if not gameOver and not humanTurn:
if len(CURRENT_NODE.children) < 1:
timer = time.time()
SmartMoveFinder.addToTree(gs, CURRENT_NODE)
print("time to add: ", time.time() - timer)
timerrr = time.time()
AImove = SmartMoveFinder.findBestMoveMinMax(gs, validMoves, CURRENT_NODE)
print("find new Move: ", time.time() - timerrr)
if AImove is None:
AImove = SmartMoveFinder.findRandomMove(validMoves)
gs.makeMove(AImove)
moveMade = True
animate = True
CURRENT_NODE = changeCurrentNode(gs)
gs.moveLogLong.append(CURRENT_NODE)
print("AI: ", time.time() - MOVE_TIME_START)
MOVE_TIME_START = time.time()
if AImove.pieceCaptured != "--":
conquerePieceSound.play()
else:
moveSound.play()
# recalculate the win percentage
BLACK_EVALUATION = 0.5 - float(CURRENT_NODE.evaluation) / 1000
if BLACK_EVALUATION > 0.99:
BLACK_EVALUATION = 0.99
test.update(200 + HEIGHT * (1 - BLACK_EVALUATION), HEIGHT + 20 + 60,
WIDTH * BLACK_EVALUATION, 50)
if moveMade:
if animate:
animateMove(gs.moveLog[-1], screen, gs.board, clock)
validMoves = gs.getValidMoves()
moveMade = False
animate = False
drawGameState(screen, gs, validMoves, sqSelected)
if gs.checkMate:
gameOver = True
if gs.whiteToMove:
font1 = p.font.SysFont('Black wins by checkmate', 64)
img1 = font1.render('Black wins by checkmate', True, "dark red")
screen.blit(img1, (210, 280))
else:
font1 = p.font.SysFont('White wins by checkmate', 64)
img1 = font1.render('White wins by checkmate', True, "dark red")
screen.blit(img1, (210, 280))
# drawText(screen, 'White wins by checkmate')
updatePlayAndWins(gs)
MOVE_TIME_START = time.time()
TREE.save_tree("Tree_history_next.xml")
print("Time to save: ", time.time() - MOVE_TIME_START)
running = False
time.sleep(60)
elif gs.staleMate:
font1 = p.font.SysFont('Stalemate', 64)
img1 = font1.render('Stalemate', True, "dark red")
screen.blit(img1, (210, 280))
updatePlayAndWins(gs)
MOVE_TIME_START = time.time()
TREE.save_tree("Tree_history_next.xml")
print("Time to save: ", time.time() - MOVE_TIME_START)
running = False
time.sleep(60)
clock.tick(MAX_FPS)
p.display.flip()
# Higlicght the square selected
def highlightSqaures(screen, gs, validMoves, sqSelected):
    """Highlight the selected square (blue) and its legal targets (yellow).

    No-op when nothing is selected or the selected square does not hold a
    piece of the side to move.
    """
    if sqSelected != ():
        r, c = sqSelected
        # Lowercase board codes are black pieces, uppercase are white.
        # NOTE(review): an empty square "--" is not lowercase, so when it is
        # white's turn an empty selection passes this check — confirm the
        # caller guarantees a piece is selected.
        if (gs.board[r][c].islower() and not gs.whiteToMove) or (not gs.board[r][c].islower() and gs.whiteToMove):
            # if gs.board[r][c][0] == ('w' if gs.whiteToMove else 'b'): # sqSelected a piece that can be moved
            # highlight selected squares
            s = p.Surface((SQ_SIZE, SQ_SIZE))
            s.set_alpha(100)  # transparency value (0-255)
            s.fill(p.Color('blue'))
            # +200 / +70 offsets account for the board margin inside the window.
            screen.blit(s, ((c * SQ_SIZE) + 200, (r * SQ_SIZE) + 10 + 60))
            # highlight moves from that square
            s.fill(p.Color('yellow'))
            for move in validMoves:
                if move.startRow == r and move.startCol == c:
                    screen.blit(s, ((SQ_SIZE * move.endCol) + 200, (move.endRow * SQ_SIZE) + 10 + 60))
def drawGameState(screen, gs, validMoves, sqSelected):
    """Render one frame of the board; draw order is back-to-front."""
    drawBoard(screen)  # draw squares on the board
    highlightSqaures(screen, gs, validMoves, sqSelected)
    highlightLastMove(gs, screen)
    drawPieces(screen, gs.board)  # draw pieces on the top of those squares
def drawBoard(screen):
    """Draw the board frame, header strips, evaluation bar, and 8x8 squares.

    Also (re)binds the module-level ``colors`` list used by other drawing
    helpers.
    """
    global colors
    # Black border around the playing field.
    p.draw.rect(screen, "black", p.Rect(194, 64, HEIGHT + 12, WIDTH + 12))
    # Two strips at the top of the window.
    p.draw.rect(screen, "white", p.Rect(200 + WIDTH / 4, 10, WIDTH / 4, 40))
    p.draw.rect(screen, "grey", p.Rect(200 + WIDTH / 2, 10, WIDTH / 4, 40))
    # Win-percentage bar below the board; ``test`` is the module-level rect
    # resized elsewhere to reflect the current evaluation.
    p.draw.rect(screen, "white", p.Rect(200, HEIGHT + 20 + 60, WIDTH, 50))
    p.draw.rect(screen, "grey", test)
    colors = [p.Color("white"), p.Color("grey")]
    for r in range(DIMENSION):
        for c in range(DIMENSION):
            # Alternate light/dark squares; offset by the board margin.
            color = colors[((r + c) % 2)]
            p.draw.rect(screen, color, p.Rect(c * SQ_SIZE + 200, r * SQ_SIZE + 10 + 60, SQ_SIZE, SQ_SIZE))
def drawPieces(screen, board):
    """Blit every piece sprite onto its square; empty squares ("--") are skipped."""
    for row in range(DIMENSION):
        for col in range(DIMENSION):
            piece = board[row][col]
            if piece == "--":
                continue
            # Offset by the board margin (200 px left, 70 px top).
            screen.blit(IMAGES[piece], p.Rect(col * SQ_SIZE + 200, row * SQ_SIZE + 10 + 60, SQ_SIZE, SQ_SIZE))
def animateMove(move, screen, board, clock):
global colors
dR = move.endRow - move.startRow
dC = move.endCol - move.startCol
framesPerSquare = 4 # frames to move one square
frameCount = (abs(dR) + abs(dC)) * framesPerSquare
for frame in range(frameCount + 1):
r, c = (move.startRow + dR * frame / frameCount, move.startCol + dC * frame / frameCount)
drawBoard(screen)
drawPieces(screen, board)
# erase the piece moved from its ending sqaure
color = colors[(move.endRow + move.endCol) % 2]
endSquare = p.Rect((move.endCol * SQ_SIZE) + 200, (move.endRow * SQ_SIZE) + 10 + 60, SQ_SIZE, SQ_SIZE)
p.draw.rect(screen, color, endSquare)
# draw captured piece onto rectangle
if move.pieceCaptured != '--':
screen.blit(IMAGES[move.pieceCaptured], endSquare)
# draw moving piece
screen.blit(IMAGES[move.pieceMoved], p.Rect((c * SQ_SIZE) + 200, (r * SQ_SIZE) + 10 + 60, SQ_SIZE, SQ_SIZE))
p.display.flip()
clock.tick(60)
if move.pieceCaptured.isupper():
screen.blit(IMAGES[move.pieceCaptured],
p.Rect(((len(WHITE_PIECE_CAPTUED) % 3)) * SQ_SIZE,
(len(WHITE_PIECE_CAPTUED) // 3) * SQ_SIZE + 10 + 60, SQ_SIZE, SQ_SIZE))
WHITE_PIECE_CAPTUED.append(move.pieceCaptured)
elif move.pieceCaptured.islower():
screen.blit(IMAGES[move.pieceCaptured],
p.Rect(((len(BLACK_PIECE_CAPTUED) % 3)) * SQ_SIZE + WIDTH + 210,
(len(BLACK_PIECE_CAPTUED) // 3) * SQ_SIZE + 10 + 60, SQ_SIZE, SQ_SIZE))
BLACK_PIECE_CAPTUED.append(move.pieceCaptured)
# highlight last move made
def highlightLastMove(gs, screen):
if len(gs.moveLog) > 0:
s = p.Surface((SQ_SIZE, SQ_SIZE))
s.set_alpha(100) # transparancy value
s.fill(p.Color('red'))
screen.blit(s, ((gs.moveLog[-1].startCol * SQ_SIZE) + 200, (gs.moveLog[-1].startRow * SQ_SIZE) + 10 + 60))
screen.blit(s, ((gs.moveLog[-1].endCol * SQ_SIZE) + 200, (gs.moveLog[-1].endRow * SQ_SIZE) + 10 + 60))
def drawText(screen, text):
    """Draw centered text with a 2px drop-shadow (gray under black).

    NOTE(review): the font name "Helvitca" is misspelled — pygame silently
    falls back to the default font; left unchanged because it is a runtime
    string. Confirm "Helvetica" was intended.
    """
    font = p.font.SysFont("Helvitca", 32, True, False)
    textObject = font.render(text, 0, p.Color('Gray'))
    # Center the text within the board area.
    textLocation = p.Rect(0, 0, WIDTH, HEIGHT).move(WIDTH / 2 - textObject.get_width() / 2,
                                                    HEIGHT / 2 - textObject.get_height() / 2)
    screen.blit(textObject, textLocation)
    # Re-render in black, shifted by (2, 2), for a shadow effect.
    textObject = font.render(text, 0, p.Color('Black'))
    screen.blit(textObject, textLocation.move(2, 2))
# change the current Node
def changeCurrentNode(gs):
fen_now = DataToLearn.createFEN(gs)
for states in CURRENT_NODE.children:
if states.fEN == fen_now:
return states
def updatePlayAndWins(gs):
    """Propagate a finished game's result into the persistent move tree.

    Increments the root play counter, credits a win when black won, then
    walks the played line via helperUpdatePlayAndWins.

    NOTE(review): ``gs.checkMate`` is forced to True unconditionally here,
    which makes the helper's ``elif gs.staleMate`` branch unreachable
    whenever it was black's turn — confirm stalemates are meant to count
    as black wins.
    """
    print (len(gs.moveLogLong))
    gs.checkMate = True
    TREE.plays = 1 + int(TREE.plays)
    # whiteToMove at game end means black delivered the mate.
    if gs.checkMate and not gs.whiteToMove:
        TREE.wins = int(TREE.wins) + 1
    helperUpdatePlayAndWins(gs, TREE, level=1)
def helperUpdatePlayAndWins(gs, current_node, level):
    """Recursively update play/win counters along the line actually played.

    At each tree level, finds the child matching gs.moveLogLong[level],
    bumps its play count, credits 1 for a black win or 0.5 for a stalemate,
    then recurses one level deeper until the move log is exhausted.
    """
    print(level)
    for state in current_node.children:
        if state == gs.moveLogLong[level]:
            state.plays = float(state.plays) + 1
            # Descend into the matched child for the next recursion step.
            current_node = state
            if gs.checkMate and not gs.whiteToMove:
                state.wins = float(state.wins) + 1
            elif gs.staleMate:
                state.wins = float(state.wins) + 0.5
    if len(gs.moveLogLong) - 1 > level:
        helperUpdatePlayAndWins(gs, current_node, level + 1)
if __name__ == "__main__":
main()
# todo: Add more evaluations (pieces covered)
# todo: try to get evaluation from the imported data
# todo: Add Player Specific Bot
# todo: make heurustic: which is the first move to watch (makes it faster)
| KaiBaeuerle/chessAI | Chess/ChessMain.py | ChessMain.py | py | 16,775 | python | en | code | 0 | github-code | 36 |
11439201598 | from itertools import product
from typing import Union
Coor = Union[tuple[int, int, int], tuple[int, int, int, int]]
CubeMap = set[Coor]


def get_input() -> CubeMap:
    """Parse input.txt: every '#' becomes an active cell at z == 0."""
    active = set()
    with open('input.txt', 'r') as f:
        for row, line in enumerate(f.readlines()):
            for col, ch in enumerate(line):
                if line and ch == '#':
                    active.add((row, col, 0))
    return active


def neigh(c: Coor, space: int) -> set[Coor]:
    """Return all cells at Chebyshev distance 1 from *c* in *space* dimensions."""
    return {
        tuple(axis + delta for axis, delta in zip(c, offset))
        for offset in product([-1, 0, 1], repeat=space)
        if any(delta != 0 for delta in offset)
    }
def evolve(active: CubeMap, space: int=3) -> CubeMap:
    """Apply one generation of the Conway-cube rules.

    A cell stays active with 2 or 3 active neighbours; an inactive cell
    activates with exactly 3. Only neighbours of currently active cells
    can change, so only those are examined.
    """
    nxt = set()
    seen = set()
    for cell in active:
        for cand in neigh(cell, space) - seen:
            live_around = len(neigh(cand, space) & active)
            if cand in active:
                if live_around in (2, 3):
                    nxt.add(cand)
            elif live_around == 3:
                nxt.add(cand)
            seen.add(cand)
    return nxt
def part_1(initial) -> int:
    """Run six 3-D generations and return the number of active cells."""
    state = initial
    for _generation in range(6):
        state = evolve(state)
    return len(state)
def part_2(initial) -> int:
    """Run six 4-D generations; each 3-D cell is first lifted to w == 0."""
    state = {(*cell, 0) for cell in initial}  # convert to 4d
    for _generation in range(6):
        state = evolve(state, space=4)
    return len(state)
if __name__ == "__main__":
initial = get_input()
print(f'Part 1 answer: {part_1(initial)}')
print(f'Part 2 answer: {part_2(initial)}')
| markopuzav/aoc-2020 | day17/solution.py | solution.py | py | 1,413 | python | en | code | 0 | github-code | 36 |
16209591268 | # -*- coding: utf-8 -*-
#@author: Lalo Valle
import math
from Programa import *
programa = Programa.programa()
""" Lista de nombre de los tokens """
tokens = [
'NUMERO',
'INDEFINIDA',
'VARIABLE',
'FUNCION',
'CONSTANTE',
'CADENA',
'PRINT',
'INCREMENTO',
'DECREMENTO',
'OR', # Operadores lógicos
'AND',
'MAYORIGUAL',
'MENORIGUAL',
'IGUALREL',
'DIFERENTE',
'WHILE',
'IF',
'ELSE',
'FOR',
'TRUE',
'FALSE'
]
""" Tokens compuestos por unicamente un símbolo """
literals = [
'+','-',
'*','/','%',
'^',
'=',
';',
'(','{',
')','}',
'>','<',
'!',
','
]
"""
RECURSOS MATEMÁTICOS
>>>>>>>>>>>>>>>>>>>>
"""
"""
Funciones con validación de dominio
"""
def Log(x):
    """Natural logarithm with domain validation.

    :param x: number; must be > 0.
    :return: math.log(x)
    :raises SyntaxError: when x is outside the domain (after reporting
        the error on the console).
    """
    try:
        return math.log(x)
    except (ValueError, TypeError) as error:
        # Bug fix: the original did ``except Error:`` (an undefined name,
        # so any domain error became a NameError) and called the
        # nonexistent ``mostrarError``; report via the module's console
        # helper instead. The dead ``return None`` after raise was removed.
        imprimirError('ErrorLog', error)
        raise SyntaxError
def Log10(x):
    """Base-10 logarithm with domain validation.

    :param x: number; must be > 0.
    :return: math.log10(x)
    :raises SyntaxError: when x is outside the domain (after reporting
        the error on the console).
    """
    try:
        return math.log10(x)
    except (ValueError, TypeError) as error:
        # Bug fix: ``except Error:`` referenced an undefined name and the
        # error reporter ``mostrarError`` does not exist in this module.
        imprimirError('ErrorLog10', error)
        raise SyntaxError
def Exp(x):
    """Exponential function with range/overflow validation.

    :param x: number.
    :return: math.exp(x)
    :raises SyntaxError: on overflow or an invalid argument (after
        reporting the error on the console).
    """
    try:
        return math.exp(x)
    except (OverflowError, ValueError, TypeError) as error:
        # Bug fix: ``except Error:`` referenced an undefined name and the
        # error reporter ``mostrarError`` does not exist in this module.
        imprimirError('ErrorExp', error)
        raise SyntaxError
def Sqrt(x):
    """Square root with domain validation.

    :param x: number; must be >= 0.
    :return: math.sqrt(x)
    :raises SyntaxError: when x is outside the domain (after reporting
        the error on the console).
    """
    try:
        return math.sqrt(x)
    except (ValueError, TypeError) as error:
        # Bug fix: ``except Error:`` referenced an undefined name and the
        # error reporter ``mostrarError`` does not exist in this module.
        imprimirError('ErrorSqrt', error)
        raise SyntaxError
""" Diccionario de constantes matemáticas y su valor """
constantes = {
'π':math.pi, 'PI':math.pi,
'e':math.e, 'E':math.e,
'Γ':0.57721566490153286060 , 'GAMMA':0.57721566490153286060,
'DEG':57.29577951308232087680,
'φ':1.6180339887498948482, 'PHI':1.6180339887498948482,
}
""" Diccionario de funciones matemáticas y la referencia a la función """
funciones = {
'sin':math.sin, 'cos':math.cos, 'tan':math.tan,
'log':Log, 'log10':Log10,
'exp':Exp,
'sqrt':Sqrt,
'abs':math.fabs,
'int':int
}
variables = {} # Diccionario que almacena el nombre(key) y valor(value) de las variables
tipoInstruccion = {
'STOP':False,
'constpush':programa.constpush,
'varpush':programa.varpush,
'evaluacion':programa.evaluacion,
'suma':programa.suma,
'resta':programa.resta,
'multiplicacion':programa.multiplicacion,
'division':programa.division,
'modulo':programa.modulo,
'negacion':programa.negacion,
'potencia':programa.potencia,
'asignacion':programa.asignacion,
'funcion':programa.funcion,
'print':programa.print,
'mayorque':programa.mayorque,
'menorque':programa.menorque,
'igual':programa.igual,
'mayorigual':programa.mayorigual,
'menorigual':programa.menorigual,
'diferente':programa.diferente,
'and':programa.andcode,
'or':programa.orcode,
'not':programa.notcode,
'if':programa.ifcode,
'while':programa.whilecode,
'for':programa.forcode
}
"""
MENSAJES CONSOLA
>>>>>>>>>>>>>>>>
"""
def imprimirError(tipo, mensaje):
    """Print an error line to the console in red italics (ANSI escapes)."""
    print(f"\x1b[0;m\x1b[3;31m{tipo} : {mensaje}\x1b[0;m")
def imprimirNotificacion(mensaje):
    """Print a highlighted (yellow) notification banner to the console.

    :param mensaje: text to show inside the ``--- ... ---`` banner.
    """
    # Bug fix: the original passed ``mensaje`` as a second positional
    # argument to print() instead of interpolating it, so the literal
    # "--- %s ---" was printed followed by the message.
    print('\x1b[1;33m' + '\n--- %s ---\n' % mensaje)
def imprimirResultado(resultado):
    """Append a formatted result line to the interpreter's output buffer.

    Does not print directly; the ``programa`` object's ``_output`` list is
    rendered elsewhere.
    """
    programa._output.append('HOC5 >> ' + str(resultado))
| LaloValle/HOC5 | Recursos.py | Recursos.py | py | 2,900 | python | es | code | 0 | github-code | 36 |
30677471039 | #!/usr/bin/env python
import os
import sys
import glob
from mars_utils import *
SCRIPT_PATH = os.path.split(os.path.realpath(__file__))[0]
BUILD_OUT_PATH = 'cmake_build/watchos'
INSTALL_PATH = BUILD_OUT_PATH + '/Darwin.out'
WATCH_BUILD_SIMULATOR_CMD = 'cmake ../.. -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../../ios.toolchain.cmake -DIOS_PLATFORM=SIMULATOR_WATCHOS -DIOS_DEPLOYMENT_TARGET=2.0 -DIOS_ARCH="i386" -DENABLE_ARC=0 -DENABLE_BITCODE=1 -DENABLE_VISIBILITY=1 && make -j8 && make install'
WATCH_BUILD_OS_CMD = 'cmake ../.. -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../../ios.toolchain.cmake -DIOS_PLATFORM=WATCHOS -DIOS_DEPLOYMENT_TARGET=2.0 -DIOS_ARCH="armv7k;arm64_32" -DENABLE_ARC=0 -DENABLE_BITCODE=1 -DENABLE_VISIBILITY=1 && make -j8 && make install'
GEN_WATCH_OS_PROJ = 'cmake ../.. -G Xcode -DCMAKE_TOOLCHAIN_FILE=../../ios.toolchain.cmake -DIOS_PLATFORM=OS -DIOS_DEPLOYMENT_TARGET=2.0 -DIOS_ARCH="armv7;arm64" -DCMAKE_XCODE_ATTRIBUTE_VALID_ARCHS="armv7k;arm64_32" -DENABLE_ARC=0 -DENABLE_BITCODE=1 -DENABLE_VISIBILITY=1'
def build_watch(tag=''):
    """Build the mars watchOS framework (device + simulator fat binary).

    Pipeline: cmake/make for device archs, libtool-merge the static libs,
    repeat for the simulator arch, lipo both into one fat library, then
    wrap it as mars.framework.

    :param tag: optional revision tag embedded into the generated
        mars revision file.
    :return: True on success, False as soon as any step fails.
    """
    gen_mars_revision_file('comm', tag)

    # --- device (armv7k / arm64_32) build ---
    clean(BUILD_OUT_PATH)
    os.chdir(BUILD_OUT_PATH)
    ret = os.system(WATCH_BUILD_OS_CMD)
    os.chdir(SCRIPT_PATH)
    if ret != 0:
        print('!!!!!!!!!!!build os fail!!!!!!!!!!!!!!!')
        return False

    # Merge all produced .a archives into a single device library.
    libtool_os_dst_lib = INSTALL_PATH + '/os'
    if not libtool_libs(glob.glob(INSTALL_PATH + '/*.a'), libtool_os_dst_lib):
        return False

    # --- simulator (i386) build ---
    clean(BUILD_OUT_PATH)
    os.chdir(BUILD_OUT_PATH)
    ret = os.system(WATCH_BUILD_SIMULATOR_CMD)
    os.chdir(SCRIPT_PATH)
    if ret != 0:
        print('!!!!!!!!!!!build simulator fail!!!!!!!!!!!!!!!')
        return False

    libtool_simulator_dst_lib = INSTALL_PATH + '/simulator'
    if not libtool_libs(glob.glob(INSTALL_PATH + '/*.a'), libtool_simulator_dst_lib):
        return False

    # Combine device + simulator slices into one fat binary.
    lipo_src_libs = []
    lipo_src_libs.append(libtool_os_dst_lib)
    lipo_src_libs.append(libtool_simulator_dst_lib)
    lipo_dst_lib = INSTALL_PATH + '/mars'
    if not lipo_libs(lipo_src_libs, lipo_dst_lib):
        return False

    # Package the fat library with headers as mars.framework.
    dst_framework_path = INSTALL_PATH + '/mars.framework'
    make_static_framework(lipo_dst_lib, dst_framework_path, COMM_COPY_HEADER_FILES, '../')

    print('==================Output========================')
    print(dst_framework_path)
    return True
def gen_watch_project():
    """Generate an Xcode project for the watch target via cmake -G Xcode.

    :return: True on success, False when cmake fails.
    """
    gen_mars_revision_file('comm')

    clean(BUILD_OUT_PATH)
    os.chdir(BUILD_OUT_PATH)
    ret = os.system(GEN_WATCH_OS_PROJ)
    os.chdir(SCRIPT_PATH)
    if ret != 0:
        print('!!!!!!!!!!!gen fail!!!!!!!!!!!!!!!')
        return False

    print('==================Output========================')
    print('project file: %s/%s' %(SCRIPT_PATH, BUILD_OUT_PATH))

    return True
def main():
    """Entry point: build directly when a tag argument is given, otherwise
    show an interactive menu.

    Note that every menu branch breaks out of the loop, including the
    fall-through ``else`` which performs a default build on unrecognised
    input.
    """
    while True:
        if len(sys.argv) >= 2:
            # Non-interactive mode: first CLI argument is the revision tag.
            build_watch(sys.argv[1])
            break
        else:
            num = input('Enter menu:\n1. Clean && build.\n2. Gen Watch Project.\n3. Exit\n')
            if num == '1':
                build_watch()
                break
            elif num == '2':
                gen_watch_project()
                break
            elif num == '3':
                break
            else:
                # Any other input: fall back to a default build.
                build_watch()
                break
if __name__ == '__main__':
main()
| Tencent/mars | mars/build_watch.py | build_watch.py | py | 3,361 | python | en | code | 16,975 | github-code | 36 |
4619575632 | #!/usr/bin/env python
import django
from net_system.models import NetworkDevice, Credentials
from pprint import pprint
rtrs = {
"test-sw1": {
"port": "22",
"username": "admin1",
"eapi_port": "443",
"password": "99saturday",
"ip": "1.1.1.1",
"device_type": "arista_eos"
},
"test-sw2": {
"port": "22",
"username": "admin1",
"eapi_port": "443",
"password": "99saturday",
"ip": "2.2.2.2",
"device_type": "arista_eos"
}
}
def dump_devices():
    """Pretty-print every NetworkDevice row currently in the database."""
    for device in NetworkDevice.objects.all():
        pp(device.__dict__)
def dump_credentials():
    """Pretty-print every Credentials row currently in the database."""
    for credential in Credentials.objects.all():
        pp(credential.__dict__)
def main():
    """Create two demo NetworkDevice rows and dump all devices/credentials.

    Demonstrates both creation styles: instantiate-and-save versus
    get_or_create (which is idempotent across repeated runs and returns an
    (object, created) tuple).
    """
    django.setup()
    curCred = Credentials.objects.get(username='admin1')
    # Add Device
    dbDevice = NetworkDevice(
        device_name='test-sw4',
        device_type='cisco',
        ip_address='2.2.2.2',
        port='22',
        vendor='cisco',
        credentials=curCred)
    dbDevice.save()
    # Add device get_or_create
    dbDevice = NetworkDevice.objects.get_or_create(
        device_name='test-sw5',
        device_type='cisco',
        ip_address='2.2.2.2',
        port='22',
        vendor='cisco',
        credentials=curCred)
    dump_devices()
    dump_credentials()
if __name__ == '__main__':
main()
| jerry-bonner/pynet | class8/ex3.py | ex3.py | py | 1,335 | python | en | code | 0 | github-code | 36 |
11917002254 | from django.db import models
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
import uuid
from users.models import Profile
from ckeditor.fields import RichTextField
# Create your models here.
def user_directory_path(instance, filename):
    """Build the per-blog upload path used by ImageField's upload_to."""
    return f'blogs/{instance.id}/{filename}'
class Category(models.Model):
    """A blog category, referenced by Blog.category."""
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
class Blog(models.Model):
    """A blog post owned by a Profile, with likes, favorites and vote stats."""
    owner = models.ForeignKey(
        Profile, null=True, blank=True, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    content = models.TextField()
    category = models.ForeignKey(Category, on_delete=models.PROTECT, default=1)
    favorites = models.ManyToManyField(Profile, related_name='favorite', default=None, blank=True)
    # Bug fix: ``likes`` was declared twice; the first declaration
    # (related_name="blogs", null=True) was dead code because this later
    # assignment rebound the name before Django collected the fields.
    likes = models.ManyToManyField(Profile, related_name='like', default=None, blank=True)
    featured_image = models.ImageField(null=True, blank=True, upload_to=user_directory_path, default="default.jpg")
    created = models.DateTimeField(auto_now_add=True)
    id = models.UUIDField(default=uuid.uuid4, unique=True, primary_key=True, editable=False)

    def __str__(self):
        return self.title

    class Meta:
        ordering = ['created']

    @property
    def imageURL(self):
        """URL of the featured image, or '' when it cannot be resolved."""
        try:
            url = self.featured_image.url
        except Exception:
            # Narrowed from a bare ``except:``; FieldFile.url raises when
            # no file is associated with the field.
            url = ''
        return url

    @property
    def total_likes(self):
        """Number of profiles that liked this post."""
        return self.likes.count()

    @property
    def total_comments(self):
        """Number of comments attached to this post."""
        return self.comments.count()

    @property
    def reviewers(self):
        """Ids of the profiles that reviewed this post."""
        queryset = self.review_set.all().values_list('owner__id', flat=True)
        return queryset

    @property
    def getVoteCount(self):
        """Recompute and store vote_total / vote_ratio from the reviews.

        NOTE(review): vote_total and vote_ratio are not declared model
        fields here, so save() does not persist them — confirm they exist
        on a base class or are intended as transient attributes.
        """
        reviews = self.review_set.all()
        totalVotes = reviews.count()
        if totalVotes == 0:
            # Bug fix: the original divided by totalVotes unconditionally,
            # raising ZeroDivisionError for posts without reviews.
            self.vote_total = 0
            self.vote_ratio = 0
            self.save()
            return
        upVotes = reviews.filter(value='up').count()
        ratio = (upVotes / totalVotes) * 100
        self.vote_total = totalVotes
        self.vote_ratio = ratio
        self.save()
class Comment(models.Model):
    """A comment on a Blog post, newest first."""
    owner = models.ForeignKey(Profile, null=True, blank=True, on_delete=models.CASCADE)
    blog = models.ForeignKey(Blog, on_delete=models.CASCADE, related_name="comments")
    content = models.TextField(null=True, blank=True)
    created = models.DateTimeField(default=timezone.now)
    id = models.UUIDField(default=uuid.uuid4, unique=True, primary_key=True, editable=False)

    class Meta:
        ordering = ['-created']

    def __str__(self):
        return f"comment by {self.owner}"
| minarefaat1002/blog_website | blogs project/blog/models.py | models.py | py | 2,654 | python | en | code | 0 | github-code | 36 |
14172483861 | from pprint import pprint as pp
from os import SEEK_SET
lst_0 = [
'Андрей Говорухи\t\t 6 6 1 4 9 9 10 4 8 2 3 8\n',
'Василий Петров\t\t 2 9 4 7 6 6 3 6 5 5 2 4\n',
'Гавриил Варфаломеев\t 10 10 4 10 7 9 4 6 8 1 1 1\n',
'Игнат Тюльпанов\t\t 8 1 4 1 1 5 2 5 2 2 10 8\n',
'Илья Муромцев\t\t 1 6 4 7 10 9 5 3 7 4 7 2\n',
'Кощей Бессмертный\t 3 10 1 4 1 8 10 6 2 10 7 4\n',
'Максим Мухин\t\t 10 8 9 9 5 8 6 5 7 2 4 10\n',
'Маргарита Мартынова\t 9 1 5 1 10 10 2 4 4 9 8 10\n',
'Петр Николаев\t\t 2 9 5 9 1 2 8 7 8 1 9 1\n',
'Полина Гусева\t\t 9 2 8 7 3 9 9 5 1 9 2 6\n',
'Спиридов Тереньтьев\t 4 7 7 3 10 9 7 2 10 9 8 1\n',
'Станислав Трердолобов\t 8 1 6 1 4 1 10 8 8 1 8 8\n'
]
# Write the raw report to disk, then read it back for processing.
with open('file1.txt', 'w') as report:
    report.writelines(lst_0)

with open('file1.txt', 'r') as report:
    lst = report.readlines()

# Strip newlines, drop tabs, and split each line into
# [first, last, grade1, grade2, ...] fields.
lst1 = [line.strip('\n') for line in lst]
lst2 = [line.replace('\t', '') for line in lst1]
lst3 = [line.split() for line in lst2]

# Pair each first name with the tab-padded last name.
s = [(fields[0], '{}\t\t'.format(fields[1])) for fields in lst3]

print(s)
| Bidlevskyi/hometasks_python | htask14.py | htask14.py | py | 1,360 | python | ru | code | 0 | github-code | 36 |
17230717962 | import numpy as np
def load_data(path):
    """Load a tab-separated dataset of space-separated features and a label.

    Each line has the form ``"f1 f2 ... fk\\tlabel"``. A constant '1' is
    prepended to every feature vector as the bias term.

    :param path: path to the data file.
    :return: tuple (x, y) — x is an (n, k+1) float array, y an (n,) int array.
    """
    x = []
    y = []
    # ``with`` guarantees the file handle is closed (the original leaked it).
    with open(path) as f:
        for line in f.readlines():
            data = line.strip().split('\t')
            feature = data[0].split(' ')
            feature.insert(0, '1')  # bias term
            x.append(feature)
            y.append(data[-1])
    # Bug fix: np.float / np.int were removed in NumPy 1.24 and raised
    # AttributeError; the builtin types are the documented replacements.
    x = np.array(x, dtype=float)
    y = np.array(y, dtype=int)
    return x, y
def sign(x):
    """Sign function mapping values <= 0 to -1 and values > 0 to +1.

    Works elementwise on ndarrays of any shape (the original handled only
    1-D input via ``x.shape[0]``) and on plain scalars.

    :param x: scalar or numpy array.
    :return: float64 array of +/-1.0 for array input, int +/-1 for scalars.
    """
    if isinstance(x, np.ndarray):
        # np.where broadcasts over arbitrary shapes and yields float64,
        # matching the original np.ones-based result for 1-D input.
        return np.where(x <= 0, -1.0, 1.0)
    return -1 if x <= 0 else 1
12785925952 | from py_reconhecimento import TReconhecimento
from py_cadastro import TCadastro
from py_principal import TPrincipal
from kivy.uix.screenmanager import ScreenManager
from kivy.app import App
from kivy import Config
from kivy.lang import Builder
Config.set('graphics', 'resizable', True)
Config.set('kivy', 'exit_on_escape', '0')
# Config.set('graphics', 'window_state', 'maximized')
Config.set('graphics', 'width', 1000)
Config.set('graphics', 'height', 600)
class GerenciadorTelas(ScreenManager):
    """Root screen manager holding the app's three screens.

    Instantiates the main, registration and recognition screens and
    registers them so the app can switch between them by name.
    """
    def __init__(self):
        super().__init__()
        self.tprincipal = TPrincipal()
        self.tcadastro = TCadastro()
        self.treconhecimento = TReconhecimento()
        self.add_widget(self.tprincipal)
        self.add_widget(self.tcadastro)
        self.add_widget(self.treconhecimento)
class Kv_Main(App):
    """Kivy application: loads the kv layout and mounts the screen manager.

    NOTE(review): ``icon`` uses an absolute path ('/assets/...') while the
    kv file is loaded relatively — confirm the leading slash is intended.
    """
    title = 'Sistema de controle de acesso por Reconheicmento Facial'
    icon = '/assets/ImagesApp/logo.png'

    def build(self):
        # Load the declarative layout, then return the root widget.
        Builder.load_file('kv_main.kv')
        return GerenciadorTelas()
if __name__ == '__main__':
Kv_Main().run()
| eticialima/recognitionfacial | project/py_main.py | py_main.py | py | 1,088 | python | en | code | 3 | github-code | 36 |
15715933133 | import json
import os
import sys
from tempfile import NamedTemporaryFile
DEPRECATED_KEYS = [
'site_yaml_path',
'inventory_config',
'variable_manager_config',
'passwords',
'modules',
'private_key_file']
LIST_TYPES = ['skip-tags', 'tags']
DIRECT_PARAMS = ['start_at_task', 'scp_extra_args', 'sftp_extra_args',
'ssh_common_args', 'ssh_extra_args', 'timeout']
def get_fileno():
    """Return sys.stdout's ``fileno`` attribute, or None when the current
    stdout object has no such attribute (e.g. a replaced/captured stream).

    NOTE(review): this returns the bound *method object*, not the file
    descriptor — callers presumably only test it for truthiness; confirm
    ``fileno()`` (calling it) was not intended.
    """
    try:
        return sys.stdout.fileno
    except AttributeError:
        return
class CloudifyAnsibleSDKError(Exception):
    """Generic Error for handling issues preparing
    the Ansible Playbook.
    """
    pass
class AnsiblePlaybookFromFile(object):
    """Builds the argument list for an ``ansible-playbook`` subprocess.

    Collects the playbook path, inventory sources, CLI options and
    environment variables, and exposes them as ready-to-use process
    arguments via :attr:`process_args`.
    """

    def __init__(self,
                 playbook_path=None,
                 sources='localhost,',
                 options_config=None,
                 run_data=None,
                 verbosity=2,
                 logger=None,
                 site_yaml_path=None,
                 environment_variables=None,
                 additional_args=None,
                 **kwargs):
        # site_yaml_path is the legacy spelling; playbook_path wins only
        # when site_yaml_path is falsy.
        self.playbook = site_yaml_path or playbook_path
        self.sources = sources
        self.options_config = options_config or {}
        self.run_data = run_data or {}
        self.environment_variables = environment_variables or {}
        self.additional_args = additional_args or ''
        self._verbosity = verbosity
        self.logger = logger

        # Warn (via the provided logger) about any legacy kwargs still
        # passed by callers; their values are otherwise ignored.
        for deprecated_key in DEPRECATED_KEYS:
            if deprecated_key in kwargs:
                self.logger.error(
                    'This key been deprecated: {0} {1}'.format(
                        deprecated_key, kwargs[deprecated_key]))

        # add known additional params to additional_args
        # (rendered as "--start-at-task <json>" style CLI flags).
        for field in DIRECT_PARAMS:
            if kwargs.get(field):
                self.additional_args += "--{field} {value} ".format(
                    field=field.replace("_", "-"),
                    value=json.dumps(kwargs[field]))

    @property
    def env(self):
        """Copy of os.environ overlaid with the configured extra variables."""
        _env = os.environ.copy()
        for key, value in self.environment_variables.items():
            _env[key] = value
        return _env

    @property
    def verbosity(self):
        """Verbosity flag: '-v' repeated up to self._verbosity ('-vv', ...)."""
        verbosity = '-v'
        for i in range(1, self._verbosity):
            verbosity += 'v'
        return verbosity

    @property
    def options(self):
        """Render options_config (plus run_data as extra vars) as CLI flags.

        NOTE(review): accessing this property mutates self.options_config
        (merging run_data into extra_vars) and writes extra vars to a
        NamedTemporaryFile with delete=False, which is never removed —
        the file leaks by design so ansible can read it after this call.
        Values are wrapped with repr(), so strings are shell-quoted.
        """
        options_list = []
        if 'extra_vars' not in self.options_config:
            self.options_config['extra_vars'] = {}
        self.options_config['extra_vars'].update(self.run_data)
        for key, value in self.options_config.items():
            if key == 'extra_vars':
                # ansible accepts "@file" syntax for JSON extra vars.
                f = NamedTemporaryFile(delete=False)
                with open(f.name, 'w') as outfile:
                    json.dump(value, outfile)
                value = '@{filepath}'.format(filepath=f.name)
            elif key == 'verbosity':
                # Verbosity is handled by the dedicated property above.
                self.logger.error('No such option verbosity')
                del key  # no-op: deletes only the loop-local name
                continue
            key = key.replace("_", "-")
            if isinstance(value, dict):
                value = json.dumps(value)
            elif isinstance(value, list) and key not in LIST_TYPES:
                value = [i for i in value]
            elif isinstance(value, list):
                # tags/skip-tags are passed as comma-separated strings.
                value = u",".join(value)
            options_list.append(
                '--{key}={value}'.format(key=key, value=repr(value)))
        return ' '.join(options_list)

    @property
    def process_args(self):
        """Ordered argument list for the ansible-playbook invocation."""
        return [
            self.verbosity,
            '-i {0}'.format(self.sources),
            self.options,
            self.additional_args,
            self.playbook
        ]

    def execute(self, process_execution_func, **kwargs):
        """Delegate execution to the supplied callable with the given kwargs."""
        return process_execution_func(**kwargs)
| christaotaoz/shkd-work | work/doc/srv6+5G/ansible8.82/cloudify_ansible_sdk/__init__.py | __init__.py | py | 3,848 | python | en | code | 0 | github-code | 36 |
74504081705 | import turtle
import os
#window = wn
wn = turtle.Screen()
wn.title("Developed by: Map The Coder")
wn.bgcolor("purple")
wn.setup(width=800, height=600)
wn.tracer(0)
#a tracer stops the window from updating, therfore has to be manually updated. This allows me to speed up the game on command
# Score
score_a = 0
score_b = 0
# Paddle A
paddle_a = turtle.Turtle()
paddle_a.speed(0)
paddle_a.shape("square")
paddle_a.color("white")
paddle_a.shapesize(stretch_wid=5, stretch_len=1)
paddle_a.penup()
paddle_a.goto(-350, 0)
# Paddle B
paddle_b = turtle.Turtle()
paddle_b.speed(0)
paddle_b.shape("square")
paddle_b.color("white")
paddle_b.shapesize(stretch_wid=5, stretch_len=1)
paddle_b.penup()
paddle_b.goto(350, 0)
# Ball
ball = turtle.Turtle()
ball.speed(0)
ball.shape("circle")
ball.color("black")
ball.penup()
ball.goto(0, 0)
ball.dx = 2
ball.dy = -2
# Pen
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Player A: 0 Player B: 0", align="center", font=("Courier", 24, "normal"))
# Functions
def paddle_a_up():
    """Move the left paddle up one step."""
    paddle_a.sety(paddle_a.ycor() + 20)


def paddle_a_down():
    """Move the left paddle down one step."""
    paddle_a.sety(paddle_a.ycor() - 20)


def paddle_b_up():
    """Move the right paddle up one step."""
    paddle_b.sety(paddle_b.ycor() + 20)


def paddle_b_down():
    """Move the right paddle down one step."""
    paddle_b.sety(paddle_b.ycor() - 20)
# Keyboard Binding
wn.listen()
# first line says "listen to the keyboard input "
wn.onkeypress(paddle_a_up, "w")
# this line states to the program "when the user presses lowercase w call the funtion paddle_a_up"
wn.onkeypress(paddle_a_down, "s")
wn.onkeypress(paddle_b_up, "Up")
wn.onkeypress(paddle_b_down, "Down")
# Main game loop
while True:
wn.update()
# Move the ball
ball.setx(ball.xcor() + ball.dx)
ball.sety(ball.ycor() + ball.dy)
# Border Checking
# This is comparing the ball's y cordinate, so it bounces off and stays in the game.
if ball.ycor() > 290:
ball.sety(290)
ball.dy *= -1
#this line of code reverses the direction of the ball.
os.system("afplay boing.wav&")
if ball.ycor() < -290:
ball.sety(-290)
ball.dy *= -1
os.system("afplay boing.wav&")
if ball.xcor() > 390:
ball.goto(0, 0)
ball.dx *= -1
score_a += 1
pen.clear()
pen.write("Player A: {} Player B: {}".format(score_a, score_b), align="center", font=("Courier", 24, "normal"))
if ball.xcor() < -390:
ball.goto(0, 0)
ball.dx *= -1
score_b += 1
pen.clear()
pen.write("Player A: {} Player B: {}".format(score_a, score_b), align="center", font=("Courier", 24, "normal"))
# Paddle and Ball Collisions
if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor() + 40 and ball.ycor() > paddle_b.ycor() -40):
ball.setx(340)
ball.dx *= -1
os.system("afplay boing.wav&")
if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor() + 40 and ball.ycor() > paddle_a.ycor() -40):
ball.setx(-340)
ball.dx *= -1
os.system("afplay boing.wav&") | jeremyamartins/pong.python | pong.py | pong.py | py | 3,166 | python | en | code | 0 | github-code | 36 |
20857149837 | import pandas as pd
from pandas.testing import assert_frame_equal
from sportpools.model.tennis import TennisPool
from sportpools.model.emulator import TennisPoolEmulator
ROUNDS = ["r64", "r32", "r16", "qf", "sm", "f", "w"]
def test_determine_black_points():
    """Seed numbers map onto the expected number of black points."""
    frame = pd.DataFrame({'seed': [1, 4, 6, 32, 64, None]})
    expected = frame.copy()
    expected['black'] = [5, 4, 3, 1, 0, 0]
    assert_frame_equal(TennisPool.determine_black_points(frame), expected)
def test_clean_player_name():
    """Seedings and country codes are stripped from player names."""
    raw = pd.DataFrame({'player': [
        '(1)Roger Federer(SUI)',
        '(2)Rafael Nadal(ESP)',
        '(3)Novak Djokovic(SRB)',
    ]})
    expected = pd.DataFrame({'player': ['Roger Federer', 'Rafael Nadal', 'Novak Djokovic']})
    assert_frame_equal(TennisPool.clean_player_name(raw), expected)
def test_convert_columns():
    """Percentage strings are converted into float fractions."""
    raw = pd.DataFrame({'perc': ['5%', '2.5%', '100.0%']})
    expected = pd.DataFrame({'perc': [0.05, 0.025, 1.0]})
    assert_frame_equal(TennisPool.convert_columns(raw, ['perc']), expected)
# def test_determine_score_potency():
# players = pd.DataFrame({
# 'rounds': [7, 6, 5, 4, 3, 2, 1],
# 'black': [0, 1, 1, 3, 5, 2, 0]})
#
# players_result = players.copy()
# players_result['potency'] = [160, 81, 63, 35, 15, 16, 10]
#
# assert_frame_equal(TennisPoolEmulator.determine_score_potency(players, ROUNDS), players_result)
def test_extract_seed():
    """The leading '(n)' seeding is parsed into a numeric 'seed' column."""
    raw = pd.DataFrame({'player': [
        '(1)Roger Federer(SUI)',
        '(2)Rafael Nadal(ESP)',
        '(3)Novak Djokovic(SRB)',
    ]})
    expected = raw.copy()
    expected['seed'] = [1, 2, 3]
    assert_frame_equal(TennisPool.extract_seed(raw), expected)
def test_clean_invalid_rows():
    """Rows without a parseable player entry are dropped."""
    raw = pd.DataFrame({'player': [
        '(1)Roger Federer(SUI)',
        '(2)Rafael Nadal(ESP)',
        '(3)Novak Djokovic(SRB)',
        'Player',
        None,
    ]})
    expected = pd.DataFrame({'player': [
        '(1)Roger Federer(SUI)',
        '(2)Rafael Nadal(ESP)',
        '(3)Novak Djokovic(SRB)',
    ]})
    assert_frame_equal(TennisPool.clean_invalid_rows(raw), expected)
| bartcode/sportpools-tennis | tests/test_tennis_pool.py | test_tennis_pool.py | py | 2,212 | python | en | code | 0 | github-code | 36 |
29212263006 | import numpy as np
import pandas as pd
import datetime, time
# 处理输入时间戳,当前汽车驶入时间戳转化为sumo中以秒为单位
def time_processing(timeStamp):
timeArray = time.localtime(timeStamp)
# 时间时区设置转换
base_time = datetime.datetime(timeArray[0], timeArray[1], timeArray[2], 0, 0, 0)
# 获取当日日期定位到00:00:00
base_time = time.mktime(base_time.timetuple())
# base_time转变为时间戳格式
return timeStamp - base_time
def create_trip_file(data_file="../data/chengdu/20161116.csv"):
names = ["id", "start_time", "end_time", "time?", "from_lane", "to_lane"]
data = pd.read_csv(data_file, header=None, names=names, index_col=False)
# 行索引命名,列索生成
data = data.sort_values(by='start_time', ascending=True)
# 排序升序排序
with open("../data/chengdu/20161116_trips.trips.xml", mode="w") as f:
print('''<?xml version="1.0" encoding="UTF-8"?>
<routes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://sumo.dlr.de/xsd/routes_file.xsd">
''', file=f)
for index, data_line in data.iterrows():
data_line["start_time"] = time_processing(data_line["start_time"])
print(
''' <trip id="{}" depart="{}" from="{}" to="{}"/>'''.format(data_line['id'], data_line['start_time'],
data_line['from_lane'],
data_line['to_lane']),
file=f)
print(
''' <trip id="{}" depart="{}" from="{}" to="{}"/>'''.format(data_line['id'], data_line['start_time'],
data_line['from_lane'],
data_line['to_lane']), )
print('''</routes>''', file=f)
| Rossions/TCSC | DataProcessing/chengdu/processing_abandon.py | processing_abandon.py | py | 2,020 | python | en | code | 1 | github-code | 36 |
25615151962 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import pandas as pd
from datetime import datetime
from txt_table import txt_table
import re
# 将txt文件转化为csv
def transfer_data(folder):
    """Convert every txt climate file under *folder* to csv and merge them.

    Walks *folder* recursively, converts each non-readme .txt/.TXT file to
    csv via txt_table(), concatenates all converted files into a single csv
    named after the readme file's prefix, and returns the merged csv path.

    Args:
        folder: root directory containing the raw climate txt files.
    Returns:
        Path of the merged csv file.
    """
    # Collected paths: every file seen, converted csvs, readme name prefixes.
    filenames = []
    filelist = []
    filexlsx = []
    # Walk the directory tree; the readme file name supplies the output name.
    for root, dirs, files in os.walk(folder):
        for file in files:
            if "readme" in file:
                # NOTE(review): re.match returns None when the name does not
                # contain "_readme", which would raise AttributeError here.
                filexls = re.match("(.*)_readme", file).group(1)
                filexlsx.append(filexls)
            filename = os.path.join(root, file)
            filenames.append(filename)
    for filename in filenames:
        try:
            if "readme" in filename:
                continue
            # Handle both lower- and upper-case extensions.
            if ".txt" in filename:
                fileout = filename.replace('.txt', '.csv')
            elif ".TXT" in filename:
                fileout = filename.replace('.TXT', '.csv')
            else:
                continue
            data = txt_table(filename)
            data.to_csv(fileout, sep=',', encoding='utf-8')
            filelist.append(fileout)
        except Exception:  # was a bare `except:` — don't swallow KeyboardInterrupt/SystemExit
            print('error!')
            continue
    print('已将txt文件转化为csv!')
    # Merge all converted csv files into one frame.
    # NOTE(review): if no readme file was found, filexlsx[0] raises IndexError.
    fileouts = os.path.join(folder, (filexlsx[0] + '.csv'))
    df = pd.DataFrame()
    for converted in filelist:
        df_i = pd.read_csv(converted, sep=',', encoding='utf-8')
        df = pd.concat([df, df_i])
    df.to_csv(fileouts, sep=',', encoding='utf-8')
    print(df.head())
    print("已完成数据合并!")
    return fileouts
# 提取相关数据
def extract_data(filecsv, jiwens, file_station):
    """Aggregate per-station daily climate normals and accumulated temperatures.

    Reads the merged climate CSV (*filecsv*), averages each climate variable
    per station listed in *file_station*, computes the active accumulated
    temperature above each base threshold in *jiwens*, and writes the result
    to ``<filecsv base>_N.csv``.

    Bug fix: the mean vapour pressure was divided by ``len(qixiangtemplist[9])``
    instead of ``len(qixiangtemplist[7])``. The two lists always have equal
    length here, but the index was wrong and fragile under future edits.

    Args:
        filecsv: path of the merged climate CSV produced by transfer_data().
        jiwens: list of base temperatures for accumulated-temperature sums.
        file_station: Excel file with station ids and coordinates.
    """
    # Load the merged climate data; meaningful variables start at column 3.
    datas = pd.read_csv(filecsv, sep=',', encoding='utf-8')
    # Extract the variable column names.
    datacolumns = list(datas.columns)[2:]
    del datacolumns[2]  # drop the date column
    datacolumnslist = [list(datas[i]) for i in datacolumns]
    # shidu_ave = list(datas[u'V13003_701'])
    file_n = filecsv.replace(".csv", "_N.csv")
    print(file_n)
    stations_china = list(pd.read_excel(file_station, sheet_name='Sheet1')[u'区站号'])
    lng = list(pd.read_excel(file_station, sheet_name='Sheet1')[u'经度'])
    lat = list(pd.read_excel(file_station, sheet_name='Sheet1')[u'纬度'])
    # Output columns: one row per station with the averaged variables.
    qixiangday = ['stations_n', 'timeavailas', 'lngs', 'lats', 'station_n_tem_ave', 'station_n_tem_ave_max', 'station_n_tem_ave_min',
                  'station_n_shuiqiya_ave', 'station_n_jiangshui_20', 'station_n_jiangshui_08', 'station_n_fengsu_ave']
    qixiangdaylist = [[] for i in range(len(qixiangday))]
    # One accumulated-temperature list per base threshold.
    station_n_tem_sum = [[] for i in range(len(jiwens))]
    for i in range(0, len(stations_china)):
        # Per-station scratch lists, one per output column.
        qixiangtemp = ['station_n', 'timeavaila', 'lng_n', 'lat_n', 'station_tem_ave', 'station_tem_ave_max', 'station_tem_ave_min',
                       'station_shuiqiya_ave', 'station_jiangshui_20', 'station_jiangshui_08', 'station_fengsu_ave']
        qixiangtemplist = [[] for i in range(len(qixiangtemp))]
        # Collect every row belonging to this station.
        for j in range(0, len(datacolumnslist[0])):
            if datacolumnslist[0][j] == stations_china[i]:
                print(datacolumnslist[0][j])
                qixiangtemplist[0].append(datacolumnslist[0][j])   # station id
                qixiangtemplist[1].append(datacolumnslist[1][j])   # valid period
                qixiangtemplist[2].append(lng[i])                  # longitude
                qixiangtemplist[3].append(lat[i])                  # latitude
                qixiangtemplist[4].append(datacolumnslist[2][j])   # mean daily temperature
                qixiangtemplist[5].append(datacolumnslist[3][j])   # mean daily max temperature
                qixiangtemplist[6].append(datacolumnslist[4][j])   # mean daily min temperature
                qixiangtemplist[7].append(datacolumnslist[5][j])   # mean daily vapour pressure
                qixiangtemplist[8].append(datacolumnslist[6][j])   # 20-20h daily precipitation
                qixiangtemplist[9].append(datacolumnslist[7][j])   # 08-08h daily precipitation
                qixiangtemplist[10].append(datacolumnslist[8][j])  # mean daily wind speed
        if len(qixiangtemplist[4]) != 0:
            qixiangdaylist[0].append(qixiangtemplist[0][0])        # station id
            qixiangdaylist[1].append(str(qixiangtemplist[1][0]))   # valid period
            qixiangdaylist[2].append(qixiangtemplist[2][0])        # longitude
            qixiangdaylist[3].append(qixiangtemplist[3][0])        # latitude
            # Average each variable over the station's rows.
            qixiangdaylist[4].append(sum(qixiangtemplist[4]) / len(qixiangtemplist[4]))    # mean temperature
            qixiangdaylist[5].append(sum(qixiangtemplist[5]) / len(qixiangtemplist[5]))    # mean max temperature
            qixiangdaylist[6].append(sum(qixiangtemplist[6]) / len(qixiangtemplist[6]))    # mean min temperature
            qixiangdaylist[7].append(sum(qixiangtemplist[7]) / len(qixiangtemplist[7]))    # mean vapour pressure (fixed: was len of list 9)
            qixiangdaylist[8].append(sum(qixiangtemplist[8]) / len(qixiangtemplist[8]))    # 20-20h precipitation
            qixiangdaylist[9].append(sum(qixiangtemplist[9]) / len(qixiangtemplist[9]))    # 08-08h precipitation
            qixiangdaylist[10].append(sum(qixiangtemplist[10]) / len(qixiangtemplist[10])) # mean wind speed
            # Active accumulated temperature: sum of daily means above each base.
            for x in range(len(jiwens)):
                tem_sum = []
                for tem in qixiangtemplist[4]:
                    if tem > jiwens[x]:
                        tem_sum.append(tem)
                station_n_tem_sum[x].append(sum(tem_sum))
    # Append one accumulated-temperature column per base threshold.
    for i in range(len(jiwens)):
        qixiangday.append('jiwen%s' % jiwens[i])
        qixiangdaylist.append(station_n_tem_sum[i])
    dfs = pd.DataFrame(qixiangdaylist)
    dfs = dfs.T
    dfs.columns = qixiangday
    print(dfs.head())
    dfs.to_csv(file_n, sep=',', encoding='utf-8')
    print('已完成气象数据提取!')
if __name__=='__main__':
    time_start = datetime.now()
    print('开始时间:' + str(time_start))
    '''第一步,将txt文件转化为csv'''
    # Step 1: convert the raw txt files into a merged csv (interactive: enter 0 to run).
    folder = "D:\\Database\\02China\\04Qixiang\\510000\\"
    #folder="C:\\Users\\jli\\Desktop\\AAA"
    step1=input('是否进行文件转换:')
    if int(step1)==0:
        #filecsv=transfer_data(folder)
        # NOTE(review): the conversion call is commented out and the merged csv
        # path hard-coded instead. If step1 != 0, `filecsv` is never bound and
        # step 2 raises NameError — confirm whether that is intended.
        filecsv= "D:\\Database\\02China\\04Qixiang\\510000\\SURF_CHN_MUL_MDAY_19812010.csv"
    '''第二步,提取每个站点的数据'''
    # Step 2: extract and aggregate per-station data (interactive: enter 0 to run).
    step2=input("是否提取站点数据:")
    if int(step2)==0:
        file_station="D:\\Database\\02China\\04Qixiang\\SURF_CHN_MUL_STATION.xlsx"
        jiwens=[0]  # accumulated-temperature base thresholds (degrees C)
        extract_data(filecsv, jiwens, file_station)
    time_end = datetime.now()
    print('结束时间:' + str(time_end))
    time_last = time_end - time_start
    print('用时' + str(time_last))
72603582824 | a = int(input())
b = int(input())
c = []
d = 0
f = []
# Read all a+b input lines into one list.
for i in range(a + b):
    c.append(input())
# Count unpaired values: a first occurrence increments the counter, a repeat
# decrements it. NOTE(review): a third occurrence decrements again —
# presumably each value appears at most twice in the input.
for i in c:
    if i not in f:
        f.append(i)
        d += 1
    else:
        d -= 1
# Print the number of unpaired values, or the "none such" message.
if d != 0:
    print(d)
else:
    print('Таких нет')
| Reagent992/yandex_academy | 3.2 Множества, словари/05.py | 05.py | py | 256 | python | en | code | 0 | github-code | 36 |
73224987304 | # views.py
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from calculator.models import Report
from calculator.serializers import ReportSerializer, ReportCalculationSerializer
import pandas as pd
from uuid import uuid4
from django.core.files.uploadedfile import InMemoryUploadedFile
from drf_spectacular.utils import extend_schema
@extend_schema(responses=ReportSerializer)
@api_view(['GET'])
def all_reports(request):
    """Return the name and uuid of every saved report."""
    serialized = ReportSerializer(Report.objects.all(), many=True)
    return Response(serialized.data)
@extend_schema(responses=ReportCalculationSerializer)
@api_view(['GET'])
def report(request, uuid):
    """Return the calculated tax data for the report identified by *uuid*."""
    found = get_object_or_404(Report, uuid=uuid)
    return Response(ReportCalculationSerializer(found).data)
@extend_schema(
    request={
        'multipart/form-data': {
            'type': 'object',
            'properties': {
                'file': {
                    'type': 'file',
                    'format': 'binary'
                },
                'name': {
                    'type': 'string',
                }
            },
            'required': ['file', 'name']
        }
    },
    responses={201: ReportSerializer}
)
@api_view(['POST'])
def transactions(request):
    """Create a report from an uploaded .csv file of transactions.

    Expects multipart form data with a ``file`` (CSV rows of
    date/transaction_type/amount/memo) and a ``name`` for the report.
    Returns the serialized report with HTTP 201 on success, HTTP 400 on a
    missing, empty or malformed file.
    """
    column_names = ['date', 'transaction_type', 'amount', 'memo']
    uploaded_file = request.FILES.get('file')
    # Bug fix: when no file part is posted, request.FILES.get returns None
    # and pandas would raise (an unhandled 500) — reject it explicitly.
    if uploaded_file is None:
        return Response({"error": "No file provided. Please provide a CSV file."},
                        status=status.HTTP_400_BAD_REQUEST)
    # This is the most vulnerable part of the code, user input can be amazingly bad sometimes
    # Check if the uploaded file is empty
    if isinstance(uploaded_file, InMemoryUploadedFile) and uploaded_file.size == 0:
        return Response({"error": "Empty file. Please provide a file with content."},
                        status=status.HTTP_400_BAD_REQUEST)
    # Try reading the file using pandas to check its format
    try:
        df = pd.read_csv(uploaded_file, names=column_names, header=0)
    except pd.errors.ParserError:
        return Response({"error": "Invalid file format. Please provide a valid CSV file."},
                        status=status.HTTP_400_BAD_REQUEST)
    # Drop incomplete rows rather than failing the whole import.
    df = df.dropna()
    report_data = {
        'name': request.data.get('name'),
        'uuid': uuid4(),
        'transactions': df.to_dict('records')
    }
    report_serializer = ReportSerializer(data=report_data)
    report_serializer.is_valid(raise_exception=True)
    report_serializer.save()
    return Response(report_serializer.data, status=status.HTTP_201_CREATED)
| StefKal/superdupertax | superdupertax/calculator/views.py | views.py | py | 2,749 | python | en | code | 0 | github-code | 36 |
73692412265 | import copy
import mock
import testtools
from stackalytics.processor import default_data_processor
from stackalytics.processor import normalizer
from stackalytics.tests.unit import test_data
class TestDefaultDataProcessor(testtools.TestCase):
    """Unit tests for stackalytics.processor.default_data_processor helpers."""
    def setUp(self):
        super(TestDefaultDataProcessor, self).setUp()
        # Stubbed user retrieval returning the canned test users.
        self.get_users = mock.Mock(return_value=[
            test_data.USERS,
        ])
        normalized_data = copy.deepcopy(test_data.DEFAULT_DATA)
        normalizer.normalize_default_data(normalized_data)
    def tearDown(self):
        super(TestDefaultDataProcessor, self).tearDown()
    def test_normalizer(self):
        """normalize_default_data fills in releases, end dates and user ids."""
        data = copy.deepcopy(test_data.DEFAULT_DATA)
        normalizer.normalize_default_data(data)
        self.assertIn('releases', data['repos'][0])
        self.assertEqual([], data['repos'][0]['releases'],
                         message='Empty list of releases expected')
        self.assertEqual(0, data['users'][0]['companies'][-1]['end_date'],
                         message='The last company end date should be 0')
        self.assertIn('user_id', data['users'][0])
        self.assertEqual(test_data.USERS[0]['launchpad_id'],
                         data['users'][0]['user_id'],
                         message='User id should be set')
        # verify that *independent company is added automatically
        self.assertEqual(3, len(data['users'][1]['companies']))
        self.assertEqual(0, data['users'][1]['companies'][-1]['end_date'],
                         message='The last company end date should be 0')
    def test_update_project_list(self):
        """Projects fetched from gerrit are merged into the repo list."""
        with mock.patch('stackalytics.processor.default_data_processor.'
                        '_retrieve_project_list_from_gerrit') as retriever:
            retriever.return_value = [
                {'module': 'nova',
                 'uri': 'git://git.openstack.org/openstack/nova',
                 'organization': 'openstack'},
                {'module': 'qa', 'uri': 'git://git.openstack.org/openstack/qa',
                 'has_gerrit': True,
                 'organization': 'openstack'},
            ]
            dd = {
                'repos': [
                    {'module': 'qa',
                     'uri': 'git://git.openstack.org/openstack/qa',
                     'organization': 'openstack'},
                    {'module': 'tux',
                     'uri': 'git://git.openstack.org/stackforge/tux',
                     'organization': 'stackforge'},
                ],
                'project_sources': [{'organization': 'openstack',
                                     'uri': 'gerrit://'}],
                'module_groups': [],
            }
            default_data_processor._update_project_list(dd)
            # One repo per module; 'qa' is deduplicated against the existing entry.
            self.assertEqual(3, len(dd['repos']))
            self.assertIn('qa', set([r['module'] for r in dd['repos']]))
            self.assertIn('nova', set([r['module'] for r in dd['repos']]))
            self.assertIn('tux', set([r['module'] for r in dd['repos']]))
            # The gerrit flag only appears on the repo that came with it.
            self.assertIn('has_gerrit', dd['repos'][0])
            self.assertNotIn('has_gerrit', dd['repos'][1])
            self.assertNotIn('has_gerrit', dd['repos'][2])
            # One module group per organization.
            self.assertEqual(2, len(dd['module_groups']))
            self.assertIn({'id': 'openstack',
                           'module_group_name': 'openstack',
                           'modules': ['qa', 'nova'],
                           'tag': 'organization'}, dd['module_groups'])
            self.assertIn({'id': 'stackforge',
                           'module_group_name': 'stackforge',
                           'modules': ['tux'],
                           'tag': 'organization'}, dd['module_groups'])
    def test_update_project_list_ext_project_source(self):
        """Projects from a github source honour the source's module_group_id."""
        with mock.patch('stackalytics.processor.default_data_processor.'
                        '_retrieve_project_list_from_github') as retriever:
            retriever.return_value = [
                {'module': 'kubernetes',
                 'uri': 'git://github.com/kubernetes/kubernetes',
                 'organization': 'kubernetes'},
            ]
            dd = {
                'repos': [],
                'project_sources': [
                    {'organization': 'kubernetes',
                     'uri': 'github://',
                     'module_group_id': 'kubernetes-group'},
                ],
                'module_groups': [],
            }
            default_data_processor._update_project_list(dd)
            self.assertEqual(1, len(dd['repos']))
            self.assertIn('kubernetes',
                          set([r['module'] for r in dd['repos']]))
            self.assertEqual(1, len(dd['module_groups']))
            self.assertIn({'id': 'kubernetes-group',
                           'module_group_name': 'kubernetes',
                           'modules': ['kubernetes'],
                           'tag': 'organization'}, dd['module_groups'])
    @mock.patch('stackalytics.processor.utils.read_json_from_uri')
    def test_update_with_driverlog(self, mock_read_from_json):
        """DriverLog drivers attach to the matching repo and create a CI user."""
        default_data = {'repos': [{'module': 'cinder', }], 'users': []}
        driverlog_dd = {'drivers': [{
            'project_id': 'openstack/cinder',
            'vendor': 'VMware',
            'name': 'VMware VMDK Driver',
            'ci': {
                'id': 'vmwareminesweeper',
                'success_pattern': 'Build successful',
                'failure_pattern': 'Build failed'
            }
        }]}
        mock_read_from_json.return_value = driverlog_dd
        default_data_processor._update_with_driverlog_data(default_data, 'uri')
        expected_user = {
            'user_id': 'ci:vmware_vmdk_driver',
            'user_name': 'VMware VMDK Driver',
            'static': True,
            'companies': [
                {'company_name': 'VMware', 'end_date': None}],
        }
        self.assertIn(expected_user, default_data['users'])
        self.assertIn(driverlog_dd['drivers'][0],
                      default_data['repos'][0]['drivers'])
    @mock.patch('stackalytics.processor.utils.read_json_from_uri')
    def test_update_with_driverlog_specific_repo(self, mock_read_from_json):
        """A driver with an explicit 'repo' key matches on that repo name."""
        default_data = {'repos': [{'module': 'fuel-plugin-mellanox', }],
                        'users': []}
        driverlog_dd = {'drivers': [{
            'project_id': 'openstack/fuel',
            'repo': 'stackforge/fuel-plugin-mellanox',
            'vendor': 'Mellanox',
            'name': 'ConnectX-3 Pro Network Adapter Support plugin',
            'ci': {
                'id': 'mellanox',
                'success_pattern': 'SUCCESS',
                'failure_pattern': 'FAILURE'
            }
        }]}
        mock_read_from_json.return_value = driverlog_dd
        default_data_processor._update_with_driverlog_data(default_data, 'uri')
        expected_user = {
            'user_id': 'ci:connectx_3_pro_network_adapter_support_plugin',
            'user_name': 'ConnectX-3 Pro Network Adapter Support plugin',
            'static': True,
            'companies': [
                {'company_name': 'Mellanox', 'end_date': None}],
        }
        self.assertIn(expected_user, default_data['users'])
        self.assertIn(driverlog_dd['drivers'][0],
                      default_data['repos'][0]['drivers'])
| Mirantis/stackalytics | stackalytics/tests/unit/test_default_data_processor.py | test_default_data_processor.py | py | 7,360 | python | en | code | 12 | github-code | 36 |
8413029677 | import sqlite3
connection = sqlite3.connect('data.db')  # creates data.db if it does not exist
cursor = connection.cursor()
# Schema for the hotels table; IF NOT EXISTS makes this script idempotent.
create_table = "CREATE TABLE IF NOT EXISTS hotels (hotel_id text PRIMARY KEY, name text, stars real, price real, city text)"
cursor.execute(create_table)
connection.commit()
connection.close()
74791240424 | import math
import json
import random
import argparse
def genRandomFeatures(n):
    """Generate *n* GeoJSON point features at uniformly random positions.

    Coordinates follow the GeoJSON [longitude, latitude] order; each feature
    carries a random binary 'class' property (roughly 50/50).
    """
    features = []
    for _ in range(n):
        lon = (random.random() - 0.5) * 360.0  # uniform in [-180, 180)
        lat = (random.random() - 0.5) * 180.0  # uniform in [-90, 90)
        features.append({
            'type': 'Feature',
            'properties': {'class': 1 if random.random() > 0.5 else 0},
            'geometry': {'type': 'Point', 'coordinates': [lon, lat]},
        })
    return features
def genGridFeatures(nx, ny):
    """Generate GeoJSON point features on a regular nx-by-ny grid.

    Coordinates follow the GeoJSON [longitude, latitude] order; each feature
    carries a random binary 'class' property (roughly 50/50).
    """
    features = []
    for ix in range(nx):
        for iy in range(ny):
            lon = (ix - 0.5) * 360.0 / nx
            lat = (iy - 0.5) * 180.0 / ny
            features.append({
                'type': 'Feature',
                'properties': {'class': 1 if random.random() > 0.5 else 0},
                'geometry': {'type': 'Point', 'coordinates': [lon, lat]},
            })
    return features
def main():
    """Parse CLI args and print SQL that loads random points into PostGIS.

    Usage: random_points.py <tableName> <numPoints>. The generated SQL drops
    and recreates the table, then inserts one EWKT point per feature.
    NOTE(review): tableName is interpolated directly into the SQL text —
    acceptable for a trusted CLI, unsafe for untrusted input.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(dest='tableName', help='The name of the db table')
    parser.add_argument(dest='numPoints', type=int, help='The number of random points')
    args = parser.parse_args()
    features = genRandomFeatures(args.numPoints)
    print("DROP TABLE IF EXISTS %s;" % args.tableName)
    print("CREATE TABLE %s(gid serial PRIMARY KEY, geom GEOMETRY, attr NUMERIC);" % args.tableName)
    for feature in features:
        # EWKT geometry in WGS84 (SRID 4326); coordinates are [lon, lat].
        geom = "POINT(%g %g)" % tuple(feature['geometry']['coordinates'])
        print("INSERT INTO %s VALUES (DEFAULT, GeomFromEWKT('SRID=4326;%s'), %d);" % (args.tableName, geom, feature['properties']['class']))
# Script entry point.
if __name__ == "__main__":
    main()
| decision-labs/mapnik | benchmark/utils/random_points.py | random_points.py | py | 1,569 | python | en | code | 0 | github-code | 36 |
7136369222 | # -*- coding: utf-8 -*-
# ***************************************************
# * File : timefeatures.py
# * Author : Zhefeng Wang
# * Email : wangzhefengr@163.com
# * Date : 2023-04-19
# * Version : 0.1.041901
# * Description : description
# * Link : link
# * Requirement : 相关模块版本需求(例如: numpy >= 2.1.0)
# ***************************************************
# python libraries
import os
import sys
ROOT = os.getcwd()
# Make the working directory importable so sibling modules (utils_func) resolve.
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
from sklearn.preprocessing import OneHotEncoder
from utils_func import is_weekend
# global variable
# Module file name without the .py extension, used as a logging label.
LOGGING_LABEL = __file__.split('/')[-1][:-3]
def feature_engineer(df):
    """Add calendar columns derived from the DATATIME timestamp column.

    Appends month/day/weekday/hour/minute columns in place and returns the
    (mutated) frame.
    """
    stamps = df.DATATIME
    df["month"] = stamps.apply(lambda ts: ts.month, 1)
    df["day"] = stamps.apply(lambda ts: ts.day, 1)
    df["weekday"] = stamps.apply(lambda ts: ts.weekday(), 1)
    df["hour"] = stamps.apply(lambda ts: ts.hour, 1)
    df["minute"] = stamps.apply(lambda ts: ts.minute, 1)
    return df
def time_static_features(series,
                         datetime_format: str = '%Y-%m-%d %H:%M:%S',
                         datetime_is_index: bool = False,
                         datetime_name: str = None,
                         target_name: str = None,
                         features: List = None) -> pd.DataFrame:
    """Extract static calendar features from a datetime column or index.

    Bug fix: ``x.date`` / ``x.time`` stored the *bound method* objects
    instead of calling them; they are now invoked. The mutable default
    ``features=[]`` was replaced with ``None`` (same behavior).

    Args:
        series: input frame carrying the datetime information.
        datetime_format: strptime format of the datetime values.
        datetime_is_index: whether the datetime lives in the index.
        datetime_name: column name of the datetime (when not the index).
        target_name: unused; kept for interface compatibility.
        features: names of the feature columns to return; None or an empty
            list returns every generated column.

    Returns:
        pd.DataFrame with the requested calendar feature columns.
    """
    data = series.copy()
    # Normalize the datetime into a temporary 'DT' column.
    if datetime_is_index:
        data["DT"] = data.index
        data["DT"] = pd.to_datetime(data["DT"], format = datetime_format)
    else:
        data[datetime_name] = pd.to_datetime(data[datetime_name], format = datetime_format)
        data["DT"] = data[datetime_name]
    # Calendar features.
    data["date"] = data["DT"].apply(lambda x: x.date())  # calendar date (fixed: call the method)
    data["time"] = data["DT"].apply(lambda x: x.time())  # time of day (fixed: call the method)
    data["year"] = data["DT"].apply(lambda x: x.year)  # year
    data["is_year_start"] = data["DT"].apply(lambda x: x.is_year_start)  # first day of the year
    data["is_year_end"] = data["DT"].apply(lambda x: x.is_year_end)  # last day of the year
    data["is_leap_year"] = data["DT"].apply(lambda x: x.is_leap_year)  # leap-year flag
    data["quarter"] = data["DT"].apply(lambda x: x.quarter)  # quarter
    data["is_quarter_start"] = data["DT"].apply(lambda x: x.is_quarter_start)  # first day of the quarter
    data["is_quarter_end"] = data["DT"].apply(lambda x: x.is_quarter_end)  # last day of the quarter
    # TODO season
    # TODO fiscal quarter
    data["month"] = data["DT"].apply(lambda x: x.month)  # month
    data["is_month_start"] = data["DT"].apply(lambda x: x.is_month_start)  # first day of the month
    data["is_month_end"] = data["DT"].apply(lambda x: x.is_month_end)  # last day of the month
    data["daysinmonth"] = data["DT"].apply(lambda x: x.daysinmonth)  # number of days in the month
    # TODO number of working days in the month
    # TODO number of holidays in the month
    # TODO daylight-saving-time flag
    data["weekofyear"] = data["DT"].apply(lambda x: x.isocalendar().week)  # ISO week of year
    # TODO week of month
    data["dayofyear"] = data["DT"].apply(lambda x: x.dayofyear)  # day of year
    data["dayofmonth"] = data["DT"].apply(lambda x: x.day)  # day of month
    data["dayofweek"] = data["DT"].apply(lambda x: x.dayofweek)  # day of week (Monday=0)
    data["is_weekend"] = data['dayofweek'].apply(is_weekend)  # weekend flag (see utils_func.is_weekend)
    # TODO data["is_holiday"] = data["DT"].apply(is_holiday)  # holiday / working-day flag
    # TODO consecutive holiday length
    # TODO n-th day before a holiday
    # TODO n-th day of a holiday
    # TODO n-th day after a holiday
    # TODO data["is_tiaoxiu"] = data["DT"].apply(is_tiaoxiu)  # compensatory working-day flag
    data["hour"] = data["DT"].apply(lambda x: x.hour)  # hour
    data["minute"] = data["DT"].apply(lambda x: x.minute)  # minute
    # TODO data["past_minutes"] = data["DT"].apply(past_minutes)  # minutes elapsed in the day
    data["second"] = data["DT"].apply(lambda x: x.second)  # second
    data["microsecond"] = data["DT"].apply(lambda x: x.microsecond)  # microsecond
    data["nanosecond"] = data["DT"].apply(lambda x: x.nanosecond)  # nanosecond
    # TODO data["time_period"] = data["DT"].apply(time_period)  # period of the day
    data["day_high"] = data["hour"].apply(lambda x: 0 if 0 < x < 8 else 1)  # peak-hour flag (0 for hours 1-7)
    # TODO data["is_work"] = data["hour"].apply(is_work)  # business-hours flag
    del data["DT"]
    # None / [] both mean "return everything".
    if not features:
        selected_features = data
    else:
        selected_features = data[features]
    return selected_features
def time_dynamic_features(series,
                          n_lag: int = 1,
                          n_fut: int = 1,
                          selLag = None,
                          selFut = None,
                          dropnan = True):
    """Build a supervised-learning frame from a time series.

    The current period's columns are always included. Prior periods are added
    with a ``(t-k)`` suffix and future periods with a ``(t+k)`` suffix.

    Params:
        series: periodic attributes as a list or a 2-D frame/array.
        n_lag: number of PRIOR periods to add as inputs; 0 adds none.
        n_fut: number of FUTURE periods to add as targets; 0 still adds (t+0).
        selLag: restrict lagged copies to these attribute names (default all).
        selFut: restrict future copies to these attribute names (default all).
        dropnan: drop rows containing NaN values introduced by shifting.

    Returns:
        pd.DataFrame organized for supervised learning.
    """
    data = series.copy()
    n_vars = 1 if isinstance(data, list) else data.shape[1]
    frame = pd.DataFrame(data)
    base = frame.columns
    pieces, labels = [], []
    # Always include every current-period attribute.
    pieces.append(frame.shift(0))
    labels.extend("%s" % base[j] for j in range(n_vars))
    # Lag prior-period attributes: (t-n_lag), ..., (t-1).
    for step in range(max(0, n_lag), 0, -1):
        tag = "(t-%d)" % step
        if selLag is None:
            pieces.append(frame.shift(step))
            labels.extend("%s%s" % (base[j], tag) for j in range(n_vars))
        else:
            for col in selLag:
                pieces.append(frame[col].shift(step))
                labels.append("%s%s" % (col, tag))
    # Add future-period attributes: (t+0), ..., (t+n_fut).
    for step in range(0, max(0, n_fut) + 1):
        tag = "(t+%d)" % step
        if selFut is None:
            pieces.append(frame.shift(-step))
            labels.extend("%s%s" % (base[j], tag) for j in range(n_vars))
        else:
            for col in selFut:
                pieces.append(frame[col].shift(-step))
                labels.append("%s%s" % (col, tag))
    # Assemble and optionally drop rows made incomplete by the shifts.
    result = pd.concat(pieces, axis = 1)
    result.columns = labels
    if dropnan:
        result.dropna(inplace = True)
    return result
# TODO
def get_time_sin_cos(data: pd.DataFrame, col: str, n: int, one_hot: bool = False, drop: bool = True):
    """Cyclically encode a periodic time column as sin/cos pairs.

    The sin/cos projection makes the ends of the cycle adjacent (hour 23 is
    close to hour 0, Sunday is close to Monday).

    Args:
        data: input frame.
        col: name of the periodic column.
        n: period length (24 for hours, 7 for weekdays, ...).
        one_hot: additionally append a one-hot expansion of *col*.
        drop: drop the original column afterwards.

    Returns:
        The frame with the new columns appended.
    """
    factor = 2 * np.pi / n
    data[col + '_sin'] = round(np.sin(factor * data[col]), 6)
    data[col + '_cos'] = round(np.cos(factor * data[col]), 6)
    if one_hot:
        encoder = OneHotEncoder()
        encoded = encoder.fit_transform(data[col].values.reshape(-1, 1)).toarray()
        dummy_cols = [col + '_' + str(int(i)) for i in range(encoded.shape[1])]
        data = pd.concat([data, pd.DataFrame(encoded, columns = dummy_cols)], axis = 1)
    if drop:
        data = data.drop(col, axis = 1)
    return data
# TODO
def gen_lag_features(data, cycle):
    """Build lag features for a seasonally-differenced time series.

    The series is first made stationary via a seasonal difference of length
    *cycle*; then columns ``t-cycle`` ... ``t-1`` (shifted copies) and ``t``
    (the current differenced value) are appended for supervised learning.

    Args:
        data: single-column time-series frame.
        cycle: seasonal period of the series.
    """
    # Seasonal difference, dropping the first `cycle` undefined rows.
    diffed = data.diff(cycle).iloc[cycle:]
    base = diffed.iloc[:, 0]
    # Lagged copies of the differenced series, oldest lag first.
    for lag in range(cycle, 0, -1):
        diffed["t-" + str(lag)] = base.shift(lag).values
    diffed["t"] = base.values
    # Drop the rows made incomplete by the longest shift.
    return diffed.iloc[cycle + 1:]
class TimeFeature:
    """Base class for frequency-derived time features.

    Subclasses map a DatetimeIndex onto a float array, typically scaled
    onto the interval [-0.5, 0.5].
    """
    def __init__(self):
        pass
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Overridden by subclasses; the base class is a no-op.
        pass
    def __repr__(self):
        return self.__class__.__name__ + "()"
class SecondOfMinute(TimeFeature):
    """
    Second of minute encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # 0..59 mapped linearly onto [-0.5, 0.5].
        return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
    """
    Minute of hour encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # 0..59 mapped linearly onto [-0.5, 0.5].
        return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
    """
    Hour of day encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # 0..23 mapped linearly onto [-0.5, 0.5].
        return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
    """
    Day of week encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Monday=0 .. Sunday=6, mapped linearly onto [-0.5, 0.5].
        return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
    """
    Day of month encoded as value between [-0.5, 0.5]
    """
    # NOTE(review): divides by 30, so day 31 reaches exactly +0.5 —
    # presumably an intentional fixed-scale normalization; confirm.
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return (index.day - 1) / 30.0 - 0.5
class DayOfYear(TimeFeature):
    """
    Day of year encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # 1..366 mapped onto [-0.5, 0.5]; day 366 of a leap year hits exactly +0.5.
        return (index.dayofyear - 1) / 365.0 - 0.5
class MonthOfYear(TimeFeature):
    """
    Month of year encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # 1..12 mapped linearly onto [-0.5, 0.5].
        return (index.month - 1) / 11.0 - 0.5
class WeekOfYear(TimeFeature):
    """
    Week of year encoded as value between [-0.5, 0.5]
    """
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # ISO weeks 1..53 mapped onto [-0.5, 0.5]; week 53 hits exactly +0.5.
        return (index.isocalendar().week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
    """
    Returns a list of time features that will be appropriate for the given frequency string.
    Args:
        freq_str (str): Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
    Raises:
        RuntimeError: if *freq_str* does not map to a supported pandas offset.
    Returns:
        List[TimeFeature]: instantiated feature callables for that granularity.
    """
    # Map each pandas offset class to the feature set appropriate for it:
    # the finer the frequency, the more sub-period features are included.
    features_by_offsets = {
        offsets.YearEnd: [],
        offsets.QuarterEnd: [MonthOfYear],
        offsets.MonthEnd: [MonthOfYear],
        offsets.Week: [DayOfMonth, WeekOfYear],
        offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
        offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Minute: [
            MinuteOfHour,
            HourOfDay,
            DayOfWeek,
            DayOfMonth,
            DayOfYear,
        ],
        offsets.Second: [
            SecondOfMinute,
            MinuteOfHour,
            HourOfDay,
            DayOfWeek,
            DayOfMonth,
            DayOfYear,
        ],
    }
    offset = to_offset(freq_str)
    # Instantiate the feature callables for the first matching offset type.
    for offset_type, feature_classes in features_by_offsets.items():
        if isinstance(offset, offset_type):
            return [cls() for cls in feature_classes]
    supported_freq_msg = f'''
    Unsupported frequency {freq_str}
    The following frequencies are supported:
        Y - yearly
            alias: A
        M - monthly
        W - weekly
        D - daily
        B - business days
        H - hourly
        T - minutely
            alias: min
        S - secondly
    '''
    raise RuntimeError(supported_freq_msg)
def time_features(dates, timeenc = 1, freq = "h"):
    """
    > `time_features` takes in a `dates` dataframe with a 'dates' column
    > and extracts the date down to `freq` where freq can be any of the
    > following if `timeenc` is 0:
    > * m - [month]
    > * w - [month]
    > * d - [month, day, weekday]
    > * b - [month, day, weekday]
    > * h - [month, day, weekday, hour]
    > * t - [month, day, weekday, hour, *minute]
    >
    > If `timeenc` is 1, a similar, but different list of `freq` values
    > are supported (all encoded between [-0.5 and 0.5]):
    > * Q - [month]
    > * M - [month]
    > * W - [Day of month, week of year]
    > * D - [Day of week, day of month, day of year]
    > * B - [Day of week, day of month, day of year]
    > * H - [Hour of day, day of week, day of month, day of year]
    > * T - [Minute of hour*, hour of day, day of week, day of month, day of year]
    > * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year]
    *minute returns a number from 0-3 corresponding to the 15 minute period it falls into.
    """
    if timeenc == 0:
        # Integer calendar fields, added as columns onto the input frame.
        dates['month'] = dates.date.apply(lambda row: row.month, 1)
        dates['day'] = dates.date.apply(lambda row: row.day, 1)
        dates['weekday'] = dates.date.apply(lambda row: row.weekday(), 1)
        dates['hour'] = dates.date.apply(lambda row: row.hour, 1)
        dates['minute'] = dates.date.apply(lambda row: row.minute, 1)
        # Map minutes onto the 15-minute bucket index (0-3).
        dates['minute'] = dates.minute.map(lambda x: x // 15)
        # Which of the computed columns each granularity returns.
        freq_map = {
            'y':[],
            'm':['month'],
            'w':['month'],
            'd':['month', 'day', 'weekday'],
            'b':['month', 'day', 'weekday'],
            'h':['month', 'day', 'weekday', 'hour'],
            't':['month', 'day', 'weekday', 'hour', 'minute'],
        }
        return dates[freq_map[freq.lower()]].values
    if timeenc == 1:
        # Normalized float encodings in [-0.5, 0.5], one row per timestamp.
        dates = pd.to_datetime(dates.date.values)
        return np.vstack([
            feat(dates) for feat in time_features_from_frequency_str(freq)
        ]).transpose(1, 0)
# Test entry point (smoke tests for the time-feature helpers above).
def main():
    """Ad-hoc smoke test for this module's feature generators.

    NOTE(review): relies on helpers defined elsewhere in this module
    (gen_time_features, get_time_fe, Timeseries2Dataframe) and on a
    hard-coded local CSV path, so it only runs on the author's machine.
    ``data`` is also passed as None below -- presumably a placeholder; the
    gen_time_features call will likely fail as written. TODO confirm.
    """
    dates = pd.to_datetime([
        "2023-01-01 01:01:05", "2023-01-01 01:01:10",
        "2023-01-01 01:01:15", "2023-01-01 01:01:20",
        "2023-01-01 01:01:25"
    ])
    res = time_features(dates, freq = "5s")
    print(res)
    res2 = time_features_from_frequency_str("5s")
    print(res2)
    data = None
    data_df = gen_time_features(data)
    # Cyclic/one-hot encodings for each calendar component.
    data_df = get_time_fe(data_df, 'hour', n = 24, one_hot = False, drop = False)
    data_df = get_time_fe(data_df, 'day', n = 31, one_hot = False, drop = True)
    data_df = get_time_fe(data_df, 'dayofweek', n = 7, one_hot = True, drop = True)
    data_df = get_time_fe(data_df, 'season', n = 4, one_hot = True, drop = True)
    data_df = get_time_fe(data_df, 'month', n = 12, one_hot = True, drop = True)
    data_df = get_time_fe(data_df, 'weekofyear', n = 53, one_hot = False, drop = True)
    # data
    series = pd.read_csv(
        "/Users/zfwang/machinelearning/datasets/car-sales.csv",
        header = 0,
        index_col = 0
    )
    # gen features
    ts2df = Timeseries2Dataframe()
    series = ts2df.timeseries2dataframe(
        data = series,
        n_lag = 12,
        n_fut = 0,
        selLag = None,
        selFut = None,
        dropnan = True
    )
    ts2df.analysis_features_select(series, "Sales")
    ts2df.features_select(series, "Sales")
if __name__ == "__main__":
    main()
| wangzhefeng/tsproj | utils/timefeatures.py | timefeatures.py | py | 17,505 | python | en | code | 0 | github-code | 36 |
24417230499 | from os.path import exists
from pyimpspec.data.data_set import (
DataSet,
dataframe_to_data_sets,
)
from typing import List
def parse_spreadsheet(path: str, **kwargs) -> List[DataSet]:
    """
    Parse a spreadsheet (.xlsx or .ods) containing one or more impedance spectra.

    Each sheet in the workbook is converted to one or more DataSet instances;
    the sheet name becomes the data set label.

    Parameters
    ----------
    path: str
        The path to the file to process.

    kwargs:
        Keyword arguments are passed forward to `pandas.read_excel`_.

    Returns
    -------
    List[DataSet]
    """
    from pandas import read_excel

    assert isinstance(path, str) and exists(path), path
    # Default to reading every sheet unless the caller chose one explicitly.
    kwargs.setdefault("sheet_name", None)
    data_sets: List[DataSet] = []
    for label, df in read_excel(path, **kwargs).items():
        data_sets.extend(dataframe_to_data_sets(df, path=path, label=label))
    assert isinstance(data_sets, list), data_sets
    assert all(isinstance(item, DataSet) for item in data_sets)
    return data_sets
| vyrjana/pyimpspec | src/pyimpspec/data/formats/spreadsheet.py | spreadsheet.py | py | 1,039 | python | en | code | 12 | github-code | 36 |
75072637544 | from vedo import Mesh, show, Lines
# Load the two limb meshes to be registered (red = source, green = target).
mesh_a = Mesh("../data/mouse_limb_a.stl").c("red5")
mesh_b = Mesh("../data/mouse_limb_b.stl").c("green5")
# Here user clicks on mesh A and then B to pick 5+5 landmarks
show("Click meshes & press i", mesh_a, mesh_b).clear()
# This shows that automatic alignment may be not good enough:
# make a copy and align it to mesh B with ICP
# mesh_fail = mesh_a.clone().align_to(mesh_b, rigid=True)
# show("ICP alignment is not good enough!", mesh_fail, mesh_b).clear()
# Warp A to match B using the manually picked landmarks:
# NOTE(review): the coordinates below were recorded from a previous
# interactive session; re-picking landmarks requires updating these lists.
pts_a = [
    (490.395, 145.306, 891.946),
    (290.923, 913.594, 279.837),
    (651.440, 836.449, 775.404),
    (771.368, 310.359, 465.300),
    (266.866, 390.811, 941.395),
]
pts_b = [
    (1735.77, 538.276, 524.379),
    (1680.17, 1468.45, 644.959),
    (1693.76, 1164.16, 140.946),
    (1297.26, 904.027, 415.989),
    (2036.56, 763.552, 477.475),
]
# Warping needs at least 4 landmark pairs.
if len(pts_a) > 3:
    # Make a copy and warp it
    aligned_mesh = mesh_a.clone().warp(pts_a, pts_b)
    # Create arrows to show the displacement
    arrows = Lines(pts_a, pts_b)
    # Compute the distance between the two meshes
    mesh_b.distance_to(mesh_a)
    mesh_b.cmap("coolwarm").add_scalarbar()
    mesh_a.alpha(0.2) # make mesh A very transparent
    # Show all the objects in one go
    show("Warping Result", mesh_a, aligned_mesh, mesh_b, arrows)
| BiAPoL/PoL-BioImage-Analysis-TS-Early-Career-Track | docs/day2a_image_segmentation/vedo_material/scripts/10-morph_ab.py | 10-morph_ab.py | py | 1,376 | python | en | code | 6 | github-code | 36 |
34594770015 | """OpenAPI schema utility functions."""
from io import StringIO
# Fallback example value per JSON-Schema "type". Order matters: any-type
# arrays are filled from .values() in insertion order.
_DEFAULT_EXAMPLES = {
    "string": "string",
    "integer": 1,
    "number": 1.0,
    "boolean": True,
    "array": [],
}

# Fallback example value per "string" format.
_DEFAULT_STRING_EXAMPLES = {
    "date": "2020-01-01",
    "date-time": "2020-01-01T01:01:01Z",
    "password": "********",
    "byte": "QG1pY2hhZWxncmFoYW1ldmFucw==",
    "ipv4": "127.0.0.1",
    "ipv6": "::1",
}


def example_from_schema(schema):
    """
    Generates an example request/response body from the provided schema.

    >>> schema = {
    ...     "type": "object",
    ...     "required": ["id", "name"],
    ...     "properties": {
    ...         "id": {
    ...             "type": "integer",
    ...             "format": "int64"
    ...         },
    ...         "name": {
    ...             "type": "string",
    ...             "example": "John Smith"
    ...         },
    ...         "tag": {
    ...             "type": "string"
    ...         }
    ...     }
    ... }
    >>> example = example_from_schema(schema)
    >>> assert example == {
    ...     "id": 1,
    ...     "name": "John Smith",
    ...     "tag": "string"
    ... }
    """
    # An explicit example always wins.
    if "example" in schema:
        return schema["example"]
    if "oneOf" in schema:
        return example_from_schema(schema["oneOf"][0])
    if "anyOf" in schema:
        return example_from_schema(schema["anyOf"][0])
    if "allOf" in schema:
        # Combine the examples of every sub-schema.
        merged = {}
        for sub_schema in schema["allOf"]:
            merged.update(example_from_schema(sub_schema))
        return merged
    if "enum" in schema:
        return schema["enum"][0]
    if "type" not in schema:
        # Untyped schema: any value is valid, use the integer default.
        return _DEFAULT_EXAMPLES["integer"]

    schema_type = schema["type"]
    if schema_type == "object" or "properties" in schema:
        return {
            name: example_from_schema(prop_schema)
            for name, prop_schema in schema.get("properties", {}).items()
        }
    if schema_type == "array":
        item_schema = schema["items"]
        min_length = schema.get("minItems", 0)
        max_length = schema.get("maxItems", max(min_length, 2))
        assert min_length <= max_length
        # Aim for at least 2 example items while honoring the bounds.
        gen_length = min(2, max_length) if min_length <= 2 else min_length
        pool = []
        if item_schema == {}:
            # Any-type arrays: cycle through one example of each type.
            pool.extend(_DEFAULT_EXAMPLES.values())
        elif isinstance(item_schema, dict) and "oneOf" in item_schema:
            # Mixed-type arrays.
            pool.append(_DEFAULT_EXAMPLES[sorted(item_schema["oneOf"])[0]])
        else:
            pool.append(example_from_schema(item_schema))
        # Repeat the pool until gen_length items have been emitted.
        return [pool[i % len(pool)] for i in range(gen_length)]
    if schema_type == "string":
        base = _DEFAULT_STRING_EXAMPLES.get(
            schema.get("format", None), _DEFAULT_EXAMPLES["string"]
        )
        min_length = schema.get("minLength", 0)
        max_length = schema.get("maxLength", max(min_length, len(base)))
        gen_length = (
            min(len(base), max_length) if min_length <= len(base) else min_length
        )
        assert 0 <= min_length <= max_length
        if min_length <= len(base) <= max_length:
            return base
        # Stretch or trim the base example to satisfy the length bounds.
        return "".join(base[i % len(base)] for i in range(gen_length))
    if schema_type in ("integer", "number"):
        value = _DEFAULT_EXAMPLES[schema_type]
        if "minimum" in schema and "maximum" in schema:
            # Take the midpoint of the allowed range.
            value = schema["minimum"] + (schema["maximum"] - schema["minimum"]) / 2
        elif "minimum" in schema and value <= schema["minimum"]:
            value = schema["minimum"] + 1
        elif "maximum" in schema and value >= schema["maximum"]:
            value = schema["maximum"] - 1
        return float(value) if schema_type == "number" else int(value)
    return _DEFAULT_EXAMPLES[schema_type]
| sphinx-contrib/openapi | sphinxcontrib/openapi/schema_utils.py | schema_utils.py | py | 4,446 | python | en | code | 103 | github-code | 36 |
43298189764 | from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
cpython_api, cpython_struct, bootstrap_function, build_type_checkers,
CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, PyObjectFields, slot_function)
from pypy.module.cpyext.pyobject import (
decref, PyObject, make_ref, make_typedescr)
from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
from pypy.interpreter.error import OperationError
from pypy.objspace.std.sliceobject import W_SliceObject
# Slice objects directly expose their members as PyObject.
# Don't change them!
PySliceObjectStruct = lltype.ForwardReference()
PySliceObject = lltype.Ptr(PySliceObjectStruct)
# Field layout of the C-level struct; note the order here is start, step, stop.
PySliceObjectFields = PyObjectFields + \
    (("start", PyObject), ("step", PyObject), ("stop", PyObject), )
cpython_struct("PySliceObject", PySliceObjectFields, PySliceObjectStruct)
@bootstrap_function
def init_sliceobject(space):
    "Type description of PySliceObject"
    # Register attach/dealloc hooks so cpyext can mirror W_SliceObject
    # instances into PySliceObject structs and free them again.
    make_typedescr(W_SliceObject.typedef,
                   basestruct=PySliceObject.TO,
                   attach=slice_attach,
                   dealloc=slice_dealloc)
def slice_attach(space, py_obj, w_obj, w_userdata=None):
    """
    Fills a newly allocated PySliceObject with the given slice object. The
    fields must not be modified.
    """
    py_slice = rffi.cast(PySliceObject, py_obj)
    assert isinstance(w_obj, W_SliceObject)
    # make_ref creates new references; they are released in slice_dealloc.
    py_slice.c_start = make_ref(space, w_obj.w_start)
    py_slice.c_stop = make_ref(space, w_obj.w_stop)
    py_slice.c_step = make_ref(space, w_obj.w_step)
@slot_function([PyObject], lltype.Void)
def slice_dealloc(space, py_obj):
    """Frees allocated PySliceObject resources.
    """
    py_slice = rffi.cast(PySliceObject, py_obj)
    # Drop the references taken in slice_attach before freeing the struct.
    decref(space, py_slice.c_start)
    decref(space, py_slice.c_stop)
    decref(space, py_slice.c_step)
    from pypy.module.cpyext.object import _dealloc
    _dealloc(space, py_obj)
@cpython_api([PyObject, PyObject, PyObject], PyObject)
def PySlice_New(space, w_start, w_stop, w_step):
    """Return a new slice object with the given values.  The start, stop, and
    step parameters are used as the values of the slice object attributes of
    the same names.  Any of the values may be NULL, in which case the
    None will be used for the corresponding attribute.  Return NULL if
    the new object could not be allocated."""
    # A C-level NULL argument reaches here as None; substitute wrapped None.
    if w_start is None:
        w_start = space.w_None
    if w_stop is None:
        w_stop = space.w_None
    if w_step is None:
        w_step = space.w_None
    return W_SliceObject(w_start, w_stop, w_step)
@cpython_api([PySliceObject, Py_ssize_t, Py_ssize_tP, Py_ssize_tP, Py_ssize_tP,
                Py_ssize_tP], rffi.INT_real, error=-1)
def PySlice_GetIndicesEx(space, w_slice, length, start_p, stop_p, step_p,
                         slicelength_p):
    """Usable replacement for PySlice_GetIndices().  Retrieve the start,
    stop, and step indices from the slice object slice assuming a sequence of
    length length, and store the length of the slice in slicelength.  Out
    of bounds indices are clipped in a manner consistent with the handling of
    normal slices.

    Returns 0 on success and -1 on error with exception set."""
    if not isinstance(w_slice, W_SliceObject):
        raise PyErr_BadInternalCall(space)
    # indices4 clips out-of-bounds values exactly like normal slicing does.
    start_p[0], stop_p[0], step_p[0], slicelength_p[0] = \
            w_slice.indices4(space, length)
    return 0
@cpython_api([PySliceObject, Py_ssize_t, Py_ssize_tP, Py_ssize_tP, Py_ssize_tP],
                rffi.INT_real, error=-1)
def PySlice_GetIndices(space, w_slice, length, start_p, stop_p, step_p):
    """Retrieve the start, stop and step indices from the slice object slice,
    assuming a sequence of length length. Treats indices greater than
    length as errors.

    Returns 0 on success and -1 on error with no exception set (unless one of
    the indices was not None and failed to be converted to an integer,
    in which case -1 is returned with an exception set).

    You probably do not want to use this function.  If you want to use slice
    objects in versions of Python prior to 2.3, you would probably do well to
    incorporate the source of PySlice_GetIndicesEx(), suitably renamed,
    in the source of your extension."""
    if not isinstance(w_slice, W_SliceObject):
        raise PyErr_BadInternalCall(space)
    try:
        start_p[0], stop_p[0], step_p[0] = \
            w_slice.indices3(space, length)
    except:
        # NOTE(review): bare except mirrors the documented "-1, no exception
        # set" contract, but it swallows every error type -- confirm this is
        # intentional (an OperationError may still leave an exception set).
        return -1
    return 0
| mozillazg/pypy | pypy/module/cpyext/sliceobject.py | sliceobject.py | py | 4,493 | python | en | code | 430 | github-code | 36 |
8983073054 | from datetime import datetime
from typing import Optional, Union
class Poll:
    """
    Read-only value object describing a poll message.

    All state is exposed through properties; ``__slots__`` avoids a
    per-instance ``__dict__``.
    """

    # Limits for the number of answer options (not enforced here;
    # presumably validated by callers -- TODO confirm).
    MAX_OPTIONS = 10
    MIN_OPTIONS = 2

    __slots__ = [
        "_message_id",
        "_channel_id",
        "_question",
        "_options",
        "_date_created_at",
        "_user_id",
    ]

    def __init__(
        self,
        message_id: int,
        channel_id: int,
        question: str,
        options: list[str],
        user_id: Optional[int] = None,
        date_created: Optional[Union[datetime, str]] = None,
    ):
        """
        :param message_id: ID of the poll message.
        :param channel_id: ID of the channel the poll lives in.
        :param question: the poll question text.
        :param options: answer option labels.
        :param user_id: author's ID, if known.
        :param date_created: creation date, a ``datetime`` or an ISO
            "YYYY-MM-DD" string; defaults to the current date.

        BUG FIX: the previous default was ``datetime.now().strftime(...)``
        evaluated once at import time, so every Poll created without an
        explicit date carried the interpreter start-up date. The date is
        now computed at call time.
        """
        if date_created is None:
            date_created = datetime.now().strftime("%Y-%m-%d")
        self._message_id = message_id
        self._channel_id = channel_id
        self._question = question
        self._options = options
        self._date_created_at = date_created
        self._user_id = user_id

    @property
    def message_id(self) -> int:
        return self._message_id

    @property
    def channel_id(self) -> int:
        return self._channel_id

    @property
    def question(self) -> str:
        return self._question

    @property
    def options(self) -> list[str]:
        return self._options

    @property
    def created_at(self) -> datetime.date:
        """Creation date as a ``date``, regardless of the stored form."""
        if isinstance(self._date_created_at, str):
            return datetime.fromisoformat(self._date_created_at).date()
        if isinstance(self._date_created_at, datetime):
            return self._date_created_at.date()
        return self._date_created_at

    @property
    def user_id(self) -> int:
        return self._user_id
| TheXer/Jachym | src/ui/poll.py | poll.py | py | 1,559 | python | en | code | 11 | github-code | 36 |
40618220774 | from django.conf.urls import url
from . import views
urlpatterns=[
    # Class-based views for registering / editing / updating records.
    url(r'^register/',views.mapiview.as_view()),
    url(r'^editview/',views.mapiview1.as_view()),
    url(r'^update/',views.mapiview2.as_view()),
    # Function-based customer views.
    url(r'^vcus/',views.vcustomer),
    url(r'^registercus/',views.post),
    # `idd` (one or more word characters) is passed to the view as a kwarg.
    url(r'^viewtr/(?P<idd>\w+)',views.viewtr,name="viewtr"),
    # (?P<idd>\w+)
]
15131148748 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def reverseList(self, head):
        """
        Reverse a singly linked list in place and return the new head.

        :type head: ListNode
        :rtype: ListNode (None for an empty list)

        BUG FIX: the previous version returned a plain Python list of node
        values in reverse order instead of the reversed linked list promised
        by its own ``:rtype: ListNode`` contract (LeetCode 206).

        Iterative pointer reversal: O(n) time, O(1) extra space.
        """
        prev = None
        while head is not None:
            # Redirect the current node backwards, then advance.
            head.next, prev, head = prev, head, head.next
        return prev
| EnteLee/practice_algorithm | leetcode/206_reverse_linked_list/reverse_linked_list_yyj.py | reverse_linked_list_yyj.py | py | 568 | python | en | code | 0 | github-code | 36 |
72173860264 | import math
def sieve_of_sundaram(n):
    """Return the list of all primes strictly below n (Sieve of Sundaram).

    Marks every index of the form i + j + 2ij (1 <= i <= j) up to the
    limit (n - 2) // 2; the unmarked indices i map to odd primes 2i + 1.
    """
    limit = (n - 2) // 2
    is_candidate = [True] * (limit + 1)
    for i in range(1, limit + 1):
        # For fixed i, i + j + 2ij grows by 2i + 1 as j increases by 1;
        # the first value (j = i) is 2i + 2i^2.
        first = 2 * i + 2 * i * i
        for idx in range(first, limit + 1, 2 * i + 1):
            is_candidate[idx] = False
    primes = [2] if n > 2 else []
    primes.extend(2 * i + 1 for i in range(1, limit + 1) if is_candidate[i])
    return primes
# Project Euler 50: find the prime below one million expressible as the sum
# of the most consecutive primes.
primes = sieve_of_sundaram(10**6)
counter = 0          # index of the next prime to add to the running sum
index = 1            # start position of the current consecutive run
sum_of_primes = 0
term_count = 0       # number of terms in the current run
max_term_count = 21  # seeded with the known best below 1000 (953 = 21 terms)
max_prime = 953
while index < len(primes):
    sum_of_primes += primes[counter]
    term_count += 1
    counter += 1
    # NOTE(review): `in primes` is a linear scan over ~78k entries per
    # iteration; converting primes to a set would speed this up massively.
    if sum_of_primes in primes:
        if max_term_count < term_count:
            max_term_count = term_count
            max_prime = sum_of_primes
    if sum_of_primes >= primes[len(primes) - 1]:
        # Overshot the sieve limit: restart the run one position later.
        # NOTE(review): the "+ 5" early-exit margin is a heuristic -- confirm
        # it cannot terminate before the optimal run is found.
        if max_term_count > term_count + 5:
            break
        sum_of_primes = 0
        counter = index
        index += 1
        term_count = 0
print(max_prime)
33006855107 | import csv
import io
from nltk.tokenize import word_tokenize
import sys
# NOTE(review): Python 2 only code -- reload(sys)/setdefaultencoding,
# dict.keys() indexing, reader.next() and unicode() all fail on Python 3.
reload(sys)
sys.setdefaultencoding('ISO-8859-1')
def findLowest(topWords):
    # Return the word with the smallest count in topWords (linear scan).
    result = topWords.keys()[0]
    for word in topWords:
        if(topWords[word] < topWords[result]):
            result = word
    return result
# Count word frequencies in the tweet text column (row[2]) and keep only the
# topMaxAllowed most frequent words, evicting the current minimum on overflow.
with io.open("old_tweets.csv", encoding = "ISO-8859-1") as csvFile:
    #fieldnames = ['username', 'date', 'text', 'id']
    #reader = csv.DictReader(csvFile, fieldnames = fieldnames)
    reader = csv.reader(csvFile, delimiter = ';')
    reader.next()
    i = 0;
    counter = {}
    topWords = {}
    topsLowest = ""
    topMaxAllowed = 2000
    for row in reader:
        for word in word_tokenize(row[2]):
            counter[word] = counter.get(word, 0) + 1
            if(len(topWords) < topMaxAllowed):
                topWords[word] = counter[word]
                # NOTE(review): hasattr on a dict checks attribute names,
                # not keys -- this was probably meant to be
                # `topsLowest not in topWords`. Confirm before relying on it.
                if((not hasattr(topWords, topsLowest)) or topWords[topsLowest] > counter[word]):
                    topsLowest = word
            elif (topWords[topsLowest] < counter[word]):
                del topWords[topsLowest]
                topWords[word] = counter[word]
                topsLowest = findLowest(topWords)
    with io.open("word_count.csv", mode="w", encoding="ISO-8859-1") as outputFile:
        outputFile.write(unicode("word;frequency"))
        for word in topWords:
            outputFile.write(unicode("\n%s;%d" % (word, topWords[word])))
| Temirlan97/WhatTwitterFeels | wordBag/countWords.py | countWords.py | py | 1,230 | python | en | code | 0 | github-code | 36 |
# Import libraries
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# Database connection (SQLite file resolved relative to the working dir).
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# Reflect the existing database schema into mapped classes.
Base = automap_base()
Base.prepare(engine, reflect = True)
# References to the two reflected tables.
measurement = Base.classes.measurement
station = Base.classes.station
# Set up the Flask application.
app = Flask(__name__)
# Flask routes follow below.
@app.route("/")
def home():
return(
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/[enter start date: yyyy-mm-dd] <br/>"
f"/api/v1.0/[enter start date: yyyy-mm-dd]/[enter end date: yyyy-mm-dd]"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
#Create session & query
session = Session(engine)
results = session.query(measurement.date, measurement.prcp).\
filter(measurement.date > "2016-08-23").all()
session.close()
#Create dictionary and append to list
prcp_list = []
for date, prcp in results:
prcp_dict = {}
prcp_dict['date'] = date
prcp_dict['prcp'] = prcp
prcp_list.append(prcp_dict)
#Return json
return jsonify(prcp_list)
@app.route("/api/v1.0/stations")
def stations():
#Create session & query
session = Session(engine)
stations = session.query(station.station).all()
station_list = list(np.ravel(stations))
session.close()
#Return json
return jsonify(station_list)
@app.route("/api/v1.0/tobs")
def tobs():
#Create session & query
session = Session(engine)
tobs_results = session.query(measurement.station, measurement.date, measurement.tobs).\
filter(measurement.station == 'USC00519281').\
filter(measurement.date >'2016-08-23').all()
session.close()
#Create dictionary and append to list
tobs_list = []
for station, date, tobs in tobs_results:
tobs_dict = {}
tobs_dict['station'] = station
tobs_dict['date'] = date
tobs_dict['tobs'] = tobs
tobs_list.append(tobs_dict)
#Return json
return jsonify(tobs_list)
@app.route("/api/v1.0/<start_date>")
def Start_date(start_date):
#Create session & query
session = Session(engine)
start_results = session.query( func.avg(measurement.tobs), func.max(measurement.tobs), func.min(measurement.tobs)).\
filter(measurement.date >= start_date)
session.close()
#Create dictionary and append to list
tobs_start_list = []
for avg, max, min in start_results:
start_dict = {}
start_dict['avg'] = avg
start_dict['max'] = max
start_dict['min'] = min
tobs_start_list.append(start_dict)
#Return json
return jsonify(tobs_start_list)
@app.route("/api/v1.0/<start_date>/<end_date>")
def Start_end_date(start_date, end_date):
#Create session & query
session = Session(engine)
start_results = session.query( func.avg(measurement.tobs), func.max(measurement.tobs), func.min(measurement.tobs)).\
filter(measurement.date >= start_date).\
filter(measurement.date <= end_date)
session.close()
#Create dictionary and append to list
tobs_start_end_list = []
for avg, max, min in start_results:
start_end_dict = {}
start_end_dict['avg'] = avg
start_end_dict['max'] = max
start_end_dict['min'] = min
tobs_start_end_list.append(start_end_dict)
#Return json
return jsonify(tobs_start_end_list)
if __name__ == '__main__':
app.run(debug=True) | AJ-Paine/10-Hawaii-Temperature-Exploration | app.py | app.py | py | 3,710 | python | en | code | 0 | github-code | 36 |
323607876 | """added Client Favourite and product views
Revision ID: bcc08ae9bed7
Revises: 399549c08a2a
Create Date: 2020-01-24 22:55:04.098191
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'bcc08ae9bed7'
down_revision = '399549c08a2a'
branch_labels = None
depends_on = None
def upgrade():
    """Create the client_favourite and product_view tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('client_favourite',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('product_id', sa.Integer(), nullable=False),
    sa.Column('client_id', sa.Integer(), nullable=False),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('updated', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['client_id'], ['client.id'], ),
    sa.ForeignKeyConstraint(['product_id'], ['product.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('product_view',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('product_id', sa.Integer(), nullable=False),
    sa.Column('client_id', sa.Integer(), nullable=True),
    sa.Column('client_ip', sa.Integer(), nullable=False),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('updated', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['client_id'], ['client.id'], ),
    # NOTE(review): client_ip is declared as an FK to product.id (and as an
    # Integer) -- this looks like an autogenerate mistake, but the migration
    # is kept as applied; fix it in a follow-up migration if needed.
    sa.ForeignKeyConstraint(['client_ip'], ['product.id'], ),
    sa.ForeignKeyConstraint(['product_id'], ['product.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created by upgrade() (reverse dependency order)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('product_view')
    op.drop_table('client_favourite')
    # ### end Alembic commands ###
| Dsthdragon/kizito_bookstore | migrations/versions/bcc08ae9bed7_added_client_favourite_and_product_views.py | bcc08ae9bed7_added_client_favourite_and_product_views.py | py | 1,741 | python | en | code | 0 | github-code | 36 |
20885862962 | # flake8: noqa
import nltk
# NOTE(review): these downloads, and the os.system call below, run as
# import-time side effects (network access + subprocess); consider moving
# them behind an explicit setup function.
nltk.download("brown")
nltk.download("names")
import numpy as np
import multiprocessing as mp
import string
import spacy
import os
os.system("python -m spacy download en_core_web_sm")
from sklearn.base import TransformerMixin, BaseEstimator
from normalise import normalise
import pandas as pd
class TextPreprocessor(BaseEstimator, TransformerMixin):
    def __init__(self, variety="BrE", user_abbrevs={}, n_jobs=1):
        """
        Text preprocessing transformer includes steps:

        1. Text normalization (``normalise`` package)
        2. Punctuation removal
        3. Stop words removal
        4. Lemmatization (spaCy)

        variety - format of date (AmE - american type, BrE - british format)
        user_abbrevs - dict of user abbreviations mappings (from normalise package)
        n_jobs - parallel jobs to run (<= -1: all cores, 0: serial)

        NOTE(review): the mutable ``{}`` default and the eager spacy.load in
        __init__ are kept for compatibility with sklearn get_params/clone
        semantics used by existing callers.
        """
        self.variety = variety
        self.user_abbrevs = user_abbrevs
        self.n_jobs = n_jobs
        self.nlp = spacy.load("en_core_web_sm")

    def fit(self, X, y=None):
        """No-op; present for the sklearn transformer API."""
        return self

    def transform(self, X, *_):
        """Preprocess a pandas Series of documents, optionally in parallel."""
        X_copy = X.copy()
        cores = mp.cpu_count()
        if self.n_jobs <= -1:
            partitions = cores
        elif self.n_jobs <= 0:
            # n_jobs == 0: run serially in this process.
            return X_copy.apply(self._preprocess_text)
        else:
            partitions = min(self.n_jobs, cores)
        data_split = np.array_split(X_copy, partitions)
        # FIX: size the pool to the number of partitions actually used (the
        # old code always spawned `cores` workers) and make sure workers are
        # cleaned up even if a partition raises.
        pool = mp.Pool(partitions)
        try:
            data = pd.concat(pool.map(self._preprocess_part, data_split))
        finally:
            pool.close()
            pool.join()
        return data

    def _preprocess_part(self, part):
        """Apply the full pipeline to one Series partition (worker side)."""
        return part.apply(self._preprocess_text)

    def _preprocess_text(self, text):
        """Run the four-step pipeline on a single document."""
        normalized_text = self._normalize(text)
        doc = self.nlp(normalized_text)
        removed_punct = self._remove_punct(doc)
        removed_stop_words = self._remove_stop_words(removed_punct)
        return self._lemmatize(removed_stop_words)

    def _normalize(self, text):
        """Best-effort normalization; falls back to the raw text."""
        # some issues in normalise package
        try:
            return " ".join(
                normalise(
                    text,
                    variety=self.variety,
                    user_abbrevs=self.user_abbrevs,
                    verbose=False,
                )
            )
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            return text

    def _remove_punct(self, doc):
        """Drop punctuation tokens from a spaCy Doc."""
        return [t for t in doc if t.text not in string.punctuation]

    def _remove_stop_words(self, doc):
        """Drop stop-word tokens."""
        return [t for t in doc if not t.is_stop]

    def _lemmatize(self, doc):
        """Join the remaining token lemmas into a single string."""
        return " ".join([t.lemma_ for t in doc])
def preprocess_text(save_path, input_path="data/bbc-text.csv"):
    """Preprocess the 'text' column of a CSV and write the result.

    Generalized: the input CSV path used to be hard-coded; it is now a
    keyword parameter whose default preserves the old behavior.

    :param save_path: destination CSV path.
    :param input_path: source CSV path; must contain a 'text' column.
    :return: True on success.
    """
    df = pd.read_csv(input_path)
    df["text"] = TextPreprocessor(n_jobs=-1).transform(df["text"])
    df.to_csv(save_path, sep=",", index=False)
    return True
| Lolik-Bolik/Hashing_Algorithms | utils/process_book.py | process_book.py | py | 2,868 | python | en | code | 2 | github-code | 36 |
7147825929 | #!/usr/bin/env python3
import re
import sys
import linecache
from pathlib import Path

# Matches an optionally commented "key = value" line.
# FIX: raw strings -- the old plain literals used the invalid "\s" escape
# sequence (DeprecationWarning today, a SyntaxError in future Pythons).
regex = re.compile(r'#?(.*)\s?=\s?(.*)')
data = ''
try:
    fpath = str(sys.argv[1])
    if not Path(fpath).is_file():
        raise Exception("file path is invalid or not a file")
except Exception:
    # Also catches the IndexError raised when no argument was given.
    # FIX: narrowed from a bare `except:` (which swallowed KeyboardInterrupt).
    print("Error: file not provided or invalid file.")
    sys.exit(1)
try:
    with open(fpath, 'r') as fh:
        lines = fh.readlines()
        for line in lines:
            if re.search(r'^\n', line):
                # Preserve blank lines verbatim.
                data += line
            elif regex.search(line):
                match = regex.search(line)
                comment = ''
                if re.search(r'^#.*', line):
                    comment = '#'
                # Rewrite "key = value" into a remco getenv template, e.g.
                # foo.bar=1 -> foo.bar={{ getenv("FOO_BAR", "1") }}
                data += (comment + match.group(1).strip() + "={{ getenv(\"" + match.group(1).replace('.', '_').upper().strip() + "\", \"" + match.group(2).strip() + "\") }}" + "\n")
            elif re.search(r'^#.*', line):
                # Plain comment line with no "=" payload: keep as-is.
                data += line
            else:
                pass
    with open(fpath + '.new', 'w') as fh:
        fh.write(data)
except Exception as err:
    # Print an enriched one-line traceback for any failure while rewriting.
    exc_type, exc_obj, tb = sys.exc_info()
    f = tb.tb_frame
    lineno = tb.tb_lineno
    filename = f.f_code.co_filename
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno, f.f_globals)
    print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.