seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9084085834 | import os
import logging, requests
from rdflib import Namespace, Literal, Graph
from rdflib.namespace import DCTERMS, RDF, RDFS
from rdflib.plugins.stores.sparqlstore import SPARQLUpdateStore
from rdflib.graph import DATASET_DEFAULT_GRAPH_ID as default
from oslcapi.api.helpers.service_api import get_bucket
log = logging.getLogger('tester.sub')
OSLC = Namespace('http://open-services.net/ns/core#')
OSLC_EVENT = Namespace('http://open-services.net/ns/events#')
# Connect to fuseki triplestore.
FUSEKI_USER = os.getenv("FUSEKI_USER")
FUSEKI_PWD = os.getenv("FUSEKI_PWD")
fuseki_store = SPARQLUpdateStore(auth=(FUSEKI_USER,FUSEKI_PWD))
query_endpoint = 'http://fuseki.demos.gsi.upm.es/oslc-gc2/query'
update_endpoint = 'http://fuseki.demos.gsi.upm.es/oslc-gc2/update'
fuseki_data_endpoint = 'http://fuseki.demos.gsi.upm.es/oslc-gc2/data'
fuseki_store.open((query_endpoint, update_endpoint))
def generate_creation_event(resource, store):
    """Record a Creation change event for `resource` in the TRS log, persist an
    OSLC Event into the Fuseki default graph, and return an in-memory copy of
    the event graph (intended for publication on a Kafka topic)."""
    log.warning('Creation event generated')
    store.trs.generate_change_event(resource, 'Creation')

    # Persist the OSLC event into the Fuseki-backed default graph.
    fuseki_graph = Graph(fuseki_store, identifier=default)
    fuseki_graph.add((resource.uri, RDF.type, OSLC_EVENT.Event))
    fuseki_graph.add((resource.uri, DCTERMS.description, Literal('Creation Event')))

    # Build the same event as a standalone graph for the Kafka topic.
    kafka_graph = Graph()
    kafka_graph.add((resource.uri, RDF.type, OSLC_EVENT.Event))
    kafka_graph.add((resource.uri, DCTERMS.description, Literal('Creation Event')))
    return kafka_graph
def generate_modification_event(payload, store):
    """Handle a bucket-modification notification: refresh the matching OSLC
    resource in the store and log a Modification TRS change event.

    Args:
        payload: notification body; must contain a 'bucket' key identifying
            the modified bucket (schema defined by the caller — TODO confirm).
        store: OSLC store holding the catalog, resources and the TRS log.
    """
    log.warning('Modification event generated')
    bucket = get_bucket(payload['bucket'])
    # Locate the service provider whose dcterms:identifier matches the bucket id.
    service_provider = next(service_provider for service_provider in store.catalog.service_providers if
                            Literal(bucket.id) in service_provider.rdf.objects(None, DCTERMS.identifier))
    # NOTE(review): the provider is matched on bucket.id but the resource on
    # bucket.number — confirm this asymmetry is intentional.
    resource = next(resource for resource in service_provider.oslc_resources if
                    Literal(bucket.number) in resource.rdf.objects(None, DCTERMS.identifier))
    # Replace the stale resource with a freshly built one.
    service_provider.oslc_resources.remove(resource)
    resource = store.add_resource(service_provider, bucket)
    store.trs.generate_change_event(resource, 'Modification')
    # No event graph is returned here, unlike the creation/deletion handlers.
    return
def generate_deletion_event(resource, store):
    """Record a Deletion change event for `resource` in the TRS log, persist an
    OSLC Event into the Fuseki default graph, and return an in-memory copy of
    the event graph (intended for publication on a Kafka topic)."""
    log.warning('Deletion event generated')
    log.warning(resource)
    store.trs.generate_change_event(resource, 'Deletion')

    event_triples = (
        (resource.uri, RDF.type, OSLC_EVENT.Event),
        (resource.uri, DCTERMS.description, Literal('Deletion Event')),
    )

    # Persist the OSLC event into the Fuseki-backed default graph.
    fuseki_graph = Graph(fuseki_store, identifier=default)
    for triple in event_triples:
        fuseki_graph.add(triple)

    # Build the same event as a standalone graph for the Kafka topic.
    kafka_graph = Graph()
    for triple in event_triples:
        kafka_graph.add(triple)
    return kafka_graph
| AlexVaPe/pyOSLC_GCP | oslcapi/api/helpers/service_events.py | service_events.py | py | 2,832 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "rdflib.Namespace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "rdflib.Namespace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"... |
32005535437 | import yaml
import os
import logging
from weight import Weight
from schema import Schema, SchemaError, Optional
from typing import Union
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.DEBUG)
class KB_Chaos:
    """Wrapper around a directory of injected-chaos YAML descriptions."""

    def __init__(self, chaos_path):
        # Directory holding the chaos description YAML files.
        self.chaos_path = chaos_path
        # Most recently parsed chaos description, kept for comparison.
        self.last_chaos = None

    def is_instance_related(self):
        """Check whether chaos is instance related.

        Compares the 'anomalies' section across all .yaml files in the
        directory; any mismatch means the chaos is instance related.

        Returns:
            tuple: (bool instance_related, dict | None last parsed chaos).
        """
        entries = os.listdir(self.chaos_path)
        if len(entries) == 0:
            _LOGGER.error("No chaos found in {}".format(self.chaos_path))
            return True, None
        # BUGFIX: `data` was previously unbound (NameError) when the
        # directory contained entries but no .yaml files.
        data = None
        for chaos in entries:
            if chaos.endswith(".yaml"):
                # Use a context manager so the handle is closed on errors too.
                with open(os.path.join(self.chaos_path, chaos)) as f:
                    data = yaml.safe_load(f.read())
                if self.last_chaos is None:
                    self.last_chaos = data
                elif data["anomalies"] == self.last_chaos["anomalies"]:
                    continue
                else:
                    # Divergent anomaly sets -> instance related.
                    return True, data
        # Too few samples to decide otherwise; treat as instance related.
        if len(entries) <= 2:
            return True, data
        return False, data
class KB:
    """Knowledge base of chaos fingerprints.

    Loads a YAML knowledge base, validates it against a schema, and derives
    normalized weight scores for metrics/traces/logs/cmds fingerprints at
    both the per-chaos and per-chaos-type level.
    """

    def __init__(self) -> None:
        self.kb = None
        # Two hierarchy levels used by the experiment.
        self.hierarchy = {0: "chaos_type", 1: "chaos"}
        # Per-chaos fingerprint name lists (one sub-list per chaos).
        self.metrics = []
        self.traces = []
        self.logs = []
        self.cmds = []
        # Per-chaos-type fingerprint name lists (union over a type's chaoses).
        self.type_metrics = []
        self.type_traces = []
        self.type_logs = []
        self.type_cmds = []
        # Normalized scores, populated by score_fingerprint().
        self.metrics_score = None
        self.traces_score = None
        self.logs_score = None
        self.cmds_score = None
        self.type_metrics_score = None
        self.type_traces_score = None
        self.type_logs_score = None
        self.type_cmds_score = None

    def load(self, kb_path: str) -> Union[dict, None]:
        """Load knowledge base

        Args:
            kb_path (str): Knowledge base path (an already-parsed dict is
                also accepted).

        Raises:
            Exception: Knowledge base check

        Returns:
            dict: Knowledge base
        """
        if type(kb_path) is str:
            # Use a context manager so the file is closed even on parse errors.
            with open(kb_path) as f:
                self.kb = yaml.safe_load(f.read())
        elif type(kb_path) is dict:
            self.kb = kb_path
        if self.check_kb():
            self.score_fingerprint()
            return self.kb
        raise Exception("Knowledge Base check failed")

    def check_kb(self) -> bool:
        """Check knowledge base config against the expected schema.

        Raises:
            se: Schema error

        Returns:
            bool: check result
        """
        if self.kb is None:
            _LOGGER.error("Knowledge Base is not loaded")
            return False
        # One clue: where it applies (index), what happened (action),
        # and an optional ordering hint.
        anomaly_schema = [{"index": int, "action": str, Optional("order"): int}]
        custom_metrics_schema = {
            Optional("network"): anomaly_schema,
            Optional("cpu"): anomaly_schema,
            Optional("memory"): anomaly_schema,
            Optional("io"): anomaly_schema,
            Optional("container"): anomaly_schema,
            Optional("mongo"): anomaly_schema,
            Optional("mysql"): anomaly_schema,
            Optional("icmp"): anomaly_schema,
            Optional("time"): anomaly_schema,
            Optional("jvm"): anomaly_schema,
            Optional("http"): anomaly_schema,
        }
        custom_traces_schema = {
            Optional("onehop"): anomaly_schema,
        }
        custom_logs_schema = {
            Optional("pod"): anomaly_schema,
        }
        custom_cmds_schema = {
            Optional("config"): anomaly_schema,
            Optional("exec"): anomaly_schema,
        }
        # One chaos experiment entry with its anomaly fingerprint.
        custom_schema = [
            {
                "index": int,
                "experiment": str,
                "instance_related": bool,
                Optional("order"): bool,
                "anomalies": {
                    Optional("metrics"): custom_metrics_schema,
                    Optional("traces"): custom_traces_schema,
                    Optional("logs"): custom_logs_schema,
                    Optional("cmds"): custom_cmds_schema,
                },
            }
        ]
        # Top level: chaos types, each holding a list of experiments.
        config_schema = Schema(
            {
                Optional("network"): custom_schema,
                Optional("pod"): custom_schema,
                Optional("stress"): custom_schema,
                Optional("time"): custom_schema,
                Optional("jvm"): custom_schema,
                Optional("dns"): custom_schema,
                Optional("http"): custom_schema,
                Optional("io"): custom_schema,
                Optional("config"): custom_schema,
            }
        )
        try:
            config_schema.validate(self.kb)
            _LOGGER.info("Configuration is valid.")
        except SchemaError as se:
            raise se
        return True

    def score_fingerprint(self):
        """Score fingerprint"""
        # Two hierarchies for our experiment: chaos type, then chaos.
        chaos_types = self.kb.keys()
        for chaos_type in chaos_types:
            type_metrics = []
            type_traces = []
            type_logs = []
            type_cmds = []
            for chaos in self.kb[chaos_type]:
                anomalies = chaos["anomalies"]
                metrics = (
                    anomalies["metrics"] if "metrics" in anomalies else None
                )
                traces = anomalies["traces"] if "traces" in anomalies else None
                logs = anomalies["logs"] if "logs" in anomalies else None
                cmds = anomalies["cmds"] if "cmds" in anomalies else None
                (
                    metrics_instance,
                    traces_instance,
                    logs_instance,
                    cmds_instance,
                ) = self.analyse(metrics, traces, logs, cmds)
                type_metrics += metrics_instance
                type_traces += traces_instance
                type_logs += logs_instance
                type_cmds += cmds_instance
            # Record only non-empty per-type fingerprints
            # (plain `if` statements instead of `expr if cond else None`).
            if type_metrics:
                self.type_metrics.append(type_metrics)
            if type_traces:
                self.type_traces.append(type_traces)
            if type_logs:
                self.type_logs.append(type_logs)
            if type_cmds:
                self.type_cmds.append(type_cmds)
        for (data, score) in zip(
            [
                self.metrics,
                self.traces,
                self.logs,
                self.cmds,
                self.type_metrics,
                self.type_traces,
                self.type_logs,
                self.type_cmds,
            ],
            [
                "metrics_score",
                "traces_score",
                "logs_score",
                "cmds_score",
                "type_metrics_score",
                "type_traces_score",
                "type_logs_score",
                "type_cmds_score",
            ],
        ):
            weight = Weight(data)
            weighted_score = weight()
            # Normalize so the strongest fingerprint scores 1.0.
            max_score = max(weighted_score.values())
            for key in weighted_score:
                weighted_score[key] = weighted_score[key] / max_score
            setattr(self, score, weighted_score)

    def analyse(
        self, metrics: list, traces: list, logs: list, cmds: list
    ) -> tuple:
        """Analyse metrics, traces, logs, cmds

        Args:
            metrics (list): metrics
            traces (list): traces
            logs (list): logs
            cmds (list): commands

        Returns:
            tuple: metrics, traces, logs, cmds
        """
        metrics_instance = self.analyse_fingerprint(metrics, "metrics")
        traces_instance = self.analyse_fingerprint(traces, "traces")
        logs_instance = self.analyse_fingerprint(logs, "logs")
        cmds_instance = self.analyse_fingerprint(cmds, "cmds")
        return metrics_instance, traces_instance, logs_instance, cmds_instance

    def analyse_fingerprint(
        self, fingerprint: list, target_type: str = ""
    ) -> list:
        """Analyse fingerprint individually

        Args:
            fingerprint (list): Fingerprint
            target_type (str, optional): Fingerprint type. Defaults to "".

        Returns:
            list: Rename instances
        """
        if fingerprint is None or target_type == "":
            _LOGGER.info("No {} found in Knowledge Base".format(target_type))
            return []
        types = fingerprint.keys()
        new_instance = []
        for one_type in types:
            for clue in fingerprint[one_type]:
                idx = clue["index"]
                action = clue["action"]
                # Encode each clue as "<type>-<index>-<action>".
                clue_name = one_type + "-" + str(idx) + "-" + action
                new_instance.append(clue_name)
        if new_instance:
            if target_type == "metrics":
                self.metrics.append(new_instance)
            elif target_type == "traces":
                self.traces.append(new_instance)
            elif target_type == "logs":
                self.logs.append(new_instance)
            elif target_type == "cmds":
                self.cmds.append(new_instance)
                # BUGFIX: removed a stray duplicate append that also pushed
                # cmds fingerprints into self.logs.
        return new_instance
| Fengrui-Liu/MicroCBR | microCBR/kb.py | kb.py | py | 9,333 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_... |
25409835667 | import cv2
import numpy as np

# Script purpose: track a user-selected region (e.g. a barbell) through
# 'lift.mp4' with the MOSSE correlation tracker, draw the motion path, write
# an annotated video, and report per-frame speed in pixels per second.
# Press 'q' in the preview window to stop early.

# Load video
cap = cv2.VideoCapture('lift.mp4')

# Define output video properties
output_file = 'output.avi'
fourcc = cv2.VideoWriter_fourcc(*'XVID')
fps = cap.get(cv2.CAP_PROP_FPS)
frame_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

# Create VideoWriter object
out = cv2.VideoWriter(output_file, fourcc, fps, frame_size)

# Select ROI (interactive: user draws a rectangle on the first frame)
ret, frame = cap.read()
r = cv2.selectROI(frame)

# Initialize tracker
tracker = cv2.legacy.TrackerMOSSE_create()
tracker.init(frame, r)

# Initialize variables (center-point history and per-frame speeds)
positions = []
speeds = []

# Create blank image for line overlay (same shape as the video frames)
overlay = np.zeros_like(frame)

# Process video frame by frame
while True:
    # Read frame
    ret, frame = cap.read()
    if not ret:
        break

    # Track object
    ok, bbox = tracker.update(frame)

    # Draw bounding box and center point
    if ok:
        # Convert bounding box to integers
        # NOTE(review): np.int0 is deprecated in newer NumPy (use np.intp).
        bbox = np.int0(bbox)

        # Draw bounding box
        cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 0), 2)

        # Calculate center point
        cx = bbox[0] + bbox[2] // 2
        cy = bbox[1] + bbox[3] // 2

        # Draw center point
        cv2.circle(frame, (cx, cy), 4, (0, 255, 0), -1)

        # Save position
        positions.append((cx, cy))

        # Draw line to previous center point
        if len(positions) > 1:
            cv2.line(overlay, positions[-1], positions[-2], (0, 0, 255), 2)

    # Calculate speed (pixel distance between consecutive centers times fps)
    if len(positions) > 1:
        distance = np.sqrt((positions[-1][0] - positions[-2][0]) ** 2 + (positions[-1][1] - positions[-2][1]) ** 2)
        speed = distance / (1 / cap.get(cv2.CAP_PROP_FPS))
        speeds.append(speed)

    # Add line overlay to frame (blended at 50% opacity)
    frame = cv2.addWeighted(frame, 1, overlay, 0.5, 0)

    # Display frame
    cv2.imshow('Frame', frame)

    # Write frame to output video
    out.write(frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Calculate maximum, minimum, and average speed
if len(speeds) > 0:
    max_speed = max(speeds)
    min_speed = min(speeds)
    avg_speed = sum(speeds) / len(speeds)
    print(f"Max speed: {max_speed:.2f} pixels per second")
    print(f"Min speed: {min_speed:.2f} pixels per second")
    print(f"Avg speed: {avg_speed:.2f} pixels per second")
else:
    print("No speed data available")

# Release resources
cap.release()
out.release()
cv2.destroyAllWindows() | taoofstefan/BB-Tracking | main.py | main.py | py | 2,502 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_P... |
73673851706 | import time
# from seleniumwire import webdriver
from selenium import webdriver
from selenium.webdriver.edge.service import Service
import requests
import datetime
import lib
from fake_useragent import UserAgent
from pyvirtualdisplay import Display
ua = UserAgent()
driver_path = lib.driver_path
ex_path = lib.ex_path
# proxies = lib.proxies
# 初始化 web_driver, 记得开代理
class Driver(object):
    """Factory for pre-configured Selenium Edge drivers (random user agent,
    optional proxy, optional paywall-bypass extension, WebRTC leak prevention).
    """

    def __init__(self, driver_path=r"D:\Python Projects\Webdriver\msedgedriver.exe", extension_path=None, proxies=None):
        self.driver_path = driver_path
        self.ex_path = extension_path
        self.proxies = proxies
        if not extension_path:
            print('Warning: extension path is empty. Could not bypass the paywall')

    def blank_driver(self, mute=False):
        """Build and return a configured Edge WebDriver.

        Args:
            mute: suppress the "driver initialized" message when True.
        """
        self.browser_option = webdriver.EdgeOptions()
        # BUGFIX: add_experimental_option stores options in a dict keyed by
        # name, so calling it twice with 'excludeSwitches' (and later twice
        # with 'prefs') silently discarded the first value. Merge each into
        # a single call.
        self.browser_option.add_experimental_option(
            'excludeSwitches', ['enable-automation', 'ignore-certificate-errors'])
        self.browser_option.add_argument('--disable-gpu')
        self.browser_option.add_argument('--user-agent=' + ua.random)
        self.browser_option.add_experimental_option("detach", True)
        self.browser_option.add_experimental_option("useAutomationExtension", False)
        if self.ex_path:
            self.browser_option.add_extension(self.ex_path)
        if self.proxies:
            self.browser_option.add_argument('--proxy-server=' + self.proxies)
        preferences = {
            # Prevent WebRTC from leaking the real IP around the proxy.
            "webrtc.ip_handling_policy": "disable_non_proxied_udp",
            "webrtc.multiple_routes_enabled": False,
            "webrtc.nonproxied_udp_enabled": False,
            # Disable the password manager prompts.
            "credentials_enable_service": False,
            "profile.password_manager_enabled": False,
            # Skip image loading to speed up page fetches.
            'profile.managed_default_content_settings.images': 2,
        }
        self.browser_option.add_experimental_option("prefs", preferences)
        # self.browser_option.add_argument('--headless=chrome')
        # BUGFIX: use the instance's configured path instead of the module
        # global `driver_path`, which ignored the constructor argument.
        driver = webdriver.Edge(service=Service(self.driver_path),
                                options=self.browser_option,
                                )
        if not mute:
            print('driver initialized')
        return driver
#
if __name__ == '__main__':
    # Manual smoke test: build a driver with the paywall-bypass extension and
    # open a WSJ article, then print the final URL to verify navigation.
    driver = Driver(extension_path=ex_path).blank_driver()
    # driver.get('https://browserleaks.com/ip')
    driver.get('https://www.wsj.com/articles/feds-bullard-sees-need-to-keep-up-rapid-pace-of-rate-increases-11674058442?mod=markets_lead_pos9')
    print(driver.current_url)
    # # driver.get('http://httpbin.org/ip')
    # # driver.get('http://www.google.com')
    # print(driver.page_source)
    # time.sleep(200)
    # driver.quit()
| YoimiyaInUSTC/WSJ-Crawler | driver_init.py | driver_init.py | py | 2,879 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "fake_useragent.UserAgent",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lib.driver_path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "lib.ex_path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "seleni... |
18091305999 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter the default value of BasicMemberInformation.auth_key.

    NOTE(review): the default is a hard-coded hex digest captured at
    makemigrations time; it only seeds rows touched by this migration and is
    not a per-row secret — confirm fresh keys are generated in application
    code.
    """

    dependencies = [
        ('account', '0051_auto_20150130_1145'),
    ]

    operations = [
        migrations.AlterField(
            model_name='basicmemberinformation',
            name='auth_key',
            field=models.CharField(max_length=64, default='17e6e879a124e82aabec03d929cf0321a3d85672a8ee06c76765f9f27980ab26'),
            preserve_default=True,
        ),
    ]
| hongdangodori/slehome | slehome/account/migrations/0052_auto_20150130_1145.py | 0052_auto_20150130_1145.py | py | 531 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{... |
22456923071 | from django.core import paginator
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .models import *
from .forms import *
from reacts.forms import CommentForm
from .utils import searchTasks, paginateTasks
from users.decorator import allowed_users
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
def is_ajax(request):
    """Return True when the request carries the jQuery/XHR AJAX marker.

    Django exposes the X-Requested-With header as HTTP_X_REQUESTED_WITH in
    request.META.
    """
    header = request.META.get('HTTP_X_REQUESTED_WITH')
    return header == 'XMLHttpRequest'
def ajax_test(request):
    """Probe endpoint reporting whether the request arrived via AJAX."""
    message = "This is ajax" if is_ajax(request=request) else "Not ajax"
    return HttpResponse(message)
@login_required(login_url="login")
def tasks(request):
    """List tasks with search filtering and pagination (6 per page)."""
    task_list, search_query = searchTasks(request)
    custom_range, task_list = paginateTasks(request, task_list, 6)
    # Kept for parity with the original view; the profile lookup may hit the DB.
    user = request.user.profile
    context = {
        'tasks': task_list,
        'search_query': search_query,
        'custom_range': custom_range,
    }
    return render(request, 'tasks/tasks.html', context)
@login_required(login_url="login")
def task(request, pk):
    """Display a single task and handle comment submission.

    On POST, validates the comment form before saving; an invalid form is
    re-rendered with its errors instead of crashing.
    """
    taskObj = Task.objects.get(id=pk)
    form = CommentForm()
    if request.method == 'POST':
        form = CommentForm(request.POST)
        # BUGFIX: the original saved the form without calling is_valid(),
        # which raises on invalid input. Only persist validated comments.
        if form.is_valid():
            comment = form.save(commit=False)
            comment.task = taskObj
            comment.user = request.user.profile
            comment.save()
            messages.success(request, 'Your comment was successfully submitted!')
            return redirect('task', pk=taskObj.id)
    return render(request, 'tasks/single-task.html', {'task': taskObj, 'form': form})
@allowed_users(allowed_roles=['اسرة اعداد خدام'])
@login_required(login_url="login")
def createTask(request):
    """Render the task creation form; persist a valid submission on POST."""
    profile = request.user.profile
    form = TaskForm()
    if request.method == 'POST':
        form = TaskForm(request.POST, request.FILES)
        if form.is_valid():
            new_task = form.save(commit=False)
            new_task.user = profile
            new_task.save()
            return redirect('tasks')
    return render(request, "tasks/task_form.html", {'form': form})
@allowed_users(allowed_roles=['اسرة اعداد خدام'])
@login_required(login_url="login")
def deleteTask(request, pk):
    """Show a confirmation page for one of the user's tasks; delete on POST."""
    profile = request.user.profile
    # Scoped to the requesting user's tasks, so others' tasks are unreachable.
    task = profile.task_set.get(id=pk)
    if task and request.method == 'POST':
        task.delete()
        return redirect('tasks')
    return render(request, 'delete_template.html', {'object': task})
# @allowed_users(allowed_roles=['اسرة اعداد خدام'])
@login_required(login_url="login")
def submitTask(request, pk):
    """Create or update the current user's submission for task `pk`.

    If a submission already exists it is pre-loaded into the form (and
    updated on POST); otherwise a new Submit row is created.
    """
    profile = request.user.profile
    form = SubmitForm()
    task = Task.objects.get(id=pk)
    try:
        # Pre-fill the form with an existing submission, if any.
        # NOTE(review): bare except hides real query errors, and `submit`
        # stays unbound when no submission exists (relied on below).
        submit = Submit.objects.filter(user=profile,task=task)[0]
        form = SubmitForm(instance=submit)
    except:
        pass
    if request.method == 'POST':
        form = SubmitForm(request.POST, request.FILES)
        try:
            # Update path: `submit` is bound only when a submission existed.
            form = SubmitForm(request.POST, request.FILES, instance=submit)
            if form.is_valid():
                submit =form.save()
                submit.save()
        except:
            # Create path: NameError from the unbound `submit` lands here.
            if form.is_valid():
                submit = form.save(commit=False)
                submit.user = profile
                submit.task = Task.objects.get(id=pk)
                submit.save()
        # Invalid forms are silently dropped and the user is redirected anyway.
        return redirect('tasks')
    context = {'form': form}
    return render(request, 'tasks/submit-form.html', context)
@login_required(login_url="login")
def viewSubmits(request, pk):
    """Render the detail page for a single submission."""
    submit = Submit.objects.get(id=pk)
    context = {'submit': submit}
    return render(request, 'tasks/view-submits.html', context) | Kyrillos1/Ekhdm | tasks/views.py | views.py | py | 3,892 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "utils.searchTasks",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "utils.paginateTasks",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django... |
4605424135 | import argparse
from spherenet import OmniMNIST, OmniFashionMNIST
from spherenet import SphereConv2D, SphereMaxPool2D
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
class SphereNet(nn.Module):
    """Spherical CNN: two SphereConv2D + SphereMaxPool2D stages followed by a
    linear classifier over the flattened 14400-dim feature map."""

    def __init__(self):
        super(SphereNet, self).__init__()
        self.conv1 = SphereConv2D(1, 32, stride=1)
        self.pool1 = SphereMaxPool2D(stride=2)
        self.conv2 = SphereConv2D(32, 64, stride=1)
        self.pool2 = SphereMaxPool2D(stride=2)
        self.fc = nn.Linear(14400, 10)

    def forward(self, x):
        # Two conv/pool stages with ReLU, then flatten into the classifier.
        out = F.relu(self.pool1(self.conv1(x)))
        out = F.relu(self.pool2(self.conv2(out)))
        out = out.view(-1, 14400)  # (B, C, H, W) -> (B, C*H*W)
        return self.fc(out)
class Net(nn.Module):
    """Conventional CNN baseline: two 3x3 conv layers with 2x2 max-pooling,
    then a linear head producing 10 class logits."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc = nn.Linear(64*13*13, 10)

    def forward(self, x):
        # Conv -> pool -> ReLU twice, then flatten for the classifier head.
        out = F.relu(F.max_pool2d(self.conv1(x), 2))
        out = F.relu(F.max_pool2d(self.conv2(out), 2))
        flat = out.view(-1, 64*13*13)  # flatten, [B, C, H, W) -> (B, C*H*W)
        return self.fc(flat)
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch, logging progress every args.log_interval batches.

    Args:
        args: namespace with a `log_interval` attribute.
        model: network to optimize (moved to `device` by the caller).
        device: target device for each batch.
        train_loader: iterable of (data, target) batches with a `.dataset`.
        optimizer: optimizer stepping `model`'s parameters.
        epoch: epoch number, used only for logging.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        if data.dim() == 3:
            data = data.unsqueeze(1)  # (B, H, W) -> (B, C, H, W)
        loss = F.cross_entropy(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            seen = batch_idx * len(data)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, seen, len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
    """Evaluate `model` on the test set and print average loss and accuracy."""
    model.eval()
    total_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            if data.dim() == 3:
                data = data.unsqueeze(1)  # (B, H, W) -> (B, C, H, W)
            output = model(data)
            total_loss += F.cross_entropy(output, target).item()  # sum up batch loss
            # Predicted class = index of the max log-probability.
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    total_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        total_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
def main():
    """Parse CLI arguments, build the omnidirectional data loaders, and train
    the spherical CNN alongside a conventional CNN baseline each epoch."""
    # Training settings
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data', type=str, default='MNIST',
                        help='dataset for training, options={"FashionMNIST", "MNIST"}')
    parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                        help='input batch size for training')
    parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
                        help='input batch size for testing')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--optimizer', type=str, default='adam',
                        help='optimizer, options={"adam, sgd"}')
    parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                        help='learning rate')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed')
    parser.add_argument('--log-interval', type=int, default=1, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-interval', type=int, default=1, metavar='N',
                        help='how many epochs to wait before saving model weights')
    args = parser.parse_args()

    # Reproducibility and device selection.
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    device = torch.device('cuda' if use_cuda else 'cpu')
    loader_kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # Build the requested omnidirectional dataset pair.
    if args.data == 'FashionMNIST':
        train_dataset = OmniFashionMNIST(fov=120, flip=True, h_rotate=True, v_rotate=True, img_std=255, train=True)
        test_dataset = OmniFashionMNIST(fov=120, flip=True, h_rotate=True, v_rotate=True, img_std=255, train=False, fix_aug=True)
    elif args.data == 'MNIST':
        train_dataset = OmniMNIST(fov=120, flip=True, h_rotate=True, v_rotate=True, train=True)
        test_dataset = OmniMNIST(fov=120, flip=True, h_rotate=True, v_rotate=True, train=False, fix_aug=True)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **loader_kwargs)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False, **loader_kwargs)

    # Two models trained side by side for comparison.
    sphere_model = SphereNet().to(device)
    model = Net().to(device)
    if args.optimizer == 'adam':
        sphere_optimizer = torch.optim.Adam(sphere_model.parameters(), lr=args.lr)
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    elif args.optimizer == 'sgd':
        sphere_optimizer = torch.optim.SGD(sphere_model.parameters(), lr=args.lr, momentum=args.momentum)
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    runs = [
        ('Sphere CNN', sphere_model, sphere_optimizer, 'sphere_model.pkl'),
        ('Conventional CNN', model, optimizer, 'model.pkl'),
    ]
    for epoch in range(1, args.epochs + 1):
        for title, net, opt, checkpoint in runs:
            print('{} {} {}'.format('='*10, title, '='*10))
            train(args, net, device, train_loader, opt, epoch)
            test(args, net, device, test_loader)
            if epoch % args.save_interval == 0:
                torch.save(net.state_dict(), checkpoint)
| ChiWeiHsiao/SphereNet-pytorch | example.py | example.py | py | 6,671 | python | en | code | 106 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "spherenet.SphereConv2D",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "spherenet.Sphere... |
35207313249 | import shutil
from PyQt5.QtCore import QPropertyAnimation, QEasingCurve
import sys
from PyQt5.QtWidgets import QSlider, QLabel
from PyQt5.QtGui import QFont
from PyQt5.QtCore import QSignalMapper
from classes.FrequencyDomain import *
from classes.TimeGraph import *
from collections import namedtuple
from Dialog import *
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QVBoxLayout, QDialog
Slider_tuple = namedtuple("Slider", ["min_frequency", "max_frequency", "slider_object", "window_curve"])
FORM_CLASS, _ = loadUiType(path.join(path.dirname(__file__), "mainWindow.ui"))
class MainApp(QMainWindow, FORM_CLASS):
    def __init__(self, parent=None):
        """Build the equalizer main window: load the Qt UI, wire the time
        graphs and spectrograms, and register the per-mode frequency bands."""
        super(MainApp, self).__init__(parent)
        QMainWindow.__init__(self, parent=None)
        self.setupUi(self)
        # Gaussian-window standard deviation, set via the Gaussian dialog.
        self.standard_deviation = 0
        self.playing = None
        self.animation = None
        self.mode_name = None
        self.dialog_window = None
        self.setWindowTitle("Equalizer")
        # 25 ms timer drives the playback cursor on both time graphs.
        self.timer = QtCore.QTimer()
        self.timer.setInterval(25)
        self.timer.timeout.connect(self.move_line)
        # Maps mode actions to add_signal_and_make_slider(mode_name).
        self.mode_mapper = QSignalMapper()
        self.mode_mapper.mapped[str].connect(self.add_signal_and_make_slider)
        self.input_signal_graph = TimeGraph(self.inputAudio)
        self.output_signal_graph = TimeGraph(self.outputAudio)
        # Keep input/output views panned and zoomed together.
        self.outputAudio.setXLink(self.inputAudio)
        self.outputAudio.setYLink(self.inputAudio)
        # Maps each band slider back to slider_value_change(band_name).
        self.slider_mapper = QSignalMapper()
        self.slider_mapper.mapped[str].connect(self.slider_value_change)
        # mode -> {band name -> Slider_tuple(min_freq, max_freq, slider, curve)};
        # slider_object/window_curve are filled when a signal of that mode loads.
        self.mode_dictionary = {
            "ECG": {"RBBB": Slider_tuple(0, 17.5, None, None), "Sinus": Slider_tuple(0, 4, None, None),
                    "ventricular fibrillation": Slider_tuple(17, 150, None, None)},
            "Animal": {"Owl": Slider_tuple(0, 800, None, None), "Horse": Slider_tuple(1000, 2200, None, None),
                       "Bat": Slider_tuple(2500, 5000, None, None), "Goat": Slider_tuple(0, 7000, None, None),
                       "Dolphin": Slider_tuple(0, 14000, None, None)},
            "Musical": {"Guitar": Slider_tuple(0, 900, None, None),
                        "Piccolo": Slider_tuple(1000, 2000, None, None),
                        "Xylophone": Slider_tuple(7000, 15000, None, None),
                        "trianglemod": Slider_tuple(4000, 6000, None, None)},
            "Uniform": {}}
        # Buttons shared by both time graphs (reset/clear/zoom/speed).
        self.ui_components = {"reset": self.resetGraph, "clear": self.clearGraph,
                              "zoom_in": self.zoomInBtn, "zoom_out": self.zoomOutBtn, "speed_up": self.speedUp,
                              "slow_down": self.slowDown, }
        self.frequency_domain = FrequencyDomain(input_spectro_pointer=self.spectroInputLayout,
                                                output_spectro_pointer=self.spectroOutputLayout,
                                                frequency_graph=self.frequency_graph)
        handle_graph_buttons(self.ui_components, self.input_signal_graph)
        handle_graph_buttons(self.ui_components, self.output_signal_graph)
        # Default smoothing window until the user picks another.
        self.window_signal = "Rectangular window"
        self.handle_buttons()
    def handle_buttons(self):
        """Wire UI signals to their handlers and set the initial button state."""
        self.SideBar.toggled.connect(self.toggle_side_bar)
        self.windowComboBox.currentTextChanged.connect(self.window_control)
        self.Add_signal.clicked.connect(self.open_add_signal_dialog)
        self.mute_input.clicked.connect(self.unmute_input_graph)
        self.mute_output.clicked.connect(self.unmute_output_graph)
        self.muteAllSounds.clicked.connect(self.mute_all)
        self.clearGraph.clicked.connect(self.clear_all)
        self.playPauseGraph.clicked.connect(self.pause_play_graph)
        self.volumeSlider.valueChanged.connect(self.control_volume)
        # Graph controls stay disabled until a signal is loaded.
        disable_enable_buttons(self.ui_components, False)
        self.saveAudio.clicked.connect(self.save_output_audio_file)
def move_line(self):
self.input_signal_graph.move_line()
self.output_signal_graph.move_line()
def control_volume(self):
volume = self.volumeSlider.value()
pygame.mixer.music.set_volume(volume / 100.0)
if volume == 0:
self.muteAllSounds.setIcon(QIcon('icons/mute.png'))
else:
self.muteAllSounds.setIcon(QIcon('icons/sound.png'))
def window_control(self):
is_play = self.playing
if is_play:
self.pause_graphs()
if self.windowComboBox.currentText() == 'Gaussian window':
self.open_gaussian_window()
self.window_signal = self.windowComboBox.currentText()
self.create_output(False)
if is_play:
self.play_graphs()
def open_gaussian_window(self):
gaussian_window = QDialog(self)
gaussian_window.setWindowTitle('Gaussian Window')
layout = QVBoxLayout(gaussian_window)
label = QLabel('standard deviation = 500', gaussian_window)
layout.addWidget(label)
standard_deviation_slider = QSlider(gaussian_window)
standard_deviation_slider.setOrientation(1)
standard_deviation_slider.setMinimum(50)
standard_deviation_slider.setMaximum(1000)
standard_deviation_slider.setValue(500)
standard_deviation_slider.valueChanged.connect(
lambda: label.setText(f'standard deviation = {standard_deviation_slider.value()}'))
layout.addWidget(standard_deviation_slider)
ok_button = QPushButton('OK', gaussian_window)
ok_button.clicked.connect(gaussian_window.accept)
layout.addWidget(ok_button)
result = gaussian_window.exec_()
if result == QDialog.Accepted:
self.standard_deviation = standard_deviation_slider.value()
    def add_signal_and_make_slider(self, file_path, mode_name):
        """Load a WAV file for `mode_name` and build one slider per band.

        Clears any previous state, plots the input signal and its spectrum,
        then creates a gain slider, band-boundary lines and a window curve for
        each frequency band defined in mode_dictionary[mode_name].
        """
        self.clear_all()
        disable_enable_buttons(self.ui_components, True)
        self.mode_name = mode_name
        data, sample_rate = self.input_signal_graph.add_wav_file(file_path, "input")
        self.frequency_domain.add_new_file(data, sample_rate)
        # Uniform mode derives its 10 bands from the actual spectrum.
        if mode_name == "Uniform":
            self.add_uniform_signal(self.frequency_domain.frequencies)
        mode_slider_ranges = self.mode_dictionary[mode_name]
        position_index = 1
        for slider_name, slider_parameter in mode_slider_ranges.items():
            label = QLabel(slider_name)
            label.setFont(QFont('Helvetica [Cronyx]', 10))
            # Vertical gain slider, 0..50, default 10 (presumably unity gain
            # — TODO confirm against slider_value_change).
            slider = QSlider()
            slider.setOrientation(0)
            slider.setMinimum(0)
            slider.setMaximum(50)
            slider.setMinimumSize(20, 250)
            slider.setValue(10)
            slider.setTickPosition(QSlider.TicksAbove)
            # Route value changes through the mapper with the band name.
            self.slider_mapper.setMapping(slider, slider_name)
            slider.valueChanged.connect(self.slider_mapper.map)
            self.sliderLayout.addWidget(slider)
            self.sliderLayout.addWidget(label)
            # Mark the band's frequency range on the spectrum plot.
            line_color = random_color_generator()
            frequency_start_line = pg.InfiniteLine(pos=slider_parameter.min_frequency, movable=False,
                                                   markers=[('>|', (1 - 0.25) / len(mode_slider_ranges.keys()), 10.0)],
                                                   pen=line_color)
            frequency_end_line = pg.InfiniteLine(pos=slider_parameter.max_frequency, movable=False,
                                                 markers=[('|<', (1 - 0.25) / len(mode_slider_ranges.keys()), 10.0)],
                                                 pen=line_color)
            pg.InfLineLabel(frequency_start_line, text=slider_name,
                            position=(1 - 0.2) * position_index / len(mode_slider_ranges.keys()))
            window_on_frequency_graph = pg.PlotCurveItem()
            # Remember the widgets for this band so later edits can reach them.
            self.mode_dictionary[mode_name][slider_name] = slider_parameter._replace(slider_object=slider,
                                                                                     window_curve=window_on_frequency_graph)
            self.frequency_domain.frequency_graph.addItem(window_on_frequency_graph)
            self.frequency_domain.frequency_graph.addItem(frequency_start_line)
            self.frequency_domain.frequency_graph.addItem(frequency_end_line)
            position_index += 1
        self.create_output(True)
        self.pause_play_graph()
        self.unmute_input_graph()
def add_uniform_signal(self, frequencies):
    """Split the spectrum into 10 equal-width bands for "Uniform" mode.

    Band i (1..10) spans frequencies[(i-1)*w] .. frequencies[i*w] where
    w = len(frequencies) // 10. The upper edge of the last band is clamped
    to the final frequency: previously `frequencies[(i + 1) * band_length]`
    raised IndexError whenever len(frequencies) was exactly divisible by 10.
    """
    band_length = len(frequencies) // 10
    last_index = len(frequencies) - 1
    for i in range(10):
        upper = frequencies[min((i + 1) * band_length, last_index)]
        self.mode_dictionary["Uniform"][f"{i + 1}"] = Slider_tuple(frequencies[i * band_length],
                                                                   upper,
                                                                   None, None)
def open_add_signal_dialog(self):
    """Show the file-selection dialog; its submit signal feeds the loader."""
    dialog = Dialog()
    dialog.submitClicked.connect(self.add_signal_and_make_slider)
    dialog.show()
    # Keep a reference on self so the dialog is not garbage-collected.
    self.dialog_window = dialog
def toggle_side_bar(self):
    """Animate the side bar open (500 px wide) or closed (0 px)."""
    target_width = 500 if self.SideBar.isChecked() else 0
    self.animation = QPropertyAnimation(self.sideBarFrame, b"minimumWidth")
    self.animation.setDuration(20)
    self.animation.setEndValue(target_width)
    self.animation.setEasingCurve(QEasingCurve.InOutQuart)
    self.animation.start()
    self.sideBarFrame.update()
def unmute_input_graph(self):
    """Route audio playback to the input signal only, updating the icons."""
    self.output_signal_graph.pygame_play_mute()
    self.input_signal_graph.pygame_play_unmute()
    # Input shows the live-sound icon, output the muted one.
    self.mute_input.setIcon(QIcon('icons/sound.png'))
    self.mute_output.setIcon(QIcon('icons/mute.png'))
def unmute_output_graph(self):
    """Route audio playback to the output signal only, updating the icons."""
    self.input_signal_graph.pygame_play_mute()
    self.output_signal_graph.pygame_play_unmute()
    # Output shows the live-sound icon, input the muted one.
    self.mute_output.setIcon(QIcon('icons/sound.png'))
    self.mute_input.setIcon(QIcon('icons/mute.png'))
def mute_all(self):
    """Silence playback on both graphs and show muted icons everywhere."""
    self.output_signal_graph.pygame_play_mute()
    # NOTE(review): this calls pygame_play_unmute(False) while every icon
    # below is set to the muted state — presumably unmute(False) means
    # "stay muted"; confirm against the signal-graph class.
    self.input_signal_graph.pygame_play_unmute(False)
    self.mute_input.setIcon(QIcon('icons/mute.png'))
    self.mute_output.setIcon(QIcon('icons/mute.png'))
    self.muteAllSounds.setIcon(QIcon('icons/mute.png'))
def clear_all(self):
    """Reset the frequency-domain view and remove all slider/label widgets.

    Fixes the original's variable shadowing, where the loop index `widget`
    was immediately reassigned to the widget object it indexed.
    """
    self.frequency_domain.clear()
    # Iterate indices in reverse so removals don't disturb positions
    # still to be visited.
    for index in reversed(range(self.sliderLayout.count())):
        widget = self.sliderLayout.itemAt(index).widget()
        if isinstance(widget, (QSlider, QLabel)):
            widget.deleteLater()
    disable_enable_buttons(self.ui_components, False)
def create_output(self, new):
    """Apply every band's window to the spectrum and rebuild the output WAV.

    `new` is forwarded to create_output_wav_file: True loads the result as a
    fresh output file, False updates the existing one.
    """
    for slider in self.mode_dictionary[self.mode_name].keys():
        self.change_frequency_domain_amplitudes(slider)
        # Draw the smoothing-window curve across the band's frequency span.
        self.mode_dictionary[self.mode_name][slider].window_curve.setData(
            np.linspace(self.mode_dictionary[self.mode_name][slider].min_frequency,
                        self.mode_dictionary[self.mode_name][slider].max_frequency, 500),
            window_function(std=self.standard_deviation, name=self.window_signal,
                            amplitude=self.frequency_domain.max_amplitudes), pen="black")
    self.create_output_wav_file(new)
def slider_value_change(self, slider_name):
    """Slider-move handler: rescale the named band, then refresh the output."""
    self.change_frequency_domain_amplitudes(slider_name)
    self.create_output_wav_file()
def change_frequency_domain_amplitudes(self, slider_name):
    """Scale the spectrum inside `slider_name`'s band by its slider gain.

    The gain is slider.value() / 10, shaped by the selected window
    function; results are written into frequency_domain.output_amplitudes.
    """
    frequency_array = self.frequency_domain.frequencies
    # Boolean mask of frequencies strictly inside the band's open interval.
    modified_band = (frequency_array > self.mode_dictionary[self.mode_name][slider_name].min_frequency) & (
        frequency_array < self.mode_dictionary[self.mode_name][slider_name].max_frequency)
    window_array = window_function(n=len(frequency_array[modified_band]),
                                   amplitude=self.mode_dictionary[self.mode_name][
                                       slider_name].slider_object.value() / 10,
                                   std=self.standard_deviation,
                                   name=self.window_signal)
    self.frequency_domain.output_amplitudes[modified_band] = self.frequency_domain.amplitudes[
        modified_band] * window_array
def create_output_wav_file(self, new=False):
    """Inverse-transform the modified spectrum and (re)load the output WAV.

    Playback is paused around the file rewrite and resumed afterwards so
    the audio backend never reads a half-written file.
    """
    playing_status = self.input_signal_graph.playing
    if playing_status:
        self.pause_graphs()
    reconstructed_signal = get_inverse_fourier_transform(self.frequency_domain.output_amplitudes)
    # 16-bit PCM written to a fixed scratch file.
    wav.write("played_audio/reconstructed.wav", self.frequency_domain.sampling_rate,
              reconstructed_signal.astype(np.int16))
    if new:
        data, sample_rate = self.output_signal_graph.add_wav_file("played_audio/reconstructed.wav", "output")
    else:
        data, sample_rate = self.output_signal_graph.update_wave_file("played_audio/reconstructed.wav", "output")
    self.frequency_domain.update_output_spectrogram(data, sample_rate)
    if playing_status:
        self.play_graphs()
def pause_graphs(self):
    """Stop the refresh timer and pause both signal views."""
    self.timer.stop()
    self.playing = False
    for graph in (self.input_signal_graph, self.output_signal_graph):
        graph.pause()
def play_graphs(self):
    """Resume both signal views and restart the refresh timer."""
    for graph in (self.input_signal_graph, self.output_signal_graph):
        graph.play()
    self.timer.start()
    self.playing = True
def pause_play_graph(self):
    """Toggle playback state and swap the play/pause button icon."""
    if self.playing:
        self.pause_graphs()
        icon_file = 'icons/play.png'
    else:
        self.play_graphs()
        icon_file = 'icons/pause.png'
    self.playPauseGraph.setIcon(QIcon(icon_file))
def save_output_audio_file(self):
    """Ask for a destination path and copy the rendered output WAV there.

    Does nothing when the user cancels the dialog: getSaveFileName then
    returns an empty path, which previously made shutil.copyfile raise.
    """
    save_path = QFileDialog.getSaveFileName(self, 'Save File', "audio file", "wav Files (*.wav)")[0]
    if not save_path:
        return
    shutil.copyfile("played_audio/output.wav", save_path)
def main():
    """Application entry point: create the Qt app and run its event loop."""
    application = QApplication(sys.argv)
    main_window = MainApp()
    main_window.show()
    application.exec_()
# Launch the GUI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| Zoz-HF/Ikoraiza | main.py | main.py | py | 14,129 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.namedtuple",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow.__init__",
"line_number": 23,
"usage_type": "call"
},
... |
11036089604 | import wx
import MapDisplay
class MapPreviewDialog(wx.Dialog):
    """Dialog that previews a map at iPhone resolution (480x320)."""

    def __init__(self, parent, id, map):
        # NOTE: `map` shadows the builtin; kept for caller compatibility.
        wx.Dialog.__init__(self, parent, id, "iPhone Preview")
        self.map = map
        # Embedded widget that renders the map itself.
        self.display = MapDisplay.MapDisplay(self, -1, map)
        self.display.SetMinSize((480, 320))
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.display, 0, wx.ALIGN_CENTER)
        self.SetSizerAndFit(sizer)
| sdetwiler/pammo | editor/source/MapPreviewDialog.py | MapPreviewDialog.py | py | 436 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "wx.Dialog",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "wx.Dialog.__init__",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "wx.Dialog",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "MapDisplay.MapDisplay",... |
16408579191 | from pyscf import gto, scf
import asf
import numpy as np
from pyscf.mcscf import avas
# Driver object for the automated active-space search.
ASF = asf.asf()
# C2 dimer, 1.2 A bond length, def2-SVP basis, neutral singlet.
mol = gto.Mole()
mol.atom = """
C 0.00000 0.00000 0.00000
C 0.00000 0.00000 1.20000
"""
mol.basis = 'def2-svp'
mol.charge = 0
mol.spin = 0
mol.build()
# UHF for UNOs
# NOTE(review): despite the comment above, this runs a *restricted* HF (RHF)
# with stability analysis — confirm which method was intended.
mf = scf.RHF(mol).run(max_cycle=100)
mo_new = mf.stability()[0]
# Follow internal instabilities until the SCF solution is stable.
while mo_new is not mf.mo_coeff:
    mf.kernel(dm0=mf.make_rdm1(mo_coeff=mo_new))
    mo_new = mf.stability()[0]
# AVAS initial guess.
ao_labels = ['C 2s', 'C 2p']
norb, ne_act, orbs = avas.avas(mf, ao_labels, canonicalize=False)
# Plot AVAS selected orbitals.
# orbital list
fao = asf.act_fao(mf.mo_occ, ne_act)
orblist = fao + np.array(list(range(norb)))
asf.visualize_mos(mol, orbs, orblist)
# Select an active space using entropies.
ele, mos = ASF.find_active_space(mol, ne_act, norb, orbs, plot=True)
| LDongWang/ActiveSpaceFinder | examples/avas/c2.py | c2.py | py | 889 | python | en | code | null | github-code | 6 | [
{
"api_name": "asf.asf",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyscf.gto.Mole",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyscf.gto",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pyscf.scf.RHF",
"line_number": 18,
... |
40880843423 | import re
import dateutil.parser
class DateRegex:
    """Convert date strings to naive datetimes via a regex with named groups.

    The pattern is expected to define `year`, `month` and `day` named
    groups; anything else yields None.
    """

    def __init__(
        self,
        pattern,
    ):
        self.pattern = pattern

    def convert(
        self,
        date_string,
    ):
        """Return a tz-naive datetime parsed from *date_string*, or None.

        Returns None when the pattern does not match, lacks the required
        named groups, or dateutil cannot parse the extracted pieces.
        """
        match = re.search(
            pattern=self.pattern,
            string=date_string,
            flags=re.IGNORECASE,
        )
        if not match or not match.groups():
            return None
        try:
            year = match.group('year')
            month = match.group('month')
            day = match.group('day')
        except Exception:
            # Pattern matched but lacks one of the required named groups.
            return None
        try:
            date_object = dateutil.parser.parse(
                timestr='{day} {month} {year}'.format(
                    day=day,
                    month=month,
                    year=year,
                ),
                dayfirst=True,
            )
            return date_object.replace(
                tzinfo=None,
            )
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            return None
class DateGeneric:
    """Convert arbitrary date strings via dateutil's fuzzy parser."""

    def __init__(
        self,
    ):
        pass

    def convert(
        self,
        date_string,
    ):
        """Return a tz-naive datetime fuzzily parsed from *date_string*.

        Returns None when parsing fails.
        """
        try:
            date_object = dateutil.parser.parse(
                timestr=date_string,
                fuzzy=True,
            )
            return date_object.replace(
                tzinfo=None,
            )
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            return None
class Dummy:
    """No-op converter that returns its input unchanged."""

    def __init__(self):
        pass

    def convert(self, original_string):
        """Identity conversion: hand back *original_string* as-is."""
        return original_string
| dhkron/whois | whois/parsers/converter.py | converter.py | py | 1,544 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.search",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "dateutil.parser.parser.parse",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "dateutil.pa... |
24106546856 | import os
import cv2
from flask import (
Flask,
Response,
render_template,
request,
session,
redirect,
send_file,
url_for,
)
from fas.inferer import face_detector, fas_model, infer_img, infer_video, infer_frame
# from fas.inferer import face_detector, fas_model, infer_img, infer_video
app = Flask(__name__, template_folder="template", static_folder="static")
app.secret_key = "abc"
app.config["UPLOAD_FOLDER"] = "static/upload"
app.config["UPLOAD_IMG_EXT"] = [
"bmp",
"jpg",
"jpeg",
"png",
"tif",
"tiff",
"dng",
"webp",
"mpo",
]
app.config["UPLOAD_VID_EXT"] = ["mp4", "mov", "avi", "mkv"]
app.config["OUTPUT_FOLDER"] = "static/output"
FACE_DETECTORS = ["haar cascade", "retina face"]
FAS_MODELS = ["large", "small", "large_rf-f12", "large_rf-f12-e2"]
global cap, fd, fas, cam_on
cam_on = False
cap = None
fd = None
fas = None
def get_media_file(filename):
    """Return the path of `filename` inside the configured upload folder."""
    upload_dir = app.config["UPLOAD_FOLDER"]
    return os.path.join(upload_dir, filename)
def is_image(file_path):
    """True when the file extension is one of the supported image formats."""
    extension = file_path.rsplit(".", 1)[-1].lower()
    return extension in app.config["UPLOAD_IMG_EXT"]
def is_video(file_path):
    """True when the file extension is one of the supported video formats."""
    extension = file_path.rsplit(".", 1)[-1].lower()
    return extension in app.config["UPLOAD_VID_EXT"]
def render_upload(
    html="upload_file.html",
    iimg=None,
    oimg=None,
    ivideo=None,
    ovideo=None,
    face_detectors=FACE_DETECTORS,
    fas_models=FAS_MODELS,
    selected_face_detector=FACE_DETECTORS[0],
    selected_fas_model=FAS_MODELS[0],
    fd_time=None,
    fas_time=None,
    noti=None
):
    """Render the upload page.

    iimg/oimg and ivideo/ovideo are the input/output media paths (image or
    video), fd_time/fas_time are inference timings, noti is a user notice.
    """
    return render_template(
        html,
        iimg=iimg,
        oimg=oimg,
        ivideo=ivideo,
        ovideo=ovideo,
        face_detectors=face_detectors,
        fas_models=fas_models,
        selected_face_detector=selected_face_detector,
        selected_fas_model=selected_fas_model,
        fd_time=fd_time,
        fas_time=fas_time,
        noti=noti
    )
def render_camera(
    html="camera.html",
    face_detectors=FACE_DETECTORS,
    fas_models=FAS_MODELS,
    selected_face_detector=FACE_DETECTORS[0],
    selected_fas_model=FAS_MODELS[0],
    noti=None
):
    """Render the webcam page, exposing the current capture state."""
    global cam_on
    context = {
        "cam_on": cam_on,
        "face_detectors": face_detectors,
        "fas_models": fas_models,
        "selected_face_detector": selected_face_detector,
        "selected_fas_model": selected_fas_model,
        "noti": noti,
    }
    return render_template(html, **context)
def render_phonecamera(
    html="phone_camera.html",
    cam_ip=None,
    face_detectors=FACE_DETECTORS,
    fas_models=FAS_MODELS,
    selected_face_detector=FACE_DETECTORS[0],
    selected_fas_model=FAS_MODELS[0],
    noti=None
):
    """Render the phone-camera page; cam_ip echoes the IP the user entered."""
    global cam_on
    return render_template(
        html,
        cam_on=cam_on,
        cam_ip=cam_ip,
        face_detectors=face_detectors,
        fas_models=fas_models,
        selected_face_detector=selected_face_detector,
        selected_fas_model=selected_fas_model,
        noti = noti
    )
def generate_frames():
    """Yield spoofing-annotated JPEG frames from the global capture.

    Frames are wrapped for a multipart/x-mixed-replace (MJPEG) response;
    the generator ends when the capture stops delivering frames.
    """
    global fd, fas
    while True:
        ## read the camera frame
        success, frame = cap.read()
        if not success:
            print("Not success")
            break
        else:
            # detect spoofing face
            out_frame = infer_frame(spoof_model=fas, face_detector=fd, frame=frame)
            _, buffer = cv2.imencode(".jpg", out_frame)
            out_frame = buffer.tobytes()
        # One JPEG per multipart boundary.
        yield (b"--frame\r\n" b"Content-Type: image/jpeg\r\n\r\n" + out_frame + b"\r\n")
@app.route("/")
def index():
session["fas_model"] = FAS_MODELS[0]
session["face_detector"] = FACE_DETECTORS[0]
return render_template(
"home.html",
face_detectors=FACE_DETECTORS,
fas_models=FAS_MODELS,
)
@app.route("/", methods=["POST"])
def goto():
if request.form.get("upload") == "Upload":
return redirect(url_for("upload"))
elif request.form.get("camera") == "Camera":
return redirect(url_for("camera"))
elif request.form.get("mobile-phone-camera") == "Mobile phone camera":
return redirect(url_for("phonecamera"))
return redirect(url_for("index"))
@app.route("/back", methods=["GET"])
def backtohome():
global cap, cam_on
if cam_on:
cap.release()
cam_on = False
return redirect(url_for("index"))
@app.route("/upload", methods=["POST", "GET"])
def upload():
if request.method == "POST":
input_file = request.files["input_file"]
if is_image(input_file.filename):
path = get_media_file(input_file.filename)
input_file.save(path)
session["uploaded_img_path"] = path
return render_upload(iimg=path)
elif is_video(input_file.filename):
path = get_media_file(input_file.filename)
input_file.save(path)
session["uploaded_img_path"] = path
return render_upload(ivideo=path)
else:
return render_upload(noti="Please upload image or video file")
return render_upload()
@app.route("/camera", methods=["GET", "POST"])
def camera():
global cap, cam_on, fas, fd
if request.method == "GET":
session["fas_model"] = FAS_MODELS[0]
session["face_detector"] = FACE_DETECTORS[0]
if cam_on:
cap.release()
cam_on = False
return render_camera()
else:
if request.form.get("start") == "Start":
if (not fas) or (session["fas_model"] != request.form.get("fas-model-btn")):
session["fas_model"] = request.form.get("fas-model-btn")
fas = fas_model(session["fas_model"])
if (not fd) or (
session["face_detector"] != request.form.get("face-detector-btn")
):
session["face_detector"] = request.form.get("face-detector-btn")
fd = face_detector(session["face_detector"])
cam_on = True
cap = cv2.VideoCapture(0)
elif request.form.get("stop") == "Stop":
cap.release()
cam_on = False
return render_camera(selected_face_detector=session["face_detector"],
selected_fas_model=session["fas_model"])
@app.route("/phonecamera", methods=["GET", "POST"])
def phonecamera():
global cap, cam_on, fd, fas
if request.method == "GET":
session["fas_model"] = FAS_MODELS[0]
session["face_detector"] = FACE_DETECTORS[0]
if cam_on:
cap.release()
cam_on = False
return render_phonecamera()
else:
if request.form.get("start") == "Start":
if (not fas) or (session["fas_model"] != request.form.get("fas-model-btn")):
session["fas_model"] = request.form.get("fas-model-btn")
fas = fas_model(session["fas_model"])
if (not fd) or (
session["face_detector"] != request.form.get("face-detector-btn")
):
session["face_detector"] = request.form.get("face-detector-btn")
fd = face_detector(session["face_detector"])
cam_ip = request.form.get("cam_ip")
cap = cv2.VideoCapture("https://" + cam_ip + "/video")
cam_on = True
return render_phonecamera(cam_ip=cam_ip,
selected_face_detector=session["face_detector"],
selected_fas_model=session["fas_model"])
elif request.form.get("stop") == "Stop":
cap.release()
cam_on = False
return render_phonecamera(selected_face_detector=session["face_detector"],
selected_fas_model=session["fas_model"])
@app.route("/stream", methods=["GET"])
def stream():
return Response(
generate_frames(), mimetype="multipart/x-mixed-replace; boundary=frame"
)
@app.route("/submit", methods=["POST", "GET"])
def submit():
global fd, fas
if request.method == "POST":
if (not fas) or (session["fas_model"] != request.form.get("fas-model-btn")):
session["fas_model"] = request.form.get("fas-model-btn")
fas = fas_model(session["fas_model"])
if (not fd) or (
session["face_detector"] != request.form.get("face-detector-btn")
):
session["face_detector"] = request.form.get("face-detector-btn")
fd = face_detector(session["face_detector"])
output_path = os.path.join(
app.config["OUTPUT_FOLDER"],
os.path.basename(session["uploaded_img_path"]),
)
if is_image(session["uploaded_img_path"]):
fd_time, fas_time = infer_img(
spoof_model=fas,
face_detector=fd,
img_path=session["uploaded_img_path"],
save_path=output_path,
)
session["last_output_img"] = output_path
return render_upload(
selected_face_detector=session["face_detector"],
selected_fas_model=session["fas_model"],
iimg=session["uploaded_img_path"],
oimg=session["last_output_img"],
fd_time=fd_time,
fas_time=fas_time,
)
elif is_video(session["uploaded_img_path"]):
infer_video(
spoof_model=fas,
face_detector=fd,
vid_path=session["uploaded_img_path"],
save_path=output_path,
)
session["last_output_img"] = output_path
return render_upload(
ivideo=session["uploaded_img_path"],
ovideo=session["last_output_img"],
selected_face_detector=session["face_detector"],
selected_fas_model=session["fas_model"],
)
else:
return render_upload(
iimg=session["uploaded_img_path"],
oimg=session["last_output_img"],
selected_face_detector=session["face_detector"],
selected_fas_model=session["fas_model"],
)
# elif request.form.get("start") == "Start":
return redirect("/")
@app.route("/download", methods=["GET"])
def download():
return send_file(session["last_output_img"], as_attachment=True)
if __name__ == "__main__":
app.run()
| LananhTran302001/face-anti-spoofing-flaskapp | app.py | app.py | py | 10,387 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "fas.inferer",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,... |
21251905362 | """Parse the discussion wiki and archive the data in the database."""
import re
import pathlib
from operator import ior
from functools import reduce
from string import punctuation
from database import DatabaseDiscussion
from parser_wiki import Parser, Discussion
DISCUSSION_ENTRY_PATH = "src\\queries\\discussion\\add_discussion_entry.sql"
EPISODE_ENTRY_PATH = "src\\queries\\add_episodes.sql"
FILE_PATH = "data\\wiki\\anime\\discussion_archive_edited"
PERMALINK_AND_TEXT = re.compile(
r"(?:\|([^\|]*)\|\[[^\]]*\]\(https?:\/\/redd\.it\/(\w+)(?:\)|$)"
r"|\[([^\]]*)\]\([^\s]*comments\/(\w+)[^\)]*(?:\)|$)"
r"|\[([^\]]*)\]\(\/(\w+)(?:\)|$))"
)
HEADERS = re.compile(r"\|?([^\|]+)\|?")
CONTENTS_LINKS = re.compile(
r"\[[^\]]+\]\([^|]*(?:comments|redd\.it)?\/(\w+)[^\)]*(?:\)|$)"
)
LONG_RUNNING_PARSE = re.compile(r"\[([^\]]+)\]\([^\)]*\/(\w+)\/?\)")
# Compare with parser_wiki.TableParser to see which one to keep/improve.
class TableDiscussionParser:
    """Parse discussion wiki tables into {post_id: title} mappings.

    All three parsers return a dict of reddit post ids to episode titles;
    later rows/entries win on duplicate ids, matching the original merge
    order. The originals crashed with TypeError on an empty/headers-only
    table (``reduce`` over an empty sequence without an initializer); these
    iterate directly and return ``{}`` in that case.
    """

    @staticmethod
    def parse_table_one_header(table: list[str]) -> dict:
        """Parse a table that has a single header row.

        Contents can be in the form:
        - name | [text](link) (| repeat)
        - [name](link) (| repeat)"""
        ans: dict = {}
        for row in table[1:]:
            for entry in PERMALINK_AND_TEXT.findall(row):
                # Only the first regex alternative (groups 0/1) carries ids.
                if entry[1]:
                    ans[entry[1]] = entry[0]
        return ans

    @staticmethod
    def parse_table_alternate_headers(table: list[str]) -> dict:
        """Parse a table that alternates headers and contents.

        Contents have the form:
        - name (| repeat)
          [text](link) (| repeat)"""
        ans: dict = {}
        for header_row, link_row in zip(table[::2], table[1::2]):
            for title, contents in zip(header_row.split("|"), link_row.split("|")):
                links = CONTENTS_LINKS.findall(contents)
                if links and links[0]:
                    ans[links[0]] = title
        return ans

    @staticmethod
    def parse_table_no_headers(table: list[str]) -> dict:
        """Parse a table that has no header.

        Contents have the form:
        - [text](link) (| repeat)"""
        ans: dict = {}
        for row in table:
            for entry in LONG_RUNNING_PARSE.findall(row):
                if entry[1]:
                    ans[entry[1]] = entry[0]
        return ans
class ParserDiscussion(Parser):
    """Parser for episode discussion wiki pages."""

    def parse_file(self) -> None:
        """Parse the contents.

        The bullet delimiter changed across archive years, so dispatch on
        the file's year (or on the name for the long-running page).
        """
        if self.year in {2011, 2012, 2013, 2014, 2015, 2016}:
            self.parse_file_1(delimiter="* ")
        elif self.year in {2017, 2018, 2019, 2021, 2022}:
            self.parse_file_1(delimiter="**")
        elif self.year in {2020}:
            self.parse_file_1(delimiter="###")
        elif self.name == "long_running_anime":
            self.parse_file_1(delimiter="###")

    def parse_file_1(self, delimiter: str) -> None:
        """Parse the contents.

        Use the formatting for discussion archives years from 2011 to 2014.
        Lines starting with `delimiter` open a new series entry.
        """
        while not self.out_of_bounds:
            if self.current_line.startswith(delimiter):
                self.parse_entry(delimiter=delimiter)
            else:
                self.next_line()

    def parse_entry(self, delimiter: str) -> None:
        """Parse a discussion entry (one series and all its episode links)."""
        series_name = self.remove_formatting(self.current_line[2:])
        # print(self.year, series_name)
        discussion = Discussion(name=series_name, year=self.year)
        self.next_line()
        # Consume lines until the next series bullet or end of file.
        while (not self.out_of_bounds) and (
            not self.current_line.startswith(delimiter)
        ):
            if self.current_line.count("|") >= 1:
                if self.current_line.lstrip(punctuation + " ").startswith("Case"):
                    # "Case" rows: scrape links line by line.
                    while self.current_line.count("|") >= 1:
                        for pair in PERMALINK_AND_TEXT.findall(self.current_line):
                            title, post_id = (x for x in pair if x)
                            discussion.episodes[post_id] = title.strip()
                        self.next_line()
                else:
                    # Pick the table parser matching the table's layout.
                    if self.name == "long_running_anime":
                        table_parser = TableDiscussionParser.parse_table_no_headers
                    elif self.current_line.lstrip(punctuation + " ").startswith("Ep."):
                        table_parser = (
                            TableDiscussionParser.parse_table_alternate_headers
                        )
                    else:
                        table_parser = TableDiscussionParser.parse_table_one_header
                    table = self.read_table()
                    discussion.episodes |= table_parser(table)
            else:
                # Plain (non-table) line: scrape any inline links.
                for pair in PERMALINK_AND_TEXT.findall(self.current_line):
                    title, post_id = (x for x in pair if x)
                    discussion.episodes[post_id] = title.strip()
                self.next_line()
        if discussion.episodes:
            self.create_entry(discussion=discussion)

    def create_entry(self, discussion: Discussion) -> None:
        """Create a db entry (series row plus one row per episode)."""
        self._db.begin()
        try:
            with open(DISCUSSION_ENTRY_PATH, encoding="utf8") as f:
                self._db.q.execute(f.read(), discussion.info)
            series_id = self._db.last_row_id
            with open(EPISODE_ENTRY_PATH, encoding="utf8") as f:
                query = f.read()
            for post_id, episode in discussion.episodes.items():
                # print(self.year, series_id, discussion.name, post_id, episode)
                self._db.q.execute(
                    query, (series_id, post_id or None, self.remove_formatting(episode))
                )
            self._db.commit()
        except Exception as e:
            # Roll the whole series back on any failure.
            print(f"Exception: {e}")
            print(
                f"{self.year} - {series_id} - {discussion.name} - {post_id} - {episode}"
            )
            self._db.rollback()

    @property
    def year(self) -> int:
        """Return the year included in the file name (None if not numeric)."""
        file_name = pathlib.Path(self._file_path).stem
        try:
            return int(file_name)
        except ValueError:
            return None

    @property
    def name(self) -> str:
        """Return the file name."""
        return pathlib.Path(self._file_path).stem

    @staticmethod
    def parse_table() -> None:
        # Abstract hook from Parser; tables are handled by TableDiscussionParser.
        pass
if __name__ == "__main__":
# Episode discussions year 2011-2022
for y in range(2011, 2023):
print(f"Processing year {y}")
parser = ParserDiscussion(
f"{FILE_PATH}\\{y}.md", DatabaseDiscussion(path="data\\discussion.sqlite")
)
parser.parse_file()
# Episode discussions long running anime
parser = ParserDiscussion(
f"{FILE_PATH}\\long_running_anime.md",
DatabaseDiscussion(path="data\\discussion.sqlite"),
)
parser.parse_file()
| Manitary/r-anime-archive | src/parser_wiki_discussion.py | parser_wiki_discussion.py | py | 7,244 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 25,
... |
9679183776 | '''
some useful spark bot suff
'''
import os
import requests
import json
API_TEMPLATE = 'https://api.ciscospark.com/v1/{}'
MENTION_REGEX = r'<spark-mention.*?data-object-id="(\w+)".*?spark-mention>'
PERSON_ID = os.environ['PERSON_ID']
HEADERS = {
"Authorization": "Bearer {}".format(os.environ['TOKEN']),
"Content-Type": "application/json; charset=utf-8"
}
# To read messages other than those in which the bot is mentioned
ADMIN_HEADERS = {
"Authorization": "Bearer {}".format(os.environ['ADMIN_TOKEN']),
}
def get_person_info(person_id):
    """Fetch a Spark person's profile (admin token) and return the decoded JSON."""
    url = API_TEMPLATE.format('people/' + person_id)
    response = requests.get(url, headers=ADMIN_HEADERS)
    return json.loads(response.text)
def get_message_info(message_id):
    """Fetch a message's details (admin token) and return the decoded JSON."""
    url = API_TEMPLATE.format('messages/' + message_id)
    response = requests.get(url, headers=ADMIN_HEADERS)
    return json.loads(response.text)
def create_message(data):
    """Post a new message payload with the bot token; returns the raw response."""
    url = API_TEMPLATE.format('messages')
    return requests.post(url, json=data, headers=HEADERS)
def list_messages(room_id, limit=None):
    """List a room's messages (admin token), optionally capped at `limit`."""
    params = {'roomId': room_id}
    if limit is not None:
        params['max'] = limit
    response = requests.get(
        API_TEMPLATE.format('messages'),
        params=params,
        headers=ADMIN_HEADERS,
    )
    return json.loads(response.text)
def list_memberships(room_id):
    """List a room's memberships (admin token) as decoded JSON."""
    response = requests.get(
        API_TEMPLATE.format('memberships'),
        params={'roomId': room_id},
        headers=ADMIN_HEADERS,
    )
    return json.loads(response.text)
| msiddorn/spark-bot | bot_helpers.py | bot_helpers.py | py | 1,510 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"l... |
24987056441 | import csv
import datetime
import time
from datetime import date, timedelta
import netsvc
logger = netsvc.Logger()
if __name__ != '__main__':
from tools import config
else:
config={'addons_path':'/home/quentin/tinydev/cci/code/server/bin/addons'}
partner_dict = {}
partner_dict[''] = ''
dict_partner = {}
def _get_partner_id(char):
return char
def convert2utf(row):
    """Re-encode every value of a (Python 2) csv row dict from latin-1 to utf-8.

    Values are also stripped of surrounding whitespace. A falsy row (e.g.
    None) is returned unchanged.
    """
    if not row:
        return row
    return dict((key, value.decode('latin1').encode('utf8').strip())
                for key, value in row.items())
def get_first_day(dt, d_years=0, d_months=0):
    """Return the first day of dt's month, shifted by year/month deltas.

    d_years and d_months may be negative; month overflow carries into the
    year via divmod.
    """
    year = dt.year + d_years
    month = dt.month + d_months
    carry, month_index = divmod(month - 1, 12)
    return date(year + carry, month_index + 1, 1)
def get_last_day(dt):
    """Return the last day of dt's month (first day of next month minus one)."""
    return get_first_day(dt, 0, 1) + timedelta(-1)
def mkDateTime(dateString,strFormat="%Y-%m-%d"):
# Expects "YYYY-MM-DD" string
# returns a datetime object
eSeconds = time.mktime(time.strptime(dateString,strFormat))
return datetime.datetime.fromtimestamp(eSeconds)
def _get_tax_code_id(char):
if char == '':
return ''
tmp = []
for c in char.split(';'):
if c != '':
if c[0] == '+':
tmp.append(('+','l10n_be.vat_code_a'+c[2:4]))
else:
tmp.append(('-','l10n_be.vat_code_a'+c[2:4]))
return tmp
def construct_vat_dict(reader_vat_code, reader_vat, vat_dict):
    """Build {BOB vat code: tax-code/account mapping} from two csv readers.

    The first pass fills invoice/credit-note tax codes; the second adds the
    invoice/credit-note account columns, creating empty entries for codes
    unseen in the first pass.
    """
    count = 0
    for row in reader_vat_code:
        #fill the first line with False
        # NOTE(review): `count` is an int, so `count != "0"` is ALWAYS true
        # and the first row is never skipped, contrary to the comment above
        # — confirm whether `count != 0` was intended before changing it.
        if count != "0":
            if row['VSTORED,A,15']:
                vat_dict[row['VSTORED,A,15']] = {
                    'inv':_get_tax_code_id(row['VLBAINV,A,30']),
                    'vat':_get_tax_code_id(row['VLTAINV,A,30']),
                    'ref_inv':_get_tax_code_id(row['VLBACRE,A,30']),
                    'ref_vat':_get_tax_code_id(row['VLTACRE,A,30']),
                }
            else:
                vat_dict[''] = False
        count += 1
    count = 0
    for row in reader_vat:
        #fill the first line with False
        # NOTE(review): same always-true comparison as above.
        if count != "0":
            if row['VSTORED,A,15'] and vat_dict.has_key(row['VSTORED,A,15']):
                vat_dict[row['VSTORED,A,15']]['inv_account'] = row['VIMPINV,A,10']
                vat_dict[row['VSTORED,A,15']]['ref_account'] = row['VIMPCRE,A,10']
            else:
                vat_dict[row['VSTORED,A,15']]={
                    'inv':'',
                    'vat':'',
                    'ref_inv':'',
                    'ref_vat':'',
                    'inv_account':'',
                    'ref_account':'',
                }
        count += 1
    return vat_dict
# -=====================================-
# -= 1. Defining Structure and Mapping =-
# -=====================================-
# -= A. Chart of Accounts =-
def _check_code_4_usertype(x):
if x['ABALANCE,A,10'] == 'LIABILIT':
return 'account_type_liability'
if x['ABALANCE,A,10'] == 'ASSETS':
return 'account_type_asset'
if x['ABALANCE,A,10'] == 'FXASSETS':
return 'account_type_asset'
if x['ABALANCE,A,10'] == 'INCOME':
return 'account_type_income'
if x['ABALANCE,A,10'] == 'DISCINC':
return 'account_type_income'
if x['ABALANCE,A,10'] == 'EXPENSE':
return 'account_type_expense'
if x['ABALANCE,A,10'] == 'DISCEXP':
return 'account_type_expense'
if x['ABALANCE,A,10'] == 'UNDEF':
return 'account_type_root'
# if x['AID,A,10'].startswith('6'):
# return 'account_type_expense'
# if x['AID,A,10'].startswith('7'):
# return 'income'
return 'account_type_root'
def _check_code_4_type(x):
if x['AID,A,10'].startswith('40'):
if x['AID,A,10'].startswith('406'):
return 'payable'
return 'receivable'
if x['AID,A,10'].startswith('44'):
return 'payable'
if len(x['AID,A,10']) <= 4:
return 'view'
return 'other'
# Mapping: OpenERP account field -> extractor over a BOB accounts csv row.
# parent_id is left blank here and resolved later in import_account.
account_map = {
    'id': lambda x: 'account_'+x['AID,A,10'],
    'code': lambda x: x['AID,A,10'],
    'name': lambda x: x['HEADING1,A,40'],
    'note': lambda x: x['AMEMO,M,11'],
    'type': lambda x: _check_code_4_type(x),
    'user_type:id': lambda x: _check_code_4_usertype(x),
    'parent_id:id': lambda a: ''#'account_bob_0'
}
def import_account(reader, writer, mapping):
    """Convert the BOB accounts csv to an OpenERP import csv, inferring parents.

    The parent of an account is the longest strictly-shorter code prefix
    that also appears in the file; codes without one fall back to the root
    account 'account_bob_import.account_bob_0'.

    NOTE(review): the nesting below was reconstructed after indentation was
    lost in extraction — in particular which `if` the root-parent `else`
    pairs with, and whether single-character codes are appended; verify
    against the original file.
    """
    record = {}
    for key, column_name in mapping.items():
        record[key] = key
    writer.writerow(record)  # header row of field names
    temp_dict = {}
    list_ids = []
    list_rows = []
    for row in reader:
        record = {}
        for key,fnct in mapping.items():
            record[key] = fnct(convert2utf(row))
        temp_dict[record['code']]=record
        list_ids.append(record['code'])
    # Sort codes numerically so parents come before their children.
    temp_keys = map(lambda x: int(x),temp_dict.keys())
    temp_keys.sort()
    temp_str_keys = map(lambda x: str(x),temp_keys)
    for t in temp_str_keys:
        if len(t)>1:
            l = len(temp_dict[t]['code'])
            aa = range(l+1)
            aa.reverse()
            aa.pop()
            # Try progressively shorter code prefixes as the parent id.
            for i in aa:
                if temp_dict[t]['code'][0:i-1] in list_ids:
                    temp_dict[t]['parent_id:id'] = 'account_' + str(temp_dict[t]['code'][0:i-1])
                    break
        else:
            temp_dict[t]['parent_id:id'] = 'account_bob_import.account_bob_0'
        list_rows.append(temp_dict[t])
    writer.writerows(list_rows)
    return True
# -= B. Financial Journals =-
# Mapping: OpenERP account.journal field -> extractor over a BOB journals row.
journals_map = {
    'id' : lambda x: 'journal_'+x['DBID,A,4'],
    'code': lambda x: x['DBID,A,4'],
    'name': lambda x: x['HEADING1,A,30'],
    'view_id:id': lambda x: 'account.account_journal_view', # journal view for all except the ones that are of type cash => cash journal view
    'currency:id': lambda x: x['DBCURRENCY,A,3'],#to be check
    'sequence_id:id': lambda x: 'account.sequence_journal', #entry journal for all
    # BOB journal type -> OpenERP journal type; unknown types are 'general'.
    'type': lambda x: {
        'PUR': 'purchase',
        'PUC': 'purchase',
        'SAL': 'sale',
        'SAC': 'sale',
        'CAS': 'cash',
        'ISB': 'general',#default
        'PRI': 'general',#default
        'ISD': 'general',#default
        'ICO': 'general',#default
        'ISO': 'general',#default
        'PRO': 'general',#default
        'COP': 'general',#default
        'ISI': 'general',#default
        'ISM': 'general',#default
        'IDN': 'general',#default
        'ICE': 'general',#default
        '':'general'
        #else should be of 'general' type
    }[x['DBTYPE,A,3']],
    'default_debit_account_id:id':lambda x: x['DBACCOUNT,A,10'], #filled with the id of the account_account with code = x['DBACCOUNT,A,10'],
    'default_credit_account_id:id':lambda x: x['DBACCOUNT,A,10'] ,#filled with the id of the account_account with code =
}
def import_journal(reader_journal, writer_journal, journals_map):
    """Convert the BOB journals csv into an OpenERP account.journal import csv."""
    record = {}
    for key, column_name in journals_map.items():
        record[key] = key
    writer_journal.writerow(record)  # header row of field names
    for row in reader_journal:
        record = {}
        for key,fnct in journals_map.items():
            record[key] = fnct(convert2utf(row))
        # Turn raw account codes into the xml ids created by import_account.
        if record['default_debit_account_id:id']:
            record['default_debit_account_id:id'] = 'account_' + str(record['default_debit_account_id:id'])
        if record['default_credit_account_id:id']:
            record['default_credit_account_id:id'] = 'account_' + str(record['default_credit_account_id:id'])
        # Cash journals use the bank view instead of the generic one.
        if record['type']=='cash':
            record['view_id:id']='account.account_journal_bank_view'
        cur = ''
        if record['currency:id']:
            cur = 'base.' + record['currency:id'].upper()
        record['currency:id'] = cur
        writer_journal.writerow(record)
    return True
# -= C. Partners Data =-
#Beware: If 2 partners have the same name, we have to create only one partner with several adresses.
#We also have to record all their old names because they can be referenced in another files (e.g. the account_move_line one).
#That's the reason why we keep a dictionary to match the IDS.
def _get_cat(record):
#have to put the partner into category suppliers if CSUPTYPE,A,1 == 'S'
#have to put the partner into category customers if CCUSTYPE,A,1 == 'C'
res=[]
if 'CSUPTYPE,A,1' in record and record['CSUPTYPE,A,1'].upper() in ['S'] :
res.append('base.res_partner_category_8')
if 'CCUSTYPE,A,1' in record and record['CCUSTYPE,A,1'].upper() in ['C']:
res.append('base.res_partner_category_0')
return ','.join(res)
partners_map = {
'id':lambda x: x['CID,A,10'],
'ref': lambda x: x['CID,A,10'],
'name': lambda x: x['CNAME1,A,40'],
'lang': lambda x: {
#/!\ if a lang isn't installed, the value should be filled with ''
'E': 'en_US', #'E' for English
'D': 'de_DE',#'de_DE',#?? #'D' for German....de_DE
'F': 'fr_FR',#'fr_FR',#??#'F' for French..fr_FR
'N': 'nl_NL',#'nl_NL',#??#'N' for Dutch....nl_NL
'A': '',#no lang
'' : ''
}[x['CLANGUAGE,A,2']],
'vat': lambda x: x['CVATNO,A,12'],
'website': lambda x: x['HTTPADDRESS,A,60'],
'comment': lambda x: x['CMEMO,M,11'],
'domiciliation_bool': lambda x : x['CBANKORDERPAY,L,1'],
'domiciliation': lambda x : x['CBANKORDERPAYNO,A,15'],
'category_id:id':lambda x:_get_cat(x),
}
#have to create one res.partner.address for this partner with this
# Column map for res.partner.address; 'id' and 'partner_id:id' are filled in
# later by import_partner (sequential address id, resolved partner id).
partner_add_map = {
    'id' : lambda x: '',
    'city' : lambda x: x['CLOCALITY,A,40'],
    'fax': lambda x: x['CFAXNO,A,25'],
    'zip' : lambda x: x['CZIPCODE,A,10'],
    'country_id:id':lambda x: x['CCOUNTRY,A,6'], #filled with id of res.country that have code == x['CCOUNTRY,A,6']
    'phone' : lambda x: x['CTELNO,A,25'],
    'street' : lambda x: x['CADDRESS1,A,40'],
    'type' : lambda x: 'default',
    'partner_id:id':lambda x: ''
    }
#have to create res.partner.bank if x['CBANKNO,A,20'] <> False
# Column map for res.partner.bank; 'partner_id:id' is filled in later by
# import_partner once the partner id is known.
partner_bank_map = {
    'state': lambda x:'bank',#should be filled with id of res.Partner.bank.type that have name == 'Bank Account'
    'acc_number': lambda x: x['CBANKNO,A,20'],
    'partner_id:id':lambda x:''
    }
def import_partner(reader_partner, writer_partner, partners_map, writer_address, partner_add_map, writer_bank, partner_bank_map):
    """Convert BOB company rows into res.partner / address / bank CSV rows.

    Partners sharing the same name are merged: the first occurrence gets a
    'default' address and a partner row; later occurrences only get an
    'other' address.  Every BOB id is recorded in the module-level
    ``partner_dict`` so other files (e.g. account move lines) can resolve it.
    Rows with an empty name are skipped entirely.
    """
    # Header rows: DictWriter needs one row labelling each column with its key.
    record = {}
    record_address = {}
    record_bank = {}
    list_partners = {}  # partner name -> BOB id of the first row with that name
    for key in partners_map:
        record[key] = key
    for key in partner_add_map:
        record_address[key] = key
    for key in partner_bank_map:
        record_bank[key] = key
    writer_partner.writerow(record)
    writer_address.writerow(record_address)
    writer_bank.writerow(record_bank)
    count_address = 0
    for row in reader_partner:
        record = {}
        record_address = {}
        record_bank = {}
        for key, fnct in partners_map.items():
            record[key] = fnct(convert2utf(row))
        for key, fnct in partner_add_map.items():
            record_address[key] = fnct(convert2utf(row))
        partner_name = record['name']
        if partner_name != "":
            # Remember every language encountered (module-level list).
            if record['lang'] not in langs:
                langs.append(record['lang'])
            count_address = count_address + 1
            record_address['id'] = 'add' + str(count_address)
            if record['name'] in list_partners:
                # Partner already exists: only add an extra address.
                record_address['type'] = 'other'
                partner_dict[row['CID,A,10']] = list_partners[record['name']]
            else:
                # First time this name is seen: record it and write the partner.
                list_partners[record['name']] = row['CID,A,10']
                partner_dict[row['CID,A,10']] = record['id']
                dict_partner[record['ref']] = record_address['id']
                if not record['domiciliation_bool'] == '1':
                    record['domiciliation_bool'] = ''
                writer_partner.writerow(record)
            # Create a bank account if the BOB record carries an account number.
            if 'CBANKNO,A,20' in row and row['CBANKNO,A,20']:
                for key, fnct in partner_bank_map.items():
                    record_bank[key] = fnct(convert2utf(row))
                record_bank['partner_id:id'] = _get_partner_id(partner_dict[row['CID,A,10']])
                writer_bank.writerow(record_bank)
            # Always create the address ('default' if the partner is new,
            # 'other' otherwise).
            address = ''
            if record_address['country_id:id']:
                address = 'base.' + record_address['country_id:id'].lower()
            record_address['partner_id:id'] = _get_partner_id(partner_dict[row['CID,A,10']])
            record_address['country_id:id'] = address
            writer_address.writerow(record_address)
    return True
# -= D. Contacts Data =-
# Column map for res.partner.contact over one BOB "contacts.csv" row;
# 'id' is generated sequentially by import_contact.
contacts_map = {
    'id': lambda x:'' ,
    'first_name': lambda x: x['PFIRSTNAME,A,30'],
    'name': lambda x: x['PNAME,A,30'],
    'title': lambda x: {
        '0':'', #keep empty
        '1':'Mss', #should be the id of res.partner.title where name == 'Miss'
        '2':'Ms.', #should be the id of res.partner.title where name == 'Madam'
        '3':'M.', #should be the id of res.partner.title where name == 'Sir'
        '':'', #keep empty
        #~ #/!\ if an id cannot be found, the value should be ''
        }[x['PMF,A,1']],
    'mobile': lambda x: x['PGSM,A,25'],
#    'lang_id': lambda x: {
#        'E': 'English',#should be the id of res.lang where name == 'English'
#        'D': 'German',#should be the id of res.lang where name == 'German'
#        'F': 'French',#should be the id of res.lang where name == 'French'
#        'N': 'Dutch',#should be the id of res.lang where name == 'Dutch'
#        '': ''#for empty data.....
#
#        #~ #/!\ if an id cannot be found, the value should be ''
#        }[x['PLANGUAGE,A,2']],
    #~ #have to be linked to the default adress of the partner with code == x['PCID,A,10']
    }
# Column map for res.partner.job; address and contact ids are filled in by
# import_contact once they are known.
job_map = {
    #'id' : lambda x : '',
    'address_id:id' : lambda x:'',
    'contact_id:id' : lambda x:'',
    'function_id:id': lambda x:'account_bob_import.res_partner_function_bob',
    #'function_label' : lambda x:'' ...should be check...for cci users
    }
def import_contact(reader_contact, writer_contact, contacts_map, writer_job, job_map):
    """Convert BOB contact rows into res.partner.contact and res.partner.job
    CSV rows.

    Each contact gets a sequential id ('cont1', 'cont2', ...).  The job row
    links the contact to its partner's default address via the module-level
    ``dict_partner`` lookup, falling back to a placeholder address when the
    partner code is unknown.
    """
    # Header rows: one row labelling each column with its own key name.
    record = {}
    record_job = {}
    for key in contacts_map:
        record[key] = key
    for key in job_map:
        record_job[key] = key
    writer_contact.writerow(record)
    writer_job.writerow(record_job)
    count_contact = 0
    for row in reader_contact:
        record = {}
        record_job = {}
        for key, fnct in contacts_map.items():
            record[key] = fnct(convert2utf(row))
        for key, fnct in job_map.items():
            record_job[key] = fnct(convert2utf(row))
        count_contact = count_contact + 1
        record['id'] = "cont" + str(count_contact)
        record_job['contact_id:id'] = record['id']
        if row['PCID,A,10'] in dict_partner:
            record_job['address_id:id'] = dict_partner[row['PCID,A,10']]
        else:
            # Unknown partner code: attach the job to the generic BOB address.
            record_job['address_id:id'] = 'account_bob_import.res_partner_address_bob'
        writer_contact.writerow(record)
        writer_job.writerow(record_job)
    return True
# -= E. Periods and FY =-
# Column map for account.fiscalyear; one fiscal year per calendar YEAR value,
# always spanning Jan 1 - Dec 31.
fyear_map = {
    'id': lambda x: 'FY'+x['YEAR,I,4'],
    'date_stop': lambda x: x['YEAR,I,4']+'-12-31', #last day of x['YEAR,I,4']
    'date_start': lambda x: x['YEAR,I,4']+'-01-01',#first day of x['YEAR,I,4']
    'code': lambda x: 'FY'+x['YEAR,I,4'],
    'name': lambda x: 'Fiscal Year '+x['YEAR,I,4'],
    'state': lambda x: 'draft',
    }
def import_fyear(reader_fyear, writer_fyear, fyear_map):
    """Write one account.fiscalyear CSV row per distinct YEAR found in the
    BOB period file."""
    # Header row: each column labelled with its own key name.
    writer_fyear.writerow(dict((key, key) for key in fyear_map))
    # Parse the period csv file to find the fiscal years that must be created.
    seen_years = []
    distinct_rows = []
    for row in reader_fyear:
        year = row['YEAR,I,4']
        if year not in seen_years:
            seen_years.append(year)
            distinct_rows.append(row)
    # Create the fiscal years.
    for fyear in distinct_rows:
        writer_fyear.writerow(dict((key, fnct(convert2utf(fyear)))
                                   for key, fnct in fyear_map.items()))
    return True
# Column map for account.period; period boundaries are computed from the
# YEAR/MONTH columns via the get_first_day/get_last_day helpers.
periods_map = {
    'id': lambda x: 'period_'+x['YEAR,I,4']+"/"+x['MONTH,I,4'],
    'date_stop': lambda x: get_last_day(mkDateTime(x['YEAR,I,4']+"-"+x['MONTH,I,4']+"-01")).strftime("%Y-%m-%d"),#last day of x['MONTH,I,4']
    'date_start': lambda x:get_first_day(mkDateTime(x['YEAR,I,4']+"-"+x['MONTH,I,4']+"-01")).strftime("%Y-%m-%d"), #first day of x['MONTH,I,4']
    'name': lambda x: x['LABEL,A,8'],
    'state': lambda x: 'draft',
    'fiscalyear_id:id': lambda x: 'FY'+x['YEAR,I,4'],
    }
def import_period(reader_period, writer_period, period_map):
    """Write one account.period CSV row per BOB period, skipping month 0
    (BOB's opening period, which has no OpenERP equivalent)."""
    # Header row: each column labelled with its own key name.
    record = {}
    for key in period_map:
        record[key] = key
    writer_period.writerow(record)
    # (an unused ``period_rows`` list was removed here)
    for row in reader_period:
        # Only create periods if x['MONTH,I,4'] != 0.
        if row['MONTH,I,4'] != "0":
            record = {}
            for key, fnct in period_map.items():
                record[key] = fnct(convert2utf(row))
            writer_period.writerow(record)
    return True
# -= F. Reconcile =-
# Column map for account.move.reconcile built from ahisto matchings
# (prefix 'a' distinguishes them from the chisto 'c' reconciles).
arecon_map = {
    'id' : lambda x: 'a'+x['HID,A,10'].strip()+'_'+x['HMATCHNO,I,4'],
    'type': lambda x: 'bob imported',
    'name': lambda x: 'a'+x['HID,A,10'].strip()+'_'+x['HMATCHNO,I,4'],
    }
# Column map for account.move.reconcile built from chisto matchings.
crecon_map = {
    'id' : lambda x: 'c'+x['HID,A,10'].strip()+'_'+x['HMATCHNO,I,4'],
    'type': lambda x: 'bob imported',
    'name': lambda x: 'c'+x['HID,A,10'].strip()+'_'+x['HMATCHNO,I,4'],
    }
def import_areconcile(reader, writer, map):
    """Write account.move.reconcile rows from the ahisto matchings file.

    Returns a dict mapping a history-line key
    (HID+HDBK+HFYEAR+HMONTH/HDOCNO/HORDERNO) to its reconcile id, used
    later by import_moves_and_lines.  The ``map`` parameter name (which
    shadows the builtin) is kept for caller compatibility.
    """
    # Write the header of areconcile.
    record = {}
    for key in map:
        record[key] = key
    writer.writerow(record)
    matchings = {}  # renamed from ``dict`` to stop shadowing the builtin
    last_id = ""
    for row in reader:
        # NOTE(review): 'HMTACHNO_ID:ID' looks like a typo for 'HMATCHNO_ID:ID'
        # (compare import_creconcile) -- kept as-is since it must match the
        # CSV header of ahisto_matchings.csv; confirm against the source file.
        matchings[row['HID,A,10']+row['HDBK,A,4']+row['HFYEAR,A,5']+row['HMONTH,I,4']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']] = row['HMTACHNO_ID:ID']
        record = {}
        for key, fnct in map.items():
            record[key] = fnct(convert2utf(row))
        # Consecutive rows share the same reconcile id; write each id once.
        if last_id != record['id']:
            writer.writerow(record)
            last_id = record['id']
    return matchings
def import_creconcile(reader, writer, map):
    """Write account.move.reconcile rows from the chisto matchings file.

    Returns a dict mapping a customer/supplier history key
    (HTYPE+HID+HFYEAR+HDBK/HDOCNO/HORDERNO) to its reconcile id, used later
    by import_moves_and_lines.  The ``map`` parameter name (which shadows
    the builtin) is kept for caller compatibility.
    """
    # Write the header of creconcile.
    record = {}
    for key in map:
        record[key] = key
    writer.writerow(record)
    matchings = {}  # renamed from ``dict`` to stop shadowing the builtin
    last_id = ""
    for row in reader:
        matchings[row['HTYPE,A,1']+row['HID,A,10']+row['HFYEAR,A,5']+row['HDBK,A,4']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']] = row['HMATCHNO_ID:ID']
        record = {}
        for key, fnct in map.items():
            record[key] = fnct(convert2utf(row))
        # Consecutive rows share the same reconcile id; write each id once.
        if last_id != record['id']:
            writer.writerow(record)
            last_id = record['id']
    return matchings
# -= G. Move and Move_line =-
# Column map for account.move; the id embeds journal/fiscal-year/document
# number so several history lines of one document map to the same move.
move_map = {
    'id': lambda x: 'move_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4'],
    'journal_id:id': lambda x: 'journal_'+x['HDBK,A,4'],
    'state': lambda x: 'draft',
    'period_id:id': lambda x: 'period_'+x['HFYEAR,A,5']+"/"+x['HMONTH,I,4'],
    'ref': lambda x: '',
    }
def _check_debit(x):
if (float(x) > 0):
return float(x)
return 0
def _check_credit(x):
if (float(x) < 0):
return -(float(x))
return 0
def _get_ammount_currency(x):
    """Return the unsigned amount of the history line, or 0 for order no. 1.

    (Currently only referenced from the commented-out 'amount_currency'
    entry of move_line_map.)
    """
    if x['HORDERNO,I,4'] == '1':
        return 0
    return _check_debit(x['HAMOUNT,$,8']) + _check_credit(x['HAMOUNT,$,8'])
def _check_debit_vat(x, ref):
if ref.startswith('-'):
return 0
if (float(x) < 0):
return -(float(x))
return float(x)
def _check_credit_vat(x, ref):
if ref.startswith('-'):
if (float(x) < 0):
return -(float(x))
return float(x)
return 0
#def _get_ammount_currency_vat(x):
# if x['HORDERNO,I,4'] != '1':
# return _check_debit_vat(x['HTAX,$,8'],x['HAMOUNT,$,8']) - _check_credit_vat(x['HTAX,$,8'],x['HAMOUNT,$,8'])
# return 0
def _pick_vat_code(x, vat_dict, is_vat=False):
if is_vat:
if x['HDBTYPE,A,3'][2]=='C':
#the move is a refund
return vat_dict[x['HVATCODE,A,10']]['ref_vat']
return vat_dict[x['HVATCODE,A,10']]['vat']
if x['HDBTYPE,A,3'][2]=='C':
#the move is a refund
return vat_dict[x['HVATCODE,A,10']]['ref_inv']
return vat_dict[x['HVATCODE,A,10']]['inv']
def _pick_vat_account(x, vat_dict):
if x['HDBTYPE,A,3'][2]=='C':
#the move is a refund
return vat_dict[x['HVATCODE,A,10']]['ref_account'] and 'account_'+vat_dict[x['HVATCODE,A,10']]['ref_account'] or 'account_'+x['HID,A,10']
return vat_dict[x['HVATCODE,A,10']]['inv_account'] and 'account_'+vat_dict[x['HVATCODE,A,10']]['inv_account'] or 'account_'+x['HID,A,10']
def _create_vat_move(x, vat_dict, count):
    """Build the list of VAT move-line dicts for one taxed history line.

    The first VAT code yields the 'core' line (carrying the debit/credit of
    the tax amount); every further code yields an extra zero-balance line.

    NOTE(review): the ``count`` argument is immediately overwritten by the
    local ``count = 0`` below, so the value passed by the caller (tvacount)
    is ignored -- confirm whether that is intended.
    """
    res = []
    count = 0  # restarts numbering for each history line
    for vat_code in _pick_vat_code(x,vat_dict,True):
        count += 1
        if count == 1:
            res.append(_create_vat_move_core(x, vat_code, vat_dict, count))
        else:
            res.append(_create_vat_move_vat(x, vat_code, vat_dict, count, 'HTAX,$,8'))
    return res
def _create_vat_move_vat(x, vat_code, vat_dict, count,base_or_vat):
    """Build a zero-balance account.move.line dict carrying only a tax amount.

    *base_or_vat* names the column ('HBASE,$,8' or 'HTAX,$,8') whose absolute
    value, signed by vat_code[0], becomes the tax_amount; debit and credit
    stay '0'.  *count* suffixes the id to keep the extra lines unique.
    """
    return {
        'id': 'move_line_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4']+'/'+x['HORDERNO,I,4']+'/'+str(count),
        'currency_id': x['HCURRENCY,A,3'],
        'date_maturity': x['HDUEDATE,D,4'],
        'partner_id:id': _get_partner_id(partner_dict[x['HCUSSUP,A,10']]),
        'journal_id:id': 'journal_'+x['HDBK,A,4'],
        'tax_code_id:id': vat_code[1],
        'tax_amount': str(abs(float(x[base_or_vat])) * _get_float(vat_code[0])),
        'state': 'draft',
        'debit': '0',
        'credit': '0',
        'ref': x['HDOCNO,I,4'],
        'account_id:id': _pick_vat_account(x, vat_dict),
        'period_id:id': 'period_'+x['HFYEAR,A,5']+"/"+x['HMONTH,I,4'],
        'date': x['HDOCDATE,D,4'],
        'move_id:id': 'move_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4'],
        'name': x['HREM,A,40'] or '/',
#        'amount_currency': str(_get_ammount_currency_vat(x)),
        }
def _create_vat_move_core(x, vat_code, vat_dict, count):
    """Build the main VAT account.move.line dict for a taxed history line.

    Unlike _create_vat_move_vat this line carries the actual debit/credit of
    the tax amount (HTAX), with the side chosen from the sign of the base
    amount (HAMOUNT) via _check_debit_vat/_check_credit_vat.
    """
    return {
        'id': 'move_line_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4']+'/'+x['HORDERNO,I,4']+'/'+str(count),
        'currency_id': x['HCURRENCY,A,3'],
        'date_maturity': x['HDUEDATE,D,4'],
        'partner_id:id': _get_partner_id(partner_dict[x['HCUSSUP,A,10']]),
        'journal_id:id': 'journal_'+x['HDBK,A,4'],
        'tax_code_id:id': vat_code[1],
        'tax_amount': str(abs(float(x['HTAX,$,8'])) * _get_float(vat_code[0])),
        'state': 'draft',
        'debit': str(_check_debit_vat(x['HTAX,$,8'],x['HAMOUNT,$,8'])),
        'credit': str(_check_credit_vat(x['HTAX,$,8'],x['HAMOUNT,$,8'])),
        'ref': x['HDOCNO,I,4'],
        'account_id:id': _pick_vat_account(x, vat_dict),
        'period_id:id': 'period_'+x['HFYEAR,A,5']+"/"+x['HMONTH,I,4'],
        'date': x['HDOCDATE,D,4'],
        'move_id:id': 'move_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4'],
        'name': x['HREM,A,40'] or '/',
#        'amount_currency': str(_get_ammount_currency_vat(x)),
        }
#check if the movement is a VAT movement: return TRUE if the account code begins with '450' or '451'
def _is_vat_movement(x):
return x['HID,A,10'].startswith(('450','451','411'))
def _get_float(char):
if char == '-':
return -1
return 1
# Column map for account.move.line over one BOB history (ahisto) row.
# reconcile_id, tax_code_id and tax_amount are filled in later by
# import_moves_and_lines when matchings / VAT codes apply.
move_line_map = {
    #TODO check currency import
    #TODO (bugfix): create one currency BEF with value: 1 EUR = 40.3399 BEF
    'id': lambda x: 'move_line_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4']+'/'+x['HORDERNO,I,4'],
    'currency_id': lambda x: x['HCURRENCY,A,3'],
    'date_maturity': lambda x: x['HDUEDATE,D,4'],
    'partner_id:id': lambda x: _get_partner_id(partner_dict[x['HCUSSUP,A,10']]),
    'journal_id:id': lambda x: 'journal_'+x['HDBK,A,4'],
    'tax_code_id:id': lambda x:'',
    'tax_amount': lambda x:'',
    'state': lambda x: 'draft',
    #qd vente: <0 c'est credit et >0 c'est debit
    #qd achat: <0 c'est le credit et >0 c'est debit
    # (for both sales and purchases: negative amounts are credits,
    #  positive amounts are debits)
    'debit': lambda x: str(_check_debit(x['HAMOUNT,$,8'])),
    'credit': lambda x: str(_check_credit(x['HAMOUNT,$,8'])),
    'ref': lambda x: x['HDOCNO,I,4'],
    'account_id:id': lambda x: 'account_'+x['HID,A,10'],
    'period_id:id': lambda x: 'period_'+x['HFYEAR,A,5']+"/"+x['HMONTH,I,4'],
    'date': lambda x: x['HDOCDATE,D,4'],
    'move_id:id': lambda x: 'move_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4'],
    'reconcile_id:id': lambda x: '',
    'name': lambda x: x['HREM,A,40'] or '/',
#    'amount_currency': lambda x: str(_get_ammount_currency(x)),
    }
def import_moves_and_lines(reader_move, writer_move, writer, move_map, map, dict_ahisto, dict_chisto, vat_dict):
    """Convert BOB history rows into account.move and account.move.line CSVs.

    One account.move is written per document (deduplicated on the line id
    prefix); VAT lines (accounts 450/451/411) are skipped and regenerated
    from the taxed base lines instead.  Reconcile ids come from the
    *dict_ahisto*/*dict_chisto* lookups built by the reconcile importers.
    The ``map`` parameter name (shadowing the builtin) is kept for caller
    compatibility.
    """
    # Write the header of account.move.
    record = {}
    for key in move_map:
        record[key] = key
    writer_move.writerow(record)
    # Write the header of account.move.line.
    record = {}
    for key in map:
        record[key] = key
    writer.writerow(record)
    move_rows_ref = {}  # move-line id prefixes already seen (move dedup)
    count = 0
    tvacount = 0  # number of taxed lines processed so far
    for row in reader_move:
        count += 1
        if (count % 1000) == 0:
            # Progress trace.  NOTE(review): netsvc Logger.notifyChannel is
            # normally called with (name, level, msg); this one-argument call
            # looks wrong -- confirm against the logger defined at file top.
            logger.notifyChannel(count)
        if row['HCURRENCY,A,3'] not in currencies:
            currencies.append(row['HCURRENCY,A,3'])
        # Only create move and move_line for real periods (month != 0) and
        # non-empty, non-zero amounts.
        if row['HMONTH,I,4'] == "0" or row['HAMOUNT,$,8'] == "" or float(row['HAMOUNT,$,8']) == 0.0:
            continue
        temp = 'move_line_' + row['HDBK,A,4'] + '/' + row['HFYEAR,A,5'] + '/' + row['HDOCNO,I,4'] + '/' + row['HORDERNO,I,4']
        if temp not in move_rows_ref:
            # First line of this document: write the account.move.
            move_rows_ref[temp] = 'ok'
            record = {}
            for key, fnct in move_map.items():
                record[key] = fnct(convert2utf(row))
            writer_move.writerow(record)
        if _is_vat_movement(row):
            # VAT movements cannot be imported; they are generated from the
            # taxed base move lines below.
            continue
        # Build the account.move.line.
        record = {}
        for key, fnct in map.items():
            record[key] = fnct(convert2utf(row))
        # Resolve the reconciliation id, trying the ahisto matchings first.
        ahisto_key = row['HID,A,10'] + row['HDBK,A,4'] + row['HFYEAR,A,5'] + row['HMONTH,I,4'] + '/' + row['HDOCNO,I,4'] + '/' + row['HORDERNO,I,4']
        doc_type = row['HDBTYPE,A,3']
        if ahisto_key in dict_ahisto:
            record['reconcile_id:id'] = dict_ahisto[ahisto_key]
        elif doc_type == 'SAL' or doc_type == 'SAC':
            # Sales or sales refund: customer ('C') matchings.
            ckey = 'C' + row['HCUSSUP,A,10'] + row['HFYEAR,A,5'] + row['HDBK,A,4'] + '/' + row['HDOCNO,I,4'] + '/' + row['HORDERNO,I,4']
            if ckey in dict_chisto:
                record['reconcile_id:id'] = dict_chisto[ckey]
        elif doc_type == 'PUR' or doc_type == 'PUC':
            # Purchases or purchase refund: supplier ('S') matchings.
            skey = 'S' + row['HCUSSUP,A,10'] + row['HFYEAR,A,5'] + row['HDBK,A,4'] + '/' + row['HDOCNO,I,4'] + '/' + row['HORDERNO,I,4']
            if skey in dict_chisto:
                record['reconcile_id:id'] = dict_chisto[skey]
        else:
            # Other operations: search the customer matchings with orderno + 1.
            tmp = str(int(row['HORDERNO,I,4']) + 1)
            ckey = 'C' + row['HCUSSUP,A,10'] + row['HFYEAR,A,5'] + row['HDBK,A,4'] + '/' + row['HDOCNO,I,4'] + '/' + tmp
            if ckey in dict_chisto:
                record['reconcile_id:id'] = dict_chisto[ckey]
            # (the original had an unreachable PUR/PUC elif here -- this
            # branch is only entered when HDBTYPE is not PUR/PUC -- removed)
        # If this move line is taxed, fill the base tax columns and emit the
        # generated VAT lines.
        if row['HVATCODE,A,10']:
            tvacount += 1
            tmp_cnt = 0
            for vat_code in _pick_vat_code(row, vat_dict, False):
                tmp_cnt += 1
                if tmp_cnt == 1:
                    # First code goes on the base line itself.
                    record['tax_amount'] = str(abs(float(row['HBASE,$,8'])) * _get_float(vat_code[0]))
                    record['tax_code_id:id'] = vat_code[1]
                else:
                    # Further codes become extra zero-balance lines.
                    writer.writerow(convert2utf(_create_vat_move_vat(row, vat_code, vat_dict, count, 'HBASE,$,8')))
            # Generate the VAT movement lines.
            vat_move_list = _create_vat_move(row, vat_dict, tvacount)
            for vat_move in vat_move_list:
                writer.writerow(convert2utf(vat_move))
        writer.writerow(record)
    return True
# -=====================-
# -= 2. Importing DATA =-
# -=====================-
#specific part for CCI
# Load the BOB-id -> existing-partner-id conversion table (CCI specific).
# The empty key maps to '' so rows without a partner code resolve cleanly.
reader_partner_matching = csv.DictReader(file('_conv_bob_id.csv','rb'))
bob_conv_matching = {}
bob_conv_matching[''] = ''
for row in reader_partner_matching:
    bob_conv_matching[row['bob']] = row['partner']
def _get_partner_id(char):
    """Translate a BOB partner code via the conversion table; codes that are
    not in the table resolve to the 'res_partner_destroyed' placeholder."""
    return bob_conv_matching.get(char, 'res_partner_destroyed')
# 'GRAMME' is a known BOB code with no partner; map it to '' explicitly.
partner_dict['GRAMME'] = ''
#end of specific part
langs = []       # languages seen during the partner import
currencies = []  # currencies seen during the move import
def run():
    """Entry point: convert every BOB CSV export into OpenERP import CSVs.

    Reads the files under original_csv/ and writes one CSV per OpenERP model
    in the account_bob_import addon directory.  Order matters: accounts,
    journals, partners, fiscal years, periods and reconciliations must be
    produced before the moves that reference them.
    """
    # 1) chart of accounts
    reader_account = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/accoun.csv','rb'))
    writer_account = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.account.csv', 'wb'), account_map.keys())
    import_account(reader_account, writer_account, account_map)
    # 2) journals
    reader_journal = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/dbk.csv','rb'))
    writer_journal = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.journal.csv', 'wb'), journals_map.keys())
    import_journal(reader_journal, writer_journal, journals_map)
    # 3) partners, their addresses and bank accounts
    reader_partner = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/compan.csv','rb'))
    writer_partner = csv.DictWriter(file(config['addons_path']+'/account_bob_import/res.partner.csv', 'wb'), partners_map.keys())
    writer_address = csv.DictWriter(file(config['addons_path']+'/account_bob_import/res.partner.address.csv','wb'), partner_add_map.keys())
    writer_bank = csv.DictWriter(file(config['addons_path']+'/account_bob_import/res.partner.bank.csv','wb'), partner_bank_map.keys())
    import_partner(reader_partner, writer_partner, partners_map, writer_address, partner_add_map, writer_bank, partner_bank_map)
    # 4) contacts and their jobs
    reader_contact = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/contacts.csv','rb'))
    writer_contact = csv.DictWriter(file(config['addons_path']+'/account_bob_import/res.partner.contact.csv','wb'),contacts_map.keys())
    writer_job = csv.DictWriter(file(config['addons_path']+'/account_bob_import/res.partner.job.csv','wb'),job_map.keys())
    import_contact(reader_contact, writer_contact, contacts_map, writer_job, job_map)
    # 5) fiscal years and periods (both derived from period.csv)
    reader_fyear = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/period.csv','rb'))
    writer_fyear = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.fiscalyear.csv', 'wb'), fyear_map.keys())
    import_fyear(reader_fyear, writer_fyear, fyear_map)
    reader_period = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/period.csv','rb'))
    writer_period = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.period.csv', 'wb'), periods_map.keys())
    import_period(reader_period, writer_period, periods_map)
    #import the account_tax from vat.csv
    # constructing table account_tax => account_tax_code (for move and move_line)
    reader_vat_code = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/vatcas.csv','rb'))
    reader_vat = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/vat.csv','rb'))
    vat_dict = construct_vat_dict(reader_vat_code, reader_vat, {})
    # 6) reconciliations (ahisto + chisto matchings)
    reader_ahisto = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/ahisto_matchings.csv','rb'))
    writer_reconcile = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.move.reconcile-1.csv', 'wb'), arecon_map.keys())
    dict_ahisto = import_areconcile(reader_ahisto, writer_reconcile, arecon_map)
    reader_chisto = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/chisto_matchings.csv','rb'))
    writer_reconcile2 = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.move.reconcile-2.csv', 'wb'), crecon_map.keys())
    dict_chisto = import_creconcile(reader_chisto, writer_reconcile2, crecon_map)
    # 7) account moves and move lines (needs everything above)
    reader_move = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/ahisto.csv','rb'))
    writer_move = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.move.csv', 'wb'), move_map.keys())
    writer_move_line = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.move.line.csv', 'wb'), move_line_map.keys())
    import_moves_and_lines(reader_move, writer_move, writer_move_line, move_map, move_line_map, dict_ahisto, dict_chisto, vat_dict)
# Script entry point: run the full BOB -> OpenERP CSV conversion.
if __name__ == '__main__':
    run()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| factorlibre/openerp-extra-6.1 | account_bob_import/bob_import_step_2.py | bob_import_step_2.py | py | 34,361 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "netsvc.Logger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tools.config",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"lin... |
27765054450 | from django.test import RequestFactory
from django.test import Client
from test_plus.test import TestCase
from rest_framework.test import force_authenticate
from rest_framework.test import APIRequestFactory
from semillas_backend.users.factory import UserFactory
from wallet.factory import TransactionFactory
from wallet import views
class BaseWalletTestCase(TestCase):
    """Shared fixture: two users (with wallets) and one transaction from
    user1's wallet to user2's wallet."""

    def setUp(self):
        # Runs before every test; the factories create users with wallets.
        self.factory = APIRequestFactory()
        self.user1 = UserFactory()
        self.user2 = UserFactory()
        TransactionFactory(
            wallet_source=self.user1.wallet,
            wallet_dest=self.user2.wallet
        )
class WalletEndpointsTestCase(BaseWalletTestCase):
    """Integration tests for the wallet detail and transaction endpoints."""

    def test_get_wallet(self):
        """An authenticated owner can fetch their own wallet detail (200)."""
        # Generate a request search for "testing" key word
        # Attach the user to the request
        request = self.factory.get('/api/v1/wallet/owner/')
        force_authenticate(request, user=self.user1)
        response = views.UserWalletDetail.as_view()(request, owner_uuid=self.user1.uuid)
        # Expect: expect queryset of services ordered by proximity
        # self.make_user()
        self.assertEqual(
            response.status_code,
            200
        )

    def test_create_transaction_ok(self):
        """A valid transfer is created (201) and both balances are updated.

        NOTE(review): the expected balances 5/15 imply the setUp factory
        transaction leaves user1 at 10 -- confirm against TransactionFactory.
        """
        request = self.factory.post(
            '/api/v1/wallet/transactions/create/',
            {'user_source': self.user1.id,
             'user_dest': self.user2.id,
             'value': 5}
        )
        force_authenticate(request, user=self.user1)
        response = views.CreateTransaction.as_view()(request)
        # Balances are changed in the DB, so refresh the in-memory objects.
        self.user1.wallet.refresh_from_db()
        self.user2.wallet.refresh_from_db()
        self.assertEqual(
            response.status_code,
            201
        )
        self.assertEqual(
            self.user1.wallet.balance,
            5
        )
        self.assertEqual(
            self.user2.wallet.balance,
            15
        )

    def test_create_transaction_without_balance(self):
        """Transfers larger than the source balance are rejected (400)."""
        request = self.factory.post(
            '/api/v1/wallet/transactions/create/',
            {'user_source': self.user1.id,
             'user_dest': self.user2.id,
             'value': 25}
        )
        force_authenticate(request, user=self.user1)
        response = views.CreateTransaction.as_view()(request)
        self.assertEqual(
            response.status_code,
            400
        )

    def test_create_transaction_to_ourself(self):
        """Transfers with the same source and destination are rejected (400)."""
        # Same wallet on source and destination
        request = self.factory.post(
            '/api/v1/wallet/transactions/create/',
            {'user_source': self.user1.id,
             'user_dest': self.user1.id,
             'value': 1}
        )
        force_authenticate(request, user=self.user1)
        response = views.CreateTransaction.as_view()(request)
        self.assertEqual(
            response.status_code,
            400
        )

    def test_create_transaction_from_others_wallet(self):
        """A user cannot move money out of another user's wallet (401)."""
        # Same wallet on source and destination
        request = self.factory.post(
            '/api/v1/wallet/transactions/create/',
            {'user_source': self.user1.id,
             'user_dest': self.user2.id,
             'value': 1}
        )
        force_authenticate(request, user=self.user2)
        response = views.CreateTransaction.as_view()(request)
        # Expect: expect queryset of services ordered by proximity
        # self.make_user()
        self.assertEqual(
            response.status_code,
            401
        )
| sergimartnez/semillas_backend | wallet/tests/test_views.py | test_views.py | py | 3,513 | python | en | code | null | github-code | 6 | [
{
"api_name": "test_plus.test.TestCase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "rest_framework.test.APIRequestFactory",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "semillas_backend.users.factory.UserFactory",
"line_number": 18,
"usage_type... |
8938995188 | from django.core.management.base import BaseCommand, CommandError
from django.core.cache import cache
from utils import cronlog
from pom.scrape import laundry, menus, printers
class Command(BaseCommand):
    """Management command: scrape the named pom modules and cache the output."""

    args = '<modules to scrape>'
    help = 'Scrapes data and stores in memcached with a timestamp'

    def handle(self, *args, **options):
        """Scrape each requested module; log failures to stderr via cronlog."""
        scrape = {'laundry': laundry, 'menus': menus, 'printers': printers}
        for mod_name in args:
            try:
                mod = scrape[mod_name]
            except KeyError:
                self.stderr.write(cronlog.fmt("pom.scrape.%s does not exist" % mod_name))
                continue
            try:
                data = mod.scrape()
            except Exception:
                # Was a bare 'except:'; narrowed so KeyboardInterrupt and
                # SystemExit still propagate instead of being swallowed.
                self.stderr.write(cronlog.fmt("pom.scrape.%s failed to scrape/render" % mod_name))
                continue
            cache.set('pom.' + mod_name, data)
            self.stdout.write(cronlog.fmt("pom.scrape.%s scraped/rendered successfully" % mod_name))
| epkugelmass/USG-srv-dev | tigerapps/pom/management/commands/pom_scrape.py | pom_scrape.py | py | 992 | python | en | code | null | github-code | 6 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pom.scrape.laundry",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pom.scrape.menus",
"line_number": 11,
"usage_type": "name"
},
{
"api_na... |
14566034034 | from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from skill_quest_app.models import Course, CourseQuizResult, CourseQuiz, InterestQuiz, CourseEnrollment
from skill_quest_app.serializers import CourseSerializer, QuizResultSerializer, CourseQuizSerializer, ProfileSerializer, \
InterestQuizSerializer, CourseQuizResultSerializer, CourseEnrollmentSerializer
# Create your views here.
class ListCoursesView(APIView):
    """Read-only endpoint: one course (by id) or every course."""

    def get(self, request, id=None):
        if id:
            courses = Course.objects.filter(id=id)
        else:
            courses = Course.objects.all()
        serializer = CourseSerializer(courses, many=True, allow_empty=True)
        return Response(serializer.data)
class PostCoursesView(APIView):
    """Create a course from the posted payload."""

    def post(self, request):
        serializer = CourseSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        course = serializer.save()
        return Response(CourseSerializer(course).data, status=status.HTTP_201_CREATED)
@api_view(['PUT'])
def update_course(request, pk):
    """Update the course identified by *pk*.

    Bug fix: the original returned ``serializer.data`` even when validation
    failed; now invalid payloads get the errors with a 400, matching the
    other write views in this module.
    """
    data = request.data
    course = Course.objects.get(id=pk)
    serializer = CourseSerializer(course, data=data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE'])
def delete_course(request, pk):
    """Delete the course identified by *pk* and confirm with a message."""
    Course.objects.get(id=pk).delete()
    return Response("The course has been deleted...")
@api_view(['GET'])
def get_quiz_results(request):
    """Return every stored course-quiz result.

    Bug fixes: the original signature took no arguments, but @api_view always
    calls the view with the request (TypeError); and the queryset was
    serialized without ``many=True``.
    """
    quiz_results = CourseQuizResult.objects.all()
    read_serializer = QuizResultSerializer(quiz_results, many=True)
    return Response(read_serializer.data)
@api_view(['GET'])
def get_course_quiz(request, course_id):
    """Return the quiz with *course_id*, or every quiz when it is falsy.

    Bug fix: the original conditional tested the builtin ``id`` (always
    truthy), so the all-quizzes branch was unreachable; it now tests
    ``course_id`` as intended.
    """
    queryset = CourseQuiz.objects.filter(id=course_id) if course_id else CourseQuiz.objects.all()
    serializer = CourseQuizSerializer(queryset, many=True)
    return Response(serializer.data)
class ProfileView(APIView):
    """Retrieve or create profile data.

    NOTE(review): the GET branch queries the Course model but serializes with
    ProfileSerializer -- confirm whether a Profile queryset was intended.
    """

    def get(self, request, id=None):
        queryset = Course.objects.filter(id=id) if id else Course.objects.all()
        # Bug fix: a queryset must be serialized with many=True.
        read_serializer = ProfileSerializer(queryset, many=True)
        return Response(read_serializer.data)

    def post(self, request):
        create_serializer = ProfileSerializer(data=request.data)
        if create_serializer.is_valid():
            profile_object = create_serializer.save()
            # Bug fix: echo the created object back with ProfileSerializer
            # (the original re-serialized it with CourseSerializer).
            read_serializer = ProfileSerializer(profile_object)
            return Response(read_serializer.data, status=status.HTTP_201_CREATED)
        return Response(create_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def get_interest_quiz(request, id):
    """Return the interest quiz with *id*, or every interest quiz when falsy.

    Bug fix: the fallback branch queried CourseQuiz instead of InterestQuiz,
    so the InterestQuizSerializer was fed the wrong model.
    """
    queryset = InterestQuiz.objects.filter(id=id) if id else InterestQuiz.objects.all()
    serializer = InterestQuizSerializer(queryset, many=True)
    return Response(serializer.data)
@api_view(['POST'])
def post_interest_quiz(request):
    """Create an interest quiz from the posted payload.

    Bug fix: the original echoed the created object back through
    CourseSerializer; it is now re-serialized with InterestQuizSerializer,
    matching the serializer that created it.
    """
    create_serializer = InterestQuizSerializer(data=request.data)
    if create_serializer.is_valid():
        quiz_object = create_serializer.save()
        read_serializer = InterestQuizSerializer(quiz_object)
        return Response(read_serializer.data, status=status.HTTP_201_CREATED)
    return Response(create_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CourseQuizResultCreateView(APIView):
    """Create a course-quiz result from the posted payload."""

    def post(self, request):
        serializer = CourseQuizResultSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
@api_view(['GET'])
def get_enrolled_course(request):
    """Return every course enrollment."""
    enrollments = CourseEnrollment.objects.all()
    return Response(CourseEnrollmentSerializer(enrollments, many=True).data)
@api_view(['POST'])
def create_enrolled_course(request):
    """Create a CourseEnrollment from the posted payload.

    NOTE(review): only the ``description`` field is read; a payload without
    that key raises KeyError (-> 500).  Confirm whether the other enrollment
    fields were meant to be set here too.
    """
    data = request.data
    course = CourseEnrollment.objects.create(description=data['description'])
    serializer = CourseEnrollmentSerializer(course, many=False)
    return Response(serializer.data)
| HemitPatel/Skill_Quest_Backend | skill_quest_app/views.py | views.py | py | 4,263 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "skill_quest_app.models.Course.objects.filter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "skill_quest_app.models.Course.objects",
"line_number": 14,
"usa... |
34453088360 | import random
import atexit
import sys
import argparse
import time
from tracemalloc import start
# Command-line interface for the endless coin-flip streak simulator.
parser = argparse.ArgumentParser(description='Simulates coin flips')
parser.add_argument('--quiet','-q', action='store_true', help='Run in quiet mode. Do not print out new max streaks')
parser.add_argument('--total', '-t', action='store_true', help='Print total number of coins flipped.')
parser.add_argument('--count', '-c', action='store_true', help='Print the number of coins flipped since previous highest streak.')
# Hide the traceback on Ctrl-C; the atexit hook still prints the best streak.
sys.tracebacklimit = 0
# Shared simulation state, mutated by main() and read by the atexit hook.
curr_count = 1                     # length of the streak currently in progress
prev_coin = random.randint(0,1)    # most recent flip: 1 = heads, 0 = tails
max_count = 0                      # longest completed streak seen so far
max_count_value = 'none'           # which side the longest streak landed on
total_coins = 1                    # total flips (the first flip happened above)
streak_coins = 1                   # flips since the previous record streak
start_time = time.time()           # for the elapsed-seconds stamp in output
@atexit.register
def print_streak():
    """On interpreter exit, print the best streak and the elapsed seconds
    (skipped when no streak has completed yet)."""
    global curr_count, prev_coin, max_count, max_count_value
    if max_count > 0:
        print(f'{max_count} {max_count_value} [{round(time.time() - start_time, 2)}]')
def main():
    """Flip coins forever, tracking and reporting the longest run of
    identical flips. Runs until interrupted; the atexit handler prints
    the final record."""
    global curr_count, prev_coin, max_count, max_count_value, total_coins, streak_coins, start_time
    flags = parser.parse_args()
    while True:
        curr_coin = random.randint(0,1)
        total_coins += 1
        streak_coins += 1
        if curr_coin == prev_coin:
            curr_count += 1
        else:
            # Streak just ended: record it if it beats the current maximum.
            if max_count < curr_count:
                max_count_value = 'heads' if prev_coin else 'tails'
                max_count = curr_count
                if not flags.quiet:
                    print(f'New max streak {max_count} {max_count_value} [{round(time.time() - start_time, 2)}]')
                if flags.total:
                    print(f'Total coins flipped {total_coins}')
                if flags.count:
                    print(f'Coins flipped since last streak {streak_coins}')
                streak_coins = 0
            curr_count = 1
        prev_coin = curr_coin
# Script entry point.
if __name__ == '__main__':
    main()
| bwu2018/pointless-coinflip | sim.py | sim.py | py | 1,868 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.tracebacklimit",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.ti... |
2228249565 | import os
from pathlib import Path
import time
import random
class BuildTools():
    """File-generation helpers for scaffolding KoBash plugins.

    Methods are invoked directly on the class (e.g. ``Build.IDE('vscode')``),
    so none of them take ``self``. All file handles are now managed with
    ``with`` blocks so they are closed even if a write fails.
    """

    def NewPlugin():
        """Write a default plugin.json manifest into the current directory."""
        with open('plugin.json', "w") as a:
            a.write('{\n\n\t"MyAddonName": "TestName",\n\t"LocalDependencies": "KoBashToolkit.engine.enginestart"\n}')

    def IDE(ide):
        """
        Determines Config Based On IDE Type.
        """
        # NOTE(review): source indentation was ambiguous; both files are
        # treated as vscode-specific config here — confirm against callers.
        if ide == 'vscode':
            with open('vs.js', 'w') as a:
                a.write('const vs;')
            with open('Addon.json', 'w') as n:
                n.write('{\n\n\t"Launch.NetConfig": "--launch"\n}')

    def StartFiles():
        """Write a starter MyPlugin.py entry point for a new addon."""
        with open('MyPlugin.py', "w") as i:
            i.write('from KoBashToolkit.sharedtoolkits.cus import BuildTools as Build\n\n\ndef Plugin():\n\tBuild.IDE("vscode") # Change to what you want\n\tBuild.NewPlugin()')

    def EnCryp():
        """Create the encryption folder and print the (simulated) steps."""
        os.mkdir(".Net Encryption")
        print("Encrypting addon Files..")
        time.sleep(3)
        print('Started NET Encryption')
        print('Encrypted addon Data.')

    def LoadingSequence(type: int):
        """Print a simulated install progress sequence; only type 1 exists."""
        if type == 1:
            print('Loading Scripts..')
            time.sleep(random.randint(0, 10))
            print('Gathering addon.json...')
            time.sleep(random.randint(0, 10))
            print('Starting Python Lib..')
            time.sleep(random.randint(0, 10))
            print('Installed Successfully')
class Prompt():
    def require(module):
        # NOTE(review): builds a Path for "<module>.kobash" but never reads,
        # returns, or stores it — this looks unfinished; confirm intent.
        a = Path(str(module) + ".kobash")
| thekaigonzalez/kobash.old | KoBashToolkit/sharedtoolkits/buildTools/cus.py | cus.py | py | 1,503 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.mkdir",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 34... |
73477701627 | import functools
import pyopencl as cl
import numpy as np
from .sparsetensor import SparseFunction, SparseTensor
from .densetensor import GPUBuffer, DenseTensor
class GradData:
  """Plain container for sparse-gradient pieces: raw values, the x/y index
  buffers they belong to, and the logical shape of the gradient."""

  def __init__(self, data, xidx, yidx, shape):
    self.data, self.xidx, self.yidx = data, xidx, yidx
    self.shape = shape
def buffer_new(ctx, shape, zero=False, dtype=np.float32):
  """Allocate a GPU buffer of `shape`; host data is zero-filled when `zero`."""
  hostbuf = np.zeros(shape, dtype=dtype) if zero else None
  return GPUBuffer(shape, hostbuf=hostbuf)
def buffer_np(ctx, x):
  """Copy numpy array `x` into a new read-write OpenCL device buffer."""
  flags = cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR
  return cl.Buffer(ctx.cl_ctx, flags, hostbuf=x)
@functools.lru_cache
def clbuild(cl_ctx, name, prg):
  """Compile OpenCL source `prg` and return its kernel `name` (memoized,
  so each distinct source string is only built once per context)."""
  return cl.Program(cl_ctx, prg).build().__getattr__(name)
def uint2(x, y):
  """Pack (x, y) into an OpenCL uint2 vector value."""
  return np.array((x,y), dtype=cl.cltypes.uint2)
# Shorthand: cast scalars to 32-bit ints for kernel arguments.
i32 = np.int32
# ************* unary ops *************

def unary_op(ctx, code, x):
  """Apply the elementwise C expression `code` (in terms of `a`) to every
  element of buffer `x`, returning a new buffer of the same shape."""
  ret = buffer_new(ctx, x.shape)
  unop = clbuild(ctx.cl_ctx, "unop", """
  __kernel void unop(__global const float *a_g, __global float *res_g) {
    int gid = get_global_id(0);
    float a = a_g[gid];
    res_g[gid] = """+code+""";
  }""")
  unop(ctx.cl_queue, [np.prod(ret.shape)], None, x.cl, ret.cl)
  return ret
class ReLU(SparseFunction):
  def forward(ctx, input):
    """Elementwise max(x, 0)."""
    ctx.save_for_backward(input)
    return unary_op(ctx, 'max(a, (float)0.)', input)
  def backward(ctx, grad_output):
    input, = ctx.saved_tensors
    # Gradient passes through only where the input was non-negative.
    return binary_op(ctx, 'a * (b >= 0)', grad_output, input)
class Log(SparseFunction):
  def forward(ctx, input):
    """Elementwise natural logarithm."""
    ctx.save_for_backward(input)
    return unary_op(ctx, 'log(a)', input)
  def backward(ctx, grad_output):
    input, = ctx.saved_tensors
    # d/dx log(x) = 1/x, so grad_out / input.
    return binary_op(ctx, 'a / b', grad_output, input)
class Exp(SparseFunction):
  def forward(ctx, input):
    """Elementwise exponential; the output is saved since exp'(x) = exp(x)."""
    ret = unary_op(ctx, 'exp(a)', input)
    ctx.save_for_backward(ret)
    return ret
  def backward(ctx, grad_output):
    ret, = ctx.saved_tensors
    return binary_op(ctx, 'a * b', grad_output, ret)
# ************* reduce ops *************

def reduce_op(ctx, code, code2, inp, axis=None, start="0.0"):
  """Generic reduction over `inp`.

  `code` accumulates each element `a` into `out` (initialized from `start`);
  `code2` post-processes `out` before the store. `axis=None` reduces every
  axis down to a single scalar; otherwise the listed axes collapse to 1.
  """
  if axis is None:
    # full reduce
    osize = [1]*len(inp.shape)
  else:
    osize = np.array(inp.shape)
    osize[list(axis)] = 1
  ret = buffer_new(ctx, osize)
  if axis is None:
    ret.shape = (1,)

  # TODO: this is insanely slow
  reduce = clbuild(ctx.cl_ctx, "reduce", """
  __kernel void reduce(__global const float *a_g, int sz, __global float *res_g, int prod, int n_dims,
                       __global const int *shape_x, __global const int *shape_ret) {
    int gid = get_global_id(0);
    float out = """+start+""";
    for (int x = 0; x < sz; x++) {
      int idx = 0;  // compute index into a_g
      int tprod = prod;
      int tsz = sz;
      for (int dim = 0; dim < n_dims; dim++) {
        idx *= shape_x[dim];
        if (shape_x[dim] == shape_ret[dim]) {   // dim from gid, don't reduce
          tprod /= shape_x[dim];
          idx += (gid / tprod) % shape_x[dim];
        } else {  // dim from x
          tsz /= shape_x[dim];
          idx += (x / tsz) % shape_x[dim];
        }
      }
      float a = a_g[idx];
      """+code+""";
    }
    res_g[gid] = """+code2+""";
  }""")
  reduce(ctx.cl_queue, [np.prod(osize)], None, inp.cl,
    i32(np.prod(inp.shape)//np.prod(osize)), ret.cl,
    i32(np.prod(osize)), i32(len(osize)),
    buffer_np(ctx, np.array(inp.shape, dtype=np.int32)),
    buffer_np(ctx, np.array(osize, dtype=np.int32)))
  return ret
class Sum(SparseFunction):
  def forward(ctx, input, axis=None):
    """Sum over `axis` (all axes when None); reduced axes are dropped."""
    if isinstance(axis, int): axis = [axis]
    ctx.save_for_backward(input, axis)
    ret = reduce_op(ctx, "out += a", "out", input, axis=axis)
    if axis is not None:
      ret.shape = tuple([input.shape[i] for i in range(len(input.shape)) if i not in axis])
    return ret
  def backward(ctx, grad_output):
    input, axis = ctx.saved_tensors
    # Re-insert size-1 dims for the reduced axes, then broadcast the
    # incoming gradient back up to the input shape via a zero add.
    shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
    output = GPUBuffer(shape, hostbuf=grad_output)
    return binary_op(ctx, 'a+b', output, buffer_new(ctx, input.shape, zero=True))
class Max(SparseFunction):
  def forward(ctx, input, axis=None):
    """Maximum over `axis` (all axes when None); reduced axes are dropped."""
    if isinstance(axis, int): axis = [axis]
    ret = reduce_op(ctx, "out = max(a,out)", "out", input, axis=axis, start="-INFINITY")
    ctx.save_for_backward(input, axis, ret)
    if axis is not None:
      ret.shape = tuple([input.shape[i] for i in range(len(input.shape)) if i not in axis])
    return ret
  def backward(ctx, grad_output):
    input, axis, ret = ctx.saved_tensors
    shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
    # Mask of max locations; ties split the gradient equally (divide by the
    # count of maxima, with 1e-10 to avoid division by zero).
    ret2 = binary_op(ctx, "1.0*(a==b)", input, GPUBuffer(shape, ret))
    div = reduce_op(ctx, "out += a", "out+1e-10", ret2, axis=axis)
    ret3 = binary_op(ctx, "a/b", ret2, GPUBuffer(shape, div))
    return binary_op(ctx, 'a*b', ret3, GPUBuffer(shape, grad_output))
# ************* binary ops *************

@functools.lru_cache
def get_binop_prg(cl_ctx, code, complist):
  """Build (and cache) a broadcasting elementwise kernel.

  `complist` encodes, per collapsed dimension, whether x and y each have
  extent > 1 there; dimensions with extent 1 are skipped in that operand's
  flat-index expression, which is how broadcasting is realized.
  """
  ndims = len(complist)
  args = "".join([f", int d{i}" for i in range(ndims)] + [f", int p{i}" for i in range(ndims-1)])
  compute_idx_rets = "".join([f"\n int idx_ret{i} = (gid0 / {f'p{i}' if i < ndims-1 else '1'}) % d{i};" for i in range(ndims)])

  idx_exprs = ["0", "0"] # [idx_x, idx_y]
  for i in range(ndims):
    for j in range(2):
      if complist[i][j]:
        idx_exprs[j] = "idx_ret%d + d%d*(%s)" % (i, i, idx_exprs[j])

  return cl.Program(cl_ctx, """__kernel void binop(__global const float *x_g, __global const float *y_g, __global float *res_g"""+args+""") {
    int gid0 = get_global_id(0);"""+compute_idx_rets+"""
    float a = x_g["""+idx_exprs[0]+"""];
    float b = y_g["""+idx_exprs[1]+"""];
    res_g[gid0] = """+code+""";\n}""").build()
def binary_op(ctx, code, x, y):
  """Elementwise binary op with numpy-style broadcasting; `code` is a C
  expression over `a` (from x) and `b` (from y)."""
  n_dims = max(len(x.shape), len(y.shape))
  shape_x, shape_y = np.ones(n_dims, dtype=np.int32), np.ones(n_dims, dtype=np.int32)
  shape_x[:len(x.shape)] = np.array(x.shape, dtype=np.int32)
  shape_y[:len(y.shape)] = np.array(y.shape, dtype=np.int32)
  if not np.all((shape_x == 1) | (shape_y == 1) | (shape_x == shape_y)):
    raise Exception(f"binary op unbroadcastable shape mismatch: {x.shape} vs {y.shape}")
  shape_ret = np.maximum(shape_x, shape_y)

  dimlist, complist = [], [] # note: len(dimlist) may be less than n_dims
  def push(dim, comp):
    # Merge adjacent dimensions with identical broadcast behavior.
    if len(complist) > 0 and complist[-1] == comp:
      dimlist[-1] *= dim
    elif comp != (False, False):
      dimlist.append(dim); complist.append(comp)
  for i in range(n_dims): # group together any adjacent dimensions that we can to simplify broadcasting
    push(i32(max(shape_x[i], shape_y[i])), (shape_x[i] > 1, shape_y[i] > 1))

  prg = get_binop_prg(ctx.cl_ctx, code, tuple(complist))
  ret = buffer_new(ctx, shape_ret, zero=True)
  prod_list = np.array(dimlist, dtype=i32)[-1::-1].cumprod(dtype=i32)[-1::-1] # take cumprod from back to front
  prg.binop(ctx.cl_queue, [prod_list[0]] if len(dimlist) > 0 else [1], None, x.cl, y.cl, ret.cl, *dimlist, *(prod_list[1:]))
  return ret
def unbroadcast(ctx, out, in_sh):
  """Sum gradient `out` over the axes that were broadcast up from an input
  of shape `in_sh`, restoring the input's shape."""
  sum_axis = [i for i in range(len(in_sh)) if in_sh[i]==1 and out.shape[i]>1] if in_sh != (1,) else None
  return reduce_op(ctx, "out += a", "out", out, sum_axis)
class Add(SparseFunction):
  def forward(ctx, x, y):
    """Broadcasting elementwise addition."""
    ctx.save_for_backward(x.shape, y.shape)
    return binary_op(ctx, 'a+b', x, y)
  def backward(ctx, grad_output):
    # Gradient of + is identity for both operands, then un-broadcast.
    grad_x, grad_y = grad_output, grad_output
    shape_x, shape_y = ctx.saved_tensors
    return unbroadcast(ctx, grad_x, shape_x), unbroadcast(ctx, grad_y, shape_y),
class Sub(SparseFunction):
  def forward(ctx, x, y):
    """Broadcasting elementwise subtraction."""
    ctx.save_for_backward(x.shape, y.shape)
    return binary_op(ctx, 'a-b', x, y)
  def backward(ctx, grad_output):
    # d(x-y)/dx = 1, d(x-y)/dy = -1.
    grad_x, grad_y = grad_output, unary_op(ctx, '-a', grad_output)
    shape_x, shape_y = ctx.saved_tensors
    return unbroadcast(ctx, grad_x, shape_x), unbroadcast(ctx, grad_y, shape_y),
class Mul(SparseFunction):
  def forward(ctx, x, y):
    """Broadcasting elementwise multiplication."""
    ctx.save_for_backward(x, y)
    return binary_op(ctx, 'a*b', x, y)
  def backward(ctx, grad_output):
    x,y = ctx.saved_tensors
    # Product rule: each operand's gradient is the other operand times grad.
    grad_x = binary_op(ctx, 'a*b', y, grad_output)
    grad_y = binary_op(ctx, 'a*b', x, grad_output)
    return unbroadcast(ctx, grad_x, x.shape), unbroadcast(ctx, grad_y, y.shape),
class Pow(SparseFunction):
  def forward(ctx, x, y):
    """Broadcasting elementwise power x**y."""
    ctx.save_for_backward(x, y)
    return binary_op(ctx, 'pow(a,b)', x, y)
  def backward(ctx, grad_output):
    x,y = ctx.saved_tensors
    # d(x^y)/dx = y * x^(y-1); d(x^y)/dy = x^y * ln(x).
    grad_x = binary_op(ctx, 'a*b', grad_output,
                      binary_op(ctx, 'b * (pow((float)a, (float)(b-1.0)))', x, y))
    grad_y = binary_op(ctx, 'a*b', grad_output,
                      binary_op(ctx, 'pow(a, (float)b) * log(a);', x, y))
    return unbroadcast(ctx, grad_x, x.shape), unbroadcast(ctx, grad_y, y.shape),
# ************* movement ops *************

class Reshape(SparseFunction):
  def forward(ctx, x, shape):
    # Mutates the input buffer's shape in place and returns the same object.
    x.data.shape = tuple(shape)
    ctx.save_for_backward(x)
    return x
  def backward(ctx, grad_output):
    # NOTE(review): this returns the saved tensors rather than a reshaped
    # gradient, and ignores grad_output entirely — looks suspicious;
    # confirm intended behavior against the autograd driver.
    in_shape = ctx.saved_tensors
    return in_shape
def perm_axis(ctx, inp, order):
  """Permute the axes of `inp` according to `order` (like np.transpose)."""
  # print("PERM:", inp, order)
  osize = np.array(inp.shape)[list(order)]
  ret = buffer_new(ctx, osize)
  perm = clbuild(ctx.cl_ctx, "perm", """
  __kernel void perm(__global const float *a_g, __global float *res_g, int n_axis,
                     __global const int *shape, __global const int *order) {
    int gid = get_global_id(0);
    int gi = gid;
    int idx = 0;
    for(int i = n_axis-1; i>-1; i--) {
      int stride = 1;
      for(int j=order[i]+1; j<n_axis; j++) stride *= shape[j];
      idx += (gi % shape[order[i]])*stride;
      gi /= shape[order[i]];
    }
    res_g[gid] = a_g[idx];
  }""")
  perm(ctx.cl_queue, [np.prod(osize)], None, inp.cl, ret.cl, i32(len(osize)),
    buffer_np(ctx, np.array(inp.shape, dtype=np.int32)),
    buffer_np(ctx, np.array(order, dtype=np.int32)))
  # print("RAN")
  return ret
class Transpose(SparseFunction):
  """Transpose a SparseTensor by swapping its primary and dual ELL buffers."""
  def forward(ctx, x):
    # print("T FWD:", x)
    # The tensor already stores both orientations, so transposing is just a
    # relabeling of the data/datat buffer pairs — no data movement.
    newdata = {
      'data': x.datat,
      'idxs': x.idxst,
      'nnzs': x.nnzst,
      'ellw': x.ellwt,
      'datat': x.data,
      'idxst': x.idxs,
      'nnzst': x.nnzs,
      'ellwt': x.ellw,
    }
    # Bugfix: tuple(np.array(x.shape).T) was a no-op (transposing a 1-D
    # array returns it unchanged), so the shape was never reversed and was
    # only accidentally correct for square matrices. Reverse it explicitly.
    newshape = tuple(reversed(x.shape))
    ret = SparseTensor(from_datas=newdata, shape=newshape)
    return ret
  def backward(ctx, grad_output):
    return perm_axis(ctx, grad_output, np.argsort((1,0)))
# TODO: merge this with perm axis
def inner_slice(ctx, x, arg):
  """Slice `x` with per-axis (start, end) pairs in `arg`; out-of-range
  positions (negative starts / ends past the axis) read as 0.0 (padding)."""
  shift = [y[0] for y in arg]
  oshape = [y[1]-y[0] for y in arg]
  ret = buffer_new(ctx, oshape)
  gslice = clbuild(ctx.cl_ctx, "gslice", """
  __kernel void gslice(__global const float *input, __global float *output, int prod, int n_dims,
                       __global const int *shape_x, __global const int *shape_ret,
                       __global const int *shift) {
    int gid = get_global_id(0);
    int iptr = 0;
    int zero = 1;
    for (int dim = 0; dim < n_dims; dim++) {
      prod /= shape_ret[dim];
      int sidx = (gid / prod) % shape_ret[dim] + shift[dim];
      zero &= (sidx >= 0 && sidx < shape_x[dim]);
      iptr = (iptr * shape_x[dim]) + sidx;
    }
    output[gid] = zero ? input[iptr] : 0.0;
  }""")
  gslice(ctx.cl_queue, [np.prod(ret.shape)], None,
    x.cl, ret.cl, i32(np.prod(ret.shape)), i32(len(ret.shape)),
    buffer_np(ctx, np.array(x.shape, dtype=np.int32)),
    buffer_np(ctx, np.array(ret.shape, dtype=np.int32)),
    buffer_np(ctx, np.array(shift, dtype=np.int32)))
  return ret
class Slice(SparseFunction):
  def forward(ctx, x, arg=None):
    """Slice with per-axis (start, end) pairs; zero-pads out-of-range."""
    ctx.save_for_backward(x.shape)
    return inner_slice(ctx, x, arg)
  def backward(ctx, grad_output):
    shape, = ctx.saved_tensors
    # Invert the slice: shift the gradient back into the original extent
    # (ctx.arg is presumably stashed by the SparseFunction machinery).
    narg = [(0-p[0], grad_output.shape[i]+(shape[i]-p[1])) for i,p in enumerate(ctx.arg)]
    return inner_slice(ctx, grad_output, narg)
# ************* processing ops *************

class Matmul(SparseFunction): # input and weights are swapped, legacy..
  """Dense x sparse matrix multiply.

  forward computes input @ weight using the weight's transposed ELL buffers
  (datat/idxst/nnzst). backward produces a dense input gradient and a sparse
  weight gradient truncated to the weight's top-k rows/columns (selected by
  largest absolute row/column sums of input and grad_output respectively).
  """
  def forward(ctx, weight, input):
    # print("WEIGHT/input:", weight.shape, input.shape)
    # print(input.shape, weight.shape)
    # assert weight.shape[-2] == input.shape[-1]
    # if not weight.m:
    #   weight.m = DenseTensor(np.zeros((input.shape[0], weight.shape[1])))
    isize, msize, osize = i32(input.shape[-2]), i32(input.shape[-1]), i32(weight.shape[-1])
    outshape = np.array([input.shape[-2], weight.shape[-1]])
    # print("OUT:", outshape, isize, msize, osize)
    outdata = np.zeros(outshape)
    ret = DenseTensor(outdata)
    # ret = buffer_new(ctx.cl_ctx, outshape, zero=True)
    # print("RET:", ret)
    # print("RET:", input)
    matmul = clbuild(ctx.cl_ctx, "matmul", """
    // DENSE x SPARSE
    __kernel void matmul(__global float* matData, // INPUT MATRIX DATA
                         __global uint* colIdx,
                         __global uint* rowNnz,
                         uint ellwidth,
                         uint mwidth,
                         uint ncols,
                         __global float* vector_x, // INPUT
                         __global float* vector_y // OUTPUT
                         ) { // LOCAL SHARED BUFFER
      uint gid = get_global_id(0);
      uint nrows = get_global_size(0);
      for (uint gid2 = 0; gid2 < ncols; gid2++) {
        uint nnz = rowNnz[gid2];
        float sum = 0;
        for (uint i = 0; i < nnz; i++) {
          uint index = (gid2 * ellwidth) + i;
          uint col = colIdx[index];
          float aval = matData[index];
          float xval = vector_x[gid*mwidth+col];
          sum += aval * xval;
          //if (gid==0 && gid2==0)
          //  printf("aval, xval: %.2f,%.2f - %.2f: (%i,%i) \\n", aval, xval, sum, col, index);
        }
        //printf("SUM/NNZ: %.2f %i \\n", sum, nnz);
        vector_y[gid*ncols+gid2] = sum;
      }
    }""")
    ctx.save_for_backward(input, weight)
    # (isize,msize) x (msize,osize) = (isize,osize)
    matmul(ctx.cl_queue, [outshape.T[0]], None,
      weight.datat.cl, weight.idxst.cl, weight.nnzst.cl, np.uint32(weight.ellwt), np.uint32(msize), np.uint32(outshape.T[1]), input.cl, ret.data.cl)
    # resa = np.zeros(isize,osize).astype(np.float32)
    # cl.enqueue_copy(ctx.cl_queue, resa, ret.cl)
    # return ret.data
    # return trans_axis(ctx, ret.data, (1,0)) # print("RES:", resa)
    return ret.data

  def backward(ctx, grad_output):
    """Return (w_grad, grad_input): sparse top-k weight gradient and dense
    input gradient."""
    input, weight = ctx.saved_tensors
    topkx, topky = weight.topkx, weight.topky
    # print('BACK:', weight.shape, topkx, topky)
    isize, msize, osize = i32(input.shape[-2]), i32(input.shape[-1]), i32(weight.shape[-1])
    grad_input = DenseTensor(np.zeros(input.shape), dtype=np.float32)
    grad_weight = DenseTensor(np.zeros(weight.shape), dtype=np.float32)
    # print('GO:', input.shape, grad_output.shape)
    # print("OUTSHAPE:", weight.shape, input.shape[0], isize, msize, weight.ellwt)
    # grad_output = grad_output + weight.m

    # grad_input = grad_output @ weight^T (dense x sparse-transposed)
    matmul2 = clbuild(ctx.cl_ctx, "matmul2", """
    // DENSE x SPARSE-T
    __kernel void matmul2(__global float* matData, // INPUT MATRIX DATA
                          __global uint* colIdx,
                          __global uint* rowNnz,
                          uint ellwidth,
                          uint mwidth,
                          uint ncols0,
                          __global float* vector_x, // INPUT
                          __global float* vector_y // OUTPUT
                          ) { // LOCAL SHARED BUFFER
      uint gid = get_global_id(0);
      uint nrows = get_global_size(0);
      uint nnz = rowNnz[gid];
      uint gid2 = get_global_id(1);
      uint ncols = get_global_size(1);
      float sum = 0;
      for (uint i = 0; i < nnz; i++) {
        uint index = (gid2 * ellwidth) + i;
        uint col = colIdx[index];
        float aval = matData[index];
        float xval = vector_x[gid*mwidth+col];
        sum += aval * xval;
        //if (gid==1 && gid2==0) {
        //  printf("aval, xval: %.2f,%.2f - %.2f: (%i,%i) \\n", aval, xval, sum, col, index);
        //}
      }
      //printf("SUM/NNZ: %.2f %i \\n", sum, nnz);
      vector_y[gid*ncols+gid2] = sum;
    }""")
    # (isize,osize) x (msize,osize) = (isize,msize)
    # print('msize:', grad_output.shape, input.shape)
    matmul2(ctx.cl_queue, input.shape, None,
      weight.data.cl, weight.idxs.cl, weight.nnzs.cl, np.uint32(weight.ellw), np.uint32(grad_output.shape[1]), np.uint32(input.shape[0]), grad_output.cl, grad_input.data.cl)
    # resa = np.zeros((input.shape[1], input.shape[0])).astype(np.float32)
    # cl.enqueue_copy(ctx.cl_queue, resa, grad_input.data.cl)
    # print('INPUT', DenseTensor(input).cpu().data, weight.shape[0], weight.shape[1])
    # print('OUT:', grad_input.cpu().data)

    # Rank input rows by |column sum| and keep the indices of the topky largest.
    gettopkx = clbuild(ctx.cl_ctx, "gettopkx", """
    // multilplies x TRANSPOSED by y (dense-dense)
    __kernel void gettopkx(__global float* x, // INPUT MATRIX DATA
                           __global float* xsum, // INPUT
                           __global uint* youtidx, // OUT
                           uint topky,
                           uint msize
                           ) { // LOCAL SHARED BUFFER
      uint isize = get_global_size(0);
      int gidx = get_global_id(0); // row
      // get topk
      xsum[gidx] = 0;
      for (uint i=0; i<msize; i++) {
        float val = x[i*isize+gidx];
        //if (gid == 0) {
        //  printf("\\nADD VALx: %.2f - %i", val, i*msize+gid);
        //}
        xsum[gidx] += val;
      }
      float valx = xsum[gidx];
      uint posx = 0;
      for (uint i = 0; i < isize; i++) {
        float tempval = fabs(xsum[i]);
        bool larger = (tempval > fabs(valx)) || (fabs(tempval) == fabs(valx) && i < gidx);
        posx += (larger)?1:0;
      }
      if (posx < topky) {
        youtidx[posx] = gidx;
      }
    }""")
    # Rank grad_output columns by |column sum| and keep the topkx largest.
    gettopky = clbuild(ctx.cl_ctx, "gettopky", """
    // multilplies x TRANSPOSED by y (dense-dense)
    __kernel void gettopky(__global float* y, // INPUT
                           __global float* ysum, // INPUT
                           __global uint* xoutidx, // OUT
                           uint topkx,
                           uint msize
                           ) { // LOCAL SHARED BUFFER
      uint osize = get_global_size(0);
      int gidy = get_global_id(0); // row
      ysum[gidy] = 0;
      for (uint i=0; i<msize; i++) {
        float val = y[i*osize+gidy];
        ysum[gidy] += val;
      }
      //barrier(CLK_GLOBAL_MEM_FENCE);
      float valy = ysum[gidy];
      uint posy = 0;
      for (uint i = 0; i < osize; i++) {
        float tempval = fabs(ysum[i]);
        bool larger = (tempval > fabs(valy)) || (fabs(tempval) == fabs(valy) && i < gidy);
        posy += (larger)?1:0;
      }
      if (posy < topkx) {
        xoutidx[posy] = gidy;
      }
    }""")
    # Sort an array of uints ascending (rank-by-count approach).
    sortuints = clbuild(ctx.cl_ctx, "sortuints", """
    // multilplies x TRANSPOSED by y (dense-dense)
    __kernel void sortuints(__global uint* x, // INPUT MATRIX DATA
                            __global uint* xs // INPUT
                            ) { // LOCAL SHARED BUFFER
      uint isize = get_global_size(0);
      int gidx = get_global_id(0); // row
      uint val = x[gidx];
      uint posx = 0;
      for (uint i = 0; i < isize; i++) {
        uint tempval = x[i];
        bool smaller = tempval < val;
        posx += (smaller)?1:0;
      }
      xs[posx] = x[gidx];
    }""")
    # Weight gradient restricted to the selected rows/cols, ELL layout.
    matmul0 = clbuild(ctx.cl_ctx, "matmul0", """
    // multilplies x TRANSPOSED by y (dense-dense)
    __kernel void matmul0(__global float* x, // INPUT MATRIX DATA
                          __global float* y, // INPUT
                          __global uint* xidx, // INPUT YIDX
                          __global uint* yidx, // INPUT YIDX
                          __global float* resdata,// OUT
                          __global uint* rescols,
                          __global uint* resnnzs,
                          uint topkx,
                          uint ellw,
                          uint isize,
                          uint msize,
                          uint osize
                          ) { // LOCAL SHARED BUFFER
      uint topky = get_global_size(0);
      uint gidx = yidx[get_global_id(0)]; // row
      for (uint gidy0 = 0; gidy0 < topkx; gidy0++) {
        uint gidy = xidx[gidy0];
        float ret = 0.0;
        uint i;
        for (i = 0; i < msize; i++) {
          uint xidx = i*isize+gidx;
          float xval = x[xidx];
          uint yidx = osize*i+gidy;
          float yval = y[yidx];
          ret += xval*yval;
          //if (gidx==0 && gidy==0)
          //  printf("\\nmult: %.2f x %.2f - %.2f -- %i/%i", xval, yval, ret, xidx, yidx);
        }
        //if (gidx==0&&gidy==0)
        //  printf("\\nsum:%.2f", ret);
        // add for
        uint nnz = resnnzs[gidx];
        for (i = 0; i < nnz; i++) {
          if (rescols[i] >= gidy) {
            break;
          }
          for (uint j = nnz; j >= i; j--) {
            //resdata[j+1] = resdata[j];
          }
        }
        resdata[gidx * ellw + gidy0] = ret;
        rescols[gidx * ellw + gidy0] = gidy;
        resnnzs[gidx] += 1;
      }
    }""")
    # Same product, written into the transposed ELL buffers.
    matmul0t = clbuild(ctx.cl_ctx, "matmul0t", """
    // multilplies x TRANSPOSED by y (dense-dense)
    __kernel void matmul0t(__global float* x, // INPUT MATRIX DATA
                           __global float* y, // INPUT
                           __global uint* xidx, // INPUT YIDX
                           __global uint* yidx, // INPUT YIDX
                           __global float* resdata,// OUT
                           __global uint* rescols,
                           __global uint* resnnzs,
                           uint topky,
                           uint ellw,
                           uint isize,
                           uint msize,
                           uint osize
                           ) { // LOCAL SHARED BUFFER
      uint topkx = get_global_size(0);
      uint gidy = xidx[get_global_id(0)]; // row
      for (uint gidx0 = 0; gidx0 < topky; gidx0++) {
        uint gidx = yidx[gidx0];
        float ret = 0.0;
        uint i;
        for (i = 0; i < msize; i++) {
          uint xidx = i*isize+gidx;
          float xval = x[xidx];
          uint yidx = osize*i+gidy;
          float yval = y[yidx];
          ret += xval*yval;
          //if (gidx==0 && gidy==0)
          //  printf("\\nmult: %.2f x %.2f - %.2f -- %i/%i", xval, yval, ret, gidx, gidy,i);
        }
        //if (gidx==0&&gidy==0)
        //  printf("\\nsum:%.2f", ret);
        // add for
        uint nnz = resnnzs[gidx];
        for (i = 0; i < nnz; i++) {
          if (rescols[i] >= gidy) {
            break;
          }
          for (uint j = nnz; j >= i; j--) {
            //resdata[j+1] = resdata[j];
          }
        }
        resdata[gidy * ellw + gidx0] = ret;
        rescols[gidy * ellw + gidx0] = gidx;
        resnnzs[gidy] += 1;
      }
    }""")

    # Weight update
    isize = weight.shape[0]
    msize = grad_output.shape[0]
    osize = weight.shape[1]
    dim1 = weight.shape[1]#min(weight.shape[1], topkx)
    dim2 = weight.shape[0]#min(weight.shape[0], topky)
    # Scratch buffers for the top-k selection and the sparse result.
    x_sum_buf = DenseTensor(np.zeros(weight.shape[0]))
    y_sum_buf = DenseTensor(np.zeros(weight.shape[1]))
    x_idx_buf = DenseTensor(np.zeros(topkx), dtype=np.uint32)
    y_idx_buf = DenseTensor(np.zeros(topky), dtype=np.uint32)
    xs_idx_buf = DenseTensor(np.zeros(topkx), dtype=np.uint32)
    ys_idx_buf = DenseTensor(np.zeros(topky), dtype=np.uint32)
    sdata_buf = DenseTensor(np.zeros(weight.shape[0]*topkx))
    sidxs_buf = DenseTensor(np.zeros(weight.shape[0]*topkx), dtype=np.uint32)
    snnzs_buf = DenseTensor(np.zeros(weight.shape[0]), dtype=np.uint32)
    sdatat_buf = DenseTensor(np.zeros(weight.shape[1]*topky))
    sidxst_buf = DenseTensor(np.zeros(weight.shape[1]*topky), dtype=np.uint32)
    snnzst_buf = DenseTensor(np.zeros(weight.shape[1]), dtype=np.uint32)
    # print('IN', DenseTensor(input).cpu().data, weight.shape, input.shape[0])
    # print('INPUT', grad_output.cpu().data)
    # print('OUT', grad_input.cpu().data.sum())
    # print('asdf:', isize, msize, osize)
    gettopkx(ctx.cl_queue, [isize], None, input.cl, x_sum_buf.data.cl,
      y_idx_buf.data.cl, np.uint32(topky), np.uint32(msize))
    gettopky(ctx.cl_queue, [osize], None, grad_output.cl,
      y_sum_buf.data.cl, x_idx_buf.data.cl, np.uint32(topkx), np.uint32(msize))
    sortuints(ctx.cl_queue, [topkx], None, x_idx_buf.data.cl, xs_idx_buf.data.cl)
    sortuints(ctx.cl_queue, [topky], None, y_idx_buf.data.cl, ys_idx_buf.data.cl)
    matmul0(ctx.cl_queue, [topky], None, input.cl, grad_output.cl, xs_idx_buf.data.cl,
      ys_idx_buf.data.cl, sdata_buf.data.cl, sidxs_buf.data.cl, snnzs_buf.data.cl,
      np.uint32(topkx), np.uint32(topkx), np.uint32(isize), np.uint32(msize), np.uint32(osize))
    matmul0t(ctx.cl_queue, [topkx], None, input.cl, grad_output.cl, xs_idx_buf.data.cl,
      ys_idx_buf.data.cl, sdatat_buf.data.cl, sidxst_buf.data.cl, snnzst_buf.data.cl,
      np.uint32(topky), np.uint32(topky), np.uint32(isize), np.uint32(msize), np.uint32(osize))
    # x_sum_buf.data.cl.release()
    # y_sum_buf.data.cl.release()
    # sdata_buf.data.cl.release()
    # sidxs_buf.data.cl.release()
    # snnzs_buf.data.cl.release()
    # sdatat_buf.data.cl.release()
    # sidxst_buf.data.cl.release()
    # snnzst_buf.data.cl.release()
    # x_idx_buf.data.cl.release()
    # y_idx_buf.data.cl.release()
    # Assemble the sparse weight gradient from both ELL orientations.
    newdata = {
      'data': sdata_buf.data,
      'idxs': sidxs_buf.data,
      'nnzs': snnzs_buf.data,
      'ellw': topkx,
      'datat': sdatat_buf.data,
      'idxst': sidxst_buf.data,
      'nnzst': snnzst_buf.data,
      'ellwt': topky,
    }
    w_grad = SparseTensor(from_datas=newdata, shape=weight.shape)
    # gradpy = w_grad.to_numpy()
    # print('grad_max:', w_grad.shape, gradpy.sum())
    # gradpy = w_grad.to_numpy(dual=True)
    # print('grad_max:', w_grad.shape, gradpy.sum())
    # asdf
    # updatem = clbuild(ctx.cl_ctx, "updatem", """
    # // sorts x and y in ascending order and returns sorted indices
    # __kernel void updatem(__global float* m, // INPUT MATRIX DATA
    #                       __global float* grad, // INPUT MATRIX DATA
    #                       uint msize,
    #                       uint osize,
    #                       uint topk,
    #                       float scale,
    #                       __global uint* xoutidx,
    #                       __global uint* youtidx,
    #                       __global float* matData, // OUTPUT MATRIX DATA
    #                       __global uint* colIdx,
    #                       __global uint* rowNnz
    #                       ) {
    #   uint gid = get_global_id(0);
    #   uint nnz = rowNnz[gid];
    #   for (uint i=0; i<nnz; i++) {
    #     uint col = colIdx[gid*topk+i];
    #     float val = matData[gid*topk+i];
    #     m[osize*gid+col] = 0;
    #   }
    #   for (uint i=0; i<osize; i++) {
    #     m[osize*gid+i] = scale * grad[osize*gid+i];
    #   }
    # }""")
    # scale = 0.9
    # updatem(ctx.cl_queue, [grad_output.shape[0],], None,
    #   weight.m.data.cl, grad_output.data.cl, np.uint32(grad_input.shape[-1]), np.uint32(grad_output.shape[1]), np.uint32(topky), np.float32(scale), xs_idx_buf.data.cl, ys_idx_buf.data.cl,
    #   sdata_buf.data.cl, sidxs_buf.data.cl, snnzs_buf.data.cl)
    return w_grad, grad_input
def trans_axis(ctx, inp, order=(1,0)):
  """Intended 2-D transpose helper.

  NOTE(review): the kernel only zero-fills the output and never reads the
  input, and the function prints debug output — it looks unfinished and is
  only referenced from commented-out code; confirm before relying on it.
  """
  osize = np.array(inp.shape)[list(order)]
  ret = buffer_new(ctx, osize)
  trans = clbuild(ctx.cl_ctx, "trans", """
  __kernel void trans(__global float *a_g,
                      __global float *res_g,
                      uint width) {
    int row = get_global_id(0);
    for(uint i=0; i<width; i++) {
      //printf("\\nSET:%i-%i", row, i);
      res_g[row*width+i] = 0;
    }
  }""")
  trans(ctx.cl_queue, [osize[1]], None, inp.cl, ret.cl, np.uint32(osize[0]))
  print("PERM RET:", ret)
  return ret
class Conv2D(SparseFunction):
  """Grouped 2-D convolution (valid padding) with hand-written OpenCL
  kernels for forward and both gradients."""
  def forward(ctx, x, w, stride=1, groups=1):
    if isinstance(ctx.stride, int): ctx.stride = (ctx.stride, ctx.stride)
    cout,cin,H,W = w.shape
    ys,xs = ctx.stride
    bs,cin_,iy,ix = x.shape
    # Output spatial size for a valid (no-padding) strided convolution.
    oy,ox = (iy-(H-ys))//ys, (ix-(W-xs))//xs
    if cin*ctx.groups != cin_: raise Exception(f"Input Tensor shape {x.shape} does not match the shape of the weights {w.shape}. ({cin*ctx.groups} vs. {cin_})")
    assert cout % ctx.groups == 0
    rcout = cout//ctx.groups

    ctx.save_for_backward(x,w)

    # output buffer
    ret = buffer_new(ctx, (bs, cout, oy, ox))

    # input  = (bs, groups, cin, iy, ix)
    # weight = (groups, rcout, cin, H, W)
    # output = (bs, groups, rcout, oy, ox)

    conv = clbuild(ctx.cl_ctx, "conv", """
    __kernel void conv(__global const float *input, __global const float *weight, __global float *output,
      int H, int W, int groups, int rcout, int cin, int oy, int ox, int iy, int ix, int ys, int xs) {

      int B = get_global_id(0)/(groups*rcout);  // range 0-bs
      int g = (get_global_id(0)/rcout)%groups;
      int c = get_global_id(0) % rcout;

      int Y = get_global_id(1);  // range 0-oy
      int X = get_global_id(2);  // range 0-ox
      int IY = Y*ys;
      int IX = X*xs;

      float acc = 0.0;
      for (int ci = 0; ci < cin; ci++) {
        for (int y = IY; y < IY+H; y++) {
          for (int x = IX; x < IX+W; x++) {
            acc += input[B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + y*ix + x] * \
              weight[g*rcout*cin*H*W + c*cin*H*W + ci*H*W + (y-IY)*W + (x-IX)];
          }
        }
      }
      output[B*groups*rcout*oy*ox + g*rcout*oy*ox + c*oy*ox + Y*ox + X] = acc;
    }""")

    conv(ctx.cl_queue, [bs*groups*rcout, oy, ox], None,
      x.cl, w.cl, ret.cl,
      i32(H), i32(W), i32(groups), i32(rcout), i32(cin),
      i32(oy), i32(ox), i32(iy), i32(ix), i32(ys), i32(xs)
    )
    return ret

  def backward(ctx, grad_output):
    """Return (dx, dw): gradients w.r.t. the input and the weights."""
    bs,_,oy,ox = grad_output.shape
    x, w = ctx.saved_tensors
    cout,cin,H,W = w.shape
    ys,xs = ctx.stride
    bs,cin_,iy,ix = x.shape
    oy,ox = (iy-(H-ys))//ys, (ix-(W-xs))//xs
    assert cin*ctx.groups == cin_
    assert cout % ctx.groups == 0
    rcout = cout//ctx.groups

    dx = buffer_new(ctx, (bs, cin_, iy, ix), zero=True)
    dw = buffer_new(ctx, (cout, cin, H, W))

    # tensx = (bs, groups*cin, iy, ix)
    # tensw = (groups*rcout, cin, H, W)
    # ggg = (bs, groups*rout, oy, ox)

    convw = clbuild(ctx.cl_ctx, "convw", """
    __kernel void convw(__global const float *tensx, __global const float *ggg, __global float *dw,
      int H, int W, int groups, int rcout, int cin, int oy, int ox, int iy, int ix, int ys, int xs, int bs) {

      int g = get_global_id(0)/(rcout*cin) ; // range 0-groups
      int c = (get_global_id(0)/(cin)) %rcout; // range 0-rcout
      int ci = get_global_id(0) % cin;        // range 0-cin
      int y = get_global_id(1);  // range 0-H
      int x = get_global_id(2);  // range 0-W

      float acc = 0.0;
      for (int Y = 0; Y < oy; Y++) {
        for (int X = 0; X < ox; X++) {
          for (int B = 0; B < bs; B++) {
            acc += ggg[B*groups*rcout*oy*ox + +g*rcout*oy*ox + c*oy*ox + Y*ox + X] * \
              tensx[B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + (Y*ys+y)*ix + X*xs+x];
          }
        }
      }
      dw[get_global_id(0)*H*W + y*W + x] = acc;
    }""")
    convx = clbuild(ctx.cl_ctx, "convx", """
    __kernel void convx(__global const float *tensw, __global const float *ggg, __global float *dx,
      int H, int W, int groups, int rcout, int cin, int oy, int ox, int iy, int ix, int ys, int xs, int bs) {

      int B = get_global_id(0);
      int g = get_global_id(1);
      int ci = get_global_id(2);

      for (int Y = 0; Y < oy; Y++) {
        for (int X = 0; X < ox; X++) {
          for (int y = 0; y < H; y++) {
            for (int x = 0; x < W; x++) {
              float acc = 0.0;
              for (int c = 0; c < rcout; c++) {
                acc += ggg[B*groups*rcout*oy*ox + g*rcout*oy*ox + c*oy*ox + Y*ox + X] * \
                  tensw[g*rcout*cin*H*W + c*cin*H*W + ci*H*W + y*W + x];
              }
              dx[B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + (Y*ys+y)*ix + X*xs+x] += acc;
            }
          }
        }
      }
    }
    """)

    conv_args = i32(H), i32(W), i32(ctx.groups), i32(rcout), i32(cin), i32(oy), i32(ox), i32(iy), i32(ix), i32(ys), i32(xs), i32(bs)
    convw(ctx.cl_queue, [ctx.groups*rcout*cin, H, W], None, x.cl, grad_output.cl, dw.cl, *conv_args)
    convx(ctx.cl_queue, [bs, ctx.groups, cin], None, w.cl, grad_output.cl, dx.cl, *conv_args)
    return dx, dw
| fpaboim/tinysparse | tinygrad/ops_gpusparse.py | ops_gpusparse.py | py | 32,916 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "numpy.float32",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "densetensor.GPUBuffer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyopencl.Buffer"... |
18239861293 | #!/usr/bin/env python
import urllib2
from bs4 import BeautifulSoup
def main():
    """Crawl muslimnames.info and append every boys' and girls' name to
    names.txt: pages 1..boys_limit of the boys' index first, then pages
    1..girls_limit of the girls' index. (Python 2 / urllib2 script.)"""
    boys_limit = 265
    boys_url = 'http://www.muslimnames.info/baby-boys/islamic-boys-names-'
    girls_limit = 243
    girls_url = 'http://www.muslimnames.info/baby-girls/islamic-girls-names-'
    selector = 'boys'
    c = 1
    c_url = boys_url
    # `with` ensures the output file is flushed and closed on any exit.
    with open('names.txt', 'a') as output_file:
        # Bugfix: range's upper bound is exclusive, so the old
        # range(1, girls_limit + boys_limit) ran one iteration short and
        # silently skipped the last girls page. Iterate exactly
        # boys_limit + girls_limit times.
        for i in range(1, girls_limit + boys_limit + 1):
            if c > boys_limit:
                # Boys pages exhausted: restart the counter on the girls index.
                c = 1
                c_url = girls_url
                selector = 'girls'
            response = urllib2.urlopen(c_url + str(c) + '/')
            html = response.read()
            soup = BeautifulSoup(html, 'html.parser')
            for link in [link.string for link in soup.select('div.nrow_name.' + selector + ' a')]:
                output_file.write('%s\n' % link)
            c = c + 1
# Script entry point.
if __name__ == '__main__':
    main()
{
"api_name": "urllib2.urlopen",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
}
] |
21099019143 | # coding: utf-8
import blogSystem.models as blog_models
from django.shortcuts import render_to_response, RequestContext
import json
from django.db.models import Q
import time
from itertools import chain
import jieba
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
def search(req, tmp_name='postList.html'):
    """Full-text search view.

    Segments the query with jieba, matches posts on title/summary/content,
    deduplicates the combined result and paginates it.
    """
    page = req.GET.get('page', 1)
    limit = settings.PAGE_SIZE
    start = time.time()
    query = req.GET.get('q', '')
    qs = jieba.cut(query)
    qs = [q for q in list(qs) if q.strip()]
    # Compare the query with the one stored in the session: if it is the
    # same search repeated, hide the timing info, otherwise show it and
    # remember the new query.
    if req.session.get('query') == query:
        show = 'no'
    else:
        show = 'yes'
        req.session['query'] = query
    breads = [
        {'location': u'首页', 'href': '/'},
        {'location': u'搜索:%s'%query}
    ]
    s_list = []
    for q in qs:
        post = blog_models.Post.objects.filter(is_valid=1).filter(Q(title__icontains=q) | Q(summary__icontains=q) | Q(content__icontains=q))
        s_list.append(post)
    # BUGFIX: list(set(...)) returned the matches in arbitrary, run-to-run
    # nondeterministic order. Deduplicate while preserving the order in
    # which posts were first matched.
    seen = set()
    posts = []
    for p in chain.from_iterable(s_list):
        if p not in seen:
            seen.add(p)
            posts.append(p)
    paginator = Paginator(posts, limit)  # build the paginator
    try:
        post = paginator.page(page)  # fetch the records of the requested page
    except PageNotAnInteger:  # page number is not an integer
        post = paginator.page(1)  # fall back to the first page
    except EmptyPage:  # page number beyond the last page
        post = paginator.page(paginator.num_pages)  # fall back to the last page
    end = time.time()
    dic = {
        'breads': breads,
        'posts': post,
        'q': query,
        'time': str(round((end - start), 3)) + 's',
        'count': len(posts),
        'show': show
    }
    return render_to_response(tmp_name, dic, context_instance=RequestContext(req))
| zzlpeter/blog | blogSystem/search/views.py | views.py | py | 2,006 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.PAGE_SIZE",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 24,
"usage_type": "name"
},
{
"api_na... |
20831073732 | import random
from string import ascii_letters
from socialnet.models import Account, Post, PostImages, Avatar, Image, Following
from posts.models import Comments, Tag, PostTags, Upvote, Downvote
from django.core.management.base import BaseCommand, CommandError
def random_string(length):
    """Return a random string made of *length* ASCII letters."""
    letters = random.choices(population=ascii_letters, k=length)
    return "".join(letters)
def add_to_db_random_value():
    """Populate the database with randomly generated fake records:
    12 accounts, 3 tags, 12 posts, 15 post-tag links and 18 comments."""
    # Name pools the generated accounts draw from.
    firstname = ["Andrew", "Den", "John", "Ann", "Mary", "Molly"]
    lastname = ["Edison", "Brown", "Black", "White", "Snow", "Lincoln"]
    # Filler text reused as bio, as a word pool for titles/descriptions/
    # comments, and as a tag-name pool. Runtime string — kept verbatim.
    text = """
    Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore
    magna aliqua. Lorem ipsum dolor sit amet. Vel eros donec ac odio tempor orci. Consectetur adipiscing elit ut
    aliquam purus sit. Vulputate eu scelerisque felis imperdiet proin fermentum leo vel orci. Luctus accumsan tortor
    posuere ac ut consequat semper. Turpis egestas maecenas pharetra convallis posuere morbi leo urna.
    Cursus metus aliquam eleifend mi in nulla posuere sollicitudin. Tincidunt augue interdum velit euismod in
    pellentesque massa placerat duis. Auctor elit sed vulputate mi sit. Non enim praesent elementum facilisis.
    Tortor at risus viverra adipiscing at. Diam maecenas sed enim ut. Velit dignissim sodales ut eu sem integer
    vitae. Malesuada fames ac turpis egestas. Etiam dignissim diam quis enim lobortis scelerisque. Tortor id aliquet
    lectus proin nibh nisl condimentum id. Cursus metus aliquam eleifend mi in nulla posuere. Sit amet mauris
    commodo quis imperdiet massa tincidunt. Diam vel quam elementum pulvinar etiam non quam. Diam vel quam elementum
    pulvinar etiam non quam lacus. Eget felis eget nunc lobortis. Tellus rutrum tellus pellentesque eu tincidunt
    tortor. Et netus et malesuada fames ac turpis.
    """
    # 12 accounts with random names/emails/passwords (range(1, 13) == 12 items).
    # NOTE(review): passwords are stored as plain random strings, not hashed —
    # acceptable only because this is throwaway fake data.
    users = [Account(
        first_name=random.choice(firstname),
        last_name=random.choice(lastname),
        email=random_string(5) + "@" + random_string(4) + ".com",
        password=random_string(15),
        bio=text
    ) for _ in range(1, 13)]
    Account.objects.bulk_create(users)
    # Tags named after random words from the filler text (may repeat).
    tags = [Tag(name=random.choice(text.split())) for _ in range(3)]
    Tag.objects.bulk_create(tags)
    # Posts with a one-word title and a 20-50 word description.
    posts = [Post(title="".join(random.choice(text.split())),
                  description=" ".join(random.choices(population=text.split(), k=random.randint(20, 50))),
                  author=random.choice(users)
                  ) for _ in range(12)]
    Post.objects.bulk_create(posts)
    # Random post<->tag associations (duplicates possible).
    post_tags = [PostTags(post=random.choice(posts),
                          tag=random.choice(tags)) for _ in range(15)]
    PostTags.objects.bulk_create(post_tags)
    # 3-15 word comments attached to random posts by random users.
    comments = [Comments(body=" ".join(random.choices(population=text.split(), k=random.randint(3, 15))),
                         author=random.choice(users),
                         post=random.choice(posts)
                         ) for _ in range(18)]
    Comments.objects.bulk_create(comments)
class Command(BaseCommand):
    """Management command that wipes all app tables and, when the
    'fake_data' argument is given, refills them with random fake data."""

    def add_arguments(self, parser):
        # One or more positional tokens; their presence triggers data creation.
        parser.add_argument("fake_data", nargs='+', type=str)

    def handle(self, *args, **options):
        # Delete children before parents so FK constraints are respected.
        PostImages.objects.all().delete()
        Avatar.objects.all().delete()
        Image.objects.all().delete()
        PostTags.objects.all().delete()
        Tag.objects.all().delete()
        Post.objects.all().delete()
        Upvote.objects.all().delete()
        Downvote.objects.all().delete()
        Comments.objects.all().delete()
        Following.objects.all().delete()
        Account.objects.all().delete()
        if options["fake_data"]:
            add_to_db_random_value()
            self.stdout.write(self.style.SUCCESS("Successfully create"))
        else:
            # BUGFIX: the exception object was created but never raised, so
            # the failure branch silently did nothing.
            raise CommandError("Error")
| YevheniiMorozov/social | gramm/socialnet/management/commands/fake_data.py | fake_data.py | py | 3,984 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.choices",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "string.ascii_letters",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "socialnet.models.Account",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "random.cho... |
38930994169 | # type: ignore
from inspect import getmembers, isfunction
import re, typing
import traceback
from typing import Callable
from PySide2.QtWidgets import QWidget, QSplitter, QVBoxLayout, QSizePolicy, QMenu, QPushButton, QAction, QScrollArea
from PySide2.QtGui import QIcon
from PySide2.QtCore import Signal, QSize
import damaker
from damaker.pipeline import Operation
import damaker.processing
import damaker.stream
import damaker_gui
import damaker_gui.widgets as widgets
import rpy2.robjects as robjects
# TODO: test scroll for long parametters and connect it to the view
class FunctionListWidget(QSplitter, widgets.ITabWidget):
    """Tab widget listing all available processing operations.

    Left pane: category buttons, each opening a menu of operations.
    Right pane: a form for editing and running the selected operation.
    """
    name: str= "Operations"
    icon: str = u":/flat-icons/icons/flat-icons/services.svg"
    # Emitted with the selected function when a menu entry is triggered.
    operationTriggered = Signal(object)
    apply = Signal(Operation)
    @property
    def toolbar(self) -> list[widgets.ActionButton]:
        """Toolbar actions contributed by this tab (a single refresh button)."""
        return [widgets.ActionButton(self.reload, "Refresh Plugins", u":/flat-icons/icons/flat-icons/services.svg".replace("services", "refresh") if False else u":/flat-icons/icons/flat-icons/refresh.svg"),]
    def __init__(self, parent=None):
        super().__init__(parent)
        # Keeps references to (menu, button) pairs so Qt does not GC them.
        self.menus = []
        # -Function list widget-
        self.functionList = QWidget()
        self.functionListLayout = QVBoxLayout()
        self.functionListLayout.setMargin(0)
        self.functionListLayout.setSpacing(0)
        self.setMinimumWidth(150)
        self.functionList.setLayout(self.functionListLayout)
        self.addWidget(self.functionList)
        # Right-hand pane hosting the parameter form of the chosen operation.
        self.functionEdit = QWidget()
        self.functionEditLayout = QVBoxLayout()
        self.functionEdit.setLayout(self.functionEditLayout)
        self.addWidget(self.functionEdit)
        self.setHandleWidth(4)
        # NOTE(review): 'function' is not a defined name; harmless here because
        # local variable annotations are never evaluated at runtime.
        self.categories: dict[str, list[function]] = {}
        self.functions: dict[str, function] = {}
        self.loadFunctions()
        self.operationTriggered.connect(self.editFunction)
        # Pipeline widget to append operations to; set via connectPipeline().
        self.pipeline: widgets.PipelineWidget = None
    def editFunction(self, func: Callable):
        """Replace the edit pane with a form for *func*."""
        widgets.clearLayout(self.functionEditLayout, delete=True)
        self.functionEditLayout.addWidget(widgets.FunctionForm(Operation(func), self.onApply, self.addToPipeline))
    def onApply(self):
        """Run the currently edited operation and refresh all previews."""
        op = self.getOperation()
        print(f"🟢 Running operation: {op.name}")
        try:
            op.run()
        except Exception as e:
            print(f"🛑 Operation runtime error")
            print(traceback.format_exc())
        # self.apply.emit(self.getOperation())
        for preview in damaker_gui.MainWindow.Instance.getTabsByType(widgets.PreviewFrame):
            preview.view.updateFrame()
        print("✅ Operation finished.")
    def addToPipeline(self):
        """Append a copy of the current operation to the connected pipeline."""
        op = self.getOperation()
        if self.pipeline != None:
            self.pipeline.addOperation(op.copy())
        print("Added operation to pipeline ✔")
    def reload(self):
        """Clear the category buttons and re-scan all operation modules."""
        widgets.clearLayout(self.functionListLayout)
        self.menus.clear()
        self.loadFunctions()
        print("Reloaded operations ✔")
    def convert_func_rpy2py(self, name, funcR):
        # NOTE(review): unfinished stub — assigns a placeholder and returns
        # nothing; R-function conversion is not implemented yet.
        funcPy = FunctionListWidget._emptyFunc
    def loadFunctions(self):
        """Collect operations from damaker modules/plugins and build the
        per-category menu buttons."""
        damaker.plugins = damaker.importPlugins()
        self.functions = dict(getmembers(damaker.processing, isfunction))
        self.functions.update(dict(getmembers(damaker.stream, isfunction)))
        self.functions.update(dict(getmembers(damaker.plugins, isfunction)))
        # print(dict(getmembers(damaker.plugins, lambda obj: isinstance(obj, robjects.functions.Function))))
        self.categories = {"Plugins": []}
        for func in self.functions.values():
            # Names starting with '_' are private helpers, not operations.
            if func.__name__[0] == '_':
                continue
            # Docstrings may carry "Name:" / "Category:" annotations.
            name = re.findall('Name:\s*(.*)\n', str(func.__doc__))
            if len(name) > 0:
                func.alias = name[0]
            else:
                func.alias = func.__name__
            category = re.findall('Category:\s*(.*)\n', str(func.__doc__))
            if len(category) > 0:
                if not category[0] in self.categories.keys():
                    self.categories[category[0]] = []
                self.categories[category[0]].append(func)
            else:
                # Uncategorized functions are grouped under "Plugins".
                self.categories["Plugins"].append(func)
        for cat, funcs in self.categories.items():
            if len(funcs) == 0:
                continue
            menu = QMenu(cat)
            menu.setToolTipsVisible(True)
            # menu.setStyleSheet(_menuStyleSheet)
            for func in funcs:
                action: QAction = menu.addAction(func.alias)
                action.setToolTip(func.__doc__)
            menu.triggered.connect(lambda action: self.operationTriggered.emit(self.getFunction(action.text())))
            btn = QPushButton(cat)
            btn.setMinimumHeight(15)
            btn.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.MinimumExpanding)
            # btn.setStyleSheet("color: white;")
            btn.setMenu(menu)
            btn.clicked.connect(btn.showMenu)
            self.functionListLayout.addWidget(btn)
            # retain widgets in memory
            self.menus.append([menu, btn])
        # self.functionListLayout.addStretch()
    def _emptyFunc():
        # No-op fallback used where a callable is required.
        pass
    def getFunction(self, alias) -> Callable:
        """Look up an operation by its display alias; no-op func if absent."""
        for functions in self.categories.values():
            for func in functions:
                if func.alias == alias:
                    return func
        return FunctionListWidget._emptyFunc
    def getOperation(self) -> Operation:
        """Return the Operation built from the currently displayed form."""
        form: widgets.FunctionForm = self.functionEditLayout.itemAt(0).widget()
        widget: widgets.OperationWidget = form.operationWidget
        if issubclass(type(widget), widgets.OperationWidget):
            return widget.getOperation()
        print("No operation")
        return Operation(FunctionListWidget._emptyFunc)
    def connectPipeline(self, widget):
        """Remember a pipeline widget as the target for addToPipeline()."""
        if issubclass(type(widget), widgets.PipelineWidget) or issubclass(type(widget), widgets.PipelineViewer):
            self.pipeline = widget
    def disconnectPipeline(self, widget):
        """Forget the pipeline target when that widget is closed."""
        if self.pipeline == widget:
            self.pipeline = None
{
"api_name": "PySide2.QtWidgets.QSplitter",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "damaker_gui.widgets.ITabWidget",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "damaker_gui.widgets",
"line_number": 23,
"usage_type": "name"
},
{
... |
1693464120 | import discord
from discord.ext import commands
from discord import app_commands
class Ping(commands.Cog):
    """Cog exposing a /ping slash command that reports gateway latency."""
    def __init__(self, client):
        # Bot instance; used to read the websocket latency.
        self.client = client
    @app_commands.command()
    async def ping(self, interaction: discord.Interaction):
        """Shows the latency of the bot (doesn't really matter tbh)"""
        # NOTE(review): client.latency is in seconds; converted here to whole
        # milliseconds. The docstring above doubles as the slash-command
        # description shown in Discord, so it is left unchanged.
        await interaction.response.send_message(f'Ping: {round(self.client.latency * 1000)}ms')
async def setup(client):
    # Extension entry point called by discord.py: registers the Ping cog.
    await client.add_cog(Ping(client))
| megachickn101/nphc-discord-bot | cogs/ping.py | ping.py | py | 490 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_nam... |
# Standard library
import argparse
import copy
import json
import logging
import os
import queue
import re
import subprocess
import sys
import threading
import time
import webbrowser
#import multiprocessing as mp
#import sched
from collections import OrderedDict
from datetime import datetime
from operator import itemgetter
from pathlib import Path
from shutil import copyfile, which

# Third-party
import dateutil
import dateutil.parser  # 'import dateutil' alone does not load the parser submodule
from jinja2 import Environment, PackageLoader, select_autoescape
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
from watchdog.events import FileSystemEventHandler

# Local
from .version import __version__
from .statsparser import get_argument_parser as sp_get_argument_parser
from .statsparser import parse_args as sp_parse_args
from .helper import initLogger, resources_dir, get_script_dir, hostname, ArgHelpFormatter, r_file, r_dir, rw_dir, defaults, jinja_env
ALL_RUNS = {}
ALL_RUNS_LOCK = threading.RLock()
SP_DIRS = {}
SP_DIRS_LOCK = threading.RLock()
MUX_RESULTS = {}
MUX_RESULTS_LOCK = threading.RLock()
UPDATE_OVERVIEW = False
UPDATE_OVERVIEW_LOCK = threading.RLock()
logger = None
class parse_statsparser_args(argparse.Action):
    """argparse Action that validates a statsparser argument string by
    test-parsing it with statsparser's own parser, then stores the raw
    token list on the namespace."""
    def __call__(self, parser, namespace, values, option_string=None):
        to_test = values.split(' ')
        argument_parser = sp_get_argument_parser()
        # Parse purely for validation: invalid tokens make statsparser's
        # parser error out here instead of later at run time. The parsed
        # result itself is intentionally discarded.
        args = sp_parse_args(argument_parser, to_test)
        setattr(namespace,self.dest,to_test)
def parse_args():
    """Build the dominION command-line parser, parse sys.argv, and derive
    the watchnchop argument list from the chosen options.

    Returns the populated argparse.Namespace (with an extra
    'watchnchop_args' attribute).
    """
    parser = argparse.ArgumentParser(description='''A tool for monitoring and protocoling sequencing runs
                                                 performed on the Oxford Nanopore Technologies GridION
                                                 sequencer and for automated post processing and transmission
                                                 of generated data. It collects information on QC and
                                                 sequencing experiments and displays summaries of mounted
                                                 flow cells as well as comprehensive reports about currently
                                                 running and previously performed experiments.''',
                                     formatter_class=ArgHelpFormatter,
                                     add_help=False)
    general_group = parser.add_argument_group('General arguments',
                                              "arguments for advanced control of the program's behavior")
    general_group.add_argument('-n', '--no_transfer',
                               action='store_true',
                               help='''no data transfer to the remote host''')
    general_group.add_argument('-a', '--all_fast5',
                               action='store_true',
                               help='''also put fast5 files of reads removed by length and quality
                               filtering into barcode bins''')
    general_group.add_argument('-p', '--pass_only',
                               action='store_true',
                               help='''use data from fastq_pass only''')
    general_group.add_argument('-l', '--min_length',
                               type=int,
                               default=1000,
                               help='''minimal length to pass filter''')
    general_group.add_argument('-r', '--min_length_rna',
                               type=int,
                               default=50,
                               help='''minimal length to pass filter for rna libraries''')
    general_group.add_argument('-q', '--min_quality',
                               type=int,
                               default=5,
                               help='''minimal quality to pass filter''')
    general_group.add_argument('-d', '--rsync_dest',
                               default="{}@{}:{}".format(defaults()["user"], defaults()["host"], defaults()["dest"]),
                               help='''destination for data transfer with rsync, format USER@HOST[:DEST].
                               Key authentication for the specified destination must be set up (see option -i),
                               otherwise data transfer will fail. Default value is parsed from setting
                               file {}'''.format(os.path.join(resources_dir, "defaults.ini")))
    general_group.add_argument('-i', '--identity_file',
                               default="{}".format(defaults()["identity"]),
                               help='''file from which the identity (private key) for public key authentication is read.
                               Default value is parsed from setting file {}'''.format(os.path.join(resources_dir, "defaults.ini")))
    general_group.add_argument('--bc_kws',
                               nargs='*',
                               default=['RBK', 'NBD', 'RAB', 'LWB', 'PBK', 'RPB', 'arcod'],
                               help='''if at least one of these key words is a substring of the run name,
                               porechop is used to demultiplex the fastq data''')
    general_group.add_argument('-u', '--update_interval',
                               type=int,
                               default=300,
                               help='minimum time interval in seconds for updating the content of a report page')
    general_group.add_argument('-m', '--ignore_file_modifications',
                               action='store_true',
                               help='''Ignore file modifications and only consider file creations regarding
                               determination of the latest log files''')
    io_group = parser.add_argument_group('I/O arguments',
                                         'Further input/output arguments. Only for special use cases')
    io_group.add_argument('-o', '--output_dir',
                          action=rw_dir,
                          default="/data/dominION/",
                          help='Path to the base directory where experiment reports shall be saved')
    # Action instance kept so the same validation can be re-applied below.
    arg_data_basedir = \
    io_group.add_argument('--data_basedir',
                          action=rw_dir,
                          default='/data',
                          help='Path to the directory where basecalled data is saved')
    io_group.add_argument('--minknow_log_basedir',
                          action=r_dir,
                          default='/var/log/MinKNOW',
                          help='''Path to the base directory of GridIONs log files''')
    io_group.add_argument('--logfile',
                          help='''File in which logs will be safed
                          (default: OUTPUTDIR/logs/YYYY-MM-DD_hh:mm_HOSTNAME_LOGLVL.log''')
    sp_arguments = parser.add_argument_group('Statsparser arguments',
                                             'Arguments passed to statsparser for formatting html reports')
    sp_arguments.add_argument('--statsparser_args',
                              action=parse_statsparser_args,
                              default=[],
                              help='''Arguments that are passed to the statsparser script.
                              See a full list of available arguments with --statsparser_args " -h" ''')
    help_group = parser.add_argument_group('Help')
    help_group.add_argument('-h', '--help',
                            action='help',
                            default=argparse.SUPPRESS,
                            help='Show this help message and exit')
    help_group.add_argument('--version',
                            action='version',
                            version=__version__,
                            help="Show program's version string and exit")
    help_group.add_argument('-v', '--verbose',
                            action='store_true',
                            help='Additional debug messages are printed to stdout')
    help_group.add_argument('--quiet',
                            action='store_true',
                            help='Only errors and warnings are printed to stdout')
    args = parser.parse_args()
    # Re-run the rw_dir validation on the parsed data_basedir value.
    ns = argparse.Namespace()
    arg_data_basedir(parser, ns, args.data_basedir, '')
    if not os.path.exists(args.identity_file):
        print("Identity file {} does not exists. Please check key authentication settings or specify a different key with option -i.".format(args.identity_file))
        exit()
    # Translate the chosen options into the flag list handed to watchnchop.
    args.watchnchop_args = []
    if args.no_transfer:
        args.watchnchop_args.append('-n')
    if args.all_fast5:
        args.watchnchop_args.append('-a')
    if args.pass_only:
        args.watchnchop_args.append('-p')
    #args.watchnchop_args.extend(['-l', str(args.min_length)])
    #args.watchnchop_args.extend(['-r', str(args.min_length_rna)])
    args.watchnchop_args.extend(['-q', str(args.min_quality)])
    args.watchnchop_args.extend(['-d', args.rsync_dest])
    args.watchnchop_args.extend(['-i', args.identity_file])
    return args
def main(args):
    """dominION entry point: set up directories/logging, import existing
    records, start the directory observer and the five channel watchers,
    then loop until interrupted, periodically refreshing the overview page.
    """
    global ALL_RUNS
    global ALL_RUNS_LOCK
    global UPDATE_OVERVIEW
    global logger
    # Ensure the output directory tree exists.
    for p in [args.output_dir,
              os.path.join(args.output_dir, 'runs'),
              os.path.join(args.output_dir, 'qc'),
              os.path.join(args.output_dir, 'logs')]:
        if not os.path.exists(p):
            os.makedirs(p)
    if args.verbose:
        loglvl = logging.DEBUG
    elif args.quiet:
        loglvl = logging.WARNING
    else:
        loglvl = logging.INFO
    if not args.logfile:
        logs_filename = "{}_{}_{}.log".format(datetime.now().strftime("%Y-%m-%d_%H:%M"), hostname, loglvl)
        args.logfile = os.path.join(args.output_dir, 'logs', logs_filename)
    initLogger(logfile=args.logfile, level=loglvl)
    logger = logging.getLogger(name='gw')
    logger.info("##### starting dominION {} #####\n".format(__version__))
    logger.info("setting up dominION status page environment")
    # Copy static web resources (css, images) next to the generated pages.
    if not os.path.exists(os.path.join(args.output_dir, 'res')):
        os.makedirs(os.path.join(args.output_dir, 'res'))
    for res_file in ['style.css', 'flowcell.png', 'no_flowcell.png']:
        copyfile(os.path.join(resources_dir, res_file),
                 os.path.join(args.output_dir, 'res', res_file))
    # Load previously recorded QC and sequencing runs into the database.
    import_qcs(os.path.join(args.output_dir, "qc"))
    import_runs(os.path.join(args.output_dir, "runs"))
    logger.info("starting to observe runs directory for changes to directory names")
    observed_dir = os.path.join(args.output_dir, 'runs')
    event_handler = RunsDirsEventHandler(observed_dir)
    observer = Observer()
    observer.schedule(event_handler,
                      observed_dir,
                      recursive=True)
    observer.start()
    logger.info("starting channel watchers:")
    # One Watcher per GridION channel (GA10000..GA50000).
    watchers = []
    for channel in range(5):
        watchers.append(Watcher(args.minknow_log_basedir,
                                channel,
                                args.ignore_file_modifications,
                                args.output_dir,
                                args.data_basedir,
                                args.statsparser_args,
                                args.update_interval,
                                args.watchnchop_args,
                                args.min_length,
                                args.min_length_rna,
                                args.bc_kws))
    logger.info("initiating dominION overview page")
    update_overview(watchers, args.output_dir)
    webbrowser.open('file://' + os.path.realpath(os.path.join(args.output_dir, "{}_overview.html".format(hostname))))
    logger.info("entering main loop")
    try:
        n = 0
        while True:
            for watcher in watchers:
                watcher.check_q()
            if UPDATE_OVERVIEW:
                update_overview(watchers, args.output_dir)
                UPDATE_OVERVIEW = False
            time.sleep(0.2)
            n += 1
            # Force an overview refresh roughly every 20 seconds (100 * 0.2s).
            if n == 100:
                n = 0
                set_update_overview()
    except KeyboardInterrupt:
        # Graceful shutdown: stop observers and schedulers, then join them.
        # NOTE(review): the conditions below are nested conditional
        # expressions used as the if-condition ("x.is_alive() if x else
        # None"), equivalent to "x and x.is_alive()".
        for watcher in watchers:
            watcher.observer.stop()
            if watcher.spScheduler.is_alive() if watcher.spScheduler else None:
                watcher.stop_statsparser(0.05)
            for wcScheduler in watcher.wcScheduler:
                if wcScheduler.is_alive() if wcScheduler else None:
                    wcScheduler.join(timeout=0.05)
        for watcher in watchers:
            logger.info("joining GA{}0000's observer".format(watcher.channel))
            watcher.observer.join()
            for wcScheduler in watcher.wcScheduler:
                if wcScheduler.is_alive() if wcScheduler else None:
                    logger.info("joining GA{}0000's watchnchop scheduler".format(watcher.channel))
                    wcScheduler.join()
        for watcher in watchers:
            if watcher.spScheduler.is_alive() if watcher.spScheduler else None:
                logger.info("joining GA{}0000's statsparser scheduler".format(watcher.channel))
                watcher.stop_statsparser()
def set_update_overview():
    """Signal the main loop that the overview page needs rebuilding."""
    global UPDATE_OVERVIEW
    with UPDATE_OVERVIEW_LOCK:
        UPDATE_OVERVIEW = True
def add_database_entry(flowcell, run_data, mux_scans):
    """Insert one experiment record into the global ALL_RUNS database,
    keyed by the flowcell's asic_id_eeprom and the run_id.

    Returns True on success, False when the run_id already exists for this
    flowcell (the duplicate is rejected and both paths are logged).
    """
    ALL_RUNS_LOCK.acquire()
    #TODO: check for all mandatory entries
    asic_id_eeprom = flowcell['asic_id_eeprom']
    run_id = run_data['run_id']
    if asic_id_eeprom in ALL_RUNS:
        if run_id in ALL_RUNS[asic_id_eeprom]:
            # Duplicate run_id: keep the existing entry, report both paths.
            logger.warning("{} exists multiple times in database!".format(run_id))
            logger.warning("conflicting runs: {}, {}".format(ALL_RUNS[asic_id_eeprom][run_id]['run_data']['relative_path'],
                                                             run_data['relative_path']))
            ALL_RUNS_LOCK.release()
            return False
    else:
        ALL_RUNS[asic_id_eeprom] = {}
    ALL_RUNS[asic_id_eeprom][run_id] = {'flowcell' : flowcell,
                                        'run_data' : run_data,
                                        'mux_scans' : mux_scans}
    logger.debug('{} - added experiment of type "{}" performed on flowcell "{}" on "{}"'.format(asic_id_eeprom,
                                                                                               run_data['experiment_type'],
                                                                                               flowcell['flowcell_id'],
                                                                                               run_data['protocol_start']))
    ALL_RUNS_LOCK.release()
    return True
def add_mux_scan_results(flowcell_data, mux_scans):
    """Merge mux scan entries into the global MUX_RESULTS, keyed by
    asic_id_eeprom, keeping each flowcell's list sorted by timestamp
    in ascending order (newest handling: inserted before the first
    later entry, appended otherwise).

    Scans without a 'total' (or legacy 'group * total') pore count are
    skipped.
    """
    MUX_RESULTS_LOCK.acquire()
    asic_id_eeprom = flowcell_data['asic_id_eeprom']
    flowcell_id = flowcell_data['flowcell_id']
    if asic_id_eeprom not in MUX_RESULTS:
        MUX_RESULTS[asic_id_eeprom] = []
    for mux_scan in mux_scans:
        # Work on a copy so the caller's record is not mutated.
        mux_scan_copy = copy.deepcopy(mux_scan)
        if not 'total' in mux_scan:
            if 'group * total' in mux_scan:
                # Normalize the legacy key to 'total'.
                mux_scan_copy['total'] = mux_scan['group * total']
                del mux_scan_copy['group * total']
            else:
                continue
        mux_scan_copy['flowcell_id'] = flowcell_id
        # NOTE(review): requires the dateutil.parser submodule to be
        # imported; 'import dateutil' alone does not provide it.
        mux_scan_copy['timestamp'] = dateutil.parser.parse(mux_scan['timestamp'])
        for i in range(len(MUX_RESULTS[asic_id_eeprom])):
            if mux_scan_copy['timestamp'] < MUX_RESULTS[asic_id_eeprom][i]['timestamp']:
                MUX_RESULTS[asic_id_eeprom].insert(i, mux_scan_copy)
                break
        else:
            MUX_RESULTS[asic_id_eeprom].append(mux_scan_copy)
    MUX_RESULTS_LOCK.release()
def import_qcs(qc_dir):
    """Load platform-QC JSON files from *qc_dir* and merge their mux scan
    results into the global MUX_RESULTS store.

    Each file is expected to contain [flowcell, run_data, mux_scans];
    unparsable files are logged and skipped.
    """
    logger.info("importing platform qc entries from files in directory {}".format(qc_dir))
    for fn in os.listdir(qc_dir):
        if not fn.endswith('.json'):
            continue
        fp = os.path.join(qc_dir, fn)
        if os.path.isfile(fp):
            with open(fp, "r") as f:
                try:
                    flowcell, run_data, mux_scans = json.loads(f.read(), object_pairs_hook=OrderedDict)
                except Exception:
                    # BUGFIX: 'fn' was previously only bound inside a list
                    # comprehension, so this log line raised NameError in
                    # Python 3. Also narrowed the bare 'except:' so
                    # KeyboardInterrupt etc. are not swallowed.
                    logger.warning("failed to parse {}, json format or data structure corrupt".format(fn))
                    continue
            # Sanity check: raises KeyError if the record lacks the flowcell id
            # (kept from the original implementation).
            asic_id_eeprom = flowcell['asic_id_eeprom']
            add_mux_scan_results(flowcell, mux_scans)
def import_runs(base_dir, refactor=False):
    """Load sequencing run records from <base_dir>/<experiment>/<sample>/*.json
    into the global database and merge their mux scan results.

    When *refactor* is True, files whose stored 'experiment'/'sample'
    attributes disagree with their directory names are rewritten with the
    directory-derived values.
    """
    logger.info("importing sequencing run entries from files in directory {}".format(base_dir))
    for experiment in [d for d in os.listdir(base_dir) if os.path.isdir(os.path.join(base_dir, d))]:
        experiment_dir = os.path.join(base_dir, experiment)
        for sample in [d for d in os.listdir(experiment_dir) if os.path.isdir(os.path.join(experiment_dir, d))]:
            sample_dir = os.path.join(experiment_dir, sample)
            for fn in os.listdir(sample_dir):
                if not fn.endswith('.json'):
                    continue
                fp = os.path.join(sample_dir, fn)
                if os.path.isfile(fp):
                    with open(fp, "r") as f:
                        try:
                            flowcell, run_data, mux_scans = json.loads(f.read(), object_pairs_hook=OrderedDict)
                        except Exception:
                            # BUGFIX: 'fn' was previously only bound inside a
                            # list comprehension, so this log line raised
                            # NameError in Python 3. Bare 'except:' narrowed.
                            logger.warning("failed to parse {}, json format or data structure corrupt".format(fn))
                            continue
                    # temporarily change attributes experiment and sample according to directory names
                    prev = (run_data['experiment'] if 'experiment' in run_data else None,
                            run_data['sample'] if 'sample' in run_data else None)
                    # BUGFIX: this comparison was inverted ('=='), so files
                    # were rewritten only when nothing had changed.
                    changed = prev != (experiment, sample)
                    run_data['experiment'] = experiment
                    run_data['sample'] = sample
                    if refactor and changed:
                        # make changes permanent
                        # (was logging.info — use the module logger for consistency)
                        logger.info("writing changes to attributes 'experiment' and 'sample' to file")
                        data = (flowcell, run_data, mux_scans)
                        with open(fp, 'w') as f:
                            print(json.dumps(data, indent=4), file=f)
                    if not add_database_entry(flowcell, run_data, mux_scans):
                        logger.error("failed to add content from {} to the database".format(fp))
                        continue
                    # add mux scans
                    add_mux_scan_results(flowcell, mux_scans)
def get_runs_by_flowcell(asic_id_eeprom):
    """Return all non-QC sequencing runs recorded for the given flowcell
    (asic_id_eeprom), as a {run_id: entry} dict. Empty dict when the id is
    falsy or unknown."""
    with ALL_RUNS_LOCK:
        if not asic_id_eeprom or asic_id_eeprom not in ALL_RUNS:
            return {}
        flowcell_runs = ALL_RUNS[asic_id_eeprom]
        return {
            run_id: entry
            for run_id, entry in flowcell_runs.items()
            if 'qc' not in entry['run_data']['experiment_type'].lower()
        }
def get_latest_mux_scan_result(asic_id_eeprom):
    """Return the first (most recent) mux scan entry recorded for the given
    flowcell id, or None when no entry exists."""
    with MUX_RESULTS_LOCK:
        if asic_id_eeprom in MUX_RESULTS:
            # MUX_RESULTS lists are kept sorted; index 0 is the newest entry.
            latest_result = MUX_RESULTS[asic_id_eeprom][0]
        else:
            latest_result = None
    return latest_result
def get_latest(runs):
    """Return the run_id whose 'protocol_start' timestamp is the most
    recent, or None when *runs* is empty.

    Parameters: runs -- dict mapping run_id -> run entry (ALL_RUNS format).
    """
    latest_qc = None
    latest_start = None
    for run_id in runs:
        # BUGFIX: the previous implementation compared against the
        # protocol_start of the *previously iterated* run instead of the
        # current one, so the newest run could be credited to the wrong
        # run_id. Compare each run's own timestamp to the current best.
        run_start = dateutil.parser.parse(runs[run_id]['run_data']['protocol_start'])
        if latest_qc is None or run_start > latest_start:
            latest_qc = run_id
            latest_start = run_start
    return latest_qc
def update_overview(watchers, output_dir):
    """Render the per-host overview HTML page: one card per channel with
    its flowcell state and runs, plus a table of all experiments grouped
    by experiment and sample."""
    channel_to_css = {0:"one", 1:"two", 2:"three", 3:"four", 4:"five"}
    render_dict = {"version" : __version__,
                   "dateTimeNow" : datetime.now().strftime("%Y-%m-%d_%H:%M"),
                   "channels" : [],
                   "all_exp" : []
                   }
    for watcher in watchers:
        channel = watcher.channel
        render_dict["channels"].append({})
        asic_id_eeprom = None
        try:
            asic_id_eeprom = watcher.channel_status.flowcell['asic_id_eeprom']
        except:
            pass
        runs = get_runs_by_flowcell(asic_id_eeprom)
        #qcs = get_qcs_by_flowcell(asic_id_eeprom)
        render_dict["channels"][channel]['latest_qc'] = {}
        latest_qc = get_latest_mux_scan_result(asic_id_eeprom)
        if latest_qc:
            render_dict["channels"][channel]['latest_qc']['timestamp'] = latest_qc['timestamp'].date()
            render_dict["channels"][channel]['latest_qc']['total'] = latest_qc['total']
            if 'in_use' in latest_qc and watcher.channel_status.sequencing:
                render_dict["channels"][channel]['latest_qc']['in_use'] = latest_qc['in_use']
            else:
                render_dict["channels"][channel]['latest_qc']['in_use'] = 0
        render_dict["channels"][channel]['runs'] = []
        for run_id in runs:
            experiment = runs[run_id]['run_data']['experiment']
            if not experiment:
                if 'user_filename_input' in runs[run_id]['run_data']:
                    experiment = runs[run_id]['run_data']['user_filename_input']
            if not experiment:
                # BUGFIX: was logger.WARNING(...), which raised AttributeError
                # (Logger has no WARNING attribute), and the loop then fell
                # through to os.path.join with experiment=None; skip instead.
                logger.warning('not adding run with id {} to overview because no experiment name is set'.format(run_id))
                continue
            sample = runs[run_id]['run_data']['sample']
            if not sample:
                sample = experiment
            link = os.path.abspath(os.path.join(output_dir,'runs',experiment,sample,'report.html'))
            render_dict["channels"][channel]['runs'].append({'experiment':experiment,
                                                             'link':link})
        render_dict["channels"][channel]['channel'] = channel_to_css[watcher.channel]
        render_dict["channels"][channel]['asic_id_eeprom'] = asic_id_eeprom
        if asic_id_eeprom:
            if not latest_qc:
                render_dict["channels"][channel]['flowcell_id'] = "NO RECORDS"
            else:
                render_dict["channels"][channel]['flowcell_id'] = latest_qc['flowcell_id']
        else:
            render_dict["channels"][channel]['flowcell_id'] = '-'
    # Collect every non-QC run across all flowcells for the experiment table.
    ALL_RUNS_LOCK.acquire()
    all_runs_info = []
    for asic_id_eeprom in ALL_RUNS:
        for run_id in ALL_RUNS[asic_id_eeprom]:
            experiment_type = ALL_RUNS[asic_id_eeprom][run_id]['run_data']['experiment_type']
            if not 'qc' in experiment_type.lower():
                protocol_start = dateutil.parser.parse(ALL_RUNS[asic_id_eeprom][run_id]['run_data']['protocol_start'])
                duration = "N/A"
                if 'protocol_end' in ALL_RUNS[asic_id_eeprom][run_id]['run_data']:
                    if ALL_RUNS[asic_id_eeprom][run_id]['run_data']['protocol_end']:
                        protocol_end = dateutil.parser.parse(ALL_RUNS[asic_id_eeprom][run_id]['run_data']['protocol_end'])
                        # Drop sub-second precision from the printed duration.
                        duration = "{}".format(protocol_end - protocol_start).split('.')[0]
                sequencing_kit = ALL_RUNS[asic_id_eeprom][run_id]['run_data']['sequencing_kit']
                experiment = ALL_RUNS[asic_id_eeprom][run_id]['run_data']['experiment']
                sample = ALL_RUNS[asic_id_eeprom][run_id]['run_data']['sample']
                if not sample:
                    sample = experiment
                link = os.path.abspath(os.path.join(output_dir,'runs',experiment,sample,'report.html'))
                all_runs_info.append({'link':link,
                                      'experiment':experiment,
                                      'sample': sample,
                                      'sequencing_kit': sequencing_kit,
                                      'protocol_start': protocol_start,
                                      'duration': duration})
    ALL_RUNS_LOCK.release()
    if all_runs_info:
        # Newest runs first, then group consecutive rows by experiment/sample.
        all_runs_info = sorted(all_runs_info, key=lambda k: k['protocol_start'], reverse=True)
        run = 0
        sample = 0
        grouped = [[[all_runs_info[0]]]] if all_runs_info else [[[]]]
        for run_info in all_runs_info[1:]:
            if grouped[run][sample][0]['experiment'] == run_info['experiment']:
                if grouped[run][sample][0]['sample'] == run_info['sample']:
                    grouped[run][sample].append(run_info)
                else:
                    grouped[run].append( [run_info] )
                    sample += 1
            else:
                grouped.append( [[run_info]] )
                run += 1
                sample = 0
        for exp in grouped:
            render_dict['all_exp'].append(
                {'num_samples':str(sum([len(sample) for sample in exp])),
                 'experiment':exp[0][0]['experiment'],
                 'samples':[]})
            for sample in exp:
                render_dict['all_exp'][-1]['samples'].append(
                    {'num_runs':str(len(sample)),
                     'link':sample[0]['link'],
                     'sample':sample[0]['sample'],
                     'runs':[]})
                for run in sample:
                    render_dict['all_exp'][-1]['samples'][-1]['runs'].append(run)
    template = jinja_env.get_template('overview.template')
    with open(os.path.join(output_dir, "{}_overview.html".format(hostname)), 'w') as f:
        print(template.render(render_dict), file=f)
class ChannelStatus():
    """Mutable record of one GridION channel's current flowcell and run.

    Holds the flowcell identifiers, the run metadata and the mux scan
    results parsed from the MinKNOW logs, and forwards new mux scans to
    the global MUX_RESULTS store.
    """
    # Template for a fresh run record; deep-copied per instance/reset.
    empty_run_data = OrderedDict([
        ('run_id', None),
        ('minion_id', None),
        ('sequencing_kit', None),
        ('protocol_start', None),
        ('protocol_end', None),
        ('relative_path', None),
        ('sample', None),
        ('experiment', None)
        ])
    # Template for a fresh flowcell record.
    empty_flowcell = OrderedDict([
        ('flowcell_id', None),
        ('asic_id', None),
        ('asic_id_eeprom', None),
        ('flowcell', None)
        ])
    empty_mux = OrderedDict()
    def __init__(self, minion_id, channel):
        self.minion_id = minion_id
        self.flowcell = copy.deepcopy(self.empty_flowcell)
        self.run_data = copy.deepcopy(self.empty_run_data)
        self.mux_scans = []
        self.run_data['minion_id'] = minion_id
        self.logger = logging.getLogger(name='gw.w{}.cs'.format(channel+1))
        # True while a sequencing protocol is running on this channel.
        self.sequencing = False
    def update(self, content, overwrite=False):
        """Merge parsed key/value pairs into the flowcell or run record.

        Keys already holding a value are only replaced when *overwrite*
        is True; unknown keys are silently ignored.
        """
        for key in content:
            if key in self.flowcell:
                if self.flowcell[key]:
                    if overwrite:
                        self.logger.info("changing the current value of {} ({}) to {}".format(key, self.flowcell[key], content[key]))
                        self.flowcell[key] = content[key]
                    else:
                        self.logger.debug("not changing the current value of {} ({}) to {}".format(key, self.flowcell[key], content[key]))
                    continue
                else:
                    self.flowcell[key] = content[key]
                    self.logger.info("new flowcell value for {} : {}".format(key, content[key]))
                    continue
            elif key in self.run_data:
                if self.run_data[key]:
                    if overwrite:
                        self.logger.info("changing the current value of {} ({}) to {}".format(key, self.run_data[key], content[key]))
                        self.run_data[key] = content[key]
                    else:
                        self.logger.debug("not changing the current value of {} ({}) to {}".format(key, self.run_data[key], content[key]))
                    continue
                self.run_data[key] = content[key]
                self.logger.info("new run value for {} : {}".format(key, content[key]))
    def add_mux_scan(self, timestamp, active_pores, in_use=None):
        """Record one mux scan (timestamp + active pore count, optionally
        pores in use) and push it to the global MUX_RESULTS store."""
        self.mux_scans.append(copy.deepcopy(self.empty_mux))
        self.mux_scans[-1]['timestamp'] = timestamp
        self.mux_scans[-1]['total'] = active_pores
        if in_use:
            self.mux_scans[-1]['in_use'] = in_use
        add_mux_scan_results(self.flowcell, [self.mux_scans[-1]])
        self.logger.debug("added new mux scan result")
    def flowcell_disconnected(self):
        """Reset both flowcell and run state (flowcell was removed)."""
        self.logger.info("resetting flowcell and run data")
        self.flowcell = copy.deepcopy(self.empty_flowcell)
        self.run_data = copy.deepcopy(self.empty_run_data)
        self.run_data['minion_id'] = self.minion_id
        self.mux_scans = []
        self.sequencing = False
    def reset_channel(self):
        """Reset only the run state, keeping the flowcell record."""
        self.logger.info("resetting run data")
        self.run_data = copy.deepcopy(self.empty_run_data)
        self.run_data['minion_id'] = self.minion_id
        self.mux_scans = []
        self.sequencing = False
class WatchnchopScheduler(threading.Thread):
    """Daemon thread that starts the external 'watchnchop' perl tool once the
    run's fastq_pass directory contains data, and terminates it when the
    experiment ends or the program shuts down."""

    def __init__(self, data_basedir, relative_path, experiment, sequencing_kit, fastq_reads_per_file,
                 bc_kws, stats_fp, channel, watchnchop_args, min_length, min_length_rna):
        threading.Thread.__init__(self)
        # setDaemon() is the legacy API; newer Python exposes the daemon property.
        if getattr(self, 'daemon', None) is None:
            self.daemon = True
        else:
            self.setDaemon(True)
        self.stoprequest = threading.Event() # set when joined without timeout (eg if terminated with ctr-c)
        self.exp_end = threading.Event() # set when joined with timeout (eg if experiment ended)
        self.logger = logging.getLogger(name='gw.w{}.wcs'.format(channel+1))
        self.observed_dir = os.path.join(data_basedir, relative_path, 'fastq_pass')
        # define the command that is to be executed
        # NOTE(review): 'which' is a module-level helper (presumably shutil.which
        # or similar) — returns None if perl/watchnchop are not installed; confirm.
        self.cmd = [which('perl'),
                    which('watchnchop'),
                    '-o', stats_fp,
                    '-f', str(fastq_reads_per_file)]
        if watchnchop_args:
            self.cmd.extend(watchnchop_args)
        # Enable barcode mode (-b) if any barcoding keyword matches the
        # experiment name or the sequencing kit.
        for kw in bc_kws:
            if kw.lower() in experiment.lower() or kw.lower() in sequencing_kit.lower():
                self.cmd.append('-b')
                break
        # Minimum read length differs for RNA runs.
        self.cmd.append('-l')
        if 'rna' in experiment.lower() or 'rna' in sequencing_kit.lower():
            self.cmd.append(str(min_length_rna))
        else:
            self.cmd.append(str(min_length))
        # Trailing '' ensures the path ends with a separator.
        self.cmd.append(os.path.join(data_basedir, relative_path, ''))
        self.process = None  # Popen handle once watchnchop is running

    def run(self):
        self.logger.info("STARTED watchnchop scheduler")
        # Phase 1: wait until fastq data exists, then launch watchnchop.
        # NOTE(review): subprocess.IDLE_PRIORITY_CLASS exists on Windows only —
        # confirm this tool is Windows-targeted.
        while not (self.stoprequest.is_set() or self.exp_end.is_set()):
            if self.conditions_met():
                self.process = subprocess.Popen(self.cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, creationflags=subprocess.IDLE_PRIORITY_CLASS)
                self.logger.info("STARTED WATCHNCHOP with arguments: {}".format(self.cmd))
                break
            time.sleep(1)
        # Phase 2: idle until asked to stop or the experiment ends.
        while not (self.stoprequest.is_set() or self.exp_end.is_set()):
            time.sleep(1)
        if self.process:
            try:
                self.process.terminate()
                self.logger.info("TERMINATED watchnchop process")
            except:
                self.logger.error("TERMINATING watchnchop process failed")
        else:
            if self.stoprequest.is_set():
                self.logger.error("watchnchop was NEVER STARTED: this thread was ordered to kill the watchnchop subprocess before it was started")
                return
            # try one last time to start watchnchop (necessary for runs with extremly low output, where all reads are buffered)
            self.logger.info("starting watchnchop in one minutes, then kill it after another 5 minutes")
            for i in range(60):
                if self.stoprequest.is_set():
                    self.logger.error("watchnchop was NEVER STARTED: this thread was ordered to kill the watchnchop subprocess before it was started")
                    return
                time.sleep(1)
            if self.conditions_met():
                self.process = subprocess.Popen(self.cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, creationflags=subprocess.IDLE_PRIORITY_CLASS)
                self.logger.info("STARTED WATCHNCHOP with arguments: {}".format(self.cmd))
            else:
                self.logger.error("watchnchop NOT STARTED: directory {} still does not exist or contains no fastq files".format(self.observed_dir))
                return
            # Give it 5 minutes to process the buffered reads, then kill it.
            for i in range(300):
                if self.stoprequest.is_set():
                    break
                time.sleep(1)
            self.process.terminate()
            self.logger.info("TERMINATED watchnchop process")

    def conditions_met(self):
        """True once the observed fastq_pass dir exists and holds .fastq files."""
        if os.path.exists(self.observed_dir):
            if [fn for fn in os.listdir(self.observed_dir) if fn.endswith('.fastq')]:
                return True
        return False

    def join(self, timeout=None):
        # join(timeout) signals "experiment ended"; join() signals "shut down now".
        if timeout:
            self.exp_end.set()
        else:
            self.stoprequest.set()
        super(WatchnchopScheduler, self).join(timeout)
class StatsparserScheduler(threading.Thread):
    """Daemon thread that periodically runs statsparser on a sample directory
    to refresh its HTML report while an experiment is in progress."""

    def __init__(self, update_interval, sample_dir, statsparser_args, channel):
        threading.Thread.__init__(self)
        # setDaemon() is the legacy API; newer Python exposes the daemon property.
        if getattr(self, 'daemon', None) is None:
            self.daemon = True
        else:
            self.setDaemon(True)
        self.stoprequest = threading.Event() # set when joined without timeout (eg if terminated with ctr-c)
        self.exp_end = threading.Event() # set when joined with timeout (eg if experiment ended)
        self.logger = logging.getLogger(name='gw.w{}.sps'.format(channel+1))
        self.channel = channel
        self.update_interval = update_interval
        self.sample_dir = sample_dir
        self.statsparser_args = statsparser_args
        self.page_opened = False  # open the report in the browser only once

    def run(self):
        # BUGFIX: both loop conditions were written as "not A or B" /
        # "... and not A or B"; by precedence that is "(not A) or B", so once
        # exp_end was set the loops could never exit and join(timeout) hung.
        # Parenthesized as in WatchnchopScheduler.run.
        while not (self.stoprequest.is_set() or self.exp_end.is_set()):
            last_time = time.time()
            if self.conditions_met():
                self.update_report()
            this_time = time.time()
            # Sleep in 1 s steps so stop/exp_end requests are honoured promptly.
            while (this_time - last_time < self.update_interval) and not (self.stoprequest.is_set() or self.exp_end.is_set()):
                time.sleep(1)
                this_time = time.time()
        # start statsparser a last time if the experiment ended
        if not self.stoprequest.is_set() and self.conditions_met():
            self.update_report()
        # Release the claim on this sample directory (see conditions_met).
        SP_DIRS_LOCK.acquire()
        if self.sample_dir in SP_DIRS:
            if SP_DIRS[self.sample_dir] == self.channel:
                del SP_DIRS[self.sample_dir]
        SP_DIRS_LOCK.release()

    def conditions_met(self):
        """True if stats files exist and this channel owns the sample dir.

        Only one scheduler may process a sample directory at a time; the first
        channel to claim it in the module-level SP_DIRS registry wins."""
        conditions_met = False
        stats_fns = [fn for fn in os.listdir(os.path.abspath(self.sample_dir)) if fn.endswith('stats.csv')] if os.path.exists(os.path.abspath(self.sample_dir)) else []
        # assure that only one statsparser instance is running on a directory at a time
        SP_DIRS_LOCK.acquire()
        if not self.sample_dir in SP_DIRS:
            SP_DIRS[self.sample_dir] = self.channel
        if stats_fns and SP_DIRS[self.sample_dir] == self.channel:
            conditions_met = True
        SP_DIRS_LOCK.release()
        return conditions_met

    def update_report(self):
        """Run statsparser synchronously; open the report on first success."""
        self.logger.info("updating report...")
        cmd = [os.path.join(get_script_dir(),'statsparser'), #TODO: change to which() ?
               self.sample_dir,
               '-q']
        cmd.extend(self.statsparser_args)
        cp = subprocess.run(cmd) # waits for process to complete
        if cp.returncode == 0:
            if not self.page_opened:
                basedir = os.path.abspath(self.sample_dir)
                fp = os.path.join(basedir, 'report.html')
                self.logger.info("OPENING " + fp)
                try:
                    webbrowser.open('file://' + os.path.realpath(fp))
                except:
                    pass
                self.page_opened = True
        else:
            self.logger.warning("statsparser returned with errorcode {} for directory {}".format(cp.returncode, self.sample_dir))

    def join(self, timeout=None):
        # join(timeout) signals "experiment ended"; join() signals "shut down now".
        if timeout:
            self.exp_end.set()
        else:
            self.stoprequest.set()
        super(StatsparserScheduler, self).join(timeout)
class Watcher():
    """Watches the MinKNOW log directory of a single sequencing channel,
    parses control-server and bream log lines from a timestamp-ordered queue,
    and drives per-run bookkeeping, report generation (StatsparserScheduler)
    and read processing (WatchnchopScheduler)."""

    def __init__(self, minknow_log_basedir, channel, ignore_file_modifications, output_dir, data_basedir,
                 statsparser_args, update_interval, watchnchop_args, min_length, min_length_rna, bc_kws):
        # Filled by the log file event handler, ordered by parsed timestamp.
        self.q = queue.PriorityQueue()
        self.watchnchop_args = watchnchop_args
        self.min_length = min_length
        self.min_length_rna = min_length_rna
        self.channel = channel
        self.output_dir = output_dir
        self.data_basedir = data_basedir
        self.statsparser_args = statsparser_args
        self.update_interval = update_interval
        self.bc_kws = bc_kws
        # MinKNOW names per-channel log dirs "GA<channel+1>0000".
        self.observed_dir = os.path.join(minknow_log_basedir, "GA{}0000".format(channel+1))
        self.event_handler = LogFilesEventHandler(self.q, ignore_file_modifications, channel)
        self.observer = Observer()
        self.observer.schedule(self.event_handler,
                               self.observed_dir,
                               recursive=False)
        self.observer.start()
        self.channel_status = ChannelStatus("GA{}0000".format(channel+1), channel)
        self.spScheduler = None   # current StatsparserScheduler (or None)
        self.wcScheduler = []     # history of WatchnchopSchedulers; last is current
        self.logger = logging.getLogger(name='gw.w{}'.format(channel+1))
        self.logger.info("...watcher for {} ready".format(self.observed_dir))

    def check_q(self):
        """Drain the scheduler queue and dispatch each line to its parser."""
        # checking sheduler queue
        if not self.q.empty():
            self.logger.debug("Queue content for {}:".format(self.observed_dir))
        while not self.q.empty():
            timestamp, origin, line = self.q.get()
            self.logger.debug("received '{}' originating from '{} log' at '{}'".format(line, origin, timestamp))
            if origin == 'server':
                self.parse_server_log_line(line)
            elif origin == 'bream':
                self.parse_bream_log_line(line)
            #elif origin == 'analyser':
            #    self.parse_analyser_log_line(line)

    def parse_server_log_line(self, line):
        """Parse one control_server log line and update channel state."""
        dict_content = {}
        overwrite = False
        timestamp = line[:23]  # leading ISO-like timestamp of the log line
        # fetch output_path, run_id, script_path, relative_path, protocol_start, flowcell_id [, experiment, sample]
        if "protocol_started" in line:
            for m in re.finditer('([^\s,]+) = ([^\s,]+)', line):
                dict_content[m.group(1)] = m.group(2)
            overwrite = True
            # output_path looks like <base>/./<relative_path>.
            dict_content['relative_path'] = dict_content['output_path'].split("/./")[1].strip("/")
            subdirs = dict_content['relative_path'].split('/')
            if len(subdirs) == 3:
                # case sequencing protocol: experiment/sample/run dirs
                dict_content['experiment'] = subdirs[0]
                dict_content['sample'] = subdirs[1]
                dict_content['flowcell_id'] = subdirs[2].split('_')[3]
            elif len(subdirs) == 1:
                # case qc protocol: a single run dir
                dict_content['flowcell_id'] = subdirs[0].split('_')[3]
            self.logger.info("PROTOCOL START")
            set_update_overview()
            self.channel_status.run_data['protocol_start'] = timestamp
        # fetch protocol_end
        elif "protocol_finished" in line:
            self.logger.info("PROTOCOL END")
            set_update_overview()
            self.channel_status.run_data['protocol_end'] = timestamp
            if self.channel_status.mux_scans:
                self.save_logdata()
            self.channel_status.reset_channel()
            self.stop_statsparser()
            self.stop_watchnchop()
        #
        elif "[engine/info]: : flowcell_discovered" in line:
            for m in re.finditer('([^\s,]+) = ([^\s,]+)', line):
                dict_content[m.group(1)] = m.group(2)
            overwrite = True
            self.logger.info("FLOWCELL DISCOVERED")
            set_update_overview()
            self.channel_status.flowcell_disconnected()
            self.stop_statsparser()
            self.stop_watchnchop()
        elif "[engine/info]: : data_acquisition_started" in line:
            for m in re.finditer('([^\s,]+) = ([^\s,]+)', line):
                dict_content[m.group(1)] = m.group(2)
            overwrite = True
        elif "flowcell_disconnected" in line:
            self.logger.info("FLOWCELL DISCONNECTED")
            set_update_overview()
            self.channel_status.flowcell_disconnected()
        elif "pores available for sequencing" in line:
            # Mux scan result: number of active pores and pores in use.
            active_pores = None
            in_use = None
            for m in re.finditer("has ([0-9]+) pores available for sequencing", line):
                active_pores = m.group(1)
            for m in re.finditer("Starting sequencing with ([0-9]+) pores", line):
                in_use = m.group(1)
            self.logger.info("new mux scan result: {} active, {} in use".format(active_pores, in_use))
            self.channel_status.add_mux_scan(timestamp, active_pores, in_use=in_use)
            set_update_overview()
            self.save_logdata()
        if dict_content:
            self.channel_status.update(dict_content, overwrite)

    def parse_bream_log_line(self, line):
        """Parse one bream log line and update channel state."""
        dict_content = {}
        overwrite = False
        timestamp = line.split(" - ")[1]
        if "INFO - Attribute" in line:
            for m in re.finditer("([^\s,]+) set to (.+)", line):
                dict_content[m.group(1)] = m.group(2)
        elif "INFO - Asked to start protocol" in line:
            # Command line style "--key=value" arguments.
            for m in re.finditer("'--([^\s,]+)=([^\s,]+)'", line):
                dict_content[m.group(1)] = m.group(2)
            overwrite = True
        elif "INFO - Updating context tags in MinKNOW with" in line:
            # Dict-repr style "'key': 'value'" pairs (u-prefix tolerated).
            for m in re.finditer("'([^\s,]+)'[:,] u?'([^\s,]+)'", line):
                dict_content[m.group(1)] = m.group(2)
            if 'sequencing_kit' in dict_content:
                dict_content['sequencing_kit'] = dict_content['sequencing_kit'].upper()
        elif "platform_qc.report" in line:
            self.logger.info("QC FINISHED")
        elif "sequencing.start" in line:
            dict_content["sequencing_start_time"] = timestamp
            self.logger.info("SEQUENCING STARTS")
            self.channel_status.sequencing = True
            set_update_overview()
            self.start_watchnchop()
            self.start_statsparser()
        if dict_content:
            self.channel_status.update(dict_content, overwrite)

    def check_attributes(self, attributes):
        """Return the first attribute in ``attributes`` that is unset (or
        unknown) in run_data/flowcell, or None if all are present."""
        for key in attributes:
            if key in self.channel_status.run_data:
                if self.channel_status.run_data[key]:
                    continue
                else:
                    return key
            elif key in self.channel_status.flowcell:
                if self.channel_status.flowcell[key]:
                    continue
                else:
                    return key
            else:
                return key
        return None

    def save_logdata(self):
        """Write the current run's log data to a JSON file and mirror it into
        the module-level ALL_RUNS registry."""
        missing_key = self.check_attributes(['experiment_type', 'run_id', 'flowcell_id', 'asic_id_eeprom'])
        if missing_key:
            self.logger.warning("NOT SAVING REPORT for {} because the crucial attribute '{}' is missing".format(self.channel_status.run_data['run_id'], missing_key))
            return
        fn = []
        if "qc" in self.channel_status.run_data['experiment_type'].lower():
            # QC runs must NOT have experiment/sample set; otherwise ambiguous.
            missing_key = self.check_attributes(['experiment', 'sample'])
            if not missing_key:
                self.logger.warning("NOT SAVING REPORT for {} because it is not certain that this is a qc run".format(self.channel_status.run_data['run_id']))
                return
            fn.extend(["QC", self.channel_status.flowcell['flowcell_id'], self.channel_status.run_data['run_id']])
            target_dir = os.path.join(self.output_dir, 'qc')
        else:
            missing_key = self.check_attributes(['experiment', 'sample'])
            if missing_key:
                self.logger.warning("NOT SAVING REPORT for {} because the crucial attribute '{}' is missing".format(self.channel_status.run_data['run_id'], missing_key))
                return
            fn.extend([self.channel_status.run_data['run_id'], 'logdata'])
            target_dir = os.path.join(self.output_dir,
                                      'runs',
                                      self.channel_status.run_data['experiment'],
                                      self.channel_status.run_data['sample'])
        fn = "_".join(fn) + ".json"
        self.logger.info("saving log data to file {}".format(os.path.join(target_dir, fn)))
        data = (self.channel_status.flowcell, self.channel_status.run_data, self.channel_status.mux_scans)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        with open( os.path.join(target_dir, fn), 'w') as f:
            print(json.dumps(data, indent=4), file=f)
        # Mirror into the shared registry, keyed by flowcell eeprom id then run id.
        ALL_RUNS_LOCK.acquire()
        run_id = self.channel_status.run_data['run_id']
        asic_id_eeprom = self.channel_status.flowcell['asic_id_eeprom']
        if asic_id_eeprom in ALL_RUNS:
            ALL_RUNS[asic_id_eeprom][run_id] = {'flowcell': data[0],
                                                'run_data': data[1],
                                                'mux_scans': data[2]}
        else:
            ALL_RUNS[asic_id_eeprom] = {}
            ALL_RUNS[asic_id_eeprom][run_id] = {'flowcell': data[0],
                                                'run_data': data[1],
                                                'mux_scans': data[2]}
        ALL_RUNS_LOCK.release()

    def start_watchnchop(self):
        """Start a new WatchnchopScheduler for the current run (stopping any
        previous one first)."""
        missing_key = self.check_attributes(['experiment', 'sample', 'sequencing_kit', 'run_id', 'fastq_reads_per_file', 'relative_path'])
        if missing_key:
            self.logger.warning("NOT executing watchnchop because the crucial attribute '{}' is missing".format(missing_key))
            return
        self.stop_watchnchop()
        stats_fp = os.path.join(self.output_dir,
                                'runs',
                                self.channel_status.run_data['experiment'],
                                self.channel_status.run_data['sample'],
                                "{}_stats.csv".format(self.channel_status.run_data['run_id']))
        self.wcScheduler.append(WatchnchopScheduler(self.data_basedir,
                                                    self.channel_status.run_data['relative_path'],
                                                    self.channel_status.run_data['experiment'],
                                                    self.channel_status.run_data['sequencing_kit'],
                                                    self.channel_status.run_data['fastq_reads_per_file'],
                                                    self.bc_kws,
                                                    stats_fp,
                                                    self.channel,
                                                    self.watchnchop_args,
                                                    self.min_length,
                                                    self.min_length_rna))
        self.wcScheduler[-1].start()
        return

    def stop_watchnchop(self, timeout=1.2):
        # NOTE(review): a conditional expression is used as the if-condition;
        # it evaluates to None (falsy) when no scheduler exists. Works, but
        # "if self.wcScheduler and self.wcScheduler[-1].is_alive():" would be clearer.
        if self.wcScheduler[-1].is_alive() if self.wcScheduler else None:
            if timeout:
                self.wcScheduler[-1].join(timeout)
            else:
                self.wcScheduler[-1].join()

    def start_statsparser(self):
        """Schedule periodic report updates for the current sample directory."""
        missing_key = self.check_attributes(['experiment', 'sample'])
        if missing_key:
            self.logger.warning("NOT starting statsparser scheduler because the crucial attribute '{}' is missing".format(missing_key))
            return
        #start creation of plots at regular time intervals
        self.stop_statsparser()
        sample_dir = os.path.join(self.output_dir,
                                  'runs',
                                  self.channel_status.run_data['experiment'],
                                  self.channel_status.run_data['sample'])
        # NOTE(review): update_interval/1000 converts ms to seconds, yet the
        # message says "minutes" — confirm the intended unit (ms/60000 for minutes).
        self.logger.info('SCHEDULING update of report for sample {1} every {0:.1f} minutes'.format(self.update_interval/1000, sample_dir))
        self.spScheduler = StatsparserScheduler(self.update_interval,
                                                sample_dir,
                                                self.statsparser_args,
                                                self.channel)
        self.spScheduler.start()

    def stop_statsparser(self, timeout=1.2):
        # Same conditional-expression-as-condition pattern as stop_watchnchop.
        if self.spScheduler.is_alive() if self.spScheduler else None:
            if timeout:
                self.spScheduler.join(timeout)
            else:
                self.spScheduler.join()
class OpenedFilesHandler():
    '''Keeps a registry of open log files and feeds their content, line by
    line, to a processing callback. A trailing partial line is buffered per
    file until the newline that completes it arrives.'''

    def __init__(self, channel):
        self.logger = logging.getLogger(name='gw.w{}.ofh'.format(channel+1))
        # Maps file path -> [file handle, buffered partial line].
        self.open_files = {}

    def open_new_file(self, path):
        """Open ``path`` for reading and register it with an empty buffer."""
        self.logger.info("Opening file {}".format(path))
        self.open_files[path] = [open(path, 'r'), ""]

    def close_file(self, path):
        """Close the handle for ``path`` (best effort) and drop its entry."""
        self.logger.debug("Attempting to close file {}".format(path))
        try:
            self.open_files[path][0].close()
        except:
            self.logger.debug("File handle of file {} couldn't be closed".format(path))
        if path in self.open_files:
            del self.open_files[path]
            self.logger.debug("Deleted entry in open_files for file {}".format(path))

    def process_lines_until_EOF(self, process_function, path):
        """Read ``path`` up to EOF, passing each complete, non-empty line
        (with any previously buffered fragment prepended) to
        ``process_function``. An incomplete trailing line is buffered."""
        handle = self.open_files[path][0]
        for raw in iter(handle.readline, ""):
            if not raw.endswith("\n"):
                # Potentially incomplete line: stash it until more arrives.
                self.open_files[path][1] += raw
                continue
            complete = (self.open_files[path][1] + raw).strip()
            self.open_files[path][1] = ""
            if complete:
                process_function(complete)
class LogFilesEventHandler(FileSystemEventHandler):
    """Watchdog handler that follows MinKNOW's control_server and bream log
    files, reads new lines as the files grow, and enqueues them ordered by
    their parsed timestamps."""

    # Paths of the currently followed log files (None until discovered).
    control_server_log, bream_log = None, None

    def __init__(self, q, ignore_file_modifications, channel):
        super(LogFilesEventHandler, self).__init__()
        self.ignore_file_modifications = ignore_file_modifications
        self.file_handler = OpenedFilesHandler(channel)
        self.comm_q = q
        # while no server log file is opened, all lines read are buffered in a seperate Priority Queue
        self.buff_q = queue.PriorityQueue()
        self.q = self.buff_q
        self.logger = logging.getLogger(name='gw.w{}.lfeh'.format(channel+1))

    def on_moved(self, event):
        pass

    def on_created(self, event):
        """Start following newly created server/bream log files."""
        if not event.is_directory:
            activate_q = False
            self.logger.debug("File {} was created".format(event.src_path))
            basename = os.path.basename(event.src_path)
            if basename.startswith("control_server_log"):
                if self.control_server_log:
                    self.file_handler.close_file(event.src_path)
                    self.logger.info("Replacing current control_server_log file {} with {}".format(self.control_server_log, event.src_path))
                else:
                    # read lines of server file first, then activate the real communication q
                    activate_q = True
                self.control_server_log = event.src_path
                self.logger.info("New control_server_log file {}".format(self.control_server_log))
                process_function = self.enqueue_server_log_line
            elif basename.startswith("bream") and basename.endswith(".log"):
                if self.bream_log:
                    self.file_handler.close_file(event.src_path)
                    self.logger.info("Replacing current bream_log file {} with {}".format(self.bream_log, event.src_path))
                self.bream_log = event.src_path
                self.logger.info("New bream_log file {}".format(self.bream_log))
                process_function = self.enqueue_bream_log_line
            else:
                self.logger.debug("File {} is not of concern for this tool".format(event.src_path))
                return
            self.file_handler.open_new_file(event.src_path)
            self.file_handler.process_lines_until_EOF(process_function, event.src_path)
            self.logger.info("approx. queue size: {}".format(self.q.qsize()))
            if activate_q:
                self.activate_q()

    def on_deleted(self, event):
        """Stop following a log file that was removed."""
        if not event.is_directory:
            self.logger.debug("File {} was deleted".format(event.src_path))
            #self.file_handler.close_file(event.src_path)
            if self.control_server_log == event.src_path:
                # BUGFIX: previously assigned to a bare local variable
                # 'control_server_log', leaving the attribute stale (compare
                # the bream branch below, which correctly uses self.).
                self.control_server_log = None
                self.logger.warning("Current control_server_log file {} was deleted!".format(event.src_path))
            elif self.bream_log == event.src_path:
                self.bream_log = None
                self.logger.warning("Current bream_log file {} was deleted".format(event.src_path))
            else:
                self.logger.debug("File {} is not opened and is therefore not closed.".format(event.src_path))
                return
            self.file_handler.close_file(event.src_path)

    def on_modified(self, event):
        """Read newly appended lines of a followed log file."""
        if not event.is_directory:
            self.logger.debug("File {} was modified".format(event.src_path))
            if event.src_path in self.file_handler.open_files:
                if self.control_server_log == event.src_path:
                    self.file_handler.process_lines_until_EOF(self.enqueue_server_log_line, event.src_path)
                elif self.bream_log == event.src_path:
                    self.file_handler.process_lines_until_EOF(self.enqueue_bream_log_line, event.src_path)
                else:
                    self.logger.warning("case not handled")
                    return
            else:
                # A modification of an unknown file may be the first sign of a
                # pre-existing log; treat it as a creation unless told otherwise.
                if not self.ignore_file_modifications:
                    self.on_created(event)
                else:
                    self.logger.debug("File {} existed before this script was started".format(event.src_path))

    def activate_q(self):
        """Switch to the real communication queue, flushing buffered lines."""
        self.logger.info("activating communication queue")
        self.q = self.comm_q
        while not self.buff_q.empty():
            self.q.put(self.buff_q.get())

    def enqueue_server_log_line(self, line):
        """Enqueue a server log line keyed by its leading timestamp."""
        try:
            self.q.put( (dateutil.parser.parse(line[:23]), 'server', line) )
        except Exception:
            self.logger.debug("the timestamp of the following line in the server log file could not be parsed:\n{}".format(line))

    def enqueue_bream_log_line(self, line):
        """Enqueue a bream log line keyed by its ' - '-delimited timestamp."""
        try:
            self.q.put( (dateutil.parser.parse(line.split(' - ')[1]), 'bream', line) )
        except Exception:
            self.logger.debug("the timestamp of the following line in the bream log file could not be parsed:\n{}".format(line))
class RunsDirsEventHandler(FileSystemEventHandler):
    '''Watchdog handler for the output "runs" directory: any structural change
    (experiment/sample directories at depth 1-2 or *.json logdata files at
    depth 3) triggers a full re-import of all sequencing runs.'''

    def __init__(self, observed_dir):
        super(RunsDirsEventHandler, self).__init__()
        self.observed_dir = os.path.abspath(observed_dir)
        self.logger = logging.getLogger(name='gw.reh')

    def on_moved(self, event):
        # A move within the observed tree keeps the structure valid; a move
        # out of the tree (or to a different depth) is treated as a deletion.
        if event.is_directory or (self.depth(event.src_path) == 3 and event.src_path.endswith('.json')):
            self.logger.debug("moved {}, depth {}, \ndest {}".format(event.src_path, self.depth(event.src_path), event.dest_path))
            if self.observed_dir in event.dest_path and self.depth(event.dest_path) == self.depth(event.src_path):
                self.reload_runs()
            else:
                self.on_deleted(event)

    def on_created(self, event):
        if event.is_directory:
            self.logger.debug("created directory {}, depth {}".format(event.src_path, self.depth(event.src_path)))
            if 1 <= self.depth(event.src_path) <= 2:
                self.reload_runs()
        elif self.depth(event.src_path) == 3 and event.src_path.endswith('.json'):
            self.logger.debug("created file {}, depth {}".format(event.src_path, self.depth(event.src_path)))
            self.reload_runs()

    def on_modified(self, event):
        # Directory modifications are logged but do not force a reload.
        if event.is_directory:
            self.logger.debug("modified directory {}, depth {}".format(event.src_path, self.depth(event.src_path)))

    def on_deleted(self, event):
        if event.is_directory:
            self.logger.debug("deleted directory {}, depth {}".format(event.src_path, self.depth(event.src_path)))
            if 1 <= self.depth(event.src_path) <= 2:
                self.reload_runs()
        elif self.depth(event.src_path) == 3 and event.src_path.endswith('.json'):
            self.logger.debug("deleted file {}, depth {}".format(event.src_path, self.depth(event.src_path)))
            self.reload_runs()

    def depth(self, src_path):
        """Depth of ``src_path`` relative to the observed dir:
        1 = experiment, 2 = sample, 3 = file level.

        NOTE(review): assumes POSIX '/' separators and that observed_dir
        occurs only once in the absolute path — confirm for Windows paths.
        """
        src_path = os.path.abspath(src_path)
        return len(src_path.replace(self.observed_dir, '').strip('/').split('/'))

    def reload_runs(self):
        """Drop all non-QC runs from the shared registry and re-import them
        from disk, then request an overview page refresh."""
        ALL_RUNS_LOCK.acquire()
        self.logger.info('deleting and re-importing all runs due to changes in the run directory')
        # delete sequencing runs
        to_delete = []
        for asic_id_eeprom in ALL_RUNS:
            for run_id in ALL_RUNS[asic_id_eeprom]:
                if 'qc' not in ALL_RUNS[asic_id_eeprom][run_id]['run_data']['experiment_type']:
                    to_delete.append( (asic_id_eeprom, run_id) )
        for asic_id_eeprom, run_id in to_delete:
            del ALL_RUNS[asic_id_eeprom][run_id]
        #reload runs
        import_runs(self.observed_dir)
        ALL_RUNS_LOCK.release()
        set_update_overview()
        return
def standalone():
    """Console entry point: parse command line arguments and run dominION."""
    args = parse_args()
    main(args)


if __name__ == "__main__":
    standalone()
| MarkusHaak/dominION | dominion/dominion.py | dominion.py | py | 47,728 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "threading.RLock",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "threading.RLock",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "threading.RLock",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "threading.RLock",
... |
32841409829 | import sys
import pymysql
import pymongo
import re
import itertools
import pickle as pkl
import pandas as pd
from pymongo import MongoClient
from collections import defaultdict
from util.preprocessing import *
client = MongoClient('localhost', 27017)
database = client['research']
ptt_posts = database['2018_ptt_posts']
ptt_objects = database['2018_ptt_objects']
ptt_comments = database['2018_ptt_comments']
if __name__ == "__main__":
total_document_len = 0
total_document_count = 0
document_freq = defaultdict(int)
cnt = 0
for post in ptt_posts.find(no_cursor_timeout=True):
sys.stdout.write(f'\r{cnt}')
cnt += 1
post['comments'] = list(ptt_comments.find({'parentID': post['uniID']}, no_cursor_timeout=True))
post['comments_count'] = len(post['comments'])
content = post['content']
if str(content) != 'nan' and content != None:
sentences = to_sentence(content)
tokenized_content = tokenize(sentences, load_stopwords(), re.compile('[\Wa-zA-Z0-9]+'))
post['sentence'] = " ".join(sentences)
post['tokenized_content'] = tokenized_content
post['keywords'] = defaultdict(int)
for term in itertools.chain.from_iterable(tokenized_content):
document_freq[term] += 1
post['keywords'][term] += 1
post['words_count'] = len(post['keywords'])
total_document_len += post['words_count']
total_document_count += 1
ptt_posts.update_one({'_id': post['_id']}, {'$set': post}, upsert=False)
with open('document_freq.pkl', 'wb') as f:
pkl.dump(document_freq, f)
with open('ptt_log.txt', 'w') as f:
f.write(f'total_document_length: {total_document_len}\n')
f.write(f'total_document_count: {total_document_count}\n')
f.write(f'avg_document_length: {total_document_len / total_document_count}\n')
| kartd0094775/IdentifyKOL | preprocess_ptt.py | preprocess_ptt.py | py | 1,961 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sys.stdo... |
71577995707 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test that the ``Oxentiel`` class loads dictionaries correctly. """
from typing import Dict, Any
from hypothesis import given
from oxentiel import Oxentiel
from oxentiel.tests import strategies
# pylint: disable=no-value-for-parameter
@given(strategies.settings_dicts())
def test_ox_adds_all_keys_from_nested_dicts(settings: Dict[str, Any]) -> None:
    """ Test that all keys are added when the dictionary is nested. """
    ox = Oxentiel(settings)

    def check_keys(mapping: Dict[str, Any], ox: Oxentiel) -> None:
        """ Recursively assert that every (nested) key exists on ``ox``. """
        for key, value in mapping.items():
            if isinstance(value, dict):
                # Recurse into the corresponding nested Oxentiel attribute.
                check_keys(value, getattr(ox, key))
            assert key in ox.keys()

    check_keys(settings, ox)
@given(strategies.settings_dicts())
def test_ox_attributes_get_set(settings: Dict[str, Any]) -> None:
    """ Test that all keys are set as attributes. """
    ox = Oxentiel(settings)

    def check_attributes(mapping: Dict[str, Any], ox: Oxentiel) -> None:
        """ Recursively assert that every (nested) key is an attribute of ``ox``. """
        for key, value in mapping.items():
            if isinstance(value, dict):
                # Recurse into the corresponding nested Oxentiel attribute.
                check_attributes(value, getattr(ox, key))
            assert hasattr(ox, key)

    check_attributes(settings, ox)
def test_ox_settings_passed_by_value() -> None:
    """ Test that modifying ``Oxentiel.settings`` doesn't change the argument dict. """
    original = {"key": {"subkey": [1, 2]}}
    ox = Oxentiel(original)
    # Mutate the caller's dict after construction; ox must hold its own copy.
    original["key"]["subkey"].append(3)
    assert 3 not in ox.key.subkey
@given(strategies.settings_dicts())
def test_ox_repr_prints_everything(settings: Dict[str, Any]) -> None:
    """ Test that every key appears in the string representation. """
    ox_repr = repr(Oxentiel(settings))
    print(ox_repr)
    for key in settings:
        # repr(key) includes quotes, so we match the quoted form.
        assert repr(key) in ox_repr
| langfield/oxentiel | oxentiel/tests/test_oxentiel.py | test_oxentiel.py | py | 1,960 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Dict",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "oxentiel.Oxentiel",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_numb... |
10129406269 | from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandHelp
from loader import dp
@dp.message_handler(CommandHelp())
async def bot_help(message: types.Message):
    """Handle /help: reply with the list of supported bot commands.

    The user-facing strings are intentionally in Russian.
    """
    text = ("Список команд: ",
            "/start - Начать диалог",
            "/help - Получить справку",
            "/ref - Получить информацию о реферальной программе",
            "/cancel - Выйти из режима покупки товара, если что-то пошло не так",
            "\nДля выборов товаров вы можете использовать инлайн-режим.\n",
            "Для этого введите в любом диалоге: @имя_бота")
    await message.answer("\n".join(text))
| nekitmish/RefShop | handlers/users/help.py | help.py | py | 835 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "aiogram.types.Message",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "loader.dp.message_handler",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "loade... |
11685423386 | import pandas as pd
import numpy as np
import random
import time
import os
import gc
import pickle
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
import lightgbm as lgb
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
N_SPLITS = 5
N_ESTIMATORS = 20000
EARLY_STOPPING_ROUNDS = 200
VERBOSE = 1000
SEED = 2021
def seed_everything(seed=42):
    """Make runs reproducible: seed Python's RNG, the hash seed and numpy."""
    np.random.seed(seed)
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
seed_everything(SEED)
# Load the competition data.
# BUGFIX: the submission template was read with an undefined "INPUT" prefix
# (NameError at runtime); read it from the working directory like the others.
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
submission = pd.read_csv("sample_solution.csv")

# Feature columns are named f0, f1, ...; the binary target is 'claim'.
features = [col for col in test.columns if 'f' in col]
TARGET = 'claim'
target = train[TARGET].copy()
# Simple feature engineering: per-row missing-value count and std deviation.
train['n_missing'] = train[features].isna().sum(axis=1)
test['n_missing'] = test[features].isna().sum(axis=1)
train['std'] = train[features].std(axis=1)
test['std'] = test[features].std(axis=1)
features += ['n_missing', 'std']
# Kept (pre-imputation) for stratifying the CV folds below.
n_missing = train['n_missing'].copy()

# Mean-impute missing values, then standardize all features.
# NOTE(review): test is imputed with its own column means while the scaler is
# fitted on train — confirm this mixed policy is intended.
train[features] = train[features].fillna(train[features].mean())
test[features] = test[features].fillna(test[features].mean())
scaler = StandardScaler()
train[features] = scaler.fit_transform(train[features])
test[features] = scaler.transform(test[features])

# LightGBM hyperparameters for the binary claim classifier.
lgb_params = {
    'objective': 'binary',
    'n_estimators': N_ESTIMATORS,
    'random_state': SEED,
    'learning_rate': 5e-3,
    'subsample': 0.6,
    'subsample_freq': 1,
    'colsample_bytree': 0.4,
    'reg_alpha': 10.0,
    'reg_lambda': 1e-1,
    'min_child_weight': 256,
    'min_child_samples': 20,
    'importance_type': 'gain',
}

lgb_oof = np.zeros(train.shape[0])   # out-of-fold predictions on train
lgb_pred = np.zeros(test.shape[0])   # test predictions averaged over folds
lgb_importances = pd.DataFrame()     # per-fold feature importances

# Folds are stratified on the missing-value count rather than the target.
skf = StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=SEED)
for fold, (trn_idx, val_idx) in enumerate(skf.split(X=train, y=n_missing)):
    print(f"===== fold {fold} =====")
    X_train = train[features].iloc[trn_idx]
    y_train = target.iloc[trn_idx]
    X_valid = train[features].iloc[val_idx]
    y_valid = target.iloc[val_idx]
    X_test = test[features]
    start = time.time()
    model = lgb.LGBMClassifier(**lgb_params)
    # Train with early stopping on the validation fold's AUC.
    model.fit(
        X_train,
        y_train,
        eval_set=[(X_valid, y_valid)],
        eval_metric='auc',
        early_stopping_rounds=EARLY_STOPPING_ROUNDS,
        verbose=VERBOSE,
    )
    # Collect gain-based feature importances for this fold.
    fi_tmp = pd.DataFrame()
    fi_tmp['feature'] = model.feature_name_
    fi_tmp['importance'] = model.feature_importances_
    fi_tmp['fold'] = fold
    fi_tmp['seed'] = SEED
    lgb_importances = lgb_importances.append(fi_tmp)
    # Out-of-fold probabilities and the fold's share of the test prediction.
    lgb_oof[val_idx] = model.predict_proba(X_valid)[:, -1]
    lgb_pred += model.predict_proba(X_test)[:, -1] / N_SPLITS
    elapsed = time.time() - start
    auc = roc_auc_score(y_valid, lgb_oof[val_idx])
    print(f"fold {fold} - lgb auc: {auc:.6f}, elapsed time: {elapsed:.2f}sec\n")

print(f"oof lgb roc = {roc_auc_score(target, lgb_oof)}")
# Persist predictions, the submission file, and the last fold's model.
np.save("lgb_oof.npy", lgb_oof)
np.save("lgb_pred.npy", lgb_pred)
submission[TARGET] = lgb_pred
submission.to_csv("submission.csv", index=False)
with open('model_LGBMClassifier.pkl','wb') as file:
    file.write(pickle.dumps(model))
| leokri89/ml-codebase | models_sample/lightgbm.py | lightgbm.py | py | 3,278 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "warnings.simplefilter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",... |
43975077800 | import mock
import uuid
from contextlib import contextmanager
import webtest
from pyramid.config import Configurator
from cliquet.events import (ResourceChanged, AfterResourceChanged,
ResourceRead, AfterResourceRead, ACTIONS)
from cliquet.storage.exceptions import BackendError
from cliquet.tests.testapp import main as testapp
from cliquet.tests.support import unittest, BaseWebTest, get_request_class
from cliquet import statsd
@contextmanager
def notif_broken(app, event_cls):
    """Temporarily sabotage ``app.registry.notify``.

    While the context is active, notifying an instance of *event_cls*
    raises; every other event is forwarded to the original notifier.
    The original notifier is restored on normal exit.
    """
    original_notify = app.registry.notify

    def exploding_notify(event):
        if isinstance(event, event_cls):
            raise Exception("boom")
        return original_notify(event)

    app.registry.notify = exploding_notify
    yield
    app.registry.notify = original_notify
class BaseEventTest(BaseWebTest):
    """Common scaffolding for event tests.

    Subclasses list in ``subscribed`` the event classes they want delivered
    to ``self.listener``; every received event is collected in ``self.events``.
    """

    # Event classes `make_app` subscribes `self.listener` to.
    subscribed = tuple()

    def setUp(self):
        super(BaseEventTest, self).setUp()
        self.events = []
        self.body = {'data': {'name': 'de Paris'}}

    def tearDown(self):
        self.events = []
        super(BaseEventTest, self).tearDown()

    def listener(self, event):
        # Collect everything; individual tests assert on self.events.
        self.events.append(event)

    def make_app(self, settings=None):
        # Build a fresh test app with `self.listener` subscribed to each
        # event class declared in `subscribed`.
        settings = self.get_app_settings(settings)
        self.config = Configurator(settings=settings)
        for event_cls in self.subscribed:
            self.config.add_subscriber(self.listener, event_cls)
        self.config.commit()
        app = testapp(config=self.config)
        app = webtest.TestApp(app)
        app.RequestClass = get_request_class(self.api_prefix)
        return app
class ResourceReadTest(BaseEventTest, unittest.TestCase):
    """ResourceRead must fire on GETs and on id-conflicting POSTs."""

    subscribed = (ResourceRead,)

    def test_get_sends_read_event(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record_id = resp.json['data']['id']
        record_url = self.get_item_url(record_id)
        self.app.get(record_url, headers=self.headers)
        # Only the GET is a read; the preparatory POST emits ResourceChanged,
        # which this class is not subscribed to.
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'], ACTIONS.READ.value)
        self.assertEqual(len(self.events[0].read_records), 1)

    def test_collection_get_sends_read_event(self):
        self.app.get(self.collection_url, headers=self.headers)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'], ACTIONS.READ.value)
        # Empty collection: a read event carrying no records.
        self.assertEqual(len(self.events[0].read_records), 0)

    def test_post_sends_read_if_id_already_exists(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record = resp.json['data']
        body = dict(self.body)
        body['data']['id'] = record['id']
        # a second post with the same record id
        self.app.post_json(self.collection_url, body, headers=self.headers,
                           status=200)
        # The id collision turns the POST into a read of the existing record.
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'], ACTIONS.READ.value)
class ResourceChangedTest(BaseEventTest, unittest.TestCase):
    """ResourceChanged must carry the right action for each write verb."""

    subscribed = (ResourceChanged,)

    def test_post_sends_create_action(self):
        self.app.post_json(self.collection_url, self.body,
                           headers=self.headers, status=201)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)

    def test_put_sends_create_action(self):
        # PUT on a fresh id behaves like a creation.
        body = dict(self.body)
        body['data']['id'] = record_id = str(uuid.uuid4())
        record_url = self.get_item_url(record_id)
        self.app.put_json(record_url, body,
                          headers=self.headers, status=201)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)

    def test_not_triggered_on_failed_put(self):
        record_id = str(uuid.uuid4())
        record_url = self.get_item_url(record_id)
        self.app.put_json(record_url, self.body, headers=self.headers)
        headers = self.headers.copy()
        # Stale concurrency token: the second PUT fails with 412 and must
        # not emit another event.
        headers['If-Match'] = '"12345"'
        self.app.put_json(record_url, self.body, headers=headers, status=412)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)

    def test_patch_sends_update_action(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record = resp.json['data']
        record_url = self.get_item_url(record['id'])
        self.app.patch_json(record_url, self.body, headers=self.headers,
                            status=200)
        # One event for the POST (create), one for the PATCH (update).
        self.assertEqual(len(self.events), 2)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)
        self.assertEqual(self.events[1].payload['action'],
                         ACTIONS.UPDATE.value)

    def test_put_sends_update_action_if_record_exists(self):
        body = dict(self.body)
        body['data']['id'] = record_id = str(uuid.uuid4())
        record_url = self.get_item_url(record_id)
        self.app.put_json(record_url, body,
                          headers=self.headers, status=201)
        body['data']['more'] = 'stuff'
        self.app.put_json(record_url, body,
                          headers=self.headers, status=200)
        self.assertEqual(len(self.events), 2)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)
        self.assertEqual(self.events[1].payload['action'],
                         ACTIONS.UPDATE.value)

    def test_delete_sends_delete_action(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record = resp.json['data']
        record_url = self.get_item_url(record['id'])
        self.app.delete(record_url, headers=self.headers, status=200)
        self.assertEqual(len(self.events), 2)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)
        self.assertEqual(self.events[1].payload['action'],
                         ACTIONS.DELETE.value)

    def test_collection_delete_sends_delete_action(self):
        self.app.post_json(self.collection_url, self.body,
                           headers=self.headers, status=201)
        self.app.post_json(self.collection_url, self.body,
                           headers=self.headers, status=201)
        self.app.delete(self.collection_url, headers=self.headers, status=200)
        # Collection DELETE emits a single delete event for both records.
        self.assertEqual(len(self.events), 3)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)
        self.assertEqual(self.events[1].payload['action'],
                         ACTIONS.CREATE.value)
        self.assertEqual(self.events[2].payload['action'],
                         ACTIONS.DELETE.value)

    def test_request_fails_if_notify_fails(self):
        # ResourceChanged is delivered synchronously: a failing subscriber
        # turns the whole request into a 500 and no event is collected.
        with notif_broken(self.app.app, ResourceChanged):
            self.app.post_json(self.collection_url, self.body,
                               headers=self.headers, status=500)
        self.assertEqual(len(self.events), 0)

    def test_triggered_on_protected_resource(self):
        app = self.make_app(settings={
            'psilo_write_principals': 'system.Authenticated'
        })
        app.post_json('/psilos', self.body,
                      headers=self.headers, status=201)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)

    def test_permissions_are_stripped_from_event_on_protected_resource(self):
        app = self.make_app(settings={
            'psilo_write_principals': 'system.Authenticated'
        })
        resp = app.post_json('/psilos', self.body,
                             headers=self.headers, status=201)
        record = resp.json['data']
        record_url = '/psilos/' + record['id']
        app.patch_json(record_url, {"data": {"name": "De barcelona"}},
                       headers=self.headers)
        # Internal ACL bookkeeping must never leak into event payloads.
        impacted_records = self.events[-1].impacted_records
        self.assertNotIn('__permissions__', impacted_records[0]['new'])
        self.assertNotIn('__permissions__', impacted_records[0]['old'])
class AfterResourceChangedTest(BaseEventTest, unittest.TestCase):
    """AfterResourceChanged failures must not fail the request (post-commit)."""

    subscribed = (AfterResourceChanged,)

    def test_request_succeeds_if_notify_fails(self):
        with notif_broken(self.app.app, AfterResourceChanged):
            self.app.post_json(self.collection_url, self.body,
                               headers=self.headers)
        # Delivery blew up before reaching our listener: nothing collected,
        # yet the POST above succeeded.
        self.assertEqual(len(self.events), 0)
class AfterResourceReadTest(BaseEventTest, unittest.TestCase):
    """AfterResourceRead failures must not fail the request."""

    subscribed = (AfterResourceRead,)

    def test_request_succeeds_if_notify_fails(self):
        # NOTE(review): this breaks AfterResourceChanged and performs a POST,
        # which does not emit AfterResourceRead at all — it looks copy-pasted
        # from AfterResourceChangedTest. Breaking AfterResourceRead around a
        # GET would exercise the intended scenario; confirm before changing.
        with notif_broken(self.app.app, AfterResourceChanged):
            self.app.post_json(self.collection_url, self.body,
                               headers=self.headers)
        self.assertEqual(len(self.events), 0)
class ImpactedRecordsTest(BaseEventTest, unittest.TestCase):
    """Shape of ``impacted_records`` ({'old': ..., 'new': ...}) per action."""

    subscribed = (ResourceChanged,)

    def test_create_has_new_record_and_no_old_in_payload(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers)
        record = resp.json['data']
        impacted_records = self.events[-1].impacted_records
        self.assertEqual(len(impacted_records), 1)
        self.assertNotIn('old', impacted_records[0])
        self.assertEqual(impacted_records[0]['new'], record)

    def test_collection_delete_has_old_record_and_no_new_in_payload(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers)
        record1 = resp.json['data']
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers)
        record2 = resp.json['data']
        self.app.delete(self.collection_url, headers=self.headers, status=200)
        impacted_records = self.events[-1].impacted_records
        self.assertEqual(len(impacted_records), 2)
        self.assertNotIn('new', impacted_records[0])
        self.assertNotIn('new', impacted_records[1])
        # Deleted records come back as tombstones.
        self.assertEqual(impacted_records[0]['old']['deleted'], True)
        self.assertEqual(impacted_records[1]['old']['deleted'], True)
        # Deletion order is not guaranteed; compare ids as a set.
        deleted_ids = {impacted_records[0]['old']['id'],
                       impacted_records[1]['old']['id']}
        self.assertEqual(deleted_ids, {record1['id'], record2['id']})

    def test_update_has_old_and_new_record(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record = resp.json['data']
        record_url = self.get_item_url(record['id'])
        self.app.patch_json(record_url, {'data': {'name': 'en boite'}},
                            headers=self.headers)
        impacted_records = self.events[-1].impacted_records
        self.assertEqual(len(impacted_records), 1)
        self.assertEqual(impacted_records[0]['new']['id'], record['id'])
        self.assertEqual(impacted_records[0]['new']['id'],
                         impacted_records[0]['old']['id'])
        # Old/new snapshots bracket the PATCH.
        self.assertEqual(impacted_records[0]['old']['name'], 'de Paris')
        self.assertEqual(impacted_records[0]['new']['name'], 'en boite')

    def test_delete_has_old_record_and_no_new_in_payload(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record = resp.json['data']
        record_url = self.get_item_url(record['id'])
        self.app.delete(record_url, headers=self.headers, status=200)
        impacted_records = self.events[-1].impacted_records
        self.assertEqual(len(impacted_records), 1)
        self.assertNotIn('new', impacted_records[0])
        self.assertEqual(impacted_records[0]['old']['id'], record['id'])
        self.assertEqual(impacted_records[0]['old']['deleted'], True)
class BatchEventsTest(BaseEventTest, unittest.TestCase):
    """Batch requests group notifications per (resource, action) pair."""

    subscribed = (ResourceChanged, ResourceRead)

    def test_impacted_records_are_merged(self):
        record_id = str(uuid.uuid4())
        record_url = self.get_item_url(record_id)
        body = {
            "defaults": {
                "method": "PUT",
                "path": record_url
            },
            "requests": [
                {"body": {'data': {'name': 'foo'}}},
                {"body": {'data': {'name': 'bar'}}},
                {"body": {'data': {'name': 'baz'}}},
                {"method": "DELETE"}
            ]
        }
        self.app.post_json("/batch", body, headers=self.headers)
        # Four sub-requests collapse into three events:
        # one create, one update carrying both updates, one delete.
        self.assertEqual(len(self.events), 3)
        create_event = self.events[0]
        self.assertEqual(create_event.payload['action'], 'create')
        self.assertEqual(len(create_event.impacted_records), 1)
        self.assertNotIn('old', create_event.impacted_records[0])
        update_event = self.events[1]
        self.assertEqual(update_event.payload['action'], 'update')
        impacted = update_event.impacted_records
        self.assertEqual(len(impacted), 2)
        self.assertEqual(impacted[0]['old']['name'], 'foo')
        self.assertEqual(impacted[0]['new']['name'], 'bar')
        self.assertEqual(impacted[1]['old']['name'], 'bar')
        self.assertEqual(impacted[1]['new']['name'], 'baz')
        delete_event = self.events[2]
        self.assertEqual(delete_event.payload['action'], 'delete')
        self.assertEqual(len(delete_event.impacted_records), 1)
        self.assertNotIn('new', delete_event.impacted_records[0])

    def test_one_event_is_sent_per_resource(self):
        # Two resources touched (mushrooms, psilos) -> two events, even
        # though mushrooms received two creations.
        body = {
            "defaults": {
                "method": "POST",
                "body": self.body,
            },
            "requests": [
                {"path": '/mushrooms'},
                {"path": '/mushrooms'},
                {"path": '/psilos'},
            ]
        }
        self.app.post_json("/batch", body, headers=self.headers)
        self.assertEqual(len(self.events), 2)

    def test_one_event_is_sent_per_action(self):
        # Same resource, three distinct actions -> three events.
        body = {
            "defaults": {
                "path": '/mushrooms',
            },
            "requests": [
                {"method": "POST", "body": self.body},
                {"method": "DELETE"},
                {"method": "GET"},
            ]
        }
        self.app.post_json("/batch", body, headers=self.headers)
        self.assertEqual(len(self.events), 3)

    def test_events_are_not_sent_if_subrequest_fails(self):
        # Make the collection delete blow up at the storage layer.
        patch = mock.patch.object(self.storage,
                                  'delete_all',
                                  side_effect=BackendError('boom'))
        patch.start()
        self.addCleanup(patch.stop)
        request_create = {
            "method": "POST",
            "body": self.body,
        }
        request_delete_all = {
            "method": "DELETE",
            "body": self.body,
        }
        body = {
            "defaults": {
                "path": self.collection_url
            },
            "requests": [request_create, request_delete_all]
        }
        self.app.post_json("/batch", body, headers=self.headers,
                           status=503)
        # The batch is all-or-nothing: the earlier create emits nothing.
        self.assertEqual(len(self.events), 0)
def load_from_config(config, prefix):
    """Listener factory referenced by the ``event_listeners.test.use`` setting.

    Returns a callable listener object that silently ignores every event;
    *config* and *prefix* are required by the loader contract but unused.
    """
    class _NoopListener(object):
        def __call__(self, event):
            return None

    return _NoopListener()
@unittest.skipIf(not statsd.statsd_module, "statsd is not installed.")
class StatsDTest(BaseWebTest, unittest.TestCase):
    """Listener execution must be timed through the statsd client."""

    def get_app_settings(self, *args, **kwargs):
        settings = super(StatsDTest, self).get_app_settings(*args, **kwargs)
        if not statsd.statsd_module:
            return settings
        settings['statsd_url'] = 'udp://localhost:8125'
        # Register this very module as an event listener, so its
        # `load_from_config` factory above is exercised by the app.
        this_module = 'cliquet.tests.resource.test_events'
        settings['event_listeners'] = 'test'
        settings['event_listeners.test.use'] = this_module
        return settings

    def test_statds_tracks_listeners_execution_duration(self):
        statsd_client = self.app.app.registry.statsd._client
        with mock.patch.object(statsd_client, 'timing') as mocked:
            self.app.post_json(self.collection_url,
                               {"data": {"name": "pouet"}},
                               headers=self.headers)
            # First positional arg of each timing() call is the timer name.
            timers = set(c[0][0] for c in mocked.call_args_list)
            self.assertIn('listeners.test', timers)
| mozilla-services/cliquet | cliquet/tests/resource/test_events.py | test_events.py | py | 16,885 | python | en | code | 65 | github-code | 6 | [
{
"api_name": "contextlib.contextmanager",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "cliquet.tests.support.BaseWebTest",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pyramid.config.Configurator",
"line_number": 48,
"usage_type": "call"
},
... |
37788290307 | import yaml
from sigma.parser.condition import ConditionAND, ConditionOR
from sigma.config.exceptions import SigmaConfigParseError
from sigma.config.mapping import FieldMapping
# Configuration
class SigmaConfiguration:
    """Sigma converter configuration. Contains field mappings and logsource descriptions"""

    def __init__(self, configyaml=None):
        if configyaml == None:
            # Empty configuration: identity field mapping, no log sources.
            self.config = None
            self.fieldmappings = dict()
            self.logsources = dict()
            self.logsourcemerging = SigmaLogsourceConfiguration.MM_AND
            self.defaultindex = None
            self.backend = None
        else:
            config = yaml.safe_load(configyaml)
            self.config = config

            self.fieldmappings = dict()
            try:
                for source, target in config['fieldmappings'].items():
                    self.fieldmappings[source] = FieldMapping(source, target)
            except KeyError:
                # No 'fieldmappings' section: keep the empty mapping.
                pass
            if type(self.fieldmappings) != dict:
                raise SigmaConfigParseError("Fieldmappings must be a map")

            try:
                self.logsourcemerging = config['logsourcemerging']
            except KeyError:
                self.logsourcemerging = SigmaLogsourceConfiguration.MM_AND

            try:
                self.defaultindex = config['defaultindex']
            except KeyError:
                self.defaultindex = None

            # Log sources are resolved lazily in set_backend(), because the
            # index field name depends on the chosen backend.
            self.logsources = list()
            self.backend = None

    def get_fieldmapping(self, fieldname):
        """Return mapped fieldname if mapping defined or field name given in parameter value"""
        try:
            return self.fieldmappings[fieldname]
        except KeyError:
            # Unmapped field: identity mapping.
            return FieldMapping(fieldname)

    def get_logsource(self, category, product, service):
        """Return merged log source definition of all logosurces that match criteria"""
        matching = [logsource for logsource in self.logsources if logsource.matches(category, product, service)]
        return SigmaLogsourceConfiguration(matching, self.defaultindex)

    def set_backend(self, backend):
        """Set backend. This is used by other code to determine target properties for index addressing"""
        self.backend = backend
        if self.config != None:
            if 'logsources' in self.config:
                logsources = self.config['logsources']
                if type(logsources) != dict:
                    raise SigmaConfigParseError("Logsources must be a map")
                # NOTE: repeated set_backend() calls keep appending to
                # self.logsources — presumably called once per run.
                for name, logsource in logsources.items():
                    self.logsources.append(SigmaLogsourceConfiguration(logsource, self.defaultindex, name, self.logsourcemerging, self.get_indexfield()))

    def get_indexfield(self):
        """Get index condition if index field name is configured"""
        # Implicitly returns None when no backend has been set yet.
        if self.backend != None:
            return self.backend.index_field
class SigmaLogsourceConfiguration:
    """Contains the definition of a log source"""

    MM_AND = "and"  # Merge all conditions with AND
    MM_OR = "or"    # Merge all conditions with OR

    def __init__(self, logsource=None, defaultindex=None, name=None, mergemethod=MM_AND, indexfield=None):
        # Three construction modes: empty (logsource=None), merge of existing
        # instances (list), or parse from a YAML-derived dict.
        self.name = name
        self.indexfield = indexfield
        if logsource == None:  # create empty object
            self.category = None
            self.product = None
            self.service = None
            self.index = list()
            self.conditions = None
        elif type(logsource) == list and all([isinstance(o, SigmaLogsourceConfiguration) for o in logsource]):  # list of SigmaLogsourceConfigurations: merge according to mergemethod
            # Merge category, product and service
            categories = set([ ls.category for ls in logsource if ls.category != None ])
            products = set([ ls.product for ls in logsource if ls.product != None ])
            services = set([ ls.service for ls in logsource if ls.service != None])
            if len(categories) > 1 or len(products) > 1 or len(services) > 1:
                raise ValueError("Merged SigmaLogsourceConfigurations must have disjunct categories (%s), products (%s) and services (%s)" % (str(categories), str(products), str(services)))
            # pop() on an empty set raises KeyError -> attribute stays None.
            try:
                self.category = categories.pop()
            except KeyError:
                self.category = None
            try:
                self.product = products.pop()
            except KeyError:
                self.product = None
            try:
                self.service = services.pop()
            except KeyError:
                self.service = None

            # Merge all index patterns
            self.index = list(set([index for ls in logsource for index in ls.index]))  # unique(flat(logsources.index))
            if len(self.index) == 0 and defaultindex is not None:  # if no index pattern matched and default index is present: use default index
                if type(defaultindex) == str:
                    self.index = [defaultindex]
                elif type(defaultindex) == list and all([type(i) == str for i in defaultindex]):
                    self.index = defaultindex
                else:
                    raise TypeError("Default index must be string or list of strings")

            # "merge" index field (should never differ between instances because it is provided by backend class
            indexfields = [ ls.indexfield for ls in logsource if ls.indexfield != None ]
            try:
                self.indexfield = indexfields[0]
            except IndexError:
                self.indexfield = None

            # Merge conditions according to mergemethod
            if mergemethod == self.MM_AND:
                cond = ConditionAND()
            elif mergemethod == self.MM_OR:
                cond = ConditionOR()
            else:
                raise ValueError("Mergemethod must be '%s' or '%s'" % (self.MM_AND, self.MM_OR))
            for ls in logsource:
                if ls.conditions != None:
                    cond.add(ls.conditions)
            if len(cond) > 0:
                self.conditions = cond
            else:
                self.conditions = None
        elif type(logsource) == dict:  # create logsource configuration from parsed yaml
            if 'category' in logsource and type(logsource['category']) != str \
                    or 'product' in logsource and type(logsource['product']) != str \
                    or 'service' in logsource and type(logsource['service']) != str:
                raise SigmaConfigParseError("Logsource category, product or service must be a string")
            try:
                self.category = logsource['category']
            except KeyError:
                self.category = None
            try:
                self.product = logsource['product']
            except KeyError:
                self.product = None
            try:
                self.service = logsource['service']
            except KeyError:
                self.service = None
            if self.category == None and self.product == None and self.service == None:
                raise SigmaConfigParseError("Log source definition will not match")

            if 'index' in logsource:
                index = logsource['index']
                if type(index) not in (str, list):
                    raise SigmaConfigParseError("Logsource index must be string or list of strings")
                # NOTE: the comprehension variable `index` shadows the outer
                # `index`; harmless here, but confusing.
                if type(index) == list and not all([type(index) == str for index in logsource['index']]):
                    raise SigmaConfigParseError("Logsource index patterns must be strings")
                if type(index) == list:
                    self.index = index
                else:
                    self.index = [ index ]
            else:
                # no default index handling here - this branch is executed if log source definitions are parsed from
                # config and these must not necessarily contain an index definition. A valid index may later be result
                # from a merge, where default index handling applies.
                self.index = []

            if 'conditions' in logsource:
                if type(logsource['conditions']) != dict:
                    raise SigmaConfigParseError("Logsource conditions must be a map")
                cond = ConditionAND()
                for key, value in logsource['conditions'].items():
                    cond.add((key, value))
                self.conditions = cond
            else:
                self.conditions = None
        else:
            raise SigmaConfigParseError("Logsource definitions must be maps")

    def matches(self, category, product, service):
        """Match log source definition against given criteria, None = ignore"""
        searched = 0
        for searchval, selfval in zip((category, product, service), (self.category, self.product, self.service)):
            if searchval == None and selfval != None:
                return False
            if selfval != None:
                searched += 1
                if searchval != selfval:
                    return False
        # NOTE(review): when nothing was compared (searched == 0) this falls
        # through and returns None (falsy) rather than an explicit False.
        if searched:
            return True

    def get_indexcond(self):
        """Get index condition if index field name is configured"""
        cond = ConditionOR()
        if self.indexfield:
            for index in self.index:
                cond.add((self.indexfield, index))
            return cond
        else:
            return None

    def __str__(self):
        return "[ LogSourceConfiguration: %s %s %s indices: %s ]" % (self.category, self.product, self.service, str(self.index))
| socprime/soc_workflow_app_ce | soc_workflow_ce/server/translation_script/sigma/tools/sigma/configuration.py | configuration.py | py | 9,714 | python | en | code | 91 | github-code | 6 | [
{
"api_name": "yaml.safe_load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sigma.config.mapping.FieldMapping",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sigma.config.exceptions.SigmaConfigParseError",
"line_number": 28,
"usage_type": "call"
... |
25136195461 | from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, DateTime, Float, and_, or_
from sqlalchemy.dialects import postgresql
from geoalchemy2 import Geometry
from geoalchemy2.functions import GenericFunction, ST_AsMVTGeom, ST_TileEnvelope
from sqlalchemy.dialects.postgresql import BYTEA, JSONB
import mercantile
from functools import partial
import pyproj
from shapely.ops import transform
from shapely.geometry import shape, MultiPolygon, MultiPoint, MultiLineString
from flask_gzip import Gzip
import os
import json
import numpy
from redis import Redis
from flask import Flask
from flask import request, make_response
from flask import jsonify
from flask_cors import CORS
import xxhash
import importlib
# Runtime configuration, all overridable via environment variables.
# NOTE(review): real-looking host/credentials are committed as defaults here —
# they should be rotated and removed from source control.
POSTGRES = os.environ.get('POSTGRES', '51.15.160.236:25432')
POSTGRES_USER = os.environ.get('POSTGRES_USER', 'admin')
POSTGRES_PASS = os.environ.get('POSTGRES_PASS', 'tgZWW3Dgze94FN9O')
POSTGRES_DBNAME = os.environ.get('POSTGRES_DBNAME', 'ohm')
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = int(os.environ.get('REDIS_PORT', '6379'))
REDIS_DB = int(os.environ.get('REDIS_DB', '3'))
# Layers served from the fine-grained ("ephemeral") time series.
TILES_LAYERS=os.environ.get('TILES_LAYERS', 'boundary,boundary_label,culture,culture_label,waterway,water,building,industrial,landuse,transportation,place,religion')
EXPANDER = os.environ.get('EXPANDER', 'https://github.com/openhistorymap/mapstyles/blob/master/expander.json')
# Time steps as fractions of a year: ~1/372 ("day-ish") and 1/12 (month).
EPHEMERAL_STEP = float(1)/12/31
PERSIST_STEP = float(1)/12
def get_step(layer):
    """Return the temporal bucketing step (fraction of a year) for *layer*.

    NOTE(review): both branches currently return EPHEMERAL_STEP, so the
    TILES_LAYERS membership test has no effect and PERSIST_STEP is unused.
    The else branch presumably should return PERSIST_STEP — confirm before
    changing, since it would alter tile bucketing.
    """
    if layer in TILES_LAYERS.split(','):
        return EPHEMERAL_STEP
    else:
        return EPHEMERAL_STEP
def get_month(x, year_step = EPHEMERAL_STEP):
    """Snap fractional year *x* down to the step boundary just below it.

    Builds the grid of 12*31 boundaries starting at int(x) and returns the
    grid value immediately preceding x in sorted order.
    """
    # NOTE: the inner comprehension variable `x` shadows the parameter; the
    # boundaries are int(x) + k*year_step for k in 0..371.
    yr = [int(x)+y for y in [x*year_step for x in range(0,12*31)]]
    nyr = sorted(yr + [x])
    i = nyr.index(x)
    # NOTE(review): when x is exactly int(x), nyr.index(x) is 0 and yr[i-1]
    # wraps around to the last boundary — likely unintended; confirm.
    return yr[i-1]
class ST_AsMVT(GenericFunction):
    # Registers the PostGIS ST_AsMVT aggregate with geoalchemy2 so queries
    # can call it; the aggregate yields the raw MVT protobuf as bytes.
    type = BYTEA
def float_to_date(f):
    """Convert a fractional-year float into a 'Y-M-D' date string.

    The fractional part encodes the month (1/12 steps) and the day within
    the month (1/30 steps). Months and days are 1-based.
    """
    y = int(f)
    m = int((f-y)*12)+1
    # FIX: +1 makes the day 1-based; the previous expression yielded day 0
    # for instants at the start of a month, which is not a valid calendar day.
    d = int(((f-y)*12-(m-1))*30) + 1
    return '{}-{}-{}'.format(y,m,d)
def create_app(test_config=None):
    """Application factory: builds the Flask app, its DB/Redis connections,
    table metadata and all HTTP routes."""
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)

    # pool_pre_ping should help handle DB connection drops
    engine = create_engine('postgresql://{}:{}@{}/{}'.format(
        POSTGRES_USER, POSTGRES_PASS, POSTGRES, POSTGRES_DBNAME
    ), pool_size=20, max_overflow=0, pool_pre_ping=True,
        echo=False)
    db = engine.connect()
    r = Redis(REDIS_HOST, REDIS_PORT, REDIS_DB)

    # Hard-coded style "expander": per-layer property enrichment keyed on a
    # feature property value (e.g. boundary name -> render colour).
    # NOTE(review): the EXPANDER module constant (a URL) is never fetched.
    exp = {
        "boundary": {
            "name": {
                "Rome": {
                    "color": "#8e001c"
                },
                "Lotharingia": {
                    "color": "#ddb318"
                },
                "Kingdom of Italy": {
                    "color": "#6397d0"
                }
            }
        }
    }

    metadata = MetaData()
    # Web-Mercator (EPSG:3857) feature store, one row per geometry with a
    # temporal validity range [ohm_from, ohm_to] in fractional years.
    ohm_items = Table('items', metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('ohm_from', Float, index=True),
        Column('ohm_to', Float, index=True),
        Column('layer', String, index=True),
        Column('properties', JSONB),
        Column('geom', Geometry(geometry_type='GEOMETRY', srid=3857)),
        Column('author', String, default='ohm'),
    )
    ohm_rels = Table('relations', metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('ohm_from', Float, index=True),
        Column('ohm_to', Float, index=True),
        Column('layer', String, index=True),
        Column('properties', JSONB),
        Column('geom', Geometry(geometry_type='GEOMETRY', srid=3857)),
        Column('author', String, default='ohm'),
    )
    ohm_rel_members = Table('relation_members', metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('relation', Integer, index=True),
        Column('item', Integer, index=True),
        Column('role', String, index=True),
        Column('author', String, default='ohm'),
    )
    # NOTE(review): `ohm_items_members` is assigned twice — the 'item_node'
    # Table object below is immediately shadowed by the 'item_arc' one.
    # Both remain registered in `metadata`, but only 'item_arc' stays
    # reachable through the variable; looks like a copy-paste slip.
    ohm_items_members = Table('item_node', metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('item', Integer, index=True),
        Column('node_id', Integer, index=True),
    )
    ohm_items_members = Table('item_arc', metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('item', Integer, index=True),
        Column('arc_id', Integer, index=True),
    )
    ohm_arcs = Table('arc', metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('node_1', Integer),
        Column('node_2', Integer),
        Column('direction', Integer),
    )
    ohm_points = Table('node', metadata,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('geom', Geometry(geometry_type='POINT', srid=3857)),
    )

    # WGS84 -> Web-Mercator reprojection used on every incoming geometry.
    project = partial(
        pyproj.transform,
        pyproj.Proj(init='epsg:4326'),  # source coordinate system
        pyproj.Proj(init='epsg:3857'))  # destination coordinate system

    @app.route('/')
    def index():
        return ''

    @app.route('/setup')
    def setup():
        # Create all tables declared above (idempotent).
        metadata.create_all(engine)
        return jsonify({'result':'OK'})

    def pimp(data):
        # Enrich feature properties from the `exp` expander table; the
        # incoming properties win on key collisions.
        ret = {}
        if data['layer'] in exp.keys():
            for k in exp[data['layer']]:
                if k in data:
                    ret = exp[data['layer']][k].get(data[k], {})
        out = ret.copy()
        out.update(data)
        return out

    def map_single(jdata):
        # Turn one GeoJSON feature into a list of row dicts for `items`:
        # reproject, enrich properties, add a *_label point for polygons.
        ss = shape(jdata['geometry'])
        ss = transform(project, ss)
        pdata = jdata['properties']
        #print(pdata)
        _from = jdata['from'] if 'from' in jdata else pdata['ohm:from:date']
        _to = jdata['to'] if 'to' in jdata else pdata['ohm:to:date']
        pdata = pimp(pdata)
        cs = [ss]
        if isinstance(ss, (MultiPolygon, MultiLineString, MultiPoint,)):
            cs = [its.buffer(0) for its in ss if its]
        ret = []
        pdata['$area'] = ss.area
        pdata['$length'] = ss.length
        if 'Poly' in ss.geom_type:
            # Label anchor: a representative point inside the polygon.
            ret.append(dict(
                ohm_from = _from,
                ohm_to = _to,
                layer = pdata['layer']+"_label",
                properties = pdata,
                geom = 'SRID=3857;' + ss.representative_point().wkt
            ))
        for s in cs:
            # NOTE(review): this writes `ss.wkt` (the whole multi-geometry)
            # for every part instead of `s.wkt` — each part row carries the
            # full geometry; presumably `s` was intended. Confirm.
            ret.append(dict(
                ohm_from = _from,
                ohm_to = _to,
                layer = pdata['layer'],
                properties = pdata,
                geom = 'SRID=3857;' + ss.wkt
            ))
        return ret

    @app.route('/items', methods=['POST'])
    def saveItem():
        # Accept one feature or a list; rows are queued on the Redis
        # 'store' list for asynchronous insertion (direct DB path disabled).
        data = request.data
        jdata = json.loads(data)
        #print(len(jdata))
        if not isinstance(jdata, list):
            jdata = [jdata]
        jdata = list(map(map_single, jdata))
        #print(len(jdata))
        flat_list = []
        for sublist in jdata:
            for item in sublist:
                flat_list.append(item)
        # NOTE(review): rpush with an empty flat_list raises in redis-py;
        # an empty POST body would 500 here.
        r.rpush('store', *[json.dumps(fi) for fi in flat_list])
        #map(lambda x: db.execute('ohm_storeitem(\'{layer}\', {ohm_from}, {ohm_to}, {properties}, {x})'.format**(x)), flat_list)
        #db.execute(ohm_items.insert(), flat_list)
        return jsonify({'result': 'OK', 'items_added': len(flat_list)})

    def out_rel_feat(r):
        # Build GeoJSON features for a relation: one feature per member plus
        # one LineString joining all member coordinates.
        # NOTE: the parameter `r` shadows the Redis client defined above;
        # here it is a dict with 'itms' (members) and 'rel' (relation id).
        # NOTE(review): this helper is not referenced anywhere in this view.
        rets = []
        f = r['itms']
        n = r['rel']
        pp = f[0].properties
        # Overall validity range spans first member's start to last member's end.
        min_ = f[0].properties['ohm:from:date']
        max_ = f[-1].properties['ohm:to:date']
        pp['ohm:from:date'] = min_
        pp['ohm:from:date:year'] = int(min_)
        pp['ohm:to:date'] = max_
        pp['ohm:to:date:year'] = int(max_)
        pp['relation'] = n
        fp = []
        for fpo in f:
            fpop = fpo.properties
            fpop['name'] = float_to_date(fpop['ohm:from:date'])
            fpop['relation'] = n
            rets.append({
                "type": "Feature",
                "properties": fpop,
                "geometry": json.loads(fpo.gg)
            })
            fp.append(
                json.loads(fpo.gg).get('coordinates'),
            )
        rets.append({
            "type": "Feature",
            "properties": pp,
            "geometry": {
                "type": "LineString",
                "coordinates": fp
            }
        })
        return rets

    @app.route('/relation', methods=['POST'])
    def newRelation():
        # Unlike saveItem, relations are written synchronously to the DB.
        data = request.data
        jdata = json.loads(data)
        if not isinstance(jdata, list):
            jdata = [jdata]
        jdata = list(map(map_single, jdata))
        flat_list = []
        for sublist in jdata:
            for item in sublist:
                flat_list.append(item)
        # NOTE(review): rows are inserted into `items`, not `relations` —
        # confirm whether ohm_rels was intended here.
        x = db.execute(ohm_items.insert(), flat_list)
        print(x)
        return jsonify({'result': 'OK', 'items_added': len(flat_list)})

    @app.route('/bots', methods=['GET'])
    @app.route('/bots/<bot>', methods=['GET'])
    def runBot(bot = 'movement'):
        # Queue a bot job on Redis; a worker process consumes the 'bot' list.
        r.rpush('bot', bot)
        #m = importlib.import_module("bot.{}".format(bot))
        #m.run(db, )
        return jsonify({'result': 'OK'})

    @app.route('/status', methods=['GET'])
    def status():
        # Report queue depths of the two Redis work lists.
        ret = {
            'bots': r.llen('bot'),
            'store': r.llen('store')
        }
        return jsonify({'result': 'OK', 'status': ret})

    # NOTE(review): wide-open CORS on every route — intentional for a public
    # tile API, but worth confirming.
    CORS(app, resources={r"*": {"origins": "*"}})
    Gzip(app)
    return app
# Module-level WSGI application (importable as `app` by WSGI servers).
app = create_app()

if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(host='0.0.0.0', port='9039', debug=True, threaded=True)
| openhistorymap/tiles-api | app/api copy.py | api copy.py | py | 9,634 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_... |
24176860716 | # coding: utf-8
import re
import logging
from collections import OrderedDict
from copy import copy
logger = logging.getLogger(__name__)
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from lxml import etree
def uniq_seq_merge(seq1, seq2):
    """Merge the items of *seq2* into *seq1*, preserving relative order.

    Each item of *seq2* not already present is inserted just before the
    first later *seq2* element that already sits in the result; items with
    no such anchor are appended. A new sequence is returned; the inputs
    are left untouched.
    """
    merged = copy(seq1)
    for pos, item in enumerate(seq2):
        if item in merged:
            continue
        # Look for an anchor: a later seq2 element already in the result.
        for anchor in seq2[pos + 1:]:
            if anchor in merged:
                merged.insert(merged.index(anchor), item)
                break
        else:
            merged.append(item)
    return merged
def xpath_add_default_ns(xpath, def_ns='__default__'):
    '''
    Add a default ns tag. This is because lxml doesn't support empty namespace

    Every path segment without an explicit prefix (``/node``) is rewritten
    to ``/<def_ns>:node``; segments inside predicate brackets and segments
    already carrying a ``prefix:`` are copied through unchanged.
    '''
    def find_brackets(xpath):
        # Return (open, close) index pairs for every [...] pair (handles nesting).
        ret = []
        stack = []
        p = re.compile(r'([\[\]])')
        for e in p.finditer(xpath):
            string = e.group()
            index = e.start()
            if string == '[':
                stack.append(index)
            else:
                ret.append((stack[-1], index))
                stack.pop(-1)
        return ret

    def in_brackets(index, brackets):
        # True when position `index` falls strictly inside any bracket pair.
        if not brackets:
            return False
        for start, end in brackets:
            if start < index < end:
                return True
        return False

    ret = []
    # Matches '/segment' where the segment contains no brackets, slashes or quotes.
    p = re.compile(r'/([^\[\]/\'\"]+)')
    last_end = 0
    brackets = find_brackets(xpath)
    for match in p.finditer(xpath):
        string = match.group()
        start = match.start()
        end = match.end()
        if in_brackets(start, brackets):
            # Slash inside a predicate expression: copy through unchanged.
            ret.append(xpath[last_end:end])
            last_end = end
        else:
            # is a real node tag
            if ':' in string:
                # has a name space in name
                ret.append(xpath[last_end:end])
                last_end = end
            else:
                ret.append(xpath[last_end:start])
                ret.append('/' + def_ns + ':' + string[1:])
                last_end = end
    # NOTE(review): `end` is unbound when the pattern never matched (e.g. an
    # xpath with no '/segment'), raising NameError; also the `len(xpath) - 1`
    # comparison can drop a single trailing character. Confirm intent.
    if end < len(xpath) - 1:
        ret.append(xpath[last_end:])
    return ''.join(ret)
class XmlConfig:
'''
using lxml to process xml format file
'''
def __init__(self, filename: str='', lines: list=None) -> None:
if filename:
et = etree.parse(filename)
elif not lines:
et = etree.fromstringlist(lines)
self.filename = filename
self.etree = et
self.root = et.getroot()
self._nsmap_r = dict([(value, key) for key, value in self.root.nsmap.items()])
self._children_count = {}
self._dict = OrderedDict()
self._node_map = {}
self._get_node_list()
    def search(self, xpath, name_def_ns='__default__'):
        '''search node which matches xpath'''
        if self._nsmap_r:
            # the file is with namespaces defined
            nsmap = copy(self.root.nsmap)
            if None in nsmap:
                # lxml's xpath() rejects a None prefix: expose the default
                # namespace under `name_def_ns` and rewrite the xpath to
                # use that prefix on unprefixed segments.
                nsmap[name_def_ns] = nsmap[None]
                nsmap.pop(None)
                xpath = xpath_add_default_ns(xpath, name_def_ns)
            return self.etree.xpath(xpath, namespaces=nsmap)
        return self.etree.xpath(xpath)
    def _rm_prefix(self, tag: str):
        # remove the namespace in tag
        # Converts lxml's '{uri}name' form into 'prefix:name' via the
        # reverse namespace map, or bare 'name' for the default namespace.
        if '}' in tag:
            ns, _, name = tag.partition('}')
            ns = ns[1:]
            if self._nsmap_r[ns]:
                return '{}:{}'.format(self._nsmap_r[ns], name)
            else:
                # default namespace (prefix None): return the bare name
                return name
        else:
            # NOTE(review): a tag without '{uri}' is collapsed to '' here;
            # it looks like `return tag` was intended -- confirm before use
            return ''
def _path_split_attr(self, path):
# split the path to xpath and attrib
path_attr_pattern = re.compile(r'(.*)\[@([^\]]*)\]$')
m = path_attr_pattern.match(path)
if m:
real_path, attr = m.groups()
else:
real_path = path
attr = None
return real_path, attr
def _path_split_seq(self, path):
# split the path to path and sequence number
p = re.compile(r'(.*)\[(\d+)\]$')
m = p.match(path)
if m:
# with sequence number
real_path, seq = m.groups()
seq = int(seq)
else:
real_path = path
seq = None
# logger.debug('Sequence number is {}'.format(seq))
return real_path, seq
    def _get_node_with_ns(self, path):
        # get the node with the namespace in tag
        # Builds a namespace-agnostic lookup with *[name()='tag'] so that
        # the prefixed paths produced by _walk_node_list() can be resolved.
        if not self.root.nsmap:
            return None
        search = ''
        for seg in path.split('/')[1:]:
            if seg[-1] == ']':
                # with a sequence number; *seq* keeps the trailing ']'
                name, _, seq = seg.partition('[')
                search += "/*[name()='{}'][{}".format(name, seq)
            else:
                search += "/*[name()='{}']".format(seg)
        logger.debug('Node search string is: {}'.format(search))
        l = self.etree.xpath(search)
        return l[0] if l else None
    def _get_node_detail(self, node):
        '''extra node tag, text, attribute and count of child

        return:
            (tag, stripped_text, attrib, child_count), or None when *node*
            is None
        '''
        if node is not None:
            # strip the namespace prefix only when the document uses namespaces
            tag = self._rm_prefix(node.tag) if self._nsmap_r else node.tag
            text = node.text.strip() if node.text else ''
            attrib = node.attrib
            count_child = len(node)
            return tag, text, attrib, count_child
        return None
    def _walk_node_list(self, node, prefix='', counter=1):
        '''
        This function is the recursive to get node list
        Needed only for xml with namespaces

        Records an xpath-like key in self._dict / self._node_map for every
        element.  A '[n]' suffix is added only when the element has
        same-tag siblings.

        args:
            node: the element to record
            prefix: the path of the parent node
            counter: 1-based position of *node* among same-tag siblings
        '''
        tag, text, attrib, count_child = self._get_node_detail(node)
        if counter == 1:
            new_prefix = prefix + '/' + tag
            # look ahead: if a later sibling shares this tag, the first
            # occurrence also needs an explicit '[1]' suffix
            sib = node.getnext()
            while sib != None:
                if sib.tag is etree.Comment:
                    sib = sib.getnext()
                    continue
                if self._rm_prefix(sib.tag) == tag:
                    new_prefix = new_prefix + '[1]'
                    break
                sib = sib.getnext()
        else:
            new_prefix = prefix + '/' + tag + '[{}]'.format(str(counter))
        self._dict[new_prefix] = (tag, text, attrib, count_child)
        self._node_map[new_prefix] = node
        # per-tag occurrence counters for the children (comments skipped)
        counters = {}
        for ch in node.getchildren():
            if ch.tag is etree.Comment:
                continue
            tag = self._rm_prefix(ch.tag)
            counters[tag] = counters.setdefault(tag, 0) + 1
            self._walk_node_list(ch, prefix=new_prefix, counter=counters[tag])
def _get_parent_name(self, path):
parent, _, tag = path.rpartition('/')
if tag.startswith('comment()'):
return ''
return parent
def _get_node_list(self):
'''
Generate the _dict
_dict: OrderedDict
key: xpath
value: [tag, text, attrib, child_count]
'''
self._dict = OrderedDict()
self._node_map = {}
if self.root.nsmap:
'''
etree.getpath returns weird string wchich cannot be used for etree.xpath to lookup
the node.
So here need to use self._walk_node_list()
'''
self._walk_node_list(self.root)
else:
for ch in self.root.iter():
if ch.tag is etree.Comment:
continue
path = self.etree.getpath(ch)
self._dict[path] = self._get_node_detail(ch)
self._node_map[path] = ch
for name in self._dict.keys():
parent = self._get_parent_name(name)
if not parent:
continue
else:
self._children_count[parent] = self._children_count.setdefault(parent, 0) + 1
    def _get_node(self, path):
        '''Resolve *path* to an lxml element, or None when not found.'''
        if self.root.nsmap:
            # namespaced documents need the name()-based lookup
            return self._get_node_with_ns(path)
        else:
            l = self.etree.xpath(path)
            if l:
                return l[0]
            else:
                logger.debug('Not found: {}'.format(path))
                return None
    # alias of get_node
    get_node = _get_node
def get_path(self, node):
# Find full path of node
for path, n in self._node_map.items():
if n == node:
return path
else:
return ''
# def _get_siblings(self, path):
# real_path, seq = self._path_split_seq(path)
# len_rp = len(real_path)
# ret = []
# if seq:
# for k, v in self._dict.items():
# if k == path:
# continue
# if k.startswith(real_path) and '/' not in k[len_rp:]:
# ret.append(k)
# return ret
def _match_attr(self, attrib1, attrib2, group_id_keys:tuple):
for key in group_id_keys:
if key in attrib1 and key in attrib2 and attrib1.get(key) == attrib2.get(key):
return True
return False
def _dict_diff(self, d1, d2, include_ok=True):
# assume d1 and d2 are dict
all_keys = uniq_seq_merge(list(d1.keys()), list(d2.keys()))
diff = []
for key in all_keys:
if key in d1 and key not in d2:
diff.append(('Removed', key, d1.get(key), ''))
elif key not in d1 and key in d2:
diff.append(('New', key, '', d2.get(key)))
else:
v1 = d1.get(key)
v2 = d2.get(key)
if v1 == v2:
if include_ok:
diff.append(('OK', key, v1, v2))
else:
diff.append(('Change', key, v1, v2))
return diff
def __getitem__(self, path):
return self._dict.get(path, None)
def _list_children(self, parent_path, tag=''):
if self[parent_path][-1] == 0:
return []
ret = []
prefix = '/'.join((parent_path, tag))
l_p = len(prefix)
for k, v in self._dict.items():
if k.startswith(prefix) and '/' not in k[l_p:]:
ret.append(k)
return ret
def _get_match_node(self, path, node_match_matrix):
# to find the correct node to match
# For example, /groups/group[3] matches the /groups/group[4]
# Then the children need to compare to each other's children
# logger.debug('Looking for {} in {}'.format(path, node_match_matrix))
for p1, p2 in node_match_matrix[::-1]:
if path == p1:
return p2
return None
def _get_attr(self, path, attr):
return self._dict[path][2].get(attr, None)
def _get_children_count(self, path, obj):
if path in obj._dict:
return obj._dict[path][-1]
else:
return 0
    def _group_node_match(self, parent_path, tag, xc2, node_match_matrix, id_field='id'):
        '''
        _group_node_match: use only when compare two instances
        args:
            parent_path: the group's parent path
            tag: the shared tag of group nodes
            xc2: the instance will be compared
            node_match_matrix: the node match matrix
            id_field: the atrribute to identify the node
        return:
            (match, unmatched, unmatched2): matched (path, path2) pairs,
            group members only present in self, and members only present
            in xc2
        '''
        match = []
        unmatched = []
        id_map = {}
        # resolve the parent's counterpart in xc2 (falls back to itself)
        ppath2 = self._get_match_node(parent_path, node_match_matrix)
        logger.debug('ppath2 is {}'.format(ppath2))
        if ppath2 == None:
            ppath2 = parent_path
        # index xc2's group members by their id attribute value
        for path in xc2._list_children(ppath2, tag):
            _, seq = xc2._path_split_seq(path)
            node_id = xc2[path][2][id_field]
            id_map[node_id] = seq
        for path in self._list_children(parent_path, tag):
            _, seq = self._path_split_seq(path)
            node_id = self[path][2][id_field]
            if node_id in id_map:
                p2 = '{}/{}[{}]'.format(ppath2, tag, id_map[node_id])
                match.append((path, p2))
                logger.debug('Group {}: {} matched {}'.format(parent_path, path, p2))
                if p2 != path:
                    node_match_matrix.append((path, p2))
                # consume the id so leftovers end up in unmatched2
                id_map.pop(node_id)
            else:
                logger.debug('Group {}: no match found for {}'.format(parent_path, seq))
                unmatched.append(path)
        unmatched2 = ['{}/{}[{}]'.format(ppath2, tag, seq) for seq in id_map.values()]
        return match, unmatched, unmatched2
    def _mark_children(self, path, status, obj, node_compared):
        # mark all children to one single status
        # this can be used to mark all children of a Removed or New node to the same status
        #
        # args:
        #     path: the subtree root whose nodes get marked
        #     status: 'New' or 'Removed'
        #     obj: the XmlConfig instance the paths belong to
        #     node_compared: list of already-processed paths (appended to)
        # return:
        #     list of (path1, status, key, v1, v2, path2) diff tuples
        ret = []
        logger.debug('Marking {}\'s sub nodes to be {}'.format(path, status))
        for sub_path in obj._dict.keys():
            if sub_path.startswith(path):
                _, text, attr, _ = obj[sub_path]
                node_compared.append(sub_path)
                logger.debug('Marking {} to {}'.format(sub_path, status))
                # 'New' rows carry path/value on the right side,
                # 'Removed' rows on the left side
                if status == 'New':
                    p1, p2 = '', sub_path
                    t1, t2 = '', text
                else:
                    p1, p2 = sub_path, ''
                    t1, t2 = text, ''
                ret.append((p1, status, t1, t2, '', p2))
                for key, value in attr.items():
                    if status == 'New':
                        p1, p2 = '', sub_path
                        v1, v2 = '', value
                    else:
                        p1, p2 = sub_path, ''
                        v1, v2 = value, ''
                    ret.append((p1, status, key, v1, v2, p2))
        # logger.debug('Diffs before return {}'.format(ret))
        return ret
def comp(self, filename: str, group_id_field_map=None, include_ok=True):
'''
Compare with another xml file
args:
filename: file name
group_id_field_map: a dict contains the id field of each group of sub nodes
The format should be this way: {'bean': 'id', 'Field': 'index', 'module': 'name'}
The key is the tag name and value should be the attribute to distinguish the node
include_ok: Whether the return values contains the same content
return:
list of changes
'''
logger.debug('Compare to {}'.format(filename))
if group_id_field_map == None:
group_id_field_map = {}
xc2 = self.__class__(filename)
changes = []
# node_match_matrix
# This is designed for the nodes have multiple children with same tag but different
# attributes
# In some cases the order of childs may vary but actually the node can find equivelent
# node in the second file. Keeping this matrix to avoid the accuracy problem due to
# sequence
# empty = ('', '', {}, 0)
node_match_matrix = []
# store the nodes already processed
node_compared1 = []
node_compared2 = []
# check all nodes in self
for path in self._dict.keys():
logger.debug('Comparing my path {}'.format(path))
if path in node_compared1:
logger.debug('Already compared. Skip {}'.format(path))
continue
# logger.debug(node_compared1)
# logger.debug(node_compared2)
real_p, seq = self._path_split_seq(path)
parent = self._get_parent_name(path)
if not seq:
# the path is not ending with a sequence number, means not in a group
logger.debug('Not in a group')
ppath2 = self._get_match_node(parent, node_match_matrix)
if ppath2 == None:
path2 = path
else:
path2 = path.replace(parent, ppath2)
if self._get_children_count(path, self) > 0:
node_match_matrix.append((path, path2))
if path2 in xc2._dict:
logger.debug('Compare {} to {}'.format(path, path2))
left, right = self[path], xc2[path2]
diffs = self._node_comp(left, right, path, path2, include_ok=include_ok)
changes.extend(diffs)
node_compared1.append(path)
node_compared2.append(path2)
else:
# not in xc2, need to mark as 'Removed'
diffs = self._mark_children(path, 'Removed', self, node_compared1)
changes.append(diffs)
node_compared1.append(path)
else:
# the path ends with a sequence number, means in a group
logger.debug('In group: {}'.format(parent))
_, _, tag = real_p.rpartition('/')
if tag not in group_id_field_map:
raise KeyError('Please indicate id field for tag "{}" to group_id_field_map'.format(tag))
matched, unmatched1, unmatched2 = self._group_node_match(
parent, tag, xc2, node_match_matrix, group_id_field_map[tag]
)
# logger.debug('Node match matrix: {}'.format(node_match_matrix))
for p1, p2 in matched:
logger.debug('Comparing {} vs {}'.format(p1, p2))
diffs = self._node_comp(self[p1], xc2[p2], p1, p2, include_ok)
node_compared1.append(p1)
node_compared2.append(p2)
# logger.debug('Current diffs: {}'.format(diffs))
changes.extend(diffs)
for p1 in unmatched1:
logger.debug('No match found for {} in {}. Mark as Removed'.format(p1, xc2.filename))
diffs = self._mark_children(p1, 'Removed', self, node_compared1)
# logger.debug('Removed diffs: {}'.format(diffs))
node_compared1.append(p1)
changes.extend(diffs)
for p2 in unmatched2:
logger.debug('No match found for {} in self, Mark as New'.format(p2))
diffs = self._mark_children(p2, 'New', xc2, node_compared2)
# logger.debug('New diffs: {}'.format(diffs))
node_compared2.append(p2)
changes.extend(diffs)
for path in xc2._dict.keys():
if path in node_compared2:
logger.debug('Already compared. Skip {}'.format(path))
continue
diffs = self._mark_children(path, 'New', xc2, node_compared2)
changes.extend(diffs)
return changes
    def _list_group_node(self, path):
        # Return all elements matching the group xpath of *path* (the path
        # with the trailing '[n]' removed).  Implicitly returns None when
        # *path* carries no sequence number.
        p = re.compile(r'(.*)\[(\d+)\]$')
        m = p.match(path)
        if m:
            # with sequence number
            real_path, seq = m.groups()
            seq = int(seq)  # NOTE(review): *seq* is unused -- confirm intent
            return self.etree.xpath(real_path)
    def _node_comp(self, data_node1, data_node2, path1, path2='', include_ok=True):
        '''
        Compare data two nodes
        args:
            data_node1: data of node1 with path1
            data_node2: data of node2 with path2
            path1: the node path of self
            path2: the node path of the file to be compared
            include_ok: whether to inlucde values with no difference
        return:
            sorted list of (path1, status, key, v1, v2, path2) tuples; the
            text comparison uses an empty key ('')
        '''
        _, text1, attr1, _ = data_node1
        _, text2, attr2, _ = data_node2
        changes = []
        if not path2:
            path2 = path1
        # node text comparison
        if text1 != text2:
            logger.debug('Text difference: {} - {}'.format(text1, text2))
            changes.append((path1, 'Change', '', text1, text2, path2))
        elif text1 != '' and include_ok:
            changes.append((path1, 'OK', '', text1, text1, path2))
        # attribute-level comparison
        attrib_diff = self._dict_diff(attr1, attr2, include_ok)
        if attrib_diff:
            logger.debug('Attrib diff: {}'.format(attrib_diff))
            for diff in attrib_diff:
                ch, key, v1, v2 = diff
                if ch == 'OK':
                    if include_ok:
                        changes.append((path1, ch, key, v1, v2, path2))
                else:
                    changes.append((path1, ch, key, v1, v2, path2))
        return sorted(changes, key=lambda t:t[0])
def set(self, path, value):
real_path, attr = self._path_split_attr(path)
node = self._get_node(real_path)
if node is None:
raise KeyError('Invalid path: {}'.format(path))
if node is not None:
if attr:
# set attrib
node.set(attr, value)
self[real_path][2][attr] = value
else:
# set text
if value:
node.text = value
d = self[real_path]
self._dict[real_path] = (d[0], value, d[2], d[3])
else:
# cannot find or create the node
logger.warn('Unable to find or create node')
def set_attr(self, path, attr, value):
if attr == '':
self.set(path, value)
else:
new_path = '{}[@{}]'.format(path, attr)
self.set(new_path, value)
def get(self, path):
real_path, attr = self._path_split_attr(path)
# node = self._get_node(real_path)
if real_path not in self._dict:
raise KeyError('No xpath found: {}'.format(path))
attrib = self[real_path][2]
if attr:
if attr not in attrib:
raise KeyError('No attrib found: {}'.format(path))
return attrib.get(attr)
else:
return self[real_path][1]
    def add_node(self, path:str):
        '''Create a new element at *path* and return it.

        A trailing '[n]' inserts the element at position *n* among the
        existing same-tag children; otherwise the element is appended.
        '''
        # if self._get_node(path) is not None:
        #     logger.info('Node exists')
        #     return
        # check if path with sequence number
        real_path, seq = self._path_split_seq(path)
        if seq:
            # NOTE(review): _path_split_seq already returns an int, the
            # int() below is redundant; a '[0]' suffix is treated as "no
            # sequence" because 0 is falsy -- confirm that is acceptable
            seq = int(seq)
            parent_path = self._get_parent_name(real_path)
            tag = real_path.rpartition('/')[-1]
            if parent_path not in self._dict:
                raise ValueError('Wrong path: {}'.format(real_path))
            parent_node = self._get_node(parent_path)
            # highest sequence number currently used by same-tag children
            max_child_path = self._list_children(parent_path, tag)[-1]
            max_n = int(self._path_split_seq(max_child_path)[-1])
            element = etree.Element(tag)
            if seq <= max_n:
                parent_node.insert(seq, element)
            else:
                parent_node.append(element)
        else:
            parent_path, _, tag = path.rpartition('/')
            parent_node = self._get_node(parent_path)
            element = etree.Element(tag)
            parent_node.append(element)
        # refresh the cached path maps
        self._get_node_list()
        return element
    def del_node(self, path:str):
        '''delete a node

        Recursively removes the node's children first, then detaches the
        node from its parent and refreshes the cached path maps.
        '''
        # delete it's children
        node = self._get_node(path)
        for child in node.getchildren():
            self.del_node(self.get_path(child))
        # delete node from etree
        parent = self._get_parent_name(path)
        parent_node = self.get_node(parent)
        parent_node.remove(node)
        # delete from self._dict and self._node_map
        self._get_node_list()
def save(self, filename=''):
# save to disk
if filename == '':
filename = self.filename
logger.debug('Write to {}'.format(filename))
open(filename, 'w').write(etree.tostring(self.root).decode())
def walk(self, json=False):
for k, v in self.get_dict().items():
print('{}: {}'.format(k, v))
def get_dict(self, json=False):
ret = OrderedDict()
for node_path, vs in self._dict.items():
if node_path.split('/')[-1].startswith('comment()'):
continue
_, text, attrib, _ = vs
ret[node_path] = text
for key, value in attrib.iteritems():
ret['{}[@{}]'.format(node_path, key)] = value
return ret
def update_from(self, filename, group_id_field_map=None):
# Update current file from another file
for path1, status, key, v1, v2, path2 in self.comp(filename, group_id_field_map, include_ok=False):
if status == 'Change':
action = status
elif status == 'New':
action = 'Add'
elif status == 'Remove':
logger.debug('Remove is not supported yet')
continue
logger.debug('{} {}: {} -> {}'.format(status, key, v1, v2))
self.set(key, v2)
def _node_validate(node, requirements: list, include_ok=True):
ret = []
for req in requirements:
key = req.get('attrib')
value = req.get('value')
current_value = node.attrib.get(key)
if current_value == None:
change = 'Missing'
elif current_value != value:
change = 'NotComply'
else:
if include_ok == False:
continue
change = 'OK'
ret.append((key, value, current_value, change))
return ret
def validate(self, node_path, search_path, requirements: list, include_ok=False):
'''
Valid if self values match the requirements
args:
node_path: a XPath string can match a node
search_path: a XPath search string that can to search nodes
requirements: a list of value requirements
Example: [
{'attrib': 'key1', 'value': 'value1'},
{'attrib': 'key2', 'value': 'value2'},
{'attrib': 'key3', 'value': 'value3'},
]
'''
ret = []
if node_path == 'search' or node_path == '':
logger.debug('Searching {}'.format(search_path))
xpath = search_path
nodes = self.search(search_path)
if len(nodes) == 0:
logger.debug('No node found for {}'.format(xpath))
change = 'Missing'
ret.append((xpath, '', '', '', 'NodeMissing'))
# elif len(nodes) > 1:
# change = 'Deviation'
# ret.append((xpath, '', '', '', 'TooManyNode'))
# # raise ValueError('XPath matched two or more nodes: {}'.format(search_path))
# else:
# logger.debug('Node found')
# node = nodes[0]
else:
node = self._get_node(node_path)
xpath = node_path
if node is None:
change = 'Missing'
ret.append((xpath, '', '', '', 'NodeMissing'))
else:
nodes = [node,]
for node in nodes:
path = self.get_path(node)
for req in requirements:
key = req.get('attrib')
value = req.get('value')
current_value = node.attrib.get(key)
if current_value == None:
change = 'Missing'
elif current_value != value:
change = 'NotComply'
else:
if include_ok == False:
continue
change = 'OK'
ret.append((path, key, value, current_value, change))
if not ret:
logger.debug('No compliance issue found')
return ret
def multi_set(self, search_path, attrib, value):
for node in self.search(search_path):
path = self.get_path(node)
if attrib:
path = '{}[@{}]'.format(path, attrib)
self.set(path, value) | felixchr/xml_conf | xmlconf.py | xmlconf.py | py | 28,143 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number":... |
from matplotlib import colors  # NOTE(review): appears unused here -- confirm before removing
import matplotlib.pyplot as plt

# Each line of 'out' looks like "<label>:<n>:<time>"; keep the numeric parts.
# BUGFIX: open the input with a context manager so the handle is closed.
with open('out', 'r') as fp:
    lines = [[float(y) for y in x.split(':')[1:]] for x in fp.readlines()]
# Scale the measured time by 100 for readability on the plot axis.
lines = [[row[0], row[1] * 100] for row in lines]
for line in lines:
    print(line)

# For found: even rows hold the "found" measurements.
ns = [lines[x][0] for x in range(0, len(lines), 2)]
times = [lines[x][1] for x in range(0, len(lines), 2)]
ns2 = [2 * lines[x][0] for x in range(0, len(lines), 2)]
times2 = [2 * lines[x][1] for x in range(0, len(lines), 2)]
print(ns)
print(times)
plt.plot(ns, times)
plt.plot(ns2, times2)
plt.xlabel("number of items")
plt.ylabel("Time*100")
plt.legend(["One", "Two"])
plt.show()

# For not found: odd rows hold the "not found" measurements.
ns = [lines[x][0] for x in range(1, len(lines), 2)]
times = [lines[x][1] for x in range(1, len(lines), 2)]
print(ns)
print(times)
plt.plot(ns, times)
plt.xlabel("number of items")
plt.ylabel("Time*100")
plt.show()
| dipeshkaphle/LabsAndAssignments | CSLR41-AlgosLab/Lab1/plot.py | plot.py | py | 912 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matpl... |
9767092930 | import numpy as np
# import networkx as nx
from collections import *
from itertools import *
from functools import *
import math
import re
from common.session import AdventSession
# Fetch the day's puzzle input via the shared AoC session helper,
# one expression per line.
session = AdventSession(day=18, year=2020)
data = session.data.strip()
data = data.split('\n')
# Running totals for the part 1 and part 2 answers.
p1, p2 = 0, 0
def findmatch(expr, i):
    '''Scan forward from *i* and return the first index where the
    parenthesis depth returns to zero (the match of the '(' at *i*);
    None when the depth never closes.'''
    depth = 0
    for j in range(i, len(expr)):
        ch = expr[j]
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        if depth == 0:
            return j
def evaluate(expr):
    '''Evaluate the tokenised expression *expr* left-to-right with equal
    precedence for + and * (AoC 2020 day 18 part 1); returns a string.

    The list is rewritten in place: each "<num> <op> <num>" triple or
    parenthesised group is collapsed to its value and scanning restarts.
    '''
    i = 0
    while i < len(expr) and len(expr) > 1:
        if i - 1 >= 0 and expr[i - 1].isdigit() and expr[i + 1].isdigit() and \
                not expr[i].isdigit():
            # collapse "<num> <op> <num>" into its value
            expr[i - 1:i + 2] = [str(eval(''.join(expr[i - 1:i + 2])))]
            i = 0
        elif expr[i] == '(':
            # collapse a parenthesised group recursively
            end = findmatch(expr, i)
            expr[i:end + 1] = [evaluate(expr[i + 1:end])]
            i = 0
        i += 1
    return expr[0]
def evaluate2(expr):
    '''Like evaluate(), but addition binds tighter than multiplication:
    all '+' are collapsed before any '*' (AoC 2020 day 18 part 2).'''
    for op in ('+', '*'):
        i = 0
        while i < len(expr) and len(expr) > 1:
            if i - 1 >= 0 and expr[i - 1].isdigit() and expr[i] == op and \
                    expr[i + 1].isdigit():
                # collapse "<num> <op> <num>" for the current operator only
                expr[i - 1:i + 2] = [str(eval(''.join(expr[i - 1:i + 2])))]
                i = 0
            elif expr[i] == '(':
                # collapse a parenthesised group recursively
                end = findmatch(expr, i)
                expr[i:end + 1] = [evaluate2(expr[i + 1:end])]
                i = 0
            i += 1
    return expr[0]
# Strip spaces, tokenise each line into single characters and sum the
# results under both precedence rules.
for line in data:
    p1 += int(evaluate([c for c in line if c != ' ']))
    p2 += int(evaluate2([c for c in line if c != ' ']))
print(f'Part 1: {p1}')
print(f'Part 2: {p2}')
# session.submit(p1, part=1)
# session.submit(p2, part=2)
# session.submit(p1, part=2)
| smartspot2/advent-of-code | 2020/day18.py | day18.py | py | 1,777 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "common.session.AdventSession",
"line_number": 11,
"usage_type": "call"
}
] |
8665454584 | # -*- coding: utf-8 -*-
import os
import time
from boto3.dynamodb.conditions import Key
import settings
from db_util import DBUtil
from lambda_base import LambdaBase
from jsonschema import validate
from not_authorized_error import NotAuthorizedError
from user_util import UserUtil
class MeCommentsDelete(LambdaBase):
    """Lambda handler: delete one of the requesting user's comments.

    The comment and its whole reply thread are archived into the
    deleted-comment table, then removed from the live comment table.
    """
    def get_schema(self):
        # request schema: only the target comment_id is required
        return {
            'type': 'object',
            'properties': {
                'comment_id': settings.parameters['comment']['comment_id']
            },
            'required': ['comment_id']
        }

    def validate_params(self):
        # the caller must have a verified phone number and e-mail address
        UserUtil.verified_phone_and_email(self.event)
        validate(self.params, self.get_schema())
        # the comment must exist and belong to a public article
        comment = DBUtil.get_validated_comment(self.dynamodb, self.params['comment_id'])
        DBUtil.validate_article_existence(self.dynamodb, comment['article_id'], status='public')

    def exec_main_proc(self):
        comment_table = self.dynamodb.Table(os.environ['COMMENT_TABLE_NAME'])
        comment = comment_table.get_item(
            Key={"comment_id": self.params['comment_id']}
        )['Item']

        if not self.__is_accessable_comment(comment):
            raise NotAuthorizedError('Forbidden')

        deleted_comment_table = self.dynamodb.Table(os.environ['DELETED_COMMENT_TABLE_NAME'])
        delete_targets = self.__get_delete_targets(comment)

        # archive first, then delete, so a crash in between loses nothing
        with deleted_comment_table.batch_writer() as batch:
            for item in delete_targets:
                item.update({'deleted_at': int(time.time())})
                batch.put_item(Item=item)

        with comment_table.batch_writer() as batch:
            for item in delete_targets:
                batch.delete_item(Key={'comment_id': item['comment_id']})

        return {'statusCode': 200}

    def __is_accessable_comment(self, comment):
        # both the article's author and the comment's author may delete it
        user_id = self.event['requestContext']['authorizer']['claims']['cognito:username']
        article_info_table_name = self.dynamodb.Table(os.environ['ARTICLE_INFO_TABLE_NAME'])
        article_info = article_info_table_name.get_item(Key={"article_id": comment['article_id']})['Item']

        if article_info['user_id'] == user_id or comment['user_id'] == user_id:
            return True

        return False

    def __get_delete_targets(self, comment):
        # the comment itself plus every direct reply in its thread
        comment_table = self.dynamodb.Table(os.environ['COMMENT_TABLE_NAME'])
        targets = [comment]

        query_params = {
            'IndexName': 'parent_id-sort_key-index',
            'KeyConditionExpression': Key('parent_id').eq(comment['comment_id'])
        }
        thread_comments = comment_table.query(**query_params)['Items']
        targets.extend(thread_comments)

        return targets
| AlisProject/serverless-application | src/handlers/me/comments/delete/me_comments_delete.py | me_comments_delete.py | py | 2,706 | python | en | code | 54 | github-code | 6 | [
{
"api_name": "lambda_base.LambdaBase",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "settings.parameters",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "user_util.UserUtil.verified_phone_and_email",
"line_number": 27,
"usage_type": "call"
}... |
14350976599 | from flask import Flask,request
from flask_restful import Resource, Api
from tensorflow import keras
import numpy as np
from flask_cors import CORS
# Feature columns expected as query-string parameters, in model input order.
COLUMNS = ['temp', 'wind', 'rain', 'FFMC', 'DMC', 'DC', 'ISI', 'RH', 'BUI', 'FWI']

app = Flask(__name__)
# allow cross-origin requests (the frontend is served from another origin)
CORS(app)
# creating an API object
api = Api(app)
# Load model once at startup; compile=False since it is only used for inference
model = keras.models.load_model('model.h5', compile=False)
#prediction api call
class predict(Resource):
    """GET /predict/ -- run the loaded model on the query-string features.

    Every name in COLUMNS must be supplied as a float query parameter.
    """
    def get(self):
        # Get data: assemble a single-row feature array in COLUMNS order
        data = np.array([[float(request.args.get(field)) for field in COLUMNS]])
        # Predict; the model returns one row, converted to a plain float
        # for JSON serialisation
        prediction = model.predict(data)
        prediction = float(prediction[0])
        return prediction
# register the prediction endpoint and start the development server
api.add_resource(predict, '/predict/')
if __name__ == '__main__':
    app.run()
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models.... |
74874760507 | import setuptools # Must be before Cython import
import Emma
# The PyPI long description comes straight from the README.
with open("README.md", "r") as fp:
    long_description = fp.read()
# Optionally compile the hot modules with Cython; fall back to pure Python
# when Cython is not installed.
try:
    from Cython.Compiler import Options
    from Cython.Build import cythonize
    # keep docstrings in the compiled modules and stop at the first error
    Options.docstrings = True
    Options.fast_fail = True
    extensions = cythonize(
        [
            setuptools.Extension("Emma.emma_libs.memoryMap", sources=["Emma/emma_libs/memoryMap.py"]),
            setuptools.Extension("Emma.emma_libs.memoryEntry", sources=["Emma/emma_libs/memoryEntry.py"])
        ]
    )
except ImportError:
    # Cython unavailable: install as plain Python modules
    extensions = None
# Package metadata and install configuration for the pypiemma distribution.
setuptools.setup(
    name="pypiemma",
    version=Emma.EMMA_VERSION,
    license="GPLv3+",
    description="Emma Memory and Mapfile Analyser (Emma) | Conduct static (i.e. worst case) memory consumption \
analyses based on arbitrary linker map files. It produces extensive .csv files which are easy to filter and \
post-process. Optionally .html and markdown reports as well as neat figures help you visualising your results.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    maintainer="The Emma Authors",
    maintainer_email="emma-dev@googlegroups.com",
    url="https://github.com/bmwcarit/Emma",
    zip_safe=False,  # Needed for Cython
    packages=setuptools.find_namespace_packages(),  # Recursively find package files (i.e. sub-folders, ...)
    python_requires=Emma.PYTHON_REQ_VERSION,
    install_requires=["Pygments",
                      "Markdown",
                      "matplotlib",
                      "pandas",
                      "pypiscout>=2.0",
                      "graphviz",
                      "svgwrite"
                      ],
    extras_require={"dev":  # Install dev version via `pip3 install pypiemma[dev]`
                    ["gprof2dot",
                     "pylint",
                     "mkdocs>=1.1.2",  # There was a break in the config files: https://squidfunk.github.io/mkdocs-material/releases/5/
                     "mkdocs-material>=5.2.1"  # There was a break in the config files: https://squidfunk.github.io/mkdocs-material/releases/5/
                     ],
                    },
    entry_points={  # Make Emma available as independent scripts
        "console_scripts": [
            "emma=Emma.emma:runEmma",
            "emma_vis=Emma.emma_vis:runEmmaVis",
            "emma_deltas=Emma.emma_vis:runEmmaDeltas"
        ],
    },
    ext_modules=extensions,  # Needed for Cython
    keywords=[
        "memory-analysis",
        "mapfile",
        "memory-analyzer",
        "embedded",
        "ghs",
        "gcc",
        "mcu",
        "linker",
        "visualization",
        "reports",
        "csv",
        "python",
        "categorisation",
        "memory-consumption",
        "mapfile-analyser"
    ],
    classifiers=[
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Topic :: Scientific/Engineering :: Visualization",
        "Topic :: Software Development",
        "Topic :: Software Development :: Embedded Systems",
        "Topic :: Software Development :: Quality Assurance",
    ],
)
| bmwcarit/Emma | setup.py | setup.py | py | 3,594 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "Cython.Compiler.Options.docstrings",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "Cython.Compiler.Options",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "Cython.Compiler.Options.fast_fail",
"line_number": 13,
"usage_type": "att... |
1584177491 | import os
from django.core.files.storage import FileSystemStorage
# Python 2 compatibility: FileNotFoundError does not exist there, so fall
# back to its closest ancestor, IOError.
try:
    FileNotFoundError
except NameError:
    # BUGFIX: catch only NameError instead of a bare `except:` that would
    # also swallow KeyboardInterrupt/SystemExit
    FileNotFoundError = IOError
class BaseStorage(FileSystemStorage):
    '''
    Local-development storage: serve files from disk and, on a local miss,
    try to fetch them from an upstream source (subclasses implement
    _get() / _exists_upstream()) and cache them locally.
    '''
    def _open(self, name, mode='rb'):
        try:
            return super(BaseStorage, self)._open(name, mode)
        except FileNotFoundError:
            if 'w' in mode:  # if writing, make sure the parent structure exists
                self._ensure_directory(name)
            try:
                try:
                    f = self._get(name)
                except IOError:
                    # if the underlying file doesn't exist, no matter.
                    pass
                else:
                    # if it does, write the contents locally
                    self._write(f, name)
            except Exception:
                # best effort: fall through and let the local open decide
                pass
            return super(BaseStorage, self)._open(name, mode)

    def _exists_locally(self, name):
        # check only the local filesystem
        return super(BaseStorage, self).exists(name)

    def exists(self, name):
        '''True when the file exists either locally or upstream.'''
        if self._exists_locally(name):
            return True
        return self._exists_upstream(name)

    def _ensure_directory(self, name):
        # create the parent directory tree of *name* when missing
        dirname = os.path.dirname(self.path(name))
        if not os.path.exists(dirname):
            os.makedirs(dirname)

    def _write(self, filelike, name):
        '''Persist *filelike*'s content at the local path for *name*.'''
        self._ensure_directory(name)
        # BUGFIX: close the handle deterministically; the old bare
        # open().write() leaked the file object
        with open(self.path(name), mode='wb') as f:
            f.write(filelike.read())

    def _fetch_local(self, name, force=False):
        '''Cache the upstream file locally (no-op when already present,
        unless *force* is set).'''
        if self._exists_locally(name) and not force:
            return
        return self._write(self._get(name), name)
| beniwohli/django-localdevstorage | localdevstorage/base.py | base.py | py | 1,602 | python | en | code | 50 | github-code | 6 | [
{
"api_name": "django.core.files.storage.FileSystemStorage",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name... |
72532113149 | # pylint: disable=unused-variable
# pylint: disable=unused-argument
# pylint: disable=redefined-outer-name
import json
from copy import deepcopy
import httpx
import pytest
import respx
from fastapi import FastAPI
from respx import MockRouter
from simcore_service_api_server._meta import API_VTAG
from simcore_service_api_server.core.settings import ApplicationSettings
from simcore_service_api_server.models.schemas.profiles import Profile
from starlette import status
@pytest.fixture
def mocked_webserver_service_api(app: FastAPI):
    """Mocks some responses of web-server service

    Serves an in-memory fake profile behind GET/PUT /me so the tests run
    without a real web-server backend.
    """
    settings: ApplicationSettings = app.state.settings
    assert settings.API_SERVER_WEBSERVER

    # pylint: disable=not-context-manager
    with respx.mock(
        base_url=settings.API_SERVER_WEBSERVER.api_base_url,
        assert_all_called=False,
        assert_all_mocked=True,
    ) as respx_mock:
        # NOTE: webserver-api uses the same schema as api-server!
        # in-memory fake data
        me = deepcopy(Profile.Config.schema_extra["example"])

        def _get_me(request):
            # GET /me returns the current fake profile
            return httpx.Response(status.HTTP_200_OK, json={"data": me})

        def _update_me(request: httpx.Request):
            # PUT /me merges the submitted changes into the fake profile
            changes = json.loads(request.content.decode(request.headers.encoding))
            me.update(changes)
            return httpx.Response(status.HTTP_200_OK, json={"data": me})

        respx_mock.get("/me", name="get_me").mock(side_effect=_get_me)
        respx_mock.put("/me", name="update_me").mock(side_effect=_update_me)

        yield respx_mock

        # teardown: drop the fake profile state
        del me
async def test_get_profile(
    client: httpx.AsyncClient,
    auth: httpx.BasicAuth,
    mocked_webserver_service_api: MockRouter,
):
    """GET /me requires auth and proxies the web-server profile."""
    # needs no auth
    resp = await client.get(f"/{API_VTAG}/meta")
    assert resp.status_code == status.HTTP_200_OK

    # needs auth: unauthenticated request must not reach the backend
    resp = await client.get(f"/{API_VTAG}/me")
    assert resp.status_code == status.HTTP_401_UNAUTHORIZED
    assert not mocked_webserver_service_api["get_me"].called

    resp = await client.get(f"/{API_VTAG}/me", auth=auth)
    assert resp.status_code == status.HTTP_200_OK
    assert mocked_webserver_service_api["get_me"].called

    # values come from Profile.Config.schema_extra["example"]
    profile = Profile(**resp.json())
    assert profile.first_name == "James"
    assert profile.last_name == "Maxwell"
async def test_update_profile(
    client: httpx.AsyncClient,
    auth: httpx.BasicAuth,
    mocked_webserver_service_api: MockRouter,
):
    """PUT /me merges the submitted fields into the stored profile."""
    # needs auth
    resp = await client.put(
        f"/{API_VTAG}/me",
        json={"first_name": "Oliver", "last_name": "Heaviside"},
        auth=auth,
    )
    assert resp.status_code == status.HTTP_200_OK, resp.text

    # the mocked backend echoes the updated profile back
    profile = Profile.parse_obj(resp.json())
    assert profile.first_name == "Oliver"
    assert profile.last_name == "Heaviside"
| ITISFoundation/osparc-simcore | services/api-server/tests/unit/_with_db/test_api_user.py | test_api_user.py | py | 2,813 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "simcore_service_api_server.core.settings.ApplicationSettings",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "respx.mock",
"line_number": 28,
"usage_type": "call"
},
... |
1398543214 | """ Outlnies the methods to be used for the signUp app. """
from django.contrib.auth.models import Group
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes, force_str
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode

from accounts.models import CustomUser
from signUp import forms
from signUp.tokens import account_activation_token
def activate(request, uidb64, token):
    """ Activates a new user account with their id being their primary key.
    Args:
        request: The HTTP request submitted by the user.
        uidb64: The user's primary key, base64-encoded by activate_email.
        token: A token object to help identify/verify the user.
    Returns:
        redirect: 'login': Redirects the user to the login page (whether or
            not activation succeeded, so a stale link never crashes).
    """
    # Bug fix: the original looked up `CustomUser.objects.get(pk=id)` using
    # the *builtin* `id` (uidb64 was never decoded), and it marked the
    # account active before the token had been checked.
    try:
        uid = force_str(urlsafe_base64_decode(uidb64))
        user = CustomUser.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, CustomUser.DoesNotExist):
        # Malformed uid or unknown user: treat as an invalid link.
        user = None
    # If the user exists and has a valid token, activate the account
    if user is not None and account_activation_token.check_token(user, token):
        user.is_user = True
        user.save()
    return redirect('login')
def activate_email(request, user, to_email) -> None:
    """ Composes the account-activation email for *user* and sends it.
    Args:
        request: The HTTP request submitted by the user.
        user: The user object whose email address is being validated.
        to_email: (str): Destination email address.
    """
    mail_subject = "Activate your user account"
    # Context for the activation template: the uid/token pair identifies the
    # account in the confirmation link rendered by the template.
    context = {
        "user": user.username,
        "domain": get_current_site(request).domain,
        "uid": urlsafe_base64_encode(force_bytes(user.pk)),
        "token": account_activation_token.make_token(user),
        "protocol": 'https' if request.is_secure() else "http",
    }
    # Render the HTML template to the plain message body.
    message = render_to_string("signUp/template_activate_user.html", context)
    EmailMessage(mail_subject, message, to=[to_email]).send()
def signup(request):
    """ Renders the sign-up page and processes submitted registration forms.
    GET requests render a blank form.  Valid POST requests create a
    deactivated user, add them to the 'user' group, send an activation
    email and redirect to the leaderboard; invalid submissions re-render
    the form with its error messages.
    Args:
        request: The HTTP request submitted by the user.
    Returns:
        render: The signup page (GET, or POST with an invalid form).
        redirect: (leaderboard): After a successful registration.
    """
    if request.method != "POST":
        # GET request case: present an empty registration form.
        form = forms.SignUpForm()
        return render(request, 'registration/signup.html', {'form': form})
    form = forms.SignUpForm(request.POST)
    if form.is_valid():
        # Account stays inactive until the email link is confirmed.
        new_user = form.save(commit=False)
        new_user.is_user = False
        new_user.save()
        Group.objects.get(name='user').user_set.add(new_user)
        activate_email(request, new_user, form.cleaned_data.get('email'))
        return redirect('leaderboard')
    # Invalid POST: re-render with the bound form so errors are shown.
    return render(request, 'registration/signup.html', {'form': form})
| jjandrew/GroupEngineeringProjectGroup4 | technical-documents/source-code/signUp/views.py | views.py | py | 3,569 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "accounts.models.CustomUser.objects.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "accounts.models.CustomUser.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.CustomUser",
"line_number": 29,
"usage_typ... |
43901468636 | import io
from PIL import Image
import pytesseract
from wand.image import Image as wi
# Point pytesseract at the local Tesseract binary (Windows install path).
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Read the PDF and render every page at 300 dpi, then convert to JPEG pages.
pdf = wi(filename="AvradeepGupta.pdf", resolution=300)
pdfImage = pdf.convert('jpeg')
# One binary JPEG blob per page (make_blob serialises the page image).
imageBlobs = [wi(image=page).make_blob('jpeg') for page in pdfImage.sequence]
# OCR each page blob via PIL + pytesseract, collecting the text per page.
recognized_text = [
    pytesseract.image_to_string(Image.open(io.BytesIO(blob)), lang='eng')
    for blob in imageBlobs
]
# Print the list of per-page recognised text.
print(recognized_text)
#Image_To_Text
#im = Image.open("acknowledgement.png")
#text = pytesseract.image_to_string(im, lang='eng')
#print(text)
{
"api_name": "pytesseract.pytesseract",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "wand.image.Image",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "wand.image.Image",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.Imag... |
9063496529 | # -*- coding: utf-8 -*-
# ---
# @Software: PyCharm
# @Site:
# @File: num_clustering.py
# @Author: Alan D.Chen
# @E-mail: chense_mail@126.com
# @Time: 2020,八月 07
# ---
import pandas as pd
from sklearn.cluster import KMeans, MeanShift, AgglomerativeClustering, DBSCAN, spectral_clustering
from sklearn import metrics
from sklearn.metrics import calinski_harabasz_score
import matplotlib.pyplot as plt
from xml_extract2 import xml_extract
from DBSCAN2x import dbscanx
import numpy as np
from mean_shiftx import mean_shift
from k_meansx import mainx
from prettytable import PrettyTable
import math
# --- Data loading ----------------------------------------------------------
# Absolute path to the VOC2007 annotation XMLs; m items are sampled from it.
path = '/home/alanc/Documents/faster-rcnn.pytorch-pytorch-1.0/data/VOCdevkit2007/VOC2007/Annotations'
m = num_items_selected = 500
# Zdata: feature matrix extracted from the annotation files
# (exact shape/columns determined by xml_extract -- TODO confirm).
Zdata = xml_extract(path, m)
## just for AgglomerativeClustering
linkages = ['ward', 'average', 'complete']
## just for spectral_clustering
## Turn pairwise distances into an affinity matrix; the input must be a
## symmetric matrix.
metrics_metrix = (-1 * metrics.pairwise.pairwise_distances(Zdata)).astype(np.int32)
metrics_metrix += -1 * metrics_metrix.min()
## SSE sum of the squared errors
sse_list = []
sse_list2 = []  # NOTE(review): never used below
sse_list3 = []  # NOTE(review): never used below
K = range(1, 15)
# Fit K-means for k = 1..14, recording [k, SSE, slope-diff placeholder].
for k in range(1,15):
    kmeans=KMeans(n_clusters=k)
    kmeans.fit(Zdata)
    sse_list.append([k, kmeans.inertia_, 0]) # model.inertia_ returns the model's sum of squared errors; store it in the list
# Calculate the slope difference between the two sides of a point #
for i in range(1,13):
    sse_list[i][2] = (sse_list[i][1]-sse_list[i-1][1])/(sse_list[i][0]-sse_list[i-1][0]) - (sse_list[i+1][1]-sse_list[i][1])/(sse_list[i+1][0]-sse_list[i][0])
# Bubble-sort sse_list ascending by slope difference.
# NOTE(review): the first and last entries keep a slope-diff of 0, and the
# ascending sort makes sse_list[0] the point with the *smallest* slope
# difference; an elbow heuristic usually wants the largest -- confirm intent.
for i in range(len(sse_list)-1):
    # take element i and compare it with the remaining elements; swap when larger
    for j in range(i+1,len(sse_list)):
        if sse_list[i][2]>sse_list[j][2]:
            temp=sse_list[j]
            sse_list[j]=sse_list[i]
            sse_list[i]=temp
#print("The best number for K-means clustering by SSE(sum of the squared errors) is ", sse_list[0][0])
## Silhouette coefficient
## silhouette_score & Calinski-Harabaz Index
clusters = range(2,15)
sc_scores = []
sc_scores2 = []
ac_scores = []
ac_scores2 = []
pc_scores = []
pc_scores2 = []
# Score every candidate k for K-means, agglomerative ('complete' linkage)
# and spectral clustering, with both silhouette and Calinski-Harabasz.
for k in clusters:
    kmeans_model = KMeans(n_clusters=k).fit(Zdata)
    ac_model = AgglomerativeClustering(linkage=linkages[2], n_clusters=k).fit(Zdata)
    pc_model = spectral_clustering(metrics_metrix, n_clusters=k)
    sc_score = metrics.silhouette_score(Zdata, kmeans_model.labels_,sample_size=10000, metric='euclidean')
    sc_scores.append([k, sc_score])
    sc_score2 = metrics.calinski_harabasz_score(Zdata, kmeans_model.labels_)
    sc_scores2.append([k, sc_score2])
    ## Agglomerative
    ac_score = metrics.silhouette_score(Zdata, ac_model.labels_, sample_size=10000, metric='euclidean')
    ac_scores.append([k, ac_score])
    ac_score2 = metrics.calinski_harabasz_score(Zdata, ac_model.labels_)
    ac_scores2.append([k, ac_score2])
    ## spectral_clustering
    pc_score = metrics.silhouette_score(Zdata, pc_model, sample_size=10000, metric='euclidean')
    pc_scores.append([k, pc_score])
    pc_score2 = metrics.calinski_harabasz_score(Zdata, pc_model)
    pc_scores2.append([k, pc_score2])
# Bubble-sort each score list descending, so index 0 holds the best k
# (higher silhouette / Calinski-Harabasz means better-separated clusters).
for i in range(len(sc_scores)-1):
    # take element i and compare it with the remaining elements; swap when smaller
    for j in range(i+1,len(sc_scores)):
        if sc_scores[i][1]<sc_scores[j][1]:
            temp=sc_scores[j]
            sc_scores[j]=sc_scores[i]
            sc_scores[i]=temp
        if sc_scores2[i][1] < sc_scores2[j][1]:
            temp = sc_scores2[j]
            sc_scores2[j] = sc_scores2[i]
            sc_scores2[i] = temp
        if ac_scores[i][1]<ac_scores[j][1]:
            temp=ac_scores[j]
            ac_scores[j]=ac_scores[i]
            ac_scores[i]=temp
        if ac_scores2[i][1] < ac_scores2[j][1]:
            temp = ac_scores2[j]
            ac_scores2[j] = ac_scores2[i]
            ac_scores2[i] = temp
        if pc_scores[i][1]<pc_scores[j][1]:
            temp=pc_scores[j]
            pc_scores[j]=pc_scores[i]
            pc_scores[i]=temp
        if pc_scores2[i][1] < pc_scores2[j][1]:
            temp = pc_scores2[j]
            pc_scores2[j] = pc_scores2[i]
            pc_scores2[i] = temp
        # if sc_scores3[i][1] < sc_scores3[j][1]:
        #     temp = sc_scores3[j]
        #     sc_scores3[j] = sc_scores3[i]
        #     sc_scores3[i] = temp
# Ask the external mean-shift and DBSCAN implementations for their counts.
num_cluster, cluster_ids = mean_shift(Zdata, 70.0)
num_cluster_dbscanx = dbscanx(path, m)
# #print(sc_scores)
# print("The best number for K-means clustering by Silhouette Coefficient is ", sc_scores[0][0])
# #print(sc_scores2)
# print("The best number for K-means clustering by Calinski-Harabaz Index is ", sc_scores2[0][0])
#
# #print(ac_scores)
# print("The best number for Agglomerative clustering by Silhouette Coefficient is ", ac_scores[0][0])
# #print(ac_scores2)
# print("The best number for Agglomerative clustering by Calinski-Harabaz Index is ", ac_scores2[0][0])
#
# #print(pc_scores)
# print("The best number for Spectral clustering by Silhouette Coefficient is ", pc_scores[0][0])
# #print(pc_scores2)
# print("The best number for Spectral clustering by Calinski-Harabaz Index is ", pc_scores2[0][0])
#
# print("The best number for DBSCAN clustering is ", num_cluster_dbscanx)
# Average the eight per-method suggestions into one proposed cluster count,
# rounded up.
num_clusterx = (sse_list[0][0] + sc_scores[0][0] + sc_scores2[0][0] + ac_scores[0][0] + ac_scores2[0][0]
                + pc_scores[0][0] + pc_scores2[0][0] + num_cluster_dbscanx)/8
num_clusterx = int(math.ceil(num_clusterx))
#################################
# Summary table: each method's suggested k per criterion (0 = not applicable).
x = PrettyTable(["Method for clustering", "Automatic presentation", "SSE(sum of the squared errors)", "Silhouette Coefficient", "Calinski-Harabaz Index"])
x.align["Method for clustering"] = "l"  # Left align city names
x.padding_width = 1  # One space between column edges and contents (default)
x.add_row(["K-means/PAM",0,sse_list[0][0],sc_scores[0][0],sc_scores2[0][0]])
x.add_row(["Hierarchical",0, 0,ac_scores[0][0],ac_scores2[0][0]])
x.add_row(["Spectral",0,0,pc_scores[0][0],pc_scores2[0][0]])
x.add_row(["DBSCANx",num_cluster_dbscanx,0,0,0])
x.add_row(["Mean-shift", num_cluster, 0, 0,0])
print(x)
print("Based on the above information, the following suggestions by the clustering system are : \n ")
# Plan 1: rerun K-means with the averaged k and show its centres.
nx, centerx = mainx(num_clusterx)
print("Plan 1:\n", "Number of clusters(K-means/PAM):",nx,"\n Cluster center:")
for l in range(len(centerx)):
    print(centerx[l])
# Plan 2: mean-shift's own cluster count and centres.
print("Plan 2:\n", "Number of clusters(mean shift):",num_cluster,"\n Cluster center:")
for l in range(len(cluster_ids)):
    print(cluster_ids[l])
| Alan-D-Chen/CDIoU-CDIoUloss | anchor_generater/num_clustering.py | num_clustering.py | py | 6,580 | python | en | code | 25 | github-code | 6 | [
{
"api_name": "xml_extract2.xml_extract",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.pairwise_distances",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise",
"line_number": 31,
"usage_type": "attribu... |
6465021088 | """
This module takes care of starting the API Server, Loading the DB and Adding the endpoints
"""
from flask import Flask, request, jsonify, url_for, Blueprint
from api.models import db, User, Family
from api.utils import generate_sitemap, APIException
api = Blueprint('api', __name__)
@api.route('/Family', methods=['GET'])
def get_members():
    """Return every family member, serialized, as a JSON list."""
    members = Family.query.all()
    serialized = [member.serialize() for member in members]
    return jsonify(serialized), 200
@api.route('/Family/<int:family_id>', methods=['GET'])
def get_members_id(family_id):
    """Return the single family member whose primary key is *family_id*."""
    member = Family.query.get(family_id)
    return jsonify(member.serialize()), 200
@api.route('/Family', methods=['POST'])
def create_members():
    """Create a family member from the JSON body {name, lastname, years}."""
    # Bug fix: the original called json.loads on request.data but the `json`
    # module was never imported (NameError at request time).  Flask's
    # request.get_json() parses the body directly.
    data = request.get_json()
    member = Family(
        name=data["name"],
        lastname=data["lastname"],
        years=data["years"])
    db.session.add(member)
    db.session.commit()
    return jsonify(member.serialize())
@api.route('/Family', methods=['DELETE'])
def delete_members():
    """Delete the family member matching the JSON body {name, lastname, years}."""
    # Bug fixes: (1) `json` was never imported; (2) the original constructed
    # a brand-new, unpersisted Family instance and passed it to
    # db.session.delete, instead of querying the existing row; (3) it
    # returned jsonify(model-instance), which is not JSON-serializable.
    data = request.get_json()
    member = Family.query.filter_by(
        name=data["name"],
        lastname=data["lastname"],
        years=data["years"]).first()
    if member is None:
        return jsonify({"message": "member not found"}), 404
    db.session.delete(member)
    db.session.commit()
    response_body = {
        "message": "borrado!"
    }
    return jsonify(response_body), 200
| yasRF/apiFamily | src/api/routes.py | routes.py | py | 1,551 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "api.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask.Blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "api.models.Family.query.all",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "api.models.Famil... |
5329031446 | import torch
import copy
from torch.quantization.quantize import add_observer_
# Fusion-pattern tree consumed by _fuse_module_helper / fuse_module: each node
# maps the child-module types that may extend the current chain ('son') to the
# next node, and flags whether the chain accumulated so far is a valid fusion
# target ('can_be_fused').  Fusable chains: Conv2d->BN(->ReLU), Conv2d->ReLU,
# Linear->ReLU; a bare Conv2d or Linear is not fused on its own.
_RELU_BRANCH = {'son':None,'can_be_fused':True}
_BN_BRANCH = {'son': {torch.nn.ReLU:_RELU_BRANCH},'can_be_fused':True}
_NN_BRANCH = {'son': {torch.nn.ReLU:_RELU_BRANCH},'can_be_fused':False}
_CONV_BRANCH = {'son': {torch.nn.BatchNorm2d:_BN_BRANCH,torch.nn.ReLU:_RELU_BRANCH},'can_be_fused':False}
_FUSETREE = {'son':{torch.nn.Conv2d:_CONV_BRANCH,torch.nn.Linear:_NN_BRANCH},'can_be_fused':False}
# FuseTree = {torch.nn.Conv2d:{torch.nn.ReLU:None,torch.nn.BatchNorm2d:{torch.nn.ReLU:None}},torch.nn.Linear:{torch.nn.ReLU:None}}
def fuse_module(module, inplace = False):
    """Fuse conv/bn/relu (and linear/relu) chains in *module* for quantization.

    When *inplace* is False the input module is deep-copied first and left
    untouched; the (possibly copied) fused module is returned either way.
    """
    target = module if inplace else copy.deepcopy(module)
    _fuse_module_helper(target)
    return target
def _fuse_module_helper(module):
    """Walk *module*'s direct children, fusing maximal chains matching _FUSETREE.

    A chain (e.g. Conv2d -> BatchNorm2d -> ReLU) grows while consecutive
    children follow a path in the pattern tree; when the chain breaks or the
    children run out, the accumulated names are fused if the tree marks the
    chain as fusable.
    """
    names = []
    tmpTree = _FUSETREE
    for name, child in module.named_children():
        if type(child) in tmpTree['son']:
            # Child extends the current chain.
            tmpTree = tmpTree['son'][type(child)]
            names.append(name)
            continue
        # Chain broken: fuse what we have (if fusable), then reset.
        if tmpTree['can_be_fused']:
            torch.quantization.fuse_modules(module, names, inplace=True)
        names = []
        tmpTree = _FUSETREE
        if type(child) in tmpTree['son']:
            # Bug fix: a chain-breaking child can itself begin a new chain
            # (e.g. Conv2d, Conv2d, ReLU -- the second Conv2d was previously
            # only recursed into, so its Conv2d+ReLU pair was never fused).
            tmpTree = tmpTree['son'][type(child)]
            names.append(name)
        else:
            # Not a chain starter at this level: fuse inside the child.
            _fuse_module_helper(child)
    # Fuse a chain that ran to the end of the children list.
    if tmpTree['can_be_fused']:
        torch.quantization.fuse_modules(module, names, inplace=True)
# QCONFIGS = {} #use class method
# def propagate_qconfig(module,qconfig=None,inplace=False):
# if not inplace:
# module = copy.deepcopy(module)
# module.qconfig = QCONFIGS[getattr(module,'qconfig',qconfig)]
# if module.config is None:
# raise Exception('not qconfig passed in or set in module')
# for name, child in module.named_children():
# propagate_qconfig(child,qconfig)
#
# def prepare(model,inplace=False):
# assert hasattr(model,'qconfig')
# propagate_qconfig(model,qconfig=model.qconfig,inplace=inplace)
# add_observer_(model)
# return model
| HuDi2018/QTorch | utils/Quant.py | Quant.py | py | 1,941 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": ... |
5048789934 | from mesa import Agent
import random
class GameAgent(Agent):
    """ Base agent for a spatial prisoner's-dilemma society.

    Agents age, mingle to find a spouse, move into houses, play the
    prisoner's dilemma against daily partners, reproduce, consume food each
    night and die of old age or starvation.  Subclasses implement
    choose_action() to define a strategy. """

    def __init__(self, unique_id, model, home_cell=None):
        super().__init__(unique_id, model)
        self.score = 1            # accumulated payoff; doubles as food stock
        self.home_cell = home_cell
        self.spouse = None
        self.partner = None       # today's game/mingle partner
        self.choice = None        # pending prisoner's-dilemma choice
        self.age = 0
        self.children = []
        # Bug fix: step() and this class's death handling read self.parents,
        # but it was only ever assigned on children created in reproduce(),
        # so agents created directly by the model raised AttributeError.
        self.parents = []
        self.dead_spouse = False
        self.health = 10

    def __str__(self):
        return f"Agent {self.unique_id}, with score {self.score}. \n Home cell: {str(self.home_cell)}, Spouse: {self.spouse.unique_id if self.spouse else None}. \n Age: {self.age}, Children: {str([[child.age, child.home_cell == self.home_cell] for child in self.children])}. \n Had spouse: {str(self.dead_spouse)}"

    def step(self):
        """Advance one tick; behaviour depends on self.model.time and life stage."""
        if self.health < 1 or self.age > 85:
            # Death: unlink from family, hand over dependants, free the house.
            adult_at_home = [child for child in self.children if
                             ((child.age >= 18) and (child.home_cell == self.home_cell))]
            children_at_home = [child for child in self.children if child.age < 18]
            parents_at_home = [parent for parent in self.parents if parent.home_cell == self.home_cell]
            for child in self.children:
                child.parents.remove(self)
            for parent in self.parents:
                parent.children.remove(self)
            if self.spouse:
                # Widowed spouse stays in the house and keeps the children.
                self.spouse.dead_spouse = True
                self.spouse.spouse = None
            else:
                if len(adult_at_home) == 0:
                    if len(children_at_home) > 0:
                        # No adult left in the household: minors do not survive.
                        for child in children_at_home:
                            self.model.schedule.remove(child)
                            self.model.grid.remove_agent(child)
                        self.children = []
                    if len(parents_at_home) == 0:
                        # The house becomes vacant.
                        if self.home_cell not in self.model.unoccupied_houses:
                            self.model.unoccupied_houses.append(self.home_cell)
                        if self.home_cell in self.model.occupied_houses:
                            self.model.occupied_houses.remove(self.home_cell)
                else:
                    # Re-home minors with a random adult sibling.
                    for child in children_at_home:
                        new_parent = random.choice(adult_at_home)
                        new_parent.children.append(child)
                        child.parents.append(new_parent)
            self.model.schedule.remove(self)
            self.model.grid.remove_agent(self)
        else:
            if self.age < 18:
                # Minors only age, once per night.
                if self.model.time != "night":
                    pass
                else:
                    self.age += 1
            elif self.spouse is None and len([child for child in self.children if child.spouse is None]) == 0:
                # Single adults (with no unmarried children) mingle for a spouse.
                if self.model.time == "morning":
                    self.move(mingle=True)
                elif self.model.time == "midday":
                    if self.partner:
                        self.move_in()
                elif self.model.time == "afternoon":
                    self.move(destination=self.home_cell)
                elif self.model.time == "evening":
                    if self.spouse:
                        self.reproduce()
                else:
                    self.sleep()
            else:
                # Everyone else goes out and plays the dilemma.
                if self.model.time == "morning":
                    self.move()
                elif self.model.time == "midday":
                    if self.partner:
                        self.choose_action()
                elif self.model.time == "afternoon":
                    self.move(destination=self.home_cell)
                elif self.model.time == "evening":
                    if self.spouse:
                        self.reproduce()
                else:
                    self.sleep()

    def move(self, destination=None, mingle=False):
        """Move to *destination*, or pair up with (and move to) another free agent.

        With no destination, picks a partner from the model's pool
        (out_minglers when mingling, out_agents otherwise); if the pool is
        empty, moves to a random empty interior cell and joins the pool.
        """
        if destination is None:
            # Find another free player to pair with and move to their cell;
            # if no-one is available, wander and wait to be picked.
            if mingle:
                other_players = self.model.out_minglers
            else:
                other_players = self.model.out_agents
            if len(other_players) > 0:
                other_player = random.choice(other_players)
                self.partner = other_player
                other_player.partner = self
                self.move(other_player.pos)
                other_players.remove(other_player)
            else:
                # Move to a random empty interior cell.
                random_cell = (
                    random.randint(1, self.model.grid.width - 2), random.randint(1, self.model.grid.height - 2))
                while not self.model.grid.is_cell_empty(random_cell):
                    random_cell = (
                        random.randint(1, self.model.grid.width - 2), random.randint(1, self.model.grid.height - 2))
                self.move(destination=random_cell)
                other_players.append(self)
        else:
            # Move directly to the destination cell.
            self.model.grid.move_agent(self, destination)
            self.pos = destination

    def reproduce(self):
        """Possibly create 1-2 children, inheriting a parent's strategy type.

        Requires both partners to afford the cost (score > 10) and to be of
        child-bearing age (18-55); each child costs both parents 10 points.
        """
        type_dict = {"mean": MeanAgent, "altruistic": AltruisticAgent, "greenbeard": GreanBeardAltruistic,
                     "imposter": ImposterGreenBeards, "spiteful_family": SpitefulFamily, "spiteful": Spiteful, "tft": TitForTat,
                     "tft_family": TitForTatFamily}
        if self.score > 10 and self.spouse.score > 10:
            if 18 < self.age < 55 and 18 < self.spouse.age < 55:
                num_children = random.randint(1, 2)
                for i in range(num_children):
                    # Child copies one parent's strategy at random.
                    child_type = random.choice([self.type, self.spouse.type])
                    self.model.num_agents += 1
                    child = type_dict[child_type](self.model.num_agents, self.model)
                    child.home_cell = self.home_cell
                    self.model.schedule.add(child)
                    self.model.grid.place_agent(child, child.home_cell)
                    self.children.append(child)
                    self.spouse.children.append(child)
                    child.parents = [self, self.spouse]
                self.score -= 10
                self.spouse.score -= 10

    def sleep(self):
        """Consume food for the household overnight and age one year.

        Food needed scales with model.harshness: 1 share for self, 0.5 per
        resident adult child, 0.3 per minor.  Failing to pay costs the whole
        household 20 health; paying restores 10.
        """
        food_to_eat = self.model.harshness * (1 + sum(
            0.5 for child in self.children if child.home_cell == self.home_cell and child.age >= 18) + sum(
            0.3 for child in self.children if child.home_cell == self.home_cell and child.age < 18))
        if self.score < food_to_eat:
            self.health -= 20
            for child in self.children:
                if child.home_cell == self.home_cell:
                    child.health -= 20
        else:
            self.score -= food_to_eat
            self.health += 10
            for child in self.children:
                if child.home_cell == self.home_cell:
                    child.health += 10
        self.age += 1

    def move_in(self):
        """Marry the current partner (if within 5 years of age) and claim a house."""
        if self.age - 5 <= self.partner.age <= self.age + 5:
            if len(self.model.unoccupied_houses) > 0:
                # Move into a random unoccupied house together.
                self.home_cell = random.choice(self.model.unoccupied_houses)
                self.partner.home_cell = self.home_cell
                self.spouse = self.partner
                self.partner.spouse = self
                self.model.unoccupied_houses.remove(self.home_cell)
                self.model.occupied_houses.append(self.home_cell)

    def fight(self):
        """Score one prisoner's-dilemma round against the partner.

        Payoffs: CC 3/3, CD 0/5, DC 5/0, DD 1/1.  Both agents' choices are
        reset to None afterwards.
        """
        if self.choice == "cooperate":
            if self.partner.choice == "cooperate":
                self.score += 3
                self.partner.score += 3
            else:
                self.score += 0
                self.partner.score += 5
        else:
            if self.partner.choice == "cooperate":
                self.score += 5
                self.partner.score += 0
            else:
                self.score += 1
                self.partner.score += 1
        self.choice = None
        self.partner.choice = None
class MeanAgent(GameAgent):
    """ An agent that always chooses the mean option (defect). """

    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.type = "mean"

    def choose_action(self):
        # Bug fix: the original wrote `self.choice == "defect"` -- a no-op
        # comparison -- leaving self.choice at None so the agent never
        # actually registered a move.
        self.choice = "defect"
        if self.partner.choice:
            self.fight()
class AltruisticAgent(GameAgent):
    """ An unconditional cooperator. """

    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.type = "altruistic"

    def choose_action(self):
        # Always cooperate; play the round once the partner has committed.
        self.choice = "cooperate"
        if self.partner.choice is not None:
            self.fight()
class GreanBeardAltruistic(GameAgent):
    """ Cooperates only with agents displaying the green beard (including imposters). """

    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.type = "greenbeard"

    def choose_action(self):
        # Cooperate with fellow beard-wearers, defect against everyone else.
        bearded = ("greenbeard", "imposter")
        self.choice = "cooperate" if self.partner.type in bearded else "defect"
        if self.partner.choice is not None:
            self.fight()
class ImposterGreenBeards(GameAgent):
    """ Wears the green beard to exploit genuine greenbeards' altruism. """

    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.type = "imposter"

    def choose_action(self):
        # Defect against beard-wearers (exploiting their trust); cooperate otherwise.
        bearded = ("greenbeard", "imposter")
        self.choice = "defect" if self.partner.type in bearded else "cooperate"
        if self.partner.choice is not None:
            self.fight()
class SpitefulFamily(GameAgent):
    """ Cooperates until betrayed; then the agent -- and its whole family --
    defects against the betrayer forever. """

    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.type = "spiteful_family"
        self.wronged_list = []  # agents that ever defected against the family

    def choose_action(self):
        # Cooperate unless the partner has wronged this family before.
        if self.partner in self.wronged_list:
            self.choice = "defect"
        else:
            self.choice = "cooperate"
        if self.partner.choice:
            self.fight()

    def tell_related(self, agent):
        """Spread the grudge against *agent* to all spiteful-family relatives."""
        # Bug fix: the original appended a *list* of grandchildren per child
        # ([[i for i in child.children] for ...]), so iterating relatives
        # yielded list objects and member.type raised AttributeError; it also
        # crashed when self.partner was None.  Flatten and skip None.
        grandchildren = [gc for child in self.children for gc in child.children]
        relatives = self.children + [self.partner] + self.parents + grandchildren
        for member in relatives:
            if member is not None and member.type == "spiteful_family":
                member.wronged_list.append(agent)

    def fight(self):
        # Bug fix: super().fight() resets both agents' choices to None, so the
        # original post-hoc check `self.partner.choice == "defect"` was never
        # true.  Capture the partner's choice before playing the round.
        partner_choice = self.partner.choice
        super().fight()
        if partner_choice == "defect":
            if self.partner not in self.wronged_list:
                self.wronged_list.append(self.partner)
                self.tell_related(self.partner)
class Spiteful(GameAgent):
    """ Cooperates until personally betrayed; then defects against the
    betrayer forever. """

    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.type = "spiteful"
        self.wronged_list = []  # agents that ever defected against us

    def choose_action(self):
        # Cooperate unless the partner has defected against us before.
        if self.partner in self.wronged_list:
            self.choice = "defect"
        else:
            self.choice = "cooperate"
        if self.partner.choice:
            self.fight()

    def fight(self):
        # Bug fix: capture the partner's choice before super().fight() resets
        # both choices to None -- the original check was always False, so the
        # grudge list never grew.
        partner_choice = self.partner.choice
        super().fight()
        if partner_choice == "defect":
            if self.partner not in self.wronged_list:
                self.wronged_list.append(self.partner)
class TitForTat(GameAgent):
    """ Classical Tit for Tat: defect against last-round defectors, forgive
    once they cooperate again. """

    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.type = "tft"
        self.wronged_list = []  # agents whose last move against us was defect

    def choose_action(self):
        # Mirror the opponent's most recent move against us.
        if self.partner in self.wronged_list:
            self.choice = "defect"
        else:
            self.choice = "cooperate"
        if self.partner.choice:
            self.fight()

    def fight(self):
        # Bug fix: capture the partner's choice before super().fight() resets
        # both choices to None -- the original check was always False, so
        # neither retaliation nor forgiveness ever happened.
        partner_choice = self.partner.choice
        super().fight()
        if partner_choice == "defect":
            if self.partner not in self.wronged_list:
                self.wronged_list.append(self.partner)
        else:
            if self.partner in self.wronged_list:
                self.wronged_list.remove(self.partner)
class TitForTatFamily(GameAgent):
    """ Tit for Tat where grudges (and forgiveness) are shared with the family. """

    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.type = "tft_family"
        self.wronged_list = []  # agents whose last move against the family was defect

    def choose_action(self):
        # Mirror the opponent's most recent move against the family.
        if self.partner in self.wronged_list:
            self.choice = "defect"
        else:
            self.choice = "cooperate"
        if self.partner.choice:
            self.fight()

    def fight(self):
        # Bug fixes: (1) capture the partner's choice before super().fight()
        # resets both choices to None (the original check was always False);
        # (2) the original forgiveness branch called
        # self.wronged_list.remove(self.partner, remove=True) -- a TypeError,
        # since the remove=True flag belongs to tell_related.
        partner_choice = self.partner.choice
        super().fight()
        if partner_choice == "defect":
            if self.partner not in self.wronged_list:
                self.wronged_list.append(self.partner)
                self.tell_related(self.partner)
        else:
            if self.partner in self.wronged_list:
                self.wronged_list.remove(self.partner)
                self.tell_related(self.partner, remove=True)

    def tell_related(self, agent, remove=False):
        """Add (or, with remove=True, retract) a family-wide grudge against *agent*."""
        # Bug fix: flatten grandchildren (the original appended nested lists,
        # so member.type raised AttributeError) and tolerate a None partner;
        # removals are guarded so list.remove never raises on a missing entry.
        grandchildren = [gc for child in self.children for gc in child.children]
        relatives = self.children + [self.partner] + self.parents + grandchildren
        for member in relatives:
            if member is None or member.type != "tft_family":
                continue
            if remove:
                if agent in member.wronged_list:
                    member.wronged_list.remove(agent)
            else:
                member.wronged_list.append(agent)
{
"api_name": "mesa.Agent",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_num... |
16551762444 | '''
Created on May 12, 2015
@author: wohlhart
'''
from tnetcore.layers.base import LayerParams, Layer
from tnetcore.util import readCfgIntNoneListParam, readCfgIntParam # @UnresolvedImport
import theano.tensor as T
import numpy
class CatLayerParams(LayerParams):
    '''
    Concatenation Layer Parameters.

    Describes a layer that concatenates several 4D inputs
    (batchsize, nchan, h, w) along one axis; the output dimension is
    derived from the list of input dimensions in update().
    '''
    yaml_tag = u'!CatLayerParams'
    def __init__(self, inputDim=None,axis=1):
        '''
        :param inputDim: list of input shapes, one (bs,nchan,h,w) entry per input
        :param axis: axis along which the inputs are concatenated (default 1 = channels)
        '''
        super(CatLayerParams,self).__init__(inputDim=inputDim,outputDim=None)
        self.LayerClass = CatLayer
        self._inputDim = inputDim
        self._axis = axis
        self.update()
    def initFromConfig(self,cfg,sectionKey):
        # Read inputDim and axis from the config section, then recompute outputDim.
        super(CatLayerParams,self).initFromConfig(cfg,sectionKey)
        self._inputDim = readCfgIntNoneListParam(cfg,sectionKey,'inputDim',self._inputDim)
        self._axis = readCfgIntParam(cfg,sectionKey,'axis',self._axis)
        self.update()
    @property
    def axis(self):
        # Concatenation axis.
        return self._axis
    @axis.setter
    def axis(self,value):
        # Changing the axis changes the output shape, so recompute it.
        self._axis = value
        self.update()
    def update(self):
        '''
        calc outputDim from the input dims and the concatenation axis
        '''
        # Nothing to compute until both axis and inputDim are known.
        if (self._axis is None) or (self._inputDim is None):
            return
        #assert len(self._inputDim) > 1 and len(self._inputDim[0]) > 1, "CatLayer needs more than one input"
        self.checkInputDim(expectMultipleInputs=True)
        # inputDim is a list of inputDims. check if they agree along the non-cat-axes
        inDim = numpy.array(self._inputDim)
        #print("self._inputDim {}".format(self._inputDim))
        #print("inDim {}".format(inDim))
        assert inDim.shape[1] == 4, "Wrong inputDim shape {}; each row must have 4 entries (bs,nchan,h,w)".format(inDim.shape)
        numInputs = inDim.shape[0]
        # All axes except the cat axis must match across every input.
        nonCatAxes = numpy.setdiff1d(numpy.arange(inDim.shape[1]),[self._axis])
        numEqual = numpy.sum(inDim == inDim[0],axis=0)
        assert all(numEqual[nonCatAxes] == numInputs), "the axes along which not to concatenate must be equal, but are (axis={})\n{}".format(self._axis,inDim)
        # outDim is inDim for all non-cat-axes and sum over inDims for the cat axis
        outDim = inDim[0]
        outDim[self._axis] = sum(inDim[:,self._axis])
        self._outputDim = list(outDim)
        self.checkOutputDim()
    def debugPrint(self,indent=0):
        # Dump the layer configuration, indented for nested printing.
        iStr = " "*indent
        print("CatLayer:")
        print(iStr + "inputs =    {}".format(self._inputs))
        print(iStr + "inputDim =  {}".format(self._inputDim))
        print(iStr + "axis =      {}".format(self._axis))
        print(iStr + "outputDim = {}".format(self._outputDim))
    def __getstate__(self):
        # Pickle/YAML support: extend the base state with the cat axis.
        state = super(CatLayerParams,self).__getstate__()
        state['axis'] = self._axis
        return state
    def __setstate__(self,state):
        # Restore the axis and recompute the derived output shape.
        super(CatLayerParams,self).__setstate__(state)
        self._axis = state['axis']
        self.update()
class CatLayer(Layer):
    """
    Concatenation Layer: joins its input tensors along the configured axis.
    """

    def __init__(self, rng, inputVar, cfgParams, copyLayer=None, layerNum=None):
        """
        :type rng: numpy.random.RandomState
        :param rng: random number generator (unused; kept for interface parity)

        :type inputVar: list of theano tensors
        :param inputVar: the symbolic inputs to concatenate

        :type cfgParams: CatLayerParams
        :param cfgParams: layer configuration, provides the concatenation axis
        """
        self.cfgParams = cfgParams
        self.inputVar = inputVar
        # Symbolic concatenation of all inputs along the configured axis.
        self.output = T.concatenate(inputVar, cfgParams.axis)
        # A concatenation has no trainable parameters.
        self.params = []
        self.weights = []
| paroj/ObjRecPoseEst | src/tnetcore/layers/catlayer.py | catlayer.py | py | 3,825 | python | en | code | 71 | github-code | 6 | [
{
"api_name": "tnetcore.layers.base.LayerParams",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "tnetcore.util.readCfgIntNoneListParam",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tnetcore.util.readCfgIntParam",
"line_number": 32,
"usage_type": "... |
30052972082 | from django.conf.urls import include, url
from rest_framework import routers
from App.views import UserViewSet, GroupViewSet, BookViewSet
# Register each viewset on a DRF default router and expose it under /drf/.
router = routers.DefaultRouter()
for prefix, viewset in (('user', UserViewSet),
                        ('group', GroupViewSet),
                        ('book', BookViewSet)):
    router.register(prefix, viewset)

urlpatterns = [
    url('^drf/', include(router.urls)),
]
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "App.views.UserViewSet",
"line_number": 7,
"usage_type": "argument"
},
{
... |
15183208796 | #! usr/bin/env python
# -*- coding : utf-8 -*-
import codecs
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, make_scorer
import time
import numpy as np
np.random.seed(123)
from skopt import gp_minimize
import matplotlib.pyplot as plt
from random import uniform
from skopt.acquisition import gaussian_ei
def main():
    """End-to-end hyperparameter-search demo on the UCI machine dataset.

    Fits a Lasso baseline, then compares three ways of tuning ``alpha`` —
    exhaustive grid search, random search, and Bayesian optimisation
    (scikit-optimize) — timing each, and finally plots the GP surrogate and
    EI acquisition for the first five BO iterations after the random starts.
    """
    # import some data to play with.
    # NOTE(review): columns 0-4 are used as features and column 6 as the
    # target, so column 5 is skipped — confirm this is intentional.
    X = []
    y = []
    with codecs.open("../data/machine.data", 'r', 'utf-8') as infile:
        for line in infile:
            tokens = line.split(',')
            X.append([float(x) for x in tokens[:5]])
            y.append(float(tokens[6]))
    # Sequential 80/20 train/test split. Renamed from `slice`, which
    # shadowed the built-in of the same name.
    split_idx = int(round(len(X)*0.8))
    X_train = X[:split_idx]
    X_test = X[split_idx:]
    y_train = y[:split_idx]
    y_test = y[split_idx:]
    # Baseline: Lasso with the default alpha.
    regr = linear_model.Lasso()
    regr.fit(X_train, y_train)
    y_predict = [i for i in regr.predict(X_test)]
    print("loss of the model:{}".format(mean_squared_error(y_test, y_predict)))
    # apply gridsearch over alpha in (0.001 .. 8.0] with step 0.001
    worst_case = float("inf")
    mse_gs_scores = []
    t0 = time.time()
    for g in [(i+1)*0.001 for i in range(8000)]:
        regr = linear_model.Lasso(alpha=g)
        regr.fit(X_train, y_train)
        y_pred = [i for i in regr.predict(X_test)]
        mse = mean_squared_error(y_test, y_pred)
        mse_gs_scores.append([g,mse])
        # save if best
        if mse < worst_case:
            worst_case = mse
            best_grid = g
    t1 = time.time()
    print("time taken by gridserach: {}".format(t1 - t0))
    print((worst_case,best_grid))
    # applying random search: 1000 uniform draws of alpha in [0, 8]
    worst_case = float("inf")
    mse_rs_scores = []
    t0 = time.time()
    for _ in range(1000):
        g = uniform(0, 8)
        regr = linear_model.Lasso(alpha=g)
        regr.fit(X_train, y_train)
        y_pred = [i for i in regr.predict(X_test)]
        mse = mean_squared_error(y_test, y_pred)
        mse_rs_scores.append([g, mse])
        # save if best
        if mse < worst_case:
            worst_case = mse
            best_random = g
    t1 = time.time()
    print("time taken by randomserach: {}".format(t1 - t0))
    print((worst_case,best_random))
    # apply bayesian optimization
    noise_level = 0.1

    def f(alphavalue):
        # Objective: test-set MSE of a Lasso fit with the given alpha.
        regr = linear_model.Lasso(alpha=alphavalue)
        regr.fit(X_train, y_train)
        y_pred = [i for i in regr.predict(X_test)]
        return mean_squared_error(y_test, y_pred)

    # Evaluate the true objective on the whole grid for the reference plot.
    x = np.array([(i+1)*0.001 for i in range(8000)])
    fx = [f(x_i) for x_i in x]
    plt.plot(x, fx, "r--", label="True (unknown)")
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx],
                             [fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),
             alpha=.2, fc="r", ec="None")
    t4 = time.time()
    res = gp_minimize(f,  # the function to minimize
                      [(0.001, 8.0)],  # the bounds on each dimension of x
                      acq_func="EI",  # the acquisition function
                      n_calls=15,  # the number of evaluations of f
                      n_random_starts=5,  # the number of random initialization points
                      random_state=123)
    t5 = time.time()
    print("time taken by BO_search: {}".format(t5 - t4))
    print(res['fun'])
    print(res['x'])
    plt.plot(res.x_iters, res.func_vals, "b--", label="BO")
    plt.plot([i[0] for i in mse_rs_scores][:10], [i[1] for i in mse_rs_scores][:10], "g--", label="Random Search")
    plt.legend()
    plt.grid()
    plt.show()
    # Per-iteration diagnostic plots: surrogate mean/uncertainty (left
    # column) and EI acquisition with the next query point (right column).
    plt.rcParams["figure.figsize"] = (8, 14)
    x = np.linspace(0.001, 8.0, 8000).reshape(-1, 1)
    x_gp = res.space.transform(x.tolist())
    fx = np.array([f(x_i) for x_i in x])
    # Plot the 5 iterations following the 5 random points
    for n_iter in range(5):
        gp = res.models[n_iter]
        curr_x_iters = res.x_iters[:5 + n_iter]
        curr_func_vals = res.func_vals[:5 + n_iter]
        # Plot true function.
        plt.subplot(5, 2, 2 * n_iter + 1)
        plt.plot(x, fx, "r--", label="True (unknown)")
        plt.fill(np.concatenate([x, x[::-1]]),
                 np.concatenate([fx - 1.9600 * noise_level,
                                 fx[::-1] + 1.9600 * noise_level]),
                 alpha=.2, fc="r", ec="None")
        # Plot GP(x) + contours
        y_pred, sigma = gp.predict(x_gp, return_std=True)
        plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
        plt.fill(np.concatenate([x, x[::-1]]),
                 np.concatenate([y_pred - 1.9600 * sigma,
                                 (y_pred + 1.9600 * sigma)[::-1]]),
                 alpha=.2, fc="g", ec="None")
        # Plot sampled points
        plt.plot(curr_x_iters, curr_func_vals,
                 "r.", markersize=8, label="Observations")
        # Adjust plot layout
        plt.grid()
        if n_iter == 0:
            plt.legend(loc="best", prop={'size': 6}, numpoints=1)
        if n_iter != 4:
            plt.tick_params(axis='x', which='both', bottom='off',
                            top='off', labelbottom='off')
        # Plot EI(x)
        plt.subplot(5, 2, 2 * n_iter + 2)
        acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
        plt.plot(x, acq, "b", label="EI(x)")
        plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')
        next_x = res.x_iters[5 + n_iter]
        next_acq = gaussian_ei(res.space.transform([next_x]), gp, y_opt=np.min(curr_func_vals))
        plt.plot(next_x, next_acq, "bo", markersize=6, label="Next query point")
        # Adjust plot layout
        plt.ylim(0, 0.1)
        plt.grid()
        if n_iter == 0:
            plt.legend(loc="best", prop={'size': 6}, numpoints=1)
        if n_iter != 4:
            plt.tick_params(axis='x', which='both', bottom='off',
                            top='off', labelbottom='off')
    plt.show()
# Script entry point: run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()
| aggarwalpiush/Hyperparameter-Optimization-Tutorial | model/svm_demo.py | svm_demo.py | py | 5,946 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.La... |
31866653355 | #python
import os
import socket
import time
import csv
from pathlib import Path
#django modules
from django.shortcuts import render, redirect
from django.views.generic.edit import CreateView
from django.views.generic import DetailView, FormView, ListView
#models
from apps.start_block.models import Session
from apps.admin2.models import Athletes, Coach
#local functions
from .math_functions import graph_data, save_csv_data
class DetailSessionView(DetailView):
    """Detail page for a single recorded Session.

    Looks the session up by the ``session_id`` URL kwarg and exposes the
    left/right force-plot image paths plus the stored force maxima to the
    template.
    """
    template_name = 'start_point/results.html'
    model = Session
    pk_url_kwarg = 'session_id'
    def get_context_data(self, **kwargs):
        """Add plot paths and max-force values for the displayed session."""
        context = super().get_context_data(**kwargs)
        # NOTE(review): DetailView already fetched the object; this second
        # Session.objects.get() re-queries the same row.
        session_id = context['object'].id
        session_data = Session.objects.get(pk = session_id)
        file_name = session_data.data
        # Image files are produced elsewhere under media/static "data/".
        context["graph_gauche"] = F"data/{file_name}_left.png"
        context["graph_droit"] = F"data/{file_name}_right.png"
        context["max_force_gauche"] = session_data.max_force_left
        context['max_force_droit'] = session_data.max_force_right
        return context
def delete(request, session_id):
    """Soft-delete a session by flagging it disabled, then show the list page.

    The row is kept in the database; list views exclude ``enabled=False``.
    """
    Session.objects.filter(id=session_id).update(enabled=False)
    return redirect("start_block:SessionList")
class CreateSessionView(CreateView):
    """Form view for starting a new recording Session (athlete + coach)."""
    model = Session
    # Only athlete and coach are user-selectable; the data file name is
    # generated server-side.
    fields = [
        'athlete',
        'coach',
    ]
    template_name = "start_point_home.html"
    def get_context_data(self, **kwargs):
        """Expose a fresh timestamp-based file name to the template."""
        context = super().get_context_data(**kwargs)
        context['file_name'] = generate_file_name()
        return context
def generate_file_name():
    """Return a timestamp-based file name, ``YYYY_M_D_H_M_S``.

    The current time is read exactly once so all six fields come from the
    same instant; the original called ``time.localtime()`` six times, which
    could straddle a second (or minute/day) boundary and produce an
    inconsistent stamp such as ``..._59`` of the previous minute.
    """
    now = time.localtime()
    name = F"{now.tm_year}_{now.tm_mon}_{now.tm_mday}_{now.tm_hour}_{now.tm_min}_{now.tm_sec}"
    return name
def results(request):
    """Process a finished recording: build plots, persist a Session, render results.

    Reads coach/athlete ids and the raw data file name from the POST body,
    prefixes the file name with the coach and athlete names, converts the raw
    capture to CSV, renders the force graphs, stores a Session row with the
    force maxima, and renders the results template.
    """
    BASE_DIR = Path(__file__).resolve().parent.parent.parent
    template_name = 'start_point/results.html'
    # get info from web page
    coach_id = request.POST["coach"]
    coach_name = Coach.objects.get(
        id = coach_id
    ).nom
    athlete_id = request.POST["athlete"]
    athlete_name = Athletes.objects.get(
        id = athlete_id
    ).nom
    file_name = request.POST["data"]
    file_name = F"{coach_name}__{athlete_name}_{file_name}"
    # Convert the raw capture to CSV, then produce the left/right PNG plots.
    save_csv_data(file_name)
    print("graficando")
    max_force_gauche, max_force_droit = graph_data(file_name)
    # context data
    # NOTE(review): only the left graph is passed here, while
    # DetailSessionView also supplies "graph_droit" — confirm whether the
    # results template needs the right-hand plot too.
    data = {
        "graph_gauche": F"data/{file_name}_left.png",
        "max_force_gauche": max_force_gauche,
        'max_force_droit': max_force_droit,
    }
    # saving model
    new_data = Session(
        data = file_name,
        athlete = Athletes.objects.get(id=request.POST['athlete']),
        coach = Coach.objects.get(id=request.POST['coach']),
        max_force_left = max_force_gauche,
        max_force_right = max_force_droit,
    )
    new_data.save()
    return render(
        request,
        template_name,
        context = data
    )
def get_data(request):
    """Stream raw sensor bytes from the start-block device into a local file.

    Sends the start byte ``b"1"`` to the acquisition host, then reads one
    byte at a time until the ``b"!"`` terminator arrives, appending each
    newline-terminated record to the file named by the POSTed ``data``
    field. Finally records a bare Session row and redirects home.
    """
    name = request.POST["data"]
    #if(request.method == "GET"):
    #    return redirect("start_block:home")
    port = 50
    host = "10.20.1.56"
    # `with` guarantees both the file and the socket are released even when
    # connect()/recv() raise; the original opened the file before connecting
    # and only closed both on the success path, leaking them on failure.
    with open(name, "a") as file, socket.socket() as s_point:
        print(F"Connecting to {host} in port {port}")
        s_point.connect((host, port))
        try:
            message = b"1"
            s_point.send(message)
            data = b""
            number = 0
            llega = b""
            print(F'Receiving data in {name}')
            while (not data == b"!"):
                data = s_point.recv(1)
                #print(data)
                llega += data
                if (data == b"\n"):
                    number += 1
                    #print(F"{number} = {str(llega)}")
                    file.write(F"{llega.decode('ascii')}")
                    llega = b""
        except Exception as E:
            # Best-effort capture: keep whatever was written so far.
            print("Error: ")
            print(E)
    new_data = Session()
    new_data.athlete = Athletes.objects.get(id=request.POST['athlete'])
    new_data.data = request.POST['data']
    new_data.save()
    return redirect("start_block:home")
class ListSessionView(ListView):
    """List all enabled sessions belonging to enabled athletes."""
    model = Session
    template_name = 'start_point/list_sessions.html'
    def get_queryset(self):
        """Hide soft-deleted sessions and sessions of disabled athletes."""
        # NOTE(review): ListView only applies get_ordering() through its
        # default get_queryset(); since that is overridden here without
        # calling order_by(), the ordering below is likely never applied —
        # confirm.
        return Session.objects.filter(
            athlete__enabled = True,
        ).exclude(
            enabled = False,
        )
    def get_ordering(self):
        """Ordering field from the ``ordering`` GET param (default 'created')."""
        ordering = self.request.GET.get('ordering', 'created')
        # validate ordering here
        return ordering
| MeletChirino/Linky4Teens | new_gui/apps/start_block/views.py | views.py | py | 4,720 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.views.generic.DetailView",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "apps.start_block.models.Session",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "apps.start_block.models.Session.objects.get",
"line_number": 29,
"usage_t... |
8665069914 | import json
import os
import settings
from jsonschema import validate
from decimal_encoder import DecimalEncoder
from lambda_base import LambdaBase
class ArticlesEyecatch(LambdaBase):
    """Lambda handler returning the public 'eyecatch' articles for a topic.

    Reads the curated article-id list from the screened-article DynamoDB
    table (single item keyed ``article_type='eyecatch'``), resolves each id
    against the article-info table, and returns only articles whose status
    is 'public'.
    """
    def get_schema(self):
        """JSON schema for the request params: ``topic`` is required."""
        return {
            'type': 'object',
            'properties': {
                'topic': settings.parameters['topic']
            },
            'required': ['topic']
        }
    def validate_params(self):
        """Raise jsonschema.ValidationError if params do not match the schema."""
        validate(self.params, self.get_schema())
    def exec_main_proc(self):
        """Build the API response: 200 with the (possibly empty) item list."""
        screened_article_table = self.dynamodb.Table(os.environ['SCREENED_ARTICLE_TABLE_NAME'])
        eyecatch_articles = screened_article_table.get_item(Key={'article_type': 'eyecatch'}).get('Item')
        # No curated list at all, or none for this topic: return empty.
        if not eyecatch_articles \
                or not eyecatch_articles.get('articles') \
                or not eyecatch_articles.get('articles').get(self.params['topic']):
            items = []
            return {
                'statusCode': 200,
                'body': json.dumps({'Items': items})
            }
        # Resolve ids to article-info rows; drop private/missing articles.
        items = [self.__get_public_article(article_id) for article_id in
                 eyecatch_articles.get('articles').get(self.params['topic'])]
        items = [item for item in items if item is not None]
        return {
            'statusCode': 200,
            'body': json.dumps({'Items': items}, cls=DecimalEncoder)
        }
    def __get_public_article(self, article_id):
        """Return the article-info item if it exists and is public, else None."""
        article_info_table = self.dynamodb.Table(os.environ['ARTICLE_INFO_TABLE_NAME'])
        article_info = article_info_table.get_item(Key={'article_id': article_id}).get('Item')
        if not article_info or not article_info['status'] == 'public':
            return None
        return article_info
| AlisProject/serverless-application | src/handlers/articles/eyecatch/articles_eyecatch.py | articles_eyecatch.py | py | 1,738 | python | en | code | 54 | github-code | 6 | [
{
"api_name": "lambda_base.LambdaBase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "settings.parameters",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "jsonschema.validate",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "o... |
5008945541 | """
This module contains a basic orchestrator for the execution of sequential data transformation stages.
"""
from __future__ import annotations
import typing as t
import types
from fontai.config.pipeline import Config as PipelineConfig, ConfigHandler as PipelineConfigHandler
from fontai.runners.base import ConfigurableTransform, FittableTransform
from fontai.config.core import BasePipelineTransformConfig
class ManyToManyTransform(object):
    """Adapter that flattens one-to-many / many-to-many pipeline transforms.

    Wraps a core transform so that both its input and its output may be
    either a single element or a generator of elements; every produced
    output is yielded individually.

    Attributes:
        core_transform: the wrapped transform exposing ``transform(elem)``
    """

    def __init__(self, core_transform):
        self.core_transform = core_transform

    def transform(self, data: t.Any):
        """Yield every output produced for every input element in ``data``.

        Args:
            data (t.Any): a single element or a generator of elements

        Yields:
            t.Any: individual transformed outputs
        """
        for element in self.to_generator(data):
            produced = self.core_transform.transform(element)
            yield from self.to_generator(produced)

    def to_generator(self, data):
        """Return ``data`` unchanged if it is a generator, else a one-item iterator."""
        if isinstance(data, types.GeneratorType):
            return data
        return iter((data,))
class Pipeline(ConfigurableTransform):
"""Pipeline class to execute a sequence of ConfigurableTransforms; this allows to perform the whole set of transformations from raw data to (possible multiple) trained models
Attributes:
streaming_pipeline (t.List[ConfigurableTransform]): List of instantiated transforms
transforms (type): classes of pipeline stages inheriting from ConfigurableTransform. Possible choices are defined in the fontai.runners.stages module
configs (t.List[BasePipelineTransformConfig]): Sequence of configuration files to instantiate and execute each stage
fit_stage (t.List[bool]): If True, fit the corresponding pipeline stage instead of using it for scoring. It is ignored if the stage is not fittable.
"""
def __init__(self, transforms: t.List[type], configs: t.List[BasePipelineTransformConfig], fit_stage: t.List[bool]):
"""Summary
Args:
transforms (t.List[type]): List of transformations in the pipeline
configs (t.List[BasePipelineTransformConfig]): List of parsed configurations, one per stage in the pipeline
fit_stage (t.List[bool]): If True, fit the corresponding pipeline stage instead of using it for scoring. It is ignored if the stage is not fittable.
"""
self.transforms = transforms
self.configs = configs
self.fit_stage = fit_stage
self.streaming_pipeline = [
ManyToManyTransform(core_transform = transform.from_config_object(config)) for transform, config in zip(self.transforms, self.configs)]
def transform(self, data: t.Any) -> t.Any:
out = data
for streaming_transform in self.streaming_pipeline:
out = streaming_transform.transform(out)
return out
@classmethod
def from_config_object(cls, config: PipelineConfig) -> Pipeline:
return cls(config.stages, config.configs, config.fit_stage)
@classmethod
def run_from_config_object(cls, config: PipelineConfig) -> None:
pipeline = cls.from_config_object(config)
for transform, config, fit in zip(pipeline.transforms, pipeline.configs, pipeline.fit_stage):
if fit and issubclass(transform, FittableTransform):
transform.fit_from_config_object(config)
else:
transform.run_from_config_object(config)
@classmethod
def get_config_parser(cls) -> PipelineConfigHandler:
return PipelineConfigHandler() | nestorSag/textfont-ai | src/fontai/fontai/runners/pipeline.py | pipeline.py | py | 3,542 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.Any",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "types.GeneratorType",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "fontai.runners.base.ConfigurableTransform",
"line_number": 42,
"usage_type": "name"
},
{
... |
11370330174 | from Models import pi_net, weights_init_1st
import multiprocessing
import torch.optim as optim
import torch
from torch.distributions import Categorical
import torch.nn.functional as F
from torch import tanh
import numpy as np
from utils import get_state_repr_from_int, get_state_from_int, get_state_as_int, get_state_repr, get_state_as_pair
FloatTensor = torch.FloatTensor
LongTensor = torch.LongTensor
ByteTensor = torch.ByteTensor
# Module-level helper (despite the dunder name it is a plain function, used
# as __repr__(obj)): returns the default CPython-style object representation
# '<module.Class object at 0x...>' for any object.
def __repr__(self):
    """Return '<module.Class object at 0x...>' for *self*."""
    return '<%s.%s object at %s>' % (
        self.__class__.__module__,
        self.__class__.__name__,
        hex(id(self))
    )
class Agent():
    """REINFORCE-style agent that talks to a shared environment via queues.

    Actions are sent as ``(agent_id, action)`` on ``action_queue``; the
    environment replies with ``(observation, reward, done, info)`` on
    ``continue_queue``. After each episode the discounted returns are used
    to update the policy network.
    """
    def __init__(self, agent_id, sending_queue, response_queue, episodes, exp_conf, results):
        self.agent_id = agent_id
        self.action_queue = sending_queue      # agent -> environment
        self.continue_queue = response_queue   # environment -> agent
        self.net = pi_net()
        self.net.apply(weights_init_1st)
        self.lr = exp_conf['lr']
        self.optimizer = optim.RMSprop(self.net.parameters(), lr=self.lr)
        self.episodes = episodes
        self.results = results                 # queue for per-episode rewards
        self.net_class = exp_conf['net']
        self.DEBUG = exp_conf["DEBUG"]
        self.GAMMA = exp_conf['gamma']
    def reset(self):
        """Placeholder reset hook (only logs)."""
        print("reset")
    def start(self):
        """Run ``self.episodes`` episodes, training after each one.

        NOTE(review): ``self.optimizer.zero_grad()`` is never called, so
        gradients accumulate across episodes before each ``step()`` — this
        looks unintended for REINFORCE; confirm.
        """
        #print(multiprocessing.current_process(), __repr__(self.net), __repr__(self))
        reward_per_day = []
        score = []
        times_trained = 0
        times_reach_goal = 0
        for k in range(self.episodes):
            # Initial 6x6 grid state with the agent marker at (0, 0).
            observation = np.zeros((6,6))
            observation[0,0] = 6
            episode_series = []
            reward_acum = []
            time_of_day = 0
            done = False
            while not done:
                np_observation = get_state_repr(observation)
                # np_observation = np.expand_dims(np_observation, axis=0)
                np_observation = np.expand_dims(np_observation, axis=0)
                observation_tensor = torch.FloatTensor(np_observation)
                action_probs = self.net(observation_tensor)
                action_probs_orig = action_probs
                # FOR EXPLORATION: dropout kept active at inference time.
                action_probs = F.dropout(action_probs, p=0.3, training=True)
                action_probs = F.softmax(action_probs, dim=1)
                m = Categorical(action_probs)
                action = m.sample()
                log_prob = m.log_prob(action)
                # break
                # Execute action in environment.
                if k % 1000 == 0 and self.DEBUG:
                    # print("action_probs_orig ")
                    # print(action_probs_orig)
                    print("Time of day=" + str(time_of_day) + ", on state=" + str(get_state_as_pair(observation)) +
                          ", selected action=" + str(get_state_as_pair(get_state_from_int(action.item()))) + " ,")
                time_of_day += 1
                # sending to env:
                self.action_queue.put((self.agent_id, action.item()))
                # waiting for result:
                observation, reward, done, info = self.continue_queue.get()
                if k % 1000 == 0 and self.DEBUG:
                    print(
                        "new state=" + str(get_state_as_pair(observation)) + ", rewards=" + str(reward) + ", done=" + str(
                            done))
                # if done and reward != 1.0:
                # if observation == 5 or observation == 7 or observation == 11 or observation == 12:
                #    reward = -1.0
                step_data = [get_state_repr(observation), action, log_prob, reward, done, info]
                episode_series.append(step_data)
                last_reward = reward
                reward_acum.append(reward)
            # FINISH EPISODE: track total reward and a 100-episode running score.
            reward_per_day.append(np.sum(reward_acum))
            if len(score) < 100 :
                score.append(np.sum(reward_acum))
            else:
                score[k % 100] = np.sum(reward_acum)
            if k % 1000 == 0 and self.DEBUG:
                print(
                    "Episode {} finished after {} timesteps with r={}. Running score: {}. Times trained: {}. Times reached goal: {}.".format(
                        k, len(episode_series), np.sum(reward_acum), np.mean(score), times_trained, times_reach_goal))
                times_trained = 0
                times_reach_goal = 0
            policy_loss = []
            rewards_list = []
            # REINFORCE: discounted return G_i from step i onwards, weighting
            # -log pi(a_i|s_i). (O(n^2) recomputation of the suffix sums.)
            for i in range(len(episode_series)):
                j = i
                G = 0
                # alpha = 1 / len(episode_series)
                # get the log_prob of the last state:
                gamma_cum = 1
                while j < len(episode_series):
                    [observation, action, log_prob, reward, done, info] = episode_series[j]
                    G = G + reward * gamma_cum
                    gamma_cum = gamma_cum * self.GAMMA
                    j = j + 1
                [observation, action, log_prob, reward, done, info] = episode_series[i]
                policy_loss.append(G * -log_prob)
                rewards_list.append(G)
            policy_loss = torch.cat(policy_loss).sum()
            policy_loss.backward()
            self.optimizer.step()
            policy_loss = []
            times_trained = times_trained + 1
            # `reward` here is the last step's reward; positive means goal reached.
            if reward > 0.0:
                times_reach_goal = times_reach_goal + 1
        self.results.put(reward_per_day)  # MP.Queue()
        #print("reward_per_day")
        #print(reward_per_day)
| ssainz/reinforcement_learning_algorithms | fleet_simulator/FleetSimulatorAgentConcurrent.py | FleetSimulatorAgentConcurrent.py | py | 5,529 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.FloatTensor",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.LongTensor",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.ByteTensor",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "Mo... |
30753464221 | from django.shortcuts import render
from fristapp.models import People, Aritcle
from django.http import HttpResponse
from django.template import Context, Template
# Create your views here.
def first_try(request):
    """Render a hard-coded Semantic-UI greeting page for a sample People row.

    Demonstrates manual django.template usage (Template + Context) instead
    of the render() shortcut.
    """
    person = People(name='Spork', job="officer")
    html_string = '''
    <html lang="en">
    <head>
    <title>firstApp</title>
    <meta charset="UTF-8">
    <link href="https://cdnjs.cloudflare.com/ajax/libs/semantic-ui/2.2.4/semantic.css" rel="stylesheet">
    </head>
    <body>
    <h1 class="ui center aligned icon header">
    <i class="hand spock icon"></i> Hello,{{person.name}}
    </h1>
    </body>
    </html>
    '''
    t = Template(html_string)
    c = Context({'person': person})
    web_page = t.render(c)
    return HttpResponse(web_page)
def index(request):
    """Render the article list, optionally filtered by the ``tag`` GET param."""
    tag = request.GET.get('tag')
    if tag:
        article_list = Aritcle.objects.filter(tag=tag)
    else:
        article_list = Aritcle.objects.all()
    print(tag)
    # Use a lowercase local dict: the original rebound the imported
    # django.template.Context name, shadowing it for the rest of the module.
    context = {}
    context['article_list'] = article_list
    index_page = render(request, 'firstweb.html', context)
    return index_page
| LTMana/code | Python/Django/fristsite/fristapp/views.py | views.py | py | 1,123 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "fristapp.models.People",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.template.Template",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.template.Context",
"line_number": 32,
"usage_type": "call"
},
{
"api_name":... |
18855352074 | from cloud.filestore.tests.python.lib.common import get_nfs_mount_path
import os
import pytest
import shutil
import tempfile
def pytest_addoption(parser):
    """Register the ``--target-dir`` CLI option for this test suite.

    Bug fix: the descriptive string was originally passed as ``default``
    instead of ``help``, so the option silently "defaulted" to the help
    sentence itself. It is now the help text, with no default value.
    """
    parser.addoption(
        "--target-dir",
        action="store",
        default=None,
        help="Path to target directory to run tests on",
    )
@pytest.fixture
def target_dir_path(pytestconfig):
    """Yield a fresh temp directory on the NFS mount; remove it afterwards.

    ``tmp_dir`` is initialised before the try block: in the original, a
    failure inside mkdtemp() made the finally clause raise
    UnboundLocalError, masking the real error.
    """
    tmp_dir = None
    try:
        tmp_dir = tempfile.mkdtemp(dir=get_nfs_mount_path())
        yield tmp_dir
    finally:
        if tmp_dir is not None:
            shutil.rmtree(tmp_dir, ignore_errors=True)
def lock_file_descriptor(target_dir_path: str, flags: int):
    """Yield an open fd for a per-access-mode lock file in *target_dir_path*.

    The file is created on first use and the descriptor is closed when the
    generator is finalized.

    Bug fix: the access mode must be extracted with ``os.O_ACCMODE`` —
    ``os.O_RDONLY`` is 0, so the original test ``flags & os.O_RDONLY`` was
    always false and read and write callers shared the same
    ``test_lockfile_write`` file.
    """
    flags |= os.O_CREAT
    is_read = (flags & os.O_ACCMODE) == os.O_RDONLY
    suffix = 'read' if is_read else 'write'
    lock_file_path = os.path.join(target_dir_path, f'test_lockfile_{suffix}')
    if not os.path.exists(lock_file_path):
        os.mknod(lock_file_path)
    fd = os.open(lock_file_path, flags)
    assert fd > 0
    try:
        yield fd
    finally:
        os.close(fd)
# Thin fixtures over lock_file_descriptor(); each yields an open fd that is
# closed automatically when the fixture generator is finalized.
@pytest.fixture()
def read_lock_file_descriptor(target_dir_path):
    # Read-mode descriptor on the shared read lock file.
    yield from lock_file_descriptor(target_dir_path, os.O_RDONLY)
@pytest.fixture()
def read_lock_file_descriptor_second(target_dir_path):
    # Second independent read-mode descriptor (for multi-reader tests).
    yield from lock_file_descriptor(target_dir_path, os.O_RDONLY)
@pytest.fixture()
def write_lock_file_descriptor(target_dir_path):
    # Write-mode descriptor on the shared write lock file.
    yield from lock_file_descriptor(target_dir_path, os.O_WRONLY)
| ydb-platform/nbs | cloud/filestore/tools/testing/fs_posix_compliance/suite/python_tests/conftest.py | conftest.py | py | 1,362 | python | en | code | 32 | github-code | 6 | [
{
"api_name": "tempfile.mkdtemp",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cloud.filestore.tests.python.lib.common.get_nfs_mount_path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 23,
"usage_type": "call"
},
... |
21925605098 | from __future__ import annotations
import copy
from typing import Optional, Dict
from dlgo.gotypes import Player, Point
from dlgo import zobrist
from dlgo.scoring import compute_game_result
from dlgo.utils import MoveAge
__all__ = [
'Board',
'GameState',
'Move',
]
neighbor_tables = {}
corner_tables = {}
def init_neighbor_table(dim: (int, int)):
    """Precompute, for every point of a rows x cols board, its on-board
    orthogonal neighbours, and cache the table in ``neighbor_tables[dim]``.

    Bug fix: the column bound was ``1 < n.col`` which wrongly discarded
    neighbours lying in column 1; it now uses an inclusive lower bound,
    mirroring the row test and init_corner_table.
    """
    rows, cols = dim
    new_table = {}
    for r in range(1, rows + 1):
        for c in range(1, cols + 1):
            p = Point(row=r, col=c)
            full_neighbors = p.neighbors()
            true_neighbors = [
                n for n in full_neighbors
                if 1 <= n.row <= rows and 1 <= n.col <= cols]
            new_table[p] = true_neighbors
    neighbor_tables[dim] = new_table
def init_corner_table(dim: (int, int)):
    """Precompute, for every point of a rows x cols board, its on-board
    diagonal (corner) neighbours, and cache the table in ``corner_tables[dim]``.
    """
    rows, cols = dim
    new_table = {}
    for r in range(1, rows + 1):
        for c in range(1, cols + 1):
            p = Point(row=r, col=c)
            # All four diagonals; off-board ones are filtered just below.
            full_corners = [
                Point(row=p.row - 1, col=p.col - 1),
                Point(row=p.row - 1, col=p.col + 1),
                Point(row=p.row + 1, col=p.col - 1),
                Point(row=p.row + 1, col=p.col + 1),
            ]
            true_corner = [
                n for n in full_corners
                if 1 <= n.row <= rows and 1 <= n.col <= cols]
            new_table[p] = true_corner
    corner_tables[dim] = new_table
class IllegalMoveError(Exception):
    """Exception type for rule-violating moves."""
    pass
class GoString:
    """A chain of connected stones of one colour together with its liberties.

    Stones and liberties are stored as frozensets, so instances behave as
    value objects: every "mutation" returns a brand-new GoString.
    """

    def __init__(self, color, stones, liberties):
        self.color = color
        self.stones = frozenset(stones)
        self.liberties = frozenset(liberties)

    def without_liberty(self, point):
        """Return a copy of this string with *point* removed from its liberties."""
        remaining = self.liberties - {point}
        return GoString(self.color, self.stones, remaining)

    def with_liberty(self, point: Point):
        """Return a copy of this string with *point* added to its liberties."""
        extended = self.liberties | {point}
        return GoString(self.color, self.stones, extended)

    def merged_with(self, go_string: GoString) -> GoString:
        """Merge two same-coloured strings; shared stones never count as liberties."""
        assert go_string.color == self.color
        all_stones = self.stones | go_string.stones
        all_liberties = (self.liberties | go_string.liberties) - all_stones
        return GoString(self.color, all_stones, all_liberties)

    @property
    def num_liberties(self) -> int:
        """Number of empty points adjacent to this string."""
        return len(self.liberties)

    def __eq__(self, other) -> bool:
        checks = [
            isinstance(other, GoString),
            self.color == other.color,
            self.stones == other.stones,
            self.liberties == other.liberties,
        ]
        return all(checks)

    def __repr__(self) -> str:
        return f"GoString({self.color}, {self.stones}, {self.liberties})"

    def __deepcopy__(self, memodict={}):
        # Stones/colour are immutable and safely shared; only the liberty
        # set is deep-copied, matching the original implementation.
        return GoString(self.color, self.stones, copy.deepcopy(self.liberties))
class Board:
    """A Go board: a grid mapping points to GoStrings plus a Zobrist hash.

    Neighbour/corner lookups are served from module-level tables shared by
    all boards of the same dimensions.
    """
    def __init__(self, num_rows: int, num_cols: int):
        self.num_rows = num_rows
        self.num_cols = num_cols
        # Maps each occupied point to the string it belongs to; empty
        # points are absent (or explicitly None after a capture).
        self._grid: Dict[Point, Optional[GoString]] = {}
        self._hash = zobrist.EMPTY_BOARD
        global neighbor_tables
        dim = (num_rows, num_cols)
        # Lazily build and cache the shared lookup tables for this size.
        if dim not in neighbor_tables:
            init_neighbor_table(dim)
        if dim not in corner_tables:
            init_corner_table(dim)
        self.neighbor_table = neighbor_tables[dim]
        self.corner_table = corner_tables[dim]
        self.move_ages = MoveAge(self)
    def neighbors(self, point: Point):
        """On-board orthogonal neighbours of *point*."""
        return self.neighbor_table[point]
    def corners(self, point: Point):
        """On-board diagonal neighbours of *point*."""
        return self.corner_table[point]
    def place_stone(self, player: Player, point: Point):
        """Place a stone for *player* at *point*, merging friendly strings
        and capturing enemy strings left without liberties."""
        assert self.is_on_grid(point)
        if self._grid.get(point) is not None:
            print(f"Illegal play on {point}")
        assert self._grid.get(point) is None
        adjacent_same_color = []
        adjacent_opposite_color = []
        liberties = []
        self.move_ages.increment_all()
        self.move_ages.add(point)
        # Classify each neighbour: empty -> liberty; friendly/enemy string.
        for neighbor in self.neighbor_table[point]:
            neighbor_string = self._grid.get(neighbor)
            if neighbor_string is None:
                liberties.append(neighbor)
            elif neighbor_string.color == player:
                if neighbor_string not in adjacent_same_color:
                    adjacent_same_color.append(neighbor_string)
            else:
                if neighbor_string not in adjacent_opposite_color:
                    adjacent_opposite_color.append(neighbor_string)
        new_string = GoString(player, [point], liberties)
        for same_color_string in adjacent_same_color:
            new_string = new_string.merged_with(same_color_string)
        for new_string_point in new_string.stones:
            self._grid[new_string_point] = new_string
        # Update the incremental Zobrist hash: empty out, stone in.
        self._hash ^= zobrist.HASH_CODE[point, None]
        self._hash ^= zobrist.HASH_CODE[point, player]
        # Reduce enemy liberties; capture strings that run out.
        for other_color_string in adjacent_opposite_color:
            replacement = other_color_string.without_liberty(point)
            if replacement.num_liberties:
                self._replace_string(other_color_string.without_liberty(point))
            else:
                self._remove_string(other_color_string)
    def is_on_grid(self, point: Point) -> bool:
        """True if *point* lies within the board bounds."""
        return 1 <= point.row <= self.num_rows and 1 <= point.col <= self.num_cols
    def get(self, point: Point) -> Optional[Player]:
        """Colour of the stone at *point*, or None if empty."""
        string = self._grid.get(point)
        if string is None:
            return None
        return string.color
    def get_go_string(self, point: Point) -> Optional[GoString]:
        """The whole string occupying *point*, or None if empty."""
        string = self._grid.get(point)
        if string is None:
            return None
        return string
    def zobrist_hash(self) -> int:
        """Current incremental Zobrist hash of the position."""
        return self._hash
    def _replace_string(self, new_string: GoString):
        # Point every stone of the string at the updated value object.
        for point in new_string.stones:
            self._grid[point] = new_string
    def _remove_string(self, string: GoString):
        # Capture: clear each stone, restore liberties to its neighbours,
        # and undo the stones' contribution to the hash.
        for point in string.stones:
            self.move_ages.reset_age(point)
            for neighbor in self.neighbor_table[point]:
                neighbor_string = self._grid.get(neighbor)
                if neighbor_string is None:
                    continue
                if neighbor_string is not string:
                    self._replace_string(neighbor_string.with_liberty(point))
            self._grid[point] = None
            self._hash ^= zobrist.HASH_CODE[point, string.color]
            self._hash ^= zobrist.HASH_CODE[point, None]
    def is_self_capture(self, player: Player, point: Point) -> bool:
        """True if playing at *point* would leave the new string with no
        liberties (without capturing anything)."""
        friendly_strings = []
        for neighbor in self.neighbor_table[point]:
            neighbor_string = self._grid.get(neighbor)
            if neighbor_string is None:
                # An empty neighbour is an immediate liberty.
                return False
            elif neighbor_string.color == player:
                friendly_strings.append(neighbor_string)
            else:
                if neighbor_string.num_liberties == 1:
                    # The enemy string would be captured instead.
                    return False
        if all(neighbor.num_liberties == 1 for neighbor in friendly_strings):
            return True
        return False
    def will_capture(self, player: Player, point: Point) -> bool:
        """True if playing at *point* captures at least one enemy string."""
        for neighbor in self.neighbor_table[point]:
            neighbor_string = self._grid.get(neighbor)
            if neighbor_string is None:
                continue
            elif neighbor_string.color == player:
                continue
            else:
                if neighbor_string.num_liberties == 1:
                    return True
        return False
    def __eq__(self, other):
        # Bug fix: _hash is an int attribute, not a callable; the original
        # called self._hash() which raised TypeError on any comparison.
        return isinstance(other, Board) and \
            self.num_rows == other.num_rows and \
            self.num_cols == other.num_cols and \
            self._hash == other._hash
    def __deepcopy__(self, memodict={}):
        copied = Board(self.num_rows, self.num_cols)
        # GoStrings are value objects, so a shallow grid copy is enough.
        copied._grid = copy.copy(self._grid)
        copied._hash = self._hash
        return copied
class Move:
    """A single action: play a stone at a point, pass, or resign.

    Exactly one of the three alternatives must be chosen (enforced with an
    xor assertion in the constructor).
    """
    def __init__(self, point: Optional[Point] = None, is_pass: bool = False,
                 is_resign: bool = False):
        assert (point is not None) ^ is_pass ^ is_resign
        self.point = point
        self.is_play = (self.point is not None)
        self.is_pass = is_pass
        self.is_resign = is_resign
    @classmethod
    def play(cls, point) -> Move:
        """Construct a stone-placing move at *point*."""
        return Move(point=point)
    @classmethod
    def pass_turn(cls) -> Move:
        """Construct a pass move."""
        return Move(is_pass=True)
    @classmethod
    def resign(cls) -> Move:
        """Construct a resignation move."""
        return Move(is_resign=True)
    def __str__(self):
        if self.is_pass:
            return 'pass'
        if self.is_resign:
            return 'resign'
        # Bug fix: the original f-string was missing its closing parenthesis.
        return f"(r {self.point.row}, c {self.point.col})"
    def __hash__(self):
        return hash((
            self.is_play,
            self.is_pass,
            self.is_resign,
            self.point))
    def __eq__(self, other):
        return (
            self.is_play,
            self.is_pass,
            self.is_resign,
            self.point) == (
            other.is_play,
            other.is_pass,
            other.is_resign,
            other.point)
class GameState:
    """Immutable snapshot of a game: board, player to move, and history.

    Previous positions are kept as a frozenset of (player, zobrist_hash)
    pairs so that ko (situational superko) can be checked in O(1).
    """
    def __init__(self, board: Board, next_player: Player, previous: Optional[GameState],
                 move: Optional[Move]):
        self.board = board
        self.next_player = next_player
        self.previous_state = previous
        if not self.previous_state:
            self.previous_states = frozenset()
        else:
            # Extend the ancestor's history with its own (player, hash) pair.
            self.previous_states = frozenset(
                previous.previous_states | {(previous.next_player, previous.board.zobrist_hash())}
            )
        self.last_move = move
    def apply_move(self, move: Move) -> GameState:
        """Return the new state after *move*; the board is deep-copied only
        when a stone is actually placed."""
        if move.is_play:
            next_board = copy.deepcopy(self.board)
            next_board.place_stone(self.next_player, move.point)
        else:
            next_board = self.board
        return GameState(next_board, self.next_player.other, self, move)
    @classmethod
    def new_game(cls, board_size) -> GameState:
        """Start a fresh game; *board_size* may be an int (square) or a pair."""
        if isinstance(board_size, int):
            board_size = (board_size, board_size)
        board = Board(*board_size)
        return GameState(board, Player.black, None, None)
    def is_over(self) -> bool:
        """True after a resignation or two consecutive passes."""
        if self.last_move is None:
            return False
        if self.last_move.is_resign:
            return True
        second_last_move = self.previous_state.last_move
        if second_last_move is None:
            return False
        return self.last_move.is_pass and second_last_move.is_pass
    def is_move_self_capture(self, player: Player, move: Move) -> bool:
        """True if *move* would leave its own string without liberties."""
        if not move.is_play:
            return False
        return self.board.is_self_capture(player, move.point)
    @property
    def situation(self) -> (Player, Board):
        """The (player to move, board) pair identifying this position."""
        return self.next_player, self.board
    def does_move_violate_ko(self, player: Player, move: Move) -> bool:
        """True if *move* recreates a previous (player, position) pair.

        Only capturing moves can repeat a position, so the deep copy is
        skipped for everything else.
        """
        if not move.is_play:
            return False
        if not self.board.will_capture(player, move.point):
            return False
        next_board = copy.deepcopy(self.board)
        next_board.place_stone(player, move.point)
        next_situation = (player.other, next_board.zobrist_hash())
        return next_situation in self.previous_states
    def is_valid_move(self, move: Move) -> bool:
        """True if *move* is legal here: empty point, no self-capture, no ko."""
        if self.is_over():
            return False
        if move.is_pass or move.is_resign:
            return True
        return self.board.get(move.point) is None and \
            not self.is_move_self_capture(self.next_player, move) and \
            not self.does_move_violate_ko(self.next_player, move)
    def legal_moves(self) -> [Move]:
        """All legal moves in this state (including pass and resign)."""
        if self.is_over():
            return []
        moves = []
        for row in range(1, self.board.num_rows + 1):
            for col in range(1, self.board.num_cols + 1):
                move = Move.play(Point(row, col))
                if self.is_valid_move(move):
                    moves.append(move)
        # These two moves are always legal.
        moves.append(Move.pass_turn())
        moves.append(Move.resign())
        return moves
    def winner(self):
        """Winner of a finished game (None while still in progress)."""
        if not self.is_over():
            return None
        if self.last_move.is_resign:
            # The player who would move next is the one who did NOT resign.
            return self.next_player
        game_result = compute_game_result(self)
        return game_result.winner
| dbradf/dlgo | src/dlgo/goboard_fast.py | goboard_fast.py | py | 12,409 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dlgo.gotypes.Point",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dlgo.gotypes.Point",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "dlgo.gotypes.Point",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "dlgo.gotypes... |
4565273196 | import numpy as np
import matplotlib.pyplot as plt
import csv
import os
dirname = os.path.dirname(__file__)
t = 1
sig_x_est, sig_y_est, sig_vx_est, sig_vy_est = np.array([0.25, 0.25, 0.1, 0.1]) * 20
sig_x_mea, sig_y_mea, sig_vx_mea, sig_vy_mea = np.array([0.1, 0.1, 1, 1]) * 40
def predict(A, x, y, vx, vy):
    """Project the state one step forward: return A @ [x, y, vx, vy]^T (4x1)."""
    state = np.array([x, y, vx, vy], dtype=float).reshape(4, 1)
    return A @ state
def main():
    """Run a constant-velocity Kalman filter over the logged track.

    Reads the initial position from the first line of kalmann.txt and the
    (x, y, vx, vy) measurements from the rest, writes the filtered state and
    covariance diagonal to kalmann_est.txt, and plots measured vs filtered
    trajectories.
    """
    data = []
    xi = 0
    yi = 0
    with open(os.path.join(dirname, "kalmann.txt")) as f:
        lines = f.readlines()
        xi, yi = [float(x) for x in lines[0].split(",")]
        data = [[float(x) for x in line.split(",")] for line in lines[1:]]
    data = np.array(data)
    # Initial estimate covariance (diagonal, from the *_est sigmas).
    P = np.array(
        [
            [sig_x_est**2, 0, 0, 0],
            [0, sig_y_est**2, 0, 0],
            [0, 0, sig_vx_est**2, 0],
            [0, 0, 0, sig_vy_est**2],
        ]
    )
    # Constant-velocity transition matrix for time step t.
    A = np.array(
        [
            [1, 0, t, 0],
            [0, 1, 0, t],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
    # Measurement noise covariance (diagonal, from the *_mea sigmas).
    R = np.array(
        [
            [sig_x_mea**2, 0, 0, 0],
            [0, sig_y_mea**2, 0, 0],
            [0, 0, sig_vx_mea**2, 0],
            [0, 0, 0, sig_vy_mea**2],
        ]
    )
    # NOTE(review): diagflat builds a 4x4 matrix, so X[1][0] starts at 0
    # rather than yi — a 4x1 column vector [[xi],[yi],[0],[0]] looks
    # intended; confirm.
    X = np.diagflat([xi, yi, 0, 0])
    x_kal = [xi]
    y_kal = [yi]
    x_mea = [xi]
    y_mea = [yi]
    with open(os.path.join(dirname, "kalmann_est.txt"), "w") as wf:
        for x, y, vx, vy in data:
            # Predict step: propagate state and (diagonalised) covariance.
            X = predict(A, X[0][0], X[1][0], X[2][0], X[3][0])
            P = np.diag(np.diag(A @ P @ A.T))
            # Update step: full-state observation (H = I), Kalman gain K.
            H = np.identity(4)
            S = H @ P @ H.T + R
            K = P @ H.T @ np.linalg.inv(S)
            Y = H @ np.array([[x], [y], [vx], [vy]])
            X = X + K @ (Y - H @ X)
            P = np.diag(np.diag((np.identity(4) - K @ H) @ P))
            x_kal.append(X[0][0])
            y_kal.append(X[1][0])
            x_mea.append(x)
            y_mea.append(y)
            wf.write(
                f"{X[0][0]} , {X[1][0]} , {X[2][0]} , {X[3][0]} , {P[0][0]} , {P[1][1]} , {P[2][2]} , {P[3][3]}\n"
            )
    # Keep the figure's aspect ratio close to the track's extent.
    w, h = np.max(x_kal), np.max(y_kal)
    h = h / w
    plt.figure(figsize=(15, h * 15))
    plt.plot(x_mea, y_mea, alpha=0.5, label="Observation", color="tab:red")
    plt.plot(x_kal, y_kal, label="After applying kalman filter", color="tab:blue")
    plt.legend()
    plt.savefig(os.path.join(dirname, "output.png"), dpi=300)
    plt.show()
# Run the filter demo immediately when this module is executed/imported.
main()
| C-12-14/AGV-Task-Round | Kalman-Filter/kalman.py | kalman.py | py | 2,456 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number"... |
37225811351 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os, sys, glob, pickle
import numpy as np
import sqlite3
import matplotlib.pyplot as plt
from analysis import crosstalk, darknoise
from contrib import legend, natsort
from ROOT import TH1D, TF1
import ROOT
from matplotlib.backends.backend_pdf import PdfPages
ROOT.Math.MinimizerOptions.SetDefaultMinimizer("Minuit2")
parser = argparse.ArgumentParser()
parser.add_argument("path", help="the path to the luigi simulation results directory containing the g4sipm.tsv")
parser.add_argument("--bin-width", help="the bin width in p.e.", default=1, type=float)
parser.add_argument("--tight-layout", help="display log scale", action="store_true", default=False)
args = parser.parse_args()
pdf = PdfPages(args.path + '/n_pe.pdf')
paths = natsort.sort(glob.glob(args.path + "/*/g4sipm.tsv"))
for i, path in enumerate(paths):
# Prepare plots
f1, ax1 = plt.subplots()
# Get SiPM properties
g4sipm = pickle.load(open(os.path.dirname(path) + "/g4sipm.pkl"))
n_particles = g4sipm["particleSourceMessenger"]["nParticles"]
crosstalkNeighbours = g4sipm["g4sipmUiMessenger"]["noiseCrosstalkNeighbours"]
name = g4sipm["sipmModel"]["name"]
pde = g4sipm["sipmModel"]["pdeAt400nm"]
p_ct = g4sipm["sipmModel"]["crossTalkProbability"]
ncells = g4sipm["sipmModel"]["numberOfCells"]
v_ov = g4sipm["sipmModel"]["overVoltage"]
# Read cached results from tsv file
pe = np.loadtxt(path, delimiter=" ")[:, 1] # number of photons, peak height / p.e.
# Histogram the time difference.
xmin = np.floor(np.min(pe))
xmax = np.ceil(np.max(pe))
nbins = int((xmax - xmin) / args.bin_width)
# Create and fill histogram.
h = TH1D(name + "-%d" % n_particles, name, nbins, xmin - args.bin_width / 2.0, xmax - args.bin_width / 2.0)
# h = TH1D(name + "-%d" % n_particles, name, nbins , 0, xmax)
h.Sumw2()
for p in pe:
h.Fill(p)
#
x = np.array([h.GetBinLowEdge(i) for i in xrange(1, nbins + 1)])
y = np.array([h.GetBinContent(i) for i in xrange(1, nbins + 1)])
yerr = np.array([h.GetBinError(i) for i in xrange(1, nbins + 1)])
# Plot
ax1.hist(x, bins=x, weights=y, histtype="step", label="entries %d" % len(pe))
ax1.errorbar(x + h.GetBinWidth(1) / 2.0, y, yerr=yerr, fmt='.', color="k", capthick=0)
# Fit a Poisson function
fit = TF1("fit", "[1] * TMath::Poisson(x, [0])", xmin, xmax)
fit.SetParameter(0, h.GetMean())
fit.SetParameter(1, h.GetEntries())
h.Fit(fit, "0R")
# Plot fit result.
x_fit = np.linspace(xmin, xmax, 1024)
y_fit = [fit.Eval(xi) for xi in x_fit]
ax1.plot(x_fit, y_fit, '-', label="Poisson fit")
# Plot the fit result.
legend.add(ax1,
labels=[r"$ N_{ph}$", r"Mean", r"$\mu$", r"$\chi^2 / ndof$", r"PDE(400 nm)", r"$ P_{ct}$"],
values=[n_particles, h.GetMean(), fit.GetParameter(0), fit.GetChisquare(), pde * 100.0, p_ct * 100.0],
errors=[None, None, fit.GetParError(0), None, None, None],
units=[None, None, None, "/ %d" % fit.GetNDF(), r"%", r"%"],
loc="center right",
title=("%s +%.1f V" % (name, v_ov * 1e6)))
# Fit a Erlang distribution
fit = TF1("fit", darknoise.erlang, xmin, xmax, 3)
fit.SetParameter(0, h.GetEntries())
fit.SetParameter(1, h.GetMean() * 1.3)
fit.SetParameter(2, 1.3)
fit.SetParLimits(1, 1.0, 10.0 * h.GetMean())
fit.SetParNames("A", "k", "#lambda")
h.Fit(fit, "0R")
# Plot fit result.
x_fit = np.linspace(xmin, xmax, 1024)
y_fit = [fit.Eval(xi) for xi in x_fit]
ax1.plot(x_fit, y_fit, '-', label="Erlang fit")
# Fit a Gaus distribution
fit = TF1("fit", "gaus", xmin, xmax)
fit.SetParameter(0, h.GetEntries())
fit.SetParameter(1, h.GetMean())
fit.SetParameter(2, h.GetRMS())
h.Fit(fit, "0R")
# Plot fit result.
x_fit = np.linspace(xmin, xmax, 1024)
y_fit = [fit.Eval(xi) for xi in x_fit]
ax1.plot(x_fit, y_fit, '-', label="Gaus fit")
# Style the plot
ax1.set_xlabel("p.e.")
ax1.set_ylabel("entries / %.1f p.e." % args.bin_width)
ax1.legend(loc="upper right")
# Tight layout
if args.tight_layout:
f1.tight_layout()
# Save plots
pdf.savefig(f1)
plt.close(f1)
pdf.close()
| ntim/g4sipm | sample/plots/luigi/n_pe.py | n_pe.py | py | 4,397 | python | en | code | 26 | github-code | 6 | [
{
"api_name": "ROOT.Math.MinimizerOptions.SetDefaultMinimizer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "ROOT.Math",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
... |
24591311642 | import pygame
import math
class Bat:
    """A pong paddle rendered as a white rectangle.

    The constructor receives the paddle's *centre* coordinates and converts
    them into the top-left corner used for drawing.
    """

    def __init__(self, screen, startX, startY, speed, width=20, height=80):
        # Convert the supplied centre point into a top-left corner.
        half_width = math.ceil(width / 2)
        half_height = math.ceil(height / 2)
        self.startX = startX - half_width
        self.startY = startY - half_height
        self.screen = screen
        self.speed = speed
        self.width = width
        self.height = height
        self.rect = self.drawCurrent()
        self.score = 0

    def drawCurrent(self):
        """Draw the bat at its current position; cache and return its rect."""
        shape = pygame.Rect(self.startX, self.startY, self.width, self.height)
        self.rect = pygame.draw.rect(self.screen, (255, 255, 255), shape)
        return self.rect

    def move(self, down):
        """Shift the bat vertically by one speed step (downwards if `down`)."""
        self.startY += self.speed if down else -self.speed
| BananenKraft/Pong | bat.py | bat.py | py | 706 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.ceil",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 1... |
# Script body: embed a sample of a dataset into 2D with t-SNE and scatter-plot
# it, one colour per class.
from sklearn import datasets
# NOTE(review): this loads sklearn's handwritten-digits dataset, but the
# class_names below describe audio/background-sound classes — presumably the
# digits data is a placeholder for the project's real features; confirm.
digits = datasets.load_digits()
# Take the first 500 data points: it's hard to see 1500 points
X = digits.data[:500]
y = digits.target[:500]
print (X.shape, y.shape)
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
X_2d = tsne.fit_transform(X)
'''
0 -> formula_1
1 -> grass_cutting
2 -> water
3 -> helicopter
4 -> auto
5 -> cricket
6 -> guitar
7 -> sewing machine
8 -> stapler
9 -> traffic
'''
class_names = ['formula_1', 'grass_cutting', 'tap_water', 'helicopter', 'rikshaw', 'cricket', 'guitar', 'sewing', 'stapler', 'traffic']
target_ids = range(len(class_names))
from matplotlib import pyplot as plt
plt.figure(figsize=(6, 5))
# NOTE(review): 'w' (white) points are invisible on the default white
# background — consider a different colour for class index 7.
colors = 'r', 'g', 'b', 'c', 'm', 'y', 'k', 'w', 'orange', 'purple'
for i, c, label in zip(target_ids, colors, class_names):
    # Plot only the 2D embeddings belonging to class i.
    plt.scatter(X_2d[y == i, 0], X_2d[y == i, 1], c=c, label=label)
plt.legend()
plt.show()
| iamjanvijay/Background-Sound-Classification-in-Speech-Audio-Segments | utils/plot_tsne.py | plot_tsne.py | py | 914 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "sklearn.datasets.load_digits",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 2,
"usage_type": "name"
},
{
"api_name": "sklearn.manifold.TSNE",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mat... |
23497250977 | # coding: utf-8
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # pylint:disable=redefined-builtin,unused-wildcard-import,wildcard-import,wrong-import-order
from collections import deque
import cv2
from .config import ConfigurationError, get_config
from .imgutils import (_frame_repr, _image_region, _ImageFromUser, _load_image,
pixel_bounding_box, crop, limit_time)
from .logging import debug, draw_on, ImageLogger
from .types import Region, UITestFailure
def detect_motion(timeout_secs=10, noise_threshold=None, mask=None,
                  region=Region.ALL, frames=None):
    """Generator that yields a sequence of one `MotionResult` for each frame
    processed from the device-under-test's video stream.

    The `MotionResult` indicates whether any motion was detected -- that is,
    any difference between two consecutive frames.

    Use it in a ``for`` loop like this::

        for motionresult in stbt.detect_motion():
            ...

    In most cases you should use `wait_for_motion` instead.

    :type timeout_secs: int or float or None
    :param timeout_secs:
        A timeout in seconds. After this timeout the iterator will be
        exhausted. That is, a ``for`` loop like
        ``for m in detect_motion(timeout_secs=10)`` will terminate after 10
        seconds. If ``timeout_secs`` is ``None`` then the iterator will yield
        frames forever. Note that you can stop iterating (for example with
        ``break``) at any time.

    :param float noise_threshold:
        The amount of noise to ignore. This is only useful with noisy analogue
        video sources. Valid values range from 0 (all differences are
        considered noise; a value of 0 will never report motion) to 1.0 (any
        difference is considered motion).

        This defaults to 0.84. You can override the global default value by
        setting ``noise_threshold`` in the ``[motion]`` section of
        :ref:`.stbt.conf`.

    :type mask: str or `numpy.ndarray`
    :param mask:
        A black & white image that specifies which part of the image to search
        for motion. White pixels select the area to analyse; black pixels
        select the area to ignore. The mask must be the same size as the video
        frame.

        This can be a string (a filename that will be resolved as per
        `load_image`) or a single-channel image in OpenCV format.

    :type region: `Region`
    :param region:
        Only analyze the specified region of the video frame.

        If you specify both ``region`` and ``mask``, the mask must be the same
        size as the region.

    :type frames: Iterator[stbt.Frame]
    :param frames: An iterable of video-frames to analyse. Defaults to
        ``stbt.frames()``.

    | Added in v28: The ``region`` parameter.
    | Added in v29: The ``frames`` parameter.
    """
    if frames is None:
        import stbt
        frames = stbt.frames()
    frames = limit_time(frames, timeout_secs)  # pylint: disable=redefined-variable-type
    if noise_threshold is None:
        noise_threshold = get_config(
            'motion', 'noise_threshold', type_=float)
    debug("Searching for motion")
    if mask is None:
        mask = _ImageFromUser(None, None, None)
    else:
        mask = _load_image(mask, cv2.IMREAD_GRAYSCALE)
        debug("Using mask %s" % mask.friendly_name)
    # The first frame only seeds `previous_frame_gray`; no result is yielded
    # for it.
    try:
        frame = next(frames)
    except StopIteration:
        return
    region = Region.intersect(_image_region(frame), region)
    previous_frame_gray = cv2.cvtColor(crop(frame, region),
                                       cv2.COLOR_BGR2GRAY)
    if (mask.image is not None and
            mask.image.shape[:2] != previous_frame_gray.shape[:2]):
        raise ValueError(
            "The dimensions of the mask '%s' %s don't match the "
            "video frame %s" % (
                mask.friendly_name, mask.image.shape,
                previous_frame_gray.shape))
    for frame in frames:
        imglog = ImageLogger("detect_motion", region=region)
        imglog.imwrite("source", frame)
        imglog.set(roi=region, noise_threshold=noise_threshold)
        frame_gray = cv2.cvtColor(crop(frame, region), cv2.COLOR_BGR2GRAY)
        imglog.imwrite("gray", frame_gray)
        imglog.imwrite("previous_frame_gray", previous_frame_gray)
        # Per-pixel difference against the previous frame is the motion signal.
        absdiff = cv2.absdiff(frame_gray, previous_frame_gray)
        previous_frame_gray = frame_gray
        imglog.imwrite("absdiff", absdiff)
        if mask.image is not None:
            # Zero out the differences in the masked-off (black) areas.
            absdiff = cv2.bitwise_and(absdiff, mask.image)
            imglog.imwrite("mask", mask.image)
            imglog.imwrite("absdiff_masked", absdiff)
        # Differences below (1 - noise_threshold) * 255 are treated as noise.
        _, thresholded = cv2.threshold(
            absdiff, int((1 - noise_threshold) * 255), 255,
            cv2.THRESH_BINARY)
        # Erode to drop isolated single-pixel differences.
        eroded = cv2.erode(
            thresholded,
            cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
        imglog.imwrite("absdiff_threshold", thresholded)
        imglog.imwrite("absdiff_threshold_erode", eroded)
        out_region = pixel_bounding_box(eroded)
        if out_region:
            # Undo cv2.erode above:
            out_region = out_region.extend(x=-1, y=-1)
            # Undo crop:
            out_region = out_region.translate(region.x, region.y)
        motion = bool(out_region)
        result = MotionResult(getattr(frame, "time", None), motion,
                              out_region, frame)
        draw_on(frame, result, label="detect_motion()")
        debug("%s found: %s" % (
            "Motion" if motion else "No motion", str(result)))
        _log_motion_image_debug(imglog, result)
        yield result
def wait_for_motion(
        timeout_secs=10, consecutive_frames=None,
        noise_threshold=None, mask=None, region=Region.ALL, frames=None):
    """Search for motion in the device-under-test's video stream.

    "Motion" is difference in pixel values between two consecutive frames.

    :type timeout_secs: int or float or None
    :param timeout_secs:
        A timeout in seconds. This function will raise `MotionTimeout` if no
        motion is detected within this time.

    :type consecutive_frames: int or str
    :param consecutive_frames:
        Considers the video stream to have motion if there were differences
        between the specified number of consecutive frames. This can be:

        * a positive integer value, or
        * a string in the form "x/y", where "x" is the number of frames with
          motion detected out of a sliding window of "y" frames.

        This defaults to "10/20". You can override the global default value by
        setting ``consecutive_frames`` in the ``[motion]`` section of
        :ref:`.stbt.conf`.

    :param float noise_threshold: See `detect_motion`.
    :param mask: See `detect_motion`.
    :param region: See `detect_motion`.
    :param frames: See `detect_motion`.

    :returns: `MotionResult` when motion is detected. The MotionResult's
        ``time`` and ``frame`` attributes correspond to the first frame in
        which motion was detected.
    :raises: `MotionTimeout` if no motion is detected after ``timeout_secs``
        seconds.

    | Added in v28: The ``region`` parameter.
    | Added in v29: The ``frames`` parameter.
    """
    if frames is None:
        import stbt
        frames = stbt.frames()
    if consecutive_frames is None:
        consecutive_frames = get_config('motion', 'consecutive_frames')
    # Accept either an int or an "x/y" sliding-window specification.
    consecutive_frames = str(consecutive_frames)
    if '/' in consecutive_frames:
        motion_frames = int(consecutive_frames.split('/')[0])
        considered_frames = int(consecutive_frames.split('/')[1])
    else:
        motion_frames = int(consecutive_frames)
        considered_frames = int(consecutive_frames)
    if motion_frames > considered_frames:
        raise ConfigurationError(
            "`motion_frames` exceeds `considered_frames`")
    debug("Waiting for %d out of %d frames with motion" % (
        motion_frames, considered_frames))
    if mask is None:
        mask = _ImageFromUser(None, None, None)
    else:
        mask = _load_image(mask, cv2.IMREAD_GRAYSCALE)
        debug("Using mask %s" % mask.friendly_name)
    # Sliding window of the last `considered_frames` MotionResults;
    # motion_count tracks how many of them reported motion.
    matches = deque(maxlen=considered_frames)
    motion_count = 0
    last_frame = None
    for res in detect_motion(
            timeout_secs, noise_threshold, mask, region, frames):
        motion_count += bool(res)
        if len(matches) == matches.maxlen:
            # Window is full: the oldest result falls out of the count.
            motion_count -= bool(matches.popleft())
        matches.append(res)
        if motion_count >= motion_frames:
            debug("Motion detected.")
            # We want to return the first True motion result as this is when
            # the motion actually started.
            for result in matches:
                if result:
                    return result
            assert False, ("Logic error in wait_for_motion: This code "
                           "should never be reached")
        last_frame = res.frame
    raise MotionTimeout(last_frame, mask.friendly_name, timeout_secs)
class MotionResult(object):
    """The result from `detect_motion` and `wait_for_motion`.

    :ivar float time: The time at which the video-frame was captured, in
        seconds since 1970-01-01T00:00Z. This timestamp can be compared with
        system time (``time.time()``).

    :ivar bool motion: True if motion was found. This is the same as
        evaluating ``MotionResult`` as a bool: ``if result:`` behaves the same
        as ``if result.motion:``.

    :ivar Region region: Bounding box where the motion was found, or ``None``
        if no motion was found.

    :ivar Frame frame: The video frame in which motion was (or wasn't) found.

    Added in v28: The ``frame`` attribute.
    """
    _fields = ("time", "motion", "region", "frame")

    def __init__(self, time, motion, region, frame):
        self.time = time
        self.motion = motion
        self.region = region
        self.frame = frame

    def __bool__(self):
        return self.motion

    def __repr__(self):
        if self.time is None:
            time_repr = "None"
        else:
            time_repr = "%.3f" % self.time
        return (
            "MotionResult(time=%s, motion=%r, region=%r, frame=%s)" % (
                time_repr, self.motion, self.region,
                _frame_repr(self.frame)))
class MotionTimeout(UITestFailure):
    """Exception raised by `wait_for_motion`.

    :ivar Frame screenshot: The last video frame that `wait_for_motion`
        checked before timing out.

    :vartype mask: str or None
    :ivar mask: Filename of the mask that was used, if any.

    :vartype timeout_secs: int or float
    :ivar timeout_secs: Number of seconds that motion was searched for.
    """
    def __init__(self, screenshot, mask, timeout_secs):
        super(MotionTimeout, self).__init__()
        self.screenshot = screenshot
        self.mask = mask
        self.timeout_secs = timeout_secs

    def __str__(self):
        mask_part = " (with mask '%s')" % self.mask if self.mask else ""
        return "Didn't find motion%s within %g seconds." % (
            mask_part, self.timeout_secs)
def _log_motion_image_debug(imglog, result):
    """Render the HTML debug page for one detect_motion iteration.

    No-op unless image debug logging is enabled on `imglog`. The referenced
    PNGs are the intermediate images written by `detect_motion` via
    `imglog.imwrite`.
    """
    if not imglog.enabled:
        return

    template = u"""\
<h4>
detect_motion:
{{ "Found" if result.motion else "Didn't find" }} motion
</h4>
{{ annotated_image(result) }}
<h5>ROI Gray:</h5>
<img src="gray.png" />
<h5>Previous frame ROI Gray:</h5>
<img src="previous_frame_gray.png" />
<h5>Absolute difference:</h5>
<img src="absdiff.png" />
{% if "mask" in images %}
<h5>Mask:</h5>
<img src="mask.png" />
<h5>Absolute difference – masked:</h5>
<img src="absdiff_masked.png" />
{% endif %}
<h5>Threshold (noise_threshold={{noise_threshold}}):</h5>
<img src="absdiff_threshold.png" />
<h5>Eroded:</h5>
<img src="absdiff_threshold_erode.png" />
"""
    imglog.html(template, result=result)
| alexlyn/stb-tester | _stbt/motion.py | motion.py | py | 12,155 | python | en | code | null | github-code | 6 | [
{
"api_name": "types.Region.ALL",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "types.Region",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "stbt.frames",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "imgutils.limit_time",
... |
10432077002 | #!/usr/bin/env python
# Get a listings of the files in each dataset
# see get-dc0-file-lists.sh
import json
from pytablewriter import MarkdownTableWriter
# from https://stackoverflow.com/questions/1094841/get-human-readable-version-of-file-size
def sizeof_fmt(num, suffix="B"):
    """Render *num* bytes as a human-readable string with binary prefixes
    (e.g. ``1536 -> '1.5 KiB'``)."""
    units = ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi")
    value = num
    for prefix in units:
        if abs(value) < 1024.0:
            return f"{value:3.1f} {prefix}{suffix}"
        value /= 1024.0
    # Anything larger than zebibytes falls through to yobibytes.
    return f"{value:.1f} Yi{suffix}"
def write_dataset(something, n_files, data_size, file_table_rows):
    """Write the markdown page ``planck_pr4-<something>.md`` for one dataset:
    a YAML front-matter header, summary text, and a markdown table of files.

    :param something: dataset category name (e.g. "fullsky")
    :param n_files: number of files in the dataset (manifest excluded)
    :param data_size: human-readable total size string
    :param file_table_rows: rows for the files table
    """
    # NOTE(review): the header has 3 columns but the rows built by the caller
    # have only 2 entries ([link, size]) — confirm whether a "Datatype" value
    # is missing from each row.
    dset_table_header = ["File Name", "Datatype", "Size"]
    writer = MarkdownTableWriter(
        headers=dset_table_header,
        value_matrix=file_table_rows,
        margin=1
    )
    dset_text = f"""---
title: "Planck PR4 {something.upper()}"
author: "CMB-S4 Collaboration"
description: "Planck Public Release 4 {something.upper()}"
date_created: "2023-03-22"
seo:
  type: Dataset
---
[Back to release](./planck_pr4.html#datasets)
# Dataset: Planck PR4 {something.upper()}
This dataset is publicly available via Globus Transfer or HTTPS. [Click here](https://app.globus.org/file-manager?origin_id=38f01147-f09e-483d-a552-3866669a846d&origin_path=%2Fpublic%2Fplanck%2Fplanck_pr4%2F{something}%2F) to view the files in the Globus web app.
Download the [file manifest](https://g-456d30.0ed28.75bc.data.globus.org/public/planck/planck_pr4/{something}/manifest.json) for the exact file sizes and checksums.
## Files
- Number of files: {n_files}
- Total size: {data_size}
- [JSON format file manifest](https://g-456d30.0ed28.75bc.data.globus.org/public/planck/planck_pr4/{something}/manifest.json)
"""
    with open(f'planck_pr4-{something}.md', 'w') as f:
        f.write(dset_text)
        f.write(writer.dumps())
# Script body: for each dataset category, read its Globus file listing JSON,
# write a per-dataset markdown page, then emit the summary table and the
# Jekyll sidebar YAML.
things = ["fullsky", "half_ring", "lowres", "quickpol", "single"]
# dc0-chlat-split$split-$band.json
# Rows for data release page
# | [Link](dc0-chlat-split01-025.html) | CHLAT | `01` | `025` | `2` | 3.8 GiB |
pr4_dsets_table_header = ["Link", "Category", "Number of Files", "Total Size"]
pr4_dsets_table_data = []
for something in things:
    dset_table_data = []
    # load file list
    with open(f'pr4-{something}.json') as f:
        file_data = json.load(f)
    file_list = file_data["DATA"]
    # loop over files, build file table info for dataset
    # remove manifest from list
    # total up bytes in dataset
    total_bytes = 0
    n_files = len(file_list) - 1
    for file_entry in file_list:
        fname = file_entry['name']
        if not fname == 'manifest.json':
            total_bytes += file_entry['size']
            fsize = sizeof_fmt(file_entry['size'])
            flink = f'[`{fname}`](https://g-456d30.0ed28.75bc.data.globus.org/public/planck/planck_pr4/{something}/{fname})'
            dset_table_data.append([flink, fsize])
    dset_size = sizeof_fmt(total_bytes)
    write_dataset(something, n_files, dset_size, dset_table_data)
    # NOTE(review): 'plank_pr4' looks like a typo for 'planck_pr4' (the pages
    # written above are named 'planck_pr4-*.md'), so these links may 404 —
    # confirm before fixing.
    dset_url = f'[Link](plank_pr4-{something}.html)'
    pr4_dsets_table_data.append([dset_url, f'{something.upper()}', f'`{n_files}`', dset_size])
writer = MarkdownTableWriter(
    headers=pr4_dsets_table_header,
    value_matrix=pr4_dsets_table_data,
    margin=1
)
with open('pr4-dset-table.md', 'w') as f:
    f.write(writer.dumps())
# Sidebar YAML for the Jekyll site navigation.
# NOTE(review): the trailing ')' in "Planck Public Release 4)" looks like a
# stray character — confirm.
with open('pr4-sidebar.yml', 'w') as f:
    f.write(' - title: Planck Public Release 4)\n')
    f.write(' output: web\n')
    f.write(' folderitems:\n')
    f.write(' - title: Planck PR4\n')
    f.write(' url: "planck_pr4.html"\n')
    f.write(' output: web\n')
    for something in things:
        f.write(f' - title: Planck PR4 {something.upper()}\n')
        f.write(f' url: "planck_pr4-{something}.html"\n')
        f.write(f' output: web\n')
| CMB-S4/serverless-data-portal-cmb-s4 | buildpr4.py | buildpr4.py | py | 3,826 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pytablewriter.MarkdownTableWriter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pytablewriter.MarkdownTableWriter",
"line_number": 92,
"usage_type": "call"
}
] |
24254079862 | from base import *
from fabric.api import cd, env, run
NOTIFICATION_SENDER = os.getenv('NOTIFICATION_SENDER')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True if os.getenv('TOLA_DEBUG') == 'True' else False
########## END DEBUG CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# Database settings are taken from the environment (12-factor style); if any
# required variable is missing, fall back to a local SQLite database so that
# tests can run without a configured Postgres/MySQL instance.
try:
    DATABASES = {
        'default': {
            'ENGINE': os.environ["TOLATABLES_DB_ENGINE"],
            'NAME': os.environ["TOLATABLES_DB_NAME"],
            'USER': os.environ["TOLATABLES_DB_USER"],
            'PASSWORD': os.environ["TOLATABLES_DB_PASS"],
            'HOST': os.environ["TOLATABLES_DB_HOST"],
            'PORT': os.getenv('TOLATABLES_DB_PORT', 5432),
        }
    }
except KeyError:
    # Fallback for tests without environment variables configured
    # Depends on os.environ for correct functionality
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': 'tolatables',
        }
    }
# NOTE(review): printing at settings-import time leaks DB credentials into
# logs — confirm this is intentional before removing.
print("DATABASES: {}".format(DATABASES))
########## END DATABASE CONFIGURATION
# Hosts/domain names that are valid for this site
if os.getenv('TOLA_HOSTNAME') is not None:
ALLOWED_HOSTS = os.environ['TOLA_HOSTNAME'].split(',')
USE_X_FORWARDED_HOST = True if os.getenv('TOLA_USE_X_FORWARDED_HOST') == 'True' else False
########## GOOGLE CLIENT CONFIG ###########
if os.getenv('TABLES_URL') is not None:
GOOGLE_REDIRECT_URL = os.getenv('TABLES_URL') + '/oauth2callback/'
else:
GOOGLE_REDIRECT_URL = 'http://localhost:8000/oauth2callback/'
if os.getenv('GOOGLE_ANALYTICS') is not None:
GOOGLE_ANALYTICS = os.getenv('GOOGLE_ANALYTICS')
else:
GOOGLE_ANALYTICS = None
####### Tola Activity API #######
TOLA_ACTIVITY_API_URL = os.getenv('TOLA_ACTIVITY_API_URL', '')
TOLA_ACTIVITY_API_TOKEN = os.getenv('TOLA_ACTIVITY_API_TOKEN')
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
########## END CACHE CONFIGURATION
try:
template_dir = os.environ['TOLATABLES_TEMPLATE_DIR']
except KeyError:
template_dir = "templates2"
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [normpath(join(SITE_ROOT, 'templates2')), ],
# 'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'tola.context_processors.get_silos',
'tola.context_processors.get_servers',
'tola.context_processors.google_oauth_settings',
'tola.context_processors.google_analytics',
],
'builtins': [
'django.contrib.staticfiles.templatetags.staticfiles',
'silo.templatetags.underscoretags',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
},
]
APP_BRANCH = os.getenv('APP_BRANCH')
ACTIVITY_URL = os.getenv('ACTIVITY_URL')
TABLES_URL = os.getenv('TABLES_URL')
TABLES_LOGIN_URL = TOLA_ACTIVITY_API_URL
SOCIAL_AUTH_TOLA_KEY = os.getenv('SOCIAL_AUTH_TOLA_KEY')
SOCIAL_AUTH_TOLA_SECRET = os.getenv('SOCIAL_AUTH_TOLA_SECRET')
CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL')
CELERY_RESULT_BACKEND = os.getenv('CELERY_BROKER_URL')
# Hosts to deploy onto
env.hosts = ['.toladata.io', '.tola.io']
# Where your project code lives on the server
env.project_root = DJANGO_ROOT
def deploy_static():
    """Fabric task: collect Django static files on the remote host
    (quiet output, no confirmation prompt)."""
    with cd(env.project_root):
        run('./manage.py collectstatic -v0 --noinput')
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
GOOGLE_OAUTH_CLIENT_ID = os.getenv('GOOGLE_OAUTH_CLIENT_ID')
GOOGLE_OAUTH_CLIENT_SECRET = os.getenv('GOOGLE_OAUTH_CLIENT_SECRET')
ONEDRIVE_CLIENT_ID = os.getenv('ONEDRIVE_CLIENT_ID')
ONEDRIVE_REDIRECT_URI = os.getenv('ONEDRIVE_REDIRECT_URI')
# This allows for additional settings to be kept in a local file
try:
from local_secret import *
except ImportError:
pass
| toladata/TolaTables | tola/settings/local.py | local.py | py | 4,834 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "fabric.api.env.hosts",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "fabric.api.env",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "fabric.api.env.project_root",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_... |
31249158545 | import os
from pathlib import Path
import random
import pandas as pd
from music21 import converter
from data_preparation import extract_notes
from preprocessing.preprocess_midi import preprocess_music21_song
from helpers.samplinghelpers import render_token_sequence
def prepare_annotations(labels_file: str) -> None:
    """Rewrite the annotations CSV in place, renaming every value in the
    `fname` column from a ``.wav`` extension to ``.mid``.

    :param labels_file: path to the annotations CSV (overwritten in place)
    :return: None
    """
    annotations = pd.read_csv(labels_file)
    # The labels were produced for .wav audio, but the dataset stores .mid.
    annotations['fname'] = annotations['fname'].str.replace(
        '.wav', '.mid', regex=False)
    annotations.to_csv(labels_file, index=False)
def train_test_split_and_save(labels_file, class_labels):
    """Filter the annotations to the given classes, split them 80/20 into
    train/test, and write ``annotations_train.csv`` / ``annotations_test.csv``
    to the current directory.

    Note: ``DataFrame.sample`` is called without a ``random_state``, so the
    split is different on every run.
    """
    labels = pd.read_csv(labels_file)
    # choose classes in class_labels list
    labels = labels[labels['toptag_eng_verified'].isin(class_labels)]
    # split on train and test
    train = labels.sample(frac=0.8)
    # Everything not sampled into train becomes the test set.
    test = labels[~labels.index.isin(train.index)]
    print(f'Train shape: {train.shape}, test shape: {test.shape}')
    train.reset_index(drop=True).to_csv('annotations_train.csv', index=False)
    test.reset_index(drop=True).to_csv('annotations_test.csv', index=False)
def build_structured_dataset(raw_dataset_path, annotations, output_dir, train_test_frac):
    """
    The function creates dir tree for dataset and store files in that tree in order to their classes.

    Each MIDI is converted to its token-text form, split into (paired) bars,
    and the bars are appended to ``<train|test>/<label>.txt``.

    :param train_test_frac: fraction of midi to use in test dataset
    :param raw_dataset_path: path to raw midi dataset
    :param annotations: file with emotion annotations
    :param output_dir: dir for text dataset
    :return: None
    """
    # creating dirs for text-midi dataset with train-test division
    train_dir = os.path.join(output_dir, 'train')
    test_dir = os.path.join(output_dir, 'test')
    Path(output_dir).mkdir(exist_ok=True)
    Path(train_dir).mkdir(exist_ok=True)
    Path(test_dir).mkdir(exist_ok=True)
    labels = pd.read_csv(annotations)
    # get text_repr of all midi files
    all_midi_files = []
    for file in os.listdir(raw_dataset_path):
        if file.endswith('.mid'):
            cur_midi_file = os.path.join(raw_dataset_path, file)
            all_midi_files.append(cur_midi_file)
    # NOTE(review): get_text_repr_filelist is not among this module's imports
    # — confirm where it is defined before running this function.
    text_repr = get_text_repr_filelist(all_midi_files)
    # save text representations of midi in text files according to their classes
    for midi_file, text_midi in zip(all_midi_files, text_repr):
        cur_midi_file = os.path.split(midi_file)[1]
        cur_label = labels[labels['fname'] == cur_midi_file]['toptag_eng_verified'].item()
        # split text_midi to bars
        text_bars = []
        # Strip the outer TRACK_START ... TRACK_END markers, then split the
        # remainder into per-track strings.
        start_track = text_midi.index('TRACK_START') + len('TRACK_START') + 1
        end_track = text_midi.rfind('TRACK_END') - 1
        text_tracks = text_midi[start_track:end_track].split(' TRACK_END TRACK_START ')
        for text_track in text_tracks:
            # Same idea for the BAR_START ... BAR_END markers within a track.
            start = text_track.index('BAR_START') + len('BAR_START') + 1
            end = text_track.rfind('BAR_END') - 1
            cur_text_bars = text_track[start:end].split(' BAR_END BAR_START ')
            # group bars (concatenate consecutive pairs of bars)
            cur_text_bars = [cur_text_bars[i] + ' ' + cur_text_bars[i + 1] for i in range(0, len(cur_text_bars) - 1, 2)]
            # delete empty bars and one-note bars
            for text_bar in cur_text_bars:
                # we need at least two notes in bar
                if len(text_bar.split(' ')) >= 12:  # NOTE_ON TIME_DELTA NOTE_OFF NOTE_ON TIME_DELTA NOTE_OFF
                    text_bars.append(text_bar)
        # Randomly route this file's bars to the test or train split.
        if random.random() <= train_test_frac:
            cur_file_to_save = os.path.join(test_dir, cur_label) + '.txt'
        else:
            cur_file_to_save = os.path.join(train_dir, cur_label) + '.txt'
        with open(cur_file_to_save, 'a') as text_midi_file:
            text_midi_file.write('\n'.join(text_bars))
def create_text_files_from_midi(dataset, filelist, output):
    """Convert each ``.mid`` file named in *filelist* (relative to *dataset*)
    to its text representation and write one ``.txt`` per MIDI into *output*.
    """
    midi_names = [name for name in filelist if name.endswith('.mid')]
    midi_paths = [os.path.join(dataset, name) for name in midi_names]
    texts = extract_notes(midi_paths)
    for text, midi_name in zip(texts, midi_names):
        out_path = os.path.join(output, midi_name.replace('.mid', '.txt'))
        with open(out_path, 'w') as text_file:
            text_file.write(text)
if __name__ == '__main__':
    # Pipeline configuration; the commented-out calls below are the earlier
    # preparation steps, kept for reference — only the MIDI-to-text step runs.
    labels_filename = 'verified_annotation.csv'
    dataset_path = 'emotion_midi'
    output_directory = 'emotion_midi_text'
    output_dir = 'emotion_midi_texts'
    # prepare_annotations(labels_file=labels_filename)
    # classes = ['cheerful', 'tense']
    # factor = 0.2
    # train_test_split_and_save(labels_filename, classes)
    # build_structured_dataset(dataset_path, labels_filename, output_directory, train_test_frac=0.3)
    create_text_files_from_midi(dataset_path, os.listdir(dataset_path), output_dir)
| Vitaliy1234/music_generation | data/music_midi/prepare_data.py | prepare_data.py | py | 4,890 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
70640030589 | import chainer
from chainer import serializers, Variable, cuda
from flownets import FlowNetS
import cv2
import numpy as np
import argparse
### parameter ###
INPUT_FILE1 = 'samples/0000000-imgL.ppm'
INPUT_FILE2 = 'samples/0000000-imgR.ppm'
OUTPUT_FILE = './results/test'
ARROW_FREQ = 16
def preprocessing(img):
    """Scale an HxWxC uint8 image to [0, 1] float32 and reorder to CxHxW."""
    scaled = img.astype('f') / 255.0
    # Channel-first layout, as expected by the network input tensor.
    return scaled.transpose((2, 0, 1))
def Padding(img1, img2):
    """Zero-pad both images symmetrically so each becomes (roughly) square.

    Both inputs must share the same shape. Padding goes on the left/right
    when the image is taller than wide, and on the top/bottom otherwise.
    """
    assert (img1.shape == img2.shape), 'Not equal img1.shape & img2.shape'
    height, width = img1.shape[0], img1.shape[1]
    if height >= width:
        pad = int((height - width) / 2)
        borders = (0, 0, pad, pad)  # top, bottom, left, right
    else:
        pad = int((width - height) / 2)
        borders = (pad, pad, 0, 0)
    img1 = cv2.copyMakeBorder(img1, *borders, cv2.BORDER_CONSTANT, value=0)
    img2 = cv2.copyMakeBorder(img2, *borders, cv2.BORDER_CONSTANT, value=0)
    return img1, img2
def Liner_interpolation(a, b, c, d, dx, dy):
    """Bilinearly interpolate between four neighbouring pixel values.

    :param a: pixel value at (x, y)
    :param b: pixel value at (x+1, y)
    :param c: pixel value at (x, y+1)
    :param d: pixel value at (x+1, y+1)
    :param dx: fractional x offset in [0, 1]
    :param dy: fractional y offset in [0, 1]
    :return: the interpolated value

    Bug fix: the original subtracted the base value instead of adding it
    (``(b - a) * dx - a``), so e.g. dx=dy=0 returned ``c`` instead of ``a``.
    """
    pix_e = a + (b - a) * dx          # interpolate along x on the top edge
    pix_f = c + (d - c) * dx          # interpolate along x on the bottom edge
    pix_g = pix_e + (pix_f - pix_e) * dy  # interpolate along y between edges
    return pix_g
def main():
    """Estimate optical flow between two frames and write diagnostic images.

    Two interchangeable back-ends, selected with ``--method``:
    * ``dnn`` - FlowNetS (chainer) inference at 512x512;
    * ``cv2`` - OpenCV Farneback dense optical flow.
    For either method the flow field is used to warp the first frame, and
    three images are written: the warp error map, the warped frame, and the
    first frame overlaid with flow arrows.
    """
    parser = argparse.ArgumentParser(
        description='Test FlownetS')
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument("--load_model", '-m', default='flownets.npz', help='load model')
    parser.add_argument("--method", default='dnn', help='cv2 or dnn')
    args = parser.parse_args()
    ### FlowNet (DNN) ###
    if args.method == 'dnn':
        if args.gpu >= 0:
            chainer.cuda.get_device(0).use()
        f = FlowNetS()
        # NOTE(review): loads the hard-coded 'flownets.npz', ignoring
        # args.load_model — presumably unintended; confirm before relying on -m.
        serializers.load_npz('flownets.npz', f)
        if args.gpu >=0:
            f.to_gpu()
        row_img1 = cv2.imread(INPUT_FILE1)
        row_img2 = cv2.imread(INPUT_FILE2)
        # Padding
        row_img1, row_img2 = Padding(row_img1,row_img2)
        row_img1 = cv2.resize(row_img1, (512,512), cv2.INTER_AREA)
        row_img2 = cv2.resize(row_img2, (512,512), cv2.INTER_AREA)
        img_arrow = row_img1.copy()
        height,width,ch = row_img1.shape
        img1 = preprocessing(row_img1)
        img2 = preprocessing(row_img2)
        # FlowNetS takes both frames stacked channel-wise: (1, 6, H, W).
        xin = np.zeros((1, 6, 512, 512), dtype=np.float32)
        xin[0, 0:3, :] = img1
        xin[0, 3:6, :] = img2
        if args.gpu>=0:
            xin = cuda.to_gpu(xin)
        res = f(Variable(xin)).data
        if args.gpu>=0:
            res = cuda.to_cpu(res)
        # Quick visualisation: flow x into the blue channel, flow y into red,
        # shifted by +128 so negative displacements stay visible.
        img=np.zeros((128,128,3))
        img[:,:,0]=res[0, 0] + 128
        img[:,:,2]=res[0, 1] + 128
        img=img.astype(np.uint8)
        cv2.imwrite('samples/out.jpg', img)
        # flownet
        # The network outputs flow at 128x128; upsample to the frame size.
        delta_x = res[0,0] # (128,128)
        delta_y = res[0,1] # (128,128)
        delta_x = cv2.resize(delta_x, (height,width))
        delta_y = cv2.resize(delta_y, (height,width))
        img_trans = np.zeros_like(row_img1)
        # Warp frame 1 by the predicted flow, one pixel at a time, using
        # bilinear interpolation of the four surrounding source pixels.
        for x in range(width):
            for y in range(height):
                current_dx = delta_x[x,y]
                current_dy = delta_y[x,y]
                if (np.floor(x+current_dx)>=0)\
                and(np.floor(x+current_dx)+1<width)\
                and(np.floor(y+current_dy)>=0)\
                and(np.floor(y+current_dy+1)<height):
                    # wander if row_img1 or row_img2?
                    pix_a = row_img1[int(np.floor(x+current_dx)), int(np.floor(y+current_dy)),:]
                    pix_b = row_img1[int(np.floor(x+current_dx+1)), int(np.floor(y+current_dy)),:]
                    pix_c = row_img1[int(np.floor(x+current_dx)), int(np.floor(y+current_dy+1)),:]
                    pix_d = row_img1[int(np.floor(x+current_dx+1)), int(np.floor(y+current_dy+1)),:]
                    pix_g = Liner_interpolation(pix_a,pix_b,pix_c,pix_d,current_dx,current_dy)
                    img_trans[x,y,:] = pix_g
                    # arraw vector
                    if (x % ARROW_FREQ == 0) and (y % ARROW_FREQ == 0):
                        cv2.arrowedLine(img_arrow,(x,y),(int(np.floor(x+current_dx)),int(np.floor(y+current_dy))), (0,255,0), thickness=1, tipLength=0.05)
                else:
                    # Source position falls outside the frame: leave black.
                    img_trans[x,y,:] = 0
                    # arraw vector
                    if (x % ARROW_FREQ == 0) and (ARROW_FREQ % 8 == 0):
                        cv2.arrowedLine(img_arrow,(x,y),(int(np.floor(x+current_dx)),int(np.floor(y+current_dy))), (0,255,0), thickness=1, tipLength=0.05)
        # error map
        img_diff = abs(row_img1 - img_trans)
        cv2.imwrite(OUTPUT_FILE + '_img_diff_dnn.jpg', img_diff)
        cv2.imwrite(OUTPUT_FILE + '_img_trans_dnn.jpg', img_trans)
        cv2.imwrite(OUTPUT_FILE + '_img_vector_dnn.jpg', img_arrow)
    ### Dense optical flow (opencv) ###
    if args.method == 'cv2':
        img1_rgb = cv2.imread(INPUT_FILE1)
        img2_rgb = cv2.imread(INPUT_FILE2)
        img1_gray= img1_rgb.copy()
        img2_gray= img2_rgb.copy()
        img1_gray= cv2.cvtColor(img1_gray,cv2.COLOR_BGR2GRAY)
        img2_gray= cv2.cvtColor(img2_gray,cv2.COLOR_BGR2GRAY)
        img1_rgb, img2_rgb = Padding(img1_rgb, img2_rgb)
        img1_gray, img2_gray = Padding(img1_gray, img2_gray)
        img1_rgb = cv2.resize(img1_rgb, (512,512), cv2.INTER_AREA)
        img2_rgb = cv2.resize(img2_rgb, (512,512), cv2.INTER_AREA)
        img1_gray = cv2.resize(img1_gray, (512,512), cv2.INTER_AREA)
        img2_gray = cv2.resize(img2_gray, (512,512), cv2.INTER_AREA)
        # Farneback works on grayscale frames; returns per-pixel (dx, dy).
        flow = cv2.calcOpticalFlowFarneback(img1_gray,img2_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0) # (512,512,2)
        img_arrow = img1_rgb.copy()
        delta_x, delta_y = flow[:,:,0], flow[:,:,1]
        #delta_y, delta_x = flow[:,:,0], flow[:,:,1]
        img_trans = np.zeros_like(img1_rgb)
        height,width,ch= img1_rgb.shape
        # NOTE i don't know which is correct, (x,y) or (y,x) to plot vector map
        for x in range(width):
            for y in range(height):
                current_dy = delta_x[x,y]
                current_dx = delta_y[x,y]
                if (np.floor(x+current_dx)>=0)\
                and(np.floor(x+current_dx)+1<width)\
                and(np.floor(y+current_dy)>=0)\
                and(np.floor(y+current_dy+1)<height):
                    # wander if row_img1 or row_img2?
                    pix_a = img1_rgb[int(np.floor(x+current_dx)), int(np.floor(y+current_dy)),:]
                    pix_b = img1_rgb[int(np.floor(x+current_dx+1)), int(np.floor(y+current_dy)),:]
                    pix_c = img1_rgb[int(np.floor(x+current_dx)), int(np.floor(y+current_dy+1)),:]
                    pix_d = img1_rgb[int(np.floor(x+current_dx+1)), int(np.floor(y+current_dy+1)),:]
                    pix_g = Liner_interpolation(pix_a,pix_b,pix_c,pix_d,current_dx,current_dy)
                    img_trans[x,y,:] = pix_g
                    # arraw vector
                    if (x % ARROW_FREQ == 0) and (y % ARROW_FREQ == 0):
                        #cv2.arrowedLine(img_arrow,(x,y),(int(np.floor(x+current_dx)),int(np.floor(y+current_dy))), (0,255,0), thickness=1, tipLength=0.05)
                        cv2.arrowedLine(img_arrow,(y,x),(int(np.floor(y+current_dy)),int(np.floor(x+current_dx))), (0,255,0), thickness=1, tipLength=0.05)
                else:
                    img_trans[x,y,:] = 0
                    # arraw vector
                    if (x % ARROW_FREQ == 0) and (ARROW_FREQ % 8 == 0):
                        #cv2.arrowedLine(img_arrow,(x,y),(int(np.floor(x+current_dx)),int(np.floor(y+current_dy))), (0,255,0), thickness=1, tipLength=0.05)
                        cv2.arrowedLine(img_arrow,(y,x),(int(np.floor(y+current_dy)),int(np.floor(x+current_dx))), (0,255,0), thickness=1, tipLength=0.05)
        # error map
        img_diff = abs(img1_rgb - img_trans)
        cv2.imwrite(OUTPUT_FILE + '_img_diff_cv2.jpg', img_diff)
        cv2.imwrite(OUTPUT_FILE + '_img_trans_cv2.jpg', img_trans)
        cv2.imwrite(OUTPUT_FILE + '_img_vector_cv2.jpg', img_arrow)
if __name__ == '__main__':
main()
| kou7215/opticalflow | run.py | run.py | py | 8,175 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "cv2.copyMakeBorder",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.BORDER_CONSTANT",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.copyMakeBorder",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.BO... |
25320255908 | from flask import Flask, render_template, request
app = Flask(__name__)

ENV = "debug"

if ENV == 'debug':
    app.debug = True
    # Bug fix: the key was misspelled as 'SQLALCHEMY_TRACK_MODIFICATION'
    # (singular); Flask-SQLAlchemy only reads the plural
    # 'SQLALCHEMY_TRACK_MODIFICATIONS', so the setting was silently ignored.
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# set app source
@app.route('/')
def index():
    """Render the site landing page."""
    return render_template('index.html')
if __name__ == '__main__':
app.run() | JakeSiewJK64/joekane_site1 | app.py | app.py | py | 316 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
}
] |
10625323914 | import boto3
# AWS credentials used by the boto3 client below.
# SECURITY NOTE: never commit real credentials to source control; prefer
# environment variables, an AWS profile, or an IAM role over hard-coding.
access_key = ''
secret_access_key = ''
def get_all_clusters():
    """Return the ARNs of every ECS cluster visible to the credentials.

    Uses a paginator because ``list_clusters`` returns at most one page
    (up to 100 ARNs) per call; the original single call silently truncated
    the result for accounts with more clusters.
    """
    ecs_client = boto3.client('ecs', aws_access_key_id=access_key, aws_secret_access_key=secret_access_key)
    cluster_arns = []
    # Each page carries a 'clusterArns' list; accumulate them all.
    for page in ecs_client.get_paginator('list_clusters').paginate():
        cluster_arns.extend(page['clusterArns'])
    return cluster_arns
# print(get_all_regions())
# Get all clusters
clusters = get_all_clusters()
print(clusters)
# Print the clusters, one ARN per line
for cluster_arn in clusters:
print(cluster_arn) | PrantaChakraborty/boto3 | s3/ecs.py | ecs.py | py | 462 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "boto3.client",
"line_number": 8,
"usage_type": "call"
}
] |
###### UNIMIB - 2022 Indiegogo
###### AWS Glue job: clean the crowdfunding project dump, join in the image
###### metadata, and write the aggregate as Parquet to the DWH bucket.
import sys
import json
import pyspark
from pyspark.sql.functions import col, collect_list, array_join
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
##### FROM FILES
kickstarter_dataset_path = "s3://unimib-raw-data-2022/ds_project_details_full.csv"
###### READ PARAMETERS
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
##### START JOB CONTEXT AND JOB
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
#### READ INPUT FILES TO CREATE AN INPUT DATASET
# Quote/escape are both '"' so embedded quotes inside CSV fields survive.
projects_dataset = spark.read \
    .option("header","true") \
    .option("quote", "\"") \
    .option("escape", "\"") \
    .csv(kickstarter_dataset_path)
projects_dataset.printSchema()
### REMOVE DUPLICATES
# Deduplicate successively on id, title and tagline.
projects_dataset = projects_dataset.dropDuplicates(["project_id"]).dropDuplicates(["title"]).dropDuplicates(["tagLine"])
#### FILTER ITEMS WITH NULL POSTING KEY
count_items = projects_dataset.count()
count_items_null = projects_dataset.filter("project_id is not null").count()
print(f"Number of items from RAW DATA {count_items}")
print(f"Number of items from RAW DATA with NOT NULL KEY {count_items_null}")
## READ TAGS DATASET
img_dataset_path = "s3://unimib-raw-data-2022/ds_img_details_full.csv"
img_dataset = spark.read.option("header","true").csv(img_dataset_path)
# CREATE THE AGGREGATE MODEL, ADD TAGS TO TEDX_DATASET
# Collect all image names per project URL into a single array column.
img_dataset_agg = img_dataset.groupBy(col("project_url").alias("project_id_ref")).agg(collect_list("name").alias("names"))
img_dataset_agg.printSchema()
# Left join keeps projects without any image metadata.
projects_dataset_agg = projects_dataset.join(img_dataset_agg, projects_dataset.project_id == img_dataset_agg.project_id_ref, "left") \
    .drop("project_id_ref")
projects_dataset_agg.printSchema()
projects_dataset_agg.write.option("compression", "snappy").mode("overwrite").parquet("s3://unimib-dwh-2022/projects_dataset.out")
| mauropelucchi/unimib_masterbi_2022 | aws/aws_glue_job.py | aws_glue_job.py | py | 2,077 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "awsglue.utils.getResolvedOptions",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pyspark.context.SparkContext",
"line_number": 25,
"usage_type": "call"
},
{
"api_n... |
27185060813 | """Core of the discord bot."""
import discord
from discord.ext import commands
from pretty_help import PrettyHelp
import config as c
__author__ = "Diabolica"
# Gateway intents: the members intent is required to see guild member data.
intents = discord.Intents.default()
intents.members = True
# Extensions (cogs/modules) loaded at startup and reloadable at runtime.
startup_extensions = ["config", "commands_miscellaneous", "commands_ticketing", "commands_roblox"]
bot = commands.Bot(intents=intents, command_prefix=c.prefix, description="Grand Quest Helper is an assistant bot for the Grand Quest Games Community.", owner_id=c.bot_owner_id, case_insensitive=True)
bot.help_command = PrettyHelp(dm_help=True, no_category="Default", show_index=False, show_hidden=False, color=discord.Color.from_rgb(r=41, g=28, b=115))
# Events
@bot.event
async def on_ready():
    """Log a banner and set the bot's presence once the gateway is ready."""
    print('''
    +--------------------------------+
    | GrandQuestHelper has logged in |
    +--------------------------------+
    ''')
    await bot.change_presence(status=discord.Status.online, activity=discord.Game(name='Try {}help command'.format(c.prefix)))
@bot.event
async def on_command_error(ctx, error):
    """Reply with a short, auto-deleting message for common command errors."""
    if isinstance(error, commands.CommandOnCooldown):
        # Round up so we never report "0 more seconds" while still cooling down.
        cd = round(error.retry_after) + 1
        await ctx.reply('This command is on cooldown for {0:d} more second{1}.'.format(cd, 's' if cd != 1 else ''), delete_after=c.delete_timer)
    if isinstance(error, commands.CheckFailure):
        await ctx.reply('You\'re unable to do that!', delete_after=c.delete_timer)
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.reply('This command is missing required arguments.', delete_after=c.delete_timer)
@bot.event
async def on_message(message):
    # All command dispatching goes through process_command (multi-line support).
    await process_command(message)
@bot.event
async def on_message_edit(old_message, new_message):
    """Re-run command processing when a message's content actually changed."""
    if old_message.content == new_message.content:
        return
    await process_command(new_message)
@bot.command()
@commands.is_owner()
async def Reload(ctx):
    """Reloads the extensions of the bot."""
    success = True
    for ext in startup_extensions:
        print('{} has been reloaded'.format(ext))
        try:
            bot.reload_extension(ext)
        except Exception as ex:
            success = False
            # Best effort: DM the failure to the owner; ignore DM errors.
            try:
                await ctx.author.send('Failed to load extension {0}\n{1}: {2}'.format(ext, type(ex).__name__, str(ex)))
            finally:
                pass
    await ctx.author.send('Commands reloaded successfully!' if success else 'Something went wrong! :sob:')
# Functions
async def process_command(message):
    """Dispatch every prefixed command in a (possibly multi-line) message.

    A single message may contain several commands, one per line, each
    introduced by the configured prefix; each line is dispatched separately.
    """
    if message.author == bot.user:
        return
    for command_line in message.content.split('\n{0}'.format(c.prefix)):
        # The first split chunk only counts if it itself starts with the prefix.
        if command_line == message.content.split('\n{0}'.format(c.prefix))[0] and not command_line.startswith(c.prefix):
            continue
        if not command_line.startswith(c.prefix):
            command_line = "{0}{1}".format(c.prefix, command_line)
        # Mutate the message in place so discord.py parses this line as the content.
        message.content = command_line
        if message.content:
            command = message.content.split()[0].replace(c.prefix, "")
            message.content = message.content.replace(command, command.lower())
            # Delete the invoking message for recognised commands; ignore failures.
            try:
                if bot.get_command(command):
                    await message.delete(delay=0.25)
            finally:
                pass
        await bot.process_commands(message)
if __name__ == "__main__":
    # Load all extensions, reporting (but surviving) individual failures.
    for extension in startup_extensions:
        try:
            bot.load_extension(extension)
        except Exception as e:
            print('Failed to load extension {0}\n{1}: {2}'.format(extension, type(e).__name__, str(e)))
    bot.run(c.token)
| Diabolicah/GPO-Bot | main.py | main.py | py | 3,653 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "discord.Intents.default",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "d... |
4697384472 | import numpy as np
from player import Player
from territory import Territory
from troop import Troop
import random
from enum import Enum
starting_troops = 25
usa_states = {"Alabama":["Mississippi","Tennessee","Florida","Georgia"],
"Alaska":["Hawaii","California","Arizona"],
"Arizona":["California","Nevada","Utah","New Mexico","Colorado"],
"Arkansas":["Tennessee","Missouri","Oklahoma","Mississippi","Louisiana","Texas"],
"California":["Nevada","Arizona","Alaska"],
"Colorado":["Utah","Wyoming","Arizona","New Mexico","Nebraska","Kansas","Oklahoma"],
"Connecticut":["New York","Rhode Island","Massachusetts"],
"Delaware":["New Jersey","Maryland","Pennsylvania"],
"Florida":["Alabama","Georgia"],
"Georgia":["Florida","Alabama","South Carolina","Tennessee","North Carolina"],
"Hawaii":["Alaska","Texas"],
"Idaho":["Wyoming","Montana","Washington","Utah","Nevada","Oregon"],
"Illinois":["Wisconsin","Iowa","Missouri","Indiana","Kentucky"],
"Indiana":["Illinois","Michigan","Ohio","Kentucky"],
"Iowa":["Wisconsin","Minnesota","Nebraska","South Dakota","Missouri","Illinois"],
"Kansas":["Nebraska","Oklahoma","Colorado","Missouri"],
"Kentucky":["Indiana","Illinois","Virginia","Ohio","West Virginia","Tennessee","Missouri"],
"Louisiana":["Arkansas","Texas","Mississippi"],
"Maine":["New Hampshire"],
"Maryland":["Delaware","Virginia","Pennsylvania","West Virginia"],
"Massachusetts":["Vermont","New Hampshire","New York","Rhode Island","Connecticut"],
"Michigan":["Indiana","Ohio","Wisconsin"],
"Minnesota":["North Dakota","South Dakota","Iowa","Wisconsin"],
"Mississippi":["Alabama","Arkansas","Louisiana","Tennessee"],
"Missouri":["Kansas","Arkansas","Iowa","Illinois","Kentucky","Tennessee","Oklahoma"],
"Montana":["Idaho","Wyoming","North Dakota","South Dakota"],
"Nebraska":["Iowa","South Dakota","Wyoming","Colorado","Kansas","Missouri"],
"Nevada":["Idaho","Utah","Arizona","California","Oregon"],
"New Hampshire":["Maine","Vermont","Massachusetts"],
"New Jersey":["Delaware","New York","Pennsylvania"],
"New Mexico":["Oklahoma","Texas","Colorado","Utah","Arizona"],
"New York":["Vermont","New Jersey","Pennsylvania","Massachusetts","Connecticut"],
"North Carolina":["South Carolina","Virginia","Tennessee"],
"North Dakota":["Montana","South Dakota","Minnesota"],
"Ohio":["West Virginia","Indiana","Michigan","Kentucky","Pennsylvania"],
"Oklahoma":["Texas","Kansas","Colorado","New Mexico","Arkansas","Missouri"],
"Oregon":["Idaho","Washington","Nevada","California"],
"Pennsylvania":["New York","Delaware","New Jersey","Maryland","Ohio","West Virginia"],
"Rhode Island":["Massachusetts","Connecticut"],
"South Carolina":["North Carolina","Georgia"],
"South Dakota":["North Dakota","Wyoming","Montana","Nebraska","Iowa","Minnesota"],
"Tennessee":["North Carolina","Alabama","Mississippi","Georgia","Arkansas","Kentucky","Missouri"],
"Texas":["New Mexico","Oklahoma","Arkansas","Louisiana","Hawaii"],
"Utah":["Idaho","Nevada","Wyoming","Nevada","Colorado","New Mexico"],
"Vermont":["New York","New Hampshire","Massachusetts"],
"Virginia":["West Virginia","Maryland","North Carolina","Kentucky"],
"Washington":["Oregon","Idaho"],
"West Virginia":["Ohio","Virginia","Pennsylvania","Kentucky","Maryland"],
"Wisconsin":["Michigan","Minnesota","Illinois","Iowa"],
"Wyoming":["Montana","Idaho","Nebraska","Utah","Colorado","South Dakota"]}
egypt_states = {"Alexandria":["Beheira","Matruh"],
"Aswan":["Red Sea","Luxor","New Valley"],
"Asyut":["Minya","Sohag","New Valley","Red Sea","Qena"],
"Beheira":["Alexandria","Kafr El Sheikh","Gharbia","Monufia","Giza"],
"Beni Suef":["Minya","Giza","Faiyum","Red Sea"],
"Cairo":["Giza","Suez","Qalyubia","Sharqia","Ismailia"],
"Dakahlia":["Damietta","Port Said","Sharqia","Gharbia","Kafr El Sheikh"],
"Damietta":["Dakahlia","Port Said"],
"Faiyum":["Giza","Beni Suef"],
"Gharbia":["Dakahlia","Kafr El Sheikh","Beheira","Monufia"],
"Giza":["Faiyum","Suez","Beheira","Monufia","Qalyubia","Cairo","Matruh","New Valley","Red Sea"],
"Ismailia":["North Sinai","Suez","Cairo","Sharqia","Port Said"],
"Kafr El Sheikh":["Dakahlia","Beheira","Gharbia"],
"Luxor":["Aswan","New Valley","Qena","Red Sea"],
"Matruh":["Alexandria","Giza","Beheira","New Valley"],
"Minya":["Beni Suef","Asyut","Giza","New Valley","Red Sea"],
"Monufia":["Giza","Qalyubia","Qalyubia","Gharbia"],
"New Valley":["Matruh","Giza","Minya","Asyut","Sohag","Qena","Luxor","Aswan"],
"North Sinai":["South Sinai","Suez","Ismailia","Port Said"],
"Port Said":["North Sinai","Dakahlia","Damietta","Sharqia","Ismailia"],
"Qalyubia":["Giza","Sharqia","Monufia","Gharbia","Cairo"],
"Qena":["Sohag","Luxor","Red Sea","New Valley"],
"Red Sea":["Suez","Giza","Beni Suef","Minya","Asyut","Sohag","Qena","Luxor","Aswan"],
"Sharqia":["Cairo","Ismailia","Suez","Qalyubia","Dakahlia","Port Said"],
"Sohag":["Asyut","Qena","Red Sea","New Valley"],
"South Sinai":["Suez","North Sinai"],
"Suez":["Giza","Cairo","North Sinai","South Sinai","Sharqia","Ismailia"]}
colors = ['#346ac3','#d23c2f','#e1a904','#191919','#326f26','#764dbe']
class Game:
    """A single Risk-style match: map, players, troops and turn state."""
    def __init__(self,map,player_types,mode=0,players_num=2,player_turn=0,state=None):
        # map: 'USA' or 'Egypt'; player_types: list of PlayerType ints,
        # one per player; mode: GameMode value; state: cached board snapshot.
        self.players_num = players_num
        self.mode = mode
        self.player_turn = player_turn
        self.state = state
        self.map = map
        self.player_types = player_types
        self.game_over = None
    def start(self):
        """Set up a fresh game: board, players, troops, state and AI agents."""
        self.generate_map()
        self.generate_players()
        self.generate_troops()
        self.update_state()
        self.init_agents()
    def generate_map(self):
        """Build the territory graph from the selected map's adjacency table."""
        self.territories = {}
        if self.map == 'USA':
            for state,adjacents in usa_states.items():
                self.territories[state]=Territory(state,adjacents)
        elif self.map == 'Egypt':
            for state,adjacents in egypt_states.items():
                self.territories[state]=Territory(state,adjacents)
    def generate_players(self):
        """Create the players, assign colours/types and their goal states."""
        self.players = []
        for i in range(0,self.players_num):
            type = self.player_types[i]
            self.players.append(Player(i,colors[i],type=type))
            self.players[i].set_goal_state(self)
    def generate_troops(self):
        """Deal starting troops round-robin onto random territories."""
        # Local value shadows the module-level starting_troops constant:
        # scale with the map so bigger maps start with more troops per player.
        starting_troops = max(20,2*len(self.territories) // self.players_num)
        for i in range(0,starting_troops):
            for player in self.players:
                if player.troops is None:
                    player.troops=[]
                if player.territories is None:
                    player.territories = []
                troop = Troop(i,player,2)
                troop.assign_randomly(list(self.territories.values()))
                player.troops.append(troop)
    def init_agents(self):
        """Instantiate AI agents for AI-controlled player types."""
        for player in self.players:
            if player.type in [4,5,6,7]:
                player.init_agent(self)
    def get_territory(self,name):
        """Look up a Territory object by its name."""
        return self.territories[name]
    def update_state(self):
        """Refresh the board snapshot, drop eliminated players, detect wins."""
        # A player with no territories left is eliminated (slot set to None).
        for i,player in enumerate(self.players):
            if player and len(player.territories)==0:
                self.players[i] = None
        # Skip the turn marker past an eliminated player.
        if self.players[self.player_turn] is None:
            self.player_turn = (self.player_turn+1) % self.players_num
        if self.state is None:
            self.state ={}
        # state: player id -> {territory name -> troop count}; key -1 holds
        # unoccupied territories.
        self.state = {player.id:{str(trt.name):len(trt.troops) for trt in player.territories} for player in self.players if player }
        self.state[-1] = {trt.name:0 for trt in list(self.territories.values()) if trt.occupying_player is None}
        self.check_winner()
    def check_winner(self):
        """Declare game over when only one player remains."""
        check_list = [player for player in self.players if player]
        if len(check_list)==1:
            self.game_over={'over':True,'winner':check_list[0].id,'turns':check_list[0].turns_played}
    def json(self):
        """Serialise the full game state for the API/frontend."""
        return {
            "map":self.map,
            "mode":self.mode,
            "players_num":self.players_num,
            "player_turn":self.player_turn,
            "state":self.state,
            "players":[player.json() if player else None for player in self.players],
            "occupied_territories":[trty.json() for trty in list(self.territories.values()) if trty.occupying_player],
            "territories":[trty.json() for trty in list(self.territories.values())],
            "game_over":self.game_over
        }
class GameMode(Enum):
    """How many human participants the match has."""
    AI_VS_AI = 2
    HUMAN_VS_AI = 1
    HUMAN_VS_HUMAN = 0
class PlayerType(Enum):
    """Controller for a player slot: human or one of the AI strategies."""
    HUMAN = 0
    PASSIVE = 1
    AGRESSIVE = 2
    PACIFIST = 3
    GREEDY = 4
    ASTAR = 5
    ASTAR_REAL = 6
    MINIMAX = 7
| ZeyadZanaty/risk-game-ai | server/game.py | game.py | py | 8,762 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "territory.Territory",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "territory.Territory",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "player.Player",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "player.troop... |
8599562923 | import webbrowser
import msal
import logging
import requests
import json
from msal import PublicClientApplication
# Azure AD app registration and Microsoft Graph endpoints.
# SECURITY NOTE: a client secret is hard-coded here; it should be revoked
# and loaded from configuration/environment instead of source control.
APPLICATION_ID = '31a4641c-9cae-4d30-a2d4-c104bf383785'
CLIENT_SECRET = '5M78Q~QVl-rib2HqHVJ4xhRe-XWcGySwtZMgPbjz'
authority_url = 'https://login.microsoftonline.com/common/'
base_url = 'https://graph.microsoft.com/v1.0/'
endpoint = base_url + 'me'
SCOPES = ['User.Read', 'User.Export.All']
#
# # method 2: Login to acquire access_token
#
# client = PublicClientApplication(client_id=APPLICATION_ID,
# authority=authority_url)
#
# flow = client.initiate_device_flow(scopes=SCOPES)
# print(flow['user_code'])
# webbrowser.open(flow['verification_uri'])
#
# token_response = client.acquire_token_by_device_flow(flow)
# print(token_response['access_token'])
def email_sender(destinatario, nome_superior=None, nome_demitido=None, dt_demissao=None, modelo_equipamento=None, patrimonio_equipamento=None):
    """Send an equipment-transfer notification e-mail via Microsoft Graph.

    Acquires an app-only token with MSAL (client-credentials flow) using the
    settings in ``parameters.json`` and posts a sendMail request to the Graph
    endpoint configured there.

    :param destinatario: recipient e-mail address.
    :param nome_superior: manager's name interpolated into the body.
    :param nome_demitido: dismissed employee's name.
    :param dt_demissao: dismissal date.
    :param modelo_equipamento: equipment model to transfer.
    :param patrimonio_equipamento: equipment asset number.
    """
    # Bug fix: the file handle was opened and never closed; a context
    # manager guarantees it is released.
    with open('parameters.json') as f:
        config = json.load(f)

    app = msal.ConfidentialClientApplication(
        config["client_id"], authority=config["authority"],
        client_credential=config["secret"],
        # token_cache=... # Default cache is in memory only.
        # You can learn how to use SerializableTokenCache from
        # https://msal-python.rtfd.io/en/latest/#msal.SerializableTokenCache
    )

    # Look for a cached token first. account=None because this is an
    # application token (client credentials), not a delegated user token.
    result = app.acquire_token_silent(config["scope"], account=None)

    if not result:
        logging.info("No suitable token exists in cache. Let's get a new one from AAD.")
        result = app.acquire_token_for_client(scopes=config["scope"])

    if "access_token" in result:
        # Graph sendMail payload: one recipient, HTML body.
        request_body = {
            'message': {
                # recipient list
                'toRecipients': [
                    {
                        'emailAddress': {
                            'address': f'{destinatario}'
                        }
                    }
                ],
                # email subject
                'subject': 'TESTE - Transferência de Equipamentos',
                'importance': 'normal',
                'body': {
                    'contentType': 'HTML',
                    'content': f'<b>Prezado {nome_superior}, \n ex-colaborador:{nome_demitido} desligado em '
                               f'{dt_demissao}, favor 'f'transferir equipamento{modelo_equipamento},'
                               f' patrimônio {patrimonio_equipamento}'f' para outro colaborador ativo</b>'
                },
            }
        }
        graph_response = requests.post(config['endpoint'],
                                       headers={'Authorization': 'Bearer ' + result['access_token']}, json=request_body)
        print("Graph API call result: ")
        print(graph_response)
    else:
        print(result.get("error"))
        print(result.get("error_description"))
        print(result.get("correlation_id"))  # You may need this when reporting a bug
# request_body = {
# 'message': {
# # recipient list
# 'toRecipients': [
# {
# 'emailAddress': {
# 'address': '<recipient email address>'
# }
# }
# ],
# # email subject
# 'subject': 'You got an email',
# 'importance': 'normal',
# 'body': {
# 'contentType': 'HTML',
# 'content': '<b>Be Awesome</b>'
# },
# # include attachments
# 'attachments': [
# draft_attachment('hello.txt'),
# draft_attachment('image.png')
# ]
# }
# }
if __name__ == '__main__':
    # Manual smoke test: sends the notification to a hard-coded address.
    email_sender('thiagovieirac@gmail.com')
| tvcastro1/projetos-analise-dados | citrix-podio/demitidos/emailer.py | emailer.py | py | 3,963 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "msal.ConfidentialClientApplication",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "requests.po... |
19416806187 | """Determine potential of renewable electricity in each administrative unit.
* Take the (only technically restricted) raster data potentials,
* add restrictions based on scenario definitions,
* allocate the onshore potentials to the administrative units,
* allocate the offshore potentials to exclusive economic zones (EEZ),
* allocate the offshore potential of EEZ to units based on the fraction of shared coast.
This is in analogy to `areas.py` but for potentials [TWh/a] rather than areas [km2] .
"""
from enum import IntEnum, Enum
import click
import numpy as np
import pandas as pd
import rasterio
from rasterstats import zonal_stats
import fiona
from src.technical_eligibility import Eligibility, FOREST, FARM, OTHER
from src.utils import Config
class ProtectedArea(IntEnum):
    """Derived from UNEP-WCMC data set."""
    # Raster pixel values marking protected / unprotected land.
    PROTECTED = 255
    NOT_PROTECTED = 0
class Potential(Enum):
    """Classes of renewable electricity potentials."""
    # Each member carries (numeric id, eligibility categories it draws from).
    ROOFTOP_PV = (1, [Eligibility.ROOFTOP_PV])
    OPEN_FIELD_PV = (2, [Eligibility.ONSHORE_WIND_AND_PV])
    ONSHORE_WIND = (3, [Eligibility.ONSHORE_WIND_AND_PV, Eligibility.ONSHORE_WIND])
    OFFSHORE_WIND = (4, [Eligibility.OFFSHORE_WIND])
    def __init__(self, int_id, corresponding_eligibilities):
        # Enum members are constructed from their value tuples.
        self.int_id = int_id
        self.eligible_on = corresponding_eligibilities
    @property
    def area_name(self):
        """Column name for this potential's area in km2."""
        return "{}_km2".format(self.name.lower())
    @property
    def capacity_name(self):
        """Column name for this potential's capacity in MW."""
        return "{}_mw".format(self.name.lower())
    @property
    def electricity_yield_name(self):
        """Column name for this potential's yield in TWh per year."""
        return "{}_twh_per_year".format(self.name.lower())
    @staticmethod
    def onshore():
        """Returns all onshore potentials."""
        return [
            Potential.ROOFTOP_PV,
            Potential.OPEN_FIELD_PV,
            Potential.ONSHORE_WIND,
        ]
    @staticmethod
    def offshore():
        """Returns all offshore potentials."""
        return [
            Potential.OFFSHORE_WIND
        ]
    def __repr__(self):
        return self.electricity_yield_name
    def __str__(self):
        return self.__repr__()
@click.command()
@click.argument("path_to_units")
@click.argument("path_to_eez")
@click.argument("path_to_shared_coast")
@click.argument("path_to_electricity_yield_pv_prio")
@click.argument("path_to_electricity_yield_wind_prio")
@click.argument("path_to_eligibility_categories")
@click.argument("path_to_land_cover")
@click.argument("path_to_protected_areas")
@click.argument("path_to_result")
@click.argument("scenario")
@click.argument("config", type=Config())
def potentials(path_to_units, path_to_eez, path_to_shared_coast,
               path_to_electricity_yield_pv_prio, path_to_electricity_yield_wind_prio,
               path_to_eligibility_categories, path_to_land_cover, path_to_protected_areas,
               path_to_result, scenario, config):
    """Determine potential of renewable electricity in each administrative unit.

    * Take the (only technically restricted) raster data potentials,
    * add restrictions based on scenario definitions,
    * allocate the onshore potentials to the administrative units,
    * allocate the offshore potentials to exclusive economic zones (EEZ),
    * allocate the offshore potential of EEZ to units based on the fraction of shared coast.
    """
    # Load all raster layers; band 1 of each file carries the data.
    with rasterio.open(path_to_eligibility_categories, "r") as src:
        eligibility_categories = src.read(1)
    with rasterio.open(path_to_electricity_yield_pv_prio, "r") as src:
        # Keep the affine transform so zonal statistics can map pixels to shapes.
        transform = src.transform
        electricity_yield_pv_prio = src.read(1)
    with rasterio.open(path_to_electricity_yield_wind_prio, "r") as src:
        electricity_yield_wind_prio = src.read(1)
    with rasterio.open(path_to_land_cover, "r") as src:
        land_cover = src.read(1)
    with rasterio.open(path_to_protected_areas, "r") as src:
        protected_areas = src.read(1)
    # Vector layers: administrative units and exclusive economic zones.
    with fiona.open(path_to_units, "r") as src:
        unit_ids = [feature["properties"]["id"] for feature in src]
        unit_geometries = [feature["geometry"] for feature in src]
    with fiona.open(path_to_eez, "r") as src:
        eez_ids = [feature["properties"]["id"] for feature in src]
        eez_geometries = [feature["geometry"] for feature in src]
    # Matrix of coast fractions shared between units and EEZs.
    shared_coasts = pd.read_csv(path_to_shared_coast, index_col=0)
    # Scale the technical potential down according to the scenario's shares.
    electricity_yield_pv_prio, electricity_yield_wind_prio = apply_scenario_config(
        potential_pv_prio=electricity_yield_pv_prio,
        potential_wind_prio=electricity_yield_wind_prio,
        categories=eligibility_categories,
        land_cover=land_cover,
        protected_areas=protected_areas,
        scenario_config=config["scenarios"][scenario]
    )
    # Where both PV and wind are possible, keep only the higher-yield one.
    electricity_yield_pv_prio, electricity_yield_wind_prio = decide_between_pv_and_wind(
        potential_pv_prio=electricity_yield_pv_prio,
        potential_wind_prio=electricity_yield_wind_prio,
        electricity_yield_pv_prio=electricity_yield_pv_prio,
        electricity_yield_wind_prio=electricity_yield_wind_prio,
        eligibility_categories=eligibility_categories
    )
    # Sum each potential class over every administrative unit polygon.
    onshore_potentials = pd.DataFrame(
        index=unit_ids,
        data={
            potential: potentials_per_shape(
                eligibilities=potential.eligible_on,
                potential_map=(electricity_yield_pv_prio if "pv" in str(potential).lower()
                               else electricity_yield_wind_prio),
                eligibility_categories=eligibility_categories,
                shapes=unit_geometries,
                transform=transform
            )
            for potential in Potential.onshore()
        }
    )
    offshore_eez_potentials = pd.DataFrame(
        index=eez_ids,
        data={
            potential: potentials_per_shape(
                eligibilities=potential.eligible_on,
                potential_map=(electricity_yield_pv_prio if "pv" in str(potential).lower()
                               else electricity_yield_wind_prio),
                eligibility_categories=eligibility_categories,
                shapes=eez_geometries,
                transform=transform
            )
            for potential in Potential.offshore()
        }
    )
    # Distribute EEZ potentials to units weighted by shared-coast fractions.
    offshore_potentials = pd.DataFrame(
        data=shared_coasts.dot(offshore_eez_potentials),
        columns=Potential.offshore()
    )
    potentials = pd.concat([onshore_potentials, offshore_potentials], axis=1)
    potentials.index.name = "id"
    potentials.to_csv(
        path_to_result,
        header=True,
        index=True
    )
def apply_scenario_config(potential_pv_prio, potential_wind_prio, categories,
                          land_cover, protected_areas, scenario_config):
    """Limit potential in each pixel based on scenario config.

    Both input maps are scaled in place: every scenario share multiplies the
    pixels selected by the corresponding mask, and disallowed pixels are set
    to zero. Returns the two (mutated) maps.
    """
    # share-rooftops-used
    share_rooftops_used = scenario_config["share-rooftops-used"]
    mask = categories == Eligibility.ROOFTOP_PV
    potential_pv_prio[mask] = potential_pv_prio[mask] * share_rooftops_used
    potential_wind_prio[mask] = potential_wind_prio[mask] * share_rooftops_used
    # share-forest-used-for-wind
    share_forest_used_for_wind = scenario_config["share-forest-used-for-wind"]
    mask = np.isin(land_cover, FOREST) & (categories != Eligibility.ROOFTOP_PV)
    potential_pv_prio[mask] = potential_pv_prio[mask] * share_forest_used_for_wind
    potential_wind_prio[mask] = potential_wind_prio[mask] * share_forest_used_for_wind
    # share-other-land-used
    share_other_land_used = scenario_config["share-other-land-used"]
    mask = np.isin(land_cover, OTHER) & (categories != Eligibility.ROOFTOP_PV)
    potential_pv_prio[mask] = potential_pv_prio[mask] * share_other_land_used
    potential_wind_prio[mask] = potential_wind_prio[mask] * share_other_land_used
    # share-farmland-used
    share_farmland_used = scenario_config["share-farmland-used"]
    mask = np.isin(land_cover, FARM) & (categories != Eligibility.ROOFTOP_PV)
    potential_pv_prio[mask] = potential_pv_prio[mask] * share_farmland_used
    potential_wind_prio[mask] = potential_wind_prio[mask] * share_farmland_used
    # share-offshore-used
    share_offshore_used = scenario_config["share-offshore-used"]
    mask = categories == Eligibility.OFFSHORE_WIND
    potential_pv_prio[mask] = potential_pv_prio[mask] * share_offshore_used
    potential_wind_prio[mask] = potential_wind_prio[mask] * share_offshore_used
    # pv-on-farmland: optionally forbid open-field PV on farmland entirely.
    pv_on_farmland = scenario_config["pv-on-farmland"]
    if not pv_on_farmland:
        mask = np.isin(land_cover, FARM) & (categories == Eligibility.ONSHORE_WIND_AND_PV)
        potential_pv_prio[mask] = 0
    # share-protected-areas-used: optionally zero out all protected land
    # (rooftops excepted, as they are built environment).
    use_protected_areas = scenario_config["use-protected-areas"]
    if not use_protected_areas:
        mask = (protected_areas == ProtectedArea.PROTECTED) & (categories != Eligibility.ROOFTOP_PV)
        potential_pv_prio[mask] = 0
        potential_wind_prio[mask] = 0
    return potential_pv_prio, potential_wind_prio
def decide_between_pv_and_wind(potential_pv_prio, potential_wind_prio,
                               electricity_yield_pv_prio, electricity_yield_wind_prio,
                               eligibility_categories):
    """When both are possible, choose PV when its electricity yield is higher, or vice versa."""
    both_possible = eligibility_categories == Eligibility.ONSHORE_WIND_AND_PV
    wind_wins = electricity_yield_pv_prio <= electricity_yield_wind_prio
    # Zero out the losing technology only where both were eligible.
    potential_pv_prio[both_possible & wind_wins] = 0
    potential_wind_prio[both_possible & ~wind_wins] = 0
    return potential_pv_prio, potential_wind_prio
def potentials_per_shape(eligibilities, potential_map, eligibility_categories, shapes, transform):
    """Determine potential of one eligibility category per shape."""
    # Work on a copy so the caller's raster is untouched; zero everything
    # outside the requested eligibility categories.
    masked_map = potential_map.copy()
    masked_map[~np.isin(eligibility_categories, eligibilities)] = 0
    stats = zonal_stats(
        shapes,
        masked_map,
        affine=transform,
        stats="sum",
        nodata=-999
    )
    return [entry["sum"] for entry in stats]
if __name__ == "__main__":
    # Entry point: run the full potentials workflow (defined earlier in this module).
    potentials()
| timtroendle/possibility-for-electricity-autarky | src/potentials.py | potentials.py | py | 10,162 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "enum.IntEnum",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "src.technical_eligibility.Eligibility.ROOFTOP_PV",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_... |
20804943026 | import pickle
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
# Fresh (unused) estimator; the actual model is loaded from disk below.
model = LinearRegression()
# NOTE(review): pickle.load on an arbitrary file can execute code; acceptable
# here only because this is a CTF solution working on a known artifact.
loaded_model = pickle.load(open('model', 'rb'))
# Candidate character pool; the regression coefficients select characters from it.
val = "sssfAfsDfe%%%{dInIisdChdh*e]DHSdbeTNhfhdyeSSWTTFSSSllfjdjs{\\#3fdas34df7adJHHstcsdDFur3sfj_1mdfneypcs0KJDsrsFs7sd4nfec3_sdrufdl35}453"
print(len(val))
res = ""
# Each coefficient acts as a 0/1 mask: positions whose coefficient equals 1
# contribute their character to the hidden message.
for pos, i in enumerate(loaded_model.coef_):
    print(i)
    if i == 1:
        res += val[pos]
print(res)
print(len(loaded_model.coef_))
print(loaded_model) | MysterionRise/ai-ctf-2022-solutions | stegano-regression/stegano.py | stegano.py | py | 502 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 6,
"usage_type": "call"
}
] |
30763331030 | import sys
import torch
import ool.picture.models.thirdparty.space.model as spc
from ool.picture.models.thirdparty.space.model import Space
from oolexp import OOLLayeredBoxExp
class MultipleOptimizer(torch.optim.Optimizer):
    """Present several torch optimizers as a single ``Optimizer``.

    Deliberately does not call ``super().__init__``; instead it mirrors the
    attributes PyTorch code expects (``defaults``, ``state``, ``param_groups``)
    from the wrapped optimizers.
    """

    def __init__(self, *optimisers):
        self.opts = optimisers
        self.defaults = self.opts[0].defaults
        self.state = self.opts[0].state
        self.param_groups = []
        for opt in self.opts:
            self.param_groups.extend(opt.param_groups)

    def __getstate__(self):
        return {
            'defaults': self.defaults,
            'state': self.state,
            'param_groups': self.param_groups,
        }

    def __setstate__(self, state):
        self.__dict__.update(state)

    def __repr__(self):
        return f"Multi:{' '.join(str(opt) for opt in self.opts)}"

    def state_dict(self):
        # One nested state dict per wrapped optimizer, under the 'opts' key.
        return {
            'opts': [
                opt.state_dict() for opt in self.opts
            ]
        }

    def load_state_dict(self, state_dict):
        # Bug fix: state_dict() stores the list under 'opts', but this method
        # previously read 'opt', so every load raised KeyError.
        for opt, sd in zip(self.opts, state_dict['opts']):
            opt.load_state_dict(sd)

    def zero_grad(self, set_to_none: bool = False):
        for opt in self.opts:
            opt.zero_grad(set_to_none)

    def step(self, closure=None):
        # closure is now optional (backward-compatible): standard torch
        # optimizers accept closure=None.
        for opt in self.opts:
            opt.step(closure)

    def add_param_group(self, param_group):
        raise NotImplementedError()
class LitSPACE(OOLLayeredBoxExp):
    """Lightning-style experiment wrapper around the third-party SPACE model."""

    def __init__(self,
                 tag='test',
                 seed=None,
                 data='clevr-crop-(128, 128)',
                 batch_size=16,
                 grad_clip=1.0,
                 # learning_rate=1e-4,
                 max_steps=160000,
                 fg_std = 0.15,
                 bg_std = 0.15,
                 ):
        # Base class is configured to track 'mse' and select the minimum.
        super(LitSPACE, self).__init__(seed, 'mse', 'min')
        self.save_hyperparameters()
        # SPACE reads its sigmas from a module-level arch config, so they must
        # be patched before the model is constructed.
        spc.arch.fg_sigma = fg_std
        spc.arch.bg_sigma = bg_std
        self.model = Space()

    def training_step(self, batch, batch_idx):
        """One optimization step; returns the scalar loss produced by SPACE."""
        batch = self.accelated_batch_postprocessing(batch)
        img, *other = batch
        # SPACE uses the global step internally (e.g. for annealing schedules).
        output = self.model(img, self.trainer.global_step)
        self.maybe_log_training_outputs(output)
        return output['loss']

    def configure_optimizers(self):
        # Per the SPACE paper/reference code: Adam for the background module,
        # RMSprop for the foreground module, wrapped into one optimizer.
        adam = torch.optim.Adam(list(self.model.bg_module.parameters()), lr=1e-3)
        rms = torch.optim.RMSprop(list(self.model.fg_module.parameters()), lr=1e-5)
        return MultipleOptimizer(rms, adam)

    # def trainer_kwargs(self):
    #     return dict(accumulate_grad_batches=3)

    def validation_step(self, batch, batch_idx, dataloader_idx=None):
        """Validation forward pass; logging only, no return value."""
        prefix = '' if dataloader_idx is None else f"v{dataloader_idx}/"
        batch = self.accelated_batch_postprocessing(batch)
        img, *other = batch
        output = self.model(img, self.trainer.global_step)
        self.maybe_log_validation_outputs(batch, batch_idx, output, prefix)
if __name__ == '__main__':
    # Echo the exact command line (useful when grepping experiment logs),
    # then hand control to the CLI entry point provided by the base class.
    print(' '.join(sys.argv))
    LitSPACE.parse_args_and_execute()
| karazijal/clevrtex | experiments/space.py | space.py | py | 3,054 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "torch.optim",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "oolexp.OOLLayeredBoxExp",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "ool.picture.models.thirdparty.space.model.arch",
"line_number": 69,
"usage_type": "attribute"
... |
6972201686 | import os
import argparse
#from tools import train_net
from tools.lib import init_lr
import random
import numpy as np
from tools.classification import classification
from tools.classification_multi import classification_multi
import torch
def seed_torch(seed=0):
    """Seed every RNG in play (hash, random, numpy, torch CPU/CUDA) and force
    deterministic cuDNN behaviour for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Trade cuDNN autotuning speed for bit-for-bit reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
#torch.backends.cudnn.enabled = False
seed_torch(0)
root_path = os.getcwd() #'/data2/lqm/pytorch_interpretable/py_icnn'
parser = argparse.ArgumentParser('parameters')
#info:gpu
parser.add_argument('--gpu_id',type=int,default=0,help='select the id of the gpu')
#info:task
parser.add_argument('--task_name',type=str,default='classification',help='select classification or classification_multi')
parser.add_argument('--task_id',type=int,default=0,help='0,1,2..')
parser.add_argument('--dataset',type=str,default='voc2010_crop',help='select voc2010_crop, helen, cub200,cubsample'
                                                                     'celeba, vocpart, ilsvrcanimalpart')
parser.add_argument('--imagesize',type=int,default=224,help='')
parser.add_argument('--label_name',type=str,default='bird',help='if voc2010_crop, set bird, cat, cow, dog, horse or sheep;'
                                                                'else, it does not matter')
parser.add_argument('--label_num',type=int,default=1,help='keep the same number of label_name')
parser.add_argument('--model',type=str,default='resnet_18',help='select vgg_vd_16, vgg_m, vgg_s, '
                                                                'alexnet, resnet_18, resnet_50, densenet_121')
parser.add_argument('--losstype',type=str,default='logistic',help='select logistic or softmax')
#info:hyper-parameter
parser.add_argument('--batchsize',type=int,default=8,help='select more than 8 may cause out of cuda memory, '
                    'when you want to choose different batchsize, you also need to adjust line 94 of /tools/sgd.py at the same time to make them consistent')
# Bug fix: the four options below take fractional values (a rate in [0, 1],
# a learning rate, weight decay, momentum) but were declared type=int, so any
# value passed on the command line either crashed (int('0.0005')) or was
# silently truncated. Declared as float; defaults are unchanged.
parser.add_argument('--dropoutrate',type=float,default=0,help='select the number between 0 and 1')
parser.add_argument('--lr',type=float,default=0,help='see function init_lr in /tools/lib.py for details')
parser.add_argument('--epochnum',type=int,default=0,help='see function init_lr in /tools/lib.py for details')
parser.add_argument('--weightdecay',type=float,default=0.0005,help='0.02,0.002')
parser.add_argument('--momentum',type=float,default=0.09,help='0.02,0.002')

args = parser.parse_args()
# lr and epochnum are always derived from the model/label/loss combination.
args.lr, args.epochnum = init_lr(args.model,args.label_num,args.losstype) #init lr and epochnum

if(args.task_name=='classification'):
    if args.dataset == 'celeba':
        args.label_num = 40  # CelebA has 40 binary attributes
    classification(root_path, args)
else:
    if args.dataset == 'vocpart':
        args.label_name = ['bird','cat','cow','dog','horse','sheep']
        args.label_num = 6
    classification_multi(root_path,args)
| ada-shen/ICNN | demo.py | demo.py | py | 3,178 | python | en | code | 59 | github-code | 6 | [
{
"api_name": "random.seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"lin... |
2894455012 | import numpy as np
import matplotlib.pyplot as plt
def gaussEliminationLS(m, n, a, x):
    """Solve an m x n augmented linear system in place by Gaussian elimination
    with partial pivoting, storing the solution in x (also returned).

    a: m rows, n columns where column n-1 is the right-hand side.

    Bug fixes vs. the original:
    - the pivot test was ``abs(a[i][i] < abs(a[k][i]))`` — abs() of a boolean,
      i.e. the left-hand magnitude was never taken; now compares |a[i][i]| with
      |a[k][i]|.
    - the pivot scan ran over ALL rows (range(m)), which could swap already
      eliminated rows back in; now scans only rows below the pivot.
    """
    for i in range(m - 1):
        # Partial pivoting: move the largest |entry| in column i (at or below
        # row i) onto the diagonal to avoid dividing by a small pivot.
        for k in range(i + 1, m):
            if abs(a[i][i]) < abs(a[k][i]):
                for j in range(n):
                    a[i][j], a[k][j] = a[k][j], a[i][j]
        # Eliminate column i from every row below the pivot row.
        for k in range(i + 1, m):
            term = a[k][i] / a[i][i]
            for j in range(n):
                a[k][j] = a[k][j] - term * a[i][j]
    # Back substitution (columns 0..n-2 are coefficients, column n-1 is RHS).
    for i in range(m - 1, -1, -1):
        x[i] = a[i][n - 1]
        for j in range(i + 1, n - 1):
            x[i] = x[i] - a[i][j] * x[j]
        x[i] = x[i] / a[i][i]
    return x
def cSCoeffCalc(n, h, sig, y, a, b, c, d):
    """Fill the cubic-spline coefficient arrays a, b, c, d in place, one entry
    per interval, from the interval widths h, curvatures sig and ordinates y."""
    for idx in range(n):
        width = h[idx]
        sig_lo, sig_hi = sig[idx], sig[idx + 1]
        d[idx] = y[idx]
        b[idx] = sig_lo / 2.0
        a[idx] = (sig_hi - sig_lo) / (width * 6.0)
        c[idx] = (y[idx + 1] - y[idx]) / width - width * (2 * sig_lo + sig_hi) / 6.0
def tridiagonalCubicSplineGen(n, h, a, y):
    """Populate the (n-1) x n augmented tridiagonal system whose solution gives
    the interior curvatures of a natural cubic spline.

    All written cells are distinct (diagonal, off-diagonals, RHS column n-1),
    so a single fused pass is equivalent to the original three loops.
    """
    for r in range(n - 1):
        # diagonal entry
        a[r][r] = 2 * (h[r] + h[r + 1])
        # symmetric off-diagonal pair (only while both indices stay in range)
        if r < n - 2:
            a[r][r + 1] = h[r + 1]
            a[r + 1][r] = h[r + 1]
        # right-hand side (divided differences scaled by 6)
        a[r][n - 1] = (y[r + 2] - y[r + 1]) * 6 / h[r + 1] - (y[r + 1] - y[r]) * 6 / h[r]
def printMatrix(m, n, matrix):
    """Print all m*n entries on one line, space-separated (trailing space kept,
    matching the original output format)."""
    parts = []
    for row in range(m):
        for col in range(n):
            parts.append(str(matrix[row][col]) + " ")
    print("".join(parts))
def copyMatrix(m, n, matrix1, matrix2):
    """Copy the top-left m x n region of matrix1 into matrix2 element-wise."""
    for row in range(m):
        src, dst = matrix1[row], matrix2[row]
        for col in range(n):
            dst[col] = src[col]
#x= np.array([-3,-2 ,-1, 0, 1, 2, 3])
#y= np.array([-1, -1, -1, 0, 1, 1, 1])
# Sample points to interpolate with a natural cubic spline.
x= np.array([0,1,2.5,3.6,5,7,8.1,10])
y= np.array([0,.8,.6,-.44,-.96,.66,.97,-.54])
m= x.shape[0]   # number of data points
n= m-1          # number of intervals
# Interval widths h[i] = x[i+1] - x[i].
h = np.zeros((n,1))
for i in range(n):
    h[i]=x[i+1]-x[i]
# Per-interval spline coefficients (filled by cSCoeffCalc below).
a = np.zeros((n,1))
b = np.zeros((n,1))
c = np.zeros((n,1))
d = np.zeros((n,1))
# Curvatures at the knots; natural spline => zero at both ends.
sig = np.zeros((n+1,1))
sigTemp = np.zeros((n-1,1))
sig[0]=0
sig[n]=0
# Augmented tridiagonal system for the interior curvatures.
tri = np.zeros((n-1,n))
tridiagonalCubicSplineGen(n,h,tri,y)
print("The tridiagonal system for the Natural spline is:\n\n")
printMatrix(n-1,n,tri)
# Perform Gauss Elimination
gaussEliminationLS(n-1,n,tri,sigTemp)
for i in range(1,n):
    sig[i]=sigTemp[i-1]
# Print the values of Si's
for i in range(n+1):
    print("\nSig["+str(i)+"]= " +str(sig[i]))
# calculate the values of ai's, bi's, ci's, and di's
cSCoeffCalc(n,h,sig,y,a,b,c,d);
print("The equations of cubic interpolation polynomials between the successive intervals are:\n\n")
for i in range(n):
    print("P"+str(i)+"(x) b/w ["+str(x[i])+","+str(x[i+1])+"] = "+str(a[i])+"*(x-"+str(x[i])+")^3+"+str(b[i])+"*(x-"+str(x[i])+")^2+"+str(c[i])+"*(x-"+str(x[i])+")+"+str(d[i])+"\n")
    # Evaluate this interval's cubic (lambda is called immediately, so the
    # late-binding of i over the loop variable is harmless here).
    function = lambda x: (a[i]*(x-x[i])**3+b[i]*(x-x[i])**2+c[i]*(x-x[i])+d[i])
    X= np.linspace(x[i],x[i+1])
    plt.plot(X,function(X))
plt.show()
| meheraj2325/CSE-3212-Numerical-Methods-Lab | lab4/cubic_spline2.py | cubic_spline2.py | py | 2,688 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": ... |
73486867709 | '''
@Jailson
Data: 17-11-2022
'''
import requests
from csv import writer
from datetime import datetime
# Timestamp captured once at import time and reused for the CSV row.
data_e_hora_atuais = datetime.now()
data_e_hora_em_texto = data_e_hora_atuais.strftime('%d/%m/%Y %H:%M')
#################################################################################
# Emon service info
# NOTE(review): server address and API keys are hard-coded in source; move to
# environment variables or a config file before publishing this repository.
emon_ip = "193.136.227.157"
emon_apikey = "95ca8292ee40f87f6ff0d1a07b2dca6f" # emon ecopool
node_id = "ecopool"
##################################################################################
API_KEY = "23ffbe727b2bee451d3dc7b37ad2b813"
API_KEY_PRO = "5c27c543425c4d4a1efc3c6bee965937"
cidade = "faro"
code = "351"
# OpenWeatherMap 5-day forecast endpoint for the configured city.
link = "https://api.openweathermap.org/data/2.5/forecast?q="+str(cidade)+"&appid="+str(API_KEY)
def main():
    """Fetch the current forecast, push readings to emoncms, append to CSV."""
    requisicao = requests.get(link) # request the forecast from the API
    requisicao_dic = requisicao.json() # store the requested values in a dict
    print(requisicao_dic)
    # Kelvin -> Celsius for the first forecast slot.
    temp = requisicao_dic['list'][0]['main']['temp'] - 273.15
    humidade = requisicao_dic['list'][0]['main']['humidity']
    # Wind speed m/s -> km/h.
    veloc_vent = ((requisicao_dic['list'][0]['wind']['speed']) / (1000)) * 3600
    velocidade = '{:.0f}'.format(veloc_vent)
    temperatura = '{:.0f}'.format(temp)
    #print(temperatura,velocidade,humidade)
    # send to emoncms
    data_json = '{"TemperaturaExt":' + str(temperatura) + ',"HumidadeExt":' + str(humidade) + ',"VelocidadeExt":' + str(velocidade) +'}'
    emon_link = 'http://' + emon_ip + '/emoncms/input/post?node=' + node_id + '&fulljson=' + str(data_json) + "&apikey=" + str(emon_apikey)
    # NOTE(review): local name shadows the `request` identifier commonly used
    # for the requests module; response is ignored (no error checking).
    request = requests.get(emon_link)
    # append to CSV file
    # The data assigned to the list
    list_data = [data_e_hora_em_texto,temperatura, velocidade, humidade]
    with open('files/files.csv', 'a', newline='') as f_object:
        # Pass the CSV file object to the writer() function
        writer_object = writer(f_object)
        # Result - a writer object
        # Pass the data in the list as an argument into the writerow() function
        writer_object.writerow(list_data)
        # Close the file object
        # NOTE(review): redundant — the with-statement already closes the file.
        f_object.close()
if __name__ == "__main__":
main() | marcelo-m7/EcoPool | varexternas.py | varexternas.py | py | 2,070 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.get",
... |
37511806658 | from invimg.scripts.inference import invert
import math
import os
import torch
import torchvision
from tqdm import tqdm
import numpy as np
from optimclip.criteria.clip_loss import CLIPLoss
from optimclip.criteria.id_loss import IDLoss
from optimclip.models.stylegan2.model import Generator
import clip
from faceparsing.test import evaluate
from PIL import Image
from torchvision import transforms
from run_config.config import Options
STYLESPACE_DIMENSIONS = [512 for _ in range(15)] + [256, 256, 256] + [128, 128, 128] + [64, 64, 64] + [32, 32]
# invert()
STYLESPACE_INDICES_WITHOUT_TORGB = [i for i in range(len(STYLESPACE_DIMENSIONS)) if i not in list(range(1, len(STYLESPACE_DIMENSIONS), 3))]
def get_ganmodel(opts):
    """Build the StyleGAN2 generator and load pretrained `g_ema` weights.

    Returns the generator in eval mode on CUDA; requires a GPU.
    """
    generator = Generator(opts.size, 512, 8, channel_multiplier=2)
    # TODO: inspect the generator (translated from original note)
    model = torch.load(opts.gan_model)['g_ema']
    generator.load_state_dict(model, strict=True)
    generator = generator.eval().cuda()
    return generator
def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
    """Learning-rate schedule: cosine ramp-down over the final `rampdown`
    fraction of training, combined with a linear warm-up over the first
    `rampup` fraction. t is the training progress in [0, 1]."""
    decay = min(1, (1 - t) / rampdown)
    decay = 0.5 - 0.5 * math.cos(decay * math.pi)
    warmup = min(1, t / rampup)
    ramp = decay * warmup
    return initial_lr * ramp
def get_init_latent(orig_pic):
    """Load the cached inversion latent and per-layer weight deltas for an image.

    orig_pic: bare file name (e.g. 'img1.png') used as the key into latents.npy.
    Returns (latent_code_init on CUDA, list of delta tensors or None entries).
    """
    latent_path = 'result/inv/latents.npy'
    try:
        latents = np.load(latent_path, allow_pickle=True).item()
        latent_code = np.expand_dims(np.array(latents[orig_pic]), axis=0)
    except FileNotFoundError:
        invert() # no latent code cached for this image yet — invert once more
        latents = np.load(latent_path, allow_pickle=True).item()
        latent_code = np.expand_dims(np.array(latents[orig_pic]), axis=0)
    latent_code_init = torch.tensor(latent_code).cuda()
    deltas_path = 'result/inv/weight_deltas/' + orig_pic.split('.')[0] + '.npy'
    deltas = np.load(deltas_path, allow_pickle=True)
    deltas = [torch.from_numpy(w).cuda() if w is not None else None for w in deltas]
    return latent_code_init, deltas
def get_imgloss(region, orig_img, img_gen, mask):
    """Area-normalized pixel reconstruction loss between original and generated
    images, restricted to the region NOT being edited.

    region: None/empty, {'bbox': (r0, r1, c0, c1)} or {'organ': [...]}.
    orig_img/img_gen: 4-D image tensors (batch, channels, H, W).
    mask: binary tensor matching the images (used by the 'organ' branch).
    NOTE(review): the bbox and fall-through branches read the module-level
    `opts` (set in the __main__ block) for the image size — confirm callers
    always run after opts is initialized.
    """
    img_loss_sum = torch.sum(torch.square(orig_img - img_gen))
    img_loss = 0
    if region:
        if 'bbox' in region:
            bbox = region['bbox']
            # Bug fix: the original used chained slicing [:][:][a:b][c:d],
            # which only re-slices dimension 0 and never crops H/W; select the
            # spatial crop explicitly instead.
            crop_area = (orig_img - img_gen)[:, :, bbox[0]:bbox[1], bbox[2]:bbox[3]]
            img_loss = img_loss_sum - torch.sum(torch.square(crop_area))
            area = opts.size ** 2 - abs(bbox[0] - bbox[1]) * abs(bbox[2] - bbox[3])  # remaining area
            img_loss /= area
        elif 'organ' in region:
            # Loss only where the mask is set, normalized by the mask area.
            img_loss = torch.sum(torch.square(orig_img * mask - img_gen * mask))
            area = mask.norm(1)  # count of ones == L1 norm of a binary mask
            img_loss /= area
        else:
            print('region输入错误')
    else:
        # No region: plain loss averaged over the full image area.
        img_loss = img_loss_sum / (opts.size ** 2)
    return img_loss
def optim(text, input_img, opts, region):
    """CLIP-guided latent optimization: edit `input_img` toward `text`.

    Returns a tensor concatenating [original, inversion, edited, mask] images.
    NOTE(review): if `region` has no 'organ' key, `mask` stays None and the
    final torch.cat would fail — confirm intended call pattern.
    """
    # tokenize the prompt and concatenate
    edit_text = torch.cat([clip.tokenize(text)]).cuda()
    orig_img = Image.open(input_img)
    convert = transforms.ToTensor()
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    orig_img = normalize(convert(orig_img))
    orig_img = orig_img.unsqueeze(0).cuda()
    orig_pic = str(input_img).split('/')[-1]
    latent_code_init, deltas = get_init_latent(orig_pic)
    os.makedirs(opts.results, exist_ok=True)
    gan_generator = get_ganmodel(opts)
    with torch.no_grad():
        latent_code_init = gan_generator([latent_code_init], input_is_latent=True, return_latents=True)
    # generate the initial (inverted) image
    with torch.no_grad():
        inv_img, _ = gan_generator([latent_code_init], input_is_latent=True,input_is_stylespace=True, randomize_noise=True,
                                   )
    latent = [s.detach().clone() for s in latent_code_init]
    for c, s in enumerate(latent):
        if c in STYLESPACE_INDICES_WITHOUT_TORGB:
            s.requires_grad = True
    # NOTE(review): this overwrite discards the per-style-layer list whose
    # requires_grad flags were just set above — looks like leftover code from
    # a stylespace variant; confirm which representation should be optimized.
    latent = latent_code_init.clone().detach()
    latent.requires_grad = True
    clip_loss = CLIPLoss(opts)
    id_loss = IDLoss(opts)
    optimizer = torch.optim.Adam(latent, lr=opts.alpha)
    # build the mask for the region of interest
    mask = None
    if region and 'organ' in region:
        evaluate(region['organ'], 'result/faceparsing/', dspth='input_img/', cp='./faceparsing/res/cp/79999_iter.pth')
        mask = Image.open('result/faceparsing/' + orig_pic)
        mask = convert(mask).cuda()
        mask = mask.repeat(3, 1, 1)
        mask = mask.unsqueeze(0)
    pbar = tqdm(range(opts.step))
    for i in pbar:
        # cosine/warm-up schedule over the run
        t = i / opts.step
        lr = get_lr(t, opts.alpha)
        optimizer.param_groups[0]["lr"] = lr
        img_gen, _ = gan_generator([latent], input_is_latent=True, input_is_stylespace=True, randomize_noise=True)
        c_loss = clip_loss(img_gen, edit_text)
        if opts.id_lambda > 0:
            i_loss = id_loss(img_gen, inv_img)[0]
        else:
            i_loss = 0  # skip the identity model entirely when unused, to save time
        # keep the optimized latent close to the inversion latent
        latent_loss = sum([((latent_code_init[c] - latent[c]) ** 2).sum() for c in range(len(latent_code_init))])
        img_loss = get_imgloss(region, orig_img, img_gen, mask)
        # print('latent_loss', latent_loss)
        # print('img_loss', img_loss)
        loss = c_loss + opts.latent_lambda * latent_loss + opts.id_lambda * i_loss + opts.img_lambda * img_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        pbar.set_description(
            (
                f"loss: {loss.item():.4f};"
            )
        )
        if opts.save_intermediate_image_every > 0 and i % opts.save_intermediate_image_every == 0:
            with torch.no_grad():
                img_gen, _ = gan_generator([latent], input_is_latent=True, input_is_stylespace=True, randomize_noise=True)
                torchvision.utils.save_image(img_gen, f"result/opt/{str(i).zfill(5)}.jpg", normalize=True, range=(-1, 1))
    final_result = torch.cat([orig_img, inv_img, img_gen, mask])
    torchvision.utils.save_image(final_result.detach().cpu(), os.path.join(opts.results, "final_result.jpg"),
                                 normalize=True, scale_each=True, range=(-1, 1))
    return final_result
if __name__ == '__main__':
    opts = Options().get_args()
    # Demo run: recolor the eyes while restricting the pixel loss to the hair mask.
    result = optim(text='blue eyes', input_img='input_img/img1.png', opts=opts, region={'organ': ['hair']})
    from torchvision.utils import make_grid
    from torchvision.transforms import ToPILImage
    result_image = ToPILImage()(
        make_grid(result.detach().cpu(), normalize=True, scale_each=True, range=(-1, 1), padding=0))
    h, w = result_image.size
    # NOTE(review): PIL's resize returns a new image; this return value is
    # discarded, so the displayed image is NOT downscaled — confirm intent.
    result_image.resize((h // 2, w // 2))
    import matplotlib.pyplot as plt
    plt.imshow(result_image)
    plt.show()
| wangyuchi369/makeup-clip | test.py | test.py | py | 6,753 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "optimclip.models.stylegan2.model.Generator",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "math.p... |
16312489701 | from flask import Blueprint, render_template, request, flash, redirect
shared_file = Blueprint('shared_file', __name__)
@shared_file.route('/')
def get__():
    """List every shared file together with its owner's username."""
    from models import File, User
    shared = File.query.filter(File.shared).all()
    owners = [User.get_by(id_=f.creator_id) for f in shared]
    rows = [(f.filename, owner.username) for f, owner in zip(shared, owners)]
    return render_template('shared_file.html', list=rows)
@shared_file.route('/download')
def get__download():
    """Download a shared file; query params: filename, username, type.

    NOTE(review): request validation is done with `assert`, caught as
    AssertionError below. This is deliberate control flow here, but asserts
    are stripped under `python -O`, which would disable validation entirely —
    consider raising a custom exception instead.
    """
    from models import User, File
    try:
        filename = request.args.get('filename')
        assert filename, 'missing filename'
        username = request.args.get('username')
        assert username, 'missing username'
        type_ = request.args.get('type')
        assert type_, 'missing type'
        assert type_ in ('encrypted', 'signature'), 'unknown type'
        user = User.get_by(username=username)
        return File.download_file(user, filename, type_)
    except AssertionError as e:
        # Surface the validation message and bounce back to the listing page.
        message = e.args[0] if len(e.args) else str(e)
        flash('下载失败!' + message)
        return redirect('/shared_file')
| TheMasterOfMagic/ac | views/shared_file.py | shared_file.py | py | 1,156 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "models.File.query.filter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.File.query",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "models.F... |
23748731008 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Entry point for the server application."""
import json
import logging
import traceback
from datetime import datetime
from flask import Response, jsonify, current_app
from flask_jwt_simple import (JWTManager, jwt_required, get_jwt_identity, get_jwt)
from gevent.wsgi import WSGIServer
from backend.flask_app.api.user import user
from backend.flask_app.api.home import home
from .factory import create_app, create_user
from .http_codes import Status
logger = logging.getLogger(__name__)
app = create_app()
jwt = JWTManager(app)
@app.before_first_request
def init():
    """Initialize the application with defaults (creates the default user
    lazily, on the first request rather than at import time)."""
    create_user(app)
@jwt.jwt_data_loader
def add_claims_to_access_token(identity):
    """Explicitly set identity and claims for jwt: one hard-coded admin
    account, every other identity gets the 'user' role."""
    print("identita data loader %s" % identity)
    roles = 'admin' if identity == 'zidpadne@seznam.cz' else 'user'
    now = datetime.utcnow()
    claims = {
        'exp': now + current_app.config['JWT_EXPIRES'],
        'iat': now,
        'nbf': now,
        'sub': identity,
        'roles': roles,
    }
    return claims
def main():
    """Main entry point of the app."""
    try:
        port = 8080
        ip = '0.0.0.0'
        # NOTE(review): the logging *module* is passed as log/error_log;
        # gevent expects a file-like object here — confirm this logs as intended.
        http_server = WSGIServer((ip, port), app,log=logging,error_log=logging)
        print("Server started at: {0}:{1}".format(ip, port))
        http_server.serve_forever()
    except Exception as exc:
        # logger.error(exc.message)
        logger.exception(traceback.format_exc())
    finally:
        # Do something here — e.g. render some main/landing output.
        pass
@app.route('/', methods=['GET'])
def test_connection():
    """Health-check endpoint confirming the API is reachable."""
    payload = {'msg': 'Is okey'}
    return jsonify(payload), 200
app.register_blueprint(user, url_prefix='/api/user')
app.register_blueprint(home, url_prefix='/api/home') | zIPjahoda/Flask-Angular | backend/flask_app/server.py | server.py | py | 1,852 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "factory.create_app",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask_jwt_simple.JWTManager",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "fact... |
27566260651 | from django.shortcuts import render, HttpResponseRedirect
from .forms import MeetingCreateForm
from .models import Meeting
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.contrib import messages
from datetime import datetime, timezone as tz
from django.utils import timezone
def home(request):
    """Render the meeting-creation form; on valid POST, save the meeting with
    the current user as its creator and redirect to the meeting list."""
    form = MeetingCreateForm()
    if request.method == 'POST':
        form = MeetingCreateForm(request.POST)
        if form.is_valid():
            new_meeting = form.save(commit=False)
            # The creator is not a form field, so attach the current user
            # before the real save.
            new_meeting.creator = request.user
            new_meeting.save()
            return HttpResponseRedirect(reverse('meeting_list'))
    return render(request, 'onlinemeet/home.html', {'form': form})
@login_required() # only authenticated users may list meetings
def meeting_list(request):
    """Show every meeting created by the logged-in user."""
    current_user = request.user
    # meeting_url = request.build_absolute_uri()
    user_meetings = Meeting.objects.filter(creator=current_user)
    return render(request, 'onlinemeet/meeting_list.html', {'meetings': user_meetings})
def meeting(request, unique_meeting_name):
    """Route a visitor to the right page for a meeting: warn-and-redirect if
    the meeting has not started or has already ended, otherwise show the
    moderator page to the creator and the guest page to everyone else."""
    message = None
    # NOTE(review): local name shadows this view function; harmless but confusing.
    meeting = Meeting.objects.get(unique_meeting_name=unique_meeting_name)
    if not meeting.meeting_time:
        """
        will check if it is not time for the meeting using the property we declared in the model.
        """
        now = timezone.localtime()
        # Time until start, split into H/M/S (divmod yields floats here).
        t = abs(now - meeting.starting_date_time).total_seconds()
        MinutesGet, SecondsGet = divmod(t, 60)
        HoursGet, MinutesGet = divmod(MinutesGet, 60)
        message = f"it is not the time for meeting {meeting.title_of_meeting}, Meeting starts in {HoursGet} Hours : {MinutesGet} Minutes : {'{:.2f}'.format(SecondsGet)} Seconds."
        # return render(request, 'onlinemeet/meeting_list.html', {'meetings': meetings})
        print(now, message)
        messages.warning(request, message)
        # return render(request, 'onlinemeet/meeting_list.html', {'meetings': meetings})
        return HttpResponseRedirect(reverse('home'))
    elif meeting.after_meeting:
        """ will check if the meeting time has passed"""
        now = timezone.localtime()
        # Time elapsed since the scheduled end.
        t = abs(meeting.ending_date_time - now).total_seconds()
        MinutesGet, SecondsGet = divmod(t, 60)
        HoursGet, MinutesGet = divmod(MinutesGet, 60)
        message = f"The meeting {meeting.title_of_meeting}, ended {HoursGet} Hours : {MinutesGet} Minutes : {'{:.2f}'.format(SecondsGet)} Seconds."
        print(now, message)
        messages.warning(request, message)
        return HttpResponseRedirect(reverse('home'))
    if not request.user == meeting.creator:
        """check to know if the current user is the creator of the meeting
        if True, then the person will be redirected to a page that has moderator privileges, else, redirect the guest to the guest page."""
        return render(request, 'onlinemeet/guest.html', {'meeting': meeting,
                                                         "message": message})
    return render(request, 'onlinemeet/meeting_page.html', {'meeting': meeting})
| Afeez1131/Django-online-meeting | onlinemeet/views.py | views.py | py | 3,327 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "forms.MeetingCreateForm",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "forms.MeetingCreateForm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.HttpResponseRedirect",
"line_number": 21,
"usage_type": "call"
},
{
... |
72435188028 | import requests
import json
URL = "http://localhost:8000/auth/users/"
def post_data():
    """POST a registration payload to the users endpoint and print the reply.

    NOTE(review): every candidate `data` payload below is commented out, so
    `data` is undefined at the requests.post call — this function raises
    NameError as written. Uncomment (and fix) one payload before use.
    """
    # data = {
    #     "emial":"adirokade15@gmail.com",
    #     "name":"AdityaRokade",
    #     "password":"djangoroot",
    #     "re_password":"djangoroot",
    #     "first_name":"adi",
    #     "last_name":"rokade"
    # }

    # data ={
    #     'email':'adirokade15@gmail.com',
    #     'name':'AdityaRokade',
    #     'password':'djangoroot',
    #     're_password':'djangoroot',
    #     'first_name':'adi',
    #     'last_name':'rokade'
    # }
    # print(type(data))
    # print("myapp1")
    # json_data = json.dumps(data)
    # print("myapp2",json_data)
    # print(type(json_data))
    r = requests.post(url = URL, data = data)
    data = r.json()
    print(data)
post_data() | adityarokade/social_book | social_book/myapp.py | myapp.py | py | 805 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 31,
"usage_type": "call"
}
] |
22290772593 | import streamlit as st
import pandas as pd
st.title("Upload CSV project")
# File-picker widget; returns None until the user selects a file.
uploaded_csv = st.file_uploader('選擇CSV檔')
if uploaded_csv is not None:
    # Parse the uploaded buffer as UTF-8 CSV and show it.
    df = pd.read_csv(uploaded_csv,encoding='utf-8')
    st.header('CSV檔內容:')
st.dataframe(df) | chiangcw0410/mysql_test | test/upload.py | upload.py | py | 259 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.title",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "streamlit.file_uploader",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.header... |
27581741716 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 13:56:32 2017
@author: hannu
"""
import numpy as np
import matplotlib.pyplot as plt
import random
import scipy.constants as const
from constants import *
####### Functions for KMC ######
def f(sigma, x):
    """Gaussian profile at x with width sigma.

    Uses the 1/(2*pi*sigma^2) prefactor as in the original (2-D-normal style
    normalization rather than the 1-D 1/sqrt(2*pi*sigma^2)); kept as-is.
    """
    variance = sigma ** 2
    prefactor = 1 / (2 * const.pi * variance)
    return prefactor * np.exp(-(x ** 2) / (2 * variance))
#function to calculate the recombinations
def recombination(vacx,vacy,intx,inty,N,rates,defs):
    """Annihilate vacancy/interstitial pairs whose radial distances from the
    origin differ by at most `recomb` (a module constant star-imported from
    `constants` — TODO confirm).

    Mutates the coordinate arrays (annihilated defects become NaN) and zeroes
    the corresponding rates; returns the updated defect count and arrays.
    NOTE(review): the criterion compares |r_i - r_j| of distances-from-origin,
    not the actual pair separation — presumably intentional for this 1-D-like
    model; verify.
    NOTE(review): interstitial rates live at offset j+299 in the shared rates
    array (rates[0..299] vacancies?) — for N == 300 an offset of j+300 would
    be expected; confirm the intended layout.
    """
    distvac=distances(vacx,vacy,N)
    distint=distances(intx,inty,N)
    for i in range(N):
        for j in range(N):
            #distvac=distances(vacx,vacy,N)
            #distint=distances(intx,inty,N)
            if(abs((distvac[i]-distint[j]))<=recomb):
                # Remove the vacancy: NaN coordinates, zero its rate.
                vacx[i]=np.NaN
                vacy[i]=np.NaN
                rates[i]=0
                defs=defs-2
                # Remove the paired interstitial.
                intx[j]=np.NaN
                inty[j]=np.NaN
                rates[j+299]=0
                # Recompute (now NaN) distances so the pair cannot match again.
                distvac[i]=np.sqrt((vacx[i]**2+vacy[i]**2))
                distint[j]=np.sqrt((intx[j]**2+inty[j]**2))
    return(defs,vacx,vacy,intx,inty)
#calculates the distance from the origin
def distances(x, y, N):
    """Return a float array of the Euclidean distances from the origin for the
    first N (x, y) coordinate pairs.

    Fix: the original allocated the result with np.linspace over hard-coded
    bounds (-70e-10..70e-10) that had no meaning — every element was
    immediately overwritten. A plain zeros allocation is equivalent (same
    float64 dtype and length) and not misleading.
    """
    result = np.zeros(N)
    for i in range(N):
        result[i] = np.sqrt(x[i] ** 2 + y[i] ** 2)
    return result
def cum(rates):
    """Return the cumulative rate list R with R[i] = rates[0] + ... + rates[i],
    as used to select a KMC event by sampling u * R[-1].

    Bug fix: the original computed R[i] = sum(R) + rates[i], i.e. it summed the
    previously accumulated cumulative values (roughly doubling each step)
    instead of forming a prefix sum. Also generalized from a hard-coded length
    of 600 to len(rates) (identical behaviour for 600-entry inputs).
    """
    R = [0.0] * len(rates)
    running = 0.0
    for i, rate in enumerate(rates):
        running += rate
        R[i] = running
    return R
return(R) | hpelttari/Kinetic-Monte-Carlo | Si_migration/functions.py | functions.py | py | 1,456 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "scipy.constants.pi",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.NaN",
"li... |
23113245409 | #-*- coding: utf-8 -*-
#-----------------------------------------------------------------------#
# Autor: Luis Enrique Rojas Desales #
#-----------------------------------------------------------------------#
# Este codigo esta liberado bajo licencia GPL. #
#-----------------------------------------------------------------------#
'''
Descarga Masiva SAT
Luis E. Rojas Desales
'''
from Interfaz import ListaRFC
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QMainWindow
from PySide2 import QtCore
import os
import configparser
class ListaC(QMainWindow):
    """Modal window listing the RFC folders under C:/CFDIs/.

    Selecting an entry (double-click or the accept button) loads that RFC's
    datos.cfg and pushes the taxpayer data into the parent window.
    """

    def __init__(self, parent):
        self.parent = parent
        super().__init__()
        self.ui = ListaRFC.Ui_MainWindow()
        self.ui.setupUi(self)
        self.setWindowModality(QtCore.Qt.ApplicationModal)
        self.setWindowIcon(QIcon('cfdi.ico'))
        self.inicio()

    def inicio(self):
        """Populate the list widget and wire up the selection signals."""
        contenido = os.listdir('C:/CFDIs/')
        for nombre in contenido:
            self.ui.lista.addItem(nombre)
        self.ui.lista.itemDoubleClicked.connect(self.onClicked)
        self.ui.aceptar.clicked.connect(self.aceptar)

    def _seleccionar(self, item):
        """Shared handler: close this window, read the selected RFC's config
        and load it into the parent window (previously duplicated in
        onClicked and aceptar)."""
        self.close()
        configuracion = configparser.ConfigParser()
        configuracion.read('C:/CFDIs/' + item.text() + '/datos.cfg')
        self.parent.ui.lrfc.setText(configuracion['Contribuyente']['rfc'])
        self.parent.ui.lrazon.setText(configuracion['Contribuyente']['razon'])
        self.parent.cargar(configuracion['Contribuyente']['rfc'])

    def onClicked(self, item):
        #QMessageBox.information(self, "Info", item.text())
        self._seleccionar(item)

    def aceptar(self):
        item = self.ui.lista.currentItem()
        # Robustness fix: with no selection currentItem() returns None and the
        # original crashed on item.text(); ignore the click instead.
        if item is None:
            return
        self._seleccionar(item)
| ikiex/CFDIMasivo | CFDI/Controlador/lista.py | lista.py | py | 2,079 | python | es | code | 3 | github-code | 6 | [
{
"api_name": "PySide2.QtWidgets.QMainWindow",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "Interfaz.ListaRFC.Ui_MainWindow",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "Interfaz.ListaRFC",
"line_number": 25,
"usage_type": "name"
},
{
"a... |
35862928120 | import json
import redis
from flask import Flask, request, Response, make_response
import base64
from jwt.api_jwt import PyJWT
app = Flask(__name__)
d = {'write': '1', 'read': '2', 'delete': '3'}
HOST = 'rediska'
Key = '12345'
@app.route('/auth/')
def requestic4():
    """Issue a JWT for a known role/password pair supplied via HTTP Basic auth.

    The Basic-auth username is one of the roles in `d` ('write'/'read'/'delete')
    and the password must match that role's entry.
    """
    role = request.authorization.username
    pwd = request.authorization.password
    expected = d.get(role)
    if expected is not None and expected == pwd:
        token = PyJWT().encode(payload={"role": str(role)}, key=Key)
        token_text = str(token, 'UTF-8')
        rez = make_response(token_text, 200)
        # Token is also exposed via the Authorization header for convenience.
        rez.headers['Authorization'] = token_text
        return rez
    return make_response("invalid user or password" + str(role) + ' ' + str(pwd), 400)
@app.route('/<key>/', methods=['PUT'])
def requestic1(key):
    """Store JSON "message" under integer *key* (requires a 'write' JWT).

    Returns 201 with the stored mapping on create, 200 "changed" on
    overwrite, 400 on missing message or bad/mismatched token.
    """
    key = int("{}".format(key))
    data = json.loads(request.data)
    Jwt1 = request.headers['Authorization']
    message = data.get("message")
    try:
        jwt_Obj = PyJWT()
        decode_token = jwt_Obj.decode(str(Jwt1), key=Key)
        if decode_token['role'] != "write":
            return make_response("invalid1 tiket", 400)
        # The old `key == None` test was dead code: `key` is always an int here.
        if message is None:
            return Response(status=400)
        cache = redis.Redis(host=HOST, port=6379)
        cache.ping()
        if cache.exists(key):
            # SET overwrites in place; the old delete-before-set was redundant.
            cache.set(key, json.dumps(message))
            return make_response("changed", 200)
        cache.set(key, json.dumps(message))
        return make_response({key: message}, 201)
    except Exception:
        # Any decode or redis failure is reported as a bad ticket.
        return make_response("invalid2 tiket", 400)
@app.route('/<key>/', methods=['GET'])
def requestic2(key):
    """Fetch the value stored under integer *key* (requires a 'read' JWT)."""
    key = int("{}".format(key))
    token = request.headers['Authorization']
    try:
        decoded = PyJWT().decode(str(token), key=Key)
        # Guard clause: wrong role short-circuits before touching redis.
        if decoded['role'] != "read":
            return make_response("invalid1 tiket", 400)
        cache = redis.Redis(host=HOST, port=6379)
        cache.ping()
        if not cache.exists(key):
            return Response(status=400)
        stored = json.loads(cache.get(key))
        return make_response({"message": stored}, 200)
    except Exception:
        return make_response("invalid2 tiket", 400)
@app.route('/<key>/', methods=['DELETE'])
def requestic3(key):
    """Delete the value stored under integer *key* (requires a 'delete' JWT).

    Returns 204 on success, 404 when the key is absent, 400 on bad token.
    """
    key = int("{}".format(key))
    Jwt1 = request.headers['Authorization']
    try:
        jwt_Obj = PyJWT()
        decode_token = jwt_Obj.decode(str(Jwt1), key=Key)
        if decode_token['role'] != "delete":
            return make_response("invalid1 tiket", 400)
        cache = redis.Redis(host=HOST, port=6379)
        cache.ping()
        # The old `key == None` branch was dead code: `key` is always an int.
        if not cache.exists(key):
            return Response(status=404)
        res = json.loads(cache.get(key))
        cache.delete(key)
        # NOTE(review): HTTP 204 responses must not carry a body; most
        # clients will drop it. Kept for behavioral compatibility.
        return make_response({"message": res}, 204)
    except Exception:
        return make_response("invalid2 tiket", 400)
# Dev entrypoint: bind on all interfaces using Flask's default port (5000).
if __name__ == '__main__':
    app.run(host = '0.0.0.0')
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.authorization",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.reque... |
74743637626 | import re
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from customeditor import CustomEditor
from camelot.view.art import Icon
import camelot.types
class VirtualAddressEditor(CustomEditor):
    """Editor widget for a ``camelot.types.VirtualAddress`` value.

    Combines a combo box selecting the address type ('email', 'phone',
    'fax', ...), a line edit for the address itself, and a tool button that
    opens a ``mailto:`` URL for email addresses.  Legacy Python 2 / PyQt4
    code — note the ``unicode`` calls.
    """
    def __init__(self, parent=None, editable=True, address_type=None, **kwargs):
        # When `address_type` is given, the type is fixed and the combo hidden.
        CustomEditor.__init__(self, parent)
        self._address_type = address_type
        self.layout = QtGui.QHBoxLayout()
        self.layout.setMargin(0)
        self.combo = QtGui.QComboBox()
        self.combo.addItems(camelot.types.VirtualAddress.virtual_address_types)
        self.combo.setEnabled(editable)
        if address_type:
            self.combo.setVisible(False)
        self.layout.addWidget(self.combo)
        self.editor = QtGui.QLineEdit()
        self.editor.setEnabled(editable)
        self.layout.addWidget(self.editor)
        self.setFocusProxy(self.editor)
        self.editable = editable
        # Default icon; swapped per address type in set_value().
        nullIcon = Icon('tango/16x16/apps/internet-mail.png').getQIcon()
        self.label = QtGui.QToolButton()
        self.label.setIcon(nullIcon)
        self.label.setAutoFillBackground(False)
        self.label.setAutoRaise(True)
        self.label.setEnabled(False)
        self.label.setToolButtonStyle(Qt.ToolButtonIconOnly)
        self.layout.addWidget(self.label)
        self.editor.editingFinished.connect(self.emit_editing_finished)
        self.editor.textEdited.connect(self.editorValueChanged)
        self.combo.currentIndexChanged.connect(self.comboIndexChanged)
        self.setLayout(self.layout)
        self.setAutoFillBackground(True)
        self.checkValue(self.editor.text())
    @QtCore.pyqtSlot()
    def comboIndexChanged(self):
        """Re-validate the current text and commit when the type changes."""
        self.checkValue(self.editor.text())
        self.emit_editing_finished()
    def set_value(self, value):
        """Display ``value`` — a (type, address) tuple — in the widgets."""
        value = CustomEditor.set_value(self, value)
        if value:
            self.editor.setText(value[1])
            idx = camelot.types.VirtualAddress.virtual_address_types.index(self._address_type or value[0])
            self.combo.setCurrentIndex(idx)
            icon = Icon('tango/16x16/devices/printer.png').getQIcon()
            # These icons don't exist any more in the new tango icon set
            # if str(self.combo.currentText()) == 'phone':
            #   icon = Icon('tango/16x16/devices/phone.png').getQIcon()
            if str(self.combo.currentText()) == 'fax':
                icon = Icon('tango/16x16/devices/printer.png').getQIcon()
            # if str(self.combo.currentText()) == 'mobile':
            #   icon = Icon('tango/16x16/devices/mobile.png').getQIcon()
            # if str(self.combo.currentText()) == 'im':
            #   icon = Icon('tango/16x16/places/instant-messaging.png').getQIcon()
            # if str(self.combo.currentText()) == 'pager':
            #   icon = Icon('tango/16x16/devices/pager.png').getQIcon()
            if str(self.combo.currentText()) == 'email':
                icon = Icon('tango/16x16/apps/internet-mail.png').getQIcon()
                #self.label.setFocusPolicy(Qt.StrongFocus)
                self.label.setAutoRaise(True)
                #self.label.setAutoFillBackground(True)
                self.label.setIcon(icon)
                self.label.setEnabled(self.editable)
                # NOTE(review): connect() runs on every set_value for emails,
                # so repeated calls stack duplicate mailClick connections —
                # confirm whether this is intended.
                self.label.clicked.connect(
                    lambda:self.mailClick(self.editor.text())
                )
            else:
                self.label.setIcon(icon)
                #self.label.setAutoFillBackground(False)
                self.label.setAutoRaise(True)
                self.label.setEnabled(self.editable)
                self.label.setToolButtonStyle(Qt.ToolButtonIconOnly)
#            self.update()
#            self.label.update()
#            self.layout.update()
            self.checkValue(value[1])
    def get_value(self):
        """Return the (type, address) tuple currently shown in the widgets."""
        value = (unicode(self.combo.currentText()), unicode(self.editor.text()))
        return CustomEditor.get_value(self) or value
    def set_enabled(self, editable=True):
        """Toggle editability; the mail button stays active only for emails."""
        self.combo.setEnabled(editable)
        self.editor.setEnabled(editable)
        if not editable:
            self.label.setEnabled(False)
        else:
            if self.combo.currentText() == 'email':
                self.label.setEnabled(True)
    def checkValue(self, text):
        """Validate `text` for the current type, painting the editor red when invalid."""
        if self.combo.currentText() == 'email':
            email = unicode(text)
            mailCheck = re.compile('^\S+@\S+\.\S+$')
            if not mailCheck.match(email):
                palette = self.editor.palette()
                palette.setColor(QtGui.QPalette.Active,
                                 QtGui.QPalette.Base,
                                 QtGui.QColor(255, 0, 0))
                self.editor.setPalette(palette)
            else:
                palette = self.editor.palette()
                palette.setColor(QtGui.QPalette.Active,
                                 QtGui.QPalette.Base,
                                 QtGui.QColor(255, 255, 255))
                self.editor.setPalette(palette)
        elif self.combo.currentText() == 'phone' \
         or self.combo.currentText() == 'pager' \
         or self.combo.currentText() == 'fax' \
         or self.combo.currentText() == 'mobile':
            # Numeric types accept digits and spaces only.
            number = unicode(text)
            numberCheck = re.compile('^[0-9 ]+$')
            if not numberCheck.match(number):
                palette = self.editor.palette()
                palette.setColor(QtGui.QPalette.Active,
                                 QtGui.QPalette.Base,
                                 QtGui.QColor(255, 0, 0))
                self.editor.setPalette(palette)
            else:
                palette = self.editor.palette()
                palette.setColor(QtGui.QPalette.Active,
                                 QtGui.QPalette.Base,
                                 QtGui.QColor(255, 255, 255))
                self.editor.setPalette(palette)
        else:
            # Any other type just requires non-empty text.
            Check = re.compile('^.+$')
            if not Check.match(unicode(text)):
                palette = self.editor.palette()
                palette.setColor(QtGui.QPalette.Active,
                                 QtGui.QPalette.Base,
                                 QtGui.QColor(255, 0, 0))
                self.editor.setPalette(palette)
            else:
                palette = self.editor.palette()
                palette.setColor(QtGui.QPalette.Active,
                                 QtGui.QPalette.Base,
                                 QtGui.QColor(255, 255, 255))
                self.editor.setPalette(palette)
    def editorValueChanged(self, text):
        """Live-validate while the user types."""
        self.checkValue(text)
    def mailClick(self, adress):
        """Open the platform mail client for `adress` via a mailto: URL."""
        url = QtCore.QUrl()
        url.setUrl('mailto:%s?subject=Subject'%str(adress))
        QtGui.QDesktopServices.openUrl(url)
    def emit_editing_finished(self):
        """Re-apply the current widget state and emit editingFinished."""
        self.value = []
        self.value.append(str(self.combo.currentText()))
        self.value.append(str(self.editor.text()))
        self.set_value(self.value)
        self.label.setFocus()
        # emiting editingFinished without a value for the mechanism itself will lead to
        # integrity errors
        if self.value[1]:
            self.editingFinished.emit()
    def set_background_color(self, background_color):
        """Set the editor's background, or return False when no color given."""
        if background_color:
            palette = self.editor.palette()
            palette.setColor(self.backgroundRole(), background_color)
            self.editor.setPalette(palette)
        else:
            return False
| kurtraschke/camelot | camelot/view/controls/editors/virtualaddresseditor.py | virtualaddresseditor.py | py | 7,474 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "customeditor.CustomEditor",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "customeditor.CustomEditor.__init__",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "customeditor.CustomEditor",
"line_number": 14,
"usage_type": "name"
},
{... |
32640335090 | # AUTHOR: Louis Tsiattalou
# DESCRIPTION: Match list items to closest tf-idf match in second list.
import pandas as pd
from tfidf_matcher.ngrams import ngrams
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
def matcher(original=None, lookup=None, k_matches=5, ngram_length=3):
    """Takes two lists, returns top `k` matches from `lookup` dataset.

    This function does this by:
    - Splitting the `lookup` list into ngrams.
    - Transforming the resulting ngram list into a TF-IDF Sparse Matrix.
    - Fit a NearestNeighbours Model to the matrix using the lookup data.
    - Transform the `original` list into a TF-IDF Sparse Matrix.
    - Calculates distances to all the `n-matches` nearest neighbours
    - Then extract the `original`, `n-matches` closest lookups, and calculate
    a match score (abs(1 - Distance to Nearest Neighbour))

    :param original: List of strings to generate ngrams from (defaults to []).
    :type original: list (of strings), or Pandas Series.
    :param lookup: List of strings to match against (defaults to []).
    :type lookup: list (of strings), or Pandas Series.
    :param k_matches: Number of matches to return.
    :type k_matches: int
    :param ngram_length: Length of Ngrams returned by `tfidf_matcher.ngrams` callable
    :type ngram_length: int
    :raises AssertionError: Throws an error if the datatypes in `original` aren't strings.
    :raises AssertionError: Throws an error if the datatypes in `lookup` aren't strings.
    :raises AssertionError: Throws an error if `k_matches` isn't an integer.
    :raises AssertionError: Throws an error if k_matches > len(lookup)
    :raises AssertionError: Throws an error if ngram_length isn't an integer
    :return: Returns a Pandas dataframe with the `original` list,
    `k_matches` columns containing the closest matches from `lookup`,
    as well as a Match Score for the closest of these matches.
    :rtype: Pandas dataframe
    """
    # Bug fix: the previous signature used mutable default arguments
    # (original=[], lookup=[]) shared across calls; None sentinels keep the
    # call-compatible interface without that hazard.
    original = [] if original is None else original
    lookup = [] if lookup is None else lookup

    # Assertions (isinstance replaces the `type(x) == type("string")` idiom).
    assert all(
        isinstance(x, str) for x in original
    ), "Original contains non-str elements!"
    assert all(
        isinstance(x, str) for x in lookup
    ), "Lookup contains non-str elements!"
    assert isinstance(k_matches, int), "k_matches must be an integer"
    assert k_matches < len(
        lookup
    ), "k_matches must be shorter than the total length of the lookup list"
    assert isinstance(ngram_length, int), "ngram_length must be an integer"

    # Enforce listtype, set to lower
    original = list(original)
    lookup = list(lookup)
    original_lower = [x.lower() for x in original]
    lookup_lower = [x.lower() for x in lookup]

    # Bind the configured ngram length into the TfidfVectorizer callable.
    def ngrams_user(string, n=ngram_length):
        return ngrams(string, n)

    # Generate Sparse TFIDF matrix from Lookup corpus
    vectorizer = TfidfVectorizer(min_df=1, analyzer=ngrams_user)
    tf_idf_lookup = vectorizer.fit_transform(lookup_lower)

    # Fit KNN model (cosine distance) to the lookup TFIDF matrix.
    nbrs = NearestNeighbors(n_neighbors=k_matches, n_jobs=-1, metric="cosine").fit(
        tf_idf_lookup
    )

    # Vectorize the originals with the SAME fitted vectorizer, then query.
    tf_idf_original = vectorizer.transform(original_lower)
    distances, lookup_indices = nbrs.kneighbors(tf_idf_original)

    # Extract, per original, the matched lookup names, their indices, and a
    # confidence score of 1 - rounded cosine distance.
    original_name_list = []
    confidence_list = []
    index_list = []
    lookup_list = []
    for i, lookup_index in enumerate(lookup_indices):
        original_name = original[i]
        lookups = [lookup[index] for index in lookup_index]
        confidence = [1 - round(dist, 2) for dist in distances[i]]
        original_name_list.append(original_name)
        index_list.append(lookup_index)
        confidence_list.append(confidence)
        lookup_list.append(lookups)

    # Convert each parallel list to its own dataframe, then bind columns.
    df_orig_name = pd.DataFrame(original_name_list, columns=["Original Name"])
    df_lookups = pd.DataFrame(
        lookup_list, columns=["Lookup " + str(x + 1) for x in range(0, k_matches)]
    )
    df_confidence = pd.DataFrame(
        confidence_list,
        columns=["Lookup " + str(x + 1) + " Confidence" for x in range(0, k_matches)],
    )
    df_index = pd.DataFrame(
        index_list,
        columns=["Lookup " + str(x + 1) + " Index" for x in range(0, k_matches)],
    )
    matches = pd.concat([df_orig_name, df_lookups, df_confidence, df_index], axis=1)

    # Reorder so each lookup's name / confidence / index columns sit together.
    lookup_cols = list(matches.columns.values)
    lookup_cols_reordered = [lookup_cols[0]]
    for i in range(1, k_matches + 1):
        lookup_cols_reordered.append(lookup_cols[i])
        lookup_cols_reordered.append(lookup_cols[i + k_matches])
        lookup_cols_reordered.append(lookup_cols[i + 2 * k_matches])
    matches = matches[lookup_cols_reordered]
    return matches
| LouisTsiattalou/tfidf_matcher | tfidf_matcher/matcher.py | matcher.py | py | 5,188 | python | en | code | 41 | github-code | 6 | [
{
"api_name": "tfidf_matcher.ngrams.ngrams",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.NearestNeighbors",
"line_number": 69,
"usag... |
14911244486 | import unittest
import time
import ddt
import json
from Public.cogfig import EXECL_PATH
from Interface.test_mock import test_mock_mi
# Mock-backed client used to fetch raw price samples.
test_send = test_mock_mi()
import math
from Public.read_excel import read_excel
from unittest import mock
wenjian = EXECL_PATH + '\\jekn.xlsx' # locate the matching case (Excel) file
# Open the index-price sheet (sheet name is the literal '指数价').
index_excel = read_excel(wenjian, '指数价')
# Previous index price (seed value; updated as tests run).
last_prices=9409.9
@ddt.ddt()
class TestClient(unittest.TestCase):
    """Index-price computation checks driven by ddt rows from the Excel sheet."""
    def test_fail_request(self,test):
        """Compute an index price from `test`, a list of price strings/numbers.

        NOTE(review): despite the `test_` prefix this is a helper that takes
        an extra argument, so unittest cannot run it directly — confirm naming.
        """
        # # instantiate via the method; f gets the test_send instance
        # f=test_send.test_send_requestr()
        # # use the return value as a mock
        # f=mock.Mock(return_value='404')
        # # call the attribute
        # print(type(f))
        # self.assertEqual(f(), '404')
        # index price
        # running total of the non-zero prices
        sum = 0
        # prices with non-zero amounts are kept in `prices`
        prices = []
        for i in range(len(test)):
            if float(test[i]) > 0:
                sum += float(test[i])
                prices.append(test[i])
        num = len(prices)
        # no usable prices: return 0 straight away
        if num == 0:
            return 0
        # exactly one usable price
        elif num == 1:
            global last_prices
            # is |current - previous| / previous greater than 0.25?
            if math.fabs(float(prices[0]) - float(last_prices)) / float(last_prices) > 0.25:
                # too far from the previous index price: return the previous one
                return last_prices
            else:
                # return the current index price
                # NOTE(review): missing `return` — this expression is discarded
                # and execution falls through to the averaging code below;
                # confirm whether `return prices[0]` was intended.
                prices[0]
        # exactly two usable prices
        elif num == 2:
            # difference between the first and second price
            dp = float(prices[0]) - float(prices[1])
            # is the difference non-positive?
            if dp <= 0: # dp <= 0 is the normal case
                # 1. take the absolute value
                # 2. check whether |dp| / price1 exceeds 0.25
                if -dp / float(prices[0]) > 0.25: # more than 0.25 apart is abnormal
                    # |price1 - previous| <= |price2 - previous|
                    if math.fabs(float(prices[0]) - last_prices) <= math.fabs(float(prices[1]) - last_prices):
                        print(prices[0])
                        # return price1
                        return prices[0]
                    else:
                        # return price2
                        return prices[1]
                else:
                    # total / count = average index price
                    index = sum / float(num)
                    print("指数价", index)
                    last_prices= index
                    return index
            else:
                # mirrored check for a positive difference
                if dp / float(prices[1]) > 0.25:
                    if math.fabs(prices[0] - last_prices) <= math.fabs(prices[1] - last_prices):
                        return prices[0]
                    else:
                        return prices[1]
                else:
                    return sum / float(num)
        # three or more prices: drop >3% outliers and recurse
        avg = sum / float(num)
        # count of abnormal (outlier) prices
        nums = 0
        for i in range(len(prices)):
            dv = math.fabs((float(prices[i]) - avg) / avg)
            print(dv)
            if dv > 0.03:
                nums += 1
                prices[i] = 0
        if nums == 0:
            print(nums)
            return avg
        return self.test_fail_request(prices)
    # # normal values
    # def test_1_average_value(self):
    #     s=1
    #     while True:
    #         if s <= 1:
    #             test = test_send.test_send_requestr()
    #             # if r_binance
    #             print(test)
    #
    #             price = self.test_fail_request(test)
    #             print('指数值', price)
    #             if price > 0:
    #                 last_prices = price
    #             time.sleep(0.5)
    #             s += 1
    #         else:
    #             break
    # the index sheet row holds comma-separated prices
    @ddt.data(*index_excel.next())
    def test_2_to_value(self,data):
        """Feed one Excel row's comma-separated prices through the filter."""
        s=1
        test_list = data['指数价']
        while True:
            if s <= 1:
                test = test_send.test_send_requestr()
                print("分割", type(test_list))
                last_list=test_list.split(',')
                print("分割",type(last_list))
                price_list=[]
                # filter: values deviating from the previous index price by
                # more than 0.25 are replaced with 0
                for i in range(0,len(last_list)):
                    global last_prices
                    f=math.fabs(float(last_prices) - float(last_list[i])) / float(last_prices)
                    print(f)
                    if f> 0.25:
                        price_list.append(0)
                    else:
                        #last_prices = last_list[i]
                        price_list.append(last_list[i])
                # wrap the filtered list in a mock and call it
                test = mock.Mock(return_value=price_list)
                test_list=test()
                price = self.test_fail_request(test_list)
                print('指数值',price)
                time.sleep(0.5)
                if float(price)>0:
                    last_prices = price
                s += 1
            else:
                break
# Run the ddt-parameterised test case when executed directly.
if __name__ == '__main__':
    unittest.main()
| LiuYaowei-Geek/deep | test_Case/mock.py | mock.py | py | 5,537 | python | zh | code | 1 | github-code | 6 | [
{
"api_name": "Interface.test_mock.test_mock_mi",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "Public.cogfig.EXECL_PATH",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "Public.read_excel.read_excel",
"line_number": 12,
"usage_type": "call"
},
{
... |
5366659164 | import datetime
import logging
import os.path
import x509
LOG = logging.getLogger(__name__)


class CertWatcher(object):
    """Watch an on-disk certificate and refresh it through a CA driver.

    ``check_and_update`` generates a CSR from the private key at
    ``key_path`` and asks ``ca_driver`` to sign it whenever the certificate
    at ``cert_path`` is missing or expires within ``refresh_window``
    seconds, then invokes ``on_refresh_success`` / ``on_refresh_failure``.
    """

    def __init__(self, key_path, cert_path, common_name, ca_driver,
                 on_refresh_success=None, on_refresh_failure=None,
                 refresh_window=None):
        # The private key must already exist; the cert can be created later.
        if not os.path.isfile(key_path):
            raise Exception("key needs to exist")
        self.key_path = key_path
        self.cert_path = cert_path
        self.ca_driver = ca_driver
        self.on_refresh_success = on_refresh_success
        self.on_refresh_failure = on_refresh_failure
        self.common_name = common_name
        self.refresh_window = refresh_window

    @property
    def key(self):
        """Contents of the private key file."""
        # Bug fix: the original opened the file without closing it; the
        # context manager releases the handle promptly.
        with open(self.key_path) as key_file:
            return key_file.read()

    @property
    def cert(self):
        """Contents of the certificate file."""
        # Same leak fix as `key`.
        with open(self.cert_path) as cert_file:
            return cert_file.read()

    def get_expire_date(self):
        """Expiry datetime parsed from the current certificate."""
        return x509.get_expire_date(self.cert)

    def seconds_until_expiry(self):
        """Seconds from now until expiry (negative if already expired)."""
        diff = self.get_expire_date() - datetime.datetime.now()
        return diff.total_seconds()

    def _replace_cert(self, cert_contents):
        """Overwrite the certificate file with `cert_contents`."""
        LOG.info("Replacing certificate at %s" % self.cert_path)
        with open(self.cert_path, "w") as cert_file:
            cert_file.write(cert_contents)

    def _will_be_expired(self, date):
        """True if the certificate will already be expired at `date`."""
        return date > self.get_expire_date()

    def _expires_in_window(self):
        """True when the cert expires within `refresh_window` seconds.

        With no window configured, the cert is always treated as expiring,
        which forces a refresh on every check.
        """
        now = datetime.datetime.now()
        if not self.refresh_window:
            LOG.debug("No refresh window set, assuming expired")
            return True
        window = now + datetime.timedelta(0, self.refresh_window)
        if self._will_be_expired(window):
            LOG.info("%s is expired inside window of %s"
                     % (self.cert_path, self.refresh_window))
            return True
        LOG.info("Certificate valid within window of %s seconds"
                 % self.refresh_window)
        return False

    def _cert_exists(self):
        """True if a certificate file is present on disk."""
        if not os.path.isfile(self.cert_path):
            LOG.info("No cert found at %s" % self.cert_path)
            return False
        return True

    def is_invalid_cert(self):
        """True when the cert is absent or expires inside the window."""
        return not self._cert_exists() or self._expires_in_window()

    def check_and_update(self):
        """Refresh the certificate if needed and fire the result callback.

        Signing errors are logged rather than raised, and reported through
        `on_refresh_failure`, so a watcher loop stays alive.
        """
        LOG.info('Checking validity of certificate %s' % self.cert_path)
        if self.is_invalid_cert():
            csr = x509.generate_csr(self.key, self.common_name)
            cert = None
            try:
                cert = self.ca_driver.sign(csr)
            except Exception as e:
                LOG.exception("Could not retrieve cert\n%s", e)
            if cert:
                self._replace_cert(cert)
                self.on_refresh_success()
            else:
                self.on_refresh_failure()
| takac/cathead | cathead/certwatch.py | certwatch.py | py | 2,756 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.path.isfile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path",
... |
23235971280 | """
__/\\\\\\\\\\\\______________________/\\\\\\\\\\\____/\\\________/\\\_
_\/\\\////////\\\__________________/\\\/////////\\\_\/\\\_______\/\\\_
_\/\\\______\//\\\________________\//\\\______\///__\/\\\_______\/\\\_
_\/\\\_______\/\\\_____/\\\\\______\////\\\_________\/\\\_______\/\\\_
_\/\\\_______\/\\\___/\\\///\\\_______\////\\\______\/\\\_______\/\\\_
_\/\\\\\\\\\\\\/____\///\\\\\/___\///\\\\\\\\\\\/____\///\\\\\\\\\/___
_\////////////________\/////_______\///////////________\/////////_____
Created by Tomáš Sandrini
"""
from . import __version__
import argparse
import os
import shutil
import sys
from datetime import datetime
from . import handler
from .actions import ValidateMonths, ValidateYears
def get_args(args):
    """
    Get the script arguments.

    :param args: raw argument list to parse (normally ``sys.argv[1:]``).
    :return: the parsed ``argparse.Namespace``.
    """
    description = "DoSU - pandoc note writing utility"
    arg = argparse.ArgumentParser(description=description)
    # Subject actions: create, compile, write, delete.
    arg.add_argument(
        '-M',
        metavar='make',
        nargs='+',
        help="Make (create) given subjects"
    )
    arg.add_argument(
        '-C',
        metavar='compile',
        nargs='+',
        help="Compile notes for a given subjects"
    )
    arg.add_argument(
        '-W',
        metavar='write',
        help="Start note taking for a subject"
    )
    arg.add_argument(
        '-D',
        metavar='delete',
        nargs='+',
        help="Delete subjects"
    )
    # Date filters for -C; validated by the custom argparse actions.
    arg.add_argument(
        '-m',
        metavar='month',
        nargs='+',
        action=ValidateMonths,
        help="months"
    )
    arg.add_argument(
        '-y',
        metavar='year',
        nargs='+',
        action=ValidateYears,
        help="years"
    )
    # Informational / output-control flags.
    arg.add_argument(
        '-v',
        action='store_true',
        help="Print current dosu version"
    )
    arg.add_argument(
        '-l',
        action='store_true',
        help="List all subjects"
    )
    arg.add_argument(
        '-q',
        action='store_true',
        help="Quiet mode, don't print anything and \
        don't display notifications."
    )
    return arg.parse_args(args)
def process_args(args):
    """
    Process args.

    Dispatches each parsed flag to its ``handler`` call; ``-C`` defaults its
    year/month selection to today's date when not given explicitly.
    """
    # NOTE(review): `and False` makes this guard unreachable (dead code),
    # presumably disabled during development — confirm before re-enabling.
    # It also inspects sys.argv rather than the `args` parameter.
    if not len(sys.argv) > 1 and False:
        print("error: dosu needs to be given arguments to run.\n"
              "       Refer to \"dosu -h\" for more info.")
        sys.exit(1)
    if args.q:
        # Quiet mode: route all output to the null device.
        sys.stdout = sys.stderr = open(os.devnull, 'w')
    if args.M:
        handler.make(args.M)
    if args.D:
        handler.delete(args.D)
    if args.W:
        handler.write(args.W)
    if args.C:
        today = datetime.today()
        years = args.y if args.y != None else [today.year]
        # NOTE(review): this `months` assignment is immediately overwritten
        # by the if/else below — dead store.
        months = args.m if args.m != None else [today.month]
        if args.y:
            # Explicit years: default to all twelve months.
            months = args.m if args.m else [i for i in range(13)][1:]
        else:
            months = args.m if args.m else [today.month]
        handler.compile(subjects=args.C, years=years, months=months)
    if args.l:
        handler.list()
    if args.v:
        print("DoSU ", __version__)
        sys.exit(0)
def main():
    """Entry point: parse the CLI arguments and act on them."""
    process_args(get_args(sys.argv[1:]))


if __name__ == "__main__":
    main()
| tsandrini/dosu | dosu/__main__.py | __main__.py | py | 3,242 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "actions.ValidateMonths",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "actions.ValidateYears",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "... |
import cv2
import numpy as np

# Load the source and kernel images (both as single-channel grayscale).
source_image = cv2.imread('/home/xpirr/workspace/python/DSP/HW2/Resim6_8.jpg', cv2.IMREAD_GRAYSCALE)
# NOTE(review): this path points at a .py file, not an image — cv2.imread
# will return None for it; confirm the intended kernel image path.
kernel_image = cv2.imread('/home/xpirr/workspace/python/DSP/HW2/EvrenIspiroglu.py', cv2.IMREAD_GRAYSCALE)

# Convert the kernel image to float32 and normalize so its sum is 1.
kernel_image = np.array(kernel_image, dtype=np.float32)
kernel_image = kernel_image / np.sum(kernel_image)

# Generalized from a hard-coded 3x3 kernel: pad by half the kernel size in
# each direction so the output keeps the source's shape. (The original code
# assumed a 3x3 kernel and would broadcast-fail for any other size.)
kh, kw = kernel_image.shape
ph, pw = kh // 2, kw // 2
padded_image = np.pad(source_image, ((ph, ph), (pw, pw)), 'constant')

# Compute the output image using direct 2D convolution, O(H*W*kh*kw).
# NOTE: the output inherits the source dtype (uint8), so sums are truncated.
output_image = np.zeros_like(source_image)
for i in range(source_image.shape[0]):
    for j in range(source_image.shape[1]):
        patch = padded_image[i:i + kh, j:j + kw]
        output_image[i, j] = np.sum(patch * kernel_image)

# Display the input and output images until a key is pressed.
cv2.imshow('Source Image', source_image)
cv2.imshow('Kernel Image', kernel_image)
cv2.imshow('Output Image', output_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
| ispiroglu/DSP-HW2 | Demo3.py | Demo3.py | py | 1,091 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
... |
1277452974 | """Default settings."""
import logging
# Default configuration, grouped by concern before assembly.
_log_defaults = {
    'level': "debug",  # log level
}
_auth_defaults = {
    'required': False,  # set to `True` to enable authentication
    'basic_auth': {
        'path': '/dev/null',  # path to htpasswd file
    },
}
_server_defaults = {
    'port': 1779,  # port :-P
}

settings = {
    'log': _log_defaults,
    'auth': _auth_defaults,
    'server': _server_defaults,
    'staticpath': '/dev/null',  # path to static files
    'packagepath': '/dev/null',  # path to qgis plugins
}

# Map the configured level name onto the logging module's constant.
_level_name = settings['log']['level'].upper()
logging.basicConfig(
    level=getattr(logging, _level_name),
)
| t4k1t/qgisrv | qgisrv/settings.py | settings.py | py | 552 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 22,
"usage_type": "call"
}
] |
34084173801 | from django.conf.urls.defaults import patterns, url
from django.template.defaultfilters import slugify
from rc.resources.views import ResourceItemListView
from rc.resources.apps.operations import models
def green_building_url(url_string, building_type, image_url=None,
                       image_alt=None, image_caption=None,
                       buildings_name=None, model=models.CampusGreenBuilding):
    """Build one url() entry for a green-building listing page.

    The page lists published buildings of *building_type*, ordered by type,
    certification and organization, with optional hero image metadata.
    """
    if not buildings_name:
        # Default: drop the leading word (e.g. "Green") and lowercase the rest.
        buildings_name = ' '.join(building_type.split()[1:]).lower()
    queryset = model.objects.published().filter(
        type__type=building_type).order_by(
        'type', 'certification', 'organization__name')
    view = ResourceItemListView.as_view(
        model=model,
        queryset=queryset,
        template_name='operations/campusgreenbuilding_list.html')
    extra_context = {'cert_order': dict(models.CampusGreenBuilding.LEED_LEVELS),
                     'title': building_type,
                     'image_url': image_url,
                     'image_alt': image_alt,
                     'image_caption': image_caption,
                     'buildings_name': buildings_name,
                     'member_only': True}
    return url(url_string, view, name=slugify(building_type),
               kwargs=extra_context)
# URL table for campus operations resource listings. Each entry wires a
# published queryset into ResourceItemListView with per-page template context.
urlpatterns = patterns('',
    url(r'^campus-alternative-transportation-websites$',
        ResourceItemListView.as_view(
            model=models.TransportationWebsite,
            queryset=models.TransportationWebsite.objects.published().order_by(
                'organization__name')),
        name='transportation-websites',
        kwargs={'member_only': True, 'title': 'Campus Alternative Transportation Websites'}),
    url(r'^bottled-water-elimination-and-reduction$',
        ResourceItemListView.as_view(
            model=models.BottledWaterBan,
            queryset=models.BottledWaterBan.objects.published().order_by(
                'type', 'organization__name')),
        name='bottled-water-bans',
        kwargs={'type_list': [ level[0] for level in
                               models.BottledWaterBan.BAN_TYPES ],
                'type_dict': dict(models.BottledWaterBan.BAN_TYPES),
                'title': 'Campus Bottled Water Bans and Reduction Campaigns',
                'member_only': True}),
    url(r'^campus-building-energy-dashboards$',
        ResourceItemListView.as_view(
            model=models.BuildingDashboard,
            queryset=models.BuildingDashboard.objects.published().order_by(
                'partner__name', 'organization__name')),
        name='building-dashboards',
        kwargs={'title': 'Campus Building Energy Dashboards',
                'member_only': True}),
    url(r'^biodiesel-campus-fleets$',
        ResourceItemListView.as_view(
            model=models.BiodieselFleet,
            queryset=models.BiodieselFleet.objects.published().order_by(
                'production', 'organization__country',
                'organization__name')),
        name='biodiesel-fleets',
        kwargs={'member_only': True,
                'production_types':
                    dict(models.BiodieselFleet.PRODUCTION_TYPE)}),
    url(r'^campus-bicycle-plans$',
        ResourceItemListView.as_view(
            model=models.BicyclePlan,
            queryset=models.BicyclePlan.objects.published().order_by(
                'organization__name')),
        name='bicycle-plans',
        kwargs={'member_only': True}),
    url(r'^campus-car-bans$',
        ResourceItemListView.as_view(
            model=models.CarBan,
            queryset=models.CarBan.objects.published().order_by(
                '-type', 'organization__name')),
        name='car-bans',
        kwargs={'ban_types': dict(models.CarBan.BAN_TYPES)}),
    url(r'^campus-commuter-surveys$',
        ResourceItemListView.as_view(
            model=models.CommuterSurvey,
            queryset=models.CommuterSurvey.objects.published().order_by(
                'type', 'organization__name')),
        name='commuter-surveys',
        kwargs={'survey_types': dict(models.CommuterSurvey.SURVEY_TYPES),
                'member_only': True}),
    url(r'^campus-electric-vehicle-fleets$',
        ResourceItemListView.as_view(
            model=models.ElectricFleet,
            queryset=models.ElectricFleet.objects.published().order_by(
                'organization__country', 'organization__name')),
        name='electric-fleets',
        kwargs={'member_only': True}),
    # Fix: the original declared this exact 'campus-energy-plans' entry twice
    # (same regex, view, name and kwargs); the redundant copy was removed.
    url(r'^campus-energy-plans$',
        ResourceItemListView.as_view(
            model=models.EnergyPlan,
            queryset=models.EnergyPlan.objects.published().order_by(
                'organization__name')),
        name='energy-plans',
        kwargs={'member_only': True}),
    url(r'^campus-energy-websites$',
        ResourceItemListView.as_view(
            model=models.EnergyWebsite,
            queryset=models.EnergyWebsite.objects.published().order_by(
                'organization__name')),
        name='energy-websites'),
    url(r'^campus-global-warming-commitments$',
        ResourceItemListView.as_view(
            model=models.GlobalWarmingCommitment,
            queryset=models.GlobalWarmingCommitment.objects.published().order_by(
                'organization__name', 'date')),
        kwargs={'member_only': True},
        name='global-warming-commitments',
        ),
    url(r'^campus-hybrid-vehicle-fleets$',
        ResourceItemListView.as_view(
            model=models.HybridFleet,
            queryset=models.HybridFleet.objects.published().order_by(
                'organization__country', 'organization__name')),
        name='hybrid-fleets',
        kwargs={'member_only': True}),
    url(r'^campus-recycling-and-waste-minimization-websites$',
        ResourceItemListView.as_view(
            model=models.RecyclingWebsite,
            queryset=models.RecyclingWebsite.objects.published().order_by(
                'organization__name')),
        name='recycling-websites',
        kwargs={'title': 'Campus Recycling & Waste Minimization Websites',
                'member_only': True}),
    url(r'^campus-water-conservation-efforts$',
        ResourceItemListView.as_view(
            model=models.WaterConservationEffort,
            queryset=models.WaterConservationEffort.objects.published().order_by(
                'organization__country', 'organization__name')),
        name='water-conservation-efforts',
        kwargs={'member_only': True}),
    url(r'^wind-power-campus-1$',
        ResourceItemListView.as_view(
            model=models.WindTurbine,
            queryset=models.WindTurbine.objects.published().order_by(
                '-size', 'organization__name')),
        name='wind-turbines',
        kwargs={'member_only': True,
                'title': 'Wind Turbine Installations on Campus'}),
    url(r'^carsharing-campus$',
        ResourceItemListView.as_view(
            model=models.CarShare,
            queryset=models.CarShare.objects.published().order_by(
                'partner__name', 'organization__name')),
        name='car-shares',
        kwargs={'member_only': True}),
    url(r'^renewable-energy-research-centers$',
        ResourceItemListView.as_view(
            model=models.RenewableResearchCenter,
            queryset=models.RenewableResearchCenter.objects.published().order_by(
                'organization__name')),
        name='renewable-research-centers',
        kwargs={
            'title': 'Renewable Energy Research Centers',
            'member_only': True,
        }),
    url(r'^campus-installations-stationary-fuel-cells$',
        ResourceItemListView.as_view(
            model=models.FuelCell,
            queryset=models.FuelCell.objects.published().order_by('-size',
                'organization__name')),
        name='fuel-cells',
        kwargs={
            'title': 'Campus Installations of Stationary Fuel Cells',
            'member_only': True,
        }),
    url(r'^sustainable-dining-initiatives-campus$',
        ResourceItemListView.as_view(
            model=models.DiningInitiative,
            queryset=models.DiningInitiative.objects.published().order_by(
                'ownership', 'organization__name')),
        name='dining-initiatives',
        kwargs={'owners': dict(models.DiningInitiative.OWNERS),
                'member_only': True}),
    url(r'^campus-greenhouse-gas-emissions-inventories$',
        ResourceItemListView.as_view(
            model=models.GHGInventory,
            queryset=models.GHGInventory.objects.published().order_by(
                'methodology', 'organization__name')),
        name='ghg-inventories',
        kwargs={'methodology_types': dict(models.GHGInventory.METHODOLOGY_TYPES),
                'member_only': False}),
    url(r'^sustainable-landscaping-campus$',
        ResourceItemListView.as_view(
            model=models.SustainableLandscape,
            queryset=models.SustainableLandscape.objects.published().order_by(
                'organization__name')),
        name='sustainable-landscapes',
        kwargs={
            'title': 'Sustainable Landscaping Initiatives on Campus',
            'member_only': True,
        }),
    url(r'^links-related-sustainable-purchasing-campus$',
        ResourceItemListView.as_view(
            model=models.PurchasingLink,
            queryset=models.PurchasingLink.objects.published().order_by(
                'type', 'organization__name')),
        name='purchasing-links',
        kwargs={'type_list': dict(models.PurchasingLink.LINK_TYPES),
                'title': 'Sustainable Purchasing Initiatives on Campus',
                'member_only': True}),
    url(r'^campus-universal-transit-passes$',
        ResourceItemListView.as_view(
            model=models.TransitPass,
            queryset=models.TransitPass.objects.published().order_by(
                '-type', 'organization__country',
                'organization__name')),
        name='transit-passes',
        kwargs={
            'type_list': dict(models.TransitPass.PASS_TYPES),
            'member_only': True,
        }),
    # Green-building listing pages share a common factory.
    green_building_url(
        url_string=r'^athletic-recreation-centers-stadiums$',
        building_type='Green Athletic Buildings',
        image_url='http://www.aashe.org/files/univ_of_arizona_rec_center_0.jpg',
        image_alt='Univ Arizona',
        image_caption='University of Arizona Recreation Center'),
    green_building_url(
        url_string=r'^green-student-centers$',
        building_type='Green Student Centers',
        image_url='http://www.aashe.org/files/sju_mckeown_0.jpg',
        image_alt='SJU McKeown',
        image_caption='St. John\'s University McKeown Center',
        ),
    green_building_url(
        url_string=r'^green-libraries-campus$',
        building_type='Green Libraries on Campus',
        image_url='http://www.aashe.org/files/thompson_library_1.jpg',
        image_alt='OSU Thompson Library',
        image_caption='Ohio State University Thompson Library',
        buildings_name='libraries',
        ),
    green_building_url(
        url_string=r'^green-residence-halls$',
        building_type='Green Residence Halls',
        image_url='http://www.aashe.org/files/ashdown_house_mit.jpg',
        image_alt='MIT Ashdown House',
        image_caption='MIT Ashdown House',
        # Model is empty, dunno why (mt)
        model=models.GreenResidenceHall,
        ),
    green_building_url(
        url_string=r'^green-science-buildings$',
        building_type='Green Science Buildings',
        image_url='http://www.aashe.org/files/brandeis.jpg',
        image_alt='Brandeis University Shapiro Science Center',
        image_caption='Brandeis University Shapiro Science Center',
        ),
    )
| AASHE/django-irc | rc/resources/apps/operations/urls.py | urls.py | py | 12,244 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rc.resources.apps.operations.models.CampusGreenBuilding",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "rc.resources.apps.operations.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.defaults.url",
"line_number":... |
16734122984 | from typing import Any
from fastapi import FastAPI, Response, Request
from pathlib import Path
from pydantic import BaseModel
from autogoal.utils._storage import inspect_storage
import uvicorn
from autogoal_remote.distributed.proxy import loads, dumps, encode, decode
class Body(BaseModel):
    """Request body for the prediction endpoint.

    # values carries the pickled (dill-encoded) input data produced by
    # dumps() on the client side; it is decoded with loads() in eval().
    """
    values: Any
app = FastAPI()
@app.get("/input")
async def input(request: Request):
    """Expose the semantic input types of the served model's best pipeline.

    Returns both a human-readable name and a dill-pickled copy of the
    type object so remote clients can reconstruct it.
    """
    input_types = request.app.model.best_pipeline_.input_types
    return {
        "semantic type name": str(input_types),
        "pickled data": dumps(input_types, use_dill=True),
    }
@app.get("/output")
async def output(request: Request):
    """Expose the output type produced by the pipeline's final algorithm.

    Returns both a human-readable name and a dill-pickled copy of the
    type object so remote clients can reconstruct it.
    """
    last_algorithm = request.app.model.best_pipeline_.algorithms[-1]
    output_type = last_algorithm.__class__.output_type()
    return {
        "semantic type name": str(output_type),
        "pickled data": dumps(output_type, use_dill=True),
    }
@app.get("/inspect")
async def inspect(request: Request):
    """Return a textual inspection of the model's export storage."""
    export_path = Path(request.app.model.export_path)
    report = inspect_storage(export_path)
    return {"data": str(report)}
@app.post("/")
async def eval(t: Body, request: Request):
    """Run the served model's predict() on the pickled values in the body.

    The input arrives dill-pickled in ``t.values``; the prediction is
    pickled again before being returned to the caller.
    """
    decoded_input = loads(t.values)
    prediction = request.app.model.predict(decoded_input)
    return {"data": dumps(prediction)}
def run(model, ip=None, port=None):
    """Attach *model* to the app and serve it over HTTP with uvicorn.

    Args:
        model: trained model exposing ``best_pipeline_`` and ``predict``.
        ip: interface to bind; falls back to "0.0.0.0" when falsy.
        port: TCP port to listen on; falls back to 8000 when falsy.
    """
    app.model = model
    host = ip or "0.0.0.0"
    uvicorn.run(app, host=host, port=port or 8000)
| autogoal/autogoal-remote | autogoal_remote/production/server.py | server.py | py | 1,689 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "fastapi.Request",
"... |
62345004 | from django.urls import path, include
from core import views
# Route table for the shipping-label app.  All handlers live in core.views;
# routes taking <int:id> receive the label's primary key as `id`.
urlpatterns = [
    path('', views.index, name='index'),  # landing page
    path('register/',views.register, name='register'),  # user sign-up
    path('home/',views.home, name='home'),  # authenticated dashboard
    path('history/', views.history, name='history'),  # previously generated labels
    path('generate-new-label/', views.generate_new_label, name='generate-new-label'),
    path('edit-label/<int:id>/', views.edit_label, name='edit-label'),
    path('delete-label/<int:id>/', views.delete_label, name='delete-label'),
    path('print-label/<int:id>/', views.print_label, name='print-label'),
    path('logout/', views.logout, name='logout'),  # end the session
]
| lquresh52/shipping-label-generaor | core/urls.py | urls.py | py | 636 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "core.views.index",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "core.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
... |
24255720694 | import pandas as pd
import os
import time
from xlrd import XLRDError
start_time = time.time()

# Collect every eBay export workbook from the top-level folder only:
# dirs.clear() stops os.walk from descending into subdirectories.
ebay_files = []
for root, dirs, files in os.walk(r'D:\Projects\shopContent\ebay'):
    ebay_files.extend(os.path.join(root, file) for file in files if file.endswith('.xlsx'))
    dirs.clear()

# Read the "Listings" sheet of each workbook (skipping 2 header rows) and
# combine them in one pass.  Collecting frames and calling pd.concat once
# replaces the old quadratic DataFrame.append loop (append was removed in
# pandas 2.0).
print("Creating ebay dataframe!")
frames = []
for file in ebay_files:
    try:
        frames.append(pd.read_excel(file, sheet_name="Listings", skiprows=2))
    except XLRDError:
        print(f"No sheet named \'Listings\' in file - {file}")
if frames:
    ebay_df = pd.concat(frames, ignore_index=True)
else:
    # No usable workbooks: fall back to an empty frame with the expected
    # columns so the transformations below still run without KeyErrors.
    ebay_df = pd.DataFrame(columns=['Custom Label (SKU)', 'C:Brand', 'C:Type', 'C:Season'])

# Load the Shopify product export.
print("Creating shopify dataframe!")
shopify_df = pd.read_csv(r'D:\Projects\shopContent\shopify\shopify.csv', sep=',', encoding="utf-8", header=0)

# Replace the literal '||' separator with ', ' in 'C:Season'.  regex=False
# makes the intent explicit and avoids the invalid-escape regex pattern
# ("\|\|") the old code relied on.
print("Replacing '||' symbols in ebay dataframe!")
ebay_df['C:Season'] = ebay_df['C:Season'].str.replace('||', ', ', regex=False)

# Keep only the columns needed for the join.
print("Excluding columns in ebay dataframe!")
ebay_df = ebay_df[['Custom Label (SKU)', 'C:Brand', 'C:Type', 'C:Season']]

# Snapshot both frames as xlsx for manual inspection.  (The deprecated
# `encoding=` kwarg was dropped: Excel writers always emit UTF-8.)
print("Export ebay and shopify dataframes to xlsx!")
ebay_df.to_excel(r'D:\Projects\shopContent\ebay\ebay.xlsx', index=False, header=True)
shopify_df.to_excel(r'D:\Projects\shopContent\shopify\shopify.xlsx', index=False, header=True)

# Align eBay column names with Shopify's schema for the merge below.
print("Renaming columns in ebay dataframe!")
ebay_df.rename(columns={'Custom Label (SKU)': 'Variant SKU', 'C:Brand': 'Vendor',
                        'C:Type': 'Type', 'C:Season': 'Tags'}, inplace=True)

# Drop Shopify's Vendor/Type/Tags so the merged frame takes them from eBay.
print("Excluding columns in shopify dataframe!")
shopify_df = shopify_df[['Handle', 'Title', 'Body (HTML)', 'Published',
                         'Option1 Name', 'Option1 Value', 'Option2 Name', 'Option2 Value',
                         'Option3 Name', 'Option3 Value', 'Variant SKU', 'Variant Grams',
                         'Variant Inventory Tracker', 'Variant Inventory Qty',
                         'Variant Inventory Policy', 'Variant Fulfillment Service',
                         'Variant Price', 'Variant Compare At Price',
                         'Variant Requires Shipping', 'Variant Taxable', 'Variant Barcode',
                         'Image Src', 'Image Position', 'Image Alt Text', 'Gift Card',
                         'SEO Title', 'SEO Description',
                         'Google Shopping / Google Product Category', 'Google Shopping / Gender',
                         'Google Shopping / Age Group', 'Google Shopping / MPN',
                         'Google Shopping / AdWords Grouping',
                         'Google Shopping / AdWords Labels', 'Google Shopping / Condition',
                         'Google Shopping / Custom Product', 'Google Shopping / Custom Label 0',
                         'Google Shopping / Custom Label 1', 'Google Shopping / Custom Label 2',
                         'Google Shopping / Custom Label 3', 'Google Shopping / Custom Label 4',
                         'Variant Image', 'Variant Weight Unit', 'Variant Tax Code',
                         'Cost per item']]

# Normalise SKUs on both sides: strip the '-', 'A' and 'B' markers in a
# single regex pass and keep the first 6 characters so the join keys match.
print("Replacing unnecessary symbols in ebay dataframe!")
ebay_df['Variant SKU'] = ebay_df['Variant SKU'].str.replace('[-AB]', '', regex=True).str[:6]

print("Replacing unnecessary symbols in shopify dataframe!")
shopify_df['Variant SKU'] = shopify_df['Variant SKU'].str.replace("[-'AB]", '', regex=True).str[:6]

# Keep one row per normalised SKU on the eBay side before merging.
print("Deleting duplicates in ebay dataframe!")
ebay_df = ebay_df.drop_duplicates(subset=['Variant SKU'], keep='first')

# Left join: every Shopify row, enriched with Vendor/Type/Tags from eBay.
print('Joining shopify_df and ebay_df')
join_ebay_shopify_df = pd.merge(shopify_df, ebay_df, on='Variant SKU', how='left')

# Blank the eBay-sourced columns on rows with a missing SKU, using a
# vectorised mask instead of the old per-row iterrows() loop.
print("Setting blank value in cell where 'Variant SKU' is null")
missing_sku = join_ebay_shopify_df['Variant SKU'].isnull()
join_ebay_shopify_df.loc[missing_sku, ['Vendor', 'Type', 'Tags']] = ''

# Write the merged catalogue.
print("Export final dataframe to xlsx!")
join_ebay_shopify_df.to_excel(r'D:\Projects\shopContent\final.xlsx', index=False, header=True)

# time spent for execution
end_time = time.time()
print(f"\nTime spent: {end_time-start_time}")
| bfesiuk/shopContent | creating.py | creating.py | py | 4,995 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"u... |
42743009421 | from setuptools import find_packages
from setuptools import setup
package_name = 'camera_calibration'

# Declarative metadata for the ROS 2 port of camera_calibration, collected
# in one mapping and handed to setuptools in a single call.
_setup_kwargs = dict(
    name=package_name,
    version='1.12.23',
    packages=find_packages(exclude=['test']),
    data_files=[
        # ament resource-index marker so ROS 2 tooling can discover the package
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
    ],
    install_requires=['setuptools'],
    author='James Bowman',
    author_email='vincent.rabaud@gmail.com',
    zip_safe=True,
    keywords=['ROS', 'camera_calibration'],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Topic :: Software Development',
    ],
    description='camera_calibration for ROS2',
    license='Apache License, Version 2.0',
    tests_require=['pytest'],
    entry_points={
        'console_scripts': [
            'cameracalibrator = camera_calibration.nodes.cameracalibrator:main',
            'cameracheck = camera_calibration.nodes.cameracheck:main',
        ],
    },
)

setup(**_setup_kwargs)
| ahuizxc/ros2_camera_calibration | setup.py | setup.py | py | 1,118 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 9,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.