seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
70397498577 | '''
File contains functions for cleaning the raw data from Beer Advocate
'''
def cleanDigits(unformatted_score):
    '''
    Normalize a raw score string to a float.

    A fraction-style score such as '9/10' is converted to its ratio (0.9);
    a plain numeric string such as '4.5' is returned as a float.
    (Fixes the original behavior of silently returning None when no '/'
    separator was present.)
    '''
    import re
    scsplit = re.split('/', unformatted_score)
    ## If there was a character to split on, normalize it
    if len(scsplit) > 1:
        score = float(scsplit[0]) / float(scsplit[1])  ### Split and format
        return score
    # No separator: the whole string is already the numeric score.
    return float(scsplit[0])
def cleanText(unformatted_text):
    '''
    Strip surrounding whitespace from raw text and return the result.

    (Fixes a NameError in the original, which returned the undefined
    name `text` instead of the local `f_text`.)
    '''
    f_text = unformatted_text.strip()
    ## !!! How to fix style specialization to read right, not just
    return f_text
def cleanString(unformatted_text):
    '''
    Function to be applied across all data loaded
    Cleans character data of non utf-8 characters
    Fixes Score data to read as percentage, rather than '9/10' str

    Note: the original routed on re.match('\\w+', ...), but \\w also
    matches digits, so a score like '9/10' was sent to cleanText and
    never normalized.  We now detect the explicit 'N/M' score pattern
    first and fall back to text cleaning otherwise.
    '''
    import re
    # Test whether the string is a 'number/number' score.
    if re.match(r'\s*\d+(\.\d+)?\s*/\s*\d+', unformatted_text):
        form_text = cleanDigits(unformatted_text)
    else:
        # Otherwise treat it as free text.
        form_text = cleanText(unformatted_text)
    return form_text
| ericsdata/colinsbeer | src/BeerBrush.py | BeerBrush.py | py | 1,119 | python | en | code | 0 | github-code | 13 |
17080007274 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.EcConsumeInfo import EcConsumeInfo
from alipay.aop.api.domain.EcOrderInfo import EcOrderInfo
from alipay.aop.api.domain.EcConsumeInfo import EcConsumeInfo
from alipay.aop.api.domain.EcVoucherInfo import EcVoucherInfo
class AlipayCommerceEcConsumeDetailQueryResponse(AlipayResponse):
    """Response model for the alipay.commerce.ec.consume.detail.query API.

    Carries one consume record together with its related order, refunds
    and vouchers.  Following the SDK's generated-code convention, each
    setter accepts either the domain object itself or a raw dict from the
    gateway (converted with ``from_alipay_dict``).
    """
    def __init__(self):
        super(AlipayCommerceEcConsumeDetailQueryResponse, self).__init__()
        # Payload fields; populated by parse_response_content().
        self._consume_info = None
        self._related_order_info = None
        self._related_refund_list = None
        self._related_voucher_list = None
    @property
    def consume_info(self):
        # EcConsumeInfo describing the queried consume record.
        return self._consume_info
    @consume_info.setter
    def consume_info(self, value):
        if isinstance(value, EcConsumeInfo):
            self._consume_info = value
        else:
            # Raw dict from the gateway -- convert to the domain object.
            self._consume_info = EcConsumeInfo.from_alipay_dict(value)
    @property
    def related_order_info(self):
        # EcOrderInfo for the order this consume record belongs to.
        return self._related_order_info
    @related_order_info.setter
    def related_order_info(self, value):
        if isinstance(value, EcOrderInfo):
            self._related_order_info = value
        else:
            self._related_order_info = EcOrderInfo.from_alipay_dict(value)
    @property
    def related_refund_list(self):
        # List of EcConsumeInfo records for related refunds.
        return self._related_refund_list
    @related_refund_list.setter
    def related_refund_list(self, value):
        if isinstance(value, list):
            self._related_refund_list = list()
            for i in value:
                if isinstance(i, EcConsumeInfo):
                    self._related_refund_list.append(i)
                else:
                    self._related_refund_list.append(EcConsumeInfo.from_alipay_dict(i))
    @property
    def related_voucher_list(self):
        # List of EcVoucherInfo records for related vouchers.
        return self._related_voucher_list
    @related_voucher_list.setter
    def related_voucher_list(self, value):
        if isinstance(value, list):
            self._related_voucher_list = list()
            for i in value:
                if isinstance(i, EcVoucherInfo):
                    self._related_voucher_list.append(i)
                else:
                    self._related_voucher_list.append(EcVoucherInfo.from_alipay_dict(i))
    def parse_response_content(self, response_content):
        # Delegate envelope parsing to the base class, then copy any
        # present payload fields through the converting setters above.
        response = super(AlipayCommerceEcConsumeDetailQueryResponse, self).parse_response_content(response_content)
        if 'consume_info' in response:
            self.consume_info = response['consume_info']
        if 'related_order_info' in response:
            self.related_order_info = response['related_order_info']
        if 'related_refund_list' in response:
            self.related_refund_list = response['related_refund_list']
        if 'related_voucher_list' in response:
            self.related_voucher_list = response['related_voucher_list']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayCommerceEcConsumeDetailQueryResponse.py | AlipayCommerceEcConsumeDetailQueryResponse.py | py | 2,948 | python | en | code | 241 | github-code | 13 |
71617248018 | #!/bin/python3 -u
import discord
import os
from dotenv import load_dotenv
from discord.ext import commands
from discord.utils import get
import re
load_dotenv()
# All intents are requested so member and channel data are available below.
intents = discord.Intents.all()
client = commands.Bot(command_prefix='!', intents=intents)
@client.event
async def on_ready():
    # Fired once the bot has connected and its gateway session is ready.
    print('ccbot_commands started on bot {0.user}'.format(client))
@client.event
async def on_message(message):
    # Only reacts to the bot's OWN messages in one configured channel --
    # this bot post-processes messages it relays there (e.g. from a game
    # chat bridge), rewriting @name@/#chan# placeholders into real mentions.
    if message.author == client.user:
        if message.channel.id == int(os.getenv('channel')):
            content = str(message.content)
            flag=0  # set when content was rewritten and must be re-sent
            # Placeholder / announcement patterns in the relayed text.
            user_ptrn = "@(.*?)@"
            chan_ptrn = "#(.*?)#"
            levl_ptrn = "] : (.*?) has reached a"
            drop_ptrn = "] : (.*?) received a drop"
            qwst_ptrn = "] : (.*?) has completed a quest"
            user_ment = re.search(user_ptrn, content)
            chan_ment = re.search(chan_ptrn, content)
            levl_ment = re.search(levl_ptrn, content)
            drop_ment = re.search(drop_ptrn, content)
            qwst_ment = re.search(qwst_ptrn, content)
            if user_ment:
                # Replace @name@ with a real <@id> mention, matching the
                # guild member by nickname first, then account name.
                user_name = user_ment.group(1)
                for member in message.guild.members:
                    user_full = str("@" + user_name + "@")
                    name = member.name
                    if member.nick:
                        nick = member.nick
                        if nick.lower() == user_name.lower():
                            content = content.replace(user_full, str('<@' + str(member.id) + '>'))
                            flag=1
                            break
                    if name.lower() == user_name.lower():
                        content = content.replace(user_full, str('<@' + str(member.id) + '>'))
                        flag=1
                        break
            if chan_ment:
                # Replace #chan# with a real <#id> channel mention.
                chan_name = chan_ment.group(1)
                for channel in message.guild.channels:
                    chan_full = str("#" + chan_name + "#")
                    name = channel.name
                    if name.lower() == chan_name.lower():
                        content = content.replace(chan_full, str('<#' + str(channel.id) + '>'))
                        flag = 1
                        break
            # Forward any configured !commands found in the text to the
            # bot-command channel.
            cmd_cfg = open("ccbot_commands.cfg", "r")
            bot_chan = client.get_channel(int(os.getenv('botchan')))
            for cmd in cmd_cfg:
                content_l = content.lower()
                # NOTE(review): find() returns -1 when absent and 0 when the
                # command is at the very start; `> 0` skips a leading command
                # -- confirm whether `>= 0` was intended.
                cmd_ment = content_l.find(str("!" + cmd.strip()))
                if cmd_ment > 0:
                    await bot_chan.send(str("!" + cmd.strip()))
            cmd_cfg.close()
            if flag:
                # Re-post the rewritten message and delete the original.
                await message.channel.send(content)
                await message.delete()
            else:
                # No rewrite happened: congratulate on announcements.
                if levl_ment:
                    levl_name = levl_ment.group(1)
                    await message.channel.send("*Gz @" + levl_name + " on the level")
                elif drop_ment:
                    drop_name = drop_ment.group(1)
                    await message.channel.send("*Gz @" + drop_name + " on the drop")
                elif qwst_ment:
                    qwst_name = qwst_ment.group(1)
                    await message.channel.send("*Gz @" + qwst_name + " on the quest")
                else:
                    return
# Start the bot with the token from the environment (blocks until shutdown).
client.run(os.getenv('TOKEN'))
| DrewCording/TenTalkBot | ccbot_commands.py | ccbot_commands.py | py | 3,343 | python | en | code | 3 | github-code | 13 |
38399276593 | import network as net
from PIL import Image, ImageDraw, ImageFont
from vector import Vector
# Canvas size and palette: white network on a black background.
WIDTH = 800
HEIGHT = 600
BG = ( 0, 0, 0 )
FG = ( 255, 255, 255 )
def draw( network: net.NeuralNetwork ) -> None:
    """Render the network layer-by-layer and show the image.

    The image is displayed via im.show(), not returned (the original
    annotation claimed Image).
    """
    im = Image.new( "RGB", ( WIDTH, HEIGHT ), BG )
    dr = ImageDraw.Draw( im )
    layers = network.get_layers()
    n = len( layers )
    # Neuron radius scales with the widest layer so everything fits vertically.
    biggest_layer = max( layers, key=lambda x: len(x) )
    nr = HEIGHT // len( biggest_layer ) // 3
    # Horizontal band allotted to each layer.
    shift = WIDTH // n
    left = 0
    right = shift
    prev_layer = []  # centres of the previous layer, used to draw edges
    for layer in layers:
        cx = ( left + right ) // 2
        left, right = right, right + shift
        draw_layer( dr, layer, nr, cx, prev_layer )
    im.show()
def draw_layer( dr: ImageDraw.Draw,
                layer: net.Neuron_layer,
                nr: int,
                cx: int,
                prev_layer: list[ tuple[ int, int ] ] ) -> None:
    """Draw one layer's neurons at x = cx and connect each to prev_layer.

    prev_layer is mutated in place so the NEXT call connects to this
    layer's centres.
    """
    n = len( layer )
    high = 0
    low = HEIGHT // n
    shift = low  # vertical band per neuron
    curr_layer = []
    for neuron in layer:
        cy = ( high + low ) // 2
        curr_layer.append( (cx, cy) )
        # Fully connect this neuron to the previous layer.
        for x, y in prev_layer:
            dr.line( [ (cx, cy), (x, y) ], fill=FG, width=1 )
        high, low = low, low + shift
        draw_neuron( dr, neuron, cx, cy, nr )
    # Replace the caller's list contents with this layer's centres.
    prev_layer.clear()
    prev_layer.extend( curr_layer )
def draw_neuron( dr: ImageDraw.Draw,
                 neuron: net.Neuron_type,
                 cx: int,
                 cy: int,
                 nr: int ) -> None:
    """Draw a single neuron as a filled circle of radius nr at (cx, cy)."""
    dr.ellipse( [ (cx - nr, cy - nr), (cx + nr, cy + nr) ], fill=FG, outline=FG )
    # Value labels disabled (would require a truetype font at runtime):
    # text = str( round( neuron.get_value(), 3 ) )
    # font_size = nr // 2
    # font = ImageFont.truetype( "arial.ttf", font_size )
    # dr.text( ( cx - font_size, cy - font_size // 2 ), text=text, font=font, align ="center")
# Demo: a 2-7-3-2 perceptron network.  NOTE(review): rebinding `net`
# shadows the imported `network` module for the rest of the script.
net = net.NeuralNetwork( net.Perceptron, 2, 7, 3, 2 )
draw( net )
| ejdam87/neural-network | old/draw_network.py | draw_network.py | py | 1,873 | python | en | code | 0 | github-code | 13 |
18855966708 | from typing import List
def checker(n, idx, arr):
    """Distance from position idx to the nearest zero in arr.

    A zero element is at distance 0 from itself.  If arr contains no
    zero anywhere else, None is produced.
    """
    if n == 0:
        return 0
    # Nearest zero strictly after idx, and strictly before idx.
    forward = next((j - idx for j in range(idx + 1, len(arr)) if arr[j] == 0), None)
    backward = next((idx - j for j in range(idx - 1, -1, -1) if arr[j] == 0), None)
    if forward is not None and backward is not None:
        return min(forward, backward)
    return forward if forward is not None else backward
def read_input() -> List[int]:
    """Read the task input from stdin: a count line, then the numbers."""
    n = int(input())  # declared element count; not used further
    number_list = list(map(int, input().strip().split()))
    return number_list
# For each element, print its distance to the nearest zero in the array.
nearest_arr = []
given_arr = read_input()
for idx, n in enumerate(given_arr):
    nearest_arr.append(checker(n, idx, given_arr))
print(' '.join(str(x) for x in nearest_arr))
| vaydich/algorithms | introduction_to_algorithms/final_part/a.py | a.py | py | 802 | python | en | code | 0 | github-code | 13 |
23125168760 | from __future__ import print_function
import gdown
from googleapiclient import discovery
from httplib2 import Http
from oauth2client import file, client, tools
import io
from googleapiclient.http import MediaIoBaseDownload
SCOPES = 'https://www.googleapis.com/auth/drive.readonly.metadata'
# Load cached OAuth credentials; run the interactive flow if absent/invalid.
store = file.Storage('storage.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('/work/miniscopepipeline/miniursi/drive/credentials.json', SCOPES)
    creds = tools.run_flow(flow, store)
# Authenticated Drive v3 service used by search()/upload() below.
DRIVE = discovery.build('drive', 'v3', http=creds.authorize(Http()))
def search(service, folder_id):
    """Find every .avi file whose parents include folder_id and download it.

    Pages through all shared drives; matching files are downloaded into
    the working directory via gdown's public uc?id= endpoint.
    """
    page_token = None
    while True:
        response = service.files().list(q="mimeType='video/avi'",
                                        includeItemsFromAllDrives=True,
                                        supportsAllDrives=True,
                                        corpora='allDrives',
                                        spaces='drive',
                                        fields='nextPageToken, files(id,parents,name)',
                                        pageToken=page_token).execute()
        for file in response.get('files', []):
            # NOTE(review): `file` shadows the imported oauth2client `file`
            # module inside this function.
            # NOTE(review): the trailing comma makes parents_found a 1-tuple.
            parents_found = (file.get('parents')),
            print(parents_found),
            if folder_id in (file.get('parents')):
                print ('Found file: %s (%s)' % (file.get('name'), file.get('id')))
                gdown.download(('https://drive.google.com/uc?id=%s' % (file.get('id'))), (file.get('name')))
        page_token = response.get('nextPageToken', None)
        if page_token is None:
            break
    ## The commands below use language from the Google API rather than gdown, however it runs into errors because
    ## Hopper is not an interactive interface. This is close to working but I am unsure if it will work with the format.
    # request = service.files().get_media(fileId=file.get('id'))
    # fh = io.BytesIO()
    # downloader = MediaIoBaseDownload(fh, request)
    # done = False
    # while done is False:
    # status, done = downloader.next_chunk()
def upload(folder_id):
    # Thin wrapper: search (and download) all .avi files under folder_id
    # using the module-level authenticated Drive service.
    search(DRIVE, folder_id)
| Vassar-Miniscope/miniursi | drive/drive_list.py | drive_list.py | py | 2,226 | python | en | code | 0 | github-code | 13 |
15817151609 | """Dataset and DataModule for the MultiNLI dataset."""
# Imports Python builtins.
import os
import os.path as osp
import sys
# Imports Python packages.
import numpy as np
import pandas as pd
import wget
# Imports PyTorch packages.
import torch
from torchvision.datasets.utils import (
extract_archive,
)
# Imports milkshake packages.
from milkshake.datamodules.dataset import Dataset
from milkshake.datamodules.datamodule import DataModule
class MultiNLIDataset(Dataset):
    """Dataset for the MultiNLI dataset."""
    def __init__(self, *xargs, **kwargs):
        super().__init__(*xargs, **kwargs)
    def download(self):
        # Fetch group metadata, pre-extracted BERT features, and a helper
        # module into <root>/multinli; skipped if the directory exists.
        multinli_dir = osp.join(self.root, "multinli")
        if not osp.isdir(multinli_dir):
            os.makedirs(multinli_dir)
            # Label / split / spurious-attribute metadata (group-DRO repo).
            url = (
                "https://github.com/kohpangwei/group_DRO/raw/"
                "f7eae929bf4f9b3c381fae6b1b53ab4c6c911a0e/"
                "dataset_metadata/multinli/metadata_random.csv"
            )
            wget.download(url, out=multinli_dir)
            # Cached BERT input features for all splits.
            url = "https://nlp.stanford.edu/data/dro/multinli_bert_features.tar.gz"
            wget.download(url, out=multinli_dir)
            extract_archive(osp.join(multinli_dir, "multinli_bert_features.tar.gz"))
            # Helper module needed to unpickle the cached feature files.
            url = (
                "https://raw.githubusercontent.com/izmailovpavel/"
                "spurious_feature_learning/6d098440c697a1175de6a24"
                "d7a46ddf91786804c/dataset_files/utils_glue.py"
            )
            wget.download(url, out=multinli_dir)
    def load_data(self):
        multinli_dir = osp.join(self.root, "multinli")
        # utils_glue.py (downloaded above) must be importable for torch.load.
        sys.path.append(multinli_dir)
        metadata_path = osp.join(multinli_dir, "metadata_random.csv")
        metadata_df = pd.read_csv(metadata_path)
        # Cached features: train, matched dev, mismatched dev (concatenated).
        bert_filenames = [
            "cached_train_bert-base-uncased_128_mnli",
            "cached_dev_bert-base-uncased_128_mnli",
            "cached_dev_bert-base-uncased_128_mnli-mm",
        ]
        features_array = sum([torch.load(osp.join(multinli_dir, name))
                              for name in bert_filenames], start=[])
        all_input_ids = torch.tensor([
            f.input_ids for f in features_array
        ]).long()
        all_input_masks = torch.tensor([
            f.input_mask for f in features_array
        ]).long()
        all_segment_ids = torch.tensor([
            f.segment_ids for f in features_array
        ]).long()
        # Pack (ids, mask, segment) along a trailing axis of size 3.
        self.data = torch.stack((
            all_input_ids,
            all_input_masks,
            all_segment_ids,
        ), dim=2)
        self.targets = np.asarray(metadata_df["gold_label"].values)
        # Spurious attribute: whether sentence 2 contains a negation word.
        spurious = np.asarray(metadata_df["sentence2_has_negation"].values)
        no_negation = np.argwhere(spurious == 0).flatten()
        negation = np.argwhere(spurious == 1).flatten()
        contradiction = np.argwhere(self.targets == 0).flatten()
        entailment = np.argwhere(self.targets == 1).flatten()
        neutral = np.argwhere(self.targets == 2).flatten()
        # Six groups: every (label, negation) combination.
        self.groups = [
            np.intersect1d(contradiction, no_negation),
            np.intersect1d(contradiction, negation),
            np.intersect1d(entailment, no_negation),
            np.intersect1d(entailment, negation),
            np.intersect1d(neutral, no_negation),
            np.intersect1d(neutral, negation),
        ]
        # Split column: 0 = train, 1 = val, 2 = test.
        split = np.asarray(metadata_df["split"].values)
        self.train_indices = np.argwhere(split == 0).flatten()
        self.val_indices = np.argwhere(split == 1).flatten()
        self.test_indices = np.argwhere(split == 2).flatten()
        # Adds group indices into targets for metrics.
        targets = []
        for j, t in enumerate(self.targets):
            g = [k for k, group in enumerate(self.groups) if j in group][0]
            targets.append([t, g])
        self.targets = np.asarray(targets)
class MultiNLI(DataModule):
    """DataModule for the MultiNLI dataset."""
    def __init__(self, args, **kwargs):
        # 3 classes (contradiction/entailment/neutral), 6 groups.
        super().__init__(args, MultiNLIDataset, 3, 6, **kwargs)
    def augmented_transforms(self):
        # Text inputs: no augmentation transforms.
        return None
    def default_transforms(self):
        return None
| tmlabonte/last-layer-retraining | milkshake/datamodules/multinli.py | multinli.py | py | 4,191 | python | en | code | 7 | github-code | 13 |
10895057054 | ### this .py is for generating model performance, MAE, NRMSE, and CR
import numpy as np
import matplotlib.pyplot as plt
def tripLengthFrequency(OD, distance):
    '''
    Trip-length frequency distribution of an OD matrix.

    Distances are binned into half-open 0.5-unit intervals
    [0, 0.5), [0.5, 1.0), ... up to floor(max distance) + 1, the OD flow
    in each bin is summed, and the result is normalised to sum to 1.

    Vectorised replacement for the original triple Python loop, which
    performed O(bins * n^2) scalar comparisons.
    '''
    OD = np.asarray(OD, dtype=float)
    distance = np.asarray(distance, dtype=float)
    edges = np.arange(0, np.floor(distance.max()) + 1, 0.5)
    tlf = np.zeros(len(edges) - 1)
    for k in range(len(edges) - 1):
        # Boolean mask of all OD pairs falling in this distance bin.
        in_bin = (distance >= edges[k]) & (distance < edges[k + 1])
        tlf[k] = OD[in_bin].sum()
    return tlf / tlf.sum()
def dR(distance):
    """Upper bin edges (0.5-unit steps) matching tripLengthFrequency's bins."""
    edges = np.arange(0, np.floor(distance.max()) + 1, 0.5)
    return edges[1:]
def CR(predicttlf, referencetlf):
    '''
    Coincidence ratio between two trip-length frequency distributions:
    the sum of elementwise minima divided by the sum of elementwise
    maxima (1.0 for identical distributions).

    Vectorised replacement for the original per-element loop.
    '''
    p = np.asarray(predicttlf, dtype=float)
    r = np.asarray(referencetlf, dtype=float)
    return np.minimum(p, r).sum() / np.maximum(p, r).sum()
def MOEs(predict,reference,dij):
    """Measures of effectiveness between predicted and reference OD matrices.

    Returns (MAE, NRMSE, CR).  NOTE(review): the "NRMSE" computed here is
    the sum of squared errors divided by the reference total -- not a
    conventional normalised RMSE; confirm this definition is intended.
    """
    MAE=np.mean(abs(np.array(predict)-np.array(reference)))
    NRMSE=((np.array(predict)-np.array(reference))**2).sum()/np.array(reference).sum()
    # Coincidence ratio of the two trip-length frequency distributions.
    tlfd=CR(tripLengthFrequency(reference,dij),tripLengthFrequency(predict,dij))
    return MAE,NRMSE,tlfd
| nicholasadam/PKDD2018-dynamic-zone-correlation | ODMOE.py | ODMOE.py | py | 1,162 | python | en | code | 1 | github-code | 13 |
70014292818 | import random
import asyncio
from aiotasks import build_manager
manager = build_manager("redis://")
@manager.task()
async def task_01(num):
    # Simulate 0-4 seconds of work on the manager's event loop, then
    # return how long we slept.
    wait_time = random.randint(0, 4)
    print("Task 01 starting: {}".format(num))
    await asyncio.sleep(wait_time, loop=manager.loop)
    print("Task 01 stopping")
    return wait_time
async def generate_tasks():
    # Generates 5 tasks
    for x in range(5):
        # delay() enqueues task_01; the async context manager yields a
        # handle for the queued task.
        async with task_01.delay(x) as f:
            print(f)
if __name__ == '__main__':
    # Start aiotasks for waiting tasks
    manager.run()
    # Launch the task generator. It'll create 5 tasks
    manager.loop.run_until_complete(generate_tasks())
    # Wait until tasks is done
    manager.blocking_wait(exit_on_finish=True)
    # Stop aiotasks listen tasks and shutdown
    manager.stop()
| cr0hn/aiotasks | examples/standalone_tasks_and_wait_standalone.py | standalone_tasks_and_wait_standalone.py | py | 822 | python | en | code | 431 | github-code | 13 |
39298646006 | # These 4 list store data for users, meetups, questions & rsvps respectively
users, meetups, questions, rsvps = [], [], [], []
class BaseModels(object):
    """
    This class contains methods that are common to all other
    models
    """
    def __init__(self):
        # Bind the module-level in-memory "tables" onto the instance so
        # subclasses can reach them through self.
        self.users = users
        self.meetups = meetups
        self.questions = questions
        self.rsvps = rsvps

    def makeresp(self, payload, status_code):
        """Wrap a payload in the standard API response envelope.

        A string payload is treated as an error message; a single record
        is wrapped in a one-element "data" list; a list is passed through.
        """
        if isinstance(payload, str):
            return {
                "status": status_code,
                "error": payload
            }
        if not isinstance(payload, list):
            return {
                "status": status_code,
                "data": [payload]
            }
        return {
            "status": status_code,
            "data": payload
        }

    def check_item_exists(self, key, item, database):
        """Return every record whose `key` field matches `item`.

        String fields are compared case-insensitively; non-string fields
        fall back to integer comparison (item is cast with int()).
        Returns a "<key> not found" message when nothing matches.
        """
        try:
            data = [record for record in database if record[key].lower() == item.lower()]
        except (AttributeError, TypeError):
            # Field (or item) is not a string -- compare numerically.
            # Narrowed from a bare `except:` so unrelated errors are not
            # silently rerouted through the integer path.
            data = [record for record in database if record[key] == int(item)]
        if not data:
            return "{} not found".format(key)
        return data

    def check_item_return_index(self, key, item, database):
        """Like check_item_exists, but returns [index, record] pairs."""
        try:
            data = [[ind, record] for [ind, record] in enumerate(database) if record[key].lower() == item.lower()]
        except (AttributeError, TypeError):
            # Non-string field/item: fall back to integer comparison.
            data = [[ind, record] for [ind, record] in enumerate(database) if record[key] == int(item)]
        if not data:
            return "{} not found".format(key)
        return data

    def check_missing_details(self, details):
        """Return an error message for the first empty value, else None."""
        for key, value in details.items():
            if not value:
                return "{} is a required field".format(key)
        return None

    def check_is_error(self, data):
        """True when the payload is an error message (a plain string)."""
        return isinstance(data, str)
| ansarisan/vigilant-spoon | app/api/v1/models/base_model.py | base_model.py | py | 2,598 | python | en | code | 0 | github-code | 13 |
def solution(left, right):
    """Sum the numbers in [left, right], adding those with an even divisor
    count and subtracting those with an odd count.

    A number has an odd number of divisors exactly when it is a perfect
    square, so no per-number divisor enumeration is needed (the original
    enumerated every divisor of every number: O((right-left+1) * right)).
    """
    import math
    answer = 0
    for num in range(left, right + 1):
        if math.isqrt(num) ** 2 == num:
            # Perfect square -> odd divisor count -> subtract.
            answer -= num
        else:
            answer += num
    return answer
class Triangle:
    """A triangle described by its three interior angles (degrees)."""
    # Every triangle has exactly three sides.
    number_of_sides = 3

    def __init__(self, angle1, angle2, angle3):
        self.angle1 = angle1
        self.angle2 = angle2
        self.angle3 = angle3

    def checkangles(self):
        """Print and return whether the three angles sum to 180 degrees."""
        total = self.angle1 + self.angle2 + self.angle3
        is_valid = total == 180
        if is_valid:
            print("True")
        else:
            print("False")
        return is_valid
# Demo: an equilateral 60/60/60 triangle is valid, so this prints
# "True" followed by the side count "3".
my_triangle=Triangle(60,60,60)
my_triangle.checkangles()
res=my_triangle.number_of_sides
print(str(res))
| SumanthPai/Python-CodeVerse | triangle.py | triangle.py | py | 543 | python | en | code | 0 | github-code | 13 |
18501370907 | from aws_cdk import core
from replication.s3stack import S3Stack
class replicationStack:
    """Builds the CDK app containing the S3 replication stacks."""
    def __init__(self):
        self.app = core.App()
    def build(self) -> core.App:
        # Source-side stack in us-east-1.
        setup_stack = S3Stack(
            self.app,
            "setup-stack",
            env={'region':'us-east-1'}
        )
        # Destination-side stack in us-west-2.
        # NOTE(review): both stacks instantiate the same S3Stack construct;
        # confirm a distinct replication construct was not intended.
        replication_stack = S3Stack(
            self.app,
            "replication-stack",
            env={'region':'us-west-2'}
        )
        return self.app
| fossouo/S3ReplicationCDK | app/replication/replicationstack.py | replicationstack.py | py | 477 | python | en | code | 0 | github-code | 13 |
28169093614 | import pygame, sys, time
from pygame.locals import *
from random import randint
class TankMain(object):
    """Main window and game loop of the tank battle game."""
    width = 800
    height = 600
    my_tank_missile_list = []
    my_tank = None
    # enemy_list = []
    enemy_list = pygame.sprite.Group()  # sprite group of enemy tanks
    explode_list = []
    enemy_missile_list = pygame.sprite.Group()
    wall = None
    # Start the game: initialise pygame, build the world, run the frame loop.
    def startGame(self):
        # Initialise all pygame modules and load system resources.
        pygame.init()
        # Create the window: (width, height), flags (0 = fixed size;
        # RESIZABLE / FULLSCREEN are alternatives), 32-bit colour depth.
        screem = pygame.display.set_mode((TankMain.width, TankMain.height), 0, 32)
        # Set the window title.
        pygame.display.set_caption("坦克大战")
        TankMain.wall = Wall(screem, 80, 160, 30, 100)
        # Create our tank, shown at the lower-middle of the screen.
        TankMain.my_tank = My_Tank(screem)
        if len(TankMain.enemy_list) == 0:
            for i in range(1, 6):  # spawn 5 enemy tanks at game start
                TankMain.enemy_list.add(Enemy_Tank(screem))  # add each enemy to the group
        while True:
            # Keep the enemy population topped up at 5.
            if len(TankMain.enemy_list) < 5:
                TankMain.enemy_list.add(Enemy_Tank(screem))
            # Paint the background black.
            screem.fill((0, 0, 0))
            # Draw the status text in the top-left corner.
            for i, text in enumerate(self.write_text(), 1):
                screem.blit(text, (0, 5 + (15 * i)))
            # Draw the wall and run its collision checks against everything.
            TankMain.wall.display()
            TankMain.wall.hit_other()
            self.get_event(TankMain.my_tank, screem)  # fetch and dispatch events
            if TankMain.my_tank:
                TankMain.my_tank.hit_enemy_missile()  # our tank vs enemy missiles
            if TankMain.my_tank and TankMain.my_tank.live:
                TankMain.my_tank.display()
                TankMain.my_tank.move()  # move our tank on screen
            else:
                TankMain.my_tank = None
            # Draw and update all enemy tanks.
            for enemy in TankMain.enemy_list:
                enemy.display()
                enemy.random_move()
                enemy.random_fire()
            # Draw all of our missiles; drop dead ones.
            for m in TankMain.my_tank_missile_list:
                if m.live:
                    m.display()
                    m.hit_tank()  # check for hits on enemy tanks
                    m.move()
                else:
                    TankMain.my_tank_missile_list.remove(m)
            # Draw all enemy missiles; drop dead ones.
            for m in TankMain.enemy_missile_list:
                if m.live:
                    m.display()
                    m.move()
                else:
                    TankMain.enemy_missile_list.remove(m)
            for explode in TankMain.explode_list:
                explode.display()
            # Sleep 0.05 s between frames.
            time.sleep(0.05)
            # Flush the frame to the display.
            pygame.display.update()
    # Handle all pending events (mouse clicks, key presses).
    def get_event(self, my_tank, screem):
        for event in pygame.event.get():
            if event.type == QUIT:
                self.stopGame()  # quit the program
            # 'n' respawns our tank after it has been destroyed.
            if event.type == KEYDOWN and not my_tank and event.key == K_n:
                print("new a tank")
                TankMain.my_tank = My_Tank(screem)
            if event.type == KEYDOWN and my_tank:
                if event.key == K_LEFT:
                    my_tank.direction = "L"
                    my_tank.stop = False
                    # my_tank.move()
                if event.key == K_RIGHT:
                    my_tank.direction = "R"
                    my_tank.stop = False
                    # my_tank.move()
                if event.key == K_UP:
                    my_tank.direction = "U"
                    my_tank.stop = False
                    # my_tank.move()
                if event.key == K_DOWN:
                    my_tank.direction = "D"
                    my_tank.stop = False
                    # my_tank.move()
                if event.key == K_ESCAPE:
                    self.stopGame()
                if event.key == K_SPACE:
                    m = my_tank.fire()
                    m.good = True  # mark as one of our ("good") missiles
                    TankMain.my_tank_missile_list.append(m)
            if event.type == KEYUP and my_tank:
                # Releasing any arrow key stops the tank.
                if event.key == K_LEFT or event.key == K_RIGHT or event.key == K_UP or event.key == K_DOWN:
                    my_tank.stop = True
    # close game
    def stopGame(self):
        sys.exit()
    # set the game windows title
    def set_title(self):
        pass
    # Render the status lines shown in the top-left corner.
    def write_text(self):
        font = pygame.font.SysFont("simsunnsimsun", 20)
        text_sf1 = font.render("敌方坦克数量为:%d" % len(TankMain.enemy_list), True, (255, 0, 0))
        text_sf2 = font.render("我方炮弹数量为:%d" % len(TankMain.my_tank_missile_list), True, (255, 0, 0))
        return text_sf1, text_sf2
# Common base class for every drawable object in the tank battle game.
class BaseItem(pygame.sprite.Sprite):
    def __init__(self, screem):
        pygame.sprite.Sprite.__init__(self)
        # The game screen shared by all drawable objects.
        self.screem = screem
    # Blit the object's image (for its current direction) onto the window.
    def display(self):
        if self.live:
            self.image = self.images[self.direction]
            self.screem.blit(self.image, self.rect)
class Tank(BaseItem):
    # Width and height are class attributes: every tank has the same size.
    width = 50
    height = 50
    def __init__(self, screem, left, top):
        super().__init__(screem)
        # self.screem=screem  # the tank needs the game screen for moving/drawing
        self.direction = "D"  # facing direction, defaults to down (U/D/L/R)
        self.speed = 10  # movement speed in pixels per frame
        self.stop = False
        self.images = {}  # all tank images, keyed by direction -> surface
        self.images["L"] = pygame.image.load("images/tankL.gif")
        self.images["R"] = pygame.image.load("images/tankR.gif")
        self.images["U"] = pygame.image.load("images/tankU.gif")
        self.images["D"] = pygame.image.load("images/tankD.gif")
        self.image = self.images[self.direction]  # shown image follows the direction
        self.rect = self.image.get_rect()
        self.rect.left = left
        self.rect.top = top
        self.live = True  # whether the tank is still alive
        self.oldtop = self.rect.top
        self.oldleft = self.rect.left
    def stay(self):
        # Undo the last move (used after colliding with the wall).
        self.rect.top = self.oldtop
        self.rect.left = self.oldleft
    def move(self):
        # Remember the current position so stay() can restore it.
        self.oldleft = self.rect.left
        self.oldtop = self.rect.top
        if not self.stop:
            if self.direction == "L":
                if self.rect.left > 0:  # clamp at the left screen edge
                    self.rect.left -= self.speed
                else:
                    self.rect.left = 0
            elif self.direction == "R":
                if self.rect.right < TankMain.width:
                    self.rect.right += self.speed
                else:
                    self.rect.right = TankMain.width
            elif self.direction == "U":
                if self.rect.top > 0:
                    self.rect.top -= self.speed
                else:
                    self.rect.top = 0
            elif self.direction == "D":
                if self.rect.bottom < TankMain.height:
                    self.rect.bottom += self.speed
                else:
                    self.rect.bottom = TankMain.height
    def fire(self):
        # Spawn a missile travelling in this tank's current direction.
        m = Missile(self.screem, self)
        return m
class My_Tank(Tank):
    def __init__(self, screem):
        # Our tank, placed at the lower-middle of the screen.
        super().__init__(screem, 275, 400)
        self.stop = True
        self.live = True
    def hit_enemy_missile(self):
        # Collision check between our tank and every enemy missile.
        hit_list = pygame.sprite.spritecollide(self, TankMain.enemy_missile_list, False)
        for m in hit_list:  # our tank was hit
            m.live = False
            TankMain.enemy_missile_list.remove(m)
            self.live = False
            explode = Explode(self.screem, self.rect)
            TankMain.explode_list.append(explode)
class Enemy_Tank(Tank):
    def __init__(self, screem):
        # Spawn at a random x position along y = 200.
        super().__init__(screem, randint(1, 5 * 100), 200)
        self.speed = 4
        self.step = 10  # frames left moving in the current direction
        self.get_random_direction()
    # Keep moving in the chosen random direction for 6 consecutive frames
    # before a new direction may be picked.
    def random_move(self):
        if self.live:
            if self.step == 0:
                self.get_random_direction()
                self.step = 6
            else:
                self.move()
                self.step -= 1
    def get_random_direction(self):
        r = randint(0, 4)  # random direction, or stop (r == 4)
        if r == 4:
            self.stop = True
        elif r == 1:
            self.direction = "L"
            self.stop = False
        elif r == 2:
            self.direction = "R"
            self.stop = False
        elif r == 3:
            self.direction = "U"
            self.stop = False
        elif r == 0:
            self.direction = "D"
            self.stop = False
    def random_fire(self):
        # Fire with probability 5/51 on each frame.
        r = randint(0, 50)
        if r > 45:
            m = self.fire()
            TankMain.enemy_missile_list.add(m)
        else:
            return
class Missile(BaseItem):
    width = 12
    height = 12
    def __init__(self, screem, tank):
        super().__init__(screem)
        self.tank = tank
        self.direction = tank.direction  # direction follows the firing tank
        self.speed = 14  # missile movement speed in pixels per frame
        self.stop = False
        # All missile images, keyed by direction -> surface.
        self.images = {}
        self.images["L"] = pygame.image.load("images/missileL.gif")
        self.images["R"] = pygame.image.load("images/missileR.gif")
        self.images["U"] = pygame.image.load("images/missileU.gif")
        self.images["D"] = pygame.image.load("images/missileD.gif")
        self.image = self.images[self.direction]  # shown image follows the direction
        self.rect = self.image.get_rect()
        # Centre the missile on the firing tank.
        self.rect.left = tank.rect.left + (tank.width - self.width) // 2
        self.rect.top = tank.rect.top + (tank.height - self.height) // 2
        self.live = True  # whether the missile still exists
        self.good = False
    def move(self):
        if self.live:  # only missiles that still exist move
            if self.direction == "L":
                if self.rect.left > 0:  # dies when leaving the screen edge
                    self.rect.left -= self.speed
                else:
                    self.live = False
            elif self.direction == "R":
                if self.rect.right < TankMain.width:
                    self.rect.right += self.speed
                else:
                    self.live = False
            elif self.direction == "U":
                if self.rect.top > 0:
                    self.rect.top -= self.speed
                else:
                    self.live = False
            elif self.direction == "D":
                if self.rect.bottom < TankMain.height:
                    self.rect.bottom += self.speed
                else:
                    self.live = False
    # Missile-vs-tank collision: our missile hitting an enemy tank.  (The
    # enemy-hits-us case is handled in My_Tank.hit_enemy_missile.)
    def hit_tank(self):
        if self.good:  # only our own missiles damage enemies
            hit_list = pygame.sprite.spritecollide(self, TankMain.enemy_list, False)
            for e in hit_list:
                e.live = False
                TankMain.enemy_list.remove(e)  # drop the destroyed enemy from the group
                self.live = False
                explode = Explode(self.screem, e.rect)  # spawn an explosion there
                TankMain.explode_list.append(explode)
# Explosion animation played where a tank was destroyed.
class Explode(BaseItem):
    def __init__(self, screen, rect):
        super().__init__(screen)
        self.live = True
        # Animation frames, played in order 0..10.
        self.images = [pygame.image.load("images/0.gif"), \
                       pygame.image.load("images/1.gif"), \
                       pygame.image.load("images/2.gif"), \
                       pygame.image.load("images/3.gif"), \
                       pygame.image.load("images/4.gif"), \
                       pygame.image.load("images/5.gif"), \
                       pygame.image.load("images/6.gif"), \
                       pygame.image.load("images/7.gif"), \
                       pygame.image.load("images/8.gif"), \
                       pygame.image.load("images/9.gif"), \
                       pygame.image.load("images/10.gif")]
        self.step = 0
        # The explosion happens at the destroyed tank's position; the
        # caller passes the tank's rect in.
        self.rect = rect
    # display() is invoked by the main loop every frame; each call
    # advances the animation by one image.
    def display(self):
        if self.live:
            if self.step == len(self.images):  # last frame has been shown
                self.live = False
            else:
                self.image = self.images[self.step]
                self.screem.blit(self.image, self.rect)
                self.step += 1
        else:
            pass  # finished; the object could be removed by the owner
# Obstacle (wall) in the game.
class Wall(BaseItem):
    def __init__(self, screen, left, top, width, height):
        super().__init__(screen)
        self.rect = Rect(left, top, width, height)
        self.color = (255, 0, 0)
    def display(self):
        # The wall is a plain filled rectangle (no sprite image).
        self.screem.fill(self.color, self.rect)
    # Collision checks between the wall and tanks / missiles:
    # tanks are pushed back to their previous position, missiles die.
    def hit_other(self):
        if TankMain.my_tank:
            is_hit = pygame.sprite.collide_rect(self, TankMain.my_tank)
            if is_hit:
                TankMain.my_tank.stop = True
                TankMain.my_tank.stay()
        if TankMain.enemy_list:
            hits_list = pygame.sprite.spritecollide(self, TankMain.enemy_list, False)
            for e in hits_list:
                e.stop = True
                e.stay()
        if TankMain.my_tank_missile_list:
            hit_missile_list = pygame.sprite.spritecollide(self, TankMain.my_tank_missile_list,False)
            for m in hit_missile_list:
                m.live=False
        if TankMain.enemy_missile_list:
            hit_enemy_missile_list = pygame.sprite.spritecollide(self, TankMain.enemy_missile_list,False)
            for m in hit_enemy_missile_list:
                m.live = False
if __name__ == '__main__':
    # Script entry point: build the game object and enter the main loop.
    game = TankMain()
    game.startGame()
| mengfangpo123/pythoncode | tank.py | tank.py | py | 15,153 | python | en | code | 0 | github-code | 13 |
6527712738 | import csv
import os
import json
import numpy
import numpy as np
import onnx
from collections import defaultdict
from onnx import numpy_helper
from onnx import shape_inference
from onnx_explorer import logo_str
from onnx_explorer.utils import get_file_size, byte_to_mb, get_file_size_mb, get_model_size_mb
class ONNXModelAnalyzer:
    def __init__(self):
        # Stateless analyzer: the functionality lives in static methods.
        pass
    @staticmethod
    def get_dtype_name(tensor_dtype):
        '''
        get data type name
        :param tensor_dtype: integer onnx.TensorProto.DataType enum value
        :return: the enum value's symbolic name (e.g. 'FLOAT')
        '''
        return onnx.TensorProto.DataType.Name(tensor_dtype)
@staticmethod
def save_format_txt(output_info, output_file, show_node_details=False):
'''
:param output_info:
:param output_file:
:return:
1.summary
2.parameter_data_types
3.operators-lists
4.operators
5.inputs
6.outputs
7.node_details
'''
with open(output_file + ".txt", "w") as f:
f.write(f"{logo_str}\n")
# Write model information in the given format
if show_node_details:
# Calculate input size (MB)
input_size = sum(numpy.prod(input_info['shape']) for input_info in output_info['inputs']) * 4 / (1024 * 1024)
forward_backward_pass_size = sum(numpy.prod(node_detail['output_shape']) for node_detail in output_info['node_details']) * 4 / (1024 * 1024)
# Calculate estimated total size (MB)
estimated_total_size = input_size + forward_backward_pass_size + output_info['summary']['model_size']
# Write model information in the given format
f.write("=========================================================================================================\n")
f.write("Layer (type:depth-idx) Output Shape Param #\n")
f.write("=========================================================================================================\n")
if "node_details" in output_info:
sorted_node_details = sorted(output_info.get("node_details", []), key=lambda x: x['depth'])
last_depth = -1
for node_detail in sorted_node_details:
output_shape = str(node_detail['output_shape'])
param_num = node_detail['param_count']
depth = node_detail['depth']
layer_name = node_detail['name']
if depth > last_depth:
indent = '│ ' * depth
branch = '├─'
elif depth == last_depth:
indent = '│ ' * (depth - 1)
branch = '├─'
else: # depth < last_depth
indent = '│ ' * depth
branch = '└─'
last_depth = depth
op_type_str = f"{node_detail['op_type']} (d={depth}):"
param_num_str = f"{param_num: <6}"
f.write(f"{indent}{branch}{op_type_str: <18} {layer_name: <20} {output_shape: <25} {param_num_str}\n")
f.write("=========================================================================================================\n")
f.write(f"Total params: {output_info['summary']['num_params']}\n")
f.write(f"Trainable params: {output_info['summary']['num_params']}\n")
f.write("Non-trainable params: 0\n")
f.write(
"=========================================================================================================\n")
f.write(f"Input size (MB): {input_size:.2f}\n")
f.write(f"Forward/backward pass size (MB): {forward_backward_pass_size:.2f}\n")
f.write(f"Params size (MB): {output_info['summary']['model_size']}\n")
f.write(f"Estimated Total Size (MB): {estimated_total_size:.2f}\n")
f.write(
"=========================================================================================================\n\n")
############################################################################################################################
# Write model information in the given format
f.write("================================【summary】================================\n")
for key, value in output_info["summary"].items():
f.write(f"| {key}: {value}\n")
f.write("=====================【parameter_data_types】=====================\n")
for key, value in output_info["parameter_data_types"].items():
f.write(f"| {key}: {value}\n")
f.write("===========================【operators-lists】===========================\n")
operators_list = output_info["operators_list"]
f.write(f"| {operators_list}\n")
f.write("===========================【operators】===========================\n")
for key, value in output_info["operators"].items():
f.write(f"| {key}: count={value['count']}, percentage={value['percentage']}\n")
f.write("===========================【inputs】==============================\n")
for input_info in output_info["inputs"]:
f.write(f"| name={input_info['name']}, dtype={input_info['dtype']}, shape={input_info['shape']}\n")
f.write("===========================【outputs】=============================\n")
for output_info in output_info["outputs"]:
f.write(f"name={output_info['name']}, dtype={output_info['dtype']}, shape={output_info['shape']}\n")
if "node_details" in output_info:
f.write(
"=========================================【node_details】==========================================\n")
for node_detail in output_info["node_details"]:
f.write(f"op_type={node_detail['op_type']}, name={node_detail['name']}\n")
f.write(f"inputs: {', '.join(node_detail['inputs'])}\n")
f.write(f"outputs: {', '.join(node_detail['outputs'])}\n")
f.write("attributes:\n")
for attr_name, attr_value in node_detail["attributes"].items():
f.write(f" {attr_name}: {attr_value}\n")
f.write("\n")
print(f"Model analysis saved to {output_file}.txt")
@staticmethod
def save_format_json(output_file, output_info):
with open(output_file + ".json", "w") as f:
json.dump(output_info, f, indent=2)
print(f"\nModel analysis saved to {output_file}.json")
@staticmethod
def save_format_csv(output_info, output_file):
with open(output_file + ".csv", "w", newline='') as f:
csv_writer = csv.writer(f)
csv_writer.writerow(["section", "key", "value"])
for key, value in output_info["summary"].items():
csv_writer.writerow(["summary", key, value])
for key, value in output_info["parameter_data_types"].items():
csv_writer.writerow(["parameter_data_types", key, value])
for key, value in output_info["operators"].items():
csv_writer.writerow(["operators", key, f"count={value['count']}, percentage={value['percentage']}"])
for input_info in output_info["inputs"]:
csv_writer.writerow(
["inputs", input_info['name'], f"dtype={input_info['dtype']}, shape={input_info['shape']}"])
for output_info in output_info["outputs"]:
csv_writer.writerow(
["outputs", output_info['name'], f"dtype={output_info['dtype']}, shape={output_info['shape']}"])
if "node_details" in output_info:
for node_detail in output_info["node_details"]:
csv_writer.writerow(["node_details", node_detail['name'], f"op_type={node_detail['op_type']}"])
csv_writer.writerow(
["node_details", node_detail['name'], f"inputs: {', '.join(node_detail['inputs'])}"])
csv_writer.writerow(
["node_details", node_detail['name'], f"outputs: {', '.join(node_detail['outputs'])}"])
for attr_name, attr_value in node_detail["attributes"].items():
csv_writer.writerow(["node_details", node_detail['name'], f"{attr_name}: {attr_value}"])
print(f"Model analysis saved to {output_file}.csv")
@staticmethod
def get_file_size(file_path):
return os.path.getsize(file_path)
@staticmethod
def get_node_depth(node_name, node_parents, nodes_by_output, visited=None):
'''
get node depth
:param node_name:
:param node_parents:
:param nodes_by_output:
:param visited:
:return:
'''
if visited is None:
visited = set()
if node_name in visited:
return 0
visited.add(node_name)
depth = 0
if node_name in node_parents:
parent_depths = [ONNXModelAnalyzer.get_node_depth(parent_name, node_parents, nodes_by_output, visited) for
parent_name in node_parents[node_name]]
depth = max(parent_depths) + 1
return depth
@staticmethod
def print_node_structure(f, node_name, node_details, node_parent, depth=0):
'''
print node structure
:param f:
:param node_name:
:param node_details:
:param node_parent:
:param depth:
:return:
'''
node_detail = node_details[node_name]
f.write("─" * depth + f"{node_detail['op_type']} ({node_detail['name']})\n")
for child_node_name in [node['name'] for node in node_details if node_parent[node['name']] == node_name]:
f.print_node_structure(f, child_node_name, node_details, node_parent, depth + 1)
@staticmethod
def analyze_onnx_model(onnx_file_path, save_to_file=False, output_file=None, show_node_details=False):
'''
analyze onnx model
:param onnx_file_path:
:param save_to_file:
:param output_file:
:param show_node_details:
:return:
'''
if onnx_file_path is not None:
if os.path.exists(onnx_file_path):
# Load ONNX model
model = onnx.load(onnx_file_path)
# Validate model
onnx.checker.check_model(model)
# Infer shapes
inferred_model = shape_inference.infer_shapes(model)
value_info = {vi.name: vi for vi in inferred_model.graph.value_info}
# Get graph information
graph = model.graph
# Get node information
nodes = graph.node
# Get input and output tensor information
inputs = graph.input
outputs = graph.output
# Get model parameters
initializer = graph.initializer
# Calculate parameters for each node
initializer_dict = {tensor.name: numpy_helper.to_array(tensor) for tensor in initializer}
node_params = {}
for node in nodes:
node_param_count = 0
for input_name in node.input:
if input_name in initializer_dict:
node_param_count += initializer_dict[input_name].size
node_params[node.name] = node_param_count
# Create a dictionary to find nodes by their output tensor names
nodes_by_output = {output_name: node for node in nodes for output_name in node.output}
# Calculate parents for each node
node_parents = defaultdict(list)
for node in nodes:
for input_name in node.input:
if input_name in nodes_by_output:
node_parents[node.name].append(nodes_by_output[input_name].name)
print("Node parents:")
for node_name, parent_list in node_parents.items():
print(f"{node_name}: {parent_list}")
# Count the number of nodes
node_count = len(nodes)
# Count the number of input and output tensors
input_count = len(inputs)
output_count = len(outputs)
# Calculate the number of parameters
num_params = sum(numpy_helper.to_array(tensor).size for tensor in initializer)
# Count the number of operators
op_count = defaultdict(int)
for node in nodes:
op_count[node.op_type] += 1
# Calculate operator percentage
op_percentage = {op_type: count / node_count * 100 for op_type, count in op_count.items()}
# Count the number of parameters for each data type
dtype_count = defaultdict(int)
for tensor in initializer:
dtype_name = ONNXModelAnalyzer.get_dtype_name(tensor.data_type)
dtype_count[dtype_name] += numpy_helper.to_array(tensor).size
nodes_by_name = {node.name: node for node in nodes}
# Calculate model size
# model_size = os.path.getsize(onnx_file_path)
# model_size = get_file_size_mb(onnx_file_path)
model_size = get_model_size_mb(onnx_file_path)
# Prepare output information
output_info = {
"summary": {
"model": onnx_file_path,
"node_count": node_count,
"input_count": input_count,
"output_count": output_count,
"num_params": num_params,
"model_size": model_size,
# "model_size": byte_to_mb(model_size)
},
"parameter_data_types": {dtype_name: count for dtype_name, count in dtype_count.items()},
"operators": {op_type: {"count": count, "percentage": op_percentage[op_type]} for op_type, count in
op_count.items()}, "operators_list": list(op_count.keys()),
"inputs": [{"name": input_tensor.name,
"dtype": ONNXModelAnalyzer.get_dtype_name(input_tensor.type.tensor_type.elem_type),
"shape": [dim.dim_value for dim in input_tensor.type.tensor_type.shape.dim]} for input_tensor in inputs],
"outputs": [{"name": output_tensor.name,
"dtype": ONNXModelAnalyzer.get_dtype_name(output_tensor.type.tensor_type.elem_type),
"shape": [dim.dim_value for dim in output_tensor.type.tensor_type.shape.dim]} for output_tensor in outputs],
"node_details": [
{
"op_type": node.op_type,
"name": node.name,
"inputs": [input_name for input_name in node.input],
"outputs": [output_name for output_name in node.output],
"attributes": {attr.name: str(attr) for attr in node.attribute},
"output_shape": [dim.dim_value for dim in value_info[node.output[0]].type.tensor_type.shape.dim] if node.output[0] in value_info else [],
"param_count": node_params[node.name],
"depth": ONNXModelAnalyzer.get_node_depth(node.name, node_parents, nodes_by_output)
} for node in nodes
]}
if show_node_details:
# Print output information
import pprint
pprint.pprint(output_info)
# Save output information to file
if save_to_file:
if output_file is None:
output_file = os.path.splitext(onnx_file_path)[0] + "_analysis"
else:
output_path = os.path.split(output_file)[0]
print("output_path:", output_path)
if os.path.exists(output_path) is False:
os.makedirs(output_path)
# Save as JSON
ONNXModelAnalyzer.save_format_json(output_file, output_info)
# Save as TXT
ONNXModelAnalyzer.save_format_txt(output_info, output_file, show_node_details)
# Save as CSV
ONNXModelAnalyzer.save_format_csv(output_info, output_file)
else:
print("onnx_file_path not found")
else:
print("onnx_file_path is None")
def main():
print(f"{logo_str}\n")
model_path = "../ckpts/yolov5/yolov5x6.onnx"
output_file = "../weights/yolov5/yolov5x6"
ONNXModelAnalyzer.analyze_onnx_model(model_path, save_to_file=True, output_file=output_file,show_node_details=False)
if __name__ == '__main__':
main()
| isLinXu/onnx-explorer | onnx_explorer/OnnxAlyzer.py | OnnxAlyzer.py | py | 17,893 | python | en | code | 4 | github-code | 13 |
17625380375 | import sys
from PyQt5.QtWidgets import *
from math import *
class Main(QDialog):
def __init__(self):
super().__init__()
self.init_ui()
self.equation = "" #계산식을 저장할 변수 생성
self.numeric = "" #두 자리 수 이상을 표시하기 위해 변수 생성
self.operation =[] #연산자 저장
def init_ui(self):
main_layout = QVBoxLayout()
### 각 위젯을 배치할 레이아웃을 미리 만들어 둠
layout_op = QGridLayout()
layout_number = QGridLayout()
layout_solution = QGridLayout() #grid로 변경, 변수명 변경
### 수식 입력과 답 출력을 위한 LineEdit 위젯 생성
self.solution = QLineEdit("")
### layout_equation_solution 레이아웃에 답 위젯을 추가
layout_solution.addWidget(self.solution, 0, 0)
### 사칙연상 버튼 생성
button_plus = QPushButton("+")
button_minus = QPushButton("-")
button_product = QPushButton("x")
button_division = QPushButton("/")
### 사칙연산 버튼을 클릭했을 때, 각 사칙연산 부호가 수식창에 추가될 수 있도록 시그널 설정
button_plus.clicked.connect(lambda state, operation = "+": self.button_operation_clicked(operation))
button_minus.clicked.connect(lambda state, operation = "-": self.button_operation_clicked(operation))
button_product.clicked.connect(lambda state, operation = "*": self.button_operation_clicked(operation))
button_division.clicked.connect(lambda state, operation = "/": self.button_operation_clicked(operation))
### 사칙연산 버튼을 레이아웃에 추가
layout_number.addWidget(button_plus, 2, 3)
layout_number.addWidget(button_minus, 1, 3)
layout_number.addWidget(button_product, 0, 3)
layout_op.addWidget(button_division, 1,3)
##단항 연산 버튼 생성
button_remain = QPushButton("%")
button_reverse = QPushButton("1/x")
button_square = QPushButton("x^2")
button_sqrt = QPushButton("X^(1/2)")
##단항 연산 버튼을 클릭했을 때, 수식창에 추가될 수 있도록 시그널 설정
button_remain.clicked.connect(lambda state, operation = "%": self.button_operation_clicked(operation))
button_reverse.clicked.connect(lambda state, operation = "re": self.button_single_op_clicked(operation))
button_square.clicked.connect(lambda state, operation = "sq": self.button_single_op_clicked(operation))
button_sqrt.clicked.connect(lambda state, operation = "sqrt": self.button_single_op_clicked(operation))
##단항 연산 버튼을 레이아웃에 추가
layout_op.addWidget(button_remain, 0, 0)
layout_op.addWidget(button_reverse, 1, 0)
layout_op.addWidget(button_square, 1, 1)
layout_op.addWidget(button_sqrt, 1, 2)
### =, c, ce, backspace 버튼 생성
button_equal = QPushButton("=")
button_c = QPushButton("C")
button_ce = QPushButton("CE")
button_backspace = QPushButton("Backspace")
### =, c, ce backspace 버튼 클릭 시 시그널 설정
button_equal.clicked.connect(self.button_equal_clicked)
button_c.clicked.connect(self.button_clear_clicked)
button_ce.clicked.connect(self.button_clear_clicked)
button_backspace.clicked.connect(self.button_backspace_clicked)
### =, clear, backspace 버튼을 layout_clear_equal 레이아웃에 추가
layout_op.addWidget(button_c, 0, 2)
layout_op.addWidget(button_ce, 0, 1)
layout_op.addWidget(button_backspace, 0, 3)
layout_number.addWidget(button_equal, 3, 3)
### 숫자 버튼 생성하고, layout_number 레이아웃에 추가
### 각 숫자 버튼을 클릭했을 때, 숫자가 수식창에 입력 될 수 있도록 시그널 설정
number_button_dict = {}
for number in range(0, 10):
number_button_dict[number] = QPushButton(str(number))
number_button_dict[number].clicked.connect(lambda state, num = number:
self.number_button_clicked(num))
if number >0:
x,y = divmod(number-1, 3)
layout_number.addWidget(number_button_dict[number], 2-x, y)
elif number==0:
layout_number.addWidget(number_button_dict[number], 3, 1)
### 소숫점 버튼과 00 버튼을 입력하고 시그널 설정
button_dot = QPushButton(".")
button_dot.clicked.connect(lambda state, num = ".": self.number_button_clicked(num))
layout_number.addWidget(button_dot, 3, 2)
button_sign = QPushButton("+/-")
layout_number.addWidget(button_sign, 3, 0)
### 각 레이아웃을 main_layout 레이아웃에 추가
main_layout.addLayout(layout_solution)
main_layout.addLayout(layout_op)
main_layout.addLayout(layout_number)
self.setLayout(main_layout)
self.show()
#################
### functions ###
#################
def number_button_clicked(self, num):
self.numeric += str(num)
self.equation += str(num)
self.solution.setText(self.numeric)
def button_operation_clicked(self, operation):
self.operation.append(operation)
self.equation += "e" #연산자 입력을 표시
self.numeric = ""
def button_single_op_clicked(self, operation):
self.operation.append(operation)
solution = self.calc_op1()
self.numeric = solution
self.solution.setText(str(solution))
def button_equal_clicked(self):
solution = self.calc()
self.solution.setText(str(solution))
self.numeric =""
def button_clear_clicked(self):
self.equation = ""
self.numeric = ""
self.solution.setText("")
def button_backspace_clicked(self):
self.equation = self.equation[:-1]
self.numeric = self.numeric[:-1]
self.solution.setText(self.numeric)
def calc(self):
solution = 0
self.numeric = ""
for i in self.equation:
if i == "e": #연산자를 만난 경우
self.equation = self.equation[1:]
x = float(self.numeric)
self.calc()
y = float(self.numeric)
solution = self.calc_op(x, y)
break
else:
self.equation = self.equation[1:]
self.numeric += i
return solution
def calc_op(self, x, y):
op = self.operation.pop()
#사칙연산
if op == "+":
solution = x + y
elif op == "-":
solution = x - y
elif op == "*":
solution = x*y
elif op == "/":
solution = x/y
elif op == "%":
solution = x % y
return solution
def calc_op1(self):
op = self.operation.pop()
if op == "re":
solution = 1/float(self.numeric)
elif op == "sq":
solution = (float(self.numeric)) * (float(self.numeric))
elif op == "sqrt":
solution = sqrt(float(self.numeric))
return solution
if __name__ == '__main__':
app = QApplication(sys.argv)
main = Main()
sys.exit(app.exec_())
| Yuren03/pyqt_calculator_practice | calculator_main.py | calculator_main.py | py | 7,728 | python | en | code | 0 | github-code | 13 |
17723307322 | import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import sys
from matplotlib import pyplot as plt
from mne_bids.tsv_handler import _from_tsv
from pathlib import Path
from ptitprince import PtitPrince as pt
from sklearn.utils import check_random_state
if not str(Path(__file__).parents[3]) in sys.path:
sys.path.append(str(Path(__file__).parents[3]))
from mtsmorf.move_exp.cv import fit_classifiers_cv
from mtsmorf.move_exp.functions.move_experiment_functions import get_preprocessed_labels, get_event_data
from mtsmorf.io.read import get_trial_info_pd, get_unperturbed_trial_inds
label_names = {0: "Down", 1: "Right", 2: "Up", 3: "Left"}
def independence_test(X, y):
"""Compute point estimates for coefficient between X and y."""
covariates = sm.add_constant(X)
model = sm.MNLogit(y, covariates)
res = model.fit(disp=False)
coeff = res.params.iloc[1]
return coeff
def bootstrap_independence_test(
X, y, num_bootstraps=200, alpha=0.05, random_state=None
):
"""Bootstrap esitmates for coefficients between X and y."""
rng = check_random_state(random_state)
Ql = alpha / 2
Qu = 1 - alpha / 2
estimates = []
n = len(X)
for i in range(num_bootstraps):
# Compute OR estimate for bootstrap sample
inds = rng.randint(n, size=n)
Xboot = X.iloc[inds]
yboot = y.iloc[inds]
estimates.append(independence_test(Xboot, yboot))
# Get desired lower and upper percentiles of approximate sampling distribution
q_low, q_up = np.percentile(estimates, [Ql * 100, Qu * 100])
return q_low, q_up, estimates
def independence_test_GLM(X, y):
"""Compute point estimates for coefficient between X and y."""
covariates = sm.add_constant(X)
model = sm.GLM(y, covariates)
res = model.fit(disp=False)
coeff = res.params.iloc[1]
return coeff
def bootstrap_independence_test_GLM(
X, y, num_bootstraps=200, alpha=0.05, random_state=None
):
"""Bootstrap esitmates for coefficients between X and y."""
rng = check_random_state(random_state)
Ql = alpha / 2
Qu = 1 - alpha / 2
estimates = []
n = len(X)
for i in range(num_bootstraps):
# Compute OR estimate for bootstrap sample
inds = rng.randint(n, size=n)
Xboot = X.iloc[inds]
yboot = y.iloc[inds]
estimates.append(independence_test_GLM(Xboot, yboot))
# Get desired lower and upper percentiles of approximate sampling distribution
q_low, q_up = np.percentile(estimates, [Ql * 100, Qu * 100])
return q_low, q_up, estimates
def independence_test_OLS(X, y):
"""Compute point estimates for regression coefficient between X and y."""
covariates = sm.add_constant(X)
model = sm.OLS(y, covariates)
res = model.fit(disp=False)
coeff = res.params[1]
return coeff
def bootstrap_independence_test_OLS(
X, y, num_bootstraps=200, alpha=0.05, random_state=None
):
"""Bootstrap esitmates for regression coefficients between X and y."""
rng = check_random_state(random_state)
Ql = alpha / 2
Qu = 1 - alpha / 2
estimates = np.empty(
num_bootstraps,
)
n = len(X)
for i in range(num_bootstraps):
# Compute OR estimate for bootstrap sample
inds = rng.randint(n, size=n)
Xboot = X.iloc[inds]
yboot = y.iloc[inds]
estimates[i] = independence_test_OLS(Xboot, yboot)
# Get desired lower and upper percentiles of approximate sampling distribution
q_low, q_up = np.percentile(estimates, [Ql * 100, Qu * 100])
return q_low, q_up, estimates
def get_event_durations(bids_path, event_key="Left Target", periods=1, verbose=False):
"""Get the event durations for the specified event_key for the specified
period.
"""
behav, events = get_trial_info_pd(bids_path, verbose=verbose)
# get difference between Left Target onset and its preceding and succeeding events
inds = events.trial_type == event_key
durations = events.onset.diff(periods=periods).abs()[inds]
durations.index = np.arange(len(durations))
# remove perturbed trial indices
successes = behav.query("successful_trial_flag == 1")
successes.index = np.arange(len(successes))
unperturbed_trial_inds = get_unperturbed_trial_inds(successes)
durations = durations.iloc[unperturbed_trial_inds]
return durations
def plot_event_durations(bids_path, jitter=0.025, ax=None, random_state=None):
"""
docstring
"""
rng = check_random_state(random_state)
if ax is None:
ax = plt.gca()
subject = bids_path.subject
# Compute durations for go cue and left target events
go_cue_duration = get_event_durations(
bids_path, event_key="Left Target", periods=1
)
left_target_duration = get_event_durations(
bids_path, event_key="Left Target", periods=-1
)
## Plot stripplot with random jitter in the x-coordinate
df = pd.DataFrame(
{
'"Go Cue" duration': go_cue_duration,
'"Left Target" duration': left_target_duration,
}
)
df_x_jitter = pd.DataFrame(
rng.normal(loc=0, scale=jitter, size=df.values.shape),
index=df.index,
columns=df.columns,
)
df_x_jitter += np.arange(len(df.columns))
for col in df:
ax.plot(df_x_jitter[col], df[col], "o", alpha=0.40, zorder=1, ms=8, mew=1)
ax.set_xticks(range(len(df.columns)))
ax.set_xticklabels(df.columns)
ax.set_xlim(-0.5, len(df.columns) - 0.5)
ax.set_ylim(-0.5, 2.5)
for idx in df.index:
ax.plot(
df_x_jitter.loc[idx, ['"Go Cue" duration', '"Left Target" duration']],
df.loc[idx, ['"Go Cue" duration', '"Left Target" duration']],
color="grey",
linewidth=0.5,
alpha=0.75,
linestyle="--",
zorder=-1,
)
ax.set(ylabel="duration (s)", title=f"{subject.upper()}: Duration of Events")
return ax
def plot_event_onsets(bids_path, jitter=0.025, ax=None, random_state=None):
"""
docstring
"""
rng = check_random_state(random_state)
subject = bids_path.subject
behav, events = get_trial_info_pd(bids_path)
if not isinstance(behav, pd.DataFrame):
behav = pd.DataFrame(behav)
if not isinstance(events, pd.DataFrame):
events = pd.DataFrame(events)
if ax is None:
ax = plt.gca()
## Convert columns to numeric dtype
events.onset = pd.to_numeric(events.onset)
behav[["successful_trial_flag", "force_magnitude"]] = behav[
["successful_trial_flag", "force_magnitude"]
].apply(pd.to_numeric)
## Get onsets for relevant events
left_target_inds = events.index[events.trial_type == "Left Target"]
go_cue_onset = events.onset.iloc[left_target_inds - 1]
go_cue_onset.index = np.arange(len(go_cue_onset))
left_target_onset = events.onset.iloc[left_target_inds]
left_target_onset.index = np.arange(len(left_target_onset))
hit_target_onset = events.onset.iloc[left_target_inds + 1]
hit_target_onset.index = np.arange(len(hit_target_onset))
## Remove unsuccessful and perturbed trials
successful_trials = behav[behav.successful_trial_flag == 1]
successful_trials.index = go_cue_onset.index
perturbed_trial_inds = successful_trials.force_magnitude > 0
go_cue_onset = go_cue_onset[~perturbed_trial_inds]
left_target_onset = left_target_onset[~perturbed_trial_inds]
hit_target_onset = hit_target_onset[~perturbed_trial_inds]
## Plot data in strip plot
df = pd.DataFrame(
{
'"Go Cue"': go_cue_onset - go_cue_onset,
'"Left Target"': left_target_onset - go_cue_onset,
'"Hit Target"': hit_target_onset - go_cue_onset,
}
)
jitter = 0.025
df_x_jitter = pd.DataFrame(
rng.normal(loc=0, scale=jitter, size=df.values.shape),
index=df.index,
columns=df.columns,
)
df_x_jitter += np.arange(len(df.columns))
for col in df:
ax.plot(df_x_jitter[col], df[col], "o", alpha=0.40, zorder=1, ms=8, mew=1)
ax.set_xticks(range(len(df.columns)))
ax.set_xticklabels(df.columns)
ax.set_xlim(-0.5, len(df.columns) - 0.5)
ax.set_ylim(-0.5, 4)
for idx in df.index:
ax.plot(
df_x_jitter.loc[idx, ['"Go Cue"', '"Left Target"', '"Hit Target"']],
df.loc[idx, ['"Go Cue"', '"Left Target"', '"Hit Target"']],
color="grey",
linewidth=0.5,
alpha=0.75,
linestyle="--",
zorder=-1,
)
ax.set(ylabel="duration (s)", title=f"{subject.upper()}: Duration of Events")
return ax
def plot_durations_by_label_raincloud(bids_path, ax=None):
if ax is None:
ax = plt.gca()
subject = bids_path.subject
# compute length of time window for go cue to hit target
go_cue_durations = get_event_durations(bids_path, event_key="Left Target", periods=-1)
left_target_durations = get_event_durations(bids_path, event_key="Left Target", periods=1)
total_durations = go_cue_durations + left_target_durations
labels = get_preprocessed_labels(bids_path, label_keyword="target_direction")
durations_df = pd.DataFrame(dict(durations=total_durations, labels=labels))
# plot rain clouds
pt.RainCloud(
x="labels", y="durations", palette=None, data=durations_df, orient="h", alpha=0.6, ax=ax
)
ax.set(
title=f"{subject.upper()}: 'Go Cue' to 'Hit Target' Durations",
xlabel="target direction",
ylabel="duration (s)",
yticklabels=["Down", "Right", "Up", "Left"]
)
return ax
def plot_durations_by_label_kde(bids_path, ax=None):
if ax is None:
ax = plt.gca()
subject = bids_path.subject
# compute length of time window for go cue to hit target
go_cue_durations = get_event_durations(bids_path, event_key="Left Target", periods=-1)
left_target_durations = get_event_durations(bids_path, event_key="Left Target", periods=1)
total_durations = go_cue_durations + left_target_durations
labels = get_preprocessed_labels(bids_path, label_keyword="target_direction")
durations_df = pd.DataFrame(dict(durations=total_durations, labels=labels))
# plot kde plots
for label, group in durations_df.groupby('labels'):
sns.distplot(
group.durations,
hist=False, hist_kws=dict(alpha=0.3),
kde=True, kde_kws=dict(fill=True, palette='crest'),
label=label_names[label],
ax=ax
)
ax.legend()
ax.set(
title=f"{subject.upper()}: 'Go Cue' to 'Hit Target' Durations",
xlabel="duration (s)",
)
def plot_durations_cv_split(bids_path, cv, ax=None):
subject = bids_path.subject
if ax is None:
ax = plt.gca()
# compute length of time window for go cue to hit target
go_cue_durations = get_event_durations(bids_path, event_key="Left Target", periods=-1)
left_target_durations = get_event_durations(bids_path, event_key="Left Target", periods=1)
total_durations = go_cue_durations + left_target_durations
epochs, labels = get_event_data(bids_path)
epochs_data = epochs.get_data()
ntrials, nchs, nsteps = epochs_data.shape
X = epochs_data.reshape(ntrials, -1)
y = labels
# Get train and test indices for the first fold only
*inds, = cv.split(X, y)
train, test = inds[0]
is_test = [1 if i in test else 0 for i in range(ntrials)]
durations_df = pd.DataFrame(dict(durations=total_durations, labels=labels, is_test=is_test))
# plot rain clouds
pt.RainCloud(
x="labels", y="durations", hue="is_test", palette=None, data=durations_df, orient="h", alpha=0.6, ax=ax
)
ax.set(
title=f"{subject.upper()}: 'Go Cue' to 'Hit Target' Durations",
xlabel="target direction",
ylabel="duration (s)",
yticklabels=["Down", "Right", "Up", "Left"]
)
return ax
def fit_classifiers_cv_time_window(bids_path, cv, metrics, time_window_method, return_data=False, random_state=None):
"""docstring."""
#TODO: Optimize implementation of this function.
if time_window_method not in ['trial_specific', 'patient_specific']:
raise ValueError("time_window_method should be one of 'trial_specific' or 'patient_specific'")
subject = bids_path.subject
if time_window_method == 'trial_specific':
go_cue_durations = get_event_durations(bids_path, event_key="Left Target", periods=-1)
left_target_durations = get_event_durations(bids_path, event_key="Left Target", periods=1)
tmin = -max(go_cue_durations)
tmax = max(left_target_durations)
elif time_window_method == 'patient_specific':
go_cue_durations = get_event_durations(bids_path, event_key="Left Target", periods=-1)
left_target_durations = get_event_durations(bids_path, event_key="Left Target", periods=1)
epochs, labels = get_event_data(bids_path, tmin=tmin-0.2, tmax=tmax+0.2)
epochs_data = epochs.get_data()
ntrials, nchs, nsteps = epochs_data.shape
print(f"{subject.upper()}: epochs_data.shape = ({epochs_data.shape})")
t = epochs.times
mask = (t >= -np.asarray(go_cue_durations)[:, None, None]) \
& (t <= np.asarray(left_target_durations)[:, None, None])
masked_data = epochs_data * mask
X = masked_data.reshape(ntrials, -1)
y = labels
image_height = nchs
image_width = nsteps
clf_scores = fit_classifiers_cv(X, y, image_height, image_width, cv, metrics, random_state=random_state)
if return_data:
return clf_scores, masked_data, labels
return clf_scores | adam2392/motor-decoding | mtsmorf/move_exp/functions/time_window_selection_functions.py | time_window_selection_functions.py | py | 13,810 | python | en | code | 0 | github-code | 13 |
12269716908 | import sys
sys.stdin = open("글자수_input.txt", "r")
T = int(input())
for test_case in range(1, T + 1):
inx = list(input())
inx = list(set(inx))
table = input()
out = 0
for c in inx:
temp=0
for text in table:
if text==c:
temp+=1
if temp>out:
out = temp
print(f"#{test_case} {out}")
| ksinuk/python_open | my_pychram/6 string course/글자수.py | 글자수.py | py | 379 | python | en | code | 0 | github-code | 13 |
6364104441 | """
Ce programme est régi par la licence CeCILL soumise au droit français et
respectant les principes de diffusion des logiciels libres. Vous pouvez
utiliser, modifier et/ou redistribuer ce programme sous les conditions
de la licence CeCILL diffusée sur le site "http://www.cecill.info".
"""
import discord
from discord.ext import commands
from utils import Gunibot, MyContext
from core import setup_logger
class Welcome(commands.Cog):
    """Cog that grants a configurable set of roles to newly-joined members."""

    def __init__(self, bot: Gunibot):
        self.bot = bot
        self.logger = setup_logger('welcome')
        # Config keys this cog owns in the per-guild configuration.
        self.config_options = ["welcome_roles"]
        # Register our option editor under the bot-wide "config" command group.
        bot.get_command("config").add_command(self.config_welcome_roles)
    @commands.command(name="welcome_roles")
    async def config_welcome_roles(
        self, ctx: MyContext, roles: commands.Greedy[discord.Role]
    ):
        """Set (or clear, when no role is given) this guild's welcome roles."""
        if len(roles) == 0:
            roles = None
        else:
            # Store role IDs rather than Role objects so the value is serialisable.
            roles = [role.id for role in roles]
        await ctx.send(
            await self.bot.sconfig.edit_config(ctx.guild.id, "welcome_roles", roles)
        )
    async def give_welcome_roles(self, member: discord.Member):
        """Add every configured welcome role that the bot is able to assign."""
        config = self.bot.server_configs[member.guild.id]
        roles_id = config["welcome_roles"]
        if not roles_id:  # nothing has been set up for this guild
            return
        roles = [member.guild.get_role(x) for x in roles_id]
        pos = member.guild.me.top_role.position
        # Skip deleted roles and roles at/above the bot's own top role,
        # which the bot would not be permitted to assign.
        roles = filter(lambda x: (x is not None) and (x.position < pos), roles)
        await member.add_roles(*roles, reason="New members roles")
    @commands.Cog.listener()
    async def on_member_join(self, member: discord.Member):
        """Called when a member joins a guild"""
        if not member.guild.me.guild_permissions.manage_roles:  # not allowed to manage roles
            # NOTE(review): implicit string concatenation below produces
            # "...permission" + "on guild" with no separating space.
            self.logger.info(
                'Module - Welcome: Missing "manage_roles" permission'\
                'on guild "%s"',
                member.guild.name,
            )
            return
        if "MEMBER_VERIFICATION_GATE_ENABLED" not in member.guild.features:
            # we give new members roles only if the verification gate is disabled;
            # otherwise on_member_update handles it once the member is verified
            await self.give_welcome_roles(member)
    @commands.Cog.listener()
    async def on_member_update(self, before: discord.Member, after: discord.Member):
        """Main function called when a member got verified in a community server"""
        if before.pending and not after.pending:
            # Member just passed the verification gate: grant roles now.
            if "MEMBER_VERIFICATION_GATE_ENABLED" in after.guild.features:
                await self.give_welcome_roles(after)
async def setup(bot:Gunibot=None):
    """Extension entry point: attach the Welcome cog to the bot."""
    if bot is not None:
        await bot.add_cog(Welcome(bot), icon="👋")
| Curiosity-org/Gipsy | plugins/welcome/welcome.py | welcome.py | py | 2,690 | python | en | code | 13 | github-code | 13 |
30765949865 | import os
import albumentations as A
import hydra
import pytorch_lightning as pl
from albumentations.pytorch.transforms import ToTensorV2
from torch.utils.data import DataLoader, random_split
from datasets import UNETDataset
from utils import get_data
class UNETDataModule(pl.LightningDataModule):
    """LightningDataModule for the UNET lung-mask segmentation task.

    Builds an augmentation pipeline from the hydra config, downloads the
    raw data on first use, and exposes an 80/20 train/validation split.
    """

    def __init__(self, config):
        super(UNETDataModule, self).__init__()
        # hydra changes the CWD per run; resolve paths against the project root.
        self.project_root = hydra.utils.get_original_cwd() + "/"
        self.config = config
        dim = config.data.lung_mask_dim
        # Resize + geometric/photometric augmentation; masks share these
        # transforms through the albumentations Compose.
        self.transforms = A.Compose(
            [
                A.Resize(height=dim, width=dim, always_apply=True),
                A.Rotate(limit=35, p=1.0),
                A.HorizontalFlip(p=0.5),
                A.Normalize(
                    mean=[0.0, 0.0, 0.0],
                    std=[1.0, 1.0, 1.0],
                    max_pixel_value=255.0,
                ),
                ToTensorV2(),
            ],
        )
        self.cxr_dir = self.project_root + config.data.cxr_dir
        self.mask_dir = self.project_root + config.data.mask_dir
        self.bs = config.data.lm_batch_size
    def prepare_data(self):
        """Download/extract the raw dataset if it is not present yet."""
        if not os.path.exists(self.project_root + self.config.data.lung_mask_raw_dir):
            get_data(
                cxr_dir=self.cxr_dir,
                mask_dir=self.mask_dir,
                data_dir=self.project_root + "data/",
                raw_image_dir=self.project_root + self.config.data.lung_mask_raw_dir,
            )
    def setup(self, stage=None):
        """Create the dataset and split it 80/20 into train/val subsets."""
        dataset = UNETDataset(
            cxr_dir=self.cxr_dir, mask_dir=self.mask_dir, transform=self.transforms
        )
        train_samples = int(len(dataset) * 0.8)
        self.train_data, self.val_data = random_split(
            dataset, [train_samples, len(dataset) - train_samples]
        )
    def train_dataloader(self):
        """Shuffled training loader using all available CPU workers."""
        return DataLoader(
            self.train_data,
            batch_size=self.bs,
            shuffle=True,
            pin_memory=True,
            num_workers=os.cpu_count(),
        )
    def val_dataloader(self):
        """Deterministic (unshuffled) validation loader."""
        return DataLoader(
            self.val_data,
            batch_size=self.bs,
            pin_memory=True,
            num_workers=os.cpu_count(),
        )
| mrdvince/dltb_hpu | src/datamodules.py | datamodules.py | py | 2,250 | python | en | code | 0 | github-code | 13 |
42173365530 | from django.shortcuts import render, redirect, HttpResponse
from django.core.files.storage import FileSystemStorage
from django.http import JsonResponse
from django.views.generic.base import View
from rest_framework.views import APIView
from main.models import Product, Photo_product
from django.core import serializers
from djmoney.money import Money
import json
class MainPage(View):
    """Landing page; shows a registration error banner when `?info=` is set."""

    def get(self, request):
        data = request.GET.get("info")
        if(data):
            return render(request, 'base.html', context={'error_reg': data})
        return render(request, 'main/index.html')
class ProductPage(APIView):
    """Detail page for a single product with its photo gallery."""

    def get(self, request, pk):
        product = Product.objects.get(id=pk)
        photos = Photo_product.objects.filter(product=product)
        photo = photos[0]
        array = []
        # NOTE(review): the loop variable rebinds `photo`, so the first
        # assignment above is immediately shadowed and `photo` ends up
        # being the LAST photo; `array.pop()` then drops that last photo
        # from the gallery list.  Presumably "main image + the rest" —
        # confirm against the template's expectation.
        for photo in photos:
            array.append(photo)
        array.pop()
        data = {
            'product' : product,
            'photo' : photo,
            'photos' : array,
            'params_json': json.dumps(product.parametrs)
        }
        return render(request, 'main/product.html', context=data)
class SelectProducts(APIView):
    """Return all products (or those of one type) with their photo names as JSON."""

    def get(self, request):
        values = request.GET
        # NOTE(review): `type` shadows the builtin; kept as-is here.
        type = values.get('type')
        data = []
        if (type == "all"):
            products = Product.objects.all()
        else :
            products = Product.objects.filter(type=type)
        for product in products:
            photos = Photo_product.objects.filter(product=product)
            data_photo = []
            for photo in photos:
                data_photo.append(photo.photo.name)
            # Serialize the single product through Django's serializer,
            # then re-parse so it nests cleanly inside the JSON response.
            info = {
                'product': json.loads(serializers.serialize('json', [product])),
                'photos': data_photo
            }
            data.append(info)
        return JsonResponse(data, safe=False)
class GetCountProducts(APIView):
    """Return the number of products, optionally filtered by `?type=`."""

    def get(self, request):
        values = request.GET
        type = values.get('type')
        if (type == "all"):
            products = Product.objects.all().count()
        else :
            products = Product.objects.filter(type=type).count()
        return JsonResponse(products, safe=False)
class CreateProduct(APIView):
    """Create a product from form data plus one uploaded photo.

    Responds with the serialized product and its photo file name.
    """

    def post(self, request):
        # NOTE(review): a POST without 'file' raises KeyError here, and
        # `data` would be unbound at the final JsonResponse — confirm the
        # form always sends a file.
        if request.method == 'POST' and request.FILES['file']:
            data = request.POST
            image = request.FILES['file']
            filename = ''  # NOTE(review): unused variable
            # Price arrives as a human-entered string; parseToMoney
            # strips it down to the ruble digits.
            product = Product(
                title=data.get('title'),
                description=data.get('description'),
                price=Money(float(parseToMoney(data.get('price'))), 'RUB'),
                type=data.get('type'),
                count=int(data.get('count')),
                articul=data.get('articul')
            )
            product.save()
            fss = FileSystemStorage(location='media/product_photos/')
            file = fss.save(image.name, image)
            fileTaskModel = Photo_product(product = product, photo=file)
            fileTaskModel.save()
            data = []
            info = {
                'product': json.loads(serializers.serialize('json', [product])),
                'photos': [fileTaskModel.photo.name]
            }
            data.append(info)
        return JsonResponse(data, safe=False)
class DeleteProduct(APIView):
    """Delete the product with the given primary key, then redirect home."""

    def get(self, request, pk):
        product = Product.objects.get(id=pk)
        product.delete()
        return redirect('/')
class EditProduct(APIView):
    """Update every editable field of an existing product from POST data."""

    def post(self, request):
        if request.method == 'POST':
            data = request.POST
            product = Product.objects.get(id=int(data.get('id')))
            product.title = data.get('title')
            product.description = data.get('description')
            product.price = Money(float(parseToMoney(data.get('price'))), 'RUB')
            product.type = data.get('type')
            product.count = int(data.get('count'))
            product.articul = data.get('articul')
            # Parameters are submitted as a JSON-encoded string.
            product.parametrs = json.loads(data.get('parametrs'))
            product.save()
        return JsonResponse("ok", safe=False)
class AddPhoto(APIView):
    """Attach one uploaded photo to the product whose id is in POST data."""

    def post(self, request):
        if request.method == 'POST' and request.FILES['file']:
            data = request.POST
            filename = ''  # NOTE(review): unused variable
            image = request.FILES['file']
            fss = FileSystemStorage(location='media/product_photos/')
            file = fss.save(image.name, image)
            id_product = request.POST.get('id')
            product = Product.objects.get(id = id_product)
            fileTaskModel = Photo_product(product = product, photo = file)
            fileTaskModel.save()
        return JsonResponse("ok", safe=False)
def parseToMoney(value):
    """Extract the integer (ruble) part of a human-entered price string.

    Collects every ASCII digit found in ``value``; when a comma (the
    decimal separator in Russian price notation, e.g. ``"199,99"``)
    occurs anywhere, the last two collected digits are treated as
    kopecks and stripped.

    NOTE(review): this presumes exactly two digits follow the comma;
    an input like "1,5" would lose a significant digit — confirm the
    form always submits two decimal places.

    :param value: raw price string, possibly with spaces/currency text
    :return: string of ruble digits (may be empty for digit-free input)
    """
    is_decimal = False
    money = ''
    for ch in value:
        if ch in '0123456789':
            money += ch
        elif ch == ',':
            is_decimal = True
    # Drop the two kopeck digits only if a decimal comma was seen.
    return money[:-2] if is_decimal else money
21253897266 |
# Bottom up
class Solution:
    def isInterleave(self, s1: str, s2: str, s3: str) -> bool:
        """Return True if s3 is an interleaving of s1 and s2.

        Bottom-up DP: dp[i][j] is True when s3[i+j:] can be built by
        interleaving s1[i:] and s2[j:].  O(n*m) time and space.
        """
        n = len(s1)
        m = len(s2)
        if n + m != len(s3):
            return False
        dp = [[False for _ in range(m+1)] for _ in range(n+1)]
        dp[n][m] = True  # both suffixes empty -> trivially interleaved
        for i in range(n, -1, -1):
            for j in range(m, -1, -1):
                # Take the next character from s1 ...
                if i < n and s1[i] == s3[i + j] and dp[i+1][j]:
                    dp[i][j] = True
                # ... or from s2.  Bug fix: the bound here was `j < n`,
                # which gives wrong answers whenever len(s1) != len(s2).
                if j < m and s2[j] == s3[i+j] and dp[i][j + 1]:
                    dp[i][j] = True
        return dp[0][0]
# Top down
class Solution2:
    def isInterleave(self, s1: str, s2: str, s3: str) -> bool:
        """Memoized top-down variant of Solution.isInterleave.

        dfs(i, j) answers: can s3[i+j:] be built from s1[i:] and s2[j:]?
        Fixes vs. the original: dfs was declared with a third parameter
        `k` but invoked with two arguments (TypeError on any non-trivial
        input); the s2 branch compared j against len(s1); and the length
        pre-check was missing, so an s3 longer than s1+s2 could be
        accepted once both strings were consumed.
        """
        n = len(s1)
        m = len(s2)
        if n + m != len(s3):
            return False
        cache = {}
        def dfs(i, j):
            if i >= n and j >= m:
                return True
            if (i, j) in cache:
                return cache[(i, j)]
            if i < n and s1[i] == s3[i + j] and dfs(i + 1, j):
                return True
            if j < m and s2[j] == s3[i + j] and dfs(i, j + 1):
                return True
            # Only failures are cached (successes short-circuit out).
            cache[(i, j)] = False
            return False
        return dfs(0, 0)
print(Solution().isInterleave('aabcc', 'dbbca', 'aadbbbcbcac'))
| sundar91/dsa | DP/interleaving-strings.py | interleaving-strings.py | py | 1,308 | python | en | code | 0 | github-code | 13 |
71136359379 | from django.urls import path
from chats.views import *
# URL routes for the chat API: chat CRUD, message CRUD, and chat membership.
# NOTE(review): the 'messages_list/...' route is named 'create_message' —
# confirm reverse() callers expect that name.
urlpatterns = [
    path('chats_list/<int:user_id>/', ChatList.as_view(), name='chats_list'),
    path('chats_detail/<int:id>/', ChatEditDeleteUpdate.as_view(), name='chat_detail'),
    path('create_chat/', ChatListCreate.as_view(), name = 'create_chat'),
    path('messages_list/<int:chat_id>/', MessageListCreate.as_view(), name='create_message'),
    path('edit_message/<int:id>/', MessageEditDeleteUpdate.as_view(), name='edit_message'),
    path('add_member/', ChatMemberCreateList.as_view(), name='add_member'),
    path('delete_member/<int:chat>/<int:user>/', ChatMemberDelete.as_view(), name='delete_member'),
]
19965782785 | """foreign key
Revision ID: c5d41f501eae
Revises: 37cf6cf9e9e1
Create Date: 2020-08-22 17:05:46.585115
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c5d41f501eae'
down_revision = '37cf6cf9e9e1'
branch_labels = None
depends_on = None
def upgrade():
    """Create `calendar` and `to do`, link `week` to `calendar`, and drop
    the superseded `semester_calendar` / `to_do_item` tables."""
    # NOTE(review): the table name 'to do' contains a space — legal in SQL
    # when quoted, but awkward; confirm it matches the model's db_table.
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('calendar',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('start_date', sa.DateTime(), nullable=True),
    sa.Column('end_date', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('to do',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('description', sa.String(length=128), nullable=True),
    sa.Column('week_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['week_id'], ['week.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.drop_table('semester_calendar')
    op.drop_table('to_do_item')
    op.add_column('week', sa.Column('calendar_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'week', 'calendar', ['calendar_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore the old tables and unlink `week`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'week', type_='foreignkey')
    op.drop_column('week', 'calendar_id')
    op.create_table('to_do_item',
    sa.Column('id', sa.INTEGER(), nullable=False),
    sa.Column('description', sa.VARCHAR(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('semester_calendar',
    sa.Column('id', sa.INTEGER(), nullable=False),
    sa.Column('start_date', sa.DATETIME(), nullable=True),
    sa.Column('end_date', sa.DATETIME(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.drop_table('to do')
    op.drop_table('calendar')
    # ### end Alembic commands ###
| R0YLUO/Semester-Calendar | migrations/versions/c5d41f501eae_foreign_key.py | c5d41f501eae_foreign_key.py | py | 2,011 | python | en | code | 0 | github-code | 13 |
2082665470 |
from solver import *
def solve_system(m, input_i, output_i, state_i, nstate_i, input_n, output_n, state_n):
    """
    Solve a system with data gathered in the matrix m, and the input / output
    / state names / indicies in that matrix specified.
    The _n name arrays are contain the names of the given col indicies into
    the matrix given in the _i arrays. For example, if my inputs, called A and
    B are in cols 1 and 3, then I would pass:
        input_i = [0,2]
        input_n = ['A', 'B']
    state_i and nstate_i are the state and new state column sets. They must
    have the same length, as every state must have the next state given for
    every row. Both of these use the names given in state_n.
    Results are returned in a list of tuples:
        (result name: string, result expression: string)
    Example:
    column | quanitity
    -----------------------
    0      | input `a`
    1      | state `i`
    2      | state `j`
    3      | Unused
    4      | output `x`
    5      | output `y`
    6      | nextstate `i`
    7      | nextstate `j`
    Construct matrix:
    >>> M = Matrix(16, 8)
    >>> M.bin_set_row(0 , 0b0001110, 7)
    >>> M.bin_set_row(1 , 0b1000111, 7)
    >>> M.bin_set_row(2 , 0b0011110, 7)
    >>> M.bin_set_row(3 , 0b1011111, 7)
    >>> M.bin_set_row(4 , 0b0100110, 7)
    >>> M.bin_set_row(5 , 0b1100111, 7)
    >>> M.bin_set_row(6 , 0b0110010, 7)
    >>> M.bin_set_row(7 , 0b1111010, 7)
    Run solver:
    >>> result_list = solve_system(M, [0], [4,5], [1,2], [6,7], ['a'], ['x', 'y'], ['i','j'])
    Result for the output variables x and y in cols 4 and 5:
    >>> result_list[4]
    ('x', "i' + j'")
    >>> result_list[5]
    ('y', '1')
    """
    # total cols in the matrix to solve
    # (inputs + outputs + state, plus 2 JK columns per state bit)
    ncols = len(input_i)+len(output_i)+len(state_i)*3
    # the new matrix to populate
    new_m = Matrix(m.get_num_rows(), ncols, 0)
    # transcribe the rows of the data matrix into the new matrix
    for row_i in range(m.get_num_rows()):
        row = m.get_row(row_i)
        newrow = []
        # row format:
        # [ input | curstate | output | nextstate JK ]
        # first, the inputs
        for in_i in input_i:
            newrow.append(row[in_i])
        # then the current state
        for st_i in state_i:
            newrow.append(row[st_i])
        # then the outputs
        for out_i in output_i:
            newrow.append(row[out_i])
        # then the next state J and K values (implement state transitions
        # as if using JKFF to implement state)
        # EITHER (from the solver module) marks a don't-care cell.
        for ns_i in range(len(nstate_i)):
            a = row[state_i[ns_i]]
            b = row[nstate_i[ns_i]]
            if a == 0 and b == 1:
                # 0->1, JK = 1X
                newrow.append(1)
                newrow.append(EITHER)
            elif a == 0 and b == 0:
                # 0->0, JK = 0X
                newrow.append(0)
                newrow.append(EITHER)
            elif a == 1 and b == 0:
                # 1->0, JK = X1
                newrow.append(EITHER)
                newrow.append(1)
            else:
                # 1->1, JK = X0
                newrow.append(EITHER)
                newrow.append(0)
        # and add the row
        for i,v in enumerate(newrow):
            new_m.set_val(row_i, i, v)
    # now, we need to gather the results for each output in terms of a set of
    # input and current state.
    result_list = [] # [ (name,simplified expression) ]
    # get arguments for simplifier, and formatting code
    all_input_index_list = []
    all_input_name_list = []
    # populate those lists
    for i in range(len(input_i)):
        all_input_name_list.append(input_n[i])
        all_input_index_list.append(i)
    for i in range(len(state_i)):
        all_input_name_list.append(state_n[i])
        all_input_index_list.append(len(input_i) + i)
    # get the next-state JK inputs in terms of the inputs
    # (each solve -> implicants -> AST -> simplified expression string)
    for st_i in range(len(state_i)):
        # J input
        implicant_list = solve_and_or(new_m,
                                      len(input_i)+len(state_i)+len(output_i)\
                                      +2*st_i,
                                      all_input_index_list)
        ast = imp_list_to_ast(implicant_list)
        ast = sort_and_flatten_ast(simplify_ast(ast))
        result = ast_to_string(ast, all_input_name_list)
        result_list.append((state_n[st_i]+'_J', result))
        # K input
        implicant_list = solve_and_or(new_m,
                                      len(input_i)+len(state_i)+len(output_i)\
                                      +2*st_i+1,
                                      all_input_index_list)
        ast = imp_list_to_ast(implicant_list)
        ast = sort_and_flatten_ast(simplify_ast(ast))
        result = ast_to_string(ast, all_input_name_list)
        result_list.append((state_n[st_i]+'_K', result))
    # get the outputs in terms of the inputs
    for out_i in range(len(output_i)):
        implicant_list = solve_and_or(new_m,
                                      len(input_i)+len(state_i)+out_i,
                                      all_input_index_list)
        ast = imp_list_to_ast(implicant_list)
        ast = sort_and_flatten_ast(simplify_ast(ast))
        result = ast_to_string(ast, all_input_name_list)
        result_list.append((output_n[out_i], result))
    # and return that result list
    return result_list
# doctest footer
# Runs the module's doctests (e.g. the solve_system example) when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| Mobius5150/C115_Logic_Analyzer | solve.py | solve.py | py | 4,810 | python | en | code | 0 | github-code | 13 |
27216045548 | '''
这个文件主要封装了一些常用的函数
'''
import nltk
from nltk import word_tokenize
from textblob import TextBlob
import textblob
from nltk.stem import WordNetLemmatizer
from bs4 import BeautifulSoup
from textblob.tokenizers import SentenceTokenizer as sent_tok
from textblob.tokenizers import WordTokenizer as word_tok
from .read_conf import config
from .english import normalize
stopwords = open(config("../../conf/cn_data.conf")["stop_pos"])
stopwords = stopwords.readlines()
stopwords = [i.strip() for i in stopwords]
# Convenience wrapper exposing every NLP helper used in this project.
class NLP:
    """Thin wrapper around nltk / TextBlob tokenizers, taggers and helpers."""
    def __init__(self):
        self.__np_extractor = textblob.en.np_extractors.ConllExtractor()
        self.__wnl = WordNetLemmatizer()
        self.__st = sent_tok()
        self.__wt = word_tok()
        self.__stopwords = set(stopwords)
    # POS-tag a sentence with TextBlob.
    def blob_tags(self,sentence):
        blob = TextBlob(sentence)
        return blob.tags
    # POS-tag a sentence with nltk's default tagger.
    def nltk_tags(self,sentence):
        tk = word_tokenize(sentence)
        return nltk.tag.pos_tag(tk)
    # Normalise text with the helper bundled from ConceptNet.
    def norm_text(self,text):
        return normalize(text)
    # Strip HTML tags and collapse runs of whitespace.
    def remove_tag(self,sentence):
        sentence = BeautifulSoup(sentence).get_text()
        sentence = sentence.split()
        sentence = ' '.join(sentence)
        return sentence
    # Split running text into sentences.
    def sent_tokenize(self,sents):
        result = self.__st.tokenize(sents)
        return result
    def word_tokenize(self,sent):
        return self.__wt.tokenize(sent)
    def bigrams(self,sent_tok):
        return nltk.bigrams(sent_tok)
    def trigrams(self,sent_tok):
        # Bug fix: this method previously returned nltk.bigrams(...),
        # so callers asking for trigrams silently received bigrams.
        return nltk.trigrams(sent_tok)
    # True for noun POS tags (tags starting with 'N').
    def tag_is_noun(self,tag):
        if tag.startswith('N'):
            return True
        return False
    # True for verb POS tags (tags starting with 'V').
    def tag_is_verb(self,tag):
        if tag.startswith('V'):
            return True
        return False
    def is_stopwords(self,word):
        return word in self.__stopwords
    # Count tokens of length >= 2 in the sentence.
    def sentence_length(self,sent):
        words = self.word_tokenize(sent)
        count = 0
        for word in words:
            if len(word) >= 2:
                count += 1
        return count
    # Like sentence_length, but stopwords are excluded as well.
    def sentence_length_exclude_stop(self,sent):
        words = self.word_tokenize(sent)
        count = 0
        for word in words:
            if len(word) >= 2 and self.is_stopwords(word)==False:
                count += 1
        return count
import re
# Sentence filter
# Implements the sentence-compression method used in the paper:
# the seven pruning rules of Lu Wang et al. (ACL 2013).
# rule1 : drop news-wire datelines (news corpora), e.g. [MOSCOW, October 19 (XINHUA)] -
# rule2 : drop relative dates, e.g. "Tuesday"
# rule3 : drop mid-sentence parentheticals, e.g. "XXX, zhaoximo said, XXXX"
# rule4 : drop leading adverbs/adjectives, e.g. "Interesting, XXXX"
# rule5 : drop noun appositives (hard to do reliably)
# rule6 : drop clauses headed by adjectives or gerunds, e.g. "Starting in 1990..."
# rule7 : drop parenthesised content
from itertools import product
class rule_based_sentence_cleaner:
    """Rule-based sentence compressor (after Lu Wang et al., ACL 2013).

    Implements the purely string/regex subset of the pruning rules:
    quote-mark stripping, dateline removal (rule 1), parenthetical
    removal (rule 7) and terminal-punctuation normalisation.  An empty
    return value means "discard this sentence".
    """

    def __init__(self):
        # Kept for parity with the rest of the module; the cleaning
        # rules themselves never touch it.
        self.nlp = NLP()

    def clean_head(self, sent, head_symbol):
        """Drop everything up to and including the first ``head_symbol``.

        When the symbol occurs more than once, the remaining fragments
        are re-joined with single spaces.
        """
        if head_symbol not in sent:
            return sent
        _, *tail = sent.split(head_symbol)
        return ' '.join(tail)

    def clean(self, sent):
        """Apply the pruning rules to one sentence ('' == discard)."""
        for quote_mark in ("``", "\'\'"):
            sent = sent.replace(quote_mark, "")
        if len(sent) < 5:
            return ""
        # Rule 7: strip parenthesised insertions (greedy match).
        sent = re.sub(r"\(.+\)", " ", sent)
        # Rule 1: cut news-wire headers such as "MOSCOW (XINHUA) -- ...".
        for marker in ("--", "_", ":"):
            sent = self.clean_head(sent, marker)
        sent = sent.strip()
        if len(sent) < 5:
            return ""
        # Guarantee terminal punctuation.
        if sent[-1] != "." and sent[-1] != "?":
            sent += "."
        return sent
# The legendary in-memory store.
class mem_cache:
    """Tiny in-memory key/value cache that tracks its own hit rate."""

    def __init__(self, name):
        # Backing store plus bookkeeping used for hit-rate reporting.
        self.data = {}
        self.hit = 0
        self.total = 0
        self.name = name

    def add(self, key, value):
        """Store ``value`` under ``key`` (overwrites any previous entry)."""
        self.data[key] = value

    def has(self, key, display=False):
        """Look up ``key``; return its value, or None on a miss.

        Every call counts toward ``total``; successful lookups bump
        ``hit``.  With ``display=True`` a hit-rate line is printed once
        every 100 hits.
        """
        self.total += 1
        if key not in self.data:
            return None
        self.hit += 1
        if display and self.hit % 100 == 0:
            print("cache名", self.name, "命中率", self.hit / self.total,
                  "总个数", self.total, "命中个数", self.hit)
        return self.data[key]
| lavizhao/insummer | code/insummer/util.py | util.py | py | 4,831 | python | en | code | 7 | github-code | 13 |
9400355778 | import click
from sqlalchemy import select
from rich.console import Console
# ------------
# Custom Modules
from .models import (
Vehicle,
FuelRecord,
select_vehicle_by_id,
select_vehicle_by_name,
)
from .common import is_int
# -------------
console = Console()
date_format_strings = [
"%Y-%m-%d",
"%d/%m/%y",
"%m/%d/%y",
"%d/%m/%Y",
"%m/%d/%Y",
]
@click.group("fuel")
@click.pass_context
def fuel(*args, **kwargs):
"""
Manage vehicle fuel records.
"""
pass
def fuel_add_usage(vid, session):
    """
    Display how to use `$ ft fuel add` with examples from the
    database. The user should be able to copy/paste one of the commands
    directly.
    If vid is not None, we'll assume it is invalid.
    """
    console.print()
    if vid is None:
        console.print('No Arguments :frowning:!')
    else:
        console.print(
            f"{vid} does not resolve to a vehicle!",
            style="red",
        )
    console.print()
    console.print('The command can be run with [white]vehicle names[/white]:', style='cyan')
    console.print()
    show_command = '$ ft fuel add'
    # Pull every vehicle so we can print ready-to-paste examples.
    result = session.execute(select(Vehicle))
    vehicles = [(v.vehicle_id, v.name) for v in result.scalars().all()]
    for _, name in vehicles:
        console.print(f'[red]{show_command}[/red] [white]{name}[/white]')
    console.print()
    console.print('Or with vehicle [white]IDs[/white]:', style='cyan')
    console.print()
    # NOTE: this loop rebinds the `vid` parameter; harmless since the
    # function does not use it afterwards.
    for vid, _ in vehicles:
        console.print(f'[red]{show_command}[/red] [white]{vid}[/white]')
    console.print()
    console.print('use `--help` to see more detailed information.')
    console.print()
console.print()
@fuel.command("add")
@click.pass_context
@click.argument(
"vehicle",
type=str,
required=False,
)
@click.option(
"--date",
type=click.DateTime(formats=date_format_strings),
prompt=False,
help=(
"The date fuel was added to the vehicle. "
"Support 5 major date formats in the following order: "
"Y-m-d, d/m/Y, d/m/y, m/d/Y, m/d/y (first match is taken)"
),
)
@click.option(
"--fuel",
type=float,
prompt=False,
help="The amount of fuel added to the vehicle.",
)
@click.option(
"--mileage",
type=float,
prompt=False,
help="The mileage since the last fill up.",
)
@click.option(
"--cost",
type=float,
prompt=False,
help="The full cost of the fuel.",
)
@click.option(
"--partial",
type=bool,
prompt=False,
help=(
"Was this a partial fill up. "
"Optional - you will not be prompted and have "
"to set the switch."
),
)
@click.option(
"--comment",
type=str,
prompt=False,
help=(
"A comment about this fuel record. "
"Optional - you will not be prompted and "
"have to set the switch."
),
)
def add(*args, **kwargs):
"""
Add a new fuel record to the database. You can add a fuel record by
vehicle name:
$ ft fuel add passat
or by vehicle id:
$ ft fuel add 4
If you do not specify the switches, you will be prompted for the
information automatically.
If you don't remember the name or id of the vehicle, execute:
$ ft fuel add
And it will display a list of valid options.
\b
NOTE: The date format can be one of the following:
1. `%Y-%m-%d` - year-month-day 2021-08-12
2. `%d/%m/%y` - day/month/year 12/08/21
3. `%m/%d/%y` - month/day/year 08/12/21
4. `%d/%m/%Y` - day/month/year 12/08/2021
5. `%m/%d/%Y` - month/day/year 08/12/2021
NOTE: The first format to produce a correct date is used. The date
is matched against the list in the order specified above. For
example, `02/03/2021` can match 2 or 3 but will match 2 first.
Beware. It is best to use the ISO 8601 representation.
NOTE: If you use any of the date formats that have a `/`in them you
will have to use quote marks when using them directly in switches.
"""
ctx = args[0]
config = ctx.obj["config"]
vid = kwargs["vehicle"]
if vid is None:
with config["db"].begin() as session:
fuel_add_usage(vid, session)
ctx.exit()
with config["db"].begin() as session:
# do we have an integer or a string?
if is_int(vid):
statement = select_vehicle_by_id(vid)
else:
statement = select_vehicle_by_name(vid)
# NOTE: select(Vehicle) returns the SQL statement that must be
# executed against the engine.
selected_vehicle = session.execute(statement).first()
if selected_vehicle is None:
fuel_add_usage(vid, session)
ctx.exit()
selected_vehicle = selected_vehicle[0]
# Now that we have a valid vehicle, let's make sure we have
# valid data.
data = {}
data["date"] = (
click.prompt(
"Date",
type=click.DateTime(formats=date_format_strings),
)
if kwargs["date"] is None
else kwargs["date"]
)
for key in ("fuel", "mileage", "cost"):
data[key] = (
click.prompt(
f"{key.title()}",
type=float,
)
if kwargs[key] is None
else kwargs[key]
)
data["partial"] = kwargs["partial"]
data["comment"] = kwargs["comment"]
data["fill_date"] = data.pop("date")
# plot the records and ask for confirmation to proceed:
console.print()
console.print(f"{selected_vehicle.vehicle_id} - {selected_vehicle.name}")
console.print(f'Date = {data["fill_date"]}')
console.print(f'Fuel = {data["fuel"]}')
console.print(f'Mileage = {data["mileage"]}')
console.print(f'Cost = {data["cost"]}')
console.print(f'Partial = {data["partial"]}')
console.print(f'Comment = {data["comment"]}')
if click.confirm("Is the Fuel Record Correct?", abort=True, default=True):
selected_vehicle.fuel_records.append(FuelRecord(**data))
session.flush()
console.print(
f"{len(selected_vehicle.fuel_records)} "
"Fuel Records associated with the vehicle."
)
# ft fuel show passat --records=10 <- default
# ft fuel show passat --records="all"
# - show the fuel records for the vehicle sorted in descending order
# - show the fuel record ids + stats (last program) probably need to use a dataframe
# - can specify --excel, --ods, --csv to write the report. See bulk export for details
# ft fuel delete 45
# - delete the fuel record with id=45
# - would use the show command to determine that
# ft fuel edit 45 --cost=45.67
# - same switches as the add command
# - will not prompt, any value that is not specified is not changed
# - add values to a dictionary and create a FuelRecord object and commit that to the database
# - verify that it does get updated...
# - NOTE: should be able to change the vehicle_id foreign key as well.
| TroyWilliams3687/fuel_tracker | src/fuel_tracker/command_fuel.py | command_fuel.py | py | 7,165 | python | en | code | 0 | github-code | 13 |
22224201874 | import json
import os
import pprint
import sys
from core.content import get_Data
from concurrent.futures import ThreadPoolExecutor
from core import login_hodj
threa = ThreadPoolExecutor(max_workers=32)
sys.path.append(os.getcwd())
def get_list(data):
    """Fetch one day's "音乐原创榜" ranking and append it to a JSON dump file.

    :param data: date string "YYYY-MM-DD", used as the rankDay of the query.
    """
    # NOTE(review): "ankType" looks like a typo for "rankType" — confirm
    # against the API; kept as-is since the server may accept it.
    param = {"no": "dy3026", "data": {"ankType": "音乐原创榜", "rankDay": f"{data}"}}
    aa = get_Data(param)
    pprint.pp(aa)
    # Bug fix: the file name used param["data"].get("liveDay"), a key that
    # does not exist for this request, so every day's payload was appended
    # to the single file "dy3026_None.json".  Name the file by rankDay.
    out_path = f'static/json_file_dir/{param["no"]}_{param["data"]["rankDay"]}.json'
    with open(out_path, 'a+', encoding='utf-8') as f:
        f.write(json.dumps(aa))
if __name__ == '__main__':
    # months = input("Enter the month you want")
    # day = input("Enter the day you want")
    # Build every calendar date of 2021 (not a leap year, so February
    # runs 1-28) and fan the requests out over the 32-worker thread pool.
    data_time = []
    login_hodj.get_log()
    for i in range(1, 13):
        if i == 2:
            for j in range(1, 28 + 1):
                data_time.append(f"2021-{str(i).zfill(2)}-{str(j).zfill(2)}")
        elif i in [1, 3, 5, 7, 8, 10, 12]:
            # 31-day months
            for j in range(1, 32):
                data_time.append(f"2021-{str(i).zfill(2)}-{str(j).zfill(2)}")
        elif i in [4, 6, 9, 11]:
            # 30-day months
            for j in range(1, 31):
                data_time.append(f"2021-{str(i).zfill(2)}-{str(j).zfill(2)}")
    threa.map(get_list, data_time)
| qifiqi/codebase | python_codebase/爬虫/红人点集/main.py | main.py | py | 1,367 | python | en | code | 3 | github-code | 13 |
# Minimal UDP server: receives ASCII datagrams and echoes them back upper-cased.
import socket
SOCKET = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# First param: Family - Address family - assigns the type of address the socket can communicate with
# AF_INET - IPv4
# AF_INET6 - IPv6
# AF_UNIX - used for Unix domain sockets
# Second param : Type
# SOCK_DGRAM - specifies user datagram protocol (UDP)
# SOCK_STREAM - uses transmission control protocol (TCP)
HOST = '127.0.0.1'
PORT = 4200
SOCKET.bind((HOST, PORT))
# socket_obj.bind((ip, port)) attaches the socket to a local address
print("Listening at {}".format(SOCKET.getsockname()))
# Serve forever: one request datagram -> one upper-cased reply datagram.
while True:
    data, clientAddress = SOCKET.recvfrom(65535)
    # 65535 is the maximum size of a UDP datagram
    message = data.decode('ascii')
    upperCase = message.upper()
    print('The client at {} says {!r}'.format(clientAddress, message))
    data = upperCase.encode('ascii')
    SOCKET.sendto(data, clientAddress)
| shalvinpshaji/socket-programming | socket1.py | socket1.py | py | 821 | python | en | code | 1 | github-code | 13 |
8865758711 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 10:15:05 2018
@author: james
"""
#%% Preamble
import os
import yaml
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
from scipy import optimize
#%% Functions
def load_yaml_configfile(fname):
    """
    load yaml config file
    args:
    :param fname: string, complete name of config file
    out:
    :return: config dictionary

    Bug fix: on a YAML parse error the original printed the error and
    then hit an unrelated NameError (``config`` was never assigned)
    at the return statement; the parse error is now re-raised so the
    caller sees the real cause.
    """
    with open(fname, 'r') as ds:
        try:
            config = yaml.load(ds, Loader=yaml.FullLoader)
        except yaml.YAMLError as exc:
            print(exc)
            raise
    return config
def import_aeronet_day_average (path,file_name,version,padding,datatype):
    """
    Import Aeronet data (daily averages) to dataframe
    args:
    :param path: string, path where aeronet files are located
    :param file_name: string, name of downloaded file, with extension
    :param version: integer giving file version
    :param padding: boolean to decide whether to pad values by interpolation
    :param datatype: string to define whether it is AOD or SSA data
    out:
    :return dataframe: timeseries data imported from file
    :return time_range: string defining time range of data
    :return empty_days: list with days that contain no data
    """
    # Number of header lines before the column names differs per file
    # version and data type.
    # NOTE(review): versions other than 2/3 (or an unexpected datatype
    # for version 2) leave rows_preamble unbound -> NameError below.
    if version == 3:
        rows_preamble = 6
    elif version == 2:
        if datatype == "aod":
            rows_preamble = 4
        elif datatype == "ssa":
            rows_preamble = 3
    #1. read in data from file and create timestamp index
    # (V3 SSA files carry the site name in column 0, so date/time are
    # in columns 1 and 2 there; all other files use columns 0 and 1)
    if version == 3 and datatype == "ssa":
        dataframe = pd.read_csv(os.path.join(path,file_name),index_col=(1,2),header=0,skiprows=rows_preamble)
        dataframe.index = pd.to_datetime(dataframe.index,format='(\'%d:%m:%Y\', \'%H:%M:%S\')')
        dataframe = dataframe.drop('AERONET_Site',axis=1)
    else:
        dataframe = pd.read_csv(os.path.join(path,file_name),index_col=(0,1),header=0,skiprows=rows_preamble)
        dataframe.index = pd.to_datetime(dataframe.index,format='(\'%d:%m:%Y\', \'%H:%M:%S\')')
    dataframe.index.name = 'Timestamp (UTC)'
    #2. Drop columns we don't need (keep only AOD/AOT or SSA columns,
    #   plus the Angstrom exponent for AOD files)
    if datatype == "aod":
        if version == 2:
            dataframe = dataframe.filter(regex='^AOT_|Angstrom$', axis=1)
        elif version == 3:
            dataframe = dataframe.filter(regex='^AOD_(?!Empty.*)|Angstrom_Exponent$', axis=1)
    elif datatype == "ssa":
        if version == 2:
            dataframe = dataframe.filter(regex='^SSA', axis=1)
        elif version == 3:
            dataframe = dataframe.filter(regex='^Single_Scattering_Albedo', axis=1)
    #Get rid of negative AOD and nans (negative values become NaN here)
    dataframe = dataframe[dataframe>0]
    dataframe.dropna(axis='columns',how='all',inplace=True)
    #3. Extend file to include all values in the range (interpolate)
    empty_days = []
    if padding:
        # V2 daily averages are stamped at midnight, V3 at midday.
        if version == 2:
            timestring = ' 00:00:00'
        elif version == 3:
            timestring = ' 12:00:00'
        #Get time range from file name (supports YYMMDD_YYMMDD and
        #YYYYMMDD_YYYYMMDD prefixes)
        if len(file_name.split('_')[0]) == 6:
            time_range = ['20' + date[0:2] + '-' + date[2:4] + '-' + date[4:6]
            for date in file_name[0:13].split('_')]
            pad_index = pd.date_range(start=time_range[0] + timestring,
                         end=time_range[1] + timestring,freq='D',
                         name=dataframe.index.name)
        elif len(file_name.split('_')[0]) == 8:
            time_range = [date[0:4] + '-' + date[4:6] + '-' + date[6:8]
            for date in file_name[0:17].split('_')]
            pad_index = pd.date_range(start=time_range[0] + timestring,
                         end=time_range[1] + timestring,freq='D',
                         name=dataframe.index.name)
        #Fill with Nans
        dataframe = dataframe.reindex(pad_index)
        #Get list of empty days
        [empty_days.append(day.date().strftime('%Y-%m-%d'))
         for day in dataframe.index if dataframe.loc[day].isna().all()]
        #For AOD, interpolate (SSA gaps are left as NaN)
        if datatype == 'aod':
            dataframe = dataframe.interpolate('index')
        time_range = time_range[0] + '_' + time_range[-1]
    else:
        time_range = dataframe.index[0].strftime(format="%Y-%m-%d") + '_' +\
        dataframe.index[-1].strftime(format="%Y-%m-%d")
    return dataframe, time_range, empty_days
def import_aeronet_all(path,stat_dict,version,timeres,padding,datatype):
    """
    Import Aeronet data to dataframe
    args:
    :param path: string, path where aeronet files are located
    :param stat_dict: dictionary with all information about aeronet station
    :param version: integer giving file version
    :param timeres: integer defining the time resolution for interpolation
    :param padding: boolean to decide whether to pad values with average
    :param datatype: string to define data type (AOD or SSA)
    out:
    :return dataframe: timeseries data imported from file
    :return time_range: string defining time range of data
    :return empty_days: list with days that contain no data (always an empty
        list here, unlike the daily-average importer)
    """
    #Select the data file for the requested data type
    if datatype == 'aod':
        file_name = stat_dict['aod_files']['all']
    elif datatype == 'ssa':
        file_name = stat_dict['ssa_files']['all']
    #Number of preamble (header) rows to skip depends on version and data type
    if version == 3:
        rows_preamble = 6
    elif version == 2:
        if datatype == "aod":
            rows_preamble = 4
        elif datatype == "ssa":
            rows_preamble = 3
    #1. read in data from file and create timestamp index
    #Version-3 SSA files carry a leading 'Site' column, so the date/time pair
    #sits in columns 1 and 2 and the 'Site' column is dropped afterwards
    if version == 3 and datatype == "ssa":
        dataframe = pd.read_csv(os.path.join(path,file_name),index_col=(1,2),header=0,skiprows=rows_preamble)
        dataframe.index = pd.to_datetime(dataframe.index,format='(\'%d:%m:%Y\', \'%H:%M:%S\')')
        dataframe = dataframe.drop('Site',axis=1)
    else:
        dataframe = pd.read_csv(os.path.join(path,file_name),index_col=(0,1),header=0,skiprows=rows_preamble)
        dataframe.index = pd.to_datetime(dataframe.index,format='(\'%d:%m:%Y\', \'%H:%M:%S\')')
    dataframe.index.name = 'Timestamp (UTC)'
    #2. Drop columns we don't need
    if datatype == "aod":
        if version == 2:
            dataframe = dataframe.filter(regex='^AOT_|Angstrom$', axis=1)
            df_day_ave = stat_dict['df_day'].filter(regex='^AOT_|Angstrom$', axis=1)
        elif version == 3:
            dataframe = dataframe.filter(regex='^AOD_(?!Empty.*)|Angstrom_Exponent$', axis=1)
            df_day_ave = stat_dict['df_day'].filter(regex='^AOD_(?!Empty.*)|Angstrom_Exponent$', axis=1)
    elif datatype == "ssa":
        if version == 2:
            dataframe = dataframe.filter(regex='^SSA', axis=1)
        elif version == 3:
            dataframe = dataframe.filter(regex='^Single_Scattering_Albedo', axis=1)
    #Mask non-positive values as NaN (presumably Aeronet fill values — confirm)
    dataframe = dataframe[dataframe>0]
    dataframe.dropna(axis='columns',how='all',inplace=True)
    #3. Interpolate values (do it by day)
    #Get time range from file name (6-digit dates are YYMMDD, 8-digit YYYYMMDD)
    if len(file_name.split('_')[0]) == 6:
        time_range = ['20' + date[0:2] + '-' + date[2:4] + '-' + date[4:6]
                      for date in file_name[0:13].split('_')]
    elif len(file_name.split('_')[0]) == 8:
        time_range = [date[0:4] + '-' + date[4:6] + '-' + date[6:8]
                      for date in file_name[0:17].split('_')]
    #Interpolate
    time_res_string = str(timeres) + 'min'
    #Choose a smaller grid for first step of interpolation
    #NOTE(review): np.ceil returns a float, so this yields e.g. '3.0min' —
    #confirm the installed pandas version accepts float frequency strings
    fine_res_string = str(np.ceil(timeres/5)) + 'min'
    #Split data into days
    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
    df_rs = []
    for ix, iday in enumerate(days):
        #First interpolate onto finer grid, allow filling
        newindex_fine = pd.date_range(start=iday, end=dfs[ix].index[-1],
                                 freq=fine_res_string)
        #Define index for whole day, this will simply then fill with NaNs
        newindex_timeres = pd.date_range(start=iday, end=iday + pd.Timedelta('1D')
                           - pd.Timedelta(time_res_string), freq=time_res_string)
        #Define interpolation limit, only up to an hour (60 minutes) of filling
        #interp_limit = int(60*24/(float(fine_res_string[0:-1])))
        df_new = dfs[ix].reindex(dfs[ix].index.union(newindex_fine)).\
                    interpolate('index').reindex(newindex_timeres)
        #Fill NaNs with average of the daily values
        if padding:
            df_rs.append(df_new.fillna(df_new.mean()))
        else:
            df_rs.append(df_new)
        # if datatype == 'aod':
        #     fig, ax = plt.subplots(figsize=(9,8))
        #
        #     dfs[ix].loc[:,'AOD_500nm'].plot(ax=ax,style='*',legend=False)
        #     df_rs[ix].loc[:,'AOD_500nm'].plot(ax=ax,style='--',legend=False)
        #     ax.set_ylabel('AOD at 500nm')
        #     plt.savefig('aod_500nm_interp_' + iday.strftime('%Y-%m-%d') + '.png')
        #     plt.close(fig)
    #Put all data together again
    dataframe_rs = pd.concat(df_rs,axis=0)
    #Define end_time (make sure still in the same day)
    start_time = pd.to_datetime(time_range[0])
    end_time = pd.to_datetime(time_range[1]) + pd.Timedelta('1D') -\
               pd.Timedelta(minutes=timeres)
    total_index = pd.date_range(start=start_time, end=end_time, freq=time_res_string)
    #4. Optionally pad missing time steps with daily-average values
    if padding:
        if datatype == 'aod':
            dataframe_rs = dataframe_rs.reindex(total_index)
            dfs_day = [group[1] for group in df_day_ave.groupby(df_day_ave.index.date)]
            day_index = [group[1].index for group in dataframe_rs.groupby(dataframe_rs.index.date)]
            dfs_day_full = pd.concat([df_day.reindex(day_index[i]).fillna(df_day.mean())
                                      for i, df_day in enumerate(dfs_day)],axis=0)
            dataframe_rs = dataframe_rs.fillna(dfs_day_full)
            print('Filled NaNs with daily averages')
#            #These are test plots to check the interpolation
#            #Split data into days
#            dfs_rs = [group[1] for group in dataframe_rs.groupby(dataframe_rs.index.date)]
#            days = pd.to_datetime([group[0] for group in dataframe_rs.groupby(dataframe_rs.index.date)])
#
#            for ix, iday in enumerate(days):
#                if datatype == 'aod':
#                    fig, ax = plt.subplots(figsize=(9,8))
#
#                    dfs_rs[ix].loc[:,'AOD_500nm'].plot(ax=ax,style='--',legend=False)
#                    ax.set_ylabel('AOD at 500nm')
#                    plt.savefig('aod_500nm_full_interp_' + iday.strftime('%Y-%m-%d') + '.png')
#                    plt.close(fig)
        elif datatype == 'ssa':
            dataframe_rs = dataframe_rs.reindex(total_index)
    #Define time range string for file names
    time_range_string = dataframe_rs.index[0].strftime(format="%Y-%m-%dT%H%M%S") + '_' +\
                        dataframe_rs.index[-1].strftime(format="%Y-%m-%dT%H%M%S")
    empty_days = []
    return dataframe_rs, time_range_string, empty_days
def load_plot_aerosol_data(load_path,station_dict,dict_paths,timeres,plotting=False,padding=False):
    """
    Load Aeronet data for aerosol optical depth and single scattering albedo from file
    and plot raw data if required
    First import daily average and then all values.
    If a day has no data, interpolation is used with surrounding days, to create new
    daily average AODs. These values are then used to fill up the data in the
    import_aeronet_all function
    args:
    :param load_path: string, path where aeronet files are located
    :param station_dict: dictionary of aeronet stations (without data)
    :param dict_paths: dictionary with paths to save plots to
    :param timeres: integer defining the time resolution for interpolation
    :param plotting: flag for plotting
    :param padding: boolean to decide whether to pad values with nan to fill a year
    out:
    :return: dictionary of aeronet stations including data as well as time range strings
    """
    save_path = os.path.join(dict_paths["main"],dict_paths["raw"])
    df_aod = pd.DataFrame()
    df_ssa = pd.DataFrame()
    range_aod_day = ""
    range_ssa_day = ""
    range_aod = ""
    range_ssa = ""
    for key in station_dict:
        #1. Daily-average AOD and SSA import (stored under 'df_day')
        print("Importing daily average data from %s" % station_dict[key]["name"])
        station_dict[key]['df_day'] = pd.DataFrame()
        if station_dict[key]['aod_files']['day_ave']:
            #Import data for daily averages
            aod_version = station_dict[key]['aod_files']['version']
            df_aod, range_aod_day, empty_aod = import_aeronet_day_average(load_path,station_dict[key]['aod_files']['day_ave'],
                                                     aod_version,padding,datatype="aod")
            if not df_aod.empty:
                if plotting:
                    data_aod = df_aod.filter(regex='^AO.',axis=1)
                    ax1 = data_aod.plot(legend=True,figsize=(10,6*10/8),
                            title='Daily average aerosol optical depth at ' +
                            station_dict[key]['name'],grid=True,colormap='jet')
                    ax1.set_ylabel('AOD')
                    plt.savefig(os.path.join(save_path,'aod_raw_data_' + range_aod_day + '_' + key + '.png'))
            else:
                print("All AOD values are NAN, no plots made")
            station_dict[key]['df_day'] = df_aod
            station_dict[key]['empty_aod'] = empty_aod
        else:
            print("No AOD data to import")
            station_dict[key]['empty_aod'] = "All days"
        if station_dict[key]['ssa_files']['day_ave']:
            ssa_version = station_dict[key]['ssa_files']['version']
            df_ssa, range_ssa_day, empty_ssa = import_aeronet_day_average(load_path,station_dict[key]['ssa_files']['day_ave'],
                                                     ssa_version,padding,datatype="ssa")
            if not df_ssa.empty:
                if plotting:
                    ax2 = df_ssa.plot(legend=True,figsize=(10,6*10/8),
                            title='Daily average single scattering albedo at ' +
                            station_dict[key]['name'],grid=True)
                    ax2.set_ylabel('SSA')
                    plt.savefig(os.path.join(save_path,'ssa_raw_data_' + range_ssa_day + '_' + key + '.png'))
            else:
                print("All SSA values are NAN, no plots made")
            #Merge AOD and SSA daily averages into one dataframe
            if station_dict[key]['df_day'].empty:
                station_dict[key]['df_day'] = df_ssa
            else:
                station_dict[key]['df_day'] = pd.concat([df_aod,df_ssa],axis=1)
            station_dict[key]['empty_ssa'] = empty_ssa
        else:
            print("No SSA data to import")
            station_dict[key]['empty_ssa'] = "All days"
        #2. All (high time resolution) values, interpolated (stored under 'df_all')
        print("Importing all data from %s" % station_dict[key]["name"])
        station_dict[key]['df_all'] = pd.DataFrame()
        if station_dict[key]['aod_files']['all']:
            aod_version = station_dict[key]['aod_files']['version']
            df_aod, range_aod, empty_aod = import_aeronet_all(load_path,
                      station_dict[key],aod_version,timeres,padding,datatype="aod")
            if not df_aod.empty:
                if plotting:
                    data_aod = df_aod.filter(regex='^AO.',axis=1)
                    ax1 = data_aod.plot(legend=True,figsize=(10,6*10/8),
                            title='Aerosol optical depth inerpolated to ' + str(timeres) + ' minutes at ' +
                            station_dict[key]['name'],grid=True,colormap='jet')
                    ax1.set_ylabel('AOD')
                    plt.savefig(os.path.join(save_path,'aod_raw_data_' + range_aod + '_' + key + '.png'))
            else:
                print("All AOD values are NAN, no plots made")
            station_dict[key]['df_all'] = df_aod
            #NOTE(review): this overwrites 'empty_aod' set by the daily-average
            #import above (import_aeronet_all always returns []) — confirm intended
            station_dict[key]['empty_aod'] = empty_aod
        else:
            print("No AOD data to import")
            station_dict[key]['empty_aod'] = "All days"
        if station_dict[key]['ssa_files']['all']:
            ssa_version = station_dict[key]['ssa_files']['version']
            df_ssa, range_ssa, empty_ssa = import_aeronet_all(load_path,
                      station_dict[key],ssa_version,timeres,padding,datatype="ssa")
            if not df_ssa.empty:
                if plotting:
                    ax2 = df_ssa.plot(legend=True,figsize=(10,6*10/8),
                            title='Daily average single scattering albedo inerpolated to ' + str(timeres) + ' minutes at ' +
                            station_dict[key]['name'],grid=True)
                    ax2.set_ylabel('SSA')
                    plt.savefig(os.path.join(save_path,'ssa_raw_data_' + range_ssa + '_' + key + '.png'))
            else:
                print("All SSA values are NAN")
            if station_dict[key]['df_all'].empty:
                station_dict[key]['df_all'] = df_ssa
            else:
                station_dict[key]['df_all'] = pd.concat([df_aod,df_ssa],axis=1)
            #NOTE(review): likewise overwrites 'empty_ssa' from the daily import
            station_dict[key]['empty_ssa'] = empty_ssa
        else:
            print("No SSA data to import")
            station_dict[key]['empty_ssa'] = "All days"
    return {"aero_stats": station_dict, "time_range_aod_day": range_aod_day,
            "time_range_ssa_day": range_ssa_day, "time_range_aod_all": range_aod,
            "time_range_ssa_all": range_ssa}
def extract_wavelengths(dataframe,fit_range,version):
    """
    Extract AOD columns and their wavelengths (from the column names)

    args:
    :param dataframe: dataframe with AOD and other values
    :param fit_range: (min, max) wavelength range to fit, in nanometers
    :param version: integer Aeronet data version (2 or 3)
    out:
    :return data_aod: dataframe with only the AOD columns inside fit_range
    :return xdata: wavelength values in micrometers for the fit procedure
    :raises ValueError: if version is not 2 or 3 (previously this fell
        through to a NameError)
    """
    if version == 2:
        #Version 2 columns are named e.g. 'AOT_500'
        data_aod = dataframe.filter(regex="^AOT_", axis=1)
        aod_wvl = [col.replace('AOT_', '') for col in data_aod.columns]
    elif version == 3:
        #Version 3 columns are named e.g. 'AOD_500nm'; strip the prefix,
        #the 'nm' suffix and any 'Empty' placeholder text
        data_aod = dataframe.filter(regex="^AOD_", axis=1)
        aod_wvl = [re.sub('Empty.*', '', col.replace('AOD_', '').replace('nm', ''))
                   for col in data_aod.columns]
    else:
        raise ValueError("version must be 2 or 3, got %r" % (version,))
    #wavelength in micrometers
    xdata = np.array(aod_wvl, dtype=float)/1000.
    #Define Boolean mask from fit range (converted from nm to micrometers)
    mask = (xdata >= fit_range[0]/1000.) & (xdata <= fit_range[1]/1000.)
    xdata = xdata[mask]
    data_aod = data_aod.loc[:, mask]
    return data_aod, xdata
def extract_visible_ssa(dataframe,version):
    """
    Extract the SSA columns and append their visible-band average

    args:
    :param dataframe: dataframe with SSA and other values
    :param version: integer, Aeronet version (2 or 3)
    out:
    :return: dataframe with the SSA columns plus an 'ssa_vis' column holding
        the row-wise mean over the visible band (380-740 nm)
    """
    if version == 2:
        #Version 2 columns are named e.g. 'SSA675-T'
        data_ssa = dataframe.filter(regex="^SSA", axis=1)
        wavelengths = np.array(
            [col.replace('SSA', '').replace('-T', '') for col in data_ssa.columns],
            dtype=float)
    elif version == 3:
        #Version 3 columns are named e.g. 'Single_Scattering_Albedo[675nm]'
        data_ssa = dataframe.filter(regex='^Single_Scattering_Albedo', axis=1)
        wavelengths = np.array(
            [col.replace('Single_Scattering_Albedo[', '').replace('nm]', '')
             for col in data_ssa.columns],
            dtype=float)
    #Average over the visible wavelength band only
    in_visible = (wavelengths >= 380.0) & (wavelengths <= 740.0)
    vis_series = pd.Series(data_ssa.loc[:, in_visible].mean(axis=1),
                           index=data_ssa.index, name='ssa_vis')
    return pd.concat([data_ssa, vis_series], axis=1)
def angstrom_fit (fit_config, station_dict, fit_range, dict_paths, plotting,
                  curvature=False):
    """
    Fit Aeronet data using Angstrom's formula for each station in the config file
    and plot if required
    args:
    :param fit_config: string defining whether to use daily data or all data
    :param station_dict: dictionaries with station info and data
    :param fit_range: array giving wavelength range for AOD fit
    :param dict_paths: dictionary with plot paths
    :param plotting: dict, output plots or not
    :param curvature: bool, whether to add quadratic term for wavelength dependence of alpha
    out:
    :return station_dict: dictionaries with station data and also including fit parameters
    :return xdata: array of wavelengths used for the fit
    """
    version_aod = station_dict["aod_files"]["version"]
    version_ssa = station_dict["ssa_files"]["version"]
    save_path = os.path.join(dict_paths["main"],dict_paths["fits"])
    #Get data from dictionaries
    if fit_config == "day_ave":
        data = station_dict['df_day']
        print("Extracting alpha and beta from log-linear fit for daily average AOD data from %s"
              % station_dict['name'])
    elif fit_config == "all":
        data = station_dict['df_all']
        print("Extracting alpha and beta from log-linear fit for all AOD data from %s ... this takes a while"
              % station_dict['name'])
        #NOTE(review): 'plotting' is a dict (see plotting[fit_config] below),
        #so this branch triggers whenever the dict is non-empty — confirm
        if plotting:
            print("Plotting is turned on, the fit for each day will be plotted, please be patient...")
    #Get original index before processing
    ix = data.index
    #Get wavelengths from column names
    data_aod, xdata = extract_wavelengths(data,fit_range,version_aod)
    #Get index of valid values (remove NANs)
    data_aod = data_aod.dropna(axis='rows')
    #notnan_ix = data_aod[data_aod.notna()].index
    #Apply mask to AOD values to extract data from fit
    ydata = data_aod.values
    #Get angstrom data from dataframe
    if version_aod == 2:
        data_alpha = data.filter(regex="Angstrom",axis=1).reindex(data_aod.index)
    elif version_aod == 3:
        data_alpha = data.filter(regex="Angstrom.",axis=1).reindex(data_aod.index)
    alphadata = data_alpha.values
    #Average of the measured Angstrom exponents, used for comparison in plots
    alphadata_ave = np.average(alphadata,axis=1)
    # The fit is performed according to the following formula
    # AOD = beta*lambda**(-alpha)
    # log(AOD) = log(beta) - alpha*log(lambda)
    #Take logarithm of the data
    logx = np.log(xdata)
    #Some values are negative in the data, assign small non-zero positive value
    ydata[ydata<0.] = 0.0001
    logy = np.log(ydata)
    #Linear (or quadratic, if curvature) model in log-log space
    if not curvature:
        fitfunc = lambda p, l: p[1] - p[0] * l
        errfunc = lambda p, l, y: y - fitfunc(p, l)
        pinit = [1, 1]
        params = np.zeros((len(data_aod),2))
    else:
        fitfunc = lambda p, l: p[1] - p[0] * l + p[2] * l**2
        errfunc = lambda p, l, y: y - fitfunc(p, l)
        pinit = [1, 1, 1]
        params = np.zeros((len(data_aod),3))
    #parcov = np.zeros((len(data)))
    # data_aod['alpha_fit'] = pd.Series(index=data_aod.index)
    # data_aod['beta_fit'] = pd.Series(index=data_aod.index)
    #Perform fit for each day
    for i, datetime in enumerate(data_aod.index):
        params[i,:] = optimize.leastsq(errfunc,pinit,args=(logx,logy[i,:]))[0]
        if plotting[fit_config]: # and fit_config == "day_ave":
            if fit_config == "day_ave":
                titlestring = 'Daily average AOD at ' + station_dict['name'] + ' on ' +\
                datetime.strftime(format="%Y-%m-%d")
                filestring = 'aod_fit_day_ave_' + station_dict["name"] + '_'\
                + datetime.strftime(format="%Y%m%d") + '.png'
            elif fit_config == "all":
                titlestring = 'AOD at ' + station_dict['name'] + ' on ' +\
                datetime.strftime(format="%Y-%m-%d %H:%M:%S")
                filestring = 'aod_fit_' + station_dict["name"] + '_'\
                + datetime.strftime(format="%Y%m%d_%H%M%S") + '.png'
            fig = plt.figure()
            plt.plot(xdata*1000, np.exp(fitfunc(params[i,:],logx)))
            plt.plot(xdata*1000,ydata[i,:],linestyle='None',marker = 'o')
            plt.ylabel(r'$\tau(\lambda)$',fontsize=16)
            plt.xlabel(r"$\lambda$ (nm)",fontsize=16)
            plt.title(titlestring)
            plt.annotate(r'$\alpha_{\rm fit}$ = '+ str(np.round(params[i,0],2))
            ,xy=(0.6,0.8),xycoords='figure fraction',fontsize=14)
            plt.annotate(r'$\beta_{\rm fit}$ = '+ str(np.round(np.exp(params[i,1]),3))
            ,xy=(0.6,0.7),xycoords='figure fraction',fontsize=14)
            plt.annotate(r'$\langle\alpha_{\rm meas}\rangle$ = '+
                         str(np.round(alphadata_ave[i],2)),xy=(0.6,0.6),
                         xycoords='figure fraction',fontsize=14)
            if curvature:
                #NOTE(review): the annotation shows exp(params[i,2]) but the
                #'gamma_fit' column below stores the raw parameter — confirm
                #which one is intended
                plt.annotate(r'$\gamma_{\rm fit}$ = '+ str(np.round(np.exp(params[i,2]),3)),xy=(0.6,0.5),
                         xycoords='figure fraction',fontsize=14)
            plt.annotate(r'$\tau = \beta \lambda^{-\alpha}$',xy=(0.4,0.5),
                         xycoords='figure fraction',fontsize=14)
            fig.tight_layout()
            plt.savefig(os.path.join(save_path,filestring))
            plt.close(fig)
    #add fit parameters to dataframe
    data_aod['alpha_fit'] = pd.Series(params[:,0],index=data_aod.index)
    data_aod['beta_fit'] = pd.Series(np.exp(params[:,1]),index=data_aod.index)
    if curvature:
        data_aod['gamma_fit'] = pd.Series(params[:,2],index=data_aod.index)
    #Put back NANs
    data_aod = data_aod.reindex(ix)
    data_alpha = data_alpha.reindex(ix)
    #Extract the SSA data and create mask for visible values
    data_ssa = extract_visible_ssa(data,version_ssa)
    data = pd.concat([data_aod,data_alpha,data_ssa],axis=1)
    #Assign back to dictionary
    if fit_config == "day_ave":
        station_dict['df_day'] = data
    elif fit_config == "all":
        station_dict['df_all'] = data
    return station_dict, xdata
def angstrom_fit_mean (fit_config, station_dict, mean_stats, xdata, fit_range, dict_paths, plotting,
                       curvature=False):
    """
    Fit Aeronet data using Angstrom's formula to the mean AOD for several stations
    args:
    :param fit_config: string defining whether to use daily data or all data
    :param station_dict: dictionaries with station info and data
    :param mean_stats: list of stations to use for taking average
    :param xdata: array of wavelengths to use for fitting
    :param fit_range: array giving wavelength range for AOD fit
    :param dict_paths: dictionary with plot paths
    :param plotting: dict, output plots or not
    :param curvature: bool, whether to add quadratic term for wavelength dependence of alpha
    out:
    :return data_mean: dataframe with averaged data
    """
    save_path = os.path.join(dict_paths["main"],dict_paths["fits"])
    #Get the number of stations to use for the mean
    num_stats = len(mean_stats)
    mean_label = '_'.join([s for s in mean_stats])
    print("Extracting alpha and beta from log-linear fit for mean AOD data from %s ... this takes a while"
          % mean_label)
    #1. Collect the AOD and visible SSA values of every contributing station.
    #Bug fix: the arrays are allocated ONCE and filled per station. Previously
    #the AOD array was re-created (zeroed) inside the loop, so only the last
    #station contributed to the "mean" and the fit was redone per iteration.
    aod_array = None
    ssa_array = None
    ix = None
    aod_columns = None
    for key in station_dict:
        if key not in mean_stats:
            continue
        version_aod = station_dict[key]["aod_files"]["version"]
        #Get data
        if fit_config == "day_ave":
            data = station_dict[key]['df_day']
        elif fit_config == "all":
            data = station_dict[key]['df_all']
        ix = data.index
        data_aod, xdata = extract_wavelengths(data,fit_range,version_aod)
        if aod_array is None:
            #All stations are assumed to share the same index length and
            #wavelength grid (same assumption as before)
            aod_array = np.zeros((num_stats,len(data),len(xdata)))
            ssa_array = np.zeros((num_stats,len(data)))
            aod_columns = data_aod.columns
        aod_array[mean_stats.index(key),:,:] = data_aod.values
        ssa_array[mean_stats.index(key),:] = data["ssa_vis"].values
    #2. Average the AOD over the stations and fit Angstrom's law per time step
    data_mean = pd.DataFrame(np.mean(aod_array,axis=0),index=ix,
                             columns=aod_columns)
    data_mean.dropna(axis='rows',inplace=True) #remove NANs for fitting
    ydata = data_mean.values
    logx = np.log(xdata)
    #Negative AOD values cannot be log-transformed; clip to a small value
    ydata[ydata<0] = 0.0001
    logy = np.log(ydata)
    #Linear (or quadratic, if curvature) model in log-log space
    if not curvature:
        fitfunc = lambda p, l: p[1] - p[0] * l
        errfunc = lambda p, l, y: y - fitfunc(p, l)
        pinit = [1, 1]
        params = np.zeros((len(data_mean),2))
    else:
        fitfunc = lambda p, l: p[1] - p[0] * l + p[2] * l**2
        errfunc = lambda p, l, y: y - fitfunc(p, l)
        pinit = [1, 1, 1]
        params = np.zeros((len(data_mean),3))
    for iday in range(len(data_mean)):
        params[iday,:] = optimize.leastsq(errfunc,pinit,args=(logx,logy[iday,:]))[0]
        if plotting[fit_config]:
            fig = plt.figure()
            plt.plot(xdata*1000, np.exp(fitfunc(params[iday,:],logx)))
            plt.plot(xdata*1000,ydata[iday,:],linestyle='None',marker = 'o')
            plt.ylabel(r'$\tau(\lambda)$',fontsize=16)
            plt.xlabel(r"$\lambda$ (nm)",fontsize=16)
            plt.title('Mean (' + mean_label + ') AOD on ' + str(data_mean.index[iday]))
            plt.annotate(r'$\alpha_{\rm fit}$ = '+ str(np.round(params[iday,0],2)),xy=(0.6,0.8),
                         xycoords='figure fraction',fontsize=14)
            plt.annotate(r'$\beta_{\rm fit}$ = '+ str(np.round(np.exp(params[iday,1]),3)),xy=(0.6,0.7),
                         xycoords='figure fraction',fontsize=14)
            if curvature:
                plt.annotate(r'$\gamma_{\rm fit}$ = '+ str(np.round(np.exp(params[iday,2]),3)),xy=(0.6,0.6),
                             xycoords='figure fraction',fontsize=14)
            plt.annotate(r'$\tau = \beta \lambda^{-\alpha}$',xy=(0.4,0.5),
                         xycoords='figure fraction',fontsize=14)
            fig.tight_layout()
            plt.savefig(os.path.join(save_path,'aod_fit_mean_' + mean_label + '_'
                        + str(data_mean.index[iday]).split(' ')[0] + '.png'))
            plt.close()
    #Attach fit parameters to the mean dataframe
    data_mean['alpha_fit'] = pd.Series(params[:,0],index=data_mean.index)
    data_mean['beta_fit'] = pd.Series(np.exp(params[:,1]),index=data_mean.index)
    if curvature:
        data_mean['gamma_fit'] = pd.Series(params[:,2],index=data_mean.index)
    #Put the NANs back to keep the dataframes aligned
    data_mean = data_mean.reindex(ix)
    #3. Average the visible SSA over the stations
    data_mean['ssa_vis'] = pd.DataFrame(np.mean(ssa_array,axis=0),index=data_mean.index)
    return data_mean
def save_aeronet_dataframe(data,path,filename,fit_type,version,empty_dict,
                           padding,station_name):
    """
    Save Aeronet fit data to an annotated text file

    args:
    :param data: dataframe to save to file
    :param path: path for saving files
    :param filename: name of file to be saved
    :param fit_type: 'day_ave' or 'all'; selects the date format of the index
    :param version: integer defining aeronet version (2 or 3)
    :param empty_dict: dictionary with empty days ('aod' and 'ssa' keys)
    :param padding: boolean stating whether values were padded with daily averages
    :param station_name: name of station, written into the file header
    :raises ValueError: if fit_type or version is not a recognised value
        (previously these fell through to an UnboundLocalError)
    """
    #Set the date format of the index column
    if fit_type == 'day_ave':
        datestring = "%Y-%m-%d"
    elif fit_type == 'all':
        datestring = "%Y-%m-%dT%H:%M:%S"
    else:
        raise ValueError("fit_type must be 'day_ave' or 'all', got %r" % (fit_type,))
    #The AOD column name differs between Aeronet versions
    if version == 2:
        cols = ['AOT_500','alpha_fit','beta_fit','ssa_vis']
    elif version == 3:
        cols = ['AOD_500nm','alpha_fit','beta_fit','ssa_vis']
    else:
        raise ValueError("version must be 2 or 3, got %r" % (version,))
    #Use a context manager so the file is closed even if to_csv raises
    with open(os.path.join(path,filename), 'w') as f:
        f.write('#Aerosol data extracted from Aeronet for %s\n' % station_name)
        if padding:
            f.write('#Empty AOD data points have been filled with daily averages\n')
        f.write('#Days of year with no AOD data: %s\n' % empty_dict['aod'])
        f.write('#Days of year with no SSA data: %s\n' % empty_dict['ssa'])
        f.write('#Data\n')
        data.to_csv(f,columns=cols,float_format='%.6f', index_label='Date_Time', sep=' ',
                    header=['AOD_500','alpha','beta','ssa_vis'],na_rep='nan',
                    date_format=datestring)
def save_aerosol_files(save_path,main_dict,mean_stats,fit_config,
                       padding,description):
    """
    Save Aeronet data to file
    args:
    :param save_path: path for saving files
    :param main_dict: dictionary of Aeronet stations and range strings
    :param mean_stats: list of stations used for averaging
    :param fit_config: define whether we are saving all values or daily averages
    :param padding: boolean to decide whether to pad values with average
    :param description: string with description of simulation
    """
    station_dict = main_dict['aero_stats']
    mean_label = '_'.join([s for s in mean_stats])
    #Table of written file names, one row per station (plus 'mean')
    df_filelist = pd.DataFrame(index=station_dict.keys(),columns=['day_ave','all'])
    df_filelist.index.name="Station"
    for key in station_dict:
        version = station_dict[key]["aod_files"]["version"]
        empty_days = {'aod':station_dict[key]['empty_aod'],'ssa':station_dict[key]['empty_ssa']}
        for fit_type in fit_config:
            if fit_type == 'day_ave' and fit_config[fit_type]:
                time_range = main_dict['time_range_aod_day']
                filename = 'aerosol_angstrom_params_day_ave_' + time_range + '_' + key + '.dat'
                longname = station_dict[key]["name"]
                save_aeronet_dataframe(station_dict[key]['df_day'],save_path,filename,fit_type,version,
                                       empty_days,padding,longname)
                print("Saved daily average aeronet parameters from %s to file %s" % (longname,filename))
                df_filelist.loc[key,"day_ave"] = filename
            if fit_type == 'all' and fit_config[fit_type]:
                time_range = main_dict['time_range_aod_all']
                filename = 'aerosol_angstrom_params_all_' + time_range + '_' + key + '.dat'
                longname = station_dict[key]["name"]
                save_aeronet_dataframe(station_dict[key]['df_all'],save_path,filename,fit_type,version,
                                       empty_days,padding,longname)
                print("Saved all aeronet parameters from %s to file %s" % (station_dict[key]["name"],filename))
                df_filelist.loc[key,"all"] = filename
    #NOTE(review): 'time_range' and 'fit_type' below keep their values from the
    #last iteration of the loop above — confirm this is the intended behaviour
    if type(mean_stats) == list:
        version = 3
        empty_days = {'aod': "See list of empty days in individual fit files",
                      'ssa': "See list of empty days in individual fit files"}
        longname = [station_dict[key]["name"] for key in mean_stats]
        filename = 'aerosol_angstrom_params_mean_day_ave_' + time_range + '_'\
        + mean_label + '.dat'
        save_aeronet_dataframe(main_dict['aeronet_mean_day'],save_path,filename,
                               fit_type,version,empty_days,padding,longname)
        print("Saved daily average mean parameters from %s to file %s" % (mean_stats,filename))
        df_filelist.loc["mean","day_ave"] = filename
        filename = 'aerosol_angstrom_params_mean_all_' + time_range + '_'\
        + mean_label + '.dat'
        save_aeronet_dataframe(main_dict['aeronet_mean_all'],save_path,
                               filename,fit_type,version,empty_days,padding,longname)
        print("Saved all mean parameters from %s to file %s" % (mean_stats,filename))
        df_filelist.loc["mean","all"] = filename
    #Write the overview list of all generated files
    filename_list = "aerosol_filelist_" + description + ".dat"
    df_filelist.to_csv(os.path.join(save_path,filename_list),sep=' ',
                       header=df_filelist.columns.values,na_rep='nan')
    print("List of files written to %s" % filename_list)
#%%Main Program
#######################################################################
### MAIN PROGRAM ###
#######################################################################
def main():
    """
    Command-line entry point: read the yaml config file given as argument,
    import and fit the Aeronet aerosol data and write the fit parameters
    (and plots, if configured) to file.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("configfile", help="yaml file containing config")
    args = parser.parse_args()
    #Turn off interactive plotting; figures are only written to disk
    plt.ioff()
    plt.style.use('my_paper')
    #Get configuration file name
    config_filename = os.path.abspath(args.configfile)#""#
    #Read in values from configuration file
    config = load_yaml_configfile(config_filename)
    info = config["description"]
    print("Extracting Aeronet data for %s" % info)
    homepath = os.path.expanduser("~")
    #get path for loading data
    load_path = os.path.join(homepath,config["path_aerosol_data"])
    #Load stations and other info from config file
    aeronet_stats = config["aeronet_stations"]
    plot_paths = config["path_plots"]
    plot_paths["main"] = os.path.join(homepath,plot_paths["main"])
    save_path = os.path.join(homepath,config["path_fit_data"])
    plot_flag = config["plot_flag"]
    curvature = config["curvature"]
    pad_flag = config["pad_values"]
    mean_stats = config["mean_stations"]
    resolution = config["timeres"]
    fit_range = config["fit_range"]
    #Load Aeronet data and plot all values if required
    aeronet_dict = load_plot_aerosol_data(load_path,aeronet_stats,
                            plot_paths,resolution,plot_flag,pad_flag)
    del aeronet_stats
    #Fit Angstrom parameters per station for each enabled fit type
    for key in aeronet_dict["aero_stats"]:
        for fit_type in config["fit_config"]:
            if config["fit_config"][fit_type]:
                aeronet_dict["aero_stats"][key], xfit_data = angstrom_fit(fit_type,aeronet_dict["aero_stats"][key],
                            fit_range,plot_paths,plot_flag,curvature)
    aeronet_mean_all = pd.DataFrame()
    aeronet_mean_day = pd.DataFrame()
    #Optionally fit the station-mean AOD as well
    if type(mean_stats) == list:
        for fit_type in config["fit_config"]:
            if config["fit_config"][fit_type]:
                aeronet_mean = angstrom_fit_mean(fit_type,aeronet_dict["aero_stats"],mean_stats,xfit_data,
                                      fit_range,plot_paths,plot_flag,curvature)
                if fit_type == 'all':
                    aeronet_mean_all = aeronet_mean
                elif fit_type == 'day_ave':
                    aeronet_mean_day = aeronet_mean
    aeronet_dict.update({"aeronet_mean_all":aeronet_mean_all})
    aeronet_dict.update({"aeronet_mean_day":aeronet_mean_day})
    #Write fit parameters and the file list to disk
    save_aerosol_files(save_path,aeronet_dict,mean_stats,config["fit_config"],pad_flag,info)
#Run the script only when executed directly, not when imported as a module
if __name__ == "__main__":
    main()
| jamesmhbarry/PVRAD | aeronetmystic/aeronetmystic/pvcal_aerosol_input.py | pvcal_aerosol_input.py | py | 40,278 | python | en | code | 1 | github-code | 13 |
17046159004 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipaySecurityRiskContentSyncDetectModel(object):
    """Request model for the alipay.security.risk.content.sync.detect API.

    Plain data holder: every field is exposed as a property backed by a
    private attribute, and the ``to_alipay_dict`` / ``from_alipay_dict``
    helpers translate the object to and from the wire-format dictionary
    used by the gateway.
    """

    # All serializable field names, in gateway order.
    _FIELD_NAMES = ('channel', 'content_type', 'data_list', 'open_id',
                    'products', 'request_id', 'tenants', 'user_id')

    def __init__(self):
        self._channel = None
        self._content_type = None
        self._data_list = None
        self._open_id = None
        self._products = None
        self._request_id = None
        self._tenants = None
        self._user_id = None

    @property
    def channel(self):
        return self._channel

    @channel.setter
    def channel(self, value):
        self._channel = value

    @property
    def content_type(self):
        return self._content_type

    @content_type.setter
    def content_type(self, value):
        self._content_type = value

    @property
    def data_list(self):
        return self._data_list

    @data_list.setter
    def data_list(self, value):
        # Only list values are accepted; a shallow copy is stored.
        if isinstance(value, list):
            self._data_list = list(value)

    @property
    def open_id(self):
        return self._open_id

    @open_id.setter
    def open_id(self, value):
        self._open_id = value

    @property
    def products(self):
        return self._products

    @products.setter
    def products(self, value):
        self._products = value

    @property
    def request_id(self):
        return self._request_id

    @request_id.setter
    def request_id(self, value):
        self._request_id = value

    @property
    def tenants(self):
        return self._tenants

    @tenants.setter
    def tenants(self, value):
        self._tenants = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into the gateway parameter dict."""
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if not value:
                continue
            if field == 'data_list' and isinstance(value, list):
                # Convert nested model elements in place, exactly as the
                # generated SDK models do.
                for i, element in enumerate(value):
                    if hasattr(element, 'to_alipay_dict'):
                        value[i] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a gateway dict; None for empty input."""
        if not d:
            return None
        o = AlipaySecurityRiskContentSyncDetectModel()
        for field in AlipaySecurityRiskContentSyncDetectModel._FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipaySecurityRiskContentSyncDetectModel.py | AlipaySecurityRiskContentSyncDetectModel.py | py | 4,627 | python | en | code | 241 | github-code | 13 |
39357252944 | import pandas as pd
import os
import pickle
import geopandas as gpd
from pathlib import Path
from nextbike.constants import CONSTANTS
from nextbike.constants import __FILE__
from nextbike import utils
def __read_geojson(geojson):
    """
    Private helper: load a .geojson file from the external data folder.

    :param geojson: name of the .geojson-file, located in data/external/
    :return: GeoDataFrame read from the file, or None if it does not exist
    """
    full_path = os.path.join(__FILE__, CONSTANTS.PATH_EXTERNAL.value, geojson)
    try:
        return gpd.read_file(full_path)
    except FileNotFoundError:
        print("Data file not found. Path was " + full_path)
def read_csv(loc, name, **kwargs):
    """
    Read a csv file from one of the three known data folders.

    :param loc: 'internal', 'external' or 'processed' (case-insensitive)
    :param name: file name of the csv
    :param kwargs: extra keyword arguments passed through to pandas.read_csv
    :return: DataFrame of the csv, or None if the file does not exist
    :raises ValueError: if loc is not one of the accepted locations
        (ValueError is more precise than the previous bare Exception and
        still caught by callers handling Exception)
    """
    #Map each location keyword to its data sub-folder
    folders = {
        "internal": CONSTANTS.PATH_RAW.value,
        "external": CONSTANTS.PATH_EXTERNAL.value,
        "processed": CONSTANTS.PATH_PROCESSED.value,
    }
    loc_key = loc.lower()
    if loc_key not in folders:
        raise ValueError('loc has to be either internal, external or processed')
    path = os.path.join(__FILE__, folders[loc_key], name)
    try:
        return pd.read_csv(path, **kwargs)
    except FileNotFoundError:
        print("Data file not found. Path was " + path)
def read_file(path=None, **kwargs):
"""
:param path: Path of the source file, if path = None dortmund.csv will be used.
:return: Read data as DataFrame
"""
if path is None:
path = os.path.join(__FILE__, CONSTANTS.PATH_RAW.value + "dortmund.csv")
else:
path = os.path.join(__FILE__, path)
try:
df = pd.read_csv(path, **kwargs)
return df
except FileNotFoundError:
print("Data file not found. Path was " + path)
def __read_model(name):
d = Path(__file__).resolve().parents[2]
with open(os.path.join(d, CONSTANTS.PATH_OUTPUT.value + name + ".pkl"), 'rb') as handle:
model = pickle.load(handle)
return model
| ey96/DataScienceBikesharing | nextbike/io/input.py | input.py | py | 2,177 | python | en | code | 0 | github-code | 13 |
8062497619 | import os
import sys
import requests
import argparse
# pip install "qrcode[pil]"
from PIL import Image
import qrcode
from pathlib import Path
from urllib.parse import urlparse
from io import BytesIO
ALL_DUCKS_ENDPOINT = "/admin/many-ducks"
OUTPUT_DIR = "./qrcodes"
def ducks_endpoint(frontend, id_of_duck):
url = urlparse(f"{frontend}/duck/{id_of_duck}")
return url.geturl()
def bearer_header(token):
return {"Authorization": f"Bearer {token}"}
def request_all_ducks(base_url, token):
url = urlparse(base_url + ALL_DUCKS_ENDPOINT)
rsp = requests.get(
url=url.geturl(),
headers=bearer_header(token),
verify=False,
)
if rsp.status_code == 400:
print("invalid admin token")
sys.exit(-1)
else:
return rsp.json()
def large_icon(icon_url: str):
path, name = icon_url.rsplit("/", maxsplit=1)
return "/".join([path, f"3x-{name}"])
def duck_info(duck_data):
return {
"id": duck_data["id"],
"name": duck_data["title"]["cn"],
"loc": duck_data["location"]["description"]["cn"],
"icon": large_icon(duck_data["duckIconUrl"]),
}
def get_icon(icon_url):
rsp = requests.get(icon_url)
return Image.open(BytesIO(rsp.content))
if __name__ == '__main__':
parser = argparse.ArgumentParser("generate QR codes of the ducks", allow_abbrev=True)
parser.add_argument("-e, --endpoint", dest="endpoint", type=str, help="backend base url")
parser.add_argument("-t, --token", dest="token", type=str, help="admin token")
parser.add_argument("-f, --frontend-url", dest="frontend", type=str, help="frontend base url")
args = parser.parse_args(args=sys.argv[1:])
ducks = [duck_info(dd) for dd in request_all_ducks(args.endpoint, args.token)]
os.makedirs(OUTPUT_DIR, exist_ok=True)
for duck in ducks:
qr = qrcode.QRCode(error_correction=qrcode.ERROR_CORRECT_H)
duck_url = ducks_endpoint(args.frontend, duck["id"])
print(duck_url)
qr.add_data(duck_url)
qr.make(fit=True)
qr_img: Image.Image = qr.make_image().get_image()
qr_img = qr_img.convert(mode="RGBA")
icon = get_icon(duck["icon"]).convert(mode="RGBA")
pos = int(qr_img.width / 2 - icon.width / 2)
mask = icon.getchannel("A").convert(mode="1")
Image.Image.paste(qr_img, icon, (pos, pos), mask=mask)
with open(Path(OUTPUT_DIR) / f"{duck['name']}({duck['loc']}).png", "wb") as fp:
qr_img.save(fp)
| Congyuwang/cyberduck-backend | python-utils/gen_duck_qr_codes.py | gen_duck_qr_codes.py | py | 2,505 | python | en | code | 2 | github-code | 13 |
1398254155 | # Uses python3
import sys
def fibonacci_partial_sum_naive(from_, to):
sum = 0
current = 0
next = 1
for i in range(to + 1):
if i >= from_:
sum += current
current, next = next, current + next
return sum % 10
def pisano10():
fibs = [0, 1] + [-1 for i in range(2, 61)]
for i in range(2, 61):
fibs[i] = fibs[i-1] + fibs[i-2]
sum_fibs = [sum(fibs[:i]) % 10 for i in range(1, len(fibs))]
return sum_fibs
def fibonacci_sum_fast(n):
"""The cumsum is also a period of length 60, like the Fib-mod-10 Pisano series."""
fib_sum_mod10 = pisano10()
return fib_sum_mod10[n % 60]
def fibonacci_partial_sum_fast(a, b):
return (fibonacci_sum_fast(b) + 10 - fibonacci_sum_fast(a-1)) % 10
if __name__ == '__main__':
input = sys.stdin.read();
from_, to = map(int, input.split())
print(fibonacci_partial_sum_fast(from_, to)) | AlexEngelhardt-old/courses | Data Structures and Algorithms/01 Algorithmic Toolbox/Week 2 - Algorithmic Warm-up/assignment/7_fibonacci_partial_sum.py | 7_fibonacci_partial_sum.py | py | 913 | python | en | code | 2 | github-code | 13 |
9735564748 | from operator import itemgetter
#input arr
arr = [2,1,2,2]
#Parse the arr for freq
parsed_arry = [[0 for x in range(2)] for y in range(len(arr))]
for x in arr:
parsed_arry[x][1] += 1
parsed_arry[x][0] = x
#Sort the array by 2nd column from desc order.
sorted_arr = sorted(parsed_arry, key=itemgetter(1), reverse=True)
#Count how many rows we need in the array
count = 0
for line in sorted_arr:
if line[0] != 0:
count += 1
#Copy the sorted array with extra rows over to the array with no extra zeros to match the stdout
finished_arr = [[0 for x in range(2)] for y in range(count)]
for i in range(len(sorted_arr)):
if sorted_arr[i][0] == 0:
break
else:
finished_arr[i] = sorted_arr[i]
| ScorpiosCrux/coding-challenges | hackerrank/amazon-practice/amazon_summary.py | amazon_summary.py | py | 737 | python | en | code | 0 | github-code | 13 |
35517934308 | import numpy as np
from .. import tools
from ..HomTra import HomTra
def mean_distance_between(focal_point, facet_centers):
"""
Returns the average distance between the focal_point position and all
the individual mirror facet center positions.
Parameter
---------
focal_point 3D position
facet_centers A list of 3D positions
"""
sum_of_distances = 0.0
for facet_center in facet_centers:
sum_of_distances += np.linalg.norm(focal_point - facet_center)
number_of_facets = facet_centers.shape[0]
return sum_of_distances/number_of_facets
def PAP_offset_in_z(
focal_length,
facet_centers,
max_iterations=10000,
precision=1e-4):
"""
Returns the position offset in z direction of the factory's frame to the
Principal Aperture Plane (PAP) of the reflector.
Parameter
---------
focal_length The focal length of the overall segmented imaging reflector
facet_centers A list of the 3D facet center positions
precision The precision needed on the PAP offset to quit the iteration
max_iterations An upper limit before quiting iteration without convergence
Note
----
This offset is expected to be 0.0 for the Davies-Cotton geometry and >0.0
for the parabolic geometry.
"""
focal_piont = np.array([0.0, 0.0, focal_length])
iteration = 0
while True:
iteration += 1
mean_dist = mean_distance_between(focal_piont, facet_centers)
delta_z = mean_dist - focal_length
if delta_z < precision:
break
if iteration > max_iterations:
raise RuntimeError(
'Unable to converge principal plane offset after '+
str(max_iterations)+
' iterations.')
focal_piont[2] = focal_piont[2] - 0.5*delta_z
return focal_piont[2] - focal_length
def ideal_reflector2facet(focal_piont, facet_center):
"""
Returns an ideal homogenoues transformation reflector2facet for a mirror
facet located at facet_center.
Parameter
---------
focal_piont The 3D focal point in the reflector frame
facet_center A 3D mirror facet center position in the reflector frame
"""
unit_z = np.array([0.0, 0.0, 1.0])
connection = focal_piont - facet_center
connection /= np.linalg.norm(connection)
rotation_axis = np.cross(unit_z, connection)
angle_to_unit_z = np.arccos(np.dot(unit_z, connection))
ideal_angle = angle_to_unit_z/2.0
reflector2facet = HomTra()
reflector2facet.set_translation(facet_center)
reflector2facet.set_rotation_axis_and_angle(rotation_axis, ideal_angle)
return reflector2facet
def ideal_reflector2facets(focal_length, facet_centers, PAP_offset):
"""
Returns a list of ideal homogenoues transformation reflector2facet for all
mirror facets on a reflector.
Parameter
---------
focal_length The focal length of the overall segmented imaging reflector
facet_centers A list of mirror facet centers in the reflector frame
PAP_offset The principal aperture offset in the reflector frame
"""
focal_piont_Rframe = np.array([0.0, 0.0, focal_length + PAP_offset])
reflector2facets = []
for facet_center in facet_centers:
reflector2facets.append(
ideal_reflector2facet(
focal_piont_Rframe,
facet_center))
return reflector2facets
def mirror_facet_centers(reflector):
"""
Returns a list of mirror facet center positions calculated from the
mirror tripod center positions.
Parameter
---------
reflector The reflector dictionary
"""
nodes = reflector['nodes']
tripods = reflector['mirror_tripods']
tripod_centers = tools.mirror_tripod_centers(nodes, tripods)
facet_centers = tripod_centers.copy()
facet_centers[:,2] += reflector['geometry'].bar_outer_diameter
return facet_centers
def make_reflector2tripods(reflector):
"""
Returns a list of homogenoues transformations from the reflector frame to
the mirror tripod centers
Parameter
---------
reflector The reflector dictionary
"""
nodes = reflector['nodes']
tripods = reflector['mirror_tripods']
reflector2tripods = []
for tripod in tripods:
center = tools.mirror_tripod_center(nodes, tripod)
Rx = tools.mirror_tripod_x(nodes, tripod)
Ry = tools.mirror_tripod_y(nodes, tripod)
Rz = tools.mirror_tripod_z(nodes, tripod)
t = HomTra()
t.T[:,0] = Rx
t.T[:,1] = Ry
t.T[:,2] = Rz
t.T[:,3] = center
reflector2tripods.append(t)
return reflector2tripods
def ideal_alignment(reflector):
"""
Returns the ideal alignment of the mirror facets for a given reflector.
tripod2facet A list of homogenoues
transformations from a mirror
tripod frame to the corresponding
mirror facet frame
principal_aperture_plane_offset The positional offset to the
principal aperture plane of the
segmented reflector in the reflector
factory frame
Parameter
---------
reflector The reflector dictionary
"""
facet_centers = mirror_facet_centers(reflector)
focal_length = reflector['geometry'].focal_length
PAP_offset = PAP_offset_in_z(
focal_length=focal_length,
facet_centers=facet_centers)
reflector2facets = ideal_reflector2facets(
focal_length=focal_length,
facet_centers=facet_centers,
PAP_offset=PAP_offset)
reflector2tripods = make_reflector2tripods(reflector)
tripods2facets = []
for i in range(len(reflector2facets)):
reflector2facet = reflector2facets[i]
reflector2tripod = reflector2tripods[i]
tripod2facet = reflector2tripod.inverse().multiply(reflector2facet)
tripods2facets.append(tripod2facet)
return {
'tripods2facets': tripods2facets,
'principal_aperture_plane_offset': PAP_offset
}
def reflector2facets(reflector, alignment):
"""
Returns a list of homogenoues transformations from the reflector frame to
each mirror facet frame.
Parameter
---------
reflector The reflector dictionary
alignment The alignment dictionary
"""
reflector2tripods = make_reflector2tripods(reflector)
reflector2facets = []
for i in range(len(reflector2tripods)):
tripod2facet = alignment['tripods2facets'][i]
reflector2tripod = reflector2tripods[i]
reflector2facet = reflector2tripod.multiply(tripod2facet)
reflector2facets.append(reflector2facet)
return reflector2facets
| cherenkov-plenoscope/cable_robo_mount | cable_robo_mount/mirror_alignment/mirror_alignment.py | mirror_alignment.py | py | 6,973 | python | en | code | 0 | github-code | 13 |
24471430476 | #!/usr/bin/python3
import os
def HammingDistance(firstBinary, secondBinary):
assert len(firstBinary) == len(secondBinary)
result = 0
for i in range(len(firstBinary)):
if (firstBinary[i] != secondBinary[i]):
result += 1
return result
def hexCharToBin(hexChar):
if (hexChar == '0'): return '0000'
if (hexChar == '1'): return '0001'
if (hexChar == '2'): return '0010'
if (hexChar == '3'): return '0011'
if (hexChar == '4'): return '0100'
if (hexChar == '5'): return '0101'
if (hexChar == '6'): return '0110'
if (hexChar == '7'): return '0111'
if (hexChar == '8'): return '1000'
if (hexChar == '9'): return '1001'
if (hexChar == 'a'): return '1010'
if (hexChar == 'b'): return '1011'
if (hexChar == 'c'): return '1100'
if (hexChar == 'd'): return '1101'
if (hexChar == 'e'): return '1110'
if (hexChar == 'f'): return '1111'
else: print('Something went wrong')
def hexStringToBinString(hexString):
result = ""
for i in range(len(hexString)):
result += hexCharToBin(hexString[i])
return result
somedir = './puf_data'
filenames = [f for f in os.listdir(somedir) if os.path.isfile(os.path.join(somedir, f))]
#Number of LinesPerFile. This number is extracted manually by looking at one File
linesPerFile = 512
n = 256
#Init for the two Arrays
puf_data = [[0 for x in range(linesPerFile)] for y in range(len(filenames))]
puf_data_bin = [[0 for x in range(linesPerFile)] for y in range(len(filenames))]
for i in range(len(filenames)):
file = open(somedir + "/" + filenames[i], "r")
for j in range(linesPerFile):
puf_data[i][j] = file.readline()
#Deletes the \n charakter
puf_data[i][j] = puf_data[i][j][:-1]
puf_data_bin[i][j] = hexStringToBinString(puf_data[i][j])
file.close()
print('Reading the files complete! Starting the Calcualations!')
#now every line is stored with its HEX-Values in puf_data. It is a 2D Array filled with strings. The "Rows" are the different files and the "Columns" are the different lines. puf_data_bin stores the exact same values as binary string
uniqueness = 0.0
avg_uniqueness = 0.0
for file in range(len(filenames)):
for line in range(linesPerFile - 1):
for otherline in range(line + 1, linesPerFile):
uniqueness += HammingDistance(puf_data_bin[file][line],puf_data_bin[file][otherline])
faktor = 2.0/(linesPerFile * (linesPerFile - 1) * n)
uniqueness *= faktor
avg_uniqueness += uniqueness
avg_uniqueness = avg_uniqueness / len(filenames)
print('The Average Uniqueness of all files is: ' + str(100 * avg_uniqueness) + '%')
reliability = 0.0
avg_reliabilty = 0.0
for line in range(linesPerFile):
for file in range(len(filenames) - 1):
for otherfile in range(file + 1, len(filenames)):
reliability += HammingDistance(puf_data_bin[file][line], puf_data_bin[otherfile][line])
faktor = 2.0 / (len(filenames) * (len(filenames) - 1) * n)
reliability *= faktor
avg_reliabilty += reliability
avg_reliabilty /= linesPerFile
print('The Average Reliability of all files is: ' + str( 100 * (1 - avg_reliabilty))+ '%')
uniformity = 0.0
avg_uniformity = 0.0
for file in range(len(filenames)):
for line in range(linesPerFile):
avg_uniformity += HammingDistance(puf_data_bin[file][line], '0' * n) / float(n)
avg_uniformity /= (linesPerFile * (len(filenames)))
print('The Average Uniformity of all files is: ' + str(100 * avg_uniformity) + '%')
aliasing = 0.0
avg_aliasing = 0.0
for file in range(len(filenames)):
for bit in range(n):
for line in range(linesPerFile):
aliasing += HammingDistance(puf_data_bin[file][line][bit], '0')
aliasing /= (linesPerFile * n)
avg_aliasing += aliasing
avg_aliasing /= len(filenames)
print('The Average Bit-Aliasing of all files is: ' + str(100 * avg_aliasing) + '%') | Pelcz97/HW-Sicherheit | Task1-PUF/source_files/analyze_puf_data.py | analyze_puf_data.py | py | 3,926 | python | en | code | 0 | github-code | 13 |
15642702528 | import cv2
import easy_tf_log
import numpy as np
from gym import spaces
from gym.core import ObservationWrapper, Wrapper
"""
Wrappers for gym environments to help with debugging.
"""
class NumberFrames(ObservationWrapper):
"""
Draw number of frames since reset.
"""
def __init__(self, env):
ObservationWrapper.__init__(self, env)
self.frames_since_reset = None
def reset(self):
self.frames_since_reset = 0
return self.observation(self.env.reset())
def observation(self, obs):
# Make sure the numbers are clear even if some other wrapper takes maxes of observations
# over pairs of time steps
if self.frames_since_reset % 2 == 0:
x = 0
else:
x = 70
cv2.putText(obs,
str(self.frames_since_reset),
org=(x, 70),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=2.0,
color=(255, 255, 255),
thickness=2)
self.frames_since_reset += 1
return obs
class EarlyReset(Wrapper):
"""
Reset the environment after 100 steps.
"""
def __init__(self, env):
Wrapper.__init__(self, env)
self.n_steps = None
def reset(self):
self.n_steps = 0
return self.env.reset()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.n_steps += 1
if self.n_steps >= 100:
done = True
return obs, reward, done, info
class ConcatFrameStack(ObservationWrapper):
"""
Concatenate a stack horizontally into one long frame.
"""
def __init__(self, env):
ObservationWrapper.__init__(self, env)
# Important so that gym's play.py picks up the right resolution
obs_shape = env.observation_space.shape
assert len(obs_shape) == 3 # height, width, n_stack
self.observation_space = spaces.Box(low=0, high=255,
shape=(obs_shape[0], obs_shape[1] * obs_shape[2]),
dtype=np.uint8)
def observation(self, obs):
assert len(obs.shape) == 3
obs = np.moveaxis(obs, -1, 0)
return np.hstack(obs)
class MonitorEnv(Wrapper):
"""
Log per-episode rewards and episode lengths.
"""
def __init__(self, env, log_prefix="", log_dir=None):
Wrapper.__init__(self, env)
if log_prefix:
self.log_prefix = log_prefix + ": "
else:
self.log_prefix = ""
if log_dir is not None:
self.logger = easy_tf_log.Logger()
self.logger.set_log_dir(log_dir)
else:
self.logger = None
self.episode_rewards = None
self.episode_length_steps = None
self.episode_n = -1
self.episode_done = None
def reset(self):
self.episode_rewards = []
self.episode_length_steps = 0
self.episode_n += 1
self.episode_done = False
return self.env.reset()
def step(self, action):
if self.episode_done:
raise Exception("Attempted to call step() after episode done")
obs, reward, done, info = self.env.step(action)
self.episode_rewards.append(reward)
self.episode_length_steps += 1
if done:
self.episode_done = True
reward_sum = sum(self.episode_rewards)
print("{}Episode {} finished; episode reward sum {}".format(self.log_prefix,
self.episode_n,
reward_sum))
if self.logger:
self.logger.logkv('rl/episode_reward_sum', reward_sum)
self.logger.logkv('rl/episode_length_steps', self.episode_length_steps)
return obs, reward, done, info
| mrahtz/ocd-a3c | debug_wrappers.py | debug_wrappers.py | py | 3,966 | python | en | code | 38 | github-code | 13 |
26377419112 | import pandas as pd
import numpy as np
from constants import *
def getData():
df = pd.read_csv(TRAIN_DATA_FILEPATH, encoding = DATASET_ENCODING, header = None, names = DATASET_COLUMNS)
df = df.fillna("")
usefulColumns = ['target','text']
df = df[usefulColumns]
df['target'] = df['target'].astype(np.int8)
return df
if __name__=='__main__':
twitter_df = getData()
| neha-singh09/sentiment_analysis | data_loading.py | data_loading.py | py | 395 | python | en | code | 0 | github-code | 13 |
8596874382 | from sklearn.feature_selection import mutual_info_regression
from carotid import carotid_data_util as cdu
from scipy.stats import pearsonr
dataset = 'ko'
target = 'Stenosis_code'
seed = 7
if dataset == 'ko':
id_all, x_data_all, y_data_all = cdu.get_ko(target)
fName = 'svm_ko.csv'
elif dataset == 'jim':
id_all, x_data_all, y_data_all = cdu.get_jim(target)
fName = 'svm_jim.csv'
elif dataset == 'jim2':
id_all, x_data_all, y_data_all = cdu.get_jim2(target)
fName = 'svm_jim2.csv'
else:
id_all, x_data_all, y_data_all = cdu.get_new(target)
fName = 'svm_new.csv'
x = x_data_all['XLC_PS'].values
y = x_data_all['XLC_ED'].values
print("mutal info", mutual_info_regression(x.reshape(-1, 1), y.reshape(-1, 1)))
print("Pearson (sorce, p-value)", pearsonr(x, y))
| chingheng113/ml_farm | carotid/bk/independence_test.py | independence_test.py | py | 791 | python | en | code | 0 | github-code | 13 |
41180728714 | import cv2
import numpy as np
img = cv2.imread('tapu.jpg')
img_cpy = img.copy()
img_cpy2 = img.copy()
img_cpy3 = img.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, img_thres = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
img_cont, contours, hierarchy = cv2.findContours(img_thres, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
n_contours = list(filter(lambda x: cv2.contourArea(x) > 100000, contours))
cv2.drawContours(img_cpy, n_contours, -1, color=(0, 0, 0), thickness=-1)
cv2.drawContours(img_cpy2, contours, -1, color=(0, 0, 255), thickness=2)
cv2.drawContours(img_cpy3, n_contours, -1, color=(0, 255, 0), thickness=3)
black_min = np.array([0, 0, 0], np.uint8)
black_max = np.array([1, 1, 1], np.uint8)
mask = cv2.inRange(img_cpy, black_min, black_max)
mask = cv2.bitwise_not(mask)
mask_rgb = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
img_cpy = cv2.bitwise_and(img_cpy, mask_rgb)
cv2.imshow('mask', mask)
cv2.imshow('tapu contour1', img_cpy2)
cv2.imshow('tapu contour2', img_cpy3)
cv2.imshow('tapu blackback', img_cpy)
cv2.waitKey(0)
cv2.destroyAllWindows()
#cv2.imwrite('../pic/contour_large.png', img_cpy3)
#cv2.imwrite('../pic/silhouette.png', mask)
#cv2.imwrite('../pic/tapu_bb.png', img_cpy)
"""
hsv = cv2.cvtColor(img_cpy, cv2.COLOR_BGR2HSV_FULL)
h = hsv[:, :, 0]
s = hsv[:, :, 1]
mask = np.zeros(h.shape, dtype=np.uint8)
mask[((h < 20) | (h > 200)) & (s > 128)] = 255
print(type(mask), mask.shape)
print(type(img_cpy), img_cpy.shape)
cv2.imshow('red', mask)
mask = cv2.resize(mask, img.shape[1::-1])
gray_mask = cv2.cvtColor(mask, cv2.COLOR_BAYER_BG2GRAY)
masked_img = cv2.bitwise_and(img, gray_mask)
cv2.imshow('masked', masked_img)
red_min = np.array([0, 0, 0], np.uint8)
red_max = np.array([0, 0, 255], np.uint8)
mask_red = cv2.inRange(img_filter, red_min, red_max)
mask = cv2.bitwise_not(mask_red)
mask_rgb = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
masked_img = cv2.bitwise_and(img, mask_rgb)
#cv2.imshow('tapu red', mask_red)
#cv2.imshow('tapu mask', mask)
cv2.imshow('tapu filetr', masked_img)
"""
| knzkhuka/4e_experiment | 8_computer_graphics/prog/mask.py | mask.py | py | 2,035 | python | en | code | 0 | github-code | 13 |
70583484178 | import peers
from core.nodes import Miner, Node, Wallet
from core.base import indieChain, Block
import tkinter as tk
from tkinter import *
from tkinter import Canvas, Entry, Label
import logging
from time import sleep
import _thread
from threading import Thread
from core.base import UTXO, Transaction
import random
# loggin.Logger.set
logging.basicConfig(level=logging.DEBUG)
#logging.basicConfig(level=logging.ERROR)
main_port = random.randint(4000, 9999)
nodelist = ['a', 'b', 'c', 'd','e','f','g','h']
man1, man2 = None, None
wal1, wal2 = None, None
def main(port):
global man1, wal1
man = peers.Manager('localhost', port)
man1 = man
ic = indieChain()
miner = Miner(ic)
man.setNode(miner)
miner.setNetwork(man)
#print(miner.getNodePublicKey().exportKey())
try:
man.activity_loop()
except Exception as e:
man.close()
raise(e)
def peer1(id, main_port, recvaddr, amount,sender):
global man2, wal2
sleep(1)
man = peers.Manager('localhost', main_port + id)
man2 = man
ic = indieChain()
node = Node(ic)
man.setNode(node)
node.setNetwork(man)
wallet = Wallet(node, 10000)
wal2 = wallet
try:
loop = man.loop
peer = loop.run_until_complete(man.connect_to_peer('localhost', main_port))
sleep(1)
# loop.run_until_complete(peer.send_raw_data(b'asdfa'))
# man.broadcastTrx(t1)
for i in range(0, len(recvaddr), 2):
wallet.finalizeTransaction([wallet.makePayment(recvaddr[i], int(amount[i]) ), wallet.makePayment(recvaddr[i+1], int(amount[i+1]) )])
# man.broadcastToMiners(b)
if len(recvaddr)%2 == 1:
j = len(recvaddr) -1
wallet.finalizeTransaction([wallet.makePayment(recvaddr[j], int(amount[j]) ), wallet.makePayment(sender,0) ] )
print(peer._key.exportKey())
# blk = loop.run_until_complete(man.getBlock(peer.id, '3248234a983b7894d923c49'))
# print(blk)
man.activity_loop()
except Exception as e:
man.close()
raise(e)
class MainWindow(tk.Frame):
wallet_counter = 0
miner_counter = 0
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
# self.button1 = tk.Button(self, text="Create new node", command=self.create_node)
#self.button2 = tk.Button(self, text="Create new miner", command=self.create_miner)
self.button3 = tk.Button(self, text="Create new wallet", command=self.create_wallet)
# self.button1.pack(side="top")
#self.button2.pack(side="top")
self.button3.pack(side="top")
def create_wallet(self):
self.wallet_counter += 1
self.recvaddr = []
self.amount = []
t = tk.Toplevel(self)
t.wm_title("Wallet #%s" % self.wallet_counter)
lr = Label(t,text='Receiver address: ').pack(side=TOP, padx=10, pady=10)
entryr = Entry(t, width=20)
entryr.pack(side=TOP,padx=10,pady=10)
ln = Label(t,text='Amount: ').pack(side=TOP, padx=10, pady=10)
entryn = Entry(t, width=20)
entryn.pack(side=TOP,padx=10,pady=10)
def onTransact():
recvaddr1 = entryr.get()
self.recvaddr = recvaddr1.split(',')
amount1 = entryn.get()
self.amount = amount1.split(',')
sender = nodelist[self.wallet_counter]
try:
t1 = Thread(target=main, args=(main_port,))
t1.start()
t2 = Thread(target=peer1, args=(1, main_port, self.recvaddr, self.amount, sender) )
t2.start()
t1.join()
print("Thread-1 joined")
except KeyboardInterrupt as kb:
pass
except Exception as e:
print("Error: unable to start thread")
raise(e)
Button(t, text='GO', command=onTransact).pack(side=TOP)
root = tk.Tk()
main_win = MainWindow(root)
main_win.pack(side="top", fill="both", expand=True)
root.mainloop()
# def peer_tester(id, main_port):
# man = peers.Manager('localhost', 5003)
# try:
# loop = man.loop
# peer = loop.run_until_complete(man.connect_to_peer('localhost', main_port))
# loop.run_until_complete(peer.send_raw_data(b'derfadsa'))
# loop.ensure_future(peer.send_raw_data(b'derfadsa'))
# print(loop._ready)
# loop.run_forever()
# print("DAta sent")
# except Exception as e:
# man.close()
# raise(e)
# ex: set tabstop=4 shiftwidth=4 expandtab:
| asutoshpalai/indiechain | gui_test.py | gui_test.py | py | 4,625 | python | en | code | 13 | github-code | 13 |
26993571685 | class vertex:
def __init__(self, value, visited):
self.value = value
self.visited = visited
self.adj_vertices = []
self.in_vertices = []
class graph:
g = []
def __init__(self, g):
self.g = g
# This method creates a graph from a list of words. A node of
# the graph contains a character representing the start or end
# character of a word.
def create_graph(self, words_list):
for i in range(len(words_list)):
word = words_list[i]
start_char = word[0]
end_char = word[len(word) - 1]
start = self.vertex_exists(start_char)
if start == None:
start = vertex(start_char, False)
self.g.append(start)
end = self.vertex_exists(end_char)
if end == None:
end = vertex(end_char, False)
self.g.append(end)
# Add an edge from start vertex to end vertex
self.add_edge(start, end)
# This method returns the vertex with a given value if it
# already exists in the graph, returns NULL otherwise
def vertex_exists(self, value):
for i in range(len(self.g)):
if self.g[i].value == value:
return self.g[i]
return None
# This method returns TRUE if all nodes of the graph have
# been visited
def all_visited(self):
for i in range(len(self.g)):
if self.g[i].visited == False:
return False
return True
# This method adds an edge from start vertex to end vertex by
# adding the end vertex in the adjacency list of start vertex
# It also adds the start vertex to the in_vertices of end vertex
def add_edge(self, start, end):
start.adj_vertices.append(end)
end.in_vertices.append(start)
# This method returns TRUE if out degree of each vertex is equal
# to its in degree, returns FALSE otherwise
def out_equals_in(self):
for i in range(len(self.g)):
out = len(self.g[i].adj_vertices)
inn = len(self.g[i].in_vertices)
if out != inn:
return False
return True
# This method returns TRUE if the graph has a cycle containing
# all the nodes, returns FALSE otherwise
def can_chain_words_rec(self, node, starting_node):
node.visited = True
# Base case
# return TRUE if all nodes have been visited and there
# exists an edge from the last node being visited to
# the starting node
adj = node.adj_vertices
if self.all_visited():
for i in range(len(adj)):
if adj[i] == starting_node:
return True
# Recursive case
for i in range(len(adj)):
if adj[i].visited == False:
node = adj[i]
if self.can_chain_words_rec(node, starting_node):
return True
return False
def can_chain_words(self, list_size):
# Empty list and single word cannot form a chain
if list_size < 2:
return False
if len(self.g) > 0:
if self.out_equals_in():
return self.can_chain_words_rec(self.g[0], self.g[0])
return False
def print_graph(self):
for i in range(len(self.g)):
print(self.g[i].value + " " + str(self.g[i].visited) + "\n")
adj = self.g[i].adj_vertices
for j in range(len(adj)):
print(adj[j].value + " ")
print("\n")
import unittest
import sys
class verification_tests(unittest.TestCase):
def test_case1(self):
g = graph([])
list_of_words=['eve', 'eat', 'ripe', 'tear']
g.create_graph(list_of_words)
self.assertTrue(g.can_chain_words(len(list_of_words)))
def test_case2(self):
g = graph([])
list_of_words=['aba','aba']
g.create_graph(list_of_words)
self.assertTrue(g.can_chain_words(len(list_of_words)))
def test_case3(self):
g = graph([])
list_of_words=['deg','fed']
g.create_graph(list_of_words)
self.assertFalse(g.can_chain_words(len(list_of_words)))
def test_case4(self):
g = graph([])
list_of_words=['ghi', 'abc', 'def', 'xyz']
g.create_graph(list_of_words)
self.assertFalse(g.can_chain_words(len(list_of_words)))
if __name__ == '__main__':
unittest.main() | myers-dev/Data_Structures | graphs/circular_words/main-working.py | main-working.py | py | 4,193 | python | en | code | 1 | github-code | 13 |
42434930366 | #!/usr/bin/python3
import argparse
import subprocess
DB_PATH = '/lustre7/software/experimental/biocontainers_image/command.db'
def main():
args = parse_args()
if args.command:
search_by_command(args.command)
elif args.image:
search_by_image(args.image)
elif args.filepath:
search_by_filepath(args.filepath)
def parse_args():
parser = argparse.ArgumentParser(description='search database of biocontainer singularity image.')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-c', '--command', action='append', help='outputs the file path of the singularity image containing the specified commands.')
group.add_argument('-i', '--image', help='outputs a list of package and version contained in the specified singularity image.')
group.add_argument('-f', '--filepath', help='outputs a list of package and version contained in the specified file path of singularity image.')
args = parser.parse_args()
#print(args)
return(args)
def search_by_command(commands):
sql_part = []
for command in commands:
sql_part.append('SELECT filepath FROM COMMAND_IMAGE_FILEPATH WHERE command=\'' + command + '\'')
sql = '"' + ' INTERSECT '.join(sql_part) + ' ORDER BY filepath;"'
#print(sql)
command = 'sqlite3' + ' ' + DB_PATH + ' ' + sql
#print(command)
exec_subprocess(command)
def search_by_image(image):
sql = '"' + 'SELECT command FROM COMMAND_IMAGE_FILEPATH WHERE image=\'' + image + '\' ORDER BY command;' + '"'
# print(SQL)
command = 'sqlite3' + ' ' + DB_PATH + ' ' + sql
# print(COMMAND)
exec_subprocess(command)
def search_by_filepath(filepath):
sql = '"' + 'SELECT command FROM COMMAND_IMAGE_FILEPATH WHERE filepath=\'' + filepath + '\' ORDER BY command;' + '"'
# print(SQL)
command = 'sqlite3' + ' ' + DB_PATH + ' ' + sql
# print(COMMAND)
exec_subprocess(command)
def exec_subprocess(command):
proc = subprocess.run(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True)
print(proc.stdout.decode("utf8"))
if proc.stderr:
print(proc.stderr.decode("utf8"))
if __name__ == '__main__':
main()
| yookuda/biocontainers_image | search_command_db.py | search_command_db.py | py | 2,211 | python | en | code | 0 | github-code | 13 |
72915318418 | import dataclasses
from typing import List, Iterator
from qutebrowser.commands import cmdexc, command
from qutebrowser.misc import split, objects
from qutebrowser.config import config
@dataclasses.dataclass
class ParseResult:

    """The result of parsing a commandline.

    Attributes:
        cmd: The parsed Command object.
        args: The argument strings passed to the command.
        cmdline: The full commandline, split into parts (command plus args).
    """

    cmd: command.Command
    args: List[str]
    cmdline: List[str]
class CommandParser:
"""Parse qutebrowser commandline commands.
Attributes:
_partial_match: Whether to allow partial command matches.
_find_similar: Whether to find similar matches on unknown commands.
If we use this for completion, errors are not shown in the UI,
so we don't need to search.
"""
    def __init__(
        self,
        partial_match: bool = False,
        find_similar: bool = False,
    ) -> None:
        """Initialize the parser.

        Args:
            partial_match: Whether to allow partial command matches.
            find_similar: Whether to find similar matches on unknown
                commands (disabled for completion, where errors are not
                shown in the UI).
        """
        self._partial_match = partial_match
        self._find_similar = find_similar
def _get_alias(self, text: str, *, default: str) -> str:
"""Get an alias from the config.
Args:
text: The text to parse.
aliases: A map of aliases to commands.
default : Default value to return when alias was not found.
Return:
The new command string if an alias was found. Default value
otherwise.
"""
parts = text.strip().split(maxsplit=1)
aliases = config.cache['aliases']
if parts[0] not in aliases:
return default
alias = aliases[parts[0]]
try:
new_cmd = '{} {}'.format(alias, parts[1])
except IndexError:
new_cmd = alias
if text.endswith(' '):
new_cmd += ' '
return new_cmd
def _parse_all_gen(
self,
text: str,
aliases: bool = True,
**kwargs: bool,
) -> Iterator[ParseResult]:
"""Split a command on ;; and parse all parts.
If the first command in the commandline is a non-split one, it only
returns that.
Args:
text: Text to parse.
aliases: Whether to handle aliases.
**kwargs: Passed to parse().
Yields:
ParseResult tuples.
"""
text = text.strip().lstrip(':').strip()
if not text:
raise cmdexc.EmptyCommandError
if aliases:
text = self._get_alias(text, default=text)
if ';;' in text:
# Get the first command and check if it doesn't want to have ;;
# split.
first = text.split(';;')[0]
result = self.parse(first, **kwargs)
if result.cmd.no_cmd_split:
sub_texts = [text]
else:
sub_texts = [e.strip() for e in text.split(';;')]
else:
sub_texts = [text]
for sub in sub_texts:
yield self.parse(sub, **kwargs)
def parse_all(self, text: str, **kwargs: bool) -> List[ParseResult]:
"""Wrapper over _parse_all_gen."""
return list(self._parse_all_gen(text, **kwargs))
def parse(self, text: str, *, keep: bool = False) -> ParseResult:
"""Split the commandline text into command and arguments.
Args:
text: Text to parse.
keep: Whether to keep special chars and whitespace.
"""
cmdstr, sep, argstr = text.partition(' ')
if not cmdstr:
raise cmdexc.EmptyCommandError
if self._partial_match:
cmdstr = self._completion_match(cmdstr)
try:
cmd = objects.commands[cmdstr]
except KeyError:
raise cmdexc.NoSuchCommandError.for_cmd(
cmdstr,
all_commands=list(objects.commands) if self._find_similar else [],
)
args = self._split_args(cmd, argstr, keep)
if keep and args:
cmdline = [cmdstr, sep + args[0]] + args[1:]
elif keep:
cmdline = [cmdstr, sep]
else:
cmdline = [cmdstr] + args[:]
return ParseResult(cmd=cmd, args=args, cmdline=cmdline)
def _completion_match(self, cmdstr: str) -> str:
"""Replace cmdstr with a matching completion if there's only one match.
Args:
cmdstr: The string representing the entered command so far.
Return:
cmdstr modified to the matching completion or unmodified
"""
matches = [cmd for cmd in sorted(objects.commands, key=len)
if cmdstr in cmd]
if len(matches) == 1:
cmdstr = matches[0]
elif len(matches) > 1 and config.val.completion.use_best_match:
cmdstr = matches[0]
return cmdstr
def _split_args(self, cmd: command.Command, argstr: str, keep: bool) -> List[str]:
"""Split the arguments from an arg string.
Args:
cmd: The command we're currently handling.
argstr: An argument string.
keep: Whether to keep special chars and whitespace
Return:
A list containing the split strings.
"""
if not argstr:
return []
elif cmd.maxsplit is None:
return split.split(argstr, keep=keep)
else:
# If split=False, we still want to split the flags, but not
# everything after that.
# We first split the arg string and check the index of the first
# non-flag args, then we re-split again properly.
# example:
#
# input: "--foo -v bar baz"
# first split: ['--foo', '-v', 'bar', 'baz']
# 0 1 2 3
# second split: ['--foo', '-v', 'bar baz']
# (maxsplit=2)
split_args = split.simple_split(argstr, keep=keep)
flag_arg_count = 0
for i, arg in enumerate(split_args):
arg = arg.strip()
if arg.startswith('-'):
if arg in cmd.flags_with_args:
flag_arg_count += 1
else:
maxsplit = i + cmd.maxsplit + flag_arg_count
return split.simple_split(argstr, keep=keep,
maxsplit=maxsplit)
# If there are only flags, we got it right on the first try
# already.
return split_args
| qutebrowser/qutebrowser | qutebrowser/commands/parser.py | parser.py | py | 6,430 | python | en | code | 9,084 | github-code | 13 |
41884230960 | from django.db import transaction
from rest_framework import serializers
from rest_framework.exceptions import NotFound, PermissionDenied
from core.models import User
from core.serializers import ProfileSerializer
from goals.choices import Role, Status
from goals.models import Board, BoardParticipant, Goal, GoalCategory, GoalComment
class ParticipantSerializer(serializers.ModelSerializer):
    """Serializer for a single board participant (user + role)."""
    # Only roles listed in BoardParticipant.editable_roles may be assigned here.
    role = serializers.ChoiceField(choices=BoardParticipant.editable_roles)
    # Participants are referenced by username rather than primary key.
    user = serializers.SlugRelatedField(
        slug_field='username', queryset=User.objects.all()
    )
    class Meta:
        model = BoardParticipant
        exclude = ('is_deleted',)
        read_only_fields = ('id', 'created', 'updated', 'board')
class BoardSerializer(serializers.ModelSerializer):
    """Basic board serializer without participant details."""
    class Meta:
        model = Board
        read_only_fields = ('id', 'created', 'updated')
        exclude = ('is_deleted',)
class BoardWithParticipantsSerializer(BoardSerializer):
    """Board serializer that also exposes and synchronises its participants."""
    participants = ParticipantSerializer(many=True)
    def update(self, instance: Board, validated_data: dict):
        """Sync the participant list (add / change role / remove; the
        requesting user's own membership is never touched) and rename the
        board. Runs inside a single transaction.
        """
        # Get the owner of the board
        # NOTE(review): this is the requesting user; presumably permissions
        # upstream guarantee it is the owner — confirm.
        owner = self.context['request'].user
        # Get the new participants from the validated data
        new_participants = validated_data.pop('participants')
        # Create a dictionary with the new participants mapped by user id
        new_by_id = {part['user'].id: part for part in new_participants}
        # Get the old participants excluding the owner
        old_participants = instance.participants.exclude(user=owner)
        with transaction.atomic():
            # Loop through the old participants
            for old_participant in old_participants:
                # If the old participant is not in the new participants, delete it
                if old_participant.user_id not in new_by_id:
                    old_participant.delete()
                else:
                    # If the role of the old participant is different from the new role,
                    # update the role
                    if (
                        old_participant.role
                        != new_by_id[old_participant.user_id]['role']
                    ):
                        old_participant.role = new_by_id[old_participant.user_id][
                            'role'
                        ]
                        old_participant.save()
                    # Remove the old participant from the new_by_id dictionary
                    new_by_id.pop(old_participant.user_id)
            # Create new board participants for the remaining participants in new_by_id
            for new_part in new_by_id.values():
                BoardParticipant.objects.create(
                    board=instance, user=new_part['user'], role=new_part['role']
                )
            # Update the title of the board
            instance.title = validated_data['title']
            instance.save()
        return instance
class GoalCategoryCreateSerializer(serializers.ModelSerializer):
    """Write serializer for goal categories; validates board access."""
    user = serializers.HiddenField(default=serializers.CurrentUserDefault())
    def validate_board(self, board: Board) -> Board:
        """
        Function to validate the board.
        Args:
            board (Board): The board object to be validated.
        Returns:
            Board: The validated board object.
        Raises:
            serializers.ValidationError: If the board is deleted.
            PermissionDenied: If the user does not have access to the board.
        """
        # Check if the board is deleted
        if board.is_deleted:
            raise serializers.ValidationError('Доска не найдена')
        # Check if the user has access to the board
        if not BoardParticipant.objects.filter(
            board=board,
            user=self.context['request'].user,
            role__in=[Role.owner, Role.writer],
        ).exists():
            # NOTE(review): the message mentions "категории" although a board
            # is being validated — possible copy-paste; confirm intent.
            raise PermissionDenied('Нет доступа к этой категории')
        # Return the validated board
        return board
    class Meta:
        model = GoalCategory
        read_only_fields = ('id', 'created', 'updated', 'user')
        fields = '__all__'
class GoalCategorySerializer(GoalCategoryCreateSerializer):
    """Read serializer: embeds the full user profile instead of hiding it."""
    user = ProfileSerializer(read_only=True)
class GoalCreateSerializer(serializers.ModelSerializer):
    """Write serializer for goals; validates category access."""
    user = serializers.HiddenField(default=serializers.CurrentUserDefault())
    def validate_category(self, category: GoalCategory) -> GoalCategory:
        """
        Validates the given category object.
        Args:
            category (GoalCategory): The category object to validate.
        Returns:
            GoalCategory: The validated category object.
        Raises:
            serializers.ValidationError: If the category is deleted.
            PermissionDenied: If the current user does not have access to the category.
        """
        # Check if the category is deleted
        if category.is_deleted:
            raise serializers.ValidationError('Категория не найдена')
        # Check if the current user has access to the category
        if not BoardParticipant.objects.filter(
            board=category.board,
            user=self.context['request'].user,
            role__in=[Role.owner, Role.writer],
        ).exists():
            raise PermissionDenied('Нет доступа к этой категории')
        # Return the validated category object
        return category
    class Meta:
        model = Goal
        fields = '__all__'
        read_only_fields = ('id', 'created', 'updated', 'user')
class GoalSerializer(serializers.ModelSerializer):
    """Read serializer for a goal; embeds the full user profile."""
    user = ProfileSerializer(read_only=True)
    class Meta:
        model = Goal
        fields = '__all__'
        read_only_fields = ('id', 'created', 'updated', 'user')
class GoalListSerializer(serializers.ModelSerializer):
    """Compact goal serializer for list views (omits the user field)."""
    class Meta:
        model = Goal
        exclude = ('user',)
class GoalCommentCreateSerializer(serializers.ModelSerializer):
    """Write serializer for goal comments; validates goal access."""
    user = serializers.HiddenField(default=serializers.CurrentUserDefault())
    def validate_goal(self, goal: Goal) -> Goal:
        """
        Validates the given goal and performs necessary checks.
        Args:
            goal (Goal): The goal to validate.
        Returns:
            Goal: The validated goal.
        Raises:
            NotFound: If the goal's status is archived.
            PermissionDenied: If the user doesn't have access to the category board.
        """
        # Check if the goal's status is archived
        if goal.status == Status.archived:
            # NOTE(review): message says "Доска не найдена" (board) although a
            # goal is being checked — possible copy-paste; confirm intent.
            raise NotFound('Доска не найдена')
        # Check if the user has access to the category board
        board_participant_exists = BoardParticipant.objects.filter(
            board=goal.category.board,
            user=self.context['request'].user,
            role__in=[Role.owner, Role.writer],
        ).exists()
        if not board_participant_exists:
            raise PermissionDenied('Нет доступа к этой категории')
        # Return the validated goal
        return goal
    class Meta:
        model = GoalComment
        read_only_fields = ('id', 'created', 'updated', 'user')
        fields = '__all__'
class GoalCommentSerializer(serializers.ModelSerializer):
    """Read serializer for a goal comment; goal cannot be re-assigned."""
    user = ProfileSerializer(read_only=True)
    goal = serializers.PrimaryKeyRelatedField(read_only=True)
    class Meta:
        model = GoalComment
        read_only_fields = ('id', 'created', 'updated', 'user', 'goal')
        fields = '__all__'
| Danilu2537/ToDo-web-app | goals/serializers.py | serializers.py | py | 7,619 | python | en | code | 1 | github-code | 13 |
17061241834 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OrganizationContractDTO import OrganizationContractDTO
class UserSubOrganizationDTO(object):
    """Value object for a user's sub-organization membership (Alipay wire format).

    to_alipay_dict() serializes only the attributes that are set (truthy);
    from_alipay_dict() rebuilds an instance from such a dict (None for falsy
    input).
    """

    # Attribute names, in the order they appear on the wire.
    _ATTRS = ('id', 'org_contract_list', 'org_id', 'org_name', 'role_type')

    def __init__(self):
        self._id = None
        self._org_contract_list = None
        self._org_id = None
        self._org_name = None
        self._role_type = None

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def org_contract_list(self):
        return self._org_contract_list

    @org_contract_list.setter
    def org_contract_list(self, value):
        if isinstance(value, list):
            # Coerce every element to an OrganizationContractDTO.
            self._org_contract_list = [
                i if isinstance(i, OrganizationContractDTO)
                else OrganizationContractDTO.from_alipay_dict(i)
                for i in value
            ]

    @property
    def org_id(self):
        return self._org_id

    @org_id.setter
    def org_id(self, value):
        self._org_id = value

    @property
    def org_name(self):
        return self._org_name

    @org_name.setter
    def org_name(self, value):
        self._org_name = value

    @property
    def role_type(self):
        return self._role_type

    @role_type.setter
    def role_type(self, value):
        self._role_type = value

    def to_alipay_dict(self):
        """Serialize set (truthy) attributes into a plain dict."""
        params = dict()
        for name in self._ATTRS:
            value = getattr(self, name)
            if not value:
                continue
            if name == 'org_contract_list' and isinstance(value, list):
                # Convert list elements in place, as the generated code did.
                for idx, element in enumerate(value):
                    if hasattr(element, 'to_alipay_dict'):
                        value[idx] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a DTO from a dict, or return None for falsy input."""
        if not d:
            return None
        o = UserSubOrganizationDTO()
        for name in UserSubOrganizationDTO._ATTRS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/UserSubOrganizationDTO.py | UserSubOrganizationDTO.py | py | 3,472 | python | en | code | 241 | github-code | 13 |
74528073936 | # This file includes the algorithm for two-dimensional optimisation of the channels of a soft touchpad.
import cv2
import numpy as np
# Hyperparameters
height = 50 # height of the touchpad
width = 50 # width of the touchpad
maxt_iterations = 100000 # maximum number of iterations
max_individuals = 100 # number of individuals
# Fitness weights, min = 0, max = 1
w_unique = 0 # uniqueness
w_cover = 0 # coverage
w_channel = 0 # number of channels
w_alone = 1 # unconnected channels
touch_kernel = [3, 3] # effect of touch
# Blueprint of individuals
class Individual:
    """One candidate touchpad channel layout in the GA population."""
    def __init__(self):
        # Scalar score; computed externally (see fitness_calculation).
        self.fitness = 0
        # Sequence representation as an OpenCV image object, BGR represent the three different layers
        # Each pixel of each layer is randomly 0 or 255 (channel absent/present).
        self.genome = np.random.choice([0, 255], size=(height, width, 3)).astype('uint8')
    def update_genome(self, new_genome):
        """Replace the genome array (no copy is made)."""
        self.genome = new_genome
def connected_components(layer):
    """Label the 4-connected components of non-zero pixels in *layer*.

    The previous implementation was unfinished: its equivalence-list
    bookkeeping called ``append()`` with no argument (a TypeError) and the
    function returned nothing. This version uses a stack-based flood fill.

    Args:
        layer: 2-D array-like of pixel values; non-zero means "channel".

    Returns:
        (labels, n_components): ``labels`` is an int array of the same
        height/width where background is 0 and each component is labelled
        1..n_components; ``n_components`` is the component count.
    """
    layer = np.asarray(layer)
    rows, cols = layer.shape[:2]
    labels = np.zeros((rows, cols), dtype=int)
    n_components = 0
    for i in range(rows):
        for j in range(cols):
            if layer[i, j] == 0 or labels[i, j] != 0:
                continue
            # Found an unlabelled foreground pixel: flood-fill its component.
            n_components += 1
            stack = [(i, j)]
            labels[i, j] = n_components
            while stack:
                r, c = stack.pop()
                for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                    if (0 <= nr < rows and 0 <= nc < cols
                            and layer[nr, nc] != 0 and labels[nr, nc] == 0):
                        labels[nr, nc] = n_components
                        stack.append((nr, nc))
    return labels, n_components
def fitness_calculation(genome):
    """Return the weighted fitness of *genome*.

    NOTE(review): all four component scores are still hard-coded to 0, so
    this currently returns 0 for every genome and the *genome* argument is
    unused; the component calculations remain TODO.
    """
    # The fitness has four weighted parts: uniqueness, coverage, number of channels and unconnected pixels
    s_unique = 0 # uniqueness
    s_cover = 0 # coverage
    s_channel = 0 # number of channels
    s_alone = 0 # unconnected channels
    # Unconnected channels: channels that are not connected to the edges receive penalty
    return w_unique * s_unique + w_cover * s_cover + w_channel * s_channel + w_alone * s_alone
# Creating instances
individuals = [Individual() for i in range(max_individuals)]
print(fitness_calculation(individuals[0].genome))
# Resizing image to show
image_to_show = cv2.resize(individuals[0].genome, (0,0), fx = 10, fy = 10)
cv2.imshow('Touchpad channels', image_to_show)
cv2.waitKey(0)
cv2.destroyAllWindows() | gaborsoter/phd_codes | optimisation/genetic_algorithm.py | genetic_algorithm.py | py | 2,067 | python | en | code | 0 | github-code | 13 |
19594869253 | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
import json
from .models import UserProfileModel
from .serializers import UserProfileSerializer, UserProfileSerializerModificate
def custom_response(msg, response, status):
    """Build the standard response envelope: message, pay_load, status.

    The payload is round-tripped through json so non-plain structures
    (e.g. DRF serializer data) come back as JSON-safe dicts/lists.
    """
    envelope = {
        "message": msg,
        "pay_load": response,
        "status": status,
    }
    return json.loads(json.dumps(envelope))
def search_userprofile(pk):
    """Fetch a UserProfileModel by primary key.

    Returns 0 (not None) when no profile exists — callers compare with != 0.
    """
    try:
        return UserProfileModel.objects.get(pk = pk)
    except UserProfileModel.DoesNotExist:
        return 0
@api_view(['GET'])
def get_all_userproiles(request):
    """GET: return every user profile wrapped in the custom envelope.

    (The function name keeps its historical typo; URL config references it.)
    """
    queryset = UserProfileModel.objects.all()
    serializer = UserProfileSerializer(queryset, many=True ,context={'request':request})
    return Response(custom_response("Returned", serializer.data, status=status.HTTP_200_OK))
@api_view(['POST'])
def create_userprofile(request, *args, **kwargs):
    """POST: create a user profile from request data; 201 on success, 400 on validation errors."""
    serializer = UserProfileSerializerModificate(data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(custom_response("Created", serializer.data, status=status.HTTP_201_CREATED))
    return Response(custom_response("Error", serializer.errors, status=status.HTTP_400_BAD_REQUEST))
@api_view(['GET'])
def get_detail_userprofile(request, id, format=None):
    """GET: return one profile by primary key; 404 if it does not exist."""
    userprofile = search_userprofile(id)
    # search_userprofile uses 0 as its "not found" sentinel.
    if userprofile != 0:
        serializer = UserProfileSerializer(userprofile)
        return Response(custom_response("Returned", serializer.data, status=status.HTTP_200_OK))
    return Response(custom_response("Error", "Not found", status=status.HTTP_404_NOT_FOUND))
@api_view(['PATCH'])
def update_userprofile(request, id, format=None):
    """PATCH: update the profile with primary key *id*.

    Returns 404 if the profile does not exist, 400 on validation errors,
    200 on success.
    """
    userprofile = search_userprofile(id)
    if userprofile == 0:
        # search_userprofile signals "not found" with 0; previously the
        # sentinel was passed straight into the serializer.
        return Response(custom_response("Error", "Not found", status=status.HTTP_404_NOT_FOUND))
    serializer = UserProfileSerializerModificate(userprofile, data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(custom_response("Updated", serializer.data, status=status.HTTP_200_OK))
    # Validation failures are client errors (matches create_userprofile),
    # not "not found" as previously returned.
    return Response(custom_response("Error", serializer.errors, status=status.HTTP_400_BAD_REQUEST))
@api_view(['GET'])
def get_user_userprofile(request, user_id, format=None):
    """GET: return the profile linked to auth user *user_id* (full serializer)."""
    try:
        userprofile = UserProfileModel.objects.get(user_profile=user_id)
    except UserProfileModel.DoesNotExist:
        return Response(custom_response("Error", "Not found", status=status.HTTP_404_NOT_FOUND))
    serializer = UserProfileSerializer(userprofile, context={'request': request})
    return Response(custom_response("Success", serializer.data, status=status.HTTP_200_OK))
@api_view(['GET'])
def get_user_userprofile_simple(request, user_id, format=None):
    """GET: same lookup as get_user_userprofile but with the reduced serializer."""
    try:
        userprofile = UserProfileModel.objects.get(user_profile=user_id)
    except UserProfileModel.DoesNotExist:
        return Response(custom_response("Error", "Not found", status=status.HTTP_404_NOT_FOUND))
    serializer = UserProfileSerializerModificate(userprofile, context={'request': request})
    return Response(custom_response("Success", serializer.data, status=status.HTTP_200_OK))
| alanggdev/devconnect-back | userprofile/views.py | views.py | py | 3,153 | python | en | code | 0 | github-code | 13 |
7590233259 | from collections import Counter
def word_count(fname):
    """Return a Counter of whitespace-separated tokens in the file *fname*."""
    with open(fname) as handle:
        tokens = handle.read().split()
    return Counter(tokens)
print("Number of words in the file :",word_count("test.txt"))
##S = [x**2 for x in range(10)] # read elements to list
#M = [x for x in S if x % 2 == 0]
#M.reverse()
def Max(list):
    """Return the largest element of a non-empty sequence.

    Iterative implementation: the original recursed once per element
    (RecursionError on ~1000 items, quadratic from slicing) and recursed
    forever on an empty list.

    Raises:
        ValueError: if the sequence is empty.
    """
    if not list:
        raise ValueError("Max() arg is an empty sequence")
    largest = list[0]
    for item in list:
        if item > largest:
            largest = item
    return largest
def main():
    """Prompt for comma separated numbers and print the largest one.

    Parses the input with float() instead of eval(), so arbitrary code
    typed at the prompt can no longer be executed (security fix).
    """
    try:
        raw = input("Enter a list of numbers: ")
        tokens = [tok.strip() for tok in raw.split(",")]
        if not all(tokens):
            # Empty input or empty items between commas.
            print ("Please enter comma separated numbers")
            return
        numbers = [float(tok) for tok in tokens]
        print ("The largest number is: ", Max(numbers))
    except ValueError:
        print ("Enter only numbers")
main()
| prgit21/Scripting-Lab | 1a.py | 1a.py | py | 683 | python | en | code | 0 | github-code | 13 |
class Rearrange(object):
    """Rearrange numbers by popcount (ascending), ties broken by value."""

    def get_len_of_ones(self, val):
        """Return the number of set bits in *val* (Kernighan's trick)."""
        count = 0
        while val > 0:
            count += 1
            val = val & (val - 1)
        return count

    def count(self, numbers):
        """Print and return *numbers* sorted by (popcount, value).

        Previously this only printed and returned None; it now also
        returns the list. Avoids shadowing the builtins `list`/`map`.
        """
        groups = {}
        for item in numbers:
            groups.setdefault(self.get_len_of_ones(item), []).append(item)
        result = []
        for bits in sorted(groups):
            result.extend(sorted(groups[bits]))
        print(result)
        return result
if __name__=="__main__":
myList = [7,8,6,5]
Rearrange().count(myList) | soniaarora/Algorithms-Practice | Solved in Python/LeetCode/arrays/Rearrange.py | Rearrange.py | py | 683 | python | en | code | 0 | github-code | 13 |
34447120712 | '''NOTE: added Pi > 0.05 threshold at the end'''
'''Changes made:
- Added split function to make training and test set
- Added 'fixed' boolean input to determine fixed point or not in bhat
- Changed variable names from X1 X2 to X_train and X_test etc
'''
import numpy as np
def MMalgVAL(X, y):
    """MM-algorithm sparse regression with validation-based choice of s2.

    Splits (X, y) 80/20, finds a fixed point s2fp on the training half,
    scans a log-grid of s2 values around it, then refits on all data at
    the grid-optimal s2 and thresholds coefficients by Pi > 0.05.

    Returns:
        (b, d, s2fp, Pi, tloss_grid, b_grid, s2_grid)
    NOTE(review): the split is randomised, so results vary between calls.
    """
    n, p = X.shape
    # splitting into training and test set
    X_train, X_test, y_train, y_test = split(X, y, 0.8)
    n_test = len(y_test) # size of test set
    # QR decomposition, overwrite X with Q
    X_train, R_train = np.linalg.qr(X_train)
    X_test, R_test = np.linalg.qr(X_test)
    z_train = X_train.T @ y_train
    z_test = X_test.T @ y_test
    yqz_train = np.sum((y_train - X_train @ z_train)**2) # excess SSR
    yqz_test = np.sum((y_test - X_test @ z_test)**2)
    # Find fixed point for s2 (initialise with 1)
    s2fp, d,_,_ = bhat(1, np.ones((p, 1)), R_train, z_train, R_test, z_test,
                       yqz_test, n_test, 10**-3, True)
    # get a plot over a grid of s2 values
    ngrid = 40
    s2_grid = np.logspace(np.log10(s2fp/100), np.log10(s2fp*10), ngrid)
    b_grid = np.full((p, ngrid), np.nan)
    tloss_grid = np.full(ngrid, np.nan)
    dg = np.ones((p, 1))# temporary delta vector resused from iterations to speed up things
    # we are using the previous iteration's d here, maybe test a full reset each time?
    for i in range(ngrid):
        lossg, dg, bg,_ = bhat(s2_grid[i], dg, R_train, z_train, R_test, z_test,
                               yqz_test, n_test, 10**-5, False)
        b_grid[:, i] = bg.reshape(-1) # gotta turn into basic array
        tloss_grid[i] = lossg
    # get the full data
    R_full = np.vstack((R_train, R_test))
    z_full = np.vstack((z_train, z_test))
    # make a fit with the optimal s2 based on grid search and the full data
    _, d, b, Pi = bhat(s2_grid[tloss_grid.argmin()], np.ones((p, 1)), R_full, z_full,
                       R_full, z_full, yqz_train + yqz_test, n, 10**-6, False)
    b = b * (Pi > 0.05) # simple Pi threshold
    return b, d, s2fp, Pi, tloss_grid, b_grid, s2_grid
def split(X, y, pctg):
    """Randomly partition (X, y) into training and test sets.

    pctg is the fraction of rows used for training (rounded up); the
    remainder becomes the test set. Rows are shuffled, so repeated calls
    give different partitions.
    """
    n = len(y)
    cut = int(np.ceil(pctg * n))
    order = np.random.permutation(n)
    train_idx, test_idx = order[:cut], order[cut:]
    return X[train_idx, :], X[test_idx, :], y[train_idx], y[test_idx]
def bhat(s2, d, R, z, Rt, zt, yqzt, nt, eps, fixed = False):
    ''' s2, d, R, z, eps are required to run the iterative process (training data)
    Rt, zt, yqzt, nt are required to compute test loss and used for fixed point calculation'''
    # NOTE(review): `d` is updated in place (d[I] = ...), so the caller's
    # array is mutated as well as returned — confirm intended.
    nu, p = R.shape
    # Hard iteration cap of 2600; loop also exits early on the KKT check.
    for itrn in range(2600):
        # QR decomposition
        Rd = R * np.sqrt(d.T) # make sure d is a col vector before this
        Q1,_ = np.linalg.qr(np.vstack((Rd.T, np.sqrt(s2) * np.eye(nu)))) # Don't need R matrix
        Q2 = Q1[p:, :]
        Q1 = Q1[:p, :]
        Pi = np.sum(Q1 * Q1, axis = 1).reshape(p, 1)
        Pz = Q1 @ (Q2.T @ z) / np.sqrt(s2)
        # Only update coordinates that are still active (d > 0).
        I = (d > 0).reshape(-1)
        d[I] = d[I] * abs(Pz[I]) / np.sqrt(Pi[I])
        # This is run if looking for a fixed point solution
        if fixed:
            b = np.sqrt(d) * Pz
            tLoss = (yqzt + np.sum((zt - Rt @ b)**2)) / nt # computes test loss (use this as don't have original data)
            s2 = tLoss
        # stop condition check
        grad = Pi - Pz**2
        KKT = np.sum(grad**2)
        if KKT < eps: break
    # final calculation of b and test loss
    b = np.sqrt(d) * Pz
    tLoss = (yqzt + np.sum((zt - Rt @ b)**2)) / nt
    #print('iter:', itrn+1, '\ns2:', s2, '\ntLoss:', tLoss, '\nKKT:', KKT)
    return tLoss, d, b, Pi
72260994899 | # initial setup of stations in the subway map
stations = []
harvard_square = ("Harvard Square", 'red', None)
stations.append(harvard_square)
central_square = ("Central Square", 'red', None)
stations.append(central_square)
kendall_square = ("Kendall Square", 'red', None)
stations.append(kendall_square)
south_station = ("South Station", 'red', None)
stations.append(south_station)
park_street = ("Park Street", 'red', 'green')
stations.append(park_street)
boston_u = ("Boston U", 'green', None)
stations.append(boston_u)
copley_square = ("Copley Square", 'green', None)
stations.append(copley_square)
washington = ("Washington", 'red', 'orange')
stations.append(washington)
north_station = ("North Station", 'green', None)
stations.append(north_station)
haymarket = ("Haymarket", 'green', None)
stations.append(haymarket)
government_center = ("Government Center", 'green', 'blue')
stations.append(government_center)
wood_island = ("Wood Island", 'blue', None)
stations.append(wood_island)
airport = ("Airport", 'blue', None)
stations.append(airport)
aquarium = ("Aquarium", 'blue', None)
stations.append(aquarium)
state = ("State", 'blue', 'orange')
stations.append(state)
# test print for the stations and their tuples
#for station in stations:
# print(station[0])
# a list containing the path to take
path = []
# a list that contains the intersections calculated for a given state if needed
intersections = []
# solves the path based on a set of if statements
def SolvePath(currentState, goalState):
    """Recursively build the route from currentState to goalState.

    States are (name, line1, line2) tuples; steps accumulate in the
    module-level `path` list, which is printed when the goal is reached.
    NOTE(review): `path` is never reset between calls, so a second query
    re-uses leftovers from the first — confirm single-shot use is intended.
    """
    intersections.clear()
    path.append(currentState)
    # if the current state and the goal state are the same
    if currentState[0] == goalState[0]:
        PrintPath(path)
    # if the current state and the goal state are on the same line
    elif goalState[1] == currentState[1] or goalState[1] == currentState[2]:
        nextState = goalState
        SolvePath(nextState, goalState)
    # if the current state and the goal state are not on the same line
    elif goalState[1] != currentState[1]:
        # calculate the intersections for a given state
        for station in stations:
            if station[1] == currentState[1] and station[2] is not None:
                intersections.append(station)
            elif station[2] == currentState[1]:
                intersections.append(station)
        # test print for intersections
        #for intersection in intersections:
        #    print(intersection)
        # the next state is the first intersection in the list
        # NOTE(review): taking the first intersection blindly works for the
        # sample map, but is not guaranteed to terminate on arbitrary maps.
        nextState = intersections.pop(0)
        # test print
        # print(nextState)
        SolvePath(nextState, goalState)
# a method to print the final pathway
def PrintPath(pathway):
    """Print the station name (first tuple element) of every step in *pathway*."""
    names = (step[0] for step in pathway)
    for name in names:
        print(name)
# driver
SolvePath(wood_island, harvard_square)
| KaylaRuby/bps | SubwaySolution.py | SubwaySolution.py | py | 2,867 | python | en | code | 0 | github-code | 13 |
37979036913 | from encode import text_encoder
from decode import text_decoder
def main():
    """Interactive menu: 1 = encode text into image, 2 = decode, 3 = exit.

    Uses a loop instead of recursion (the old version recursed on every
    retry) and no longer crashes on non-numeric input (int() used to raise
    ValueError before the "invalid" branch could run).
    """
    while True:
        user_choice = input(
            "- Enter 1 for encoding text into an image file\n- Enter 2 for decoding text from an image file\n- Enter 3 to exit\nEnter your choice: ")
        choice = user_choice.strip()
        if choice == '1':
            text_encoder()
        elif choice == '2':
            text_decoder()
        elif choice == '3':
            print("Exiting...")
            return
        else:
            print("Invalid response!\nPlease Try again.")
main()
| AkshayBenny/lsb-python | app.py | app.py | py | 557 | python | en | code | 0 | github-code | 13 |
70601954259 | import json
import logging
import os
from utility.dynamo_utility import get_item
from utility.decimal_encoder import DecimalEncoder
def handler(event, context):
    """Lambda entry point: look up a URL item by ?identifier=... in DynamoDB.

    Returns 400 when the identifier is missing, 500 on lookup failure,
    200 with the item JSON otherwise.
    """
    # Guard against a missing or None queryStringParameters — previously a
    # request without query params raised inside the try and came back as
    # a 500 instead of a 400.
    params = event.get('queryStringParameters') or {}
    if 'identifier' not in params:
        logging.error('Bad query param')
        return {'statusCode': 400,
                'body': json.dumps({'error_message': 'identifier not provided'})}
    identifier = params['identifier']
    try:
        url_item = get_item(os.environ['DynamoTableName'], identifier)
        logging.info("item fetch success with identifier={}".format(identifier))
    except Exception as e:
        logging.error("Failed quering url item. Continuing. {}".format(e))
        return {'statusCode': 500,
                'body': json.dumps({'error_message': 'Failed quering url item'})}
    else:
        response = {
            "statusCode": 200,
            # DecimalEncoder converts DynamoDB Decimal values for json.dumps.
            "body": json.dumps(url_item,
                               cls=DecimalEncoder)
        }
        return response
| fatihaydilek/async-url-processor | url/get.py | get.py | py | 1,032 | python | en | code | 0 | github-code | 13 |
71800302737 | from common.graph.node import Node
from common.graph.edge import Edge
from common.container.uri import Uri
from common.container.linkeditem import LinkedItem
from common.utility.mylist import MyList
import itertools
import logging
from tqdm import tqdm
class Graph:
def __init__(self, kb, logger=None):
self.kb = kb
self.logger = logger or logging.getLogger(__name__)
self.nodes, self.edges = set(), set()
self.entity_items, self.relation_items = [], []
self.suggest_retrieve_id = 0
def create_or_get_node(self, uris, mergable=False):
if isinstance(uris, (int)):
uris = self.__get_generic_uri(uris, 0)
mergable = True
new_node = Node(uris, mergable)
for node in self.nodes:
if node == new_node:
return node
return new_node
def add_node(self, node):
if node not in self.nodes:
self.nodes.add(node)
def remove_node(self, node):
self.nodes.remove(node)
def add_edge(self, edge):
if edge not in self.edges:
self.add_node(edge.source_node)
self.add_node(edge.dest_node)
self.edges.add(edge)
def remove_edge(self, edge):
edge.prepare_remove()
self.edges.remove(edge)
if edge.source_node.is_disconnected():
self.remove_node(edge.source_node)
if edge.dest_node.is_disconnected():
self.remove_node(edge.dest_node)
def count_combinations(self, entity_items, relation_items, number_of_entities, top_uri):
total = 0
for relation_item in relation_items:
rel_uris_len = len(relation_item.top_uris(top_uri))
for entity_uris in itertools.product(*[items.top_uris(top_uri) for items in entity_items]):
total += rel_uris_len * len(list(itertools.combinations(entity_uris, number_of_entities)))
return total
def __one_hop_graph(self, entity_items, relation_items, threshold=None, number_of_entities=1):
top_uri = 1
total = self.count_combinations(entity_items, relation_items, number_of_entities, top_uri)
if threshold is not None:
while total > threshold:
top_uri -= 0.1
total = self.count_combinations(entity_items, relation_items, number_of_entities, top_uri)
with tqdm(total=total, disable=self.logger.level >= 10) as pbar:
for relation_item in relation_items:
for relation_uri in relation_item.top_uris(top_uri):
for entity_uris in itertools.product(*[items.top_uris(top_uri) for items in entity_items]):
for entity_uri in itertools.combinations(entity_uris, number_of_entities):
pbar.update(1)
result = self.kb.one_hop_graph(entity_uri[0], relation_uri,
entity_uri[1] if len(entity_uri) > 1 else None)
if result is not None:
for item in result:
m = int(item["m"]["value"])
uri = entity_uri[1] if len(entity_uri) > 1 else 0
if m == 0:
n_s = self.create_or_get_node(uri, True)
n_d = self.create_or_get_node(entity_uri[0])
e = Edge(n_s, relation_uri, n_d)
self.add_edge(e)
elif m == 1:
n_s = self.create_or_get_node(entity_uri[0])
n_d = self.create_or_get_node(uri, True)
e = Edge(n_s, relation_uri, n_d)
self.add_edge(e)
elif m == 2:
n_s = self.create_or_get_node(uri)
n_d = self.create_or_get_node(relation_uri)
e = Edge(n_s, Uri(self.kb.type_uri, self.kb.parse_uri), n_d)
self.add_edge(e)
def find_minimal_subgraph(self, entity_items, relation_items, double_relation=False, ask_query=False,
sort_query=False, h1_threshold=None):
self.entity_items, self.relation_items = MyList(entity_items), MyList(relation_items)
if double_relation:
self.relation_items.append(self.relation_items[0])
# Find subgraphs that are consist of at least one entity and exactly one relation
# self.logger.info("start finding one hop graph")
self.__one_hop_graph(self.entity_items, self.relation_items, number_of_entities=int(ask_query) + 1,
threshold=h1_threshold)
# self.logger.info("finding one hop graph finished")
if len(self.edges) > 100:
return
# Extend the existing edges with another hop
# self.logger.info("Extend edges with another hop")
self.__extend_edges(self.edges, relation_items)
def __extend_edges(self, edges, relation_items):
new_edges = set()
total = 0
for relation_item in relation_items:
for relation_uri in relation_item.uris:
total += len(edges)
with tqdm(total=total, disable=self.logger.level >= 10) as pbar:
for relation_item in relation_items:
for relation_uri in relation_item.uris:
for edge in edges:
pbar.update(1)
new_edges.update(self.__extend_edge(edge, relation_uri))
for e in new_edges:
self.add_edge(e)
def __extend_edge(self, edge, relation_uri):
output = set()
var_node = None
if edge.source_node.are_all_uris_generic():
var_node = edge.source_node
if edge.dest_node.are_all_uris_generic():
var_node = edge.dest_node
ent1 = edge.source_node.first_uri_if_only()
ent2 = edge.dest_node.first_uri_if_only()
if not (var_node is None or ent1 is None or ent2 is None):
result = self.kb.two_hop_graph(ent1, edge.uri, ent2, relation_uri)
if result is not None:
for item in result:
if item[1]:
if item[0] == 0:
n_s = self.create_or_get_node(1, True)
n_d = var_node
e = Edge(n_s, relation_uri, n_d)
output.add(e)
elif item[0] == 1:
n_s = var_node
n_d = self.create_or_get_node(1, True)
e = Edge(n_s, relation_uri, n_d)
output.add(e)
elif item[0] == 2:
n_s = var_node
n_d = self.create_or_get_node(1, True)
e = Edge(n_s, relation_uri, n_d)
output.add(e)
self.suggest_retrieve_id = 1
elif item[0] == 3:
n_s = self.create_or_get_node(1, True)
n_d = var_node
e = Edge(n_s, relation_uri, n_d)
output.add(e)
elif item[0] == 4:
n_d = self.create_or_get_node(relation_uri)
n_s = self.create_or_get_node(0, True)
e = Edge(n_s, Uri(self.kb.type_uri, self.kb.parse_uri), n_d)
output.add(e)
n_s = self.create_or_get_node(1, True)
e = Edge(n_s, Uri(self.kb.type_uri, self.kb.parse_uri), n_d)
output.add(e)
return output
    def __get_generic_uri(self, uri, edges):
        """Return a generic placeholder URI for `uri`.

        `edges` (the node's inbound+outbound edges) is currently unused; it is
        kept so context-sensitive generalization can be added without changing
        callers.
        """
        return Uri.generic_uri(uri)
def generalize_nodes(self):
"""
if there are nodes which have none-generic uri that is not in the list of possible entity/relation,
such uris will be replaced by a generic uri
:return: None
"""
uris = sum([items.uris for items in self.entity_items] + [items.uris for items in self.relation_items], [])
for node in self.nodes:
for uri in node.uris:
if uri not in uris and not uri.is_generic():
generic_uri = self.__get_generic_uri(uri, node.inbound + node.outbound)
node.replace_uri(uri, generic_uri)
def merge_edges(self):
to_be_removed = set()
for edge_1 in self.edges:
for edge_2 in self.edges:
if edge_1 is edge_2 or edge_2 in to_be_removed:
continue
if edge_1 == edge_2:
to_be_removed.add(edge_2)
for item in to_be_removed:
self.remove_edge(item)
def __str__(self):
return "\n".join([edge.full_path() for edge in self.edges])
| AskNowQA/SQG | common/graph/graph.py | graph.py | py | 9,318 | python | en | code | 47 | github-code | 13 |
19596222320 | #!/usr/bin/env python
from datetime import datetime
class Settings:
    # Process-wide defaults consumed by the factory helpers in this module
    # whenever a caller omits the `user` / `orm` keyword arguments.
    user = None  # default auditing user stamped onto created objects
    orm = None  # mapping of model labels (e.g. 'yabi.User') to model classes

settings = Settings()  # module-level singleton, mutated via set_default_user/orm
def set_default_user(user):
    # Register the user recorded as creator/modifier by the factories below.
    settings.user = user

def set_default_orm(orm):
    # Register the default model lookup (label -> model class).
    settings.orm = orm
def auth_user(username, password, email, active=True, staff=False, superuser=False, user=None, orm=None):
    """Build an (unsaved) django auth user with audit fields populated.

    Falls back to the module-level defaults for `user`/`orm`; if no auditing
    user is available the new user is recorded as its own creator/modifier.
    NOTE: uses `unicode(...)`, so this module is Python 2 code.
    """
    user = user or settings.user
    orm = orm or settings.orm
    authuser = orm['auth.user']()
    # Self-reference fallback: the very first user bootstrapped has no creator.
    authuser.last_modified_by = user or authuser
    authuser.last_modified_on = datetime.now()
    authuser.created_by = user or authuser
    authuser.created_on = datetime.now()
    authuser.username = unicode(username)
    authuser.password = make_password_hash(password)
    authuser.email = email
    authuser.is_active = active
    authuser.is_staff = staff
    authuser.is_superuser = superuser
    return authuser
def make_password_hash(password):
    """Return a Django password hash for *password* using a random 5-char salt.

    NOTE(security): this explicitly selects the legacy 'sha1' hasher and
    derives the salt from `random.random()`, which is not a cryptographically
    secure source. Kept as-is for migration compatibility — review before any
    new use.
    """
    import random
    import hashlib
    from django.contrib.auth.hashers import make_password
    salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
    return make_password(password, salt=salt, hasher='sha1')
def _audit_stamp(obj, user):
    """Fill the four audit columns shared by every yabi model.

    Extracted helper: this 4-line block was previously repeated verbatim in
    all thirteen factory functions below.
    """
    obj.last_modified_by = user
    obj.last_modified_on = datetime.now()
    obj.created_by = user
    obj.created_on = datetime.now()

def yabi_user(username, user=None, orm=None):
    """Build an (unsaved) yabi.User named *username*."""
    user = user or settings.user
    orm = orm or settings.orm
    yabi_user = orm['yabi.User']()
    _audit_stamp(yabi_user, user)
    yabi_user.name = username
    return yabi_user

def yabi_backend(name, description, scheme, hostname, port, path, max_connections=None, lcopy=True, link=True, submission='', user=None, orm=None):
    """Build an (unsaved) yabi.Backend describing an execution/storage endpoint."""
    user = user or settings.user
    orm = orm or settings.orm
    yabi_backend = orm['yabi.Backend']()
    _audit_stamp(yabi_backend, user)
    yabi_backend.name = name
    yabi_backend.description = description
    yabi_backend.scheme = scheme
    yabi_backend.hostname = hostname
    yabi_backend.port = port
    yabi_backend.path = path
    yabi_backend.max_connections = max_connections
    yabi_backend.lcopy_supported = lcopy
    yabi_backend.link_supported = link
    yabi_backend.submission = submission
    return yabi_backend

def yabi_credential(credentialuser, description, username="", password="", cert="", key="", user=None, orm=None):
    """Build an (unsaved), unencrypted yabi.Credential owned by *credentialuser*.

    The expiry is pinned far in the future (year 2111), i.e. effectively never.
    """
    user = user or settings.user
    orm = orm or settings.orm
    yabi_credential = orm['yabi.Credential']()
    _audit_stamp(yabi_credential, user)
    yabi_credential.description = description
    yabi_credential.username = username
    yabi_credential.password = password
    yabi_credential.cert = cert
    yabi_credential.key = key
    yabi_credential.user = credentialuser
    yabi_credential.expires_on = datetime(2111, 1, 1, 12, 0)
    yabi_credential.encrypted = False
    yabi_credential.encrypt_on_login = False
    return yabi_credential

def yabi_backendcredential(backend, credential, homedir, visible=False, default_stageout=False, submission='', user=None, orm=None):
    """Build an (unsaved) yabi.BackendCredential linking a backend to a credential."""
    user = user or settings.user
    orm = orm or settings.orm
    yabi_backendcredential = orm['yabi.BackendCredential']()
    _audit_stamp(yabi_backendcredential, user)
    yabi_backendcredential.backend = backend
    yabi_backendcredential.credential = credential
    yabi_backendcredential.homedir = homedir
    yabi_backendcredential.visible = visible
    yabi_backendcredential.default_stageout = default_stageout
    yabi_backendcredential.submission = submission
    return yabi_backendcredential

def yabi_filetype(name, description, extension_list, user=None, orm=None):
    """Build a yabi.FileType; if extensions are given it is SAVED as a side effect.

    Saving is required so the many-to-many `extensions` relation has an id to
    attach to.
    """
    user = user or settings.user
    orm = orm or settings.orm
    yabi_filetype = orm['yabi.FileType']()
    _audit_stamp(yabi_filetype, user)
    yabi_filetype.name = name
    yabi_filetype.description = description
    if extension_list:
        yabi_filetype.save()  # gives it an id
        for extension in extension_list:
            yabi_filetype.extensions.add(extension)
    return yabi_filetype

def yabi_parameterswitchuse(display_text, formatstring, description, user=None, orm=None):
    """Build an (unsaved) yabi.ParameterSwitchUse."""
    user = user or settings.user
    orm = orm or settings.orm
    yabi_parameterswitchuse = orm['yabi.ParameterSwitchUse']()
    _audit_stamp(yabi_parameterswitchuse, user)
    yabi_parameterswitchuse.display_text = display_text
    yabi_parameterswitchuse.formatstring = formatstring
    yabi_parameterswitchuse.description = description
    return yabi_parameterswitchuse

def yabi_tool(name, display_name, path, description, backend, fs_backend, enabled=True, accepts_input=False, cpus='', walltime='', module='', queue='', max_memory='', job_type='', lcopy=False, link=False, user=None, orm=None):
    """Build an (unsaved) yabi.Tool bound to the given execution/fs backends."""
    user = user or settings.user
    orm = orm or settings.orm
    yabi_tool = orm['yabi.Tool']()
    _audit_stamp(yabi_tool, user)
    yabi_tool.name = name
    yabi_tool.display_name = display_name
    yabi_tool.path = path
    yabi_tool.description = description
    yabi_tool.enabled = enabled
    yabi_tool.backend = backend
    yabi_tool.fs_backend = fs_backend
    yabi_tool.accepts_input = accepts_input
    yabi_tool.cpus = cpus
    yabi_tool.walltime = walltime
    yabi_tool.module = module
    yabi_tool.queue = queue
    yabi_tool.max_memory = max_memory
    yabi_tool.job_type = job_type
    yabi_tool.lcopy_supported = lcopy
    yabi_tool.link_supported = link
    return yabi_tool

def yabi_toolparameter(tool, switch, switch_use, rank, mandatory, hidden, output_file, extension_param, possible_values, default_value, helptext, batch_bundle_files, file_assignment, use_output_filename, user=None, orm=None):
    """Build an (unsaved) yabi.ToolParameter for *tool*."""
    user = user or settings.user
    orm = orm or settings.orm
    yabi_toolparameter = orm['yabi.ToolParameter']()
    _audit_stamp(yabi_toolparameter, user)
    yabi_toolparameter.tool = tool
    yabi_toolparameter.switch = switch
    yabi_toolparameter.switch_use = switch_use
    yabi_toolparameter.rank = rank
    yabi_toolparameter.mandatory = mandatory
    yabi_toolparameter.hidden = hidden
    yabi_toolparameter.output_file = output_file
    yabi_toolparameter.extension_param = extension_param
    yabi_toolparameter.possible_values = possible_values
    yabi_toolparameter.default_value = default_value
    yabi_toolparameter.helptext = helptext
    yabi_toolparameter.batch_bundle_files = batch_bundle_files
    yabi_toolparameter.file_assignment = file_assignment
    yabi_toolparameter.use_output_filename = use_output_filename
    return yabi_toolparameter

def yabi_tooloutputextension(tool, extension, user=None, orm=None):
    """Build an (unsaved) yabi.ToolOutputExtension with no existence/size checks."""
    user = user or settings.user
    orm = orm or settings.orm
    yabi_tooloutputextension = orm['yabi.ToolOutputExtension']()
    _audit_stamp(yabi_tooloutputextension, user)
    yabi_tooloutputextension.tool = tool
    yabi_tooloutputextension.file_extension = extension
    yabi_tooloutputextension.must_exist = None
    yabi_tooloutputextension.must_be_larger_than = None
    return yabi_tooloutputextension

def yabi_toolgroup(name, user=None, orm=None):
    """Build an (unsaved) yabi.ToolGroup named *name*."""
    user = user or settings.user
    orm = orm or settings.orm
    yabi_toolgroup = orm['yabi.ToolGroup']()
    _audit_stamp(yabi_toolgroup, user)
    yabi_toolgroup.name = name
    return yabi_toolgroup

def yabi_toolset(name, user=None, orm=None):
    """Build an (unsaved) yabi.ToolSet named *name*."""
    user = user or settings.user
    orm = orm or settings.orm
    yabi_toolset = orm['yabi.ToolSet']()
    _audit_stamp(yabi_toolset, user)
    yabi_toolset.name = name
    return yabi_toolset

def yabi_toolgrouping(toolgroup, tool, toolset, user=None, orm=None):
    """Build an (unsaved) yabi.ToolGrouping joining a tool, a group and a set."""
    user = user or settings.user
    orm = orm or settings.orm
    yabi_toolgrouping = orm['yabi.ToolGrouping']()
    _audit_stamp(yabi_toolgrouping, user)
    yabi_toolgrouping.tool_group = toolgroup
    yabi_toolgrouping.tool = tool
    yabi_toolgrouping.tool_set = toolset
    return yabi_toolgrouping

def yabi_fileextension(pattern, user=None, orm=None):
    """Build an (unsaved) yabi.FileExtension matching *pattern* (e.g. '*.txt')."""
    user = user or settings.user
    orm = orm or settings.orm
    fileextension = orm['yabi.FileExtension']()
    _audit_stamp(fileextension, user)
    fileextension.pattern = pattern
    return fileextension
26157611138 | from lmpc import *
class Relax(LMPC):
    """LMPC variant with *relaxed* (slack-penalized) collision-cone constraints.

    The front/rear collision cones (FCC/RCC) are softened with slack variables
    e_f / e_r, which are penalized quadratically in the stage cost, so the
    finite-horizon problem stays feasible near the leading vehicle.
    """
    def __init__(self, T, dt, N, J, R):
        super().__init__(T, dt, N, J, R)
        self.name = 'relax'
        self.W = 0.5*self.w_L + 0.5*self.w_C # lateral safety distance for FCC and RCC
        self.const1, self.const2 = 10000, 10000 # costs on slack variables
    def evaluate_stage_cost(self, x, u):
        """Realized stage cost: input effort plus dt time penalty until the
        state is within tolerance of the target xF."""
        not_done = 1
        if x[0] >= self.xF[0] and (x[1] - self.xF[1])**2 < self.tol and (x[2] - self.xF[2])**2 < self.tol and (x[3] - self.xF[3])**2 < self.tol:
            not_done = 0
        #(x - self.xF).T@self.Q@(x - self.xF)
        return u.T@self.R@u + not_done*self.dt
    def append_SS(self, j): # needs iteration number
        """Append iteration j's trajectory, with cost-to-go in row 0, to the
        sampled safe set SS."""
        costs = self.stage_costs[:,:self.completion[j]+1,j]
        ctg = np.zeros(costs.shape)
        # cost-to-go at step i = sum of realized stage costs from i to the end
        for i in range(ctg.shape[1]):
            ctg[0,i] = costs[0,i:].sum()
        self.SS = np.concatenate((self.SS, np.concatenate((ctg, self.x_trajectory[:,:self.completion[j]+1,j]), axis = 0)), axis = 1)
    def solve_FHOCP(self, x_0):
        """Build and solve the finite-horizon MIQP from initial state x_0.

        Stores the optimal state/input trajectories and slack values in
        self.x_star / self.u_star / self.e_f / self.e_r.
        """
        model = gp.Model('prediction')
        model.setParam('OutputFlag', 0)
        model.setParam('Presolve', 0)
        #model.setParam('MIPGap', 1e-10)
        model.setParam('IntFeasTol', 1e-8)
        #model.setParam('FeasibilityTol', 1e-4)
        x = model.addMVar((self.n, self.N + 1), lb = -100, name = 'x')
        u = model.addMVar((self.m, self.N), lb = -10, name = 'u')
        T = model.addMVar(self.N, vtype = GRB.BINARY, name = 'T')
        delta = model.addMVar(self.SS.shape[1], vtype = GRB.BINARY, name = 'delta')
        e_xf = model.addMVar(1, name = 'e_xf')
        e_yf = model.addMVar(self.N, lb = -1000, name = 'e_yf')
        e_f = model.addMVar(self.N, name = 'e_f')
        e_xr = model.addMVar(1, name = 'e_xr')
        e_r = model.addMVar(self.N, name = 'e_r')
        d = x_0[0] # initialized at each optimization cycle
        phi = max(1, abs(x_0[0])) # psi = 5 must be tuned, initialized at each optimization cycle
        model.addConstr(x[:,0] == x_0) # initial condition
        # terminal state must be a convex-combination pick (binary delta) from SS
        model.addConstr(x[:,self.N] == self.SS[1:,:]@delta) # terminal constraint
        model.addConstr(delta.sum() == 1)
        terminal_cost = self.SS[0,:]@delta
        stage_costs = 0
        for k in range(self.N):
            # time-to-go indicator T[k] plus input effort plus slack penalties
            stage_costs += self.dt*T[k] + u[:,k]@self.R@u[:,k] + e_f[k]@e_f[k]*self.const1 + e_r[k]@e_r[k]*self.const2
            model.addConstr(x[1,k]/self.W - x[0,k]/self.L + d * e_xf + e_yf[k]/phi + e_f[k] >= 1) #FCC
            model.addConstr(x[1,k]/self.W + x[0,k]/self.L - d * e_xr + e_r[k] >= 1) #RCC
            model.addConstr(e_yf[k] == x[1,k] - self.W)
            model.addConstr(x[1,k] <= 1.5*self.w_L - 0.5*self.w_C) # y max
            model.addConstr(x[1,k] >= -0.5*self.w_L + 0.5*self.w_C) # ymin
            model.addConstr(x[2,k] >= self.road_min_speed - self.v_L) # min x speed
            model.addConstr(x[2,k] <= self.road_max_speed - self.v_L) # max x speed
            model.addConstr(x[3,k] <= 0.17*(x[2,k] + self.v_L)) # side slip
            model.addConstr(x[3,k] >= -0.17*(x[2,k] + self.v_L))
            model.addConstr(u[0,k] <= 1) # ax_max
            model.addConstr(u[0,k] >= -4) # ax_min
            model.addConstr(u[1,k] <= 2) # ay_max
            model.addConstr(u[1,k] >= -2) # ay_min
            model.addConstr(x[:,k+1] == self.A@x[:,k] + self.B@u[:,k]) # dynamics
            # big-M constraints forcing T[k] = 1 until the state reaches xF
            model.addConstr(200 * T[k] >= self.xF[0] - x[0,k]) # far enough?
            model.addConstr(100 * T[k] >= x[1,k]@x[1,k] - 2*self.xF[1]*x[1,k] + self.xF[1]*self.xF[1]) # central enough?
            model.addConstr(100 * T[k] >= x[2,k]@x[2,k] - 2*self.xF[2]*x[2,k] + self.xF[2]*self.xF[2]) # right speed?
            model.addConstr(100 * T[k] >= x[3,k]@x[3,k] - 2*self.xF[3]*x[3,k] + self.xF[3]*self.xF[3]) # still enough?
        model.setObjective(terminal_cost + stage_costs, GRB.MINIMIZE)
        model.optimize()
        self.current_comp_time = model.Runtime
        self.x_star = x.x
        self.u_star = u.x
        self.e_f = e_f.x
        self.e_r = e_r.x
    def plot(self):
        """Save trajectory and computation-time figures to plots/ as PDFs."""
        fig = plt.figure()
        ax1 = plt.subplot2grid((2, 2), (0, 0), colspan=2)
        ax2 = plt.subplot2grid((2, 2), (1, 0), colspan=1)
        ax3 = plt.subplot2grid((2, 2), (1, 1), colspan=1)
        vehicle_L = Rectangle((-self.l_C/2, -self.w_C/2), self.l_C, self.w_C, label = 'Vehicle L', color = 'r')
        ax1.add_artist(vehicle_L)
        #ax1.set_aspect('equal')
        ax1.set_xlabel('x (m)')
        ax1.set_ylabel('y (m)')
        ax2.set_xlabel('time step')
        ax2.set_ylabel('x dot (m/s)')
        ax3.set_xlabel('time step')
        ax3.set_ylabel('y dot (m/s)')
        # road sides
        ax1.axhline(-0.5*self.w_L, lw = 2, color = '0.5')
        ax1.axhline(1.5*self.w_L, lw = 2, color = '0.5')
        ax1.axhline(0.5*self.w_L, ls = '--', lw = 2, color = '0.7')
        ax1.plot([-self.d, self.d], [-0.5*self.w_L + 0.5*self.w_C] * 2, lw = 1, color = 'r')
        ax1.plot([-self.d, self.d], [1.5*self.w_L - 0.5*self.w_C] * 2, lw = 1, color = 'r')
        # FCC, RCC
        ax1.plot([(-1.5*self.w_L+self.W)*self.L/self.W, (0.5*self.w_L+self.W)*self.L/self.W], [1.5*self.w_L, -0.5*self.w_L], lw = 1, ls = '--', color = '0.5')
        ax1.plot([(1.5*self.w_L-self.W)*self.L/self.W, -(0.5*self.w_L+self.W)*self.L/self.W], [1.5*self.w_L, -0.5*self.w_L], lw = 1, ls = '--', color = '0.5')
        for j in range(self.J+1):
            ax1.plot(self.x_trajectory[0,:self.completion[j]+1,j], self.x_trajectory[1,:self.completion[j]+1,j], marker = '.' , markersize = 4, lw = 0.75, label = 'j = ' + str(j))
            ax2.plot(self.time[:self.completion[j]+1], self.x_trajectory[2,:self.completion[j]+1,j], lw = 0.75, label = 'j = ' + str(j))
            ax3.plot(self.time[:self.completion[j]+1], self.x_trajectory[3,:self.completion[j]+1,j], lw = 0.75, label = 'j = ' + str(j))
        handles, labels = ax3.get_legend_handles_labels()
        fig.legend(handles, labels, loc='right')
        fig.tight_layout()
        fig.set_size_inches(11, 7)
        fig.savefig('plots/' + 'TRAJ_' + self.name + '_N=' + str(self.N) + '_r=' + str(self.r) + '_J=' + str(self.J) + '.pdf', bbox_inches='tight', dpi = 500)
        plt.close()
        fig, ax = plt.subplots()
        ax.set_xlabel('time step')
        ax.set_ylabel('computational time (s)')
        for j in range(self.J+1):
            ax.plot(self.time, self.comp_time[0,:,j])
        fig.set_size_inches(11, 7)
        fig.savefig('plots/' + 'COMPT_' + self.name + '_N=' + str(self.N) + '_r=' + str(self.r) + '_J=' + str(self.J) + '.pdf', bbox_inches='tight', dpi = 500)
        plt.close()
    def animate(self):
        """Show one animation window per completed LMPC iteration (1..J)."""
        for j in range(1,self.J+1):
            self.create_fig(j)
    def create_fig(self, j):
        """Animate iteration j's closed-loop trajectory and its predictions."""
        fig, ax = plt.subplots()
        lineE, = ax.plot([], [], 'o', lw=3, color = 'b')
        lineP, = ax.plot([], [], '.', lw=3, color = '0.1')
        ax.set_xlim(-100, 120)
        ax.set_ylim(-5, 10)
        for j2 in range(j):
            # NOTE(review): slice length uses completion[j] for every past
            # iteration j2, not completion[j2] — confirm this is intended.
            ax.plot(self.x_trajectory[0,:self.completion[j]+1,j2], self.x_trajectory[1,:self.completion[j]+1,j2], '.')
        ax.axhline(-0.5*self.w_L, lw = 2, color = '0.5')
        ax.axhline(1.5*self.w_L, lw = 2, color = '0.5')
        ax.axhline(0.5*self.w_L, ls = '--', lw = 2, color = '0.7')
        ax.plot([(-1.5*self.w_L+self.W)*self.L/self.W, (0.5*self.w_L+self.W)*self.L/self.W], [1.5*self.w_L, -0.5*self.w_L], lw = 1, ls = '--', color = '0.5')
        ax.plot([(1.5*self.w_L-self.W)*self.L/self.W, -(0.5*self.w_L+self.W)*self.L/self.W], [1.5*self.w_L, -0.5*self.w_L], lw = 1, ls = '--', color = '0.5')
        vehicle_L = Rectangle((-self.l_C/2, -self.w_C/2), self.l_C, self.w_C, label = 'Vehicle L', color = 'r')
        ax.add_artist(vehicle_L)
        def init():
            lineE.set_data([], [])
            lineP.set_data([], [])
            return lineE, lineP,
        def update_animation(i, j):
            x = self.x_trajectory[0,[i],j]
            y = self.x_trajectory[1,[i],j]
            lineE.set_data(x, y)
            xp = self.all_predictions[0,:,i,j]
            yp = self.all_predictions[1,:,i,j]
            lineP.set_data(xp, yp)
            if i >= self.completion[j]-1:
                plt.close(lineE.axes.figure)
            return lineE, lineP,
        # NOTE(review): frames is always completion[1] regardless of j —
        # presumably should be completion[j]; verify.
        anim = FuncAnimation(fig, update_animation, init_func=init, fargs = (j,), frames=self.completion[1], interval=50, blit=True, repeat = False)
        plt.show()
| ivarben/SF280X | source code/relax.py | relax.py | py | 8,481 | python | en | code | 0 | github-code | 13 |
15734853943 | import torch.nn as nn
from Layers.resnet import ResNetLayer
import numpy as np
class ResNet(nn.Module):
''' A encoder models with self attention mechanism. '''
def __init__(
self, position_encoding_layer, n_layers, n_head, d_features, max_seq_length, d_meta, d_k=None, d_v=None, dropout=0.1, use_bottleneck=True, d_bottleneck=256):
super().__init__()
if d_k == None or d_v == None:
if d_k == d_v:
d_reduce_param = np.floor(d_features / n_head).astype(int)
d_k, d_v = d_reduce_param, d_reduce_param
elif d_k == None:
d_k = d_v
else:
d_v = d_k
self.position_enc = position_encoding_layer(d_features=d_features, max_length=max_seq_length, d_meta=d_meta)
self.layer_stack = nn.ModuleList([
ResNet(d_features, n_head, d_k, d_v, dropout, use_bottleneck=use_bottleneck, d_bottleneck=d_bottleneck)
for _ in range(n_layers)])
def forward(self, feature_sequence, position, non_pad_mask=None, slf_attn_mask=None):
'''
Arguments:
input_feature_sequence {Tensor, shape [batch, max_sequence_length, d_features]} -- input feature sequence
position {Tensor, shape [batch, max_sequence_length (, d_meta)]} -- input feature position sequence
non_pad_mask {Tensor, shape [batch, length, 1]} -- index of which position in a sequence is a padding
slf_attn_mask {Tensor, shape [batch, length, length]} -- self attention mask
Returns:
enc_output {Tensor, shape [batch, max_sequence_length, d_features]} -- encoder output (representation)
encoder_self_attn_list {List, length: n_layers} -- encoder self attention list,
each element is a Tensor with shape [n_head * batch, max_sequence_length, max_sequence_length]
'''
encoder_self_attn_list = []
enc_output = feature_sequence
# Add position information at the beginning
pos_enc = self.position_enc(position)
enc_output = feature_sequence + pos_enc
for enc_layer in self.layer_stack:
enc_output, encoder_self_attn = enc_layer(
enc_output,
non_pad_mask=non_pad_mask,
slf_attn_mask=slf_attn_mask)
encoder_self_attn_list += [encoder_self_attn]
return enc_output, encoder_self_attn_list | Jincheng-Sun/Kylearn-pytorch | Modules/ResNet.py | ResNet.py | py | 2,479 | python | en | code | 0 | github-code | 13 |
17911085377 | import numpy
import image_processor
from puzzle_model import PuzzleModel
import copy
import sys
def policy_action(model, policy):
    """Return the currently-available action with the highest estimated reward."""
    best_action, best_reward = None, None
    for candidate in model.get_options():
        estimate = policy[model.state][candidate]
        if best_reward is None or estimate > best_reward:
            best_reward, best_action = estimate, candidate
    return best_action

def get_action(model, policy, epsilon):
    """Epsilon-greedy selection: explore a random option with probability
    epsilon, otherwise exploit via policy_action. Unseen state/action entries
    are initialized to 0 as a side effect."""
    state_values = policy.setdefault(model.state, dict())
    for candidate in model.get_options():
        if candidate not in state_values:
            state_values[candidate] = 0
    if numpy.random.random() < epsilon:
        options = model.get_options()
        return options[numpy.random.randint(len(options))]
    return policy_action(model, policy)
if __name__ == "__main__":
print(sys.argv[1])
image_loc = sys.argv[1]
# input params
gamma = 0.9
alpha = 0.5
epsilon = 0.3
episodes = 3000
# setup
# env = gym.make('Taxi-v3').unwrapped
tube_dict = image_processor.process(image_loc)
print(tube_dict)
model = PuzzleModel(tube_dict)
# pol = numpy.zeros((env.nS, env.nA))
policy = dict()
for e in range(episodes):
if e % 100 == 0:
print(e)
# env.reset()
model = PuzzleModel(tube_dict)
reward = 0
next_action = get_action(model, policy, epsilon)
completed = False
while not completed: # and reward > -20:
prev_state = copy.deepcopy(model.state)
prev_action = copy.deepcopy(next_action)
# None, reward, completed, None = env.step(next_action) # act(epsilon, env, next_action)
model.process_move(next_action)
if model.win_state:
reward = 1
elif model.stuck_state:
reward = -1
else:
reward = 0
completed = model.win_state or model.stuck_state
if not completed:
next_action = get_action(model, policy, epsilon)
fixed_action = get_action(model, policy, 0.0)
policy[prev_state][prev_action] += alpha*(reward+gamma*policy[model.state][fixed_action] - policy[prev_state][prev_action])
else:
policy[prev_state][prev_action] += alpha*(reward - policy[prev_state][prev_action])
# show optimal policy
model = PuzzleModel(tube_dict)
while not model.win_state or not model.stuck_state:
action = get_action(model, policy, 0.0)
print(action)
model.process_move(action)
print('done')
| tkim338/ball-sorter | q_learner_solver.py | q_learner_solver.py | py | 2,764 | python | en | code | 0 | github-code | 13 |
7957983049 | import matplotlib.pyplot as plt
import numpy as np

# Load one reachability distance per line from the OPTICS output file.
with open('result.txt') as f:
    array = [float(line) for line in f]

x = np.arange(len(array))
# Bars plus a line overlay of the same reachability values.
plt.bar(x , array, align='center', alpha=0.5, color='purple' )
plt.plot( array )
plt.title("Reachability Plot")
plt.xlabel("Data Point")
plt.ylabel("Reachability Distance")
plt.savefig('plot.png')
# plt.show()
| shashank-yadav/OPTICS-dataMining | plot.py | plot.py | py | 362 | python | en | code | 0 | github-code | 13 |
24909569607 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import UserChangeForm
from search_admin_autocomplete.admin import SearchAutoCompleteAdmin
from import_export.admin import ImportExportModelAdmin
from import_export import resources
from import_export import fields
from simple_history.admin import SimpleHistoryAdmin
from django.utils.translation import gettext_lazy as _
from .models import Tag, User, AuditRecord
class UserResource(resources.ModelResource):
    """Import/export definition for User rows, with a computed full-name column."""
    full_name = fields.Field(column_name="Full Name")

    # django-import-export hook: value for the synthetic `full_name` column.
    def dehydrate_full_name(self, obj):
        return "%s %s" % (obj.first_name, obj.last_name)

    class Meta:
        model = User
        fields = (
            "id",
            "full_name",
            "first_name",
            "last_name",
            "email",
            "is_active",
            "date_joined",
            "form_on_file",
            "email_confirmed",
        )
        # Rows are matched on name+email rather than the numeric primary key.
        import_id_fields = ["first_name", "last_name", "email"]
class TagResource(resources.ModelResource):
    """Import/export definition for Tag rows, keyed on (owner, tag)."""
    class Meta:
        model = Tag
        fields = ["owner", "tag"]
        import_id_fields = ["owner", "tag"]
class UserAdmin(
    ImportExportModelAdmin, SearchAutoCompleteAdmin, BaseUserAdmin, SimpleHistoryAdmin
):
    """Admin for members: import/export, autocomplete search and change history."""
    resource_class = UserResource
    # Change-form layout, grouped by concern.
    fieldsets = (
        (None, {"fields": ("email", "password")}),
        (
            _("Membership"),
            {
                "fields": (
                    "first_name",
                    "last_name",
                    "image",
                    "phone_number",
                    "form_on_file",
                    "email_confirmed",
                )
            },
        ),
        (_("BOTs"), {"fields": ("telegram_user_id", "uses_signal")}),
        (
            _("Permissions"),
            {
                "fields": (
                    "is_active",
                    "is_staff",
                    "is_superuser",
                    "groups",
                    "user_permissions",
                )
            },
        ),
        (_("Important dates"), {"fields": ("last_login", "date_joined")}),
    )
    # Add-form layout (email + password pair only).
    add_fieldsets = (
        (
            None,
            {
                "classes": ("wide",),
                "fields": ("email", "password1", "password2"),
            },
        ),
    )
    list_display = (
        "email",
        "first_name",
        "last_name",
        "form_on_file",
        "last_login",
        "date_joined",
    )
    search_fields = ["email", "first_name", "last_name"]
    ordering = ("email", "first_name", "last_name")
    # Empty on purpose: imports create new rows rather than matching existing ones.
    import_id_fields = ()  # 'email', 'first_name', 'last_name', 'is_staff', 'form_on_file', 'last_login','date_joined')

# admin.site.register(User,ImportExportModelAdmin)
admin.site.register(User, UserAdmin)
class TagAdmin(ImportExportModelAdmin, SimpleHistoryAdmin, SearchAutoCompleteAdmin):
    """Admin for member RFID tags; searchable by tag and owner identity."""
    list_display = ("tag", "owner", "last_used", "description")
    resource_class = TagResource
    search_fields = ["tag", "owner__first_name", "owner__last_name", "owner__email"]

admin.site.register(Tag, TagAdmin)
class AuditRecordResource(resources.ModelResource):
    """Import/export definition for AuditRecord rows."""

    class Meta:
        model = AuditRecord


class AuditRecordAdmin(ImportExportModelAdmin, SimpleHistoryAdmin):
    """Admin for audit-trail entries."""

    list_display = ("user", "action", "recorded")
    # Bug fix: `resource_class` must be a ModelResource subclass; it previously
    # pointed at the AuditRecord *model* itself, which django-import-export
    # would try to instantiate as a resource on export.
    resource_class = AuditRecordResource


admin.site.register(AuditRecord, AuditRecordAdmin)
| MakerSpaceLeiden/makerspaceleiden-crm | members/admin.py | admin.py | py | 3,389 | python | en | code | 6 | github-code | 13 |
33821644519 | from urllib import request, error
import socket

# Demo 1: a URL returning an HTTP error status raises error.HTTPError,
# which carries reason, code and headers.
try:
    response = request.urlopen("http://www.jd123.com/test.html")
except error.HTTPError as e:  # successfully catches the Bad Request error
    print(type(e.reason))
    print(e.reason, e.code, e.headers)

# Demo 2: an absurdly small timeout raises error.URLError whose `reason`
# is a socket.timeout instance (HTTPError is a URLError subclass, so it is
# listed first).
try:
    response = request.urlopen("https://jd.com", timeout=0.00002)
except error.HTTPError as e:  # would catch an HTTP-status error
    print('error.HTTPError:', e.reason)
except error.URLError as e:  # here e.reason is of type socket.timeout
    print(type(e.reason))
    print('error.URLError:', e.reason)
    # check whether e.reason is a socket.timeout instance
    if isinstance(e.reason, socket.timeout):
        print('超时错误')
else:
    print('成功发送请求')
| lzxin96/python_spider | src/urllib/HTTPErrorDemo.py | HTTPErrorDemo.py | py | 733 | python | en | code | 0 | github-code | 13 |
12135561123 | '''
文本文件的读取
'''
import os
import tensorflow as tf
def read_csv(filelist):
    """Read CSV files via the TF1 queue-runner pipeline.

    Returns a pair of batched tensors (examples, labels); each input row is
    expected to have two string columns. Requires an active session with
    queue runners started (see __main__ below).
    """
    # build the file-name queue
    file_queue = tf.train.string_input_producer(filelist)
    # define a line-oriented reader
    reader = tf.TextLineReader()
    # use the reader to pull (key, value) records off the file queue
    k, v = reader.read(file_queue)
    # decode: two string columns, defaulting to 'None' when a field is empty
    records = [['None'], ['None']]
    example, label = tf.decode_csv(v, record_defaults=records)
    # batch into groups of 15 with a single reading thread
    example_bat, label_bat = tf.train.batch([example, label],
                                            batch_size=15,
                                            num_threads=1)
    return example_bat,label_bat
if __name__ == '__main__':
    # build the list of CSV files under the test-data directory
    dir_name = '../test_data/'
    file_names = os.listdir(dir_name)
    file_list = []
    for f in file_names:
        file_list.append(os.path.join(dir_name,f))
    example,label = read_csv(file_list)
    # open a Session and run the pipeline
    with tf.Session() as sess:
        # thread coordinator for the queue-runner threads
        coord = tf.train.Coordinator()
        # start the threads that feed the input queues
        threads = tf.train.start_queue_runners(sess,coord)
        exam,lab = sess.run([example,label])
        print(exam)
        print(lab)
        # ask the threads to stop and reclaim their resources
        coord.request_stop()
        coord.join(threads)
| 15149295552/Code | Month08/day16/02_read_csv.py | 02_read_csv.py | py | 1,320 | python | en | code | 1 | github-code | 13 |
74330556177 | import math
def isPrime(x):
    """Return True when x is a prime number (False for x < 2).

    Fix: trial-divide up to the exact integer square root via math.isqrt
    instead of `int(math.sqrt(x) + 1)`, which relies on float rounding and
    over-scans by one; even divisors beyond 2 are skipped.
    """
    if x < 3:
        return x == 2
    if x % 2 == 0:
        return False
    for i in range(3, math.isqrt(x) + 1, 2):
        if x % i == 0:
            return False
    return True
def primeFactors(x):
    """Return the set of distinct prime factors of x (empty for x <= 1).

    Fixes: trial division now stops at sqrt(n) (the old loop walked d up to n,
    making a call on a large prime O(n)); the x == 1 special case now returns
    an empty set instead of an empty *list*, so the return type is consistent
    (callers here only use len(), which is unaffected).
    """
    divisors = set()
    n = x
    d = 2
    while d * d <= n:
        if n % d == 0:
            divisors.add(d)
            # strip every power of d before moving on
            while n % d == 0:
                n //= d
        else:
            d += 1
    if n > 1:
        # whatever remains above sqrt of the original value is itself prime
        divisors.add(n)
    return divisors
def conssecutive(n, i):
    """Count (capped at 4) how many consecutive integers starting at n,
    with i already counted, have exactly four distinct prime factors."""
    while i != 4:
        if len(primeFactors(n)) != 4:
            break
        n += 1
        i += 1
    return i
# Project Euler 47: find the first of four consecutive integers that each
# have four distinct prime factors.
condition = True
n = 2
while condition:
    i = conssecutive(n, 0)
    if i == 2:
        # debug trace for near-misses (runs of exactly two)
        print(n, i, primeFactors(n))
    if i == 4:
        print(n, i)
        condition = False
    else:
        #print(n, i)
        # n..n+i-1 qualified but n+i did not, so no run of 4 can start
        # before n+i+1; skip straight there.
        n += i + 1
print(n)
| spegesilden/projecteuler | e47/DistinctPrimeFactors.py | DistinctPrimeFactors.py | py | 833 | python | en | code | 0 | github-code | 13 |
38329387702 | # -*- coding:utf-8 -*-
from tkinter import *
# NOTE: imported second on purpose — ttk's themed widgets (Button, Style, ...)
# shadow the plain tkinter ones.
from tkinter.ttk import *

root = Tk()
root.geometry("200x200")
# create the ttk style registry for this application
style = Style()
# define a named style 'W.TButton' (a variant of the ttk.Button base style)
style.configure('W.TButton', font=('calibri', 10, 'bold', 'underline'),
                foreground='red')
# apply the custom style to the quit button
btn1 = Button(root, text="Quit !",
              style="W.TButton",
              command=root.destroy)
btn1.grid(row=0, column=2, padx=50)
# second button keeps the default TButton style and has no action
btn2 = Button(root, text="Click Me !!", command=None)
btn2.grid(row=1, column=2, pady=10, padx = 50)
root.mainloop() | ZCyborgs/Test | GUI_test.py | GUI_test.py | py | 621 | python | en | code | 0 | github-code | 13 |
38047473008 | ########################
# Framework
# Athena job options: run L2/EF trigger steering on 10 events and persist the
# trigger decision + AOD trigger configuration to a POOL file (HLT.root).
# `include` and `log` are provided by the Athena job-options environment.
########################
import AthenaCommon.AtlasUnixGeneratorJob
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
from AthenaCommon.AppMgr import theApp
theApp.EvtMax = 10
########################
# Generate config XML files
########################
include("pureNewSteering_menu.py")
########################
# L2 Steering:
########################
from TrigSteering.TestingTrigSteeringConfig import TestingTrigSteer_L2
log.info("setting up TestingTrigSteer_L2 configurable:")
hltSteer_L2 = TestingTrigSteer_L2('hltSteer_L2', hltFile='pureNewSteering_menu.xml', lvl1File='')
topSequence += hltSteer_L2
hltSteer_L2.Navigation.ClassesToPayload += [ 'TrigRoiDescriptor' ]
########################
# EF Steering:
########################
from TrigSteering.TestingTrigSteeringConfig import TestingTrigSteer_EF
log.info("setting up TestingTrigSteer_EF configurable:")
hltSteer_EF = TestingTrigSteer_EF('hltSteer_EF')
topSequence += hltSteer_EF
hltSteer_EF.Navigation.ClassesToPayload += [ 'TrigRoiDescriptor' ]
########################
# TrigDecision Makers
########################
theApp.Dlls += [ "TrigDecisionMaker" ]
theApp.TopAlg += [ "TrigDec::TrigDecisionMaker/TrigDecMaker" ]
theApp.TopAlg += [ "TrigDec::TrigAODConfigDataMaker/TrigAODCfgMaker" ]
########################
# POOL
########################
import AthenaPoolCnvSvc.WriteAthenaPool
## get a handle on the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
#Explicitly specify the output file catalog
from PoolSvc.PoolSvcConf import PoolSvc
svcMgr += PoolSvc()
svcMgr.PoolSvc.WriteCatalog = "xmlcatalog_file:Catalog1.xml"
from AthenaPoolCnvSvc.AthenaPoolCnvSvcConf import AthenaPoolCnvSvc
svcMgr += AthenaPoolCnvSvc()
svcMgr.AthenaPoolCnvSvc.CommitInterval = 10;
from AthenaPoolCnvSvc.WriteAthenaPool import AthenaPoolOutputStream
Stream1 = AthenaPoolOutputStream( "Stream1" )
Stream1.OutputFile = "HLT.root"; # ** mandatory parameter ** // The output file name
# Persist the trigger decision and the three AOD trigger-config payloads.
Stream1.ItemList += [ "TrigDec::TrigDecision#TrigDecision" ]
Stream1.ItemList += [ "TrigConf::HLTAODConfigData#AODConfig" ]
Stream1.ItemList += [ "TrigConf::Lvl1AODConfigData#AODConfig" ]
Stream1.ItemList += [ "TrigConf::Lvl1AODPrescaleConfigData#AODConfig" ]
theApp.Dlls += [ "TrigEventAthenaPoolPoolCnv" ]
#--------------------------------------------------------------
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL)
#--------------------------------------------------------------
svcMgr.MessageSvc.OutputLevel = 3
svcMgr.PoolSvc.OutputLevel = 2
svcMgr.AthenaPoolCnvSvc.OutputLevel = 2
Stream1.OutputLevel = 2
| rushioda/PIXELVALID_athena | athena/Trigger/TrigAnalysis/TrigDecisionMaker/share/trigDec_pureSteeringL2_WritePOOL.py | trigDec_pureSteeringL2_WritePOOL.py | py | 2,675 | python | en | code | 1 | github-code | 13 |
73875539537 | from architectures.mnist import Encoder as MnistEncoder, Decoder as MnistDecoder
from architectures.cifar10 import Encoder as Cifar10Encoder, Decoder as Cifar10Decoder
from architectures.celeba import Encoder as CelebaEncoder, Decoder as CelebaDecoder
def get_architecture(identifier: str, z_dim: int):
    """Return the (Encoder, Decoder) pair for the named dataset.

    :param identifier: dataset name: 'mnist', 'fmnist', 'cifar10' or 'celeba'
                       (MNIST and Fashion-MNIST share one architecture).
    :param z_dim: latent-space dimensionality passed to both modules.
    :raises ValueError: for an unrecognized identifier — the message now
                        includes the offending value for easier debugging.
    """
    if identifier in ('mnist', 'fmnist'):
        return MnistEncoder(z_dim), MnistDecoder(z_dim)
    if identifier == 'cifar10':
        return Cifar10Encoder(z_dim), Cifar10Decoder(z_dim)
    if identifier == 'celeba':
        return CelebaEncoder(z_dim), CelebaDecoder(z_dim)
    raise ValueError("Unknown architecture: %r" % identifier)
| gmum/cwae-pytorch | src/factories/architecture_factory.py | architecture_factory.py | py | 662 | python | en | code | 6 | github-code | 13 |
# URL configuration for the Profiles API app: one APIView route, one login
# route, and a DRF router that auto-generates the profile viewset routes.
from django.urls import path, include
from Profiles_API import views
from rest_framework.routers import DefaultRouter
# URL = http://127.0.0.1:8000/api/profile/
# For a specific user http://127.0.0.1:8000/api/profile/[id_name]
router = DefaultRouter()
# Since in views.py 'queryset' property is set so 'basename' parameter
# will be picked up from there automatically and we don't need to specify it
# in this case
router.register('profile', views.UserProfileViewset)
urlpatterns = [
    path('hello-view/', views.HelloApiView.as_view()),
    # Registering Viewset to routers for DRF to take care of it automatically
    path('', include(router.urls)),
    path('login/', views.UserLoginApiView.as_view())
]
| neet1313/DRF-Project1-Profile | Profiles_API/urls.py | urls.py | py | 730 | python | en | code | 0 | github-code | 13 |
37961360728 | OutputLevel = INFO
doJiveXML = False
doVP1 = False
doWriteESD = False
doWriteAOD = False
doReadBS = True
doAuditors = True
import os
if os.environ['CMTCONFIG'].endswith('-dbg'):
doEdmMonitor = True
doNameAuditor = True
else:
doEdmMonitor = False
doNameAuditor = False
DetDescrVersion = "ATLAS-GEO-20-00-02"
import AthenaCommon.SystemOfUnits as Units
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetGeo = 'atlas'
globalflags.DataSource = 'data'
if 'doReadBS' in dir() and doReadBS:
globalflags.InputFormat = 'bytestream'
globalflags.ConditionsTag = 'COMCOND-BLKPA-006-07'
else:
globalflags.InputFormat = 'pool'
globalflags.DetDescrVersion = DetDescrVersion
globalflags.print_JobProperties()
if len(globalflags.ConditionsTag())!=0:
from IOVDbSvc.CondDB import conddb
conddb.setGlobalTag(globalflags.ConditionsTag())
from AthenaCommon.BeamFlags import jobproperties
jobproperties.Beam.energy = 7.*Units.TeV
jobproperties.Beam.beamType = 'collisions'
if 'doReadBS' in dir() and doReadBS:
jobproperties.Beam.bunchSpacing = 75
from RecExConfig.RecFlags import rec
rec.Commissioning=False
from AthenaCommon.DetFlags import DetFlags
DetFlags.ID_setOn()
DetFlags.Calo_setOff()
DetFlags.Muon_setOff()
DetFlags.Print()
from InDetRecExample.InDetJobProperties import InDetFlags
InDetFlags.doTruth = (globalflags.DataSource == 'geant4' and globalflags.InputFormat() == 'pool')
InDetFlags.doxKalman = False
InDetFlags.doiPatRec = False
InDetFlags.doNewTracking = True
InDetFlags.doMonitoringGlobal = True
InDetFlags.doMonitoringPixel = False
InDetFlags.doMonitoringSCT = False
InDetFlags.doMonitoringTRT = True
InDetFlags.doMonitoringAlignment = False
InDetFlags.doTrkNtuple = True
InDetFlags.doPixelTrkNtuple = True
InDetFlags.doTrtTrkNtuple = True
InDetFlags.doPrintConfigurables = True
from TrkDetDescrSvc.TrkDetDescrJobProperties import TrkDetFlags
TrkDetFlags.TRT_BuildStrawLayers = True
from LumiBlockComps.LuminosityToolDefault import LuminosityToolOnline, LuminosityToolDefault
theLumiTool = LuminosityToolOnline()
from IOVDbSvc.CondDB import conddb
conddb.addFolder('TRIGGER', '/TRIGGER/LUMI/LBLB')
theLumiTool.LBLBFolderName = "/TRIGGER/LUMI/LBLB"
from AthenaCommon.AppMgr import ToolSvc
ToolSvc += theLumiTool
include("InDetRecExample/InDetRec_all.py")
from TRT_Monitoring.TRT_MonitoringConf import TRT_Monitoring_Tool
#TRT_Monitoring_Tool.m_useConditionsHTStatus = True
#TRT_Monitoring_Tool.m_useArgonStraws = True
TRT_Monitoring_Tool.doArgonXenonSeperation = True
TRT_Monitoring_Tool.useHoleFinder = True #care
from IOVDbSvc.CondDB import conddb
conddb.addOverride("/TRT/Cond/StatusHT","TrtStrawStatusHT-ArTest-00-00")
theApp.EvtMax = 10
if not doReadBS:
ServiceMgr.PoolSvc.AttemptCatalogPatch=True
ServiceMgr.EventSelector.InputCollections = ["/afs/cern.ch/atlas/maxidisk/d158/CSC.005200.T1_McAtNlo_Jimmy.RDO.pool.root" ]
if doReadBS:
# ServiceMgr.ByteStreamInputSvc.FullFileName = [ "/afs/cern.ch/user/e/eyazici/rawdata/data12_8TeV.00205010.physics_ZeroBiasOverlay.merge.RAW/data12_8TeV.00205010.physics_ZeroBiasOverlay.merge.RAW._lb0137._SFO-ALL._0001.1"]
#AB ServiceMgr.ByteStreamInputSvc.FullFileName = [ "/afs/cern.ch/user/e/eyazici/rawdata/data12_8TeV.00201113.physics_ZeroBiasOverlay.merge.RAW/data12_8TeV.00201113.physics_ZeroBiasOverlay.merge.RAW._lb0423._SFO-ALL._0001.1"]
#ServiceMgr.ByteStreamInputSvc.FullFileName = [ "/afs/cern.ch/work/e/ecelebi/public/data11_7TeV.00179710.physics_ZeroBias.merge.RAW" ]
ServiceMgr.ByteStreamInputSvc.FullFileName = [ "/afs/cern.ch/user/e/eyazici/public/data12_8TeV.00201113.physics_ZeroBiasOverlay.merge.RAW._lb0423._SFO-ALL._0001.1"]
# ServiceMgr.ByteStreamInputSvc.FullFileName = [ "/tmp/rjungst/testinput"]
| rushioda/PIXELVALID_athena | athena/InnerDetector/InDetMonitoring/TRT_Monitoring/share/jobOptions_artest.py | jobOptions_artest.py | py | 3,931 | python | en | code | 1 | github-code | 13 |
import pymongo

# Connect to a MongoDB server running locally on the default port.
client = pymongo.MongoClient("mongodb://127.0.0.1:27017/") #client is important
# Databases and collections are created lazily on first write.
newDB = client["firsttest"] #if it doesnt exist it will create new db#can use .<name>
newCollection = newDB.testing #creating collection .<name> inside db
#newUpdate = newCollection.update_one({"id": 5},{"$set":{"name":"Kala"}})
# Bulk update: set sex to "F" on every document whose sex is currently "M".
newUpdateMany = newCollection.update_many({"sex":"M"},{"$set":{"sex":"F"}})
| ritheasen/project-test | 54 python mongodb updating record.py | 54 python mongodb updating record.py | py | 409 | python | en | code | 0 | github-code | 13 |
16450116681 | from general_functions import get_image, save_json, definePageRowCollumn
from formating_functions import format_name
def retrieve_perks(table, icons_path, json_path, project_url):
    """Scrape every perk from the wiki table, download each icon, and save
    the collected records as JSON.

    Args:
        table: BeautifulSoup element for the perks <table>.
        icons_path: local directory where icon images are written.
        json_path: output path for the resulting JSON file.
        project_url: base URL used to build the public icon links.
    """
    perks_list = []
    # Skip the header row; each remaining <tr> describes one perk.
    for table_row in table.findAll('tr')[1:]:
        perk = {}
        # Column layout: th[0]=icon, th[1]=name, td=description, th[2]=character.
        row_headers = table_row.findAll('th')
        icon_field = row_headers[0]
        name_field = row_headers[1]
        description_field = table_row.find('td')
        character_field = row_headers[2]
        perk['name'] = name_field.a.text
        # Normalized name used for the icon filename.
        formated_perk_name = format_name(perk['name'])
        perk['description'] = description_field.find('div', class_='formattedPerkDesc').text
        # '.All' marks perks available to every character; shown as 'General'.
        perk['character'] = character_field.text.strip().replace('.All', 'General')
        perk['icon'] = f"{project_url}/{icons_path}/{formated_perk_name}.png"
        print(f"Baixando o icone do perk: {perk['name']}")
        # Download the icon; get_image retries on failure (see its error message).
        get_image(icon_field.find('a').get('href'),
                  f'{icons_path}/{formated_perk_name}.png',
                  "Erro ao baixar o icone, tentando novamente...")
        print("Icone baixado com sucesso")
        perks_list.append(perk)
    # Annotate each record with its page/row/column slot (5 rows x 3 columns).
    definePageRowCollumn(perks_list, 5, 3)
    save_json(json_path, perks_list)
35775246254 | #coding=utf-8
#!/usr/bin/env python
from aip import AipOcr
import Translate
import numpy as np
from PIL import Image, ImageDraw, ImageFont
#读取本地图片测试
def Get_Image(path='./HTML/0.jpg'):
    """Read an image file and return its raw bytes.

    Args:
        path: file to read; defaults to the previously hard-coded test image,
              so existing callers keep working unchanged.

    Returns:
        The file content as bytes.
    """
    with open(path, 'rb') as fp:
        return fp.read()
#创建客户端
def Create_Client():
    """Build and return a Baidu AipOcr client with the app credentials."""
    # SECURITY NOTE(review): credentials are hard-coded in source; consider
    # loading them from environment variables instead of committing them.
    APP_ID = '17517601'
    API_KEY = 'FLjwlLDSGb7tsZfpzgO00F6D'
    SECRET_KEY = '8yGi1u18NMiIrxgckn74wgnPG9uO2cMl'
    return AipOcr(APP_ID, API_KEY, SECRET_KEY)
#识别字体
def Recognize_Word(Image, client):
    """Run Baidu general OCR on an image and translate the recognized text.

    Fix: the original issued a bare ``client.general(Image)`` call whose
    result was discarded before the real call with options -- one redundant
    (billable) OCR request per invocation. That call is removed.

    Args:
        Image: raw image bytes to recognize.
        client: an AipOcr-style client exposing ``general(image, options)``.

    Returns:
        Tuple (Left, top, width, height, Num, Reswords, TransWords):
        per-word bounding-box lists, the word count, the recognized strings,
        and their English translations.
    """
    Reswords = []
    TransWords = []
    # Request word-level granularity plus locations and vertex data.
    options = {}
    options["recognize_granularity"] = "big"
    options["language_type"] = "CHN_ENG"
    options["detect_direction"] = "true"
    options["detect_language"] = "true"
    options["vertexes_location"] = "true"
    options["probability"] = "true"
    Result = client.general(Image, options)
    Res1 = Result.items()
    Left = []
    top = []
    width = []
    height = []
    Num = 0
    # Scratch buffer for one word's 4 vertices; NOTE(review): it is
    # overwritten per word and never returned, so it has no effect.
    matrix = [[0, 0], [0, 0], [0, 0], [0, 0]]
    cnt_i = 0
    cnt_j = 0
    # Walk the response: the word entries live under a list-valued key
    # (e.g. "words_result"); scalar top-level keys are ignored.
    for i, j in Res1:
        if isinstance(j, list):
            for m in j:
                for i0, j0 in m.items():
                    if isinstance(j0, dict):
                        # Bounding box of one recognized word.
                        if str(i0) == 'location':
                            for i1, j1 in j0.items():
                                if str(i1) == 'left':
                                    Left.append(j1)
                                elif str(i1) == 'top':
                                    top.append(j1)
                                elif str(i1) == 'width':
                                    width.append(j1)
                                elif str(i1) == 'height':
                                    height.append(j1)
                            Num = Num + 1
                    elif isinstance(j0, list):
                        # Four corner points of the word's outline.
                        if str(i0) == 'vertexes_location':
                            for s in j0:
                                for m2 in s.values():
                                    matrix[cnt_i][cnt_j] = m2
                                    cnt_j = cnt_j + 1
                                cnt_i = cnt_i + 1
                                cnt_j = 0
                            cnt_i = 0
                    else:
                        # Plain value (the recognized text itself).
                        Reswords.append(j0)
    # Translate each recognized string, preserving order.
    cnt_Num = Num
    while cnt_Num > 0:
        TransWords.append(Translate.Translate_Chinese_To_English(str(Reswords[Num - cnt_Num])))
        cnt_Num = cnt_Num - 1
    return Left, top, width, height, Num, Reswords, TransWords
if '__main__' == __name__:
    # Demo entry point: read the sample image, OCR it, print translations.
    Img=Get_Image()
    Client=Create_Client()
    Left,top,width,height,Num,Reswords,TransWords=Recognize_Word(Img,Client)
    print(TransWords)
21579204345 | from Gurobi_direct.OptModel_m import OptModel_gurobi
#from column.ColumnAlgorithm import column_generating
import matplotlib.pyplot as plt
import re
from Data.Data import Data
class Solution:
'''
1.OptModel输出的最优解 → routes
2.可视化
'''
def __init__(self):
self.model = OptModel_gurobi()
self.data = Data()
self.X = [[([0] * self.data.vehicleNum) for i in range(self.data.NodeNum)] for j in range(self.data.NodeNum)]
self.S = [[([0] * self.data.vehicleNum) for i in range(self.data.NodeNum)] for j in range(self.data.NodeNum)]
self.y = [[] for k in range(self.data.vehicleNum)]
self.m = 0
self.routes = []
self.routeNum = 0
def start(self):
self.data.readData()
self.model.start()
self.getSolution()
def getSolution(self):
'''
1.OptModel输出的最优解 → routes
2.可视化
:return:
'''
#1.OptModel输出的最优解 → routes
for i in self.model.model.getVars():
str = re.split(r"_",i.VarName)
if(str[0] == 'X' and i.x == 1) :
self.X[int(str[1])][int(str[2])][int(str[3])] = i.x
if(str[0] == "s" and i.x == 1):
self.S[int(str[1])][int(str[2])] = i.x
if(str[0] == "m"):
self.m = i.x
if(str[0] == "y"):
self.y[int(str[1])] = i.x
print("车辆数为:",self.m)
for k in range(self.data.vehicleNum):
for i in range(self.data.NodeNum):
for j in range(self.data.NodeNum):
if(self.X[i][j][k] > 0 and (not(i == 0 and j == self.data.NodeNum))):
print("x[{0},{1},{2}] = {3}" .format(i,j,k,self.X[i][j][k]))
#可视化
cost_list = []
for k in range(self.data.vehicleNum):
if(self.y[k] > 0):
i = 0
subRoute = []
cost = 0
subRoute.append(i)
finish = False
while(not finish):
for j in range(self.data.NodeNum):
if (self.X[i][j][k] > 0):
subRoute.append(j)
cost += self.data.disMatrix[i][j]
i = j
if(j == self.data.NodeNum -1):
finish = True
#if(len(subRoute) >= 3):
subRoute[len(subRoute)-1] = 0
self.routes.append(subRoute)
cost_list.append(cost)
self.routeNum +=1
#print("\n\n==============Route of Vehicles===============")
for i in range(len(self.routes)):
print(self.routes[i],'cost:',cost_list[i])
print("\n\n==============Drawing the Graph==============")
plt.figure(0)
plt.xlabel('x')
plt.ylabel('y')
plt.title("R101")
plt.scatter(self.data.cor_X[0],self.data.cor_Y[0],c='blue',alpha=1,marker=',',linewidths=3,label='depot')
plt.scatter(self.data.cor_X[1:-1], self.data.cor_Y[1:-1], c='black', alpha=1, marker='o', linewidths=3,
label='customer')
for k in range(self.routeNum):
for i in range(len(self.routes[k]) - 1):
a = self.routes[k][i]
b = self.routes[k][i+1]
x = [self.data.cor_X[a],self.data.cor_X[b]]
y = [self.data.cor_Y[a], self.data.cor_Y[b]]
plt.plot(x,y,'k',linewidth = 1)
plt.grid(False)
plt.legend(loc='upper right')
plt.show()
if __name__ == '__main__':
s = Solution()
s.start() | LiuZunzeng/Code_VRPTW | Visualizition/Solution_origin.py | Solution_origin.py | py | 3,756 | python | en | code | 0 | github-code | 13 |
42071613589 | from itertools import *
def multiples(n):
    """Return the sum of all natural numbers below n divisible by 3 or 5."""
    total = 0
    for value in range(n):
        if value % 3 == 0 or value % 5 == 0:
            total += value
    return total
if __name__ =="__main__":
print(multiples(1000))
| prizmaweb/practice | multiples.py | multiples.py | py | 176 | python | en | code | 0 | github-code | 13 |
31611957552 | import pytest
import os
import requests
from dotenv import load_dotenv
from weather_app import get_weather_data
class MockResponse:
    """Minimal stand-in for requests.Response used by the weather tests."""

    def __init__(self, json_data, status_code):
        self.json_data = json_data
        self.status_code = status_code

    def json(self):
        """Return the canned JSON payload."""
        return self.json_data

    def raise_for_status(self):
        """Mimic requests: raise HTTPError for any non-200 status code."""
        if self.status_code == 200:
            return
        raise requests.exceptions.HTTPError
def test_get_weather_data(monkeypatch):
    """Verify get_weather_data formats a canned OpenWeatherMap payload.

    The real API key from .env is used only to build the expected URL;
    requests.get is monkeypatched, so no network call is made.
    """
    load_dotenv()  # Load environment variables from .env file
    api_key = os.getenv("API_KEY")
    mock_api_key = api_key
    mock_location = "New York"
    expected_url = f"http://api.openweathermap.org/data/2.5/weather?q={mock_location}&appid={mock_api_key}"
    # Canned payload covering temperature, humidity, wind, rain and snow.
    mock_response = {
        "weather": [{"icon": "01d", "description": "clear sky"}],
        "main": {"temp": 293.15, "humidity": 50},
        "wind": {"speed": 3.5, "deg": 180},
        "rain": {"1h": 0.0},
        "snow": {"1h": 0.0}
    }
    def mock_get(url):
        # The app must request exactly the expected URL.
        assert url == expected_url
        return MockResponse(mock_response, 200)
    monkeypatch.setattr("requests.get", mock_get)
    weather_data, _ = get_weather_data(mock_location)
    # 293.15 K == 20.0 C; remaining fields echo the canned payload.
    assert weather_data == "Temperature: 293.15 K\n" \
                           "Temperature (Celsius): 20.0 °C\n" \
                           "Humidity: 50%\n" \
                           "Weather Conditions: clear sky\n" \
                           "Wind Speed: 3.5 m/s\n" \
                           "Wind Direction: 180°\n" \
                           "Rainfall: 0.0 mm\n" \
                           "Snowfall: 0.0 mm"
| ranms25/Python-Weather-App | tests/test_get_weather_data.py | test_get_weather_data.py | py | 1,623 | python | en | code | 0 | github-code | 13 |
################ While Loop with Else #############################
# Demo: a while loop's else clause runs once the condition becomes false
# (i.e. on normal termination, not on break).
a = 1
while (a<=20):
    print(a)
    a+=1
else:
    print("Code Successfully Executed ")
# Second demo: counts 10..100 in steps of 10, printing after each step.
b = 10
while (b<=100):
    print(b)
    b+=10
    print("While Lopp Code Sucessfull Executed ")
else:
    print("Else Part Executed NOW Emjoy :)")
10053011737 | # -*- coding: UTF-8 -*-
from django.contrib import admin
from stocks.models import (Stock, StockPair, PairTransaction, BoughtSoldTransaction, Account, SubAccount,
AccountStock, Snapshot, SnapshotStock, Transaction, AccountStocksRange, AccountStockGroup,
AccountStockGroupStock)
from markdownx.admin import MarkdownxModelAdmin
def add_star(modeladmin, request, queryset):
    """Admin bulk action: set star=True on every selected object."""
    queryset.update(star=True)
add_star.short_description = "Star"
def remove_star(modeladmin, request, queryset):
    """Admin bulk action: clear the star flag on every selected object.

    Bug fix: this previously set star=True, making the action identical to
    add_star instead of unstarring.
    """
    queryset.update(star=False)
remove_star.short_description = "Unstar"
def combine_pair_transactions(modeladmin, request, queryset):
    """Admin bulk action: merge several open pair transactions into the first.

    All selected rows must be open (not finished/archived) and share the same
    pair, account and bought stock; otherwise the merge is refused.
    NOTE(review): assumes the summed bought/sold amounts are non-zero --
    selecting only zero-amount rows would raise ZeroDivisionError.
    """
    # The first selected row absorbs the others.
    base = queryset[0]
    sold_amount = 0
    bought_amount = 0
    sold_total = 0
    bought_total = 0
    for obj in queryset:
        # Refuse to merge incompatible or already-closed rows.
        if obj.finished or obj.archived or obj.pair != base.pair or obj.account != base.account\
                or obj.bought_stock != base.bought_stock:
            raise RuntimeError('Cannot combine these pair transactions. ')
        bought_total += obj.bought_total
        sold_total += obj.sold_total
        sold_amount += obj.sold_amount
        bought_amount += obj.bought_amount
    # Volume-weighted average prices over the combined amounts.
    base.bought_price = bought_total / bought_amount
    base.bought_amount = bought_amount
    base.sold_price = sold_total / sold_amount
    base.sold_amount = sold_amount
    base.save()
    # Delete the rows that were merged into base.
    for obj in queryset[1:]:
        obj.delete()
combine_pair_transactions.short_description = "Combine"
class StockAdmin(admin.ModelAdmin):
list_display = ['name', 'code', 'price', 'market', 'star']
list_filter = ['star', 'watching']
search_fields = ['name', 'code', ]
actions = [add_star, remove_star, ]
class PairTransactionAdmin(admin.ModelAdmin):
list_display = ['account', 'sold_stock', 'bought_stock', 'profit', 'started', 'finished']
list_filter = ['finished', 'account', 'archived', 'pair']
readonly_fields = ['profit', ]
actions = [combine_pair_transactions, ]
fieldsets = [
(None, {
'fields': ('account', 'pair'),
}),
('正向', {
'fields': (
('sold_stock', 'sold_price', 'sold_amount'),
('bought_stock', 'bought_price', 'bought_amount'),
('started',),
)
}),
('反向', {
'fields': (
('bought_sold_price',),
('sold_bought_back_price', 'bought_back_amount', ),
('finished',),
)
}),
('其它', {
'fields': (
('profit',),
('order',),
('archived',),
)
}),
]
class BoughtSoldTransactionAdmin(admin.ModelAdmin):
list_display = ['bought_stock', 'bought_price', 'profit', 'started', 'finished']
list_filter = ['finished', 'account', 'archived', ]
readonly_fields = ['profit', ]
class TransactionAdmin(admin.ModelAdmin):
list_display = ['account', 'action', 'stock', 'price', 'amount', 'date', 'has_updated_account', ]
list_filter = ['account', 'action', 'has_updated_account']
class PairAdmin(admin.ModelAdmin):
list_display = ['__str__', 'current_value', 'order', 'star']
list_editable = ['order', ]
list_filter = ['star', ]
actions = [add_star]
class AccountStockInline(admin.TabularInline):
model = AccountStock
class AccountStocksRangeInline(admin.TabularInline):
model = AccountStocksRange
extra = 0
class SubAccountInline(admin.TabularInline):
model = SubAccount
class AccountAdmin(admin.ModelAdmin):
list_display = ['name', 'slug', 'public', ]
inlines = [AccountStockInline, SubAccountInline, AccountStocksRangeInline]
class SnapshotStockInline(admin.TabularInline):
model = SnapshotStock
class SnapshotAdmin(MarkdownxModelAdmin):
model = Snapshot
inlines = [SnapshotStockInline]
list_display = ['account', 'serial_number', 'date']
list_filter = ['account', ]
class AccountStockGroupStockAdminInline(admin.TabularInline):
model = AccountStockGroupStock
autocomplete_fields = ['stock']
extra = 0
@admin.register(AccountStockGroup)
class AccountStockGroupAdmin(admin.ModelAdmin):
list_display = ['account', 'name']
inlines = [AccountStockGroupStockAdminInline, ]
admin.site.register(Stock, StockAdmin)
admin.site.register(StockPair, PairAdmin)
admin.site.register(PairTransaction, PairTransactionAdmin)
admin.site.register(Transaction, TransactionAdmin)
admin.site.register(BoughtSoldTransaction, BoughtSoldTransactionAdmin)
admin.site.register(Account, AccountAdmin)
admin.site.register(Snapshot, SnapshotAdmin)
| fruitschen/fruits_learning | stocks/admin.py | admin.py | py | 4,679 | python | en | code | 1 | github-code | 13 |
36245364003 | import pandas as pd
import glob
import seaborn as sns
import matplotlib as mpl
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.legend_handler import HandlerBase
import copy
from networkx.algorithms.connectivity.connectivity import average_node_connectivity
from matplotlib.lines import Line2D
from itertools import chain
from scipy.stats import ranksums
import matplotlib.colors as mcolors
from matplotlib.patches import Circle
from collections import Counter
from pathlib import Path
mpl.rc('font',family='Arial')
class TextHandler(HandlerBase):
    """Matplotlib legend handler that reuses a Text artist as its own
    legend symbol, centered within the legend entry box."""
    def create_artists(self, legend, orig_handle,xdescent, ydescent,
                        width, height, fontsize,trans):
        # Copy the handle so the on-plot artist is not moved.
        h = copy.copy(orig_handle)
        h.set_position((width/2.,height/2.))
        h.set_transform(trans)
        h.set_ha("center");h.set_va("center")
        fp = orig_handle.get_font_properties().copy()
        fp.set_size(fontsize)
        # uncomment the following line,
        # if legend symbol should have the same size as in the plot
        h.set_font_properties(fp)
        return [h]
def network_chicken(g, type = []):
# chicken
features_chicken = []
if type == "Ecoli":
name_dataset = "Ec518" #"SAL143"
folder = "EcoliSNPs"
results_folder = "SMOTE pre-process" #"Results"
type_data = "combination" #"accessory_core_intergenic"
else:
name_dataset = "SAL143"
folder = "SalmonellaSNPs"
results_folder = "Results"
type_data = "accessory_core_intergenic"
# Load Antibiotic Data:
antibiotic_df = pd.read_csv(folder+"/"+name_dataset+'_AMR_data_RSI.csv', header = [0])
# Get files in directory:
directory = folder+"/Population Correction/"+results_folder+"/"+type_data
print(antibiotic_df.columns[1:])
name_anti = []
for count, anti in enumerate(antibiotic_df.columns[1:]):
if folder == "EcoliSNPs":
if anti in ["AMC", "CTX-C"]:
continue
file_name = directory+"/features_"+name_dataset+"_"+anti+'.csv'
my_file = Path(file_name)
try:
my_abs_path = my_file.resolve(strict=True)
except FileNotFoundError:
continue
data=pd.read_csv(file_name, index_col=[0], header=[0])
features=data.index
name_anti.append(anti)
if len(features) > 0:
g.add_node(anti, color_="silver")
for f in features:
if len(f) == 13:
g.add_node(f, color_="darkorange")
else:
if f[0] == "c":
g.add_node(f, color_="forestgreen")
elif f[0] == "i":
g.add_node(f, color_="royalblue")
else:
g.add_node(f, color_="orange")
g.add_edge(anti,f, color="black")
return g, features_chicken, name_anti
def draw_network(g, axn, labels, name_antibiotic, connect=False):
node_max_size = 240
fontsize=6
node_min_size = 3
node_degree_dict=nx.degree(g)
nodes_sel = [x for x in g.nodes() if node_degree_dict[x]>0]
color_map=nx.get_node_attributes(g, 'color_')
df_node = pd.DataFrame(columns=["Count","Antibiotics"])
for n in nodes_sel:
if n in name_antibiotic:
continue
neigh = g.neighbors(n)
neigh_list = []
for nn in neigh:
neigh_list.append(nn)
c = color_map.get(n)
if c == "darkorange":
df_node.loc[n,"Type"] = "13-mer"
elif c == "forestgreen":
df_node.loc[n,"Type"] = "core genome snp"
elif c == "royalblue":
df_node.loc[n,"Type"] = "intergenic region snp"
elif c == "orange":
df_node.loc[n,"Type"] = "accessory gene"
df_node.loc[n,"Count"] = len(neigh_list)
df_node.loc[n,"Antibiotics"] = ', '.join(neigh_list)
pos = nx.spring_layout(g,scale=3)
color_s=[color_map.get(x) for x in g.nodes()]
edges = g.edges()
colors = [g[u][v]['color'] for u,v in edges]
node_size = []
edge_colors = []
linewidth_val = []
alpha_val = []
node_shape_list = []
nodes_name = []
for i, n in enumerate(g.nodes):
color_n = color_map.get(n)
nodes_name.append(n)
if n in name_antibiotic:
edge_colors.append(color_s[i])
color_s[i] = "white"
node_size.append(node_max_size)
linewidth_val.append(3)
alpha_val.append(1)
node_shape_list.append("o")
else:
edge_colors.append(color_n)
node_size.append(node_min_size)
alpha_val.append(1)
linewidth_val.append(1)
node_shape_list.append("o")
node_shape_list = np.array(node_shape_list)
edge_colors = np.array(edge_colors)
node_size = np.array(node_size)
alpha_val = np.array(alpha_val)
linewidth_val = np.array(linewidth_val)
color_s = np.array(color_s)
nodes = np.array(nodes_name)
id_o = np.where(node_shape_list == "o")[0]
options_o = {"edgecolors": list(edge_colors[id_o]), "node_size": list(node_size[id_o]), "alpha": list(alpha_val[id_o]), "linewidths":list(linewidth_val[id_o])} #[v * 1000 for v in d.values()]
nx.draw_networkx_nodes(g, pos, nodelist= nodes[id_o], node_shape = "o", node_color=list(color_s[id_o]), **options_o, ax=axn)
nx.draw_networkx_edges(g, pos, alpha=0.2, edge_color = colors, width=0.2, ax=axn)
nx.draw_networkx_labels(g, pos, labels, font_size=fontsize, font_color="k", ax=axn)
axn.margins(x=0.15)
if connect == True:
connectivity = np.round(average_node_connectivity(g),3)
axn.set_title("Connectivity = {}".format(connectivity), fontsize = 30)
# Plot Ecoli network
h=nx.Graph()
h, _, name_antibiotic = network_chicken(h, type="Ecoli")
color_map=nx.get_node_attributes(h, 'color_')
legend_node_names = []
legend_node_number = []
labels = {}
k = 1
for n in h.nodes:
color_n = color_map.get(n)
if n in name_antibiotic:
labels[n] = n
else:
legend_node_number.append(str(k))
legend_node_names.append(n)
labels[n] = ""
k+=1
legend_node_names = np.array(legend_node_names)
legend_node_number = np.array(legend_node_number)
# Networkx
fig = plt.figure(figsize=(10, 15))
ax0 = fig.add_subplot(211)
plt.rcParams.update({'font.size': 20})
draw_network(h,ax0,labels, name_antibiotic)
color_map=nx.get_node_attributes(h, 'color_')
c_map = []
for key in color_map.keys():
c_map.append(color_map[key])
c_map = Counter(c_map)
print(c_map)
input("cont")
legend_elements = []
for i in c_map.keys():
if i == 'silver':
legend_elements.append(Line2D([], [], marker='o', markeredgecolor='silver', label='Antibiotic',
color = 'w', markerfacecolor = 'silver', markersize=10, alpha=1))
elif i == 'darkorange':
legend_elements.append(Line2D([], [], marker='o', markeredgecolor='darkorange', label='13-mers',
color = 'w', markerfacecolor = 'darkorange', markersize=10, alpha=1))
elif i == 'forestgreen':
legend_elements.append(Line2D([], [], marker='o', markeredgecolor='forestgreen', label='Core Genome SNPs',
color = 'w', markerfacecolor = 'forestgreen', markersize=10, alpha=1))
elif i == 'royalblue':
legend_elements.append(Line2D([], [], marker='o', markeredgecolor='royalblue', label='Intergenic Region SNPs',
color = 'w', markerfacecolor = 'royalblue', markersize=10, alpha=1))
elif i == 'orange':
legend_elements.append(Line2D([], [], marker='o', markeredgecolor='orange', label='Accessory Genes',
color = 'w', markerfacecolor = 'orange', markersize=10, alpha=1))
ax0.legend(handles = legend_elements, loc='upper center', bbox_to_anchor=(0.5, -0.01),
fancybox=True, shadow=True, ncol=4, fontsize = 12,
title="Nodes", title_fontsize=15)
# Plot Salmonella network
h=nx.Graph()
h, _, name_antibiotic = network_chicken(h, type="Salmonella")
color_map=nx.get_node_attributes(h, 'color_')
legend_node_names = []
legend_node_number = []
labels = {}
k = 1
for n in h.nodes:
color_n = color_map.get(n)
if n in name_antibiotic:
labels[n] = n
else:
legend_node_number.append(str(k))
legend_node_names.append(n)
labels[n] = ""
k+=1
legend_node_names = np.array(legend_node_names)
legend_node_number = np.array(legend_node_number)
# Networkx
ax1 = fig.add_subplot(212)
draw_network(h,ax1,labels, name_antibiotic)
color_map=nx.get_node_attributes(h, 'color_')
plt.tight_layout()
plt.savefig('Figure_S9.svg', dpi=300, bbox_inches='tight')
| tan0101/Commercial_WGS2023 | Scripts/network_ML_features.py | network_ML_features.py | py | 9,371 | python | en | code | 0 | github-code | 13 |
6921526157 | from heat.common import exception
from heat.common.i18n import _
from heat.engine import properties
from heat.engine import resource
from common.mixins import f5_bigip
from common.mixins import F5BigIPMixin
class F5CmSync(resource.Resource, F5BigIPMixin):
'''Sync the device configuration to the device group.'''
PROPERTIES = (
BIGIP_SERVER,
DEVICE_GROUP,
DEVICE_GROUP_PARTITION
) = (
'bigip_server',
'device_group',
'device_group_partition'
)
properties_schema = {
BIGIP_SERVER: properties.Schema(
properties.Schema.STRING,
_('Reference to the BigIP Server resource.'),
required=True
),
DEVICE_GROUP: properties.Schema(
properties.Schema.STRING,
_('Name of the device group to sync BIG-IP device to.'),
required=True
),
DEVICE_GROUP_PARTITION: properties.Schema(
properties.Schema.STRING,
_('Partition name where device group is located on the device.'),
required=True
)
}
@f5_bigip
def handle_create(self):
'''Sync the configuration on the BIG-IP® device to the device group.
:raises: ResourceFailure exception
'''
try:
dg_name = self.properties[self.DEVICE_GROUP]
dg_part = self.properties[self.DEVICE_GROUP_PARTITION]
self.bigip.tm.cm.device_groups.device_group.exists(
name=dg_name, partition=dg_part
)
config_sync_cmd = 'config-sync to-group {}'.format(
self.properties[self.DEVICE_GROUP]
)
self.bigip.tm.cm.exec_cmd('run', utilCmdArgs=config_sync_cmd)
except Exception as ex:
raise exception.ResourceFailure(ex, None, action='CREATE')
@f5_bigip
def check_create_complete(self, token):
'''Determine whether the BIG-IP®'s sync status is 'In-Sync'.
:raises: ResourceFailure
'''
sync_status = self.bigip.tm.cm.sync_status
sync_status.refresh()
status = \
(sync_status.entries['https://localhost/mgmt/tm/cm/sync-status/0']
['nestedStats']['entries']['status']['description'])
if status.lower() == 'in sync':
return True
return False
@f5_bigip
def handle_delete(self):
'''Delete sync resource, which has no communication with the device.'''
return True
def resource_mapping():
    # Heat plugin hook: map the resource type name to its implementation.
    return {'F5::Cm::Sync': F5CmSync}
| F5Networks/f5-openstack-heat-plugins | f5_heat/resources/f5_cm_sync.py | f5_cm_sync.py | py | 2,570 | python | en | code | 7 | github-code | 13 |
27629837484 | import matplotlib.pyplot as plt
import numpy as np
from numpy.fft import fft,fftfreq
def myctft(T,T1,fs):
    """Plot a 10 Hz sine on [-T, T], a (possibly zero-padded) copy on
    [-T1, T1], and the amplitude spectrum of the latter via FFT.

    Args:
        T: half-width of the base signal window (seconds).
        T1: half-width of the second window; if T1 > T the base signal is
            zero-padded to fill it.
        fs: oversampling factor relative to the 10 Hz tone.
    """
    f = 10;
    # Sample step is 1/(fs*f), i.e. fs samples per signal period.
    time_x = np.arange(-T,T + 1/(fs*f), 1/(fs*f))
    x = np.sin(2 * np.pi * f * time_x)
    time_y = np.arange(-T1, T1 + 1 / (fs * f), 1 / (fs * f))
    if T1 <= T:
        # Shorter (or equal) window: recompute the sine directly on it.
        y = np.sin(2 * np.pi * f * time_y)
    else:
        y = x
    # Pad symmetrically with zeros to reach the length of time_y.
    # NOTE(review): integer halving can leave len(y) off by one relative to
    # time_y.size for odd differences -- confirm before plotting against time_y.
    num = int((time_y.size - time_x.size)/2);
    zero_arr = np.zeros(num);
    y = np.concatenate((zero_arr,y), axis = 0)
    y = np.concatenate((y,zero_arr), axis = 0)
    # Single-sided-style amplitude scaling of the DFT.
    y_fft = 2*abs(fft(y))/time_y.size
    freq = fftfreq(time_y.size,1/(f*fs))
    plt.figure(1)
    plt.plot(time_x, x)
    plt.title('x(t) = sin(2*pi*f*t)')
    plt.xlabel('Time')
    plt.ylabel('x(t)')
    plt.grid(True, which='both')
    plt.show()
    plt.figure(2)
    plt.plot(time_y, y)
    plt.title('y(t) = sin(2*pi*f*t)')
    plt.xlabel('Time')
    plt.ylabel('y(t)')
    plt.grid(True, which='both')
    plt.show()
    plt.figure(3)
    plt.plot(freq, y_fft)
    plt.title('Amplitude Spectrum of y')
    plt.xlabel('Frequency')
    plt.ylabel('Y(jw)')
    plt.grid(True, which='both')
    plt.show()
T = float(input("Input T: "))
T1 = float(input("Input T1: "))
fs = float(input("Input sampling frequency: "))
#f = float(input("Input frequency: "))
myctft(T,T1,fs) | shantanutyagi67/CT303_Labs | Lab 1/py files/q2.py | q2.py | py | 1,319 | python | en | code | 1 | github-code | 13 |
5772944665 | #!/usr/bin/python3
""" Lockboxes """
def canUnlockAll(boxes):
    """Return True if every box can be opened starting from box 0.

    boxes is a list of lists; boxes[i] holds the keys found in box i, and a
    key k opens box k. Box 0 starts unlocked.

    Fixes over the original:
    - no longer destructively mutates the caller's list (entries were being
      replaced with None);
    - removes the local variable that shadowed the function name;
    - an empty list of boxes is treated as vacuously unlockable (True).
    """
    n = len(boxes)
    if n == 0:
        return True
    # Breadth/depth-first traversal over reachable boxes.
    opened = {0}
    to_visit = [0]
    while to_visit:
        for key in boxes[to_visit.pop()]:
            # Ignore keys that do not correspond to a real box.
            if 0 <= key < n and key not in opened:
                opened.add(key)
                to_visit.append(key)
    return len(opened) == n
| HenryKenDephil/alx-interview | 0x01-lockboxes/0-lockboxes.py | 0-lockboxes.py | py | 794 | python | en | code | 0 | github-code | 13 |
36628629925 | #zad1
A=[1-x for x in range(1,11,1)]
print(A)
B=[4**x for x in range(0,8,1)]
print(B)
C=[x for x in B if x%2==0]
print(C)
#zad2
import random
lista1=[int(random.random()*100) for x in range(10)]
print(lista1)
lista2=[x for x in lista1 if x%2==0]
print(lista2)
#zad5
def pole_trapezu(a, b, h):
    """Trapezoid area ((a + b) * h) / 2.

    Prints a label and returns the area when it is positive; otherwise
    prints 'Error' and returns -1.
    """
    area = ((a + b) * h) / 2
    if area > 0:
        print('pole trapezu = ')
        return area
    print('Error')
    return -1
print(pole_trapezu(3,8,2))
print(pole_trapezu(6,10,4))
print(pole_trapezu(3,7,5))
#zad4
def prostokatny(a, b, c):
    """Return True if sides a, b, c form a right triangle.

    Any of the three sides may be the hypotenuse.

    Bug fix: the original's three branches were all algebraically the same
    test (c as hypotenuse: a**2 + b**2 == c**2), so e.g. (5, 3, 4) was
    wrongly rejected. Now every ordering is checked.
    """
    return (a**2 + b**2 == c**2
            or a**2 + c**2 == b**2
            or b**2 + c**2 == a**2)
print(prostokatny(3,4,5))
print(prostokatny(3,6,7)) | SnowKid99/WD_zad_lab3 | main.py | main.py | py | 765 | python | en | code | 0 | github-code | 13 |
14917653379 | import csv
def print_matrix(matrix):
    """Pretty-print a matrix, padding every column to the widest entry."""
    lengths = [len(str(value)) for row in matrix for value in row]
    cell_width = max([1] + lengths) + 1
    for row in matrix:
        for value in row:
            padding = cell_width - len(str(value))
            print(" " + str(value) + " " * padding + "|", end='', flush=True)
        print("")
def build_matrix(columns, rows):
    """Load the design matrix from 'my_train.csv' (tab-separated).

    Each output row is a bias term 1 followed by the first columns-1
    fields of the corresponding file row, parsed as floats.
    """
    matrix = [[1] * columns for _ in range(rows)]
    with open('my_train.csv', 'r') as data_file:
        reader = csv.reader(data_file, delimiter='\t')
        for row_index, record in enumerate(reader):
            for col in range(1, columns):
                matrix[row_index][col] = float(record[col - 1])
    return matrix
def build_t_matrix(columns, rows):
    """Load 'my_train.csv' transposed: entry [i][j] is field i of file row j.

    `columns` must be at least the number of lines in the file and `rows`
    at most the number of tab-separated fields per line; unfilled slots
    keep their initial value 1.

    Fix: removed the dead `temp_matrix` block from the original, which
    built a padded copy of the result and then discarded it.
    """
    matrix = [[1 for x in range(columns)] for y in range(rows)]
    j = 0
    with open('my_train.csv', 'r') as new_file:
        filewriter = csv.reader(new_file, delimiter='\t')
        for row in filewriter:
            for i in range(rows):
                matrix[i][j] = float(row[i])
            j += 1
    return matrix
def build_y_matrix(columns, rows, filename):
    """Load the target column vector: one row per file line, each holding
    the sixth tab-separated field (index 5) as a float in column 0."""
    matrix = [[0] * columns for _ in range(rows)]
    with open(filename, 'r') as data_file:
        reader = csv.reader(data_file, delimiter='\t')
        for line_index, record in enumerate(reader):
            matrix[line_index][0] = float(record[5])
    return matrix
def can_multiply(a, b):
    """True when a's column count equals b's row count (a @ b is defined)."""
    return len(a[0]) == len(b)
def multiply_matrix(a, b):
    """Return the matrix product a @ b as nested lists (no shape checks)."""
    inner = len(a[0])
    out_rows = len(a)
    out_cols = len(b[0])
    product = [[0] * out_cols for _ in range(out_rows)]
    for i in range(out_rows):
        for k in range(out_cols):
            # Dot product of row i of a with column k of b.
            product[i][k] = sum(a[i][j] * b[j][k] for j in range(inner))
    return product
36689093055 | import sys
import cx_Freeze
# cx_Freeze build configuration: bundle pygame + stdlib modules, exclude
# tkinter, and ship the game's image assets next to the executable.
build_exe_options = {"packages": ["os","pygame","codecs"], "excludes": ["tkinter"],"include_files" : ["zeroTurn.png","ThunderboltTurns.png","water.png"]}
# Freeze main.py into a standalone "TailGunner" executable.
cx_Freeze.setup( name = "TailGunner",
                version = "0.1",
                description = "GameProject",
                options = {"build_exe": build_exe_options},
                executables = [cx_Freeze.Executable("main.py")])
| AlexanderLuasan/Tailgunner | setup.py | setup.py | py | 400 | python | en | code | 0 | github-code | 13 |
24365786102 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class fang_list_item(Item):
    """Scrapy item for one entry of the new-home listing index page."""
    # Entity type
    item_type = Field()
    # Listing code (unique id of the development)
    newcode = Field()
    # Unique identifier [url]
    item_url = Field()
    # Development (property) name
    ad_name = Field()
    # Development address
    address = Field()
    # District the development belongs to
    ad_area = Field()
    # Sub-district / sector of the development
    ad_region = Field()
    # Position relative to the city ring roads
    loop_location = Field()
    # Sales status
    sales_status = Field()
    # Property category [residence, villa]
    property_category = Field()
    # Project features [budget housing, landscaped residence]
    features = Field()
    # Displayed listing price
    show_price = Field()
class house_detail_item(Item):
    """Scrapy item holding the full detail page of one development."""
    # Listing code
    newcode = Field()
    # Entity type
    item_type = Field()
    # Page link
    item_url = Field()
    # Development name
    house_adname = Field()
    # Building type
    building_type = Field()
    # Project features
    building_features = Field()
    # Decoration / fit-out state
    decoration = Field()
    # Property tenure (years)
    property_years = Field()
    # Position relative to the ring roads
    loop_location = Field()
    # Developer
    developer = Field()
    # Sales status
    sales_status = Field()
    # Opening (launch) date
    opening_time = Field()
    # Handover (delivery) date
    delivery_time = Field()
    # Sales office address
    sales_address = Field()
    # Presale permit
    # presale_permit = Field()
    # Land area
    land_area = Field()
    # Gross floor area
    build_area = Field()
    # Plot ratio (floor area ratio)
    volume_rate = Field()
    # Greening rate
    greening_rate = Field()
    # Parking spaces
    parking_count = Field()
    # Total number of buildings
    build_count = Field()
    # Total number of units
    house_count = Field()
    # Property management company
    property_company = Field()
    # Property management fee
    property_costs = Field()
    # Property management fee description
    property_costs_description = Field()
    # Floor layout details
    floor_description = Field()
    # Price quote date
    price_time = Field()
    # Average price
    price_unit = Field()
    # Starting price
    price_start = Field()
    # Price description
    price_description = Field()
    # Project introduction
    project_description = Field()
    # Nearby facilities: schools
    school = Field()
    # Nearby facilities: shopping malls
    market = Field()
    # Nearby facilities: hospitals
    hospital = Field()
    # Nearby facilities: banks
    bank = Field()
    # Other nearby facilities
    other = Field()
    # Nearby facilities: transport
    traffic = Field()
class house_info_item(Item):
    '''
    News/announcement record attached to a listing.
    '''
    # Entity type of the listing-news record
    item_type = Field()
    # News article link
    item_url = Field()
    # Listing code this news belongs to
    newcode = Field()
    # News date
    info_date = Field()
    # News title
    info_title = Field()
    # News body
    info_detail = Field()
class house_type_item(Item):
    '''
    Floor-plan (unit layout) information for a listing.
    '''
    # Floor-plan id
    house_type_id = Field()
    # Listing code this floor plan belongs to
    newcode = Field()
    # Floor-plan page link
    item_url = Field()
    # Status / entity type
    item_type = Field()
    # Floor-plan name
    house_type_name = Field()
    # Floor-plan address
    # house_type_url = Field()
    # Bedrooms
    house_type_room_cnt = Field()
    # Living rooms
    house_type_hall_cnt = Field()
    # Kitchens
    house_type_kitchen_cnt = Field()
    # Bathrooms
    house_type_toilet_cnt = Field()
    # Gross floor area
    house_type_size = Field()
    # Usable floor area
    house_type_living_size = Field()
    # Floor-plan description
    house_type_desc = Field()
    # Whether this is a flagship (main) layout
    house_type_ismain = Field()
    # Total price
    house_type_totalprice = Field()
    # Floor-plan status
    house_type_status = Field()
    # houseimageurl
    house_type_image_url = Field()
class house_photo_item(Item):
    '''
    Photo record attached to a listing.
    '''
    item_type = Field()
    item_url = Field()
    # Listing code this photo belongs to
    newcode = Field()
    house_photo_id = Field()
    # Image URL
    house_photo_url = Field()
    # Image category
    house_photo_type = Field()
    # Image title
    house_photo_title = Field()
    # Listing tag
    house_photo_tag = Field()
# Price history record for a listing.
class house_price(Item):
    item_type = Field()
    item_url = Field()
    newcode = Field()
    # Price quote date
    time = Field()
    # Average price
    avg_price = Field()
    # Price description
    price_desc = Field()
# Presale permit record for a listing.
class house_permit(Item):
    item_type = Field()
    item_url = Field()
    newcode = Field()
    # Permit number
    permit_no = Field()
    # Permit issue date
    permit_time = Field()
    # Permit description
    permit_desc = Field()
| chensource/BeginningPython | spider/fang_scrapy/fang_link/fang_link/items.py | items.py | py | 4,493 | python | en | code | 0 | github-code | 13 |
12393564398 | import torch
import torch.optim as optim
from torch.autograd import Variable
import torch.nn as nn
from torch.utils.data import DataLoader
class AutoEncoder:
    """Encoder/decoder pair trained end-to-end to reconstruct its input.

    The encoder and decoder are arbitrary torch modules; training minimizes
    the MSE between each batch and its reconstruction, with one Adam
    optimizer per module.
    """
    def __init__(self, encoder, decoder, use_cuda=True):
        """
        Args:
            encoder: module mapping samples to codes.
            decoder: module mapping codes back to sample space.
            use_cuda: when True, move modules, loss and data to the GPU.
        """
        self.enc = encoder
        self.dec = decoder
        self.use_cuda = use_cuda
        # Use MSELoss for now
        self.criterion = nn.MSELoss()
        if self.use_cuda:
            self.enc = self.enc.cuda()
            self.dec = self.dec.cuda()
            self.criterion = self.criterion.cuda()
        self.enc_optim = optim.Adam(self.enc.parameters())
        self.dec_optim = optim.Adam(self.dec.parameters())
    def train(self, data, epochs, batch_size, iters_per_log=1):
        '''
        Trains the Auto Encoder for some number of epochs.

        Args:
            data: array-like of shape (samples, sample_size).
            epochs: number of full passes over the data.
            batch_size: minibatch size for the DataLoader.
            iters_per_log: print the mean loss every this many steps.
        '''
        self.data = DataLoader(torch.FloatTensor(data), batch_size=batch_size, shuffle=True)
        tot_loss = 0
        log_iters_count = 0
        for epoch in range(epochs):
            for cur_iter, data_batch in enumerate(self.data):
                # Zero out the gradients
                self.enc.zero_grad()
                self.dec.zero_grad()
                data_samples = Variable(data_batch)
                if self.use_cuda:
                    data_samples = data_samples.cuda()
                # An autoencoder's target is the input itself (already on the
                # right device; the original moved it to CUDA a second time).
                target = data_samples
                # Call the modules directly so forward hooks run, instead of
                # invoking .forward() explicitly.
                encoding = self.enc(data_samples)
                recon = self.dec(encoding)
                loss = self.criterion(recon, target)
                loss.backward()
                self.enc_optim.step()
                self.dec_optim.step()
                # BUG fix: loss.data[0] raises on 0-dim tensors in modern
                # PyTorch (>= 0.5); item() is the supported scalar accessor.
                tot_loss += loss.item()
                log_iters_count += 1
                if log_iters_count % iters_per_log == 0:
                    print('Epoch {} Iter {}'.format(epoch, cur_iter))
                    print('Loss', tot_loss / iters_per_log)
                    tot_loss = 0
    def reconstruct(self, samples):
        """
        Reconstruct some samples.

        Args:
            @samples Should be a numpy array of size (samples, sample_size).
        """
        return self._reconstruct(Variable(torch.FloatTensor(samples)))
    def _reconstruct(self, samples):
        """
        Reconstruct some samples.

        Args:
            @samples Should be a torch Variable of size (samples, sample_size).
        """
        if self.use_cuda:
            samples = samples.cuda()
        return self.dec(self.enc(samples))
    def encode(self, samples):
        """Encode array-like samples into their latent codes."""
        samples = Variable(torch.FloatTensor(samples))
        if self.use_cuda:
            samples = samples.cuda()
        return self.enc(samples)
    def decode(self, codes):
        """Decode array-like latent codes back into sample space."""
        codes = Variable(torch.FloatTensor(codes))
        if self.use_cuda:
            codes = codes.cuda()
        return self.dec(codes)
    def save_state_dict(self, path):
        """Save both module state dicts to *path* via torch.save."""
        param_dict = {'enc': self.enc.state_dict(), 'dec': self.dec.state_dict()}
        torch.save(param_dict, path)
    def load_state_dict(self, path):
        """Load module state dicts previously written by save_state_dict."""
        param_dict = torch.load(path)
        self.enc.load_state_dict(param_dict['enc'])
        self.dec.load_state_dict(param_dict['dec'])
| cheng-xie/motionEncode | autoencoder/autoencoder.py | autoencoder.py | py | 3,720 | python | en | code | 0 | github-code | 13 |
21860634730 | import json
from urllib.parse import urlparse, urlunsplit
from urllib.request import Request, urlopen
TIKTOK_VM = "https://vm.tiktok.com"
def follow_url(url):
    """Fetch *url*, following redirects, and return the final URL.

    Fixes a resource leak: the original never closed the HTTP response.
    """
    request = Request(url)
    with urlopen(request) as response:
        return response.geturl()
def resolve_tiktok(url):
    """Resolve a short TikTok link to the final URL of its canonical page."""
    redirected = follow_url(url)
    pieces = urlparse(redirected)
    # Drop the query string and fragment before following the clean page URL.
    stripped = urlunsplit((pieces.scheme, pieces.netloc, pieces.path, "", ""))
    return follow_url(stripped)
def response(status, body):
    """Build an API-Gateway-style response dict with a JSON-encoded body."""
    payload = json.dumps(body)
    result = {'statusCode': status}
    result["headers"] = {"Content-Type": "application/json"}
    result['body'] = payload
    return result
def lambda_handler(event, context):
    """AWS Lambda entry point: resolve a vm.tiktok.com short link.

    Expects the link in event['queryStringParameters']['url']; returns a
    400 response for missing/invalid URLs, 200 with the resolved URL
    otherwise.
    """
    # Dead code removed: the original assigned status = 400 and never read it.
    try:
        url = event["queryStringParameters"]["url"]
    except (KeyError, TypeError):
        # TypeError also covers queryStringParameters being None, which
        # API Gateway sends when no query string is present.
        return response(400, "Invalid Tiktok URL")
    if not url.startswith(TIKTOK_VM):
        return response(400, "Invalid Tiktok URL")
    safe_url = resolve_tiktok(url)
    return response(200, safe_url)
| jjdelc/verbose-happiness | lambda.py | lambda.py | py | 1,000 | python | en | code | 0 | github-code | 13 |
45740039214 | # Ciholas, Inc. - www.ciholas.com
# Licensed under: creativecommons.org/licenses/by/4.0
# System libraries
import pyqtgraph as pg
from pyqtgraph.Qt import QtWidgets, QtCore
from functools import partial
# Local libraries
from cdp import BootloadProgress
from network_objects import *
from settings import *
from generic_plots import *
class PlotBootloadProgress(QtWidgets.QMainWindow):
    """Main window listing, per target device, bootload-progress packet
    counts and frequencies, with click-through to a per-device sub-window."""
    # CDP packet type this window consumes (shadows the builtin 'type').
    type = BootloadProgress.type
    def __init__(self, serial):
        """Build the scrollable grid UI and start the refresh timer.

        Args:
            serial: serial number of the network node whose packets we read.
        """
        QtWidgets.QMainWindow.__init__(self)
        self.serial = serial
        self.central = QtWidgets.QScrollArea()
        self.central.setWidgetResizable(True)
        self.central_inner_widget = QtWidgets.QWidget()
        self.grid_layout = QtWidgets.QGridLayout()
        self.setWindowTitle("CUWB Monitor- Bootload Progress")
        # Per-device sub-windows, keyed by target serial number.
        self.sub_windows ={}
        # Number of distinct target devices seen so far (one grid row each).
        self.id_total = 0
        # Packet count already consumed from the node's deque.
        self.prev_count = 0
        # Grid widgets keyed by row index.
        self.from_id_id_labels = {}
        self.from_id_count_labels = {}
        self.from_id_freq_labels = {}
        self.from_id_enable_checks = {}
        # Per-target packet counters and frequency sample deques.
        self.from_id_count = {}
        self.from_id_frequency_deques = {}
        # Sorted array of known target serial numbers.
        self.from_ids = np.array([])
        self.grid_layout.addWidget(QtWidgets.QLabel("Serial#"), 0, 0)
        self.grid_layout.addWidget(QtWidgets.QLabel("Packet Count"), 0, 1)
        self.grid_layout.addWidget(QtWidgets.QLabel("Frequency"), 0, 2)
        self.grid_layout.addWidget(QtWidgets.QLabel("Print"), 0, 3)
        self.running = True
        self.timer = self.startTimer(QPLOT_FREQUENCY)
        self.updateLabels()
        #This allows for a dynamic window size where the number of serials already in the window after
        #one pass affects the size of the serial choice window.
        row_height = 20
        self.resize(400, row_height+(row_height * len(self.from_id_id_labels)))
        self.central_inner_widget.setLayout(self.grid_layout)
        self.central.setWidget(self.central_inner_widget)
        self.setCentralWidget(self.central)
    def updateLabels(self):
        """Consume newly arrived packets, grow the grid for unseen targets,
        and refresh the count/frequency labels."""
        if BootloadProgress.type in UwbNetwork.nodes[self.serial].cdp_pkts_count:
            # Number of packets received since the last refresh, capped at
            # the node's deque capacity.
            _current_size = UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type] - self.prev_count
            if _current_size > 1000: _current_size = 1000
            self.prev_count = UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type]
            for idx in range(_current_size):
                # Negative index walks the newest _current_size packets oldest-first.
                _target_id = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size].serial_number.as_int
                if not (_target_id in self.from_ids):
                    # First packet from this target: allocate widgets and a row.
                    self.from_id_id_labels.update([(self.id_total, QtWidgets.QLabel())])
                    self.from_id_count_labels.update([(self.id_total, QtWidgets.QLabel())])
                    self.from_id_freq_labels.update([(self.id_total, QtWidgets.QLabel())])
                    self.from_id_enable_checks.update([(self.id_total, QtWidgets.QCheckBox())])
                    self.from_id_count.update([(_target_id, 0)])
                    self.from_id_frequency_deques.update([(_target_id, deque([], FREQUENCY_CALCULATION_DEQUE_LENGTH))])
                    self.from_ids = np.sort(np.append(self.from_ids, _target_id))
                    _row = self.id_total
                    _column = 0
                    self.grid_layout.addWidget(self.from_id_id_labels[self.id_total], _row + 1, _column + 0)
                    self.grid_layout.addWidget(self.from_id_count_labels[self.id_total], _row + 1, _column + 1)
                    self.grid_layout.addWidget(self.from_id_freq_labels[self.id_total], _row + 1, _column + 2)
                    self.grid_layout.addWidget(self.from_id_enable_checks[self.id_total], _row + 1, _column + 3)
                    # NOTE(review): _column is always 0 here, so this header
                    # branch appears unreachable -- confirm before removing.
                    if _column > 0:
                        _row = 2
                        self.grid_layout.addWidget(QtWidgets.QLabel("Serial#"), _row, _column + 0)
                        self.grid_layout.addWidget(QtWidgets.QLabel("Packet Count"), _row, _column + 1)
                        self.grid_layout.addWidget(QtWidgets.QLabel("Frequency"), _row, _column + 2)
                        self.grid_layout.addWidget(QtWidgets.QLabel("Print"), _row, _column + 3)
                    self.id_total += 1
                self.from_id_count[_target_id] += 1
                if _target_id in self.from_ids:
                    # Row index = position of the id in the sorted array.
                    _row = np.where(self.from_ids==_target_id)[0][0]
                    if self.from_id_enable_checks[_row].isChecked():
                        print(UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size])
                if _target_id in self.sub_windows.keys():
                    # Forward the packet to the open per-device sub-window.
                    _packet = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][idx - _current_size]
                    self.sub_windows[_target_id].updateLabels(_packet)
            # Record (count, timestamp) samples used for frequency estimation.
            for _target_id in self.from_ids:
                self.from_id_frequency_deques[_target_id].append((self.from_id_count[_target_id], time.monotonic()))
            for _row in range(self.id_total):
                _target_id = int(self.from_ids[_row])
                if self.from_id_id_labels[_row].text() != '0x{:08X}'.format(_target_id):
                    # Row content changed (new id sorted in): relabel and rebind click.
                    self.from_id_id_labels[_row].setText('0x{:08X}'.format(_target_id))
                    self.from_id_id_labels[_row].setStyleSheet(GetClickableColor())
                    self.from_id_id_labels[_row].mouseReleaseEvent = partial(self.labelClickEvent, _target_id)
                _freq = UwbNetwork.nodes[self.serial].calculate_frequency(self.from_id_frequency_deques[_target_id])
                self.from_id_count_labels[_row].setText('{:5d}'.format(self.from_id_count[_target_id]))
                self.from_id_freq_labels[_row].setText('{:5.1f}Hz'.format(_freq))
    def labelClickEvent(self, item_serial, e):
        """Open the per-device sub-window, seeding it with the most recent
        packet from that device (searching newest to oldest)."""
        self.sub_windows[item_serial] = PlotBootloadProgressSubWindow(item_serial, self)
        if UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type] != 0:
            index = -1
            while -index < UwbNetwork.nodes[self.serial].cdp_pkts_count[self.type]:
                packet = UwbNetwork.nodes[self.serial].cdp_pkts[self.type][index]
                if packet.serial_number.as_int == item_serial:
                    self.sub_windows[item_serial].updateLabels(packet)
                    break
                index -= 1
        self.sub_windows[item_serial].show()
    def timerEvent(self, e):
        """Periodic refresh; tears the window down once the network stops."""
        if not UwbNetwork.running:
            self.killTimer(self.timer)
            self.running = False
            self.close()
            return
        if self.running:
            self.updateLabels()
        else:
            self.killTimer(self.timer)
            self.close()
    def closeEvent(self, e):
        """Stop the timer and close any open sub-windows before closing."""
        self.killTimer(self.timer)
        self.running = False
        for window in self.sub_windows.values():
            if window.isVisible():
                window.close()
        self.close()
    def reset(self):
        """Propagate a monitor-wide reset to every sub-window."""
        for window in self.sub_windows.values():
            window.reset()
class PlotBootloadProgressSubWindow(QtWidgets.QMainWindow):
    """Per-device window showing bootload sector-flag progress as an image
    strip plus signal/timing/completion labels."""
    def __init__(self, device_serial, parent):
        """
        Args:
            device_serial: serial number of the device being bootloaded.
            parent: the PlotBootloadProgress window that feeds us packets.
        """
        QtWidgets.QMainWindow.__init__(self)
        self.central = QtWidgets.QWidget()
        self.grid_main = QtWidgets.QGridLayout()
        self.central.setLayout(self.grid_main)
        self.setCentralWidget(self.central)
        self.parent = parent
        self.setWindowTitle("CUWB Monitor- Bootload Progress ID 0x{:08X}".format(device_serial))
        self.device_serial = device_serial
        length = 400
        width = 180
        self.resize(length, width)
        self.running = True
        self.timer = self.startTimer(QPLOT_FREQUENCY)
        self.createLayout()
    def createLayout(self):
        """Build the four grid rows: flag-progress strip, signal/time,
        sector stats, and completion percentage."""
        self.bootload_widgets = []
        grid_rows = []
        first_row = 0
        total_rows = 4
        createRows(self.bootload_widgets, grid_rows, self.grid_main, first_row, total_rows)
        curr_row = 0
        spacer_size = 20
        #Row 0
        graphics = pg.GraphicsLayoutWidget(show = False)
        graphics.setFixedSize(380.0,70.0)
        grid_rows[curr_row].addWidget(graphics, 0, 0)
        grid_rows[curr_row].addItem(QtWidgets.QSpacerItem(spacer_size, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum), 0, 1)
        graphics.ci.setContentsMargins(0,0,0,0)
        self.flags_progress = pg.ImageItem()
        #Pyqtgraph color schemes made it to where the unset flags are 1 and set flags are 0
        self.flags_progress.setImage(np.ones((200,1)))
        flags_viewer = pg.ViewBox() if False else graphics.addViewBox()
        flags_viewer.setMouseEnabled(False, False)
        flags_viewer.setAspectLocked(False)
        #flags_viewer.setRange(rect = graphics.ci.boundingRect())
        flags_viewer.addItem(self.flags_progress)
        color_scheme = pg.colormap.get("CET-D3")
        flags_color = pg.ColorBarItem(values = (0,1), colorMap = color_scheme)
        flags_color.setImageItem(self.flags_progress)
        curr_row += 1
        #Row 1
        grid_rows[curr_row].addWidget(QtWidgets.QLabel("Last Signal's Strength: "), 0, 0)
        self.last_packet_rssi_label = QtWidgets.QLabel("?")
        grid_rows[curr_row].addWidget(self.last_packet_rssi_label, 0, 1)
        grid_rows[curr_row].addItem(QtWidgets.QSpacerItem(spacer_size, 0), 0, 2)
        grid_rows[curr_row].addWidget(QtWidgets.QLabel("Last Heard Packet Time: "), 0, 3)
        self.last_packet_time_label = QtWidgets.QLabel("?")
        grid_rows[curr_row].addWidget(self.last_packet_time_label, 0, 4)
        grid_rows[curr_row].addItem(QtWidgets.QSpacerItem(spacer_size, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum), 0, 5)
        curr_row += 1
        #Row 2
        grid_rows[curr_row].addWidget(QtWidgets.QLabel("Max # of Sectors per Flag: "), 0, 0)
        self.max_sectors_label = QtWidgets.QLabel("?")
        grid_rows[curr_row].addWidget(self.max_sectors_label, 0, 1)
        grid_rows[curr_row].addItem(QtWidgets.QSpacerItem(spacer_size, 0), 0, 2)
        grid_rows[curr_row].addWidget(QtWidgets.QLabel("Last Max Sector Flag: "), 0, 3)
        self.last_max_sector_label = QtWidgets.QLabel("?")
        grid_rows[curr_row].addWidget(self.last_max_sector_label, 0, 4)
        grid_rows[curr_row].addItem(QtWidgets.QSpacerItem(spacer_size, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum), 0, 5)
        curr_row += 1
        #Row 3
        grid_rows[curr_row].addWidget(QtWidgets.QLabel("Completion: "), 0, 0)
        self.sectors_complete_label = QtWidgets.QLabel("?")
        grid_rows[curr_row].addWidget(self.sectors_complete_label, 0, 1)
        grid_rows[curr_row].addItem(QtWidgets.QSpacerItem(spacer_size, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum), 0, 2)
    def updateLabels(self, packet):
        """Refresh all labels and the flag-progress image from *packet*."""
        MAX_SIGNAL_STRENGTH = 255
        self.last_packet_rssi_label.setText("{}%".format(((packet.last_received_total_path_rssi/MAX_SIGNAL_STRENGTH)*100)))
        self.last_packet_time_label.setText("{}s".format(packet.last_heard_packet_time))
        self.max_sectors_label.setText("{}".format(packet.max_sectors_per_flag))
        self.last_max_sector_label.setText("{}".format(packet.last_max_sector_flag))
        self.sectors_complete_label.setText("{}%".format(packet.percentage))
        flags = []
        #Parse each byte down into a list of bool and then if the boolean is true that means the flag is set
        for byte in packet.flags:
            flags_of_byte = [bool(byte & (1<<n)) for n in range(8)]
            for flag in flags_of_byte:
                #Pyqtgraph color schemes made it to where the unset flags are 1 and set flags are 0
                if flag:
                    flags.append([0])
                else:
                    flags.append([1])
        flags_array = np.array(flags)
        self.flags_progress.setImage(flags_array)
    def timerEvent(self, e):
        """Close this window when the network or the parent window stops."""
        if not UwbNetwork.running or not self.parent.running:
            self.killTimer(self.timer)
            self.close()
            return
    def closeEvent(self, e):
        """Stop the refresh timer before the window closes."""
        self.killTimer(self.timer)
        self.close()
    def reset(self):
        """Return every label and the progress image to its initial state."""
        self.last_packet_rssi_label.setText("?")
        self.last_packet_time_label.setText("?")
        self.max_sectors_label.setText("?")
        self.last_max_sector_label.setText("?")
        self.sectors_complete_label.setText("0%")
        self.flags_progress.setValue(0)
| ciholas/cuwb-monitor | libs/plots/public/individual_plots/plot_bootload_progress.py | plot_bootload_progress.py | py | 12,458 | python | en | code | 0 | github-code | 13 |
73175247378 | import subprocess
out = "/home/user/out"
def checkout(cmd, text):
    """Run *cmd* in a shell and report whether it succeeded with *text* in
    its stdout.

    Returns True when the exit code is 0 and *text* appears in stdout,
    False otherwise.
    """
    result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, encoding='utf-8')
    # Direct boolean expression replaces the if/else returning True/False.
    return result.returncode == 0 and text in result.stdout
def checkout_negative(cmd, text):
    """Run *cmd* in a shell and report whether it FAILED while emitting *text*.

    Returns True when the exit code is non-zero and *text* appears in
    stdout or stderr, False otherwise.
    """
    result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
    # Direct boolean expression replaces the if/else returning True/False.
    return result.returncode != 0 and (text in result.stdout or text in result.stderr)
def get_crc32(dir: str):
    """Return the CRC32 checksum of '<dir>/arx2.7z' as printed by the
    external ``crc32`` tool, or None when the command fails.

    SECURITY NOTE(review): *dir* is interpolated into a shell command;
    it must come from a trusted source.
    """
    res2_crc = subprocess.run("crc32 {}/arx2.7z".format(dir), shell=True, stdout=subprocess.PIPE, encoding='utf-8')
    if res2_crc.returncode == 0:
        # BUG fix: the original returned stdout[-1] -- the last *character*
        # of the output (usually the trailing newline) -- instead of the
        # checksum text itself.
        return res2_crc.stdout.strip()
    else:
        return None
| ludmila0704/pythonProject_AUTO_LINUXX | checker.py | checker.py | py | 814 | python | en | code | 0 | github-code | 13 |
35216342264 | from django.forms import ModelForm
from rakes.models import Rake, Module
class RakeForm(ModelForm):
    """ModelForm for creating/editing a Rake and its nine module slots."""
    class Meta:
        model = Rake
        # Editable fields: the rake name plus its nine module references.
        fields = ['RakeName', 'Module1', 'Module2',
                  'Module3', 'Module4', 'Module5', 'Module6', 'Module7', 'Module8', 'Module9', ]
class ModuleForm(ModelForm):
    """ModelForm for creating/editing a Module and its five wagon numbers."""
    class Meta:
        model = Module
        # Editable fields: the module name plus its five wagon numbers.
        fields = ['ModuleName', 'Wagon1Number', 'Wagon2Number',
                  'Wagon3Number', 'Wagon4Number', 'Wagon5Number']
| vinaykumar1908/082021i | rakes/forms.py | forms.py | py | 493 | python | en | code | 0 | github-code | 13 |
17041680154 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsSceneCouponSendModel(object):
    """Request model for sending an insurance-scene coupon.

    A plain data holder whose fields serialize to and from the Alipay
    wire-format dict; falsy fields are omitted on serialization.
    """
    # Serializable fields, in wire-format output order.
    _FIELD_NAMES = (
        'channel_user_id',
        'channel_user_source',
        'dimension_id',
        'dimension_type',
        'market_type',
        'out_biz_no',
        'service_scenario',
    )

    def __init__(self):
        # Every field starts unset; None values are skipped by to_alipay_dict.
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    @property
    def channel_user_id(self):
        return self._channel_user_id

    @channel_user_id.setter
    def channel_user_id(self, value):
        self._channel_user_id = value

    @property
    def channel_user_source(self):
        return self._channel_user_source

    @channel_user_source.setter
    def channel_user_source(self, value):
        self._channel_user_source = value

    @property
    def dimension_id(self):
        return self._dimension_id

    @dimension_id.setter
    def dimension_id(self, value):
        self._dimension_id = value

    @property
    def dimension_type(self):
        return self._dimension_type

    @dimension_type.setter
    def dimension_type(self, value):
        self._dimension_type = value

    @property
    def market_type(self):
        return self._market_type

    @market_type.setter
    def market_type(self, value):
        self._market_type = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def service_scenario(self):
        return self._service_scenario

    @service_scenario.setter
    def service_scenario(self, value):
        self._service_scenario = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a dict, recursing into values that
        themselves expose to_alipay_dict()."""
        params = dict()
        for key in self._FIELD_NAMES:
            value = getattr(self, key)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[key] = value.to_alipay_dict()
                else:
                    params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire-format dict; returns None for a
        falsy input."""
        if not d:
            return None
        o = AlipayInsSceneCouponSendModel()
        for key in AlipayInsSceneCouponSendModel._FIELD_NAMES:
            if key in d:
                setattr(o, key, d[key])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayInsSceneCouponSendModel.py | AlipayInsSceneCouponSendModel.py | py | 4,314 | python | en | code | 241 | github-code | 13 |
40281442883 | #take an input
a = input("Enter a number")
b = input("Enter a operator ")
c = input("Enter num2")
# convert input into integer
a = int(a)
c = int(c)
#checking b
if b == '/':
print( a / c)
elif b == '*':
print(a * c)
elif b == '+':
print(a + c)
elif b == '-':
print(a - c)
else:
print("Invalid Operation")
#Thankyou
print('Thankyou for using the calculator')
| sohamthalpati/python | calculator.py | calculator.py | py | 389 | python | en | code | 2 | github-code | 13 |
71223538897 | #!/usr/bin/env python3
# Advent of Code 2022 day 15: sensor/beacon coverage via Manhattan distance.
import sys, re, collections, pprint
# Each input line yields [sensor_x, sensor_y, beacon_x, beacon_y].
with open(sys.argv[1]) as f:
    data = [ list(map(int,re.findall(r'-?\d+', l))) for l in f ]
# Manhattan distance between points p and q.
dist = lambda p,q: (abs(p[0]-q[0]) + abs(p[1]-q[1]))
# grid[y] collects [x1, x2] coverage intervals on row y.
grid = collections.defaultdict(lambda: [])
for sx,sy,bx,by in data:
    d = dist((sx,sy),(bx,by))
    # Each sensor covers a diamond of radius d; record its horizontal
    # slice on the rows above and below the sensor.
    for i in range(d+1):
        ay1, ay2 = sy - i, sy + i
        ax1, ax2 = sx - d + i, sx + d - i
        grid[ay1].append([ax1,ax2])
        grid[ay2].append([ax1,ax2])
# Merge overlapping or adjacent intervals on every row.
for y in grid:
    if len(grid[y]) < 2: continue
    merged = []
    for interval in sorted(grid[y]):
        if merged and interval[0]-1 <= merged[-1][1]: merged[-1][1] = max(merged[-1][1],interval[1])
        else: merged.append(interval)
    grid[y] = merged
row = int(sys.argv[2])
# Part 1: the target row is assumed to have merged into a single interval.
print(f"Part 1: y={row}, positions={grid[row][0][1] - grid[row][0][0]}")
# Part 2: the hidden beacon sits in a one-cell gap between two intervals
# on some row inside the 0..4,000,000 search square.
for y in grid:
    if not 0 <= y <= 4_000_000: continue
    if len(grid[y]) < 2: continue
    for i in range(1,len(grid[y])):
        x = grid[y][i-1][1]+1
if grid[y][i][0]-1 == x: print(f"Part 2: beacon={x},{y} freq={x*4_000_000+y}") | ivanpesin/aoc | 2022/2022.15/sol.py | sol.py | py | 1,088 | python | en | code | 0 | github-code | 13 |
43070090563 | # coding:utf-8
import tensorflow as tf
# TensorFlow 1.x graph-mode example: a two-layer linear network where the
# input is fed through a placeholder at session run time.
# input and weight using placeholder
x = tf.placeholder(tf.float32, [1, 2])
# Weights drawn from a normal distribution with a fixed seed so the run
# is reproducible.
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
# Forward pass: (1x2) @ (2x3) -> (1x3) @ (3x1) -> (1x1).
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # feed dict to x and get y
    print("y in tf3_4.py is:\n", sess.run(y, feed_dict={x: [[0.7, 0.5]]}))
| caoshen/ai-practice-tf-notes | tf/tf3_4.py | tf3_4.py | py | 486 | python | en | code | 0 | github-code | 13 |
73333818898 | from unittest import result
from django.shortcuts import render
from flask import Flask, request, render_template, url_for, flash, redirect
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user
import datetime
import sqlite3
import pandas as pd
import pandas.io.sql as psql
#postgres://qubbkhqdeylkex:28b87a762a0fe1dd841f224df2caa5408c99ffe03fa9b47d60155379ad0e4101@ec2-52-204-157-26.compute-1.amazonaws.com:5432/dfn5omign98a33
#"dbname=postgres user=postgres password=Aukors123"
def get_db_connection():
    """Open the limonchan SQLite database.

    Returns:
        (connection, cursor) pair; the caller is responsible for
        committing and closing the connection.
    """
    connection = sqlite3.connect("limonchan.db")
    cursor = connection.cursor()
    return connection, cursor
def createtable(name):
    """Placeholder for per-board table creation -- no DDL is issued yet.

    NOTE(review): the original opened a connection, created nothing, and
    leaked the connection; the leak is fixed but the schema for *name*
    still needs to be implemented.
    """
    connect, conn = get_db_connection()
    connect.close()
def deleteuser(name):
    """Delete the row for *name* from the USERS table.

    Uses a parameterized query -- the original interpolated *name* into
    the SQL string, which allowed SQL injection.
    """
    connect, conn = get_db_connection()
    conn.execute("DELETE FROM USERS WHERE username = ?", (name,))
    connect.commit()
    connect.close()
def deleteboard(board):
    """Remove *board* from BOARDS and drop its per-board table.

    The DELETE value is now bound as a parameter (the original allowed SQL
    injection). The table name in DROP TABLE cannot be parameterized, so
    *board* must still come from a trusted/validated source.
    """
    connect, conn = get_db_connection()
    conn.execute("DELETE FROM BOARDS WHERE board = ?", (board,))
    # SECURITY: identifiers cannot be bound as parameters -- validate
    # *board* upstream before it reaches this statement.
    conn.execute(f"DROP TABLE {board}")
    connect.commit()
    connect.close()
def gettables():
    """Return the names of all tables in the SQLite database.

    BUG fix: the original queried information_schema.tables (a PostgreSQL
    catalog left over from an earlier Postgres backend), which does not
    exist in SQLite and raised sqlite3.OperationalError.
    """
    connect, conn = get_db_connection()
    conn.execute("SELECT name FROM sqlite_master WHERE type = 'table'")
    tables = [row[0] for row in conn.fetchall()]
    connect.close()
    return tables
def table(tablename):
    """Load the full contents of *tablename* into a pandas DataFrame.

    Dead code removed: the original also called gettables() and built a
    'tables' list it never used (and which crashed under SQLite).

    SECURITY: the table name is interpolated into the SQL -- identifiers
    cannot be parameterized, so *tablename* must be trusted/validated.
    """
    connect, conn = get_db_connection()
    my_table = psql.read_sql(f'select * from {tablename}', connect)
    connect.close()
    return my_table
def altering(tablename,columnname):
    """Add a TEXT column *columnname* to *tablename*.

    SECURITY NOTE(review): both identifiers are interpolated into the DDL
    string (identifiers cannot be bound as parameters) -- they must come
    from a trusted source.
    """
    connect, conn = get_db_connection()
    conn.execute(f"ALTER TABLE {tablename} ADD {columnname} TEXT")
    connect.commit()
    connect.close()
def updatetable(tablename, creator, boardname):
    """Set CREATOR = *creator* on the row of *tablename* whose board equals
    *boardname*.

    Values are bound as parameters (the original interpolated them,
    allowing SQL injection). The table name itself cannot be parameterized
    and must be trusted.
    """
    connect, conn = get_db_connection()
    conn.execute(f"UPDATE {tablename} SET CREATOR = ? WHERE board = ?", (creator, boardname))
    connect.commit()
    connect.close()
def makeadmin(username):
    """Grant admin rights to *username*.

    Uses a parameterized query -- the original interpolated *username*
    into the SQL string, which allowed SQL injection.
    """
    connect, conn = get_db_connection()
    conn.execute("UPDATE users SET isadmin = 'yes' WHERE username = ?", (username,))
    connect.commit()
    connect.close()
| taylananas/limonchan | sitefund.py | sitefund.py | py | 2,356 | python | en | code | 0 | github-code | 13 |
73050117459 | import socket
import threading
def receive_messages(client_socket):
    """Print incoming chat messages until the peer disconnects, then close
    the socket.

    Fixes two defects in the original:
    - recv() returning '' (peer closed) previously left the loop spinning
      forever on a closed connection; we now break.
    - the bare ``except:`` is narrowed to the exceptions recv/decode can
      actually raise.
    """
    while True:
        try:
            message = client_socket.recv(1024).decode('utf-8')
        except (OSError, UnicodeDecodeError):
            break
        if not message:
            # Empty result means the peer performed an orderly shutdown.
            break
        print('Message reçu :', message)
    client_socket.close()
def start_chat_client():
    """Connect to the local chat server, start a reader thread, and loop
    forever relaying console input to the server.

    NOTE(review): the final close() is unreachable because the input loop
    never breaks.
    """
    host = '127.0.0.1' # server IP address
    port = 8000 # server port
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect((host, port))
    # Background thread prints everything the server sends.
    receive_thread = threading.Thread(target=receive_messages, args=(client_socket,))
    receive_thread.start()
    while True:
        message = input("Votre message : ")
        client_socket.sendall(message.encode('utf-8'))
    client_socket.close()
if __name__ == '__main__':
start_chat_client() | tgbhy/python | onlinechat/client.py | client.py | py | 855 | python | en | code | 0 | github-code | 13 |
19455764072 | from FileOperations import *
from Find_Domain import *
from Crawling import crawling
# Interactive entry point: ask for a seed URL, set up the working folder
# and bookkeeping files, then crawl breadth-first within the seed's domain.
url = str(input("Please enter the url here: "))
directoryName = getDomainName(url)
domainName = directoryName
makeFolder(directoryName)
# toCrawl holds discovered-but-unvisited links; crawled holds visited ones.
toCrawlPath,crawledPath,backup_crawledPath = makeFiles(directoryName,url)
chooseMethod = int(input("Which method you wish to use to fetch HTML:\n1 for urllib\n2 for requests (Recommended)\nEnter your choice here: "))
mainSet = set()
crawledSet = set()
crawledSet.add(url)
print("Crawling: \n"+url)
set_to_file(crawledSet,crawledPath)
# Seed crawl: fetch the start URL and queue the links it contains.
crawling(chooseMethod,url,url,toCrawlPath)
while (True):
    # Pull the current frontier from disk and clear the queue file.
    mainSet = file_to_set(toCrawlPath)
    make_file_empty(toCrawlPath)
    # Terminating condition: nothing left to visit.
    if(len(mainSet) == 0):
        print("All links crawled.")
        break
    # Visit each frontier link, skipping external domains and duplicates.
    for linkInSet in mainSet:
        if domainName != getDomainName(linkInSet):
            print("Found link to external site..")
            continue
        if linkInSet in crawledSet:
            print("Duplicate link found..")
            continue
        crawling(chooseMethod,url,linkInSet,toCrawlPath)
        crawledSet.add(linkInSet)
        print(linkInSet)
    # Rotate crawled.txt: keep the previous pass as a backup, then write
    # the updated crawled set.
    # NOTE(review): 'os' is presumably provided by the star import from
    # FileOperations -- confirm it is re-exported there.
    if os.path.isfile(backup_crawledPath):
        os.remove(backup_crawledPath) #Delete old backup
    os.rename(crawledPath,backup_crawledPath) #Backup created
    make_file_empty(crawledPath) #Delete the content of crawled.txt
    set_to_file(crawledSet,crawledPath) #Write new content
| VaibhavDN/Crawler | CrawlerMain.py | CrawlerMain.py | py | 1,640 | python | en | code | 0 | github-code | 13 |
6337508322 | '''
User Story 10:
Marriage after 14
'''
from datetime import datetime, timedelta
# Map GEDCOM three-letter month abbreviations to zero-padded month numbers.
# NOTE(review): not referenced by the functions below -- presumably used by
# callers that normalize GEDCOM dates; verify before removing.
monthWordToInt = {
    "JAN": "01",
    "FEB": "02",
    "MAR": "03",
    "APR": "04",
    "MAY": "05",
    "JUN": "06",
    "JUL": "07",
    "AUG": "08",
    "SEP": "09",
    "OCT": "10",
    "NOV": "11",
    "DEC": "12",
}
def marriageAfter14FamParse(input, newFam):
    """Pair family rows with their line numbers, keeping married families.

    Args:
        input: family rows; index 2 is the marriage date ("NA" when
            unmarried) and indexes 0, 1, 3 and 5 are copied through.
        newFam: parallel rows whose index 0 is the source line number.

    Returns:
        One [line, row[0], row[1], row[3], row[5]] record per family whose
        marriage date is not "NA".
    """
    indivs = []
    for i, b in zip(input, newFam):
        if i[2] != "NA":
            # Build the record in one literal instead of five appends; the
            # original also carried a redundant pre-loop fams = [] and a
            # commented-out debug print, both removed.
            indivs.append([b[0], i[0], i[1], i[3], i[5]])
    return indivs
def checkIndividuals(indivs, familyparsed):
    """US10: report individuals whose marriage occurred before age 15.

    Args:
        indivs: individual rows; index 0 is the id, index 3 the birth date.
        familyparsed: records from marriageAfter14FamParse --
            [line, fam_id, marriage_date, husband_id, wife_id].

    Returns:
        The list of error strings (also printed as they are found).
    """
    errors = []
    for i in indivs:
        for j in familyparsed:
            # Match this individual as either spouse of the family record.
            if j[3] == i[0] or j[4] == i[0]: #check logic
                if i[3] != "NA": #if has a birthday
                    try:
                        birthDate = datetime.strptime(i[3], '%Y-%m-%d')
                        marriageDate = datetime.strptime(j[2], '%Y-%m-%d')
                    except ValueError:
                        # NOTE(review): on unparseable dates both values are
                        # forced to 2018-01-01, which always satisfies the
                        # check below and reports an error -- presumably a
                        # deliberate "flag bad data" fallback; confirm.
                        birthDate = datetime.strptime("2018-01-01", '%Y-%m-%d')
                        marriageDate = datetime.strptime("2018-01-01", '%Y-%m-%d')
                    # 5475 days = 15 * 365 (leap days ignored).
                    if(marriageDate < (birthDate+ timedelta(days=5475))):
                        print("ERROR: INDIVIDUAL: US10: " + i[0] + " marriage date occurs before they are 15 on line " + str(j[0]))
                        errors.append("ERROR: INDIVIDUAL: US10: " + i[0] + " marriage date occurs before they are 15 on line " + str(j[0]))
    return errors
def main(individualTable, familyTable, newFam):
return checkIndividuals(individualTable, marriageAfter14FamParse(familyTable, newFam)) | chloequinto/SSW_555_Project | package/userStories/us10.py | us10.py | py | 1,908 | python | en | code | 0 | github-code | 13 |
6089901987 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : test.salary_process.py
@Time : 2020/08/24 13:21:52
@Author : Tong tan
@Version : 1.0
@Contact : raogx.vip@hotmail.com
'''
from salary.process import Process
from salary.operators import GzOperator
from salary.operators import MergeOperator
from salary.split_operator import SalaryTplSplit
from salary.config import SalaryConfig
class TestSalaryProcess(object):
    """Tests for salary.process.Process with the different operators."""
    def test_gz_opreator_process(self):
        """Validation run with the salary (GZ) template operator."""
        proc = Process(GzOperator(SalaryConfig()),'202008')
        proc.process_validat()
        assert proc.name == '工资模板处理器'
        assert proc.operator.period == '202008'
    def test_merge_opreator_process(self):
        """Validation run with the merged salary+bonus template operator."""
        proc = Process(MergeOperator(SalaryConfig()),'202008')
        proc.process_validat()
        assert proc.name == '工资奖金模板处理器'
        assert proc.operator.period == '202008'
    def test_split_opreator_process(self):
        """Split run with the salary-template split operator."""
        proc = Process(SalaryTplSplit(SalaryConfig()),'202008')
        proc.process_split()
        assert proc.operator.period == '202008'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.