seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
16070369380 | #the three cup monte game in python
from random import shuffle
def three_cup_monte():
    """Shuffle three cups (one hides the '0' ball) and grade the player's guess."""
    cups = ["", "0", ""]   # exactly one cup holds the ball
    shuffle(cups)          # randomise which position hides it
    guess = int(input("Enter any choice between 0, 1 and 2 : "))
    # Correct guess means the chosen cup is the one with the ball.
    return "Correct" if cups[guess] == "0" else "Wrong"
print(three_cup_monte()) #...calling the function
#list comprehension
def comp():
    """Return the fruit names that contain the letter 'a'."""
    fruits = ["apple", "banana", "cherry", "kiwi", "mango"]
    matching = [fruit for fruit in fruits if "a" in fruit]
    return matching
print(comp())
#reverse a string
def rev():
    """Read a line from stdin and return it reversed."""
    text = input()
    # Negative-step slice walks the string from last character to first.
    return text[::-1]
print(rev())
#count number of word in sentance
def number_of_words():
    """Read a line from stdin and return its whitespace-separated word count."""
    return len(input().split())
print(number_of_words())
#minimum and maximum in array
# Read n, then keep the first n integers of the next line; print "min max".
n = int(input())
a = list(map(int, input().split()))[:n]
# Use the built-ins instead of two hand-rolled scan loops.
mini = min(a)
maxi = max(a)
print(mini, end=' ')
print(maxi)
#Kth min and max of array
# NOTE: despite the heading, this prints the k smallest values in ascending order.
n = int(input())
k = int(input())
m = sorted(list(map(int, input().split()))[:n])
for value in m[:k]:
    print(value, end=' ')
#Leap Year
year = int(input())
# Gregorian rule: divisible by 4, except century years, unless divisible by 400.
if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
    print("leap year")
else:
    print("Not leap year")
#union of two array
# Classic two-pointer merge over two (assumed sorted) arrays; a value at the
# head of both arrays is printed only once.
m = int(input())
n = int(input())
arr1 = list(map(int, input().split()))[:m]
arr2 = list(map(int, input().split()))[:n]
i,j = 0,0
while i < m and j < n:
    if arr1[i] < arr2[j]:
        print(arr1[i],end=' ')
        i += 1
    elif arr2[j] < arr1[i]:
        print(arr2[j],end=' ')
        j += 1
    else:
        # Equal heads: emit once and advance both pointers.
        print(arr2[j],end=' ')
        j += 1
        i += 1
# Flush whichever array still has unprinted elements.
while i < m:
    print(arr1[i],end=' ')
    i += 1
while j < n:
    print(arr2[j],end=' ')
    j += 1
#intersection of two arrays
# Two-pointer scan over two (assumed sorted) arrays: prints values present
# in both, advancing whichever pointer holds the smaller head.
m = int(input())
n = int(input())
arr1 = list(map(int, input().split()))[:m]
arr2 = list(map(int, input().split()))[:n]
i,j = 0,0
while i < m and j < n:
    if arr1[i] < arr2[j]:
        i += 1
    elif arr2[j] < arr1[i]:
        j += 1
    else:
        # Match: print it once and move past it in both arrays.
        print(arr2[j],end=' ')
        j += 1
        i += 1
#duplicates characters of string
# Prints one entry per matching (earlier, later) pair, so a character that
# occurs three times is printed three times.
s = input()
n = len(s)
for i, ch in enumerate(s):
    for later in s[i + 1:]:
        if ch == later:
            print(ch, end=' ')
#common of any array
# Elements present in all three arrays (each limited to the first n1 values).
n1 = int(input())
a1 = list(map(int,input().split()))[:n1]
a2 = list(map(int,input().split()))[:n1]
a3 = list(map(int,input().split()))[:n1]
# Three-way set intersection replaces the chained intersection() calls.
result = set(a1) & set(a2) & set(a3)
print(list(result))
#vowels or consonants
# Count vowels vs consonants in one line of input; non-letters are ignored.
s = input()
vowels = 0
consonants = 0
for ch in s:
    if ('a' <= ch <= 'z') or ('A' <= ch <= 'Z'):
        # BUG FIX: compare case-insensitively so 'A', 'E', ... count as vowels
        # (previously uppercase vowels were tallied as consonants).
        if ch.lower() in 'aeiou':
            vowels += 1
        else:
            consonants += 1
print("vowels", vowels)
print("consonants", consonants)
| Shusovan/Practice-Programs | practice.py | practice.py | py | 3,472 | python | en | code | 0 | github-code | 13 |
14468237764 | data = []
# Advent of Code 2022 day 21 part 1: resolve a tree of monkey "math jobs"
# until the monkey named "root" yells a number.
with open("day21.txt") as f:
    data = [x.strip() for x in f.readlines()]
monkies = {}
for row in data:
    tokens = row.split(":")
    monkey = tokens[0]
    rules = tokens[1].strip().split(" ")
    if len(rules) == 1:
        # Leaf monkey: yells a literal number -> (value, None).
        monkies[monkey] = (int(rules[0]), None)
    else:
        # Math monkey: (None, [left name, operator, right name]).
        monkies[monkey] = (None, rules)
monkey_found = False
while not monkey_found:
    for monkey, rules in monkies.items():
        value, math = rules
        # BUG FIX: compare against None explicitly -- a resolved value of 0 is
        # falsy, so `if value` would never recognise it as resolved.
        if monkey == "root" and value is not None:
            print(value)
            monkey_found = True
            break
        if value is not None:
            continue
        left_val, _ = monkies[math[0]]
        right_val, _ = monkies[math[2]]
        # Both operands must be resolved before this monkey can yell.
        if left_val is None or right_val is None:
            continue
        if math[1] == "+":
            monkies[monkey] = (left_val + right_val, None)
        elif math[1] == "*":
            monkies[monkey] = (left_val * right_val, None)
        elif math[1] == "/":
            monkies[monkey] = (left_val // right_val, None)
        elif math[1] == "-":
            monkies[monkey] = (left_val - right_val, None)
| PeterDowdy/AdventOfCode2022 | day21_1.py | day21_1.py | py | 1,155 | python | en | code | 0 | github-code | 13 |
16397470691 | from ._base_api import BaseRequests
from requests.models import Response
class GenresRequests(BaseRequests):
    """Thin HTTP client for the /genres endpoints of the API under test."""
    def __init__(self):
        super().__init__()
        self.base_url += '/genres'
    def _auth_headers(self, auth_token: str) -> dict:
        # Every /genres call sends the same JSON-accept + bearer-token headers.
        return {
            "Accept": "application/json",
            "Authorization": f"Bearer {auth_token}"
        }
    # Get all genres
    def get_all_genres(self, auth_token: str):
        url = f"{self.base_url}"
        return self.get_request(url, self._auth_headers(auth_token))
    # Get all genres - return xml hardcoded. This is just for testing purposes
    def get_all_genres_return_xml_hardcoded(self, auth_token: str):
        """Return a canned XML genre list wrapped in a synthetic 200 Response."""
        genres = [
            {"id": 1, "name": "Action"},
            {"id": 2, "name": "Horror"},
            {"id": 3, "name": "Romance"},
            {"id": 4, "name": "Science Fiction"},
            {"id": 5, "name": "Disaster Film"},
            {"id": 6, "name": "Epic Romance"},
            {"id": 7, "name": "Superhero Film"},
            {"id": 8, "name": "Space Western"},
            {"id": 9, "name": "Comedy"},
            {"id": 10, "name": "Adventure"},
            {"id": 11, "name": "Western"}
        ]
        xml_str = '<?xml version="1.0" encoding="UTF-8"?>\n'
        xml_str += '<genres>\n'
        for genre in genres:
            xml_str += ' <genre>\n'
            xml_str += f' <id>{genre["id"]}</id>\n'
            xml_str += f' <name>{genre["name"]}</name>\n'
            xml_str += ' </genre>\n'
        xml_str += '</genres>'
        # Build a fake requests.Response so callers can treat it like a real reply.
        response = Response()
        response._content = xml_str.encode('utf-8')
        response.status_code = 200
        response.headers = {'Content-Type': 'application/xml'}
        return response
    # Get genre by id
    def get_genre_by_id(self, auth_token: str, genre_id: int):
        url = f"{self.base_url}/{genre_id}"
        print(f"GET: {url}")
        return self.get_request(url, self._auth_headers(auth_token))
    # Create genre
    def post_genre(self, auth_token: str, payload: dict[str, any]):
        url = f"{self.base_url}"
        print(f"POST: {url}")
        return self.post_request(url, payload, self._auth_headers(auth_token))
    # Create genres - bulk
    def post_genre_bulk(self, auth_token: str, payload: list[dict[str, any]]):
        url = f"{self.base_url}/bulk"
        print(f"POST: {url}")
        return self.post_request(url, payload, self._auth_headers(auth_token))
    # Update genre
    def put_genre(self, auth_token: str, genre_id: int, payload: dict[str, any]):
        url = f"{self.base_url}/{genre_id}"
        print(f"PUT: {url}")
        return self.put_request(url, payload, self._auth_headers(auth_token))
    # Delete genre
    def delete_genre(self, auth_token: str, genre_id: int):
        url = f"{self.base_url}/{genre_id}"
        print(f"DELETE: {url}")
        return self.delete_request(url, self._auth_headers(auth_token))
    def search_genres(self, auth_token: str, queryParams: dict[str, any] = None):
        # BUG FIX: the default was a shared mutable dict ({}); use None as the
        # sentinel and create a fresh dict per call instead.
        if queryParams is None:
            queryParams = {}
        url = f"{self.base_url}/search"
        return self.get_request_with_params(url, queryParams, self._auth_headers(auth_token))
38203893927 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the "team" app: adds a team FK to
    # both stats models, a goal_time to Goal, and uniqueness constraints.
    # Generated by Django -- avoid hand-editing the operations.
    dependencies = [
        ('team', '0004_auto_20160403_1033'),
    ]
    operations = [
        migrations.AddField(
            model_name='fieldplayerstats',
            name='team',
            field=models.ForeignKey(default=0, to='team.Team'),
        ),
        migrations.AddField(
            model_name='goal',
            name='goal_time',
            # NOTE(review): a bytes default (b'00:00') on a TimeField looks
            # like a Python 2 artifact -- confirm it parses as intended.
            field=models.TimeField(default=b'00:00'),
        ),
        migrations.AddField(
            model_name='goaltenderstats',
            name='team',
            field=models.ForeignKey(default=0, to='team.Team'),
        ),
        # One stats row per (player, season, team) for both stats models.
        migrations.AlterUniqueTogether(
            name='fieldplayerstats',
            unique_together=set([('player', 'season', 'team')]),
        ),
        migrations.AlterUniqueTogether(
            name='goaltenderstats',
            unique_together=set([('player', 'season', 'team')]),
        ),
        # Jersey numbers are unique within a team.
        migrations.AlterUniqueTogether(
            name='player',
            unique_together=set([('number', 'team')]),
        ),
    ]
| snikers12/Hockey_club | team/migrations/0005_auto_20160403_1054.py | 0005_auto_20160403_1054.py | py | 1,182 | python | en | code | 0 | github-code | 13 |
7406452460 |
# File: EasterSunday.py
# Description: EasterSunday assignment
# Student Name: Lei Liu
# Student UT EID: LL28379
# Course Name: CS 303E
# Unique Number: 51200
# Date Created: 9/10/16
# Date Last Modified: 9/10/16
# Easter only falls in March or April, so only the early months are needed.
months = ["January", "February", "March", "April", "May"]
def main():
    """Compute and print the date of Easter Sunday for a user-supplied year.

    Uses the anonymous Gregorian computus: a chain of integer divisions and
    remainders on the year yields the month (n: 3=March, 4=April) and day (p).
    Fixed to use true integer division (//) instead of int(x / y), which
    relied on float precision.
    """
    y = int(input('Enter year: '))
    a = y % 19
    b = y // 100          # century
    c = y % 100           # year within the century
    d = b // 4
    e = b % 4
    g = ((8 * b) + 13) // 25
    h = ((19 * a) + b - d - g + 15) % 30
    j = c // 4
    k = c % 4
    m = (a + (11 * h)) // 319
    r = ((2 * e) + (2 * j) - k - h + m + 32) % 7
    n = (h - m + r + 90) // 25    # month number
    p = (h - m + r + n + 19) % 32  # day of the month
    print('')
    print('In ' + str(y) + ' Easter Sunday is on ' + str(p) + ' ' + months[n - 1] + '.')
main()
28306385939 | from flask_app.config.mysqlconnection import connectToMySQL
import datetime
from flask import flash, session
from flask_app.models import user, comment
class Post:
    """Model for a wall post, linked to its author (User) and its comments."""
    # Schema every query in this model runs against.
    DB = "CodingDojo_Wall_schema"
    def __init__( self , data ):
        self.id = data['id']
        self.content = data['content']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']
        self.users_id = data['users_id']
        # Filled in lazily by get_all_posts: a User instance / Comment list.
        self.creator = None
        self.comments = None
    @classmethod
    def save(cls, data ):
        """Insert a new post; `data` must carry 'content' and 'users_id'."""
        query = """
            INSERT INTO posts ( content, created_at, updated_at, users_id )
            VALUES ( %(content)s, now(), now(), %(users_id)s );
        """
        result = connectToMySQL(cls.DB).query_db( query, data )
        return result
    @classmethod
    def get_all_posts(cls):
        """Return every post with .creator and .comments populated."""
        query = "SELECT * from posts JOIN users WHERE posts.users_id = users.id"
        results = connectToMySQL(cls.DB).query_db(query)
        postlist = []
        for single_post in results:
            this_post = cls(single_post)
            # Columns duplicated by the JOIN come back prefixed ('users.id', ...).
            this_post_creator = {
                "id": single_post['users.id'],
                "first_name": single_post['first_name'],
                "last_name": single_post['last_name'],
                "email": single_post['email'],
                "password": single_post['password'],
                "created_at": single_post['users.created_at'],
                "updated_at": single_post['users.updated_at']
            }
            author = user.User(this_post_creator)
            this_post.creator = author
            this_post_id = {
                "id": single_post['id']
            }
            comments = comment.Comment.get_all_comments(this_post_id)
            this_post.comments = comments
            postlist.append(this_post)
        return postlist
    @classmethod
    def delete(cls, id):
        """Delete the post with the given id."""
        query = """DELETE FROM posts
                WHERE id = %(id)s;"""
        return connectToMySQL(cls.DB).query_db(query,{"id": id})
    @staticmethod
    def validate_post(post):
        """Flash an error and return False when the submitted content is empty."""
        is_valid = True
        if len(post['post']) < 1:
            flash("* Content cannot be blank.", 'post')
            is_valid = False
        return is_valid
13187967325 | import json
import pandas as pd
import time
import glob
from pathlib import Path
from variables import outputFolder, outputCSV, outputJSON
# Storing the current time in seconds since the Epoch.
start_time = time.time()
# Collect every exported JSON file under the dev/ folder.
read_files = glob.glob(outputFolder + "dev/*.json")
# Build one dataframe per file and concatenate ONCE at the end: calling
# pd.concat inside the loop copies the accumulated frame each iteration
# (quadratic in the number of files).
frames = []
for f in read_files:
    # Path().stem gives the filename without extension; the timestamp is the
    # last "_"-separated token of the stem.
    datetimeFromFilename = Path(f).stem.split("_")[-1]
    print(datetimeFromFilename)
    with open(f, 'r') as current_file:
        current_object = json.load(current_file)
    df_curr = pd.json_normalize(current_object, sep="_")
    df_curr['created_at'] = datetimeFromFilename
    frames.append(df_curr)
df = pd.concat(frames) if frames else pd.DataFrame()
print(df)
df["created_at"] = pd.to_datetime(df["created_at"])
print(df)
class Node:
    """A binary-tree node holding a value and optional children."""
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Tree:
    """Binary tree wrapper with a completeness check based on heap indices."""
    def __init__(self, root):
        self.root = root
    def count(self, node):
        # Number of nodes in the subtree rooted at `node`.
        if node is None:
            return 0
        return 1 + self.count(node.left) + self.count(node.right)
    def chkcom(self):
        # Print True when the tree is a complete binary tree, else False.
        if self.root is None:
            return
        total = self.count(self.root)
        print(self.func(self.root, 0, total))
    def func(self, node, index, n):
        # A tree is complete iff every node's heap index stays below the count.
        if node is None:
            return True
        if index >= n:
            return False
        return self.func(node.left, 2 * index + 1, n) and self.func(node.right, 2 * index + 2, n)
# Build a small complete tree and report whether it is complete.
root = Node(1)
root.left, root.right = Node(2), Node(3)
root.left.left, root.left.right = Node(4), Node(5)
root.right.left = Node(6)
t = Tree(root)
t.chkcom()
33549161053 | # -*- coding: utf-8 -*-
import sys
import time
from PyQt5.QtWidgets import *
# Class whose window cannot be resized by the user.
class SampleWindow(QWidget):
    """Sample window pinned to a fixed 250x100 size."""
    def __init__(self):
        QWidget.__init__(self)
        self.setWindowTitle("Sample Window")
        self.setGeometry(300,300,200,150)
        # Setting min == max effectively disables resizing.
        self.setMinimumHeight(100)
        self.setMinimumWidth(250)
        self.setMaximumHeight(100)
        self.setMaximumWidth(250)
# NOTE(review): main() is never called -- the __main__ block below builds a
# SampleWindow instead; this looks like leftover example code.
def main():
    app = QApplication(sys.argv)
    w = QWidget()
    w.resize(250, 150)
    w.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
    myApp = QApplication(sys.argv)
    myWindow = SampleWindow()
    myWindow.show()
    myApp.exec_()
    sys.exit(0)
sys.exit(0)
'''
import sys
from PyQt5 import QtWidgets
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QWidget()
window.setWindowTitle('テスト')
window.show()
sys.exit(app.exec_())
'''
| kyoush/GUI | win01.py | win01.py | py | 1,000 | python | ja | code | 0 | github-code | 13 |
32020159674 | from PyQt4 import QtCore, QtGui
import FrameBrowser
import NumpyArrayTableView
class NumpyArrayTableWidget(QtGui.QWidget):
    """Widget pairing a frame browser with a table view of a numpy array.

    For 3-D arrays the browser selects which frame (first axis) the table
    shows; 1-D/2-D arrays get a single frame.
    """
    def __init__(self, parent=None):
        # NOTE(review): the class derives from QWidget but calls
        # QTableWidget.__init__ here -- confirm this is intentional.
        QtGui.QTableWidget.__init__(self, parent)
        self.mainLayout = QtGui.QVBoxLayout(self)
        self.mainLayout.setMargin(0)
        self.mainLayout.setSpacing(0)
        self.browser = FrameBrowser.FrameBrowser(self)
        self.view = NumpyArrayTableView.NumpyArrayTableView(self)
        self.mainLayout.addWidget(self.browser)
        self.mainLayout.addWidget(self.view)
        # Old-style signal hookup: the browser emits "indexChanged" dicts.
        self.connect(self.browser,
                     QtCore.SIGNAL("indexChanged"),
                     self.browserSlot)
    def setArrayData(self, data):
        """Hand the array to the table view and size the frame browser."""
        self._array = data
        self.view.setArrayData(self._array)
        if len(self._array.shape) > 2:
            self.browser.setNFrames(self._array.shape[0])
        else:
            self.browser.setNFrames(1)
    def browserSlot(self, ddict):
        """React to the browser's frame-index changes (1-based index)."""
        if ddict['event'] == "indexChanged":
            if len(self._array.shape) == 3:
                self.view.setCurrentArrayIndex(ddict['new']-1)
            self.view.reset()
"""
if __name__ == "__main__":
import numpy
a = QtGui.QApplication([])
d = numpy.random.normal(0,1, (5, 1000,1000))
for i in range(5):
d[i, :, :] += i
#m = NumpyArrayTableModel(numpy.arange(100.), fmt="%.5f")
#m = NumpyArrayTableModel(numpy.ones((100,20)), fmt="%.5f")
w = NumpyArrayTableWidget()
w.setArrayData(d)
#m.setCurrentIndex(4)
#m.setArrayData(numpy.ones((100,100)))
w.show()
a.exec_()"""
| platipodium/mossco-code | scripts/postprocess/GUI/lib/table_view/NumpyArrayTableWidget.py | NumpyArrayTableWidget.py | py | 1,607 | python | en | code | 3 | github-code | 13 |
4997976995 | import numpy as np
class TrainDatasetConfig(object):
    """ Configuration of the training routine (params passed to the Dataset and DataLoader)"""
    def __init__(self):
        # Dataset location and rendering constants.
        self.data = "/gpfswork/rech/rnt/uuj49ar/bird_dataset"
        self.sigma = 7  # internal, changing is likely to break code or accuracy
        self.path_thickness = 1  # internal, changing is likely to break code or accuracy
        # DataLoader settings; account for optuna's n_jobs when sizing batches.
        self.batch_size = 64
        self.num_workers = 4
        self.num_data = 1082
class ValDatasetConfig(object):
    """" Configuration of the validation routine"""
    def __init__(self):
        self.data = "/gpfswork/rech/rnt/uuj49ar/bird_dataset"
        # Reuse the training loader settings so validation stays consistent;
        # a single instantiation replaces the two separate ones.
        train_cfg = TrainDatasetConfig()
        workers = train_cfg.num_workers
        self.batch_size = workers * (train_cfg.batch_size // (workers * 2))
        self.num_workers = workers
class NetConfig(object):
    """ Configuration of the network"""
    def __init__(self):
        self.resume_training = False
        # Checkpoint filename encodes the training batch size.
        self.init_chkp = '../experiment/optuna_training/run_B' + str(TrainDatasetConfig().batch_size) + '.pth'
        self.max_channels = 64
        self.min_linear_layers = 0
        self.max_linear_layers = 3
        self.min_linear_unit_size = 8
        # Can be overridden for the input size of the layer taking the flattened image.
        self.max_linear_unit_size = 256
class ExecutionConfig(object):
    """ Configuration of the training loop"""
    def __init__(self):
        self.epochs = 5
        self.chkp_folder = '../experiment/optuna_training/'
        self.gpus = 1
        self.num_validation_sanity_steps = 0
class OptunaConfig(object):  # put None in suggest to use the default value
    """ Configuration of the Optuna study: what to optimise and by what means"""
    def __init__(self):
        # HyperBand sizing: total optimisation steps, and a reduction factor
        # chosen for 5 brackets (see Optuna doc).
        train_cfg = TrainDatasetConfig()
        n_iters = int(train_cfg.num_data * ExecutionConfig().epochs / train_cfg.batch_size)
        self.n_iters = n_iters
        self.reduction_factor = int(round(np.exp(np.log(n_iters) / 4)))
        self.n_jobs = 1  # number of parallel optimisations
        self.timeout = 24 * 3600  # seconds; if None is in both limits, use CTRL+C to stop
        self.n_trials = 50  # stops at whichever of timeout / n_trials comes first
        self.pruner = 'Hyperband'  # options: Hyperband, Median, anything else -> no pruner
        self.suggest_optimiser = None  # ['SGD', 'Adam'] default is hardcoded to Adam
        self.default_optimiser = 'Adam'
        self.suggest_learning_rate = [1e-5, 1e-2]
        self.default_learning_rate = 0.001
        self.suggest_weight_decay = None  # [0, 1e-5]
        # self.default_weight_decay = 0.00936239234038259
        self.default_weight_decay = 0.00
        self.suggest_net_architecture = None  # [0,4]
        self.default_net_architecture = 0
| victoria-brami/BRAMI_Victoria_a3 | optimization/optuna_training_configuration.py | optuna_training_configuration.py | py | 3,112 | python | en | code | 0 | github-code | 13 |
73797745616 | # Built-in package
# Third-party packages
import graphene as gql
from django.db.transaction import atomic
# Local packages
from api_v1.domain.planet import models, types, crud
class CreatePlanet(types.PlanetOutputMutation, gql.Mutation):
    """GraphQL mutation creating a Planet from a PlanetCreateInput payload."""
    class Arguments:
        data = types.PlanetCreateInput(required=True)
    @atomic
    # skipcq: PYL-E0213, PYL-R0201
    def mutate(
        _root: models.Planet, _info: gql.ResolveInfo, data: types.PlanetCreateInput
    ) -> models.Planet:
        # atomic: the whole create rolls back on error.
        return crud.create_planet(data)
class UpdatePlanet(types.PlanetOutputMutation, gql.Mutation):
    """GraphQL mutation updating the planet matched by `where` with `data`."""
    class Arguments:
        data = types.PlanetUpdateInput(required=True)
        where = types.PlanetWhereUniqueInput(required=True)
    @atomic
    # skipcq: PYL-E0213, PYL-R0201
    def mutate(
        _root: models.Planet,
        _info: gql.ResolveInfo,
        where: types.PlanetWhereUniqueInput,
        data: types.PlanetUpdateInput,
    ) -> models.Planet:
        # atomic: the whole update rolls back on error.
        return crud.update_planet(where, data)
class DeletePlanet(types.PlanetOutputMutation, gql.Mutation):
    """GraphQL mutation deleting the planet matched by `where`."""
    class Arguments:
        where = types.PlanetWhereUniqueInput(required=True)
    @atomic
    # skipcq: PYL-E0213, PYL-R0201
    def mutate(
        _root: models.Planet,
        _info: gql.ResolveInfo,
        where: types.PlanetWhereUniqueInput,
    ) -> models.Planet:
        # atomic: the delete rolls back on error.
        return crud.delete_planet(where)
class Query(gql.ObjectType):
    """Root query type: single-planet lookup and filtered planet listing."""
    planet = gql.Field(types.Planet)
    planets = types.PlanetFilterConnectionField(
        types.Planet, where=types.PlanetWhereInput()
    )
    # skipcq: PYL-E0213, PYL-R0201
    def resolve_planet(
        _root: models.Planet,
        _info: gql.ResolveInfo,
        where: types.PlanetWhereUniqueInput,
    ) -> models.Planet:
        # Fetch one planet by its unique key.
        return crud.get_planet(where)
    # skipcq: PYL-E0213, PYL-R0201
    def resolve_planets(
        _root: models.Planet, _info: gql.ResolveInfo, where: types.PlanetWhereInput
    ) -> list[models.Planet]:
        # Fetch every planet matching the (possibly empty) filter.
        return crud.get_planets(where)
class Mutation(gql.ObjectType):
    """Root mutation type exposing the planet CRUD mutations."""
    create_planet = CreatePlanet.Field(required=True)
    update_planet = UpdatePlanet.Field()
    delete_planet = DeletePlanet.Field()
| dbritto-dev/lqn-graphql-challenge | api_v1/domain/planet/schema.py | schema.py | py | 2,179 | python | en | code | 0 | github-code | 13 |
22085284704 |
import os
import dgl
import time
import torch
import random
import numpy as np
import pandas as pd
import dgl.function as fn
from ogb.nodeproppred import DglNodePropPredDataset
from datasets.dgl_planetoid_dataset import DglPlanetoidDataset
from networks.gcn import GCN_Node
from networks.gat import GAT_Node
from networks.graphsage import GraphSage_Node
from optims.optim_ogbn_acc import ModelOptLearning_OGBN_Acc
from optims.optim_ogbn_proteins import ModelOptLearning_OGBN_Proteins
from download_dataset import data_preprocess
### set random seed
def set_seed(args):
    """Seed every RNG in use (python, numpy, torch) for reproducibility."""
    # Deterministic cuDNN kernels (trades speed for reproducibility).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.random.manual_seed(seed)
    # Seed the CUDA generators only when a GPU device was requested.
    if args.device >= 0:
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
### load and preprocess dataset
def load_dataset(args):
    """Load the node-prediction dataset named in `args`, preprocess it if
    needed, and return (dataset, split_idx)."""
    # load ogb dataset
    start_time = time.time()
    # load node prediction dataset
    if 'ogbn' in args.dataset_name:
        dataset = DglNodePropPredDataset(name=args.dataset_name, root=args.dataset_path)
    else:
        dataset = DglPlanetoidDataset(name=args.dataset_name, root=args.dataset_path)
    split_idx = dataset.get_idx_split()
    # Run the one-off preprocessing only if its marker key is absent.
    if not 'sg_factor' in dataset.graph[0].ndata.keys():
        dataset = data_preprocess(args, dataset)
    if 'ogbn-proteins' == args.dataset_name:
        # Average the edge 'feat' features into node 'feat' features.
        graph=dataset.graph[0]
        graph.update_all(fn.copy_e('feat', 'e'), fn.mean('e', 'feat'))
    end_time = time.time()
    print('- ' * 30)
    print(f'{args.dataset_name} dataset loaded, using {end_time-start_time} seconds.')
    print('- ' * 30)
    return dataset, split_idx
### add new arguments
def add_args(args, dataset):
    """Derive dataset-dependent arguments and log-folder paths on `args`.

    Fills in input/output dims, task metadata, a unique run-identity string,
    and creates the performance / statistics log directories.

    Args:
        args: experiment namespace (mutated in place and returned).
        dataset: loaded dataset exposing graph, num_classes / num_tasks,
            task_type and eval_metric.

    Returns:
        The same `args` object, updated.
    """
    args.dataset_name = args.dataset_name.lower()
    args.input_dim = dataset.graph[0].ndata['feat'].shape[1]
    # BUG FIX: the original used `args.dataset_name in 'ogbn-proteins'`, a
    # SUBSTRING test that also matches e.g. 'ogbn', 'protein' or ''; an
    # equality test is what was intended.
    if args.dataset_name == 'ogbn-proteins':
        args.output_dim = dataset.num_tasks
    elif args.dataset_name in ['ogbn-arxiv', 'cora', 'citeseer', 'pubmed']:
        args.output_dim = dataset.num_classes
    args.task_type = dataset.task_type
    args.eval_metric = dataset.eval_metric
    # Unique run identity, used as the filename for all logs of this config.
    args.identity = (f"{args.dataset_name}-"+
                    f"{args.model}-"+
                    f"{args.num_layer}-"+
                    f"{args.embed_dim}-"+
                    f"{args.norm_type}-"+
                    f"{args.norm_affine}-"+
                    f"{args.activation}-"+
                    f"{args.dropout}-"+
                    f"{args.skip_type}-"+
                    f"{args.lr}-"+
                    f"{args.lr_min}-"+
                    f"{args.lr_patience}-"+
                    f"{args.weight_decay}-"+
                    f"{args.seed}"
                    )
    # makedirs(exist_ok=True) replaces the racy exists-then-mkdir pairs and
    # creates the parent log dir in the same call.
    perf_root = os.path.join(args.logs_perf_dir, args.dataset_name)
    os.makedirs(perf_root, exist_ok=True)
    args.perf_xlsx_dir = os.path.join(perf_root, 'xlsx')
    args.perf_imgs_dir = os.path.join(perf_root, 'imgs')
    args.perf_dict_dir = os.path.join(perf_root, 'dict')
    args.perf_best_dir = os.path.join(perf_root, 'best')
    stas_root = os.path.join(args.logs_stas_dir, args.dataset_name)
    os.makedirs(stas_root, exist_ok=True)
    args.stas_xlsx_dir = os.path.join(stas_root, 'xlsx')
    args.stas_imgs_dir = os.path.join(stas_root, 'imgs')
    return args
### load gnn model
def load_model(args):
    """Instantiate the GNN named by args.model and move it to args.device."""
    if args.model == 'GCN':
        model = GCN_Node(args.input_dim, args.embed_dim, args.output_dim, args.num_layer, args).to(args.device)
    if args.model == 'GraphSage':
        model = GraphSage_Node(args.input_dim, args.embed_dim, args.output_dim, args.num_layer, args).to(args.device)
    if args.model == 'GAT':
        model = GAT_Node(args.input_dim, args.embed_dim, args.output_dim, args.num_layer, args).to(args.device)
    # NOTE(review): an unsupported args.model leaves `model` unbound and the
    # return raises NameError -- confirm callers only pass the three names above.
    print('- ' * 30)
    if torch.cuda.is_available():
        print(f'{args.model} with {args.norm_type} norm, {args.dropout} dropout, on gpu', torch.cuda.get_device_name(0))
    else:
        print(f'{args.model} with {args.norm_type} norm, {args.dropout} dropout')
    print('- ' * 30)
    return model
### load model optimizing and learning class
def ModelOptLoading(model, optimizer, scheduler, args):
    """Pick the train/eval driver that matches the dataset's metric family."""
    # Accuracy-style datasets share one driver; proteins uses its own.
    if args.dataset_name in ['cora', 'citeseer', 'pubmed', 'ogbn-arxiv','ogbn-products', 'ogbn-mag']:
        modelOptm = ModelOptLearning_OGBN_Acc(
            model=model,
            optimizer=optimizer,
            scheduler=scheduler,
            args=args)
    elif 'proteins' in args.dataset_name:
        modelOptm = ModelOptLearning_OGBN_Proteins(
            model=model,
            optimizer=optimizer,
            scheduler=scheduler,
            args=args)
    # NOTE(review): any other dataset name leaves modelOptm unbound and the
    # return raises NameError -- confirm callers restrict the names.
    return modelOptm
def print_best_log(args, eopch_slice=0,
                   metric_list='all'):
    """Load this run's per-epoch log, pick the epoch with the best validation
    metric, persist that row, and print the requested metrics.

    Args:
        args: experiment namespace (uses eval_metric, perf_xlsx_dir,
            perf_best_dir and identity).
        eopch_slice: ignore epochs before this index when searching the best.
        metric_list: 'all' to print the whole best row, otherwise an iterable
            of column names to print.

    Returns:
        0 (kept for backward compatibility with existing callers).
    """
    key_metric = f'valid-{args.eval_metric}'
    logs_table = pd.read_excel(os.path.join(args.perf_xlsx_dir, args.identity + '.xlsx'))
    metric_log = logs_table[key_metric]
    # Best epoch = argmax of the validation metric after the warm-up slice.
    best_epoch = metric_log[eopch_slice:].idxmax()
    best_frame = logs_table.loc[best_epoch]
    # makedirs(exist_ok=True) avoids the check-then-create race of the old
    # os.path.exists + os.mkdir pair.
    os.makedirs(args.perf_best_dir, exist_ok=True)
    best_frame.to_excel(os.path.join(args.perf_best_dir, args.identity + '.xlsx'))
    if metric_list == 'all':
        print(best_frame)
    else:
        for metric in metric_list:
            print(f'{metric}: {best_frame[metric]}')
    return 0
| chenchkx/SuperNorm | utils/utils_node.py | utils_node.py | py | 6,107 | python | en | code | 5 | github-code | 13 |
17053174524 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InstalmentPlanTuitionDTO(object):
    """Value object for one instalment-plan tuition record.

    Attribute names double as the key names produced by to_alipay_dict()
    and consumed by from_alipay_dict().
    """

    # Every serialisable field of this DTO.
    _FIELDS = (
        "amount", "biz_time", "order_id", "partner_id", "plan_open_id",
        "schedule_time", "serial_no", "smid", "status", "trade_no", "user_id",
    )

    def __init__(self):
        # Back every property below with a private slot initialised to None.
        for field in self._FIELDS:
            setattr(self, "_" + field, None)

    @property
    def amount(self):
        return self._amount
    @amount.setter
    def amount(self, value):
        self._amount = value
    @property
    def biz_time(self):
        return self._biz_time
    @biz_time.setter
    def biz_time(self, value):
        self._biz_time = value
    @property
    def order_id(self):
        return self._order_id
    @order_id.setter
    def order_id(self, value):
        self._order_id = value
    @property
    def partner_id(self):
        return self._partner_id
    @partner_id.setter
    def partner_id(self, value):
        self._partner_id = value
    @property
    def plan_open_id(self):
        return self._plan_open_id
    @plan_open_id.setter
    def plan_open_id(self, value):
        self._plan_open_id = value
    @property
    def schedule_time(self):
        return self._schedule_time
    @schedule_time.setter
    def schedule_time(self, value):
        self._schedule_time = value
    @property
    def serial_no(self):
        return self._serial_no
    @serial_no.setter
    def serial_no(self, value):
        self._serial_no = value
    @property
    def smid(self):
        return self._smid
    @smid.setter
    def smid(self, value):
        self._smid = value
    @property
    def status(self):
        return self._status
    @status.setter
    def status(self, value):
        self._status = value
    @property
    def trade_no(self):
        return self._trade_no
    @trade_no.setter
    def trade_no(self, value):
        self._trade_no = value
    @property
    def user_id(self):
        return self._user_id
    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialise the populated (truthy) fields into a plain dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                # Nested DTOs know how to serialise themselves.
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a DTO from a gateway dict; returns None for empty input."""
        if not d:
            return None
        o = InstalmentPlanTuitionDTO()
        for field in InstalmentPlanTuitionDTO._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/InstalmentPlanTuitionDTO.py | InstalmentPlanTuitionDTO.py | py | 5,610 | python | en | code | 241 | github-code | 13 |
72342597779 | ''' IMPORTING NECCESARY PACKAGES'''
from tkinter import * #tkinter is a GUI package for python
from tkinter import ttk
from tkinter import messagebox
from PIL import ImageTk,Image
import webbrowser
''' IMPORTING SUCCESSFUL'''
# Catalogue of phone models, grouped by brand.
# Each record is:
#   [name, price ($), screen size (inches), memory (GB),
#    keyboard type ('t' touchscreen, 'k' keypad, 'q' QWERTY),
#    battery life / talk time (hours)]
iPhone8 = [['iPhone 8 64GB', 729, 4.7, 64,'t', 14],['iPhone 8 128GB', 799, 4.7, 128,'t', 14]] #0,1,2,3,4,5
iPhone8Plus = [['iPhone 8 Plus 64GB', 899, 5.5, 64,'t', 21],['iPhone 8 Plus 128GB', 969, 5.5, 128,'t', 21]]
iPhone11Pro = [["iPhone 11 Pro 64GB",1649,5.8,64,'t', 16],["iPhone 11 Pro 256GB",1889,5.8,256,'t', 16],["iPhone 11 Pro 512GB",2199,5.8,512,'t', 16]]
iPhone11ProMax = [["iPhone 11 Pro Max 64GB",1799,6.5,64,'t', 21],["iPhone 11 Pro Max 256GB",2039,6.5,256,'t', 21],["iPhone 11 Pro Max 512GB",2349,6.5,512,'t', 21]]
Nokia = [["Nokia 800 Tough 4GB",172,2.4,4,'k', 12], ["Nokia 106 4MB",60.99,1.8,0.004,'k', 14], ["Nokia C1 16GB",68,5.45,16,'t',48]]
Samsung = [["Samsung Galaxy Fold 512GB Large 7.3",3088,7.3,512,'t', 24], ["Samsung Galaxy S20 Ultra 128GB", 1898,6.9,128,'t', 37]]
Xiaomi = [['Redmi Note 8 64GB', 195, 6.22,64,'t', 41],["Xiaomi Redmi Note 8 Pro 128GB",325,6.53,128,'t', 41],["Xiaomi Mi Note 10 Pro 256GB",740,6.47,256,'t', 50], ["Xiaomi Black Shark 3 Pro",911,6.67,256,'t',66]]
BlackBerry = [["BlackBerry Q10 16GB",200,3.1,16,'q', 13], ["Blackberry Key2 64GB",605,4.5,64,'q',86]]
Sony = [['Sony Xperia 1', 1299, 6.5, 512,'t', 79], ['Sony Xperia 5', 1099, 6.1, 128, 't', 96]]
Huawei = [["Huawei P40 Pro 8+ 5G",1448,6.58,256,'t',94], ["Huawei Y6s",155,6.09,64,'t',86], ["Huawei Y7p",245,6.39,64,'t',57]]
Oneplus = [["Oneplus 7T",750,6.55,128,'t',90]]
Google = [["Google Pixel 4 5.7",1119,5.7,64,'t',62], ["Google Pixel 4 5.7",1269,5.7,128,'t',62]]
vivo = [["vivo V17",358,6.44,128,'t',64], ["vivo Y12 64GB",169,6.35,64,'t',61]]
Oppo = [["Oppo A5s (AX5s)",199,6.2,64,'t',61], ["Oppo Reno 10x Zoom",1299,6.6,256,'t',107], ["Oppo F15",480,6.4,128,'t',66]]
Asus = [["Asus Zenfone 5 ZE620KL",437,5.7,64,'t',58], ["Zenfone Max Pro M1 ZB601KL/ZB602K",348,5.99,32,'t',51]]
Lenovo = [["Lenovo A7000 Plus",242,5.5,16,'t',56], ["Lenovo K10 Note",273,6.3,64,'t',61]]
HTC = [['Wildfire R70 32GB', 245, 6.53, 32, 't', 35], ['Desire 19s 32GB', 299, 6.2, 32, 't', 30], ['Exodus 1 64GB', 768, 5.7, 64, 't', 30], ['Wildfire X 32GB', 400, 6.2, 32, 't', 30], ['One X9 32GB', 520, 5.5, 32, 't', 25], ['Desire 19+ 64GB', 380, 6.2, 64, 't', 35], ['Desire 19+ 128GB', 480, 6.2, 128, 't', 35], ['Desire 12 16GB', 160, 5.5, 16, 't', 28], ['Desire 12 32GB', 220, 5.5, 32, 't', 28]]
LG = [['LG G6 32GB', 680, 5.7, 32, 't', 33], ['LG G6 64G', 938, 5.7, 64, 't', 33], ['LG G6 128GB', 1345, 5.7, 128, 't', 33], ['G8 ThinQ 128GB', 925, 6.1, 128, 't', 30], ['V50 ThinQ 128GB', 1100, 6.4, 128, 't', 40], ['V40 ThinQ 64GB', 800, 6.4, 64, 't', 28], ['V40 ThinQ 128GB', 1000, 6.4, 128, 't', 28], ['LG G7 ThinQ 64GB', 1000, 6.1, 64, 't', 34], ['LG G7 ThinQ 128GB', 1250, 6.1, 128, 't', 34], ['LG G8 ThinQ 128GB', 1300, 6.1, 128, 't', 38]]
PhoneBrands = [iPhone8,iPhone8Plus,iPhone11Pro,iPhone11ProMax,Nokia,Samsung,Xiaomi,BlackBerry,Sony,Huawei,Oneplus,Google,vivo,Oppo,Asus,Lenovo,HTC,LG]
# Lookup tables mapping a radiobutton choice (1..n) to a threshold value
# used by the memory / screen-size / battery filters below.
memorysize = {1:64,2:64,3:128,4:256,5:512}
screensize = {1:5,2:6.5,3:6.5}
batterylife = {1:15,2:25,3:25}
# Intermediate results of the five filtering stages (budget, keyboard,
# memory, screen size, battery life), reset by clear() between searches.
first_filter = []
second_filter = []
third_filter = []
fourth_filter = []
fifth_filter = []
phone = []
# Combine all the Phones into 1 flat list of records
for x in range(0,len(PhoneBrands)):
    for y in range(0,len(PhoneBrands[x])):
        phone.append(PhoneBrands[x][y])
''' APP LAYOUT'''
root = Tk() #opening a GUI window
root.geometry('800x750+150+20') #window size and position on the screen
root.configure(bg='grey') #background color for the window
root.title("HandPhone Recommendation System") #title for the window
root.resizable(width=False, height=False) #window cannot be resized
top_frame = Frame(root,height=60,width=895,bg='orange') #frame for the welcome text
path = "allphones.jpg" #background image path
img = ImageTk.PhotoImage(Image.open(path)) #opening image
label = Label(top_frame,image = img ,height=70,width=1080) #background image for the welcome frame
label.image=img
label.place(x=0,y=0) #positioning the image in the window
top_frame.place(x=0,y=5) #placing the frame
tf_label = Label(top_frame,text='Welcome To The System',font=('arial', 33, 'bold'),fg='dark blue',bg='gray89',height=50)
tf_label.pack(anchor='center')
top_frame.pack_propagate(False)
#root.iconbitmap("icon.ico") #photo yet to upload (this is for icon of the window)
#app frame: holds the five filter steps built further below
frame = LabelFrame(root,height=80,width=1080) #lower frame ( content frame )
frame.place(x=10, y=70)
frame.pack_propagate(False)
path = "background.jpg"
img = ImageTk.PhotoImage(Image.open(path))
label = Label(frame,image = img ,height=400,width=1280) #background image for app frame
label.image=img
label.place(x=0,y=0)
"""All functions"""
#to clear in budget field and refresh the window after searching for one time
def refresh():
    """Clear the budget entry and grey out the later steps so the user
    can start a fresh search."""
    budget.delete(0, 'end') #clears the entry widget data
    disable() #disables lower frames of the window
#for clearing filter data for next data input
def clear():
    """Reset every intermediate filter result before the next search."""
    for bucket in (first_filter, second_filter, third_filter,
                   fourth_filter, fifth_filter):
        bucket.clear()
#opens borwser to search for the phone
def callback(url):
    """Open *url* in the system's default web browser (new window)."""
    #print(url_path)
    webbrowser.open_new(url)
#produces the end results of filtering process
def recommend(filter_data, filter_count):
    """Report the outcome of a filtering stage.

    filter_data  : phone records that survived the filters so far
    filter_count : which stage just ran; results are shown only after
                   the final (5th) stage, otherwise this only aborts
                   the search when nothing is left.
    """
    if len(filter_data) == 0:
        messagebox.showinfo("Error", "There are no phones within your requirements")
        refresh()
        clear()
    elif filter_count == 5:
        top = Toplevel()
        top.title("Phone models")
        Label(top,text="These phones are most suitable for you",font=("Helvetica 19 bold"),fg='orange').grid(row=0,column=0)
        for i in range(0,len(filter_data)):
            Label(top,text=filter_data[i][0], font=("Helvetica 19 bold")).grid(row=i+1,column=0)
            link1 = Label(top, text="Click to search for this phone", fg="blue", cursor="hand2")
            link1.grid(row=i+1, column= 1)
            url_path= "https://www.amazon.com/s?k=" + filter_data[i][0]
            # FIX: the old handler was `lambda e: callback`, which only
            # evaluated the function object and never called it, so clicking
            # a link did nothing.  Binding url_path as a default argument
            # also avoids the late-binding trap where every link would
            # otherwise open the last phone's URL.
            link1.bind("<Button-1>", lambda e, url=url_path: callback(url))
        clear() #clears all filter data for next input
#enabling frame children when pressing next button
def enable(childList):
    """Re-enable every widget in *childList* (a frame's children)."""
    for child in childList:
        child.configure(state='active')
#Conduct a run of filtering to narrow down available phones based on user inputs(first filter append) and enable next frame
def pri():
    """First stage: keep phones cheaper than the entered budget, then
    unlock the keyboard step.

    Shows an error popup when the entry does not contain an integer.
    """
    try:
        # IntVar.get() raises tkinter.TclError when the entry is empty
        # or not an integer.  Read it once instead of per iteration.
        limit = budget_value.get()
    except Exception:
        # FIX: the old bare `except:` wrapped the whole body, so a bug in
        # the filtering loop was silently reported as "Please enter value!".
        messagebox.showerror("Error", "Please enter value!") #show warning for no data entered
        return
    enable(keyboard_frame.winfo_children()) #enables next frame
    for record in phone:
        if record[1] < limit:
            first_filter.append(record)
#Conduct a run of filtering to narrow down available phones based on user inputs (second filter append) and enable next frame
def key_next():
    """Second stage: keep phones whose keyboard type ('q', 'k' or 't')
    matches the selected radiobutton, then unlock the memory step."""
    if not keyboard.get() : #if no radiobutton is selected show warning
        messagebox.showwarning("Error", "Please select a keyboard interface!")
    else:
        enable(memory_frame.winfo_children()) #enables next frame
        for i in range(0,len(first_filter)):
            if first_filter[i][4] == keyboard.get():
                second_filter.append(first_filter[i])
        #Check for recommendation if to continue or not
        recommend(second_filter,2)
#Conduct a run of filtering to narrow down available phones based on user inputs (third filter append) and enable next frame
def mem_next():
    """Third stage: keep phones matching the selected memory requirement,
    then unlock the screen-size step.

    Choice 1 means "< 64 GB"; choices 2-5 mean "at least the mapped size".
    """
    if not memory.get() : # check: if no memory is selected, pop up a warning
        messagebox.showwarning("Error", "Please select a memory requirement!")
        return
    enable(size_frame.winfo_children())
    choice = int(memory.get())
    # FIX: the old `try/except: pass` around the lookup could leave
    # memory_m unbound (NameError) for an out-of-range choice; a dict
    # lookup with an explicit guard is both safer and clearer.
    memory_m = memorysize.get(choice)
    if memory_m is None:  # defensive: radiobutton values are 1..5
        return
    if choice == 1:
        # "<64 GB": strictly below the threshold
        third_filter.extend(p for p in second_filter if p[3] < memory_m)
    else:
        # otherwise keep phones with at least the chosen memory
        third_filter.extend(p for p in second_filter if p[3] >= memory_m)
    recommend(third_filter,3)
#Conduct a run of filtering to narrow down available phones based on user inputs (fourth filter append) and enable next frame
def size_next():
    """Fourth stage: keep phones matching the selected screen size,
    then unlock the battery step.

    Choice 1 = small (<= 5"), 2 = medium (5" < s < 6.5"),
    3 = large (>= 6.5").
    """
    if not size.get(): #is any size selected? check for it
        messagebox.showwarning("Error", "Please select required phone size!")
        return
    enable(battery_frame.winfo_children())
    choice = int(size.get())
    # FIX: the old `try/except: pass` around the lookup could leave
    # `screen` unbound (NameError) for an out-of-range choice.
    screen = screensize.get(choice)
    if screen is None:  # defensive: radiobutton values are 1..3
        return
    if choice == 1:
        fourth_filter.extend(p for p in third_filter if p[2] <= screen)
    elif choice == 2:
        fourth_filter.extend(p for p in third_filter if 5 < p[2] < screen)
    else:
        fourth_filter.extend(p for p in third_filter if p[2] >= screen)
    recommend(fourth_filter,4)
#for disabling the lower frames
def disable():
    """Grey out every step after the budget entry; each step is
    re-enabled by the previous step's NEXT button."""
    for step_frame in (keyboard_frame, memory_frame, size_frame, battery_frame):
        for child in step_frame.winfo_children():
            child.configure(state='disable')
#Conduct a run of filtering to narrow down available phones based on user inputs (fifth filter append) and search for the result models
def batt_next():
    """Final stage: keep phones matching the selected battery life, then
    reset the form and show the recommendations.

    Choice 1 = short (<= 15 h), 2 = medium (5 h < b < 25 h),
    3 = long (>= 25 h), per the `batterylife` table.
    """
    if not battery.get(): # check if battery field is selected
        messagebox.showwarning("Error", "Please select all fields!")
        return
    choice = int(battery.get())
    # FIX: the old `try/except: pass` around the lookup could leave
    # battery_m unbound (NameError) for an out-of-range choice.
    battery_m = batterylife.get(choice)
    if battery_m is None:  # defensive: radiobutton values are 1..3
        return
    if choice == 1:
        fifth_filter.extend(p for p in fourth_filter if p[5] <= battery_m)
    elif choice == 2:
        fifth_filter.extend(p for p in fourth_filter if 5 < p[5] < battery_m)
    else:
        # FIX: removed a stray debug `print("4")` that fired per phone here.
        fifth_filter.extend(p for p in fourth_filter if p[5] >= battery_m)
    refresh()
    recommend(fifth_filter,5)
#budget frame and getting data to budget_value
budget_value= IntVar()
budget_frame = LabelFrame(frame)
budget_frame.grid(row=1, column = 0, sticky=W)
Label(budget_frame, text='Enter your max budget for the phone: $', font=("Helvetica", 13), fg = 'black' ).grid(row=1, column=0, padx= 10, sticky=W)
budget = Entry(budget_frame,width = 25,fg='blue',textvariable=budget_value, font=("Helvetica", 15))
budget.delete(0, 'end')
budget.insert(0,'Enter currency in Dollar')
budget.bind("<FocusIn>", lambda args: budget.delete('0', 'end'))
budget.grid(row=1, column=1, padx= 10,pady=10, ipady = 5,sticky =E)
ttk.Button(budget_frame, text='NEXT',command=pri).grid(row=1, column=2)
#keyboard interface frame and getting keyboard interface value to keyboard variable
keyboard_frame = LabelFrame(frame)
keyboard_frame.grid(row=2, column=0, padx=10, pady= 10, sticky=W)
Label(keyboard_frame, text="Please input the type of interface for phone :",font=("Helvetica", 13), fg = 'black').grid(row=2, column=0, sticky=W)
keyboard= StringVar()
Radiobutton(keyboard_frame, text="QWERTY Keyboard", variable=keyboard, value='q',font=("Helvetica", 13), fg = 'red',indicatoron=5).grid(row= 2, column=1,sticky=W)
Radiobutton(keyboard_frame, text="Keypad Keyboard", variable=keyboard, value='k',font=("Helvetica", 13), fg = 'blue').grid(row= 3, column=1,sticky=W)
Radiobutton(keyboard_frame, text="Touchscreen", variable=keyboard, value='t',font=("Helvetica", 13), fg = 'orange').grid(row= 4, column=1,sticky=W)
ttk.Button(keyboard_frame, text='NEXT',command=key_next).grid(row=2, column=4)
#memory frame and getting memory size to memory variable
memory_frame = LabelFrame(frame)
memory_frame.grid(row=5, column=0, padx=10, pady= 10, sticky=W)
Label(memory_frame, text="Please input the minimum desired memory space(GB) of phone :",font=("Helvetica", 13), fg = 'black').grid(row=5, column=0, sticky=W)
memory= IntVar()
Radiobutton(memory_frame, text=" <64 GB ", variable=memory, value=1,font=("Helvetica", 13), fg = 'green').grid(row= 5, column=1,sticky=W)
Radiobutton(memory_frame, text=" 64 GB ", variable=memory, value=2,font=("Helvetica", 13), fg = 'green').grid(row= 6, column=1,sticky=W)
Radiobutton(memory_frame, text=" 128 GB ", variable=memory, value=3,font=("Helvetica", 13), fg = 'green').grid(row= 7, column=1,sticky=W)
Radiobutton(memory_frame, text=" 256 GB ", variable=memory, value=4,font=("Helvetica", 13), fg = 'green').grid(row= 8, column=1,sticky=W)
Radiobutton(memory_frame, text=" 512 GB ", variable=memory, value=5,font=("Helvetica", 13), fg = 'green').grid(row= 9, column=1,sticky=W)
ttk.Button(memory_frame, text='NEXT',command=mem_next).grid(row=5, column=2)
#phone size frame and getting phone size to size variable
size_frame = LabelFrame(frame)
size_frame.grid(row=10, column=0, padx=10, pady= 10, sticky=W)
Label(size_frame, text="Please input the desired phone screen size :",font=("Helvetica", 13)).grid(row=10, column=0, sticky=W)
size= IntVar()
Radiobutton(size_frame, text="Small <5 inches", variable=size, value=1,font=("Helvetica", 13), fg = 'orange').grid(row= 10, column=1,sticky=W)
Radiobutton(size_frame, text="Medium <6.5 inches", variable=size, value=2,font=("Helvetica", 13), fg = 'orange').grid(row=11, column=1,sticky=W)
Radiobutton(size_frame, text="Large than or equals to 6.5 inches", variable=size, value=3,font=("Helvetica", 13), fg = 'orange').grid(row= 12, column=1,sticky=W)
ttk.Button(size_frame, text='NEXT',command=size_next).grid(row=10, column=2)
#battery life frame and getting battery life value to battery variable
battery_frame = LabelFrame(frame)
battery_frame.grid(row=13, column=0, padx=10, pady= 10, sticky=W)
Label(battery_frame, text="Please input the desired battery life (talktime) of phone :",font=("Helvetica", 13)).grid(row=13, column=0, sticky=W)
battery= IntVar()
Radiobutton(battery_frame, text="Short <5 hours", variable=battery, value=1,font=("Helvetica", 13), fg = 'blue').grid(row= 13, column=1,sticky=W)
Radiobutton(battery_frame, text="Medium <25 hours", variable=battery, value=2,font=("Helvetica", 13), fg = 'blue').grid(row= 14, column=1,sticky=W)
Radiobutton(battery_frame, text="Longer or equals to 25 hours", variable=battery, value=3,font=("Helvetica", 13), fg = 'blue').grid(row= 15, column=1,sticky=W)
#search buttons and styles
style = ttk.Style()
style.theme_use('alt')
style.configure('TButton', background = '#ADD8E6', foreground = 'black', width = 20, borderwidth=1, focusthickness=3, focuscolor='black')
style.map('TButton', background=[('active','#ADD8E6')])
Button(frame, text='SEARCH',command=batt_next,fg='black',bg='#ADD8E6',width = 20,font=("Helvetica", 13, 'bold')).grid(row=14, column=0,ipady = 4)
disable() # calling this function disables all frames except budget while loading the GUI for the first time
root.mainloop() #end GUI window | AnujTimsina/Phone-Recommendation-System | project.py | project.py | py | 17,924 | python | en | code | 0 | github-code | 13 |
37785254506 | import numpy as np
from scipy import interpolate
from matplotlib import pyplot as plot
x = np.array([0, 6, 0, -17, -31, -28, 0, 39, 63])
y = np.array([0, 6, 16, 17, 0, -28, -47, -39, 0])
# Close the curve: the periodic spline fit expects the first point repeated.
x = np.r_[x, x[0]]
y = np.r_[y, y[0]]
# FIX: fit the spline once.  The original called splprep twice (and splev
# twice) just to pick apart the returned tuples, doing every fit and
# evaluation two times.
tck, u = interpolate.splprep([x, y], s=0, per=True)
# Evaluate the closed spline at 1000 evenly spaced parameter values.
xi, yi = interpolate.splev(np.linspace(0, 1, 1000), tck)
fig, ax = plot.subplots(1, 1)
ax.plot(x, y, 'or')    # control points
ax.plot(xi, yi, '-b')  # interpolated closed curve
plot.show()
18347492038 | import mxnet as mx
import numpy as np
import cv2
from tools.rand_sampler import RandSampler
class DetRecordIter(mx.io.DataIter):
"""
The new detection iterator wrapper for mx.io.ImageDetRecordIter which is
written in C++, it takes record file as input and runs faster.
Supports various augment operations for object detection.
Parameters:
-----------
path_imgrec : str
path to the record file
path_imglist : str
path to the list file to replace the labels in record
batch_size : int
batch size
data_shape : tuple
(3, height, width)
label_width : int
specify the label width, use -1 for variable length
label_pad_width : int
labels must have same shape in batches, use -1 for automatic estimation
in each record, otherwise force padding to width in case you want t
rain/validation to match the same width
label_pad_value : float
label padding value
resize_mode : str
force - resize to data_shape regardless of aspect ratio
fit - try fit to data_shape preserving aspect ratio
shrink - shrink to data_shape only, preserving aspect ratio
mean_pixels : list or tuple
mean values for red/green/blue
kwargs : dict
see mx.io.ImageDetRecordIter
Returns:
----------
"""
def __init__(self, path_imgrec, batch_size, data_shape, path_imglist="",
label_width=-1, label_pad_width=-1, label_pad_value=-1,
resize_mode='force', mean_pixels=[123.68, 116.779, 103.939],
**kwargs):
super(DetRecordIter, self).__init__()
self.rec = mx.io.ImageDetRecordIter(
path_imgrec = path_imgrec,
path_imglist = path_imglist,
label_width = label_width,
label_pad_width = label_pad_width,
label_pad_value = label_pad_value,
batch_size = batch_size,
data_shape = data_shape,
mean_r = mean_pixels[0],
mean_g = mean_pixels[1],
mean_b = mean_pixels[2],
resize_mode = resize_mode,
**kwargs)
self.provide_label = None
self._get_batch()
if not self.provide_label:
raise RuntimeError("Invalid ImageDetRecordIter: " + path_imgrec)
self.reset()
@property
def provide_data(self):
return self.rec.provide_data
def reset(self):
self.rec.reset()
def iter_next(self):
return self._get_batch()
def next(self):
if self.iter_next():
return self._batch
else:
raise StopIteration
def _get_batch(self):
self._batch = self.rec.next()
if not self._batch:
return False
if self.provide_label is None:
# estimate the label shape for the first batch, always reshape to n*5
first_label = self._batch.label[0][0].asnumpy()
self.batch_size = self._batch.label[0].shape[0]
self.label_header_width = int(first_label[4])
self.label_object_width = int(first_label[5])
assert self.label_object_width >= 5, "object width must >=5"
self.label_start = 4 + self.label_header_width
self.max_objects = (first_label.size - self.label_start) // self.label_object_width
self.label_shape = (self.batch_size, self.max_objects, self.label_object_width)
self.label_end = self.label_start + self.max_objects * self.label_object_width
self.provide_label = [('label', self.label_shape)]
# modify label
label = self._batch.label[0].asnumpy()
label = label[:, self.label_start:self.label_end].reshape(
(self.batch_size, self.max_objects, self.label_object_width))
self._batch.label = [mx.nd.array(label)]
return True
class DetIter(mx.io.DataIter):
    """
    Detection Iterator, which will feed data and label to network
    Optional data augmentation is performed when providing batch
    Parameters:
    ----------
    imdb : Imdb
        image database
    batch_size : int
        batch size
    data_shape : int or (int, int)
        image shape to be resized
    mean_pixels : float or float list
        [R, G, B], mean pixel values
    rand_samplers : list
        random cropping sampler list, if not specified, will
        use original image only
    rand_mirror : bool
        whether to randomly mirror input images, default False
    shuffle : bool
        whether to shuffle initial image list, default False
    rand_seed : int or None
        whether to use fixed random seed, default None
    max_crop_trial : bool
        if random crop is enabled, defines the maximum trial time
        if trial exceed this number, will give up cropping
    is_train : bool
        whether in training phase, default True, if False, labels might
        be ignored
    """
    def __init__(self, imdb, batch_size, data_shape, \
                 mean_pixels=[128, 128, 128], rand_samplers=[], \
                 rand_mirror=False, shuffle=False, rand_seed=None, \
                 is_train=True, max_crop_trial=50):
        super(DetIter, self).__init__()
        self._imdb = imdb
        self.batch_size = batch_size
        if isinstance(data_shape, int):
            data_shape = (data_shape, data_shape)
        self._data_shape = data_shape
        # mean pixels reshaped to (3,1,1) so they broadcast over (3,H,W)
        self._mean_pixels = mx.nd.array(mean_pixels).reshape((3,1,1))
        if not rand_samplers:
            self._rand_samplers = []
        else:
            if not isinstance(rand_samplers, list):
                rand_samplers = [rand_samplers]
            assert isinstance(rand_samplers[0], RandSampler), "Invalid rand sampler"
            self._rand_samplers = rand_samplers
        self.is_train = is_train
        self._rand_mirror = rand_mirror
        self._shuffle = shuffle
        if rand_seed:
            np.random.seed(rand_seed) # fix random seed
        self._max_crop_trial = max_crop_trial
        self._current = 0
        self._size = imdb.num_images
        self._index = np.arange(self._size)
        self._data = None
        self._label = None
        # load one batch eagerly so provide_data/provide_label have shapes
        self._get_batch()
    @property
    def provide_data(self):
        return [(k, v.shape) for k, v in self._data.items()]
    @property
    def provide_label(self):
        if self.is_train:
            return [(k, v.shape) for k, v in self._label.items()]
        else:
            return []
    def reset(self):
        self._current = 0
        if self._shuffle:
            np.random.shuffle(self._index)
    def iter_next(self):
        return self._current < self._size
    def next(self):
        if self.iter_next():
            self._get_batch()
            data_batch = mx.io.DataBatch(data=list(self._data.values()),
                                         label=list(self._label.values()),
                                         pad=self.getpad(), index=self.getindex())
            self._current += self.batch_size
            return data_batch
        else:
            raise StopIteration
    def getindex(self):
        return self._current // self.batch_size
    def getpad(self):
        pad = self._current + self.batch_size - self._size
        return 0 if pad < 0 else pad
    def _get_batch(self):
        """
        Load data/label from dataset
        """
        batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))
        batch_label = []
        for i in range(self.batch_size):
            if (self._current + i) >= self._size:
                if not self.is_train:
                    continue
                # use padding from middle in each epoch
                idx = (self._current + i + self._size // 2) % self._size
                index = self._index[idx]
            else:
                index = self._index[self._current + i]
            # index = self.debug_index
            im_path = self._imdb.image_path_from_index(index)
            with open(im_path, 'rb') as fp:
                img_content = fp.read()
            img = mx.img.imdecode(img_content)
            gt = self._imdb.label_from_index(index).copy() if self.is_train else None
            data, label = self._data_augmentation(img, gt)
            batch_data[i] = data
            if self.is_train:
                batch_label.append(label)
        self._data = {'data': batch_data}
        if self.is_train:
            self._label = {'label': mx.nd.array(np.array(batch_label))}
        else:
            self._label = {'label': None}
    def _data_augmentation(self, data, label):
        """
        perform data augmentations: crop, mirror, resize, sub mean, swap channels...
        """
        if self.is_train and self._rand_samplers:
            rand_crops = []
            for rs in self._rand_samplers:
                rand_crops += rs.sample(label)
            num_rand_crops = len(rand_crops)
            # randomly pick up one as input data
            if num_rand_crops > 0:
                index = int(np.random.uniform(0, 1) * num_rand_crops)
                width = data.shape[1]
                height = data.shape[0]
                # crop is (xmin, ymin, xmax, ymax) in relative [0,1] coords;
                # coordinates outside [0,1] mean the crop extends past the
                # image and must be realized via padding below.
                crop = rand_crops[index][0]
                xmin = int(crop[0] * width)
                ymin = int(crop[1] * height)
                xmax = int(crop[2] * width)
                ymax = int(crop[3] * height)
                if xmin >= 0 and ymin >= 0 and xmax <= width and ymax <= height:
                    data = mx.img.fixed_crop(data, xmin, ymin, xmax-xmin, ymax-ymin)
                else:
                    # padding mode: paste the image onto a grey canvas
                    new_width = xmax - xmin
                    new_height = ymax - ymin
                    offset_x = 0 - xmin
                    offset_y = 0 - ymin
                    data_bak = data
                    data = mx.nd.full((new_height, new_width, 3), 128, dtype='uint8')
                    data[offset_y:offset_y+height, offset_x:offset_x + width, :] = data_bak
                label = rand_crops[index][1]
        if self.is_train:
            interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, \
                              cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
        else:
            interp_methods = [cv2.INTER_LINEAR]
        interp_method = interp_methods[int(np.random.uniform(0, 1) * len(interp_methods))]
        data = mx.img.imresize(data, self._data_shape[1], self._data_shape[0], interp_method)
        if self.is_train and self._rand_mirror:
            if np.random.uniform(0, 1) > 0.5:
                data = mx.nd.flip(data, axis=1)
                # mirror the box x-coordinates for real (non-padded) labels
                valid_mask = np.where(label[:, 0] > -1)[0]
                tmp = 1.0 - label[valid_mask, 1]
                label[valid_mask, 1] = 1.0 - label[valid_mask, 3]
                label[valid_mask, 3] = tmp
        # HWC -> CHW, float, mean-subtracted
        data = mx.nd.transpose(data, (2,0,1))
        data = data.astype('float32')
        data = data - self._mean_pixels
        return data, label
1060825849 | from skimage.io import imread
from skimage import img_as_float64
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
import warnings
import math
def get_MSE(I, K):
    """Return the mean squared error between two same-shaped RGB images.

    Averages the squared per-channel differences over all 3*H*W values.
    Pixel values are floats (e.g. img_as_float64 output in [0, 1]).
    """
    a = np.asarray(I, dtype=np.float64)
    b = np.asarray(K, dtype=np.float64)
    # Vectorized replacement for the original triple Python loop, which
    # also shadowed the builtin `sum`; numpy is already a dependency here.
    return float(np.mean((a - b) ** 2))
def get_PSNR(I, K):
MSE = get_MSE(I, K)
PSNR = 20*math.log10(1) - 10*math.log10(MSE)
return PSNR
def draw_image(data, title):
fig, ax = plt.subplots()
ax.imshow(data)
ax.set_title(title, fontsize=20, fontfamily='Arial')
fig.set_figwidth(6)
fig.set_figheight(6)
plt.axis('off')
plt.show()
def reback_to_image(data):
    """Reshape a flat (H*W, 3) pixel matrix back into an (H, W, 3) image.

    NOTE(review): relies on the module-level globals `x` (height) and
    `y` (width) captured when the source image was loaded — since
    x_ == x*y, int(x_/y) == x and int(x_/x) == y.  TODO: pass the target
    shape explicitly instead of depending on globals.
    """
    x_,y_ = data.shape
    im = np.reshape(data, (int(x_/y), int(x_/x), y_))
    return im
def get_middle_matrix(obj_feat_matr_middle, pixels_of_clusters):
    """Replace every pixel's colour by the mean colour of its cluster.

    Each entry of *pixels_of_clusters* is a list of [pixel_index, r, g, b]
    records; *obj_feat_matr_middle* is updated in place and returned.
    """
    for cluster in pixels_of_clusters:
        columns = np.array(cluster)
        mean_rgb = [np.mean(columns[:, 1]),
                    np.mean(columns[:, 2]),
                    np.mean(columns[:, 3])]
        for record in cluster:
            row = record[0]
            for channel in range(3):
                obj_feat_matr_middle[row][channel] = mean_rgb[channel]
    return obj_feat_matr_middle
def get_median_matrix(obj_feat_matr_median, pixels_of_clusters):
    """Replace every pixel's colour by the median colour of its cluster.

    Each entry of *pixels_of_clusters* is a list of [pixel_index, r, g, b]
    records; *obj_feat_matr_median* is updated in place and returned.
    """
    for cluster in pixels_of_clusters:
        columns = np.array(cluster)
        median_rgb = [np.median(columns[:, 1]),
                      np.median(columns[:, 2]),
                      np.median(columns[:, 3])]
        for record in cluster:
            row = record[0]
            for channel in range(3):
                obj_feat_matr_median[row][channel] = median_rgb[channel]
    return obj_feat_matr_median
def get_middle_median_matrixes(n_clusters):
    """Build two recoloured copies of the pixel matrix for the current fit.

    Groups pixels by the fitted `kmeans.labels_` (module-level) and returns
    (mean-coloured matrix, median-coloured matrix).  Also reads the
    module-level `objects_features_matrix` of shape (H*W, 3).
    """
    obj_feat_matr_middle = np.array(objects_features_matrix)
    obj_feat_matr_median = np.array(objects_features_matrix)
    pixels_for_clusters = []
    for i in range(n_clusters):
        pixels_for_clusters.append([])
    # Each grouped record is [pixel_index, r, g, b].
    for i in range(len(objects_features_matrix)):
        element = [i]
        for j in range(len(objects_features_matrix[i])):
            element.append(objects_features_matrix[i][j])
        pixels_for_clusters[kmeans.labels_[i]].append(element)
    obj_feat_matr_middle = get_middle_matrix(obj_feat_matr_middle, pixels_for_clusters)
    obj_feat_matr_median = get_median_matrix(obj_feat_matr_median, pixels_for_clusters)
    return obj_feat_matr_middle, obj_feat_matr_median
warnings.filterwarnings("ignore")
# Load the image as float64 in [0, 1] and flatten to one RGB row per pixel.
image = imread('image/parrots.jpg')
image = img_as_float64(image)
draw_image(image, 'Inital image')
x, y, z = image.shape
objects_features_matrix = np.reshape(image, (x * y, z))
PSNR_middle = []
PSNR_median = []
print('Log:\n')
# Try 8..20 clusters; for each, recolour by cluster mean/median and
# record the PSNR of both reconstructions against the original.
for n_clusters in range(8,21):
    print('Number of clusters: ' + str(n_clusters))
    kmeans = KMeans(init='k-means++', random_state=241, n_clusters=n_clusters)
    print('[MESSAGE]: Please, wait! Start get clusters...')
    kmeans.fit(objects_features_matrix)
    print('[MESSAGE]: Succesfull finish get clusters!!!')
    print('[MESSAGE]: Please, wait! Start get matrixes...')
    obj_feat_matr_middle, obj_feat_matr_median = get_middle_median_matrixes(n_clusters)
    print('[MESSAGE]: Succesfull finish get matrixes!!!')
    image_middle = reback_to_image(obj_feat_matr_middle)
    image_median = reback_to_image(obj_feat_matr_median)
    draw_image(image_middle, 'Middle-clusterization image\nNumber of clusters = ' + str(n_clusters))
    print('[MESSAGE]: Succesfull show middle-clusterization image!!!')
    draw_image(image_median, 'Median-clusterization image\nNumber of clusters = ' + str(n_clusters))
    print('[MESSAGE]: Succesfull show median-clusterization image!!!')
    print('[MESSAGE]: Please, wait! Start calculate PSRN for middle-matrix and median-matrix...')
    # PSNR_middle.append(compare_psnr(image, image_middle))
    # PSNR_median.append(compare_psnr(image, image_median))
    PSNR_middle.append(get_PSNR(image, image_middle))
    PSNR_median.append(get_PSNR(image, image_median))
    print('[MESSAGE]: Succesfull finish calculate PSRN for middle-matrix and median-matrix!!!')
    print('[MESSAGE]: For n_clusters=' + str(n_clusters) + ':')
    print('          1) PSNR_middle: ' + str(PSNR_middle[n_clusters - 8]))
    print('          2) PSNR_median: ' + str(PSNR_median[n_clusters - 8]))
    print('\n')
print('PSNR_middle: ' + str(PSNR_middle))
print('PSNR_median: ' + str(PSNR_median))
# Find the smallest cluster count (offset 8) where either PSNR exceeds 20 dB.
# NOTE(review): if no entry ever exceeds 20 dB this walks past the end of the
# lists and raises IndexError — acceptable for this experiment's data.
number_of_clusters = 8
while(PSNR_middle[number_of_clusters - 8] <= 20 and PSNR_median[number_of_clusters - 8] <= 20):
    number_of_clusters += 1
print('Minimum number of clusters, when PSNR > 20: ' + str(number_of_clusters))
# FIX: use a context manager so the file is flushed and closed; the original
# opened the handle and never closed it, relying on interpreter shutdown.
with open('results/result.txt', 'w') as f:
    f.write(str(number_of_clusters))
print('\nMESSAGE: The required information has been successfully written to files: \'results/result.txt\'')
22991327813 | import cv2
import os
from env import OUTPUT_FOLDER, join_path
from library.image import to_tk_image, apply_adjustments
class Video:
    """Wrapper around cv2.VideoCapture: plays a source into a tkinter
    container, optionally overlays face guesses and records to disk."""

    def __init__(self):
        self.path = ""
        self.video = None                   # cv2.VideoCapture after init()
        self.fps = 29
        self.delay = 33                     # milliseconds per frame (1000/fps)
        self.process_this_frame = True      # run recognition on alternate frames
        self.valid = False
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.dimension = (0, 0)             # (width, height) of the source
        self.video_writer = None
        self.video_name = "record.avi"
        self.font = cv2.FONT_HERSHEY_DUPLEX

    def init(self, source):
        """Open *source* (file path or capture index) and cache its metadata."""
        self.path = source
        if os.path.exists(source):
            self.video_name = os.path.basename(source)
            self.video_name = "{}.avi".format(self.video_name[0: -4])
        try:
            self.video = cv2.VideoCapture(source)
            self.dimension = (int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH)),
                              int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            self.valid = self.video.isOpened()
            if not self.valid:
                print("Video {} can't open".format(source))
                return
            self.fps = self.video.get(cv2.CAP_PROP_FPS)
            self.delay = int(1000 / self.fps)
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit etc.
            print("Error in opening video")

    def play(self, container, window, take_guess=None, record=False):
        """Stream frames into *container*, optionally labelling faces via
        take_guess(frame) -> (locations, names) and recording to disk.

        Returns False when the stream ends mid-read; otherwise stops
        via stop() after the loop exits.
        """
        if record:
            # FIX: VideoWriter's third argument is the frame rate; the old
            # code passed self.delay (milliseconds per frame) instead.
            self.video_writer = cv2.VideoWriter(join_path(OUTPUT_FOLDER, self.video_name),
                                                self.fourcc, self.fps, self.dimension)
        # FIX: isOpened is a method; the old `self.video.isOpened` (no
        # parentheses) was always truthy and never checked the capture state.
        while self.valid and self.video.isOpened():
            is_playing, frame = self.video.read()
            if not is_playing:
                return False
            frame = apply_adjustments(frame)
            locations = []
            names = []
            # Run the (expensive) recognition callback on every other frame.
            if self.process_this_frame and take_guess:
                locations, names = take_guess(frame)
            self.process_this_frame = not self.process_this_frame
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if self.video_writer:
                for (top, right, bottom, left), name in zip(locations, names):
                    if name == "Ivana Alawi":
                        name = "Nambawan"
                    # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                    top *= 4
                    right *= 4
                    bottom *= 4
                    left *= 4
                    # Draw a box around the face
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                    # Draw a label with a name below the face
                    cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                    cv2.putText(frame, name, (left + 6, bottom - 6), self.font, 1.0, (255, 255, 255), 1)
                self.video_writer.write(frame)
            container.update(to_tk_image(rgb_frame, (container.width, container.height)))
            window.update()
            # NOTE(review): waitKey with an argument <= 0 blocks until a key
            # press, so sources with fps >= 100 (delay <= 10) would stall —
            # confirm expected frame rates before relying on `delay - 10`.
            cv2.waitKey(self.delay - 10)
        self.stop()

    def stop(self):
        """Release the capture and any writer; safe to call repeatedly."""
        print("stopping video")
        self.valid = False
        if self.video:
            self.video.release()
            self.video = None
        if self.video_writer:
            self.video_writer.release()
            self.video_writer = None
20774509571 | import tkinter
from Scripts.ex1 import get_u
__authors__ = ["Mihaila Alexandra Ioana", "Dupu Robert-Daniel"]
__version__ = "1.1"
__status__ = "Dev"
def plus_asociativ(x, y, z):
    """Return True when floating-point addition of x, y, z is associative.

    I.e. checks whether (x + y) + z == x + (y + z); with floats this can be
    False due to rounding. Returning the comparison directly replaces the
    redundant `if cond: return True / return False` pattern.
    """
    return (x + y) + z == x + (y + z)
def inmultire_asociativa(x, y, z):
    """Increment x by 1 until (x * y) * z != x * (y * z), then return that x.

    Demonstrates that float multiplication is not associative. Note: if no
    counterexample is ever reached for the given y, z, this loops forever.
    """
    while True:
        if (x * y) * z != x * (y * z):
            return x
        x += 1
def start_app(punctul_a, punctul_b):
    """Open a small Tk window showing the two result lines, one per label.

    Blocks in the Tk main loop until the window is closed.
    """
    root = tkinter.Tk()
    root.title("Exercitiul nr. 2")
    root.geometry("450x100")
    frame = tkinter.Frame(root)
    frame.pack()
    for text in (punctul_a, punctul_b):
        label = tkinter.Label(frame, text=text)
        label.pack()
    root.mainloop()
def main():
    """Build the result strings for both sub-exercises and display them.

    Point a: is float addition associative for (1.0, u, u)?
    Point b: smallest integer-step x (from 1.0) where float multiplication
    by (u, u) stops being associative.
    """
    u = get_u()
    x = 1.0
    y = z = u
    if plus_asociativ(x, y, z):
        punctul_a = "a) ({0} + {1}) + {2} == {0} + ({1} + {2}) -> adevarat".format(x, y, z)
    else:
        punctul_a = "a) ({0} + {1}) + {2} != {0} + ({1} + {2}) -> fals".format(x, y, z)
    punctul_b = "b) ({0} * {1}) * {2} != {0} * ({1} * {2}) -> x = {0}, y = {1}, z = {2}". \
        format(inmultire_asociativa(x, y, z), y, z)
    start_app(punctul_a, punctul_b)
if __name__ == '__main__':
main()
| alexandra-mihaila/CN | Tema1/Scripts/ex2.py | ex2.py | py | 1,307 | python | en | code | 1 | github-code | 13 |
12003522576 | from os import sep
import sys
from collections import deque
input = sys.stdin.readline
def dfs(graph, v, visited=None, order=None):
    """Recursive depth-first traversal from vertex v, recording visit order.

    Args:
        graph: adjacency list; graph[u] lists the neighbors of u.
        v: start vertex.
        visited: mutable bool list marking seen vertices. Defaults to the
            module-level `dfs_visited` for backward compatibility.
        order: list that receives vertices in visit order. Defaults to the
            module-level `dfs_list` for backward compatibility.
    """
    if visited is None:
        visited = dfs_visited
    if order is None:
        order = dfs_list
    visited[v] = True
    order.append(v)
    for neighbor in graph[v]:
        if not visited[neighbor]:
            dfs(graph, neighbor, visited, order)
def bfs(graph, v, visited=None, order=None):
    """Breadth-first traversal from vertex v, recording visit order.

    Args:
        graph: adjacency list; graph[u] lists the neighbors of u.
        v: start vertex.
        visited: mutable bool list marking seen vertices. Defaults to the
            module-level `bfs_visited` for backward compatibility.
        order: list that receives vertices in visit order. Defaults to the
            module-level `bfs_list` for backward compatibility.
    """
    if visited is None:
        visited = bfs_visited
    if order is None:
        order = bfs_list
    queue = deque([v])
    visited[v] = True
    while queue:
        current = queue.popleft()
        order.append(current)
        for neighbor in graph[current]:
            if not visited[neighbor]:
                queue.append(neighbor)
                # Mark on enqueue so a vertex is never queued twice.
                visited[neighbor] = True
# Read the graph (n vertices, m edges, start vertex v), run DFS and BFS from
# v, and print each visiting order on its own line (Baekjoon problem 1260).
n, m, v = map(int, input().strip().split())
graph = [[] for _ in range(n + 1)]
# graph = [[]] * (m + 1)
# graph = [[] * (m + 1)]
# The two commented attempts above do not work: '*' replicates a reference to
# a single inner list, so every row would alias the same adjacency list.
for i in range(m):
    a, b = map(int, input().strip().split())
    graph[a].append(b)
    graph[b].append(a)
dfs_visited = [False] * (n + 1)
bfs_visited = [False] * (n + 1)
# graph.sort() would only reorder the outer list; each adjacency list must be
# sorted individually so neighbors are visited in ascending order.
for i in range(1, len(graph)):
    graph[i].sort()
dfs_list = []
bfs_list = []
dfs(graph, v)
bfs(graph, v)
print(*dfs_list, sep=' ')
print(*bfs_list, sep=' ')
31077664287 | from fastapi import FastAPI
from server.routes.peopleRoute import PeopleRouter
# Application entry point: mounts the people CRUD router under /people.
app = FastAPI()
app.include_router(PeopleRouter, tags=["People"], prefix="/people")
@app.get("/", tags=["Root"])
async def read_root():
    """Welcome / health-check endpoint at the API root."""
    return {"message": "Hello there people, add yourself :)"}
| tanvinsharma/fastapi_sample_app | server/app.py | app.py | py | 280 | python | en | code | 0 | github-code | 13 |
37777338021 | # -*- coding: utf-8 -*-
# This code shows an example of text translation from English to Simplified-Chinese.
# This code runs on Python 2.7.x and Python 3.x.
# You may install `requests` to run this code: pip install requests
# Please refer to `https://api.fanyi.baidu.com/doc/21` for complete api document
import requests
import random
from hashlib import md5
def fanyi(query, from_lang, to_lang):
    """Translate `query` with the Baidu Translate HTTP API.

    Args:
        query: text to translate; may contain '\\n' to split segments.
        from_lang: source language code (see Baidu API doc 21).
        to_lang: target language code.

    Returns:
        A string of alternating "source\\ntranslation\\n" lines, one pair per
        translated segment ('' when the API returns no segments).

    Raises:
        KeyError: when the API responds with an error payload (no
            'trans_result' key, e.g. bad credentials).
    """
    # NOTE: credentials must be filled in before this will work.
    appid = ''
    appkey = ''
    endpoint = 'http://api.fanyi.baidu.com'
    path = '/api/trans/vip/translate'
    url = endpoint + path

    # Sign = MD5(appid + query + salt + appkey), per the API contract.
    def make_md5(s, encoding='utf-8'):
        return md5(s.encode(encoding)).hexdigest()

    salt = random.randint(32768, 65536)
    sign = make_md5(appid + query + str(salt) + appkey)

    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    payload = {'appid': appid, 'q': query, 'from': from_lang, 'to': to_lang,
               'salt': salt, 'sign': sign}
    r = requests.post(url, params=payload, headers=headers)
    result = r.json()
    # Build "src\ndst\n" pairs; join once instead of the removed O(n^2)
    # string concatenation (also drops leftover debug prints and the unused
    # loop counter).
    lines = []
    for item in result['trans_result']:
        lines.append(item['src'])
        lines.append(item['dst'])
    return '\n'.join(lines) + '\n' if lines else ''
if __name__ == '__main__':
    # BUG FIX: fanyi() takes three arguments; the old call fanyi(query)
    # raised TypeError. Supply source/target languages (English -> Chinese,
    # matching the module header's stated example).
    query = 'hello world \n form-urlencoded'
    fanyi(query, 'en', 'zh')
3944522075 |
# If any divisor exists the number is not prime, so break out of the inner
# loop early. `check` stays True only for primes; collect them, then print
# their sum and the smallest one, or -1 when [M, N] contains no prime.
M = int(input())
N = int(input())
prime = []
for i in range(M, N+1):
    if i != 1:  # 1 is not prime; keep i only if no j in 2..i-1 divides it
        check = True
        for j in range(2, i):
            if i % j == 0:
                check = False
                break
        if check:
            prime.append(i)
if len(prime) == 0:
    print(-1)
else:
    print(sum(prime))
    print(min(prime))
2574819803 | from bs4 import BeautifulSoup
# Crawl the Douban Top-250 movie chart: 10 pages of 25 titles each.
movie_list=[]
# NOTE(review): movie_list is never filled or used — titles are only printed.
import requests
html = "https://movie.douban.com/top250?start="
for i in range(0,10):
    url = html+str(i*25)
    r = requests.get(url,timeout=10)
    soup = BeautifulSoup(r.text,"html.parser")
    # Each movie title lives inside <div class="hd"><a><span>title</span>…
    title = soup.find_all("div",class_="hd")
    for each in title:
        print(each.a.span.text)
| 1208606234/Python | 豆瓣250爬虫.py | 豆瓣250爬虫.py | py | 327 | python | en | code | 2 | github-code | 13 |
70865563537 |
import os
from PIL import Image
import numpy as np
# For every image in in_dir: center-crop to `fraction` of each side, zero-pad
# the crop to a square, resize to 299x299 and save as PNG into out_dir.
in_dir = '/home/haojieyuan/Data/caltech101/eval_imgs'
out_dir = '/home/haojieyuan/Data/caltech101/eval_imgs_resized'
#in_dir = '/home/haojieyuan/Data/oxfordFlowers/eval_imgs'
#out_dir = '/home/haojieyuan/Data/oxfordFlowers/eval_imgs_resized'
fraction = 0.875
for image_name in os.listdir(in_dir):
    target_height = 299.0
    target_width = 299.0
    im = Image.open(os.path.join(in_dir, image_name))
    im = im.convert('RGB')
    im_array = np.array(im) # height, width, channel
    h, w ,c = im_array.shape
    h = float(h)
    w = float(w)
    # Central crop keeping `fraction` of the height/width.
    box_h_start = int((h - h*fraction)/2)
    box_w_start = int((w - w*fraction)/2)
    h_size = int(h - 2*box_h_start)
    w_size = int(w - 2*box_w_start)
    croped = im_array[box_h_start:box_h_start+h_size, box_w_start:box_w_start+w_size, :]
    h, w, c = croped.shape
    h = float(h)
    w = float(w)
    # Pad the shorter side with black to make the crop square (preserves
    # aspect ratio when resizing below).
    if h < w:
        pad_h = True
    else:
        pad_h = False
    if pad_h:
        pad_upper = int((w-h)/2)
    else:
        pad_left = int((h-w)/2)
    pad_shape = int(max([h, w]))
    before_resize = np.zeros([pad_shape, pad_shape, 3])
    if pad_h:
        before_resize[pad_upper:pad_upper+h_size, :, :] = croped
    else:
        before_resize[:, pad_left:pad_left+w_size, :] = croped
    #ratio = max([h/target_height, w/target_width])
    #resized_h = h/ratio
    #resized_w = w/ratio
    #padding_h = (target_height-resized_h)/2.0
    #padding_w = (target_width -resized_w)/2.0
    im_out = Image.fromarray(np.uint8(before_resize))
    im_out = im_out.resize((int(target_height), int(target_width)))
    out_path = os.path.join(out_dir, image_name)
    im_out.save(out_path, 'PNG')
| HaojieYuan/autoAdv | benchmark/fine_grain_datsets/eval/preprocess_imgs.py | preprocess_imgs.py | py | 1,724 | python | en | code | 1 | github-code | 13 |
72941912978 | from collections import defaultdict
from typing import List
class Solution:
    def numPairsDivisibleBy60(self, time: List[int]) -> int:
        """Count pairs (i, j), i < j, with (time[i] + time[j]) % 60 == 0.

        Same remainder-counting idea as "Two Sum": for each song, how many
        earlier songs have the complementary remainder mod 60.
        Time O(n), space O(1) (fixed 60-slot table).
        """
        remainder_counts = [0] * 60
        pairs = 0
        for duration in time:
            r = duration % 60
            # (60 - r) % 60 maps r == 0 to the 0 bucket.
            pairs += remainder_counts[(60 - r) % 60]
            remainder_counts[r] += 1
        return pairs
| isabellakqq/Alogorithm | HashMap/1010.PairsofSongsWithTotalDurationsDivisibleby60.py | 1010.PairsofSongsWithTotalDurationsDivisibleby60.py | py | 468 | python | en | code | 2 | github-code | 13 |
33375873754 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit, minimize
def true_fun(x):
    """Ground-truth target the noisy samples are drawn from: cos(1.5*pi*x)."""
    return np.cos(1.5 * np.pi * x)
def main():
    """Fit degree-1/4/15 polynomials to noisy cos samples three ways and plot.

    Panels: numpy.polyfit (least squares), scipy.optimize.curve_fit with
    explicit polynomial functions, and scipy.optimize.minimize on the MSE.
    """
    n_samples = 30
    np.random.seed(0)  # reproducible sample noise
    x = np.sort(np.random.rand(n_samples))
    y = true_fun(x) + np.random.randn(n_samples) * 0.1
    x_test = np.linspace(0, 1, 100)
    plt.figure(figsize=(14, 5))
    degrees = [1, 4, 15]
    ax = plt.subplot(1, len(degrees), 1)
    # Mode 1 - using least squares
    for degree in degrees:
        p = np.polyfit(x, y, degree)
        z = np.poly1d(p)
        plt.plot(x_test, z(x_test), label=f"Poly degree={degree}")
    plt.plot(x_test, true_fun(x_test), label="True function")
    plt.scatter(x, y, color='b', label="Samples")
    plt.title("Polyfit")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.xlim([0,1])
    plt.ylim([-2,2])
    plt.legend()
    # Mode 2 - curve fitting (each degree needs its own explicit function)
    ax = plt.subplot(1, len(degrees), 2)
    def poly1(x, a, b):
        return a * x + b
    def poly4(x, a, b, c, d, e):
        return a * x**4 + b * x**3 + c * x**2 + d * x + e
    def poly15(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15):
        return a15*x**15+a14*x**14+a13*x**13+a12*x**12+a11*x**11+a10*x**10+a9*x**9+a8*x**8 + \
            a7*x**7+a6*x**6+a5*x**5+a4*x**4+a3*x**3+a2*x**2+a1*x+a0
    popt, pcov = curve_fit(poly1, x, y)
    plt.plot(x_test, poly1(x_test, *popt), label="Poly degree=1")
    popt, pcov = curve_fit(poly4, x, y)
    plt.plot(x_test, poly4(x_test, *popt), label="Poly degree=4")
    popt, pcov = curve_fit(poly15, x, y)
    plt.plot(x_test, poly15(x_test, *popt), label="Poly degree=15")
    plt.plot(x_test, true_fun(x_test), label="True function")
    plt.scatter(x, y, color='b', label="Samples")
    plt.title("Scipy.curve_fit")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.xlim([0,1])
    plt.ylim([-2,2])
    plt.legend()
    # Mode 3 - scipy minimize on the mean-squared error of the coefficients
    ax = plt.subplot(1, len(degrees), 3)
    def loss(p, func):
        ypred = func(p)
        return np.mean(np.square(ypred(x) - y))
    for degree in degrees:
        res = minimize(loss, np.zeros(degree+1), args=(np.poly1d), method='BFGS')
        plt.plot(x_test, np.poly1d(res.x)(x_test), label=f"Poly degree={degree}")
    plt.plot(x_test, true_fun(x_test), label="True function")
    plt.scatter(x, y, color='b', label="Samples")
    plt.title("Scipy.minimize")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.xlim([0,1])
    plt.ylim([-2,2])
    plt.legend()
    plt.show()
if __name__ == "__main__":
main()
| scarrazza/DL2023 | Lecture_2/solutions/exercise7.py | exercise7.py | py | 2,585 | python | en | code | 1 | github-code | 13 |
34758867499 | """
PyTorch implementation of:
Learning Deep Features for Discriminative Localization
"""
import argparse
import copy
import os
import cv2
import numpy as np
import torchvision
import torch
from PIL import Image
from torchvision.models.resnet import resnet152, resnet18, resnet50
import ImageNetLabels
model_name_to_func = {
"resnet18": torchvision.models.resnet18,
"resnet34": torchvision.models.resnet34,
"resnet50": torchvision.models.resnet50,
"resnet101": torchvision.models.resnet101,
"resnet152": torchvision.models.resnet152,}
def parse_args():
    """Parse the CLI options and validate the requested model name.

    --model_name must be one of the keys of model_name_to_func; asserts
    otherwise. --save_gif is an optional flag.
    """
    parser = argparse.ArgumentParser(
        "Class activation maps in pytorch")
    parser.add_argument('--model_name', type=str,
                        help='name of model to use', required=True)
    parser.add_argument('--input_image', type=str,
                        help='path to input image', required=True)
    parser.add_argument('--save_gif', default=False,
                        help='save a gif animation', required=False, action='store_true')
    args = parser.parse_args()
    supported = list(model_name_to_func.keys())
    assert args.model_name in supported, 'Model [%s] not found in supported models in [%s]' % (
        args.model_name, supported, )
    return args
class ReshapeModule(torch.nn.Module):
    """Drop-in replacement for global average pooling that instead flattens
    a (B, C, H, W) feature map into an (H*W, B*C) matrix, so each spatial
    location becomes one row fed to the classifier (yielding per-location
    class scores, i.e. class activation maps).
    """

    def __init__(self):
        super().__init__()

    def forward(self, feature_map):
        batch, channels, height, width = feature_map.shape
        flattened = feature_map.view(batch * channels, height * width)
        return flattened.permute(1, 0)
def modify_model_cam(model):
    """Swap the network's final global-average-pool for a ReshapeModule so a
    forward pass yields per-location class scores (class activation maps).

    Args:
        model: pytorch model whose second-to-last layer is an
            AdaptiveAvgPool2d (e.g. torchvision resnets).

    Raises:
        ValueError: if no AdaptiveAvgPool2d layer exists.

    Returns:
        The same model object, modified in place.
    """
    layer_names = [name for name, _ in model.named_modules()]
    pool_names = [name for name, module in model.named_modules()
                  if isinstance(module, torch.nn.AdaptiveAvgPool2d)]
    if not pool_names:
        raise ValueError('Model does not have a Global Average Pool layer')
    last_pool = pool_names[-1]
    # The trick only works when the pool feeds directly into the classifier.
    assert layer_names.index(last_pool) == len(layer_names) - 2, \
        'Global Average Pool is not second last layer'
    model._modules[last_pool] = ReshapeModule()
    return model
def infer_with_cam_model(cam_model, image):
    """Run a gradient-free forward pass and rank the top-10 class indices.

    Args:
        cam_model: model modified with modify_model_cam() (forward returns
            per-location class activations).
        image: preprocessed input tensor.

    Returns:
        (activations, top_indices): the raw per-location class activations
        and the indices of the 10 highest classes of the location-averaged
        activation.
    """
    with torch.no_grad():
        activations = cam_model(image)
        mean_activation = torch.mean(activations, dim=0)
        _, top_indices = torch.topk(mean_activation, k=10, dim=-1)
    return activations, top_indices
def postprocess_cam(cam_image, image):
    """Turn a flat class-activation vector into an RGB JET heatmap sized
    like `image`.

    Args:
        cam_image: 1-D activation vector of length s*s (one value per
            spatial cell; must describe a square map).
        image: numpy image whose height/width the heatmap must match.

    Returns:
        RGB uint8 heatmap array with the same height/width as `image`.
    """
    height, width = image.shape[0:2]
    side = int(np.sqrt(cam_image.shape[0]))
    assert cam_image.shape[0] == side * side, \
        'Only activation maps that are square are supported at the moment'
    # Reshape to a square map and min-max normalise into [0, 255].
    heat = np.reshape(cam_image, [side, side])
    heat = heat - np.min(heat)
    heat = (heat / np.max(heat) * 255).astype(np.uint8)
    # Upscale to the image size and colorize.
    colored = cv2.applyColorMap(cv2.resize(heat, (width, height)), cv2.COLORMAP_JET)
    # OpenCV emits BGR; flip channel order to RGB (PIL output is RGB).
    return colored[:, :, ::-1]
if __name__ == '__main__':
    # CLI driver: load a pretrained resnet, compute the class activation map
    # for the input image, and save the heatmap overlay (optionally as a GIF
    # animation fading the overlay in and out).
    args = parse_args()
    # preprocessing for imagenet models
    preprocess_imagenet = torchvision.transforms.Compose([
        torchvision.transforms.Resize(256),
        torchvision.transforms.CenterCrop(224),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    # run on cuda if possible
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # fetch the model and apply cam modification (deepcopy keeps the original
    # classifier intact)
    orig_model = model_name_to_func[args.model_name](pretrained=True)
    orig_model.eval()
    class_act_map_model = modify_model_cam(copy.deepcopy(orig_model))
    class_act_map_model.to(device)
    # load image, preproceess
    filename = args.input_image
    input_image_pil = Image.open(filename)
    input_image = preprocess_imagenet(input_image_pil).unsqueeze(0)
    input_image = input_image.to(device)
    input_image_raw_np = np.asarray(input_image_pil)
    # run inference with input image; selidx picks which of the top-10
    # predictions to visualize (0 = most probable)
    selidx=0
    output_cam_acti, output_cam_idx = infer_with_cam_model(
        class_act_map_model, input_image)
    print('Prediction [%s] at index [%d]' % (
        ImageNetLabels.idx_to_class[output_cam_idx[selidx].item()], output_cam_idx[selidx]))
    # Column of the selected class = one activation value per spatial cell.
    cam_image_raw = output_cam_acti[:, output_cam_idx[selidx].item()].cpu().detach().numpy()
    cam_image_processed = postprocess_cam(
        cam_image_raw, input_image_raw_np)
    # overlay on top of original image (50/50 alpha blend)
    alpha = 0.5
    cam_image_overlayed = (1-alpha) * input_image_raw_np + alpha * cam_image_processed
    # save
    Image.fromarray(cam_image_overlayed.astype(np.uint8)).save(
        os.path.join('results', os.path.basename(args.input_image)))
    # create gif animation if required: ramp alpha 0 -> 0.49 and back down,
    # then shrink frames so the longest side is ~300 px
    if args.save_gif:
        cam_image_overlayed_gif = []
        for al in [x/100. for x in range(50)]:
            cam_image_overlayed_gif.append((1-al) * input_image_raw_np + al * cam_image_processed)
        for al in reversed([x/100. for x in range(50)]):
            cam_image_overlayed_gif.append((1-al) * input_image_raw_np + al * cam_image_processed)
        factor = min([300./x for x in cam_image_overlayed_gif[0].shape[0:2]])
        cam_image_overlayed_gif = [Image.fromarray(x.astype(np.uint8)).resize([int(factor * s) for s in reversed(x.shape[0:2])])
                                   for x in cam_image_overlayed_gif]
        cam_image_overlayed_gif[0].save(os.path.join('results', os.path.basename(args.input_image).split('.')[
            0] + '.gif'), save_all=True, append_images=cam_image_overlayed_gif[1:], optimize=True, duration=40, loop=0)
| adeeplearner/ClassActivationMaps | class_activation_map.py | class_activation_map.py | py | 7,132 | python | en | code | 0 | github-code | 13 |
16755353865 | """Test depolarizing."""
import numpy as np
from toqito.channel_ops import apply_channel
from toqito.channels import depolarizing
def test_depolarizing_complete_depolarizing():
    """Maps every density matrix to the maximally-mixed state."""
    rho = np.array(
        [[1 / 2, 0, 0, 1 / 2], [0, 0, 0, 0], [0, 0, 0, 0], [1 / 2, 0, 0, 1 / 2]]
    )
    expected = np.identity(4) / 4
    actual = apply_channel(rho, depolarizing(4))
    np.testing.assert_equal(np.all(np.isclose(expected, actual)), True)
def test_depolarizing_partially_depolarizing():
    """The partially depolarizing channel for `p = 0.5`."""
    rho = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
    p = 0.5
    actual = apply_channel(rho, depolarizing(4, p))
    # Convex mix of the maximally-mixed state and the input.
    expected = (1 - p) * np.trace(rho) * np.identity(4) / 4 + p * rho
    np.testing.assert_equal(np.all(np.isclose(expected, actual)), True)
| vprusso/toqito | toqito/channels/tests/test_depolarizing.py | test_depolarizing.py | py | 1,073 | python | en | code | 118 | github-code | 13 |
19945505372 | #!/bin/env python3
import subprocess
import json
import re
def PortList():
    """Collect listening TCP ports and their owning process names via netstat.

    Returns:
        dict mapping port number (str) -> process name (str).
    """
    # NOTE(review): the '%s' below is never substituted (no % formatting is
    # applied), so egrep filters out lines containing the literal text '%s' —
    # likely a leftover placeholder for an exclusion pattern; confirm intent.
    CMD = "sudo netstat -pntl | awk '{print $4,$7}'|grep [0-9] |egrep -vw '%s'"
    Result_str = subprocess.getoutput(CMD)
    #print(Result_str)
    tmp_list = Result_str.split("\n")
    #print(tmp_list)
    port_dict = {}
    for line in tmp_list:
        # print(line)
        # Match "<addr>:<port> ... <pid>/<name>" for loopback, IPv6-any and
        # IPv4-any binds.
        PORT_REG = re.search(r"(127.0.0.1:|:::|0.0.0.0:)(\d+).+\d+/(\S+)",line)
        # print(PORT_REG)
        if PORT_REG is not None:
            match_line = (PORT_REG.groups())
            port_dict [ match_line[1]] = match_line[2]
    return port_dict
if __name__ == "__main__":
    # Emit Zabbix low-level-discovery JSON for the listening ports.
    # NOTE(review): the dict key is the port and the value the process name,
    # so {#PNAME}/{#PPORT} appear swapped — preserved as-is; confirm against
    # the Zabbix template.
    listening = PortList()
    discovery = [{'{#PNAME}': port, '{#PPORT}': proc}
                 for port, proc in listening.items()]
    print(json.dumps({'data': discovery}, sort_keys=True, indent=4, separators=(',', ':')))
| cuijianzhe/work_scripts | port_discovery.py | port_discovery.py | py | 852 | python | en | code | 3 | github-code | 13 |
4502167142 | from TreeNode import TreeNode
def pathSumRecursive(root, targetSum):
    """Return every root-to-leaf path whose node values sum to targetSum.

    Recursive DFS using a single shared path buffer with backtracking;
    left subtree is explored before right, so result order is left-first.
    """
    found = []

    def walk(node, remaining, path):
        if not node:
            return
        path.append(node.val)
        is_leaf = not node.left and not node.right
        if is_leaf and node.val == remaining:
            found.append(path.copy())
        else:
            walk(node.left, remaining - node.val, path)
            walk(node.right, remaining - node.val, path)
        path.pop()  # backtrack

    walk(root, targetSum, [])
    return found
def pathSumIterative(root, targetSum):
    """Iterative variant of pathSum: explicit stack of
    (node, remaining_sum, path_so_far) tuples.

    LIFO order means right subtrees surface before left ones, so paths come
    out in the reverse order of the recursive version.
    """
    if not root:
        return []
    found = []
    pending = [(root, targetSum, [root.val])]
    while pending:
        node, remaining, path = pending.pop()
        is_leaf = not node.left and not node.right
        if is_leaf and remaining == node.val:
            found.append(path)
        if node.left:
            pending.append((node.left, remaining - node.val, path + [node.left.val]))
        if node.right:
            pending.append((node.right, remaining - node.val, path + [node.right.val]))
    return found
# Demo: build the sample tree
#            1
#           / \
#          7   9
#         / \ / \
#        4  5 2  7
# and print all root-to-leaf paths summing to 12 ([1,7,4] and [1,9,2]).
Node4 = TreeNode(val=4)
Node5 = TreeNode(val=5)
Node2 = TreeNode(val=2)
Node7 = TreeNode(val=7)
Node7_1 = TreeNode(val=7,left=Node4,right=Node5)
Node9 = TreeNode(val=9,left=Node2,right=Node7)
root = TreeNode(val=1, left=Node7_1, right=Node9)
print(pathSumRecursive(root,12))
10438620456 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from src.CustomWidget import MySettingTableModel
from uifiles.Ui_parseANNT_settings import Ui_ParAnnt_settings
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from uifiles.Ui_parseANNT import Ui_parseANNT
from src.factory import Factory, WorkThread
import sys
import os
import re
import traceback
import inspect
from win32com import client as wc
from collections import OrderedDict
class ParANNT_settings(QDialog, Ui_ParAnnt_settings, object):
    """Settings dialog for the annotation parser.

    Lets the user view and edit the three gene-name replacement tables
    ("tRNA Abbreviation", "Protein Gene Full Name", "Name From Word") that
    Parse_annotation consumes; edits are persisted to an INI file via
    QSettings under the key "extract listed gene".
    """

    def __init__(
            self,
            parent=None):
        """Build the dialog, load persisted settings and show the first table."""
        super(ParANNT_settings, self).__init__(parent)
        # self.thisPath = os.path.dirname(os.path.realpath(__file__))
        self.factory = Factory()
        self.thisPath = self.factory.thisPath
        self.setupUi(self)
        # Persist dialog settings to an INI file next to the program.
        self.parseANNT_settings = QSettings(
            self.thisPath +
            '/settings/parseANNT_settings.ini',
            QSettings.IniFormat)
        # File only, no fallback to the registry.
        self.parseANNT_settings.setFallbacksEnabled(False)
        # Load the application-wide stylesheet.
        with open(self.thisPath + os.sep + 'style.qss', encoding="utf-8", errors='ignore') as f:
            self.qss_file = f.read()
        self.setStyleSheet(self.qss_file)
        # Give the table pane more room than the category list.
        self.splitter.setStretchFactor(1, 7)
        items = ["tRNA Abbreviation",
                 "Protein Gene Full Name",
                 "Name From Word"
                 ]
        self.listWidget.addItems(items)
        self.listWidget.itemClicked.connect(self.display_table)
        self.display_table(self.listWidget.item(0))

    @pyqtSlot()
    def on_pushButton_clicked(self):
        """
        add: append an empty row to the currently displayed table.
        """
        currentModel = self.tableView.model()
        if currentModel:
            currentData = currentModel.arraydata
            header = currentModel.headerdata
            currentModel.layoutAboutToBeChanged.emit()
            length = len(header)
            currentData.append([""] * length)
            currentModel.layoutChanged.emit()
            self.tableView.scrollToBottom()

    @pyqtSlot()
    def on_pushButton_2_clicked(self):
        """
        delete: remove every selected row from the currently displayed table.
        """
        indices = self.tableView.selectedIndexes()
        currentModel = self.tableView.model()
        if currentModel and indices:
            currentData = currentModel.arraydata
            # Delete from the bottom up so earlier removals do not shift
            # the row numbers still to be removed.
            rows = sorted(set(index.row() for index in indices), reverse=True)
            for row in rows:
                currentModel.layoutAboutToBeChanged.emit()
                currentData.pop(row)
                currentModel.layoutChanged.emit()

    def depositeData(self):
        """Write the currently displayed table back into the persisted settings."""
        name = self.listWidget.currentItem().text()
        dict_data = self.parseANNT_settings.value(
            "extract listed gene")
        currentModel = self.tableView.model()
        array = currentModel.arraydata
        dict_data[name] = array
        self.parseANNT_settings.setValue(
            "extract listed gene", dict_data)

    def display_table(self, listItem):
        """Show the replacement table for the clicked category.

        Falls back to (and persists) the built-in defaults below when no
        saved settings exist yet.
        """
        listItem.setSelected(True)
        # self.listWidget.setItemSelected(listItem, True)
        name = listItem.text()
        # Built-in default replacement tables: [old name, new name] pairs.
        ini_dict_data = {"tRNA Abbreviation": [["tRNA-Ala", "A"],
                                               ["tRNA-Cys", "C"],
                                               ["tRNA-Asp", "D"],
                                               ["tRNA-Glu", "E"],
                                               ["tRNA-Phe", "F"],
                                               ["tRNA-Gly", "G"],
                                               ["tRNA-His", "H"],
                                               ["tRNA-Ile", "I"],
                                               ["tRNA-Lys", "K"],
                                               ["tRNA-Leu", "L"],
                                               ["tRNA-Leu", "L"],
                                               ["tRNA-Met", "M"],
                                               ["tRNA-Asn", "N"],
                                               ["tRNA-Pro", "P"],
                                               ["tRNA-Gln", "Q"],
                                               ["tRNA-Arg", "R"],
                                               ["tRNA-Ser", "S"],
                                               ["tRNA-Ser", "S"],
                                               ["tRNA-Thr", "T"],
                                               ["tRNA-Val", "V"],
                                               ["tRNA-Trp", "W"],
                                               ["tRNA-Tyr", "Y"]],
                         "Protein Gene Full Name": [["atp6", "ATP synthase F0 subunit 6"],
                                                    ["atp8", "ATP synthase F0 subunit 8"],
                                                    ["cox1", "cytochrome c oxidase subunit 1"],
                                                    ["cox2", "cytochrome c oxidase subunit 2"],
                                                    ["cox3", "cytochrome c oxidase subunit 3"],
                                                    ["cytb", "cytochrome b"],
                                                    ["nad1", "NADH dehydrogenase subunit 1"],
                                                    ["nad2", "NADH dehydrogenase subunit 2"],
                                                    ["nad3", "NADH dehydrogenase subunit 3"],
                                                    ["nad4", "NADH dehydrogenase subunit 4"],
                                                    ["nad4L", "NADH dehydrogenase subunit 4L"],
                                                    ["nad5", "NADH dehydrogenase subunit 5"],
                                                    ["nad6", "NADH dehydrogenase subunit 6"]],
                         "Name From Word": [["trnY(gta)", "tRNA-Tyr(gta)"],
                                            ["trnY", "tRNA-Tyr(gta)"],
                                            ["trnW(tca)", "tRNA-Trp(tca)"],
                                            ["trnW", "tRNA-Trp(tca)"],
                                            ["trnV(tac)", "tRNA-Val(tac)"],
                                            ["trnV", "tRNA-Val(tac)"],
                                            ["trnT(tgt)", "tRNA-Thr(tgt)"],
                                            ["trnT", "tRNA-Thr(tgt)"],
                                            ["trnR(tcg)", "tRNA-Arg(tcg)"],
                                            ["trnR", "tRNA-Arg(tcg)"],
                                            ["trnQ(ttg)", "tRNA-Gln(ttg)"],
                                            ["trnQ", "tRNA-Gln(ttg)"],
                                            ["trnP(tgg)", "tRNA-Pro(tgg)"],
                                            ["trnP", "tRNA-Pro(tgg)"],
                                            ["trnN(gtt)", "tRNA-Asn(gtt)"],
                                            ["trnN", "tRNA-Asn(gtt)"],
                                            ["trnM(cat)", "tRNA-Met(cat)"],
                                            ["trnM", "tRNA-Met(cat)"],
                                            ["trnK(ctt)", "tRNA-Lys(ctt)"],
                                            ["trnK", "tRNA-Lys(ctt)"],
                                            ["trnI(gat)", "tRNA-Ile(gat)"],
                                            ["trnI", "tRNA-Ile(gat)"],
                                            ["trnH(gtg)", "tRNA-His(GTG)"],
                                            ["trnH", "tRNA-His(gtg)"],
                                            ["trnG(tcc)", "tRNA-Gly(tcc)"],
                                            ["trnG", "tRNA-Gly(tcc)"],
                                            ["trnF(gaa)", "tRNA-Phe(gaa)"],
                                            ["trnF", "tRNA-Phe(gaa)"],
                                            ["trnE(ttc)", "tRNA-Glu(ttc)"],
                                            ["trnE", "tRNA-Glu(ttc)"],
                                            ["trnD(gtc)", "tRNA-Asp(gtc)"],
                                            ["trnD", "tRNA-Asp(gtc)"],
                                            ["trnC(gca)", "tRNA-Cys(gca)"],
                                            ["trnC", "tRNA-Cys(gca)"],
                                            ["trnA(tgc)", "tRNA-Ala(tgc)"],
                                            ["trnA", "tRNA-Ala(tgc)"],
                                            ["tRNA-Val", "tRNA-Val(tac)"],
                                            ["tRNA-Tyr", "tRNA-Tyr(gta)"],
                                            ["tRNA-Trp", "tRNA-Trp(tca)"],
                                            ["tRNA-Thr", "tRNA-Thr(tgt)"],
                                            ["tRNA-Pro", "tRNA-Pro(tgg)"],
                                            ["tRNA-Phe", "tRNA-Phe(gaa)"],
                                            ["tRNA-Met", "tRNA-Met(cat)"],
                                            ["tRNA-Lys", "tRNA-Lys(ctt)"],
                                            ["tRNA-Ile", "tRNA-Ile(gat)"],
                                            ["tRNA-His", "tRNA-His(GTG)"],
                                            ["tRNA-Gly", "tRNA-Gly(tcc)"],
                                            ["tRNA-Glu", "tRNA-Glu(ttc)"],
                                            ["tRNA-Gln", "tRNA-Gln(ttg)"],
                                            ["tRNA-Cys", "tRNA-Cys(gca)"],
                                            ["tRNA-Asp", "tRNA-Asp(gtc)"],
                                            ["tRNA-Asn", "tRNA-Asn(gtt)"],
                                            ["tRNA-Arg", "tRNA-Arg(tcg)"],
                                            ["tRNA-Ala", "tRNA-Ala(tgc)"],
                                            ["small subunit ribosomal RNA", "12S"],
                                            ["small ribosomal RNA subunit RNA", "12S"],
                                            ["small ribosomal RNA", "12S"],
                                            ["s-rRNA", "12S"],
                                            ["ribosomal RNA small subunit", "12S"],
                                            ["ribosomal RNA large subunit", "16S"],
                                            ["large subunit ribosomal RNA", "16S"],
                                            ["large ribosomal RNA subunit RNA", "16S"],
                                            ["large ribosomal RNA", "16S"],
                                            ["l-rRNA", "16S"],
                                            ["cytochrome c oxidase subunit III", "COX3"],
                                            ["cytochrome c oxidase subunit II", "COX2"],
                                            ["cytochrome c oxidase subunit I", "COX1"],
                                            ["cytochrome c oxidase subunit 3", "COX3"],
                                            ["cytochrome c oxidase subunit 2", "COX2"],
                                            ["cytochrome c oxidase subunit 1", "COX1"],
                                            ["cytochrome b", "CYTB"],
                                            ["ND6", "NAD6"],
                                            ["ND5", "NAD5"],
                                            ["ND4L", "NAD4L"],
                                            ["ND4", "NAD4"],
                                            ["ND3", "NAD3"],
                                            ["ND2", "NAD2"],
                                            ["ND1", "NAD1"],
                                            ["NADH dehydrogenase subunit5", "NAD5"],
                                            ["NADH dehydrogenase subunit 6", "NAD6"],
                                            ["NADH dehydrogenase subunit 5", "NAD5"],
                                            ["NADH dehydrogenase subunit 4L", "NAD4L"],
                                            ["NADH dehydrogenase subunit 4", "NAD4"],
                                            ["NADH dehydrogenase subunit 3", "NAD3"],
                                            ["NADH dehydrogenase subunit 2", "NAD2"],
                                            ["NADH dehydrogenase subunit 1", "NAD1"],
                                            ["CYT B", "CYTB"],
                                            ["COXIII", "COX3"],
                                            ["COXII", "COX2"],
                                            ["COXI", "COX1"],
                                            ["COIII", "COX3"],
                                            ["COII", "COX2"],
                                            ["COI", "COX1"],
                                            ["COB", "CYTB"],
                                            ["CO3", "COX3"],
                                            ["CO2", "COX2"],
                                            ["CO1", "COX1"],
                                            ["ATPase subunit 6", "ATP6"],
                                            ["ATPASE8", "ATP8"],
                                            ["ATPASE6", "ATP6"],
                                            ["ATPASE 8", "ATP8"],
                                            ["ATPASE 6", "ATP6"],
                                            ["ATP synthase F0 subunit 6", "ATP6"],
                                            ["16s rRNA", "16S"],
                                            ["16S subunit RNA", "16S"],
                                            ["16S ribosomal RNA", "16S"],
                                            ["16S rRNA", "16S"],
                                            ["12s rRNA", "12S"],
                                            ["12S subunit RNA", "12S"],
                                            ["12S ribosomal RNA", "12S"],
                                            ["12S rRNA", "12S"],
                                            ["12S Ribosomal RNA", "12S"]],
                         }
        dict_data = self.parseANNT_settings.value(
            "extract listed gene", None)
        if not dict_data:
            dict_data = ini_dict_data
            self.parseANNT_settings.setValue(
                "extract listed gene", dict_data)
        header = ["Old Name", "New Name"]
        array = dict_data[name]
        tableModel = MySettingTableModel(array, header)
        self.tableView.setModel(tableModel)
        self.tableView.resizeColumnsToContents()
        tableModel.dataChanged.connect(self.depositeData)
        tableModel.layoutChanged.connect(self.depositeData)
class Parse_annotation(Factory): # 解析线粒体注释文件
    def __init__(self, **kargs):
        """Parse a mitogenome annotation table and write submission files.

        Produces per-gene and concatenated PCG fastas, a gene counter
        report, a .fsa genome file with source modifiers, a .csv copy of
        the table and a .tbl feature table, then runs tbl2asn on the
        output folder.

        kargs keys used: exportPath, workPath, file, name, temp, t2n,
        codon, complete, strain, isolate, synonym, host, country, others,
        lineage, release_date, dict_tRNA, dict_replace, dict_product,
        progressSig.
        """
        self.dict_args = kargs
        self.outpath = kargs["exportPath"]
        self.workPath = kargs["workPath"]
        self.usernota_file = kargs["file"]
        self.latin_name = kargs["name"]
        self.template = kargs["temp"]
        self.tbl2asn = kargs["t2n"]
        codon = kargs["codon"]
        # Optional GenBank source modifiers: empty input -> omit the modifier.
        self.codon_table = " [mgcode=%s]" % codon if codon != "" else ""
        self.completeness = " [completeness=%s]" % kargs[
            "complete"] if kargs["complete"] != "" else ""
        self.strain = " [strain=%s]" % kargs[
            "strain"] if kargs["strain"] != "" else ""
        self.isolate = " [isolate=%s]" % kargs[
            "isolate"] if kargs["isolate"] != "" else ""
        self.synonym = " [synonym=%s]" % kargs[
            "synonym"] if kargs["synonym"] != "" else ""
        self.host = " [host=%s]" % kargs["host"] if kargs["host"] != "" else ""
        self.country = " [country=%s]" % kargs[
            "country"] if kargs["country"] != "" else ""
        self.others = " " + kargs["others"] if kargs["others"] != "" else ""
        self.lineage = " [lineage=%s]" % kargs[
            "lineage"] if kargs["lineage"] != "" else ""
        self.release_date = r" -H %s" % kargs[
            "release_date"] if kargs["release_date"] != "" else ""
        # Replacement tables used to normalise gene names/products.
        self.dict_tRNA = kargs["dict_tRNA"]
        self.dict_replace = kargs["dict_replace"]
        self.dict_product = kargs["dict_product"]
        # Run the parse.
        sequence, table_csv, feature_tbl = self.main_table()
        self.sequence = sequence
        self.latin_ = self.latin_name.replace(' ', '_')
        # self.Log_Show("Saving results...\n", self.log_show)
        genome = '>{self.latin_} [topology=circular] [location=mitochondrion] \
[organism={self.latin_name}]{self.codon_table}{self.completeness}{self.strain}\
{self.isolate}{self.synonym}{self.host}{self.country}{self.lineage}{self.others}\n{self.sequence}\n'.format(
            self=self)
        with open(self.outpath + os.sep + self.latin_name + '_PCGs_each.fas', 'w', encoding="utf-8") as f:
            f.write(self.pro)
        with open(self.outpath + os.sep + self.latin_name + '_PCGs_all.fas', 'w', encoding="utf-8") as f1:
            f1.write(self.PCGs_all)
        with open(self.outpath + os.sep + 'geneCounter.txt', 'w', encoding="utf-8") as f2:
            output = "genes not found:\n" + \
                "\t".join(
                    self.allGenes) + "\nSuperfluous genes\n" + "\t".join(self.superfluousGene)
            f2.write(output)
        # Save the whole-genome fasta (.fsa) file.
        with open(self.outpath + os.sep + self.latin_name + '.fsa', 'w', encoding="utf-8") as f3:
            f3.write(genome)
        with open(self.outpath + os.sep + self.latin_name + '.csv', 'w', encoding="utf-8") as f:
            f.write(table_csv.replace('\t', ','))
        with open(self.outpath + os.sep + self.latin_name + '.tbl', 'w', encoding="utf-8") as f2:
            f2.write(feature_tbl)
        template_line = " -t " + \
            "\"%s\""%self.template if self.template != "" else ""
        commands = self.tbl2asn + template_line + \
            " -p " + self.outpath + " -a s -a b -V vb" + self.release_date
        with open(self.outpath + os.sep + "commands.txt", "w", errors="ignore") as f:
            f.write(commands)
        # NOTE(review): shells out with user-controlled paths in the command
        # string; consider subprocess.run with an argument list.
        os.system(commands)
        self.dict_args["progressSig"].emit(100)
        os.remove(self.xml_11)
def match_genes(self, each_gene):
flag = False
for j in self.allGenes:
rgx = each_gene.replace("(", "\(").replace(")", "\)")
if re.search(rgx + "$", j, re.I):
flag = True
self.allGenes.remove(j)
if not flag:
self.superfluousGene.append(each_gene)
# 判断是否该基因复制了,标志是基因名后面加了-*,如"cytb-1","cytb-2","cytb-3"
def if_duplicated(self, name):
flag, dupl, cleanName = False, "", name
rgx = re.compile(r"-\d+$")
search_ = rgx.search(name)
if search_:
flag = True
dupl = search_.group()
cleanName = rgx.sub("", name)
# 如果用户标记的是-1,不被识别为重复基因
if dupl == "-1":
flag = False
return flag, dupl, cleanName # (True, '-2', 'cytb')
    def TableReplace(self, name, size, seq, pro):
        """Normalise one annotated gene and prepare its sequence.

        A leading '-' on `name` marks the L strand: the sequence is then
        reverse-complemented. Returns a 7-tuple:
        (new_name, start_codon, stop_codon, anticodon, strand_sign,
         sequence, pro) where start/stop codons are only filled for protein
        genes and the anticodon only for tRNAs. Side effects: updates
        self.PCGs_all and `pro` for protein genes, and ticks genes off via
        self.match_genes().
        """
        seq = seq.strip() # input may still carry newlines
        p = re.compile(
            r'COX[1-3]|NAD4L|NAD[1-6]|ND[1-6]|COB|CYTB|ATP[68]', re.I) # case-insensitive
        match = p.search(name)
        sign = 'H'
        # NOTE(review): debug trace left in the original; kept as-is.
        print(name, size, seq, pro)
        assert name
        flag, dupl, cleanName = self.if_duplicated(name)
        if name[0] == '-':
            name = name.strip('-')
            sign = 'L'
            dict1 = {"a": "t", "t": "a", "c": "g", "g": "c"}
            # Reverse the sequence; it must be lower-cased so the (lowercase)
            # complement table applies — this also distinguishes complemented
            # from non-complemented sequences.
            seq1 = seq[::-1].lower()
            seq = ''
            for i in seq1:
                if i in 'atcg':
                    seq += dict1[i] # complementary base
                else:
                    seq += i # degenerate bases are kept unchanged
        if name.startswith('tRNA') or name.startswith('-tRNA'): # tRNA genes
            self.match_genes(name)
            regular = re.search(r'(tRNA-\w+)[(](\w+)[)]', name)
            if regular:
                new_name, inv_codon = regular.groups()
                inv_codon = inv_codon.upper()
            else: # no anticodon provided by the user: return it empty
                new_name = name.strip('-') # drop the strand sign
                inv_codon = ''
            return new_name + dupl if flag else new_name, '', '', inv_codon, sign, seq, pro
        elif match: # protein-coding genes (branch skipped when match is None)
            new_name = match.group().lower() # TODO: re-check this
            if new_name == 'nad4l': # fix nad4L capitalisation
                new_name = 'nad4L'
            self.match_genes(new_name)
            ini = seq[0:3]
            # Stop codon length depends on size % 3 (partial stop codons);
            # PCGs_all accumulates complete codons only.
            if int(size) % 3 == 0:
                ter = seq[-3:]
                self.PCGs_all += seq
            elif int(size) % 3 == 1:
                ter = seq[-1]
                self.PCGs_all += seq[:-1]
            elif int(size) % 3 == 2:
                ter = seq[-2:]
                self.PCGs_all += seq[:-2]
            pro += '>' + new_name + \
                dupl + '\n' + seq + '\n' if flag else '>' + \
                new_name + '\n' + seq + '\n'
            # For protein genes the anticodon slot stays empty; start/stop
            # codons are upper-cased.
            return new_name + dupl if flag else new_name, ini.upper(), ter.upper(), '', sign, seq, pro
        else:
            self.match_genes(name)
            return name, '', '', '', sign, seq, pro # the (possibly complemented) sequence is returned too
def ser_leu(self, feature, i): # 处理LEU,SER的简写
if re.search(r'\s[TU]AG\s', i, re.I):
feature = 'L1'
cr = 'CUN'
elif re.search(r'\s[TU]AA\s', i, re.I):
feature = 'L2'
cr = 'UUR'
elif re.search(r'\s[GTUA]C[TU]\s', i, re.I):
feature = 'S1'
cr = 'AGN'
elif re.search(r'\s[TU]GA\s', i, re.I):
feature = 'S2'
cr = 'UCN'
else:
feature = feature
cr = None
return feature, cr
    def add_table(self, name, item):  # handles the 'join' (multi-segment feature) case
        """Insert a feature block into ``self.dict_tbl``, merging segments.

        *item* is one feature split into lines (coordinate line(s) followed by
        tab-indented annotation lines). When *name* already exists, only the
        new segment's coordinate pair is spliced into the existing block so
        that joined segments share a single annotation.
        """
        list_dict = list(self.dict_tbl.keys())
        def is_index_start(list_):
            # True for a bare two-field coordinate line such as '12025\t12850'
            return len(list_) == 2 and list_[0].isdigit() and list_[1].isdigit()
        if not self.dict_tbl.get(name):
            self.dict_tbl[name] = item
        else:
            insert_ = "\t".join(item[0].split("\t")[:2])  # the first two fields, e.g. 12025\t12850
            index = []
            if list_dict.index(name) == 0:  # the very first item needs special handling
                if self.count_1st_item == 0:  # first time through
                    self.first_index = "\t".join(
                        self.dict_tbl[name][0].split("\t")[:2])
                    for num, i in enumerate(self.dict_tbl[name]):  # scan for insertion positions
                        # a coordinate line directly followed by an annotation line
                        if i[0].isdigit() and self.dict_tbl[name][num + 1].startswith("\t"):
                            new = self.dict_tbl[name][num].replace(
                                self.first_index, insert_)  # swap in the new start coordinates
                            self.dict_tbl[name][num] = new  # replace the line
                            index.append(num + 1)
                    for numb, each_index in enumerate(index):
                        # the offset is needed because every insert shifts later indices
                        self.dict_tbl[name].insert(
                            each_index + numb, self.first_index)
                else:
                    for num, i in enumerate(self.dict_tbl[name]):  # scan for insertion positions
                        list_i = i.split("\t")
                        # a coordinate line directly followed by self.first_index
                        if i[0].isdigit() and self.dict_tbl[name][num + 1] == self.first_index:
                            index.append(num + 1)
                        elif is_index_start(list_i) and self.dict_tbl[name][num + 1] == self.first_index:
                            index.append(num + 1)
                    for numb, each_index in enumerate(index):
                        self.dict_tbl[name].insert(each_index + numb, insert_)
                self.count_1st_item += 1
            else:
                for num, i in enumerate(self.dict_tbl[name]):  # scan for insertion positions
                    # a coordinate line directly followed by an annotation line
                    if i[0].isdigit() and self.dict_tbl[name][num + 1].startswith("\t"):
                        index.append(num + 1)
                    else:
                        list_i = i.split("\t")
                        # coordinates were already appended before: find the bottom-most pair
                        if is_index_start(list_i) and not is_index_start(self.dict_tbl[name][num + 1].split("\t")):
                            index.append(num + 1)
                for numb, each_index in enumerate(index):
                    # the offset is needed because every insert shifts later indices
                    self.dict_tbl[name].insert(each_index + numb, insert_)
def forSqn(self, i, flag, cleanName):
# 只识别蛋白基因、tRNA和rRNA到gb文件
#i = 'cox1 1 1644 1644 ATG TAA + sequence'
match = re.match(
r'COX[1-3]|NAD4L|NAD[1-6]|ND[1-6]|COB|CYTB|ATP[68]', i, re.IGNORECASE)
list_line = i.split('\t')
if i.startswith('tRNA'):
if list_line[8] == 'L': # 写进tbl文件
if re.match(r'tRNA-Leu|tRNA-Ser', i, re.I):
feature = ''
item_tbl = '%s\t%s\ttRNA\n\t\t\tproduct\t%s\n\t\t\tcodon_recognized\t%s\n'\
% (list_line[2], list_line[1], cleanName, self.ser_leu(feature, i)[1])
self.add_table(
self.ser_leu(feature, i)[0], item_tbl.split("\n")[:-1])
else:
item_tbl = '%s\t%s\ttRNA\n\t\t\tproduct\t%s\n'\
% (list_line[2], list_line[1], cleanName)
self.add_table(
list_line[0], item_tbl.split("\n")[:-1])
else:
if re.match(r'tRNA-Leu|tRNA-Ser', i, re.I):
feature = ''
item_tbl = '%s\t%s\ttRNA\n\t\t\tproduct\t%s\n\t\t\tcodon_recognized\t%s\n'\
% (list_line[1], list_line[2], cleanName, self.ser_leu(feature, i)[1])
self.add_table(
self.ser_leu(feature, i)[0], item_tbl.split("\n")[:-1])
else:
item_tbl = '%s\t%s\ttRNA\n\t\t\tproduct\t%s\n'\
% (list_line[1], list_line[2], cleanName)
self.add_table(
list_line[0], item_tbl.split("\n")[:-1])
elif match: # 匹配上了蛋白基因
name_ = cleanName.lower() if not re.search(
r"nad4l", cleanName, re.I) else "nad4L"
if list_line[8] == 'L':
# 这一步的or是否会存在问题
if list_line[6] == 'T' or list_line[6] == 'TA':
item_tbl = "%s\t%s\tgene\n\t\t\tgene\t%s\n%s\t%s\tCDS\n\t\t\tproduct\t%s\n\t\t\ttransl_except\t(pos:complement(%s),aa:TERM)\n\t\t\tnote\tTAA stop codon is completed by the addition of 3' A residues to the mRNA\n"\
% (list_line[2], list_line[1], name_, list_line[2], list_line[1], self.dict_product[cleanName], list_line[1])
self.add_table(
list_line[0], item_tbl.split("\n")[:-1])
else:
item_tbl = "%s\t%s\tgene\n\t\t\tgene\t%s\n%s\t%s\tCDS\n\t\t\tproduct\t%s\n"\
% (list_line[2], list_line[1], name_, list_line[2], list_line[1], self.dict_product[cleanName])
self.add_table(
list_line[0], item_tbl.split("\n")[:-1])
else:
# 这一步的or是否会存在问题
if list_line[6] == 'T' or list_line[6] == 'TA':
item_tbl = "%s\t%s\tgene\n\t\t\tgene\t%s\n%s\t%s\tCDS\n\t\t\tproduct\t%s\n\t\t\ttransl_except\t(pos:%s,aa:TERM)\n\t\t\tnote\tTAA stop codon is completed by the addition of 3' A residues to the mRNA\n"\
% (list_line[1], list_line[2], name_, list_line[1], list_line[2], self.dict_product[cleanName], list_line[2])
self.add_table(
list_line[0], item_tbl.split("\n")[:-1])
else:
item_tbl = "%s\t%s\tgene\n\t\t\tgene\t%s\n%s\t%s\tCDS\n\t\t\tproduct\t%s\n"\
% (list_line[1], list_line[2], name_, list_line[1], list_line[2], self.dict_product[cleanName])
self.add_table(
list_line[0], item_tbl.split("\n")[:-1])
elif i.startswith('12S') or i.startswith('16S'):
if list_line[8] == 'L':
if cleanName == '12S':
item_tbl = '%s\t%s\trRNA\n\t\t\tproduct\t12S ribosomal RNA\n'\
% (list_line[2], list_line[1])
self.add_table(
list_line[0], item_tbl.split("\n")[:-1])
elif cleanName == '16S':
item_tbl = '%s\t%s\trRNA\n\t\t\tproduct\t16S ribosomal RNA\n'\
% (list_line[2], list_line[1])
self.add_table(
list_line[0], item_tbl.split("\n")[:-1])
else:
if cleanName == '12S':
item_tbl = '%s\t%s\trRNA\n\t\t\tproduct\t12S ribosomal RNA\n'\
% (list_line[1], list_line[2])
self.add_table(
list_line[0], item_tbl.split("\n")[:-1])
elif cleanName == '16S':
item_tbl = '%s\t%s\trRNA\n\t\t\tproduct\t16S ribosomal RNA\n'\
% (list_line[1], list_line[2])
self.add_table(
list_line[0], item_tbl.split("\n")[:-1])
# 如果是L1\S1等,就要判断一下
dict_key = self.ser_leu(feature, i)[0] if re.match(
r'tRNA-Leu|tRNA-Ser', i, re.I) else list_line[0]
# 重复基因加一个note
if flag:
for i in self.dict_tbl[dict_key]: # 删除其他note和transexcept注释
if "note" in i:
self.dict_tbl[dict_key].remove(i)
for i in self.dict_tbl[dict_key]: # 前面删除过,所以要重新读
if "transl_except" in i:
self.dict_tbl[dict_key].remove(i)
self.dict_tbl[dict_key].append("\t\t\tnote\tduplicated")
    def str_table(self, table_dict):
        """Render the ordered gene table into CSV text and a .tbl feature table.

        *table_dict* maps start position -> tab-joined gene line. Returns
        ``(table_csv, feature_tbl)`` and fills ``self.pro``, ``self.PCGs_all``,
        ``self.dict_tbl`` and ``self.superfluousGene`` along the way.
        """
        list_star = list(table_dict.keys())  # the dict keys: a list of numeric start positions
        list_star = sorted(list_star)  # sort the keys
        previous = 0  # stop position of the previous gene
        # header used when generating the gb file
        table = '''Gene\tPosition\t\tSize\tIntergenic nucleotides\tCodon\t\tAnti-codon
    \tFrom\tTo\t\t\tStart\tStop\t\tStrand\tSequence\n'''
        # the same header starts the csv table
        table_csv = table
        self.pro = ''
        self.PCGs_all = ">PCGs\n"
        self.allGenes = ["tRNA-His(gtg)", "tRNA-Gln(ttg)", "tRNA-Phe(gaa)", "tRNA-Met(cat)",
                         "tRNA-Val(tac)", "tRNA-Ala(tgc)", "tRNA-Asp(gtc)", "tRNA-Asn(gtt)",
                         "tRNA-Pro(tgg)", "tRNA-Ile(gat)", "tRNA-Lys(ctt)", "tRNA-Ser(gct)",
                         "tRNA-Trp(tca)", "tRNA-Thr(tgt)", "tRNA-Cys(gca)", "tRNA-Leu(tag)",
                         "tRNA-Ser(tga)", "tRNA-Leu(taa)", "tRNA-Glu(ttc)", "tRNA-Tyr(gta)",
                         "tRNA-Arg(tcg)", "tRNA-Gly(tcc)", "ATP6", "ATP8", "COX1", "COX2",
                         "COX3", "CYTB", "NAD1", "NAD2", "NAD3", "NAD4", "NAD5", "NAD4L",
                         "NAD6", "12S", "16S"]
        self.superfluousGene = []
        feature_tbl = '>Feature %s\n' % self.latin_name.replace(' ', '_')
        self.dict_tbl = OrderedDict()
        self.count_1st_item = 0  # how many times the first item was met (used inside add_table)
        gapCount = 0
        overlapCount = 0
        num = 60
        each = 30 / len(list_star)
        for j in list_star:
            list1 = table_dict[j].split('\t')
            name, size, seq = list1[0], list1[3], list1[4]
            start, stop = j, int(list1[2])
            if stop > previous:
                space = str(start - previous - 1)
                previous = stop  # remember this gene's stop position
            else:  # this gene lies completely inside another gene
                space = "0"
            if int(space) > 0:
                gapCount += 1
            if int(space) < 0:
                overlapCount += 1
            if space == '0':
                space = ''  # a gap of 0 is shown as an empty cell
            new_name, ini, ter, inv_codon, sign, seq1, self.pro = self.TableReplace(
                name, size, seq, self.pro)
            flag, dupl, cleanName = self.if_duplicated(new_name)
            list_ = [new_name, str(start), str(
                stop), size, space, ini, ter, inv_codon, sign, seq1]
            # feeds the SQN (feature table) generation
            self.forSqn("\t".join(list_), flag, cleanName)
            # build the csv table row
            if cleanName.startswith("tRNA"):
                if re.match(r'tRNA-Leu|tRNA-Ser', cleanName, re.I):
                    csv_name = "trn" + self.ser_leu("", "\t".join(list_))[0]
                else:
                    csv_name = "trn" + self.dict_tRNA[cleanName]
            elif cleanName == "12S":
                csv_name = "rrnS"
            elif cleanName == "16S":
                csv_name = "rrnL"
            else:
                csv_name = cleanName
            list_[0] = csv_name
            table_csv += "\t".join(list_) + '\n'
            num += each
            self.dict_args["progressSig"].emit(num)
        list_feature_tbl = []
        for key in self.dict_tbl.keys():
            list_feature_tbl += self.dict_tbl[key]
        feature_tbl += "\n".join(list_feature_tbl)
        # append how many gaps and overlaps were counted
        table_csv += "Gap:,%d,Overlap:%d\n" % (gapCount, overlapCount)
        return table_csv, feature_tbl
def save_docx_as(self):
import pythoncom
pythoncom.CoInitialize() # 多线程编程,必须加上这2句才能用win32com模块
word = wc.Dispatch('Word.Application')
doc = word.Documents.Open(self.usernota_file)
sequence = re.sub(r'\[.+?\]|\n| |\t|\r', '', doc.Content.Text).upper()
self.xml_11 = self.workPath + os.sep + "xml_11.xml"
doc.SaveAs(self.xml_11, 11)
doc.Close()
return sequence
def Replace(self, name):
if name in list(self.dict_replace.keys()):
new_name = self.dict_replace[name]
else:
new_name = name
return new_name
# def fetch_sequence(self): # 得到全部序列
# docxfile = docx.Document(self.usernota_file)
# docxText = ""
# for para in docxfile.paragraphs:
# docxText += para.text.strip()
# docxText = re.sub(r'\[.+?\]|\n| |\t|\r', '', docxText)
# return docxText.upper()
    def fetch_name_seq(self):
        """Extract ``(comment, sequence, start)`` triplets from the Word XML.

        Parses the Word-XML written by :meth:`save_docx_as`: every Word
        comment marks one annotated gene. Returns a list of
        ``(comment_text, SEQUENCE, start_pos)`` with *start_pos* 1-based
        within the whole document sequence.
        """
        f = self.read_file(self.xml_11)
        content = f.read()
        f.close()
        rgx_content = re.compile(
            r'(<aml:annotation aml:id="(\d+)" w:type="Word\.Comment)\.Start"/>(.+)\1\.End"/><w:r.+\1(.+?)</aml:content></aml:annotation></w:r>', re.S | re.I)
        # group(2) is the id, group(3) the text content, group(4) the comment content
        rgx_text = re.compile(r"<w:t>(.+?)</w:t>", re.I | re.S)
        # fetches text content, including a comment's own text content
        rgx_comment = re.compile(
            r'''<aml:annotation aml:id="\d+" w:type="Word.Comment".+?</aml:annotation>''', re.I | re.S)
        # matches a whole comment section; comments embedded inside text are blanked out
        ini_pos = 0
        list_name_seq = []
        while rgx_content.search(content, ini_pos):
            match = rgx_content.search(content, ini_pos)
            text, comment, ini_pos = match.group(3), match.group(
                4), match.span()[0] + 56  # must add 56 to skip past the Start marker -- NOTE(review): magic offset, confirm against the XML layout
            text = rgx_comment.sub("", text)  # blank comments with other IDs (some are nested inside this one's text)
            sequence = "".join(rgx_text.findall(text))
            comment_text = "".join(
                rgx_text.findall(comment)).strip()  # drop surrounding spaces
            if re.match(r" *\- +", comment_text):  # the user wrote the minus sign separated by spaces
                comment_text = re.sub(r" *\- +", "", comment_text)  # strip every spaced minus
                comment_text = "-" + \
                    re.split(
                        r" +|\r+|\n+|\t+", comment_text)[0]  # keep only the part before any whitespace
            else:
                comment_text = re.split(
                    r" +|\r+|\n+|\t+", comment_text)[0]  # keep only the part before any whitespace
            sequence = re.sub(r'\n| |\t|\r', '', sequence)  # remove spaces, newlines, user info, etc.
            # locate the 1-based start position of this sequence
            text_before = content[:match.span(3)[0]]  # everything before this Word.Comment.Start
            text_before = rgx_comment.sub("", text_before)  # blank comments with other IDs (some nested inside other text)
            sequence_before = "".join(rgx_text.findall(text_before))
            sequence_before = re.sub(r'\n| |\t|\r', '', sequence_before)
            seq_start = len(sequence_before) + 1
            list_name_seq.append((comment_text, sequence.upper(), seq_start))
        return list_name_seq
def main_table(self):
sequence = self.save_docx_as() # 先将docx文件另存, 并得到序列
# self.dict_args["progressSig"].emit(10)
# sequence = self.fetch_sequence() # 生成存序列
self.dict_args["progressSig"].emit(10)
list_name_seq = self.fetch_name_seq()
self.dict_args["progressSig"].emit(30)
table_dict = OrderedDict()
start = 0
num = 30
each = 30 / len(list_name_seq)
for i in list_name_seq:
name, seq, start = i
stop = start + len(seq) - 1
name = self.Replace(name)
size = str(len(seq))
assert (seq in sequence), seq
# rgx_span = re.compile(seq, re.I)
# # 从上一个的起始位置开始搜索,必须先编译
# span = rgx_span.search(sequence, int(start)).span()
# start, stop = str(span[0] + 1), str(span[1])
table_dict[start] = name + '\t' + str(start) + \
'\t' + str(stop) + '\t' + size + '\t' + seq + '\n'
num += each
self.dict_args["progressSig"].emit(num)
table_csv, feature_tbl = self.str_table(table_dict)
return sequence, table_csv, feature_tbl
class ParseANNT(QDialog, Ui_parseANNT, Factory):
    """Qt dialog driving the "Parse Annotation" workflow.

    Collects the annotated Word file(s) plus organism metadata, then runs
    Parse_annotation in a worker thread while reporting progress/errors
    back to the GUI through signals.
    """
    exception_signal = pyqtSignal(str)  # signal usable by every instance: carries tracebacks
    progressSig = pyqtSignal(int)  # drives the progress bar
    startButtonStatusSig = pyqtSignal(list)
    def __init__(
            self,
            workPath=None,
            t2n_exe=None,
            inputDocxs=None,
            focusSig=None,
            parent=None):
        """Build the dialog, load persisted settings and wire up signals."""
        super(ParseANNT, self).__init__(parent)
        self.setupUi(self)
        self.parent = parent
        self.factory = Factory()
        self.thisPath = self.factory.thisPath
        self.workPath = workPath
        # NOTE(review): t2n_exe=None (the default) raises TypeError here -- confirm callers always pass it
        self.t2n_exe = '"' + t2n_exe + '"'
        self.inputDocxs = inputDocxs
        self.focusSig = focusSig
        # persisted user settings
        self.parseANNT_settings = QSettings(
            self.thisPath +
            '/settings/parseANNT_settings.ini',
            QSettings.IniFormat)
        # File only, no fallback to the registry.
        self.parseANNT_settings.setFallbacksEnabled(False)
        dict_data = self.parseANNT_settings.value(
            "extract listed gene")
        self.dict_tRNA_abbre = dict(dict_data["tRNA Abbreviation"])
        self.dict_product = dict(dict_data["Protein Gene Full Name"])
        self.dict_replace = dict(dict_data["Name From Word"])
        # load the style sheet
        with open(self.thisPath + os.sep + 'style.qss', encoding="utf-8", errors='ignore') as f:
            self.qss_file = f.read()
        self.setStyleSheet(self.qss_file)
        self.startButtonStatusSig.connect(self.factory.ctrl_startButton_status)
        self.progressSig.connect(self.runProgress)
        # signal-slot connections
        self.exception_signal.connect(self.popupException)
        self.comboBox_5.lineEdit().setLineEditNoChange(True)
        self.comboBox_4.installEventFilter(self)
        self.comboBox_5.installEventFilter(self)
        # restore the user's previous settings
        self.guiRestore()
        ## brief demo
        country = self.factory.path_settings.value("country", "UK")
        url = "http://phylosuite.jushengwu.com/dongzhang0725.github.io/documentation/#5-14-1-1-Brief-example" if \
            country == "China" else "https://dongzhang0725.github.io/dongzhang0725.github.io/documentation/#5-14-1-1-Brief-example"
        self.label_5.clicked.connect(lambda: QDesktopServices.openUrl(QUrl(url)))
    @pyqtSlot()
    def on_pushButton_3_clicked(self):
        """
        Open a file dialog to choose the input Word document(s).
        """
        fileNames = QFileDialog.getOpenFileNames(
            self,
            "Choose Word Files",
            filter="Word Documents(*.docx);;Word 97-2003 Documents(*.doc);;Word Macro-Enabled Documents(*.docm);;"
            "Word Template(*.dotx);;Word 97-2003 Template(*.dot);;Word Macro-Enabled Template(*.dotm);;"
            "OpenDocument Text(*.odt);;")
        if fileNames[0]:
            self.inputDocx(self.comboBox_4, fileNames[0])
    @pyqtSlot()
    def on_pushButton_4_clicked(self):
        """
        Open a file dialog to choose the template file.
        """
        fileName = QFileDialog.getOpenFileName(self, "Choose Template File")
        if fileName[0]:
            self.inputDocx(self.comboBox_5, [fileName[0]])
    @pyqtSlot()
    def on_pushButton_clicked(self):
        """
        Start: validate inputs, collect all arguments and launch the worker.
        """
        self.dict_args = {}
        self.dict_args["t2n"] = self.t2n_exe
        self.dict_args["workPath"] = self.workPath
        self.dict_args["files"] = self.comboBox_4.fetchListsText()
        if not self.dict_args["files"]:
            QMessageBox.information(
                self,
                "Information",
                "<p style='line-height:25px; height:25px'>Please input Word file(s) first</p>")
            return
        self.dict_args["temp"] = self.comboBox_5.fetchCurrentText()
        ####
        self.dict_args["dict_tRNA"] = self.dict_tRNA_abbre
        self.dict_args["dict_product"] = self.dict_product
        self.dict_args["dict_replace"] = self.dict_replace
        self.dict_args["name"] = self.lineEdit.text().strip()
        if not self.dict_args["name"]:
            QMessageBox.information(
                self,
                "Information",
                "<p style='line-height:25px; height:25px'>Organism name is needed!</p>")
            self.lineEdit.setFocus()
            return
        self.dict_args["codon"] = str(
            self.comboBox_9.currentText()).split(" ")[0]
        self.dict_args["complete"] = str(
            self.comboBox.currentText()).split(" ")[0]
        self.dict_args["strain"] = self.lineEdit_2.text().strip()
        self.dict_args["isolate"] = self.lineEdit_3.text().strip()
        self.dict_args["synonym"] = self.lineEdit_4.text().strip()
        self.dict_args["host"] = self.lineEdit_7.text().strip()
        self.dict_args["country"] = self.lineEdit_8.text().strip()
        self.dict_args["others"] = self.lineEdit_5.text().strip()
        self.dict_args["lineage"] = self.stripName(self.lineEdit_6.text()).replace(";", "; ")
        tuple_date = self.releaseDate()  # e.g. ("2018", "4", "3")
        sort_date = [tuple_date[1], tuple_date[2], tuple_date[0]]
        self.dict_args[
            "release_date"] = "/".join(sort_date)  # e.g. 07/27/2017
        self.dict_args["progressSig"] = self.progressSig
        self.dict_args["file"] = self.dict_args["files"][0]
        base = os.path.splitext(os.path.basename(self.dict_args["file"]))[0]
        self.dict_args["exportPath"] = self.factory.creat_dirs(
            self.workPath + os.sep + base + "_parseANNT_results")
        ok = self.factory.remove_dir(self.dict_args["exportPath"], parent=self)
        if not ok:
            # ask whether to delete the old results; if the user cancels, do nothing
            return
        self.worker = WorkThread(self.run_command, parent=self)
        self.worker.start()
        # if self.dict_args["files"]:
        #     for i in self.dict_args["files"]:
        #         base = os.path.basename(i)
        #         self.dict_args["exportPath"] = self.factory.creat_dirs(
        #             self.workPath + os.sep + base + "_parseANNT_results")
        #         self.dict_args["file"] = i
        #         self.worker = WorkThread(self.run_command, parent=self)
        #         self.worker.start()
        # else:
        #     QMessageBox.critical(
        #         self,
        #         "No input file",
        #         "<p style='line-height:25px; height:25px'>Please input file(s) first!</p>",
        #         QMessageBox.Ok)
    @pyqtSlot()
    def on_toolButton_clicked(self):
        """Open the settings dialog for annotation parsing."""
        setting = ParANNT_settings(self)
        # intended to hide the '?' help button
        # NOTE(review): this actually ADDS min/max buttons and does not clear
        # Qt.WindowContextHelpButtonHint -- confirm the intended behaviour
        setting.setWindowFlags(setting.windowFlags() | Qt.WindowMinMaxButtonsHint)
        setting.exec_()
    def inputDocx(self, combobox, fileNames):
        """Sanitise the chosen file names/paths and load them into *combobox*.

        Invalid characters in a file name are replaced with '_' (the file is
        renamed on disk); paths containing invalid characters are rejected.
        """
        try:
            list_new_inputs = []
            if fileNames:
                flag = False
                rgx_path = re.compile(r'[^\w_.)(-:\\]')
                rgx_name = re.compile(r'[^\w_.)(-]')
                for num, i in enumerate(fileNames):
                    if os.path.exists(i):
                        dir = os.path.dirname(i)
                        if rgx_path.search(dir):
                            QMessageBox.warning(
                                self,
                                "Parse Annotation",
                                "<p style='line-height:25px; height:25px'>Invalid symbol found in file path, please copy the file to desktop and try again!</p>")
                            continue
                        base = os.path.basename(i)
                        if rgx_name.search(base):
                            base = rgx_name.sub("_", base)
                            flag = True
                            os.rename(i, dir + os.sep + base)
                        list_new_inputs.append(dir + os.sep + base)
                if flag:
                    QMessageBox.information(
                        self,
                        "Parse Annotation",
                        "<p style='line-height:25px; height:25px'>Invalid symbol found in file name, replacing it with '_'!</p>")
                combobox.refreshInputs(list_new_inputs)
        except:
            self.exceptionInfo = ''.join(
                traceback.format_exception(
                    *sys.exc_info()))  # capture the traceback text; it can only be caught here
            self.exception_signal.emit(self.exceptionInfo)  # emit it to the GUI thread
    def run_command(self):
        """Worker-thread entry: run Parse_annotation and report the outcome."""
        try:
            # clear the output folder first
            time_start = datetime.datetime.now()
            self.startButtonStatusSig.emit(
                [
                    self.pushButton,
                    self.progressBar,
                    "start",
                    self.dict_args["exportPath"],
                    self.qss_file,
                    self])
            parseANNT = Parse_annotation(**self.dict_args)
            self.startButtonStatusSig.emit(
                [
                    self.pushButton,
                    self.progressBar,
                    "stop",
                    self.dict_args["exportPath"],
                    self.qss_file,
                    self])
            self.focusSig.emit(self.dict_args["exportPath"])
            time_end = datetime.datetime.now()
            self.time_used_des = "Start at: %s\nFinish at: %s\nTotal time used: %s\n\n" % (str(time_start), str(time_end),
                                                                                          str(time_end - time_start))
            with open(self.dict_args["exportPath"] + os.sep + "summary and citation.txt", "w", encoding="utf-8") as f:
                f.write("If you use PhyloSuite v1.2.3, please cite:\nZhang, D., F. Gao, I. Jakovlić, H. Zou, J. Zhang, W.X. Li, and G.T. Wang, PhyloSuite: An integrated and scalable desktop platform for streamlined molecular sequence data management and evolutionary phylogenetics studies. Molecular Ecology Resources, 2020. 20(1): p. 348–355. DOI: 10.1111/1755-0998.13096.\n\n" + self.time_used_des)
        except BaseException:
            self.exceptionInfo = ''.join(
                traceback.format_exception(
                    *sys.exc_info()))  # capture the traceback text; it can only be caught here
            self.exception_signal.emit(self.exceptionInfo)  # emit it to the GUI thread
            self.startButtonStatusSig.emit(
                [
                    self.pushButton,
                    self.progressBar,
                    "except",
                    self.dict_args["exportPath"],
                    self.qss_file,
                    self])
    def releaseDate(self):
        """Return the (year, month, day) strings from the date widget."""
        text = self.dateEdit.text()  # e.g. 2018/4/3
        return text.split("/")
    def popupException(self, exception):
        """Show an exception to the user; offer to open a locked output file."""
        rgx = re.compile(r'Permission.+?[\'\"](.+\.(csv|docx|doc|odt|docm|dotx|dotm|dot))[\'\"]')
        # rgxDocx = re.compile(r'Permission.+?[\'\"](.+?\.docx)[\'\"]')
        if rgx.search(exception):
            csvfile = rgx.search(exception).group(1)
            reply = QMessageBox.critical(
                self,
                "Parse Annotation",
                "<p style='line-height:25px; height:25px'>Please close '%s' file first!</p>"%os.path.basename(csvfile),
                QMessageBox.Yes,
                QMessageBox.Cancel)
            if reply == QMessageBox.Yes:
                os.startfile(csvfile)
        # elif rgxDocx.search(exception):
        #     docxfile = rgxDocx.search(exception).group(1)
        #     reply = QMessageBox.critical(
        #         self,
        #         "Parse Annotation",
        #         "<p style='line-height:25px; height:25px'>Please close 'docx' file first!</p>",
        #         QMessageBox.Yes,
        #         QMessageBox.Cancel)
        #     if reply == QMessageBox.Yes:
        #         os.startfile(docxfile)
        else:
            msg = QMessageBox(self)
            msg.setIcon(QMessageBox.Critical)
            msg.setText(
                'The program encountered an unforeseen problem, please report the bug at <a href="https://github.com/dongzhang0725/PhyloSuite/issues">https://github.com/dongzhang0725/PhyloSuite/issues</a> or send an email with the detailed traceback to dongzhang0725@gmail.com')
            msg.setWindowTitle("Error")
            msg.setDetailedText(exception)
            msg.setStandardButtons(QMessageBox.Ok)
            msg.exec_()
    def eventFilter(self, obj, event):
        """Accept drag-and-drop of files onto the two input combo boxes."""
        name = obj.objectName()
        if isinstance(obj, QComboBox):
            if name in ["comboBox_4", "comboBox_5"]:
                if (event.type() == QEvent.DragEnter):
                    if event.mimeData().hasUrls():
                        # must accept the dragEnterEvent or else the dropEvent
                        # can't occur !!!
                        event.accept()
                        return True
                if (event.type() == QEvent.Drop):
                    files = [u.toLocalFile() for u in event.mimeData().urls()]
                    self.inputDocx(obj, files)
        return super(ParseANNT, self).eventFilter(obj, event)  # fall back to the default handling
    def guiSave(self):
        """Persist window size and widget contents to the settings file."""
        # Save geometry
        self.parseANNT_settings.setValue('size', self.size())
        # self.parseANNT_settings.setValue('pos', self.pos())
        for name, obj in inspect.getmembers(self):
            # if type(obj) is QComboBox: # this works similar to isinstance, but
            # missed some field... not sure why?
            if isinstance(obj, QComboBox):
                # save combobox selection to registry
                if name in ["comboBox_4", "comboBox_5"]:
                    values = obj.fetchListsText()
                    self.parseANNT_settings.setValue(name, values)
                else:
                    text = obj.currentText()
                    if text:
                        allItems = [
                            obj.itemText(i) for i in range(obj.count())]
                        allItems.remove(text)
                        sortItems = [text] + allItems
                        self.parseANNT_settings.setValue(name, sortItems)
            if isinstance(obj, QLineEdit):
                text = obj.text()
                self.parseANNT_settings.setValue(name, text)
            if isinstance(obj, QDateEdit):
                year, month, day = self.releaseDate()
                self.parseANNT_settings.setValue(name, (year, month, day))
    def guiRestore(self):
        """Restore window size and widget contents from the settings file."""
        # Restore geometry
        size = self.factory.judgeWindowSize(self.parseANNT_settings, 712, 515)
        self.resize(size)
        self.factory.centerWindow(self)
        # self.move(self.parseANNT_settings.value('pos', QPoint(875, 254)))
        for name, obj in inspect.getmembers(self):
            if isinstance(obj, QComboBox):
                if name == "comboBox_4":
                    if self.inputDocxs:
                        self.inputDocx(obj, self.inputDocxs)
                    else:
                        values = self.parseANNT_settings.value(name, [])
                        self.inputDocx(obj, values)
                elif name == "comboBox_5":
                    allItems = [obj.itemText(i) for i in range(obj.count())]
                    values = self.parseANNT_settings.value(name, allItems)
                    self.inputDocx(obj, values)
                else:
                    allItems = [obj.itemText(i) for i in range(obj.count())]
                    values = self.parseANNT_settings.value(name, allItems)
                    model = obj.model()
                    obj.clear()
                    for num, i in enumerate(values):
                        item = QStandardItem(i)
                        # alternating row background colours
                        if num % 2 == 0:
                            item.setBackground(QColor(255, 255, 255))
                        else:
                            item.setBackground(QColor(237, 243, 254))
                        model.appendRow(item)
            if isinstance(obj, QLineEdit):
                value = self.parseANNT_settings.value(
                    name, "")  # get stored value from registry
                obj.setText(value)  # restore checkbox
            if isinstance(obj, QDateEdit):
                year, month, day = self.parseANNT_settings.value(
                    name, ("2018", "4", "3"))
                obj.setDate(QDate(int(year), int(month), int(day)))
        # NOTE(review): this return value is never used by Qt -- confirm it can go
        return False
    def runProgress(self, num):
        """Advance the progress bar to *num* (only forward)."""
        oldValue = self.progressBar.value()
        done_int = int(num)
        if done_int > oldValue:
            self.progressBar.setProperty("value", done_int)
            QCoreApplication.processEvents()
    def closeEvent(self, event):
        """Persist GUI state when the dialog closes."""
        self.guiSave()
    @pyqtSlot()
    def on_pushButton_2_clicked(self):
        """
        Cancel
        """
        self.close()
    def stripName(self, name):
        """Remove all whitespace from *name*."""
        return re.sub(r"\s", "", name)
    # def resizeEvent(self, event):
    #     self.comboBox_4.view().setMaximumWidth(self.comboBox_4.width())
    #     self.comboBox_5.view().setMaximumWidth(self.comboBox_5.width())
if __name__ == "__main__":
    # Standalone entry point for manually testing the dialog.
    # NOTE(review): ParseANNT() with all-default arguments raises TypeError in
    # __init__ ('"' + None for t2n_exe) -- confirm this entry is still usable.
    app = QApplication(sys.argv)
    PhyloSuite = ParseANNT()
    PhyloSuite.show()
    sys.exit(app.exec_())
| dongzhang0725/PhyloSuite | PhyloSuite/src/Lg_parseANNT.py | Lg_parseANNT.py | py | 58,110 | python | en | code | 118 | github-code | 13 |
26925052196 | class Solution:
def longestConsecutive(self, nums: List[int]) -> int:
nums_set = set(nums)
ans = 0
for num in nums:
if num - 1 not in nums_set:
tmp = 1
while num + 1 in nums_set:
num += 1
tmp += 1
ans = max(ans, tmp)
return ans
# 执行用时:
# 48 ms
# , 在所有 Python3 提交中击败了
# 53.69%
# 的用户
# 内存消耗:
# 15.4 MB
# , 在所有 Python3 提交中击败了
# 9.20%
# 的用户 | hwngenius/leetcode | learning/Array/128.py | 128.py | py | 543 | python | zh | code | 1 | github-code | 13 |
70179488979 | """ Convert any colour to the ANSI format to write in colours in your terminal.
Note: The conversion to an ANSI escape sequence may induce some colour variations.
Also notice that some colours can't be mixed together as foreground and background.
"""
RESET = "\x1b[0m"


def RGBtoANSI(text: str, foregound=(255, 255, 255), background=()):
    """Colour *text* for the terminal with ANSI truecolor escape sequences.

    Args:
        text: the text to colour.
        foregound: RGB foreground triplet. Defaults to white (255, 255, 255).
            (Parameter name kept misspelled for backward compatibility.)
        background: RGB background triplet; empty means no background colour.

    Raises:
        ValueError: if the foreground colour is empty.

    Returns:
        str: *text* wrapped in the ANSI colour codes, ending with a reset.
    """
    if not foregound:
        raise ValueError(
            "The foreground colour can't be empty! "
            "Omit the parameter to write the text in white."
        )
    prefix = f"\033[38;2;{foregound[0]};{foregound[1]};{foregound[2]}m"
    if background:
        prefix += f"\033[48;2;{background[0]};{background[1]};{background[2]}m"
    return f"{prefix}{str(text)}{RESET}"
def HEXtoRGB(fg="#000000"):
    """Convert a hexadecimal colour string to its RGB triplet.

    Args:
        fg (str, optional): hexadecimal colour, with or without a leading
            '#'. Defaults to "#000000".

    Raises:
        ValueError: if *fg* is not a 6-digit hexadecimal colour.

    Returns:
        list: the [R, G, B] components as integers in 0-255.
    """
    # The original wrapped this in a pointless `while True:` with an
    # unreachable `return 1` after it; both removed.
    hex_digits = fg.lstrip("#").lower()
    if len(hex_digits) != 6:
        raise ValueError("Enter a valid hexadecimal value")
    return [int(hex_digits[i: i + 2], base=16) for i in (0, 2, 4)]
def HEXtoANSI(text, foreground="#ffffff", background=""):
    """Colour *text* using hexadecimal colour values.

    Args:
        text: the text to colour.
        foreground: hexadecimal foreground colour. Defaults to "#ffffff".
        background: hexadecimal background colour; "" means no background.

    Raises:
        ValueError: if no foreground colour is given.

    Returns:
        str: *text* wrapped in ANSI truecolor escape codes.
    """
    # HEXtoRGB and RGBtoANSI live in this very module; the previous
    # `from ansiconverter.converter import HEXtoRGB` self-import was removed.
    # The empty-foreground check now runs BEFORE conversion, so the intended
    # ValueError is actually reachable.
    if not foreground:
        raise ValueError("Please enter at least one foreground colour.")
    foregroundRGB = HEXtoRGB(foreground)
    if background:
        return RGBtoANSI(text, foregroundRGB, HEXtoRGB(background))
    return RGBtoANSI(text, foregroundRGB)
def RGBtoHEX(rgb=[255, 255, 255]):
    """Convert an RGB triplet to its hexadecimal representation.

    Args:
        rgb (list, optional): the colour to convert. Defaults to [255, 255, 255].

    Raises:
        ValueError: if *rgb* is an empty list.

    Returns:
        string: the colour's hexadecimal value, e.g. "#ff000a".
    """
    if rgb == []:
        raise ValueError("The colour can't be an empty list. Please retry.")
    red, green, blue = rgb[0], rgb[1], rgb[2]
    return f"#{red:02x}{green:02x}{blue:02x}"
38032949698 | __author__ = "Tulay Cuhadar Donszelmann <tcuhadar@cern.ch>"
__version__ = '0.10.21'
import logging
import os
import sys
from ART.docopt_dispatch import dispatch
from ART import ArtBase, ArtGrid, ArtBuild
from ART.art_misc import get_atlas_env, set_log
MODULE = "art"
#
# First list the double commands
#
@dispatch.on('compare', 'ref')
def compare_ref(path, ref_path, **kwargs):
    """Compare a job's output against a fixed reference directory."""
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    exit(ArtBase(art_dir).compare_ref(
        path, ref_path, kwargs['file'], kwargs['entries'], kwargs['mode']))
@dispatch.on('compare', 'grid')
def compare_grid(package, test_name, **kwargs):
    """Compare a grid job's output against previous nightly results."""
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    art_grid = ArtGrid(art_dir, nightly_release, project, platform, nightly_tag)
    exit(art_grid.compare(
        package, test_name, int(kwargs['days']), kwargs['user'], kwargs['file'],
        entries=kwargs['entries'], mode=kwargs['mode'], shell=True))
@dispatch.on('list', 'grid')
def list(package, **kwargs):
    """List the grid jobs defined by a package.

    NOTE: the function name shadows the `list` builtin; kept because it
    mirrors the CLI command name.
    """
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    art_grid = ArtGrid(art_dir, nightly_release, project, platform, nightly_tag)
    exit(art_grid.list(package, 'grid', kwargs['test_type'], kwargs['json'],
                       kwargs['user']))
@dispatch.on('log', 'grid')
def log(package, test_name, **kwargs):
    """Show the log of a grid job."""
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    art_grid = ArtGrid(art_dir, nightly_release, project, platform, nightly_tag)
    exit(art_grid.log(package, test_name, kwargs['user']))
@dispatch.on('output', 'grid')
def output(package, test_name, **kwargs):
    """Download the output of a grid job."""
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    art_grid = ArtGrid(art_dir, nightly_release, project, platform, nightly_tag)
    exit(art_grid.output(package, test_name, kwargs['user']))
@dispatch.on('submit')
def submit(sequence_tag, **kwargs):
    """Submit nightly jobs to the grid, NOT for users."""
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    job_type = kwargs['type'] if kwargs['type'] is not None else 'grid'
    # only the production account notifies panda
    inform_panda = os.getenv('USER', 'artprod') == 'artprod'
    art_grid = ArtGrid(art_dir, nightly_release, project, platform, nightly_tag,
                       max_jobs=int(kwargs['max_jobs']))
    exit(art_grid.task_list(job_type, sequence_tag, inform_panda,
                            kwargs['package'], kwargs['no_action'], True,
                            kwargs['config']))
@dispatch.on('grid')
def grid(script_directory, sequence_tag, **kwargs):
    """Run a package's jobs on the grid; needs release and grid setup."""
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    job_type = kwargs['type'] if kwargs['type'] is not None else 'grid'
    art_grid = ArtGrid(art_dir, nightly_release, project, platform, nightly_tag,
                       script_directory=script_directory, skip_setup=True,
                       max_jobs=int(kwargs['max_jobs']))
    # no panda notification, no package filter/config, no wait-and-copy
    exit(art_grid.task_list(job_type, sequence_tag, False, None,
                            kwargs['no_action'], False, None))
@dispatch.on('run')
def run(script_directory, sequence_tag, **kwargs):
    """Run a package's jobs against a local build; needs release and grid setup."""
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    job_type = kwargs['type'] if kwargs['type'] is not None else 'build'
    art_build = ArtBuild(art_dir, nightly_release, project, platform,
                         nightly_tag, script_directory,
                         max_jobs=int(kwargs['max_jobs']), ci=kwargs['ci'])
    exit(art_build.task_list(job_type, sequence_tag))
@dispatch.on('copy')
def copy(indexed_package, **kwargs):
    """Copy job outputs to the eos area."""
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    # NOTE: the default destination depends on USER and is resolved inside
    # ArtGrid.copy, not here.
    art_grid = ArtGrid(art_dir, nightly_release, project, platform, nightly_tag)
    exit(art_grid.copy(indexed_package, dst=kwargs['dst'], user=kwargs['user'],
                       no_unpack=kwargs['no_unpack'], tmp=kwargs['tmp'],
                       seq=int(kwargs['seq']), keep_tmp=kwargs['keep_tmp']))
@dispatch.on('validate')
def validate(script_directory, **kwargs):
    """Check headers in tests."""
    set_log(kwargs)
    # Location of this script; ArtBase needs it to find its resources.
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    exit(ArtBase(art_dir).validate(script_directory))
@dispatch.on('included')
def included(script_directory, **kwargs):
    """Show list of files which will be included for art submit/art grid."""
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    # A job type of None lists every type; 'test_type' narrows the index.
    exit(ArtBase(art_dir).included(script_directory, kwargs['type'],
                                   kwargs['test_type'], nightly_release,
                                   project, platform))
@dispatch.on('config')
def config(package, **kwargs):
    """Show configuration."""
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    # Renamed locally so the variable does not shadow this function's name.
    config_file = kwargs['config']
    exit(ArtBase(art_dir).config(package, nightly_release, project, platform, config_file))
@dispatch.on('createpoolfile')
def createpoolfile(package, **kwargs):
    """Create a pool file."""
    # (The previous docstring, "Show configuration.", was a copy-paste slip
    # from the 'config' command.)
    set_log(kwargs)
    art_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    nightly_release, project, platform, nightly_tag = get_atlas_env()
    exit(ArtGrid(art_dir, nightly_release, project, platform, nightly_tag).createpoolfile())
if __name__ == '__main__':
    # Refuse interpreters older than 2.7; the script relies on 2.7-era stdlib.
    if sys.version_info < (2, 7, 0):
        sys.stderr.write("You need python 2.7 or later to run this script\n")
        exit(1)
    # Root logger with default settings; individual commands tune verbosity
    # through set_log().
    logging.basicConfig()
    # Dispatch the CLI (presumably docopt-dispatch): the module docstring is
    # the command grammar, and the version string is "<script> <__version__>".
    dispatch(__doc__, version=os.path.splitext(os.path.basename(__file__))[0] + ' ' + __version__)
| rushioda/PIXELVALID_athena | athena/Tools/ART/scripts/art.py | art.py | py | 7,142 | python | en | code | 1 | github-code | 13 |
class RandomAgent:
    """Baseline agent that samples uniformly random actions from the env."""

    def __init__(self, env):
        self.env = env
        self.actions_cnt = env.action_space.n
        self._max_iter = 2                 # episode length cap
        self._gamma = 0.99                 # discount factor
        self._final_reward_weight = 1.0    # weight of the final-episode term

    def predict_action(self, state):
        """
        Return action that should be done from input state according to current policy.
        Args:
            state: list of points - results of raycasting
        return:
            action: int
        """
        # Policy placeholder: ignore the state and sample at random.
        return self.env.action_space.sample()

    def evaluate(self):
        """
        Generate CAD model, reconstruct it and count the reward according
        to MSE between original and reconstructed models and number of steps.
        return:
            episode_reward: float, plus the visited states and taken actions
        """
        state = self.env.reset()
        episode_reward = 0.0
        states, actions = [], []
        for step in range(self._max_iter):
            action = self.predict_action(state)
            actions.append(action)
            state, reward, done, info = self.env.step(action)
            print("STEP: ", step, "REWARD: ", reward)
            states.append(state)
            self.env.render(action, state)
            # Discounted return accumulation.
            episode_reward += reward * self._gamma ** step
            if done:
                break
        final_reward = self.env.final_reward()
        print("Hausdorff reward: ", final_reward)
        episode_reward += self._final_reward_weight / final_reward  # QUESTION
        return episode_reward, states, actions
| PotapovaSofia/NextBestViewRL | rl/random_agent.py | random_agent.py | py | 1,788 | python | en | code | 1 | github-code | 13 |
def checkio(game_result):
    """Return "X" or "O" when that player has three in a row, else "D" (draw).

    `game_result` is a list of three 3-character row strings. Unlike the
    previous version, the caller's list is no longer mutated (the old code
    appended columns/diagonals to `game_result` itself with `+=`).
    """
    rows = list(game_result)
    # Column entries: transpose the rows.
    columns = ["".join(col) for col in zip(*game_result)]
    # The two diagonals: top-left -> bottom-right and bottom-left -> top-right.
    top_left = "".join(game_result[n][n] for n in range(3))
    bot_left = "".join(game_result[2 - n][n] for n in range(3))
    lines = rows + columns + [top_left, bot_left]
    if "XXX" in lines:
        return "X"
    if "OOO" in lines:
        return "O"
    return "D"
# These asserts are only for self-checking; they are not needed for auto-testing.
if __name__ == '__main__':
    # Row win for X.
    assert checkio([
        u"X.O",
        u"XX.",
        u"XOO"]) == "X", "Xs wins"
    # Column win for O.
    assert checkio([
        u"OO.",
        u"XOX",
        u"XOX"]) == "O", "Os wins"
    # Full board, no three-in-a-row.
    assert checkio([
        u"OOX",
        u"XXO",
        u"OXX"]) == "D", "Draw"
    # Diagonal win for X.
    assert checkio([
        u"O.X",
        u"XX.",
        u"XOO"]) == "X", "Xs wins again"
| stroke-one/CheckiO_Solutions | home/x-o-referee.py | x-o-referee.py | py | 1,076 | python | en | code | 0 | github-code | 13 |
"""Advent of Code 2021, day 9 part 1: sum the risk levels of all low points."""

# Parse the height grid, one digit per cell. The 'with' block closes the
# input file (the previous version leaked the handle from a bare open()).
with open('Day 09.input') as fh:
    terrain = [[int(c) for c in line.strip()] for line in fh]

# Surround the grid with a sentinel border of 9s so every real cell has
# exactly four neighbours and no bounds checks are needed.
for row in terrain:
    row.insert(0, 9)
    row.append(9)
terrain.insert(0, [9] * len(terrain[0]))
terrain.append([9] * len(terrain[0]))

NEIGHBOURS = ((-1, 0), (0, -1), (1, 0), (0, 1))

total = 0
for x in range(1, len(terrain) - 1):
    for y in range(1, len(terrain[0]) - 1):
        tile = terrain[x][y]
        # A low point is strictly lower than all four orthogonal neighbours;
        # its risk level is height + 1. all() short-circuits, unlike the old
        # flag-based loop that always checked every neighbour.
        if all(terrain[x + dx][y + dy] > tile for dx, dy in NEIGHBOURS):
            total += tile + 1
print(total)
| Mraedis/AoC2021 | Day 09/Day 09.1.py | Day 09.1.py | py | 576 | python | en | code | 1 | github-code | 13 |
"""
Longest Substring Without Repeating Characters
Given a string, find the length of the longest substring without repeating characters.
Example 1:
Input: "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Example 2:
Input: "bbbbb"
Output: 1
Explanation: The answer is "b", with the length of 1.
Example 3:
Input: "pwwkew"
Output: 3
Explanation: The answer is "wke", with the length of 3.
Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
"""
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest substring of `s` with no
        repeating characters.

        Single O(n) sliding-window pass: `start` marks the left edge of the
        current duplicate-free window and `last_seen` maps each character to
        the index of its most recent occurrence. This replaces the previous
        list-based scan, which paid O(n) per character for membership tests,
        `index()` lookups and slicing.
        """
        last_seen = {}
        start = 0
        best = 0
        for i, ch in enumerate(s):
            # A repeat inside the current window moves the left edge just
            # past the previous occurrence of this character.
            if ch in last_seen and last_seen[ch] >= start:
                start = last_seen[ch] + 1
            last_seen[ch] = i
            best = max(best, i - start + 1)
        return best
| brownhash/leetcode | longest_substring.py | longest_substring.py | py | 1,071 | python | en | code | 0 | github-code | 13 |
import turtle
import random
# Shared turtle pens: `t` draws the board, the other three write text.
t = turtle.Turtle()
t.speed(0)  # fastest drawing speed for the board pen
p1c = turtle.Turtle()  # player 1 cash display
p2c = turtle.Turtle()  # player 2 cash display
r = turtle.Turtle()  # rent and winning message display
def screen():  # SCREEN COLOUR, SIZE
    """Create the 600x600 game window with a dark sea green background."""
    window = turtle.Screen()
    window.bgcolor('dark sea green')
    window.setup(width=600, height=600)
def square(x, y, length):  #BORDERS
    # Draw an axis-aligned square of the given side length with the shared
    # pen `t`, starting from (x, y) and turning clockwise.
    t.penup()
    t.goto(x, y)
    t.pendown()
    for i in range(4):
        t.forward(length)
        t.right(90)
    t.hideturtle()
def h_lines(x,y): #VERTICAL LINES ON HORIZONTAL PROPERTIES
    # Draw the nine 50 px tile dividers of a horizontal row, starting at
    # (x, y). Each pass traces down one divider, along the row and back up,
    # advancing one 50 px tile per iteration.
    t.penup()
    t.goto(x,y)
    for i in range(9):
        t.pendown()
        t.right(90)
        t.forward(50)
        t.left(90)
        t.forward(50)
        t.left(90)
        t.forward(50)
        t.right(90)
def v_lines(x,y): #HORIZONTAL LINES ON VERTICAL PROPERTIES
    # Draw the nine 50 px tile dividers of a vertical column, starting at
    # (x, y). Each pass traces across one divider, down the column and back,
    # descending one 50 px tile per iteration.
    t.penup()
    t.goto(x,y)
    for i in range(9):
        t.pendown()
        t.forward(50)
        t.right(90)
        t.forward(50)
        t.right(90)
        t.forward(50)
        t.right(180)
def htile(x,y,colour,name): #HORIZONTAL PROPERTIES
    # Draw a filled 50x15 colour bar for a tile on a horizontal row, then
    # write its label: below the bar on the bottom row (y < 0), above it on
    # the top row. Draws immediately and returns None.
    t.penup()
    t.goto(x,y)
    t.pendown()
    t.fillcolor(colour)
    t.begin_fill()
    for i in range(2):
        t.forward(50)
        t.right(90)
        t.forward(15)
        t.right(90)
    t.end_fill()
    t.speed(0)
    t.color('black')
    t.penup()
    if y<0:
        t.goto(x+25,y-30)
    else:
        t.goto(x+25,y+10)
    t.write(name, align='center', font=('Courier', 8, 'bold'))
    t.hideturtle()
def vtile(x,y,colour,name): #VERTICAL PROPERTIES
    # Draw a filled 15x50 colour bar for a tile on a vertical column, then
    # write its label: to the left on the left column (x < 0), to the right
    # otherwise. Draws immediately and returns None.
    t.penup()
    t.goto(x,y)
    t.pendown()
    t.fillcolor(colour)
    t.begin_fill()
    for i in range(2):
        t.forward(15)
        t.right(90)
        t.forward(50)
        t.right(90)
    t.end_fill()
    t.speed(0)
    t.color('black')
    t.penup()
    if x<0:
        t.goto(x-10,y-25)
    else:
        t.goto(x+30,y-25)
    t.write(name, align='center', font=('Courier', 8, 'bold'))
    t.hideturtle()
def centre(): #WORDS IN CENTRE
    # Write the game title in the middle of the board.
    t.penup()
    t.goto(0,0)
    t.write("MONOPOLY", align='center', font=('Courier', 35, 'bold'))
    t.hideturtle()
def roll_dice():
    """Return the total of rolling two six-sided dice (2-12).

    The previous version drew a single uniform value in [2, 12], which gives
    snake-eyes the same probability as a 7. Summing two independent d6 rolls
    restores the triangular distribution of a real dice roll while keeping
    the same 2-12 range for callers.
    """
    return random.randint(1, 6) + random.randint(1, 6)
def p1_cash_display(c):
p1c.hideturtle()
p1c.penup()
p1c.goto(-100,-75)
to_be_printed = "P1 Cash=" + str(c)
p1c.write(to_be_printed, align='center', font=('Courier', 15, 'bold'))
def p2_cash_display(c):
p2c.hideturtle()
p2c.penup()
p2c.goto(100,-75)
to_be_printed = "P2 Cash=" + str(c)
p2c.write(to_be_printed, align='center', font=('Courier', 15, 'bold'))
def rent_display(p, rent_cost):
    """Announce that player `p` owns the current tile and rent was charged."""
    r.hideturtle()
    r.penup()
    r.goto(0, 175)
    message = "P{} owns this,${} charged".format(p, rent_cost)
    r.write(message, align='center', font=('Courier', 15, 'bold'))
class Tile:
    """One board tile: its price, the rent it charges, and who owns it.

    Attribute codes (as used by the game loop below):
      p -- purchase price
      r -- rent charged on landing (negative values pay the player)
      o -- ownership: 0 unowned, 1 player 1, 2 player 2, 3 never purchasable
      d -- result of the drawing call passed in at construction time
    """

    def __init__(self, price, rent, ownership, design):
        self.p, self.r, self.o, self.d = price, rent, ownership, design
#SCREEN, GRID
t.hideturtle()
centre()                # "MONOPOLY" label in the middle of the board
screen()                # window colour and size
square(-225,225,450)    # inner board border
square(-275,275,550)    # outer board border
v_lines(-275,225)       # tile dividers, left column
v_lines(225,225)        # tile dividers, right column
h_lines(-225,275)       # tile dividers, top row
h_lines(-225,-225)      # tile dividers, bottom row
t.hideturtle()
# Board layout: Tile(price, rent, ownership, design).
# ownership: 0 = purchasable and unowned, 1 = player 1, 2 = player 2,
#            3 = special tile (corner / tax / bonus) that can never be bought.
# For special tiles the "rent" field is the amount deducted on landing, so a
# negative rent (e.g. -100) pays the player. htile/vtile draw immediately and
# return None, so the .d attribute is always None.
#bottom right corner tile
t0 = Tile(00,0,3,htile(225,-225,"dark magenta","GO"))
#bottom row
t1 = Tile(60,6,0,htile(175,-225,"brown","BR1"))
t2 = Tile(00,-100,3,htile(125,-225,"dark magenta","$100"))
t3 = Tile(60,6,0,htile(75,-225,"brown", "BR2"))
t4 = Tile(00,200,3,htile(25,-225,"light slate gray", "-$200"))
t5 = Tile(200,20,0,htile(-25,-225,"black", "T1"))
t6 = Tile(100,10,0,htile(-75,-225,"medium turquoise","LB1"))
t7 = Tile(0,100,3,htile(-125,-225,"light slate gray","-$100"))
t8 = Tile(100,10,0,htile(-175,-225,"medium turquoise","LB2"))
t9 = Tile(120,12,0,htile(-225,-225,"medium turquoise","LB3"))
#bottom left corner tile
t10 = Tile(0,0,3,htile(-275,-225,"light slate gray","JAIL"))
#left column
t11 = Tile(140,14,0,vtile(-240,-175,"deep pink","P1"))
t12 = Tile(150,15,0,vtile(-240,-125,"white","U1"))
t13 = Tile(140,14,0,vtile(-240,-75,"deep pink","P2"))
t14 = Tile(160,16,0,vtile(-240,-25,"deep pink","P3"))
t15 = Tile(200,20,0,vtile(-240,25,"black","T2"))
t16 = Tile(180,18,0,vtile(-240,75,"orange","O1"))
t17 = Tile(00,-50,3,vtile(-240,125,"dark magenta","$50"))
t18 = Tile(180,18,0,vtile(-240,175,"orange","O2"))
t19 = Tile(200,20,0,vtile(-240,225,"orange","O3"))
#top left corner tile
t20 = Tile(0,-200,3,htile(-275,240,"dark magenta","$200"))
#top row
t21 = Tile(220,22,0,htile(-225,240,"red","R1"))
t22 = Tile(00,-50,3,htile(-175,240,"dark magenta","$50"))
t23 = Tile(220,22,0,htile(-125,240,"red","R2"))
t24 = Tile(240,24,0,htile(-75,240,"red","R3"))
t25 = Tile(200,20,0,htile(-25,240,"black","T3"))
t26 = Tile(260,26,0,htile(25,240,"yellow","Y1"))
t27 = Tile(260,26,0,htile(75,240,"yellow","Y2"))
t28 = Tile(150,15,0,htile(125,240,"white","U2"))
t29 = Tile(260,26,0,htile(175,240,"yellow","Y3"))
#top right corner tile
t30 = Tile(0,0,3,htile(225,240,"light slate gray","GOJAIL"))
#right column
t31 = Tile(300,30,0,vtile(225,225,"green","G1"))
t32 = Tile(300,30,0,vtile(225,175,"green","G2"))
t33 = Tile(00,-150,3,vtile(225,125,"dark magenta","$150"))
t34 = Tile(320,30,0,vtile(225,75,"green","G3"))
t35 = Tile(200,20,0,vtile(225,25,"black","T4"))
t36 = Tile(0,100,3,vtile(225,-25,"light slate gray","-$100"))
t37 = Tile(350,35,0,vtile(225,-75,"blue","B1"))
t38 = Tile(00,-50,3,vtile(225,-125,"dark magenta","$50"))
t39 = Tile(400,40,0,vtile(225,-175,"blue","B2"))

# Position index -> tile; index 40 maps back to t0 so a wrapped position
# still resolves before it is reset to 0 by the movement code.
dd= {
    0:t0,#bottom right corner tile
    1:t1,2:t2,3:t3,4:t4,5:t5,6:t6,7:t7,8:t8,9:t9, #bottom row
    10:t10, #bottom left corner tile
    11:t11,12:t12,13:t13,14:t14,15:t15,16:t16,17:t17,18:t18,19:t19, #left column
    20:t20, #top left corner tile
    21:t21,22:t22,23:t23,24:t24,25:t25,26:t26,27:t27,28:t28,29:t29, #top row
    30:t30, #top right corner tile
    31:t31,32:t32,33:t33,34:t34,35:t35,36:t36,37:t37,38:t38,39:t39, #right column
    40:t0 #bottom right corner tile
}
#Player Mechanics, info
# Both players start just outside the GO corner (bottom right) with $1000.
# The initial right(90) turns the marker to face south so that the first
# corner turn sends it west along the bottom row.
p1 = turtle.Turtle()
p1.penup()
p1.goto(250,-250)
p1.right(90)
p1.color("blue")
cp1 = 0 #current position (board index into dd)
p1_cash = 1000

p2 = turtle.Turtle()
p2.penup()
p2.goto(250,-250)
p2.right(90)
p2.color("red")
cp2 = 0 #current position (board index into dd)
p2_cash = 1000
#MAIN LOOP, MOVEMENT, GAMEPLAY
# Players alternate turns until one of them goes negative or a prompt is
# cancelled. Each turn: roll, step tile-by-tile (turning at corners), then
# resolve the tile landed on, and refresh both cash displays.
while p1_cash >=0 and p2_cash>= 0:
    #Player 1's Turn
    wn = turtle.Screen()
    answer = wn.textinput("P1 Next Roll", "Press OK to roll the dice, Cancel to quit:")
    r.clear()
    if answer is None:
        break
    #Movement
    for i in range(roll_dice()):
        # Corners: turn right before stepping; passing GO (index 40) wraps
        # the position and pays $100.
        if cp1 == 0 or cp1 == 10 or cp1 == 20 or cp1 == 30 or cp1 == 40:
            if cp1 == 40:
                cp1 = 0
                p1_cash+=100
            p1.right(90)
            p1.forward(50)
            cp1 += 1
        else:
            p1.forward(50)
            cp1 += 1
    #Option to purchase if unowned
    if dd[cp1].o == 0:
        y_n = wn.textinput("P1 Purchase?","Type Y to purchase for ${}".format(dd[cp1].p))
        #if player says yes
        if y_n == "Y" or y_n == "y":
            r.clear()
            dd[cp1].o = 1
            p1_cash -= dd[cp1].p
    #if property is already owned by other player 2, rent is charged
    elif dd[cp1].o == 2:
        rent_display(2,dd[cp1].r)
        p1_cash -= dd[cp1].r
        p2_cash += dd[cp1].r
    #Go to Jail function
    elif cp1 == 30:
        # $50 fine, teleport to the jail corner and face west so the next
        # corner turn heads up the left column.
        p1_cash -= 50
        p1.goto(-250,-250)
        p1.right(180)
        cp1 = 10
    #If tile cannot be owned as it is not a property, money is transferred to/ from player
    # NOTE(review): a player landing on their OWN property (o == 1 here)
    # also falls through to this branch and pays rent into the void --
    # looks unintended; confirm.
    else:
        p1_cash -= dd[cp1].r
    #CASH DISPLAY
    p1c.clear()
    p1_cash_display(p1_cash)
    p2c.clear()
    p2_cash_display(p2_cash)
    #End loop immediately if cash is negative
    if p1_cash <0 or p2_cash<0:
        break
    #Player 2's Turn
    wn = turtle.Screen()
    answer = wn.textinput("P2 Next Roll", "Press OK to roll the dice, Cancel to quit:")
    r.clear()
    if answer is None:
        break
    #Movement
    for i in range(roll_dice()):
        if cp2 == 0 or cp2 == 10 or cp2==20 or cp2==30 or cp2 ==40:
            if cp2 == 40:
                cp2 = 0
                p2_cash+=100
            p2.right(90)
            p2.forward(50)
            cp2 += 1
        else:
            p2.forward(50)
            cp2 += 1
    #Option to purchase if unowned
    if dd[cp2].o == 0:
        y_n = wn.textinput("P2 Purchase?","Type Y to purchase for ${}".format(dd[cp2].p))
        #if player says yes
        if y_n == "Y" or y_n == "y":
            dd[cp2].o = 2
            p2_cash -= dd[cp2].p
    #if property is already owned by other player 1, rent is charged
    elif dd[cp2].o == 1:
        r.clear()
        rent_display(1,dd[cp2].r)
        p2_cash -= dd[cp2].r
        p1_cash += dd[cp2].r
    #Go to Jail function
    elif cp2 == 30:
        p2_cash -= 50
        p2.goto(-250,-250)
        p2.right(180)
        cp2 = 10
    #If tile cannot be owned as it is not a property, money is transferred to/from player
    # NOTE(review): same own-property rent leak as player 1 above.
    else:
        p2_cash -= dd[cp2].r
    #CASH DISPLAY
    p1c.clear()
    p1_cash_display(p1_cash)
    p2c.clear()
    p2_cash_display(p2_cash)
    #End loop immediately if cash is negative
    if p1_cash <0 or p2_cash<0:
        break
#WINNER DECIDER
# The loop above exits either because a balance went negative (bankruptcy)
# or because a prompt was cancelled; announce the outcome accordingly.
r.hideturtle()
r.penup()
r.goto(0,175)
#if p2 wins
if p1_cash <0:
    winner = "P2 Wins !!!"
    r.write(winner, align='center', font=('Courier', 15, 'bold'))
#if p1 wins
elif p2_cash<0:
    winner = "P1 Wins !!!"
    r.write(winner, align='center', font=('Courier', 15, 'bold'))
#if game is ended prematurely
else:
    winner = "No winner found!"
    r.write(winner, align='center', font=('Courier', 15, 'bold'))
turtle.done()
| imk8/Monopoly | Monopoly V4.py | Monopoly V4.py | py | 9,754 | python | en | code | 0 | github-code | 13 |
import os
import shlex
import subprocess
def executeOnBenchmarks(fptaylorpath, folder_path, results_folder):
    """Run FPTaylor (with --rel-error true) on every .txt benchmark in
    `folder_path`, writing each run's stdout/stderr to
    `folder_path + results_folder + '/<benchmark>'`.

    Skips everything (with a warning) if the results folder already exists.
    NOTE: `folder_path` is concatenated directly, so it must end with '/'.
    """
    if os.path.exists(folder_path + results_folder):
        print("WARNING!!! FPTaylor results already computed!")
        return
    os.makedirs(folder_path + results_folder)
    for file in os.listdir(folder_path):
        if file.endswith(".txt"):
            print("FpTaylor on: " + str(file))
            exe = shlex.split(fptaylorpath + " --rel-error true " + folder_path + file)
            # 'with' guarantees the trace file is closed even if the child
            # fails to start (the previous version leaked the handle).
            with open(folder_path + results_folder + "/" + file, "w+") as trace:
                pUNI = subprocess.Popen(exe, shell=False, stdout=trace, stderr=trace)
                pUNI.wait()
    print("Done")
def getAbsoluteError(folder_path):
    """Parse the FPTaylor output files in `folder_path` and return a dict
    mapping each benchmark name (lower-cased, plus '<name>_gaussian' and
    '<name>_exp' aliases) to its 'Absolute error (exact)' value as a string,
    or None when the line is absent.

    Fixes vs. the original: files are opened with a context manager instead
    of a leak-prone open()/close() pair, and the unreachable
    `print("Done")` that sat after the return has been removed.
    """
    my_dict = {}
    for file in os.listdir(folder_path):
        if not file.endswith(".txt"):
            continue
        file_name = file.split(".")[0]
        value = None
        with open(folder_path + "/" + file, "r") as f:
            for line in f:
                if "Absolute error (exact):" in line:
                    # "Absolute error (exact): 1.5e-07 (...)" -> "1.5e-07"
                    value = line.split(":")[1].split("(")[0].strip()
                    break
        key = file_name.lower()
        my_dict[key] = value
        my_dict[key + "_gaussian"] = value
        my_dict[key + "_exp"] = value
    return my_dict
def getRelativeError(folder_path):
    """Parse the FPTaylor output files in `folder_path` and return a dict
    mapping each benchmark name (lower-cased, plus '<name>_gaussian' and
    '<name>_exp' aliases) to its 'Relative error (exact)' value, or None
    when the line is absent.

    The value is kept verbatim (leading space, any '(...)' suffix and the
    trailing newline included) to preserve the original behaviour.
    NOTE(review): this differs from getAbsoluteError, which strips the
    value -- the inconsistency looks accidental; confirm before changing.

    Fixes vs. the original: context-managed file handling and removal of
    the unreachable `print("Done")` after the return.
    """
    my_dict = {}
    for file in os.listdir(folder_path):
        if not file.endswith(".txt"):
            continue
        file_name = file.split(".")[0]
        value = None
        with open(folder_path + "/" + file, "r") as f:
            for line in f:
                if "Relative error (exact):" in line:
                    value = str(line.split(":")[1])
                    break
        key = file_name.lower()
        my_dict[key] = value
        my_dict[key + "_gaussian"] = value
        my_dict[key + "_exp"] = value
    return my_dict
def getBounds(folder_path):
    """Parse the FPTaylor output files in `folder_path` and return a dict
    mapping each benchmark name (lower-cased, plus '<name>_gaussian' and
    '<name>_exp' aliases) to its 'Bounds (floating-point)' value, or None
    when the line is absent.

    As in the original there is deliberately no break: if several matching
    lines exist, the last one wins. The value is kept verbatim (leading
    space and trailing newline included).

    Fixes vs. the original: context-managed file handling and removal of
    the unreachable `print("Done")` after the return.
    """
    my_dict = {}
    for file in os.listdir(folder_path):
        if not file.endswith(".txt"):
            continue
        file_name = file.split(".")[0]
        value = None
        with open(folder_path + "/" + file, "r") as f:
            for line in f:
                if "Bounds (floating-point):" in line:
                    value = str(line.split(":")[1])
        key = file_name.lower()
        my_dict[key] = value
        my_dict[key + "_gaussian"] = value
        my_dict[key + "_exp"] = value
    return my_dict
def getFPTaylorResults(fptaylor_command, benchmarks_path):
    """Run FPTaylor on every benchmark under `benchmarks_path` and collect
    the parsed results.

    Returns:
        (range_dict, abs_dict, rel_dict) -- bounds, absolute errors and
        relative errors keyed by lower-cased benchmark name (plus the
        '_gaussian' / '_exp' aliases added by the parsers).
    """
    results_folder = "results"
    executeOnBenchmarks(fptaylor_command, benchmarks_path, results_folder)
    abs_my_dict = getAbsoluteError(benchmarks_path + results_folder)
    rel_my_dict = getRelativeError(benchmarks_path + results_folder)
    range_my_dict = getBounds(benchmarks_path + results_folder)
    # Bug fix: warn when ANY of the three dictionaries disagree in size.
    # The original combined the two negated comparisons with 'and', so a
    # single mismatch went unreported.
    if not (len(abs_my_dict) == len(rel_my_dict) and len(range_my_dict) == len(rel_my_dict)):
        print("WARNING!!! Mismatch in dictionaries in FPTaylor")
    return range_my_dict, abs_my_dict, rel_my_dict
import os
import ee
import json
import requests
import numpy as np
import pandas as pd
import datetime as dt
# Authenticate to Google Earth Engine with the deployed service account.
# Module-level side effect: runs at import time (presumably once per cloud
# function instance, on cold start).
service_account = "fire-water-chart@appspot.gserviceaccount.com"
credentials = ee.ServiceAccountCredentials(service_account, "privatekey.json")
ee.Initialize(credentials)
def serializer(df_pre, df_fire):
    """Left-join precipitation onto fire counts by date and return the
    merged frame as a list of plain record dicts."""
    merged = pd.merge(df_fire, df_pre, how="left", on="date")
    return json.loads(merged.to_json(orient="records"))
def get_geometry(iso, adm1=None):
    """
    Return a geometry as a GeoJSON FeatureCollection from the Resource Watch
    geostore.

    Parameters
    ----------
    iso : str
        Country iso code.
    adm1 : int, optional
        Admin 1 code. Must be non-negative.
        NOTE(review): a value of 0 is falsy and silently falls back to the
        country-level endpoint -- confirm whether admin codes start at 1.

    Returns
    -------
    geometry : GeoJSON
        GeoJSON object describing the geometry.
    """
    if adm1:
        if adm1 < 0:
            raise ValueError("Code number %s must be non-negative." % adm1)
        url = f"https://api.resourcewatch.org/v2/geostore/admin/{iso}/{adm1}"
    else:
        url = f"https://api.resourcewatch.org/v2/geostore/admin/{iso}"
    response = requests.get(url)
    return response.json().get("data").get("attributes").get("geojson")
def get_dates(date_text=None):
    """
    Return the date range used by the chart.

    Parameters
    ----------
    date_text : str, optional
        Last date in 'YYYY-MM-DD' format; defaults to today (normalized).

    Returns
    -------
    dates : Index of str
        Every day from `start_date` to `end_date`, as 'YYYY-MM-DD' strings.
    start_date, date, end_date, start_year_date : Timestamp
        Moving-window start (one year + 61 days back), the anchor date,
        window end (61 days forward), and the first day of the 1-year range.
    """
    if date_text:
        try:
            dt.datetime.strptime(date_text, "%Y-%m-%d")
        except ValueError:
            raise ValueError("Incorrect data format, should be YYYY-MM-DD")
        date = pd.to_datetime(date_text)
    else:
        date = pd.to_datetime("today").normalize()
    # Length of the anchor date's calendar year (365 or 366 days).
    year_first = date.replace(month=1, day=1)
    year_last = date.replace(month=12, day=31)
    nDays_year = len(pd.date_range(year_first, year_last, freq="D"))
    start_year_date = date - dt.timedelta(days=nDays_year)
    # Pad 61 days on both sides so the 2-month moving averages are defined
    # across the whole displayed year.
    start_date = date - dt.timedelta(days=nDays_year + 61)
    end_date = date + dt.timedelta(days=61)
    dates = pd.date_range(start_date, end_date, freq="D").astype(str)
    return dates, start_date, date, end_date, start_year_date
def nestedMappedReducer(featureCol, imageCol):
    """
    Compute mean values for each geometry in a FeatureCollection and each
    image in an ImageCollection.

    To prevent the "Computed value is too large" error, reduceRegion() is
    mapped over the FeatureCollection instead of using reduceRegions().

    Parameters
    ----------
    featureCol : ee.FeatureCollection
        FeatureCollection with the geometries to intersect with.
    imageCol : ee.ImageCollection
        ImageCollection with a time series of images.

    Returns
    -------
    featureCol : ee.FeatureCollection
        Flattened collection with one feature per (geometry, image) pair,
        carrying the mean values and a 'date' property.
    """
    def mapReducerOverImgCol(feature):
        def imgReducer(image):
            # One output feature per image: located at the (coarse, 100 m
            # tolerance) centroid of the input geometry, holding the mean of
            # each band over that geometry, stamped with the image date.
            return (
                ee.Feature(
                    feature.geometry().centroid(100),
                    image.reduceRegion(
                        geometry=feature.geometry(),
                        reducer=ee.Reducer.mean(),
                        tileScale=10,
                        maxPixels=1e13,
                        bestEffort=True,
                    ),
                )
                .set({"date": image.date().format("YYYY-MM-dd")})
                .copyProperties(feature)
            )
        return imageCol.map(imgReducer)
    return featureCol.map(mapReducerOverImgCol).flatten()
def fire_water_chart(request):
    """HTTP entry point: build the fire/precipitation time series for one
    country (optionally one admin-1 region) and return it as JSON records.
    """
    # Set CORS headers for the preflight request
    if request.method == "OPTIONS":
        # Allows GET requests from any origin with the Content-Type
        # header and caches preflight response for 3600s
        headers = {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "POST",
            "Access-Control-Allow-Headers": "Content-Type",
            "Access-Control-Max-Age": "3600",
        }
        return ("", 204, headers)
    # Set CORS headers for the main request
    headers = {"Access-Control-Allow-Origin": "*"}
    request_json = request.get_json()
    # Get geometry as GeoJSON from geostore
    geometry = get_geometry(request_json["iso"], request_json["adm1"])
    # Convert geometry to ee.Geometry (first feature only)
    aoi = ee.Geometry(geometry.get("features")[0].get("geometry"))
    # Get relevant dates
    dates, start_date, date, end_date, start_year_date = get_dates(
        request_json["date_text"]
    )
    # Read the CHIRPS daily precipitation ImageCollection for the window.
    # The filter end is exclusive, hence the extra day.
    dataset = (
        ee.ImageCollection("UCSB-CHG/CHIRPS/DAILY")
        .filter(
            ee.Filter.date(
                start_date.strftime("%Y-%m-%d"),
                (end_date + dt.timedelta(days=1)).strftime("%Y-%m-%d"),
            )
        )
        .filterBounds(aoi)
    )
    chirps = dataset.select("precipitation")
    # Get mean precipitation values over time
    count = chirps.size()
    data = (
        nestedMappedReducer(ee.FeatureCollection(geometry.get("features")), chirps)
        .toList(count)
        .getInfo()
    )
    df_pre = pd.DataFrame(map(lambda x: x.get("properties"), data))
    # VIIRS fire alerts: daily high-confidence counts from the GFW data API.
    confidence = "h"  # other categories: 'n', 'l'
    if request_json["adm1"]:
        query = f"SELECT alert__date, SUM(alert__count) AS alert__count \
            FROM data WHERE iso = '{request_json['iso']}' AND adm1::integer = {request_json['adm1']} AND confidence__cat = '{confidence}' AND alert__date >= '{start_date}' AND alert__date <= '{end_date}' \
            GROUP BY iso, adm1, alert__date, confidence__cat \
            ORDER BY alert__date"
    else:
        query = f"SELECT alert__date, SUM(alert__count) AS alert__count \
            FROM data WHERE iso = '{request_json['iso']}' AND confidence__cat = '{confidence}' AND alert__date >= '{start_date}' AND alert__date <= '{end_date}' \
            GROUP BY iso, alert__date, confidence__cat \
            ORDER BY alert__date"
    url = f"https://data-api.globalforestwatch.org/dataset/gadm__viirs__adm2_daily_alerts/latest/query/json"
    sql = {"sql": query}
    r = requests.get(url, params=sql)
    data = r.json().get("data")
    if data:
        df_fire = pd.DataFrame.from_dict(pd.json_normalize(data))
        # Fill missing dates with 0
        df_fire = (
            df_fire.set_index("alert__date")
            .reindex(dates, fill_value=0)
            .reset_index()
            .rename(columns={"index": "alert__date"})
        )
        df_fire.rename(
            columns={"alert__date": "date", "alert__count": "fire"}, inplace=True
        )
    else:
        # No alerts in the window: all-zero series over the full date range.
        df_fire = pd.DataFrame({"date": dates, "fire": 0})
    # Moving averages
    # 1 week moving average
    df_pre["precipitation_w"] = (
        df_pre[["date", "precipitation"]].rolling(window=7, center=True).mean()
    )
    df_fire["fire_w"] = df_fire[["date", "fire"]].rolling(window=7, center=True).mean()
    # 2 month moving average
    df_pre["precipitation_2m"] = (
        df_pre[["date", "precipitation"]].rolling(window=61, center=True).mean()
    )
    df_fire["fire_2m"] = (
        df_fire[["date", "fire"]].rolling(window=61, center=True).mean()
    )
    # Keep only the displayed year (the padding days exist solely so the
    # moving averages above are defined at the edges).
    df_pre = df_pre[
        (df_pre["date"] >= start_year_date.strftime("%Y-%m-%d"))
        & (df_pre["date"] <= date.strftime("%Y-%m-%d"))
    ]
    df_fire = df_fire[
        (df_fire["date"] >= start_year_date.strftime("%Y-%m-%d"))
        & (df_fire["date"] <= date.strftime("%Y-%m-%d"))
    ]
    return (json.dumps(serializer(df_pre, df_fire)), 200, headers)
| Vizzuality/mongabay-data | cloud_functions/fire_tool/main.py | main.py | py | 8,299 | python | en | code | 0 | github-code | 13 |
import matplotlib.pyplot as plt
import numpy as np
# Trapezoidal joint trajectory (linear segment with parabolic blends):
# move from q0 to qf in tf seconds, accelerating for tb seconds, cruising,
# then decelerating for tb seconds.
q0=-5; qf = 80; tf = 4
# Feasibility: a pure bang-bang profile needs at least this acceleration.
min_acc = 4*abs(qf-q0)/tf**2
print("acc needs to be bigger than ", min_acc)
# mode_aorv == 0: acceleration is specified and blend time/velocity derived;
# otherwise cruise velocity is specified and acceleration derived.
mode_aorv = 0
if mode_aorv == 0:
    acc = 30
    tb = tf/2 -np.sqrt( acc**2 * tf**2 -4*acc*np.abs(qf-q0)) /2/acc
    if (qf-q0)>=0:
        vel = acc * tb
    else:
        # Negative displacement: flip the sign of the acceleration.
        acc = acc * -1
        vel = acc * tb
else:
    vel = 25
    tb = (vel*tf - abs(qf-q0))/vel
    if (qf-q0)>=0:
        acc = vel / tb
    else:
        vel = vel * -1
        acc = vel/tb
# Acceleration Region # 1st Region (parabolic blend, 0 <= t < tb)
t1 = np.arange(0, tb, step=0.01)
q1 = q0 + vel/(2*tb) * (t1**2)
dq1 = acc * t1
ddq1 = acc*np.ones(np.size(t1))
# Constant Velocity Region # 2nd Region (linear segment, tb <= t < tf-tb)
t2 = np.arange(tb, tf-tb, step=0.01)
q2 = (qf+q0-vel*tf)/2 + vel* t2
dq2 = vel*np.ones(np.size(t2))
ddq2 = 0*np.ones(np.size(t2))
# Decceleration Region # 3rd Region (parabolic blend, tf-tb <= t <= tf)
t3 = np.arange(tf-tb, tf+0.01, step=0.01)
q3 = qf - acc/2 * tf**2 + acc*tf*t3 - acc/2*t3**2
dq3 = acc * tf - acc*t3
ddq3 = -acc * np.ones(np.size(t3))
# Total Region: concatenate the three pieces into one trajectory.
t = np.concatenate((t1,t2,t3))
q = np.concatenate((q1,q2,q3))
dq = np.concatenate((dq1,dq2,dq3))
ddq = np.concatenate((ddq1,ddq2,ddq3))
# Plotting Graph: position, velocity and acceleration vs time.
fig, (ax1, ax2, ax3) = plt.subplots(3)
ax1.set(xlabel = "time is second", ylabel = "joint pose in deg")
ax1.plot(t, q)
ax2.set(xlabel = "time is second", ylabel = "joint vel in deg/s")
ax2.plot(t, dq)
ax3.set(xlabel = "time is second", ylabel = "joint acc in deg/sec^2")
ax3.plot(t, ddq)
plt.show()
| Phayuth/robotics_manipulator | trajectory_planner/traj_plan_linear_parabolic.py | traj_plan_linear_parabolic.py | py | 1,499 | python | en | code | 0 | github-code | 13 |
# https://www.acmicpc.net/problem/14699
# Baekjoon 14699: Gwanak Mountain hiking (topological sort)
# import sys
# input = sys.stdin.readline
from collections import deque

n, m = map(int, input().split())
heights = list(map(int, input().split()))

# indegree[v] counts the neighbours taller than v; for each edge only the
# LOWER endpoint's indegree is incremented. Nodes with no taller neighbour
# (local maxima) seed the traversal.
indegree = [0] * n
graph = [[] for _ in range(n)]
for _ in range(m):
    a, b = map(int, input().split())
    if heights[a - 1] > heights[b - 1]:
        indegree[b - 1] += 1
    else:
        indegree[a - 1] += 1
    graph[a - 1].append(b - 1)
    graph[b - 1].append(a - 1)

result = [1] * n
q = deque()
for i in range(n):
    if indegree[i] == 0:
        q.append(i)

# (Bug fix: removed the stray debug `print(indegree)` that was here -- it
# corrupted the judged output.)

while q:
    now = q.popleft()
    for i in graph[now]:
        indegree[i] -= 1
        if indegree[i] == 0:
            result[i] += result[now]
            q.append(i)

for i in result:
    print(i)

# NOTE (translated from the original Korean comment): the topological sort
# must proceed in height order, otherwise the solution exceeds the time
# limit.
from conexion import ConexionPG
from atributos_conexion import ATRIBUTOS
class Modelo:
    """Base class for models; lazily holds a PostgreSQL connection.

    `_conexion` defaults to None and is created on the first call to
    `inicializar_conexion` (note: called on a subclass, the attribute is
    set on that subclass, so each model class gets its own connection).
    """

    _conexion = None

    @classmethod
    def inicializar_conexion(cls):
        """Open the connection on first use; later calls are no-ops."""
        if cls._conexion is not None:
            return
        cls._conexion = ConexionPG(**ATRIBUTOS)
class Editorial(Modelo):
    # Row of the Editorial (publisher) table: `nombre` plus the DB-assigned
    # `editorial_id` (None until saved or loaded).

    def __init__(self, nombre, editorial_id=None):
        self.nombre = nombre
        self.editorial_id = editorial_id

    def guardar(self):
        """Insert this publisher, then read back its generated id."""
        self.__class__._conexion._ejecutar_sql(
            "INSERT INTO Editorial (nombre) VALUES (%s)",
            (self.nombre,)
        )
        # Fetch the id of the newest row with this name.
        # NOTE(review): not atomic -- a concurrent insert with the same
        # name could return the other row's id.
        self.__class__._conexion._ejecutar_sql(
            "SELECT editorial_id FROM Editorial WHERE nombre=%s ORDER BY editorial_id DESC LIMIT 1",
            (self.nombre, )
        )
        nueva_editorial = self.__class__._conexion._leer_desde_sql()
        self.editorial_id = nueva_editorial[0][0]

    def actualizar(self):
        """Persist the (possibly changed) name of this existing row."""
        self.__class__._conexion._ejecutar_sql(
            "UPDATE Editorial SET nombre = %s WHERE editorial_id = %s",
            (self.nombre, self.editorial_id)
        )

    @classmethod
    def buscar_por_nombre(cls, nombre):
        """Return the newest Editorial with the given name as an instance."""
        cls._conexion._ejecutar_sql(
            "SELECT nombre, editorial_id FROM Editorial WHERE nombre=%s ORDER BY editorial_id DESC LIMIT 1",
            (nombre, )
        )
        editorial_buscada = cls._conexion._leer_desde_sql()
        return Editorial(
            editorial_buscada[0][0],
            editorial_buscada[0][1]
        )
class Libro(Modelo):
    # Row of the Libro (book) table; `editorial_id` is a foreign key to
    # Editorial and `libro_id` is None until saved.

    def __init__(
        self, titulo, autor, esta_disponible,
        editorial_id=None, libro_id=None
    ):
        self.titulo = titulo
        self.autor = autor
        self.esta_disponible = esta_disponible
        self.editorial_id = editorial_id
        self.libro_id = libro_id

    def guardar(self):
        """Insert this book, then read back its generated id."""
        self.__class__._conexion._ejecutar_sql(
            "INSERT INTO Libro (titulo, autor, esta_disponible, editorial_id) VALUES"
            "(%s, %s, %s, %s)",
            (self.titulo, self.autor, self.esta_disponible, self.editorial_id)
        )
        # Fetch the id of the newest row with this title.
        # NOTE(review): same non-atomic lookup pattern as Editorial.guardar.
        self.__class__._conexion._ejecutar_sql(
            "SELECT libro_id FROM Libro WHERE titulo=%s ORDER BY libro_id DESC LIMIT 1",
            (self.titulo, )
        )
        nuevo_libro = self.__class__._conexion._leer_desde_sql()
        self.libro_id = nuevo_libro[0][0]
# Open each model's database connection at import time.
Libro.inicializar_conexion()
Editorial.inicializar_conexion()

"""
mi_libro = Libro("Cien Anios de Soledad", "Gabriel Garcia Marquez", False, Editorial.buscar_por_nombre("Alfaguara").editorial_id)
mi_libro.guardar()
print(mi_libro.libro_id)
"""

# Demo: rename the publisher "Alfaguara" to "Santillana".
editorial = Editorial.buscar_por_nombre("Alfaguara")
editorial.nombre = "Santillana"
editorial.actualizar()
import pytest
from twisted.internet.error import DNSLookupError
@pytest.mark.parametrize(
'retry_middleware_response',
(({'FAKEUSERAGENT_FALLBACK': 'firefox'}, 503), ),
indirect=True
)
def test_random_ua_set_on_response(retry_middleware_response):
assert 'User-Agent' in retry_middleware_response.headers
# Same scenario as above, but the retry is triggered by a download exception
# (DNS lookup failure) instead of an HTTP error status.
@pytest.mark.parametrize(
    'retry_middleware_exception',
    (({'FAKEUSERAGENT_FALLBACK': 'firefox'},
      DNSLookupError('Test exception')), ),
    indirect=True
)
def test_random_ua_set_on_exception(retry_middleware_exception):
    # The retried request must carry a (re)generated User-Agent header.
    assert 'User-Agent' in retry_middleware_exception.headers
| alecxe/scrapy-fake-useragent | tests/test_retry_middleware.py | test_retry_middleware.py | py | 621 | python | en | code | 658 | github-code | 13 |
from pathlib import Path
from torch.utils.data import Dataset, ConcatDataset, DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
import cv2
import bcolz
import pickle
import torch
import mxnet as mx
from tqdm import tqdm
import os
import matplotlib.pyplot as plt
def load_bin(path, rootdir, image_size=(112, 112)):
    """Decode a verification .bin file (pickled JPEG blobs + same/diff
    labels) into a bcolz carray of normalized CHW float32 tensors.

    Fixes vs. the original: the pickle file is opened with a context manager
    (was a leaked bare open()), the default `image_size` is an immutable
    tuple instead of a mutable list, and the pointless in-loop mutation of
    the loop variable was dropped (the progress-message cadence is
    unchanged). Commented-out plotting scaffolding was removed.

    Returns:
        (data, issame_list) -- the filled carray and the pair labels, with
        the labels also saved to '<rootdir>_list.npy'.
    """
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.501960784, 0.501960784, 0.501960784])
    ])  # H*W*C --> C*H*W, (X-127.5)/128 = (X/255.0-0.5)/0.501960784, 0.501960784=128/255.0
    if not os.path.isdir(rootdir):
        os.mkdir(rootdir)
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f, encoding='bytes')
    data = bcolz.fill([len(bins), 3, image_size[0], image_size[1]], dtype=np.float32, rootdir=rootdir, mode='w')
    for i in range(len(bins)):
        # imdecode: three channel color output + RGB formatted output
        img = mx.image.imdecode(bins[i]).asnumpy()
        img = Image.fromarray(img.astype(np.uint8))
        data[i, ...] = test_transform(img)
        if (i + 1) % 1000 == 0:
            print('loading bin', i + 1)
    print(data.shape)
    np.save(rootdir + '_list', np.array(issame_list))
    return data, issame_list
# origin
def load_mx_rec(rec_path):
    """Expand an mxnet RecordIO face dataset (``train.idx``/``train.rec``)
    into a per-identity folder tree of JPEG files under ``<rec_path>/imgs_mxnet``.

    Record payloads are written verbatim (no decode/re-encode), so every
    image keeps its original JPEG bytes. One sub-folder is created per
    identity label, each image saved as ``<record index>.jpg``.
    """
    save_path = os.path.join(rec_path, 'imgs_mxnet')
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    train_indx_path = os.path.join(rec_path, 'train.idx')
    train_rec_path = os.path.join(rec_path, 'train.rec')
    imgrec = mx.recordio.MXIndexedRecordIO(train_indx_path, train_rec_path, 'r')
    # record 0 is a header record: its first label is one past the last valid index
    img_info = imgrec.read_idx(0)
    header, _ = mx.recordio.unpack(img_info)
    max_idx = int(header.label[0])
    for idx in tqdm(range(1, max_idx)):
        img_info = imgrec.read_idx(idx)
        header, s = mx.recordio.unpack(img_info)
        # the record label encodes the identity of the face
        label = int(header.label[0])
        label_path = os.path.join(save_path, str(label))
        os.makedirs(label_path, exist_ok=True)
        img_path = os.path.join(label_path, '{}.jpg'.format(idx))
        # `with` closes the file itself; the original's explicit f.close() was redundant
        with open(img_path, 'wb') as f:
            f.write(s)
# def load_mx_rec(rec_path):
# save_path = os.path.join(rec_path, 'imgs_test')
# if not os.path.isdir(save_path):
# os.makedirs(save_path)
# train_indx_path = os.path.join(rec_path, 'train.idx')
# train_rec_path = os.path.join(rec_path, 'train.rec')
# imgrec = mx.recordio.MXIndexedRecordIO(train_indx_path, train_rec_path, 'r')
#
# img_info = imgrec.read_idx(0)
# header, _ = mx.recordio.unpack(img_info)
# max_idx = int(header.label[0])
# print("max_idx", str(max_idx))
# txt_path = './log.txt'
#
# for idx in range(1, max_idx):
# # xz codes
# img_info = imgrec.read_idx(idx)
# header, s = mx.recordio.unpack(img_info)
# print("header={}".format(header))
# label = int(header.label[0])
#
# # img = mx.image.imdecode(s).asnumpy() # imdecode: three channel color output + RGB formatted output
# # img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
#
# label_path = os.path.join(save_path, str(label))
# origin_img_path = os.path.join(label_path, '{}.jpg'.format(idx))
# print("origin_img_path={}".format(origin_img_path))
#
# img1 = mx.image.imdecode(s).asnumpy()
# # print(img1)
# # print("img1 type={}, {}".format(type(img1), type(img1[0])))
# img1 = img1.astype(np.float32)
# # print(img1.shape)
#
# img_path = os.path.join("/mnt/ssd/faces/iccv_challenge/train/ms1m-retinaface-t1/imgs",
# str(label), '{}.jpg'.format(idx))
# print("img_path={}".format(img_path))
# img2 = cv2.imread(img_path)
# img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
# # print("img2 type={}, {}".format(type(img2), type(img2[0])))
# img2 = img2.astype(np.float32)
#
# delta = img1 - img2
# sum_data = np.sum(np.abs(delta))
# mean_data = np.mean(np.abs(delta))
#
# # if sum_data > max_sample:
# # max_sample = sum_data
# #
# # if sum_data < min_sample:
# # min_sample = sum_data
# #
# # if mean_data > max_mean_sample:
# # max_mean_sample = mean_data
# #
# # if mean_data < min_mean_sample:
# # min_mean_sample = mean_data
#
# log_str = "img_path={}, sum_data={}, mean_data={}\n".format(img_path, sum_data, mean_data)
# with open(txt_path, 'a+') as f:
# f.write(log_str)
# f.close()
#
# print(log_str)
#
# if idx == 100000:
# assert False
# if __name__ == '__main__':
# # load_bin(path='../lfw.bin', rootdir=os.path.join('./', 'lfw'))
# # print("load bin...")
# lfw, lfw_issame = get_one_val_data('./', 'lfw')
# print("4\n", lfw.shape, type(lfw[0]), lfw[0])
import cv2
from pyzbar import pyzbar
if __name__ == "__main__":
    barcodes = ['images/barcode-3.jpg']
    for barcode_file in barcodes:
        # load the image with opencv
        img = cv2.imread(barcode_file)
        # decode the barcodes; keep the result in its own variable —
        # pyzbar.decode returns a list of Decoded objects, NOT an image,
        # so the original `img = pyzbar.decode(img)` clobbered the image
        # and then crashed cv2.imshow with a list
        decoded = pyzbar.decode(img)
        # outline every detected barcode on the image
        for barcode in decoded:
            x, y, w, h = barcode.rect
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # show the annotated image
        cv2.imshow("img", img)
        cv2.waitKey(0)
from huaweicloudsdkcore.auth.credentials import BasicCredentials
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.http.http_config import HttpConfig
from huaweicloudsdkecs.v2 import *
from VPC.VPC import VPC
from error import error
class ECS:
    """Thin convenience wrapper around the SberCloud/Huawei ECS v2 SDK client
    (ru-moscow-1 endpoint).

    On construction the SDK client is configured and the current list of
    servers is cached in ``self._json_of_ecs``; every mutating operation
    (rename / create / delete) refreshes that cache on success.
    """
    def __init__(self, ak: str, sk: str, project_id: str):
        # ak / sk / project_id are the credentials for BasicCredentials
        self._endpoint = "https://ecs.ru-moscow-1.hc.sbercloud.ru"
        self._ak = ak
        self._sk = sk
        self._project_id = project_id
        self._config = None
        self._credentials = None
        self._client = None
        self._set_configs()
        self._set_client()
        # cached list of server description objects (see get_json_of_ecs)
        self._json_of_ecs = self.get_json_of_ecs()
    def _set_configs(self):
        """Build the HTTP config and credentials for the SDK client."""
        self._config = HttpConfig.get_default_config()
        # NOTE(review): SSL verification is deliberately disabled here — confirm
        self._config.ignore_ssl_verification = True
        self._credentials = BasicCredentials(self._ak, self._sk, self._project_id)
    def _set_client(self):
        """Instantiate the EcsClient from the prepared config/credentials."""
        self._client = EcsClient.new_builder() \
            .with_http_config(self._config) \
            .with_credentials(self._credentials) \
            .with_endpoint(self._endpoint) \
            .build()
    def change_name_of_ecs(self, ecs_name_old: str, ecs_name_new: str):
        """Rename server *ecs_name_old* to *ecs_name_new*.

        Returns 1 on success, 2 when *ecs_name_new* is already taken,
        0 when the API call fails (the exception is logged via `error`).
        """
        try:
            if not self.check_ecs_name(ecs_name_new):
                option = UpdateServerOption(name=ecs_name_new)
                update_body = UpdateServerRequestBody(option)
                request = UpdateServerRequest(self._get_ecs_id_by_name(ecs_name_old), update_body)
                response = self._client.update_server(request)
                self._json_of_ecs = self.get_json_of_ecs()
                return 1
            return 2
        except exceptions.ClientRequestException as e:
            error(e)
            return 0
    def get_json_of_ecs(self):
        """Fetch the current server detail objects from the API.

        Implicitly returns None on an API error (only logged via `error`).
        """
        try:
            request = ListServersDetailsRequest()
            response = self._client.list_servers_details(request)
            return response.servers
        except exceptions.ClientRequestException as e:
            error(e)
    def get_json_of_flavors(self):
        """Fetch the available server flavors; None on an API error."""
        try:
            request = ListFlavorsRequest()
            response = self._client.list_flavors(request)
            return response.flavors
        except exceptions.ClientRequestException as e:
            error(e)
    def check_ecs_name(self, ecs_name: str):
        """Return True when a server named *ecs_name* exists in the cached list."""
        for ecs in self._json_of_ecs:
            if ecs.name == ecs_name:
                return True
        return False
    def _get_ecs_id_by_name(self, ecs_name: str):
        """Return the id of the cached server named *ecs_name*.

        Implicitly returns None when no server matches.
        """
        for i in range(len(self._json_of_ecs)):
            if ecs_name == self._json_of_ecs[i].name:
                return self._json_of_ecs[i].id
    def delete_ecs_by_name(self, ecs_name: str):
        """Delete the named server; True on success, False on an API error.

        NOTE(review): the two positional True flags on DeleteServersRequestBody
        presumably delete the attached public IP and volumes as well —
        confirm against the SDK signature.
        """
        try:
            ecs_id = [self._get_ecs_id_by_name(ecs_name), ]
            body = DeleteServersRequestBody(True, True, ecs_id)
            request = DeleteServersRequest(body)
            response = self._client.delete_servers(request)
            self._json_of_ecs = self.get_json_of_ecs()
            return True
        except exceptions.ClientRequestException as e:
            error(e)
            return False
    def create_new_ecs(self, ecs_name: str,
                       flavor_ref: str,
                       image_ref: str,
                       vpc_name: str, subnet_name: str,
                       volume_type: str, volume_size: int):
        """Create a post-paid server in the named VPC/subnet.

        Returns 1 on success, 2 when *ecs_name* is already taken,
        0 when the API call fails.
        """
        try:
            if not self.check_ecs_name(ecs_name):
                # resolve VPC and subnet names to ids via the VPC wrapper
                vpc = VPC(self._ak, self._sk, self._project_id)
                subnet_id = vpc.get_subnet_id_by_name(subnet_name)
                vpc_id = vpc.get_vpc_id_by_name(vpc_name)
                nics = [PostPaidServerNic(subnet_id), ]
                root_volume = PostPaidServerRootVolume(volume_type, volume_size)
                server = PostPaidServer(name=ecs_name,
                                        flavor_ref=flavor_ref, image_ref=image_ref,
                                        vpcid=vpc_id, nics=nics,
                                        root_volume=root_volume)
                server_body = CreatePostPaidServersRequestBody(server=server)
                request = CreatePostPaidServersRequest(body=server_body)
                response = self._client.create_post_paid_servers(request)
                self._json_of_ecs = self.get_json_of_ecs()
                return 1
            return 2
        except exceptions.ClientRequestException as e:
            error(e)
            return 0
class Solution:
    def minOperations(self, nums: List[int], numsDivide: List[int]) -> int:
        """Return the minimum number of deletions from ``nums`` so that its
        smallest remaining element divides every value of ``numsDivide``,
        or -1 when no element of ``nums`` can.

        A value divides every element of ``numsDivide`` iff it divides their
        GCD, so it suffices to find the smallest such value in sorted order;
        everything smaller must be deleted.
        """
        from functools import reduce
        from math import gcd

        target = reduce(gcd, numsDivide)
        deletions = 0
        for value in sorted(nums):
            if target % value == 0:
                # all strictly smaller values have been counted for deletion
                return deletions
            deletions += 1
        return -1
from rest_framework.routers import DefaultRouter
from django.urls import path, include
from myapp.views.user_views import (
MyTokenObtainPairView, RegisterViewSet,
)
from myapp.views.article_views import ArticleListView
router = DefaultRouter()
# /accounts/register/ -> user registration endpoints
router.register('accounts/register', RegisterViewSet,
                basename='register')
# /article/list/ -> article listing endpoints
router.register('article/list', ArticleListView,
                basename='article_list')
urlpatterns = [
    path('', include(router.urls)),
    # JWT login: POST credentials, receive a token pair
    path('accounts/login/', MyTokenObtainPairView.as_view(),
         name='token_obtain_pair'),
]
"""
The `GPM` module contains all functions related to the *processing* of the GPM-IMERG near realtime satellite derived precipitation for the Southwest Pacific
"""
# ignore user warnings
import warnings
warnings.simplefilter("ignore", UserWarning)
# import matplotlib
import matplotlib
# matplotlib.use('Agg') # uncomment to use the non-interactive Agg backend
import os
import pathlib
from datetime import datetime, timedelta
import pandas as pd
import xarray as xr
from matplotlib import pyplot as plt
# import the local utils package
from . import utils
def get_files_to_download(dpath=None):
    """Return the sorted list of expected daily GPM-IMERG files missing from
    *dpath*, or None when the local archive is complete.

    The expected archive spans 2001-01-01 up to "today minus a lag": one day
    before noon, two days after noon (reflecting the upstream publication
    delay of the daily 'late run' product).

    Parameters
    ----------
    dpath : str or pathlib.Path, optional
        Directory holding the daily files; defaults to
        ``../../data/GPM_IMERG/daily`` relative to the working directory.

    Raises
    ------
    ValueError
        If an explicitly given *dpath* does not exist.
    """
    if dpath is None:
        dpath = pathlib.Path.cwd().parents[2].joinpath('data/GPM_IMERG/daily')
    else:
        if not isinstance(dpath, pathlib.PosixPath):
            dpath = pathlib.Path(dpath)
        if not dpath.exists():
            raise ValueError(f"The path {str(dpath)} does not exist")
    # daily files already present on disk
    on_disk = sorted(dpath.glob("GPM_IMERG_daily.v06.????.??.??.nc"))
    # upstream lag: the daily file for day D only appears 1-2 days later
    now = datetime.today()
    lag = 1 if now.hour < 12 else 2
    last_day = now - timedelta(days=lag)
    # every file the archive should contain for 2001-01-01 .. last_day
    expected = [
        dpath.joinpath(f"GPM_IMERG_daily.v06.{d:%Y.%m.%d}.nc")
        for d in pd.date_range(start='2001-01-01', end=f"{last_day:%Y-%m-%d}")
    ]
    missing = sorted(set(expected) - set(on_disk))
    return missing if missing else None
def download(dpath=None, lfiles=None, proxy=None, lon_min=125., lon_max=240., lat_min=-50., lat_max=25., interp=True):
    """Download daily GPM-IMERG 'late run' files via curl and post-process each.

    For every date derived from the file names in *lfiles*, the raw nc4 is
    fetched from the NASA GES DISC archive (Earthdata credentials are read
    from ~/.netrc / ~/.urs_cookies through curl's ``-n`` flag), then the
    'HQprecipitation' and 'precipitationCal' variables are kept, optionally
    interpolated onto the 0.25 deg TRMM grid, rolled to 0-360 longitudes,
    cropped to the requested box and saved as
    ``GPM_IMERG_daily.v06.YYYY.MM.DD.nc`` in *dpath*.

    NOTE(review): `update` below duplicates most of this logic; the two
    functions should eventually be merged.

    Parameters
    ----------
    dpath : str or pathlib.Path, optional
        Target directory, by default ``../../data/GPM_IMERG/daily``.
    lfiles : list of pathlib.Path, optional
        Missing files (e.g. from `get_files_to_download`); their names give
        the dates to fetch. Nothing is downloaded when None.
    proxy : str, optional
        HTTP(S) proxy handed to curl, by default None.
    lon_min, lon_max, lat_min, lat_max : float, optional
        Geographical subset (degrees, 0-360 longitude convention).
    interp : bool, optional
        Whether to interpolate onto the TRMM 0.25 deg grid, by default True.
    """
    from subprocess import call
    from shutil import which
    import xarray as xr
    curl = which("curl")
    # checks paths
    if dpath is None:
        dpath = pathlib.Path.cwd().parents[2].joinpath('data/GPM_IMERG/daily')
    else:
        if type(dpath) != pathlib.PosixPath:
            dpath = pathlib.Path(dpath)
        if not dpath.exists():
            raise ValueError(f"The path {str(dpath)} does not exist")
    if lfiles is not None:
        print(f"will be downloading {len(lfiles)} files")
        dates_to_download = [get_date_from_file(fname) for fname in lfiles]
        for date in dates_to_download:
            root_url = f"https://gpm1.gesdisc.eosdis.nasa.gov/data/GPM_L3/GPM_3IMERGDL.06/{date:%Y/%m}"
            fname = f"3B-DAY-L.MS.MRG.3IMERG.{date:%Y%m%d}-S000000-E235959.V06.nc4"
            fname_out = f'GPM_IMERG_daily.v06.{date:%Y.%m.%d}.nc'
            # build the command
            if proxy:
                cmd = f"{curl} --silent --proxy {proxy} -n -c ~/.urs_cookies -b ~/.urs_cookies -L --url {root_url}/{fname} -o {dpath}/{fname}"
            else:
                cmd = f"{curl} --silent -n -c ~/.urs_cookies -b ~/.urs_cookies -L --url {root_url}/{fname} -o {dpath}/{fname}"
            print(f"trying to download {fname_out} in {str(dpath)}")
            # execute the command
            r = call(cmd, shell=True)
            if r != 0:
                print("download failed for date {:%Y-%m-%d}".format(date))
            else:
                stat_info = os.stat(str(dpath.joinpath(fname)))
                # a very small file is an HTML error page / truncated download,
                # not valid netcdf — presumably why 800 kB was chosen; TODO confirm
                if stat_info.st_size > 800000:
                    dset_in = xr.open_dataset(dpath.joinpath(fname), engine='netcdf4')
                    dset_in = dset_in[['HQprecipitation','precipitationCal']]
                    if interp:
                        trmm_grid = make_trmm_grid()
                        dset_in = dset_in.interp_like(trmm_grid)
                        dset_in = dset_in.transpose('time','lat','lon')
                        # close the template grid here: the original closed it
                        # unconditionally after this `if`, which raised a
                        # NameError whenever interp=False
                        trmm_grid.close()
                    # roll in the longitudes to go from -180 -> 180 to 0 -> 360
                    dset_in = utils.roll_longitudes(dset_in)
                    dset_in = dset_in.sel(lon=slice(lon_min, lon_max), lat=slice(lat_min, lat_max))
                    dset_in.to_netcdf(dpath.joinpath(fname_out), unlimited_dims='time')
                    dpath.joinpath(fname).unlink()
                    dset_in.close()
                else:
                    print(f'\n! file size for input file {fname} is too small, netcdf file {fname_out} is not yet available to download from {root_url}\n')
    # cleaning the nc4 files
    for nc4_file in list(dpath.glob("*.nc4")):
        nc4_file.unlink()
def get_files_list(dpath='/media/nicolasf/END19101/data/GPM-IMERG', ndays=None, date=None, lag=1):
    """List the daily GPM-IMERG netcdf files to process.

    When *ndays* is given, returns the files covering the *ndays*-day window
    ending at *date* (default: now minus *lag* days) and raises if any day of
    the window has no file; otherwise returns every daily file found in *dpath*.

    Parameters
    ----------
    dpath : str or pathlib.Path, optional
        Directory holding ``GPM_IMERG_daily.v06.YYYY.MM.DD.nc`` files;
        None falls back to ``../../data/GPM_IMERG/daily`` relative to cwd.
    ndays : int, optional
        Length of the accumulation window, in days.
    date : datetime, optional
        Last day of the window; defaults to now minus *lag* days.
    lag : int, optional
        Lag (days) to realtime used when *date* is None, by default 1.

    Returns
    -------
    list of pathlib.Path
        The sorted list of daily files.

    Raises
    ------
    ValueError
        If *dpath* does not exist, or a day in the window is missing.
    """
    if dpath is None:
        dpath = pathlib.Path.cwd().parents[2].joinpath('data/GPM_IMERG/daily')
    else:
        if type(dpath) != pathlib.PosixPath:
            dpath = pathlib.Path(dpath)
        if not dpath.exists():
            raise ValueError(f"The path {str(dpath)} does not exist")
    if ndays is not None:
        if date is None:
            date = datetime.now() - timedelta(days=lag)
        lfiles = []
        for d in pd.date_range(end=date, start=date - timedelta(days=ndays - 1)):
            fpath = dpath.joinpath(f"GPM_IMERG_daily.v06.{d:%Y.%m.%d}.nc")
            if fpath.exists():
                lfiles.append(fpath)
            else:
                raise ValueError(f"GPM_IMERG_daily.v06.{d:%Y.%m.%d}.nc is missing")
        # one sort after the loop is enough — the original re-sorted the whole
        # list on every iteration
        lfiles.sort()
        if len(lfiles) != ndays:
            print(f"!!! warning, only {len(lfiles)} days will be used to calculate the rainfall accumulation, instead of the intended {ndays}")
    else:
        lfiles = list(dpath.glob("GPM_IMERG_daily.v06.????.??.??.nc"))
        lfiles.sort()
    return lfiles
def make_dataset(lfiles=None, dpath=None, varname='precipitationCal', ndays=None, check_lag=True):
    """Open the daily GPM-IMERG files as one concatenated dataset keeping
    only `varname`, and record the period's length and last day in the
    global attributes ('ndays' and 'last_day').

    When `check_lag` is True and `ndays` is given, prints a warning if the
    last date is more than 2 days behind now, or if the number of time
    steps differs from `ndays`.
    """
    if lfiles is None:
        lfiles = get_files_list(dpath, ndays=ndays)
    dset = xr.open_mfdataset(lfiles, concat_dim='time', combine='nested', parallel=True, engine='netcdf4')[[varname]]
    # get the last date in the dataset (truncated to midnight)
    last_date = dset.time.to_series().index[-1]
    last_date = datetime(last_date.year, last_date.month, last_date.day)
    ndays_in_dset = len(dset.time)
    # checks that the lag to realtime does not exceed 2
    if (check_lag) and (ndays is not None):
        if (datetime.now() - last_date).days > 2:
            print(f"something is wrong, the last date in the dataset is {last_date:%Y-%m-%d}, the expected date should be not earlier than {datetime.now() - timedelta(days=2):%Y-%m-%d}")
        if ndays_in_dset != ndays:
            print(f"something is wrong with the number of time-steps, expected {ndays}, got {ndays_in_dset}")
    # adds the number of days and the last date as *attributes* of the dataset
    dset.attrs['ndays'] = ndays_in_dset
    dset.attrs['last_day'] = f"{last_date:%Y-%m-%d}"
    return dset
def set_attrs(dset, ndays=None, last_day=None):
    """Record the accumulation window length and its last day as global
    attributes on *dset* (in place).

    Parameters
    ----------
    dset : xarray.Dataset
        Dataset whose ``attrs`` mapping is updated.
    ndays : int, optional
        Number of days in the accumulation period; skipped when None.
    last_day : str or datetime, optional
        Last day of the period; datetimes are stored as 'YYYY-MM-DD'
        strings, other types are ignored.

    Returns
    -------
    xarray.Dataset
        The same dataset, with its attributes set.
    """
    if ndays is not None:
        dset.attrs['ndays'] = ndays
    if isinstance(last_day, str):
        dset.attrs['last_day'] = last_day
    elif isinstance(last_day, datetime):
        dset.attrs['last_day'] = f"{last_day:%Y-%m-%d}"
    return dset
def get_attrs(dset):
    """Read back the ('last_day', 'ndays') global attributes of a dataset.

    e.g.::

        last_day, ndays = get_attrs(dset)

    Parameters
    ----------
    dset : xarray.Dataset
        Dataset carrying 'last_day' ('YYYY-MM-DD' string) and 'ndays'
        global attributes (see `set_attrs` / `make_dataset`).

    Returns
    -------
    tuple
        (last_day as a datetime, ndays)
    """
    return (
        datetime.strptime(dset.attrs['last_day'], '%Y-%m-%d'),
        dset.attrs['ndays'],
    )
def calculate_realtime_accumulation(dset):
    """Sum the daily rainfall over the whole 'time' dimension and return a
    dataset with a singleton 'time' dimension stamped with the period's last
    day (read from the 'last_day' global attribute set by `make_dataset`).
    """
    # calculates the accumulation, make sure we keep the attributes
    dset = dset.sum('time', keep_attrs=True)
    dset = dset.compute()
    # expand the dimension time to have singleton dimension with last date of the ndays accumulation
    dset = dset.expand_dims({'time':[datetime.strptime(dset.attrs['last_day'], "%Y-%m-%d")]})
    return dset
def convert_rainfall_OBS(dset, varin='precipitationCal', varout='precip', timevar='time'):
    """
    converts the rainfall - anomalies or raw data - originally in mm/day
    in the GPM-IMERG dataset to mm per month, by multiplying each monthly
    value by the number of days in the corresponding month

    Returns None (with a warning) when `varin` carries no 'units' attribute
    or is not expressed in 'mm' or 'mm/day'; otherwise returns the dataset
    with `varin` replaced by `varout` (mm/month) plus an 'ndays' variable.
    """
    import pandas as pd
    import numpy as np
    from datetime import datetime
    from calendar import monthrange
    from dateutil.relativedelta import relativedelta
    # checks that the variable is expressed in mm/day
    if not('units' in dset[varin].attrs and dset[varin].attrs['units'] in ['mm','mm/day']):
        print(f"Warning, the variable {varin} has no units attributes or is not expressed in mm or mm/day")
        return None
    else:
        # convert a cftime index to a pandas DatetimeIndex when needed
        index = dset.indexes[timevar]
        if type(index) != pd.core.indexes.datetimes.DatetimeIndex:
            index = index.to_datetimeindex()
        dset[timevar] = index
        # gets the number of days in each month
        ndays = [monthrange(x.year, x.month)[1] for x in index]
        # adds this variable into the dataset
        dset['ndays'] = ((timevar), np.array(ndays))
        # multiply to get the anomalies in mm / month
        dset['var'] = dset[varin] * dset['ndays']
        # rename
        dset = dset.drop(varin)
        dset = dset.rename({'var':varout})
        return dset
def get_climatology(dpath=None, ndays=None, date=None, window_clim=2, lag=None, clim=[2001, 2020], zarr=True):
    """Load the `ndays` running-sum climatology and extract the sample of
    climatological values surrounding a target day.

    For the target `date` (default: now minus `lag` days), the days within
    `window_clim` days either side are matched — by calendar (month, day) —
    across every year of the climatological file, and the selections are
    concatenated along 'time'.

    Parameters
    ----------
    dpath : str or pathlib.Path, optional
        Directory holding the climatology file, by default
        ``../data/GPM_IMERG/daily`` relative to the working directory.
    ndays : int, optional
        Accumulation window length; selects the matching ``{ndays}d_runsum`` file.
    date : datetime, optional
        Target day, by default now minus `lag` days.
    window_clim : int, optional
        The window, in days, for the selection of the days in the climatology, by default 2 (2 days each side of target day)
    lag : int, optional
        The lag to realtime, in days, used when `date` is None.
    clim : list, optional
        [first_year, last_year] of the climatological period, by default [2001, 2020].
    zarr : bool, optional
        Whether to open a zarr file or a netcdf file, default to True (opens the zarr version)

    Returns
    -------
    xarray.Dataset
        The climatological sample, chunked (time: all, lat/lon: 10).

    Raises
    ------
    ValueError
        If an explicitly given `dpath` does not exist.
    """
    if dpath is None:
        dpath = pathlib.Path.cwd().parents[1].joinpath('data/GPM_IMERG/daily')
    else:
        if type(dpath) != pathlib.PosixPath:
            dpath = pathlib.Path(dpath)
        if not dpath.exists():
            raise ValueError(f"The path {str(dpath)} does not exist")
    if date is None:
        date = datetime.now() - timedelta(days=lag)
    if zarr:
        clim_file = dpath.joinpath(f'GPM_IMERG_daily.v06.{clim[0]}.{clim[1]}_precipitationCal_{ndays}d_runsum.zarr')
        dset_clim = xr.open_zarr(clim_file)
    else:
        clim_file = dpath.joinpath(f'GPM_IMERG_daily.v06.{clim[0]}.{clim[1]}_precipitationCal_{ndays}d_runsum.nc')
        dset_clim = xr.open_dataset(clim_file, engine='netcdf4')
    # calendar days of the selection window around the target date
    dates_clim = [date + timedelta(days=shift) for shift in list(range(-window_clim, window_clim+1))]
    time_clim = dset_clim.time.to_index()
    # for each window day, the matching (month, day) across every climatological year
    time_clim = [time_clim[(time_clim.month == d.month) & (time_clim.day == d.day)] for d in dates_clim]
    dset_clim_ref = []
    for t in time_clim:
        dset_clim_ref.append(dset_clim.sel(time=t))
    dset_clim_ref = xr.concat(dset_clim_ref, dim='time')
    # one chunk along time (needed downstream by apply_ufunc), small spatial chunks
    dset_clim_ref = dset_clim_ref.chunk({'time':-1, 'lat':10, 'lon':10})
    dset_clim.close()
    return dset_clim_ref
def calc_anoms_and_pctscores(dset, dset_clim):
    """Add anomaly and percentile-of-score variables to a realtime
    accumulation dataset.

    'anoms' is the departure (mm) of the accumulation from the mean of the
    climatological sample `dset_clim`; 'pctscore' is the percentile rank of
    the observed accumulation within that sample (see
    `calculate_percentileofscore`).

    Parameters
    ----------
    dset : xarray.Dataset
        Realtime accumulation with a singleton 'time' dimension.
    dset_clim : xarray.Dataset
        Climatological sample varying along 'time'.

    Returns
    -------
    xarray.Dataset
        `dset` with the extra 'anoms' and 'pctscore' variables.
    """
    # anomalies in mm
    anoms = dset - dset_clim.mean('time')
    # percentage of score, compared to the climatological values
    pctscore = calculate_percentileofscore(dset.squeeze(), dset_clim)
    # re-attach the singleton time dimension dropped by squeeze()
    pctscore = pctscore.expand_dims({'time':dset.time})
    dset['pctscore'] = pctscore
    dset['anoms'] = anoms['precipitationCal']
    return dset
def get_rain_days_stats(dset, varname='precipitationCal', timevar='time', threshold=1, expand_dim=True):
    """
    return the number of days since last rainfall, and the number of dry and wet days
    according to a threshold defining what is a wet day (by default 1 mm/day)

    Parameters
    ----------
    dset : xarray.Dataset
        The xarray dataset containing the daily rainfall over a period of time
    varname : str, optional
        The name of the precipitation variables, by default 'precipitationCal'
    timevar : str, optional
        The name of the time variable, by default 'time'
    threshold : int, optional
        The threshold (in mm/day) for defining what is a 'rain day', by default 1
    expand_dim : bool, optional
        Whether or not to add a bogus singleton time dimension and coordinate
        in the dataset, by default True

    Return
    ------
    dset : xarray.Dataset
        An xarray dataset with new variables:

        - wet_days : number of wet days
        - dry_days : number of dry days
        - days_since_rain : days since last rain
    """
    # imports
    from datetime import datetime
    import numpy as np
    import xarray as xr
    # number of days in the dataset from the attributes
    ndays = dset.attrs['ndays']
    # last day in the dataset from the attributes
    last_day = datetime.strptime(dset.attrs['last_day'], "%Y-%m-%d")
    # binarise the rainfall: all values below threshold are set to 0 ...
    dset = dset.where(dset[varname] >= threshold, other=0)
    # ... and all remaining (>= threshold) values are set to 1
    dset = dset.where(dset[varname] == 0, other=1)
    # clip (not really necessary ...)
    dset = dset.clip(min=0., max=1.)
    # now calculates the number of days since last rainfall:
    # relabel the time axis as "days before the last day" (ndays-1 .. 0),
    # then take the coordinate of the first cumsum maximum, i.e. the
    # distance of the last wet day from the end of the period
    # NOTE(review): when no day is wet the cumsum is all zeros and idxmax
    # picks the first coordinate (ndays - 1) — confirm this is the intent
    days_since_last_rain = dset.cumsum(dim=timevar, keep_attrs=True)
    days_since_last_rain[timevar] = ((timevar), np.arange(ndays)[::-1])
    days_since_last_rain = days_since_last_rain.idxmax(dim=timevar)
    # now calculates the number of wet and dry days
    dset = dset.sum(timevar)
    number_dry_days = (ndays - dset).rename({varname:'dry_days'})
    # put all that into a dataset
    dset = dset.rename({varname:'wet_days'})
    dset = dset.merge(number_dry_days)
    dset = dset.merge(days_since_last_rain.rename({varname:'days_since_rain'}))
    # expand the dimension (add a bogus time dimension with the date of the last day)
    if expand_dim:
        dset = dset.expand_dims(dim={timevar:[last_day]}, axis=0)
    # make sure the attributes are added back
    dset.attrs['ndays'] = ndays
    dset.attrs['last_day'] = f"{last_day:%Y-%m-%d}"
    return dset
def calculate_percentileofscore(dset, clim, varname='precipitationCal', timevar='time'):
    """
    calculates the percentile of score of a dataset given a climatology

    For each grid point, the realtime value in `dset` is ranked against the
    climatological sample in `clim` along `timevar` (scipy 'weak' kind: the
    percentage of climatological values <= the observed value).

    Parameters
    ----------
    dset : xarray.Dataset
        The input dataset, typically the real time GPM/IMERG dataset
    clim : xarray.Dataset
        The climatology (needs to vary along a 'timevar' dimension as well)
    varname : str, optional
        The name of the variable (needs to be the same in both the input and
        climatological dataset), by default 'precipitationCal'
    timevar : str, optional
        The name of the variable describing the time, by default 'time'

    Returns
    -------
    xarray.Dataset
        The resulting dataset (containing the percentile of score)
    """
    from scipy.stats import percentileofscore
    import xarray as xr
    try:
        import dask
    except ImportError:
        # dask is optional: apply_ufunc simply evaluates eagerly without it.
        # NOTE: the original wrote `except ImportError("..."):`, which catches
        # an *instance*; in Python 3 that raises a TypeError the moment dask
        # is actually missing, instead of being silently tolerated
        pass

    def _percentileofscore(x, y):
        return percentileofscore(x.ravel(), y.ravel(), kind='weak')

    pctscore = xr.apply_ufunc(_percentileofscore, clim[varname], dset[varname], input_core_dims=[[timevar], []],
                              vectorize=True, dask='parallelized')
    return pctscore
def make_trmm_grid():
    """Build an empty 0.25-degree lat/lon template grid matching the legacy
    TRMM layout: cell centers from -59.875 to 59.875 in latitude (480 rows)
    and -179.875 to 179.875 in longitude (1440 columns)."""
    import numpy as np
    import xarray as xr
    coords = {
        'lat': (('lat'), np.linspace(-59.875, 59.875, num=480, endpoint=True)),
        'lon': (('lon'), np.linspace(-179.875, 179.875, num=1440, endpoint=True)),
    }
    return xr.Dataset(coords)
def get_date_from_file(filename, sep='.', year_index=-4, month_index=-3, day_index=-2):
    """Parse the calendar date embedded in a daily-file name.

    e.g. ``GPM_IMERG_daily.v06.2020.03.05.nc`` -> ``datetime.date(2020, 3, 5)``.

    Parameters
    ----------
    filename : str or pathlib.Path
        File whose *name* (not the full path) embeds the date.
    sep : str, optional
        Separator between the name's fields, by default '.'.
        NOTE: the original hard-coded '.' and silently ignored this argument.
    year_index, month_index, day_index : int, optional
        Positions of the year / month / day fields once the name is split on
        *sep*, by default the 4th / 3rd / 2nd fields from the end.

    Returns
    -------
    datetime.date
    """
    import pathlib
    from datetime import date
    if not isinstance(filename, pathlib.Path):
        filename = pathlib.Path(filename)
    parts = filename.name.split(sep)
    return date(int(parts[year_index]), int(parts[month_index]), int(parts[day_index]))
def get_dates_to_download(dpath='/home/nicolasf/operational/ICU/ops/data/GPM_IMERG/daily/extended_SP', lag=1):
    """Work out which daily dates still need downloading.

    Scans *dpath* for existing ``GPM_IMERG_daily.v06.YYYY.MM.DD.nc`` files
    and returns the daily range from the day after the newest local file up
    to today minus *lag* days.

    Parameters
    ----------
    dpath : str or pathlib.Path, optional
        Local archive directory.
    lag : int, optional
        Days behind realtime the upstream archive is assumed to be, by default 1.

    Returns
    -------
    pandas.DatetimeIndex
        The (possibly empty) range of dates to fetch.

    Raises
    ------
    ValueError
        If no daily file at all is present in *dpath* (the original crashed
        with an opaque IndexError in that case).
    """
    import pathlib
    from datetime import date
    from dateutil.relativedelta import relativedelta
    import pandas as pd
    if not type(dpath) == pathlib.PosixPath:
        dpath = pathlib.Path(dpath)
    lfiles = list(dpath.glob("GPM_IMERG_daily.v06.????.??.??.nc"))
    lfiles.sort()
    if not lfiles:
        raise ValueError(f"no GPM_IMERG_daily.v06.*.nc files found in {str(dpath)}")
    last_file = lfiles[-1]
    print(f"Last downloaded file in {str(dpath)} is {last_file.name}\n")
    # resume from the day after the last complete local file
    last_date = get_date_from_file(last_file) + timedelta(days=1)
    download_date = date.today() - relativedelta(days=lag)
    return pd.date_range(start=last_date, end=download_date, freq='1D')
def update(lag=1, opath='/home/nicolasf/operational/ICU/ops/data/GPM_IMERG/daily/extended_SP', proxy=None, lon_min=125., lon_max=240., lat_min=-50., lat_max=25., interp=True):
    """Bring the local daily GPM-IMERG archive in *opath* up to date.

    The dates to fetch come from `get_dates_to_download`; each raw nc4 is
    downloaded with curl, reduced to the 'HQprecipitation' and
    'precipitationCal' variables, optionally interpolated onto the TRMM
    0.25 deg grid, rolled to 0-360 longitudes, cropped to the requested box
    and saved as ``GPM_IMERG_daily.v06.YYYY.MM.DD.nc``.

    NOTE(review): this duplicates most of `download` above; the two should
    eventually be merged.

    Parameters
    ----------
    lag : int, optional
        Days behind realtime to stop at, by default 1.
    opath : str or pathlib.Path, optional
        Local archive directory.
    proxy : str, optional
        HTTP(S) proxy handed to curl, by default None.
    lon_min, lon_max, lat_min, lat_max : float, optional
        Geographical subset (degrees, 0-360 longitude convention).
    interp : bool, optional
        Whether to interpolate onto the TRMM 0.25 deg grid, by default True.
    """
    import os
    import pathlib
    from subprocess import call
    from shutil import which
    import xarray as xr
    curl = which("curl")
    if not type(opath) == pathlib.PosixPath:
        opath = pathlib.Path(opath)
    # first clean the *.nc4 files if any
    for nc4_file in list(opath.glob("*.nc4")):
        nc4_file.unlink()
    # get the dates
    dates_to_download = get_dates_to_download(opath, lag=lag)
    # then loop over the dates, and download the files
    for date in dates_to_download:
        root_url = f"https://gpm1.gesdisc.eosdis.nasa.gov/data/GPM_L3/GPM_3IMERGDL.06/{date:%Y/%m}"
        fname = f"3B-DAY-L.MS.MRG.3IMERG.{date:%Y%m%d}-S000000-E235959.V06.nc4"
        fname_out = f'GPM_IMERG_daily.v06.{date:%Y.%m.%d}.nc'
        # build the command
        if proxy:
            cmd = f"{curl} --silent --proxy {proxy} -n -c ~/.urs_cookies -b ~/.urs_cookies -L --url {root_url}/{fname} -o {opath}/{fname}"
        else:
            cmd = f"{curl} --silent -n -c ~/.urs_cookies -b ~/.urs_cookies -L --url {root_url}/{fname} -o {opath}/{fname}"
        print(f"trying to download {fname_out} in {str(opath)}")
        # execute the command
        r = call(cmd, shell=True)
        if r != 0:
            print("download failed for date {:%Y-%m-%d}".format(date))
        else:
            stat_info = os.stat(str(opath.joinpath(fname)))
            # a very small file is an HTML error page / truncated download,
            # not valid netcdf — presumably why 800 kB was chosen; TODO confirm
            if stat_info.st_size > 800000:
                dset_in = xr.open_dataset(opath.joinpath(fname), engine='netcdf4')
                dset_in = dset_in[['HQprecipitation','precipitationCal']]
                if interp:
                    trmm_grid = make_trmm_grid()
                    dset_in = dset_in.interp_like(trmm_grid)
                    dset_in = dset_in.transpose('time','lat','lon')
                    # close the template grid here: the original closed it
                    # unconditionally after this `if`, which raised a
                    # NameError whenever interp=False
                    trmm_grid.close()
                # roll in the longitudes to go from -180 -> 180 to 0 -> 360
                dset_in = utils.roll_longitudes(dset_in)
                dset_in = dset_in.sel(lon=slice(lon_min, lon_max), lat=slice(lat_min, lat_max))
                dset_in.to_netcdf(opath.joinpath(fname_out), unlimited_dims='time')
                opath.joinpath(fname).unlink()
                dset_in.close()
            else:
                print(f'\n! file size for input file {fname} is too small, netcdf file {fname_out} is not yet available to download from {root_url}\n')
    # cleaning the nc4 files
    for nc4_file in list(opath.glob("*.nc4")):
        nc4_file.unlink()
def save(dset, opath=None, kind='accum', complevel=4):
    """
    saves a dataset containing either:

    - the accumulation statistics (rainfall accumulation, anomalies and percentage of score): kind='accum' or
    - the nb days statistics: dry days, wet days and days since last rain: kind='ndays'

    Parameters
    ----------
    dset : xarray.Dataset
        The xarray dataset to save to disk
    opath : string or pathlib.PosixPath, optional
        The path where to save the dataset, by default None (current directory)
    kind : str, optional
        The kind of dataset, either 'accum' or 'ndays', by default 'accum'
    complevel : int, optional
        The compression level, by default 4
    """
    # make sure the correct attributes are set for the latitudes and longitudes
    dict_lat = dict(units = "degrees_north", long_name = "Latitude")
    dict_lon = dict(units = "degrees_east", long_name = "Longitude")
    dset['lat'].attrs.update(dict_lat)
    dset['lon'].attrs.update(dict_lon)
    if opath is None:
        opath = pathlib.Path.cwd()
    else:
        if type(opath) != pathlib.PosixPath:
            opath = pathlib.Path(opath)
        if not opath.exists():
            opath.mkdir(parents=True)
    last_date, ndays = get_attrs(dset)
    # build the filename
    filename = f"GPM_IMERG_{kind}_{ndays}ndays_to_{last_date:%Y-%m-%d}.nc"
    # `to_netcdf` expects a per-variable encoding mapping; the original passed
    # the return value of dict.update(), which is always None, so compression
    # was silently disabled
    encoding = {var: {'zlib': True, 'complevel': complevel} for var in dset.data_vars}
    dset.to_netcdf(opath.joinpath(filename), encoding=encoding)
    # the original printed "(unknown)" here instead of the actual filename
    print(f"\nsaving {filename} in {str(opath)}")
def get_virtual_station(dset, lat=None, lon=None, varname='precipitationCal'):
    """
    get a time-series from the GPM-IMERG accumulation dataset

    Extracts a 'virtual station' daily series at the grid point nearest to
    (lon, lat).

    Parameters
    ----------
    dset : xarray.Dataset
        Daily GPM-IMERG dataset with a cftime 'time' index.
    lat : float, optional
        Requested latitude of the station.
    lon : float, optional
        Requested longitude of the station.
    varname : str, optional
        Rainfall variable to extract, by default 'precipitationCal'.

    Returns
    -------
    tuple
        (pandas.DataFrame with a single 'observed' column indexed by date,
         (extracted_lon, extracted_lat) of the grid point actually used,
         distance between requested and extracted points as returned by
         utils.haversine — presumably km, TODO confirm units)
    """
    # this to ignore the runtime warning when converting the CFTimeindex to a datetime index
    import warnings
    warnings.filterwarnings("ignore")
    sub = dset.sel(lat=lat, lon=lon, method='nearest')
    index = sub.time.to_index().to_datetimeindex()
    # coordinates of the grid point that was actually selected
    extracted_lat = float(sub.lat.data)
    extracted_lon = float(sub.lon.data)
    dist = utils.haversine((lon, lat), (extracted_lon, extracted_lat))
    sub = sub[varname].load()
    sub = sub.to_dataframe()[[varname]]
    sub.index = index
    sub = sub.rename({varname:'observed'}, axis=1)
    return sub, (extracted_lon, extracted_lat), dist
def get_virtual_station_climatology(dpath=None, fname="daily_climatology_5days_rm_2001_2020.nc", lat=None, lon=None, varname='precipitationCal'):
    """Load the daily climatology file and return the *varname* series at
    the grid point nearest to (lat, lon)."""
    import pathlib
    import xarray as xr
    dpath = dpath if isinstance(dpath, pathlib.PosixPath) else pathlib.Path(dpath)
    dataset = xr.open_dataset(dpath.joinpath(fname))
    point = dataset.sel(lon=lon, lat=lat, method='nearest')
    return point[varname]
def join_clim(df, clim):
    """Add a 'climatology' column to the station dataframe *df* by looking
    up the day-of-year climatology *clim* at each date of *df*'s index."""
    df['climatology'] = clim.sel(dayofyear = df.index.day_of_year).data
    return df
def subset_daily_clim(dset, last_day, clim_period=[2001, 2020], buffer=3):
    """
    takes a (multiple files) daily dataset, and extract N days (parameter `buffer`)
    around each day of year for a climatological period (parameter `clim_period`)
    centered around `last_day`
    """
    from datetime import datetime
    from dateutil.relativedelta import relativedelta
    import numpy as np
    import warnings
    warnings.filterwarnings("ignore")
    # build, for every year of the climatological period, the dates within
    # `buffer` days of (last_day.month, last_day.day)
    ldates = []
    for y in np.arange(clim_period[0], clim_period[-1] + 1):
        d = datetime(y, last_day.month, last_day.day)
        d = [d + relativedelta(days=x) for x in range(-buffer, buffer+1)]
        ldates += d
    ldates = np.array(ldates)
    dates = pd.Index(ldates)
    dates = dates.to_series()
    # restrict to the climatological period and swap the cftime index for a
    # plain DatetimeIndex so label-based selection below works
    clim = dset.sel(time=slice(*map(str, clim_period)))
    clim['time'] = clim.indexes['time'].to_datetimeindex()
    # keep only the candidate dates actually covered by the dataset
    dates = dates.loc[clim.time.to_index()[0]:clim.time.to_index()[-1],]
    clim = clim.sel(time=dates.values)
    return clim
def calibrate_SPI(dset, variable='precipitationCal', dimension='time', return_gamma=False):
    """
    calibrate the SPI over a climatological dataset (typically obtained using `subset_daily_clim`
    with appropriate buffer ...)

    Fits a gamma distribution per grid point along *dimension* using Thom's
    approximation and returns its (shape, scale) parameters for later use by
    `calculate_SPI`.

    Parameters
    ----------
    dset : xarray.Dataset
        Climatological rainfall dataset.
    variable : str, optional
        Rainfall variable name, by default 'precipitationCal'.
    dimension : str, optional
        Dimension over which to fit, by default 'time'.
    return_gamma : bool, optional
        If True, also return the gamma CDF values of the calibration sample.

    Returns
    -------
    tuple
        (gamma, alpha, beta) when `return_gamma` is True, else (alpha, beta).
    """
    import numpy as np
    import xarray as xr
    from scipy import stats as st
    ds_ma = dset[variable]
    ds_In = np.log(ds_ma)
    # zero rainfall gives log(0) = -inf: mask it so sums/counts ignore it
    ds_In = ds_In.where(np.isinf(ds_In) == False)
    # overall mean of the sample (the original computed this line twice)
    ds_mu = ds_ma.mean(dimension)
    # summation of natural log of the sample
    ds_sum = ds_In.sum(dimension)
    # computing essentials for the gamma distribution
    n = ds_In.count(dimension)  # size of data
    A = np.log(ds_mu) - (ds_sum/n)  # computing A
    alpha = (1/(4*A))*(1+(1+((4*A)/3))**0.5)  # computing alpha (shape)
    beta = ds_mu/alpha  # scale
    if return_gamma:
        gamma_func = lambda data, a, scale: st.gamma.cdf(data, a=a, scale=scale)
        gamma = xr.apply_ufunc(gamma_func, ds_ma, alpha, beta, dask='allowed')
        return gamma, alpha, beta
    else:
        return alpha, beta
def calculate_SPI(dataarray, alpha, beta, name='SPI'):
    """Map rainfall amounts onto the Standardized Precipitation Index.

    The values are passed through the gamma CDF fitted by `calibrate_SPI`
    (shape *alpha*, scale *beta*), then through the inverse standard-normal
    CDF, which yields the SPI.
    """
    import xarray as xr
    from scipy import stats as st

    def _gamma_cdf(data, a, scale):
        return st.gamma.cdf(data, a=a, scale=scale)

    def _norm_ppf(data):
        return st.norm.ppf(data, loc=0, scale=1)

    cdf_values = xr.apply_ufunc(_gamma_cdf, dataarray, alpha, beta, dask='allowed')
    spi = xr.apply_ufunc(_norm_ppf, cdf_values, dask='allowed')
    return spi.to_dataset(name=name)
from keras.datasets import mnist
import numpy as np
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Flatten each image to one row per sample and scale pixel values to [0, 1]
X_train = X_train.reshape(X_train.shape[0], -1)/255
X_test = X_test.reshape(X_test.shape[0], -1)/255
# One-hot encode the training labels (10 digit classes)
y_train = np.eye(10)[y_train]
y_test = np.eye(10)[y_test] | ousinnGitHub/ML_Study | AI_study/Python/demo05/src/KNN.py | KNN.py | py | 270 | python | en | code | 0 | github-code | 13 |
17625334645 | #!/usr/bin/env python3
from src.euler import Euler
from math import exp
class ExpEuler(Euler):
    """Euler integration of dy/dx = y, whose exact solution is y = exp(x)."""
    def initial_values(self):
        """ Initial value for variable y. """
        # y(0) = 1 matches exp(0); y_list collects the value after each step
        self.y = 1
        self.y_list = []
    def diff_equation_system(self, x: float, dx: float):
        """ Derivative 'dy/dx = y'. Hence it is known that dy = y * dx. """
        # Explicit (forward) Euler step: y_{k+1} = y_k + y_k * dx
        dy = self.y * dx
        self.y += dy
        # NOTE(review): only post-step values are stored; the initial y=1 is not in y_list
        self.y_list.append(self.y)
if __name__ == '__main__':
    # Compare the Euler approximation against exp(x) for progressively finer steps
    e = ExpEuler()
    for id, step in enumerate([0.5, 0.05, 0.005, 0.0005]):  # NOTE: `id` shadows the builtin
        x_list = e.get_results(step)
        y_list_exact = [exp(x) for x in x_list]
        e.draw_data_and_error(id, x_list, e.y_list, step, y_list_exact)
Euler.show_plots() | Zazzik1/Euler | exp.py | exp.py | py | 718 | python | en | code | 2 | github-code | 13 |
9525034485 | from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QWidget
from widgetTempletes import SliderWidget, intInputWidget
class DiseaseTab(QWidget):
    """Settings tab exposing disease parameters (spread radius, infection
    length, infection rate, mortality rate) and wiring their widgets to the
    plot canvas and the simulation object."""
    def __init__(self, parent, plotCanvas, simulation):
        # NOTE(review): super(QWidget, self) skips QWidget itself in the MRO
        # and runs its base initializer — confirm this is intentional.
        super(QWidget, self).__init__(parent)
        self.layout = QtWidgets.QVBoxLayout()
        self.plotCanvas = plotCanvas
        self.simulation = simulation
        self.layout.addStretch(1)
        # Spread-radius slider -> plot canvas radius
        self.radSlider = SliderWidget(self, "Spread Radius")
        self.radSlider.slider.sliderInput.valueChanged.connect(lambda: self.changeRadius())
        self.layout.addLayout(self.radSlider.layout)
        # Integer input for how long an infection lasts
        self.infectionLength = intInputWidget(self, "Infection Length")
        self.layout.addLayout(self.infectionLength.layout)
        self.infectionLength.intInput.textChanged.connect(lambda: self.changeInfLength())
        # Infection-rate slider
        self.contSlider = SliderWidget(self, "Rate of Infection")
        self.layout.addLayout(self.contSlider.layout)
        self.contSlider.slider.sliderInput.valueChanged.connect(lambda: self.changeCont())
        # Mortality-rate slider
        self.mortSlider = SliderWidget(self, "Mortality Rate")
        self.layout.addLayout(self.mortSlider.layout)
        self.mortSlider.slider.sliderInput.valueChanged.connect(lambda: self.changeMort())
        self.layout.addStretch(1)
        self.setLayout(self.layout)
    def changeRadius(self):
        # Scale the raw slider value by 0.005 before handing it to the canvas
        self.plotCanvas.radius = self.radSlider.value * 0.005
        self.plotCanvas.updateGraph()
    def changeCont(self):
        # Scale the raw slider value by 0.01 for the simulation
        self.simulation.contRate = self.contSlider.value * 0.01
    def changeInfLength(self):
        length = self.infectionLength.intInput.text()
        # Ignore the transient empty string while the user is typing
        if length != "":
            self.simulation.infLength = int(length)
def changeMort(self):
self.simulation.mortRate = self.mortSlider.value * 0.01 | BaileyDalton007/Epidemic-Simulator | tabs/disease_tab.py | disease_tab.py | py | 1,817 | python | en | code | 1 | github-code | 13 |
26083606654 | import cplex
import itertools #import para fazer o permutation
import math
from math import sin, cos, sqrt, atan2, radians,e
c=list()
c1=list()
c2=list()
latitude = list()
longitude = list()
texto = open('testecoord.txt') #testecoord - burma14, testecoord2- att48, testecoord3 - bayg29
for linha in texto:
linha= linha.rstrip()
aux_txt = linha.split(' ')
c1.append(aux_txt[:])
aux_txt.clear()
print(c1)
texto.close()
var1=int(len(c1))
var2=int(len(c1[0]))
for i in range(var1):
latitude.append(float(c1[i][0]))
longitude.append(float(c1[i][1]))
print('latitude')
print(latitude)
print('longitude')
print(longitude)
# Build the cost matrix `c` from latitude/longitude using the haversine formula
for a in range(var1):
    for b in range(var1):
        lat1 = radians(latitude[a])
        lon1 = radians(longitude[a])
        lat2 = radians(latitude[b])
        lon2 = radians(longitude[b])
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        # Haversine: great-circle distance on a sphere of radius 6371 km
        operacao1 = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
        operacao2 = 2 * atan2(sqrt(operacao1), sqrt(1 - operacao1))
        if a != b:
            c2.insert(b,6371.0 * operacao2)
        else:
            # Large sentinel on the diagonal so the solver never picks i -> i
            c2.insert(b,99999)
    c.append(c2[:])
    c2.clear()
i=var1
j=i
I=range(i)
J=range(j)
IJ = [(a,b) for a in I for b in J] #criação da lista com identificação dos pontos, ex.: IJ = [(0,0),(0,1),...(4,4)]
subrotas = [] #lista subrotas vai receber todos os pontos para acompanhar se tem restrição para todas as subrotas
cpx=cplex.Cplex()
nx = ['x(' + str(i)+','+str(j)+')' for i in I for j in J] #lista para nomear os index do dicionário, ex.: ['x(0,0)','x(0,1)',...'x(4,4)']
ix = {(a,b): idx for idx, (a,b) in enumerate(IJ)} #dicionário para fazer a conversão, ex.: Chamar ix[(0,0)] = 0 | ix[(0,1)] = 1
cpx.variables.add(obj = [c[a][b] for b in range(j)\
for a in range(i)],
lb = [0.0]*i*j, ub=[cplex.infinity]*i*j,types = ['B']*i*j, names = nx) #Função objetivo
vet_1=[]
[vet_1.append(1) for a in I] #vetor de 1 para adicionar as restrições
[cpx.linear_constraints.add(lin_expr=[cplex.SparsePair(\
[ix[(a,b)] for b in J],\
[1.0 for b in J])],\
senses = 'E',\
rhs = [vet_1[a]]) for a in I ] #restrição de i só poder ir para um j
[cpx.linear_constraints.add(lin_expr=[cplex.SparsePair(\
[ix[(a,b)] for a in J],\
[1.0 for a in J])],\
senses = 'E',\
rhs = [vet_1[b]]) for b in I ] #restrição de i só poder receber um j
v=0 #vértice
visitados = [v] #vetor visitados vai armazernar os valores em uma rota para determinar se possui subrota
aux = [v] #usado para manipular a lista visitados
[subrotas.append(a) for a in I] #preencheu o vetor das subrotas para verificar se acabaram as subrotas
cpx.solve()
print('RESULTADO FUNÇÃO OBJETIVO')
print(cpx.solution.get_objective_value())
res = cpx.solution.get_values() #lista com valores binários para identificar para onde cada ponto foi
while len(visitados) != i: #repetir enquanto a rota não conseguir fechar sem acabar com as subrotas
while len(subrotas) > 1: #repetir enquanto não tiver criado restrição para as subrotas encontradas no último resultado do cplex
print('subrotas')
print(subrotas)
while aux: #função de retornar uma lista de um ciclo a partir de um primeiro ponto
a= aux.pop()
for b in J:
if res[ix[(a,b)]] == 1:
if b not in visitados:
# aux2.append(b)
visitados.append(b)
print('visitados dentro do for')
print(visitados)
#aux2.clear()
aux.append(b)
else:
aux.clear()
if len(visitados) != i: #se a lista criada não possuir o mesmo número de pontos do sistema vai ser criado restrição
print('visitados')
print(visitados)
a= list(itertools.permutations(visitados,2)) #criar uma permutação a partir das surotas da lista visitados[]
print('permutação')
print(a)
cpx.linear_constraints.add(lin_expr=[cplex.SparsePair(\
[ix[a[i]] for i in range(len(a))],\
[1.0 for i in range(len(a))])],\
senses = 'L',\
rhs = [len(visitados)-1]) #restrição criada a partir da permutação da subrota visitados[]
[subrotas.remove(visitados[a]) for a in range(len(visitados))] #remove da lista subrota os pontos que já foram criado restrições
print('subrotas')
print(subrotas)
if len(subrotas) >= 2: #se tiver mais alguma subrota
aux.append(subrotas[0]) #setando um valor para ele procurar subrota
visitados.clear() #limpando o vetor para ele preencher com o novo ciclo
cpx.solve()
print('RESULTADO FUNÇÃO OBJETIVO')
print(cpx.solution.get_objective_value())
res = cpx.solution.get_values()
v=0 #linha 56
visitados = [v] #linha 57
aux = [v] #linha 58
while aux: #verificando se o novo ciclo com as restrições possui subrota
a= aux.pop()
for b in J:
if res[ix[(a,b)]] == 1:
if b not in visitados:
visitados.append(b)
print('visitados dentro do for')
print(visitados)
aux.append(b)
else:
aux.clear()
if len(visitados) != i: #caso ainda exista subrota, retando os valores para repetir o processo e adicionar mais subrotas
[subrotas.append(a) for a in I]
v=0
visitados = [v]
aux = [v]
cpx.write("modelo_matematico_tsp_versao_4.1.lp")
cpx.solve()
print('RESULTADO FUNÇÃO OBJETIVO')
print(cpx.solution.get_objective_value())
print(visitados)
| vinimartins6/TravelingSalesmanProblem | TSP_Subrotas_versao_4.1(lat,long).py | TSP_Subrotas_versao_4.1(lat,long).py | py | 7,159 | python | pt | code | 0 | github-code | 13 |
72651273939 | # -*- coding: utf-8 -*-
# code for console Encoding difference. Dont' mind on it
import imp
import sys
imp.reload(sys)
try:
sys.setdefaultencoding("UTF8")
except Exception as E:
pass
import testValue
from popbill import CashbillService, PopbillException
cashbillService = CashbillService(testValue.LinkID, testValue.SecretKey)
cashbillService.IsTest = testValue.IsTest
cashbillService.IPRestrictOnOff = testValue.IPRestrictOnOff
cashbillService.UseStaticIP = testValue.UseStaticIP
cashbillService.UseLocalTimeYN = testValue.UseLocalTimeYN
"""
현금영수증과 관련된 안내 메일을 재전송 합니다.
- https://developers.popbill.com/reference/cashbill/python/api/etc#SendEmail
"""
try:
    print("=" * 15 + " 현금영수증 안내메일 재전송 " + "=" * 15)
    # Popbill member's business registration number
    CorpNum = testValue.testCorpNum
    # Document (management) key of the cash receipt
    MgtKey = "20220803-001"
    # Recipient email address.
    # The notification mail is really sent even when testing against the
    # Popbill sandbox, so take care not to enter a real customer's address.
    Receiver = ""
    result = cashbillService.sendEmail(CorpNum, MgtKey, Receiver)
    print("처리결과 : [%d] %s" % (result.code, result.message))
except PopbillException as PE:
    print("Exception Occur : [%d] %s" % (PE.code, PE.message))
| linkhub-sdk/popbill.cashbill.example.py | sendEmail.py | sendEmail.py | py | 1,363 | python | ko | code | 0 | github-code | 13 |
6238882254 | from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.shortcuts import render
from django.urls import reverse_lazy
from rest_framework.decorators import api_view
from rest_framework.permissions import BasePermission
from rest_framework.response import Response
from .functions import has_is_owner, get_rating
from .models import Test, Question, Results, VotesMember
from .serializers import TestSerializer, ResultSerializer
CACHE_TTL = getattr(settings, "CACHE_TTL",DEFAULT_TIMEOUT)
# Create your views here.
@api_view(["GET"])
def get_tests(request):
data =get_tests_cache()
serializer = TestSerializer(data, many=True)
return Response(serializer.data)
@api_view(["GET", "DELETE"])
def get_test(request, pk):
if request.method == "GET":
data = get_test_cache(pk)
votes_mamber, created = VotesMember.objects.get_or_create(user=request.user, test=data)
serializer = TestSerializer(data, many=False)
return Response(serializer.data)
else:
test = Test.objects.get(id=pk)
test.delete()
return Response("Was deleted")
@api_view(["POST"])
def post_result(request):
test_id = request.data['test']["id"]
test = Test.objects.get(id=test_id)
test_questions = request.data['test_result']
end = {"result": {}}
rating = 0
for i in test_questions:
print(test_questions[i])
question = Question.objects.get(id=i)
correct_choice = question.get_true_choice()
if test_questions[i] == correct_choice.choice_text:
end["result"][question.question_text] = "True"
rating += 1
else:
end["result"][question.question_text] = "False"
rating = get_rating(rating, len(test_questions))
person = VotesMember.objects.get(user=request.user, test=test)
try:
result, created = Results.objects.get_or_create(test=test, user=person,
rating=rating)
except:
result = Results.objects.get(test=test, user=person,)
result.rating = rating
end['id'] = result.id
print(end)
return Response(end)
@api_view(["GET"])
def get_result(request, pk):
result = Results.objects.get(id=pk)
if has_is_owner(request.user, result):
error = "It was not you who took this test"
to_json = {"error": error}
return Response(to_json)
serializer = ResultSerializer(result, many=False)
return Response(serializer.data)
def get_test_cache(id):
    """Fetch a Test by primary key, checking the cache before the database.

    On a cache miss the test is loaded from the DB and cached under its pk.
    Returns a DRF Response with an error marker when the lookup fails
    (preserving the original best-effort contract).
    """
    test = cache.get(id)  # single lookup (the original called cache.get twice)
    if test:
        return test
    try:
        test = Test.objects.get(id=id)
        cache.set(id, test)
        return test
    except Exception:  # was a bare except:, which also swallowed SystemExit
        return Response("ERROR_CACHE_TEST")
def get_tests_cache():
    """Fetch all Tests, checking the cache (key "tests") before the database."""
    tests = cache.get("tests")
    if tests:
        return tests
    try:
        tests = Test.objects.all()
        cache.set("tests", tests)
        return tests
    except Exception:  # was a bare except:, which also swallowed SystemExit
        return Response("ERROR_CACHE_TESTS")
| NazarSenchuk/TestSite | backend/api/views.py | views.py | py | 3,374 | python | en | code | 0 | github-code | 13 |
8145231451 | import os
import dropbox
from dropbox.files import WriteMode
class TransferData:
    """Uploads a local directory tree to Dropbox, overwriting remote files."""

    def __init__(self, access_token):
        self.access_token = access_token

    def upload_file(self, file_from, file_to):
        """Walk `file_from` and mirror every file under `file_to` on Dropbox."""
        client = dropbox.Dropbox(self.access_token)
        for folder, _subdirs, names in os.walk(file_from):
            for name in names:
                source = os.path.join(folder, name)
                target = os.path.join(file_to, os.path.relpath(source, file_from))
                with open(source, 'rb') as handle:
                    client.files_upload(handle.read(), target, mode=WriteMode('overwrite'))
def main():
    """Prompt for source/destination paths and upload the tree to Dropbox."""
    # SECURITY: a live-looking access token is hardcoded in source control.
    # It should be revoked and loaded from an environment variable instead.
    access_token = 'Wi9zUronYwkAAAAAAAAAAcfqys7KUMOmGwQ00m5mG3ybXKZtN-Z0nKgMVWi5wQ8W'
    transferData = TransferData(access_token)
    file_from = str(input("Enter the file path to transfer: "))
    file_to = input("Enter the file path to be uploaded to the dropbox: ") # The full path to upload the file to, including the file name
    # API v2
    transferData.upload_file(file_from, file_to)
    print("file has been moved!!!")
main()
| Raghavkhandelwal7/dropbox-boom-box- | dropbox.py | dropbox.py | py | 1,141 | python | en | code | 0 | github-code | 13 |
73055113937 | #!/usr/bin/env python3
# coding:utf-8
import re
import os
from bs4 import BeautifulSoup
import requests
Url = 'http://www.cnblogs.com/kuangbin/archive/2012/10/02/2710606.html'
Headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:41.0) Gecko/20100101 Firefox/41.0'}
def get_link_from_html(url):
    """Download `url` and return the href of every <a href=...> anchor on it."""
    links = []
    html = requests.get(url, headers=Headers).content
    soup = BeautifulSoup(html, 'lxml')
    # find_all(..., href=True) already filters out anchors without an 'href'
    # attribute, avoiding the KeyError a plain find_all('a') could trigger.
    for u in soup.find_all('a', href=True):
        # NOTE(review): the None check looks redundant — find_all yields Tag
        # objects, not None — but it is kept as harmless defensiveness.
        if u is not None:
            links.append(u['href'])
    return links
def main():
    """Print all links scraped from the module-level Url."""
    print(get_link_from_html(Url))
if __name__ == '__main__':
    main()
| flintx/PythonExecrise | 0009/0009.py | 0009.py | py | 813 | python | en | code | 1 | github-code | 13 |
19466134025 | import random
import string
import sys
import os
import time
from time import gmtime, strftime
import scores
def rand_capital():
    """Pick a random [country, capital] pair from countries-and-capitals.txt.

    Each line of the file is expected to look like "Country | Capital".
    """
    with open("countries-and-capitals.txt", "r") as source:
        pairs = [row.strip("\n").split(" | ") for row in source.readlines()]
    return random.choice(pairs)
def life_points(capital, country):
    """Rebuild the guess history from guesses.txt and report remaining points.

    `capital` is the answer as a list of upper-case characters. The point
    budget is the number of non-space letters plus two; single-letter guesses
    cost 1 point and whole-word guesses cost 2. Calls looser() when the
    budget is exhausted. Returns the list of guesses read from disk.
    """
    # Budget: one point per letter (spaces excluded) plus two spare points
    points = len(capital) - capital.count(" ") + 2
    guesses = []
    with open("guesses.txt", "r") as f:
        for line in f.readlines():
            line = line.strip("\n")
            guesses.append(line)
    if len(guesses) > 0:
        print("Not in word: ", end="")
        for guess in guesses:
            # NOTE(review): `list(guess) != capital` is true for every
            # single-letter guess, so this branch charges points even for
            # correct letters — confirm whether `and` was intended here.
            if guess not in capital or list(guess) != capital:
                print(guess, " ", end="")
                if len(guess) == 1:
                    points -= 1
                elif len(guess) > 1:
                    points -= 2
        if points <= 0:
            looser(country)
        elif points > 0:
            print("\n", "You have " + str(points) + " life points.")
    elif len(guesses) == 0:
        print("You have " + str(points) + " life points.")
    return guesses
def get_input():
    """Prompt the player and return the guess upper-cased."""
    return input("Enter a letter or the whole word: ").upper()
def check_guess(capital, guesses, start):
    """Return the revealed board after the latest guess; call winner() on success.

    `capital` is the answer as a list of characters, `guesses` the history of
    upper-cased guesses (single letters or whole words), `start` the round's
    start timestamp (forwarded to winner()).

    Bug fix: the board is now always rebuilt from every single-letter guess,
    so a wrong whole-word guess no longer resets the display to all blanks.
    """
    cap1 = list(len(capital) * "_")
    # Reveal every position whose letter has been guessed at any point
    for i in range(len(capital)):
        if capital[i] in guesses:
            cap1[i] = capital[i]
    if len(guesses[-1]) > 1:
        # Whole-word attempt: win only on an exact match
        if list(guesses[-1]) == capital:
            winner(guesses, start, capital)
    elif cap1 == capital:
        # All letters uncovered one by one
        winner(guesses, start, capital)
    return cap1
def update_guesses(guesses):
    """Append the most recent guess to guesses.txt, one guess per line."""
    latest = guesses[-1]
    with open("guesses.txt", "a") as log:
        log.write(latest + "\n")
def clear_guesses():
    """Truncate guesses.txt so a new game starts with no recorded guesses."""
    with open("guesses.txt", "w"):
        pass
def winner(guesses, start, capital):
    """Announce the win, persist a score record, then restart via main().

    The score record is comma-separated: name, timestamp, elapsed seconds,
    number of guesses, and the capital itself.
    """
    print("\n", "Congratulations! You won!")
    end = int(time.time())
    cap2 = "".join(capital)
    name = input("Enter your name: ")
    score = name + "," + str(strftime("%d. %m. %Y %H:%M", gmtime())) + "," + str(end - start) + "," + str(len(guesses)) + "," + cap2
    scores.update_scores(score)
    print("\n", name + ", " + "you guessed the capital after " + str(len(guesses)) + " letters. It took you " + str(end - start) + " seconds.")
    scores.main_w()
    # NOTE(review): restarts by calling main() recursively rather than
    # returning — long sessions keep deepening the call stack.
    main()
def looser(country):
    """Announce the loss (revealing the country) and restart via main()."""
    # POSIX-only screen clear; on Windows this would need "cls"
    os.system("clear")
    print("\n", "The capital of " + country)
    print(" You lost all your life points!")
    scores.main_w()
    # NOTE(review): like winner(), restarts by recursing into main()
    main()
def choice(cap_and_country):
    """Run one round of the guessing game for a [country, capital] pair."""
    capital = list(cap_and_country[1].upper())
    country = cap_and_country[0]
    start = int(time.time())
    # NOTE(review): this prints the answer to the player — looks like
    # leftover debugging output.
    print(capital)
    while True:
        #os.system("clear")
        guesses = life_points(capital, country)
        if len(guesses) == 0:
            # First turn: show an empty board and take the first guess
            cap1 = len(capital) * "_"
            print(cap1)
            guess = get_input()
            guesses.append(guess)
            update_guesses(guesses)
        elif cap1 == capital:
            # NOTE(review): effectively dead — cap1 is a str after the first
            # branch (vs. the list `capital`), and wins are already handled
            # inside check_guess(); the bare `False` is a no-op either way.
            False
        elif len(guesses) > 0:
            cap1 = check_guess(capital, guesses, start)
            print(cap1)
            guess = get_input()
            guesses.append(guess)
            update_guesses(guesses)
def main():
    """Top-level menu loop: play a round (1) or exit the program (0)."""
    while True:
        title_text = "Would you like to play(1) or want to exit program(0)?"
        print(title_text)
        user_input = input("Enter one of the numbers: ")
        if user_input == "1":
            # Fresh round: wipe the guess log and draw a new capital
            clear_guesses()
            cap_and_country = rand_capital()
            choice(cap_and_country)
        elif user_input == "0":
            sys.exit(0)
        else:
            print("Wrong input.")
if __name__ == "__main__":
    main()
| turo62/exercise | exercise/sandbox1.py | sandbox1.py | py | 3,889 | python | en | code | 0 | github-code | 13 |
20135497380 | import sys
def split_columns(line):
    """Splits the tokens separated by two or more spaces.

    This is the format of the "table" that's emitted by llvm-cov report.
    Bug fix: chunks containing only whitespace (e.g. from an odd-length run
    of trailing spaces) are now dropped instead of surfacing as empty
    strings — the original filtered before stripping.
    """
    return [cell for cell in (chunk.strip() for chunk in line.split('  ')) if cell]
def parse_llvm_cov_report_from_stdin():
    """Interprets the whole stdin as a "table" from llvm-cov report.

    Returns:
      (column_names, rows)

    Raises RuntimeError when the header does not start with 'Filename' or a
    data row has a different number of cells than the header.
    """
    # Parse the table's header.
    header_line = next(sys.stdin)
    column_names = split_columns(header_line)
    if len(column_names) <= 1 or column_names[0] != 'Filename':
        raise RuntimeError(f'Unexpected header: ${header_line}')
    # Parse the table's contents. (The original re-split the header here to
    # no effect; the duplicate call has been removed.)
    rows = []
    for line in sys.stdin:
        cells = split_columns(line)
        if len(cells) <= 1:
            # Empty line, or line with separators ("---..."), or line with a subtitle
            # (e.g., "Files which contain no functions:").
            continue
        if len(cells) != len(column_names):
            raise RuntimeError(f'Unexpected row line: ${line}')
        rows.append(cells)
    return column_names, rows
def get_total_line_coverage(column_names, rows):
    """Pull the line-coverage percentage out of the TOTAL row.

    Locates the 'Cover' column that sits two positions after 'Lines'
    (layout: Lines, Missed Lines, Cover) and reads it from the final row,
    which must be the TOTAL summary.
    """
    cover_idx = column_names.index('Lines') + 2
    if column_names[cover_idx] != 'Cover':
        raise RuntimeError(f'Failed to find line coverage column: ${column_names}')
    totals_row = rows[-1]
    if totals_row[0] != 'TOTAL':
        raise RuntimeError(f'Could not find totals row: ${rows[-1]}')
    return totals_row[cover_idx]
def main():
    """Read an llvm-cov report from stdin and print its total line coverage.

    Returns None, so the surrounding sys.exit(main()) exits with status 0.
    """
    column_names, rows = parse_llvm_cov_report_from_stdin()
    print(get_total_line_coverage(column_names, rows))
if __name__ == '__main__':
sys.exit(main())
| GoogleChromeLabs/chromeos_smart_card_connector | scripts/parse-coverage-output.py | parse-coverage-output.py | py | 1,702 | python | en | code | 131 | github-code | 13 |
42783500565 | import streamlit as st
import pandas as pd
def process(input_df, sample_time, sample_mode):
    """Resample the raw time series to a fixed period.

    `sample_time` is a pandas offset alias (e.g. '1S', '1Min', '1H') and
    `sample_mode` selects the aggregate per bin: '首位' -> first value,
    '末位' -> last value, anything else -> mean. Mutates `input_df` by
    parsing its '时间' column and promoting it to the index.
    """
    input_df['时间'] = pd.to_datetime(input_df['时间'], format='%Y年%m月%d日 %H:%M:%S')
    input_df.set_index('时间', inplace=True)
    resampler = input_df.resample(sample_time)
    aggregate = {'首位': resampler.first, '末位': resampler.last}.get(sample_mode, resampler.mean)
    resampled = aggregate()
    # Drop bins where every column is NaN (gaps in the recording)
    resampled.dropna(how='all', inplace=True)
    return resampled
def is_null(input_df):
    """Report the rows that contain missing values.

    Returns (row count, comma-joined index labels, the index itself).
    """
    has_gap = input_df.isna().any(axis=1)
    gap_index = input_df.index[has_gap]
    summary = ', '.join(str(label) for label in gap_index)
    return len(gap_index), summary, gap_index
@st.cache_data
def convert_df(input_df):
    # Serialize to CSV bytes in GBK (not UTF-8) — presumably for zh-CN Excel
    # compatibility; confirm. Cached by Streamlit so repeated downloads do
    # not re-serialize the frame.
    return input_df.to_csv().encode('gbk')
st.set_page_config(
page_title="数据处理工具",
page_icon=":memo:",
layout="wide",
initial_sidebar_state="expanded",
)
# 边栏中创建单选按钮
st.sidebar.markdown("# :chestnut:操作步骤")
selection = st.sidebar.radio("步骤", ("1. 数据处理", "2. 分组与绘图"), label_visibility="hidden")
# 根据选择的选项显示不同的输入组件
if selection == "1. 数据处理":
uploaded_file = st.sidebar.file_uploader("上传EXCEL文件:", type=["xlsx"], accept_multiple_files=False)
if uploaded_file is not None:
# 读取上传的文件内容
df = pd.read_excel(uploaded_file)
num, col2 = st.sidebar.columns(2)
input_number = num.number_input("周期:", value=1, step=1)
selected_unit = col2.selectbox("单位", ("秒(s)", "分钟(min)", "小时(hour)"),
index=1, disabled=True, label_visibility="hidden")
unit = {"秒(s)": "S",
"分钟(min)": "Min",
"小时(hour)": "H"}
sample = str(input_number) + unit[selected_unit]
# st.write("结果:", sample)
mode = st.sidebar.radio("模式:", ("首位", "末位", "均值"))
# st.write("结果:", mode)
df = process(df, sample, mode) # 根据时间和模式采样
# 文件导出按钮
csv = convert_df(df)
st.download_button(
label="下载CSV文件",
data=csv,
file_name='output.csv',
mime='text/csv',
)
row_visible = st.number_input(':question:想看几行:question:', value=5, step=1)
# 在主页面展示DataFrame的前n行
st.write(f"文件预览(前{row_visible}行):")
st.write(df.head(row_visible))
# 显示DataFrame的统计信息
st.write("统计信息:")
df_describe = df.describe(include='all').rename(
index={"count": "数量", "mean": "均值", "std": "标准差", "min": "最小值min",
"25%": "25%分位数", "50%": "50%中位数", "75%": "75%分位数", "max": "最大值max"}
)
st.write(df_describe) # 不会改变原表
num, war, null_df = is_null(df) # 判断是否含缺失值
if num != 0:
st.warning(f"警告:数据处理后存在 {num} 个缺失值,具体为:\n{war}", icon="⚠️")
st.write("下面展示缺失数据的详细情况(不建议导出文件绘图):")
st.write(df.loc[null_df]) # 展示缺失数据
else:
st.success(f"通过:数据处理后不存在个缺失值,请将CSV文件下载到本地后再进行绘图", icon="✅")
elif selection == "2. 分组与绘图":
uploaded_file = st.sidebar.file_uploader("上传CSV文件:", type=["csv"], accept_multiple_files=False)
if uploaded_file is not None:
df = pd.read_csv(uploaded_file, index_col="时间", encoding="gbk") # 读取
columns = df.columns.tolist() # 获取列名
df.index = pd.to_datetime(df.index) # index转为时间格式
st.sidebar.write(":bulb:数据时间范围(请勿超出):")
st.sidebar.write(f"{df.index[0]}~~{df.index[-1]}")
# 创建侧边栏并添加起始时间和终止时间的输入框
start_date = st.sidebar.date_input("选择起始日期", value=df.index[0])
start_time = st.sidebar.time_input("选择起始时间", value=df.index[0])
end_date = st.sidebar.date_input("选择终止日期", value=df.index[-1])
end_time = st.sidebar.time_input("选择终止时间", value=df.index[-1])
# 将起始时间和终止时间转换为datetime对象
start_datetime = pd.to_datetime(str(start_date) + ' ' + str(start_time))
end_datetime = pd.to_datetime(str(end_date) + ' ' + str(end_time))
st.write(f'所选时间段:{start_datetime} - {end_datetime}')
if start_datetime < end_datetime:
if end_datetime > df.index[0] and start_datetime < df.index[-1]:
# 根据起始时间和终止时间筛选DataFrame
filtered_df = df.loc[start_datetime:end_datetime] # 时间段筛选
# with st.expander("分组观测"):
# num_parts = st.number_input("平均分组", value=5, step=1, format="%d")
# part_size = len(filtered_df.columns) // num_parts
#
# result_data = {'组': [], '最大值max': [], '最小值min': []}
#
# for i in range(num_parts):
# start_index = i * part_size
# end_index = start_index + part_size
# if i == num_parts - 1: # 最后一个部分
# end_index = len(filtered_df.columns)
# part_data = filtered_df.iloc[:, start_index:end_index]
# col_range = f'{part_data.columns[0]}-{part_data.columns[-1]}'
# part_max = part_data.max().max()
# part_min = part_data.min().min()
# result_data['组'].append(col_range)
# result_data['最大值max'].append(part_max)
# result_data['最小值min'].append(part_min)
#
# st.write(pd.DataFrame(result_data))
selected_columns = st.multiselect("选择", columns,
placeholder="选择需要观察的通道",
label_visibility="hidden")
if selected_columns:
filtered_df = filtered_df[selected_columns] # 数据通道(列)筛选
# 画图
chart = st.line_chart(filtered_df)
# 最值点
st.markdown('#### 最值点:')
max_values = filtered_df.max(axis=0) # 最大值
max_index = filtered_df.idxmax(axis=0) # 最大值index
min_values = filtered_df.min(axis=0) # 最大值
min_index = filtered_df.idxmin(axis=0) # 最小值index
result_df = pd.DataFrame({'最大值max': max_values,
'首个最大值时间点': max_index,
'最小值min': min_values,
'首个最小值时间点': min_index})
st.write(result_df)
st.markdown('#### 详细信息:')
st.write(filtered_df) # 展示表格
else:
st.error(f"警告:选取时间已经超出数据时间范围({df.index[0]}-{df.index[-1]})!", icon="⚠️")
else:
st.error("警告:起始时间不能超过终止时间!", icon="⚠️")
| AWei02/Huangguan | app.py | app.py | py | 8,435 | python | zh | code | 0 | github-code | 13 |
22193444413 | from ovirtsdk.api import API
from ovirtsdk.xml import params
# Connect to the oVirt engine and create a minimal VM from the Blank template.
try:
    # SECURITY: credentials are hardcoded in source control — move them to
    # environment variables or a config file, and rotate the password.
    api = API(url="https://HOST",
              username="Subhayu",
              password="a@123",
              ca_file="ca.crt")
    vm_name = "dummy1"
    vm_memory = 512 * 1024 * 1024  # 512 MiB, expressed in bytes
    vm_cluster = api.clusters.get(name="Default")
    vm_template = api.templates.get(name="Blank")
    # assigning the parameters to operating system (boot from hard disk)
    vm_os = params.OperatingSystem(boot=[params.Boot(dev="hd")])
    vm_params = params.VM(name=vm_name,
                          memory=vm_memory,
                          cluster=vm_cluster,
                          template=vm_template,
                          os = vm_os)
    try:
        api.vms.add(vm=vm_params)
        print("Virtual machine '%s' added." % vm_name)
    except Exception as ex:
        print("Adding virtual machine '%s' failed: %s" % (vm_name, ex))
    api.disconnect()
except Exception as ex:
print("Unexpected error: %s" % ex) | subhayuroy/ComputationalForensics | Virtualization/virtual.py | virtual.py | py | 978 | python | en | code | 3 | github-code | 13 |
3693421145 | from datetime import date
from unittest.mock import MagicMock
from django.contrib import admin
from django.test import TestCase
from djmoney.money import Money
from salesmanagement.manager.admin import ProductAdmin
from salesmanagement.manager.factories import CompanyFactory, ProductFactory, ProductsSaleFactory
from salesmanagement.manager.inlines import ProductSalesInline
from salesmanagement.manager.models import Product
class ProductAdminTest(TestCase):
    """Tests for ProductAdmin: inlines, media assets, the computed
    current_cost/current_price columns (with and without company filters),
    queryset filtering and filter disabling."""
    def setUp(self):
        self.companies = CompanyFactory.create_batch(2)
        self.product = ProductFactory.create(companies=self.companies)
        self.admin = ProductAdmin(Product, admin.site)
    def test_productssales_inlines(self):
        """ProductSalesInline must be installed"""
        self.assertIn(ProductSalesInline, self.admin.inlines)
    def test_media_assets(self):
        """Must add a related_links.css"""
        self.assertIn('js/list_filter_collapse.js', self.admin.Media.js)
    def test_current_cost_field(self):
        """current_cost must be installed"""
        self.assertIn('current_cost', self.admin.list_display)
        self.assertIn('current_cost', self.admin.fields)
        self.assertIn('current_cost', self.admin.readonly_fields)
    def test_current_cost_result(self):
        """Must return current product cost"""
        self.add_products_and_sales()
        self.admin.request = MagicMock(GET={})
        self.assertEqual(Money(4.5, 'BRL'), self.admin.current_cost(self.product))
    def test_current_cost_result_filtered_by_company_changelist(self):
        """Must return current product cost filtered by company in change list view"""
        self.add_products_and_sales()
        self.admin.request = MagicMock(GET={'company__id__exact': self.companies[0].pk})
        self.assertEqual(Money(3.5, 'BRL'), self.admin.current_cost(self.product))
    def test_current_cost_result_filtered_by_company_change_view(self):
        """Must return current product cost filtered by company in change view"""
        self.add_products_and_sales()
        get_data = dict(_changelist_filters=f'company__id__exact={self.companies[0].pk}')
        self.admin.request = MagicMock(GET=get_data)
        self.assertEqual(Money(3.5, 'BRL'), self.admin.current_cost(self.product))
    def test_current_price_field(self):
        """current_price must be installed"""
        self.assertIn('current_price', self.admin.list_display)
        self.assertIn('current_price', self.admin.fields)
        self.assertIn('current_price', self.admin.readonly_fields)
    def test_current_price_result(self):
        """Must return current product price"""
        self.add_products_and_sales()
        self.admin.request = MagicMock(GET={})
        self.assertEqual(Money(15, 'BRL'), self.admin.current_price(self.product))
    def test_current_price_result_filtered_by_company_changelist(self):
        """Must return current product price filtered by company in change list view"""
        self.add_products_and_sales()
        self.admin.request = MagicMock(GET={'company__id__exact': self.companies[0].pk})
        self.assertEqual(Money(5, 'BRL'), self.admin.current_price(self.product))
    def test_current_price_result_filtered_by_company_change_view(self):
        """Must return current product price filtered by company in change view"""
        self.add_products_and_sales()
        get_data = dict(_changelist_filters=f'company__id__exact={self.companies[0].pk}')
        self.admin.request = MagicMock(GET=get_data)
        self.assertEqual(Money(5, 'BRL'), self.admin.current_price(self.product))
    def test_queryset_filtered_by_company(self):
        """Must return only products from company filter"""
        # create one product that only self.companies[0] has
        ProductFactory.create(companies=(self.companies[0],))
        # filter by self.companies[1]
        self.admin.request = MagicMock(GET={'company__id__exact': self.companies[1].pk})
        expected = Product.objects.filter(company=self.companies[1])
        self.assertQuerysetEqual(self.admin.get_queryset(self.admin.request), expected, transform=lambda x: x)
    def test_disable_filters(self):
        """Must remove filter that's in _disable_filters from list_filter"""
        self.admin.request = MagicMock(GET={'_disable_filters': 'company'})
        self.admin.list_filter = ('name', 'category', 'company')
        list_filter = self.admin.get_list_filter(self.admin.request)
        self.assertEqual(['name', 'category'], list_filter)
    def add_products_and_sales(self):
        """Seed two sale months per company for self.product.

        Fixed: month_june was built with month=7 (identical to month_july);
        the assertions above read the "current" values from the July rows,
        which are unchanged by this correction.
        """
        month_june = date(day=1, month=6, year=2018)
        month_july = date(day=1, month=7, year=2018)
        sales_data = (dict(company=self.companies[0], product=self.product, sold=5, cost=Money(3.5, 'BRL'),
                           total=Money(57.5, 'BRL'), sale_month=month_june),
                      dict(company=self.companies[0], product=self.product, sold=7, cost=Money(3.5, 'BRL'),
                           total=Money(35, 'BRL'), sale_month=month_july),
                      dict(company=self.companies[1], product=self.product, sold=7, cost=Money(4.5, 'BRL'),
                           total=Money(55.5, 'BRL'), sale_month=month_june),
                      dict(company=self.companies[1], product=self.product, sold=7, cost=Money(4.5, 'BRL'),
                           total=Money(105, 'BRL'), sale_month=month_july),)
        self.sales = []
        for params in sales_data:
            self.sales.append(ProductsSaleFactory(**params))
| rubimpassos/finxiChallenge | salesmanagement/manager/tests/test_admin_product.py | test_admin_product.py | py | 5,540 | python | en | code | 0 | github-code | 13 |
29903430763 | import sys
# from rdflib import Graph, URIRef
import sys
from perseo.main import get_files, nt2ttl, uniqid, nt2ttl_quad
#
# from ..pyperseo.perseo.main import get_files , nt2ttl, uniqid
# from pyperseo.functions import get_files, nt2ttl, uniqid
# Tiny CLI: argv[0] selects the operation, argv[1] the file/directory stem.
argv = sys.argv[1:]
if argv[0] == "uniqids":
    # NOTE(review): get_files() is handed the subcommand name ("uniqids") as
    # its first argument while argv[1] is used as the directory below —
    # likely this should be get_files(argv[1], "csv"); confirm against
    # perseo's API.
    all_files = get_files(argv[0],"csv")
    for a in all_files:
        afile = argv[1] + "/" + a
        uniqid(afile)
elif argv[0] == "uniqid":
    file = argv[1] + ".csv"
    uniqid(file)
elif argv[0] == "nt2ttl":
    file = argv[1] + ".nt"
    nt2ttl(file)
elif argv[0] == "quad":
    file = argv[1] + ".nt"
    nt2ttl_quad(file)
else:
    print("You must provide an argument depending on your choosen functionality, like 'uniqid', 'uniqids' or 'nt2ttl'")
| pabloalarconm/PERSEO | cde_implementation/medusa.py | medusa.py | py | 764 | python | en | code | 0 | github-code | 13 |
28986708353 | import torch
import torch.nn as nn
import torch.nn.functional as F
from actor import Actor
class Critic(Actor):
    """Critic (Value) Model: estimates Q(state, action)."""
    def __init__(self, state_size, action_size, seed):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
        """
        # NOTE(review): super(Actor, self) deliberately (?) skips
        # Actor.__init__ and runs Actor's own base initializer instead, so
        # the actor's layers are never built here — confirm this is intended.
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        # State goes through fc1; the action is injected at the second layer
        self.fc1 = nn.Linear(state_size, 128)
        self.fc2 = nn.Linear(128 + action_size, 128)
        self.fc3 = nn.Linear(128, 1)
        # Batch normalization for the first hidden layer's output
        self.bn = nn.BatchNorm1d(128)
        # reset_parameters() comes from Actor — presumably initializes the
        # hidden layer weights; confirm against actor.py
        self.reset_parameters()
    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # First layer: states only, ReLU activation
        x = F.relu(self.fc1(state))
        # Batch-normalize the first layer's output
        x = self.bn(x)
        # Concatenate the normalized state features with the action vector
        x = torch.cat((x, action), dim=1)
        x = F.relu(self.fc2(x))
        # NOTE(review): the output is squashed with sigmoid, bounding the
        # "Q-value" to (0, 1) — unusual for a DDPG-style critic; confirm.
        x = torch.sigmoid(self.fc3(x))
        # Return the Q-Value for the input state-action
| afilimonov/udacity-deeprl-p2-continous-control | critic.py | critic.py | py | 1,619 | python | en | code | 0 | github-code | 13 |
26312106012 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 1 18:45:15 2022
@author: nathan
"""
import pandas as pd
import os
#Enter the path to the demand file
def main(path_result, path_input, scenario):
    """Compute each zone's peak demand and the hour it occurs, then write
    the summary workbook ``{scenario}_demand_results.xlsx`` to *path_result*.

    Parameters
    ----------
    path_result : str
        Directory where the results workbook is written.
    path_input : str
        Directory containing ``{scenario}_Demand_Real_Forecasted.xlsx``.
    scenario : str
        Scenario name used to build both file names.
    """
    demand_path = f'{path_input}/{scenario}_Demand_Real_Forecasted.xlsx'
    demand = pd.read_excel(demand_path, sheet_name='Zonal_Demand_Real')
    # Index by the 'date' column so idxmax() below reports timestamps.
    demand = demand.set_index(demand['date'])
    demand = demand.drop(['date'], axis=1)
    demand = demand.astype(float)

    # Peak demand per zone, and the timestamp at which each peak occurs.
    # (The unused *Dict copies from the original were removed.)
    maxDemand = demand.max(0)
    maxHour = demand.idxmax(0)

    results = pd.concat([maxHour, maxDemand], axis=1)
    results = results.set_axis(['Max Hour', 'Max Demand'], axis=1)

    # Renamed from 'path' to avoid shadowing the input-file path variable.
    with pd.ExcelWriter(f'{path_result}/{scenario}_demand_results.xlsx') as writer:
        results.to_excel(writer, sheet_name='results')
if __name__ == "__main__":
    # BUG FIX: main() requires (path_result, path_input, scenario); the
    # original called it with no arguments, which always raised TypeError.
    # Forward the three values from the command line instead.
    import sys
    main(*sys.argv[1:4])
36979023839 | from flask import Flask,jsonify
from main.service import user_service
app = Flask(__name__)
@app.route('/new-user', methods=['POST'])
def create_new_user():
    """Create a new user via the service layer and return its string
    representation as JSON."""
    user = user_service.create_new_user()
    # Idiomatic: str(user) instead of calling user.__str__() directly.
    return jsonify(str(user))
@app.route('/get-user/<idUser>', methods=['GET'])
def getUser(idUser):
    """Look up a user by id and return its string representation as JSON.

    The function name is kept as-is because Flask derives the endpoint
    name from it.
    """
    user = user_service.get_user_by_Id(idUser)
    # Idiomatic: str(user) instead of calling user.__str__() directly.
    return jsonify(str(user))
if __name__ == '__main__':
app.run()
| RafaelTeckGomes/my-project | apps-python/main/controller/user_controller.py | user_controller.py | py | 444 | python | en | code | 0 | github-code | 13 |
# Tuples are a type of data sequence.
# Tuples are immutable: you cannot append to or delete their elements.
x = (40,41,42) # () denotes declaration of a tuple; all 3 values are packed into it.
x[0] # will give 40

# This assigns 30 to age and 17 to years_of_school by unpacking the
# 2-element list produced by splitting on the comma.
(age,years_of_school) = "30,17".split(',')

# Functions can return tuples as values:
def square_info(x):
    """Return the (area, perimeter) tuple of a square with side length x."""
    area, perimeter = x ** 2, 4 * x
    print("Area and Perimeter:")
    return area, perimeter
print(square_info(3))
| mayurguptaiiitm/python_learning | tuples_learn.py | tuples_learn.py | py | 544 | python | en | code | 0 | github-code | 13 |
6971912636 | import tensorflow as tf
from tensorflow.python.ops.rnn_cell import LSTMCell
import numpy as np
class LSTMAutoencoder(object):
    """Basic version of LSTM-autoencoder.
    (cf. http://arxiv.org/abs/1502.04681)

    Encodes a window of feature vectors with an LSTM, then decodes the
    window from the encoder's final state; the MSE between input and
    reconstruction is exposed as ``self.loss``.

    Usage:
    ae = LSTMAutoencoder(hidden_num, inputs)
    sess.run(ae.train)
    """

    def __init__(
        self,
        hidden_num,
        batch_size,
        window_size,
        element_num,
        reverse=True,
        decode_without_input=False,
    ):
        """
        Args:
          hidden_num : number of hidden elements of each LSTM unit.
          batch_size : batch_size.
          window_size : the number of frames inside a datapoint
          element_num : the size of the feature vector
          reverse : Option to decode in reverse order.
          decode_without_input : Option to decode without input.
        """
        self.hidden_num = hidden_num
        self.batch_size = batch_size
        self.window_size = window_size
        self.element_num = element_num
        # Placeholder for a batch of windows: (batch, time, features).
        self.input_data = tf.placeholder(tf.float32, shape=[None, self.window_size, self.element_num], name = "input")
        # Split the (batch, time, features) tensor into `window_size`
        # (batch, features) tensors, as required by static_rnn.
        inputs = [tf.squeeze(t, [1]) for t in tf.split(self.input_data, self.window_size, 1)]
        # NOTE(review): encoder and decoder share the *same* cell object
        # (and therefore the same weights) -- confirm this is intentional.
        cell = tf.nn.rnn_cell.LSTMCell(self.hidden_num, use_peepholes=True)
        self._enc_cell = cell
        self._dec_cell = cell
        with tf.variable_scope('encoder'):
            (self.z_codes, self.enc_state) = tf.contrib.rnn.static_rnn(self._enc_cell, inputs, dtype=tf.float32)
        with tf.variable_scope('decoder') as vs:
            # Linear projection from the LSTM's hidden size back to the
            # original feature size.
            dec_weight_ = tf.Variable(
                tf.truncated_normal([self.hidden_num, self.element_num], dtype=tf.float32), name='dec_weight')
            dec_bias_ = tf.Variable(tf.constant(0.1, shape=[self.element_num], dtype=tf.float32), name='dec_bias')
            if decode_without_input:
                # Feed zeros at every decode step; the only information
                # flows in through the encoder's final state.
                dec_inputs = [tf.zeros(tf.shape(inputs[0]), dtype=tf.float32) for _ in range(len(inputs))]
                (dec_outputs, dec_state) = tf.contrib.rnn.static_rnn(
                    self._dec_cell,
                    dec_inputs,
                    initial_state=self.enc_state,
                    dtype=tf.float32
                )
                if reverse:
                    dec_outputs = dec_outputs[::-1]
                dec_output_ = tf.transpose(tf.stack(dec_outputs), [1, 0, 2])
                # Tile the projection weights across the batch so the
                # batched matmul below is well-defined.
                dec_weight_ = tf.tile(tf.expand_dims(dec_weight_, 0), [self.batch_size, 1, 1])
                self.output_ = tf.matmul(dec_output_, dec_weight_) + dec_bias_
            else:
                # Autoregressive decoding: each step consumes the previous
                # step's projected output, starting from a zero vector.
                dec_state = self.enc_state
                dec_input_ = tf.zeros(tf.shape(inputs[0]), dtype=tf.float32)
                dec_outputs = []
                for step in range(len(inputs)):
                    if step > 0:
                        # Reuse the decoder variables after the first step.
                        vs.reuse_variables()
                    (dec_input_, dec_state) = self._dec_cell(dec_input_, dec_state)
                    dec_input_ = tf.matmul(dec_input_, dec_weight_) + dec_bias_
                    dec_outputs.append(dec_input_)
                if reverse:
                    dec_outputs = dec_outputs[::-1]
                self.output_ = tf.transpose(tf.stack(dec_outputs), [1,0, 2])
        # Reconstruction target (batch, time, features) and MSE training loss.
        self.input_ = tf.transpose(tf.stack(inputs), [1, 0, 2])
        self.loss = tf.reduce_mean(tf.square(self.input_ - self.output_), name = "loss")
| icucockpit/PatientMonitoring-DeepLearning-py | blueprint/ICUCockpit/anomaly_detection/LSTM_autoencoder_.py | LSTM_autoencoder_.py | py | 3,344 | python | en | code | 0 | github-code | 13 |
7828034634 | import astropy
import astropy.io.fits as fits
import numpy as np
import gfs_sublink_utils as gsu
import make_color_image
import matplotlib
import matplotlib.pyplot as pyplot
import glob
import os
import time
import sys
sq_arcsec_per_sr = 42545170296.0
c = 3.0e8
def export_image(hdulist,camnum,filtername,label='',outdir='.',nonscatter=False,filterlabel=None):
    """Extract one filter image from a Sunrise broadband HDU list, convert
    it from surface brightness (W/m/m^2/Sr) to nJy per pixel, and write it
    to a single-HDU FITS file in *outdir*.

    Parameters
    ----------
    hdulist     : opened astropy HDUList of a Sunrise 'broadbandz' file.
    camnum      : camera index; selects the CAMERA<N>-BROADBAND extension.
    filtername  : filter name as listed in the FILTERS extension.
    label       : prefix prepended to the output file name.
    outdir      : output directory.
    nonscatter  : select the ...-BROADBAND-NONSCATTER extension instead.
    filterlabel : short name used in the output file name; defaults to the
                  part of *filtername* after the '/'.

    Returns
    -------
    (newdata, outputname) : the nJy image array and the written file path.
    """
    #get image in W/m/m^2/Sr
    #hdulist=fits.open(bbfile)

    # Index of the requested filter, and its effective wavelength in microns.
    fils=hdulist['FILTERS'].data['filter']
    fi=np.where(fils==filtername)[0][0]
    efl=hdulist['FILTERS'].data['lambda_eff']
    efl_microns=1.0e6 * efl[fi]

    if nonscatter is False:
        key='CAMERA'+'{}'.format(camnum)+'-BROADBAND'
    else:
        key='CAMERA'+'{}'.format(camnum)+'-BROADBAND-NONSCATTER'

    print('loading data into variable.. ')
    s=time.time()
    # Slice out only this filter's 2-D image plane.
    camdata=hdulist[key].data[fi,:,:]
    f=time.time()
    print('loading data finished in (s): ',f-s )
    print(camdata.shape)

    #convert to nJy
    redshift=hdulist['BROADBAND'].header['redshift']
    # Pixel scale: kpc from the FITS WCS, converted to arcsec at this redshift.
    pixsize_kpc=hdulist[key].header['CD1_1']
    pixsize_arcsec=pixsize_kpc/(gsu.illcos.kpc_proper_per_arcmin(redshift).value/60.0)
    print(pixsize_kpc,pixsize_arcsec)

    pixel_Sr = (pixsize_arcsec**2)/sq_arcsec_per_sr #pixel area in steradians: Sr/pixel
    to_nJy_per_Sr = (1.0e9)*(1.0e14)*(efl_microns**2)/c #((pixscale/206265.0)^2)*
    to_nJy_per_pix = to_nJy_per_Sr*pixel_Sr
    newdata=camdata*to_nJy_per_pix

    #save new image
    if filterlabel is None:
        fname=filtername.split('/')[1]
    else:
        fname=filterlabel
    outputname=os.path.join(outdir, label+'cam'+'{:02d}'.format(camnum)+'_'+fname+'_pix'+'{:6.4f}'.format(pixsize_arcsec)+'_nJy.fits')
    print(outputname)
    # Carry the redshift and pixel scale forward into the output header.
    outhdu=fits.PrimaryHDU(newdata)
    outhdu.header['redshift']=hdulist['BROADBAND'].header['redshift']
    outhdu.header['PIXSIZE']=(pixsize_arcsec,'arcsec')
    outhdu.header['PIXKPC']=(pixsize_kpc,'kpc')
    outlist=fits.HDUList([outhdu])
    outlist.writeto(outputname,overwrite=True)
    #fits.writeto(outputname,newdata,overwrite=True)

    return newdata,outputname
def export_hdst_filters(hdu_obj,camnum,vl,outdir='.',nonscatter=False):
    """Export eight HST/JWST filter images for one camera and render an RGB
    composite PDF from them.

    Parameters
    ----------
    hdu_obj    : either an opened broadband HDUList, or an array of already
                 exported per-filter FITS file names (re-use path).
    camnum     : camera index.
    vl         : label prefix for output file names.
    outdir     : output directory.
    nonscatter : forwarded flag (currently unused in the array branch).
    """
    if hdu_obj.__class__ != fits.hdu.hdulist.HDUList:
        #pass filenames instead of hdu obj
        # Re-use previously exported per-filter images: pick each file by
        # matching 'cam<NN>_<FILTER>_' in its name and load its data.
        these_files=hdu_obj
        fn_u=these_files[np.where(np.core.defchararray.find(these_files, 'cam{:02d}'.format(camnum)+'_U_') != -1)[0]][0] ; u=(fits.open(fn_u)[0]).data
        fn_b=these_files[np.where(np.core.defchararray.find(these_files, 'cam{:02d}'.format(camnum)+'_B_') != -1)[0]][0] ; b=fits.open(fn_b)[0].data
        fn_v=these_files[np.where(np.core.defchararray.find(these_files, 'cam{:02d}'.format(camnum)+'_V_') != -1)[0]][0] ; v=fits.open(fn_v)[0].data
        fn_i=these_files[np.where(np.core.defchararray.find(these_files, 'cam{:02d}'.format(camnum)+'_I_') != -1)[0]][0] ; i=fits.open(fn_i)[0].data
        fn_z=these_files[np.where(np.core.defchararray.find(these_files, 'cam{:02d}'.format(camnum)+'_Z_') != -1)[0]][0] ; z=fits.open(fn_z)[0].data
        fn_j=these_files[np.where(np.core.defchararray.find(these_files, 'cam{:02d}'.format(camnum)+'_nircam_f115w_') != -1)[0]][0] ; j=fits.open(fn_j)[0].data
        fn_h=these_files[np.where(np.core.defchararray.find(these_files, 'cam{:02d}'.format(camnum)+'_nircam_f150w_') != -1)[0]][0] ; h=fits.open(fn_h)[0].data
        fn_k=these_files[np.where(np.core.defchararray.find(these_files, 'cam{:02d}'.format(camnum)+'_nircam_f200w_') != -1)[0]][0] ; k=fits.open(fn_k)[0].data
        redshift=fits.open(fn_v)[0].header['redshift']
    else:
        # Fresh export from the broadband HDU list, one call per filter.
        u,fn_u=export_image(hdu_obj,camnum,'hst/wfc3_f336w',label=vl,outdir=outdir,filterlabel='U')
        b,fn_b=export_image(hdu_obj,camnum,'hst/acs_f435w',label=vl,outdir=outdir,filterlabel='B')
        v,fn_v=export_image(hdu_obj,camnum,'hst/acs_f606w',label=vl,outdir=outdir,filterlabel='V')
        i,fn_i=export_image(hdu_obj,camnum,'hst/acs_f775w',label=vl,outdir=outdir,filterlabel='I')
        z,fn_z=export_image(hdu_obj,camnum,'hst/acs_f850lp',label=vl,outdir=outdir,filterlabel='Z')
        j,fn_j=export_image(hdu_obj,camnum,'jwst/nircam_f115w',label=vl,outdir=outdir,filterlabel='nircam_f115w')
        h,fn_h=export_image(hdu_obj,camnum,'jwst/nircam_f150w',label=vl,outdir=outdir,filterlabel='nircam_f150w')
        k,fn_k=export_image(hdu_obj,camnum,'jwst/nircam_f200w',label=vl,outdir=outdir,filterlabel='nircam_f200w')
        redshift=hdu_obj['BROADBAND'].header['redshift']

    scalefactor=(1.0/(1.0+redshift))

    # RGB channels: red = reddest NIRCam bands, green = I/Z/F115W,
    # blue = bluest HST bands.
    rr=h+k
    gg=i+z+j
    bb=u+b+v

    fig = pyplot.figure(figsize=(6,6), dpi=600)
    pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0,wspace=0.0,hspace=0.0)
    axi = fig.add_subplot(1,1,1)
    axi.set_xticks([]) ; axi.set_yticks([])
    # Lupton-style stretch parameters; alpha scales with (1+z)^5.
    alph=10.0*(0.33/scalefactor)**5 ; Q=5.0

    rgbthing = make_color_image.make_interactive_nasa(bb,gg,rr,alph,Q)
    axi.imshow(rgbthing,interpolation='nearest',aspect='auto',origin='lower')

    outfile=os.path.join(outdir,'rgb_'+vl+'.pdf')
    fig.savefig(outfile,dpi=300,facecolor='Black')
    pyplot.close(fig)

    return
def export_run(sim_folder,camnums=np.arange(19),nonscatter=False,outdir='/astro/snyder_lab2/New_HydroART_images/VELA_v2/luvoir_mocks'):
    """Walk a simulation folder's ``*_sunrise`` snapshot directories and
    export per-camera filter images (plus RGB composites) for each one.

    Parameters
    ----------
    sim_folder : path to the simulation run; its basename labels the output.
    camnums    : iterable of camera indices to export.
    nonscatter : forwarded to export_hdst_filters.
    outdir     : root directory under which per-run output folders are made.
    """
    sunrise_dirs=np.sort(np.asarray(glob.glob(os.path.join(sim_folder,'*_sunrise'))))
    print(sunrise_dirs)
    sim_label=os.path.basename(sim_folder)
    print(sim_label)

    output_dir=os.path.join(outdir,sim_label)
    print(output_dir)
    if not os.path.lexists(output_dir):
        os.mkdir(output_dir)

    for sim_dir in sunrise_dirs:
        # Snapshot label is the second-to-last '_'-separated token.
        time_folder=os.path.basename(sim_dir)
        time_label=time_folder.split('_')[-2]
        print(time_label)
        # Prefer the gzipped broadband file; skip the snapshot if neither exists.
        bb_file=os.path.join(sim_dir,'images/broadbandz.fits.gz')
        if not os.path.lexists(bb_file):
            bb_file=os.path.join(sim_dir,'images/broadbandz.fits')
            if not os.path.lexists(bb_file):
                continue

        #check if exported fits files exist already.. in export_hdst_filters_function ?
        these_files=np.asarray(glob.glob(os.path.join(output_dir,'*/'+sim_label+'_'+time_label+'*.fits')),dtype=np.str_)
        print(these_files.shape, these_files, bb_file, sim_dir, output_dir, sim_label, time_label)
        # 8 filters per camera: when all outputs exist, pass the file list
        # through so export_hdst_filters re-uses them instead of re-exporting.
        if these_files.shape[0]==8*camnums.shape[0]:
            print('Files exist: ', these_files.shape[0], these_files[0])
            hdu_obj=these_files
        else:
            #print('loading hdulist... ')
            #s=time.time()
            hdu_obj=fits.open(bb_file,lazy_load_hdus=False)
            #load all camera data
            #this doesn't work: hdu_obj[slice:slice].readall()
            l=len(hdu_obj)
            #f=time.time()
            #print('loaded hdulist... in (s):',f-s)

        for cn in camnums:
            cs='{:02d}'.format(cn)
            cam_dir=os.path.join(output_dir, 'hires_images_cam'+cs)
            print(cam_dir)
            if not os.path.lexists(cam_dir):
                os.mkdir(cam_dir)
            export_hdst_filters(hdu_obj,cn,sim_label+'_'+time_label,outdir=cam_dir,nonscatter=nonscatter)

    return
| gsnyder206/mock-surveys | original_illustris/export_images.py | export_images.py | py | 7,324 | python | en | code | 7 | github-code | 13 |
70523840339 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Counter
import back_link as link
def get_pwd(ID):
    """Return the stored password for *ID*, or '' when the ID is unknown.

    SECURITY FIX: the original built the SQL by concatenating *ID* straight
    into the query string, allowing SQL injection; the lookup is now
    parameterized.
    """
    db, cursor = link.conn()
    try:
        # NOTE(review): '%s' is the paramstyle for MySQLdb/pymysql; if
        # link.conn() returns an sqlite3 connection, use '?' instead.
        cursor.execute("select pwd from ID_pwd where ID= %s", (ID,))
        row = cursor.fetchone()
        # fetchone() returns None when no row matched; the original
        # crashed with a TypeError in that case.
        return str(row[0]) if row is not None else ''
    finally:
        # Close the connection even when the query raises.
        db.close()
| CUG-LXY/undergraduateproject | py&sqlfor数据库/back_login.py | back_login.py | py | 317 | python | en | code | 0 | github-code | 13 |
1966347101 | from Balance.conversor import converte_amount_str
from Balance.conversor import negative_amount
"""arquivo que contem uma unica funcao que converte o valor inserido em string para ser retornado no final"""
class Category:
    """A budget category with a shared ledger and a per-category spending chart.

    NOTE(review): ledger/balance/categories/spent/spentmap are *class*
    attributes, so they are shared by every Category instance -- confirm
    this global-bookkeeping design is intentional.
    """
    ledger: list = [] # the statement: what is shown when the object is printed
    balance: float = 0
    categories: list = [] # used in the spending chart
    spent: float = 0
    spentmap: dict = {} # spent amounts keyed by category name; used in the spending chart

    def __init__(self, category: str):
        self.__category: str = category
        self.__balance: float = Category.balance
        # NOTE(review): this instance ledger stays empty -- deposit/withdraw
        # append to the class-level Category.ledger instead; confirm.
        self.__ledger: list = []
        Category.categories.append(self.__category)
        self.__spent: float = Category.spent
        self.__spentmap = Category.spentmap

    def check_funds(self, amount):
        # True when the instance balance covers *amount*.
        if float(amount) > self.__balance:
            return False
        else:
            return True

    def deposit(self: object, amount: float, description: str ="None"):
        # Record the deposit on the shared class ledger.
        Category.ledger.append({'amount': amount, 'description': description})
        # NOTE(review): adds to Category.balance (always 0) rather than the
        # current instance balance -- looks like a bug; confirm.
        self.__balance: float = Category.balance + float(amount)

    def withdraw(self, amount, description: str ='None'):
        x = Category.check_funds(self, amount)
        if x == True:
            # Ledger stores withdrawals as negative amounts.
            Category.ledger.append({'amount': negative_amount(amount), 'description': description})
            self.__balance = self.__balance - float(amount)
            Category.spent = Category.spent + float(amount)
            # Accumulate this category's spending in the shared spentmap.
            found = False
            for chave in Category.spentmap.keys():
                if chave == f'{self.__category}':
                    Category.spentmap[f'{self.__category}'] = Category.spentmap[f'{self.__category}'] + float(amount)
                    found = True
            if found == False:
                Category.spentmap[f'{self.__category}'] = float(amount)
            return True
        else:
            return False

    def get_balance(self) -> str:
        # Returns a Portuguese-language balance message (user-facing string
        # kept as-is).
        return f'O saldo é {self._Category__balance}'

    def transfer(self, amount, another_category):
        """Withdraw from the current category (self) and deposit into another
        category (another_category)."""
        # NOTE(review): the descriptions below are literal strings -- the
        # f-prefix is *inside* the quotes, so the placeholders are never
        # interpolated; confirm whether that is intended.
        if Category.check_funds(self, amount) == True:
            Category.withdraw(self, amount, "Transfer to f'{another_category}'")
            Category.deposit(another_category, amount, "Transfer from {self.__category}")
            return True
        else:
            return False

    def __str__(self: object) -> str:
        """What is shown on screen when the object is printed -> print(obj)."""
        return f'{self.formatacao()}'

    def formatacao(self: object) -> str:
        """Formats the ledger when the object is printed."""
        print(f'{self.__category:*^30}')
        for element in self.__ledger:
            print('{: <23}'.format(element['description']), '{: >6}'.format(converte_amount_str(element['amount'])))
        print('{: ^7} {: <23}'.format('Total: ', f'{self._Category__balance}'))
        return ''

    def bottom_chart_format():
        # Builds the per-category character columns for the chart's footer:
        # each category name is padded with spaces to the longest name.
        f = ' '
        if len(Category.categories) >= 1:
            c1 = list(Category.categories[0])
            maior = len((c1))
        if len(Category.categories) >= 2:
            c2 = list(Category.categories[1])
            if len((c1)) > len((c2)):
                for x in range((len(c1) - len(c2))):
                    c2.append(f)
            if len((c2)) > len((c1)):
                for x in range((len(c2) - len(c1))):
                    c1.append(f)
                maior = len((c2))
        else:
            return c1
        if len(Category.categories) >= 3:
            c3 = list(Category.categories[2])
            if len((c3)) > maior:
                for x in range((len((c3)) - maior)):
                    c2.append(f)
                    c1.append(f)
                maior = len((c3))
            elif maior > len((c3)):
                for x in range(maior - len((c3))):
                    c3.append(f)
        else:
            return c1, c2
        if len(Category.categories) == 4:
            c4 = list(Category.categories[3])
            if len((c4)) > maior:
                for x in range((len((c3)) - maior)):
                    c3.append(f)
                    c2.append(f)
                    c1.append(f)
                maior = len((c4))
            elif maior > len((c4)):
                for x in range(maior - len((c4))):
                    c4.append(f)
            return c1, c2, c3, c4
        else:
            return c1, c2, c3

    def create_spend_chart():
        # Prints a vertical bar chart of each category's share (in tens of
        # percent) of the total amount spent.
        percentage0:float = 0
        percentage1:float = 0
        percentage2:float = 0
        percentage3:float = 0
        p0:str = ''
        p1:str = ''
        p2:str = ''
        p3:str = ''
        fin:str = '0|'
        if len(Category.categories) == 1:
            percentage0 = 10*(Category.spentmap[Category.categories[0]] / Category.spent)
        if len(Category.categories) == 2:
            percentage0 = 10*(Category.spentmap[Category.categories[0]] / Category.spent)
            percentage1 = 10*(Category.spentmap[Category.categories[1]] / Category.spent)
        if len(Category.categories) == 3:
            percentage0 = 10*(Category.spentmap[Category.categories[0]] / Category.spent)
            percentage1 = 10*(Category.spentmap[Category.categories[1]] / Category.spent)
            percentage2 = 10*(Category.spentmap[Category.categories[2]] / Category.spent)
        if len(Category.categories) == 4:
            percentage0 = 10*(Category.spentmap[Category.categories[0]] / Category.spent)
            percentage1 = 10*(Category.spentmap[Category.categories[1]] / Category.spent)
            percentage2 = 10*(Category.spentmap[Category.categories[2]] / Category.spent)
            percentage3 = 10*(Category.spentmap[Category.categories[3]] / Category.spent)
        print('Percentage spent by category')
        # One chart row per 10% step, from 100 down to 10.
        for x in range(10, 0, -1):
            if percentage0 >= x:
                p0 = 'o'
            else:
                p0 = ''
            if percentage1 >= x:
                p1 = 'o'
            else:
                p1 = ''
            if percentage2 >= x:
                p2 = 'o'
            else:
                p2 = ''
            if percentage3 >= x:
                p3 = 'o'
            else:
                p3 = ''
            print(f'{x: >2}{fin:^2}{p0: ^3}{p1: ^3}{p2: ^3}{p3: ^3}')
| vagamerous/FreeCodeCamp-Projects | Balance/freeCodeCampProject3.py | freeCodeCampProject3.py | py | 5,787 | python | en | code | 0 | github-code | 13 |
17034183124 | from __future__ import unicode_literals, division
import logging
import os
import shlex
from collections import deque
from itertools import starmap
from threading import Thread, Event
from time import time
from typing import Text, Sequence
import attr
import psutil
from pathlib2 import Path
from clearml_agent.session import Session
from clearml_agent.definitions import ENV_WORKER_TAGS
try:
from .gpu import gpustat
except ImportError:
gpustat = None
log = logging.getLogger(__name__)
class BytesSizes(object):
    """Converters from a raw byte count to larger binary (1024-based) units."""

    _KB = 1024.0
    _MB = 1024.0 * 1024.0
    _GB = 1024.0 * 1024.0 * 1024.0

    @staticmethod
    def kilobytes(x):
        # type: (float) -> float
        """Bytes -> kibibytes."""
        return x / BytesSizes._KB

    @staticmethod
    def megabytes(x):
        # type: (float) -> float
        """Bytes -> mebibytes."""
        return x / BytesSizes._MB

    @staticmethod
    def gigabytes(x):
        # type: (float) -> float
        """Bytes -> gibibytes."""
        return x / BytesSizes._GB
class ResourceMonitor(object):
    """Samples machine stats (CPU/memory/disk/network/GPU) in a background
    thread and periodically reports averaged readouts to the backend."""

    @attr.s
    class StatusReport(object):
        # Current task / queue(s) the worker is serving; None fields are
        # dropped from the payload by to_dict().
        task = attr.ib(default=None, type=str)
        queue = attr.ib(default=None, type=str)
        queues = attr.ib(default=None, type=Sequence[str])

        def to_dict(self):
            return {
                key: value
                for key, value in attr.asdict(self).items()
                if value is not None
            }

    def __init__(
        self,
        session,  # type: Session
        worker_id,  # type: ResourceMonitor.StatusReport,
        sample_frequency_per_sec=2.0,
        report_frequency_sec=30.0,
        first_report_sec=None,
        worker_tags=None,
    ):
        self.session = session
        # Single-slot queue: only the most recent StatusReport is kept.
        self.queue = deque(maxlen=1)
        self.queue.appendleft(self.StatusReport())
        self._worker_id = worker_id
        self._sample_frequency = sample_frequency_per_sec
        self._report_frequency = report_frequency_sec
        self._first_report_sec = first_report_sec or report_frequency_sec
        self._num_readouts = 0
        self._readouts = {}
        self._previous_readouts = {}
        self._previous_readouts_ts = time()
        self._thread = None
        self._exit_event = Event()
        self._gpustat_fail = 0
        self._gpustat = gpustat
        self._active_gpus = None
        if not worker_tags and ENV_WORKER_TAGS.get():
            worker_tags = shlex.split(ENV_WORKER_TAGS.get())
        self._worker_tags = worker_tags
        if Session.get_nvidia_visible_env() == 'none':
            # NVIDIA_VISIBLE_DEVICES set to none, marks cpu_only flag
            # active_gpus == False means no GPU reporting
            self._active_gpus = False
        elif not self._gpustat:
            log.warning('ClearML-Agent Resource Monitor: GPU monitoring is not available')
        else:
            # None means no filtering, report all gpus
            self._active_gpus = None
            try:
                active_gpus = Session.get_nvidia_visible_env()
                # None means no filtering, report all gpus
                if active_gpus and active_gpus != "all":
                    self._active_gpus = [g.strip() for g in str(active_gpus).split(',')]
            except Exception:
                pass

    def set_report(self, report):
        # type: (ResourceMonitor.StatusReport) -> ()
        """Replace the report that will accompany the next status update."""
        if report is not None:
            self.queue.appendleft(report)

    def get_report(self):
        # type: () -> ResourceMonitor.StatusReport
        """Return the most recently set StatusReport."""
        return self.queue[0]

    def start(self):
        """Start the sampling/reporting daemon thread; returns self."""
        self._exit_event.clear()
        self._thread = Thread(target=self._daemon)
        self._thread.daemon = True
        self._thread.start()
        return self

    def stop(self):
        """Signal the daemon thread to exit and send one final report."""
        self._exit_event.set()
        self.send_report()

    def send_report(self, stats=None):
        """POST a workers.status_report to the backend; returns True on success."""
        report = dict(
            machine_stats=stats,
            timestamp=(int(time()) * 1000),
            worker=self._worker_id,
            tags=self._worker_tags,
            **self.get_report().to_dict()
        )
        log.debug("sending report: %s", report)
        try:
            self.session.get(service="workers", action="status_report", **report)
        except Exception:
            log.warning("Failed sending report: %s", report)
            return False
        return True

    def _daemon(self):
        """Thread body: sample stats at the sample frequency and report the
        averaged values at the report frequency until stop() is called."""
        seconds_since_started = 0
        reported = 0
        try:
            while True:
                last_report = time()
                # The very first report may use a shorter interval.
                current_report_frequency = (
                    self._report_frequency if reported != 0 else self._first_report_sec
                )
                while (time() - last_report) < current_report_frequency:
                    # wait for self._sample_frequency seconds, if event set quit
                    if self._exit_event.wait(1 / self._sample_frequency):
                        return
                    # noinspection PyBroadException
                    try:
                        self._update_readouts()
                    except Exception as ex:
                        log.warning("failed getting machine stats: %s", report_error(ex))
                        self._failure()

                seconds_since_started += int(round(time() - last_report))
                # check if we do not report any metric (so it means the last iteration will not be changed)

                # if we do not have last_iteration, we just use seconds as iteration

                # start reporting only when we figured out, if this is seconds based, or iterations based
                average_readouts = self._get_average_readouts()
                stats = {
                    # 3 points after the dot
                    key: round(value, 3) if isinstance(value, float) else [round(v, 3) for v in value]
                    for key, value in average_readouts.items()
                }

                # send actual report
                if self.send_report(stats):
                    # clear readouts if this is update was sent
                    self._clear_readouts()

                # count reported iterations
                reported += 1
        except Exception as ex:
            log.exception("Error reporting monitoring info: %s", str(ex))

    def _update_readouts(self):
        """Accumulate one sample into the running readout totals; *_mbs keys
        are converted from cumulative counters to per-second rates."""
        readouts = self._machine_stats()
        elapsed = time() - self._previous_readouts_ts
        self._previous_readouts_ts = time()

        def fix(k, v):
            if k.endswith("_mbs"):
                v = (v - self._previous_readouts.get(k, v)) / elapsed
            if v is None:
                v = 0
            return k, self._readouts.get(k, 0) + v

        self._readouts.update(starmap(fix, readouts.items()))
        self._num_readouts += 1
        self._previous_readouts = readouts

    def _get_num_readouts(self):
        return self._num_readouts

    def _get_average_readouts(self):
        """Average the accumulated samples and map raw keys to backend stat
        names; per-GPU values are grouped into ordered lists."""
        def create_general_key(old_key):
            """
            Create key for backend payload
            :param old_key: old stats key
            :type old_key: str
            :return: new key for sending stats
            :rtype: str
            """
            key_parts = old_key.rpartition("_")
            return "{}_*".format(key_parts[0] if old_key.startswith("gpu") else old_key)

        ret = {}
        # make sure the gpu/cpu stats are always ordered in the accumulated values list (general_key)
        ordered_keys = sorted(self._readouts.keys())
        for k in ordered_keys:
            v = self._readouts[k]
            stat_key = self.BACKEND_STAT_MAP.get(k)
            if stat_key:
                ret[stat_key] = v / self._num_readouts
            else:
                general_key = create_general_key(k)
                general_key = self.BACKEND_STAT_MAP.get(general_key)
                if general_key:
                    ret.setdefault(general_key, []).append(v / self._num_readouts)
                else:
                    pass  # log.debug("Cannot find key {}".format(k))
        return ret

    def _clear_readouts(self):
        self._readouts = {}
        self._num_readouts = 0

    def _machine_stats(self):
        """
        :return: machine stats dictionary, all values expressed in megabytes
        """
        cpu_usage = psutil.cpu_percent(percpu=True)
        stats = {"cpu_usage": sum(cpu_usage) / len(cpu_usage)}

        virtual_memory = psutil.virtual_memory()
        stats["memory_used"] = BytesSizes.megabytes(virtual_memory.used)
        stats["memory_free"] = BytesSizes.megabytes(virtual_memory.available)
        disk_use_percentage = psutil.disk_usage(Text(Path.home())).percent
        stats["disk_free_percent"] = 100 - disk_use_percentage
        sensor_stat = (
            psutil.sensors_temperatures()
            if hasattr(psutil, "sensors_temperatures")
            else {}
        )
        if "coretemp" in sensor_stat and len(sensor_stat["coretemp"]):
            stats["cpu_temperature"] = max([t.current for t in sensor_stat["coretemp"]])

        # update cached measurements
        # Cumulative counters; _update_readouts converts *_mbs to rates.
        net_stats = psutil.net_io_counters()
        stats["network_tx_mbs"] = BytesSizes.megabytes(net_stats.bytes_sent)
        stats["network_rx_mbs"] = BytesSizes.megabytes(net_stats.bytes_recv)
        io_stats = psutil.disk_io_counters()
        stats["io_read_mbs"] = BytesSizes.megabytes(io_stats.read_bytes)
        stats["io_write_mbs"] = BytesSizes.megabytes(io_stats.write_bytes)

        # check if we need to monitor gpus and if we can access the gpu statistics
        if self._active_gpus is not False and self._gpustat:
            try:
                gpu_stat = self._gpustat.new_query()
                for i, g in enumerate(gpu_stat.gpus):
                    # only monitor the active gpu's, if none were selected, monitor everything
                    if self._active_gpus and str(i) not in self._active_gpus:
                        continue
                    stats["gpu_temperature_{:d}".format(i)] = g["temperature.gpu"]
                    stats["gpu_utilization_{:d}".format(i)] = g["utilization.gpu"]
                    stats["gpu_mem_usage_{:d}".format(i)] = (
                        100.0 * g["memory.used"] / g["memory.total"]
                    )
                    # already in MBs
                    stats["gpu_mem_free_{:d}".format(i)] = (
                        g["memory.total"] - g["memory.used"]
                    )
                    stats["gpu_mem_used_%d" % i] = g["memory.used"]
            except Exception as ex:
                # something happened and we can't use gpu stats,
                log.warning("failed getting machine stats: %s", report_error(ex))
                self._failure()

        return stats

    def _failure(self):
        # After 3 consecutive GPU read failures, disable GPU monitoring.
        self._gpustat_fail += 1
        if self._gpustat_fail >= 3:
            log.error(
                "GPU monitoring failed getting GPU reading, switching off GPU monitoring"
            )
            self._gpustat = None

    # Raw readout key (or 'prefix_*' wildcard for per-GPU/per-CPU keys)
    # -> backend stat name.
    BACKEND_STAT_MAP = {"cpu_usage_*": "cpu_usage",
                        "cpu_temperature_*": "cpu_temperature",
                        "disk_free_percent": "disk_free_home",
                        "io_read_mbs": "disk_read",
                        "io_write_mbs": "disk_write",
                        "network_tx_mbs": "network_tx",
                        "network_rx_mbs": "network_rx",
                        "memory_free": "memory_free",
                        "memory_used": "memory_used",
                        "gpu_temperature_*": "gpu_temperature",
                        "gpu_mem_used_*": "gpu_memory_used",
                        "gpu_mem_free_*": "gpu_memory_free",
                        "gpu_utilization_*": "gpu_usage"}
def report_error(ex):
    """Return a short '<ExceptionType>: <message>' description of *ex*."""
    return "%s: %s" % (type(ex).__name__, ex)
| allegroai/clearml-agent | clearml_agent/helper/resource_monitor.py | resource_monitor.py | py | 11,656 | python | en | code | 205 | github-code | 13 |
15505240734 | from typing import List
from app.api import crud
from app.api.models import BookDB, BookSchema
from fastapi import APIRouter, HTTPException, Path
router = APIRouter()
@router.post("/", response_model=BookDB, status_code=201)
async def create_book(payload: BookSchema):
    """Persist a new book and echo it back together with its generated id."""
    new_id = await crud.post(payload)
    return {"id": new_id, "title": payload.title, "author": payload.author}
@router.get("/{id}/", response_model=BookDB)
async def read_book(
    id: int = Path(..., gt=0),
):
    """Fetch a single book by its id; responds 404 when it does not exist."""
    book = await crud.get(id)
    if book:
        return book
    raise HTTPException(status_code=404, detail="Book not found")
@router.get("/", response_model=List[BookDB])
async def read_all_books():
    """Return every book in the collection."""
    books = await crud.get_all()
    return books
@router.put("/{id}/", response_model=BookDB)
async def update_book(
    payload: BookSchema,
    id: int = Path(..., gt=0),
):
    """Replace an existing book's fields; responds 404 when the id is unknown."""
    if not await crud.get(id):
        raise HTTPException(status_code=404, detail="Book not found")
    book_id = await crud.put(id, payload)
    return {"id": book_id, "title": payload.title, "author": payload.author}
@router.delete("/{id}/", response_model=BookDB)
async def delete_book(id: int = Path(..., gt=0)):
    """Remove a book by id and return the deleted record; 404 when absent."""
    target = await crud.get(id)
    if not target:
        raise HTTPException(status_code=404, detail="Book not found")
    await crud.delete(id)
    return target
| jitsejan/fastapi-postgres-crud-vuejs | backend/app/api/books.py | books.py | py | 1,499 | python | en | code | 0 | github-code | 13 |
18089515767 | import sys
from ctypes import *
import time
import pysdl2.sdl2 as sdl2
from pysdl2.sdl2.keycode import *
from glfuncs import *
from glconstants import *
import Square
import Hexagon
from Program import Program
def debugcallback(source,typ, id_,severity, length, message, obj ):
    """OpenGL debug-message callback: print the driver-supplied message.

    The signature matches GLDEBUGPROC (registered via
    glDebugMessageCallback below); every argument except `message` is
    ignored.
    """
    print(message)
# Initialize SDL's video subsystem and create the application window.
sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO)
win = sdl2.SDL_CreateWindow( b"ETGG",20,20, 512,512, sdl2.SDL_WINDOW_OPENGL)
if not win:
    print("Could not create window")
    raise RuntimeError()

# Request a core-profile OpenGL 3.3 *debug* context with a 24-bit depth
# buffer and an 8-bit stencil buffer.
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK, sdl2.SDL_GL_CONTEXT_PROFILE_CORE)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_DEPTH_SIZE, 24)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_STENCIL_SIZE, 8)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MAJOR_VERSION,3)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MINOR_VERSION,3)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_FLAGS,sdl2.SDL_GL_CONTEXT_DEBUG_FLAG)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK, sdl2.SDL_GL_CONTEXT_PROFILE_CORE)
rc = sdl2.SDL_GL_CreateContext(win)
if not rc:
    print("Cannot create GL context")
    raise RuntimeError()

# Route every GL debug message synchronously to debugcallback.
glDebugMessageControl(GL_DONT_CARE,GL_DONT_CARE,GL_DONT_CARE, 0, None, 1 )
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS)
glDebugMessageCallback(debugcallback,None)

# Shader program and the two pieces of scene geometry.
prog = Program("vs.txt","fs.txt")
prog.use()

square = Square.Square()
hexagon = Hexagon.Hexagon()

keyset = set()  # SDLK_* codes of keys currently held down
glClearColor(0.2,0.4,0.6,1.0)
r=0  # red channel of the clear color
ev=sdl2.SDL_Event()
prev = sdl2.SDL_GetTicks()  # NOTE(review): assigned but never used below -- confirm
# Main loop: drain all pending SDL events, then render one frame.
while 1:
    while 1:
        # Event pump: handle every queued event before drawing.
        if not sdl2.SDL_PollEvent(byref(ev)):
            break
        if ev.type == sdl2.SDL_QUIT:
            sys.exit(0)
        elif ev.type == sdl2.SDL_KEYDOWN:
            k = ev.key.keysym.sym
            print("key down:",k)
            if k == SDLK_q:
                # Q quits the application.
                sys.exit(0)
            elif k == SDLK_1:
                if k not in keyset:
                    keyset.add(k)
            elif k == SDLK_2:
                if k not in keyset:
                    keyset.add(k)
        elif ev.type == sdl2.SDL_KEYUP:
            k = ev.key.keysym.sym
            print("key up:",k)
            keyset.discard(k)
        elif ev.type == sdl2.SDL_MOUSEBUTTONDOWN:
            print("mouse down:",ev.button.button,ev.button.x,ev.button.y)
        elif ev.type == sdl2.SDL_MOUSEBUTTONUP:
            print("mouse up:",ev.button.button,ev.button.x,ev.button.y)
        #elif ev.type == sdl2.SDL_MOUSEMOTION:
            #print("mouse move:",ev.motion.x,ev.motion.y)
    glClearColor(r,0.4,0.6,1.0)
    glClear(GL_COLOR_BUFFER_BIT)
    # Draw whichever shapes are toggled on by the held 1 / 2 keys.
    if SDLK_1 in keyset:
        square.draw(prog)
    if SDLK_2 in keyset:
        hexagon.draw(prog)
    sdl2.SDL_GL_SwapWindow(win)
| TylermEvans/Portfolio | ETGG2801 Labs/lab 3/main.py | main.py | py | 2,680 | python | en | code | 1 | github-code | 13 |
69829707219 | import numpy as np
class Matrix:
    '''A small dense-matrix class backed by nested Python lists.

    ``self.elems`` holds the entries row-major as ``[[row0...], [row1...], ...]``
    and ``self.shape`` holds ``[n_rows, n_cols]``.
    '''

    def __init__(self, shape, elems=None):
        '''
        Summary:
            Initializes a matrix with a given shape. At the init
            phase, the user can pass elements if they would like. Else,
            they will be zeros.
        Parameters
        ----------
        shape : a given 2-dimensional shape of rows and columns. Given
                in the form shape = [int, int]
        elems : list of row lists, optional. Fill the matrix at initialization.

        Note: the original signature used a mutable default (``elems=[]``);
        ``None`` avoids sharing one list object across calls.
        '''
        if not elems:
            # No elements supplied -> zero matrix of the requested shape.
            self.elems = [[0 for _ in range(shape[1])] for _ in range(shape[0])]
        else:
            self.elems = elems
        self.shape = shape

    def add(self, other):
        '''
        Summary:
            Return the element-wise sum ``self + other``.
        Parameters
        ----------
        other : a Matrix of identical dimensions to self.

        Bug fix: the shape check originally used ``and``, so matrices that
        differed in only one dimension slipped through and crashed below.
        '''
        if other.shape[0] != self.shape[0] or other.shape[1] != self.shape[1]:
            exit('The matrices you are trying to add are not the same size')
        output = Matrix(self.shape)
        for row in range(len(self.elems)):
            for col in range(len(self.elems[row])):
                output.elems[row][col] = self.elems[row][col] + other.elems[row][col]
        return output

    def transpose(self):
        '''
        Summary:
            Return the transpose (Eq. 6 in Michael Lam's ASTP720 Matrix Notes).

        Bug fix: the output now has the flipped shape, so non-square
        matrices transpose without an IndexError.
        '''
        output = Matrix([self.shape[1], self.shape[0]])
        for row in range(len(self.elems)):
            for col in range(len(self.elems[row])):
                output.elems[col][row] = self.elems[row][col]
        return output

    def residual(self, ith_row, jth_col):
        '''
        Summary:
            Returns self with the ith_row and jth_col removed (the minor
            submatrix used for cofactor expansion).
        Parameters
        ----------
        ith_row : int, the row you want removed.
        jth_col : int, the column you want removed.

        Bug fix: the inner loop originally ranged over the *row* count,
        which is wrong for non-square matrices.
        '''
        output = Matrix([self.shape[0] - 1, self.shape[1] - 1])
        # Empty the elements so we can append row by row.
        output.elems = []
        for row in range(len(self.elems)):
            if row == ith_row:
                continue
            output.elems.append([])
            for col in range(len(self.elems[row])):
                if col == jth_col:
                    continue
                output.elems[-1].append(self.elems[row][col])
        return output

    def multiply_matrix(self, other):
        '''
        Summary:
            Computes the matrix product self @ other. Returns a matrix.
            (Eq. 4 in Michael Lam's ASTP720 Matrix notes.)
        Parameters
        ----------
        other : the matrix we will multiply self by.

        Bug fix: validity requires cols(self) == rows(other); the original
        compared against ``len(other.elems[1])`` (the column count of
        other's second row) and had the error message backwards.
        '''
        if len(self.elems[0]) != len(other.elems):
            exit("This matrix multiplication is not valid, the number of columns \
                in Matrix A must equal the number of rows in Matrix B. ")
        output = Matrix([self.shape[0], other.shape[1]])
        for row in range(len(output.elems)):
            for col in range(len(output.elems[row])):
                # Dot product of self's row with other's column.
                index_value = 0
                for k in range(len(self.elems[0])):
                    index_value += self.elems[row][k] * other.elems[k][col]
                output.elems[row][col] = index_value
        return output

    def multiply_constant(self, other):
        '''
        Summary:
            Computes the product of a scalar and self. Returns a matrix.
        Parameters
        ----------
        other : float/int, a constant we will multiply our matrix with.
        '''
        output = Matrix(self.shape)
        for row in range(len(self.elems)):
            for col in range(len(self.elems[row])):
                output.elems[row][col] = self.elems[row][col] * other
        return output

    def determinant(self):
        '''
        Summary:
            Computes the determinant via Laplace expansion along the first
            row. Returns a value.

        Bug fix: a 1x1 base case was added. The original recursed into an
        empty matrix and silently returned 0 for 1x1 inputs, which also
        made ``inverse`` return an all-zero matrix for every 2x2 matrix.
        '''
        if self.shape[0] == 1 and self.shape[1] == 1:
            return self.elems[0][0]
        # 2x2 determinant computed analytically. Suggested by:
        # https://integratedmlai.com/find-the-determinant-of-a-matrix-with-pure-python-without-numpy-or-scipy/
        if self.shape[0] == 2 and self.shape[1] == 2:
            return self.elems[0][0] * self.elems[1][1] - self.elems[1][0] * self.elems[0][1]
        output = 0
        starting_row = 0
        for col in range(len(self.elems)):
            # Cofactor sign is (-1)**(i + j) for 0-based indices.
            minor_determinant = self.residual(starting_row, col).determinant()
            cofactor = (-1) ** (starting_row + col) * minor_determinant
            output += cofactor * self.elems[starting_row][col]
        return output

    def inverse(self):
        '''
        Summary:
            Computes the inverse as adjugate / determinant. Returns a matrix.
            Raises ZeroDivisionError for singular matrices (determinant 0).
        '''
        output = Matrix(self.shape)
        determinant = self.determinant()
        # Hoisted out of the loop (the original recomputed the transpose
        # once per element).
        transposed = self.transpose()
        for row in range(len(self.elems)):
            for col in range(len(self.elems[row])):
                minor_determinant = transposed.residual(row, col).determinant()
                cofactor = (-1) ** (row + col) * minor_determinant
                output.elems[row][col] = cofactor / determinant
        return output

    def trace(self):
        '''
        Summary:
            Computes the trace (sum of the diagonal elements). Returns a value.
        '''
        output = 0
        for i in range(min(self.shape[0], self.shape[1])):
            output += self.elems[i][i]
        return output

    def LU_decomposition(self):
        '''
        Summary:
            Doolittle LU decomposition with unit-diagonal L (Eqs. 28-29 in
            Michael Lam's Matrix notes). Returns two matrices - L and U.
            Assumes a square matrix with non-zero pivots (no pivoting).
        '''
        L = Matrix(self.shape)
        U = Matrix(self.shape)
        n = len(self.elems)
        for i in range(n):
            # U row i: u_ij = a_ij - sum_{k<i} l_ik * u_kj   (Eq. 29)
            for j in range(i, n):
                acc = 0
                for k in range(i):
                    acc += L.elems[i][k] * U.elems[k][j]
                U.elems[i][j] = self.elems[i][j] - acc
            # L column i: l_ji = (a_ji - sum_{k<i} l_jk * u_ki) / u_ii  (Eq. 28)
            for j in range(i, n):
                if j == i:
                    L.elems[i][i] = 1  # unit diagonal by convention
                    continue
                acc = 0
                for k in range(i):
                    acc += L.elems[j][k] * U.elems[k][i]
                L.elems[j][i] = (self.elems[j][i] - acc) / U.elems[i][i]
        return L, U
| brendrach/Computational_Astro_ASTP720 | Assignment_2/matrix.py | matrix.py | py | 10,263 | python | en | code | 2 | github-code | 13 |
7882722233 | '''
INFER GDI VALUES BY SIMULATING GENETREES UNDER THE MSC+M MODEL
'''
import re
import copy
import subprocess
import os
from .classes import BppCfile, BppCfileParam, GeneTrees, gdi, AlgoMode, MigrationRates
from .module_ete3 import Tree, TreeNode
from .module_helper import readlines, dict_merge, get_bundled_bpp_path
from .module_bpp import bppcfile_write
from .module_tree import get_attribute_filtered_tree
## INFERENCE OF GDI FROM GENETREES
'''
The functions in this section are responsible for outputting the list of 10^6 gene tree topologies with
associated branch lengths.
A fully specified MSC+M model consists of the following:
1) Tree topology
2) Branch lengths (tau)
3) Effective population sizes (theta)
4) Source and destination of migration events
5) Rate corresponding to each migration event (M)
Such a model defines the joint distribution of gene tree topolgies and coalescence times,
and simulation can be used to sample this distribution.
'''
def tree_to_extended_newick(
    tree: Tree
    ) -> str:
    '''
    Render 'tree' (an ete3 Tree whose nodes carry 'tau' and 'theta'
    attributes) as an extended newick string in which divergence times
    follow ':' and population size parameters follow '#'.
    '''
    # Serialise the topology together with the tau/theta node features.
    newick = tree.write(features = ['tau', 'theta'], format=1)

    # Rewrite the NHX feature annotations into the ' :tau #theta' notation,
    # dropping placeholder branch lengths and empty tau values. These
    # substitutions are order sensitive and must stay in this sequence.
    for pattern, replacement in [
        (r':1\[&&NHX', ''),
        (':theta=',    ' #'),
        (':tau=None',  ''),
        (':tau=',      ' :'),
        (r'\]',        ''),
        (r'\)',        ') '),
        (r'\(',        ' ('),
    ]:
        newick = re.sub(pattern, replacement, newick)

    # ete3 omits the root node's data, so splice it in before the terminator.
    root = tree.get_tree_root()
    newick = re.sub(';', f'{root.name} :{root.tau} #{root.theta};', newick)

    return newick
# default parameters for a 'bpp --simulate' control file used for simulating gene trees
# The None entries (species&tree, popsizes, newick) are filled in per run by
# create_simulate_cfile; 'loci&length' is "<number of loci> <sites per locus>".
default_BPP_simctl_dict:BppCfileParam = {
    'seed': '1111',
    'treefile': 'MyTree.tre',
    'Imapfile': 'MyImap.txt',
    'species&tree': None,
    'popsizes': None,
    'newick': None,
    'loci&length': '1000000 500',
    }
# create the bpp --simulate cfile for simulating gene trees
def create_simulate_cfile(
    tree: Tree,
    mode: AlgoMode,
    migration_df: MigrationRates
    ) -> None: # writes control file to disk
    '''
    Write the 'bpp --simulate' control file ("sim_ctl.ctl") describing the
    MSC+M model whose gene trees will be sampled.

    - 'tree' is an ete3 Tree object.
    - 'mode' tells whether the algorithm runs in merge or split mode.
    - 'migration_df' holds source, destination and rate (M) of every
      migration event; it is appended verbatim as the migration block.

    Every population samples two sequences, which is what the gene-tree
    based gdi estimation ('get_gdi_from_sim') relies on.
    '''
    # Keep only the populations relevant for the current mode.
    filtered_tree = get_attribute_filtered_tree(tree, mode, newick=False)
    pop_names = {leaf.name for leaf in filtered_tree}

    # Parameters derived from the filtered tree.
    derived = {
        'species&tree': f'{len(pop_names)} {" ".join(pop_names)}',
        'popsizes': f' {"2 "*len(pop_names)}',
        'newick': tree_to_extended_newick(filtered_tree),
    }

    # Merge onto a copy of the defaults and write the control file.
    full_params = dict_merge(copy.deepcopy(default_BPP_simctl_dict), derived)
    bppcfile_write(full_params, "sim_ctl.ctl")

    # Append the migration events (count first, then the source/dest/rate table).
    migration_block = (
        f'migration = {len(migration_df["source"])}\n'
        f' {migration_df.to_string(header = False, index = False)}'
    )
    with open("sim_ctl.ctl", "a") as cfile:
        cfile.write(migration_block)
def run_BPP_simulate(
    control_file: BppCfile,
    ) -> None: # handles the bpp subprocess
    '''
    Use 'bpp --simulate' to sample gene trees from a given MSC+M model
    '''
    # Transient status line: '\r' leaves the cursor at the line start so the
    # message can be overwritten once the simulation finishes.
    print(f"\ninferring gdi using gene tree simulation...", end="\r")
    # runs BPP in a dedicated subprocess
    # (shell invocation of the bundled bpp binary; stdout and stderr are
    # merged and decoded as line-buffered utf-8 text)
    process = subprocess.Popen(
        f"{get_bundled_bpp_path()} --simulate {control_file}",
        shell = True,
        bufsize = 1,
        stdout = subprocess.PIPE,
        stderr = subprocess.STDOUT,
        encoding = 'utf-8',
        errors = 'replace'
        )
    # this is necessary so that the program does not hang while the simulations are completing
    # (the output is read and discarded; only process termination matters here)
    while True:
        realtime_output = process.stdout.readline()
        # exit if process is stopped
        if realtime_output == '' and process.poll() is not None:
            print("                                                           ", end="\r")
            break
# final wrapper function to simulation gene trees according to the given MSC+M model
def genetree_simulation(
    tree: Tree,
    mode: AlgoMode,
    migration_df: MigrationRates
    ) -> GeneTrees:
    '''
    Handle the file system operations and bpp control file creation needed
    to simulate gene trees. Return the simulated gene trees as a list.

    Bug fix: the working directory is now restored in a ``finally`` block,
    so an error during the simulation no longer leaves the process stranded
    inside 'genetree_simulate'.
    '''
    start_dir = os.getcwd()
    os.mkdir('genetree_simulate')
    os.chdir('genetree_simulate')
    try:
        create_simulate_cfile(tree, mode, migration_df)
        run_BPP_simulate('sim_ctl.ctl')
        genetree_lines = readlines('MyTree.tre')
        os.remove('MyTree.tre') # delete the tree file (it is very large)
    finally:
        os.chdir(start_dir)
    return genetree_lines
def get_gdi_from_sim(
    node: "TreeNode",
    genetree_lines: "GeneTrees"
    ) -> "gdi":
    '''
    Estimate the gdi of a given TreeNode from the simulated gene trees.

    The gdi is "the probability that the first coalescence is between the
    two A sequences and it happens before reaching species divergence when
    we trace the genealogy backwards in time". With two sequences simulated
    per population, it is estimated as the proportion of gene trees in
    which the two sequences of this population coalesce with each other
    before the divergence time (tau) of the parent node.

    Bug fixes relative to the original:
    - raw strings are used for the regexes and the literal dot in the time
      pattern is escaped;
    - when no gene tree shows the required topology the function returns
      0.0 instead of crashing on an empty list.
    '''
    node_name = str(node.name)
    # tau of the parent = divergence time between this population and its sister.
    tau_AB = node.up.tau
    low = node_name.lower()
    # Subtrees such as (a1^A:0.123456,a2^A:0.234567): the two sequences of
    # this population coalesce directly with each other.
    pattern = (
        rf'\({low}[12]\^{node_name}:0\.\d{{6}},'
        rf'{low}[12]\^{node_name}:0\.\d{{6}}\)'
    )
    matches = (re.search(pattern, line) for line in genetree_lines)
    subtrees = [m.group(0) for m in matches if m]
    if not subtrees:
        # No locus showed the required topology -> gdi estimate of 0.
        return 0.0
    # The coalescence time is the first '0.dddddd' inside the matched
    # subtree; its position is identical across matches because the
    # population name is fixed.
    start, end = re.search(r'0\.\d{6}', subtrees[0]).span()
    times = [float(s[start:end]) for s in subtrees]
    before_split = [t for t in times if t < tau_AB]
    # gdi = proportion of ALL simulated loci showing this early coalescence.
    node_gdi = len(before_split) / len(genetree_lines)
    return round(node_gdi, 2)
10008771909 | # -*- coding: utf-8 -*-\
import psutil
import socket
import time
import datetime
import array
import redis
import pyodbc
import telnetlib
# telnet
HOST = "localhost"
PORT = 1433
TIMEOUT = 1
t = telnetlib.Telnet()
# sql server
sqlstr01 = "SELECT isnull(datediff(ss,min(dtd.database_transaction_begin_time),getdate()),0) as opentransec FROM sys.dm_tran_database_transactions dtd with(nolock) ,sys.dm_tran_session_transactions dts with(nolock) where dtd.transaction_id = dts.transaction_id and ( (database_transaction_state = 4 and is_user_transaction =1) or (database_transaction_state = 3 and is_user_transaction =0) )"
sqlstr02 = "SELECT count(9) as counts from sys.sysprocesses with(nolock) where blocked>0"
conn = pyodbc.connect('DRIVER={SQL Server};SERVER=localhost;DATABASE=master;')
cursor = conn.cursor()
#redis
pool = redis.ConnectionPool(host="redis.devops.pousheng.com", port=6379, password="redis")
r = redis.Redis(connection_pool=pool)
clientname = socket.gethostname()
# NOTE(review): 'a' is never incremented, so this loop runs forever; confirm
# whether a bounded run of 28 iterations (with 'a = a + 1') was intended.
a=0
while a<28 :
    # Snapshot CPU and memory usage for this host.
    cpupercent = psutil.cpu_percent(0)
    localtime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
    memtotal = psutil.virtual_memory().total
    memfree = psutil.virtual_memory().free
    # disk
    # Collect size/free-space stats for every NTFS partition.
    disklist=[]
    disks = psutil.disk_partitions()
    for disk in disks:
        disklabel = disk.mountpoint
        disktype = disk.fstype
        if disktype == 'NTFS' :
            disksize = psutil.disk_usage(disklabel)
            disktotal = disksize.total
            diskfree = disksize.free
            diskfreepercent = 100 - disksize.percent
            diskdic = {"disklabel":disklabel,"disktotal":disktotal,"diskfree":diskfree,"diskfreepercent":diskfreepercent}
            disklist.append(diskdic)
    # opentran
    # Age (seconds) of the oldest open transaction; -1 means unknown/error.
    opentrantime = -1
    sql01check='0'
    telentcheck=0
    try:
        cursor.execute( sqlstr01 )
        rows01 = cursor.fetchall()
        for row01 in rows01:
            opentrantime = row01.opentransec
    except Exception as errsub:
        sql01check = errsub.args[0]
    if sql01check != '0':
        # Query failed: probe the SQL Server port; if it answers, reconnect
        # and retry once, otherwise report -1.
        try:
            t.open(HOST,PORT,TIMEOUT)
        except Exception as err:
            telentcheck = 1
        if telentcheck==1:
            opentrantime = -1
        else:
            try:
                conn = pyodbc.connect('DRIVER={SQL Server};SERVER=localhost;DATABASE=master;')
                cursor = conn.cursor()
                cursor.execute( sqlstr01 )
                rows01 = cursor.fetchall()
                for row01 in rows01:
                    opentrantime = row01.opentransec
            except Exception as err:
                opentrantime = -1
    # block
    # Number of blocked sessions; same probe-and-retry pattern as above.
    blocktime = -1
    sql02check='0'
    telentcheck=0
    try:
        cursor.execute( sqlstr02 )
        rows02 = cursor.fetchall()
        for row02 in rows02:
            blocktime = row02.counts
    except Exception as errsub:
        sql02check = errsub.args[0]
    if sql02check != '0':
        try:
            t.open(HOST,PORT,TIMEOUT)
        except Exception as err:
            telentcheck = 1
        if telentcheck==1:
            blocktime = -1
        else:
            try:
                conn = pyodbc.connect('DRIVER={SQL Server};SERVER=localhost;DATABASE=master;')
                cursor = conn.cursor()
                cursor.execute( sqlstr02 )
                rows02 = cursor.fetchall()
                for row02 in rows02:
                    blocktime = row02.counts
            except Exception as err:
                blocktime = -1
    # Publish the collected metrics to redis under this host's name; back
    # off for 20s on failure.
    # NOTE(review): redis-py 3.x expects hset values to be str/bytes/int —
    # passing a dict may raise DataError; confirm the client version in use.
    try:
        uptimelist = r.time()
        uptime =uptimelist[0]
        dic = {"cpu":cpupercent,"block":blocktime,"opentran":opentrantime,"mem":{"memtotal":memtotal,"memfree":memfree},"disks":disklist,"uptime":uptime}
        r.hset("noprd",clientname,dic)
    except Exception as err:
        time.sleep(20)
    time.sleep(2)
| zhangjiongcn/BR | Client/win/ccidbv2.py | ccidbv2.py | py | 3,893 | python | en | code | 0 | github-code | 13 |
23145540323 | import pyrebase
import os
from datetime import datetime
# Firebase project credentials. Deliberately left empty in this copy — fill
# in apiKey, authDomain, storageBucket, etc. before use, otherwise the
# pyrebase calls below will fail.
firebaseConfig = {
}
def initFirebase():
    """Initialise and return the pyrebase app built from firebaseConfig."""
    return pyrebase.initialize_app(firebaseConfig)
def getStorage(firebase):
    """Return the cloud-storage handle of an initialised firebase app."""
    return firebase.storage()
def uploadImages(firebase, ip):
    """Upload every file in ./images to a timestamped cloud-storage folder
    and return the cloud directory path that was used."""
    bucket = getStorage(firebase)
    local_files = os.listdir('./images')
    # One remote folder per upload batch, keyed by client ip and timestamp.
    now = datetime.now()
    cloud_dir = f"/images/{ip}-{now.strftime('%d:%b:%Y')}-{now.strftime('%H:%M:%S.%f')}/"
    print(local_files)
    for filename in local_files:
        bucket.child(cloud_dir + filename).put('./images/' + filename)
    return cloud_dir
| SyedAhris/folio3GANsFastAPI | app/firebase.py | firebase.py | py | 620 | python | en | code | 0 | github-code | 13 |
40244845245 | import unittest
from unittest.mock import MagicMock
from proto_pb2.links.links_pb2 import CreateLinksRequest, ReadLinksRequest, UpdateLinksRequest, DeleteLinksRequest
from server_functions.servicers.links_servicer import LinksServicer
class TestCreateRecordLinks(unittest.TestCase):
    """Unit tests for LinksServicer.CreateRecordLinks."""

    def setUp(self):
        # A fresh servicer per test so state cannot leak between cases.
        self.servicer = LinksServicer()

    def test_create_record_links_success(self):
        # Prepare a valid request.
        # Bug fix: the original referenced `servicer.CreateLinksRequest`,
        # an undefined name; the message class is imported directly.
        valid_request = CreateLinksRequest(
            projectId=1,
            token="example_token",
            status=1
        )

        # Mock the Session context manager to avoid real database operations.
        # TODO(review): 'your_module.Session' is a template placeholder —
        # patch the module that LinksServicer actually imports Session from.
        with unittest.mock.patch('your_module.Session') as mock_session:
            # Set up the mock session returned by the context manager.
            session_mock = MagicMock()
            mock_session.return_value.__enter__.return_value = session_mock

            # Mock the execute and commit methods.
            session_mock.execute.return_value = None
            session_mock.commit.return_value = None

            # Call the method with the valid request.
            response = self.servicer.CreateRecordLinks(valid_request, None)

            # Assert the success and message fields in the response.
            self.assertTrue(response.success)
            self.assertEqual(response.message[0], "Record created")

    def test_create_record_links_missing_required_field(self):
        # Prepare a request with the required projectId field missing.
        # Bug fix: the original referenced `links_pb2.CreateLinksRequest`,
        # but only the class itself is imported at the top of the file.
        invalid_request = CreateLinksRequest(
            token="example_token",
            status=1
        )

        response = self.servicer.CreateRecordLinks(invalid_request, None)

        # The servicer must flag the missing field, not create a record.
        self.assertFalse(response.success)
        self.assertIn("Error: <projectId> is required", response.message[0])

    def test_create_record_links_invalid_status(self):
        # Prepare a request with an invalid status (only 0 and 1 are allowed).
        invalid_request = CreateLinksRequest(
            projectId=1,
            token="example_token",
            status=2
        )

        response = self.servicer.CreateRecordLinks(invalid_request, None)

        self.assertFalse(response.success)
        self.assertIn("Error: <status> cannot be 2. Only allowed values - 0 or 1", response.message[0])
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| YaroslavaShyt/API | server_functions/servicers/tests/test_links_servicer.py | test_links_servicer.py | py | 2,690 | python | en | code | 0 | github-code | 13 |
39109595492 |
import numpy as np
import tensorflow as tf
from gym import utils
from gym.envs.mujoco import mujoco_env
from asynch_mb.meta_envs.base import MetaEnv
class InvertedPendulumEnv(mujoco_env.MujocoEnv, utils.EzPickle, MetaEnv):
    """Gym/MuJoCo inverted-pendulum environment with a squared-angle penalty reward."""
    def __init__(self):
        utils.EzPickle.__init__(self)
        # frame_skip=2: each env step advances the simulation two frames.
        mujoco_env.MujocoEnv.__init__(self, 'inverted_pendulum.xml', 2)
    def step(self, a):
        # reward = 1.0
        # NOTE: the reward is computed from the observation *before* the
        # simulation step (see _get_reward, which reads the current obs).
        reward = self._get_reward()
        self.do_simulation(a, self.frame_skip)
        ob = self._get_obs()
        # notdone = np.isfinite(ob).all() and (np.abs(ob[1]) <= .2)
        # done = not notdone
        # Episodes never terminate early here; horizon handling is external.
        done = False
        return ob, reward, done, {}
    def reset_model(self):
        # Restart near the initial state with small uniform noise on both
        # positions and velocities.
        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-0.01, high=0.01)
        qvel = self.init_qvel + self.np_random.uniform(size=self.model.nv, low=-0.01, high=0.01)
        self.set_state(qpos, qvel)
        return self._get_obs()
    def _get_reward(self):
        # Penalise the squared value of obs[1] (presumably the pole hinge
        # angle qpos[1] — confirm against inverted_pendulum.xml).
        old_ob = self._get_obs()
        reward = -((old_ob[1]) ** 2)
        return reward
    def _get_obs(self):
        # Observation = concatenated joint positions and velocities.
        return np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).ravel()
    def viewer_setup(self):
        v = self.viewer
        v.cam.trackbodyid = 0
        v.cam.distance = self.model.stat.extent
    def reward(self, obs, acts, next_obs):
        # Batched (model-based) version of the reward for arrays of transitions.
        assert obs.ndim == 2
        assert obs.shape == next_obs.shape
        assert obs.shape[0] == acts.shape[0]
        return -(obs[:, 1]) ** 2
    def tf_reward(self, obs, acts, next_obs):
        # TensorFlow counterpart of reward(), for use inside computation graphs.
        return - tf.square(obs[:, 1])
# Manual smoke test: render the environment while taking random actions.
if __name__ == "__main__":
    env = InvertedPendulumEnv()
    env.reset()
    for _ in range(1000):
        _ = env.render()
        ob, rew, done, info = env.step(env.action_space.sample())  # take a random action
| zzyunzhi/asynch-mb | asynch_mb/envs/mb_envs/inverted_pendulum.py | inverted_pendulum.py | py | 1,815 | python | en | code | 12 | github-code | 13 |
31843765421 | import re
import sys
import requests
if __name__ == "__main__":
    # Scan the file given as argv[1] for URLs; exit 1 if any link is dead.
    fp = sys.argv[1]
    bad_links = []
    # Raw string avoids invalid-escape warnings; pattern matches
    # http/https/ftp URLs embedded anywhere in the text.
    url_pattern = re.compile(r'(?:http|ftp|https):\/\/[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-]?')
    with open(fp) as f:
        for x in url_pattern.findall(f.read()):
            print(x)
            # Per-URL error handling: one unreachable link no longer aborts
            # the rest of the scan (the original bailed out of the loop).
            try:
                # Bug fix: check the HTTP status instead of
                # re.match('404', body) — error pages rarely *start* with
                # the literal text "404".
                if requests.get(x).status_code >= 400:
                    bad_links.append(x)
            except requests.RequestException:
                bad_links.append(x)
    sys.exit(1 if len(bad_links) != 0 else 0)
| oliverclark15/continuous-cv | deadlinker.py | deadlinker.py | py | 459 | python | en | code | 0 | github-code | 13 |
70756536659 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Программа pcad_univ_tk.py - графическая оболочка для
программы-модуля pcad_univ_cp.py (командной строки) для получения
заготовок файлов "Перечня элементов" и "Спецификации" из PCB-файла. """
# Автор: Л.М.Матвеев
import common
common.pyscr = True # False # 'pyscripter' in dir()
from common import trm_code, pyscr
import sys
#------------------------------------------------------------------------------
def printm(s):
    """ Print Unicode strings to the terminal and to the GUI log widget.
        No newline is appended automatically.
        (Docstring translated from Russian.) """
    if pyscr:# or True:
        sys.stdout.write(s)
    else:
        # NOTE(review): writing bytes to sys.stdout fails on Python 3 —
        # this branch looks like a Python 2 leftover; confirm it is dead
        # (common.pyscr is forced to True at import time above).
        sys.stdout.write(s.encode(trm_code))
    if prn_only:
        # Console-only mode: skip the Tk text widget entirely.
        return
    txt.insert(END, s)
    txt.yview(END)
    root.update()
#..............................................................................
prn_only = True
# Override the print function used by the command-line module
# (comment translated from Russian: "redefinition of the print function").
common.printm = printm
from os.path import join, split #, splitext, exists
from tkinter import *
from tkinter import filedialog
# from Tkinter import *
# import tkFileDialog # filedialog
# Restore the previously saved GUI state; fall back to defaults on first
# run (ini.py is rewritten by save_old_values on exit).
try:
    from ini import last_open, last_check
    # Re-binding keeps the names as module-level globals of this module.
    last_open = last_open
    last_check = last_check
except ImportError:
    last_open = ''#splitext(__file__)[0] + '.pcb'
    last_check = ' '
import pcad_univ_cp
#------------------------------------------------------------------------------
def save_old_values():
    """Persist the current GUI state (last opened file and selected mode)
    into ini.py next to this script, then report what was written."""
    encoding = 'utf-8'
    # ini.py is a small generated Python module re-imported on next start.
    content = (
        '# -*- coding: %s -*-\n\n' % encoding
        + 'last_open = r"%s"\n\n' % last_open
        + 'last_check = "%s"\n\n' % mode_str.get()
    )
    ini_file_name = join(split(__file__)[0], 'ini.py')
    with open(ini_file_name, 'wb') as ini_file:
        ini_file.write(content.encode(encoding))
    printm('\n Сохранение файла состояния произведено.\n')
    printm(' %s\n' % ini_file_name.replace('/', '\\'))
#..............................................................................
#------------------------------------------------------------------------------
# Main window with a read-only log area.
root = Tk()
root.title(' Нестандартные операции с PCB-файлами.')
root.resizable(False, False) # disable window resizing (translated comment)
fra1 = LabelFrame(root, text=' Лог операций ', labelanchor='n')
fra1.pack()
txt = Text(fra1, font="Verdana 10")
scr = Scrollbar(fra1, command=txt.yview)
txt.configure(yscrollcommand=scr.set)
scr.grid(row=0, column=1, sticky=NS, padx=3, pady=3)
txt.grid(row=0, column=0, padx=3, pady=4)
# Holds the currently selected file path shown in the entry widget below.
edval = StringVar(value=last_open)
#------------------------------------------------------------------------------
def select_file():
    """ Open a file-selection dialog and remember the chosen file.
        (Docstring translated from Russian.) """
    global last_open
    fp, fne = split(last_open)
    if not fp:
        fp = None
    if not fne:
        fne = None
    # The allowed file types depend on the selected mode: 'copy_atr' also
    # accepts schematic (*.SCH) files.
    fpne2 = filedialog.askopenfilename(initialdir=fp,
                  title = 'Выбор исходного файла',
                  filetypes = [('PCB-файл', '*.PCB')]
                              if mode_str.get() != 'copy_atr'
                              else [('PCAD-файл', ('*.SCH', '*.PCB')),],
                  initialfile = fne)
    if fpne2:
        # Normalise to Windows-style separators and show it in the entry box.
        last_open = fpne2.replace('/', '\\')
        edval.set(last_open)
        edFN.xview_moveto(1)
#..............................................................................
#------------------------------------------------------------------------------
# Frame with one radio button per supported PCB operation; each entry maps
# an operation id to its (Russian) label and grid position.
fra42 = LabelFrame(root, labelanchor='n', text=' Дополнительно ')
fra42.pack(side=LEFT)
rButts = {'sbor':{'t':'Две стороны без трасс', 'x':1, 'y':0},
          'sbor_cu':{'t':'Две стороны', 'x':0, 'y':0},
          'flip':{'t':'Отзеркаливание', 'x':0, 'y':1},
          'clear':{'t':'Очистка к изготовлению', 'x':1, 'y':1},
          'clear_nets':{'t':'Удаление цепей', 'x':0, 'y':2},
          'rd2_assy':{'t':'RefDes2 на Assy', 'x':1, 'y':2},
          'gerb_opt':{'t':'Опции Герберов', 'x':0, 'y':3},
          'copy_atr':{'t':'copy Attr', 'x':1, 'y':3},
          'add_rd2':{'t':'add RefDes2', 'x':0, 'y':4},
          #'del_rd2':{'t':'del RefDes2', 'x':1, 'y':4},
          'add_rd5':{'t':'add RefDes5', 'x':0, 'y':5},
          'del_rd5':{'t':'del RefDes5', 'x':1, 'y':5},
          }
# mode_str carries the selected operation id; restored from the last session.
mode_str = StringVar(value=last_check)
for k, v in rButts.items():
    rb = Radiobutton(fra42, text=v['t'], variable=mode_str, value=k)
    rb.grid(row=v['y'], column=v['x'], sticky=W)
    # rb.bind('<Button-1>')
    v['Radiobutton'] = rb
#..............................................................................
#------------------------------------------------------------------------------
# File-name row: '<<' opens the file dialog, the read-only entry shows the
# current path, and the run button hands file + mode to pcad_univ_cp.
fra3 = LabelFrame(root, text=' Имя файла ', labelanchor='n')
fra3.pack()
btSel = Button(fra3, text='<<', command=select_file)
#btSel.bind('<Button-1>')
btSel.grid(row=0, column=2, padx=3, pady=3)
ed_width = (txt['width'] - btSel['width'] - 10)
edFN = Entry(fra3, bd=2, textvariable=edval, width=38, state='readonly')
edFN['readonlybackground'] = edFN['background']
edFN.grid(row=0, column=0, padx=3, sticky=EW)
edFN.xview_moveto(1)
btRun = Button(fra3, text=u"Выполнить", width=10,
               command=lambda : pcad_univ_cp.run_mode(mode_str.get(), edval.get()))
#btRun.bind('<Button-1>')
btRun.grid(row=0, column=3, padx=3)
#..............................................................................
# Run the GUI; printm echoes into the log widget only while prn_only is False.
prn_only = False # not prn_only #
print(1)
root.mainloop()
print(2)
prn_only = True
# Persist the selected file and mode for the next session.
save_old_values()
| leonid-matvieiev/pcad_univ | pcad_univ_tk.py | pcad_univ_tk.py | py | 5,975 | python | ru | code | 0 | github-code | 13 |
18888050420 | import pytest
from unittest import TestCase
from gensim.models import LdaModel, Nmf, LsiModel
from ..src.model.tm.tm_train import (
build_gensim_model,
compute_coherence_score,
evaluate_topic_models)
import pandas as pd
from ..src.preprocessing.rawdata_preprocessing import PREPROCESS_RAW
from ..src.preprocessing.tm_preprocessing import TM_PREPROCESS_TRAIN
@pytest.fixture
def generate_variables():
    """Build a tiny two-review corpus, run it through the raw and
    topic-model preprocessing pipelines, and return all artefacts."""
    df = pd.DataFrame({
        'Sentiment': ['positive', 'negative'],
        'Time': ['18/6/21', '29/7/19'],
        'Text': [
            'This is a very healthy dog food. Good for their digestion.',
            'THis product is definitely not as good as some other gluten free cookies!',
        ],
    })
    df = PREPROCESS_RAW(df)
    bow_dict, bow_corpus, tfidf_model, tfidf_corpus = TM_PREPROCESS_TRAIN(df)
    # Recover the lemmatised tokens from the bag-of-words (id, count) pairs.
    lemma_text = [[bow_dict[token_id] for token_id, _ in doc] for doc in bow_corpus]
    num_topics = [2]
    return bow_dict, bow_corpus, tfidf_model, tfidf_corpus, lemma_text, num_topics
class BuildGensimModelTests(TestCase):
    """build_gensim_model should return the gensim class matching each
    supported model name and reject unknown names."""
    @pytest.fixture(autouse=True)
    def init_vars(self, generate_variables):
        # Bridge the pytest fixture into this unittest.TestCase by copying
        # the preprocessed corpus artefacts onto the instance.
        bow_dict, bow_corpus, tfidf_model, tfidf_corpus, \
            lemma_text, num_topics = generate_variables
        self.bow_dict = bow_dict
        self.bow_corpus = bow_corpus
        self.tfidf_model = tfidf_model
        self.tfidf_corpus = tfidf_corpus
        self.lemma_text = lemma_text
        self.num_topics = num_topics
    def test_build_lda_model(self):
        # 'lda' must yield a gensim LdaModel.
        model = build_gensim_model(
            'lda', self.bow_corpus, self.bow_dict, self.num_topics[0])
        assert isinstance(model, LdaModel)
    def test_build_nmf_model(self):
        # 'nmf' must yield a gensim Nmf model.
        model = build_gensim_model(
            'nmf', self.bow_corpus, self.bow_dict, self.num_topics[0])
        assert isinstance(model, Nmf)
    def test_build_lsa_model(self):
        # 'lsa' must yield a gensim LsiModel.
        model = build_gensim_model(
            'lsa', self.bow_corpus, self.bow_dict, self.num_topics[0])
        assert isinstance(model, LsiModel)
    def test_build_non_gensim_model(self):
        # Unknown model names must raise ValueError.
        with pytest.raises(ValueError):
            build_gensim_model('bert', self.bow_corpus, self.bow_dict, self.num_topics[0])
@pytest.mark.parametrize(
    'model_name', ['lda', 'nmf', 'lsa']
)
def test_compute_coherence_score(generate_variables, model_name):
    """Every supported model type must produce a float coherence score."""
    bow_dict, bow_corpus, _, _, \
        lemma_text, num_topics = generate_variables
    score = compute_coherence_score(
        model_name, bow_corpus, bow_dict, lemma_text, num_topics[0])
    assert isinstance(score, float)
def test_evaluate_topic_models(generate_variables):
    """evaluate_topic_models must score every (model, corpus) combination:
    3 model types x 2 corpus encodings -> 6 result rows, 4 columns."""
    bow_dict, bow_corpus, tfidf_model, tfidf_corpus, \
        lemma_text, num_topics = generate_variables
    corpuses = dict(bow=bow_corpus, tfidf=tfidf_corpus)
    results = evaluate_topic_models(
        ['lda', 'nmf', 'lsa'], corpuses, bow_dict, lemma_text, num_topics)
    assert results.shape == (6, 4)
| nivii26/DSA4263-Voice-of-Customer-VOC-analysis | root/unit_testing/test_tm_train.py | test_tm_train.py | py | 3,049 | python | en | code | 2 | github-code | 13 |
12329827648 | #
# @lc app=leetcode.cn id=399 lang=python
#
# [399] 除法求值
#
# @lc code=start
class Solution(object):
    def calcEquation(self, equations, values, queries):
        """
        :type equations: List[List[str]]
        :type values: List[float]
        :type queries: List[List[str]]
        :rtype: List[float]

        Weighted union-find: each node stores the ratio of its value to its
        parent's value, so any two variables in the same component can be
        divided through their common root. Unknown variables or variables
        in different components answer -1.0.
        """
        class WeightedUnionFind(object):
            def __init__(self, size):
                self.parent = list(range(size))
                # ratio[i] == value(i) / value(parent(i))
                self.ratio = [1.0] * size

            def find(self, node):
                # Path compression: point node at the root and fold the
                # ratios along the path into a single factor.
                if self.parent[node] != node:
                    father = self.parent[node]
                    root = self.find(father)
                    self.ratio[node] *= self.ratio[father]
                    self.parent[node] = root
                return self.parent[node]

            def union(self, x, y, quotient):
                # Record x / y == quotient by attaching x's root under y's.
                root_x = self.find(x)
                root_y = self.find(y)
                if root_x != root_y:
                    self.parent[root_x] = root_y
                    self.ratio[root_x] = self.ratio[y] * quotient / self.ratio[x]

            def query(self, x, y):
                if x is None or y is None:
                    return -1.0
                if self.find(x) != self.find(y):
                    return -1.0
                return self.ratio[x] / self.ratio[y]

        uf = WeightedUnionFind(2 * len(equations))
        # Map each variable name to a dense integer id.
        ids = {}
        for (lhs, rhs), quotient in zip(equations, values):
            if lhs not in ids:
                ids[lhs] = len(ids)
            if rhs not in ids:
                ids[rhs] = len(ids)
            uf.union(ids[lhs], ids[rhs], quotient)

        results = []
        for lhs, rhs in queries:
            if lhs in ids and rhs in ids:
                results.append(uf.query(ids[lhs], ids[rhs]))
            else:
                results.append(-1.0)
        return results
# @lc code=end
# Manual smoke test: expected output is [0.5, 2.0, -1.0, -1.0].
if __name__ == "__main__":
    solution = Solution()
    equations = [["a","b"]]
    values = [0.5]
    queries = [["a","b"],["b","a"],["a","c"],["x","y"]]
    answers = solution.calcEquation(equations, values, queries)
    print(answers)
| Llunch4w/leetcode-cn | 399.除法求值.py | 399.除法求值.py | py | 2,656 | python | en | code | 0 | github-code | 13 |
10669336465 | import os
import click
from flask import Flask
from flask_login import current_user
from todoism.blueprints.home import home_bp
from todoism.blueprints.auth import auth_bp
from todoism.blueprints.todo import todo_bp
from todoism.extensions import babel, db, login_manager, csrf
from todoism.settings import config
from todoism.models import Item
from todoism.apis.v1 import api_v1
def create_app(config_name=None):
    """Application factory: build and configure the todoism Flask app.

    ``config_name`` selects an entry from ``config``; when omitted it
    falls back to the FLASK_CONFIG environment variable, then 'development'.
    """
    if config_name is None:  # bug fix: identity test instead of '== None'
        config_name = os.getenv('FLASK_CONFIG', 'development')
    app = Flask('todoism')
    app.config.from_object(config[config_name])
    register_blueprints(app)
    register_extensions(app)
    # Bug fix: these helpers were defined below but never invoked, leaving
    # the template context processor, error handlers and CLI commands dead.
    register_template_context(app)
    register_error(app)
    register_commands(app)
    return app
def register_blueprints(app):
    """Attach all view blueprints plus the versioned REST API to *app*."""
    app.register_blueprint(home_bp)
    app.register_blueprint(auth_bp)
    app.register_blueprint(todo_bp)
    app.register_blueprint(api_v1, url_prefix='/api/v1')
def register_extensions(app):
    """Bind the shared Flask extensions (babel, db, login, csrf) to *app*."""
    babel.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    csrf.init_app(app)
def register_template_context(app):
    """Expose ``active_items`` (count of the current user's unfinished
    items, or None when anonymous) to every rendered template.

    NOTE(review): this helper is defined but not called from create_app —
    wire it in, or templates never receive ``active_items``.
    """
    @app.context_processor
    def make_template_context():
        if current_user.is_authenticated:
            active_items = Item.query.with_parent(current_user).filter_by(done=False).count()
        else:
            active_items = None
        return dict(active_items=active_items)
def register_error(app):
    """Placeholder for error-handler registration; currently does nothing."""
    pass
def register_commands(app):
    """Register custom ``flask`` CLI commands on *app*."""
    # No docstring is added to initdb on purpose: click would surface it as
    # the command's help text, changing CLI output.
    @app.cli.command()
    @click.option('--drop', is_flag=True, help='Create after drop.')
    def initdb(drop):
        if drop:
            # Destructive path: require interactive confirmation first.
            click.confirm('are you sure?', abort=True)
            db.drop_all()
            click.echo('Drop tables.')
        db.create_all()
        click.echo('Initialized database.')
35381091215 | def solution(phone_book):
answer = True
# print(phone_book)
# map을 만듦
# map 안에 element가 존재하는지 여부 확인
# dictOfPhone = { i : 1 for i in phone_book }
dictOfPhone = {}
for phone in phone_book:
phone_len = len(phone)
for i in range(1, phone_len+1):
# print(phone[0:i])
if phone[0:i] in dictOfPhone:
dictOfPhone[phone[0:i]] += 1
else:
dictOfPhone[phone[0:i]] = 1
# print(dictOfPhone)
for phone in phone_book:
# print(phone)
if dictOfPhone[phone] > 1:
answer = False
break
return answer | gitJaesik/algorithm_archive | programmers/전화번호_목록/python.py | python.py | py | 707 | python | en | code | 0 | github-code | 13 |
36256597633 | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from poke_api import get_poke_info
# Create the main application window (non-resizable).
root = Tk()
root.title("Pokemon Info Viewer")
root.resizable(False, False)
# Layout: a top frame for the search bar, two labelled frames below it.
frm_top = ttk.Frame(root)
frm_top.grid(row=0, column=0, columnspan=2, padx=10, pady=10)
frm_btm_left = ttk.LabelFrame(root, text='Info')
frm_btm_left.grid(row=1, column=0, sticky=N, padx=(15,0))
frm_btm_right = ttk.LabelFrame(root, text='Stats')
frm_btm_right.grid(row=1, column=1, sticky=N, padx=10, pady=(0,10))
# Top frame widgets: prompt label plus the name entry box.
lbl_name = ttk.Label(frm_top, text='Pokemon name:')
lbl_name.grid(row=0, column=0)
ent_name = ttk.Entry(frm_top)
ent_name.grid(row=0, column=1, padx=10)
def get_pokeinfo_btn_click():
    """Look up the Pokemon named in the entry box and refresh both panels."""
    # Read the requested name; an empty entry is silently ignored.
    poke_name = ent_name.get().strip()
    if poke_name == '':
        return
    # Fetch the record through the PokeAPI wrapper.
    poke_info = get_poke_info(poke_name)
    if poke_info is None:
        error_occured = f"Information about {poke_name} Pokemon is not available in the Pokeapi."
        messagebox.showinfo(title='Error', message=error_occured, icon='error')
        return
    # Info panel: height, weight and a comma-separated, title-cased type list.
    type_names = [entry['type']['name'] for entry in poke_info['types']]
    lbl_height_input['text'] = f"{poke_info['height']} dm"
    lbl_Weight_input['text'] = f"{poke_info['weight']} hg"
    lbl_type_input['text'] = ', '.join(type_names).title()
    # Stats panel: one progress bar per base stat, in the API's order
    # (HP, Attack, Defense, Sp. Atk, Sp. Def, Speed).
    stat_bars = (prog_hp, prog_attack, prog_defense,
                 prog_special_attack, prog_special_defense, prog_speed)
    for bar, stat in zip(stat_bars, poke_info['stats']):
        bar['value'] = stat['base_stat']
    return
btn_info = ttk.Button(frm_top, text='Get Info', command=get_pokeinfo_btn_click)
btn_info.grid(row=0, column=2)
# addding widgets to bottom left frame
lbl_height = ttk.Label(frm_btm_left, text='Height:')
lbl_height.grid(row=0, column=0, sticky=E)
lbl_height_input = ttk.Label(frm_btm_left, text='TBD')
lbl_height_input.grid(row=0, column=1)
lbl_Weight = ttk.Label(frm_btm_left, text='Weight:')
lbl_Weight.grid(row=1, column=0)
lbl_Weight_input = ttk.Label(frm_btm_left, text='TBD')
lbl_Weight_input.grid(row=1, column=1)
lbl_type = ttk.Label(frm_btm_left, text='Type:')
lbl_type.grid(row=2, column=0, sticky=E)
lbl_type_input = ttk.Label(frm_btm_left, text='TBD')
lbl_type_input.grid(row=2, column=1)
# addding widgets to bottom right frame
lbl_hp = ttk.Label(frm_btm_right, text='HP:')
lbl_hp.grid(row=0, column=0,sticky=E)
prog_hp = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200,maximum=255)
prog_hp.grid(row=0, column=1)
lbl_attack = ttk.Label(frm_btm_right, text='Attack:')
lbl_attack.grid(row=1, column=0,sticky=E)
prog_attack = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200,maximum=255)
prog_attack.grid(row=1, column=1,pady=5, padx=(0,5))
lbl_defense = ttk.Label(frm_btm_right, text='Defense:')
lbl_defense.grid(row=2, column=0,sticky=E)
prog_defense = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200,maximum=255)
prog_defense.grid(row=2, column=1,pady=5, padx=(0,5))
lbl_special_attack = ttk.Label(frm_btm_right, text='Special Attack:')
lbl_special_attack.grid(row=3, column=0,sticky=E,)
prog_special_attack = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200,maximum=255)
prog_special_attack.grid(row=3, column=1, pady=5, padx=(0,5))
lbl_special_defense = ttk.Label(frm_btm_right, text='Special Defense:')
lbl_special_defense.grid(row=4, column=0,sticky=E)
prog_special_defense = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200,maximum=255)
prog_special_defense.grid(row=4, column=1, pady=5, padx=(0,5))
lbl_speed = ttk.Label(frm_btm_right, text='Speed:')
lbl_speed.grid(row=5, column=0,sticky=E)
prog_speed = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200,maximum=255)
prog_speed.grid(row=5, column=1,padx=5,pady=5)
root.mainloop()
| kunjthakkar/Lab_009 | poke_viewer.py | poke_viewer.py | py | 4,152 | python | en | code | 0 | github-code | 13 |
16128317553 | def my_gen():
    """Demo generator: throw() resumes inside except; finally runs on close/exhaustion."""
    try:
        yield "value"
    except ValueError:
        # gen.throw(ValueError) re-raises at the paused yield and lands here.
        yield "Handling Exception"
    finally:
        # Runs when the generator is exhausted or explicitly close()d.
        print("cleaning up")
x = my_gen()
next(x)  # advance to the first yield ("value")
e = ValueError("some error")
print(x.throw(e)) # "Handling Exception"
print(x.close()) # prints "cleaning up" via finally, then None (close() returns None)
| udhayprakash/PythonMaterial | python3/09_Iterators_generators_coroutines/03_generators/08_error_handling.py | 08_error_handling.py | py | 274 | python | en | code | 7 | github-code | 13 |
21642475925 | from django.shortcuts import render
from ArchiveApp.models import Movies, MovieReview, Admin
from django.http.response import HttpResponseRedirect
from django.core.files.storage import FileSystemStorage
# Main Screen
def Main(request):
    """Render the landing page with every movie in the archive."""
    recent_movies = Movies.objects.all()
    return render(request, "Main.html", {"recentones" : recent_movies})
def Review(request):
    """Show the review page for the movie selected via ?idx=."""
    if request.method == "GET":
        # NOTE(review): assumes MovieReview.review_idx mirrors Movies.idx
        # one-to-one -- confirm against the models.
        review_info = MovieReview.objects.get(review_idx=request.GET.get("idx"))
        movie_title = Movies.objects.get(idx=request.GET.get("idx"))
        return render(request, "Review.html", {"review_info" : review_info, "movie_title" : movie_title})
# Login & Register & QNA
def Request(request):
    """Render the request/Q&A page (template only, no context)."""
    return render(request, "Request.html")
def Register(request):
    """Render the admin registration form."""
    return render(request, "Register.html")
def RegisterOK(request):
    """Persist a new Admin account, then redirect to the main page.

    Fix: non-POST requests previously fell through and returned None (an
    HTTP 500 in Django); the redirect now happens unconditionally, matching
    ReviewInsertOK/ReviewUpdateOK in this module.
    """
    if request.method == "POST":
        # SECURITY NOTE(review): the password is stored in plain text.
        # Hashing here would break whatever comparison the login flow does,
        # so only flagging it -- migrate to Django's password hashing.
        Admin(
            id = request.POST.get("id"),
            pwd = request.POST.get("pwd")
        ).save()
    return HttpResponseRedirect("/")
def Login(request):
    """Render the login form."""
    return render(request, "Login.html")
#CRUD Operations
def MoviesInsert(request):
    """Render the empty movie-creation form."""
    return render(request, "moviesinsert.html")
def MoviesInsertOK(request):
    """Create a Movies row from the submitted form, then chain to the review form.

    Fixes: the two branches duplicated every field assignment (now merged,
    the optional poster image is just an extra kwarg), and inserting into an
    empty table crashed on ``last().idx`` (now defaults to 1).
    """
    if request.method == "POST":
        # Next index: one past the current highest; 1 for an empty table.
        last_movie = Movies.objects.order_by("idx").last()
        next_idx = last_movie.idx + 1 if last_movie is not None else 1
        fields = {
            "idx": next_idx,
            "title": request.POST.get("title"),
            "date": request.POST.get("date"),
            "genre": request.POST.get("genre"),
            "rate": request.POST.get("rate"),
        }
        if "img" in request.FILES:
            # Store the uploaded poster and keep its stored filename.
            upload_img = request.FILES["img"]
            fs = FileSystemStorage()
            fs.save(upload_img.name, upload_img)
            fields["img"] = upload_img.name
        Movies(**fields).save()
    idx = Movies.objects.order_by("idx").last().idx
    return render(request, "reviewinsert.html", {"idx": idx})
def ReviewInsert(request):
    """Render the review form for the movie given by ?idx= (GET only;
    other methods fall through and return None)."""
    if request.method=="GET":
        idx = request.GET.get("idx")
        return render(request, "reviewinsert.html", {"idx": idx})
def ReviewInsertOK(request):
    """Create the MovieReview row for a movie, then redirect home.

    review_idx ties the review to the Movies row created just before.
    """
    if request.method == "POST":
        MovieReview(
            review_idx = request.POST.get("review_idx"),
            description=request.POST.get("description"),
            violence=request.POST.get("violence"),
            exposure=request.POST.get("exposure"),
            torture=request.POST.get("torture"),
            weak=request.POST.get("weak"),
            drug=request.POST.get("drug"),
            fear=request.POST.get("fear"),
            shocking=request.POST.get("shocking"),
            profanity=request.POST.get("profanity"),
            discrimination=request.POST.get("discrimination"),
        ).save()
    return HttpResponseRedirect("/")
def MoviesUpdate(request):
    """Render the movie-edit form; the movie is selected via ?review_idx=."""
    updatemovie = Movies.objects.get(idx=request.GET.get("review_idx"))
    return render(request, "movieupdate.html", {"updatemovie" : updatemovie})
def MoviesUpdateOK(request):
    """Apply edits to an existing Movies row, then chain to the review-edit form.

    Fixes: the no-upload branch contained a bare ``updatemovie.img``
    expression (a no-op), and both branches duplicated every scalar field
    assignment; the logic is now shared, with the image handled conditionally.
    """
    if request.method == "POST":
        index = request.POST.get("idx")
        updatemovie = Movies.objects.get(idx=index)
        if "img" in request.FILES:
            # A new poster was uploaded: store it and remember its name.
            upload_img = request.FILES["img"]
            fs = FileSystemStorage()
            fs.save(upload_img.name, upload_img)
            updatemovie.img = upload_img.name
        # Common scalar fields; the existing image is kept when none was sent.
        updatemovie.title = request.POST.get("title")
        updatemovie.date = request.POST.get("date")
        updatemovie.genre = request.POST.get("genre")
        updatemovie.rate = request.POST.get("rate")
        updatemovie.save()
    return HttpResponseRedirect("/reviewupdate/?idx=%s" % index)
def ReviewUpdate(request):
    """Render the review-edit form for the review selected via ?idx=."""
    updatereview = MovieReview.objects.get(review_idx=request.GET.get("idx"))
    return render(request, "reviewupdate.html", {"updatereview" : updatereview})
def ReviewUpdateOK(request):
    """Copy every submitted review field onto the matching MovieReview row."""
    if request.method == "POST":
        review = MovieReview.objects.get(review_idx=request.POST.get("review_idx"))
        # Assign each rating/text field from the POSTed form, in the same
        # order the original code used.
        for field in ("description", "violence", "exposure", "torture",
                      "profanity", "weak", "fear", "drug", "shocking",
                      "discrimination"):
            setattr(review, field, request.POST.get(field))
        review.save()
    return HttpResponseRedirect("/")
def Delete(request):
    """Delete a movie and its review (review first; presumably to satisfy a
    foreign-key dependency -- confirm against the models)."""
    deletereview = MovieReview.objects.get(review_idx=request.GET.get("review_idx"))
    deletereview.delete()
    deletemovie = Movies.objects.get(idx=request.GET.get("review_idx"))
    deletemovie.delete()
    return(HttpResponseRedirect("/"))
| LeeKwanDong/MovieArchive | ArchiveApp/views.py | views.py | py | 5,622 | python | en | code | 0 | github-code | 13 |
15807281999 | import os
from shutil import copyfile
from sys import exit
# Ask for the Riot Games install folder, falling back to the stock location,
# then make sure the League config directory and a backup of the current
# keybinds (cfgs/default.ini) exist.
ritogame = input("Please enter the location of your Riot Games folder (default C:/Riot Games/)\n>")
if not ritogame:  # default if the user just pressed enter
    ritogame = "C:/Riot Games/"
if ritogame[-1] != "/":  # ensure a trailing slash before appending
    ritogame = ritogame + "/"
ritogame = ritogame + "League of Legends/Config/"  # League config dir inside the install
print("Running sanity checks...")
print("Using", ritogame, "as config directory")
dExists = os.path.isdir(ritogame)
print("Config directory exists:", dExists)
if not dExists:  # idiomatic truth test ("== False" was unidiomatic); bail out early
    print("Invalid config directory,", ritogame)
    exit("Invalid config directory.")
if os.path.exists(ritogame + "/cfgs/"):  # store for custom config profiles
    print("Custom configs folder exists.")
else:
    print("Custom configs folder does not exist, creating...")
    os.makedirs(ritogame + "/cfgs/")
    print("Created.")
if os.path.isfile(ritogame + "/cfgs/default.ini"):  # baseline keybind backup
    print("Default config file exists.")
else:
    print("Default config file does not exist, copying current config...")
    copyfile(ritogame + "input.ini", ritogame + "cfgs/default.ini")
    print("Copied.")
| SingedSimp/lolkeys | startup.py | startup.py | py | 1,365 | python | en | code | 0 | github-code | 13 |
27249786110 | import io
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(filename):
    """Read *filename* (relative to this file) and strip Sphinx roles.

    Occurrences of ``:role:`~target``` are rewritten to literal
    ````target```` so the text renders outside Sphinx (e.g. on PyPI).
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    role_pattern = type(u"")(r':[a-z]+:`~?(.*?)`')
    replacement = type(u"")(r'``\1``')
    with io.open(path, mode="r", encoding='utf-8') as fd:
        contents = fd.read()
    return re.sub(role_pattern, replacement, contents)
# Package metadata; long_description is the README with Sphinx roles stripped.
setup(
    name="useless",
    version='1.1.0',
    url="https://github.com/nogoodusername/py-useless-package",
    author="KN",
    author_email="kshitij.nagvekar@workindia.in",
    description="This is an example package d",
    long_description=read("README.md"),
    packages=find_packages(exclude=('tests',)),
    # Runtime dependencies (two of these are internal WorkIndia packages).
    install_requires=[
        "Django",
        "workindia-basemodels",
        "workindia-generic-adapter"
    ],
    setup_requires=[
        'wheel',
        'pip>=20'
    ],
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        "Operating System :: OS Independent"
    ],
)
7997485807 | # Author: Charse
'''
在特征降维中, 主成分分析(Principal Componment Analysis)
是最为经典个实用的特征降维技术,特别时在辅助图像识别方面有突出的表现
'''
import pandas
import numpy
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from sklearn.metrics import classification_report
# Handwritten digits (optdigits) fetched from the UCI repository.
digits_train = pandas.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/optdigits/optdigits.tra", header=None)
digits_test = pandas.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/optdigits/optdigits.tes", header=None)
# Columns 0-63 are pixel features; column 64 is the digit label.
X_digits = digits_train[numpy.arange(64)]
y_digits = digits_train[64]
# Project the 64-D pixel space onto its first two principal components.
estimator = PCA(n_components=2)
X_pca = estimator.fit_transform(X_digits)
def plot_pca_scatter():
    """Scatter-plot the 2-D PCA projection, one colour per digit class (0-9)."""
    colors = ['black', 'blue', 'purple', 'yellow', 'white', 'red', 'lime', 'cyan', 'orange', 'gray']
    for i in range(len(colors)):
        # Select the projected points whose label equals digit i.
        # FIX: Series.as_matrix() was removed in pandas 1.0; to_numpy() is
        # the supported replacement and returns the same ndarray.
        px = X_pca[:, 0][y_digits.to_numpy() == i]
        py = X_pca[:, 1][y_digits.to_numpy() == i]
        plt.scatter(px, py, c=colors[i])
    plt.legend(numpy.arange(0, 10).astype(str))
    plt.xlabel('First Principal Component')
    plt.ylabel("Second Principal Component")
    plt.show()
plot_pca_scatter()
# Baseline: train a linear SVM on the raw 64-D pixel features.
X_train = digits_train[numpy.arange(64)]
y_train = digits_train[64]
X_test = digits_test[numpy.arange(64)]
y_test = digits_test[64]
svc = LinearSVC()
svc.fit(X_train, y_train)
y_predict = svc.predict(X_test)
estimator = PCA(n_components=20)
# Fit 20 orthogonal directions on the training features, then transform them.
pca_X_train = estimator.fit_transform(X_train)
# Transform the test features along the same 20 directions.
pca_X_test = estimator.transform(X_test)
# Train a default LinearSVC on the compressed 20-D training features and
# predict on the compressed test set (predictions in pca_y_predict).
pca_svc = LinearSVC()
pca_svc.fit(pca_X_train, y_train)
pca_y_predict = pca_svc.predict(pca_X_test)
# Evaluate the SVM trained on the original high-dimensional features.
print(svc.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=numpy.arange(10).astype(str)))
# Evaluate the SVM trained on the PCA-compressed low-dimensional features.
print(pca_svc.score(pca_X_test, y_test))
print(classification_report(y_test, pca_y_predict, target_names=numpy.arange(10).astype(str)))
'''
特点分析:
降维/压缩问题则是选取数据具有代表性的特征,在保持数据多样性(Variance) 的基础上,规避掉大量的
特征冗余和噪声,不过这个过程也很有可能会损失一些有用的模式信息,经过大量的实践证明
相较于损失的少部分模型性能,维度压缩能够节省大量用于模型训练的时间,这样一来,使得PCA所带来的模型综合效率变得更加划算
'''
| Wangchangchung/ClassicModel | non-supervision/dimensionality-reduction/PCAreduction.py | PCAreduction.py | py | 3,044 | python | zh | code | 0 | github-code | 13 |
class lcs:
    """Longest Common Subsequence via the classic O(m*n) dynamic program.

    ``c[i][j]`` holds the LCS length of ``x[:i]`` and ``y[:j]``; ``b`` stores
    back-pointers ("\\" diagonal match, "|" up, "-" left) used to print an LCS.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y
        rows = len(x) + 1
        cols = len(y) + 1
        # Length and back-pointer tables, both (m+1) x (n+1), zero/empty-filled.
        self.c = [[0] * cols for _ in range(rows)]
        self.b = [[""] * cols for _ in range(rows)]

    def lcs_compute(self):
        """Fill the DP tables bottom-up (row 0 / column 0 stay zero)."""
        for i in range(1, len(self.x) + 1):
            for j in range(1, len(self.y) + 1):
                if self.x[i - 1] == self.y[j - 1]:
                    self.c[i][j] = self.c[i - 1][j - 1] + 1
                    self.b[i][j] = "\\"
                elif self.c[i - 1][j] >= self.c[i][j - 1]:
                    self.c[i][j] = self.c[i - 1][j]
                    self.b[i][j] = "|"
                else:
                    self.c[i][j] = self.c[i][j - 1]
                    self.b[i][j] = "-"

    def print_lcs(self, i, j):
        """Recursively follow back-pointers from (i, j), printing one LCS
        character by character (no trailing newline)."""
        pointer = self.b[i][j]
        if pointer == "":
            return
        if pointer == "\\":
            self.print_lcs(i - 1, j - 1)
            print(self.x[i - 1], end='')
        elif pointer == "-":
            self.print_lcs(i, j - 1)
        elif pointer == "|":
            self.print_lcs(i - 1, j)
def run():
    """Read strings X and Y from stdin, then print their LCS."""
    print("Enter X")
    x=input()
    print("Enter Y")
    y=input()
    obj=lcs(x,y)
    obj.lcs_compute()
    print("Longest Common Subsequence-->",end='')
    obj.print_lcs(len(x),len(y))
    print()
run()  # executes immediately when the script is run/imported
| anubhabMajumdar/Classic-Algorithms-in-Python- | lcs.py | lcs.py | py | 1,264 | python | en | code | 0 | github-code | 13 |
# Prime factorisation of n: repeatedly divide out the smallest factor found,
# printing factors in non-decreasing order.
n = int(input())
a = 2
nums = []
while n != 1:
    if n % a == 0:
        nums.append(a)
        # FIX: was "n = n / a" -- true division turns n into a float and can
        # lose precision for large inputs; use integer division instead.
        # (Re-setting a back to 2 after a division was also unnecessary:
        # factors smaller than a cannot reappear.)
        n //= a
    else:
        a += 1
for i in nums:
    print(i)
def main():
    """AMPSphere sample analysis: map, per-habitat counts, and AMP density.

    Reads the project metadata tables from data/, writes figures and summary
    tables under analysis/.

    Fixes: ``tests`` was used before being initialized (NameError at the
    first ``tests.append``), and ``mannwhitneyu`` was called unqualified even
    though only ``stats`` is imported (NameError) -- both corrected below.
    """
    import pandas as pd
    import numpy as np
    import seaborn as sns
    import geopandas as gpd
    from matplotlib import cm
    from scipy import stats
    from itertools import permutations
    import matplotlib.pyplot as plt
    plt.rcParams['svg.fonttype'] = 'none'
    meta = pd.read_table('data/metadata.tsv')
    syns = pd.read_table('data/general_envo_names.tsv')
    # Map the fine-grained environment names onto broad habitat classes.
    higher_level = {'sediment' : 'other',
                    'bird gut' : 'other animal',
                    'cat gut' : 'mammal gut',
                    'insect associated' : 'other animal',
                    'human urogenital tract' : 'other human',
                    'dog gut' : 'mammal gut',
                    'fermented food' : 'anthropogenic',
                    'groundwater' : 'aquatic',
                    'coral associated' : 'other animal',
                    'rat gut' : 'mammal gut',
                    'human associated' : 'other human',
                    'cattle gut' : 'mammal gut',
                    'deer gut' : 'mammal gut',
                    'mouse gut' : 'mammal gut',
                    'river associated' : 'aquatic',
                    'primate gut' : 'mammal gut',
                    'human respiratory tract' : 'other human',
                    'cattle rumen' : 'mammal gut',
                    'human saliva' : 'other human',
                    'activated sludge' : 'anthropogenic',
                    'lake associated' : 'aquatic',
                    'wastewater' : 'anthropogenic',
                    'chicken gut' : 'other animal',
                    'air' : 'other',
                    'human mouth' : 'other human',
                    'plant associated' : 'soil/plant',
                    'water associated' : 'aquatic',
                    'pig gut' : 'mammal gut',
                    'human skin' : 'other human',
                    'marine' : 'aquatic',
                    'soil' : 'soil/plant',
                    'built environment' : 'anthropogenic',
                    'human gut' : 'human gut',
                    'anthropogenic': 'anthropogenic',
                    'bear gut' : 'mammal gut'}
    # Whether each broad habitat class is host-associated.
    is_host_associated = {'human gut' : True,
                          'soil/plant' : False,
                          'aquatic' : False,
                          'anthropogenic' : False,
                          'other human' : True,
                          'mammal gut' : True,
                          'other animal' : True,
                          'other' : False}
    meta = meta.merge(syns[['general_envo_name',
                            'host_tax_id',
                            'microontology']],
                      on=['microontology',
                          'host_tax_id'])
    meta['higher'] = meta['general_envo_name'].map(lambda g: higher_level.get(g, 'other'))
    meta.set_index('sample_accession', inplace=True)
    # World map of samples, coloured by broad habitat.
    fig, ax = plt.subplots(figsize=(8, 6))
    countries = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
    countries.plot(color="lightgrey", ax=ax)
    color_map = {'human gut' : (0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
                 'soil/plant' : (0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
                 'aquatic' : (0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
                 'anthropogenic' : (0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
                 'other human' : (0.4, 0.6509803921568628, 0.11764705882352941),
                 'mammal gut' : (0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
                 'other animal' : (0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
                 'other' : (0.4, 0.4, 0.4)}
    for hab,c in color_map.items():
        sel = meta.query('higher == @hab')
        sel.plot(x="longitude",
                 y="latitude",
                 kind="scatter",
                 c=[c for _ in range(len(sel))],
                 label=hab,
                 colormap="YlOrRd",
                 s=3.5,
                 ax=ax)
    sns.despine(fig, trim=True)
    fig.tight_layout()
    ax.legend(loc=1)
    fig.savefig('analysis/map_habitat.png', dpi=1200)
    # Bar chart of sample counts per habitat (habitats with >100 samples).
    fig,ax = plt.subplots()
    general = meta['general_envo_name'].value_counts()
    general = general[general > 100]
    general['other'] = len(meta) - general.sum()
    general.plot(kind='barh', ax=ax)
    ax.set_xlabel('Number of samples')
    sns.despine(fig, trim=True)
    fig.tight_layout()
    fig.savefig('analysis/samples_per_habitat.svg')
    # Per-sample assembly stats joined with AMP counts from GMSC.
    samples = pd.read_table('data/samples-min500k-assembly-prodigal-stats.tsv',
                            index_col=0)
    gmsc = pd.read_table("data/gmsc_amp_genes_envohr_source.tsv.gz")
    gmsc = gmsc[gmsc.is_metagenomic == True]
    gmsc = gmsc[['amp', 'sample']].drop_duplicates().groupby('sample').agg('size')
    samples = pd.concat([samples, gmsc],
                        axis=1).rename({0: 'ampsphere_amps'},
                                       axis=1)
    for c in ['inserts_filtered',
              'smORFs',
              'assembly_total_length',
              'ampsphere_amps']:
        meta[c] = samples[c]
    # Densities per assembled megabase.
    meta['smorfs_per_assembly_mbps'] = meta.eval('1_000_000 * smORFs/assembly_total_length')
    meta['ampsphere_amps_per_assembly_mbps'] = meta.eval('1_000_000 * ampsphere_amps/assembly_total_length')
    inserts_filtered = meta.groupby('general_envo_name').sum()['inserts_filtered']
    inserts_filtered = inserts_filtered.reindex(general.index)
    inserts_filtered['other'] = meta['inserts_filtered'].sum() - inserts_filtered.sum()
    inserts_filtered //= 1000_000_000
    fig, ax = plt.subplots()
    inserts_filtered.plot(kind='barh', ax=ax)
    ax.set_xlabel('Reads after filtering (billions)')
    sns.despine(fig, trim=True)
    fig.tight_layout()
    fig.savefig('analysis/hq_inserts_per_habitat.svg')
    smORFs = meta.groupby('general_envo_name').sum()['smORFs']
    smORFs = smORFs.reindex(general.index)
    smORFs['other'] = meta['smORFs'].sum() - smORFs.sum()
    fig, ax = plt.subplots()
    (smORFs // 1000_000).plot(kind='barh', ax=ax)
    ax.set_xlabel('smORFs (raw, millions)')
    sns.despine(fig, trim=True)
    fig.tight_layout()
    fig.savefig('analysis/smorfs_per_habitat.svg')
    assembly_total_length = meta.groupby('general_envo_name').sum()['assembly_total_length']
    assembly_total_length = assembly_total_length.reindex(general.index)
    assembly_total_length['other'] = meta['assembly_total_length'].sum() - assembly_total_length.sum()
    # NOTE(review): this re-plots smORFs (not assembly_total_length) and never
    # saves the figure -- looks like a copy-paste leftover; kept as-is.
    fig, ax = plt.subplots()
    (smORFs // 1000_000).plot(kind='barh', ax=ax)
    ax.set_xlabel('smORFs (raw, millions)')
    sns.despine(fig, trim=True)
    fig.tight_layout()
    meta['is_host_associated'] = meta['general_envo_name'].map(lambda c : is_host_associated[higher_level.get(c, 'other')])
    meta['is_host_associated'] = meta.is_host_associated.map(lambda i: 'host' if i else 'non-host')
    c_general_envo_name = meta['general_envo_name'].value_counts()
    sel = meta[meta.general_envo_name.map(lambda e: c_general_envo_name[e] >= 100)]
    sel = sel.query('assembly_total_length > 1_000_000')
    sel = sel.query('ampsphere_amps_per_assembly_mbps < 4')
    order = sel.groupby('general_envo_name').median()['ampsphere_amps_per_assembly_mbps'].sort_values().index
    # Subsample 100 points per habitat (with replacement) for the swarm plot.
    sels = []
    for h in order:
        cur = sel[sel.general_envo_name == h]
        sels.append(cur.sample(100, replace=True))
    sell2000=pd.concat(sels)
    fig,ax = plt.subplots()
    ax.clear()
    ax.set_xlim(-1,2)
    sns.boxplot(x='is_host_associated',
                y='ampsphere_amps_per_assembly_mbps',
                order=['host', 'non-host'],
                ax=ax,
                showfliers=False,
                data=meta,
                color='white',
                )
    sns.swarmplot(x='is_host_associated',
                  y='ampsphere_amps_per_assembly_mbps',
                  order=['host', 'non-host'],
                  ax=ax,
                  data=meta.sample(1000),
                  )
    plt.xlabel('')
    plt.ylabel('AMPs per assembled Mbp')
    fig.savefig('analysis/host_vs_nonhost.svg')
    ax.clear()
    sns.boxplot(x='general_envo_name',
                y='ampsphere_amps_per_assembly_mbps',
                order=order,
                ax=ax,
                color='white',
                showfliers=False,
                data=meta)
    sns.swarmplot(x='general_envo_name',
                  y='ampsphere_amps_per_assembly_mbps',
                  hue='is_host_associated',
                  order=order,
                  ax=ax,
                  data=sell2000,
                  s=2.0)
    for x in ax.get_xticklabels():
        x.set_rotation(90)
    ax.set_xlabel('Habitat')
    ax.set_ylabel('AMPs per assembled Mbp')
    fig.tight_layout()
    sns.despine(fig, trim=True)
    fig.savefig('analysis/ampsphere_amps_per_assembly_mbps.svg')
    fig.savefig('analysis/ampsphere_amps_per_assembly_mbps.png', dpi=150)
    sel = sel.query('higher == "anthropogenic"')
    # NOTE(review): this expression's result is unused (debug leftover?).
    meta[meta['smorfs_per_assembly_mbps'].isna()].iloc[0]
    sel.groupby('general_envo_name').mean()['ampsphere_amps_per_assembly_mbps'].sort_values()
    # Pairwise significance tests (host vs non-host and selected habitats).
    stats.mannwhitneyu(
        meta.query('is_host_associated == "host"')['ampsphere_amps_per_assembly_mbps'],
        meta.query('is_host_associated != "host"')['ampsphere_amps_per_assembly_mbps'],
        )
    ## MannwhitneyuResult(statistic=667422714.5, pvalue=0.0)
    stats.mannwhitneyu(
        meta.query('general_envo_name == "cat gut"')['ampsphere_amps_per_assembly_mbps'],
        meta.query('general_envo_name == "human gut"')['ampsphere_amps_per_assembly_mbps'],
        )
    ## MannwhitneyuResult(statistic=2109689.0, pvalue=0.4132628743871489)
    stats.mannwhitneyu(
        meta.query('general_envo_name == "cat gut"')['ampsphere_amps_per_assembly_mbps'].dropna(),
        meta.query('general_envo_name == "human gut"')['ampsphere_amps_per_assembly_mbps'].dropna()
        )
    ## MannwhitneyuResult(statistic=2109689.0, pvalue=0.4132628743871489)
    stats.mannwhitneyu(
        meta.query('general_envo_name == "pig gut"')['ampsphere_amps_per_assembly_mbps'].dropna(),
        meta.query('general_envo_name == "human gut"')['ampsphere_amps_per_assembly_mbps'].dropna()
        )
    ## MannwhitneyuResult(statistic=21329405.0, pvalue=8.53430397919904e-66)
    stats.mannwhitneyu(
        meta.query('general_envo_name == "cattle gut"')['ampsphere_amps_per_assembly_mbps'].dropna(),
        meta.query('general_envo_name == "human gut"')['ampsphere_amps_per_assembly_mbps'].dropna()
        )
    ## MannwhitneyuResult(statistic=1433967.0, pvalue=9.630559839707916e-129)
    stats.mannwhitneyu(
        meta.query('general_envo_name == "chicken gut"')['ampsphere_amps_per_assembly_mbps'].dropna(),
        meta.query('general_envo_name == "human gut"')['ampsphere_amps_per_assembly_mbps'].dropna()
        )
    ## MannwhitneyuResult(statistic=11641192.0, pvalue=2.8189367898077096e-19)
    stats.mannwhitneyu(
        meta.query('general_envo_name == "dog gut"')['ampsphere_amps_per_assembly_mbps'].dropna(),
        meta.query('general_envo_name == "human gut"')['ampsphere_amps_per_assembly_mbps'].dropna()
        )
    ## MannwhitneyuResult(statistic=3381821.0, pvalue=0.0031117002469289875)
    stats.mannwhitneyu(
        meta.query('general_envo_name == "mouse gut"')['ampsphere_amps_per_assembly_mbps'].dropna(),
        meta.query('general_envo_name == "human gut"')['ampsphere_amps_per_assembly_mbps'].dropna()
        )
    ## MannwhitneyuResult(statistic=5433989.0, pvalue=0.0009130878297249731)
    stats.mannwhitneyu(
        meta.query('general_envo_name == "rat gut"')['ampsphere_amps_per_assembly_mbps'].dropna(),
        meta.query('general_envo_name == "human gut"')['ampsphere_amps_per_assembly_mbps'].dropna()
        )
    ## MannwhitneyuResult(statistic=3574553.0, pvalue=0.004195263094511834)
    sps = ['human gut', 'cat gut',
           'dog gut', 'chicken gut',
           'pig gut', 'cattle gut',
           'mouse gut', 'rat gut',
           'primate gut']
    tests = []  # FIX: was never initialized -> NameError on first append
    for s, sn in permutations(sps, 2):
        # FIX: only `stats` is imported (`from scipy import stats`), so the
        # bare name `mannwhitneyu` raised NameError -- qualify it.
        u, p = stats.mannwhitneyu(
            meta[meta.general_envo_name == s]['ampsphere_amps_per_assembly_mbps'].dropna(),
            meta[meta.general_envo_name == sn]['ampsphere_amps_per_assembly_mbps'].dropna()
            )
        tests.append((s, sn, u, p))
    tests = pd.DataFrame(tests, columns=['s1', 's2', 'U', 'pval'])
    tests.to_csv('analysis/mannwhitneyu_test_mammalguts.tsv', sep='\t', header=True, index=None)
    c = sel.smorfs_per_assembly_mbps.copy()
    fig,ax = plt.subplots()
    ax.clear()
    ax.hist(c, bins=1000)
    fig.savefig('analysis/smorfs_per_assembly_mbps.svg')
    plt.close()
    # Density comparison of smORFs vs (scaled) AMPs per assembled Mbp.
    d = sel.ampsphere_amps_per_assembly_mbps.copy()
    sns.kdeplot(data=c, label='smORFs')
    sns.kdeplot(data=d*1000, label='AMPSphere AMPs * 1000')
    plt.xlabel('Per assembly mbps')
    plt.ylabel('Density AU')
    plt.legend()
    plt.savefig('analysis/graphs_from_luis/amp_smorfs_sample.svg')
    # Export the supplementary sample table.
    m = meta[['ena_ers_sample_id', 'database',
              'access_status', 'study', 'study_accession',
              'general_envo_name', 'higher',
              'inserts_filtered', 'assembly_total_length',
              'smORFs', 'ampsphere_amps', 'is_host_associated']]
    m.rename({'higher': 'macro_environment',
              'general_envo_name': 'micro_environment'},
             axis=1,
             inplace=True)
    m.to_csv('analysis/table_supp1.tsv',
             sep='\t',
             header=True,
             index=True)
if __name__ == '__main__':
    # Standard script entry point.
    main()
| BigDataBiology/AMPsphere | General_Scripts/11_host_non_host_amps/main.py | main.py | py | 13,572 | python | en | code | 0 | github-code | 13 |
14386340720 | import json
import datetime
from restrepo.utils import flatten, string_to_datetime, datetime_to_string_zulu
from restrepo.utils import cleanup_string, content_tostring
from restrepo.db.mixins import DictAble
from restrepo.db.ead import c_node_selector
from restrepo.db.solr import build_equality_query
from restrepo.db.archivefile import get_archivefile
from restrepo import config
def get_archive_file_ids(xml_root):
    """Return the unitid text of every file-level <c> element in the EAD tree."""
    unitid_xpath = '%s[@level="file"]/did/unitid' % c_node_selector
    return [node.text for node in xml_root.xpath(unitid_xpath)]
class EadTextElement(DictAble):
    """A (non-component) text element of an EAD file.

    Elements form a doubly linked list in document order via ``prev``/``next``;
    identification data (archive, country, ...) is copied from the parsed
    ``ead_file``'s cache. ``get_solr_data`` serializes the element for SOLR.
    """
    def __init__(
        self,
        title,
        text_lines,
        xpath,
        ead_file,
        context,
        sequenceNumber,
        prev=None,
        show_in_tree=True,
    ):
        self.is_component = False
        self.title = title
        self.xpath = xpath
        self._text_lines = text_lines
        self._ead_file = ead_file
        self._context = context
        self.sequenceNumber = sequenceNumber
        # Identification data precomputed on the parsed EAD file.
        self.archive = ead_file._cache['archive']
        self.archive_id = ead_file._cache['archive_id']
        self.country = ead_file._cache['country']
        self.ead_id = ead_file._cache['ead_id']
        self.institution = ead_file._cache['institution']
        self._text_lines = text_lines  # (duplicate of the assignment above; harmless)
        self.prev = prev
        self.next = None
        self.number_of_scans = 0
        if self.prev:
            # Link this element into the document-order chain.
            self.prev.next = self
        self.show_in_tree = show_in_tree
        self.status = self.get_status(context)
    @classmethod
    def _field_names(cls):
        # these attributes will be indexed by SOLR
        return [
            'archive',
            'archive_id',
            'archiveFile',
            'breadcrumbs',
            'country',
            'custodhist',
            'date',
            'date_from',
            'date_to',
            'description',
            'ead_id',
            'eadcomponent_id',
            'findingaid',
            'institution',
            'is_archiveFile',
            'is_component',
            'level',
            'language',
            'number_of_scans',
            'parent',
            'scopecontent',
            # 'sequenceNumber',
            'show_in_tree',
            'status',
            'search_source',
            'text',
            'title',
            'xpath',
        ]
    def get_solr_data(self):
        """returns a dictionary that is to be indexed by SOLR"""
        data = {}
        for k in self._field_names():
            v = getattr(self, k, None)
            if isinstance(v, datetime.datetime) or \
                    isinstance(v, datetime.date):
                # SOLR expects dates as Zulu-formatted strings.
                data[k] = datetime_to_string_zulu(v)
            else:
                data[k] = v
        # only add 'sequenceNumber' if we have calculated it (this allows us to update separate components without recalculating the index)
        if getattr(self, 'sequenceNumber', None) is not None:
            data['sequenceNumber'] = self.sequenceNumber
        return data
    def _xpath_contained_in(self, xpath, parent_xpath):
        """True if xpath points below parent_xpath (after normalizing away
        trailing [1], /@attr and /text() parts of the parent path)."""
        if parent_xpath.endswith('[1]'):
            parent_xpath = parent_xpath[:-len('[1]')]
        if '/@' in parent_xpath:
            parent_xpath = parent_xpath.split('/@')[0]
        if parent_xpath.endswith('/text()'):
            parent_xpath = parent_xpath[:-(len('/text()'))]
        return xpath.startswith(parent_xpath) and len(parent_xpath) < len(xpath)
    @property
    def parent(self):
        """return id of parent"""
        parent = self.get_parent()
        if parent:
            return parent.eadcomponent_id
    def get_parent(self):
        """return the 'parent', which for text nodes is the first previous sibling that has 'show_in_tree' True"""
        if self.show_in_tree:
            return None
        node = self.prev
        while node:
            if node.show_in_tree:
                return node
            node = node.prev
    @property
    def text_lines(self):
        """Raw text lines of this element."""
        return self._text_lines
    @property
    def text(self):
        """Text lines with surrounding whitespace stripped."""
        return [l.strip() for l in self.text_lines]
    @property
    def eadcomponent_id(self):
        """Globally unique id: '<ead_id>/<xpath>'."""
        return '%s/%s' % (self.ead_id, self.xpath)
    @property
    def is_rootlevel(self):
        """True when the element has no parent in the tree."""
        return self.parent is None
    @property
    def search_source(self):
        """All text lines joined; the basis for full-text search."""
        return ' '.join(self.text_lines)
    # XXX: breadcrumbs seems to not be used anymore
    @property
    def breadcrumbs(self):
        """JSON list of [xpath, title] pairs for the visible ancestor chain."""
        current_node = self.get_parent()
        breadcrumbs = []
        while current_node:
            if current_node.show_in_tree:
                breadcrumbs.append(current_node)
            current_node = current_node.get_parent()
        breadcrumbs = [[x.xpath, x.title] for x in breadcrumbs]
        return unicode(json.dumps(breadcrumbs))
    def get_status(self, context=None):
        # Text elements are always 'new'; subclasses may override
        # (the context argument is unused here).
        return config.STATUS_NEW
class EadComponent(EadTextElement):
"""A c-node in an ead-file"""
    def __init__(
        self,
        element,
        ead_file,
        prev,
        sequenceNumber,
        context=None,
    ):
        """
        element is a c-node within ead_file

        The component records its xpath, hierarchy level and scan count,
        then delegates the shared bookkeeping to EadTextElement.__init__.
        (get_title / get_number_of_scans are defined elsewhere on this class.)
        """
        self._element = element
        # Absolute xpath of this c-node within the parsed document.
        self.xpath = self._element.getroottree().getpath(self._element)
        self.next = None
        self.level = self._element.attrib.get('level', None)
        self.prev = prev
        EadTextElement.__init__(
            self,
            title=self.get_title(),
            text_lines=[],
            xpath=self.xpath,
            ead_file=ead_file,
            context=context,
            show_in_tree=self._show_in_tree(),
            prev=prev,
            sequenceNumber=sequenceNumber,
        )
        self.is_component = True
        self.number_of_scans = self.get_number_of_scans(context)
def _show_in_tree(self):
if not self.get_parent():
return True
if self.is_archiveFile:
return False
if self.get_parent().show_in_tree is False:
return False
return True
def has_children(self):
node = self.next
if node and self._xpath_contained_in(node.xpath, self.xpath):
return True
def _xpath_contained_in(self, xpath, parent_xpath):
return xpath.startswith(parent_xpath) and len(parent_xpath) < len(xpath)
def get_parent(self):
node = self.prev
while node:
if self._xpath_contained_in(self.xpath, node.xpath):
return node
node = node.prev
@property
def search_source(self):
# this is the basis for full-text search, and also for the snippets
result = []
s = getattr(self, 'archiveFile')
if s:
result += [s + ' - ']
attributes = [
'title',
'description',
'scopecontent',
]
result += [getattr(self, att, '') or '' for att in attributes]
result += self.text
result = ' '.join(result)
result = cleanup_string(result)
return result
@property
def date(self):
"""return the text content of did/unitdate"""
el = self._element.find('did/unitdate')
if el is not None:
return el.text
else:
return el
def _date_range(self):
el = self._element.find('did/unitdate')
if el is not None:
datestring = el.attrib.get('normal', '')
if '/' in datestring:
date_from, date_to = datestring.split('/')
else:
date_from = datestring
date_to = ''
try:
date_from = string_to_datetime(date_from)
except:
date_from = None
try:
date_to = string_to_datetime(
date_to,
default=datetime.datetime(2000, 12, 31),
)
except:
date_to = None
return date_from, date_to
else:
return (None, None)
@property
def date_from(self):
return self._date_range()[0]
@property
def date_to(self):
return self._date_range()[1]
@property
def findingaid(self):
return self._ead_file._cache['findingaid']
@property
def description(self):
el = self._element.find('did/physdesc')
if el is not None:
return flatten(el)
else:
return ''
@property
def language(self):
return self._ead_file._cache['language']
@property
def scopecontent(self):
el = self._element.find('scopecontent')
if el is not None:
return content_tostring(el)
else:
return ''
@property
def custodhist(self):
el = self._element.find('custodhist')
if el is not None:
return content_tostring(el)
else:
return ''
def get_title(self):
el_title = self._element.find('did/unittitle')
if el_title is not None:
return el_title.text
else:
return ''
@property
def is_archiveFile(self):
return self.level == 'file'
@property
def archiveFile(self):
if self.is_archiveFile:
s = self._element.find('did/unitid').text
if s != None:
return unicode(s)
else:
return None
@property
def text_lines(self):
return [self._element.text or '']
def get_number_of_scans(self, context):
#
# cf also db.archivefile.ArchiveFile.number_of_scans
#
if self.is_archiveFile:
solr_query = build_equality_query(
archiveFile=self.archiveFile,
archive_id=self.archive_id,
)
result = context.solr_scan.search(q=solr_query, rows=1)
return result.total_results
else:
return 0
def get_status(self, context):
if self.is_archiveFile:
# get the archivefile from the db (this is relatively expensive...)
archivefile = get_archivefile(context, archive_id=self.archive_id, archiveFile=self.archiveFile)
if archivefile:
return archivefile.status
else:
return config.STATUS_NEW
else:
return config.STATUS_NEW
| sejarah-nusantara/repository | src/restrepo/restrepo/db/eadcomponent.py | eadcomponent.py | py | 10,508 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.