seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
class Solution:
    def findShortestSubArray(self, nums: List[int]) -> int:
        """Return the length of the shortest contiguous subarray whose
        degree (maximum element frequency) equals the degree of `nums`.
        """
        freq = Counter(nums)
        degree = max(freq.values())
        # Track first and last occurrence of every value that attains the degree;
        # the answer for such a value is the span between those indices.
        first = {}
        last = {}
        for idx, value in enumerate(nums):
            if freq[value] == degree:
                first.setdefault(value, idx)
                last[value] = idx
        return min(last[value] - first[value] + 1 for value in first)
| nango94213/Leetcode-solution | 697-degree-of-an-array/697-degree-of-an-array.py | 697-degree-of-an-array.py | py | 817 | python | en | code | 2 | github-code | 36 |
14992228919 | from flask import Flask
from flask import request
from urllib.parse import urlencode
import requests
import json
# Flask application serving the /location autocomplete endpoint.
server = Flask(__name__)
# WARNING(review): a Google API key is hard-coded and committed to source
# control — this credential is exposed; rotate it and load it from the
# environment instead.
api_key = "AIzaSyAWtsz4ALYdHQJKRSeGv-invChqgL7tAFs"
@server.route('/location')
def location():
    """Resolve a city name (query parameter 'city-name') to candidate locations.

    Queries the Google Places autocomplete API and returns a dict of
    {'location_N': {'city', ['province'], 'country'}} entries for predictions
    whose first description component equals the requested name.
    NOTE(review): the comparison lowercases the prediction but not the query,
    so the caller is assumed to send a lower-case city name — confirm.
    """
    city_name = request.values.get('city-name')
    data_type = "json"
    endpoint = f"https://maps.googleapis.com/maps/api/place/autocomplete/{data_type}"
    params = {"input": city_name, "key": api_key}
    url_params = urlencode(params)
    print(url_params)
    url = f"{endpoint}?{url_params}"
    print(url)
    r = requests.get(url)
    print(r.status_code)
    data = {}
    index = 1
    for res in r.json()['predictions']:
        # Keep only predictions whose city part matches the query exactly.
        if(res['description'].split(',')[0].lower() == city_name):
            key = 'location_'+str(index)
            location = {}
            if(len(res['description'].split(','))<3):
                # Two components: "city, country"
                location['city'] = res['description'].split(',')[0]
                location['country'] = res['description'].split(',')[1]
            else:
                # Three or more components: "city, province, country"
                location['city'] = res['description'].split(',')[0]
                location['province'] = res['description'].split(',')[1]
                location['country'] = res['description'].split(',')[2]
            data[key] = location
            index += 1
    return data
if __name__ == '__main__':
    # Development server only; debug mode must not be enabled in production.
    server.run(debug=True)
| KJS89/Wuduplz | Web mining/final/locationList.py | locationList.py | py | 1,190 | python | en | code | 2 | github-code | 36 |
22007463848 | #!/usr/bin/env python3
import os
import subprocess
import sys
def get_souffle_version():
    """Return the installed Souffle version as a 4-tuple of ints.

    Parses `souffle --version` output; missing components are padded with
    zeros and an over-long 4th component (e.g. a git hash) is zeroed.
    """
    output = subprocess.check_output(['souffle', '--version']).decode().split('\n')
    # Prefer the line containing 'Version'; fall back to the first line.
    version_line = next((candidate for candidate in output if 'Version' in candidate), output[0])
    parts = version_line.split(': ')[1].replace('-', '.').replace('(64bit Domains)', '').split('.')
    parts += ['0'] * max(0, 4 - len(parts))
    if len(parts[3]) > 4:
        parts[3] = '0'
    return tuple(int(part) for part in parts[:4])
def main(fname: str):
    """Patch the Souffle-generated C++ source file *fname* in place.

    Always injects an include for the project's custom datastructure header
    before the generated brie struct; when patching is enabled, additionally
    swaps the generated brie types for CastRelationData* replacements.
    """
    print('Patching disabled for now ...')
    patching = False
    if get_souffle_version() < (2, 0, 2, 1188):
        print('Souffle version too old, not patching')
        patching = False
    with open(fname, 'r') as f:
        content = f.read()
    # content = content.replace('mk<t_brie_ii__1_0__0_1__11__10__01>()', 'mk<CastRelationDataUsed>()')
    content = content.replace('struct t_brie_ii__1_0__0_1__11__10__01 {',
                              '#include "llvm/Typegraph/souffle-datastructures.h"\n\nstruct t_brie_ii__1_0__0_1__11__10__01 {')
    # NOTE: `patching` is currently always False above, so this branch is
    # effectively dead; TG_SOUFFLE_NO_CUSTOM_DATA also allows opting out.
    if patching and 'TG_SOUFFLE_NO_CUSTOM_DATA' not in os.environ:
        content = content.replace('<t_brie_ii__1_0__0_1__11__10__01>', '<CastRelationDataUsed>')
        content = content.replace(',t_brie_ii__1_0__0_1__11__10__01,', ',CastRelationDataUsed,')
        content = content.replace('<t_brie_i__0__1>', '<CastRelationDataUsed1>')
        content = content.replace(',t_brie_i__0__1,', ',CastRelationDataUsed1,')
    with open(fname, 'w') as f:
        f.write(content)
    print(f'Patched "{fname}"')
if __name__ == '__main__':
    # argv[1]: path of the Souffle-generated C++ source to patch.
    main(sys.argv[1])
| typro-type-propagation/TyPro-CFI | llvm-typro/lib/Typegraph/patch_souffle_datastructures.py | patch_souffle_datastructures.py | py | 1,497 | python | en | code | 3 | github-code | 36 |
37636175480 | # 861. Score After Flipping Matrix
# Medium
# 333
# 88
# Favorite
# Share
# We have a two dimensional matrix A where each value is 0 or 1.
# A move consists of choosing any row or column, and toggling each value in that row or column: changing all 0s to 1s, and all 1s to 0s.
# After making any number of moves, every row of this matrix is interpreted as a binary number, and the score of the matrix is the sum of these numbers.
# Return the highest possible score.
# Example 1:
# Input: [[0,0,1,1],[1,0,1,0],[1,1,0,0]]
# Output: 39
# Explanation:
# Toggled to [[1,1,1,1],[1,0,0,1],[1,1,1,1]].
# 0b1111 + 0b1001 + 0b1111 = 15 + 9 + 15 = 39
# Note:
# 1 <= A.length <= 20
# 1 <= A[0].length <= 20
# A[i][j] is 0 or 1.
class Solution:
    def matrixScore(self, A: List[List[int]]) -> int:
        """Maximise the sum of rows interpreted as binary numbers, where any
        row or column may be toggled any number of times.

        Note: toggles rows of *A* in place (as the original implementation did).
        """
        n_rows, n_cols = len(A), len(A[0])
        ones_per_col = {}
        for row in A:
            # A row toggle only helps if it turns the most significant bit to 1.
            flip = row[0] != 1
            for col, bit in enumerate(row):
                if flip:
                    bit = 1 - bit
                row[col] = bit
                if bit:
                    ones_per_col[col] = ones_per_col.get(col, 0) + 1
        total = 0
        for col in range(n_cols):
            ones = ones_per_col.get(col, 0)
            # A column may still be toggled, so count whichever bit is more common.
            total += 2 ** (n_cols - col - 1) * max(ones, n_rows - ones)
        return total
| sunnyyeti/Leetcode-solutions | 861_Score_After_Flipping_Matrix.py | 861_Score_After_Flipping_Matrix.py | py | 1,317 | python | en | code | 0 | github-code | 36 |
43303443334 | #! /usr/bin/env python
import colorsys
def hsv2ansi(h, s, v):
    """Map an HSV colour (h, s, v all in 0..1) to a 256-colour ANSI index."""
    if s < 0.1:
        # Near-grey: use the 24-step greyscale ramp (indices 232-255).
        return 232 + int(v * 23)
    red, green, blue = (int(channel * 5) for channel in colorsys.hsv_to_rgb(h, s, v))
    # 6x6x6 colour cube starts at index 16.
    return 16 + 36 * red + 6 * green + blue
def ramp_idx(i, num):
    """ANSI colour index for step *i* of a *num*-step colour ramp."""
    assert num > 0
    frac = i / num
    # Hue starts at 0.57 and advances with the step; saturation fades cubically.
    return hsv2ansi(0.57 + frac, 1 - frac ** 3, 1)
def ansi_ramp(num):
    """Return the list of ANSI colour indices forming a *num*-step ramp."""
    return [ramp_idx(step, num) for step in range(num)]
# Pre-computed 80-step ramp for importers of this module.
ansi_ramp80 = ansi_ramp(80)

if __name__ == '__main__':
    import sys
    from py.io import ansi_print
    # Optional argv[1]: number of colour steps to preview (default 80).
    colors = int(sys.argv[1]) if len(sys.argv) > 1 else 80
    for col in range(colors):
        ansi_print('#', "38;5;%d" % ramp_idx(col, colors), newline=False, flush=True)
| mozillazg/pypy | rpython/tool/ansiramp.py | ansiramp.py | py | 742 | python | en | code | 430 | github-code | 36 |
19655830809 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 03/03/2021
@author: phongdk
"""
import os
from datetime import datetime
import icecream
DATA_DIR = os.getenv('DATA_DIR', '/shared_storage/bi_mlearn_training/coccoc_shopping')
DATA_FILENAME = f'{DATA_DIR}/data/shopee_sample.pkl'
DOC2VEC_FILENAME = f"{DATA_DIR}/models/top2vec_2M_learn.model"
INDEXER_FILENAME = f"{DATA_DIR}/models/indexer.pkl"
def time_format():
    """Prefix for icecream output: the current timestamp followed by ' |> '."""
    return '{} |> '.format(datetime.now())
"""
CONFIG DEBUG MODE -> other files just import config
"""
icecream.ic.configureOutput(prefix=time_format,
includeContext=True)
icecream.install()
"""
CONFIG LOG FORMAT
"""
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(filename)s: %(message)s'
},
},
'handlers': {
'default': {
'level': 'INFO',
'formatter': 'standard',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout', # Default is stderr
},
},
'loggers': {
'': { # root logger
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False
},
'gensim': {
'handlers': ['default'],
'level': 'ERROR',
'propagate': False
},
'apscheduler': {
'handlers': ['default'],
'level': 'ERROR',
'propagate': False
},
'__main__': { # if __name__ == '__main__'
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False
},
}
}
| phongdk92/shopping_retrieval | src/config.py | config.py | py | 1,703 | python | en | code | 0 | github-code | 36 |
# cports build template for the XFree86-Misc X extension library.
pkgname = "libxxf86misc"
pkgver = "1.0.4"
pkgrel = 0
build_style = "gnu_configure"
# Build with the historical Xorg assumption that malloc(0) returns NULL.
configure_args = ["--enable-malloc0returnsnull"]
hostmakedepends = ["pkgconf"]
makedepends = ["xorgproto", "libxext-devel", "libx11-devel"]
pkgdesc = "XFree86-Misc X extension library"
maintainer = "q66 <q66@chimera-linux.org>"
license = "MIT"
url = "https://xorg.freedesktop.org"
source = f"$(XORG_SITE)/lib/libXxf86misc-{pkgver}.tar.bz2"
sha256 = "a89c03e2b0f16239d67a2031b9003f31b5a686106bbdb3c797fb88ae472af380"
def post_install(self):
    # Ship the MIT license text with the package.
    self.install_license("COPYING")


@subpackage("libxxf86misc-devel")
def _devel(self):
    # Standard -devel split (headers, pkg-config files, etc.).
    return self.default_devel()


configure_gen = []
| chimera-linux/cports | main/libxxf86misc/template.py | template.py | py | 668 | python | en | code | 119 | github-code | 36 |
36121027603 | import logging
import sys
import torch
import yaml
from tagging_trainer import TaggingTrainer
from forte.common.configuration import Config
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

if __name__ == "__main__":
    # Task name is the single CLI argument; only NER and POS tagging are supported.
    task = sys.argv[1]
    assert task in ["ner", "pos"], "Not supported nlp task type: {}".format(
        task
    )

    extractor_configs = yaml.safe_load(
        open("configs/config_extractors.yml", "r")
    )
    # Configure output extractor based on the task, see
    # BioSeqTaggingExtractor for more information.
    output_configs = extractor_configs["feature_scheme"]["output_tag"][
        "extractor"
    ]["config"]
    if task == "ner":
        # NER tags entity mentions, using tokens as the tagging unit.
        output_configs["entry_type"] = "ft.onto.base_ontology.EntityMention"
        output_configs["attribute"] = "ner_type"
        output_configs["tagging_unit"] = "ft.onto.base_ontology.Token"
    elif task == "pos":
        # POS tags are attributes of the tokens themselves.
        output_configs["entry_type"] = "ft.onto.base_ontology.Token"
        output_configs["attribute"] = "pos"

    config = {
        "config_data": Config(
            {},
            default_hparams=yaml.safe_load(
                open("configs/config_data.yml", "r")
            ),
        ),
        "config_model": Config(
            {},
            default_hparams=yaml.safe_load(
                open("configs/config_model.yml", "r")
            ),
        ),
        "config_extractors": extractor_configs,
        "device": torch.device("cuda")
        if torch.cuda.is_available()
        else torch.device("cpu"),
    }

    trainer: TaggingTrainer = TaggingTrainer(task_type=task, **config)
    trainer.run()

    # Save training state to disk
    trainer.save(config["config_data"].train_state_path)
    torch.save(trainer.model, "model.pt")
| asyml/forte | examples/tagging/main_train_tagging.py | main_train_tagging.py | py | 1,785 | python | en | code | 230 | github-code | 36 |
2412656493 | from karp5.server import searching
from karp5.tests.util import get_json
def test_autocomplete_with_q(client_w_panacea):
    """Autocomplete for 'sig' finds no hits in the panacea fixture."""
    result = get_json(client_w_panacea, "autocomplete?q=sig")
    assert result["hits"]["total"] == 0
# https://ws.spraakbanken.gu.se/ws/karp/v5/autocomplete?multi=kasta,docka&resource=saldom&mode=external
# https://ws.spraakbanken.gu.se/ws/karp/v5/autocomplete?q=kasus&resource=saldom,dalin,hellqvist
# https://ws.spraakbanken.gu.se/ws/karp/v5/autocomplete?q=kasta&resource=saldom
def test_getcontext_panacea(client_w_panacea):
result = get_json(client_w_panacea, "getcontext/panacea")
center_source = {
"english": [
{
"corpus_prob": 4.89859255463686e-06,
"lemma_english": "suppression",
"package_prob": 0.0,
"pos_english": "No",
"target_prob": 3.8369042032346446e-05,
},
{
"corpus_prob": 0.0,
"lemma_english": "mining",
"package_prob": 0.0,
"pos_english": "No",
"target_prob": 0.0,
},
{
"corpus_prob": 3.5721305315087224e-05,
"lemma_english": "removal",
"package_prob": 0.1393939393939394,
"pos_english": "No",
"target_prob": 0.0002100906287505403,
},
{
"corpus_prob": 7.780117586776189e-06,
"lemma_english": "degradation",
"package_prob": 0.18484848484848485,
"pos_english": "No",
"target_prob": 3.622252219836902e-05,
},
{
"corpus_prob": 0.0,
"lemma_english": "quarrying",
"package_prob": 0.0,
"pos_english": "No",
"target_prob": 0.0,
},
{
"corpus_prob": 1.6607189268563003e-05,
"lemma_english": "breakdown",
"package_prob": 0.015151515151515152,
"pos_english": "No",
"target_prob": 2.4148348132246012e-05,
},
{
"corpus_prob": 0.0001688573668833647,
"lemma_english": "reduction",
"package_prob": 0.6545454545454545,
"pos_english": "No",
"target_prob": 0.0007480621621411321,
},
{
"corpus_prob": 0.0,
"lemma_english": "dismantling",
"package_prob": 0.0,
"pos_english": "No",
"target_prob": 0.0,
},
{
"corpus_prob": 2.4973216945207523e-07,
"lemma_english": "run-down",
"package_prob": 0.0,
"pos_english": "No",
"target_prob": 0.0,
},
{
"corpus_prob": 2.3052200257114636e-07,
"lemma_english": "dismantlement",
"package_prob": 0.006060606060606061,
"pos_english": "No",
"target_prob": 5.366299584943559e-07,
},
{
"corpus_prob": 4.89859255463686e-06,
"lemma_english": "collapse",
"package_prob": 0.0,
"pos_english": "No",
"target_prob": 0.00017198990169744105,
},
],
"lemma_german": "Abbau",
"lexiconName": "panacea",
"lexiconOrder": 0,
"pos_german": "No",
}
post = [
{
"_id": "qLsaOXABkxee4U0WZNC1",
"_index": "panacea_test_upload_02",
"_score": None,
"_source": {
"english": [{"lemma_english": "image"}, {"lemma_english": "copy"}],
"lemma_german": "Abbild",
"lexiconName": "panacea",
},
"_type": "lexicalentry",
"sort": ["Abbild"],
},
{
"_id": "yrsaOXABkxee4U0WZdFo",
"_index": "panacea_test_upload_02",
"_score": None,
"_source": {
"english": [
{"lemma_english": "figure"},
{"lemma_english": "projection"},
{"lemma_english": "illustration"},
{"lemma_english": "image"},
{"lemma_english": "mapping"},
],
"lemma_german": "Abbildung",
"lexiconName": "panacea",
},
"_type": "lexicalentry",
"sort": ["Abbildung"],
},
{
"_id": "-bsaOXABkxee4U0WYMj7",
"_index": "panacea_test_upload_02",
"_score": None,
"_source": {
"english": [
{"lemma_english": "termination"},
{"lemma_english": "disconnection"},
{"lemma_english": "demolition"},
{"lemma_english": "abortion"},
{"lemma_english": "breaking-off"},
{"lemma_english": "abort"},
],
"lemma_german": "Abbruch",
"lexiconName": "panacea",
},
"_type": "lexicalentry",
"sort": ["Abbruch"],
},
{
"_id": "V7saOXABkxee4U0WXsJb",
"_index": "panacea_test_upload_02",
"_score": None,
"_source": {
"english": [
{"lemma_english": "cover"},
{"lemma_english": "coverage"},
{"lemma_english": "covering"},
],
"lemma_german": "Abdeckung",
"lexiconName": "panacea",
},
"_type": "lexicalentry",
"sort": ["Abdeckung"],
},
{
"_id": "lbsaOXABkxee4U0WZM4c",
"_index": "panacea_test_upload_02",
"_score": None,
"_source": {
"english": [
{"lemma_english": "imprint"},
{"lemma_english": "impression"},
{"lemma_english": "printing"},
{"lemma_english": "print"},
{"lemma_english": "copy"},
],
"lemma_german": "Abdruck",
"lexiconName": "panacea",
},
"_type": "lexicalentry",
"sort": ["Abdruck"],
},
{
"_id": "LbsaOXABkxee4U0WYMZl",
"_index": "panacea_test_upload_02",
"_score": None,
"_source": {
"english": [{"lemma_english": "dinner"}, {"lemma_english": "supper"}],
"lemma_german": "Abendessen",
"lexiconName": "panacea",
},
"_type": "lexicalentry",
"sort": ["Abendessen"],
},
{
"_id": "ebsaOXABkxee4U0WX8SF",
"_index": "panacea_test_upload_02",
"_score": None,
"_source": {
"english": [
{"lemma_english": "adventure"},
{"lemma_english": "affair"},
{"lemma_english": "venture"},
],
"lemma_german": "Abenteuer",
"lexiconName": "panacea",
},
"_type": "lexicalentry",
"sort": ["Abenteuer"],
},
{
"_id": "AbsaOXABkxee4U0WXMHw",
"_index": "panacea_test_upload_02",
"_score": None,
"_source": {
"english": [{"lemma_english": "deprivation"}, {"lemma_english": "denial"}],
"lemma_german": "Aberkennung",
"lexiconName": "panacea",
},
"_type": "lexicalentry",
"sort": ["Aberkennung"],
},
{
"_id": "-7saOXABkxee4U0WZdJp",
"_index": "panacea_test_upload_02",
"_score": None,
"_source": {
"english": [
{"lemma_english": "refuse"},
{"lemma_english": "waste"},
{"lemma_english": "offal"},
{"lemma_english": "rubbish"},
{"lemma_english": "drop"},
{"lemma_english": "trash"},
{"lemma_english": "litter"},
{"lemma_english": "scrap"},
{"lemma_english": "discard"},
{"lemma_english": "garbage"},
{"lemma_english": "release"},
],
"lemma_german": "Abfall",
"lexiconName": "panacea",
},
"_type": "lexicalentry",
"sort": ["Abfall"],
},
{
"_id": "KLsaOXABkxee4U0WWb_M",
"_index": "panacea_test_upload_02",
"_score": None,
"_source": {
"english": [{"lemma_english": "intercept"}, {"lemma_english": "interception"}],
"lemma_german": "Abfangen",
"lexiconName": "panacea",
},
"_type": "lexicalentry",
"sort": ["Abfangen"],
},
]
pre = []
assert result["pre"] == pre
assert result["center"]["_source"] == center_source
for post_entry, expected in zip(result["post"], post):
assert post_entry["_source"] == expected["_source"]
| spraakbanken/karp-backend-v5 | karp5/tests/integration_tests/server/test_searching_integration.py | test_searching_integration.py | py | 9,519 | python | en | code | 4 | github-code | 36 |
30838795713 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 18:11:32 2021
@author: mathisagathe
"""
from pymongo import MongoClient
import matplotlib.pyplot as plt
# NOTE(review): database credentials are hard-coded and committed to source
# control — move them to environment variables and rotate the password.
client = MongoClient("10.35.7.4", username = "mathis", password = "MathisM21", authsource = "mathisdb")
db=client.mathisdb
collection = db["TripAdvisor"]

# Total number of restaurants in France on TripAdvisor.
r1 = {"country":"France"}
nbrestoFR = collection.find((r1)).count()
print("Le nombre total de restaurants en France sur TA est de : ",nbrestoFR)

# Number of restaurants in France serving vegetarian meals.
r2 = {"$and":
      [
          {"country":"France"},
          {"vegetarian_friendly":"Y"}
      ]
      }
nbvege = collection.find((r2)).count()

# Number of restaurants in France serving gluten-free meals.
r3 = {"$and":
      [
          {"country":"France"},
          {"gluten_free":"Y"}
      ]
      }
nbgf = collection.find((r3)).count()

# Chart: percentage of vegetarian and gluten-free restaurants in France
# https://www.python-graph-gallery.com/pie-plot-matplotlib-basic
# https://www.kaggle.com/stefanoleone992/tripadvisor-european-restaurants-eda
print("Le nombre total de restaurants en France servant des repas végétariens est de : ",nbvege)
print("Le nombre total de restaurants en France servant des repas sans gluten est de : ",nbgf)

# Top European cities with the most restaurants.
# NOTE(review): the limit is 6 although the comment says "Top 5".
r3 = collection.aggregate([
    {"$group":{"_id":"$city","nb":{"$sum":1}}},
    {"$sort":{"nb":-1}},
    {"$limit":6}
])
for i in r3:
    print(i)
| romanelollier/School_Project_BigData | requetes_mod.py | requetes_mod.py | py | 1,536 | python | fr | code | 0 | github-code | 36 |
33406629366 | """
Main Neural Network Pipeline.
"""
#-------------------------- set gpu using tf ---------------------------#
import tensorflow as tf
config = tf.ConfigProto()
# Grow GPU memory on demand instead of reserving it all up front.
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
#------------------- start importing keras module ---------------------#
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, Conv1D, GlobalAveragePooling2D
from keras.callbacks import ModelCheckpoint
from datagenerator import DataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
import itertools
import os

# Parameters
# Select the dataset root depending on local vs. cluster execution.
if os.path.abspath('~') == '/Users/ghunk/~':
    data_root = "/Users/ghunk/Desktop/GRADUATE/CSC_464/Final_Project/Dataset/stft_binaural_0.5s/"
else:
    data_root = "/scratch/ghunkins/stft_binaural_0.5s/"
# Class labels: every (elevation, azimuth) pair encoded as an "el_az" string.
elevations = [-45, -30, -15, 0, 15, 30, 45]
azimuths = [15*x for x in range(24)]
el_az = list(itertools.product(elevations, azimuths))
classes = [str(x) + '_' + str(y) for x, y in el_az]
encoder = LabelEncoder()
encoder.fit(classes)
params = {'batch_size': 32,
          'Y_encoder': encoder,
          'shuffle': True}
LIMIT = 2000000
RANDOM_STATE = 3
# Datasets: 80/20 train/test split of the file IDs.
IDs = os.listdir(data_root)[:LIMIT]
Train_IDs, Test_IDs, _, _, = train_test_split(IDs, np.arange(len(IDs)), test_size=0.2, random_state=RANDOM_STATE)
# Generators
training_generator = DataGenerator(**params).generate(Train_IDs)
validation_generator = DataGenerator(**params).generate(Test_IDs)
# Design model
model = Sequential()
model.add(Conv2D(256, kernel_size=(804, 1), activation='relu', input_shape=(804, 47, 1)))
model.add(Conv2D(256, kernel_size=(1, 3), strides=(1, 2), activation='relu'))
model.add(Conv2D(256, kernel_size=(1, 3), strides=(1, 2), activation='relu'))
model.add(Conv2D(256, kernel_size=(1, 3), strides=(1, 2), activation='relu'))
model.add(GlobalAveragePooling2D(data_format='channels_last'))
model.add(Dense(168, activation='relu'))
# 168 output classes = 7 elevations x 24 azimuths.
model.add(Dense(168, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
# set callback: https://machinelearningmastery.com/check-point-deep-learning-models-keras/
# Save a checkpoint only when validation accuracy improves.
filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
# Train model on dataset
model.fit_generator(generator = training_generator,
                    steps_per_epoch = len(Train_IDs)//params['batch_size'],
                    nb_epoch = 50,
                    validation_data = validation_generator,
                    validation_steps = len(Test_IDs)//params['batch_size'],
                    verbose=2,
                    callbacks=callbacks_list)
model.save("./model_200000_job_epoch12.h5py")
| ghunkins/Binaural-Source-Localization-CNN | Neural_Net/v3/neuralnet.py | neuralnet.py | py | 2,940 | python | en | code | 9 | github-code | 36 |
42831133132 | #!/usr/bin/env python3
import struct
import json
import sys
def write_tileset(filename, spec):
    """Write a CCTILE02 tileset file assembled from *spec* to *filename*.

    spec keys: 'name', 'desc' (strings), 'size' (tile size, one byte),
    'base', 'overlay' and optionally 'cc2' (paths of image blobs to embed).

    Layout: b'CCTILE02' magic, then length-prefixed name and description,
    one size byte, then length-prefixed base, overlay and cc2 blobs
    (cc2 is written as an empty blob when absent).
    """
    with open(spec['base'], 'rb') as src:
        base_data = src.read()
    with open(spec['overlay'], 'rb') as src:
        overlay_data = src.read()
    if 'cc2' in spec:
        with open(spec['cc2'], 'rb') as src:
            cc2_data = src.read()
    else:
        cc2_data = b''

    def _write_blob(out, data):
        # Length-prefixed blob: uint32 byte count ('I', as the format uses),
        # followed by the payload.
        out.write(struct.pack('I', len(data)))
        out.write(data)

    # Context manager ensures the output file is closed even if a write
    # fails (the original opened it without `with` and leaked on error).
    with open(filename, 'wb') as tis:
        tis.write(b'CCTILE02')
        _write_blob(tis, bytes(spec['name'], 'utf-8'))
        _write_blob(tis, bytes(spec['desc'], 'utf-8'))
        tis.write(struct.pack('B', spec['size']))
        _write_blob(tis, base_data)
        _write_blob(tis, overlay_data)
        _write_blob(tis, cc2_data)
# Generate default tilesets if called from the command line
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage: {} spec.json [...]'.format(sys.argv[0]))
        sys.exit(1)
    # Each argument is a JSON spec file; the .tis output is written
    # next to it with the same basename.
    for arg in sys.argv[1:]:
        with open(arg, 'r') as spec_file:
            spec = json.load(spec_file)
        tis_filename = arg.rsplit('.', 1)[0] + '.tis'
        write_tileset(tis_filename, spec)
| zrax/cctools | res/gen_tilesets.py | gen_tilesets.py | py | 1,323 | python | en | code | 18 | github-code | 36 |
21885184448 | """
Device discovery
"""
import enum
import re
from dataclasses import asdict, dataclass
from queue import Empty, Queue
from socket import inet_ntoa
from typing import Dict, Generator, List, Optional
import click
import requests
import usb
from zeroconf import ServiceBrowser, ServiceInfo, ServiceStateChange, Zeroconf
from brewblox_ctl import const, tabular, utils
BREWBLOX_DNS_TYPE = '_brewblox._tcp.local.'
DISCOVER_TIMEOUT_S = 5
DISCOVERY_LEN = 4 # USB / TCP / mDNS
MODEL_LEN = 7 # 'Spark 2' / 'Spark 3' / 'Spark 4'
MAX_ID_LEN = 24 # Spark 4 IDs are shorter
HOST_LEN = 4*3+3
class DiscoveryType(enum.Enum):
    """Transport used to discover Spark controllers."""
    all = 1
    usb = 2
    mdns = 3
    mqtt = 4

    # aliases: wifi/lan share mdns's value and resolve to the mdns member
    wifi = 3
    lan = 3

    def __str__(self):
        return self.name

    @staticmethod
    def choices():
        # Member names as strings, suitable for CLI argument choices.
        return list((str(v) for v in DiscoveryType.__members__))
@dataclass
class DiscoveredDevice:
    """A controller found during discovery."""
    discovery: str        # transport that found it: 'USB' / 'TCP' / 'mDNS'
    model: str            # e.g. 'Spark 2' / 'Spark 3' / 'Spark 4'
    device_id: str        # controller ID, normalized to lower case
    device_host: str = ''  # network address, empty for USB devices

    def __post_init__(self):
        # IDs are compared case-insensitively throughout; normalize once here.
        self.device_id = self.device_id.lower()
@dataclass
class HandshakeMessage:
    """Fields of the comma-separated handshake string a Spark controller sends."""
    brewblox: str
    firmware_version: str
    proto_version: str
    firmware_date: str
    proto_date: str
    system_version: str
    platform: str
    reset_reason_hex: str
    reset_data_hex: str
    device_id: str
    model: str = ''

    def __post_init__(self):
        # Normalize the ID for case-insensitive comparison, and derive a
        # human-readable model name from the firmware platform; unknown
        # platforms fall back to the raw platform string.
        self.device_id = self.device_id.lower()
        platform_models = {
            'photon': 'Spark 2',
            'p1': 'Spark 3',
            'esp32': 'Spark 4',
        }
        self.model = platform_models.get(self.platform, self.platform)
def match_id_services(config: Optional[dict]) -> Dict[str, str]:  # [ID, service_name]
    """Gets the --device-id value for all Spark services in config.

    Because IDs are yielded during discovery,
    values are returned with the ID as key,
    and a comma-separated string of services as value
    """
    if not config:
        return {}

    # NOTE(review): the pattern only matches `--device-id=VALUE` style
    # (or no separator); a space-separated `--device-id VALUE` is not
    # matched — confirm whether that form occurs in compose files.
    pattern = re.compile(r'.*\-\-device\-id(\w|=)(?P<id>\w+)')
    found: Dict[str, List[str]] = {}
    for svc_name, svc in config.get('services', {}).items():
        if not svc.get('image', '').startswith('ghcr.io/brewblox/brewblox-devcon-spark'):
            continue
        match = pattern.match(svc.get('command', ''))
        if match:
            found.setdefault(match.group('id').lower(), []).append(svc_name)

    return {dev_id: ', '.join(names) for dev_id, names in found.items()}
def find_device_by_host(device_host: str) -> Optional[DiscoveredDevice]:
    """Probe *device_host* over plain HTTP and return its device info.

    A Spark responds to a GET on port 80 with its handshake string.
    Returns None (after a warning) on any failure — this is a best-effort probe.
    """
    utils.info(f'Querying device with address {device_host}...')
    try:
        resp = requests.get(f'http://{device_host}', timeout=5)
        resp.raise_for_status()
        content = resp.text
        if not content.startswith('!BREWBLOX'):
            raise RuntimeError('Host did not respond with a Brewblox handshake')
        handshake = HandshakeMessage(*content.split(','))
        utils.info(f'Found a {handshake.model} with ID {handshake.device_id}')
        return DiscoveredDevice(
            discovery='TCP',
            device_id=handshake.device_id,
            model=handshake.model,
            device_host=device_host,
        )
    except Exception as ex:
        # Deliberately broad: any parse/network error means "not found".
        utils.warn(f'Failed to fetch device info: {str(ex)}')
        return None
def discover_usb() -> Generator[DiscoveredDevice, None, None]:
    """Yield Spark 2 / Spark 3 controllers connected over USB."""
    devices = [
        *usb.core.find(find_all=True,
                       idVendor=const.VID_PARTICLE,
                       idProduct=const.PID_PHOTON),
        *usb.core.find(find_all=True,
                       idVendor=const.VID_PARTICLE,
                       idProduct=const.PID_P1),
        # Spark 4 does not support USB control, and is not listed
    ]
    for dev in devices:
        dev: usb.core.Device
        # The USB serial number doubles as the controller ID.
        id = usb.util.get_string(dev, dev.iSerialNumber).lower()
        model = {const.PID_PHOTON: 'Spark 2', const.PID_P1: 'Spark 3'}[dev.idProduct]
        yield DiscoveredDevice(discovery='USB',
                               model=model,
                               device_id=id)
def discover_mdns() -> Generator[DiscoveredDevice, None, None]:
    """Yield Spark controllers advertising the _brewblox._tcp service on the LAN."""
    queue: Queue[ServiceInfo] = Queue()
    conf = Zeroconf()

    def on_service_state_change(zeroconf: Zeroconf, service_type, name, state_change):
        # Runs on the zeroconf thread: hand results to this generator via the queue.
        if state_change == ServiceStateChange.Added:
            info = zeroconf.get_service_info(service_type, name)
            queue.put(info)

    try:
        ServiceBrowser(conf, BREWBLOX_DNS_TYPE, handlers=[on_service_state_change])
        while True:
            # Stop when no new service arrives within the timeout window.
            info = queue.get(timeout=DISCOVER_TIMEOUT_S)
            if not info or not info.addresses or info.addresses == [b'\x00\x00\x00\x00']:
                continue  # discard simulators
            id = info.properties[b'ID'].decode()
            model = info.properties[b'HW'].decode()
            host = inet_ntoa(info.addresses[0])
            yield DiscoveredDevice(discovery='mDNS',
                                   model=model,
                                   device_id=id,
                                   device_host=host)
    except Empty:
        # Timeout expired: the discovery window is closed.
        pass
    finally:
        conf.close()
def discover_device(discovery_type: DiscoveryType) -> Generator[DiscoveredDevice, None, None]:
    """Yield devices found by the mechanisms implied by *discovery_type*."""
    usb_types = (DiscoveryType.all, DiscoveryType.usb)
    network_types = (DiscoveryType.all, DiscoveryType.mdns, DiscoveryType.mqtt)
    if discovery_type in usb_types:
        yield from discover_usb()
    if discovery_type in network_types:
        # mqtt devices also advertise over mDNS, so the same scan serves both.
        yield from discover_mdns()
def list_devices(discovery_type: DiscoveryType,
                 compose_config: Optional[dict]):
    """Print a table of all discovered devices, annotated with any
    service already configured for each device ID."""
    id_services = match_id_services(compose_config)
    table = tabular.Table(
        keys=[
            'discovery',
            'model',
            'device_id',
            'device_host',
            'service'
        ],
        headers={
            'discovery': 'Discovery'.ljust(DISCOVERY_LEN),
            'model': 'Model'.ljust(MODEL_LEN),
            'device_id': 'Device ID'.ljust(MAX_ID_LEN),
            'device_host': 'Device host'.ljust(HOST_LEN),
            'service': 'Service',
        }
    )

    utils.info('Discovering devices...')
    table.print_headers()
    # Rows are printed as they are discovered, not collected first.
    for dev in discover_device(discovery_type):
        table.print_row({
            **asdict(dev),
            'service': id_services.get(dev.device_id, ''),
        })
def choose_device(discovery_type: DiscoveryType,
                  compose_config: Optional[dict],
                  ) -> Optional[DiscoveredDevice]:
    """Interactively let the user pick one discovered device.

    Returns the chosen device, or None when nothing was discovered.
    """
    id_services = match_id_services(compose_config)
    table = tabular.Table(
        keys=[
            'index',
            'discovery',
            'model',
            'device_id',
            'device_host',
            'service'
        ],
        headers={
            'index': 'Index',
            'discovery': 'Discovery'.ljust(DISCOVERY_LEN),
            'model': 'Model'.ljust(MODEL_LEN),
            'device_id': 'Device ID'.ljust(MAX_ID_LEN),
            'device_host': 'Device host'.ljust(HOST_LEN),
            'service': 'Service',
        }
    )
    devs = []

    utils.info('Discovering devices...')
    table.print_headers()
    for dev in discover_device(discovery_type):
        # TODO(Bob) less hacky check
        # mqtt discovery rides on mDNS; only Spark 4 supports mqtt.
        if discovery_type == DiscoveryType.mqtt and dev.model != 'Spark 4':
            continue
        devs.append(dev)
        table.print_row({
            **asdict(dev),
            'index': len(devs),
            'service': id_services.get(dev.device_id, ''),
        })

    if not devs:
        click.echo('No devices discovered')
        return None

    # Prompt uses 1-based indices as printed in the table.
    idx = click.prompt('Which device do you want to use?',
                       type=click.IntRange(1, len(devs)),
                       default=1)

    return devs[idx-1]
| BrewBlox/brewblox-ctl | brewblox_ctl/discovery.py | discovery.py | py | 7,942 | python | en | code | 3 | github-code | 36 |
74508033382 | #!/bin/python3
import sys
# Print a right-aligned staircase of '#' characters of height n (read
# from stdin), padded on the left with spaces.
n = int(input().strip())
for step in range(1, n + 1):
    # str.rjust builds each row in one call instead of the original
    # O(n) per-character string concatenation per row.
    print(('#' * step).rjust(n))
| costincaraivan/hackerrank | algorithms/warmup/python3/staircase.py | staircase.py | py | 237 | python | en | code | 1 | github-code | 36 |
75009815143 | import asyncio
import logging
from random import randint
from src.streaming import Consumer, Producer, Topic
LOGGER = logging.getLogger(__name__)
GROUP_ID = "WEBSITE_NER"
async def amain():
    """Consume website changes, produce NER results."""
    consumer = await Consumer.create(Topic.CHANGE.value, group_id=GROUP_ID)
    producer = await Producer.create()
    # Endless consume loop: one NER event produced per change event.
    async for message in consumer:
        data = message.value
        LOGGER.info("processing event %s %s", data["id"], data["domain"])
        await producer.send_and_wait(Topic.NER.value, calculate_ner(data))
def calculate_ner(data: dict) -> dict:
    """Build a (mock) NER result event for a website-change message.

    Carries over the change's domain and id, with random entity counts
    standing in for real named-entity recognition.
    """
    entity_counts = {
        category: randint(0, 10)
        for category in ('persons', 'locations', 'brands')
    }
    return {
        'domain': data['domain'],
        'id': data['id'],
        'ner': entity_counts,
    }
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s [%(module)s:%(lineno)s] %(message)s')
    # Run the consume/produce loop until interrupted.
    asyncio.run(amain())
| SebastianRemander/eda-demo | src/producers/website_ner.py | website_ner.py | py | 1,060 | python | en | code | 1 | github-code | 36 |
23297663163 | import pathlib
import pandas as pd
from model import dez
datadir = pathlib.Path(__file__).parents[0].joinpath('data')
def test_populate_solution_land_distribution():
    """Ocean distribution for 'Limiting bottom trawling' matches the stored CSV."""
    expected = pd.read_csv(datadir.joinpath('lbt_ocean_dist.csv'), index_col=0)
    de = dez.DEZ('Limiting bottom trawling')
    # We freeze applicable zones as solution_dez_matrix.csv is likely to change
    de.applicable_zones = ['DEZ1: Epipelagic, EEZ', 'DEZ2: Epipelagic, ABNJ']
    de._populate_world_ocean_allocation()
    de._populate_solution_ocean_distribution()
    pd.testing.assert_frame_equal(de.get_ocean_distribution(), expected, check_dtype=False)
39433044372 | from html.parser import HTMLParser
from urllib.error import HTTPError
import urllib.request
from urllib.request import urlopen
from urllib import parse
import sys
import pandas as pd
url = 'https://www.yapo.cl/chile/autos?ca=5_s&l=0&st=s&br=68&mo=36'
url_2 = 'https://www.chileautos.cl/autos/busqueda?s={:d}&q=(C.Marca.Peugeot._.Modelo.2008.)&l={:d}'
class LinkParser(HTMLParser):
    """HTML parser that collects links matching a search substring and
    scrapes text fragments from the page's first <table>."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.in_tbody = False        # currently inside a <table> element
        self.keep_going = True       # set False once the first table closes
        self.interesting_data = []   # text fragments scraped from the table

    # This is a function that HTMLParser normally has
    # but we are adding some functionality to it
    def handle_starttag(self, tag, attrs):
        # We are looking for the begining of a link. Links normally look
        # like <a href="www.someurl.com"></a>
        # NOTE(review): self.search / self.baseUrl / self.links are only set
        # by getLinks(); calling feed() directly would raise AttributeError.
        if tag =='table':
            self.in_tbody = True
        if tag == 'a':
            for (key, value) in attrs:
                if key == 'href':
                    # We are grabbing the new URL. We are also adding the
                    # base URL to it. For example:
                    # www.netinstructions.com is the base and
                    # somepage.html is the new URL (a relative URL)
                    #
                    # We combine a relative URL with the base URL to create
                    # an absolute URL like:
                    # www.netinstructions.com/somepage.html
                    if self.search in value:
                        newUrl = parse.urljoin(self.baseUrl, value)
                        # And add it to our colection of links:
                        if newUrl not in self.links:
                            self.links = self.links + [newUrl]

    def handle_endtag(self, tag):
        # Stop collecting after the first table ends.
        if tag == 'table':
            self.in_tbody = False
            self.keep_going = False

    def handle_data(self, data):
        if self.in_tbody and self.keep_going:
            # NOTE(review): these literals contain a run of whitespace that is
            # likely non-breaking spaces copied from the scraped markup —
            # verify against the live page before changing them.
            if '   ' not in data and '   Cargos' not in data:
                self.interesting_data.append(data)
            elif '   Cargos' in data:
                print('###########')
                print(data)
                print('###########')
                # reset() abandons the rest of the document.
                self.reset()
                # filtered_data = data.split('Cargos')
                # print(filtered_data[0])

    # This is a new function that we are creating to get links
    # that our spider() function will call
    def getLinks(self, url, search, base_url):
        """Fetch *url*, collect links containing *search*, and return
        (interesting_data, links); returns ("", []) on error."""
        self.links = []
        # Remember the base URL which will be important when creating
        # absolute URLs
        self.baseUrl = base_url
        # Use the urlopen function from the standard Python 3 library
        try:
            req = urllib.request.Request(url,
                                         data=None,
                                         headers={
                                             'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'})
            response = urlopen(req)
            # Make sure that we are looking at HTML and not other things that
            # are floating around on the internet (such as
            # JavaScript files, CSS, or .PDFs for example)
            if 'text/html' in response.getheader('Content-Type'):
                htmlBytes = response.read()
                # Note that feed() handles Strings well, but not bytes
                # (A change from Python 2.x to Python 3.x)
                htmlString = htmlBytes.decode("utf-8")
                self.search = search
                self.feed(htmlString)
                return self.interesting_data, self.links
            else:
                return "", []
        except HTTPError as error:
            print(url)
            print(error)
            return "", []
def get_links_chileautos(marca, modelo, anho=None):
    """Collect all listing-detail links for the given make/model on chileautos.cl.

    NOTE(review): *anho* is never used — verify whether year filtering was
    meant to be part of the query string.
    """
    baseurl = 'https://www.chileautos.cl'
    query_format = '(C.Marca.{:s}._.Modelo.{:s}.)'.format(marca, modelo)
    # Keep '{:d}' placeholders for the offset ('s') and page size ('l') so the
    # URL can be re-formatted per results page below.
    query_url = '/autos/busqueda?s={:s}&q={:s}&l={:s}'.format('{:d}',query_format,'{:d}')
    full_url = parse.urljoin(baseurl, query_url)
    all_links = []
    step = 60
    parser = LinkParser()
    # First request only discovers the pagination links (how many pages exist).
    data, links = parser.getLinks(full_url.format(0, step), search='/autos/busqueda?s=', base_url=baseurl)
    for start in range(0, len(links) * step, step):
        # One request per results page, harvesting per-vehicle detail links.
        data, links = parser.getLinks(full_url.format(start, step), search='auto/usado/details/CL-AD', base_url=baseurl)
        all_links = all_links + links
    # for link in links:
    #     print(link)
    # print(len(links))
    # print(data.find('listing-item__details'))
    return all_links
def get_price_chileautos(url):
    """Scrape one chileautos.cl listing page and return its attributes as a dict.

    Returns None when the page could not be fetched; NOTE(review): also
    returns None implicitly via the IndexError branch when the scraped table
    has an odd number of cells.
    """
    baseurl = 'https://www.chileautos.cl'
    parser = LinkParser()
    data, l = parser.getLinks(url, search='', base_url=baseurl)
    if data == "" and l == []:
        return None
    # The vehicle year is the first token of the second table cell.
    anho = data[1].split(' ')[0]
    data.append('Anho')
    data.append(anho)
    # print(','.join(data))
    try:
        # Pair up consecutive cells as (key, value).
        # print(len(data), data)
        tuple_data = [(data[i], data[i + 1]) for i in range(0, len(data), 2)]
        return dict(tuple_data)
    except IndexError:
        print(data)
# Script entry point: crawl all listings for one make/model and dump the
# scraped attributes to a CSV, padding ragged columns with empty strings.
if __name__ == '__main__':
    marca = 'Volkswagen'
    modelo = 'ESCARABAJO'
    links = get_links_chileautos(marca=marca, modelo=modelo)
    all_dict = []
    count=0
    for link in links:
        print("Processing: {:s}".format(link))
        data_dict = get_price_chileautos(link)
        if data_dict is not None:
            all_dict.append(data_dict)
        # if count == 3:
        #     break
        # else:
        #     count += 1
    # Union of keys over all listings, preserving first-seen order.
    all_keys = []
    for d in all_dict:
        for key in d.keys():
            if key not in all_keys:
                all_keys.append(key)
            # else:
            #     print("{:s} repetida".format(key))
    all_data_to_dict = {}
    for key in all_keys:
        all_data_to_dict[key] = []
    for d in all_dict:
        for dkey in d.keys():
            all_data_to_dict[dkey].append(d[dkey])
    # Pad shorter columns so the DataFrame constructor accepts the dict.
    # NOTE(review): appends at most one "" per column — columns can still be
    # shorter than max_len when several listings miss the same key; verify.
    max_len = 0
    for akey in all_data_to_dict.keys():
        max_len = max(max_len, len(all_data_to_dict[akey]))
    for akey2 in all_data_to_dict.keys():
        if len(all_data_to_dict[akey2]) < max_len:
            all_data_to_dict[akey2].append("")
    # print(all_data_to_dict)
    df = pd.DataFrame(all_data_to_dict, columns=all_keys)
    df.to_csv('all_{:s}-{:s}.csv'.format(marca, modelo), sep=',')
    # print(df)
| simontorres/experimentos | depreciacion_autos/webcrawl.py | webcrawl.py | py | 6,480 | python | en | code | 0 | github-code | 36 |
9817120997 | import itertools
import os
import pandas as pd
import pickle
import random
import torch
from torchvision import datasets
from tqdm import tqdm
from scipy import rand
import data_handler
from models import AlexNet, Vgg16
def generate_model_testset_results(model, testset_path):
    """
    Evaluate the whole 'imagenetv2-matched-frequency-format-val' dataset
    on the given model and return a DataFrame with one row per image:
    "img_name", "max_confidence", "pred_label", "true_label", "pred_is_correct".
    For information on the dataset see https://github.com/modestyachts/ImageNetV2
    """
    img_folder = datasets.ImageFolder(root=testset_path)
    img_names, true_labels_idx, pred_labels_idx, pred_max_confidences = [], [], [], []
    # img_folder.imgs yields (file_path, folder_label) tuples.
    for img_path in tqdm(img_folder.imgs):
        pil_img = img_folder.loader(img_path[0])
        img_name = img_path[0].split(os.sep)[-1]
        # preprocessing and prediction
        input_tensor = data_handler.transform()(pil_img)
        input_tensor = input_tensor.unsqueeze(0)
        output = model.predict(input_tensor)
        # output has unnormalized scores. To get probabilities, run a softmax on it.
        probabilities = torch.nn.functional.softmax(output[0], dim=0)
        img_names.append(img_name)
        pred_max_confidences.append(probabilities.detach().numpy().max())
        pred_labels_idx.append(probabilities.detach().numpy().argmax())
        # The true class id is the name of the image's parent directory.
        true_labels_idx.append(int(img_path[0].split(os.sep)[-2]))
    df = pd.DataFrame([img_names, pred_max_confidences, pred_labels_idx, true_labels_idx]).transpose()
    df.columns = ["img_name", "max_confidence", "pred_label", "true_label"]
    df["pred_is_correct"] = df["pred_label"] == df["true_label"]
    return df
def create_questionnairs(imgs_idx, xai_methods, model_names, df_vgg, df_alex, seed=None):
    """Create all questionnaire forms: a fixed first half shared across forms
    plus per-form random questions covering every remaining variant.

    imgs_idx is consumed in place (drawn images are popped from it).
    BUGFIX: seeding now triggers for any non-None seed; previously
    ``if seed:`` silently ignored seed=0.
    """
    if seed is not None:
        random.seed(seed)
    # create first half of questions with fixed images for all questionnaire forms
    questionnaires_list = get_fixed_img_questionnaires(imgs_idx, xai_methods, model_names)
    # add_random_unique_images mutates 'questionnaires_list' in place
    add_random_unique_images(questionnaires_list, imgs_idx, df_alex, df_vgg, model_names, xai_methods)
    return questionnaires_list
def get_fixed_img_questionnaires(imgs_idx, xai_methods, models):
    """Build the 12 questionnaire forms' fixed halves.

    Draws 12 random images (popped from *imgs_idx*), enumerates every
    (image, model, xai_method) combination, and distributes the combinations
    over the forms in a rotating (Latin-square-like) pattern so no form
    repeats a combination.
    """
    NUM_QUESTIONNAIRES = 12
    NUM_IMGS = 12
    drawn_imgs = []
    for _ in range(NUM_IMGS):
        drawn_imgs.append(imgs_idx.pop(random.randint(0, len(imgs_idx) - 1)))
    variants = list(itertools.product(drawn_imgs, models, xai_methods))
    questionnaires_list = []
    for form in range(NUM_QUESTIONNAIRES):
        questions = []
        for row in range(NUM_IMGS):
            # Each "row" owns one 12-wide slice of the variant list; the form
            # index rotates which entry of the slice this form receives.
            block = variants[row * NUM_IMGS:(row + 1) * NUM_IMGS]
            questions.append(block[(form + row) % NUM_IMGS])
        questionnaires_list.append(questions)
    return questionnaires_list
def add_random_unique_images(questionnaires_list, imgs_idx, df_alex, df_vgg, model_names, xai_methods):
    """Grow every questionnaire (in place) to 24 questions so each
    (xai_method, model, prediction-correct) variant appears exactly once.

    Fixed questions — (image_idx, model_name, xai_method) tuples — get their
    correctness flag appended; additional questions are then drawn randomly
    from *imgs_idx* (popped in place) until all variants are covered.

    df_alex / df_vgg must carry a 'pred_is_correct' column indexed by image id.

    Fixes relative to the original:
    * correctness for the 'vgg' model is read from df_vgg (it was mistakenly
      read from df_alex);
    * counters use a single .loc[row, col] assignment instead of chained
      indexing (``df.loc[x]["count"] += 1``), which pandas does not guarantee
      to write through.
    """
    FINAL_QUESTIONNAIRE_SIZE = 24
    for idx_qn, questionnaire in enumerate(questionnaires_list):
        # One row per (xai, model, correctness) variant; 'count' tracks usage.
        df_variants_count = pd.DataFrame(list(itertools.product(xai_methods, model_names, [True, False]))).groupby(
            [0, 1, 2]).count()
        df_variants_count["count"] = 0
        # Phase 1: tag each fixed question with its correctness flag.
        for idx_q, question in enumerate(questionnaire):
            model = question[1]
            df_model = df_alex if model == "alex" else df_vgg
            correct = bool(df_model["pred_is_correct"][question[0]])
            questionnaires_list[idx_qn][idx_q] += (correct,)
            df_variants_count.loc[(question[2], model, correct), "count"] += 1
        # Phase 2: draw random images until each of the 24 variants has count 1.
        while df_variants_count["count"].sum() != FINAL_QUESTIONNAIRE_SIZE:
            rand_img_idx = imgs_idx.pop(random.randint(0, len(imgs_idx) - 1))
            alex_pred = bool(df_alex.loc[rand_img_idx, "pred_is_correct"])
            # BUGFIX: vgg correctness must come from df_vgg, not df_alex.
            vgg_pred = bool(df_vgg.loc[rand_img_idx, "pred_is_correct"])
            df_alex_options = df_variants_count.loc[(slice(None), "alex", alex_pred), :]
            df_alex_options = df_alex_options[df_alex_options["count"] == 0]
            df_vgg_options = df_variants_count.loc[(slice(None), "vgg", vgg_pred), :]
            df_vgg_options = df_vgg_options[df_vgg_options["count"] == 0]
            if not df_alex_options.empty:
                rand_variant = df_alex_options.index[random.randint(0, df_alex_options.shape[0] - 1)]
            elif not df_vgg_options.empty:
                rand_variant = df_vgg_options.index[random.randint(0, df_vgg_options.shape[0] - 1)]
            else:
                continue  # this image fits no still-needed variant; draw another
            # rand_variant is (xai_method, model, correct); store as (img, model, xai, correct).
            questionnaire.append((rand_img_idx, rand_variant[1], rand_variant[0], rand_variant[2]))
            df_variants_count.loc[rand_variant, "count"] += 1
def save_questionnaires(questionnaires_list, path):
    """Serialize the questionnaire forms to *path* via pickle."""
    with open(path, 'wb') as out_file:
        pickle.dump(questionnaires_list, out_file)
def shuffle_questions(questionnaire):
    """Shuffle the question order of every questionnaire in place.

    Despite the singular parameter name (kept for backward compatibility),
    the argument is a list of questionnaires; each inner list is shuffled.
    FIX: the loop variable no longer shadows the parameter.
    """
    for form in questionnaire:
        random.shuffle(form)
def main():
    """Create the questionnaire pickle files.

    If the per-model testset result pickles are missing, first evaluate the
    testset on VGG16 and AlexNet; then build, shuffle and persist the forms.
    """
    folder_vgg = os.path.join(os.path.curdir, "data", "stats", "df_vgg.pickle")
    folder_alex = os.path.join(os.path.curdir, "data", "stats", "df_alexnet.pickle")
    if not (os.path.exists(folder_alex) and os.path.exists(folder_vgg)):
        models = [Vgg16(), AlexNet()]
        for model in models:
            model.eval()
            folder = os.path.join(os.path.curdir, "data", "imagenetv2-matched-frequency-format-val")
            df = generate_model_testset_results(model, folder)
            # NOTE(review): writes df_<name>_2.pickle but the existence check
            # above looks for df_<name>.pickle — verify the intended filenames.
            df.to_pickle(f"data/stats/df_{model.name}_2.pickle")
    imgs_idx = list(range(10000))
    xai_methods = ['gradCAM', 'LRP', 'SHAP', 'LIME', 'ConfidenceScores', 'IntegratedGradients']
    model_names = ["alex", "vgg"]
    df_vgg = pd.read_pickle(folder_vgg)
    df_alex = pd.read_pickle(folder_alex)
    questionnaires_list = create_questionnairs(imgs_idx, xai_methods, model_names, df_vgg, df_alex, seed=3)
    shuffle_questions(questionnaires_list)
    folder = os.path.join(os.path.curdir, "data", "question_generation", "questionnaires_shuffled.pickle")
    save_questionnaires(questionnaires_list, folder)
    # additionally shuffle questions of previously generated questionnaires.
    # NOTE(review): this overwrites the questionnaires_shuffled.pickle written
    # just above with a re-shuffle of questionnaires.pickle — verify intent.
    folder = os.path.join(os.path.curdir, "data", "question_generation", "questionnaires.pickle")
    questionnaires_list = data_handler.get_questionnaires(folder)
    shuffle_questions(questionnaires_list)
    folder = os.path.join(os.path.curdir, "data", "question_generation", "questionnaires_shuffled.pickle")
    save_questionnaires(questionnaires_list, folder)
# Script entry point: build, shuffle and persist the questionnaire forms.
if __name__ == '__main__':
    main()
| tlabarta/helpfulnessofxai | experiment_creator.py | experiment_creator.py | py | 7,853 | python | en | code | 0 | github-code | 36 |
37452044980 | import os, sys, subprocess, time
def CAPTURE(cmd):
    """Run *cmd* through the shell and return its stdout stripped of spaces and newlines."""
    completed = subprocess.run(f'{cmd}', shell=True, capture_output=True)
    return completed.stdout.decode('utf-8').strip(' \n')
def SBATCH(job_id, partition, nodes, ntasks, memory, walltime, out_err, task=None, email=None, conda_env=None):
    """Build the text of a SLURM batch-script preamble.

    Emits the #SBATCH directives for the given resources, the stdout/stderr
    paths under *out_err* (suffixed with *task* if given), an optional
    end-of-job email, sourcing of ~/.bash_profile, optional conda environment
    activation, and a final hostname echo.
    """
    suffix = f'.{task}' if task else ''
    log_paths = [f'{out_err}/%x{suffix}.{ext}' for ext in ['out', 'err']]
    directives = [
        ('job-name', job_id), ('partition', partition), ('nodes', nodes),
        ('ntasks-per-node', ntasks), ('mem', memory), ('time', walltime),
        ('output', log_paths[0]), ('error', log_paths[1]), ('parsable', None),
    ]
    if email:
        directives.append(('mail-user', email))
        directives.append(('mail-type', 'END'))
    lines = []
    for option, value in directives:
        assignment = f'={value}' if value else ''  # flag-only directives carry no '='
        lines.append(f'#SBATCH --{option}{assignment} \n')
    body = ''.join(lines) + '\nsource $HOME/.bash_profile\n'
    if conda_env:
        body += f'\nconda activate {conda_env}' + '\necho ENVIRONMENT $CONDA_DEFAULT_ENV ACTIVE\n'
    return '#!/bin/bash\n' + f'{body}\n' + 'echo RUNNING ON `hostname`\n'
def ezSub(i, check, user, limit):
    """Block until *user* has no more than *limit* jobs queued in SLURM.

    Polls `squeue` every *check* seconds; *i* is only used in the progress
    message.  FIX: implemented as a loop instead of recursion, so long waits
    cannot exhaust the interpreter's recursion limit (the original recursed
    once per poll and would raise RecursionError after ~1000 checks).
    """
    while True:
        total = int(CAPTURE(f'squeue -u {user} -h | wc -l'))  # current task count
        if total <= limit:
            return  # room available: caller may submit task i
        print(f'\nSUBMISSION LIMIT REACHED: WAITING TO SUBMIT TASK {i}...')
        time.sleep(check)
def REVIEW(id_file):
    """Summarize the SLURM state of every task recorded in *id_file* (a TSV of
    job-id/script pairs) and offer to resubmit failed tasks, appending the new
    ids to the same file."""
    print('\nREVIEWING TASKS...')
    with open(id_file, 'a+') as id_update:
        *non_fails, failed = states = ['PENDING', 'RUNNING', 'COMPLETED', 'CANCELLED', 'FAILED'] # slurm job state categories
        categories = {category:set() for category in states } # slurm job state categories
        id_update.seek(0) # reset file position (read from beginning)
        sub_info = { sub_id:script for sub_id,script, *_ in [line.strip('\n').split('\t') for line in id_update.readlines()] } # task job ids & scripts
        sub_ids = ",".join(sub_info.keys()) # task job ids
        headers, *sacct_info = [ line.split('|') for line in CAPTURE(f'sacct -p -j {sub_ids}').split('\n') ] # slurm accounting data
        for info in sacct_info:
            sub_id, step, state = [ info[headers.index(column)].strip('+') for column in ['JobID','JobName','State'] ]
            if not sub_id.endswith(('batch','extern')): categories[failed if not state in non_fails else state].add(sub_info[sub_id]) # store task state
        # Relies on dict insertion order matching the `states` list above.
        pending, running, completed, cancelled, failed = categories.values()
        problems = failed.difference( set().union(pending, running, completed) ) # failed but not running (i.e. re-submitted)
        if pending: print(f'\n{len(pending)} task{"s" if len(pending) > 1 else ""} pending.')
        if running: print(f'\n{len(running)} task{"s" if len(running) > 1 else ""} running.')
        if (pending or running) and not problems: print('\nNo problems identified for current tasks.')
        if completed and not problems and not pending and not running: total = len(completed); print(f'\nAll {total} tasks have completed.')
        if problems:
            print('\nIt seem\'s that there were problems with the following tasks that havn\'t been dealt with yet:\n')
            [ print(os.path.basename(task)) for task in sorted(problems) ]
        proceed = False
        if problems:
            while proceed is False:
                response = input('\nwould you like to repeat these tasks now? (y/n): ') # instruct task resubmission
                proceed, repeat = response in ['y','n'], response == 'y' # interpret instructions
                if repeat is True:
                    # NOTE(review): `problems` holds plain script strings; unpacking
                    # `script_file,*_` from a string binds its first character — verify.
                    for script_file,*_ in sorted(problems):
                        resub_id = CAPTURE(f'sbatch {script_file}') # resubmit task
                        print(resub_id, script_file, '(RESUBMITTED)', sep='\t', file=id_update) # record resubmitted id
    print('\nREVIEW COMPLETE\n')
| mattheatley/sra_download | core.py | core.py | py | 4,087 | python | en | code | 0 | github-code | 36 |
22996305248 |
class Solution:
    """LeetCode 563 — tilt of a binary tree: the sum over all nodes of
    |sum(left subtree) - sum(right subtree)|."""

    def findTilt(self, root):
        """Return the total tilt of the tree rooted at *root* (None -> 0).

        FIX: single post-order traversal, O(n) time / O(h) stack, replacing
        the previous approach that recomputed every subtree sum from scratch
        for each node (O(n^2)).
        """
        self._total_tilt = 0
        self._subtree_sum(root)
        return self._total_tilt

    def _subtree_sum(self, node):
        """Post-order helper: accumulate the node's tilt into self._total_tilt
        and return the sum of values in the subtree rooted at *node*."""
        if node is None:
            return 0
        left = self._subtree_sum(node.left)
        right = self._subtree_sum(node.right)
        self._total_tilt += abs(left - right)
        return left + right + node.val

    def bfs_sum(self, node):
        """Return the sum of all node values in the subtree at *node*
        (kept for backward compatibility; iterative traversal)."""
        res = 0
        stack = [node]
        while stack:
            curr = stack.pop()
            if curr is None:
                continue
            res += curr.val
            stack.append(curr.left)
            stack.append(curr.right)
        return res
| xmu-ggx/leetcode | 563.py | 563.py | py | 923 | python | en | code | 0 | github-code | 36 |
26672929841 | #!/usr/bin/env python
import argparse
import itertools
import pandas as pd
def getCmdArgs():
    """Parse command-line arguments.

    Returns the argparse namespace with scn_list (str, required),
    nlead (int), ntrail (int) and output (str, required).
    FIX: --lead/--trail now declare type=int; previously values given on the
    command line arrived as strings and broke the row arithmetic downstream.
    """
    p = argparse.ArgumentParser(description="Add scenes in neighboring rows of scenes in a given scene ID list.")
    p.add_argument("-l", "--list", dest="scn_list", required=True, default=None, metavar="CSV_OF_SCENE_LIST", help="A CSV file of scene list. It must have at least the first column as the list of scene IDs (e.g. LC80010042015211LGN01)")
    p.add_argument("--lead", dest="nlead", required=False, type=int, default=1, metavar="NUM_OF_LEADING_ROWS_TO_BUFFER", help="Number of leading rows to buffer from a scene, e.g. 2 leading rows of path=18,row=30 will add two path/row pairs, (1) path=18,row=31, (2) path=18,row=32.")
    p.add_argument("--trail", dest="ntrail", required=False, type=int, default=1, metavar="NUM_OF_TRAILING_ROWS_TO_BUFFER", help="Number of trailing rows to buffer from a scene, e.g. 2 leading rows of path=18,row=30 will add two path/row pairs, (1) path=18,row=29, (2) path=18,row=28.")
    p.add_argument("-o", "--output", dest="output", required=True, default=None, metavar="OUTPUT_PRD_LIST", help="Name of output CSV file of the list of path,row,start_date,end_date, of the scenes after row buffering.")
    cmdargs = p.parse_args()
    return cmdargs
def scnIdToPathRowDay(scn_id):
    """Split a Landsat scene ID such as 'LC80010042015211LGN01' into
    (path, row, year, day-of-year), all as ints."""
    path, row = int(scn_id[3:6]), int(scn_id[6:9])
    year, doy = int(scn_id[9:13]), int(scn_id[13:16])
    return path, row, year, doy
def main(cmdargs):
    """Buffer each scene's WRS row by the requested leading/trailing rows and
    write a CSV of path,row,start_date,end_date.

    Any (path,row,year,doy) combination that appears more than once after
    buffering is dropped entirely (drop_duplicates keep=False), matching the
    original behaviour.

    FIXES for Python 3: ``range(...).remove(0)`` (range objects are immutable)
    is replaced by a list comprehension, and the removed ``itertools.izip``
    by the builtin ``zip``.
    """
    scn_csv = cmdargs.scn_list
    nlead = cmdargs.nlead
    ntrail = cmdargs.ntrail
    prd_csv = cmdargs.output
    out_header = ["path", "row", "start_date", "end_date"]
    prd_header = ["path", "row", "year", "doy"]
    with open(prd_csv, "w") as out_fobj:
        out_fobj.write(",".join(out_header))
        out_fobj.write("\n")
        # First column of the scene list holds the scene IDs.
        scn_df = pd.read_csv(scn_csv, usecols=[0])
        scn_list = scn_df.iloc[:, 0].tolist()
        prd_list = zip(*[scnIdToPathRowDay(scn) for scn in scn_list])
        prd_dict = {nm: prd for nm, prd in zip(prd_header, prd_list)}
        prd_df = pd.DataFrame(prd_dict)
        # Row offsets to buffer with; 0 (the scene itself) is excluded.
        buf_row_add = [offset for offset in range(-1 * ntrail, nlead + 1) if offset != 0]
        buf_df_list = [prd_df.copy() for bra in buf_row_add]
        for bra, bd in zip(buf_row_add, buf_df_list):
            bd["row"] = bd["row"] + bra
        all_prd_df = pd.concat([prd_df] + buf_df_list, axis=0)
        all_prd_df = all_prd_df.drop_duplicates(prd_header, keep=False)
        all_prd_df = all_prd_df.sort_values(["year", "doy", "path", "row"])
        # Acquisition date from year + day-of-year; start and end are the same day.
        datestr = ["{0:04d}{1:03d}".format(getattr(row, "year"), getattr(row, "doy"))
                   for row in all_prd_df.itertuples()]
        all_prd_df[out_header[2]] = pd.to_datetime(datestr, format="%Y%j")
        all_prd_df[out_header[3]] = pd.to_datetime(datestr, format="%Y%j")
        all_prd_df.to_csv(out_fobj, header=False, index=False, columns=out_header,
                          mode="a", date_format="%Y-%m-%d")
# Script entry point: parse CLI arguments and run the row-buffering pipeline.
if __name__ == "__main__":
    cmdargs = getCmdArgs()
    main(cmdargs)
| zhanlilz/landsat-tools | landsat-albedo-pipeline/buffer_scene_list.py | buffer_scene_list.py | py | 3,074 | python | en | code | 3 | github-code | 36 |
32453645847 | from tkinter import *
import re
import ast
class ResizingCanvas(Canvas):
    """Canvas that tracks <Configure> events and rescales its contents.

    NOTE(review): the usage example below omits the required *root* positional
    argument of __init__ — verify the intended signature.

    Example usage:
        root = Tk()
        txt_panel = Text(root)
        txt_panel.pack(fill=BOTH, expand=YES)
        ResizingCanvas(txt_panel, width=850, height=400,
                       bg="red", highlightthickness=0)
        root.mainloop()
    """
    def __init__(self, text_panel, root, **kwargs):
        Canvas.__init__(self, text_panel, **kwargs)
        self.bind("<Configure>", self.on_resize)
        self.root = root
        # Start from the requested (not yet realized) widget dimensions.
        self.height = self.winfo_reqheight()
        self.width = self.winfo_reqwidth()
    def on_resize(self, event):
        """Adopt the new size and scale all canvas items.

        NOTE(review): the scale factor is a fixed 1.1 regardless of the actual
        size change — presumably it should derive from old/new dimensions.
        """
        self.width = event.width
        self.height = event.height
        self.config(width=self.width, height=self.height)
        self.scale("all", 0, 0, 1.1, 1.1)
class AutocompleteEntry(Text):
    """Text widget with a floating Listbox offering identifier completions.

    Candidate words are harvested by ast-parsing the current buffer contents
    (function names and variable names).

    NOTE(review): plain Tk Text widgets do not document a 'textvariable'
    option — verify that self["textvariable"] behaves as intended here.
    """
    def __init__(self, *args, **kwargs):
        Text.__init__(self, *args, **kwargs)
        self.lista = ''          # completion candidates (set once comparison() runs)
        self.var = self["textvariable"]
        if self.var == '':
            self.var = self["textvariable"] = StringVar()
        self.var.trace('w', self.changed) # Binds Writing to self.changed
        self.bind("<Right>", self.selection)
        self.bind("<Up>", self.up)
        self.bind("<Down>", self.down)
        self.lb_up = False       # True while the suggestion Listbox is shown
    def changed(self, name, index, mode):
        """Trace callback: refresh (or hide) the suggestion list on each edit.

        NOTE(review): the first branch touches self.lb before any Listbox has
        been created — would raise AttributeError on an initially empty word.
        """
        if self.var.get().split(' ')[-1] == '':
            self.lb.destroy()
            self.lb_up = False
        else:
            words = self.comparison()
            if words:
                if not self.lb_up:
                    # Lazily create the popup just below this widget.
                    self.lb = Listbox()
                    self.lb.bind("<Double-Button-1>", self.selection)
                    self.lb.bind("<Right>", self.selection)
                    self.lb.place(x=self.winfo_x(),
                                  y=self.winfo_y() + self.winfo_height())
                    self.lb_up = True
                self.lb.delete(0, END)
                for w in words:
                    self.lb.insert(END, w)
            else:
                if self.lb_up:
                    self.lb.destroy()
                    self.lb_up = False
    def selection(self, event):
        """Replace the last (partial) word with the highlighted suggestion."""
        if self.lb_up:
            # Re-assemble everything before the word currently being typed.
            already_written = ''
            for ind in range(len(self.var.get().split(' ')) - 1):
                already_written += self.var.get().split(' ')[ind] + ' '
            print(already_written)
            self.var.set(already_written + self.lb.get(ACTIVE))
            self.lb.destroy()
            self.lb_up = False
    # Change Cursor place
    def up(self, event):
        """Move the Listbox highlight one entry up."""
        if self.lb_up:
            if self.lb.curselection() == ():
                index = '0'
            else:
                index = self.lb.curselection()[0]
            if index != '0':
                self.lb.selection_clear(first=index)
                index = str(int(index) - 1)
                self.lb.selection_set(first=index)
                self.lb.activate(index)
    def down(self, event):
        """Move the Listbox highlight one entry down."""
        if self.lb_up:
            if self.lb.curselection() == ():
                index = '0'
            else:
                index = self.lb.curselection()[0]
            if index != END:
                self.lb.selection_clear(first=index)
                index = str(int(index) + 1)
                self.lb.selection_set(first=index)
                self.lb.activate(index)
    def comparison(self):
        """Return candidate identifiers matching the word being typed.

        Parses the buffer with ast to collect function and variable names;
        on a syntax error the previous candidate set is reused.
        """
        try:
            text = ast.parse(self.var.get())
            self.lista = set()
            for node in ast.walk(text):
                if isinstance(node, ast.FunctionDef):
                    self.lista.add(node.name)
                if isinstance(node, ast.Name):
                    self.lista.add(node.id)
        except Exception:
            print('Highlight the line for bad syntax')
        print(self.lista)
        pattern = re.compile('.*' + self.var.get().split(' ')[-1] + '.*')
        return [w for w in self.lista if re.match(pattern, w)]
# Manual demo: show an AutocompleteEntry in a bare Tk window.
# NOTE(review): grid() and pack() are both called on the same widget —
# mixing geometry managers on one parent is an error in Tk; verify.
if __name__ == '__main__':
    root = Tk()
    text = AutocompleteEntry(root)
    text.grid(row=0, column=0)
    ResizingCanvas(text, root, width=500, height=300)
    text.pack()
    # entry.pack()
    print(isinstance(text, Text))
    # Button(text='nothing').grid(row=1, column=0)
    # Button(text='nothing').grid(row=2, column=0)
    # Button(text='nothing').grid(row=3, column=0)
    root.mainloop()
| BrickText/BrickTextExperimental | autocomplete.py | autocomplete.py | py | 4,453 | python | en | code | 0 | github-code | 36 |
73368915303 | from __future__ import print_function
import numpy as np
np.random.seed(1337)
from itertools import product
from sklearn import cluster
from sklearn.externals import joblib
from keras.datasets import mnist
from sklearn.neighbors import KNeighborsClassifier
from scipy.misc import imresize
from keras.utils import np_utils
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
from sklearn import preprocessing
import matplotlib.pyplot as plt
import sys
EPS = 1e-9  # tiny constant guarding normalizations/divisions against zero
class IMG:
    """Trivial patch descriptor: the raw pixel values themselves."""
    def extract(self, img):
        """Return *img* flattened to a 1-D feature vector (a copy)."""
        return img.flatten()
def finite_derivatives(img):
    """Central-difference image gradients.

    Returns (mag, dx, dy): dx along axis 0 and dy along axis 1, both zero on
    the border rows/columns, and mag the per-pixel gradient magnitude.
    """
    dx = np.zeros(img.shape, dtype=np.double)
    dx[1:-1, :] = (img[2:, :] - img[:-2, :]) / 2.0
    dy = np.zeros(img.shape, dtype=np.double)
    dy[:, 1:-1] = (img[:, 2:] - img[:, :-2]) / 2.0
    mag = (dx ** 2 + dy ** 2) ** 0.5
    return mag, dx, dy
class HOG:
    """Histogram-of-oriented-gradients descriptor over a grid of cells."""
    def __init__(self, orientations=9, cell=(8, 8)):
        self.orientations = orientations  # number of angular histogram bins per cell
        self.cell = cell                  # cell size (rows, cols) in pixels
    def extract(self, img, mask=None):
        """Return the concatenated, L1-normalized per-cell orientation
        histograms of *img*, weighted by gradient magnitude (and *mask*).

        FIXES: ``mask is None`` instead of ``mask == None`` (the latter raises
        ValueError for ndarray masks because the comparison is elementwise),
        and floor division ``//`` for the cell-grid size so the code works
        identically under Python 2 and 3.
        """
        if len(img.shape) == 3:
            img = img[0]  # drop a leading channel axis
        if mask is None:
            mask = np.ones(shape=img.shape, dtype=img.dtype)
        mag, dx, dy = finite_derivatives(img)
        phase = np.arctan2(dy, dx)
        phase = phase.astype(np.float64)
        size = img.shape
        size = (size[0] // self.cell[0], size[1] // self.cell[1])
        # Per-pixel weights: mask times gradient magnitude, globally normalized.
        w = mask.astype(np.float64)
        w *= mag
        if np.sum(w) > EPS:
            w /= np.sum(w)
        ans = np.array([])
        for i, j in product(range(size[0]), range(size[1])):
            tl = (i * self.cell[0], j * self.cell[1])
            br = ((i + 1) * self.cell[0], (j + 1) * self.cell[1])
            roi = phase[tl[0]:br[0], tl[1]:br[1]]
            wroi = w[tl[0]:br[0], tl[1]:br[1]]
            hist, _ = np.histogram(roi, bins=self.orientations, range=(-np.pi, np.pi), weights=wroi, density=True)
            # An (almost) zero-weight cell yields a meaningless density; zero it out.
            if np.sum(wroi) < EPS:
                hist = np.zeros(hist.shape, dtype=hist.dtype)
            ans = np.hstack((ans, hist))
        ans /= (np.sum(ans) + EPS)
        return ans
class BOVW:
    """Bag-of-visual-words encoder.

    Local patches are described with *extractor*, quantized against a k-means
    codebook, and pooled into a fixed-length image descriptor: a 'hard'
    assignment histogram, or 'soft' similarity pooling over four spatial
    quadrants (descriptor length 4*k).
    """
    def __init__(self, extractor, k=10, size=(8, 8), pad=(1, 1), pool='hard', codebook_len=400000):
        self.k = k                        # codebook size (k-means centers)
        self.pad = pad                    # stride between sampled patches
        self.size = size                  # patch size (rows, cols)
        self.pool = pool                  # pooling mode: 'hard' or 'soft'
        self.codebook_len = codebook_len  # total patches sampled to train the codebook
        self.extractor = extractor        # per-patch descriptor (e.g. IMG or HOG)
        self.clusterer = cluster.KMeans(self.k, max_iter=20, n_init=1)
    def load(self, name):
        """Restore parameters, extractor and codebook saved by save()."""
        self.k, self.pad, self.size = joblib.load('{}_pms.pkl'.format(name))
        self.extractor = joblib.load('{}_ext.pkl'.format(name))
        self.clusterer = joblib.load('{}_clu.pkl'.format(name))
    def save(self, name):
        """Persist parameters, extractor and codebook to '<name>_*.pkl'.

        BUGFIX: the original called joblib.load here (which cannot write);
        joblib.dump is the serializing counterpart of load().
        """
        joblib.dump((self.k, self.pad, self.size), '{}_pms.pkl'.format(name))
        joblib.dump(self.extractor, '{}_ext.pkl'.format(name))
        joblib.dump(self.clusterer, '{}_clu.pkl'.format(name))
    def fit(self, X):
        """Learn the codebook from patches sampled at random positions of each image.

        FIXES: integer ceil-division with // (Python 3's / yields a float and
        breaks np.zeros), and astype(int) instead of the removed np.int alias.
        """
        assert len(X) > 0
        assert self.codebook_len > len(X)
        # temporal assert
        self.samples_per_image = (self.codebook_len + len(X) - 1) // len(X)
        print("Samples per image {}".format(self.samples_per_image))
        test = X[0]
        if len(test.shape) == 3:
            test = test[0]
        v_len = len(self.extractor.extract(test[0:self.size[0], 0:self.size[1]]))
        V = np.zeros(shape=(len(X) * self.samples_per_image, v_len), dtype='float32')
        it = 0
        for img in X:
            assert len(img) == 1
            if len(img.shape) == 3:
                img = img[0]
            coords = np.random.uniform(low=0, high=test.shape[0] - self.size[0], size=(self.samples_per_image, 2)).astype(int)
            for i, j in coords:
                V[it] = self.extractor.extract(img[i:i + self.size[0], j:j + self.size[1]])
                it += 1
        assert len(V) == it
        self.clusterer.fit(V)
    def transform(self, X):
        """Encode each image in X into its pooled BOVW descriptor."""
        assert len(X) > 0
        test = X[0]
        if len(test.shape) == 3:
            test = test[0]
        # Dense patch grid shared by all images (assumes uniform image size).
        xr = np.arange(0, test.shape[0] - self.size[0], self.pad[0])
        yr = np.arange(0, test.shape[1] - self.size[1], self.pad[1])
        coords = list(product(xr, yr))
        xr_len = len(xr)
        yr_len = len(yr)
        print('size {}, {} len {}'.format(test.shape, yr, yr_len))
        v_len = len(self.extractor.extract(test[0:self.size[0], 0:self.size[1]]))
        ft = np.zeros(shape=(len(coords), v_len), dtype='float32')
        if self.pool == 'hard':
            V = np.zeros(shape=(len(X), self.k), dtype='float32')
        elif self.pool == 'soft':
            # Four spatial quadrants, one k-sized similarity block each.
            V = np.zeros(shape=(len(X), 4 * self.k), dtype='float32')
        else:
            raise Exception("Undefined pooling mode: {}".format(self.pool))
        C = self.clusterer.cluster_centers_
        zeros = np.zeros(shape=(len(C),), dtype=C.dtype)
        for k in range(len(X)):
            img = X[k]
            if len(img.shape) == 3:
                img = img[0]
            it = 0
            for i, j in coords:
                ft[it] = self.extractor.extract(img[i:i + self.size[0], j:j + self.size[1]])
                it += 1
            assert len(ft) == it
            if self.pool == 'hard':
                idx = self.clusterer.predict(ft)
                V[k], _ = np.histogram(idx, bins=self.k, range=(0, self.k))
            elif self.pool == 'soft':
                it2 = 0
                for i, j in coords:
                    # Quadrant of the patch: bit 2 for bottom half, bit 1 for right half.
                    index = 0
                    if i > ((test.shape[0] - self.size[0]) / 2):
                        index += 2
                    if j > ((test.shape[1] - self.size[1]) / 2):
                        index += 1
                    # Above-average similarity to each center (distance flipped around its mean).
                    S = np.linalg.norm(C - ft[it2], axis=1)
                    S = np.mean(S) - S
                    V[k][index * self.k:(index + 1) * self.k] += np.max([S, zeros], axis=0)
                    it2 += 1
            else:
                raise Exception("Undefined pooling mode: {}".format(self.pool))
        print("V shape {}".format(V.shape))
        return V
    def fit_transform(self, X):
        """fit() the codebook, then transform() the same images."""
        self.fit(X)
        return self.transform(X)
def create_model(config):
    """Return a preconfigured BOVW pipeline for a named setup, or None for an
    unknown *config* string."""
    if config == 'img':
        return BOVW(IMG(), k=10, size=(15, 15), pad=(4, 4), pool='soft', codebook_len=60000)
    if config == 'hog':
        return BOVW(HOG(cell=(5, 5)), k=600, size=(15, 15), pad=(1, 1), pool='soft')
    if config == 'img-hard':
        return BOVW(IMG(), k=600, size=(15, 15), pad=(1, 1), pool='hard')
    if config == 'img-soft':
        return BOVW(IMG(), k=600, size=(15, 15), pad=(1, 1), pool='soft')
    if config == 'hog-hard':
        return BOVW(HOG(cell=(5, 5)), k=600, size=(15, 15), pad=(1, 1), pool='hard')
    # The remaining configs are the soft-pooled HOG family, varying only in k.
    soft_codebook_sizes = {
        'hog-soft-16': 16, 'hog-soft-32': 32, 'hog-soft-64': 64,
        'hog-soft-128': 128, 'hog-soft-256': 256, 'hog-soft-512': 512,
        'hog-soft-512-norm': 512, 'hog-soft-1024': 1024,
    }
    if config in soft_codebook_sizes:
        return BOVW(HOG(cell=(5, 5)), k=soft_codebook_sizes[config], size=(15, 15), pad=(1, 1), pool='soft')
    return None
def load_mnist(img_cols, img_rows, nb_classes):
    """Load MNIST, resize every image to (img_rows, img_cols), reshape to
    (N, 1, rows, cols) and scale pixels to [0, 1].

    Returns ((X_train, y_train), (X_test, y_test)) with integer labels;
    NOTE: nb_classes is currently unused because the one-hot conversion
    below is commented out.
    """
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    tmp = []
    for x in X_train:
        tmp.append(imresize(x, (img_rows, img_cols)))
    X_train = np.array(tmp)
    tmp = []
    for x in X_test:
        tmp.append(imresize(x, (img_rows, img_cols)))
    X_test = np.array(tmp)
    print("shapes {} {}".format(X_train.shape, X_test.shape))
    # Add a singleton channel axis (Theano-style NCHW layout).
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')
    #Y_train = np_utils.to_categorical(y_train, nb_classes)
    #Y_test = np_utils.to_categorical(y_test, nb_classes)
    return (X_train, y_train), (X_test, y_test)
def extract_IMG(X):
    """Stack the raw-pixel descriptor of every image in X into a float32 matrix
    of shape (len(X), descriptor_length)."""
    assert len(X) != 0
    ext = IMG()
    feat_len = len(ext.extract(X[0]))
    V = np.zeros(shape=(len(X), feat_len), dtype="float32")
    for idx, img in enumerate(X):
        V[idx] = ext.extract(img)
    return V
def extract_HOG(X):
    """Stack the default-parameter HOG descriptor of every image in X into a
    float32 matrix of shape (len(X), descriptor_length)."""
    assert len(X) != 0
    ext = HOG()
    feat_len = len(ext.extract(X[0]))
    V = np.zeros(shape=(len(X), feat_len), dtype="float32")
    for idx, img in enumerate(X):
        V[idx] = ext.extract(img)
    return V
# Experiment driver: encode resized MNIST with a soft-pooled BOVW over raw
# patches and score the codes with a 5-NN classifier.  The triple-quoted
# blocks are disabled alternative feature/classifier experiments.
if __name__ == '__main__':
    bias = True
    batch_size = 100
    nb_epoch = 1
    nb_classes = 10
    img_rows, img_cols = 36, 36
    (X_train, Y_train), (X_test, Y_test) = load_mnist(img_rows, img_cols, nb_classes)
    print("X shape {}".format(X_train.shape))
    X_train_small = X_train#X_train[range(10000)]
    Y_train_small = Y_train#Y_train[range(10000)]
    print("X shape {}".format(X_train_small.shape))
    #bow = BOVW(HOG(cell=(5,5)), k=600, size=(15, 15), pad=(1,1), pool='hard')
    bow = BOVW(IMG(), k=600, size=(15, 15), pad=(1, 1), pool='soft')
    print("BOVW fit transform ...")
    V_train = bow.fit_transform(X_train_small)
    print("BOVW transform ...")
    V_test = bow.transform(X_test)
    '''
    # 32x32 feature vector, 0.9498
    V_train = extract_IMG(X_train_small)
    V_test = extract_IMG(X_test)
    '''
    '''
    # feature vector, 0.9498
    V_train = extract_HOG(X_train_small)
    V_test = extract_HOG(X_test)
    '''
    # BOVW 0.9488
    clf = KNeighborsClassifier(5)
    print("clf fit ...")
    clf.fit(V_train, Y_train_small)
    print("clf predict ...")
    Y_pred = clf.predict(V_test)
    print("Y test: {}".format(Y_test))
    print("Y pred: {}".format(Y_pred))
    acc = np.mean(Y_test == Y_pred)
    print("Accuracy: {}".format(acc))
    '''
    clf = SVC(kernel='rbf')
    parameters = {'C':10. ** np.arange(-3,3), 'gamma':2. ** np.arange(-5, 1)}
    grid = GridSearchCV(clf, parameters, cv=StratifiedKFold(Y_train_small, 5), verbose=3, n_jobs=-1)
    grid.fit(V_train, Y_train_small)
    print("predicting")
    print("score: {}".format(grid.score(X_test, y_test)))
    print(grid.best_estimator_)
    '''
def test_svc_hp(X_train, Y_train, X_test, Y_test):
    """Sweep the RBF-SVC regularization parameter C over 10^-3 .. 10^2 and
    print the test accuracy for each setting.

    BUGFIX: the original body ignored its parameters and read the module
    globals V_train / Y_train_small / V_test from the __main__ block; it now
    uses the arguments it is given.
    """
    for exponent in range(-3, 3):
        c = 10 ** exponent
        clf = SVC(kernel='rbf', C=c)
        print("C = {}, clf fit ...".format(c))
        clf.fit(X_train, Y_train)
        print("clf predict ...")
        Y_pred = clf.predict(X_test)
        print("Y test: {}".format(Y_test))
        print("Y pred: {}".format(Y_pred))
        acc = np.mean(Y_test == Y_pred)
        print("Accuracy: {}".format(acc))
#'''
| jmendozais/lung-nodule-detection | bovw.py | bovw.py | py | 12,019 | python | en | code | 11 | github-code | 36 |
75127774184 | import sys
from cravat import BaseAnnotator
from cravat import InvalidData
import sqlite3
import os
class CravatAnnotator(BaseAnnotator):
    """open-cravat annotator that looks up OMIM ids for variants in the
    bundled 'omim' sqlite table."""
    def setup(self):
        """Collect the chromosomes present in the data into self.supported_chroms."""
        self.cursor.execute('select distinct chr from omim where chr not null;')
        chroms = {r[0] for r in self.cursor}
        if hasattr(self, 'supported_chroms'):
            self.supported_chroms |= chroms
        else:
            self.supported_chroms = chroms
    def annotate(self, input_data, secondary_data=None):
        """Return {'omim_id': ...} for the variant in input_data, or None if absent.

        FIX: uses a parameterized query; the previous string-formatted SQL
        broke on values containing quotes and was injectable.
        """
        q = ('select omim_id from omim'
             ' where chr = ? and pos = ? and ref = ? and alt = ?')
        params = (input_data["chrom"], int(input_data["pos"]),
                  input_data["ref_base"], input_data["alt_base"])
        self.cursor.execute(q, params)
        row = self.cursor.fetchone()
        if row is None:
            return None
        # Multiple ids are '|'-separated in the table; present them readably.
        return {'omim_id': str(row[0]).replace('|', '; ')}
    def cleanup(self):
        """No per-run resources to release."""
        pass
if __name__ == '__main__':
annotator = CravatAnnotator(sys.argv)
annotator.run() | KarchinLab/open-cravat-modules-karchinlab | annotators/omim/omim.py | omim.py | py | 1,137 | python | en | code | 1 | github-code | 36 |
1904857127 | from scipy import linalg
import numpy as np
import scipy.optimize as sopt
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from scipy.optimize import LinearConstraint
from matplotlib import cm
import tabulate
import math
START_POINT = [2,2]
# fun - the main objective function to minimize
def fun(x: np.ndarray) -> np.float64:
    """Objective: squared Euclidean distance of x from the point (2, 1)."""
    dx = x[0] - 2
    dy = x[1] - 1
    return dx ** 2 + dy ** 2
# hfun - represents an "= 0" (equality) constraint function
def hfun(x: np.ndarray) -> np.float64:
    """Equality constraint h(x) = 0: the line x0 - 2*x1 + 1 = 0."""
    x0, x1 = x[0], x[1]
    return x0 - 2 * x1 + 1
# gfun - represents a ">= 0" (inequality) constraint function
def gfun(x: np.ndarray) -> np.float64:
    """Inequality constraint g(x) >= 0: interior of the ellipse
    x0**2/4 + x1**2 <= 1."""
    x0, x1 = x[0], x[1]
    return -0.25 * x0 ** 2 - x1 ** 2 + 1
def gplusfun(g, x, u, c):
    """The "g+" term of the augmented Lagrangian: the constraint value g(x)
    capped from above by u / c."""
    cap = np.divide(u, c)
    return min(g(x), cap)
def modified_lagrange_method(fun, points, epsx, g_constarins, h_constrains, u=(0,), a=(0,), c=0.1, beta=2, counter=0, func_counter=0, _callback=None):
    """Minimize ``fun`` subject to constraints with the augmented Lagrangian
    (modified Lagrange / method-of-multipliers) scheme.

    Arguments:
        fun {Callable} -- objective function of x
        points {[ndarray]} -- list of iterates; points[-1] is the start point
        epsx {float} -- stop when both coordinates move less than epsx
        g_constarins {[Callable]} -- inequality constraints, g(x) >= 0
        h_constrains {[Callable]} -- equality constraints, h(x) = 0

    Keyword Arguments:
        u {sequence} -- Lagrange multipliers for the inequalities, same
            length as g_constarins (default: (0,))
        a {sequence} -- Lagrange multipliers for the equalities, same
            length as h_constrains (default: (0,))
        c {float} -- penalty factor (default: 0.1)
        beta {float} -- growth rate of the penalty factor, must be in [2; 26]
        counter / func_counter -- accumulated iteration / f-evaluation counts
        _callback -- called once per outer iteration with a dict of the
            intermediate values x, u, a, c, f(x), L(x,u,a,c), iter, fiter

    Returns:
        The list of iterates, ending with the found minimizer.
    """
    def lagrange(x):
        # Augmented Lagrangian: objective minus multiplier terms plus
        # quadratic penalties for whichever constraint families are present.
        if len(g_constarins) != 0 and len(h_constrains) != 0:
            # Fixed: the original tested len(g_constarins) twice here, so this
            # branch also ran when h_constrains was empty (numerically
            # harmless -- the empty sums contribute 0 -- but misleading).
            array_of_constrains_g = np.array(
                [gplusfun(g_constrain, x, u_i, c) for g_constrain, u_i in zip(g_constarins, u)])
            array_of_constrains_h = np.array(
                [h_constrain(x) for h_constrain in h_constrains])
            return fun(x) - sum([u_i * g for u_i, g in zip(u, array_of_constrains_g)]) + 0.5*sum(c * array_of_constrains_g**2) - sum([a_i * g for a_i, g in zip(a, array_of_constrains_h)]) + 0.5*sum(c * array_of_constrains_h**2)
        elif len(h_constrains) != 0 and len(g_constarins) == 0:
            array_of_constrains_h = np.array(
                [h_constrain(x) for h_constrain in h_constrains])
            return fun(x) - sum([a_i * h for a_i, h in zip(a, array_of_constrains_h)]) + 0.5*sum(c * array_of_constrains_h**2)
        elif len(h_constrains) == 0 and len(g_constarins) != 0:
            array_of_constrains_g = np.array(
                [gplusfun(g_constrain, x, u_i, c) for g_constrain, u_i in zip(g_constarins, u)])
            return fun(x) - sum([u_i * g for u_i, g in zip(u, array_of_constrains_g)]) + 0.5*sum(c * array_of_constrains_g**2)
        else:
            return fun(x)
    if _callback is not None:
        _callback({"x": points[-1], "u": u, "a": a, "c": c, "f": fun(points[-1]), "L": lagrange(points[-1]), "iter": counter, "fiter": func_counter})
    # BFGS proved the fastest / most reliable unconstrained inner solver here.
    res = sopt.minimize(
        lagrange, x0=points[-1], method='BFGS')
    next_val = res.x
    func_counter = func_counter + res.nfev
    counter = counter + res.nit
    points.append(next_val)
    # Multiplier updates: u is clipped at zero (inequalities), a is free.
    u = np.array([max(0, (u_i - c*g_constrain(next_val)))
                  for g_constrain, u_i in zip(g_constarins, u)])
    a = np.array([a_i-c*h_constrain(next_val)
                  for h_constrain, a_i in zip(h_constrains, a)])
    c = beta*c
    counter = counter+1
    # Stop when the iterate barely moved in both coordinates; else recurse
    # with the updated multipliers and the grown penalty factor.
    if abs(next_val - points[-2])[0] < epsx and abs(next_val - points[-2])[1] < epsx:
        return points
    else:
        return modified_lagrange_method(fun, points, epsx, g_constarins, h_constrains, u, a, c, beta, counter, func_counter, _callback)
def filter_zLim(X, Y, Z, zlim, fill=4):
    """Clamp out-of-range Z values for plotting.

    Every entry of Z outside the closed interval [zlim[0], zlim[1]] is
    replaced in place by ``fill``.  X and Y are passed through untouched.

    Arguments:
        X, Y: mesh coordinate arrays, returned unchanged.
        Z: 2-D nested sequence of values; modified in place.
        zlim: (zmin, zmax) pair of allowed values.
        fill: replacement for out-of-range entries (default 4, matching the
            z-axis ceiling used by the 3-D plot in this script).

    Returns:
        The (X, Y, Z) triple, with Z filtered.
    """
    zmin, zmax = zlim[0], zlim[1]
    for i in range(len(Z)):
        # Use each row's own length so non-square grids work too (the
        # original used len(Z) for both axes).
        for j in range(len(Z[i])):
            if Z[i][j] > zmax or Z[i][j] < zmin:
                Z[i][j] = fill
    return X, Y, Z
def printDecorator(f, res):
    """Wrap ``f`` so every argument it is called with is recorded in ``res``
    before the call is forwarded."""
    def wrapper(x):
        res.append(x)
        return f(x)
    return wrapper
def trunc(number, digits) -> float:
    """Truncate ``number`` toward zero at ``digits`` decimal places."""
    scale = 10.0 ** digits
    return math.trunc(number * scale) / scale
def plotting():
    """Run the constrained optimization from START_POINT, print an iteration
    table, and draw the objective surface / contours with the iterates.

    Left subplot: 3-D wireframe of fun with the g=0 / h=0 level curves.
    Right subplot: 2-D contour view with annotated objective values.
    """
    xs = []
    results_list = []
    # results_list collects the full callback dicts (via printDecorator);
    # xs receives only the x iterates and is not used further below.
    f = lambda x: xs.append(x["x"])
    callback = printDecorator(f, results_list)
    #Adjust plotting scale here where x in [a1,b1] and y in [a2,b2] [a1: b1: 20j, a2: b2: 20j]
    X, Y = np.mgrid[2.4:0:20j, 2.5:-1.5:20j]
    Z = fun(np.array([X, Y]))
    fig = plt.figure()
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    ax.set_xlim((0, 3))
    ax.set_ylim((-1.5, 3))
    ax.set_zlim((0, 4))
    ax.plot_wireframe(X, Y, Z)
    # Level-zero curves of the constraints: blue = inequality, lime = equality.
    ax.contour(X, Y, gfun(np.array([X, Y])), levels=[0], colors='blue')
    ax.contour(X, Y, hfun(np.array([X, Y])), levels=[0], colors='lime')
    #Put list of constrains here, for my case its one constrain g(x) and one h(x)
    g_constarins = np.array([gfun])
    h_constrains = np.array([hfun])
    vals = modified_lagrange_method(fun, list([START_POINT, ]), 1e-6,
                                    g_constarins, h_constrains, _callback=callback)
    #Print Results Table: first 11 iterations plus the final one.
    header = results_list[0].keys()
    rows = [x.values() for x in results_list[0:11] + [results_list[-1]]]
    print(tabulate.tabulate(rows, header, tablefmt='grid'))
    ax.plot(np.array(vals).T[0], np.array(vals).T[1], np.array(
        list(map(fun, np.array(vals)))).T, "x-", color='red')
    ax1 = fig.add_subplot(1, 2, 2)
    #Adjust plotting scale here where x in [a1,b1] and y in [a2,b2] [a1: b1: 20j, a2: b2: 20j]
    X, Y = np.mgrid[3: 0: 20j, 2.3: -1.5: 20j]
    Z = fun(np.array([X, Y]))
    ax1.contour(X, Y, Z, levels=40)
    # Annotate an iterate only when the objective moved noticeably (> 1e-2)
    # since the last annotation, to avoid overlapping labels.
    t = 0
    for x in zip(np.array(vals).T[0], np.array(vals).T[1]):
        if abs(fun(x) - t) > 1e-2:
            ax1.annotate(trunc(fun(x),3), (x[0], x[1]))
            t = fun(x)
    ax1.plot(np.array(vals).T[0], np.array(vals).T[1], "x-", color='red')
    for idx, g_constr in enumerate(g_constarins):
        ax1.clabel(ax1.contour(X, Y, g_constr(np.array([X, Y])), levels=[0], colors='blue'), fmt=f"g{idx}(x)", fontsize=10)
    for idx, h_constr in enumerate(h_constrains):
        ax1.clabel(ax1.contour(X, Y, h_constr(np.array([X, Y])), levels=[0], colors='lime'), fmt=f"h{idx}(x)", fontsize=10)
    plt.show()
# Script entry point: run the demo when this module is executed.
plotting()
| BON4/FuncOptimization | constarint_optimization.py | constarint_optimization.py | py | 6,464 | python | en | code | 0 | github-code | 36 |
23006702098 |
import torch
import numpy as np
from torch._C import dtype
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
class ImageAugmentation():
    """Feature-level augmentations for (batch, grids, dim) detection tokens.

    Each public method applies its augmentation with probability ``p`` and
    returns the (possibly modified) feature tensor.  Methods that need a
    spatial layout reshape the first 49 tokens into a square map
    (NOTE(review): the 49-token / 7x7 assumption must match the producing
    backbone's grid size -- confirm if the backbone changes).

    Bug fix: every application gate now draws ``np.random.rand() < p`` so
    that ``p`` is a real probability.  Seven methods previously drew
    ``np.random.randn() < p`` (a standard-normal sample), which applied the
    augmentation ~58% of the time for p=0.2.
    """
    def __init__(self):
        super().__init__()

    # ---- internal helpers -------------------------------------------------
    @staticmethod
    def _apply(p):
        """Return True with probability ``p``."""
        return np.random.rand() < p

    @staticmethod
    def _tokens_to_map(detections):
        """Reshape the first 49 tokens into a square feature map.

        Returns (fmap, bs, gs, dim) with fmap shaped (bs, dim, 7, 7).
        """
        use_feat = detections[:, :49, :]
        bs, gs, dim = use_feat.shape
        side = int(np.sqrt(gs))
        fmap = use_feat.view(bs, side, side, dim).permute(0, 3, 1, 2)
        return fmap, bs, gs, dim

    @staticmethod
    def _map_to_tokens(fmap, bs, dim):
        """Inverse of _tokens_to_map: (bs, dim, H, W) -> (bs, H*W, dim)."""
        return fmap.permute(0, 2, 3, 1).view(bs, -1, dim)

    # ---- augmentations ----------------------------------------------------
    def addPepperSaltNoise(self, detections, p=0.2, pn=0.05):
        """Overwrite ~2*pn of the tokens with constant max ("pepper") and
        min ("salt") vectors.  Mutates ``detections`` in place."""
        feat = detections
        if self._apply(p):
            bs, grids, dim = detections.shape
            pepper = torch.full((dim,), detections.max().item())
            salt = torch.full((dim,), detections.min().item())
            for filler in (pepper, salt):
                for _ in range(int(bs * grids * pn)):
                    row = np.random.randint(bs)
                    col = np.random.randint(grids)
                    feat[row][col] = filler
        return feat

    def addGaussianNoise(self, detections, p=0.2, mean=0, var=0.0001):
        """Add Gaussian noise in the normalized [0, 1] domain, then rescale.

        Always returns a float32 tensor (the cast happens even when the
        augmentation is not applied, matching the original behaviour).
        """
        feat = detections
        if self._apply(p):
            maxnum = detections.max().item()
            normed = detections / maxnum
            noise = torch.from_numpy(
                np.random.normal(mean, var ** 0.5, detections.shape))
            feat = torch.clamp(normed + noise, 0, 1) * maxnum
        return feat.to(torch.float32)

    def resizePool(self, detections, p=0.2, poolsize=2, stride=2):
        """Blur spatial detail: max-pool the token map, then upsample back."""
        feat = detections
        if self._apply(p):
            fmap, bs, gs, dim = self._tokens_to_map(detections)
            pooled = nn.MaxPool2d(poolsize, stride=stride)(fmap)
            side = int(np.sqrt(gs))
            upsampled = F.interpolate(pooled, size=[side, side])
            feat = self._map_to_tokens(upsampled, bs, dim)
        return feat

    def randomCrop(self, detections, p=0.2, cropsize=5):
        """Crop a random cropsize x cropsize window of the token map and
        upsample it back to the full grid."""
        feat = detections
        if self._apply(p):
            fmap, bs, gs, dim = self._tokens_to_map(detections)
            side = int(np.sqrt(gs))
            start_range = side - cropsize
            top = np.random.randint(start_range)
            left = np.random.randint(start_range)
            window = fmap[:, :, top:top + cropsize, left:left + cropsize]
            upsampled = F.interpolate(window, size=[side, side])
            feat = self._map_to_tokens(upsampled, bs, dim)
        return feat

    def randomHorizontalFlip(self, detections, p=0.2):
        """Mirror the token map left-right."""
        feat = detections
        if self._apply(p):
            fmap, bs, gs, dim = self._tokens_to_map(detections)
            flipped = transforms.RandomHorizontalFlip(p=1)(fmap)
            feat = self._map_to_tokens(flipped, bs, dim)
        return feat

    def randomVerticalFlip(self, detections, p=0.2):
        """Mirror the token map top-bottom."""
        feat = detections
        if self._apply(p):
            fmap, bs, gs, dim = self._tokens_to_map(detections)
            flipped = transforms.RandomVerticalFlip(p=1)(fmap)
            feat = self._map_to_tokens(flipped, bs, dim)
        return feat

    def randRotate(self, detections, p=0.5):
        """Rotate the token map by a random integer angle in [-30, 29]."""
        feat = detections
        if self._apply(p):
            fmap, bs, gs, dim = self._tokens_to_map(detections)
            degree = np.random.randint(60) - 30
            rotated = TF.rotate(fmap, degree)
            feat = self._map_to_tokens(rotated, bs, dim)
        return feat

    def channelShuffle(self, detections, p=0.2):
        """Randomly permute the feature channels of the token map."""
        feat = detections
        if self._apply(p):
            fmap, bs, gs, dim = self._tokens_to_map(detections)
            order = np.arange(dim)
            np.random.shuffle(order)
            feat = self._map_to_tokens(fmap[:, order, :, :], bs, dim)
        return feat

    def randMask(self, detections, p=0.3, pn=0.1):
        """Zero out a fraction ``pn`` of randomly chosen tokens, in place."""
        feat = detections
        if self._apply(p):
            bs, grids, dim = detections.shape
            zeros = torch.full((dim,), 0.0)
            for _ in range(int(bs * grids * pn)):
                row = np.random.randint(bs)
                col = np.random.randint(grids)
                feat[row][col] = zeros
        return feat

    def randnChooseOne4(self, detections):
        """Apply one of four spatial augmentations, chosen uniformly, with
        application probability 0.3."""
        augs = ['addPepperSaltNoise', 'resizePool', 'randomCrop', 'randRotate']
        chosen = augs[np.random.randint(len(augs))]
        return getattr(self, chosen)(detections, p=0.3)
| xmu-xiaoma666/SDATR | models/augmentation.py | augmentation.py | py | 6,969 | python | en | code | 18 | github-code | 36 |
15744632677 | import multiprocessing
import optparse
import os
import re
from error import InvalidProjectGroupsError
from error import NoSuchProjectError
from error import RepoExitError
from event_log import EventLog
import progress
# Are we generating man-pages?
GENERATE_MANPAGES = os.environ.get("_REPO_GENERATE_MANPAGES_") == " indeed! "
# Number of projects to submit to a single worker process at a time.
# This number represents a tradeoff between the overhead of IPC and finer
# grained opportunity for parallelism. This particular value was chosen by
# iterating through powers of two until the overall performance no longer
# improved. The performance of this batch size is not a function of the
# number of cores on the system.
WORKER_BATCH_SIZE = 32
# How many jobs to run in parallel by default? This assumes the jobs are
# largely I/O bound and do not hit the network.
DEFAULT_LOCAL_JOBS = min(os.cpu_count(), 8)
class UsageError(RepoExitError):
    """Exception thrown with invalid command usage.

    Raised by Command.Usage() right after printing the usage text; it
    carries no state beyond what RepoExitError provides.
    """
class Command:
"""Base class for any command line action in repo."""
# Singleton for all commands to track overall repo command execution and
# provide event summary to callers. Only used by sync subcommand currently.
#
# NB: This is being replaced by git trace2 events. See git_trace2_event_log.
event_log = EventLog()
# Whether this command is a "common" one, i.e. whether the user would
# commonly use it or it's a more uncommon command. This is used by the help
# command to show short-vs-full summaries.
COMMON = False
# Whether this command supports running in parallel. If greater than 0,
# it is the number of parallel jobs to default to.
PARALLEL_JOBS = None
# Whether this command supports Multi-manifest. If False, then main.py will
# iterate over the manifests and invoke the command once per (sub)manifest.
# This is only checked after calling ValidateOptions, so that partially
# migrated subcommands can set it to False.
MULTI_MANIFEST_SUPPORT = True
def __init__(
self,
repodir=None,
client=None,
manifest=None,
git_event_log=None,
outer_client=None,
outer_manifest=None,
):
self.repodir = repodir
self.client = client
self.outer_client = outer_client or client
self.manifest = manifest
self.git_event_log = git_event_log
self.outer_manifest = outer_manifest
# Cache for the OptionParser property.
self._optparse = None
    def WantPager(self, _opt):
        """Return True if this command's output should go through a pager.

        Base default is no pager; subclasses such as PagedCommand override.
        """
        return False
def ReadEnvironmentOptions(self, opts):
"""Set options from environment variables."""
env_options = self._RegisteredEnvironmentOptions()
for env_key, opt_key in env_options.items():
# Get the user-set option value if any
opt_value = getattr(opts, opt_key)
# If the value is set, it means the user has passed it as a command
# line option, and we should use that. Otherwise we can try to set
# it with the value from the corresponding environment variable.
if opt_value is not None:
continue
env_value = os.environ.get(env_key)
if env_value is not None:
setattr(opts, opt_key, env_value)
return opts
@property
def OptionParser(self):
if self._optparse is None:
try:
me = "repo %s" % self.NAME
usage = self.helpUsage.strip().replace("%prog", me)
except AttributeError:
usage = "repo %s" % self.NAME
epilog = (
"Run `repo help %s` to view the detailed manual." % self.NAME
)
self._optparse = optparse.OptionParser(usage=usage, epilog=epilog)
self._CommonOptions(self._optparse)
self._Options(self._optparse)
return self._optparse
def _CommonOptions(self, p, opt_v=True):
"""Initialize the option parser with common options.
These will show up for *all* subcommands, so use sparingly.
NB: Keep in sync with repo:InitParser().
"""
g = p.add_option_group("Logging options")
opts = ["-v"] if opt_v else []
g.add_option(
*opts,
"--verbose",
dest="output_mode",
action="store_true",
help="show all output",
)
g.add_option(
"-q",
"--quiet",
dest="output_mode",
action="store_false",
help="only show errors",
)
if self.PARALLEL_JOBS is not None:
default = "based on number of CPU cores"
if not GENERATE_MANPAGES:
# Only include active cpu count if we aren't generating man
# pages.
default = f"%default; {default}"
p.add_option(
"-j",
"--jobs",
type=int,
default=self.PARALLEL_JOBS,
help=f"number of jobs to run in parallel (default: {default})",
)
m = p.add_option_group("Multi-manifest options")
m.add_option(
"--outer-manifest",
action="store_true",
default=None,
help="operate starting at the outermost manifest",
)
m.add_option(
"--no-outer-manifest",
dest="outer_manifest",
action="store_false",
help="do not operate on outer manifests",
)
m.add_option(
"--this-manifest-only",
action="store_true",
default=None,
help="only operate on this (sub)manifest",
)
m.add_option(
"--no-this-manifest-only",
"--all-manifests",
dest="this_manifest_only",
action="store_false",
help="operate on this manifest and its submanifests",
)
    def _Options(self, p):
        """Initialize the option parser with subcommand-specific options.

        Intentionally a no-op here; subclasses override to add options.
        """
def _RegisteredEnvironmentOptions(self):
"""Get options that can be set from environment variables.
Return a dictionary mapping environment variable name
to option key name that it can override.
Example: {'REPO_MY_OPTION': 'my_option'}
Will allow the option with key value 'my_option' to be set
from the value in the environment variable named 'REPO_MY_OPTION'.
Note: This does not work properly for options that are explicitly
set to None by the user, or options that are defined with a
default value other than None.
"""
return {}
    def Usage(self):
        """Display usage and terminate."""
        self.OptionParser.print_usage()
        # Printing alone is not enough: raise so the invalid invocation is
        # signalled to the caller.
        raise UsageError()
def CommonValidateOptions(self, opt, args):
"""Validate common options."""
opt.quiet = opt.output_mode is False
opt.verbose = opt.output_mode is True
if opt.outer_manifest is None:
# By default, treat multi-manifest instances as a single manifest
# from the user's perspective.
opt.outer_manifest = True
    def ValidateOptions(self, opt, args):
        """Validate the user options & arguments before executing.

        This is meant to help break the code up into logical steps. Some tips:
        * Use self.OptionParser.error to display CLI related errors.
        * Adjust opt member defaults as makes sense.
        * Adjust the args list, but do so inplace so the caller sees updates.
        * Try to avoid updating self state. Leave that to Execute.
        """
        # Default implementation accepts everything; subclasses override.
    def Execute(self, opt, args):
        """Perform the action, after option parsing is complete."""
        # Abstract: every concrete subcommand must override this.
        raise NotImplementedError
@staticmethod
def ExecuteInParallel(
jobs, func, inputs, callback, output=None, ordered=False
):
"""Helper for managing parallel execution boiler plate.
For subcommands that can easily split their work up.
Args:
jobs: How many parallel processes to use.
func: The function to apply to each of the |inputs|. Usually a
functools.partial for wrapping additional arguments. It will be
run in a separate process, so it must be pickalable, so nested
functions won't work. Methods on the subcommand Command class
should work.
inputs: The list of items to process. Must be a list.
callback: The function to pass the results to for processing. It
will be executed in the main thread and process the results of
|func| as they become available. Thus it may be a local nested
function. Its return value is passed back directly. It takes
three arguments:
- The processing pool (or None with one job).
- The |output| argument.
- An iterator for the results.
output: An output manager. May be progress.Progess or
color.Coloring.
ordered: Whether the jobs should be processed in order.
Returns:
The |callback| function's results are returned.
"""
try:
# NB: Multiprocessing is heavy, so don't spin it up for one job.
if len(inputs) == 1 or jobs == 1:
return callback(None, output, (func(x) for x in inputs))
else:
with multiprocessing.Pool(jobs) as pool:
submit = pool.imap if ordered else pool.imap_unordered
return callback(
pool,
output,
submit(func, inputs, chunksize=WORKER_BATCH_SIZE),
)
finally:
if isinstance(output, progress.Progress):
output.end()
    def _ResetPathToProjectMap(self, projects):
        """(Re)build the worktree-path -> project map used by path lookups."""
        self._by_path = {p.worktree: p for p in projects}
    def _UpdatePathToProjectMap(self, project):
        """Register a single project in the worktree-path -> project map."""
        self._by_path[project.worktree] = project
def _GetProjectByPath(self, manifest, path):
project = None
if os.path.exists(path):
oldpath = None
while path and path != oldpath and path != manifest.topdir:
try:
project = self._by_path[path]
break
except KeyError:
oldpath = path
path = os.path.dirname(path)
if not project and path == manifest.topdir:
try:
project = self._by_path[path]
except KeyError:
pass
else:
try:
project = self._by_path[path]
except KeyError:
pass
return project
def GetProjects(
self,
args,
manifest=None,
groups="",
missing_ok=False,
submodules_ok=False,
all_manifests=False,
):
"""A list of projects that match the arguments.
Args:
args: a list of (case-insensitive) strings, projects to search for.
manifest: an XmlManifest, the manifest to use, or None for default.
groups: a string, the manifest groups in use.
missing_ok: a boolean, whether to allow missing projects.
submodules_ok: a boolean, whether to allow submodules.
all_manifests: a boolean, if True then all manifests and
submanifests are used. If False, then only the local
(sub)manifest is used.
Returns:
A list of matching Project instances.
"""
if all_manifests:
if not manifest:
manifest = self.manifest.outer_client
all_projects_list = manifest.all_projects
else:
if not manifest:
manifest = self.manifest
all_projects_list = manifest.projects
result = []
if not groups:
groups = manifest.GetGroupsStr()
groups = [x for x in re.split(r"[,\s]+", groups) if x]
if not args:
derived_projects = {}
for project in all_projects_list:
if submodules_ok or project.sync_s:
derived_projects.update(
(p.name, p) for p in project.GetDerivedSubprojects()
)
all_projects_list.extend(derived_projects.values())
for project in all_projects_list:
if (missing_ok or project.Exists) and project.MatchesGroups(
groups
):
result.append(project)
else:
self._ResetPathToProjectMap(all_projects_list)
for arg in args:
# We have to filter by manifest groups in case the requested
# project is checked out multiple times or differently based on
# them.
projects = [
project
for project in manifest.GetProjectsWithName(
arg, all_manifests=all_manifests
)
if project.MatchesGroups(groups)
]
if not projects:
path = os.path.abspath(arg).replace("\\", "/")
tree = manifest
if all_manifests:
# Look for the deepest matching submanifest.
for tree in reversed(list(manifest.all_manifests)):
if path.startswith(tree.topdir):
break
project = self._GetProjectByPath(tree, path)
# If it's not a derived project, update path->project
# mapping and search again, as arg might actually point to
# a derived subproject.
if (
project
and not project.Derived
and (submodules_ok or project.sync_s)
):
search_again = False
for subproject in project.GetDerivedSubprojects():
self._UpdatePathToProjectMap(subproject)
search_again = True
if search_again:
project = (
self._GetProjectByPath(manifest, path)
or project
)
if project:
projects = [project]
if not projects:
raise NoSuchProjectError(arg)
for project in projects:
if not missing_ok and not project.Exists:
raise NoSuchProjectError(
"%s (%s)"
% (arg, project.RelPath(local=not all_manifests))
)
if not project.MatchesGroups(groups):
raise InvalidProjectGroupsError(arg)
result.extend(projects)
def _getpath(x):
return x.relpath
result.sort(key=_getpath)
return result
    def FindProjects(self, args, inverse=False, all_manifests=False):
        """Find projects from command line arguments.

        Args:
            args: a list of (case-insensitive) strings, projects to search for.
            inverse: a boolean, if True, then projects not matching any |args|
                are returned.
            all_manifests: a boolean, if True then all manifests and
                submanifests are used.  If False, then only the local
                (sub)manifest is used.
        """
        result = []
        patterns = [re.compile(r"%s" % a, re.IGNORECASE) for a in args]
        for project in self.GetProjects("", all_manifests=all_manifests):
            # A project matches if any pattern hits its name or checkout path.
            paths = [project.name, project.RelPath(local=not all_manifests)]
            for pattern in patterns:
                match = any(pattern.search(x) for x in paths)
                if not inverse and match:
                    result.append(project)
                    break
                if inverse and match:
                    # In inverse mode a single hit disqualifies the project.
                    break
            else:
                # for/else: reached only when no pattern matched (no break),
                # which is exactly what inverse mode collects.
                if inverse:
                    result.append(project)
        result.sort(
            key=lambda project: (project.manifest.path_prefix, project.relpath)
        )
        return result
def ManifestList(self, opt):
"""Yields all of the manifests to traverse.
Args:
opt: The command options.
"""
top = self.outer_manifest
if not opt.outer_manifest or opt.this_manifest_only:
top = self.manifest
yield top
if not opt.this_manifest_only:
yield from top.all_children
class InteractiveCommand(Command):
    """Command which requires user interaction on the tty and must not run
    within a pager, even if the user asks to.
    """

    def WantPager(self, _opt):
        # A pager would capture the tty this command needs for interaction.
        return False
class PagedCommand(Command):
    """Command which defaults to output in a pager, as its display tends to be
    larger than one screen full.
    """

    def WantPager(self, _opt):
        # Opt in to paging by default; output is typically long.
        return True
class MirrorSafeCommand:
    """Command permits itself to run within a mirror, and does not require a
    working directory.
    """
    # Marker mixin: carries no behavior of its own.
class GitcClientCommand:
    """Command that requires the local client to be a GITC client."""
    # Marker mixin: carries no behavior of its own.
| GerritCodeReview/git-repo | command.py | command.py | py | 17,769 | python | en | code | 267 | github-code | 36 |
74953743465 | """Unit tests for the config module."""
import os
import pytest
from wmtmetadata.config import Config, ConfigFromFile, ConfigFromHost
from wmtmetadata.host import HostInfo
from . import data_dir
tmp_dir = '/tmp'
sample_config_file = os.path.join(data_dir, 'wmt-config-siwenna.yaml')
host = 'siwenna.colorado.edu'
name = 'Hydrotrend'
fetched_config_file = 'wmt-config-{}.yaml'.format(host)
def test_config():
    """The base Config class can be instantiated."""
    assert isinstance(Config(), Config)
def test_configfromfile():
    """ConfigFromFile remembers the file it was constructed with."""
    cfg = ConfigFromFile(sample_config_file)
    assert cfg.filename == sample_config_file
def test_configfromfile_load():
    """Loading the sample file yields the expected component and host."""
    config = ConfigFromFile(sample_config_file)
    config.load()
    # Fixed: dict.keys() has no .pop() under Python 3 (it is a view object),
    # so the original `components.pop()` raised AttributeError.
    assert list(config.components) == [name]
    assert config.host['hostname'] == host
def test_configfromhost():
    """ConfigFromHost targets the requested host."""
    cfg = ConfigFromHost(host)
    assert cfg.executor.info['name'] == host
@pytest.mark.skip(reason="Don't abuse remote test machine")
def test_configfromhost_build():
    """Smoke test: build the configuration on the remote host.

    Skipped by default because it performs real work on a shared machine.
    """
    config = ConfigFromHost(host)
    config.build()
@pytest.mark.skip(reason="Don't abuse remote test machine")
def test_configfromhost_fetch():
    """Fetch the generated config file from the remote host into tmp_dir.

    Skipped by default because it touches a shared remote machine.
    """
    config = ConfigFromHost(host)
    config.fetch(local_dir=tmp_dir)
    assert os.path.isfile(os.path.join(tmp_dir, fetched_config_file))
@pytest.mark.skip(reason="Don't abuse remote test machine")
def test_configfromhost_load():
    """Round trip: build on the host, fetch, load, and check the components.

    Skipped by default because it touches a shared remote machine.
    """
    config = ConfigFromHost(host)
    config.build()
    config.fetch(local_dir=tmp_dir)
    config.load()
    # Fixed: dict.keys() has no .pop() under Python 3 (it is a view object),
    # so the original `components.pop()` raised AttributeError.
    assert list(config.components) == [name]
| csdms/wmt-metadata | wmtmetadata/tests/test_config.py | test_config.py | py | 1,593 | python | en | code | 0 | github-code | 36 |
17541630757 | import glob
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms.v2 as T
def set_seed(seed: int = 42):
    """Seed every RNG in use (python, numpy, torch, CUDA) and force
    deterministic cuDNN behaviour so runs are reproducible."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def remove_glob(pathname: str):
    """Delete every file and directory tree matching the glob pattern."""
    for path in glob.glob(pathname, recursive=True):
        if os.path.isdir(path):
            shutil.rmtree(path)
        elif os.path.isfile(path):
            os.remove(path)
class ImageTransform:
    """Image transformation module.

    Builds a torchvision v2 pipeline for either training ("train": random
    resized crop, a random multiple-of-90-degree rotation, horizontal flip,
    brightness jitter, ImageNet normalization) or evaluation ("test":
    resize, center crop, ImageNet normalization).

    NOTE(review): any other ``phase`` value leaves ``data_transform`` unset,
    so calling the instance would raise AttributeError -- confirm callers
    only pass "train"/"test".
    """
    def __init__(self, input_size=384, phase="train"):
        if phase == "train":
            self.data_transform = nn.Sequential(
                T.RandomResizedCrop(input_size, (0.25, 1.0), (3 / 4, 4 / 3)),
                # One of the four axis-aligned orientations, chosen uniformly.
                T.RandomChoice(
                    [
                        T.RandomRotation((0, 0)),
                        T.RandomRotation((90, 90)),
                        T.RandomRotation((180, 180)),
                        T.RandomRotation((270, 270)),
                    ],
                ),
                T.RandomHorizontalFlip(p=0.5),
                T.ColorJitter(brightness=0.5),
                # ImageNet channel statistics.
                T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            )
        elif phase == "test":
            self.data_transform = nn.Sequential(
                T.Resize(input_size),
                T.CenterCrop(input_size),
                T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            )
    def __call__(self, img):
        # Apply the configured pipeline to a (batched) image tensor.
        return self.data_transform(img)
| xkurozaru/fewshot-finetune-domain-adaptation | common/utils.py | utils.py | py | 1,761 | python | en | code | 0 | github-code | 36 |
27414823832 |
import logging
import os
from copy import deepcopy
from harmony.util import shortened_id
from harmony.repository_state import RepositoryState
logger = logging.getLogger(__name__)
def commit(local_location_id, working_directory, location_states, repository_state):
    """
    Scan the given working directory for changes and commit them to local
    state storage.
    That is, update location_states[local_location_id] with the
    new current file states (digests, "who has what?").
    Also update repository_state info ("who made which content decision in what
    order?")
    Parameters:
    local_location_id:
        ID of the location that is considered local (i.e. the one that belongs
        to the working_directory instance)
    working_directory:
        WorkingDirectory instance representing the local working directory.
    location_states:
        LocationStates instance representing the local location state storage.
        Will (possibly) be modified.
    repository_state:
        RepositoryState instance representing the local repository state
        storage. Will (possibly) be modified.
    return:
        True iff any change was recorded.
    """
    id_ = local_location_id
    short_id = shortened_id(id_)
    # Consider both what is on disk now and what we previously knew about.
    paths = set(working_directory.get_filenames()) \
            | set(location_states.get_all_paths(id_))
    # 1. update location state
    #    - detect renames (add WIPE entries later for those)
    #    - when a file is *added* that is known to other locations w/
    #      different digest, let user confirm what he wants to do (see above)
    #    - increase local clock
    #
    # 2. update repository state
    #    - if file changed in step 1:
    #        clock = current clock for local + max for each other location
    #        hash = current local hash
    #        (deviate from this if user selected to do something else)
    #    - if file did not change:
    #        no change in hash or clock
    # Do all the file scanning up front so each file in the working directory
    # is scanned at most once.
    wd_states = {
        path: working_directory.generate_file_state(path)
        for path in paths
        if working_directory.file_maybe_modified(
            location_states.get_file_state(id_, path)
        )
    }
    # Previous known state per path, as recorded for the local location.
    location_state_cache = {
        path: location_states.get_file_state(id_, path)
        for path in paths
    }
    any_change = False
    for path in paths:
        if path in wd_states:
            file_state = location_state_cache[path]
            new_file_state = wd_states[path]
            changed = location_states.update_file_state(id_, new_file_state)
            if changed:
                any_change = True
                # If the file vanished but a new one with the same digest
                # popped up, consider that a rename.
                # Rename means, the old file is WIPEd (instead of just
                # locally removed) and the new file is added as usual
                if not new_file_state.exists():
                    logger.debug('{} vanished'.format(new_file_state.path))
                    # Iterate over paths to find a possible rename target
                    for path2 in paths:
                        # Rename to itself does not make sense
                        # Rename to a file that has not changed (or better: just appeared) does not make sense
                        if path2 == path or path2 not in wd_states:
                            continue
                        path2_state = location_state_cache[path2]
                        new_path2_state = wd_states[path2]
                        logger.debug('{} rename candidate {} ex before={} ex now={} self.digest={} candidate.digest={}'.format(
                            path, path2, path2_state.exists(),
                            new_path2_state.exists(),
                            file_state.digest, new_path2_state.digest
                        ))
                        # A rename target did not exist before, exists now,
                        # and carries the vanished file's old digest.
                        if not path2_state.exists() \
                                and new_path2_state.exists() \
                                and new_path2_state.digest == file_state.digest:
                            logger.info('Detected rename: {} -> {}'.format(path, path2))
                            # Mark the vanished entry as a WIPE carrying the
                            # old digest, so other locations drop it too.
                            new_file_state.wipe = True
                            new_file_state.digest = file_state.digest
                            break
                # Record the content decision with the next local clock tick.
                repository_state.update_file_state(
                    new_file_state,
                    id_,
                    location_states.get_clock(id_) + 1,
                )
                logger.debug('{} committed: {} clk={}'.format(short_id, new_file_state.path, location_states.get_clock(id_) + 1))
            else:
                logger.debug('{} not actually changed: {}'.format(short_id, path))
        else:
            logger.debug('{} not changed: {}'.format(short_id, path))
    return any_change
def merge(local_state, remote_state, merger_id):
    """
    Merge two repository states ('local' and 'remote') into a common state if
    possible, auto-detecting if a change only happened on one side and
    propagating those changes.
    For cases in which a file was changed on both sides, return details of the
    conflict.
    local_state:
        RepositoryState() instance that reflects the local repository state.
    remote_state:
        RepositoryState() instance that reflects the remote repository state.
    merger_id:
        ID of the repository conducting the merge (assumed to correspond
        to the 'local' repository)
    return:
        A pair (conflicts, merged).
        $conflicts is a dictionary of the form { path: (local_entry, remote_entry),
        ... } whereas $path denotes the path of a file in conflict and $local_entry
        and $remote_entry refer to the RepositoryState.Entry instances for that
        file that are in conflict.
        $merged is a newly created RepositoryState instance with selected merged
        repository states.
        If $conflicts is empty, $merged covers all files present either locally or
        remotely.
    """
    local_paths = set(local_state.get_paths())
    remote_paths = set(remote_state.get_paths())
    merged = RepositoryState(None)
    conflicts = {}

    # Files known to only one side carry over unchanged.
    for p in local_paths - remote_paths:
        merged[p] = local_state[p]
    for p in remote_paths - local_paths:
        merged[p] = remote_state[p]

    # Conflicts can only arise in paths that are specified in both state
    # files.  Reuse the already-computed sets instead of calling
    # get_paths() (and building sets) a second time.
    for path in local_paths & remote_paths:
        local = local_state[path]
        remote = remote_state[path]
        c = local.clock.compare(remote.clock)
        if c is None:
            # Vector clocks are incomparable -> concurrent modification.
            if local.contents_different(remote):
                logger.debug('merge: {} in conflict: {} <-> {}'.format(
                    path, local.clock, remote.clock
                ))
                conflicts[path] = (local, remote)
            else:
                # Same content on both sides: merge the clocks and bump
                # the merger's component.
                logger.debug('merge: {} automerged (same content)'.format(path))
                m = deepcopy(local)
                m.clock.update(remote.clock)
                m.clock.increase(merger_id)
                merged[path] = m
        elif c < 0:
            logger.debug('merge: {} newer on remote'.format(path))
            merged[path] = remote
        else:  # c >= 0:
            logger.debug('merge: {} same version or newer on local'.format(path))
            merged[path] = local
    return conflicts, merged
def auto_rename(working_directory, repository_state):
    """
    Apply automatic renaming in the given working_directory.
    That is, if working dir contains files that are WIPEd in $repository_state but
    are present under a different name, automatically rename those to obtain
    the repository file at a low cost.
    Repository.commit() should be called after calling this to commit the
    changes to the working directory.
    precondition: WD clean
    """
    # NOTE: In the original code this description string was placed *after*
    # the import/assert below, which made it a no-op expression instead of
    # the function's docstring; it has been moved first (PEP 257).
    from harmony.working_directory import WorkingDirectory
    assert isinstance(working_directory, WorkingDirectory)
    # Automatically apply auto-renaming
    # Auto-renaming
    # -------------
    # 1. Find any files $A with a WIPE entry.
    # 2. Compute/get their digest (from location state)
    # 3. Find a non-wiped file $B in repo that does not exist in the WD
    # 4. Rename $A to $B
    for path, entry in repository_state.files.items():
        logger.debug('auto_rename: {}: path={} wipe={} in_wd={}'.format(path, entry.path, entry.wipe, (entry.path in working_directory)))
        if entry.wipe and (entry.path in working_directory):
            # Candidate targets: non-wiped entries with the same digest.
            possible_targets = {
                e.path for e in repository_state.files.values()
                if e.path != path and e.digest == entry.digest and not e.wipe
            }
            logger.info(
                '{} could be auto-renamed to any of {}'.format(
                    path, possible_targets
                )
            )
            if possible_targets:
                # Pick an arbitrary candidate and rename on disk.
                (working_directory.path / path).rename(working_directory.path / possible_targets.pop())
| Droggelbecher/harmony | harmony/file_state_logic.py | file_state_logic.py | py | 9,182 | python | en | code | 0 | github-code | 36 |
74273514024 | import numpy as np
import jax
from jax import lax, random, numpy as jnp
import flax
from flax.core import freeze, unfreeze
from flax import linen as nn
from flax import optim
from typing import Any, Callable, Sequence, Optional
import pickle
from tensorflow import keras
# Experiment configuration for the NTK-dynamics run.
file_prefix = "struct"  # prefix for all result files written below
activation = nn.relu # activation function
M = 300 # width parameter (hidden layer width)
L = 20 # depth (number of hidden layers)
alpha = 10e-5 # learning rate -- NOTE(review): 10e-5 == 1e-4, not 1e-5; confirm intended value
epochs = 3000  # total optimization steps per model
kernel_steps = [0,1,10,20,50,100,200,300,400,500,600,700,800,900,1000,1200,1500,1700,2000,2500,3000] # epochs at which the NTK is computed
var_w_s = [1.0,2.0,2.2] # variance parameter \sigma_w^2 (one training run per value)
var_b = 0. # variance parameter \sigma_b^2
# custom fully-connected network (MLP) class
class MLP(nn.Module):
    """Fully-connected network (MLP) with Gaussian NTK-style initialization.

    Weights of layer l are drawn from N(0, v_w / widths[l]) and biases from
    N(0, v_b), i.e. the standard mean-field / NTK parameterization.
    """
    widths: Sequence[int] # We need to specify all the layer width (including input and output widths)
    v_w: float # variance parameter \sigma_w^2
    v_b: float # variance parameter \sigma_b^2
    activation: Callable # activation function (the same in all the hidden layers)
    kernel_init: Callable = jax.nn.initializers.normal # Gaussian initialization
    bias_init: Callable = jax.nn.initializers.normal
    def setup(self):
        # One Dense layer per consecutive pair of widths; the kernel stddev
        # is scaled by 1/sqrt(fan_in) so the variance is v_w / widths[l].
        self.layers = [nn.Dense(self.widths[l+1],
                                kernel_init = self.kernel_init(jnp.sqrt(self.v_w/self.widths[l])),
                                bias_init = self.bias_init(jnp.sqrt(self.v_b))
                                ) for l in range(len(self.widths)-1)]
    def __call__(self, inputs):
        # Apply activation after every layer except the last (linear output).
        x = inputs
        for i, lyr in enumerate(self.layers[:-1]):
            x = lyr(x)
            x = self.activation(x)
        x = self.layers[-1](x)
        return x
# the NTK on a single pair of samples (x1,x2)
def K(model):
    """Build a jitted function computing the empirical NTK on one sample pair.

    The returned function kernel(x1, x2, params) evaluates
    sum_p <df(x1)/dp, df(x2)/dp>, i.e. the inner product of the parameter
    Jacobians of the model outputs at x1 and x2.
    """
    # Fixes vs. original: the inner function no longer shadows the outer
    # name `K`; the deprecated (and since removed) `jax.tree_multimap` is
    # replaced by the multi-input `jax.tree_util.tree_map`; the unused
    # treedef from `tree_flatten` is no longer unpacked.
    def kernel(x1, x2, params):
        f1 = jax.jacobian(lambda p: model.apply(p, x1))(params)
        f2 = jax.jacobian(lambda p: model.apply(p, x2))(params)
        products = jax.tree_util.tree_map(jnp.multiply, f1, f2)
        return sum(jnp.sum(leaf) for leaf in jax.tree_util.tree_leaves(products))
    return jax.jit(kernel)
# the NTK matrix (vectorization of K)
def K_matr(model):
    """Return a jitted function that evaluates the NTK Gram matrix.

    The returned function gram(X, Y, params) has shape (len(X), len(Y)),
    obtained by vmapping the single-pair kernel over both sample axes.
    """
    pair_kernel = K(model)

    def gram(X, Y, params):
        def entry(x1, x2):
            return pair_kernel(x1, x2, params)
        # inner vmap sweeps Y for a fixed x1, outer vmap sweeps X
        rows = jax.vmap(jax.vmap(entry, (None, 0)), (0, None))
        return rows(X, Y)

    return jax.jit(gram)
# MSE loss function
def mse(x_batched, y_batched):
    """Return a jitted loss(params) computing the mean squared error of the
    module-level `model` on the fixed batch (x_batched, y_batched).

    NOTE: this closes over the global name `model`, which must be assigned
    before the returned function is first called.
    """
    def loss_fn(params):
        # Per-sample squared error, halved (standard 1/2 ||y - f(x)||^2).
        def sample_loss(x, y):
            residual = y - model.apply(params, x)
            return jnp.inner(residual, residual) / 2.0
        per_sample = jax.vmap(sample_loss)(x_batched, y_batched)
        return jnp.mean(per_sample, axis=0)
    return jax.jit(loss_fn)
# Load and preprocess MNIST
n_class = 10            # number of classes
ker_size_per_class = 10 # samples per class used for the NTK subset
mnist_n0 = 28*28        # flattened input dimension
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], mnist_n0)
x_test = x_test.reshape(x_test.shape[0], mnist_n0)
# choose subset of data with ker_size_per_class samples from each class
# NOTE(review): the loop runs over range(ker_size_per_class); it presumably
# should be range(n_class) -- they only coincide because both equal 10.
ind = []
for k in range(ker_size_per_class):
    ind += list(np.random.choice(np.argwhere(y_train==k).ravel(), size=ker_size_per_class, replace=False))
x_train, x_test = x_train/255.,x_test/255.
y_train, y_test = keras.utils.to_categorical(y_train, 10), keras.utils.to_categorical(y_test, 10)
x_ker = x_train[ind] # We compute the NTK only on a subset of samples
y_ker = y_train[ind]
# ------- training setup -------
key = random.PRNGKey(0)
subkeys = jax.random.split(key, num=len(var_w_s))  # one PRNG key per var_w run
widths = [mnist_n0]+[M]*L+[n_class]                # full layer-width spec
optimizer_def = optim.Adam(learning_rate=alpha) # Define Adam optimizer
# NOTE: mse() closes over the global `model`, which is (re)assigned inside
# the loop below before these losses are first evaluated.
loss = mse(x_train, y_train) # train loss function
loss_grad_fn = jax.value_and_grad(loss) # function to get loss value and gradient
test_loss = mse(x_test, y_test) # test loss function
# One full training run per weight-variance setting.
for var_w, subkey in zip(var_w_s, subkeys):
    model = MLP(widths = widths, v_w=var_w, v_b=var_b, activation = activation) # Define MLP model
    params = model.init(subkey, x_train) # Initialize model
    optimizer = optimizer_def.create(params) # Create optimizer with initial parameters
    K_t = []          # NTK matrices at the epochs listed in kernel_steps
    loss_t = []       # train loss per epoch
    test_loss_t = []  # test loss per epoch
    K_func = K_matr(model)
    for i in range(epochs+1):
        loss_val, grad = loss_grad_fn(optimizer.target) # Get gradient and train loss value
        test_loss_val = test_loss(optimizer.target) # Get test loss value
        test_loss_t.append(test_loss_val)
        loss_t.append(loss_val)
        # Compute the NTK for the chosen epochs
        if i in kernel_steps:
            print('Loss step {}: '.format(i), loss_val, test_loss_val)
            K_t.append(K_func(x_ker,x_ker,optimizer.target))
        optimizer = optimizer.apply_gradient(grad) # Update optimizer parameters
    # Save the results (one pickle per quantity, tagged by var_w, M, L)
    pickle.dump(jnp.array(K_t), open( "ntk_dynamics/"+file_prefix+"_w"+str(int(var_w*10))+"M"+str(M)+"L"+str(L), "wb" ) )
    pickle.dump(jnp.array(loss_t), open( "ntk_dynamics/"+file_prefix+"_loss_w"+str(int(var_w*10))+"M"+str(M)+"L"+str(L), "wb" ) )
    pickle.dump(jnp.array(test_loss_t), open( "ntk_dynamics/"+file_prefix+"_test_loss_w"+str(int(var_w*10))+"M"+str(M)+"L"+str(L), "wb" ) )
| mselezniova/ntk_beyond_limit | ntk_train_dynamics.py | ntk_train_dynamics.py | py | 5,198 | python | en | code | 0 | github-code | 36 |
36955110589 | import wttest
from wiredtiger import stat
from wtscenario import make_scenarios
# Test compact behaviour with overflow values.
class test_compact03(wttest.WiredTigerTestCase):
    """Verify compact behaviour when overflow values pin the end of the file:
    compaction cannot shrink the file, but freed mid-file extents are reused."""
    uri='table:test_compact03'
    # Scenario axis 1: block allocation size / leaf page size.
    fileConfig = [
        ('1KB', dict(fileConfig='allocation_size=1KB,leaf_page_max=1KB')),
        ('4KB', dict(fileConfig='allocation_size=4KB,leaf_page_max=4KB')),
    ]
    # Scenario axis 2: delete via range truncate vs. per-key remove.
    useTruncate = [
        ('no_truncate', dict(truncate=False)),
        ('truncate', dict(truncate=True))
    ]
    scenarios = make_scenarios(fileConfig, useTruncate)
    # Enable stats and use a cache size that can fit table in the memory.
    conn_config = 'statistics=(all),cache_size=100MB'
    # We want to test how compaction interacts with overflow values. Test flow is as follows:
    #
    # 1. Populate a table with relatively small page size.
    # 2. Checkpoint and get stats on the table to confirm the size.
    # 3. Add few thousand overflow values. It is expected that these will be written at the end of
    #    file.
    # 4. Perform checkpoint to ensure overflow values are written on disk.
    # 5. Delete middle ~90% of the normal values in the table.
    # 6. Perform checkpoint so compact can find something to work with.
    # 7. Call compact.
    # 8. Get stats on compacted table expecting that there will no change in size given we have
    #    overflow keys at the end of file.
    # 9. Insert some normal values again. They will be written in the free extents in the middle
    #    of the file. Therefore, expect no increase in file size.
    #
    # We want to have around 20000 leaf pages. With minimum 1KB page allocation size, the table
    # is expected to have at least 25 MByte worth of data. We can then experiment with deleting
    # range of keys in middle to test how compaction works.
    normalValue = "abcde" * 10
    overflowValue = "abcde" * 1000
    nrecords = 400000 # To create ~25 MB table
    expectedTableSize = 20 # Mbytes
    nOverflowRecords = 5000
    # Return stats that track the progress of compaction.
    def getCompactProgressStats(self):
        """Return a dict of compact progress counters (pages reviewed /
        skipped / rewritten) from the data-source statistics cursor."""
        cstat = self.session.open_cursor(
            'statistics:' + self.uri, None, 'statistics=(all)')
        statDict = {}
        statDict["pages_reviewed"] = cstat[stat.dsrc.btree_compact_pages_reviewed][2]
        statDict["pages_skipped"] = cstat[stat.dsrc.btree_compact_pages_skipped][2]
        statDict["pages_rewritten"] = cstat[stat.dsrc.btree_compact_pages_rewritten][2]
        cstat.close()
        return statDict
    # Return the size of the file
    def getSize(self):
        """Return the number of bytes allocated to the table's file."""
        # To allow this to work on systems without ftruncate,
        # get the portion of the file allocated, via 'statistics=(all)',
        # not the physical file size, via 'statistics=(size)'.
        cstat = self.session.open_cursor(
            'statistics:' + self.uri, None, 'statistics=(all)')
        sz = cstat[stat.dsrc.block_size][2]
        cstat.close()
        return sz
    # Create a table, add keys with both big and small values.
    def test_compact03(self):
        """Exercise compact on a table mixing normal and overflow values
        (see the numbered flow in the class comment above)."""
        # FIXME-WT-11399: check the other assertions that are skipped when the tiered hook is
        # enabled.
        if self.runningHook('tiered'):
            self.skipTest("this test generates occasional rollback errors when tiered is enabled")
        mb = 1024 * 1024
        # 1. Create a table with relatively small page size.
        params = 'key_format=i,value_format=S,' + self.fileConfig
        self.session.create(self.uri, params)
        c = self.session.open_cursor(self.uri, None)
        for i in range(self.nrecords):
            c[i] = self.normalValue
        c.close()
        # 2. Checkpoint and get stats on the table to confirm the size.
        self.session.checkpoint()
        sizeWithoutOverflow = self.getSize()
        self.pr('After populate ' + str(sizeWithoutOverflow // mb) + 'MB')
        if not self.runningHook('tiered'):
            self.assertGreater(sizeWithoutOverflow, self.expectedTableSize * mb)
        # 3. Add overflow values.
        c = self.session.open_cursor(self.uri, None)
        for i in range(self.nOverflowRecords):
            c[i + self.nrecords] = self.overflowValue
        c.close()
        # 4. Perform checkpoint to ensure overflow values are written to the disk.
        self.session.checkpoint()
        sizeWithOverflow = self.getSize()
        self.pr('After inserting overflow values ' + str(sizeWithoutOverflow // mb) + 'MB')
        if not self.runningHook('tiered'):
            self.assertGreater(sizeWithOverflow, sizeWithoutOverflow)
        # 5. Delete middle ~90% of the normal values in the table.
        if self.truncate:
            c1 = self.session.open_cursor(self.uri, None)
            c2 = self.session.open_cursor(self.uri, None)
            c1.set_key((self.nrecords // 100) * 5)
            c2.set_key((self.nrecords // 100) * 95)
            self.assertEqual(self.session.truncate(None, c1, c2, None), 0)
            c1.close()
            c2.close()
        else:
            c = self.session.open_cursor(self.uri, None)
            for i in range((self.nrecords // 100) * 5, (self.nrecords // 100) * 95):
                c.set_key(i)
                self.assertEqual(c.remove(), 0)
            c.close()
        # 6. Perform checkpoint to ensure we have blocks available in the middle of the file.
        self.session.checkpoint()
        # 7 & 8. Call compact. We expect that the overflow values at the end of the file are not
        #        rewritten and therefore the file size will mostly remain the same. Give a leeway
        #        of 10%.
        self.session.compact(self.uri)
        sizeAfterCompact = self.getSize()
        self.pr('After deleting values and compactions ' + str(sizeAfterCompact // mb) + 'MB')
        if not self.runningHook('tiered'):
            self.assertGreater(sizeAfterCompact, (sizeWithOverflow // 10) * 9)
        # Verify that we did indeed rewrote some pages but that didn't help with the file size.
        statDict = self.getCompactProgressStats()
        self.assertGreater(statDict["pages_reviewed"],0)
        self.assertGreater(statDict["pages_rewritten"],0)
        self.assertEqual(statDict["pages_rewritten"] + statDict["pages_skipped"],
                            statDict["pages_reviewed"])
        # 9. Insert some normal values and expect that file size won't increase as free extents
        #    in the middle of the file will be used to write new data.
        # Insert around ~50% of the normal values in the table that we deleted earlier.
        c = self.session.open_cursor(self.uri, None)
        for i in range((self.nrecords // 100) * 25, (self.nrecords // 100) * 75):
            c.set_key(i)
            c.set_value(self.normalValue)
            self.assertEqual(c.update(),0)
        c.close()
        # Perform compact.
        self.session.compact(self.uri)
        # Test that the file size doesn't increase.
        sizeAfterNewInserts = self.getSize()
        self.pr('After Inserting bunch of values ' + str(sizeAfterNewInserts // mb) + 'MB')
        if not self.runningHook('tiered'):
            self.assertEqual(sizeAfterCompact, sizeAfterNewInserts)
# Allow running this test standalone (outside the full suite runner).
if __name__ == '__main__':
    wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_compact03.py | test_compact03.py | py | 7,278 | python | en | code | 24,670 | github-code | 36 |
34385049617 | """ Code for computing SW distances between PDs [1]_ of point cloud summaries of activations
Notes
-----
Relevant section : Experiments with PH
Relevant library : `Persim` [2]_
References
----------
.. [1] Carrière, M.; Cuturi, M.; and Oudot, S. 2017. Sliced Wasserstein Kernel for
Persistence Diagrams. In Precup, D.; and Teh, Y. W., eds., Proceedings of the
34th International Conference on Machine Learning, volume 70 of Proceedings of
Machine Learning Research, 664–673. PMLR.
.. [2] Saul, N.; and Tralie, C. 2019. Scikit-TDA: Topological Data Analysis for Python.
"""
import pickle
import argparse
import numpy as np
from persim import sliced_wasserstein
# UPDATE THESE TO REFLECT YOUR OWN DIRECTORIES AND FILE NAMING CONVENTIONS:
# path to project directory containing all model and experiment files
projectdir = '/rcfs/projects/blaktop'
# path to experiment directory containing PH results
expdir = f'{projectdir}/resnet_cifar_experiments'
# directory prefix and filename suffix for PH results per model/batch
# e.g., path to PH results for model i on batch b is expdir/prefix_{i}/persistence_batch{b}filesuffix.p
prefix = 'resnet18_cifar_large'
filesuffix = '_1000'
# number of randomly initialized models (used in 'cross' mode, see below)
num_models = 100
def get_layers(PH):
    """Return the layer names (keys of *PH*) sorted into network order.

    Stripping the 'conv' prefix before sorting makes 'conv1' sort ahead of
    every 'block_seq*' entry.

    Note
    ----
    Specifically designed for the module names defined in `cifar_resnet.resnet18`
    """
    def order_key(layer_name):
        return layer_name.replace('conv', '')
    return sorted(PH, key=order_key)
def SW_dist_internal(PH, layers):
    """Compute the symmetric SW-distance matrix between the H1 persistence
    diagrams of the layers of a single model (zero diagonal)."""
    n = len(layers)
    dist = np.zeros((n, n))
    # Only the upper triangle is computed; the matrix is mirrored.
    for a in range(n - 1):
        dgm_a = PH[layers[a]]['dgms'][1]
        for b in range(a + 1, n):
            d = sliced_wasserstein(dgm_a, PH[layers[b]]['dgms'][1])
            dist[a][b] = d
            dist[b][a] = d
    return dist
def SW_dist_cross_model(PH_i, PH_j, layers):
    """Compute all pairwise SW distances between the H1 persistence diagrams
    of two differently initialized models (not symmetric in general)."""
    n = len(layers)
    dist = np.zeros((n, n))
    for row, name_i in enumerate(layers):
        dgm_i = PH_i[name_i]['dgms'][1]
        for col, name_j in enumerate(layers):
            dist[row][col] = sliced_wasserstein(dgm_i, PH_j[name_j]['dgms'][1])
    return dist
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='SW Distances between PDs of Point Cloud Summaries')
    parser.add_argument('-fs', type=int, help='run index of first random seed ResNet-18 model', default=0)
    parser.add_argument('-ls', type=int, help='run index of last random seed ResNet-18 model (exclusive)', default=1)
    parser.add_argument('-fb', type=int, help='index of first batch', default=0)
    parser.add_argument('-lb', type=int, help='index of last batch (exclusive)', default=1)
    parser.add_argument('-m', type=str, help="mode: either 'int' for single model internal distances, or 'cross' for distances between differently initialized models", default='int')
    args = parser.parse_args()
    for b in range(args.fb, args.lb):
        filename = f'persistence_batch{b}{filesuffix}.p'
        for i in range(args.fs, args.ls):
            savepath = f'{expdir}/{prefix}_{i}'
            # BUG FIX: the original opened f'{savepath}/(unknown)' (a garbled
            # literal) while `filename` was computed but never used; the PH
            # pickle path is restored to use {filename}.  Files are also
            # opened via `with` so handles are closed deterministically.
            with open(f'{savepath}/{filename}', 'rb') as fin:
                PH = pickle.load(fin)
            layers = get_layers(PH)
            if args.m == 'int':
                dist = SW_dist_internal(PH, layers)
            else:
                # Cross mode: concatenate distances against every model
                # (self-distances in position i, cross-distances elsewhere).
                dist = []
                for j in range(num_models):
                    if j != i:
                        otherpath = f'{expdir}/{prefix}_{j}'
                        with open(f'{otherpath}/{filename}', 'rb') as fin:
                            PH_other = pickle.load(fin)
                        dist_other = SW_dist_cross_model(PH, PH_other, layers)
                    else:
                        dist_other = SW_dist_internal(PH, layers)
                    dist.append(dist_other)
                dist = np.concatenate(dist, axis=1)
            np.save(f'{savepath}/sliced_wasserstein_batch{b}{filesuffix}_{args.m}', dist)
| pnnl/DeepDataProfiler | papers_with_code/ExperimentalObservations/AAAI-code-PH/SW_distances.py | SW_distances.py | py | 4,209 | python | en | code | 20 | github-code | 36 |
19483517480 | '''
函数说明:
Author: hongqing
Date: 2021-08-04 14:23:54
LastEditTime: 2021-08-04 15:23:25
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
numoffinger = 21  # number of hand landmarks; network input dim is numoffinger - 1

class Net(nn.Module):
    """Three-layer MLP mapping (numoffinger - 1) features to 3 outputs.

    `type` selects the weight initialization scheme:
      1 -> N(0, 3) Gaussian weights (default)
      0 -> all-zero weights
      2 -> integer random_(1, 2) weights (i.e. all ones)
    Biases keep their default nn.Linear initialization in every case.
    """
    def __init__(self, type=1):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(numoffinger - 1, 255)
        self.fc2 = nn.Linear(255, 255)
        self.fc3 = nn.Linear(255, 3)
        # Re-initialize weights in fc1 -> fc2 -> fc3 order (preserves the
        # RNG consumption order of the original per-layer calls).
        weight_tensors = (self.fc1.weight.data, self.fc2.weight.data, self.fc3.weight.data)
        if type == 1:
            for w in weight_tensors:
                w.normal_(0, 3)
        elif type == 0:
            for w in weight_tensors:
                w.zero_()
        elif type == 2:
            for w in weight_tensors:
                w.random_(1, 2)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
42157253168 | """empty message
Revision ID: c4665b8d682b
Revises: 10dbb0e0a903
Create Date: 2019-12-26 14:38:11.609539
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c4665b8d682b'       # this migration's id
down_revision = '10dbb0e0a903'  # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add nullable integer column ``gage`` to ``goods``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('goods', sa.Column('gage', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``gage`` column from ``goods``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('goods', 'gage')
    # ### end Alembic commands ###
| FeelingsLw/flask_demo2 | migrations/versions/c4665b8d682b_.py | c4665b8d682b_.py | py | 646 | python | en | code | 0 | github-code | 36 |
16879280056 | import requests
import json
import urllib3
from settings import settings as settings
from sdwan_operations import monitor as sdwanmn
import time
import sys, getopt
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def main(argv):
    """Parse CLI options, load device/field lists, and poll port stats forever.

    argv: command-line arguments (sys.argv[1:]).
    Exits with status 2 on bad options or missing input files; status 1 after
    printing usage for -h.
    """
    # To run the program use the syntax:
    # python __main__.py -d <devfile> -f <fieldfile> -c <optional csvfile>
    devfile = ''
    fieldfile = ''
    csvfile = ''
    try:
        # NOTE(review): long option "csvfile" lacks a trailing '=', so
        # --csvfile cannot take an argument in long form -- confirm intended.
        opts, args = getopt.getopt(argv,"hd:f:c:",["devfile=", "fieldfile=", "csvfile"])
    except getopt.GetoptError:
        print ('python __main__.py -d <devfile> -f <fieldfile> -c <optional csvfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('python __main__.py -d <devfile> -f <fieldfile> -c <optional csvfile>')
            sys.exit(1)
        elif opt in ("-d", "--devfile"):
            devfile = arg
        elif opt in ("-f", "--fieldfile"):
            fieldfile = arg
        elif opt in ("-c", "--csvfile"):
            csvfile = arg
    # Read the device list from the supplied file (for example, dev_list.json)
    try:
        with open(devfile) as f:
            dev_list = json.load(f)
    except FileNotFoundError:
        print ('python __main__.py -d <devfile> -f <fieldfile> -c <optional csvfile>')
        sys.exit(2)
    # Read the field list from the supplied file (for example, field_list.json)
    try:
        with open(fieldfile) as f:
            field_list = json.load(f)
    except FileNotFoundError:
        print ('python __main__.py -d <devfile> -f <fieldfile> -c <optional csvfile>')
        sys.exit(2)
    # Poll indefinitely, one vManage session per iteration.
    while (True):
        # Create a session and pass it as a parameter for show_port_stats
        session = requests.Session()
        session.auth = (settings.vmanage_username, settings.vmanage_password)
        sdwanmn.show_port_stats(session, dev_list, field_list)
        # Comment below to stop exporting data to csv
        if csvfile != "":
            sdwanmn.dump_to_csv(session,dev_list,field_list,csvfile)
        time.sleep(60)
# Script entry point: forward CLI args (without the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| stantiku/sdwan_monitor | __main__.py | __main__.py | py | 2,117 | python | en | code | 0 | github-code | 36 |
43114992788 | """
The codes are heavily borrowed from NeuS
"""
import os
import cv2 as cv
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import logging
import mcubes
from icecream import ic
from models.render_utils import sample_pdf
from models.projector import Projector
from tsparse.torchsparse_utils import sparse_to_dense_channel
from models.fast_renderer import FastRenderer
from models.patch_projector import PatchProjector
class SparseNeuSRenderer(nn.Module):
"""
conditional neus render;
optimize on normalized world space;
warped by nn.Module to support DataParallel traning
"""
def __init__(self,
rendering_network_outside,
sdf_network,
variance_network,
rendering_network,
n_samples,
n_importance,
n_outside,
perturb,
alpha_type='div',
conf=None
):
super(SparseNeuSRenderer, self).__init__()
self.conf = conf
self.base_exp_dir = conf['general.base_exp_dir']
# network setups
self.rendering_network_outside = rendering_network_outside
self.sdf_network = sdf_network
self.variance_network = variance_network
self.rendering_network = rendering_network
self.n_samples = n_samples
self.n_importance = n_importance
self.n_outside = n_outside
self.perturb = perturb
self.alpha_type = alpha_type
self.rendering_projector = Projector() # used to obtain features for generalized rendering
self.h_patch_size = self.conf.get_int('model.h_patch_size', default=3)
self.patch_projector = PatchProjector(self.h_patch_size)
self.ray_tracer = FastRenderer() # ray_tracer to extract depth maps from sdf_volume
# - fitted rendering or general rendering
try:
self.if_fitted_rendering = self.sdf_network.if_fitted_rendering
except:
self.if_fitted_rendering = False
def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_variance,
conditional_valid_mask_volume=None):
device = rays_o.device
batch_size, n_samples = z_vals.shape
pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3
if conditional_valid_mask_volume is not None:
pts_mask = self.get_pts_mask_for_conditional_volume(pts.view(-1, 3), conditional_valid_mask_volume)
pts_mask = pts_mask.reshape(batch_size, n_samples)
pts_mask = pts_mask[:, :-1] * pts_mask[:, 1:] # [batch_size, n_samples-1]
else:
pts_mask = torch.ones([batch_size, n_samples]).to(pts.device)
sdf = sdf.reshape(batch_size, n_samples)
prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]
prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]
mid_sdf = (prev_sdf + next_sdf) * 0.5
dot_val = None
if self.alpha_type == 'uniform':
dot_val = torch.ones([batch_size, n_samples - 1]) * -1.0
else:
dot_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)
prev_dot_val = torch.cat([torch.zeros([batch_size, 1]).to(device), dot_val[:, :-1]], dim=-1)
dot_val = torch.stack([prev_dot_val, dot_val], dim=-1)
dot_val, _ = torch.min(dot_val, dim=-1, keepdim=False)
dot_val = dot_val.clip(-10.0, 0.0) * pts_mask
dist = (next_z_vals - prev_z_vals)
prev_esti_sdf = mid_sdf - dot_val * dist * 0.5
next_esti_sdf = mid_sdf + dot_val * dist * 0.5
prev_cdf = torch.sigmoid(prev_esti_sdf * inv_variance)
next_cdf = torch.sigmoid(next_esti_sdf * inv_variance)
alpha_sdf = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)
alpha = alpha_sdf
# - apply pts_mask
alpha = pts_mask * alpha
weights = alpha * torch.cumprod(
torch.cat([torch.ones([batch_size, 1]).to(device), 1. - alpha + 1e-7], -1), -1)[:, :-1]
z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()
return z_samples
    def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, lod,
                   sdf_network, gru_fusion,
                   # * related to conditional feature
                   conditional_volume=None,
                   conditional_valid_mask_volume=None
                   ):
        """Evaluate sdf at the newly sampled depths and merge them (sorted)
        into the existing per-ray depth/sdf arrays.

        z_vals/sdf: [n_rays, n_samples] existing samples
        new_z_vals: [n_rays, n_importance] depths from up_sample()
        returns: (z_vals, sdf), each [n_rays, n_samples + n_importance],
        sorted by depth along dim -1.
        """
        device = rays_o.device
        batch_size, n_samples = z_vals.shape
        _, n_importance = new_z_vals.shape
        pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None]

        if conditional_valid_mask_volume is not None:
            pts_mask = self.get_pts_mask_for_conditional_volume(pts.view(-1, 3), conditional_valid_mask_volume)
            pts_mask = pts_mask.reshape(batch_size, n_importance)
            pts_mask_bool = (pts_mask > 0).view(-1)
        else:
            pts_mask = torch.ones([batch_size, n_importance]).to(pts.device)

        # Points outside the valid mask keep a large sentinel sdf (100).
        new_sdf = torch.ones([batch_size * n_importance, 1]).to(pts.dtype).to(device) * 100

        if torch.sum(pts_mask) > 1:
            new_outputs = sdf_network.sdf(pts.reshape(-1, 3)[pts_mask_bool], conditional_volume, lod=lod)
            new_sdf[pts_mask_bool] = new_outputs['sdf_pts_scale%d' % lod]  # .reshape(batch_size, n_importance)

        new_sdf = new_sdf.view(batch_size, n_importance)

        z_vals = torch.cat([z_vals, new_z_vals], dim=-1)
        sdf = torch.cat([sdf, new_sdf], dim=-1)

        # Sort depths per ray and reorder the sdf values to match, using
        # flat (row, column) gather indices.
        z_vals, index = torch.sort(z_vals, dim=-1)
        xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1)
        index = index.reshape(-1)
        sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance)

        return z_vals, sdf
@torch.no_grad()
def get_pts_mask_for_conditional_volume(self, pts, mask_volume):
"""
:param pts: [N, 3]
:param mask_volume: [1, 1, X, Y, Z]
:return:
"""
num_pts = pts.shape[0]
pts = pts.view(1, 1, 1, num_pts, 3) # - should be in range (-1, 1)
pts = torch.flip(pts, dims=[-1])
pts_mask = F.grid_sample(mask_volume, pts, mode='nearest') # [1, c, 1, 1, num_pts]
pts_mask = pts_mask.view(-1, num_pts).permute(1, 0).contiguous() # [num_pts, 1]
return pts_mask
def render_core(self,
rays_o,
rays_d,
z_vals,
sample_dist,
lod,
sdf_network,
rendering_network,
background_alpha=None, # - no use here
background_sampled_color=None, # - no use here
background_rgb=None, # - no use here
alpha_inter_ratio=0.0,
# * related to conditional feature
conditional_volume=None,
conditional_valid_mask_volume=None,
# * 2d feature maps
feature_maps=None,
color_maps=None,
w2cs=None,
intrinsics=None,
img_wh=None,
query_c2w=None, # - used for testing
if_general_rendering=True,
if_render_with_grad=True,
# * used for blending mlp rendering network
img_index=None,
rays_uv=None,
# * used for clear bg and fg
bg_num=0
):
device = rays_o.device
N_rays = rays_o.shape[0]
_, n_samples = z_vals.shape
dists = z_vals[..., 1:] - z_vals[..., :-1]
dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape).to(device)], -1)
mid_z_vals = z_vals + dists * 0.5
mid_dists = mid_z_vals[..., 1:] - mid_z_vals[..., :-1]
pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3
dirs = rays_d[:, None, :].expand(pts.shape)
pts = pts.reshape(-1, 3)
dirs = dirs.reshape(-1, 3)
# * if conditional_volume is restored from sparse volume, need mask for pts
if conditional_valid_mask_volume is not None:
pts_mask = self.get_pts_mask_for_conditional_volume(pts, conditional_valid_mask_volume)
pts_mask = pts_mask.reshape(N_rays, n_samples).float().detach()
pts_mask_bool = (pts_mask > 0).view(-1)
if torch.sum(pts_mask_bool.float()) < 1: # ! when render out image, may meet this problem
pts_mask_bool[:100] = True
else:
pts_mask = torch.ones([N_rays, n_samples]).to(pts.device)
# import ipdb; ipdb.set_trace()
# pts_valid = pts[pts_mask_bool]
sdf_nn_output = sdf_network.sdf(pts[pts_mask_bool], conditional_volume, lod=lod)
sdf = torch.ones([N_rays * n_samples, 1]).to(pts.dtype).to(device) * 100
sdf[pts_mask_bool] = sdf_nn_output['sdf_pts_scale%d' % lod] # [N_rays*n_samples, 1]
feature_vector_valid = sdf_nn_output['sdf_features_pts_scale%d' % lod]
feature_vector = torch.zeros([N_rays * n_samples, feature_vector_valid.shape[1]]).to(pts.dtype).to(device)
feature_vector[pts_mask_bool] = feature_vector_valid
# * estimate alpha from sdf
gradients = torch.zeros([N_rays * n_samples, 3]).to(pts.dtype).to(device)
# import ipdb; ipdb.set_trace()
gradients[pts_mask_bool] = sdf_network.gradient(
pts[pts_mask_bool], conditional_volume, lod=lod).squeeze()
sampled_color_mlp = None
rendering_valid_mask_mlp = None
sampled_color_patch = None
rendering_patch_mask = None
if self.if_fitted_rendering: # used for fine-tuning
position_latent = sdf_nn_output['sampled_latent_scale%d' % lod]
sampled_color_mlp = torch.zeros([N_rays * n_samples, 3]).to(pts.dtype).to(device)
sampled_color_mlp_mask = torch.zeros([N_rays * n_samples, 1]).to(pts.dtype).to(device)
# - extract pixel
pts_pixel_color, pts_pixel_mask = self.patch_projector.pixel_warp(
pts[pts_mask_bool][:, None, :], color_maps, intrinsics,
w2cs, img_wh=None) # [N_rays * n_samples,1, N_views, 3] , [N_rays*n_samples, 1, N_views]
pts_pixel_color = pts_pixel_color[:, 0, :, :] # [N_rays * n_samples, N_views, 3]
pts_pixel_mask = pts_pixel_mask[:, 0, :] # [N_rays*n_samples, N_views]
# - extract patch
if_patch_blending = False if rays_uv is None else True
pts_patch_color, pts_patch_mask = None, None
if if_patch_blending:
pts_patch_color, pts_patch_mask = self.patch_projector.patch_warp(
pts.reshape([N_rays, n_samples, 3]),
rays_uv, gradients.reshape([N_rays, n_samples, 3]),
color_maps,
intrinsics[0], intrinsics,
query_c2w[0], torch.inverse(w2cs), img_wh=None
) # (N_rays, n_samples, N_src, Npx, 3), (N_rays, n_samples, N_src, Npx)
N_src, Npx = pts_patch_mask.shape[2:]
pts_patch_color = pts_patch_color.view(N_rays * n_samples, N_src, Npx, 3)[pts_mask_bool]
pts_patch_mask = pts_patch_mask.view(N_rays * n_samples, N_src, Npx)[pts_mask_bool]
sampled_color_patch = torch.zeros([N_rays * n_samples, Npx, 3]).to(device)
sampled_color_patch_mask = torch.zeros([N_rays * n_samples, 1]).to(device)
sampled_color_mlp_, sampled_color_mlp_mask_, \
sampled_color_patch_, sampled_color_patch_mask_ = sdf_network.color_blend(
pts[pts_mask_bool],
position_latent,
gradients[pts_mask_bool],
dirs[pts_mask_bool],
feature_vector[pts_mask_bool],
img_index=img_index,
pts_pixel_color=pts_pixel_color,
pts_pixel_mask=pts_pixel_mask,
pts_patch_color=pts_patch_color,
pts_patch_mask=pts_patch_mask
) # [n, 3], [n, 1]
sampled_color_mlp[pts_mask_bool] = sampled_color_mlp_
sampled_color_mlp_mask[pts_mask_bool] = sampled_color_mlp_mask_.float()
sampled_color_mlp = sampled_color_mlp.view(N_rays, n_samples, 3)
sampled_color_mlp_mask = sampled_color_mlp_mask.view(N_rays, n_samples)
rendering_valid_mask_mlp = torch.mean(pts_mask * sampled_color_mlp_mask, dim=-1, keepdim=True) > 0.5
# patch blending
if if_patch_blending:
sampled_color_patch[pts_mask_bool] = sampled_color_patch_
sampled_color_patch_mask[pts_mask_bool] = sampled_color_patch_mask_.float()
sampled_color_patch = sampled_color_patch.view(N_rays, n_samples, Npx, 3)
sampled_color_patch_mask = sampled_color_patch_mask.view(N_rays, n_samples)
rendering_patch_mask = torch.mean(pts_mask * sampled_color_patch_mask, dim=-1,
keepdim=True) > 0.5 # [N_rays, 1]
else:
sampled_color_patch, rendering_patch_mask = None, None
if if_general_rendering: # used for general training
# [512, 128, 16]; [4, 512, 128, 59]; [4, 512, 128, 4]
ren_geo_feats, ren_rgb_feats, ren_ray_diff, ren_mask, _, _ = self.rendering_projector.compute(
pts.view(N_rays, n_samples, 3),
# * 3d geometry feature volumes
geometryVolume=conditional_volume[0],
geometryVolumeMask=conditional_valid_mask_volume[0],
# * 2d rendering feature maps
rendering_feature_maps=feature_maps, # [n_views, 56, 256, 256]
color_maps=color_maps,
w2cs=w2cs,
intrinsics=intrinsics,
img_wh=img_wh,
query_img_idx=0, # the index of the N_views dim for rendering
query_c2w=query_c2w,
)
# (N_rays, n_samples, 3)
if if_render_with_grad:
# import ipdb; ipdb.set_trace()
# [nrays, 3] [nrays, 1]
sampled_color, rendering_valid_mask = rendering_network(
ren_geo_feats, ren_rgb_feats, ren_ray_diff, ren_mask)
# import ipdb; ipdb.set_trace()
else:
with torch.no_grad():
sampled_color, rendering_valid_mask = rendering_network(
ren_geo_feats, ren_rgb_feats, ren_ray_diff, ren_mask)
else:
sampled_color, rendering_valid_mask = None, None
inv_variance = self.variance_network(feature_vector)[:, :1].clip(1e-6, 1e6)
true_dot_val = (dirs * gradients).sum(-1, keepdim=True) # * calculate
iter_cos = -(F.relu(-true_dot_val * 0.5 + 0.5) * (1.0 - alpha_inter_ratio) + F.relu(
-true_dot_val) * alpha_inter_ratio) # always non-positive
iter_cos = iter_cos * pts_mask.view(-1, 1)
true_estimate_sdf_half_next = sdf + iter_cos.clip(-10.0, 10.0) * dists.reshape(-1, 1) * 0.5
true_estimate_sdf_half_prev = sdf - iter_cos.clip(-10.0, 10.0) * dists.reshape(-1, 1) * 0.5
prev_cdf = torch.sigmoid(true_estimate_sdf_half_prev * inv_variance)
next_cdf = torch.sigmoid(true_estimate_sdf_half_next * inv_variance)
p = prev_cdf - next_cdf
c = prev_cdf
if self.alpha_type == 'div':
alpha_sdf = ((p + 1e-5) / (c + 1e-5)).reshape(N_rays, n_samples).clip(0.0, 1.0)
elif self.alpha_type == 'uniform':
uniform_estimate_sdf_half_next = sdf - dists.reshape(-1, 1) * 0.5
uniform_estimate_sdf_half_prev = sdf + dists.reshape(-1, 1) * 0.5
uniform_prev_cdf = torch.sigmoid(uniform_estimate_sdf_half_prev * inv_variance)
uniform_next_cdf = torch.sigmoid(uniform_estimate_sdf_half_next * inv_variance)
uniform_alpha = F.relu(
(uniform_prev_cdf - uniform_next_cdf + 1e-5) / (uniform_prev_cdf + 1e-5)).reshape(
N_rays, n_samples).clip(0.0, 1.0)
alpha_sdf = uniform_alpha
else:
assert False
alpha = alpha_sdf
# - apply pts_mask
alpha = alpha * pts_mask
# pts_radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(N_rays, n_samples)
# inside_sphere = (pts_radius < 1.0).float().detach()
# relax_inside_sphere = (pts_radius < 1.2).float().detach()
inside_sphere = pts_mask
relax_inside_sphere = pts_mask
weights = alpha * torch.cumprod(torch.cat([torch.ones([N_rays, 1]).to(device), 1. - alpha + 1e-7], -1), -1)[:,
:-1] # n_rays, n_samples
weights_sum = weights.sum(dim=-1, keepdim=True)
alpha_sum = alpha.sum(dim=-1, keepdim=True)
if bg_num > 0:
weights_sum_fg = weights[:, :-bg_num].sum(dim=-1, keepdim=True)
else:
weights_sum_fg = weights_sum
if sampled_color is not None:
color = (sampled_color * weights[:, :, None]).sum(dim=1)
else:
color = None
# import ipdb; ipdb.set_trace()
if background_rgb is not None and color is not None:
color = color + background_rgb * (1.0 - weights_sum)
# print("color device:" + str(color.device))
# if color is not None:
# # import ipdb; ipdb.set_trace()
# color = color + (1.0 - weights_sum)
###################* mlp color rendering #####################
color_mlp = None
# import ipdb; ipdb.set_trace()
if sampled_color_mlp is not None:
color_mlp = (sampled_color_mlp * weights[:, :, None]).sum(dim=1)
if background_rgb is not None and color_mlp is not None:
color_mlp = color_mlp + background_rgb * (1.0 - weights_sum)
############################ * patch blending ################
blended_color_patch = None
if sampled_color_patch is not None:
blended_color_patch = (sampled_color_patch * weights[:, :, None, None]).sum(dim=1) # [N_rays, Npx, 3]
######################################################
gradient_error = (torch.linalg.norm(gradients.reshape(N_rays, n_samples, 3), ord=2,
dim=-1) - 1.0) ** 2
# ! the gradient normal should be masked out, the pts out of the bounding box should also be penalized
gradient_error = (pts_mask * gradient_error).sum() / (
(pts_mask).sum() + 1e-5)
depth = (mid_z_vals * weights[:, :n_samples]).sum(dim=1, keepdim=True)
# print("[TEST]: weights_sum in render_core", weights_sum.mean())
# print("[TEST]: weights_sum in render_core NAN number", weights_sum.isnan().sum())
# if weights_sum.isnan().sum() > 0:
# import ipdb; ipdb.set_trace()
return {
'color': color,
'color_mask': rendering_valid_mask, # (N_rays, 1)
'color_mlp': color_mlp,
'color_mlp_mask': rendering_valid_mask_mlp,
'sdf': sdf, # (N_rays, n_samples)
'depth': depth, # (N_rays, 1)
'dists': dists,
'gradients': gradients.reshape(N_rays, n_samples, 3),
'variance': 1.0 / inv_variance,
'mid_z_vals': mid_z_vals,
'weights': weights,
'weights_sum': weights_sum,
'alpha_sum': alpha_sum,
'alpha_mean': alpha.mean(),
'cdf': c.reshape(N_rays, n_samples),
'gradient_error': gradient_error,
'inside_sphere': inside_sphere,
'blended_color_patch': blended_color_patch,
'blended_color_patch_mask': rendering_patch_mask,
'weights_sum_fg': weights_sum_fg
}
    def render(self, rays_o, rays_d, near, far, sdf_network, rendering_network,
               perturb_overwrite=-1,
               background_rgb=None,
               alpha_inter_ratio=0.0,
               # * related to conditional feature
               lod=None,
               conditional_volume=None,
               conditional_valid_mask_volume=None,
               # * 2d feature maps
               feature_maps=None,
               color_maps=None,
               w2cs=None,
               intrinsics=None,
               img_wh=None,
               query_c2w=None,  # -used for testing
               if_general_rendering=True,
               if_render_with_grad=True,
               # * used for blending mlp rendering network
               img_index=None,
               rays_uv=None,
               # * importance sample for second lod network
               pre_sample=False,  # no use here
               # * for clear foreground
               bg_ratio=0.0
               ):
        """Render rays end to end: coarse sampling, sdf-guided importance
        sampling (via up_sample/cat_z_vals), then compositing in render_core.

        :param rays_o: [N_rays, 3] ray origins
        :param rays_d: [N_rays, 3] ray directions
        :param near: per-ray (or broadcastable) near plane depths
        :param far: per-ray (or broadcastable) far plane depths
        :param perturb_overwrite: if >= 0, overrides self.perturb for this call
        :param bg_ratio: fraction of coarse samples reserved as background
        :return: dict of render products (color_fine, depth, weights, sdf, ...)
        """
        device = rays_o.device
        N_rays = len(rays_o)
        # sample_dist = 2.0 / self.n_samples
        # average per-sample spacing, used as the final interval length
        sample_dist = ((far - near) / self.n_samples).mean().item()
        z_vals = torch.linspace(0.0, 1.0, self.n_samples).to(device)
        z_vals = near + (far - near) * z_vals[None, :]

        bg_num = int(self.n_samples * bg_ratio)

        if z_vals.shape[0] == 1:
            z_vals = z_vals.repeat(N_rays, 1)

        # split off the trailing samples that are treated as background
        if bg_num > 0:
            z_vals_bg = z_vals[:, self.n_samples - bg_num:]
            z_vals = z_vals[:, :self.n_samples - bg_num]

        n_samples = self.n_samples - bg_num
        perturb = self.perturb

        # - significantly speed up training, for the second lod network
        if pre_sample:
            z_vals = self.sample_z_vals_from_maskVolume(rays_o, rays_d, near, far,
                                                        conditional_valid_mask_volume)

        if perturb_overwrite >= 0:
            perturb = perturb_overwrite
        if perturb > 0:
            # get intervals between samples
            mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
            upper = torch.cat([mids, z_vals[..., -1:]], -1)
            lower = torch.cat([z_vals[..., :1], mids], -1)
            # stratified samples in those intervals
            t_rand = torch.rand(z_vals.shape).to(device)
            z_vals = lower + (upper - lower) * t_rand

        background_alpha = None
        background_sampled_color = None
        z_val_before = z_vals.clone()
        # Up sample
        if self.n_importance > 0:
            with torch.no_grad():
                pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]

                sdf_outputs = sdf_network.sdf(
                    pts.reshape(-1, 3), conditional_volume, lod=lod)
                # pdb.set_trace()
                sdf = sdf_outputs['sdf_pts_scale%d' % lod].reshape(N_rays, self.n_samples - bg_num)

                # hierarchical refinement: each step doubles the sharpness of
                # the sdf-to-alpha conversion (64 * 2**i) while adding
                # n_importance // n_steps new samples
                n_steps = 4
                for i in range(n_steps):
                    new_z_vals = self.up_sample(rays_o, rays_d, z_vals, sdf, self.n_importance // n_steps,
                                                64 * 2 ** i,
                                                conditional_valid_mask_volume=conditional_valid_mask_volume,
                                                )

                    # if new_z_vals.isnan().sum() > 0:
                    #     import ipdb; ipdb.set_trace()

                    z_vals, sdf = self.cat_z_vals(
                        rays_o, rays_d, z_vals, new_z_vals, sdf, lod,
                        sdf_network, gru_fusion=False,
                        conditional_volume=conditional_volume,
                        conditional_valid_mask_volume=conditional_valid_mask_volume,
                    )

                del sdf

            n_samples = self.n_samples + self.n_importance

        # Background
        # NOTE(review): ret_outside is always None here; the
        # `self.n_outside > 0` branch below would raise if taken —
        # presumably n_outside == 0 in this configuration, confirm.
        ret_outside = None

        # Render
        if bg_num > 0:
            z_vals = torch.cat([z_vals, z_vals_bg], dim=1)
        # if z_vals.isnan().sum() > 0:
        #     import ipdb; ipdb.set_trace()
        ret_fine = self.render_core(rays_o,
                                    rays_d,
                                    z_vals,
                                    sample_dist,
                                    lod,
                                    sdf_network,
                                    rendering_network,
                                    background_rgb=background_rgb,
                                    background_alpha=background_alpha,
                                    background_sampled_color=background_sampled_color,
                                    alpha_inter_ratio=alpha_inter_ratio,
                                    # * related to conditional feature
                                    conditional_volume=conditional_volume,
                                    conditional_valid_mask_volume=conditional_valid_mask_volume,
                                    # * 2d feature maps
                                    feature_maps=feature_maps,
                                    color_maps=color_maps,
                                    w2cs=w2cs,
                                    intrinsics=intrinsics,
                                    img_wh=img_wh,
                                    query_c2w=query_c2w,
                                    if_general_rendering=if_general_rendering,
                                    if_render_with_grad=if_render_with_grad,
                                    # * used for blending mlp rendering network
                                    img_index=img_index,
                                    rays_uv=rays_uv
                                    )

        color_fine = ret_fine['color']

        if self.n_outside > 0:
            color_fine_mask = torch.logical_or(ret_fine['color_mask'], ret_outside['color_mask'])
        else:
            color_fine_mask = ret_fine['color_mask']

        weights = ret_fine['weights']
        weights_sum = ret_fine['weights_sum']

        gradients = ret_fine['gradients']
        mid_z_vals = ret_fine['mid_z_vals']

        # depth = (mid_z_vals * weights[:, :n_samples]).sum(dim=1, keepdim=True)
        depth = ret_fine['depth']
        # spread of the depth distribution around its expectation, per ray
        depth_varaince = ((mid_z_vals - depth) ** 2 * weights[:, :n_samples]).sum(dim=-1, keepdim=True)
        variance = ret_fine['variance'].reshape(N_rays, n_samples).mean(dim=-1, keepdim=True)

        # - randomly sample points from the volume, and maximize the sdf
        pts_random = torch.rand([1024, 3]).float().to(device) * 2 - 1  # normalized to (-1, 1)
        sdf_random = sdf_network.sdf(pts_random, conditional_volume, lod=lod)['sdf_pts_scale%d' % lod]

        result = {
            'depth': depth,
            'color_fine': color_fine,
            'color_fine_mask': color_fine_mask,
            'color_outside': ret_outside['color'] if ret_outside is not None else None,
            'color_outside_mask': ret_outside['color_mask'] if ret_outside is not None else None,
            'color_mlp': ret_fine['color_mlp'],
            'color_mlp_mask': ret_fine['color_mlp_mask'],
            'variance': variance.mean(),
            'cdf_fine': ret_fine['cdf'],
            'depth_variance': depth_varaince,
            'weights_sum': weights_sum,
            'weights_max': torch.max(weights, dim=-1, keepdim=True)[0],
            'alpha_sum': ret_fine['alpha_sum'].mean(),
            'alpha_mean': ret_fine['alpha_mean'],
            'gradients': gradients,
            'weights': weights,
            'gradient_error_fine': ret_fine['gradient_error'],
            'inside_sphere': ret_fine['inside_sphere'],
            'sdf': ret_fine['sdf'],
            'sdf_random': sdf_random,
            'blended_color_patch': ret_fine['blended_color_patch'],
            'blended_color_patch_mask': ret_fine['blended_color_patch_mask'],
            'weights_sum_fg': ret_fine['weights_sum_fg']
        }

        return result
@torch.no_grad()
def sample_z_vals_from_sdfVolume(self, rays_o, rays_d, near, far, sdf_volume, mask_volume):
# ? based on sdf to do importance sampling, seems that too biased on pre-estimation
device = rays_o.device
N_rays = len(rays_o)
n_samples = self.n_samples * 2
z_vals = torch.linspace(0.0, 1.0, n_samples).to(device)
z_vals = near + (far - near) * z_vals[None, :]
if z_vals.shape[0] == 1:
z_vals = z_vals.repeat(N_rays, 1)
pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]
sdf = self.get_pts_mask_for_conditional_volume(pts.view(-1, 3), sdf_volume).reshape([N_rays, n_samples])
new_z_vals = self.up_sample(rays_o, rays_d, z_vals, sdf, self.n_samples,
200,
conditional_valid_mask_volume=mask_volume,
)
return new_z_vals
@torch.no_grad()
def sample_z_vals_from_maskVolume(self, rays_o, rays_d, near, far, mask_volume): # don't use
device = rays_o.device
N_rays = len(rays_o)
n_samples = self.n_samples * 2
z_vals = torch.linspace(0.0, 1.0, n_samples).to(device)
z_vals = near + (far - near) * z_vals[None, :]
if z_vals.shape[0] == 1:
z_vals = z_vals.repeat(N_rays, 1)
mid_z_vals = (z_vals[:, 1:] + z_vals[:, :-1]) * 0.5
pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None]
pts_mask = self.get_pts_mask_for_conditional_volume(pts.view(-1, 3), mask_volume).reshape(
[N_rays, n_samples - 1])
# empty voxel set to 0.1, non-empty voxel set to 1
weights = torch.where(pts_mask > 0, torch.ones_like(pts_mask).to(device),
0.1 * torch.ones_like(pts_mask).to(device))
# sample more pts in non-empty voxels
z_samples = sample_pdf(z_vals, weights, self.n_samples, det=True).detach()
return z_samples
@torch.no_grad()
def filter_pts_by_depthmaps(self, coords, pred_depth_maps, proj_matrices,
partial_vol_origin, voxel_size,
near, far, depth_interval, d_plane_nums):
"""
Use the pred_depthmaps to remove redundant pts (pruned by sdf, sdf always have two sides, the back side is useless)
:param coords: [n, 3] int coords
:param pred_depth_maps: [N_views, 1, h, w]
:param proj_matrices: [N_views, 4, 4]
:param partial_vol_origin: [3]
:param voxel_size: 1
:param near: 1
:param far: 1
:param depth_interval: 1
:param d_plane_nums: 1
:return:
"""
device = pred_depth_maps.device
n_views, _, sizeH, sizeW = pred_depth_maps.shape
if len(partial_vol_origin.shape) == 1:
partial_vol_origin = partial_vol_origin[None, :]
pts = coords * voxel_size + partial_vol_origin
rs_grid = pts.unsqueeze(0).expand(n_views, -1, -1)
rs_grid = rs_grid.permute(0, 2, 1).contiguous() # [n_views, 3, n_pts]
nV = rs_grid.shape[-1]
rs_grid = torch.cat([rs_grid, torch.ones([n_views, 1, nV]).to(device)], dim=1) # [n_views, 4, n_pts]
# Project grid
im_p = proj_matrices @ rs_grid # - transform world pts to image UV space # [n_views, 4, n_pts]
im_x, im_y, im_z = im_p[:, 0], im_p[:, 1], im_p[:, 2]
im_x = im_x / im_z
im_y = im_y / im_z
im_grid = torch.stack([2 * im_x / (sizeW - 1) - 1, 2 * im_y / (sizeH - 1) - 1], dim=-1)
im_grid = im_grid.view(n_views, 1, -1, 2)
sampled_depths = torch.nn.functional.grid_sample(pred_depth_maps, im_grid, mode='bilinear',
padding_mode='zeros',
align_corners=True)[:, 0, 0, :] # [n_views, n_pts]
sampled_depths_valid = (sampled_depths > 0.5 * near).float()
valid_d_min = (sampled_depths - d_plane_nums * depth_interval).clamp(near.item(),
far.item()) * sampled_depths_valid
valid_d_max = (sampled_depths + d_plane_nums * depth_interval).clamp(near.item(),
far.item()) * sampled_depths_valid
mask = im_grid.abs() <= 1
mask = mask[:, 0] # [n_views, n_pts, 2]
mask = (mask.sum(dim=-1) == 2) & (im_z > valid_d_min) & (im_z < valid_d_max)
mask = mask.view(n_views, -1)
mask = mask.permute(1, 0).contiguous() # [num_pts, nviews]
mask_final = torch.sum(mask.float(), dim=1, keepdim=False) > 0
return mask_final
    @torch.no_grad()
    def get_valid_sparse_coords_by_sdf_depthfilter(self, sdf_volume, coords_volume, mask_volume, feature_volume,
                                                   pred_depth_maps, proj_matrices,
                                                   partial_vol_origin, voxel_size,
                                                   near, far, depth_interval, d_plane_nums,
                                                   threshold=0.02, maximum_pts=110000):
        """
        assume batch size == 1, from the first lod to get sparse voxels

        Selects voxels whose |sdf| is below `threshold`, removes back-side
        surface voxels using the predicted depth maps, dilates the surviving
        occupancy, and returns the kept coordinates and their features.
        The threshold is shrunk (and, as a last resort, points are randomly
        dropped) until at most `maximum_pts` voxels remain.

        :param sdf_volume: [1, X, Y, Z]
        :param coords_volume: [3, X, Y, Z]
        :param mask_volume: [1, X, Y, Z]
        :param feature_volume: [C, X, Y, Z]
        :param pred_depth_maps: [N_views, 1, h, w] per-view depth predictions
        :param proj_matrices: [N_views, 4, 4]
        :param threshold: initial |sdf| cutoff for occupancy
        :param maximum_pts: upper bound on the number of returned voxels
        :return: (valid_coords [N, 4] with batch idx prepended, valid_feature [N, C])
        """
        device = coords_volume.device
        _, dX, dY, dZ = coords_volume.shape

        def prune(sdf_pts, coords_pts, mask_volume, threshold):
            # occupancy = voxels close enough to the zero level set
            occupancy_mask = (torch.abs(sdf_pts) < threshold).squeeze(1)  # [num_pts]
            valid_coords = coords_pts[occupancy_mask]

            # - filter backside surface by depth maps
            mask_filtered = self.filter_pts_by_depthmaps(valid_coords, pred_depth_maps, proj_matrices,
                                                         partial_vol_origin, voxel_size,
                                                         near, far, depth_interval, d_plane_nums)
            valid_coords = valid_coords[mask_filtered]

            # - dilate
            # scatter the surviving coords back into a dense occupancy grid
            occupancy_mask = sparse_to_dense_channel(valid_coords, 1, [dX, dY, dZ], 1, 0, device)  # [dX, dY, dZ, 1]

            # - dilate
            # 7x7x7 average pooling > 0 acts as a binary dilation
            occupancy_mask = occupancy_mask.float()
            occupancy_mask = occupancy_mask.view(1, 1, dX, dY, dZ)
            occupancy_mask = F.avg_pool3d(occupancy_mask, kernel_size=7, stride=1, padding=3)
            occupancy_mask = occupancy_mask.view(-1, 1) > 0

            final_mask = torch.logical_and(mask_volume, occupancy_mask)[:, 0]  # [num_pts]

            return final_mask, torch.sum(final_mask.float())

        C, dX, dY, dZ = feature_volume.shape

        # flatten all volumes to per-voxel rows (X*Y*Z first, channels last)
        sdf_volume = sdf_volume.permute(1, 2, 3, 0).contiguous().view(-1, 1)
        coords_volume = coords_volume.permute(1, 2, 3, 0).contiguous().view(-1, 3)
        mask_volume = mask_volume.permute(1, 2, 3, 0).contiguous().view(-1, 1)
        feature_volume = feature_volume.permute(1, 2, 3, 0).contiguous().view(-1, C)

        # - for check
        # sdf_volume = torch.rand_like(sdf_volume).float().to(sdf_volume.device) * 0.02

        final_mask, valid_num = prune(sdf_volume, coords_volume, mask_volume, threshold)

        # tighten the threshold until the voxel budget is met (or floor reached)
        while (valid_num > maximum_pts) and (threshold > 0.003):
            threshold = threshold - 0.002
            final_mask, valid_num = prune(sdf_volume, coords_volume, mask_volume, threshold)

        valid_coords = coords_volume[final_mask]  # [N, 3]
        valid_feature = feature_volume[final_mask]  # [N, C]

        valid_coords = torch.cat([torch.ones([valid_coords.shape[0], 1]).to(valid_coords.device) * 0,
                                  valid_coords], dim=1)  # [N, 4], append batch idx

        # ! if the valid_num is still larger than maximum_pts, sample part of pts
        if valid_num > maximum_pts:
            valid_num = valid_num.long()
            occupancy = torch.ones([valid_num]).to(device) > 0
            # randomly drop (valid_num - maximum_pts) voxels
            choice = np.random.choice(valid_num.cpu().numpy(), valid_num.cpu().numpy() - maximum_pts,
                                      replace=False)
            ind = torch.nonzero(occupancy).to(device)
            occupancy[ind[choice]] = False
            valid_coords = valid_coords[occupancy]
            valid_feature = valid_feature[occupancy]

            print(threshold, "randomly sample to save memory")

        return valid_coords, valid_feature
    @torch.no_grad()
    def get_valid_sparse_coords_by_sdf(self, sdf_volume, coords_volume, mask_volume, feature_volume, threshold=0.02,
                                       maximum_pts=110000):
        """
        assume batch size == 1, from the first lod to get sparse voxels

        Like get_valid_sparse_coords_by_sdf_depthfilter but without the
        depth-map back-side filtering: keeps voxels with |sdf| < threshold,
        dilates the occupancy, and intersects with mask_volume. The threshold
        is shrunk (then points randomly dropped) until at most `maximum_pts`
        voxels remain.

        :param sdf_volume: [num_pts, 1]
        :param coords_volume: [3, X, Y, Z]
        :param mask_volume: [1, X, Y, Z]
        :param feature_volume: [C, X, Y, Z]
        :param threshold: initial |sdf| cutoff for occupancy
        :param maximum_pts: upper bound on the number of returned voxels
        :return: (valid_coords [N, 4] with batch idx prepended, valid_feature [N, C])
        """

        def prune(sdf_volume, mask_volume, threshold):
            occupancy_mask = torch.abs(sdf_volume) < threshold  # [num_pts, 1]

            # - dilate
            # 7x7x7 average pooling > 0 acts as a binary dilation
            occupancy_mask = occupancy_mask.float()
            occupancy_mask = occupancy_mask.view(1, 1, dX, dY, dZ)
            occupancy_mask = F.avg_pool3d(occupancy_mask, kernel_size=7, stride=1, padding=3)
            occupancy_mask = occupancy_mask.view(-1, 1) > 0

            final_mask = torch.logical_and(mask_volume, occupancy_mask)[:, 0]  # [num_pts]

            return final_mask, torch.sum(final_mask.float())

        C, dX, dY, dZ = feature_volume.shape

        # flatten volumes to per-voxel rows (X*Y*Z first, channels last)
        coords_volume = coords_volume.permute(1, 2, 3, 0).contiguous().view(-1, 3)
        mask_volume = mask_volume.permute(1, 2, 3, 0).contiguous().view(-1, 1)
        feature_volume = feature_volume.permute(1, 2, 3, 0).contiguous().view(-1, C)

        final_mask, valid_num = prune(sdf_volume, mask_volume, threshold)

        # tighten the threshold until the voxel budget is met (or floor reached)
        while (valid_num > maximum_pts) and (threshold > 0.003):
            threshold = threshold - 0.002
            final_mask, valid_num = prune(sdf_volume, mask_volume, threshold)

        valid_coords = coords_volume[final_mask]  # [N, 3]
        valid_feature = feature_volume[final_mask]  # [N, C]

        valid_coords = torch.cat([torch.ones([valid_coords.shape[0], 1]).to(valid_coords.device) * 0,
                                  valid_coords], dim=1)  # [N, 4], append batch idx

        # ! if the valid_num is still larger than maximum_pts, sample part of pts
        if valid_num > maximum_pts:
            device = sdf_volume.device
            valid_num = valid_num.long()
            occupancy = torch.ones([valid_num]).to(device) > 0
            # randomly drop (valid_num - maximum_pts) voxels
            choice = np.random.choice(valid_num.cpu().numpy(), valid_num.cpu().numpy() - maximum_pts,
                                      replace=False)
            ind = torch.nonzero(occupancy).to(device)
            occupancy[ind[choice]] = False
            valid_coords = valid_coords[occupancy]
            valid_feature = valid_feature[occupancy]

            print(threshold, "randomly sample to save memory")

        return valid_coords, valid_feature
@torch.no_grad()
def extract_fields(self, bound_min, bound_max, resolution, query_func, device,
# * related to conditional feature
**kwargs
):
N = 64
X = torch.linspace(bound_min[0], bound_max[0], resolution).to(device).split(N)
Y = torch.linspace(bound_min[1], bound_max[1], resolution).to(device).split(N)
Z = torch.linspace(bound_min[2], bound_max[2], resolution).to(device).split(N)
u = np.zeros([resolution, resolution, resolution], dtype=np.float32)
with torch.no_grad():
for xi, xs in enumerate(X):
for yi, ys in enumerate(Y):
for zi, zs in enumerate(Z):
xx, yy, zz = torch.meshgrid(xs, ys, zs, indexing="ij")
pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1)
# ! attention, the query function is different for extract geometry and fields
output = query_func(pts, **kwargs)
sdf = output['sdf_pts_scale%d' % kwargs['lod']].reshape(len(xs), len(ys),
len(zs)).detach().cpu().numpy()
u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = -1 * sdf
return u
@torch.no_grad()
def extract_geometry(self, sdf_network, bound_min, bound_max, resolution, threshold, device, occupancy_mask=None,
# * 3d feature volume
**kwargs
):
# logging.info('threshold: {}'.format(threshold))
u = self.extract_fields(bound_min, bound_max, resolution,
lambda pts, **kwargs: sdf_network.sdf(pts, **kwargs),
# - sdf need to be multiplied by -1
device,
# * 3d feature volume
**kwargs
)
if occupancy_mask is not None:
dX, dY, dZ = occupancy_mask.shape
empty_mask = 1 - occupancy_mask
empty_mask = empty_mask.view(1, 1, dX, dY, dZ)
# - dilation
# empty_mask = F.avg_pool3d(empty_mask, kernel_size=7, stride=1, padding=3)
empty_mask = F.interpolate(empty_mask, [resolution, resolution, resolution], mode='nearest')
empty_mask = empty_mask.view(resolution, resolution, resolution).cpu().numpy() > 0
u[empty_mask] = -100
del empty_mask
vertices, triangles = mcubes.marching_cubes(u, threshold)
b_max_np = bound_max.detach().cpu().numpy()
b_min_np = bound_min.detach().cpu().numpy()
vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :]
return vertices, triangles, u
    @torch.no_grad()
    def extract_depth_maps(self, sdf_network, con_volume, intrinsics, c2ws, H, W, near, far):
        """
        extract depth maps from the density volume

        Builds one camera ray per pixel for every view, then sphere-traces the
        sdf (via self.ray_tracer) to find the first surface hit along each ray.

        :param con_volume: [1, 1+C, dX, dY, dZ] can by con_volume or sdf_volume
        :param c2ws: [B, 4, 4]
        :param H: image height in pixels
        :param W: image width in pixels
        :param near: near bound tensor passed per ray to the tracer
        :param far: far bound tensor passed per ray to the tracer
        :return: (depth_maps [B, 1, H, W] with invalid pixels zeroed,
                  depth_masks [B, 1, H, W] hit mask)
        """
        device = con_volume.device
        batch_size = intrinsics.shape[0]

        with torch.no_grad():
            ys, xs = torch.meshgrid(torch.linspace(0, H - 1, H),
                                    torch.linspace(0, W - 1, W), indexing="ij")  # pytorch's meshgrid has indexing='ij'
            p = torch.stack([xs, ys, torch.ones_like(ys)], dim=-1)  # H, W, 3

            intrinsics_inv = torch.inverse(intrinsics)

            p = p.view(-1, 3).float().to(device)  # N_rays, 3
            # back-project pixels to camera-space directions, then rotate to world
            # NOTE(review): .squeeze() here drops ALL size-1 dims — presumably
            # batch_size > 1 or the downstream views compensate; confirm for B == 1
            p = torch.matmul(intrinsics_inv[:, None, :3, :3], p[:, :, None]).squeeze()  # Batch, N_rays, 3
            rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True)  # Batch, N_rays, 3
            rays_v = torch.matmul(c2ws[:, None, :3, :3], rays_v[:, :, :, None]).squeeze()  # Batch, N_rays, 3
            rays_o = c2ws[:, None, :3, 3].expand(rays_v.shape)  # Batch, N_rays, 3
            rays_d = rays_v

            rays_o = rays_o.contiguous().view(-1, 3)
            rays_d = rays_d.contiguous().view(-1, 3)

            ################## - sphere tracer to extract depth maps                 ######################
            depth_masks_sphere, depth_maps_sphere = self.ray_tracer.extract_depth_maps(
                rays_o, rays_d,
                near[None, :].repeat(rays_o.shape[0], 1),
                far[None, :].repeat(rays_o.shape[0], 1),
                sdf_network, con_volume
            )

        depth_maps = depth_maps_sphere.view(batch_size, 1, H, W)
        depth_masks = depth_masks_sphere.view(batch_size, 1, H, W)

        depth_maps = torch.where(depth_masks, depth_maps,
                                 torch.zeros_like(depth_masks.float()).to(device))  # fill invalid pixels by 0

        return depth_maps, depth_masks
| One-2-3-45/One-2-3-45 | reconstruction/models/sparse_neus_renderer.py | sparse_neus_renderer.py | py | 44,709 | python | en | code | 1,164 | github-code | 36 |
5058919782 |
from django.db import models
# Create your models here.
class Meal(models.Model):
    """A single meal entry holding its free-form menu text."""
    # Optional menu description; blank=True allows empty form input.
    menu = models.TextField(blank=True)

    def __str__(self):
        # Show the menu text itself in admin/listing views.
        return self.menu
class MealList(models.Model):
    """One day's menu: optional breakfast, lunch and dinner references."""

    date = models.DateField(blank=True)
    breakfast = models.ForeignKey(
        Meal,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        # NOTE(review): related_name is inconsistent with "lunch"/"dinner";
        # renaming it would change the reverse accessor and need a migration.
        related_name="breakfast_id",
    )
    lunch = models.ForeignKey(
        Meal,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="lunch",
    )
    dinner = models.ForeignKey(
        Meal,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="dinner",
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'meal_lists'

    def update_meal(self, data, number):
        """Assign `data` to the slot chosen by `number` (1/2/3), save, return self."""
        slot = {1: 'breakfast', 2: 'lunch', 3: 'dinner'}.get(number)
        if slot is not None:
            setattr(self, slot, data)
        # save unconditionally, matching the original behavior for unknown numbers
        self.save()
        return self
| KaceTH/django_api-0.00.90 | MealTable/models.py | models.py | py | 1,139 | python | en | code | 0 | github-code | 36 |
def is_prime(num):
    """Return 1 if num is a prime number, otherwise 0.

    Fixes the original implementation, which returned on the FIRST trial
    division: any odd composite (9, 15, 21, ...) was reported prime, and
    numbers <= 1 returned None.
    """
    if num <= 1:
        # 0, 1 and negatives are not prime
        return 0
    # only divisors up to sqrt(num) need to be checked
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return 0
    return 1
#driver code
# Read a number from the user and report whether it is prime.
num= int(input("\n Enter the positive number: "))
# NOTE(review): this prints the raw return value of is_prime before the
# verdict — likely leftover debug output.
print(is_prime(num))
if(num==1):
    print("\n 1 is neither prime nor composite. ")
else:
    # NOTE(review): is_prime is called a second time here; the earlier
    # result could be stored and reused.
    if(is_prime(num)==1):
        print("\n NUMBER IS PRIME.")
    else:
        print("\n NUMBER IS NOT PRIME. ")
| Charut24/Programming-Paradigms- | Lab Assignment 4_3.py | Lab Assignment 4_3.py | py | 490 | python | en | code | 0 | github-code | 36 |
16098373063 | #!/usr/bin/env python
"""Notepad-- : A very simple text editor."""
import sys
if sys.version_info < (3,):
import Tkinter as tk
import tkFileDialog as filedialog
else:
import tkinter as tk
from tkinter import filedialog
def create_gui(root):
    """Build the Notepad-- window: a text area plus Open/Save buttons."""
    root.title("Notepad--")
    textbox = tk.Text(root)
    textbox.pack(expand=True, fill=tk.BOTH)

    # Functionality for opening and saving files.
    def open_file():
        """Replace the text area contents with a user-chosen file."""
        handle = filedialog.askopenfile()
        if handle:
            textbox.delete(1.0, tk.END)
            textbox.insert(tk.END, handle.read())
            handle.close()

    def save_file():
        """Write the text area contents (minus the trailing newline) to a file."""
        handle = filedialog.asksaveasfile()
        if handle:
            handle.write(textbox.get(1.0, tk.END)[:-1])
            handle.close()

    # Set up the command buttons
    button_row = tk.Frame(root)
    button_row.pack()
    tk.Button(button_row, text="Open", command=open_file).pack(side=tk.LEFT)
    tk.Button(button_row, text="Save", command=save_file).pack(side=tk.LEFT)
if __name__ == "__main__":
    # Build the application window and enter the Tk event loop.
    root = tk.Tk()
    create_gui(root)
    root.mainloop()
| jgat/notepad-- | notepad--.py | notepad--.py | py | 1,143 | python | en | code | 1 | github-code | 36 |
35090835676 | #!/usr/bin/python
######################################################################################
## Find disk usage under a root directory, examples:
## 1. python du_rootdir.py -d rootDir
## Find disk usage under the root directory "rootDir"
## 2. python du_rootdir.py -d rootDir -r true
## Find disk usage recursively under the root directory "rootDir"
## 3. python du_rootdir.py -d rootDir -r true -l 3
## Find disk usage recursively under the root directory "rootDir" with
## recursive level 3
## Author: Zhichang Guo, email: Zhichang.Guo@noaa.gov
######################################################################################
from pathlib import Path
import argparse
import os
import sys
def print_indented(result, level):
    """Print *result* preceded by *level* tab characters."""
    prefix = '\t' * level
    print("%s%s" % (prefix, result))
def traverse_dir(dir):
    """Print `du -sh` output for each immediate subdirectory of *dir*.

    *dir* is expected to end with a path separator (the caller appends '/').
    Hidden (dot-prefixed) directories are skipped while iflag == 'not'.
    """
    iflag = 'not'  # 'not' => skip hidden directories
    l = os.listdir(dir)
    for d in l:
        if os.path.isdir(dir + d):
            if not d.startswith('.') or (d.startswith('.') and not iflag.upper() == 'NOT'):
                fullpath = os.path.join(dir, d)
                # Single-quote the path for the shell; NOTE(review): a quote
                # inside a directory name would still break this command.
                cmd = "du -sh " + " \'" + str(fullpath) + "\'"
                result = os.popen(cmd).read()
                print(result,end='')
def traverse_dir_recur(dir, max_level, level=0):
    """Recursively print `du -sh` for subdirectories of *dir*, deepest first.

    max_level: maximum recursion depth; 0 means unlimited.
    level: current depth (0 for the root call); nested levels are highlighted.
    """
    iflag = 'not'  # 'not' => skip hidden directories
    l = os.listdir(dir)
    for d in l:
        if os.path.isdir(dir + d):
            if not d.startswith('.') or (d.startswith('.') and not iflag.upper() == 'NOT'):
                fullpath = os.path.join(dir, d)
                # Recurse first so children are reported before their parent.
                if max_level == 0 or level < max_level:
                    traverse_dir_recur(dir + d + "/", max_level, level+1)
                cmd = "du -sh " + " \'" + str(fullpath) + "\'"
                result = os.popen(cmd).read()
                if level > 0:
                    # Nested entries printed in red-on-yellow via ANSI escapes.
                    print('\033[1;31;43m ' + result.rstrip("\n") + ' \033[0;0m')
                else:
                    print(result.rstrip("\n"))
def find_du(rootdir, rflag, max_level):
    """Report disk usage under *rootdir*.

    rflag == 'false' produces a flat, one-level report; any other value
    recurses down to *max_level* (0 = unlimited).
    """
    home = os.environ['HOME']  # NOTE(review): unused -- candidate for removal
    path = Path(rootdir)
    owner = path.owner()
    print("Owner: ",owner)
    rootdir += '/'  # traversal helpers expect a trailing separator
    if rflag == 'false':
        traverse_dir(rootdir)
    else:
        traverse_dir_recur(rootdir, max_level)
if __name__ == "__main__":
    # CLI: -d rootDir [-r true] [-l LEVEL]; see the module header for examples.
    ap = argparse.ArgumentParser()
    ap.add_argument('-d', '--rootd', help="name of the root directory", required=True)
    ap.add_argument('-r', '--rflag', help="recurively or not", default="false")
    ap.add_argument('-l', '--level', help="level", type=int, default=0)
    MyArgs = ap.parse_args()
    find_du(MyArgs.rootd, MyArgs.rflag, MyArgs.level)
| zhichang-guo/Scripts | du_rootdir.py | du_rootdir.py | py | 2,785 | python | en | code | 0 | github-code | 36 |
7595057258 | from os import path
from subprocess import check_output, check_call
from pathlib import Path
def _filename(fname):
    """Resolve helper script *fname* to "<yapl package dir>/<fname>.sh".

    Returns the absolute path as a string.
    """
    # NOTE(review): imported at call time rather than module level --
    # presumably to avoid an import cycle with the yapl package; confirm.
    import yapl
    yapl_root = Path(yapl.__file__).parent
    filename = fname + ".sh"
    full_filename = yapl_root / filename
    return str(full_filename)
def func(fname, *args):
    """Run the named helper shell script with *args*; return its stripped stdout."""
    argv = [_filename(fname)]
    argv.extend(str(arg) for arg in args)
    return check_output(argv, text=True).strip()
def proc(fname, *args):
    """Run the named helper shell script with *args*; raise on non-zero exit."""
    argv = [_filename(fname)]
    argv.extend(str(arg) for arg in args)
    check_call(argv)
| padresmurfa/yapl | python_library/yapl/internal/shell_call.py | shell_call.py | py | 613 | python | en | code | 0 | github-code | 36 |
4758110469 | import os, random
import numpy as np
import torch
import argparse
from train import train
def init_seeds(seed=0):
    """Seed the Python, NumPy and PyTorch (CPU + all GPUs) RNGs for reproducibility."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
def parsing_args(c):
    """Parse CLI options and copy each one onto config module *c*; return *c*.

    The input resolution is derived from the class name afterwards.
    """
    parser = argparse.ArgumentParser(description='msflow')
    parser.add_argument('--mode', default='train', type=str,
                        help='train or test.')
    parser.add_argument('--resume', action='store_true', default=False,
                        help='resume training or not.')
    parser.add_argument('--eval_ckpt', default='', type=str,
                        help='checkpoint path for evaluation.')
    parser.add_argument('--gpu', default='0', type=str)
    parser.add_argument('--class-name', default='bottle', type=str)
    parser.add_argument('--lr', default=1e-4, type=float,
                        help='learning rate')
    parser.add_argument('--batch-size', default=8, type=int,
                        help='train batch size')
    parser.add_argument('--meta-epochs', default=25, type=int,
                        help='number of meta epochs to train')
    parser.add_argument('--sub-epochs', default=4, type=int,
                        help='number of sub epochs to train')
    parser.add_argument('--extractor', default='wide_resnet50_2', type=str,
                        help='feature extractor')
    parser.add_argument('--pool-type', default='avg', type=str,
                        help='pool type for extracted feature maps')
    parser.add_argument('--parallel-blocks', default=[2, 5, 8], type=int, metavar='L', nargs='+',
                        help='number of flow blocks used in parallel flows.')
    parser.add_argument('--pro-eval', action='store_true', default=False,
                        help='evaluate the pro score or not.')
    parser.add_argument('--pro-eval-interval', default=4, type=int,
                        help='interval for pro evaluation.')
    args = parser.parse_args()
    # Mirror every parsed option onto the config module.
    for k, v in vars(args).items():
        setattr(c, k, v)
    # 'transistor' is processed at a lower resolution than the other classes.
    c.input_size = (256, 256) if c.class_name == 'transistor' else (512, 512)
    return c
def main(c):
    """Entry point: parse args into config *c*, pin the GPU, seed RNGs, train."""
    c = parsing_args(c)
    os.environ['CUDA_VISIBLE_DEVICES'] = c.gpu
    init_seeds(seed=c.seed)
    # The run/version name encodes extractor, pooling and parallel-flow layout.
    c.version_name = 'msflow_{}_{}pool_pl{}'.format(c.extractor, c.pool_type, "".join([str(x) for x in c.parallel_blocks]))
    c.ckpt_dir = os.path.join(c.work_dir, c.version_name, c.class_name)
    train(c)
if __name__ == '__main__':
    # The `default` module doubles as the configuration object.
    import default as c
main(c) | cool-xuan/msflow | main.py | main.py | py | 2,580 | python | en | code | 15 | github-code | 36 |
73202366184 | import os, sys
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
# CLI: argv[1] = FAISS index prefix (dir/name), argv[2] = question to answer.
prefix, query = sys.argv[1:3]
api_key = os.environ.get('OPENAI_API_KEY')
if api_key is None:
    sys.exit("OPENAI_API_KEY is unset")
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
llm = OpenAI(temperature=0.7, openai_api_key=api_key)
# "stuff" chain: concatenates all retrieved documents into a single prompt.
qa_chain = load_qa_chain(llm, chain_type="stuff")
fdir = os.path.dirname(prefix)
fname = os.path.basename(prefix)
faiss_index = FAISS.load_local(fdir, embeddings, fname)
# This model's maximum context length is 4097 tokens, 256 for the completion
ss = faiss_index.similarity_search(query.strip(), k=5)
ans = qa_chain.run(input_documents=ss, question=query)
print(ans.strip())
| d2jvkpn/x-ai | pkg/langchain/langchain_query.py | langchain_query.py | py | 833 | python | en | code | 0 | github-code | 36 |
9008219006 | print('Задача 1. Результаты')
import os
import random
# Одному программисту дали задачу для обработки неких результатов тестирования
# двух групп людей. Файл первой группы (group_1.txt) находится в папке task,
# файл второй группы (group_2.txt) — в папке Additional_info.
# На экран нужно было вывести сумму очков первой группы, затем разность очков
# опять же первой группы и напоследок — произведение очков уже второй группы.
# Программист оказался не очень опытным, писал код наобум и даже не стал его
# проверять. И оказалось, этот код просто не работает.
# Sum and "difference" of the scores in the 3rd column of the group-1 file.
file = open('C:\\learn_work\\task\\group_1.txt', 'r', encoding='utf-8')
summa = 0
diff = 0
for i_line in file:
    info = i_line.split()
    # assumes the score is the 3rd whitespace-separated field -- TODO confirm
    summa += int(info[2])
    # NOTE(review): "difference" here is simply the negated sum of the scores
    diff -= int(info[2])
print('Сумма очков первой группы:', summa)
print('Разность очков первой группы:', diff)
file.close()
# Product of the scores of group 2 (file lives under Additional_info).
file_2 = open('C:\\learn_work\\task\\Additional_info\\group_2.txt', 'r', encoding='utf-8')
compose = 1
for i_line in file_2:
    info = i_line.split()
    compose *= int(info[2])
print('Произведение очков второй группы:', compose)
file_2.close()
| SertFly/Python_Basic_Training | Module_22 (Module_9)/Task_9_3_1.py | Task_9_3_1.py | py | 1,562 | python | ru | code | 0 | github-code | 36 |
34620661332 | from turtle import Turtle
import random
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]  # car body colours
BORDER = "black"               # outline colour for every car
STARTING_MOVE_DISTANCE = 5     # pixels per frame at level 1
MOVE_INCREMENT = 5             # speed gained per completed level
class CarManager:
    """Spawns, advances and recycles the car obstacles for the crossing game."""

    def __init__(self):
        self.all_cars = []  # turtles for the cars currently on screen
        self.initial_speed = STARTING_MOVE_DISTANCE

    def createCar(self):
        """With a 1-in-4 chance, spawn a new car at the right edge.

        Spawning only sometimes keeps gaps between cars so the player can move.
        """
        chance = random.randint(1, 4)
        if chance == 1:
            new_car = Turtle()
            new_car.speed("fastest")
            new_car.penup()
            new_car.shape("square")
            new_car.shapesize(stretch_wid=1, stretch_len=2)
            new_car.color(BORDER, random.choice(COLORS))
            random_y = random.randint(-230, 230)
            new_car.goto(300, random_y)
            self.all_cars.append(new_car)

    def moveCars(self):
        """Move every live car leftwards by the current speed."""
        for car in self.all_cars:
            car.backward(self.initial_speed)

    def fasterCars(self):
        """Level up: retire the current cars and raise the speed for the next level."""
        for car in self.all_cars:
            # Hide and park each car off-screen before dropping it.
            car.hideturtle()
            car.goto(-350, car.ycor())
        # BUG FIX: the original `del car` only unbound the loop variable; the
        # turtles stayed in all_cars and kept being moved every frame forever.
        # Clearing the list actually retires them, as the original comment intended.
        self.all_cars.clear()
        self.initial_speed += MOVE_INCREMENT
| bose-aritra2003/Turtle-Crossing | car_manager.py | car_manager.py | py | 1,226 | python | en | code | 1 | github-code | 36 |
26546120550 | # Пример кода 1. Файл test_add_house
#Асбстрактный сервис по созданию сущности дома и квартир в нем (для ТСЖ)
from model.new_house import New_house
from model.rooms import Rooms
import time
def test_add_house(app):
    """Create a house record via the app fixture, then add one flat to it."""
    app.new_house.fill_info(New_house(district="Железнодорожный", street="Тестовая", house_number="1",
                                      postal_index="630132", tech_status="90",
                                      maintenance_date="01.01.1992", build_date="01.01.1991",
                                      surrounding_area="500", roof_area="250", attic_area="250",
                                      cellar_area="250", stairs_area="300"))
    # NOTE(review): fixed sleep to let the UI settle; an explicit wait would be more robust.
    time.sleep(3)
    app.rooms.fill_flats(Rooms(entrance="1", room_number="1", floor_number="1",
                               total_area="45", living_area="28"))
# Пример кода 2. Файл edit_house
from model.new_house import New_house
def test_modify_house(app):
    """Edit the first house in the list, changing address and area fields."""
    app.new_house.modify_first_house(New_house(district="Ленинский", street="ТестоваяИзмененная", house_number="2",
                                               postal_index="630054", tech_status="95", surrounding_area="600",
                                               roof_area="350", attic_area="350", cellar_area="350", stairs_area="200"))
| NickVolzh/tasks2 | task_review_service.py | task_review_service.py | py | 1,415 | python | ru | code | 0 | github-code | 36 |
74731484265 | import torch
from torch.optim.optimizer import Optimizer
class COCOB(Optimizer):
    r"""Implements COCOB algorithm.
    It has been proposed in `Training Deep Networks without Learning Rates Through Coin Betting`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        alpha (float, optional): It was proposed to increase the stability in the first iterations,
            similarly and independently to the learning rate warm-up. The number roughly denotes the
            number of rounds of warm-up (default 100)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    .. _Training Deep Networks without Learning Rates Through Coin Betting:
        https://arxiv.org/abs/1705.07795
    """
    def __init__(self, params, alpha: float = 100, eps: float = 1e-8, weight_decay: float = 0):
        # Validate hyper-parameters up front so misuse fails fast.
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= alpha:
            raise ValueError("Invalid alpha value: {}".format(alpha))
        if not 0.0 <= weight_decay:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))
        # alpha and eps are shared across all parameter groups, so they are
        # kept on the instance rather than in the per-group defaults.
        defaults = dict(weight_decay=weight_decay)
        self._alpha = alpha
        self._eps = eps
        super(COCOB, self).__init__(params, defaults)
    @torch.no_grad()
    def step(self, closure = None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('COCOB does not support sparse gradients')
                state = self.state[p]
                # State initialization (per-coordinate betting state)
                if len(state) == 0:
                    # Sum of the negative gradients
                    state['sum_negative_gradients'] = torch.zeros_like(p).detach()
                    # Sum of the absolute values of the stochastic subgradients
                    state['grad_norm_sum'] = torch.zeros_like(p).detach()
                    # Maximum observed scale
                    state['L'] = self._eps*torch.ones_like(p).detach()
                    # Reward/wealth of the algorithm for each coordinate
                    state['reward'] = torch.zeros_like(p).detach()
                    # We need to save the initial point because this is a FTRL-based algorithm
                    state['x0'] = torch.clone(p.data).detach()
                sum_negative_gradients, grad_norm_sum, L, reward, x0 = (
                    state['sum_negative_gradients'],
                    state['grad_norm_sum'],
                    state['L'],
                    state['reward'],
                    state['x0'],
                )
                if group['weight_decay'] != 0:
                    grad = grad.add(p, alpha=group['weight_decay'])
                # update maximum range of the gradients
                torch.max(L, torch.abs(grad), out=L)
                # update dual vector
                sum_negative_gradients.sub_(grad)
                # update sum of the absolute values of the gradients
                grad_norm_sum.add_(torch.abs(grad))
                # update the wealth
                reward.addcmul_(grad, p.data.sub(x0), value=-1)
                # reset the wealth to zero in case we lost all
                torch.maximum(reward, torch.zeros_like(reward), out=reward)
                # calculate denominator
                den = torch.maximum(grad_norm_sum.add(L), L.mul(self._alpha)).mul(L)
                # update model parameters: bet a fraction of (wealth + L) in the
                # direction of the accumulated negative gradients, from x0.
                p.data.copy_(reward.add(L).mul(sum_negative_gradients).div(den).add(x0))
        return loss
| bremen79/parameterfree | parameterfree/cocob.py | cocob.py | py | 4,274 | python | en | code | 73 | github-code | 36 |
26246312144 | import serial
from sense_hat import SenseHat
from socket import gethostname
from xbee import XBee
from statistics import median
def clear_matrix():
    """Blank the Sense HAT LED matrix."""
    sense.clear()
def show_hostname():
    """Scroll this Pi's hostname across the LED matrix."""
    hostname = gethostname()
    sense.show_message("Hostname: " + hostname)
def receive_data(data):
    """XBee RX callback: answer pings, and record the RSSI of pong replies.

    RSSI samples are appended both to the flat `rssi_list` and to the
    per-anchor `rssi_dict`, keyed by the sender's 16-bit source address.
    """
    print("received data: ", data)
    if (data["rf_data"] == b"ping"):
        # Received ping? -> Send pong back
        send_data("pong")
    elif (data["rf_data"] == b"pong"):
        # Received pong? -> Store & calc RSSI
        rssi_list.append(ord(data["rssi"]))
        print_current_rssi_median()
        # BUG FIX: membership was tested against rssi_list (ints) instead of
        # rssi_dict, so the condition was always true and the per-anchor
        # history was wiped on every packet. setdefault keeps the history.
        rssi_dict.setdefault(data["source_addr"], []).append(ord(data["rssi"]))
def send_data(data, dest_addr="\x00\x0A"):
    """Transmit *data* to the XBee at *dest_addr* (16-bit address, default 0x000A)."""
    xbee.send("tx",
              frame_id="\x00",
              dest_addr=dest_addr,
              data=data)
def init_rssi_calc(n_pings=10):
    """Start a measurement round: clear collected samples and send *n_pings* pings."""
    # BUG FIX: without `global`, the assignment only created a dead local and
    # the module-level sample list was never reset between rounds.
    global rssi_list
    rssi_list = []
    dest_addr = "\x00\x0A"  # 2byte hex value (TODO: Set according to adress of destination XBee module)
    for _ in range(n_pings):
        send_data("ping", dest_addr)
def print_current_rssi_median():
    """Print the median of all collected RSSI samples and the derived distance."""
    med = median(rssi_list)
    print("Current RSSI median with {} received pongs: {}".format(len(rssi_list), med))
    dist = dist_from_rssi(med)
    print("Current dist from RSSI: dist = {}".format(dist))
def dist_from_rssi(rssi):
    """Convert an RSSI reading to an estimated distance (metres).

    Log-distance path-loss model:
        RSSI = -10 * n * log10(d) + A   =>   d = 10 ** ((A - RSSI) / (10 * n))
    """
    n = 2.8  # path loss exponent, typically 2 (free space) to 4 (indoors)
    A = 33   # reference RSSI at 1 m distance (TODO: measure for this hardware)
    # BUG FIX: the original computed 10**(A/10*n), which ignored the *rssi*
    # argument entirely and mis-parenthesised the exponent.
    dist = 10 ** ((A - rssi) / (10 * n))
    return dist
def three_anchor_bbox():
    """Estimate the tag position with the bounding-box method.

    Each anchor's median RSSI is converted to a distance; the estimate is the
    centre of the intersection of the per-anchor axis-aligned boxes.
    """
    dist_dict = {}
    for anchor in rssi_dict:
        dist_dict[anchor] = dist_from_rssi(median(rssi_dict[anchor]))
    x = 1/2 * (min([anchor_positions[anchor][0] + dist_dict[anchor] for anchor in anchor_positions]) + \
               max([anchor_positions[anchor][0] - dist_dict[anchor] for anchor in anchor_positions]))
    y = 1/2 * (min([anchor_positions[anchor][1] + dist_dict[anchor] for anchor in anchor_positions]) + \
               max([anchor_positions[anchor][1] - dist_dict[anchor] for anchor in anchor_positions]))
    print(">> BBox calculation done: X: {} | Y: {}".format(x,y))
def three_anchor_multilat():
    """Estimate the tag position by least-squares multilateration.

    Converts each anchor's median RSSI to a distance and feeds anchors plus
    distance measurements to the `localization` package's LSE solver.
    """
    print("... Starting multilateration.")
    dist_dict = {}
    for anchor in rssi_dict:
        dist_dict[anchor] = dist_from_rssi(median(rssi_dict[anchor]))
    # https://github.com/kamalshadi/Localization
    import localization as lx
    P=lx.Project(mode='2D',solver='LSE')
    print("... adding anchors")
    for anchor in anchor_positions:
        P.add_anchor(anchor, anchor_positions[anchor])
    t,label = P.add_target()
    print("... adding measurements")
    for dist in dist_dict:
        P.add_measure(dist, dist_dict[dist])
    print("... calculating...")
    P.solve()
    print("> Done! Multilat result:", t.loc)
def broadcast_ping():
    """Reset the per-anchor samples and broadcast a ping to all XBee nodes."""
    # BUG FIX: without `global`, the reset only rebound a dead local and the
    # module-level per-anchor map was never cleared.
    global rssi_dict
    rssi_dict = {}
    # 0xFFFF is the XBee broadcast address.
    send_data("ping", "\xFF\xFF")
if __name__ == "__main__":
    # Hardware setup: Sense HAT for input/display, XBee on USB serial for radio.
    sense = SenseHat()
    print(">> Opening serial port...")
    ser = serial.Serial("/dev/ttyUSB1", 9600)
    xbee = XBee(ser, callback=receive_data)
    rssi_list = []   # flat list of all RSSI samples from pong replies
    rssi_dict = {}   # per-anchor RSSI samples, keyed by source address
    ## TODO: Fill anchor position dictionary!
    anchor_positions = {
        "add1": (0, 1),
        "add2": (0, 3),
        "add3": (0, 4)
    }
    print(">> Waiting for events...")
    print("Middle: clear_matrix, left: init_rssi_calc, right: three_anchor_bbox, down: broadcast_ping")
    print("Sequence: broadcast_ping -> three_anchor_bbox / three_anchor_multilat")
    # Event loop: map Sense HAT joystick presses to the actions above.
    while True:
        for event in sense.stick.get_events():
            if event.action == "pressed":
                if event.direction == "middle":
                    print("** Event: Pressed.")
                    clear_matrix()
                elif event.direction == "left":
                    print("** Event: Left")
                    init_rssi_calc()
                elif event.direction == "right":
                    print("** Event: Right.")
                    three_anchor_bbox()
                elif event.direction == "down":
                    print("** Event: Down")
                    broadcast_ping()
                elif event.direction == "up":
                    print("** Event: up")
                    three_anchor_multilat()
| tristndev/UzL_DSN | Tutorial 4/ex04_01_RSSI_to_distance.py | ex04_01_RSSI_to_distance.py | py | 4,390 | python | en | code | 1 | github-code | 36 |
11624227934 | import re
import sys
from napalm import get_network_driver
from getpass import getpass
def pretty_print(d, indent=0):
    """Recursively print nested dict *d*, one key per line, tab-indented by depth.

    Dict values recurse one level deeper; list values print one element per
    line; any other value prints on a single indented line.
    """
    pad = '\t' * indent
    child_pad = '\t' * (indent + 1)
    for key, value in d.items():
        print(pad + str(key))
        if isinstance(value, dict):
            pretty_print(value, indent + 1)
            continue
        entries = value if isinstance(value, list) else [value]
        for entry in entries:
            print(child_pad + str(entry))
def build_dict(cfg):
    """ Builds nested/deep dictionary from Cisco ios
    config using recursion function 'child()', which also
    changes "the most child" dictionaries to list if possible.
    For global Cisco commands make special keys based on
    first word in the command, e.g.: '# aaa #'
    """
    def group_global_childless(dct):
        # Fold childless global commands into '# <first-word> #' buckets;
        # 'no foo ...' commands are grouped under the word following 'no'.
        for k in list(dct):
            if not dct[k]:
                dct.pop(k,None)
                w = k.split()
                if w[0] == 'no':
                    sec_name = f"# {w[1]} #"
                else:
                    sec_name = f"# {w[0]} #"
                if sec_name in dct.keys():
                    dct[sec_name].append(k)
                else:
                    dct.update({sec_name: [k]})
    def child(base_indent):
        # Recursive descent over `lines`: consume lines while their
        # indentation is deeper than base_indent, nesting as it grows.
        nonlocal n
        result = {}
        while True:
            if n >= len(lines):
                break
            stripped = lines[n].lstrip()
            indent = len(lines[n]) - len(stripped)
            if base_indent >= indent:
                break
            n = n + 1
            result.update({stripped: child(indent)})
        # In case we got all values={} transform result to list
        if not [v for v in result.values() if v]:
            result = [k for k in result.keys()]
        return result
    n = 0  # shared cursor into `lines`, advanced by child()
    cfg, special_cases = cut_special_cases(cfg)
    lines = cfg.splitlines()
    # Drop blanks, comment lines ('!') and the closing 'end' marker.
    lines = [line for line in lines if line
             and not line.startswith('!')
             and not line.startswith('end')]
    dct = child(base_indent=-1)
    dct.update(special_cases)
    group_global_childless(dct)
    return(dct)
def cut_special_cases(cfg):
    """ Cut special cases (banners, boot markers) from config and
    put them in special_cases dictionary, that is also returned
    """
    special_cases = {}
    # Banner blocks: capture the delimiter token and everything up to its repeat.
    rgx = r"((?:(?P<type>(?:set\s+)*banner\s\w+\s+)(?P<delim>\S+))((.*\r?\n)+?.*?)(\3).*)"
    re_banners = re.findall(rgx,cfg)
    for r in re_banners:
        cfg = cfg.replace(r[0],"",1)
        special_cases.update({f"# {r[1]}#": r[0].splitlines()})
    # boot-start-marker ... boot-end-marker section.
    # NOTE(review): re.search returns None when the markers are absent, which
    # would raise TypeError below -- confirm all inputs contain boot markers.
    rgx = r"boot-start-marker\r?\n(.*\r?\n)*boot-end-marker"
    re_boot = re.search(rgx,cfg)
    cfg = cfg.replace(re_boot[0],"",1)
    special_cases.update({"# boot #": re_boot[0].splitlines()})
    return cfg, special_cases
def main():
    """Read a config from the file given as argv[1], or -- when no file is
    given -- fetch the running config interactively from a Cisco IOS device,
    then print the parsed structure with indentation.
    """
    # BUG FIX: the original tested `len(sys.argv) >= 1`, which is always true
    # (argv[0] is the script name), so the interactive branch was unreachable
    # and running with no argument crashed with IndexError.
    if len(sys.argv) > 1:
        file_name = sys.argv[1]
        # Context manager ensures the file handle is closed (was leaked before).
        with open(file_name) as fp:
            content = fp.read()
        dct = build_dict(content)
    else:
        driver = get_network_driver('ios')
        ipaddress = input('IP address: ')
        username = input('Username: ')
        ios_conn = driver(ipaddress, username, getpass())
        ios_conn.open()
        cfgs = ios_conn.get_config()
        dct = build_dict(cfgs['running'])
    pretty_print(dct,1)
if __name__ == "__main__":
    main()
| pkomissarov/cisco-parsers | parsecfg.py | parsecfg.py | py | 3,480 | python | en | code | 0 | github-code | 36 |
73157619305 | from distutils.core import setup
from os.path import isdir
from itertools import product
# Only ship packages whose directory actually exists in this checkout.
all_packages = ['airborne_car_simulator']
packages = list(filter(isdir, all_packages))
setup(
    name='ese615',
    packages=packages,
    version='0.1',
    install_requires=[
        'pyyaml',
        'opencv-python',
        'matplotlib == 3.7.1',
        'numpy',
        'scipy'])
| ngurnard/f1tenth_pitch_control | simulator/airborne_car_simulator/setup.py | setup.py | py | 398 | python | en | code | 0 | github-code | 36 |
16015771292 | import heapq
from sys import stdin
# Greedy slime merging: always combine the two smallest values; merging
# a and b scores a * b, and a min-heap yields the smallest pair in O(log n).
n = int(stdin.readline())
arr = list(map(int,stdin.readline().split()))
heap, answer = [], 0
for _ in range(n):
    num = arr[_]
    heapq.heappush(heap, num)
# Merge until a single value remains, accumulating the total score.
while len(heap) != 1:
    a = heapq.heappop(heap)
    b = heapq.heappop(heap)
    heapq.heappush(heap, a + b)
    answer += a * b
print(answer) | HiImConan/algorithm-study | 백준/Silver/14241. 슬라임 합치기/슬라임 합치기.py | 슬라임 합치기.py | py | 518 | python | en | code | 0 | github-code | 36 |
26825630541 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import argparse, os, sys, glob, time
from tqdm import tqdm
from skimage.transform import resize
import cPickle
from keras.layers import average
from keras.models import load_model, Model
from training_utils import scale_data, compute_time_series
import PlotCand_dom
from waterfaller import filterbank, waterfall
"""After taking in a directory of .fil files and a model,
outputs probabilities that the files contain an FRB. Also
returns the files that have FRBs in them, and optionally
saves those filenames to some specified document."""
# Disable HDF5 file locking so .h5 Keras models load cleanly from shared filesystems.
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
def extract_candidates(fil_file, frb_cands, frbcand_path, NCHAN, NTIME, save_png=False):
    """Cut candidate spectra out of *fil_file* for every entry in *frb_cands*.

    Reads observation metadata from the filterbank header and delegates the
    actual extraction/plotting to PlotCand_dom.extractPlotCand.
    """
    # load filterbank file and candidate list
    f = PlotCand_dom.FilReader(fil_file)
    # other parameters
    noplot = 1
    nchan = f.header['nchans']
    fch1 = f.header['fch1']
    foff = f.header['foff']
    fl = fch1 + (foff*nchan)  # band edge; assumes foff is negative -- TODO confirm
    fh = fch1
    tint = f.header['tsamp']
    Ttot = f.header['tobs']
    kill_time_range, kill_chans = [], []
    source_name = f.header['source_name']
    mask_file, smooth, zerodm, csv_file = [], [], [], [] # last arguments are missing
    PlotCand_dom.extractPlotCand(fil_file, frb_cands, noplot, fl, fh, tint, Ttot, kill_time_range,
                                 kill_chans, source_name, nchan, NCHAN, NTIME, mask_file, smooth,
                                 zerodm, csv_file, save_png, frbcand_path)
def save_prob_to_disk(frb_info, pred, fname):
    """Given the original FRB candidate info and predictions
    for each candidate, save candidate info and prediction probabilities
    to disk in the same directory as the original .txt file.

    frb_info: structured array loaded from the FRBcand file.
    pred: per-candidate probabilities, ordered by descending SNR.
    fname: output path for the combined table.
    """
    assert pred.size == frb_info.size, \
        "Number of predictions don't match number of candidates ({0} vs. {1})".format(len(pred), len(frb_info))
    if frb_info.size == 1: # length-1 structured arrays are buggy
        frb_info = frb_info.reshape(1)
    # sort original FRBcand file by largest SNR to be consistent with prediction order
    frb_info[::-1].sort(order='snr')
    # create new array to hold candidate data and probabilities
    new_dt = np.dtype(frb_info.dtype.descr + [('frb_prob', 'f4')])
    previous_names = ['snr','time','samp_idx','dm','filter','prim_beam']
    FRBcand_with_probs = np.zeros(frb_info.shape, dtype=new_dt)
    # populate new array with candidate data and predicted probabilities
    FRBcand_with_probs[previous_names] = frb_info[previous_names]
    FRBcand_with_probs['frb_prob'] = pred
    # re-sort by sample index
    FRBcand_with_probs.sort(order='samp_idx')
    np.savetxt(fname, FRBcand_with_probs, fmt='%-12s')
def get_pulses(dir_spectra, num_channels, keep_spectra=False):
    """Imports *ALL SPECTRA* in given directory and appends them to one list.
    Spectra are assumed to be in .pickle files which are subsequently deleted
    after being imported (unless keep_spectra is True).

    Returns a tuple (sorted pickle paths, array of Spectra objects resized to
    num_channels x 256).
    """
    # get all pickled Spectra and prepare array to hold them in memory
    pickled_spectra = np.sort(glob.glob('{}/*sec_DM*.pickle'.format(dir_spectra)))
    print('Spectra found at {}'.format(pickled_spectra))
    candidate_spectra = []
    # add each Spectra to array
    for spec_file in tqdm(pickled_spectra):
        with open(spec_file, 'rb') as f:
            spectra_obj = cPickle.load(f)
        # resize image to correct size for neural network prediction
        spectra_obj.data = resize(spectra_obj.data, (num_channels, 256), mode='symmetric', anti_aliasing=False)
        candidate_spectra.append(spectra_obj)
    # remove all pickle files matching this format
    if not keep_spectra:
        os.system('rm {}/*sec_DM*.pickle'.format(dir_spectra))
    return pickled_spectra, np.array(candidate_spectra)
def create_ensemble(model_names):
    """Create ensemble of Keras models. The predictions from each model
    are averaged to get one final probability for each test example. This
    reduces variance, assuming each of the models tests a different hypothesis,
    i.e. each of the models is not exactly the same."""
    individual_outputs = []
    for name in model_names:
        m = load_model(name, compile=True)
        # get prediction outputs from each model
        individual_outputs.append(m.outputs[0])
    # average all predictions
    ensemble_out = average(individual_outputs)
    # construct ensemble model with old inputs and averaged outputs
    # NOTE(review): uses the inputs of the LAST loaded model -- this assumes
    # all models share identical input tensors; confirm before mixing models.
    ensemble_model = Model(inputs=m.inputs, outputs=ensemble_out)
    return ensemble_model
if __name__ == "__main__":
    """
    Parameters
    ---------------
    model_name: str
        Path to trained model used to make prediction. Should be .h5 file
    frb_cand_path: str
        Path to .txt file that contains data about pulses within filterbank file. This
        file should contain columns 'snr','time','samp_idx','dm','filter', and'prim_beam'.
    filterbank_candidate: str
        Path to candidate file to be predicted. Should be .fil file
    NCHAN: int, optional
        Number of frequency channels (default 64) to resize psrchive files to.
    no-FRBcandprob: flag, optional
        Whether or not to save edited FRBcand file containing pulse probabilities.
    FRBcandprob: str, optional
        Path to save FRBcandprob.txt (default is same path as frb_cand_path)
    save_top_candidates: str, optional
        Filename to save pre-processed candidates, just before they are thrown into CNN.
    save_predicted_FRBs: str, optional
        Filename to save every candidate predicted to contain an FRB.
    """
    # Read command line arguments
    parser = argparse.ArgumentParser()
    # main arguments needed for prediction
    parser.add_argument('frb_cand_path', type=str, help='Path to .txt file containing data about pulses.')
    parser.add_argument('model_names', nargs='+', type=str,
                        help='Path to trained models used to make prediction. If multiple are given, use all to ensemble.')
    parser.add_argument('-f', '--fil_file', dest='filterbank_candidate', type=str, required='--skip_extract' not in sys.argv,
                        help='Path to filterbank file with candidates to be predicted.')
    # can set if pickle files are already in directory to avoid having to redo extraction
    parser.add_argument('--skip_extract', action='store_true',
                        help='Whether to directly predict pickled spectra found in same dir as frb_cand_path.')
    parser.add_argument('--NCHAN', type=int, default=64, help='Number of frequency channels to use from filterbank files.')
    parser.add_argument('--NTIME', type=int, default=256, help='Number of time bins from filterbank files.')
    parser.add_argument('--thresh', type=float, default=0.5, help='Threshold probability to admit whether example is FRB or RFI.')
    parser.add_argument('--no-FRBcandprob', dest='suppress_prob_save', action='store_true',
                        help='Chooses not to save the FRBcand .txt file along with candidate probabilities.')
    parser.add_argument('--keep_spectra', dest='keep_spectra', action='store_true',
                        help='Keep spectra pickle files after creating and using them. Default is to delete.')
    parser.add_argument('--FRBcandprob', type=str, default=None,
                        help='Directory to save new FRBcand file with probabilities (default is same dir as frb_cand_path)')
    parser.add_argument('--save_predicted_FRBs', type=str, default=None, help='Filename to save all candidates.')
    parser.add_argument('--save_top_candidates', type=str, default=None, help='Filename to save plot of top 5 candidates.')
    args = parser.parse_args()
    # NOTE(review): set_defaults after parse_args has no effect; harmless here
    # because store_true flags already default to False.
    parser.set_defaults(skip_extract=False, suppress_prob_save=False, keep_spectra=False)
    # load file path
    filterbank_candidate = args.filterbank_candidate
    frb_cand_path = args.frb_cand_path
    NCHAN = args.NCHAN
    NTIME = args.NTIME
    model_names = args.model_names # either single model or list of models to ensemble predict
    frb_cand_info = np.loadtxt(frb_cand_path, dtype={'names': ('snr','time','samp_idx','dm','filter','prim_beam'),
                                                     'formats': ('f4', 'f4', 'i4','f4','i4','i4')})
    if args.skip_extract is False:
        print("Getting data about FRB candidates from " + frb_cand_path)
        extract_candidates(filterbank_candidate, frb_cand_info, frb_cand_path, NCHAN, NTIME)
        time.sleep(10) # give some leeway for extraction in background to finish
    print("Retrieving candidate spectra")
    spectra_paths, candidate_spectra = get_pulses(os.path.dirname(frb_cand_path), NCHAN, keep_spectra=args.keep_spectra)
    # retrieve freq-time data from each spectra
    ftdata = np.array([spec.data for spec in candidate_spectra])
    # compute time series for every spectrogram in ftdata
    print('Getting time series for each sample...'),
    time_series = compute_time_series(ftdata)
    print('All time series computed!\n')
    # scale each channel to zero median and each array to unit stddev
    print("\nScaling arrays."),
    scale_data(ftdata)
    print("Done scaling!")
    # add num_channel dimension to vectors for Keras
    ftdata = ftdata[..., None]
    time_series = time_series[..., None]
    # load model(s) and predict
    if len(model_names) == 1:
        model = load_model(model_names[0], compile=True)
    else:
        model = create_ensemble(model_names)
    predictions = model.predict([ftdata, time_series], verbose=1)[:, 0]
    print(predictions)
    # save probabilities to disk along with candidate data
    if not args.suppress_prob_save:
        if not args.FRBcandprob:
            FRBcand_prob_path = os.path.dirname(frb_cand_path) + '/FRBcand_prob.txt'
        else:
            FRBcand_prob_path = args.FRBcandprob + '/FRBcand_prob.txt'
        print("Saving probabilities to {0}".format(FRBcand_prob_path))
        save_prob_to_disk(frb_cand_info, predictions, FRBcand_prob_path)
    # threshold predictions to choose FRB/RFI
    voted_FRB_probs = predictions > args.thresh
    # get paths to predicted FRBs and their probabilities
    frb_filenames = spectra_paths[voted_FRB_probs]
    predicted_frbs = candidate_spectra[voted_FRB_probs]
    frb_probs = predictions[voted_FRB_probs]
    # save all predicted FRBs to PDF, where each page contains spectrogram and 1D signal
    if args.save_predicted_FRBs:
        from matplotlib.backends.backend_pdf import PdfPages
        print('Saving all predicted FRBs to {}.pdf'.format(args.save_predicted_FRBs))
        with PdfPages(args.save_predicted_FRBs + '.pdf') as pdf:
            for spec, prob, name in tqdm(zip(predicted_frbs, frb_probs, frb_filenames), total=len(predicted_frbs)):
                frb_name = os.path.basename(name)
                fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8, 6))
                signal = np.sum(spec.data, axis=0) # 1D time series of array
                # plot spectrogram on top and signal below it
                ax[0].imshow(spec.data, extent=[spec.starttime, spec.starttime + len(signal)*spec.dt,
                             np.min(spec.freqs), np.max(spec.freqs)], origin='upper', aspect='auto')
                ax[0].set(xlabel='time (s)', ylabel='freq (MHz)', title='{0}\nConfidence: {1}'.format(frb_name, prob))
                ax[1].plot(np.linspace(spec.starttime, spec.starttime + len(signal)*spec.dt, len(signal)), signal)
                ax[1].set(xlabel='time (s)', ylabel='flux (Janksy)')
                pdf.savefig()
                plt.close(fig)
    # save the best 5 candidates to disk along with 1D signal
    if args.save_top_candidates:
        print("Saving top 5 candidates to {0}".format(args.save_top_candidates))
        # sort probabilities high --> low to get top candidates in order
        sorted_predictions = np.argsort(-predictions)
        top_pred_spectra = candidate_spectra[sorted_predictions]
        probabilities = predictions[sorted_predictions]
        fig, ax_pred = plt.subplots(nrows=5, ncols=2, figsize=(14, 12))
        for spec, prob, ax in zip(top_pred_spectra[:5], probabilities[:5], ax_pred):
            signal = np.sum(spec.data, axis=0) # 1D time series of array
            # plot spectrogram on left and signal on right
            ax[0].imshow(spec.data, extent=[spec.starttime, spec.starttime + len(signal)*spec.dt,
                         np.min(spec.freqs), np.max(spec.freqs)], origin='upper', aspect='auto')
            ax[0].set(xlabel='time (s)', ylabel='freq (MHz)', title='Confidence: {}'.format(prob))
            ax[1].plot(np.linspace(spec.starttime, spec.starttime + len(signal)*spec.dt, len(signal)), signal)
            ax[1].set(xlabel='time (s)', ylabel='flux (Janksy)')
        fig.suptitle('Top 5 Predicted FRBs')
        fig.tight_layout(rect=[0, 0.02, 1, 0.95])
        fig.show()
        fig.savefig(args.save_top_candidates, dpi=300)
    print('Number of FRBs: {} / {} candidates'.format(np.sum(voted_FRB_probs), len(voted_FRB_probs)))
| DominicL3/hey-aliens | simulateFRBclassification/predict.py | predict.py | py | 13,210 | python | en | code | 6 | github-code | 36 |
1313517117 | from __future__ import print_function
import argparse
import keras
from data_utils import load_data
from sklearn.model_selection import train_test_split
from model import vgg16
from hyperspace import hyperdrive
# Experiment configuration: CIFAR-style 10-class problem, short training runs
# so each hyperparameter evaluation stays cheap.
num_classes = 10
batch_size = 32
epochs = 5
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = load_data()
# Further split to create validation set (same size as the test set).
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=x_test.shape[0],
                                                  shuffle=True, random_state=0)
# Convert class vectors to binary class matrices (one-hot targets).
y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_val = x_val.astype('float32')
x_test = x_test.astype('float32')
# Normalize pixel intensities from [0, 255] to [0, 1].
x_train /= 255
x_val /= 255
x_test /= 255
def objective(params):
    """
    Objective function to be minimized by hyperdrive.

    Trains a small VGG16 variant with the kernel sizes taken from *params*
    and returns the validation loss (lower is better).

    Parameters
    ----------
    `params` [list]
        Hyperparameters to be set in optimization iteration.
        - Managed by hyperdrive.
        - Only the first two entries (kernel1, kernel2) are used; the
          remaining dimensions are currently disabled (see commented lines).

    Returns
    -------
    float
        Validation loss of the trained model.
    """
    kernel1 = int(params[0])
    kernel2 = int(params[1])
    # kernel3 = int(params[2])
    # kernel4 = int(params[3])
    # kernel5 = int(params[4])
    # kernel6 = int(params[5])
    # batch_size = int(params[6])
    # model = vgg16(kernel1=kernel1, kernel2=kernel2, kernel3=kernel3,
    #               kernel4=kernel4, kernel5=kernel5, kernel6=kernel6)
    model = vgg16(kernel1=kernel1, kernel2=kernel2)
    model.compile(optimizer=keras.optimizers.rmsprop(lr=0.0001, decay=1e-6),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # Uses the module-level training/validation arrays prepared at import time.
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_val, y_val),
              shuffle=True)
    # Score trained model on the validation split (not the test set).
    scores = model.evaluate(x_val, y_val, verbose=1)
    print('Validation loss:', scores[0])
    print('Validation accuracy:', scores[1])
    return scores[0]
def main():
    """Parse CLI arguments and launch the hyperdrive hyperparameter search."""
    arg_parser = argparse.ArgumentParser(description='Setup experiment.')
    arg_parser.add_argument('--results_dir', type=str, help='Path to results directory.')
    cli_args = arg_parser.parse_args()
    # Search ranges for the two tuned kernel sizes (kernel1, kernel2).
    # Further dimensions (kernel3..kernel6, batch_size) are currently disabled.
    search_space = [
        (2, 8),
        (2, 8),
    ]
    hyperdrive(
        objective=objective,
        hyperparameters=search_space,
        results_path=cli_args.results_dir,
        model="GP",
        n_iterations=11,
        verbose=True,
        random_state=0,
    )
if __name__ == '__main__':
main()
| yngtodd/vgg_hyper | vgg_hyper/main.py | main.py | py | 2,878 | python | en | code | 0 | github-code | 36 |
import playment
client = playment.Client("your-x-api-key-here")
frames = [
    "https://example.com/image_url_1",
    "https://example.com/image_url_2",
    "https://example.com/image_url_3"
]
"""
Create sensor_data variable
"""
sensor_data = playment.SensorData()
"""
Defining Sensor: Contain details of sensor
:param _id: This is the sensor's id.
:param name: Name of the sensor.
:param primary_view: Only one of the sensor can have primary_view as true.
:param state(optional): If you want this sensor not to be annotated, provide state as non_editable. Default is editable.
"""
sensor = playment.Sensor(_id="right", name="right", primary_view=True)
"""
Adding Sensor
"""
sensor_data.add_sensor(sensor=sensor)
"""
Preparing Frame Data
"""
for i in range(len(frames)):
    # Preparing a sensor frame object with sensor frame url and sensor_id
    sensor_frame_object = playment.SensorFrameObject(frames[i], sensor.id)
    # Preparing a frame with every sensor frame object
    frame = playment.Frame(str(i), [sensor_frame_object])
    # Adding the frame in sensor data
    sensor_data.add_frame(frame=frame)
"""
Creating a job with sensor data
:param reference_id: This will be unique for every job in a given project.
:param tag: This will be provided by Playment and will only take one type of data. For e.g. ImageData or SensorData.
:param data: This is the data you are sending to Playment.
:param batch_id: This is an optional argument which will associate the job to the given batch if its left as none,
                 the job will be associated with the default batch. It is recommended to create a batch for a set of jobs.
:param priority_weight(optional): Range of priority weight is [1,10] and integers only. 10 is the highest priority.
                                  Default is 5.
"""
try:
    job = client.create_job(reference_id="54", tag='sensor_fusion',
                            data=sensor_data, project_id="project_id")
except playment.PlaymentException as e:
    # Surface the API error details instead of crashing the example.
    print(e.code, e.message, e.data)
| crowdflux/playment-sdk-python | examples/video_job_creation.py | video_job_creation.py | py | 2,037 | python | en | code | 0 | github-code | 36 |
31310132227 | """Extract KML data into DataFrame."""
from typing import Dict, Sequence, Union
import numpy as np
import pandas as pd
from pykml import parser
from pykml.factory import KML_ElementMaker as KML
NS = {"t": "http://www.opengis.net/kml/2.2"}
def read_kml(filepath: str) -> "KML.kml":
    """Parse the KML document stored at *filepath* and return its root element.

    Parameters
    ----------
    filepath : str
        Path of the KML file to open (read in binary mode).

    Returns
    -------
    KML.kml
        Root of the parsed KML document.
    """
    with open(filepath, "rb") as kml_file:
        return parser.parse(kml_file).getroot()
def get_doc(root: "KML.kml") -> "KML.Document":
    """Return the first <Document> element directly under the KML *root*.

    Raises
    ------
    IndexError
        If the root has no <Document> child.
    """
    return root.xpath("./t:Document", namespaces=NS)[0]
def get_folders(doc: "KML.Document") -> "KML.Folder":
    """Yield every <Folder> element that is a direct child of *doc*."""
    yield from doc.xpath("./t:Folder", namespaces=NS)
def get_tree(doc: KML.Document) -> dict:
    """Return a dictionary with the data of a KML.Document.
    The result maps folder names to either their own subtree (dict) or, for
    leaf folders with no nested content, the raw folder element. Placemark
    elements found at this level are stored under the reserved key
    "placemarks".
    Parameters
    ----------
    doc : KML.Document
        A KML node with data.
    Returns
    -------
    dict
        Data of a KML.Document.
    """
    # Map folder name -> folder element for every direct child folder.
    folders = {folder.name: folder for folder in get_folders(doc)}
    for folder_name in folders:
        # Recurse into each folder; replace the element by its subtree only
        # when the folder actually contains nested folders or placemarks.
        subfolders = get_tree(folders[folder_name])
        if len(subfolders) > 0:
            folders[folder_name] = subfolders
    placemarks = list(get_placemarks(doc))
    if placemarks:
        # Reserved key holding this level's placemark elements.
        folders["placemarks"] = placemarks
    return folders
def get_placemarks(doc: "KML.Document") -> "KML.Placemark":
    """Yield every <Placemark> element that is a direct child of *doc*."""
    yield from doc.xpath("./t:Placemark", namespaces=NS)
def get_SimpleData(placemark: "KML.Placemark") -> Dict[str, str]:
    """Collect every <SimpleData> entry of *placemark* as a name -> text dict.

    Parameters
    ----------
    placemark : KML.Placemark
        Placemark whose extended-data fields should be extracted.

    Returns
    -------
    dict
        Mapping from each SimpleData "name" attribute to its text content.
    """
    fields = {}
    for entry in placemark.xpath(".//t:SimpleData", namespaces=NS):
        fields[entry.attrib.get("name")] = entry.text
    return fields
def get_description(placemark: "KML.Placemark") -> str:
    """Return the description text of *placemark*.

    When several <description> elements exist, their string forms are joined
    with a "---" separator line.
    """
    parts = [str(node) for node in placemark.xpath(".//t:description", namespaces=NS)]
    return "\n---\n".join(parts)
def get_coordinates(placemark: "KML.Placemark") -> Dict[str, float]:
    """Return the Latitude/Longitude/Altitude of *placemark* as floats.

    KML encodes point coordinates as "lon,lat,alt". Placemarks without a
    Point (or without a coordinates child) yield NaN for all three values.
    """
    has_point = hasattr(placemark, "Point") and hasattr(placemark.Point, "coordinates")
    if has_point:
        lon, lat, alt = placemark.Point.coordinates.text.split(",")
        return {
            "Latitude": float(lat),
            "Longitude": float(lon),
            "Altitude": float(alt),
        }
    return {
        "Latitude": np.nan,
        "Longitude": np.nan,
        "Altitude": np.nan,
    }
def get_placemarks_data(
    placemarks: "Sequence[KML.Placemark]"
) -> Dict[str, Union[str, float]]:
    """Yield one merged data dict per placemark in *placemarks*.

    Each dict combines the description, the point coordinates and the
    SimpleData fields; later sources overwrite earlier keys on collision.
    """
    for pm in placemarks:
        record = {"description": get_description(pm)}
        record.update(get_coordinates(pm))
        record.update(get_SimpleData(pm))
        yield record
def get_data(
    tree: dict,
    folders: Sequence[str] = None
) -> Dict[str, Union[str, float]]:
    """Yield data for each placemark in a tree.
    Parameters
    ----------
    tree : dict
        A dictionary from get_tree().
    folders : Sequence
        A sequence with names of folders to include in the returned data.
        Used internally for recursion; callers normally omit it.
    Yields
    ------
    Dict[str, Union[str, float]]
        A dictionary with all data for a placemark in the given tree,
        including Folder0..FolderN columns recording its folder path.
    """
    if folders is None:
        folders = tuple()
    for node in tree:
        if node == "placemarks":
            # Reserved key: the placemark elements stored at this level.
            for pdata in get_placemarks_data(tree[node]):
                yield dict(
                    # Folder0, Folder1, ... record the path from the root.
                    **{f"Folder{i}": f for i, f in enumerate(folders)},
                    **pdata,
                )
        else:
            # Recurse into the sub-folder, extending the breadcrumb path.
            yield from get_data(
                tree=tree[node],
                folders=tuple([*folders, str(node)]),
            )
def get_dataframe_from_tree(tree: dict) -> pd.core.frame.DataFrame:
    """Flatten *tree* (as produced by get_tree) into a pandas DataFrame.

    Each row is one placemark; columns are the folder path, coordinates,
    description and SimpleData fields.
    """
    records = get_data(tree)
    return pd.DataFrame.from_records(records)
def read_kml_data(filepath: str) -> pd.core.frame.DataFrame:
    """Read the KML file at *filepath* and return its placemark data.

    Convenience wrapper chaining read_kml -> get_doc -> get_tree ->
    get_dataframe_from_tree.
    """
    document = get_doc(read_kml(filepath))
    return get_dataframe_from_tree(get_tree(document))
| dankkom/kmldata | kmldata/parser.py | parser.py | py | 6,224 | python | en | code | 0 | github-code | 36 |
40279875263 | import math
import random
# random.seed(0.24975077935850032)
# random.seed(0.0329938859085428)
# print('Current seed - robustness', random.random())
"""
position 19,30 could be avoided with areaSize parameter and fitness function
Seed: 0.24975077935850032
This seed does a good job of exaggerating the difference between single iteration vs two iterations
"""
#last: 25
# 1, 6, 21 -> No path possible
# 2 -> visits overlay
# 4, 11, 22, 14 (worst i have seen) -> visits potentially overlay (when visited shared)
# 12 -> visits potentially overlay (when visited not shared)
# 9 is really good
def getBorder(d, existing=None, blockMode=True):
    """Add the ring of border cells surrounding a d x d grid to *existing*.

    The border is every cell just outside the grid, i.e. cells whose first or
    second coordinate is -1 or d (corners included). Keys are (x, y) tuples
    when blockMode is True, otherwise "x:y" strings; every value is None (the
    dict is used as an ordered set). Returns the dict.

    Bug fix: the original declared a mutable default (``existing={}``), so
    repeated calls without an explicit dict shared and accumulated border
    cells across different grid sizes.
    """
    if existing is None:
        existing = {}
    # Two opposite sides: first coordinate fixed at -1 or d, over grid rows.
    for i in range(d):
        existing[f'-1:{i}' if not blockMode else (-1, i)] = None
    for i in range(d):
        existing[f'{d}:{i}' if not blockMode else (d, i)] = None
    # Remaining two sides: second coordinate fixed at -1 or d, corners included.
    for i in range(-1, d + 1):
        existing[f'{i}:{-1}' if not blockMode else (i, -1)] = None
    for i in range(-1, d + 1):
        existing[f'{i}:{d}' if not blockMode else (i, d)] = None
    return existing
def produceRandomMaze(percentage, dimension):
    """Build a random square maze and return (start, end, blocked_cells).

    percentage: share (0-100) of the dimension x dimension grid to fill with
        obstacles.
    dimension: side length; interior cells are (x, y) with 0 <= x, y < dimension.

    The returned blocked_cells dict contains the random obstacles plus the
    surrounding border ring from getBorder(). Start (0, 0) and end
    (dimension-1, dimension-1) are never blocked.

    Bug fix: ``random.randint`` is inclusive on both endpoints, so the
    original ``randint(0, dimension)`` could place "obstacles" on the
    out-of-grid coordinate ``dimension`` (already part of the border),
    silently lowering the effective obstacle density inside the maze.
    """
    start = (0, 0)
    end = (dimension - 1, dimension - 1)
    remaining = math.floor(dimension * dimension * percentage / 100)
    obstacles = {}
    while remaining > 0:
        cords = (random.randint(0, dimension - 1), random.randint(0, dimension - 1))
        if cords == start or cords == end or cords in obstacles:
            continue
        obstacles[cords] = None
        remaining -= 1
    return (start, end, getBorder(dimension, obstacles))
| 472mbah/path-finding | algorithms/automate/robustness.py | robustness.py | py | 1,581 | python | en | code | 0 | github-code | 36 |
22660738512 | # data_processing.py
from shapely import wkb
from shapely.geometry import shape
import binascii
import psycopg2
ewkb_data = None
def get_variable_ewkb():
    """Return the module-level EWKB hex string stored by process_for_view().

    Remains None until process_for_view() has run at least once. The debug
    prints mirror the original behavior.
    """
    print("variable ewkb")
    print(ewkb_data)
    return ewkb_data
def process_for_view(data):
    """Convert a GeoJSON feature dict into an EWKB hex string (SRID 4326).

    Side effects: stores the result in the module-level ``ewkb_data`` so
    get_variable_ewkb() can return it later, and prints debug output.
    """
    global ewkb_data  # Declare ewkb_data as a global variable
    # Process the data as needed in your Python program
    # For example, you can access the features using data['features']
    # and perform further processing or analysis
    print("Processing data...")
    geojson_data = data
    # Convert GeoJSON to Shapely geometry
    shapely_geometry = shape(geojson_data['geometry'])
    # Convert Shapely geometry to WKB (Well-Known Binary) format
    wkb_data = shapely_geometry.wkb
    # Convert bytes to hexadecimal string (debug output only)
    wkb_hex = wkb_data.hex()
    # Convert Shapely geometry to EWKB (Extended Well-Known Binary) format.
    # NOTE(review): with hex=True, wkb.dumps returns a hex *string*, not bytes.
    ewkb_data = wkb.dumps(shapely_geometry, srid=4326, hex=True)
    print(wkb_hex)
    print(ewkb_data)
    return ewkb_data
| Fakhrynm/serverdatatrainingsitepython | Processdataview.py | Processdataview.py | py | 1,054 | python | en | code | 0 | github-code | 36 |
7078437282 | import os
import base64
import argparse
from cliff.command import Command
from cliff.show import ShowOne
from cliff.lister import Lister
from meteoroid_cli.meteoroid.v1.client.function_client import FunctionClient
from meteoroid_cli.meteoroid.v1.errors import CommandError
from meteoroid_cli.meteoroid.v1.libs.decorator import fiware_arguments
# File extensions recognised for single-file function uploads.
NODE_JS_EXT = '.js'
PYTHON_EXT = '.py'
JAVA_EXT = '.jar'
SWIFT_EXT = '.swift'
PHP_EXT = '.php'
RUBY_EXT = '.rb'
GO_EXT = '.go'
BAL_BIN_EXT = '.balx'
ZIP_EXT = '.zip'
# Runtime names as expected by the backend.
NODE_JS = 'nodejs'
PYTHON = 'python'
JAVA = 'java'
SWIFT = 'swift'
PHP = 'php'
RUBY = 'ruby'
GO = 'go'
DEFAULT = 'default'
# Maps a file extension to the runtime used when --language is not given.
EXT_LANG = {
    NODE_JS_EXT: NODE_JS,
    PYTHON_EXT: PYTHON,
    JAVA_EXT: JAVA,
    SWIFT_EXT: SWIFT,
    PHP_EXT: PHP,
    RUBY_EXT: RUBY,
    GO_EXT: GO
}
class FunctionRequestDataBuilder:
    """Translate parsed CLI arguments into the request payload for the
    function create/update API calls."""
    def build(self, parsed_args):
        """Build the request dict from *parsed_args*.

        Raises CommandError when a Java archive is given without --main.
        """
        data = {}
        if hasattr(parsed_args, 'name'):
            data['name'] = parsed_args.name
        _, extension = os.path.splitext(parsed_args.file.name)
        if extension == ZIP_EXT or extension == JAVA_EXT or extension == BAL_BIN_EXT:
            # Binary artifacts (zip/jar/balx) are sent base64-encoded.
            data['code'] = base64.b64encode(parsed_args.file.read()).decode("ascii")
            data['binary'] = True
        else:
            # Plain source files are sent as UTF-8 text.
            data['code'] = parsed_args.file.read().decode("utf-8")
        if parsed_args.main is not None:
            data['main'] = parsed_args.main
        else:
            if extension == JAVA_EXT:
                # Java needs the fully-qualified main class to locate the entry point.
                err_message = ('Java actions require --main (-m) to specify '
                               'the fully-qualified name of the main class')
                raise CommandError(err_message)
        if parsed_args.language is not None:
            data['language'] = parsed_args.language
        else:
            # Zip archives carry no single extension to infer a runtime from.
            if extension != ZIP_EXT:
                data['language'] = self.__get_default_language(extension)
        if parsed_args.param is not None:
            data['parameters'] = parsed_args.param
        return data
    def __get_default_language(self, extension):
        """Return "<runtime>:default" for a known file *extension*."""
        language = EXT_LANG[extension]
        return f'{language}:{DEFAULT}'
class StoreKeyPairAction(argparse.Action):
    """argparse action accumulating repeated ``KEY VALUE`` pairs as
    ``{'key': ..., 'value': ...}`` dicts on the namespace.

    NOTE(review): it reads ``namespace.param`` directly rather than
    ``getattr(namespace, self.dest)``, so it only works for the ``param``
    destination used in this module.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        pairs = namespace.param if namespace.param is not None else []
        if len(values) == 2:
            key, value = values
            pairs.append({'key': key, 'value': value})
        setattr(namespace, self.dest, pairs)
class FunctionShow(ShowOne):
    "Show a function"
    @fiware_arguments
    def get_parser(self, prog_name):
        # Extend the base parser (which already carries FIWARE service args).
        parser = super().get_parser(prog_name)
        parser.add_argument('id', help='function id')
        parser.add_argument('-co',
                            '--code',
                            action='store_true',
                            help='Show the source code')
        return parser
    def take_action(self, parsed_args):
        """Fetch one function by id and return (columns, values) for cliff."""
        response = FunctionClient().retrieve_function(
            id=parsed_args.id,
            fiware_service=parsed_args.fiwareservice,
            fiware_service_path=parsed_args.fiwareservicepath,
            code=parsed_args.code
        )
        # Normalize parameter entries to plain dicts so cliff can render them.
        parameters = list(map(lambda x: dict(x), response['parameters']))
        response['parameters'] = parameters
        columns = response.keys()
        data = response.values()
        return columns, data
class FunctionList(Lister):
    "Show function list"
    @fiware_arguments
    def get_parser(self, prog_name):
        # No extra arguments beyond the FIWARE service/service-path options.
        parser = super().get_parser(prog_name)
        return parser
    def take_action(self, parsed_args):
        """List all functions; return (columns, rows) or empty tuples."""
        response = FunctionClient().list_function(
            fiware_service=parsed_args.fiwareservice,
            fiware_service_path=parsed_args.fiwareservicepath
        )
        if len(response) > 0:
            # Column names come from the first entry; all rows share the schema.
            columns = response[0].keys()
            data = [x.values() for x in response]
            return columns, data
        return (), ()
class FunctionCreate(ShowOne):
    "Create a function"
    @fiware_arguments
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        parser.add_argument('name', help='Function name')
        parser.add_argument('file',
                            type=argparse.FileType('rb'),
                            help='Function file name')
        parser.add_argument('-l', '--language',
                            metavar='LANG:VERSION',
                            help='Program language')
        parser.add_argument('-m', '--main',
                            metavar='MAIN_FILE_NAME',
                            help='Main file name for java')
        parser.add_argument('-p', '--param',
                            nargs=2,
                            action=StoreKeyPairAction,
                            metavar=('KEY', 'VALUE'),
                            help='Inject param to Function')
        return parser
    def take_action(self, parsed_args):
        """Create a function from the CLI args; print errors instead of raising."""
        try:
            # FunctionRequestDataBuilder validates args (e.g. Java needs --main).
            response = FunctionClient().create_function(
                fiware_service=parsed_args.fiwareservice,
                fiware_service_path=parsed_args.fiwareservicepath,
                data=FunctionRequestDataBuilder().build(parsed_args)
            )
            # Normalize parameter entries to plain dicts so cliff can render them.
            parameters = list(map(lambda x: dict(x), response['parameters']))
            response['parameters'] = parameters
            columns = response.keys()
            data = response.values()
            return columns, data
        except CommandError as e:
            self.app.stdout.write(e.args[0])
            return (), ()
class FunctionUpdate(ShowOne):
    "Update a function"
    @fiware_arguments
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        parser.add_argument('id', help='Function id')
        parser.add_argument('file',
                            type=argparse.FileType('rb'),
                            help='Function file name')
        parser.add_argument('-l', '--language',
                            metavar='LANG:VERSION',
                            help='Program language')
        parser.add_argument('-m', '--main',
                            metavar='MAIN_FILE_NAME',
                            help='Main file name for java')
        parser.add_argument('-p', '--param',
                            nargs=2,
                            action=StoreKeyPairAction,
                            metavar='KEY VALUE',
                            help='Inject param to Function')
        return parser
    def take_action(self, parsed_args):
        """Update an existing function; print errors instead of raising."""
        data = FunctionRequestDataBuilder().build(parsed_args)
        # Unlike create, update addresses the function by its id.
        data['id'] = parsed_args.id
        try:
            response = FunctionClient().update_function(
                fiware_service=parsed_args.fiwareservice,
                fiware_service_path=parsed_args.fiwareservicepath,
                data=data
            )
            # Normalize parameter entries to plain dicts so cliff can render them.
            parameters = list(map(lambda x: dict(x), response['parameters']))
            response['parameters'] = parameters
            columns = response.keys()
            data = response.values()
            return columns, data
        except CommandError as e:
            self.app.stdout.write(e.args[0])
            return (), ()
class FunctionDelete(Command):
    "Delete a function"
    @fiware_arguments
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        parser.add_argument('id', help='Function id')
        return parser
    def take_action(self, parsed_args):
        """Delete the function identified by ``id`` and confirm on stdout."""
        FunctionClient().delete_function(
            id=parsed_args.id,
            fiware_service=parsed_args.fiwareservice,
            fiware_service_path=parsed_args.fiwareservicepath,
        )
        self.app.stdout.write('Success delete function\n')
| OkinawaOpenLaboratory/fiware-meteoroid-cli | meteoroid_cli/meteoroid/v1/function.py | function.py | py | 7,858 | python | en | code | 5 | github-code | 36 |
1419102307 | #!/usr/bin/env python
# coding=utf-8
'''
Author: Yuxiang Yang
Date: 2021-08-21 16:29:41
LastEditors: Yuxiang Yang
LastEditTime: 2021-08-21 17:02:54
FilePath: /leetcode/剑指 Offer 60. n个骰子的点数.py
Description:
把n个骰子扔在地上,所有骰子朝上一面的点数之和为s。输入n,打印出s的所有可能的值出现的概率。
假设n=1,和一共有6种,6=5*1+1
n=2, 和一共有11种(最小是2,最大是12),11=5*2+1
n=3, 和一共有16种(最小是3,最大是18),11=5*3+1
依次类推
https://leetcode-cn.com/problems/nge-tou-zi-de-dian-shu-lcof/solution/jian-zhi-offer-60-n-ge-tou-zi-de-dian-sh-z36d/
'''
class Solution(object):
    """Probability distribution of the sum of n fair six-sided dice."""
    def dicesProbability(self, n):
        """Return the probabilities of every possible sum of n dice.

        The result has 5*n + 1 entries, covering sums n..6n in order,
        computed by iteratively convolving one die at a time.

        :type n: int
        :rtype: List[float]
        """
        # Distribution for a single die: each face has probability 1/6.
        probs = [1 / 6] * 6
        for dice_count in range(2, n + 1):
            # Sums for dice_count dice span 5*dice_count + 1 distinct values.
            nxt = [0.0] * (5 * dice_count + 1)
            for offset, p in enumerate(probs):
                # Adding a die with face k+1 shifts the sum index by k slots.
                for face in range(6):
                    nxt[offset + face] += p / 6
            probs = nxt
        return probs
if __name__ == '__main__':
print(Solution().dicesProbability(1))
| yangyuxiang1996/leetcode | 剑指 Offer 60. n个骰子的点数.py | 剑指 Offer 60. n个骰子的点数.py | py | 1,226 | python | zh | code | 0 | github-code | 36 |
24503302432 | #%% Ejercicio 1
# Crear un programa que pida al usuario una letra, y si es vocal, muestre el mensaje
# "Es vocal". Sino, decirle al usuario que no es vocal
Letra = input('Porfavor ingrese una letra: ')
if Letra == 'a' or Letra == 'e' or Letra == 'i' or Letra == 'o' or Letra == 'u':
print('La letra {}, Es vocal'.format(Letra))
else:
print('La letra que ingreso no es vocal')
if Letra in 'aeiou':
print('Es una Vocal')
else:
print('Es una letra')
#%% Ejercicio 2
# Escribir un programa que, dado un número entero, muestre su valor absoluto.
# Nota: para los números positivos su valor absoluto es igual al número
# (el valor absoluto de 52 es 52), mientras que, para los negativos, su valor absoluto
# es el número multiplicado por -1 (el valor absoluto de -52 es 52).
Entero = float(input('Ingrese un número: '))
Curiosity = type(Entero)
if type(Entero) != int:
print('El valor absolto de {} es: {}'.format(Entero,abs(Entero)))
else:
print('El número ingresado no es Entero')
# #%% ejercicio222
# while True:
# n = raw_input("ingrese numero jugadores: ")
# try:
# n = int(n)
# return n
# # except ValueError:
# # print("Escribe un numero, no una letra.") | CriSarC/PythonExercises | 3.Condicionales/3.3.0Ejercicios.py | 3.3.0Ejercicios.py | py | 1,246 | python | es | code | 0 | github-code | 36 |
27551610430 | import argparse
import logging
import os
import shutil
import sys
import tarfile
import tempfile
import traceback
from zipfile import ZIP_DEFLATED, ZIP_STORED, ZipFile
import rdiffweb
from rdiffweb.core.librdiff import LANG, STDOUT_ENCODING, find_rdiff_backup, popen
logger = logging.getLogger(__name__)
# Increase the chunk size to improve performance.
CHUNK_SIZE = 4096 * 10
# Token used by rdiff-backup
TOKEN = b'Processing changed file '
class TarArchiver(object):
    """
    Archiver to create tar archive (with optional gz/bz2 compression),
    writing either to a filename or to an open file object in stream mode.
    """
    def __init__(self, dest, compression=''):
        assert compression in ['', 'gz', 'bz2']
        # Stream mode ("w|...") writes sequentially without seeking.
        mode = "w|" + compression
        # Open the tar archive with the right method.
        if isinstance(dest, str):
            self.z = tarfile.open(name=dest, mode=mode, encoding='UTF-8', format=tarfile.PAX_FORMAT)
            self.fileobj = None
        else:
            self.z = tarfile.open(fileobj=dest, mode=mode, encoding='UTF-8', format=tarfile.PAX_FORMAT)
            self.fileobj = dest
    def addfile(self, filename, arcname, encoding):
        """Add *filename* (bytes path) to the tar as *arcname* decoded with *encoding*."""
        assert isinstance(filename, bytes)
        assert isinstance(arcname, bytes)
        assert encoding
        # Do not create a folder "./"
        if os.path.isdir(filename) and arcname == b'.':
            return
        # The processing of symlink is broken when using bytes
        # for files, so let convert it to unicode with surrogateescape.
        filename = filename.decode('ascii', 'surrogateescape')
        # The archive name must be unicode and will be convert back to UTF8
        arcname = arcname.decode(encoding, 'surrogateescape')
        # Add file to archive (recursive=False: directories are added one by one).
        self.z.add(filename, arcname, recursive=False)
    def close(self):
        # Close tar archive
        self.z.close()
        # Also close file object.
        if self.fileobj:
            self.fileobj.close()
class ZipArchiver(object):
    """
    Write files to zip file or stream.
    Can write uncompressed, or compressed with deflate.
    """
    def __init__(self, dest, compress=True):
        compress = compress and ZIP_DEFLATED or ZIP_STORED
        self.z = ZipFile(dest, 'w', compress)
    def addfile(self, filename, arcname, encoding):
        """Add *filename* (bytes path) to the zip as *arcname* decoded with *encoding*."""
        assert isinstance(filename, bytes)
        assert isinstance(arcname, bytes)
        assert encoding
        # Do not create a folder "./"
        if os.path.isdir(filename) and arcname == b'.':
            return
        # As of today ZipFile doesn't support symlink or named pipe.
        # So we silently skip them. See bug #26269 and #18595
        if os.path.islink(filename) or not (os.path.isfile(filename) or os.path.isdir(filename)):
            return
        # The filename need to be unicode.
        filename = filename.decode('ascii', 'surrogateescape')
        # The archive name must be unicode.
        # But Zip doesn',t support surrogate, so let replace invalid char.
        arcname = arcname.decode(encoding, 'replace')
        # Add file to archive.
        self.z.write(filename, arcname)
    def close(self):
        self.z.close()
class RawArchiver(object):
    """
    Used to stream a single file verbatim, without any archive container.

    `dest` may be a filename (str) or a writable binary file object.

    Bug fix: the original stored the opened file in ``self.output`` when
    `dest` was a string, but addfile()/close() used the misspelled
    ``self.outout`` — streaming to a filename raised AttributeError.
    """
    def __init__(self, dest):
        assert dest
        self.dest = dest
        if isinstance(self.dest, str):
            self.output = open(self.dest, 'wb')
        else:
            self.output = dest
    def addfile(self, filename, arcname, encoding):
        """Append the raw content of *filename* (bytes path); *arcname* and
        *encoding* are accepted for interface compatibility and ignored."""
        assert isinstance(filename, bytes)
        # Only stream files. Skip directories.
        if os.path.isdir(filename):
            return
        with open(filename, 'rb') as f:
            shutil.copyfileobj(f, self.output)
    def close(self):
        self.output.close()
# Maps the archive "kind" requested by the caller to a factory building the
# matching archiver over a destination path or file object.
ARCHIVERS = {
    'tar': TarArchiver,
    'tbz2': lambda dest: TarArchiver(dest, 'bz2'),
    'tar.bz2': lambda dest: TarArchiver(dest, 'bz2'),
    'tar.gz': lambda dest: TarArchiver(dest, 'gz'),
    'tgz': lambda dest: TarArchiver(dest, 'gz'),
    'zip': ZipArchiver,
    'raw': RawArchiver,
}
# Log everything to stderr.
def _print_stderr(msg, exc_info=False):
"""
Print messages to stderr.
"""
assert isinstance(msg, str)
print(msg, file=sys.stderr)
if exc_info:
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
def _lookup_filename(base, path):
    """
    Search for the given filename. This is used to mitigate encoding issue
    with rdiff-backup2. That replace invalid character.

    Returns a (fullpath, arcname) pair of bytes, or (None, None) when no
    matching entry exists under *base*.
    """
    assert isinstance(base, bytes)
    assert isinstance(path, bytes)
    # Easy path, if the file encoding is ok, will find the file.
    fullpath = os.path.normpath(os.path.join(base, path))
    if os.path.lexists(fullpath):
        return fullpath, path
    # Otherwise, Search the for a matching filename: apply the same
    # decode/encode round-trip (with replacement) that mangled the name in
    # rdiff-backup's output, and compare against each directory entry.
    dirname = os.path.dirname(os.path.join(base, path))
    basename = os.path.basename(path)
    for file in os.listdir(dirname):
        if basename == file.decode(STDOUT_ENCODING, 'replace').encode(STDOUT_ENCODING, 'replace'):
            fullpath = os.path.join(dirname, file)
            arcname = os.path.relpath(fullpath, base)
            return fullpath, arcname
    return None, None
def restore(restore, restore_as_of, kind, encoding, dest, log=logger.debug):
    """
    Used to restore a file or a directory.
    restore: relative or absolute file or folder to be restored (unquoted)
    restore_as_of: date to restore (epoch, as an int)
    kind: type of archive to generate or raw to stream a single file.
    encoding: encoding of the repository (used to properly encode the filename in archive)
    dest: a filename or a file handler where to write the archive.
    log: callable receiving progress/debug messages.

    Runs rdiff-backup as a subprocess, parses its verbose output to learn
    which files were restored, and streams each one into the archive as soon
    as it appears, deleting it afterwards to bound disk usage.
    """
    assert isinstance(restore, bytes)
    assert isinstance(restore_as_of, int)
    assert kind in ARCHIVERS
    # Generate a temporary location used to restore data.
    # This location will be deleted after restore.
    tmp_output = tempfile.mkdtemp(prefix=b'rdiffweb_restore_')
    log('restoring data into temporary folder: %r' % tmp_output)
    # Search full path location of rdiff-backup.
    rdiff_backup_path = find_rdiff_backup()
    # Need to explicitly export some environment variable. Do not export
    # all of them otherwise it also export some python environment variable
    # and might brake rdiff-backup process.
    env = {
        'LANG': LANG,
    }
    if os.environ.get('TMPDIR'):
        env['TMPDIR'] = os.environ['TMPDIR']
    cmd = [
        rdiff_backup_path,
        b'-v',
        b'5',
        b'--restore-as-of=' + str(restore_as_of).encode('latin'),
        restore,
        tmp_output,
    ]
    log('executing %r with env %r' % (cmd, env))
    # Open an archive.
    archive = ARCHIVERS[kind](dest)
    try:
        # Read the output of rdiff-backup
        with popen(cmd, env=env) as output:
            for line in output:
                # Since rdiff-backup 2.1.2b1 the line start with b'* '
                if line.startswith(b'* '):
                    line = line[2:]
                line = line.rstrip(b'\n')
                log('rdiff-backup: %r' % line)
                # Only "Processing changed file ..." lines name restored entries.
                if not line.startswith(TOKEN):
                    continue
                # A new file or directory was processed. Extract the filename and
                # look for it on filesystem.
                value = line[len(TOKEN) :]
                fullpath, arcname = _lookup_filename(tmp_output, line[len(TOKEN) :])
                if not fullpath:
                    log('error: file not found %r' % value)
                    continue
                # Add the file to the archive.
                log('adding %r' % fullpath)
                try:
                    archive.addfile(fullpath, arcname, encoding)
                except Exception:
                    # Many error may happen when trying to add a file to the
                    # archive. To be more resilient, capture error and continue
                    # with the next file.
                    log('error: fail to add %r' % fullpath, exc_info=1)
                # Delete file once added to the archive.
                if os.path.isfile(fullpath) or os.path.islink(fullpath):
                    os.remove(fullpath)
    finally:
        # Close the pipe
        archive.close()
        # Clean-up the directory.
        if os.path.isdir(tmp_output):
            shutil.rmtree(tmp_output, ignore_errors=True)
        elif os.path.isfile(tmp_output):
            os.remove(tmp_output)
def main():
    """CLI entry point: parse arguments, run restore(), exit 1 on failure."""
    parser = argparse.ArgumentParser(description='Rdiffweb restore script.')
    parser.add_argument('--restore-as-of', type=int, required=True)
    parser.add_argument('--encoding', type=str, default='utf-8', help='Define the encoding of the repository.')
    parser.add_argument(
        '--kind', type=str, choices=ARCHIVERS, default='zip', help='Define the type of archive to generate.'
    )
    parser.add_argument('restore', type=str, help='Define the path of the file or directory to restore.')
    parser.add_argument('output', type=str, default='-', help='Define the location of the archive. Default to stdout.')
    parser.add_argument('--version', action='version', version='%(prog)s ' + rdiffweb.__version__)
    args = parser.parse_args()
    # handle encoding of the path: restore() expects a bytes path.
    path = args.restore
    if isinstance(path, str):
        path = os.fsencode(path)
    # handle output: "-" means stream the archive to stdout.
    if args.output == '-':
        output = sys.stdout.buffer
    else:
        output = open(args.output, 'wb')
    # Execute the restore.
    try:
        restore(path, args.restore_as_of, args.kind, args.encoding, output, log=_print_stderr)
    except Exception:
        _print_stderr('error: failure to create the archive', exc_info=1)
        sys.exit(1)
if __name__ == "__main__":
main()
| ikus060/rdiffweb | rdiffweb/core/restore.py | restore.py | py | 9,753 | python | en | code | 114 | github-code | 36 |
28111130337 | from django.db.models import Avg
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from dinnerevent.models import Review
from .forms import UserRegisterForm, ProfileForm
def register(request):
    """Render the combined user+profile sign-up form; create both records on
    a valid POST and redirect to the login page."""
    if request.method != "POST":
        # Initial page load: show empty, unbound forms.
        return render(request, 'users/register.html', {
            'form': UserRegisterForm(),
            'profile_form': ProfileForm(),
        })
    form = UserRegisterForm(request.POST)
    profile_form = ProfileForm(request.POST)
    if form.is_valid() and profile_form.is_valid():
        user = form.save()
        # Attach the profile to the freshly created user before saving.
        user_profile = profile_form.save(commit=False)
        user_profile.user = user
        user_profile.save()
        profile_form.save_m2m()
        username = form.cleaned_data.get('username')
        messages.success(request, f'Bruker er laget for {username}')
        return redirect('login')
    # Invalid POST: re-render with the bound forms so errors are shown.
    return render(request, 'users/register.html', {'form': form, 'profile_form': profile_form})
@login_required
def profile(request, pk):  # Renders the profile page for the given user.
    """Show a user's profile: the reviews on their events and their average rating."""
    # All reviews left on events owned by this user.
    all_user_reviews = Review.objects.filter(event__user=User.objects.get(pk=pk))
    # Average rating across those reviews (None when there are no reviews).
    avg_rating = all_user_reviews.aggregate(Avg('rating')).get('rating__avg')
    return render(request, 'users/profile.html', {
        'reviews': all_user_reviews,
        'score': rounded_rating(avg_rating),
        'user_profile': User.objects.get(pk=pk),
    })
def rounded_rating(number):
    """Round *number* to the nearest half-integer; None is passed through."""
    if number is None:
        return None
    return round(number * 2) / 2
| taheeraahmed/Dinnersharing | middagproj/users/views.py | views.py | py | 1,640 | python | en | code | 0 | github-code | 36 |
22654441124 | from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
# same process as svm
np.random.seed(0)
iris = datasets.load_iris()
X = iris.data[:, 0:2]
y = iris.target
train_x, test_x, train_y, test_y = train_test_split(iris.data[:, :2], iris.target, test_size=0.3, random_state=0)
#hidden_layer_sizes=[(10,),(30,),(100,),(5,5),(10,10),(30,30)] #可选的神经元层数
#ativations=["logistic","tanh","relu"] #可选的激活函数
#learnrates=[0.1,0.01,0.001] #可选的学习率
solvers=["lbfgs","sgd","adam"] #可选的solver
for i, sol in enumerate(solvers):
classifier = MLPClassifier(activation="tanh", max_iter=1000000,
hidden_layer_sizes=(10,5), solver=sol, learning_rate_init=0.01)
classifier.fit(train_x, train_y)
train_score = classifier.score(train_x, train_y)
print('when solver =', sol, '\n','train_score=',train_score)
test_score = classifier.score(test_x, test_y)
print('test_score=',test_score,'\n')
x_min, x_max = train_x[:, 0].min() - 1, train_x[:, 0].max() + 2
y_min, y_max = train_x[:, 1].min() - 1, train_x[:, 1].max() + 2
plot_step = 0.02 # 步长
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.subplot(1, 3, i + 1)
plt.subplots_adjust(wspace=0.3, hspace=1)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(sol)
plt.show() | fh-Zh/Classification.old | bpnn.py | bpnn.py | py | 1,918 | python | en | code | 0 | github-code | 36 |
2884786469 | # coding:utf-8
# @Time : 2020/4/21 19:18
# @Author: Xiawang
# Description:
import datetime
import time
import requests
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
'''
用于主流程监控定期执行并发送报警信息
'''
def get_fix_time():
    """Return the wall-clock time ten minutes from now as 'YYYY-MM-DD HH:MM'."""
    deadline = datetime.datetime.now() + datetime.timedelta(minutes=10)
    return deadline.strftime("%Y-%m-%d %H:%M")
def run_pytest(module):
    '''
    :param module: mainprocess, open_api_lagou
    '''
    # Ask the local test service to execute the module's pytest suite and
    # return its JSON summary.
    url = 'http://127.0.0.1:18980/data/pytest'
    payload = {"module": module}
    print(url)
    result = requests.post(url=url, json=payload, verify=False).json()
    print(result)
    return result
def send_feishu_report(module, pytest_result):
    """Format a pytest result and push it to the module's Feishu bot.

    :param module: module key used to pick the bot webhook.
    :param pytest_result: dict returned by run_pytest. ``state`` 4 carries a
        plain message in ``data``; ``state`` 0 carries summary/fail details.
    :return: the bot response flag, or None for unhandled states.
    """
    if pytest_result.get('state') == 4:
        content = pytest_result.get('data')
        return send_feishu_bot(module=module, content=content)
    if pytest_result.get('state') == 0:
        summary_result = pytest_result['summary_result']
        fail_results = ''
        names = []
        for case_name, case_fail_result in pytest_result['fail_result'].items(
        ):
            fail_result = f'''用例{case_name}报错:{case_fail_result['error_type']},原因:{case_fail_result['log']},测试:{case_fail_result.get('tester_name')},开发:{case_fail_result.get('rd_name')}\n\n'''
            fail_results += fail_result
            names.extend([case_fail_result.get('tester_name'), case_fail_result.get('rd_name')])
        # Bug fix: the old ``if '' in names: names.remove('') elif None in
        # names: ...`` removed at most ONE empty string OR one None (and
        # list.remove only drops the first match), so blank owners leaked
        # into the @-mention list. Drop every empty/missing name instead.
        names = [owner for owner in names if owner]
        fix_time = get_fix_time()
        name_template = f'''请{','.join(list(set(names)))}在{fix_time}之前,尽快处理并给出反馈'''
        content = "{}\n\n具体失败结果:\n{}\n请大家对线上问题保持敬畏之心!\n{}".format(summary_result, fail_results, name_template)
        print(content)
        return send_feishu_bot(module=module, content=content)
def send_mail(module):
    """Email the HTML test report for *module* to the QA mailing list.

    :param module: report name prefix (e.g. 'mainprocess').
    :return: True when the mail was sent, False on any failure.
    """
    # NOTE(review): credentials are hard-coded in source; move them to
    # configuration / environment variables and rotate the password.
    sender = 'autotest@lagoujobs.com'
    sender_password = 'Lqq123456'
    # Bug fix: a comma was missing after 'huifang@lagou.com', so Python's
    # implicit string-literal concatenation silently produced the bogus
    # address 'huifang@lagou.combingoonchen@lagou.com' and dropped a
    # recipient.
    receivers = ['zane@lagou.com', 'sunnyzhang@lagou.com',
                 'sunnysun@lagou.com', 'huifang@lagou.com',
                 'bingoonchen@lagou.com', 'anan@lagou.com',
                 'foxtang01@lagou.com']
    ret = True
    try:
        message = MIMEMultipart()
        message['From'] = Header(f"自动化测试报告", 'utf-8')
        message['To'] = Header("测试工程师", 'utf-8')
        subject = f'{module}测试报告'
        message['Subject'] = Header(subject, 'utf-8')
        message.attach(
            MIMEText('自动化测试报告详见附件', 'plain', 'utf-8')
        )
        report_file_path = f'/home/test/lg-apiscript-python/backend/templates/{module}_report.html'
        print(report_file_path)
        # Attach the HTML report as a generic binary attachment.
        att1 = MIMEText(open(report_file_path, 'rb').read(),
                        'base64', 'utf-8')
        att1["Content-Type"] = 'application/octet-stream'
        att1["Content-Disposition"] = f'attachment; filename={module}_report.html'
        message.attach(att1)
        server = smtplib.SMTP_SSL("smtp.exmail.qq.com", 465)
        server.login(sender, sender_password)
        server.sendmail(sender, receivers, message.as_string())
        server.quit()
    except Exception as e:
        # Best-effort: report the failure but never raise to the caller.
        print(str(e))
        ret = False
    return ret
def send_feishu_bot(module, content):
    """Push *content* (truncated to 2000 chars) to the module's Feishu webhook."""
    module_bot = {
        'test': 'https://open.feishu.cn/open-apis/bot/hook/882babeafa3e4f0b839d6ff41efa2b84',
        'mainprocess': 'https://open.feishu.cn/open-apis/bot/hook/03654ef57c4f4418ba8802cfa1cf06a0',
        'open_api_lagou': 'https://open.feishu.cn/open-apis/bot/hook/ad282603210042cdb3e414f36e1acbb8'
    }
    payload = {
        "title": "自动化测试结果:",
        "text": content if len(content) < 2000 else content[:2000],
    }
    response = requests.post(url=module_bot.get(module), json=payload, verify=False).json()
    return response.get('ok')
def get_number(string: str):
    """Parse the leading space-delimited token of *string* as an int; 0 if not numeric."""
    token = string.split(' ')[0]
    return int(token) if token.isdigit() else 0
def send_oss(pytest_result):
    """Translate pytest failures into alarm events for the OSS alert filter.

    NOTE(review): the ``return`` below sits *inside* the for loop, so only
    the first failing case is ever reported and the rest are dropped —
    confirm whether that is intentional.
    """
    total_result = pytest_result['data']['result']['info']['result']
    errors = total_result['fail_result']
    name = "main_process_test"
    source = "main_process_test.py"
    for key, value in errors.items():
        test_demo: str = key.strip()
        error_type: str = value['error_type'].strip()
        error_cause: str = value['log'].strip()
        module_name = test_demo
        # A generic "see the report" log is downgraded to a WARNING with a
        # single recipient; anything else is a PROBLEM with two recipients.
        if error_cause == '具体详情,请查看测试报告':
            cause = error_type
            level = 'WARNING'
            user_ids = 'mornyue'
        else:
            cause = error_cause
            level = 'PROBLEM'
            user_ids = 'mornyue,huifang'
        description = "主流程测试"
        return oss_filter_event(module_name=module_name, name=name, cause=cause,
                                level=level, user_ids=user_ids, description=description, source=source)
def oss_filter_event(module_name, name, description, level, user_ids: str, cause, source):
    """Send an alarm event to the lg-alarm-filter ``/filter/event`` endpoint.

    The endpoint expects ["moduleName", "name", "description", "level",
    "userids", "cause"].

    :param module_name: module (or host) the alarm belongs to
    :param name: alarm type
    :param description: human-readable description
    :param level: alarm severity
    :param user_ids: comma-separated recipients; spaces are stripped because
        the backend does not handle them
    :param cause: what triggered the alarm
    :param source: origin of the data
    """
    # Accept either an ASCII comma or a full-width comma as the separator.
    if ',' in user_ids:
        recipients = user_ids.split(',')
    elif ',' in user_ids:
        recipients = user_ids.split(',')
    else:
        recipients = [user_ids]
    user_ids = ','.join(item.strip() for item in recipients)
    requests.post('http://10.10.5.138:8081/filter/event', json={
        'moduleName': module_name,
        'name': name,
        'description': description,
        'level': level,
        'userids': user_ids,
        'cause': cause,
        'source': source
    })
def main(module):
    """Run the module's tests, retrying once, then fan out notifications."""
    result = run_pytest(module)
    if result.get('state', 0) != 1:
        # One retry after a short pause before reporting anything.
        time.sleep(10)
        print(1)
        result = run_pytest(module)
        print(result)
        if result.get('state', 0) != 1:
            feishu_ok = send_feishu_report(module, result)
            print(feishu_ok)
            if feishu_ok == True:
                send_mail(module)
if __name__ == '__main__':
    import argparse

    # CLI entry point: ``--module`` selects which test module to execute.
    parser = argparse.ArgumentParser(description='获取执行的模块')
    parser.add_argument('--module', help='获取执行模块')
    cli_args = parser.parse_args()
    if cli_args.module is not None:
        main(module=cli_args.module)
23420270770 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated Django migration for the ``stock`` app.

    Creates the ``Stock`` model (a multi-table child of ``Producto``), adds
    a ``fecha_alta_producto`` timestamp to ``Producto`` and adjusts several
    field definitions. Do not hand-edit the operations; regenerate with
    ``makemigrations`` instead.
    """

    dependencies = [
        ('stock', '0088_auto_20160620_1304'),
    ]

    operations = [
        # New Stock model: one-to-one pointer back to Producto plus the
        # inventory bookkeeping fields.
        migrations.CreateModel(
            name='Stock',
            fields=[
                ('producto_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='stock.Producto')),
                ('stock_minimo', models.DecimalField(help_text=b'Cantidad minima del producto a mantener en Stock.', verbose_name=b'Stock Minimo', max_digits=10, decimal_places=3)),
                ('cantidad_existente', models.DecimalField(help_text=b'Cantidad Existente en Stock', verbose_name=b'Cantidad Existente', max_digits=10, decimal_places=3)),
            ],
            bases=('stock.producto',),
        ),
        # The datetime defaults below were frozen by makemigrations at
        # generation time; they only backfill existing rows.
        migrations.AddField(
            model_name='producto',
            name='fecha_alta_producto',
            field=models.DateTimeField(default=datetime.datetime(2016, 6, 22, 18, 14, 4, 175000, tzinfo=utc), help_text=b'La Fecha de Alta se asigna al momento de guardar los datos del Producto. No se requiere el ingreso de este dato.', verbose_name=b'Fecha de Alta'),
        ),
        migrations.AlterField(
            model_name='precioproducto',
            name='fecha',
            field=models.DateTimeField(default=datetime.datetime(2016, 6, 22, 18, 14, 4, 176000, tzinfo=utc), help_text=b'Ingrese la fecha y hora en la que se define el precio de venta del producto.'),
        ),
        migrations.AlterField(
            model_name='producto',
            name='codigo_barra',
            field=models.CharField(help_text=b'', max_length=100, verbose_name=b'Codigo de Barra'),
        ),
        migrations.AlterField(
            model_name='producto',
            name='marca',
            field=models.CharField(help_text=b'', max_length=100, verbose_name=b'Marca'),
        ),
        migrations.AlterField(
            model_name='producto',
            name='producto',
            field=models.CharField(help_text=b'Ingrese el nombre o descripcion del Producto.', max_length=100, verbose_name=b'Nombre del Producto'),
        ),
    ]
| pmmrpy/SIGB | stock/migrations/0089_auto_20160622_1414.py | 0089_auto_20160622_1414.py | py | 2,301 | python | es | code | 0 | github-code | 36 |
def get_all_measurements():
    """Read the puzzle input and return its lines as a list of strings.

    Bug fix: the old implementation leaked the file handle; a ``with`` block
    now guarantees it is closed. As before, a trailing '' entry is kept if
    the file ends with a newline (``split('\\n')`` semantics).
    """
    with open("input_day_1.txt", "r") as f:
        return f.read().split('\n')
def get_count_of_increased_sequent(measurements) -> int:
    """
    Count how many sequent increases are in a list of measurements.

    Values may be ints or numeric strings; each pair is compared as int.

    Args:
        measurements (List[int]): list of measurements

    Returns:
        int: number of sequent increases

    Raises:
        ValueError: if fewer than 2 measurements are given
    """
    if len(measurements) < 2:
        raise ValueError("List contains less than 2 values to compare")
    values = [int(m) for m in measurements]
    # Count one for every adjacent pair that strictly increases.
    return sum(1 for prev, cur in zip(values, values[1:]) if cur > prev)
def get_count_of_increased_triplets(measurements) -> int:
    """
    Count how many sequent increases are between sums of sliding triplets
    (windows of three consecutive measurements).

    Adjacent windows share two elements, so
    ``sum(i+1..i+3) > sum(i..i+2)`` iff ``values[i+3] > values[i]`` —
    only the two endpoints need to be compared.

    Args:
        measurements (List[int]): list of measurements (ints or numeric strings)

    Returns:
        int: number of sequent increases between triplet sums

    Raises:
        ValueError: if fewer than 6 measurements are given (kept from the
        original contract)
    """
    if len(measurements) < 6:
        raise ValueError("List contains less than 6 values to compare")
    values = [int(m) for m in measurements]
    return sum(1 for prev, cur in zip(values, values[3:]) if cur > prev)
if __name__ == '__main__':
    # Advent of Code 2021 day 1: print the answer for each part.
    # part_1
    print(get_count_of_increased_sequent(get_all_measurements()))
    # part_2
    print(get_count_of_increased_triplets(get_all_measurements()))
| PavcaHyx/advent-of-code-2021 | day_1/day_1.py | day_1.py | py | 2,180 | python | en | code | 0 | github-code | 36 |
27875338483 | import json
import requests
from openerp.tests.common import HttpCase
from openerp import api, exceptions, tools, models
HOST = '127.0.0.1'
PORT = tools.config['xmlrpc_port']
class Webhook(models.Model):
    # Test-only extension of the base ``webhook`` model: adds the handler
    # exercised by TestWebhookPost below.
    _inherit = 'webhook'

    @api.one
    def run_wehook_test_get_foo(self):
        """
        This method is just to test webhook.
        This needs receive a json request with
        next json values: {'foo': 'bar'}
        If value is different will raise a error.
        """
        # NOTE(review): the 'wehook' spelling (missing 'b') presumably has
        # to match the webhook-name/event dispatch convention used by the
        # controller — confirm before renaming.
        if 'bar' != self.env.request.jsonrequest['foo']:
            raise exceptions.ValidationError(
                "Wrong value received")
class TestWebhookPost(HttpCase):
    """HTTP-level tests that POST events to the /webhook controller."""

    def setUp(self):
        # Base URL points at the locally running server (HOST/PORT are
        # module-level constants derived from the Odoo config).
        super(TestWebhookPost, self).setUp()
        self.webhook = self.env['webhook']
        self.url_base = "http://%s:%s" % (HOST, PORT)
        self.url = self.get_webhook_url()

    def get_webhook_url(self, url='/webhook',
                        webhook_name="wehook_test"):
        """
        :param string url: Full url of last url of webhook to use.
                           If you use a full url will return url
                           plus session_id
                           default: /webhook
        :param string webhook_name: Name of webhook to process
                                    default: wehook_test
        :return: url with
                 http://IP:PORT/webhook/webhook_name?session_id=###
        """
        # NOTE(review): the default is spelled 'wehook_test' (missing 'b');
        # it matches the Webhook.run_wehook_test_* handler above, so the
        # typo appears load-bearing — do not "fix" it here alone.
        webhook_name = webhook_name.replace('/', '')
        if url.startswith('/'):
            url = self.url_base + url + '/' + webhook_name
        url += '?session_id=' + self.session_id
        return url

    def post_webhook_event(self, event, url, data, remote_ip=None,
                           headers=None, params=None):
        """
        :param string event String: Name of webhook event.
        :param string url: Full url of webhook services.
        :param dict data: Payload data of request.
        :param string remote_ip: Remote IP of webhook to set in
                                 test variable.
        :param dict headers: Request headers with main data.
        :param dict params: Extra values to send to webhook.
        """
        if headers is None:
            headers = {}
        if remote_ip is None:
            remote_ip = '127.0.0.1'
        # The X-Webhook-Test-* headers carry the event name and the spoofed
        # caller address that the controller dispatches on.
        headers.update({
            'X-Webhook-Test-Event': event,
            'X-Webhook-Test-Address': remote_ip,
        })
        headers.setdefault('accept', 'application/json')
        headers.setdefault('content-type', 'application/json')
        payload = json.dumps(data)
        response = requests.request(
            "POST", url, data=payload,
            headers=headers, params=params)
        return response.json()

    def test_webhook_ping(self):
        """
        Test to check that 'ping' generic method work fine!
        'ping' event don't need to add it in inherit class.
        """
        json_response = self.post_webhook_event(
            'ping', self.url, {})
        has_error = json_response.get('error', False)
        self.assertEqual(
            has_error, False, 'Error in webhook ping test!')

    def test_webhook_get_foo(self):
        """
        Test to check that 'get_foo' event from 'webhook_test'
        work fine!
        This event is defined in inherit method of test.
        """
        json_response = self.post_webhook_event(
            'get_foo', self.url, {'foo': 'bar'})
        self.assertEqual(
            json_response.get('error', False), False,
            'Error in webhook get foo test!.')
| Blancorama/blancorama_tools | webhook/tests/test_webhook_post.py | test_webhook_post.py | py | 3,596 | python | en | code | 0 | github-code | 36 |
6688406212 | from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field
from dotenv import load_dotenv
from typing import List
import pandas as pd
import os
# loading api key and defining vars
load_dotenv("../.env")
deck_name = os.environ.get("DECK_NAME")
# NOTE(review): if DECK_NAME is unset, deck_name is None and the
# concatenation below raises TypeError — consider failing with a clear
# message instead.
csv_file_path = "../csv/" + deck_name + ".csv"
openai_api_key = os.environ.get("OPENAI_API_KEY")
model = "gpt-4"        # OpenAI chat model used for generation
temperature = 0.0      # deterministic output for reproducible decks
class FlashCard(BaseModel):
    """One Anki card: a question/answer pair parsed from the LLM output."""
    question: str = Field(description="The question for the flashcard")
    answer: str = Field(description="The answer for the flashcard")
class FlashCardArray(BaseModel):
    """Top-level schema the LLM is instructed to emit: a list of flashcards."""
    flashcards: List[FlashCard]
def create_flashcards_from_text(input_text: str, user_prompt: str, csv_file_path: str):
    """Ask the LLM to turn *input_text* into flashcards and append them to a CSV.

    :param input_text: source material to generate cards from.
    :param user_prompt: prompt template; must reference ``input_text`` and
        ``format_instructions`` placeholders.
    :param csv_file_path: destination CSV (question,answer rows, no header —
        the header-less format is what Anki's importer expects).
    """
    llm = ChatOpenAI(openai_api_key=openai_api_key, model=model, temperature=temperature)
    print("Creating flashcards...")
    # Pydantic parser both supplies the JSON-format instructions for the
    # prompt and validates/parses the model's reply.
    pydantic_parser = PydanticOutputParser(pydantic_object=FlashCardArray)
    format_instructions = pydantic_parser.get_format_instructions()
    prompt = ChatPromptTemplate.from_template(template=user_prompt)
    messages = prompt.format_messages(input_text=input_text, format_instructions=format_instructions)
    output = llm(messages)
    flashcards = pydantic_parser.parse(output.content)
    list_of_flashcards = [card.dict() for card in flashcards.flashcards]
    df = pd.DataFrame(list_of_flashcards)
    # Append when the deck already exists, otherwise create it fresh.
    if os.path.isfile(csv_file_path):
        df.to_csv(csv_file_path, mode="a", header=False, index=False)
    else:
        df.to_csv(csv_file_path, mode="w", header=False, index=False)
def main():
    """Load the source text and prompt from disk, then generate flashcards."""
    try:
        with open("input.txt", "r") as source, open("prompt.txt", "r") as template:
            input_text = source.read()
            user_prompt = template.read()
        create_flashcards_from_text(input_text, user_prompt, csv_file_path)
    except Exception as e:
        print(f"Error occurred: {e}")
        return


if __name__ == "__main__":
    main()
8695937067 | #from compiler import to_program
#from reader import read, Literal, Expr
from rpython.config.translationoption import get_combined_translation_config
from util import STDIN, STDOUT, STDERR, read_file, write
import api
import base
import evaluator.loader
import space
import sys, os
# RPython translation options for this build.
config = get_combined_translation_config(translating=True)
# The green-threading runtime is only pulled in when the translation
# target supports continuations.
if config.translation.continuation:
    import green
#def interactive():
# module = space.Module(u'shell', {}, extends=base.module)
# prompt = u"pyl> "
# write(STDOUT, prompt)
# source = os.read(0, 4096).decode('utf-8')
# while source != u"":
# try:
# program = to_program(read(source))
# write(STDOUT, program.call([module]).repr() + u"\n")
# except space.Error as error:
# print_traceback(error)
# write(STDOUT, prompt)
# source = os.read(0, 4096).decode('utf-8')
# if source == u"":
# write(STDOUT, u"\n")
# return 0
#
#def batch(path):
# module = space.Module(u'main', {}, extends=base.module)
# try:
# source = read_file(path)
# except OSError, error:
# os.write(2, "[Errno %d]: %s\n" % (error.errno, path) )
# return 1
# try:
# program = to_program(read(source))
# program.call([module])
# except space.Error as error:
# print_traceback(error)
# return 1
# return 0
#def entry_point(argv):
# E = 10 # Debugging assist
# green.process.init(config)
# api.init(argv)
# if len(argv) <= 1:
# return interactive()
# for arg in argv[1:]:
# if arg == '-E0':
# E = 0
# elif arg == '-E1':
# E = 1
# elif arg == '-E2':
# E = 2
# elif E == 0:
# # At this option, we're just parsing the input.
# try:
# source = read_file(arg)
# except OSError, error:
# os.write(2, "[Errno %d]: %s\n" % (error.errno, arg) )
# return 1
# for exp in read(source):
# write(STDOUT, exp.repr() + u"\n")
# else:
# status = batch(arg)
# if status != 0:
# return status
# return 0
def entry_point(argv):
    # Interpreter entry point (RPython-translatable; avoid py3-only
    # constructs here). Every CLI argument is loaded and run as a program
    # in its own fresh 'main' module; the last expression's repr is printed.
    if config.translation.continuation:
        green.process.init(config)
    api.init(argv)
    try:
        for arg in argv[1:]:
            module = space.Module(u'main', {}, extends=base.module)
            program = evaluator.loader.from_file(arg)
            result = program.call([module])
            os.write(1, (result.repr() + u'\n').encode('utf-8'))
    except space.Error as error:
        # Interpreter-level errors print a traceback and exit non-zero.
        print_traceback(error)
        return 1
    return 0
def print_traceback(error):
    # Render a space.Error (interpreter exception) to STDERR: innermost
    # frame last, then "ExceptionName: message".
    out = u""
    if len(error.stacktrace) > 0:
        out = u"Traceback:\n"
    for frame, start, stop in reversed(error.stacktrace):
        out += u"    %s: %s %s\n" % (
            frame.module.name, start.repr(), stop.repr())
    # .decode('utf-8') on the class name: this codebase targets RPython /
    # Python 2 byte strings.
    out += error.__class__.__name__.decode('utf-8')
    write(STDERR, out + u": " + error.message + u"\n")
def target(*args):
    # RPython toolchain hook: returns the translation entry point.
    return entry_point, None
def jitpolicy(driver):
    # RPython toolchain hook: default JIT policy for the translated VM.
    from rpython.jit.codewriter.policy import JitPolicy
    return JitPolicy()
if __name__ == '__main__':
    # Untranslated (CPython/PyPy) execution path.
    sys.exit(entry_point(sys.argv))
35217198422 | import requests
from urllib.request import urlopen
import urllib
from selenium import webdriver
from bs4 import BeautifulSoup
import http.client
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
import json
import string
import math
import time
http.client._MAXHEADERS = 1000
def urllib_download(IMAGE_URL, pName):
    """Download IMAGE_URL to '<pName>.jpg' (slashes stripped from the name).

    Best effort: download failures are reported with a short message and
    never propagated to the caller.
    """
    try:
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        urllib.request.install_opener(opener)
        urllib.request.urlretrieve(IMAGE_URL, pName.replace("/", "").replace("\\", "") + '.jpg')
    except (urllib.error.URLError, OSError, ValueError):
        # Bug fix: narrowed from a bare ``except`` so that real bugs
        # (KeyboardInterrupt, SystemExit, NameError, ...) are no longer
        # swallowed silently.
        print('no')
def getNodeText(node):
    """Return the stripped text of a BeautifulSoup node, or '' for None."""
    # ``is None`` instead of the original ``== None`` identity-vs-equality
    # smell; behavior is otherwise unchanged.
    if node is None:
        return ""
    return node.get_text().strip()
# Global retry counter shared by all getHtmlFromUrl calls (not thread-safe).
retryCount = 0
def getHtmlFromUrl(url, type="get", para={}):
    """Fetch *url* and return the raw response bytes.

    Returns '' when the HEAD probe is not 200, and "" after 5 failed
    retries (one-minute pause between attempts).

    NOTE(review): success returns *bytes* while the failure paths return
    *str* — callers only check ``len(...)`` so it works, but the mixed
    types are fragile. ``type``/``para`` are unused, and ``para={}`` is a
    mutable default argument.
    """
    global retryCount
    try:
        # Percent-encode non-printable characters; spaces become %20.
        url = urllib.parse.quote(url, safe=string.printable).replace(' ','%20')
        request_obj=urllib.request.Request(url=url, headers={
            'Content-Type': 'text/html; charset=utf-8',
            "User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36"
        })
        # Cheap HEAD probe before downloading the body.
        htmlHeader = requests.head(url)
        if htmlHeader.status_code ==200:
            response_obj=urllib.request.urlopen(request_obj)
            html_code=response_obj.read()
            return html_code
        else:
            return ''
    except:
        # Any failure triggers a retry after 60s, up to 5 attempts total.
        retryCount = retryCount + 1
        if retryCount < 5:
            print("retry index"+str(retryCount)+url)
            time.sleep(60)
            return getHtmlFromUrl(url)
        else:
            retryCount = 0
            return ""
def requestJson(url):
    """POST to *url* with a fixed browser identity and return the parsed JSON.

    SECURITY(review): the cookie below embeds live session/analytics tokens
    committed to source control — rotate them and load from configuration.
    """
    r = requests.post(url, headers={
        'Content-Type': 'application/x-www-form-urlencoded',
        'cookie':'visid_incap_2255650=4oBBaRPnQfCVoYEiTmjTq/NVAWEAAAAAQUIPAAAAAAD69PQHUoB0KplKq7/j0+gH; nlbi_2255650=CJKhHYlMm17tpKyoBzOViAAAAACDEjp3gL6bj6YL8j9XE0d/; incap_ses_893_2255650=m1tJIuDRUEp3FE/5GpNkDPRVAWEAAAAAM2KkDpvtARtZral+cMXSVw==; _gcl_au=1.1.76703404.1627477493; _gid=GA1.2.730047202.1627477493; BCSessionID=83af10b8-9488-4b7b-a3b1-3640f178dca2; categoryView=grid; _ga_S46FST9X1M=GS1.1.1627477492.1.1.1627478562.0; _ga=GA1.2.31731397.1627477493; _gat_UA-139934-1=1; _uetsid=69fc2d30efa411eb8818eb045f8760e5; _uetvid=69fc3a70efa411ebba3a23c153f6e477; .Nop.Customer=d664d529-d14a-44b1-86b3-cbf5373277b4',
        "User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36"
    })
    datas = json.loads(r.text)
    return datas
def getRenderdHtmlFromUrl(url):
    """Render *url* in headless Chrome and return the JS-evaluated DOM as soup.

    Use this (instead of getHtmlFromUrl) for pages whose content is built
    client-side. NOTE(review): the browser is never ``quit()``, so each call
    leaks a Chrome process; newer Selenium also renamed ``chrome_options``
    to ``options``.
    """
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument("window-size=1024,768")
    chrome_options.add_argument("--no-sandbox")
    browser = webdriver.Chrome(chrome_options=chrome_options)
    browser.get(url)
    return BeautifulSoup(browser.page_source, "html.parser",from_encoding="utf-8")
def writeExcel(workSheet, headers, rowIndex, info):
    """Write one worksheet row: for each header, info[header] (illegal XML
    characters stripped, whitespace trimmed) or "" when the key is absent.

    A failure on a single cell is logged and skipped without shifting the
    remaining columns.
    """
    cellIndex = 1
    for head in headers:
        try:
            if head in info:
                content = ILLEGAL_CHARACTERS_RE.sub(r'', info[head])
                workSheet.cell(rowIndex, cellIndex).value = content.strip()
            else:
                workSheet.cell(rowIndex, cellIndex).value = ""
        except Exception:
            # Narrowed from a bare ``except``; still best-effort per cell.
            print(rowIndex)
        # Bug fix: the increment used to live inside the ``try`` *after*
        # the assignment, so a failing cell froze the column index and
        # every later value landed one column to the left.
        cellIndex = cellIndex + 1
def getProductInfo(url, typeStr, products):
    """Scrape one product page and append its fields to *products*.

    :param url: product page URL.
    :param typeStr: category label stored as 'Product Category2'.
    :param products: output list of dicts (mutated in place).
    """
    print(str(len(products)) + url)
    html_code = getHtmlFromUrl(url)
    if len(html_code)>0:
        sope= BeautifulSoup(html_code, "html.parser",from_encoding="utf-8")
        pName = sope.find("h1", attrs={"itemprop":"name"})
        specInfos = sope.find_all("h5")
        Description = sope.find("div", attrs={"class":"full-description"})
        pInfo = {
            "link": url,
            "Product Category1": 'Fiber Optic',
            "Product Category2": typeStr,
            "Product Name": getNodeText(pName),
            "Description": getNodeText(Description)
        }
        # "Features"/"Application" sections sit two siblings after their
        # <h5> heading (heading, whitespace, content).
        for specInfo in specInfos:
            title = getNodeText(specInfo)
            if title == "Features":
                pInfo["Features"] = getNodeText(specInfo.next_sibling.next_sibling)
            if title == "Application":
                pInfo["Application"] = getNodeText(specInfo.next_sibling.next_sibling)
        products.append(pInfo.copy())
def getProductList(url, typestr, products):
    """Fetch a category page and print every link inside its content area.

    NOTE(review): this currently only *prints* the anchors; ``typestr`` and
    ``products`` are unused and nothing is scraped or appended — presumably
    it should call getProductInfo per link. Confirm intended behavior.
    """
    html_code = getHtmlFromUrl(url)
    if len(html_code)>0:
        sope= BeautifulSoup(html_code, "html.parser",from_encoding="utf-8")
        pLinkArea = sope.find("div", attrs={"class":"page-inner clearfix"})
        pLinks = pLinkArea.find_all("a")
        for Plink in pLinks:
            print(Plink)
def getProducType(url, products):
    """Walk the active nav dropdown and scrape each sub-category's listing.

    (Name keeps the historic 'ProducType' typo; unused in this script's
    main flow but kept for callers elsewhere.)
    """
    html_code = getHtmlFromUrl(url)
    if len(html_code)>0:
        sope= BeautifulSoup(html_code, "html.parser",from_encoding="utf-8")
        typeArea = sope.find("li", attrs={"class":"active dropdown"})
        types = typeArea.find_all("li", attrs={"class":"dropdown-submenu"})
        for type in types:
            lLink = type.find("a")
            getProductList(lLink["href"], getNodeText(lLink), products)
# Scrape the spectroscopy product listing and dump the results to Excel.
excelFileName = "lcom.xlsx"
wb = Workbook()
workSheet = wb.active
products = []
getProductList('http://www.tydexoptics.com/products/spectroscopy/', '', products)
headers = [
    'link', 'Product Category1', 'Product Category2', 'Product Name',
    'Features', 'Application', 'Description'
]
# Header row first, then one worksheet row per scraped product.
for col, head in enumerate(headers, start=1):
    workSheet.cell(1, col).value = head.strip()
for row, product in enumerate(products, start=2):
    writeExcel(workSheet, headers, row, product)
print("flish")
wb.save(excelFileName)
34622506671 | import os
import zipfile
from pathlib import Path
import warnings
from shutil import rmtree
import time
import pandas as pd
import numpy as np
import SimpleITK as sitk
from tqdm import tqdm
from segmentation_metrics import compute_segmentation_scores
from survival_metrics import concordance_index
class AIcrowdEvaluator:
    # Scores submissions for both HECKTOR tasks: segmentation (task "1")
    # and survival prediction (task "2"); the task is selected via the
    # TASK_ID environment variable in _evaluate().

    def __init__(
            self,
            ground_truth_segmentation_folder="data/ground_truth/segmentation/",
            ground_truth_survival_file="data/ground_truth/survival/hecktor2021_patient_endpoint_testing.csv",
            bounding_boxes_file="data/hecktor2021_bbox_testing.csv",
            extraction_folder="data/extraction/",
            round_number=1,
    ):
        """Evaluator for the Hecktor Challenge

        Args:
            ground_truth_folder (str): the path to the folder
                                       containing the ground truth segmentation.
            ground_truth_survival_file (str): the path to the file
                                              containing the ground truth survival time.
            bounding_boxes_file (str): the path to the csv file which defines
                                       the bounding boxes for each patient.
            extraction_folder (str, optional): the path to the folder where the
                                               extraction of the .zip submission
                                               will take place. Defaults to "data/tmp/".
                                               This folder has to be created beforehand.
            round_number (int, optional): the round number. Defaults to 1.
        """
        # 'groud_truth_folder' keeps its historic typo — external code may
        # reference the attribute by this name.
        self.groud_truth_folder = Path(ground_truth_segmentation_folder)
        self.round = round_number
        self.extraction_folder = Path(extraction_folder)
        self.bounding_boxes_file = Path(bounding_boxes_file)
        self.gt_df = pd.read_csv(ground_truth_survival_file).set_index(
            "PatientID")

    def _evaluate_segmentation(self, client_payload, _context={}):
        """Task 1: unzip the submission, compare each predicted NIfTI mask
        against its ground truth (cropped/resampled to the patient bounding
        box) and aggregate Dice / HD95 / recall / precision."""
        submission_file_path = client_payload["submission_file_path"]
        aicrowd_submission_id = client_payload["aicrowd_submission_id"]
        aicrowd_participant_uid = client_payload["aicrowd_participant_id"]
        submission_extraction_folder = self.extraction_folder / (
            'submission' + str(aicrowd_submission_id) + '/')
        submission_extraction_folder.mkdir(parents=True, exist_ok=True)
        with zipfile.ZipFile(str(Path(submission_file_path).resolve()),
                             "r") as zip_ref:
            zip_ref.extractall(str(submission_extraction_folder.resolve()))
        groundtruth_paths = [
            f for f in self.groud_truth_folder.rglob("*.nii.gz")
        ]
        bb_df = pd.read_csv(str(
            self.bounding_boxes_file.resolve())).set_index("PatientID")
        results_df = pd.DataFrame()
        missing_patients = list()
        unresampled_patients = list()
        # Nearest-neighbour keeps label masks categorical when resampling.
        resampler = sitk.ResampleImageFilter()
        resampler.SetInterpolator(sitk.sitkNearestNeighbor)
        for path in tqdm(groundtruth_paths):
            # Patient ID is the 7-character filename prefix.
            patient_id = path.name[:7]
            prediction_files = [
                f
                for f in self.extraction_folder.rglob(patient_id + "*.nii.gz")
            ]
            if len(prediction_files) > 1:
                raise Exception(
                    "There is too many prediction files for patient {}".format(
                        patient_id))
            elif len(prediction_files) == 0:
                # Missing prediction: worst-possible scores for this patient.
                results_df = results_df.append(
                    {
                        "dice_score": 0,
                        "hausdorff_distance_95": np.inf,
                        "recall": 0,
                        "precision": 0,
                    },
                    ignore_index=True)
                missing_patients.append(patient_id)
                continue
            # Bounding box as [x1, y1, z1, x2, y2, z2] in physical space.
            bb = np.array([
                bb_df.loc[patient_id, "x1"], bb_df.loc[patient_id, "y1"],
                bb_df.loc[patient_id, "z1"], bb_df.loc[patient_id, "x2"],
                bb_df.loc[patient_id, "y2"], bb_df.loc[patient_id, "z2"]
            ])
            image_gt = sitk.ReadImage(str(path.resolve()))
            image_pred = sitk.ReadImage(str(prediction_files[0].resolve()))
            resampler.SetReferenceImage(image_gt)
            resampler.SetOutputOrigin(bb[:3])
            voxel_spacing = np.array(image_gt.GetSpacing())
            output_size = np.round(
                (bb[3:] - bb[:3]) / voxel_spacing).astype(int)
            resampler.SetSize([int(k) for k in output_size])
            # Crop to the bonding box and/or resample to the original spacing
            spacing = image_gt.GetSpacing()
            if spacing != image_pred.GetSpacing():
                unresampled_patients.append(patient_id)
            image_gt = resampler.Execute(image_gt)
            image_pred = resampler.Execute(image_pred)
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
            # migrate to pd.concat when upgrading.
            results_df = results_df.append(
                compute_segmentation_scores(
                    sitk.GetArrayFromImage(image_gt),
                    sitk.GetArrayFromImage(image_pred),
                    spacing,
                ),
                ignore_index=True,
            )
        # Mean for overlap metrics, median for HD95 (robust to inf values).
        _result_object = {
            "dice_score": results_df["dice_score"].mean(),
            "hausdorff_distance_95":
            results_df["hausdorff_distance_95"].median(),
            "recall": results_df["recall"].mean(),
            "precision": results_df["precision"].mean(),
        }
        rmtree(str(submission_extraction_folder.resolve()))
        messages = list()
        if len(unresampled_patients) > 0:
            messages.append(
                f"The following patient(s) was/were not resampled back"
                f" to the original resolution: {unresampled_patients}."
                f"\nWe applied a nearest neighbor resampling.\n")
        if len(missing_patients) > 0:
            messages.append(
                f"The following patient(s) was/were missing: {missing_patients}."
                f"\nA score of 0 and infinity were attributed to them "
                f"for the dice score and Hausdorff distance respectively.")
        _result_object["message"] = "".join(messages)
        return _result_object

    def _evaluate_survival(self, client_payload, _context={}):
        """Task 2: score a survival-prediction CSV with the concordance
        index; missing patients count as non-concordant, extra patients are
        dropped."""
        submission_file_path = client_payload["submission_file_path"]
        predictions_df = pd.read_csv(submission_file_path).set_index(
            "PatientID")
        if "Prediction" not in predictions_df.columns:
            raise RuntimeError("The 'Prediction' column is missing.")
        extra_patients = [
            p for p in predictions_df.index if p not in self.gt_df.index
        ]
        # Discard extra patient
        if len(extra_patients) > 0:
            predictions_df = predictions_df.drop(labels=extra_patients, axis=0)
        # Check for redundant entries
        if len(predictions_df.index) > len(list(set(predictions_df.index))):
            raise RuntimeError("One or more patients appear twice in the csv")
        # The following function concatenate the submission csv and the
        # ground truth and fill missing entries with NaNs. The missing
        # entries are then counted as non-concordant by the concordance_index
        # function
        df = pd.concat((self.gt_df, predictions_df), axis=1)
        missing_patients = list(df.loc[pd.isna(df['Prediction']), :].index)
        # Compute the c-index for anti-concordant prediction (e.g. risk score)
        concordance_factor = -1
        _result_object = {
            "concordance_index":
            concordance_index(
                df["Progression free survival"].values,
                concordance_factor * df["Prediction"],
                event_observed=df["Progression"],
            ),
        }
        messages = list()
        if len(missing_patients) > 0:
            # NOTE(review): this REBINDS ``messages`` to a str, so the
            # ``messages.append`` below raises AttributeError whenever both
            # missing AND extra patients exist; it also prints
            # ``missing_patients`` where ``extra_patients`` seems intended.
            messages = (f"The following patient(s) was/were missing"
                        f" : {missing_patients}\nThey were considered as "
                        f"non-concordant")
        if len(extra_patients) > 0:
            messages.append(
                f"The following patient(s) was/were dropped "
                f"(since they are not present in the test): {missing_patients}."
            )
        _result_object["message"] = "".join(messages)
        return _result_object

    def _get_evaluation_function(self, task_id, client_payload, _context={}):
        # Dispatch on the task id: "1" = segmentation, "2" = survival.
        if task_id == "1":
            return self._evaluate_segmentation(client_payload,
                                               _context=_context)
        elif task_id == "2":
            return self._evaluate_survival(client_payload, _context=_context)
        else:
            raise ValueError(f"{task_id} is not recognized.")

    def _evaluate(self, client_payload, _context={}):
        """
        `client_payload` will be a dict with (atleast) the following keys :
          - submission_file_path : local file path of the submitted file
          - aicrowd_submission_id : A unique id representing the submission
          - aicrowd_participant_id : A unique id for participant/team submitting (if enabled)
        """
        task_id = os.environ["TASK_ID"]
        return self._get_evaluation_function(task_id,
                                             client_payload,
                                             _context=_context)
if __name__ == "__main__":
    # Local smoke-test driver: fill in the paths below before running.
    ground_truth_segmentation_folder = ""
    ground_truth_survival_file = ""
    bounding_boxes_file = ""
    _client_payload = {}
    _client_payload["aicrowd_submission_id"] = 1123
    _client_payload["aicrowd_participant_id"] = 1234
    # Instantiate a dummy context
    _context = {}
    # Instantiate an evaluator
    aicrowd_evaluator = AIcrowdEvaluator(
        ground_truth_segmentation_folder=ground_truth_segmentation_folder,
        ground_truth_survival_file=ground_truth_survival_file,
        bounding_boxes_file=bounding_boxes_file,
    )
    # Evaluate Survival
    _client_payload[
        "submission_file_path"] = ""
    os.environ["TASK_ID"] = "2"
    start = time.process_time()
    result = aicrowd_evaluator._evaluate(_client_payload, _context)
    print(f"Time to compute the sample for the survival"
          f" task: {time.process_time() - start} [s]")
    print(f"The c-index is {result['concordance_index']}")
    # Bug fix: ``is not ""`` compares identity, not equality (SyntaxWarning
    # on modern CPython and implementation-dependent); use ``!=``.
    if result["message"] != "":
        print(f"The message is:\n {result['message']}")
    # Evaluate Segmentation
    os.environ["TASK_ID"] = "1"
    _client_payload[
        "submission_file_path"] = ""
    start = time.process_time()
    result = aicrowd_evaluator._evaluate(_client_payload, _context)
    print(f"Time to compute the sample for the segmentation"
          f" task: {time.process_time() - start} [s]")
    print(f"The results are:\n"
          f" - average dice score {result['dice_score']}\n"
          f" - median hausdorff distance {result['hausdorff_distance_95']}\n"
          f" - average recall {result['recall']}\n"
          f" - average precision {result['precision']}")
    if result["message"] != "":
        print(f"The message is:\n {result['message']}")
| voreille/hecktor | src/aicrowd_evaluator/evaluator.py | evaluator.py | py | 11,240 | python | en | code | 65 | github-code | 36 |
33083611476 | # ISO-8859-1
import csv
from urllib import request # -> Abre um url
def read(url):
    """Download a CSV from *url* (Latin-1 encoded) and print city/state columns."""
    with request.urlopen(url) as resposta:
        print('Baixando o CSV...')
        conteudo = resposta.read().decode('latin1')
        print('Download completo!')
        # Column 8 is the state, column 3 the city name in the IBGE file.
        registros = csv.reader(conteudo.splitlines())
        for registro in registros:
            print('{} | {}'.format(registro[8], registro[3]))
if __name__ == "__main__":
    # Demo: download the IBGE challenge CSV and print the selected columns.
    read(r'http://files.cod3r.com.br/curso-python/desafio-ibge.csv')
| sarandrade/Python-Courses | Python 3 - Curso Completo do Básico ao Avançado/Seção 09 - Manipulação de Arquivos/106. Desafio CSV do IBGE (resp).py | 106. Desafio CSV do IBGE (resp).py | py | 453 | python | pt | code | 0 | github-code | 36 |
33279344042 | import requests, json
BaseURL = 'https://paper-api.alpaca.markets'
OrdersURL = '{}/v2/orders'.format(BaseURL)
Headers = {"APCA-API-KEY-ID": "PKU31JDVN0AYRLMI1MEQ", "APCA-API-SECRET-KEY": "LVgw3y2RuffyDAMsjDR2EfscgsokGNTsuSEn3LUb"}
def create_order(symbol, qty, side, type, time_in_force):
    """POST an order to the Alpaca paper-trading API and return the parsed JSON reply.

    NOTE(review): the API keys used in `Headers` are hardcoded at module level;
    they should live in environment variables, not in source control.
    """
    payload = {
        'symbol': symbol,
        'qty': qty,
        'side': side,
        'type': type,
        'time_in_force': time_in_force,
    }
    response = requests.post(OrdersURL, json=payload, headers=Headers)
    return json.loads(response.content)
| Jacob-Kenney/JagBot | 4.Buy stocks/Buystocks.py | Buystocks.py | py | 534 | python | en | code | 1 | github-code | 36 |
16185478017 | from .channel import MessageChannel
class MessageBroker():
    """Routes messages from a MessageChannel to registered websocket connections.

    A ``user_id`` of ``None`` on the channel means "broadcast to every socket".
    """

    def __init__(self, config: dict, channel: MessageChannel):
        self.config = config
        self.channel = channel
        # Registry of live websocket connections keyed by user id.
        # Fix: this used to be a mutable *class* attribute, so every
        # MessageBroker instance shared (and clobbered) one global registry;
        # it is now per-instance state.
        self._wss = {}
        self.channel.add_handler(self._handle_message)

    async def _handle_message(self, user_id, message):
        """Channel callback: deliver to one user's socket, or to all when user_id is None."""
        if user_id is None:
            for ws in self._wss.values():
                await ws.send(message)
        else:
            ws = self._wss.get(user_id)
            if ws is not None:
                await ws.send(message)

    def register_ws(self, user_id, ws):
        """Attach a websocket for *user_id*, replacing any previous one."""
        self._wss[user_id] = ws

    def unregister_ws(self, user_id):
        """Detach the websocket for *user_id* (raises KeyError if absent)."""
        del self._wss[user_id]

    async def send_user_message(self, user_id, message):
        """Publish a message addressed to a single user through the channel."""
        await self.channel.publish(user_id, message)

    async def broadcast_message(self, message):
        """Publish a broadcast (user_id=None) message through the channel."""
        await self.channel.publish(None, message)
| jaggerwang/sanic-in-practice | weiguan/adapter/message/broker.py | broker.py | py | 897 | python | en | code | 42 | github-code | 36 |
2317503124 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.loss.multi_task import MultiTaskProxy
from core.loss.segment import SemanticSegmentation
from core.loss.matting import ImageMatting
from core.loss.grad import MattingGrad, ImageGradient
class JointMattingParsingLoss(nn.Module):
    """Combined objective for joint semantic segmentation and image matting.

    In the 'pretrain' phase only the segmentation loss is optimised; in the
    'end2end' phase segmentation and matting losses are balanced through a
    learned multi-task weighting (MultiTaskProxy).
    """

    def __init__(self, phase):
        super(JointMattingParsingLoss, self).__init__()
        assert phase in ['pretrain', 'end2end']
        print('============> Using join loss: {}'.format(phase))
        self.phase = phase
        self._multi_task_weight(phase)
        self.segment_loss = SemanticSegmentation()
        self.matting_loss = ImageMatting()

    def _multi_task_weight(self, phase):
        # The learned task-balancing proxy is only needed for end-to-end training.
        if phase == 'end2end':
            self.proxy = MultiTaskProxy(num_task=2)

    def forward(self, target, output):
        segment_gt, matting_gt = target['segment'], target['matting']
        segment_pr, matting_pr = output['segment'], output['matting']
        loss_segment = self.segment_loss(segment_pr=segment_pr, segment_gt=segment_gt, weight=None)
        if self.phase == 'pretrain':
            # Pretraining optimises the parsing branch alone.
            return loss_segment
        loss_matting = self.matting_loss(matting_pr=matting_pr, matting_gt=matting_gt, segment_gt=segment_gt)
        total = self.proxy([loss_segment, loss_matting])
        return total, loss_segment, loss_matting
| xinyunmian/matting | core/loss/joint.py | joint.py | py | 1,456 | python | en | code | 0 | github-code | 36 |
20157623405 | from collections import namedtuple
import sys
class Accumulator:
    """Interpreter for the handheld-console boot code (Advent of Code 2020, day 8).

    *input* maps 1-based line numbers to {"action": "acc"|"jmp"|"nop",
    "value": int}. Execution stops either when an instruction is about to run
    a second time (infinite loop) or when the pointer steps one past the last
    instruction (normal termination).
    """

    def __init__(self, input):
        self.accumulator = 0
        self.executed_instructions = []
        self.next_instruction = 1
        self.instructions = input
        # One past the highest line number == normal program termination.
        self.stopping_instruction = max(self.instructions.keys()) + 1
        self.stopping_instruction_reached = False

    def check_ins(self, ins):
        """Halt the machine on termination or on a detected loop; else record *ins*."""
        if ins == self.stopping_instruction:
            # Ran off the end of the program: normal termination.
            self.stopping_instruction_reached = True
            self.next_instruction = None
            return
        if ins in self.executed_instructions:
            # About to execute an instruction a second time: infinite loop.
            self.next_instruction = None
            return
        self.executed_instructions.append(ins)

    def acc(self):
        """Add the operand to the accumulator and advance to the next line."""
        self.accumulator += self.instructions[self.next_instruction]["value"]
        self.next_instruction += 1

    def jmp(self):
        """Jump relative to the current line by the operand."""
        self.next_instruction += self.instructions[self.next_instruction]["value"]

    def nop(self):
        """Do nothing; advance to the next line."""
        self.next_instruction += 1

    def process_instructions(self):
        """Run until loop or termination and return the accumulator value."""
        dispatch = {"acc": self.acc, "jmp": self.jmp, "nop": self.nop}
        while self.next_instruction:
            action = self.instructions[self.next_instruction]["action"]
            dispatch[action]()
            self.check_ins(self.next_instruction)
        return self.accumulator

    def debug_instructions(self):
        """Swap one jmp<->nop at a time until the program terminates normally.

        Returns (accumulator, fixed_line) on success, None if no swap works.
        """
        swap = {"jmp": "nop", "nop": "jmp"}
        for ins, step in self.instructions.items():
            original_action = step["action"]
            if original_action == "acc":
                continue
            step["action"] = swap[original_action]
            self.process_instructions()
            if self.stopping_instruction_reached:
                return self.accumulator, ins
            # Undo the swap and reset the machine for the next candidate.
            step["action"] = original_action
            self.next_instruction = 1
            self.accumulator = 0
            self.executed_instructions = []
if __name__ == "__main__":
    # Read in the input ... always called `input`
    # Day 08: read the text, split into lines, and build the
    # {line_number: {"action": ..., "value": ...}} map Accumulator expects
    # (1-indexed, to match its instruction pointer).
    with open("input") as f:
        input_dict = {
            idx: {"action": inst.split()[0], "value": int(inst.split()[1])}
            for idx, inst in enumerate(f.read().splitlines(), 1)
        }
    acc = Accumulator(input_dict)
    print(f"Infinite Loop Detected. Accumulator Value: {acc.process_instructions()}")
    print(f"Debugging Instructions ... ")
    # Fix: debug on a *fresh* machine. After process_instructions() the first
    # instance is halted (next_instruction is None), so its first jmp/nop
    # candidate would be silently skipped during debugging.
    acc_val, debug_line = Accumulator(input_dict).debug_instructions()
    print(f"Debugged line: {debug_line}")
    print(f"Accumulator Value: {acc_val}")
| davidcolton/adventofcode | 2020/day_08/accumulator.py | accumulator.py | py | 2,828 | python | en | code | 0 | github-code | 36 |
6129429499 | import argparse
from src.gt_merger import constants
def get_parser():
    """Build the CLI parser for the GT/OBA merger and return the parsed arguments.

    NOTE: despite its name, this calls ``parse_args()`` and returns the
    populated ``argparse.Namespace``, not the parser object.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--obaFile', type=str, required=True, help='Path to CSV file exported from OBA Firebase '
                                                                   'Export App')
    parser.add_argument('--gtFile', type=str, required=True, help='Path to XLSX file including the Ground Truth data')
    parser.add_argument('--outputDir', type=str, default=constants.OUTPUT_DIR,
                        help='Path to directory where the merged data and log data will be output')
    parser.add_argument('--minActivityDuration', type=float, default=constants.MIN_ACTIVITY_DURATION,
                        help='Minimum activity time span (minutes, default value = ' +
                             str(constants.MIN_ACTIVITY_DURATION) +
                             '), shorter activities will be dropped before merging.')
    parser.add_argument('--minTripLength', type=int, default=constants.MIN_TRIP_LENGTH,
                        help='Minimum length distance (meters, default value ' + str(constants.MIN_TRIP_LENGTH) +
                             ') for a trip. Shorter trips will be dropped before merging')
    parser.add_argument('--tolerance', type=int, default=constants.TOLERANCE,
                        help='Maximum tolerated difference (milliseconds, default value ' + str(constants.TOLERANCE) +
                             ') between matched ground truth data activity and OBA data activity')
    # Each boolean option comes as a --flag/--no-flag pair sharing one dest,
    # with the default pinned explicitly by set_defaults().
    parser.add_argument('--iterateOverTol', dest='iterateOverTol', action='store_true')
    parser.add_argument('--no-iterateOverTol', dest='iterateOverTol', action='store_false')
    parser.set_defaults(iterateOverTol=False)
    parser.add_argument('--removeStillMode', dest='removeStillMode', action='store_true')
    parser.add_argument('--no-removeStillMode', dest='removeStillMode', action='store_false')
    parser.set_defaults(removeStillMode=True)
    parser.add_argument('--mergeOneToOne', dest='mergeOneToOne', action='store_true')
    parser.add_argument('--no-mergeOneToOne', dest='mergeOneToOne', action='store_false')
    parser.set_defaults(mergeOneToOne=False)
    parser.add_argument('--repeatGtRows', dest='repeatGtRows', action='store_true')
    parser.add_argument('--no-repeatGtRows', dest='repeatGtRows', action='store_false')
    # Fix: this line previously re-set mergeOneToOne's default (copy-paste
    # slip); the default being configured here is repeatGtRows.
    parser.set_defaults(repeatGtRows=False)
    parser.add_argument('--deviceList', type=str, default="",
                        help='Path to txt file including white list of OBA devices to be used for match and merge')
    args = parser.parse_args()
    return args
| CUTR-at-USF/onebusaway-travel-behavior-analysis | src/gt_merger/args.py | args.py | py | 2,668 | python | en | code | 0 | github-code | 36 |
71789567783 | # Binary search algorithm built with python
from random import randint
from insertion_sort import *
# Returns position in array if found otherwise returns -1
def binary_search(arr, target):
    """Iterative binary search over a sorted list.

    Returns the index of *target* in *arr*, or -1 when absent.
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        middle = (lo + hi) // 2
        value = arr[middle]
        if value == target:
            return middle
        if value > target:
            hi = middle - 1
        else:
            lo = middle + 1
    return -1
if __name__ == "__main__":
    # Demo: look for a random target among ten random values, sorted first
    # because binary search requires ordered input.
    arr = []
    target = randint(1, 20)
    arr = [randint(1, 20) for _ in range(10)]
    arr = insertion_sort(arr)
    print(f"Array: {arr}")
    print(f"Target: {target}")
    print(f"Position found: {binary_search(arr, target)}")
74477036264 | from flask import Flask
from flask import render_template, request, jsonify
from engine.inv_ind import get_inv_ind
from engine.retrieval import get_retrieved_docs
app = Flask(__name__)
@app.route('/')
def hello():
    # Landing page: renders the static default template.
    return render_template("default.html")
@app.route('/index', methods=["GET", "POST"])
def index_files():
    """Build the inverted index for the selected corpus and render its stats page.

    NOTE(review): GET requests fall through and return None — confirm that is
    the intended behaviour.
    """
    if request.method == 'POST':
        options = request.form.to_dict(flat=False)
        print("form_r", options)
        inv_ind, docs, stats = get_inv_ind(
            corpus=options["corpus"][0],
            do_stem=options["stem"][0],
            do_stop=options["sw"][0],
        )
        print(stats)
        return render_template("index.html", data=inv_ind, docs=docs, stats=stats, opts=options)
@app.route('/result', methods=["GET", "POST"])
def send_result():
    """Run the query against the chosen corpus and render the retrieved documents."""
    if request.method == 'POST':
        options = request.form.to_dict(flat=False)
        print(options)
        docs = get_retrieved_docs(options["query"][0], options["corpus"][0],
                                  options["stem"][0], options["sw"][0])
        # A single {"null": ...} placeholder means no document matched.
        hits = 0 if list(docs[0].keys())[0] == "null" else len(docs)
        return render_template("display.html", docs=docs, query=options["query"][0], num_docs=hits)
if __name__ == '__main__':
    # Development server; pass host='0.0.0.0' to app.run() to expose it on the LAN.
    app.run()
31748860310 | import numpy as np
from timeit import default_timer as timer
from numba import vectorize
@vectorize(['float32(float32, float32)'], target='cuda')
def gpu_pow(a, b):
    # Element-wise a ** b as a numba ufunc compiled for the GPU (float32 in/out).
    return a ** b
@vectorize(['float32(float32, float32)'], target='parallel')
def cpu_para_pow(a, b):
    # Element-wise a ** b as a numba ufunc running on all CPU cores.
    return a ** b
def cpu_pow(a, b, c):
    """Element-wise power computed with a plain Python loop (CPU baseline).

    Writes a[i] ** b[i] into the pre-allocated output array *c*.
    """
    idx = 0
    while idx < a.size:
        c[idx] = a[idx] ** b[idx]
        idx += 1
def cpu_test():
    """Benchmark the pure-Python element-wise power over 100M float32 values."""
    vec_size = 100000000
    # a and b alias the same random array; c receives the result in place.
    a = b = np.array(np.random.sample(vec_size), dtype=np.float32)
    c = np.zeros(vec_size, dtype=np.float32)
    t0 = timer()
    cpu_pow(a, b, c)
    print(timer() - t0)
def cpu_para_test():
    """Benchmark the numba parallel (multi-core CPU) vectorized power over 100M float32 values."""
    vec_size = 100000000
    a = b = np.array(np.random.sample(vec_size), dtype=np.float32)
    # The vectorized ufunc allocates its own output array, so the previous
    # `c = np.zeros(...)` pre-allocation was dead (immediately overwritten)
    # and has been removed.
    start = timer()
    cpu_para_pow(a, b)
    duration = timer() - start
    print(duration)
def gpu_test():
    """Benchmark the numba CUDA vectorized power over 100M float32 values.

    The timing includes host<->device transfers, not just kernel execution.
    """
    vec_size = 100000000
    a = b = np.array(np.random.sample(vec_size), dtype=np.float32)
    # The vectorized ufunc allocates its own output array, so the previous
    # `c = np.zeros(...)` pre-allocation was dead (immediately overwritten)
    # and has been removed.
    start = timer()
    gpu_pow(a, b)
    duration = timer() - start
    print(duration)
def main():
    # Run the three benchmarks: parallel CPU, single-thread CPU, then GPU.
    cpu_para_test()
    cpu_test()
    gpu_test()
if __name__ == '__main__':
    main()
| Purdue-Academic-Projects/AI_Final_Project | heart_rate_ai/cuda_test/cuda_tutorial.py | cuda_tutorial.py | py | 1,237 | python | en | code | 0 | github-code | 36 |
20077173649 | from django.urls import resolve
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase, APIRequestFactory
from cars.models import Car, Manufacturer
from cars.serializers import CarGetSerializer
from cars.views import CarListCreateView
factory = APIRequestFactory()
class CarListViewTest(APITestCase):
    """Tests for CarListCreateView: URL routing and the GET list endpoint."""
    def setUp(self) -> None:
        # One view instance for queryset access, one as_view() callable for requests.
        self.view_object = CarListCreateView()
        self.view = CarListCreateView.as_view()
        self.url = reverse("cars:cars")
        self.request = factory.get(self.url)
    def test_url_revers(self):
        # The reversed URL must resolve back to the same view at path /cars/.
        found = resolve(self.url)
        self.assertEqual(found.func.__name__, self.view.__name__)
        self.assertEqual(self.url, "/cars/")
    def test_empty_car_list(self):
        # With no Car rows, the response should be the (empty) serialized queryset.
        cars = Car.objects.all()
        serializer = CarGetSerializer(cars, many=True)
        response = self.view(self.request)
        response.render()
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_not_empty_car_list(self):
        # With two cars created, the response must match the view's own queryset.
        manufacturer = Manufacturer.objects.create(make="Ford")
        Car.objects.create(manufacturer=manufacturer, model="Mustang")
        Car.objects.create(manufacturer=manufacturer, model="F-150")
        cars = self.view_object.get_queryset()
        serializer = CarGetSerializer(cars, many=True)
        response = self.view(self.request)
        response.render()
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| tomasz-rzesikowski/cars_API | cars/tests/tests_views/tests_car_list_view.py | tests_car_list_view.py | py | 1,620 | python | en | code | 0 | github-code | 36 |
71002340904 | """
40. O custo ao consumidor de um carro novo é a soma do custo de fábrica, da comissão do
distribuidor, e dos impostos. A comissão e os impostos são calculados sobre o custo de fábrica,
de acordo com a tabela abaixo. Leia o custo de fábrica e escreva o custo ao consumidor.
------------------------------------------------------------------------------
| Custo de fábrica | % do Distribuidor | % dos impostos |
| até R$ 12.000,00 | 5 | isento |
| entre R$ 12.000,00 e 25.000,00 | 10 | 15 |
| acima de R$ 25.000,00 | 15 | 20 |
"""
try:
    fabrica = float(input('Insira o custo de fábrica: '))
    if fabrica > 0:
        # Consumer cost = factory cost + distributor commission + taxes,
        # both computed as percentages of the factory cost (table above).
        # Fixes vs the original: the old code added `fabrica` into both the
        # commission and the tax and then summed all three (triple-counting
        # the factory cost); fabrica == 25000 fell through every branch and
        # negative input still tried to print `total`, both raising NameError.
        if fabrica <= 12000:
            total = fabrica + (fabrica * 5 / 100)  # 5% commission, tax-exempt
        elif fabrica <= 25000:
            total = fabrica + (fabrica * 10 / 100) + (fabrica * 15 / 100)
        else:
            total = fabrica + (fabrica * 15 / 100) + (fabrica * 20 / 100)
        print(f'O custo para consumidor: {total}')
    else:
        print('Número negativo')
except ValueError:
    print('ERRO!!!! Só pode ser digitado números.')
23215865575 | class TimeBlock2:
"""
Creates an object that represents the available block of time in which a court is available to play in
"""
def __init__(self, club_id, initial_time, final_time, court_name, match_duration, court_value, court_size):
self.club_id = club_id
self.initial_time = initial_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.final_time = final_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.court_name = court_name
self.match_duration = match_duration
self.court_value = court_value
self.court_size = court_size
| rbilbeny/PadelOk | mysite/time_block.py | time_block.py | py | 539 | python | en | code | 0 | github-code | 36 |
75273737705 | import tkinter
import pyperclip
sirka = 500
vyska = 450
data = []
size = []
c = tkinter.Canvas(width=sirka, height=vyska)
c.pack()
def copy():
    """Serialize the bitmap as a Python tuple of 0b-literals and copy it to the clipboard."""
    cptxt = '('
    for i in range(size[1]):
        cptxt += '0b' + ''.join(str(e) for e in data[i]) + ','
        if i == size[1] - 1:
            # Replace the trailing comma of the last row with the closing paren.
            cptxt = cptxt[:-1] + ')'
    print(cptxt)
    # Fix: previously copied the leftover placeholder 'ahojsvet' instead of
    # the generated tuple text.
    pyperclip.copy(cptxt)
def print_data():
    # Render each bitmap row as a 0b-prefixed binary literal beside the grid.
    for i in range(size[1]):
        text = ''.join(str(e) for e in data[i])
        c.create_text(350,15*i+100+i*15,fill='black',text='0b'+text,font='Timer 15')
def clear():
    """Reset every cell to 0 and redraw the grid plus its binary readout."""
    global data
    c.delete('all')
    columns, rows = size
    data = [[0] * columns for _ in range(rows)]
    table(columns, rows)
    print_data()
def clickon(event):
    """Toggle the grid cell under a left click and repaint the canvas."""
    global data
    # Ignore clicks outside the drawable grid area.
    inside = 10 < event.x < 260 and 10 < event.y < 410
    if not inside:
        return False
    col = (event.x - 10) // 50
    row = (event.y - 10) // 50
    # Flip the bit (cells only ever hold 0 or 1).
    data[row][col] ^= 1
    c.delete('all')
    table(*size)
    print_data()
def table(columns, rows):
    # Draw the grid: black squares for 1-bits, dark blue for 0-bits.
    for ra in range(rows):
        for co in range(columns):
            color = 'darkblue'
            if data[ra][co] == 1:
                color = 'black'
            c.create_rectangle(co*50+10, ra*50+10,co*50+60,ra*50+60, outline='white', tag='box', fill=color)
def new_screen(columns, rows):
    # Initialise a fresh zeroed bitmap of the given dimensions.
    global data, size
    size = [columns, rows]
    data = [[y*0 for y in range(columns)] for _ in range(rows)]
    # Draw the empty grid.
    table(columns, rows)
    # Buttons for clearing the grid and copying the tuple text.
    clear_button = tkinter.Button(text='Clear',command=clear)
    clear_button.place(x=300,y=420)
    copy_button = tkinter.Button(text='Copy',command=copy)
    copy_button.place(x=250,y=420)
    print_data()
# Start with a 5-column x 8-row grid and route left clicks to the toggle handler.
new_screen(5,8)
c.bind('<Button-1>', clickon)
| branislavblazek/notes | Python/projekty/char_creator.py | char_creator.py | py | 1,990 | python | en | code | 0 | github-code | 36 |
40461953299 | import os
import gspread
from oauth2client.service_account import ServiceAccountCredentials as SAC
from linebot import LineBotApi, WebhookParser
from linebot.models import MessageEvent, TextMessage, ImageSendMessage, URITemplateAction, TextSendMessage, TemplateSendMessage, ButtonsTemplate, MessageTemplateAction, CarouselColumn, CarouselContainer, CarouselTemplate, ImageCarouselColumn, ConfirmTemplate, MessageAction, ImageCarouselTemplate, LocationSendMessage
import requests
import bs4
import time
# SECURITY(review): LINE channel access token hardcoded in source control —
# rotate it and move it to an environment variable.
channel_access_token = "d7LmOXC9iutUkR//iJHfWy55z5oWmOkz2/Nx1FM34Y5LVfbTdwe+n/cCjcARAax+CD1oRkPGMKAWTKdi2P84uJSq5RBLYs4P4nwdMc9SkTtXK2oiKMyp6ch5XFsRFSOGNlOyH3iId6EPkKJnugb3+QdB04t89/1O/w1cDnyilFU=/bfDiSkEsrHaYtAS/fKH6vi9aMnwsM08hZJmg/xwPJVD="
# Display name -> decotv.com.tw gallery keyword ('' means no style filter).
styles = {'不限': "", '現代風' : "Modern", '簡約風':"Simplicity",
        '飯店風':"Hotel", '奢華風':"Luxury", '休閒風':"Leisure",
        '鄉村風':"Rustic", '混搭風':"Mashup", '日式':"Japanese",
        'LOFT':"Industrial", '前衛風':"AvantGarde"}
# Ordered list of the same styles, so users can pick by 1-based index.
styles_ind = ['不限', '現代風', '簡約風', '飯店風', '奢華風', '休閒風',
            '鄉村風', '混搭風', '日式', 'LOFT', '前衛風']
def send_text_message(reply_token, text):
    """Reply to a LINE event with a plain text message; returns "OK"."""
    line_bot_api = LineBotApi(channel_access_token)
    line_bot_api.reply_message(reply_token, TextSendMessage(text=text))
    return "OK"
def show_search_style_or_category(reply_token):
    """Ask the user whether to browse by decoration style or by room category."""
    line_bot_api = LineBotApi(channel_access_token)
    Carousel_template = TemplateSendMessage(
        alt_text='Carousel template',
        template=CarouselTemplate(
            columns=[
                CarouselColumn(
                    thumbnail_image_url='https://i.imgur.com/GVWVqOO.png',
                    title='請選擇要查詢的風格或類別',
                    text=' ',
                    actions=[
                        MessageTemplateAction(
                            label='風格',
                            text='search'
                        ),
                        MessageTemplateAction(
                            label="類別",
                            text="category"
                        )
                    ]
                )
            ]
        )
    )
    line_bot_api.reply_message(reply_token,Carousel_template)
def show_category(reply_token):
    """Reply with an image carousel linking to officesnapshots.com room-category galleries."""
    line_bot_api = LineBotApi(channel_access_token)
    Carousel_template = TemplateSendMessage(
        alt_text='Image Carousel template',
        template=ImageCarouselTemplate(
            columns=[
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/EsGdyZi.png',
                    action=URITemplateAction(
                        label='大會議室',
                        uri='https://officesnapshots.com/photos/?filter_meeting-spaces=large-meeting-room'
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/lpdriMH.png',
                    action=URITemplateAction(
                        label='小會議室',
                        uri='https://officesnapshots.com/photos/?filter_meeting-spaces=small-meeting-room'
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/o8sd5bc.png',
                    action=URITemplateAction(
                        label='開放式工作站',
                        uri='https://officesnapshots.com/photos/?filter_work-spaces=open-office'
                    )
                )
            ]
        )
    )
    line_bot_api.reply_message(reply_token,Carousel_template)
def show_search(reply_token):
    """Reply with a numbered prompt listing every selectable decoration style."""
    line_bot_api = LineBotApi(channel_access_token)
    # One "NN: name" line per style, 1-based to match styles_ind lookups.
    options = [f"{idx:2d}: {name}" for idx, name in enumerate(styles, 1)]
    prefer = "請輸入想要的風格或編號:\n" + "\n".join(options)
    line_bot_api.reply_message(reply_token, TextSendMessage(text=prefer))
def show_start_search(reply_token, text):
    """Scrape decotv.com.tw for galleries matching a style (name or 1-based index)
    and reply with an image carousel of up to 5 results plus navigation tiles."""
    line_bot_api = LineBotApi(channel_access_token)
    url_end = '&pattern=Office&page=1'
    if(text.isdigit() and (int(text) > 0 and int(text) <= len(styles_ind))):
        url = 'https://decotv.com.tw/gallery?works=' + styles[styles_ind[int(text)-1]] + url_end
    elif(not text.isdigit() and text in styles.keys()):
        url = 'https://decotv.com.tw/gallery?works=' + styles[text] + url_end
    else:
        line_bot_api.reply_message(reply_token,TextSendMessage(text="輸入錯誤,請重新輸入"))
        # Fix: bail out here. Previously execution fell through with `url`
        # undefined, raising NameError on the requests.get() call below.
        return
    response = requests.get(url)
    html = bs4.BeautifulSoup(response.text, 'html.parser')
    # Gallery thumbnails and their matching click targets (ids come in pairs).
    links = html.find_all("img", {"class": "bgimg"})
    links2 = html.find_all("div", {"class": "frameitemin caseclicknew btn"})
    time.sleep(3)
    popup_url_link = []
    img_urls = []
    popup_url = 'https://decotv.com.tw/works_thisid,'
    popup_url_mid = '_thispic,'
    # LINE image carousels hold at most a handful of tiles; cap results at 5.
    links_length = 5 if len(links) > 5 else len(links)
    for i in range(links_length):
        img_urls.append('https://decotv.com.tw/' + links[i]['src'])
        popup_url_link.append(popup_url + links2[i].get("data-id") + popup_url_mid + links2[i].get("data-pic"))
    imagecarouselcolumns = []
    for i in range(links_length):
        imagecarouselcolumns.append(
            ImageCarouselColumn(
                image_url=img_urls[i],
                action=URITemplateAction(label=str(i+1), uri=popup_url_link[i])))
    if(len(links) > 5):
        # More than 5 hits: add a "see more" tile linking to the full gallery.
        imagecarouselcolumns.append(
            ImageCarouselColumn(
                image_url="https://i.imgur.com/4CIrAa9.png",
                action=URITemplateAction(label="查看更多", uri=url)))
    imagecarouselcolumns.append(
        ImageCarouselColumn(
            image_url="https://i.imgur.com/PoXcTmZ.png",
            action=MessageTemplateAction(label="重新查詢", text="重新查詢")))
    imagecarouselcolumns.append(
        ImageCarouselColumn(
            image_url='https://i.imgur.com/iuDuTbt.png?1',
            action=MessageTemplateAction(label='返回主選單', text='main menu')))
    Carousel_template = TemplateSendMessage(
        alt_text='Image Carousel template',
        template=ImageCarouselTemplate(
            columns=imagecarouselcolumns))
    line_bot_api.reply_message(reply_token,Carousel_template)
def show_enter_menu(reply_token):
    """Prompt the user to pick a service from the menu."""
    line_bot_api = LineBotApi(channel_access_token)
    line_bot_api.reply_message(reply_token, TextSendMessage(text="請從選單中選擇您要的服務項目"))
def show_main_menu(reply_token):
    """Reply with the main menu carousel: catalogue/images, care tips, contact us."""
    line_bot_api = LineBotApi(channel_access_token)
    Carousel_template = TemplateSendMessage(
        alt_text='Carousel template',
        template=CarouselTemplate(
            columns=[
                CarouselColumn(
                    thumbnail_image_url='https://i.imgur.com/dXfoAvK.jpg',
                    title='主選單',
                    text='請選擇想要的服務項目',
                    actions=[
                        MessageTemplateAction(
                            label='查詢家具目錄&圖片',
                            text='contents and images'
                        ),
                        MessageTemplateAction(
                            label='保養方法',
                            text="maintenance method"
                        ),
                        MessageTemplateAction(
                            label='聯絡我們',
                            text="contact us"
                        )
                    ]
                )
            ]
        )
    )
    line_bot_api.reply_message(reply_token,Carousel_template)
def show_maintenance_method(reply_token):
    """Reply with the furniture-care instructions image."""
    line_bot_api = LineBotApi(channel_access_token)
    line_bot_api.reply_message(reply_token,ImageSendMessage(
        original_content_url='https://i.imgur.com/ITshKAM.png',
        preview_image_url='https://i.imgur.com/ITshKAM.png'))
def show_FSM(reply_token):
    """Reply with the bot's finite-state-machine diagram image."""
    line_bot_api = LineBotApi(channel_access_token)
    line_bot_api.reply_message(reply_token,ImageSendMessage(
        original_content_url="https://i.imgur.com/rR8CR8W.png",
        preview_image_url="https://i.imgur.com/rR8CR8W.png"))
def show_contact_us(reply_token):
    """Reply with the contact-us carousel offering address and phone number."""
    line_bot_api = LineBotApi(channel_access_token)
    Carousel_template = TemplateSendMessage(
        alt_text='Carousel template',
        template=CarouselTemplate(
            columns=[
                CarouselColumn(
                    thumbnail_image_url='https://i.imgur.com/XjDtpGl.png',
                    title='聯絡我們',
                    text=' ',
                    actions=[
                        MessageTemplateAction(
                            label='地址',
                            text='address'
                        ),
                        MessageTemplateAction(
                            label="聯絡電話",
                            text="contact number"
                        )
                    ]
                )
            ]
        )
    )
    line_bot_api.reply_message(reply_token,Carousel_template)
def show_address(reply_token):
    """Reply with the showroom address as a LINE location message (pin included)."""
    line_bot_api = LineBotApi(channel_access_token)
    line_bot_api.reply_message(reply_token, LocationSendMessage(title="地址", address="台北市內湖區成功路四段188號14樓之11", latitude=25.08414356007363, longitude=121.59439182744914))
def show_contact_number(reply_token):
    """Reply with the store's phone number as plain text."""
    line_bot_api = LineBotApi(channel_access_token)
    line_bot_api.reply_message(reply_token, TextSendMessage(text="聯絡電話:(02)2794-2268"))
def show_search_contents_and_images(reply_token):
    """Reply with the furniture menu carousel: catalogues, desks, chairs & sofas."""
    line_bot_api = LineBotApi(channel_access_token)
    Carousel_template = TemplateSendMessage(
        alt_text='Carousel template',
        template=CarouselTemplate(
            columns=[
                CarouselColumn(
                    thumbnail_image_url='https://i.imgur.com/WSh5g9U.jpg',
                    title='家具目錄&圖片',
                    text='請選擇想要的家具目錄或圖片',
                    actions=[
                        MessageTemplateAction(
                            label='目錄',
                            text='contents'
                        ),
                        MessageTemplateAction(
                            label='辦公桌',
                            text='office tables'
                        ),
                        MessageTemplateAction(
                            label='辦公椅&沙發',
                            text='office chairs and sofas'
                        )
                    ]
                )
            ]
        )
    )
    line_bot_api.reply_message(reply_token,Carousel_template)
def show_office_chairs(reply_token):
    """Reply with an image carousel linking to chair & sofa photo folders on Google Drive."""
    line_bot_api = LineBotApi(channel_access_token)
    Carousel_template = TemplateSendMessage(
        alt_text='Image Carousel template',
        template=ImageCarouselTemplate(
            columns=[
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/vA4AV0k.jpg',
                    action=URITemplateAction(
                        label='辦公椅(SD)',
                        uri="https://drive.google.com/drive/folders/1zb0oE92j4H7nwSjnREH1qO9gctXaAO45"
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/hKw6Hfw.jpg',
                    action=URITemplateAction(
                        label='造型椅(LG)',
                        uri='https://drive.google.com/drive/folders/1VvAbvKri-wz1mswbvyQ4eB-uv6jhdSP1'
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/Calzg5r.png',
                    action= URITemplateAction(
                        label='沙發',
                        uri='https://drive.google.com/drive/folders/12kw46rchbYTRjybj2BH0pGglCM8t5KnE'
                    )
                )
            ]
        )
    )
    line_bot_api.reply_message(reply_token,Carousel_template)
def show_office_tables(reply_token):
    """Reply with an image carousel linking to desk photo folders on Google Drive."""
    line_bot_api = LineBotApi(channel_access_token)
    Carousel_template = TemplateSendMessage(
        alt_text='Image Carousel template',
        template=ImageCarouselTemplate(
            columns=[
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/C1qXu2a.jpg',
                    action=URITemplateAction(
                        label='獨立桌(LG)',
                        uri='https://drive.google.com/drive/folders/1PohqcyoW1TPUdDfVoGP8Tu0bKorexUtp'
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/emrkJns.jpg',
                    action=URITemplateAction(
                        label='獨立桌(KT)',
                        uri="https://drive.google.com/drive/folders/1_ds45brlPQq5WK5cyIwZsgAe_GSFVcQQ"
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/pQFaPYe.jpg',
                    action=URITemplateAction(
                        label='獨立桌(OS)',
                        uri='https://drive.google.com/drive/folders/1KcZU87EBVlUiShEQhuvQS0Fa-LNnyxQ4'
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/Kp9l3J9.jpg',
                    action=URITemplateAction(
                        label='升降桌(LG)',
                        uri="https://drive.google.com/drive/folders/1iTJy6aX9tVHDeJrVZ6mjQZJW7WgoEdZe"
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/5ok0DxE.jpg',
                    action=URITemplateAction(
                        label='主管桌(DS)',
                        uri="https://drive.google.com/drive/folders/1Zp0vS6zQdBHcKK2ReqP6lHOZQ0hJ3jxl"
                    )
                )
            ]
        )
    )
    line_bot_api.reply_message(reply_token,Carousel_template)
def show_contents(reply_token):
    """Reply with an image carousel linking to catalogue PDFs on Google Drive."""
    line_bot_api = LineBotApi(channel_access_token)
    catalog_template = TemplateSendMessage(
        alt_text='Image Carousel template',
        template=ImageCarouselTemplate(
            columns=[
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/cAuLdQJ.png',
                    action=URITemplateAction(
                        label='主管桌目錄',
                        uri='https://drive.google.com/drive/folders/1JKvSRxe4ynifQpUz0yLxu9bJSGAzC5xt'
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/O0Fe27J.png',
                    action=URITemplateAction(
                        label='家具綜合目錄',
                        uri="https://drive.google.com/drive/folders/1-0X1bsMc8DVgJxMlvSrrsX0P8NIqVrLl"
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/V3FdBxP.png',
                    action=URITemplateAction(
                        label='獨立桌目錄',
                        uri='https://drive.google.com/drive/folders/1DFXEHQA9nILGK9TCCW2pW4bTHAAk3Me5'
                    )
                ),
                ImageCarouselColumn(
                    image_url='https://i.imgur.com/W1UMAsj.png',
                    action=URITemplateAction(
                        label='辦公椅目錄',
                        uri="https://drive.google.com/drive/folders/11tUf4GMAW2jtIEdQmnPT25gaiTuJtRSo"
                    )
                )
            ]
        )
    )
    line_bot_api.reply_message(reply_token, catalog_template)
28779713791 | """
Exercism Python track
Source: https://exercism.org/tracks/python
My solutions: https://github.com/egalli64/pythonesque/exercism
Card Games https://exercism.org/tracks/python/exercises/card-games
Functions for tracking poker hands and assorted card tasks.
"""
import unittest
from lists import *
class TestCardGames(unittest.TestCase):
    """Exercism 'Card Games' exercise tests against the functions in lists.py."""
    def test_given_get_rounds(self):
        # Round n implies rounds n, n+1, n+2.
        self.assertEqual(get_rounds(27), [27, 28, 29])
    def test_given_concatenate_rounds(self):
        self.assertEqual(concatenate_rounds(
            [27, 28, 29], [35, 36]), [27, 28, 29, 35, 36])
    def test_given_list_contains_round(self):
        round = [27, 28, 29, 35, 36]
        targets = [29, 30]
        expected_results = [True, False]
        for (target, expected) in zip(targets, expected_results):
            with self.subTest(input_data=target, output_data=expected):
                actual = list_contains_round(round, target)
                self.assertEqual(actual, expected)
    def test_given_card_average(self):
        self.assertEqual(card_average([5, 6, 7]), 6.0)
    def test_given_approx_average_is_average(self):
        # "Approx average" = mean of first and last card, or the middle card.
        rounds = [[1, 2, 3], [2, 3, 4, 8, 8], [1, 2, 3, 5, 9]]
        expected_results = [True, True, False]
        for (round, expected) in zip(rounds, expected_results):
            with self.subTest(input_data=round, output_data=expected):
                actual = approx_average_is_average(round)
                self.assertEqual(actual, expected)
    def test_given_average_even_is_average_odd(self):
        # Compares averages of even-indexed vs odd-indexed cards.
        rounds = [[1, 2, 3], [1, 2, 3, 4]]
        expected_results = [True, False]
        for (round, expected) in zip(rounds, expected_results):
            with self.subTest(input_data=round, output_data=expected):
                actual = average_even_is_average_odd(round)
                self.assertEqual(actual, expected)
    def test_given_maybe_double_last(self):
        # The last card is doubled only when it is a Jack (11).
        rounds = [[5, 9, 11], [5, 9, 10]]
        expected_results = [[5, 9, 22], [5, 9, 10]]
        for (round, expected) in zip(rounds, expected_results):
            with self.subTest(input_data=round, output_data=expected):
                actual = maybe_double_last(round)
                self.assertEqual(actual, expected)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| egalli64/pythonesque | exercism/lists_test.py | lists_test.py | py | 2,302 | python | en | code | 17 | github-code | 36 |
# Greedy optimal-loading: maximise the number of containers shipped within
# capacity c by always taking the lightest remaining container first.
c = 100
w = [12, 9, 56, 78, 55, 23]
# Sort weights ascending so the greedy choice is simply "next in line".
w.sort()
count = 0
for weight in w:
    if weight > c:
        # Even the lightest remaining container no longer fits.
        break
    c -= weight
    count += 1
print(count)
15293966281 | # -*- coding:utf-8 -*-
import os
import sys
import time
import math
import threading
import json
import random
import logging
from apscheduler.schedulers.background import BackgroundScheduler
# Local helper modules live under ./lib.
sys.path.append('./lib')
import mylog
# Root log at WARNING via the project helper; a separate 'mylogger' captures
# INFO-level traces into ./log/info.log.
mylog.setLog('KD48Monitor', logging.WARNING)
loggerInfo = logging.getLogger('mylogger')
loggerInfo.setLevel(logging.INFO)
fh = logging.FileHandler('./log/info.log')
formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
fh.setFormatter(formatter)
loggerInfo.addHandler(fh)
from utility import *
from KD48API import KD48API
from longQQMsg import LongQQMsg
from MsgCounter import MsgCounter
from cqsdk import CQBot, CQAt, CQRecord, RcvdPrivateMessage, RcvdGroupMessage, \
    GroupMemberIncrease, GroupMemberDecrease
import utils
# Remember our own PID so killmyself() can terminate the process later.
pid = os.getpid()
# CoolQ bot listening on port 11235.
qqbot = CQBot(11235)
longQQMsg = LongQQMsg()
def killmyself():
    # Terminate this monitor process via utility.killProcess.
    killProcess(pid)
def SendDebugMsgs(debugQQ, t):
    """Log *t* locally and, if a debug QQ id is configured, forward it there privately."""
    PrintLog(t)
    if debugQQ:
        utils.SendPrivateMsg(qqbot, str(debugQQ), t)
def PrintLog(text):
    """Echo *text* to stdout with a timestamp and write it to the log handlers."""
    currTimeStr = Time2ISOString(time.time())
    print(currTimeStr, gbkIgnore(text))
    # NOTE(review): the same line goes through both the root logger and
    # loggerInfo, so it can appear twice in the logs — confirm this is intended.
    logging.info('PrintLog ' + text)
    loggerInfo.info('PrintLog ' + text)
class KD48Monitor(object):
    """Monitor one member's Koudai48 chat room and live streams.

    Polls the Koudai48 API on a background scheduler and forwards room
    messages, live-stream events and room-profile changes to configured
    QQ groups/users through the CoolQ bot (``qqbot``).
    """

    def __init__(self, accountInfo, monitorInfo):
        # Koudai48 login credentials
        self.account = accountInfo['userId']
        self.password = accountInfo['password']
        self.token = '0'
        # debugQQ: QQ id that receives error/debug messages
        self.debugQQ = accountInfo['debugQQ']
        # Receiver configuration:
        #   QQGroups_pro:  groups that get every message forwarded
        #   QQGroups_lite: groups that only get "member appeared" alerts
        #   QQIds:         private chats that get every message forwarded
        self.QQIds = monitorInfo['QQIds']
        self.QQGroups_pro = monitorInfo['QQGroups_pro']
        if 'QQGroups_lite' in list(monitorInfo.keys()):
            self.QQGroups_lite = monitorInfo['QQGroups_lite']
        else:
            self.QQGroups_lite = []
        self.QQGroups_all = list(set(self.QQGroups_pro).union(set(self.QQGroups_lite)))
        self.sendToLite = monitorInfo['sendToLite']
        # Monitored member's identity (filled in by initKD)
        self.memberId = monitorInfo['memberId']
        self.memberName = ''
        self.roomId = None
        self.groupId = 0
        self.roomInfo = {}
        self.roomInfoOld = {}
        self.beginHot = 0
        # CoolQ
        self.CoolQRoot = monitorInfo['CoolQRoot']
        # self.CoolQImageDir = os.path.join(self.CoolQRoot, 'data', 'image')
        # room-message alert state
        self.isappeared = False
        self.timegap = 1800  # seconds of silence before the "left" summary
        self.lastPrintTime = time.time()
        self.msgCounter = MsgCounter(self.memberId)
        self.lastOtherMemberTime = 0  # last time another member appeared in the room
        if 'refreshInterval' in monitorInfo:
            self.refreshInterval = monitorInfo['refreshInterval']
        else:
            self.refreshInterval = 15

    # refresh room info
    def updateRoomInfo(self):
        """Fetch the latest room info; return 1 on success, -1 on API error."""
        response = self.api.getRoomInfo(self.token, self.memberId)
        if response['status'] != -1:
            self.roomInfo = response['data']
            return 1
        else:
            return -1

    # build room info text (despite the name, this returns a string)
    def printRoomInfo(self):
        """Return a human-readable summary of the member's room."""
        self.updateRoomInfo()
        currHot = self.getRoomHot()
        info = ''
        if self.roomInfo:
            info += self.roomInfo['memberName'] + '的房间:' + '\n'
            info += '房间名称:' + self.roomInfo['roomName'] + '\n'
            info += '房间话题:' + self.roomInfo['topic'] + '\n'
            info += '房间心情:' + self.roomInfo['moodName'] + '\n'
            info += '房间热度:' + str(currHot) + '\n'
            # info += '最后发言时间:' + self.roomInfo['lastCommentTime'] + '\n'
            info += '房间头像:' + self.roomInfo['roomAvatar'] + '\n'
            info += '房间背景:' + self.roomInfo['bgPath']
        else:
            info += '当前房间为空'
        return info

    def checkRoom(self):
        """Detect room-profile changes and report them to the debug QQ."""
        self.updateRoomInfo()
        # API field -> display label of the monitored room attributes
        monitorDicts = {'roomName': '房间名称',
                        'topic': '房间话题',
                        'moodName': '房间心情',
                        'roomAvatar': '房间头像',
                        'bgPath': '房间背景'
                        }
        modifiedKeys = []
        response = ''
        for key in list(monitorDicts.keys()):
            if self.roomInfo[key] != self.roomInfoOld[key]:
                modifiedKeys.append(key)
        if modifiedKeys:
            response = self.memberName + '修改了房间信息'
            for key in modifiedKeys:
                response += '\n修改了' + monitorDicts[key] + ':' + self.roomInfo[key]
            # persist the new snapshot so restarts don't re-report it
            self.roomInfoOld = self.roomInfo
            saveJson(self.roomInfoOld, 'config/roomInfo.json')
        if response:
            SendDebugMsgs(self.debugQQ, response)

    def getRoomHot(self):
        """Return the member's current room "hot" value.

        Pages through the hot-room ranking until the member is found;
        returns -1 when the ranking runs out without a match.
        """
        page = 1
        while True:
            result = self.api.getHotRooms(self.token, page=page,
                                          groupId=self.groupId)
            hotRooms = result['data']
            if self.memberId in hotRooms:
                return hotRooms[self.memberId]['hot']
            else:
                page += 1
                if not hotRooms:
                    return -1

    def initKD(self):
        """Log in and initialise room/live state.

        Retries each API call every 10 s; exits the whole process after
        10 consecutive failures (the bot cannot run without this state).
        """
        self.api = KD48API()
        # log in to obtain a token
        interval = 10
        loginSucc = False
        loginCnt = 0
        while not loginSucc:
            response = self.api.login(self.account, self.password)
            if response['status'] != -1:
                self.token = response['token']
                loginSucc = True
                log = response['msg']
                PrintLog(log)
            else:
                loginCnt += 1
                log = response['msg']
                PrintLog(log)
                PrintLog('%d秒钟后重试...' % interval)
                time.sleep(interval)
                if loginCnt >= 10:
                    PrintLog('登录失败!请重新启动。')
                    os.system('pause')
                    sys.exit()
        # resolve roomId / memberName / groupId from the member id
        interval = 10
        rinfoSucc = False
        rinfoCnt = 0
        while not rinfoSucc:
            response = self.api.getRoomInfo(self.token, self.memberId)
            if response['status'] != -1:
                self.roomInfo = response['data']
                self.roomId = response['data']['roomId']
                self.memberName = response['data']['memberName']
                self.groupId = response['data']['groupId']
                rinfoSucc = True
                log = response['msg']
                PrintLog(log)
            else:
                rinfoCnt += 1
                log = response['msg']
                PrintLog(log)
                PrintLog('%d秒钟后重试...' % interval)
                time.sleep(interval)
                if rinfoCnt >= 10:
                    PrintLog('获取房间信息失败!请重新启动。')
                    os.system('pause')
                    sys.exit()
        # initialise message / live-id bookkeeping
        self.msgLastTime = 0
        self.oldLiveIds = []
        self.oldReviewIds = []
        response = self.api.getRoomMsgs(self.token, self.roomId)
        if response['status'] != -1:
            messages = response['data']
            # messages[0] is the newest; only newer messages get forwarded
            self.msgLastTime = messages[0]['msgTime']
            PrintLog('初始化房间成功')
        else:
            log = response['msg']
            PrintLog(log)
            os.system('pause')
            sys.exit()
        response = self.api.getLiveList(self.token, memberId=self.memberId, limit=30)
        if response['status'] != -1:
            liveList = response['liveList']
            reviewList = response['reviewList']
            if liveList:
                for live in reversed(liveList):
                    self.oldLiveIds.append(live['liveId'])
            if reviewList:
                for review in reversed(reviewList):
                    self.oldReviewIds.append(review['liveId'])
            PrintLog('初始化直播成功')
        else:
            log = response['msg']
            PrintLog(log)
            os.system('pause')
            sys.exit()
        # room-profile snapshot used by checkRoom()
        self.roomInfoPath = 'config/roomInfo.json'
        if not os.path.exists(self.roomInfoPath):
            saveJson(self.roomInfo, self.roomInfoPath)
            self.roomInfoOld = self.roomInfo
        else:
            self.roomInfoOld = loadJson(self.roomInfoPath)

    def initial(self):
        """Wrapper around initKD() that exits the process on any failure."""
        try:
            self.initKD()
        except Exception as e:
            logging.exception(e)
            PrintLog('口袋监控初始化失败!请重新启动程序。')
            os.system('pause')
            sys.exit()

    def roomMonitor(self):
        """Poll new room messages and forward them (scheduled job)."""
        try:
            messages = []
            response = self.api.getRoomMsgs(self.token, self.roomId)
            if response['status'] != -1:
                messages = response['data']
            for msg in reversed(messages):
                if msg['msgTime'] <= self.msgLastTime:
                    continue
                msgInfo = self.api.analyzeMsg(msg, self.CoolQRoot)
                if msgInfo['ignore']:
                    continue
                # a member other than the monitored one posted
                if msgInfo['senderId'] != self.memberId and msgInfo['senderId'] > 0:
                    # announce at most once per 30 min  TODO: per-member timers
                    if time.time() - self.lastOtherMemberTime > 1800:
                        self.lastOtherMemberTime = time.time()
                        # log = '%s在%s口袋房间出现了!'%(
                        #     msgInfo['senderName'], self.memberName)
                        log_lite = '其他成员在%s口袋房间出现了!快去看看是谁!' % (self.memberName)
                        utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log_lite)
                        log_pro = '其他成员在%s口袋房间出现了!' % (self.memberName)
                        utils.SendPrivatesMsg(qqbot, self.QQIds, log_pro)
                        utils.SendGroupsMsg(qqbot, self.QQGroups_pro, log_pro)
                    log = msgInfo['printText'].strip() + '\n来自%s口袋房间' % (self.memberName)
                    # other members' messages go to "pro" receivers only
                    if msgInfo['msgType'] == 2:
                        # voice messages need special handling
                        utils.SendRecordMsg(qqbot, log, QQGroups=self.QQGroups_pro, QQIds=self.QQIds)
                    else:
                        utils.SendPrivatesMsg(qqbot, self.QQIds, log)
                        utils.SendGroupsMsg(qqbot, self.QQGroups_pro, log)
                elif msgInfo['senderId'] == self.memberId:  # the room owner posted
                    # appearance alert; reset by the 30-min silence summary below
                    if self.isappeared == False:
                        self.isappeared = True
                        log_lite = (self.memberName + '在口袋房间出现了!大家快去调戏互动啦!'
                                    '(具体消息暂停搬运,请大家移步口袋房间)')
                        utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log_lite)
                        log_pro = (self.memberName + '在口袋房间出现了!大家快去调戏互动啦!')
                        utils.SendPrivatesMsg(qqbot, self.QQIds, log_pro)
                        utils.SendGroupsMsg(qqbot, self.QQGroups_pro, log_pro)
                        self.beginHot = self.getRoomHot()
                        # start the comment-statistics session
                        self.cmtStat = {}
                        self.cmtLastTime = int(time.time() * 1000)
                        self.scheduler.add_job(self.roomCommentMonitor, 'interval', seconds=8,
                                               id='roomCommentMonitor', coalesce=True, max_instances=1)
                        time.sleep(1)
                    ##### forward the message #####
                    log = msgInfo['printText'].strip()
                    ##### pro receivers: forward everything #####
                    if msgInfo['msgType'] == 2:
                        # voice messages need special handling
                        utils.SendRecordMsg(qqbot, log, QQGroups=self.QQGroups_pro, QQIds=self.QQIds)
                    else:
                        utils.SendPrivatesMsg(qqbot, self.QQIds, log)
                        utils.SendGroupsMsg(qqbot, self.QQGroups_pro, log)
                    ##### lite receivers: forward according to sendToLite config #####
                    if msgInfo['msgType'] == 0 and self.sendToLite['text']:
                        # text message
                        utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log)
                    if msgInfo['messageObject'] == 'faipaiText' and self.sendToLite['fanpai'] \
                            and not self.sendToLite['text']:
                        # "flip card" (fan reply) message
                        utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log)
                    if msgInfo['msgType'] == 1 and self.sendToLite['image']:
                        # image message
                        utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log)
                    if msgInfo['msgType'] == 2 and self.sendToLite['audio']:
                        # audio message
                        utils.SendRecordMsg(qqbot, log, QQGroups=self.QQGroups_lite)
                    if msgInfo['msgType'] == 3 and self.sendToLite['video']:
                        # video message
                        utils.SendGroupsMsg(qqbot, self.QQGroups_lite, log)
                    self.msgCounter.counter(msgInfo)
                    self.lastPrintTime = time.time()
                    # download non-text payloads on a background thread
                    try:
                        download_thread = threading.Thread(
                            target=self.api.downloadMsg, args=(msgInfo,), daemon=True)
                        download_thread.start()
                    except Exception as e:
                        SendDebugMsgs(self.debugQQ, '多线程下载错误!')
                        logging.exception(e)
                    time.sleep(1)
                else:
                    pass
                self.msgLastTime = msgInfo['msgTime']
            # if messages:
            #     self.msgLastTime = messages[0]['msgTime']  # msgInfo['msgTime']
            # silence summary after timegap seconds without a member message
            if time.time() - self.lastPrintTime > self.timegap and self.isappeared == True:
                self.isappeared = False
                log = self.memberName + '从房间消失了半个小时了......\n'
                log += self.msgCounter.info()
                self.msgCounter.reset()
                deltaHot = self.getRoomHot() - self.beginHot
                log += "\n房间热度增加了:%d" % deltaHot
                utils.SendPrivatesMsg(qqbot, self.QQIds, log.strip())
                utils.SendGroupsMsg(qqbot, self.QQGroups_all, log.strip())
                # comment statistics, sorted by count descending
                sortedCmt = [self.cmtStat[y] for y in sorted(self.cmtStat,
                             key=lambda x: self.cmtStat[x]['count'], reverse=True)]
                log = '留言统计前10名:\n'
                # log += str(sortedCmt) + '\n'  # save to file
                log += str(sortedCmt[0:10]) + '\n'
                log += '留言人数:%d人' % len(self.cmtStat)
                utils.SendPrivatesMsg(qqbot, self.QQIds, log.strip())
                self.scheduler.remove_job('roomCommentMonitor')
        except Exception as e:
            SendDebugMsgs(self.debugQQ, '房间消息解析错误!可能跳过了消息!')
            logging.exception(e)
            # on error, skip past this batch of messages
            self.msgLastTime = messages[0]['msgTime']

    def roomCommentMonitor(self):
        """Poll room comments and accumulate per-sender counts in self.cmtStat."""
        try:
            comments = []
            response = self.api.getRoomComments(self.token, self.roomId, limit=50)
            if response['status'] != -1:
                comments = response['data']
            for cmt in reversed(comments):
                if cmt['msgTime'] <= self.cmtLastTime:
                    continue
                # msgInfo = self.api.analyzeMsg(cmt, self.CoolQRoot)
                extInfo = json.loads(cmt['extInfo'])
                senderId = extInfo['senderId']
                if senderId not in self.cmtStat:
                    self.cmtStat[senderId] = {'count': 1, 'name': extInfo['senderName']}
                else:
                    self.cmtStat[senderId]['count'] += 1
                self.cmtLastTime = cmt['msgTime']
        except Exception as e:
            SendDebugMsgs(self.debugQQ, '房间留言监控错误')
            logging.exception(e)

    def liveMonitor(self):
        """Announce newly started live streams and newly published replays."""
        try:
            liveList = []
            reviewList = []
            response = self.api.getLiveList(self.token, memberId=self.memberId)
            if response['status'] != -1:
                liveList = response['liveList']
                reviewList = response['reviewList']
            for live in reversed(liveList):
                if live['liveId'] not in self.oldLiveIds:
                    self.oldLiveIds.append(live['liveId'])
                    if live['memberId'] == self.memberId:
                        liveInfo = self.api.getLiveInfo(live, isLive=True)
                        log = live['title'] + "开始直播了!\n"
                        log += liveInfo['printText']
                        utils.SendPrivatesMsg(qqbot, self.QQIds, log.strip())
                        utils.SendGroupsMsg(qqbot, self.QQGroups_all, log.strip())
                        # stream internals go to the debug QQ only
                        secret = "直播封面图:" + liveInfo['picPath'] + "\n"
                        secret += "弹幕文件:" + liveInfo['lrcPath'] + "\n"
                        secret += "直播源:" + liveInfo['streamPath']
                        SendDebugMsgs(self.debugQQ, secret.strip())
            if not liveList and response['status'] != -1:
                # no live running: clear the seen-live ids
                del self.oldLiveIds[:]
            for review in reversed(reviewList):
                if review['liveId'] not in self.oldReviewIds:
                    if review['liveId'] in self.oldLiveIds:
                        self.oldLiveIds.remove(review['liveId'])
                    self.oldReviewIds.append(review['liveId'])
                    # self.oldReviewIds.pop(0)
                    if review['memberId'] == self.memberId:
                        liveInfo = self.api.getLiveInfo(review, isLive=False)
                        log = review['title'] + "的最新直播回放已出!\n"
                        log += liveInfo['printText']
                        utils.SendPrivatesMsg(qqbot, self.QQIds, log.strip())
                        utils.SendGroupsMsg(qqbot, self.QQGroups_all, log.strip())
        except Exception as e:
            SendDebugMsgs(self.debugQQ, '直播消息解析错误!')
            logging.exception(e)

    def run(self):
        """Start the room/live/profile polling jobs on a background scheduler."""
        PrintLog('正在启动口袋监控...')
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_job(self.roomMonitor, 'interval', seconds=self.refreshInterval,
                               id='roomMonitor', coalesce=True, max_instances=1)
        time.sleep(5)
        self.scheduler.add_job(self.liveMonitor, 'interval', seconds=self.refreshInterval,
                               id='liveMonitor', coalesce=True, max_instances=1)
        time.sleep(3)
        self.scheduler.add_job(self.checkRoom, 'interval', seconds=30, id='checkroom',
                               coalesce=True, max_instances=1)
        self.scheduler.start()
        PrintLog('所有监控器启动完毕')
##### group administration settings #####
from config import KD_admins

group_admins = KD_admins.admins['Group']
private_admins = KD_admins.admins['Private']
QQGroups = KD_admins.QQGroups
QQGroups_lite = KD_admins.QQGroups_lite
adminQQ = KD_admins.adminQQ
welcomeGroups = KD_admins.welcomeGroups
# Each command keeps a per-group cooldown timestamp:
#   'lastTime': {'group1': 0, 'group2': 0}
# level: 0 = disabled for everyone, 1 = admin-only command, 2 = any member
groupCmdAuthority = {"房间信息": {'level': 1, 'lastTime': {}},
                     "直播回放": {'level': 1, 'lastTime': {}},
                     "集资链接": {'level': 2, 'lastTime': {}},
                     "更新集资链接": {'level': 1, 'lastTime': {}},
                     "补档列表": {'level': 1, 'lastTime': {}},
                     "房间消息回放": {'level': 1, 'lastTime': {}},
                     }
############ auto-reply command handling #############
def ReplyHandler(msg):
    """Execute one chat command and return the reply text.

    :param msg: full command text from a group or private message
    :return: reply string, '' when the command is unknown or failed
    """
    global groupCmdAuthority
    result = ''
    try:
        if msg == "命令列表":
            result += '口袋命令列表:\n'
            for comm in sorted(groupCmdAuthority):
                result += comm + '\n'
            result = result.strip()
        if msg == "房间信息":
            result = monitor.printRoomInfo()
        if msg == "直播回放":
            response = monitor.api.getLiveList(monitor.token, memberId=monitor.memberId)
            if response['status'] != -1:
                reviewList = response['reviewList']
                review = reviewList[0]
                reviewInfo = monitor.api.getLiveInfo(review, isLive=False)
                result = monitor.memberName + "最近的一次直播是:\n"
                result += reviewInfo['printText']
            else:
                result = '发生错误,请重试!'
        if msg.split()[0] == "房间消息回放":
            parts = msg.split()
            if len(parts) > 2:
                # Fix: the original did ``return '参数错误'`` here, but the
                # ``return`` inside the finally block silently replaced it
                # with ''; set result so the error actually reaches the user.
                result = '参数错误'
            else:
                # optional second token = number of messages to replay
                limit = int(parts[1]) if len(parts) == 2 else 1
                response = monitor.api.getRoomMsgs(monitor.token, monitor.roomId, limit=limit)
                if response['status'] != -1:
                    messages = response['data']
                    result = monitor.memberName + "房间消息回放:\n"
                    # Fix: loop variable renamed (the original reused ``msg``
                    # and clobbered the command parameter).
                    for roomMsg in reversed(messages):
                        msgInfo = monitor.api.analyzeMsg(roomMsg)
                        result += msgInfo['printText'] + '\n\n'
                else:
                    result = '发生错误,请重试!'
        if "淘宝链接" in msg or "集资链接" in msg:
            data = loadJson('config/KD_data.json')
            result = data['moneylink']
        if msg.split()[0] == '更新集资链接':
            # Fix: str.lstrip strips a *character set*, not a prefix, so the
            # old code could also eat leading characters of the new link;
            # slice off the exact command prefix instead.
            txt = msg[len('更新集资链接'):].strip()
            data = loadJson('config/KD_data.json')
            data['moneylink'] = txt
            saveJson(data, 'config/KD_data.json')
            result = '成功更新集资链接,回复【集资链接】查看内容'
        if msg == "补档列表":
            result = longQQMsg.videoList
    except Exception as e:
        # Log and fall through: callers treat '' as "no reply".
        logging.exception(e)
    return result.strip()
# handle group messages
@qqbot.listener((RcvdGroupMessage,))
def ReplyGroupMsg(message):
    """Dispatch group-chat commands, enforcing per-group permission level
    and a 300 s per-command, per-group cooldown."""
    if message.text.strip() == "":
        return
    global groupCmdAuthority
    global group_admins
    currQQLevel = 100  # default: no permission at all
    result = ''
    if message.group in group_admins:
        if message.qq in group_admins[message.group]:
            currQQLevel = 1  # listed admin
        else:
            currQQLevel = 2  # ordinary member of a managed group
        if 'all' in group_admins[message.group]:
            currQQLevel = 1  # group where everyone is treated as admin
    currCommand = message.text.split()[0]
    if currCommand in groupCmdAuthority:
        level = groupCmdAuthority[currCommand]['level']
        lastTimeDict = groupCmdAuthority[currCommand]['lastTime']
        if message.group not in lastTimeDict:
            lastTimeDict[message.group] = 0
            lastTime = 0
        else:
            lastTime = lastTimeDict[message.group]
        # command cooldown: 300 seconds per group
        if currQQLevel <= level and time.time() - lastTime >= 300:
            result = ReplyHandler(message.text)
            lastTimeDict[message.group] = time.time()
    if result:
        msg = "{text}\n{qq}".format(text=result, qq=CQAt(message.qq))
        utils.reply(qqbot, message, msg)
# handle private messages
@qqbot.listener((RcvdPrivateMessage,))
def ReplyRrivateMsg(message):
    """Answer private-chat commands from whitelisted admins, including a
    two-step confirmed shutdown of the whole bot process."""
    if message.text.strip() == "":
        return
    global private_admins
    result = ''
    if message.qq in private_admins:
        result = ReplyHandler(message.text)
        # forced shutdown (requires explicit confirmation message)
        if message.text == '强制关闭口袋监控':
            utils.reply(qqbot, message, '你确定要强制关闭吗?\n'
                        '若确定,请回复“我确定强制关闭口袋监控”')
        if message.text == '我确定强制关闭口袋监控':
            try:
                killmyself()
                utils.reply(qqbot, message, '关闭成功')
            except Exception as e:
                utils.reply(qqbot, message, '关闭失败')
    if result:
        utils.reply(qqbot, message, result)
##### new member joins a group #####
@qqbot.listener((GroupMemberIncrease))
def Welcome(message):
    """Greet new members of configured groups, then PM them the intro text."""
    # auto-welcome in the QQ group, followed by a private promo message
    if message.group in welcomeGroups:
        try:
            text = longQQMsg.welcome
            wel = "欢迎新成员 {qq} \n{text}".format(qq=CQAt(message.operatedQQ), text=text)
            time.sleep(0.5)
            utils.SendGroupMsg(qqbot, message.group, wel)
            time.sleep(3)
            textPrivate = "{qq} {msg}".format(qq=CQAt(message.operatedQQ),
                                              msg=longQQMsg.newMemberPrivateMsg)
            utils.SendPrivateMsg(qqbot, message.operatedQQ, textPrivate)
        except Exception as e:
            logging.exception(e)
            PrintLog(e)
    else:  # other groups: just log below
        pass
    PrintLog('有新人加群 Group: %s Join QQ: %s Admin QQ: %s' % (message.group,
             message.operatedQQ, message.qq))
##### member-left monitoring #####
@qqbot.listener((GroupMemberDecrease))
def GroupMemberQuit(message):
    """Log group departures (who left and which admin handled it)."""
    log = '有人已退群 Group: %s Quit QQ: %s Admin QQ: %s' % (message.group,
          message.operatedQQ, message.qq)
    PrintLog(log)
##### start the Koudai room monitor #####
try:
    qqbot.start()
    [accountInfo, monitorInfo] = loadJson('./config/monitor.json')
    monitor = KD48Monitor(accountInfo, monitorInfo)
    monitor.initial()
    monitor.run()
except Exception as e:
    logging.exception(e)
    utils.error('启动失败\n')
    PrintLog(e)
    os.system('pause')
    sys.exit()

# main loop keeps the process alive; the scheduler runs in background threads
while True:
    time.sleep(100)
| momo-xii/KD48 | KDMonitor.py | KDMonitor.py | py | 26,545 | python | en | code | 1 | github-code | 36 |
73381842664 |
import matplotlib.pyplot as plt
import numpy as np

# IMAGE 1: show the original image
Imagen = '33.jpg'
I = plt.imread(Imagen)
plt.title('Imagen original')
plt.imshow(I)
plt.show()

# Luma weights for RGB -> grayscale conversion (ITU-R BT.601).
rgb = [0.2989, 0.5870, 0.1140]
ig = np.dot(I[..., :3], rgb)
plt.imshow(ig, cmap='gray')
plt.axis('off')
plt.savefig('b&w.png', bbox_inches='tight', pad_inches=0, dpi=1200)
plt.title('Imagen a escala de grises')
plt.show()

# Collect per-channel intensities.
# Fixes vs. the original script:
#  * the innermost "for f in range(...)" loop appended every pixel three
#    times, uniformly inflating all histogram counts by a factor of 3;
#  * channel 1 (green) was stored in `azul` and channel 2 (blue) in
#    `verde`, so the green and blue histograms were swapped.
rojo = []
verde = []
azul = []
# NOTE(review): only the top third of the rows is scanned, exactly as in
# the original (round(len(I)/3)) — confirm whether the full image was meant.
for x in range(round(len(I) / 3)):
    for h in range(len(I[x])):
        rojo.append(I[x][h][0])
        verde.append(I[x][h][1])
        azul.append(I[x][h][2])
print(len(verde))
print(len(rojo))
print(len(azul))

intervalos = range(round(min(rojo)), round(max(rojo)) + 2)  # histogram bin edges
plt.hist(x=rojo, bins=intervalos, color='r', rwidth=0.85)
plt.xticks([0, 50, 100, 150, 200, 255], [0, 50, 100, 150, 200, 255])
plt.title('Histogram of Red')
plt.xlabel('Intensity values')
plt.ylabel('Number of pixels')
plt.savefig('Histogramarojo.png', dpi=1200)
plt.show()

intervalos = range(round(min(verde)), round(max(verde)) + 2)
plt.hist(x=verde, bins=intervalos, color='y', rwidth=0.85)
plt.xticks([0, 50, 100, 150, 200, 255], [0, 50, 100, 150, 200, 255])
plt.title('Histogram of Green')
plt.xlabel('Intensity values')
plt.ylabel('Number of pixels')
plt.savefig('Histogra_verde.png', dpi=1200)
plt.show()

intervalos = range(round(min(azul)), round(max(azul)) + 2)
plt.hist(x=azul, bins=intervalos, color='b', rwidth=0.85)
plt.xticks([0, 50, 100, 150, 200, 255], [0, 50, 100, 150, 200, 255])
plt.title('Histogram of Blue')
plt.xlabel('Intensity values')
plt.ylabel('Number of pixels')
plt.savefig('Histogra_azul.png', dpi=1200)
plt.show()
| BrianCobianS/Capitulo4-Python | rgb.py | rgb.py | py | 1,806 | python | es | code | 0 | github-code | 36 |
def pali(st):
    """Return True when *st* reads the same forwards and backwards."""
    return st == st[::-1]
st = input()


def ans(st):
    """Split *st* into three non-empty palindromic parts and print them.

    Tries every pair of cut points (a, b); prints the three parts of the
    first valid split found, or "not possible" when no split exists.
    Relies on pali() defined above.
    """
    for a in range(1, len(st) - 2):
        if pali(st[:a]):
            for b in range(a + 1, len(st)):
                if pali(st[a:b]) and pali(st[b:]):
                    print(st[:a])
                    print(st[a:b])
                    print(st[b:])
                    return
    print("not possible")


ans(st)
16079144937 | from calendar_lib.constants import TEMPLATE_FILE
from calendar_lib.calendar import three_calendars
from html.html_gen import HTMLPage
from os import remove
from os.path import exists
OUTPUT_DIR = "tests/pages"  # where test-generated pages are written


def delete_file(year) -> None:
    """Remove the generated HTML page for *year* if it exists.

    An OSError from the deletion is reported and re-raised to the caller.
    """
    filename = "{}/{}.html".format(OUTPUT_DIR, year)
    if exists(filename):
        try:
            remove(filename)
        except OSError as o:
            print("Could not delete {}".format(filename))
            raise o
def compare_code(year: int) -> None:
    """Generate the calendar page for *year* and assert it matches the
    pregenerated example under examples/.

    Raises IOError (after printing a hint) when the example is missing,
    AssertionError when the generated HTML differs.
    """
    try:
        # pregenerated = "../examples/{}.html".format(year)
        pregenerated = "examples/{}.html".format(year)
        # text of the pregenerated (expected) HTML file
        with open(pregenerated, 'r') as f:
            expected = f.read()
        output_location = "{}/{}.html".format(OUTPUT_DIR, year)
        three_calendars(year).gen_html(TEMPLATE_FILE, output_location, year)
        print("{} generated".format(output_location))
        # text of the freshly generated file
        with open(output_location, 'r') as g:
            actual = g.read()
        assert actual == expected
    except IOError as i:
        print("Error: could not read {}".format(pregenerated))
        raise i
    except Exception as e:
        raise e
def test_pages() -> None:
    """
    Test pregenerated pages for given years against test-generated code
    """
    # years presumably chosen to cover century leap-year edge cases
    for year in [1800, 1900, 2000, 2017]:
        compare_code(year)
        delete_file(year)
| minishrink/calendargen | tests/html_test.py | html_test.py | py | 1,534 | python | en | code | 2 | github-code | 36 |
3093791396 | # You will be given a sequence of strings, each on a new line.
# Every odd line on the console is representing a resource (e.g. Gold, Silver, Copper, and so on)
# and every even – quantity. Your task is to collect the resources and print them each on a new line.
# Print the resources and their quantities in the following format:
# {resource} –> {quantity}
# The quantities will be in the range [1 … 2 000 000 000]
#resource = input()
def print_dict(dictionary, template):
    """Print each key/value pair of *dictionary* via template.format(key, value)."""
    for entry in dictionary.items():
        print(template.format(*entry))
my_dict = {}
# Read alternating resource/quantity lines until the sentinel 'stop';
# quantities of repeated resources are summed.
while True:
    command = input()
    if command == 'stop':
        break
    quantity = int(input())
    my_dict.setdefault(command, 0)
    my_dict[command] += quantity
# print(f'my_dict: {my_dict}')
print_dict(my_dict, "{} -> {}")
38869163941 | from pathlib import Path
import pandas as pd
from sklearn.metrics import make_scorer, accuracy_score, f1_score, roc_auc_score
from imblearn.metrics import geometric_mean_score, sensitivity_score, specificity_score
def get_slovak_data(business_area, year, postfix):
    """Load the Slovak bankruptcy data set for one business area/year.

    Reads the parsed bankrupt and non-bankrupt CSV files, concatenates
    them, and returns ``(features, labels)`` where ``labels`` is the
    IS_BANKRUPT column.
    """
    print("Loading Slovak data...")
    parsed_dir = Path(__file__).parent / "data" / "slovak_data" / "parsed_data"
    bankrupt_file = parsed_dir / "bankrupt" / "bankrupt_{}_{}_year_{}.csv".format(
        business_area, year, postfix)
    non_bankrupt_file = parsed_dir / "non_bankrupt" / "nonbankrupt_{}_{}_year_{}.csv".format(
        business_area, year, postfix)
    print("Data: {}".format(bankrupt_file))
    bankrupt_data = pd.read_csv(bankrupt_file)
    non_bankrupt_data = pd.read_csv(non_bankrupt_file)
    features = bankrupt_data.drop(["IS_BANKRUPT"], axis=1).append(
        non_bankrupt_data.drop(["IS_BANKRUPT"], axis=1))
    labels = bankrupt_data["IS_BANKRUPT"].append(non_bankrupt_data["IS_BANKRUPT"])
    print("Info: rows - {}, columns - {}".format(len(features), len(features.columns)))
    return features, labels
def get_scoring_dict():
    """Return the named scorer mapping used to evaluate classifiers.

    Combines standard sklearn metrics with imbalanced-learn metrics,
    each wrapped by make_scorer for use in cross-validation.
    """
    metric_by_name = (
        ('accuracy_score', accuracy_score),
        ('f1_score', f1_score),
        ('roc_auc_score', roc_auc_score),
        ('geometric_mean_score', geometric_mean_score),
        ('sensitivity_score', sensitivity_score),
        ('specificity_score', specificity_score),
    )
    return {name: make_scorer(metric) for name, metric in metric_by_name}
def perform_operation(operand1, operator, operand2):
    """Apply the arithmetic *operator* to the two operands.

    Returns the numeric result, "Cannot divide by zero" for a zero
    divisor, or "Invalid operator" for an unknown operator symbol.
    """
    binary_ops = {
        "+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "*": lambda a, b: a * b,
    }
    if operator in binary_ops:
        return binary_ops[operator](operand1, operand2)
    if operator == "/":
        return operand1 / operand2 if operand2 != 0 else "Cannot divide by zero"
    return "Invalid operator"
# Interactive menu loop: keeps prompting until the user chooses 5 (exit).
while True:
    print("________Select operation:_______")
    print("1. Addition (+)")
    print("2. Subtraction (-)")
    print("3. Multiplication (*)")
    print("4. Division (/)")
    print("5. Exit")
    choice = input("Enter choice (1/2/3/4/5): ")
    if choice == "5":
        print("Exiting the program.")
        break
    if choice in ("1", "2", "3", "4"):
        operand1 = float(input("Enter operand 1: "))
        operand2 = float(input("Enter operand 2: "))
        # map the menu choice to the operator symbol
        if choice == "1":
            operator = "+"
        elif choice == "2":
            operator = "-"
        elif choice == "3":
            operator = "*"
        elif choice == "4":
            operator = "/"
        result = perform_operation(operand1, operator, operand2)
        print("Result:", result)
    else:
        print("Error !Invalid choice. Please select a valid option.")
| Chiro2002/SEM_5_SE | python_and_bash/calculator.py | calculator.py | py | 1,354 | python | en | code | 1 | github-code | 36 |
# Task 17 (group exercise): given a list of numbers, determine how many
# distinct numbers it contains.
# Input: [1, 1, 2, 0, -1, 3, 4, 4]
# Output: 6
import random

length = int(input("Введите длину списка: "))
list = []
temp = 0
# fill the list with random values in [0, 10]
for i in range(length):
    list.append(random.randint(0, 10))
print(list)
# input_list_1 = [random.randint(0,10) for i in range(int(input("Введите длину списка: ")))]
# print(input_list_1)
# input_list_2 = [random.randint(0,10) for i in range(length)]
# print(input_list_2)
# print(len(set(input().split())))
colors = set(list)  # a set drops duplicates, so its size is the distinct count
print(len(colors))
print(len(set(list)))
| ALGUL1987/Python | 3Семинар. Списки и словари/31task.py | 31task.py | py | 731 | python | ru | code | 0 | github-code | 36 |
25698014660 | import string
# JSON -> XML converter (the original header said YAML, but the input is
# a .json file read line by line).
f = open('Расписание.json', "r", encoding='utf-8')
f = f.readlines()
tab_count = 0
alph = list(string.ascii_letters)
tab_count = 1
last = ''
firstWord = ''
count = 0
tab_count = 0
a = []  # stack of currently-open XML tag names
print('<?xml version="1.0" encoding="utf-8"?>')
for i in f:
    word = ''
    firstWord = 'Body'  # default/root tag when no key is found on the line
    # First pass: capture the JSON key (letters/digits/hyphen preceding a
    # ':' that follows a closing quote) into firstWord.
    for j in i:
        if j in alph or j in '0123456789-':
            letter = j
            word += j
        if j == ':' and last == '"':
            firstWord = word
            word = ''
        last = j
    # Second pass: re-scan the line, accumulating the value text into word.
    for b in i:
        if b in alph or b in '0123456789:- ':
            letter = b
            word += b
        if b == ':' and last == '"':
            word = ''
        last = b
    if i.count('{') == 1:
        # line opens an object -> emit an opening tag and push it
        if firstWord == 'Body':
            print(f'<{firstWord}>')
            count += 1
            a.append(str(firstWord))
        else:
            print(tab_count * '\t' + f'<{firstWord}>')
            count += 1
            a.append(str(firstWord))
            tab_count += 1
    elif i.count('}') == 1 and i.count(',') == 0:
        # line closes an object -> pop the stack and emit the closing tag
        tab_count -= 1
        if a[count - 1] == 'Body':
            print(f'</{a[count - 1]}>')
            a.pop(count - 1)
            count -= 1
        else:
            print(tab_count * '\t' + f'</{a[count - 1]}>')
            a.pop(count - 1)
            count -= 1
    elif i.count('},') == 1:
        tab_count -= 1
        print(tab_count * '\t' + f'</{a[count - 1]}>')
        a.pop(count - 1)
        count -= 1
    else:
        # plain key/value line -> emit a leaf element
        print(tab_count * '\t' + f'<{firstWord}>{word} </{firstWord}>')
# print(f'<{firstword}>' + f'<{word}>' + f'\<{firstword}>')
# print(tab_count)
# print(str_out)
| Sinchi1/-Computer-Science | 4 лаба/Parser1.py | Parser1.py | py | 1,705 | python | en | code | 0 | github-code | 36 |
33148392054 | import turtle
import random
screen = turtle.Screen()
bob = turtle.Turtle()
screen.bgcolor("black")
bob.pencolor("cyan")
bob.speed(10000)  # values outside 0.5..10 map to 0, i.e. fastest (turtle docs)


def crazy(var1):
    """Draw a spiral: 360 steps of growing length, turning var1 degrees each."""
    for i in range(360):
        bob.forward(i)
        bob.left(var1)


crazy(555)
# Range = 360
# Crazy = 34
# Crazy = 50
# speed = 100
# crazy = 34
# crazy = 90,167,124,78,137,656,988,555
| Sush4fc/Turtle-Module | Universal Pattern Creator.py | Universal Pattern Creator.py | py | 362 | python | en | code | 1 | github-code | 36 |
10441407969 | import xlwt
import csv
import requests
import pprint
class GitClient(object):
    """Minimal GitHub REST v3 client used interactively from the console.

    Methods return lists of display-ready dicts/strings rather than raw
    JSON; exports() can dump a session's data to CSV or XLS.
    """

    def __init__(self):
        self.base_url = 'https://api.github.com/'
        self.userName = ''
        self.password = ''
        self.autorization = False  # (sic) set True once credentials are stored
        self.notAut = ''
        self.exporter = []  # sections accumulated by exports()
        self.toWrite = {}

    def initial(self, name='def', pas='def'):
        """Store credentials; the 'def' sentinels mean "prompt on stdin"."""
        self.userName = name if name != 'def' else input('User Name: ')
        self.password = pas if pas != 'def' else input('User password: ')
        self.autorization = True

    def aboutUser(self):
        """Return display lines describing the user, or a single error line."""
        r = requests.get(self.base_url + 'users/' + self.userName)
        if r.status_code == 200:
            data = ["Username : " + self.userName, "Name : " + str(r.json().get('name')), "Email : " + str(
                r.json().get('email')), "Followers : " + str(r.json().get('followers'))]
        else:
            data = ['Error ' + str(r.status_code) + ' ' + str(r.json()['message'])]
        return data

    def getRepos(self):
        """Return [{index: repo_name}, ...] for the user's public repos."""
        data = []
        elem = {}
        response = requests.get(self.base_url + 'users/' + self.userName + '/repos')
        if response.status_code == 200:
            json = response.json()
            for i in range(len(json)):
                # print(str(i + 1) + ": " + json[i].get('name'))
                elem = {str(i + 1): json[i].get('name')}
                data.append(elem)
        else:
            elem = {
                '0': 'Error ' + str(response.status_code) + ' ' + str(response.json()['message'])
            }
            data.append(elem)
        return data

    def createRep(self, names='def'):
        """Create a repository (prompts for the name by default); return a status string."""
        name = names if names != 'def' else input('Enter repository name: ')
        data = '{"name":"' + name + '"}'
        response = requests.post('https://api.github.com/user/repos', data=data, auth=(self.userName, self.password))
        if response.status_code == 201:
            return "Repository " + name + " created"
        else:
            return ("Sorry we can't create " + name + " Repository! Error " + str(response.status_code) + " " + str(response.json()['message']))

    def repoInfo(self, names='def'):
        """Return [{field: value}] details for one repository, or an error dict."""
        data = []
        elem = {}
        response = requests.get(self.base_url + 'users/' + self.userName + '/repos')
        name = names if names != 'def' else input('Enter repository name: ')
        resCommit = requests.get(self.base_url + 'repos/' + self.userName + '/' + name + '/commits')
        resBranch = requests.get(self.base_url + 'repos/' + self.userName + '/' + name + '/branches')
        if response.status_code == 200:
            json = response.json()
            for i in range(len(json)):
                if json[i].get('name') == name:
                    jsonr = json[i]
                    commit = resCommit.json()
                    branch = resBranch.json()
                    # NOTE(review): commit/branch counts are capped by the
                    # API's default page size — confirm whether that matters.
                    elem = {
                        "Name ": jsonr.get('name'),
                        "Full name ": jsonr.get('full_name'),
                        "Language ": str(jsonr.get('language')),
                        "Count commits ": str(len(commit)),
                        "Count branches ": str(len(branch)),
                        "Forks count ": str(jsonr.get('forks_count')),
                        "Open issues count ": str(jsonr.get('open_issues_count')),
                        "Size": str(jsonr.get('size')) + " bytes"
                    }
                    data.append(elem)
        else:
            data.append({'Error': str(response.status_code) + " " + str(response.json()['message'])})
        return data

    def followers(self):
        """Return [{'follower': login}, ...]; [{'follower': 'none'}] when empty."""
        followersList = [
        ]
        response = requests.get(self.base_url + 'users/' + self.userName + '/followers')
        if response.status_code == 200:
            json = response.json()
            for i in range(len(json)):
                elem = {
                    'follower': json[i].get('login')
                }
                followersList.append(elem)
        else:
            followersList.append({'Error': str(response.status_code) + " " + str(response.json()['message'])})
        if len(followersList) == 0:
            followersList.append({'follower': 'none'})
        return followersList

    def sizeGit(self):
        """Return the summed 'size' of all the user's repos as '<n> bytes'."""
        sizeGit = 0
        res = ''
        response = requests.get(self.base_url + 'users/' + self.userName + '/repos')
        if response.status_code == 200:
            json = response.json()
            for i in range(len(json)):
                sized = requests.get(self.base_url + 'repos/' + self.userName + '/' + json[i].get('name'))
                if sized.status_code == 200:
                    sized = sized.json()
                    sizeGit += + float(sized.get('size'))
                else:
                    res = 'Error ' + str(response.status_code) + " " + str(response.json()['message'])
            res = str(sizeGit) + ' bytes'
        else:
            res = 'Error ' + str(response.status_code) + " " + str(response.json()['message'])
        return res

    def prints(self, obj):
        """Pretty-print a list of dicts as "key: value" lines."""
        toPrint = {}
        for elem in obj:
            toPrint.update(elem)
        for k in toPrint:
            print(k + ": " + toPrint[k])

    def exports(self):
        """Interactively gather account data and export it to CSV or XLS."""
        self.initial()
        saveAs = input('enter format saved (csv or xls)')
        user = [{
            'User': self.userName,
            'Size Git Repositories': self.sizeGit()
        }]
        self.exporter.append(self.getRepos())
        self.exporter.append(self.repoInfo())
        self.exporter.append(self.followers())
        self.exporter.append(user)
        data = self.exporter
        toWrite = {}
        for elem in data:
            # NOTE(review): ``type(elem) == 'dict'`` compares a type object
            # to a *string*, so it is always False and the else branch always
            # runs — confirm whether ``isinstance(elem, dict)`` was intended.
            if type(elem) == 'dict':
                toWrite.update(elem)
            else:
                for sub in elem:
                    toWrite.update(sub)
        for k in toWrite:
            print(k + ": " + toWrite[k])
        name = input('Enter file name: ')
        if saveAs.lower() == 'csv':
            file = open(name + '.csv', 'w')
            writer = csv.writer(file, delimiter=";", quotechar='"')
            writer.writerows(toWrite)
            file.close()
            print('File saved as ' + name + '.csv')
        elif saveAs.lower() == 'xls':
            book = xlwt.Workbook()
            sh = book.add_sheet("About")
            for n, k in enumerate(toWrite):
                print(n, k, toWrite[k])
                sh.write(n, 0, k)
                sh.write(n, 1, toWrite[k])
            book.save(name + ".xls")
            print('File saved as ' + name + '.xls')
        else:
            print('Incorrect file sys!!!')
| Delight116/003-004-be-addo-Cmd_and_Html_GitClient | GitClient.py | GitClient.py | py | 6,608 | python | en | code | 0 | github-code | 36 |
13549248356 | import random
import Crypto
from Crypto.PublicKey import RSA
from Crypto import Random
import ast
import pyDes
from Crypto.Cipher import DES
from config import max_input_bit_length
import time
def OT_transfer(m0, m1, bit):
    """1-out-of-2 oblivious transfer, simulated locally (Python 2 code).

    Plays both parties in one process: Alice's RSA key pair is loaded from
    PEM files on disk, Bob's choice bit selects which of the two key-masked
    messages he can actually unmask.

    :param m0, m1: the sender's two integer messages
    :param bit: receiver's choice (1 selects m1, anything else selects m0)
    :return: the chosen message, recovered as an int
    """
    m0 = int(m0)
    m1 = int(m1)
    # Alice's two random blinding values
    x0 = random.randint(0, 2**1024 - 1)
    x1 = random.randint(0, 2**1024 - 1)
    # load Alice's public key material
    publickeyfile = ""
    with open('alice-public.pem', 'r') as f:
        publickeyfile = f.read()
    # print(publickeyfile)
    n = 0
    with open('alice-publickey.n.pem', 'r') as f:
        n = f.read()
    # print(n)
    n = long(n)  # RSA modulus (``long`` => Python 2 only)
    publickey = RSA.importKey(publickeyfile)
    with open('alice-private.pem', 'r') as f:
        privatekeyfile = f.read()
    privatekey = RSA.importKey(privatekeyfile)
    # Bob chooses a random k and blinds it with the x value matching his bit
    k = random.randint(0, 2**32 - 1)
    # send x1, so choose 1 instead of index 0
    if (bit == 1):
        v = (x1 + publickey.encrypt(long(k), 32)[0]) % (n)
    else:
        v = (x0 + publickey.encrypt(long(k), 32)[0]) % (n)
    # Alice decrypts against both blinds; only one recovers Bob's real k
    k0 = privatekey.decrypt(v - x0) % n
    k1 = privatekey.decrypt(v - x1) % n
    m00 = m0 + k0
    m11 = m1 + k1
    # Bob removes his k from the message matching his choice bit
    if (bit == 1):
        mf = m11 - k
    else:
        mf = m00 - k
    return mf
# BUG FIX: time.clock() was deprecated since 3.3 and removed in Python 3.8;
# perf_counter() is the documented replacement for wall-clock timing.
start = time.perf_counter()

n = max_input_bit_length

# Bob runs this file.  Alice has already published her garbled wire labels
# in alice_ran_keys.txt; Bob fetches the label matching each of his input
# bits via 1-out-of-2 oblivious transfer so Alice never learns his input.
with open('alice_ran_keys.txt') as f:
    alice_ran_keys = f.read().split()

print("please input bob's data ")
bob_bit = int(input())
print("bob_input: ", bob_bit)

bob_input = []
# Labels for the two lowest input bits sit at fixed offsets in the table.
bob_input.append(OT_transfer(alice_ran_keys[8], alice_ran_keys[9], bob_bit & 0b1))
# BUG FIX: OT_transfer compares its selector against exactly 1, so the raw
# shifted value (bob_bit >> 1) picked the wrong slot whenever it exceeded 1,
# i.e. for any input wider than 2 bits.  Mask to a single bit, matching the
# `& 0b1` pattern the loop below already uses.
bob_input.append(OT_transfer(alice_ran_keys[2], alice_ran_keys[3], (bob_bit >> 1) & 0b1))
bob_input.append(OT_transfer(alice_ran_keys[0], alice_ran_keys[1], (bob_bit >> 1) & 0b1))
bob_bit = bob_bit >> 2

# Remaining bits of the augmented comparator: two labels per input bit,
# starting at table offset 36 with a stride of 22 entries per bit.
if n > 2:
    for i in range(n - 2):
        bob_input.append(OT_transfer(alice_ran_keys[36 + i * 22], alice_ran_keys[37 + i * 22], bob_bit & 0b1))
        bob_input.append(OT_transfer(alice_ran_keys[38 + i * 22], alice_ran_keys[39 + i * 22], bob_bit & 0b1))
        bob_bit = bob_bit >> 1

# Hand the fetched labels to the circuit evaluator, one per line.
with open('inputsb.txt', 'w') as f:
    for label in bob_input:
        f.write(str(label) + '\n')

end = time.perf_counter()
print("Time for doing obvious transfer ", end - start)
| makeapp007/cryptography | mpc/code/nbit-comparator/ot.py | ot.py | py | 2,974 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.