seq_id: string
text: string
repo_name: string
sub_path: string
file_name: string
file_ext: string
file_size_in_byte: int64
program_lang: string
lang: string
doc_type: string
stars: int64
dataset: string
pt: string
api: list
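The fields above make up one record per source file: the raw code in "text", repository metadata ("repo_name", "sub_path", "file_name", "file_ext", "file_size_in_byte", "stars"), language tags ("program_lang", "lang", "doc_type"), and a list of API usage annotations in "api". A minimal sketch of iterating over such records, assuming they are stored as JSON lines; the path "code_records.jsonl" and the filtering criteria are hypothetical, not part of the dataset itself:

import json

# Hypothetical path; each line is assumed to hold one record with the
# fields listed above (seq_id, text, repo_name, ..., api).
RECORDS_PATH = "code_records.jsonl"

def iter_python_records(path):
    """Yield records whose program_lang is 'python' and that have at least one star."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            record = json.loads(line)
            if record.get("program_lang") == "python" and record.get("stars", 0) > 0:
                yield record

if __name__ == "__main__":
    for rec in iter_python_records(RECORDS_PATH):
        # file_size_in_byte and stars are int64 fields; text holds the raw source.
        print(rec["seq_id"], rec["file_name"], rec["file_size_in_byte"], rec["stars"])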
72531926909
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable

import os
import shutil
from pathlib import Path
from typing import Callable

import pytest
import yaml


@pytest.fixture
def tmp_compose_spec(tests_data_dir: Path, tmp_path: Path):
    src = tests_data_dir / "docker-compose-meta.yml"
    dst = tmp_path / "docker-compose-meta.yml"
    shutil.copyfile(src, dst)
    return dst


def test_create_new_osparc_config(
    run_program_with_args: Callable, tmp_compose_spec: Path
):
    osparc_dir = tmp_compose_spec.parent / ".osparc"
    assert not osparc_dir.exists()

    result = run_program_with_args(
        "config",
        "--from-spec-file",
        str(tmp_compose_spec),
    )
    assert result.exit_code == os.EX_OK, result.output

    assert osparc_dir.exists()
    meta_cfgs = set(osparc_dir.glob("./*/metadata.y*ml"))
    runtime_cfgs = set(osparc_dir.glob("./*/runtime.y*ml"))
    assert len(runtime_cfgs) == len(meta_cfgs)
    assert {f.parent for f in meta_cfgs} == {f.parent for f in runtime_cfgs}

    service_names = set(yaml.safe_load(tmp_compose_spec.read_text())["services"].keys())
    assert service_names == set({f.parent.name for f in meta_cfgs})
ITISFoundation/osparc-simcore
packages/service-integration/tests/test_command_config.py
test_command_config.py
py
1,231
python
en
code
35
github-code
6
[ { "api_name": "pathlib.Path", "line_number": 14, "usage_type": "name" }, { "api_name": "shutil.copyfile", "line_number": 17, "usage_type": "call" }, { "api_name": "pytest.fixture", "line_number": 13, "usage_type": "attribute" }, { "api_name": "typing.Callable", ...
15304075993
from sklearn import model_selection from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.model_selection import train_test_split import pandas #names=['Total no of questions','Average time to solve each questions','No of questions marked for review','No of questions right','Average no of clicks in each page'] dataset = pandas.read_csv("dataset.csv") #dataset = dataset.apply(pandas.to_numeric,errors='ignore') ##cols.remove('Index') ##cols = dataset.columns ##for col in cols: ## try: ## dataset[col] = float(dataset[col]) ## except: ## pass array = dataset.values array=array[1:] X = array[:,0:5] Y = array[:,5] print(X) print(Y) validation_size = 0.20 seed = 7 X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed) print("X_train",X_train) print("Y_train",Y_train) print("X_validation",X_validation) print("Y_validation",Y_validation) seed = 7 scoring = 'accuracy' # Spot Check Algorithms models = [] models.append(('LR', LogisticRegression())) models.append(('LDA', LinearDiscriminantAnalysis())) models.append(('KNN', KNeighborsClassifier())) models.append(('CART', DecisionTreeClassifier())) models.append(('NB', GaussianNB())) models.append(('SVM', SVC())) # evaluate each model in turn results = [] names = [] for name, model in models: kfold = model_selection.KFold(n_splits=10, random_state=seed) cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring) results.append(cv_results) names.append(name) msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()) print(msg) knn = KNeighborsClassifier() knn.fit(X_train, Y_train) predictions = knn.predict(X_validation) print("Actual Validators",Y_validation) print("predictions",predictions) print(accuracy_score(Y_validation, predictions)) print(confusion_matrix(Y_validation, predictions)) print(classification_report(Y_validation, predictions)) model = DecisionTreeClassifier() model.fit(X_train, Y_train) # make predictions predictions = model.predict(X_validation) #prediction = model.predict([[2.8,15,18,180]]) #print("prediction",prediction) # summarize the fit of the model print("predictions",predictions) print(accuracy_score(Y_validation, predictions)) print(confusion_matrix(Y_validation, predictions)) print(classification_report(Y_validation, predictions))
tunir27/ICDCN-2019
Chennai_Floods_code/ML.py
ML.py
py
2,819
python
en
code
0
github-code
6
[ { "api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call" }, { "api_name": "sklearn.model_selection.train_test_split", "line_number": 34, "usage_type": "call" }, { "api_name": "sklearn.model_selection", "line_number": 34, "usage_type": "name" }, { "...
3240628422
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response

from entrepreneur.models import Usuario
from authentication.serializers import UserListSerializers, UserSerializer


@api_view(['GET', 'POST'])
def user_api_view(request):

    # list the active users registered in the system
    if request.method == 'GET':
        users = Usuario.objects.filter(is_active=True).values('id', 'username', 'email', 'password', 'first_name')
        user_serializer = UserListSerializers(users, many=True)
        return Response(user_serializer.data, status=status.HTTP_200_OK)

    elif request.method == 'POST':
        """ Enables user creation on request """
        user_serializer = UserSerializer(data=request.data)
        if user_serializer.is_valid():
            user_serializer.save()
            return Response({'message': 'Usuario creado correctamente!'}, status=status.HTTP_201_CREATED)
        return Response(user_serializer.errors, status=status.HTTP_400_BAD_REQUEST)


@api_view(['GET', 'PUT', 'DELETE'])
def user_detail_api_view(request, pk=None):
    user = Usuario.objects.filter(id=pk).first()

    # Retrieve
    if user:
        if request.method == 'GET':
            user_serializer = UserSerializer(user)
            return Response(user_serializer.data, status=status.HTTP_200_OK)

        # Update
        elif request.method == 'PUT':
            user_serializer = UserSerializer(user, data=request.data)
            if user_serializer.is_valid():
                user_serializer.save()
                return Response(user_serializer.data, status=status.HTTP_200_OK)
            return Response(user_serializer.errors, status=status.HTTP_400_BAD_REQUEST)

        # Delete
        elif request.method == 'DELETE':
            user.is_active = False
            user.save()
            return Response({'message': 'Usuario eliminado correctamente!'}, status=status.HTTP_201_CREATED)

    return Response({'message': 'Sin datos para la consulta, favor corregir y reintentar!'}, status=status.HTTP_400_BAD_REQUEST)
DevApa/auth_em
register/api.py
api.py
py
2,157
python
en
code
0
github-code
6
[ { "api_name": "entrepreneur.models.Usuario.objects.filter", "line_number": 12, "usage_type": "call" }, { "api_name": "entrepreneur.models.Usuario.objects", "line_number": 12, "usage_type": "attribute" }, { "api_name": "entrepreneur.models.Usuario", "line_number": 12, "usa...
2737490777
import wikipedia
import pyfiglet

word = pyfiglet.figlet_format("KAREN")
print(word)

while True:
    engine = input("Search: ")

    def my_summary():
        summ = wikipedia.summary(engine)
        return summ

    val = my_summary()
    print(val)
Shuklabrother/Search-engine.py
Index.py
Index.py
py
272
python
en
code
0
github-code
6
[ { "api_name": "pyfiglet.figlet_format", "line_number": 4, "usage_type": "call" }, { "api_name": "wikipedia.summary", "line_number": 11, "usage_type": "call" } ]
33138124964
#mass import import pandas as pd import urllib.request import json import numpy as np import matplotlib.pyplot as plt from datetime import datetime, timedelta from sklearn.cluster import KMeans pd.options.display.max_rows = 999 #for getting the date 5 days ago daydif = str(datetime.today() - timedelta(days=5)) dayref = str(daydif[0:10]) today = str(datetime.today().strftime('%Y-%m-%d')) #for retrieving the data from the APIs link = ["https://environment.data.gov.uk/flood-monitoring/id/stations/46160/readings?since=",dayref, "&_sorted&parameter=rainfall"] final = "" final = final.join(link) #print(final) web = urllib.request.Request(final) response = urllib.request.urlopen(web) the_page = response.read() jason = json.loads(the_page) link1 = ["http://environment.data.gov.uk/flood-monitoring/id/measures/46126-level-stage-i-15_min-m/readings?since=",dayref] final1 = "" final1 = final1.join(link1) #print(final1) web1 = urllib.request.Request(final1) response1 = urllib.request.urlopen(web1) the_page1 = response1.read() jason1 = json.loads(the_page1) #creates dataframes for each API df1 = pd.DataFrame(jason1["items"]) df1 = df1.sort_values('dateTime', ascending = True) df = pd.DataFrame(jason["items"]) df = df.sort_values('dateTime', ascending = True) #merged table containing level and fall, plots a graph a = pd.merge(df, df1, on = 'dateTime', how = 'left') b = a[['dateTime', 'value_x', 'value_y']].copy() b = b.rename(columns = {"dateTime" : "Date/Time", "value_x" : "Rainfall", "value_y" : "River Level"}) #Calculates hourly results c = b[['Rainfall', 'River Level']] d = c['River Level'].groupby(c.index//4).mean() d = d.diff() e = c.groupby(c.index//4)['Rainfall'].sum() hourly = pd.concat([d, e], axis = 1) drip = hourly['Rainfall'].max() drip = int(drip * 10) calc = [] tester = pd.DataFrame() for i in range (0, drip+1): x = i/10 s = hourly.Rainfall.eq(x) out = pd.DataFrame() out['River'] = hourly.loc[s.shift(1, axis = 0) | s.shift(2, axis = 0), 'River Level'] runner = len(out) out['Rain'] = x tester = pd.concat([tester, out]) tester = tester.dropna() #Machine learning : Kmeans clustering reg = KMeans(n_clusters = 2, random_state = 0).fit(tester[['River']]) #Producing a graph of the regression plt.scatter(tester['Rain'], tester['River'], color = 'Blue', marker = '+') plt.plot(tester['Rain'], reg.predict(tester[['Rain']]), color = 'Red') plt.show() #Producing an example prediction pred = pd.DataFrame() #print(pred) end = len(b.index) for i in range(1, end): prod = pd.DataFrame() #print(b) for row in b.itertuples(): temp = pd.DataFrame() temp['RDelta'] = reg.predict([[row.Rainfall]]) temp['Rainfall'] = row.Rainfall pred = pd.concat([pred, temp]) endriver = pd.DataFrame() riverstart = b['River Level'].iloc[0] for row in pred.itertuples(): rtemp = pd.DataFrame({'RPred': [riverstart]}) endriver = pd.concat([endriver, rtemp], ignore_index = True) riverstart = riverstart + row.RDelta #Producing a dataframe with the prediction and all other data together fin = pd.merge(b, endriver, left_index = True, right_index = True) print(fin) #Plotting a graph of expected riverlevel and the actual river level plt.plot(fin['Date/Time'],fin['River Level'],label = 'River Level') plt.plot(fin['Date/Time'],fin['Rainfall'], label = 'Rainfall') plt.plot(fin['Date/Time'],fin['RPred'], label = 'Predicted') plt.locator_params(axis = 'Date/Time', nbins = 10) plt.xticks(rotation = 'vertical') ax = plt.gca() ax.set_xticks(ax.get_xticks()[::48]) plt.show()
nicr0ss/RainDance
KMeans_model.py
KMeans_model.py
py
3,524
python
en
code
0
github-code
6
[ { "api_name": "pandas.options", "line_number": 9, "usage_type": "attribute" }, { "api_name": "datetime.datetime.today", "line_number": 12, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 12, "usage_type": "name" }, { "api_name": "datetime...
19581541837
import textract import re import os import requests from bs4 import BeautifulSoup import time import random # ===================================== get paper url ===================================== urls = [ 'https://sj.ctu.edu.vn/ql/docgia/nam-2015/loaichuyensan-2/xuatban-782.html', 'https://sj.ctu.edu.vn/ql/docgia/nam-2017/loaichuyensan-2/xuatban-1222/chuyensan-250.html', 'https://sj.ctu.edu.vn/ql/docgia/nam-2011/loaichuyensan-2/xuatban-182.html', 'https://sj.ctu.edu.vn/ql/docgia/nam-2013/loaichuyensan-2/xuatban-442/chuyensan-250.html', 'https://sj.ctu.edu.vn/ql/docgia/nam-2020/loaichuyensan-2/xuatban-2002.html' 'https://sj.ctu.edu.vn/ql/docgia/nam-2018/loaichuyensan-2/xuatban-1402.html', 'https://sj.ctu.edu.vn/ql/docgia/nam-2018/loaichuyensan-2/xuatban-1522.html' ] paper_url = [] for url in urls: page = requests.get(url) data = BeautifulSoup(page.content, 'html.parser') elements = data.select('.div-left.chitiet.grid_05') for e in elements: paper_url.append(str(e.parent.get("href"))) # ===================================== download pdf file ===================================== def download_file(writefile, url): r = requests.get(url, allow_redirects=True) with open(writefile, 'wb') as f: f.write(r.content) pdf_folder = '/content/drive/MyDrive/data/dhct' raw_text_folder = '/content/drive/MyDrive/data/dhct_raw_txt' clean_text_folder = '/content/drive/MyDrive/data/dhct_clean' for i, url in enumerate(paper_url): sleep_time = random.randint(1, 5) print(f'file {i} -- sleep in {sleep_time}s') file = os.path.join(pdf_folder, f'paper_{i}.pdf') download_file(file, url) time.sleep(sleep_time) # ===================================== convert pdf to raw txt ===================================== def convert_pdf_to_raw_txt(pdf_file, txt_file): text = textract.process(pdf_file, language='eng') text = text.decode('utf-8') with open(txt_file, 'w', encoding='utf-8') as f: f.write(text) # convert pdf to raw txt file. Raw mean there are still invalid characters for file in os.listdir(pdf_folder): file_name = file.split('.')[0] + '.txt' pdf_file = os.path.join(pdf_folder, file) txt_file = os.path.join(raw_text_folder, file_name) convert_pdf_to_raw_txt(pdf_file, txt_file) # ===================================== clean raw data ===================================== from util.shared import read_file, write_to_file from .text_preprocessor import TextPreprocessor processor = TextPreprocessor() # clean raw text for file in os.listdir(raw_text_folder): text = read_file(os.path.join(raw_text_folder, file)) text = processor.remove_invalid_unicode(text) text = re.sub('Tap chi Khoa hoc Trương Đai hoc Cân Thơ', 'Tạp chí Khoa học Trường Đại học Cần Thơ', text) text = re.sub('Trương Đai hoc Cân Thơ', 'Trường Đại học Cần Thơ', text) text = re.sub('Trương Đai hoc', 'Trường Đại học', text) text = re.sub('Tap chı Khoa hoc Trươ ng Đai hoc Cân Thơ', 'Tạp chí Khoa học Trường Đại học Cần Thơ', text) write_to_file(os.path.join(clean_text_folder, file), text) print(file)
oldguard69/lvtn
server/core/archive/crawl_data_1.py
crawl_data_1.py
py
3,229
python
en
code
0
github-code
6
[ { "api_name": "requests.get", "line_number": 22, "usage_type": "call" }, { "api_name": "bs4.BeautifulSoup", "line_number": 23, "usage_type": "call" }, { "api_name": "requests.get", "line_number": 31, "usage_type": "call" }, { "api_name": "random.randint", "lin...
8870924854
from collections import deque import sys while True: try: a=str(input('Input final configuration: ')) N=int(a.replace(' ','')) if ((N>87654321)|(N<12345678)): raise ValueError('Incorrect configuration, giving up...') break except ValueError: print('Incorrect configuration, giving up...') sys.exit() final=[int(i) for i in str(N)] def initial_state(N): initial_list = [] for i in range(1, N + 1): locals()['d' + str(i)] = i name_list = [('d' + '%d' % i) for i in range(1, N + 1)] for i in range(N): initial_list.append(vars()[name_list[i]]) return initial_list def row_exchange(L): r_L = list(L) d8, d7, d6, d5, d4, d3, d2, d1 = r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7] r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7] = d1, d2, d3, d4, d5, d6, d7, d8 return r_L def right_circular_shift(L): r_L = list(L) d1, d2, d3, d4, d5, d6, d7, d8 = r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7] r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7] = d4, d1, d2, d3, d6, d7, d8, d5 return r_L def middle_clockwise_rotation(L): r_L = list(L) d1, d2, d3, d4, d5, d6, d7, d8 = r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7] r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7] = d1, d7, d2, d4, d5, d3, d6, d8 return r_L def Rubik_Rectangle(initial_list, final_conf_list): steps_need = 0 created_list = list(initial_list) check_list = [] created_total = [] created_all=deque() created_all.append([1, 2, 3, 4, 5, 6, 7, 8]) temp_created_all=[] Flag = False temp_created_all=[] temp_created_all.append([1, 2, 3, 4, 5, 6, 7, 8]) don_t_know=set() don_t_know.add((1, 2, 3, 4, 5, 6, 7, 8)) def List_compare(list_1, list_2, i): if list_1 == list_2: return True else: return False for i in range(23): #print('I',i) #print('LENTH OF ALL',len(created_all)) steps_need = i if initial_list == final_conf_list: return steps_need else: for element in range(0, len(created_all)): created_list = created_all[element] if tuple(row_exchange(created_list)) not in don_t_know: created_total.append(row_exchange(created_list)) check_list = row_exchange(created_list) Flag = List_compare(check_list, final_conf_list, steps_need) if Flag == True: #print(check_list) return steps_need + 1 for element in range(0, len(created_all)): created_list = created_all[element] if tuple(right_circular_shift(created_list)) not in don_t_know: created_total.append(right_circular_shift(created_list)) check_list = right_circular_shift(created_list) Flag = List_compare(check_list, final_conf_list, steps_need) if Flag == True: #print(check_list) return steps_need + 1 for element in range(0, len(created_all)): created_list = created_all[element] if tuple(middle_clockwise_rotation(created_list)) not in don_t_know: created_total.append(middle_clockwise_rotation(created_list)) check_list = middle_clockwise_rotation(created_list) Flag = List_compare(check_list, final_conf_list, steps_need) if Flag == True: #print(check_list) return steps_need + 1 #print('------------------------------------------') #bug bug_L=[1,3,7,14,26,51,92,159,274,453,720,1115,1727,2603,3701,4729,5620,6240,5840,4492,2120,328,5,0] created_all1=deque(maxlen=bug_L[steps_need+1]) for i in range(len(created_total)): if tuple(created_total[i]) not in don_t_know: created_all1.append(created_total[i]) don_t_know.add(tuple(created_total[i])) temp_created_all=list(created_all1) created_all=list(created_all1) for num in range(len(created_all)): check_list = created_all[num] Flag = List_compare(check_list, final_conf_list, steps_need) if Flag 
== True: #print(check_list) return steps_need + 1 created_total = list() initial_list=[1,2,3,4,5,6,7,8] #final = [1,5,3,2,4,6,7,8] #print('step',Rubik_Rectangle(initial_list, final)) nb_of_stairs=Rubik_Rectangle(initial_list, final) stair_or_stairs = 'step is' if nb_of_stairs <= 1 else 'steps are' print(f'{nb_of_stairs} {stair_or_stairs} needed to reach the final configuration.')
hanxuwu/Learning-Python
Principles of Programming/Assignment/Assignment1/ASS question2.files/rubiks_rectangle.py
rubiks_rectangle.py
py
5,218
python
en
code
3
github-code
6
[ { "api_name": "sys.exit", "line_number": 13, "usage_type": "call" }, { "api_name": "collections.deque", "line_number": 55, "usage_type": "call" }, { "api_name": "collections.deque", "line_number": 110, "usage_type": "call" } ]
35154021664
import json
import os

import cherrypy
from jinja2 import Environment, FileSystemLoader

# GET CURRENT DIRECTORY
from helper import get_redis_connection, get_sorted_list
from scrapper import main1

CUR_DIR = os.path.dirname(os.path.abspath(__file__))
env = Environment(loader=FileSystemLoader(CUR_DIR), trim_blocks=True)


class Index(object):

    @cherrypy.expose
    def index(self):
        template = env.get_template('templates/home.html')
        r = get_redis_connection()
        try:
            users = json.loads(r.get('ten_users'))
        except:
            main1()  # If file was not downloaded then download the latest file.
            users = json.loads(r.get('ten_users'))
        return template.render(users=users, name='JAYANTH')


@cherrypy.expose
class UserService(object):

    @cherrypy.tools.json_out()
    def POST(self, name):
        r = get_redis_connection()
        users = json.loads(r.get('users'))
        response = {}
        try:
            res = [user for user in users if name.lower() in user["name"].lower()]  # search by name (substring match)
            result = get_sorted_list(res, 'dict')
            response['success'] = True
            response['users'] = result[:10]
            response['length'] = res.__len__()
            return response
        except:
            response['success'] = False
            return response


if __name__ == '__main__':
    conf = {
        '/': {
            'tools.sessions.on': True,
            'tools.staticdir.root': os.path.abspath(os.getcwd())
        },
        '/get_users': {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'tools.response_headers.on': True,
        },
        '/static': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': './static'
        }
    }

    webapp = Index()
    webapp.get_users = UserService()
    cherrypy.server.socket_host = '0.0.0.0'
    cherrypy.quickstart(webapp, '/', conf)
jayanthns/bseproect
run.py
run.py
py
1,974
python
en
code
0
github-code
6
[ { "api_name": "os.path.dirname", "line_number": 11, "usage_type": "call" }, { "api_name": "os.path", "line_number": 11, "usage_type": "attribute" }, { "api_name": "os.path.abspath", "line_number": 11, "usage_type": "call" }, { "api_name": "jinja2.Environment", ...
70808003389
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

model = load_model("/home/yash/Desktop/PyImageSearch/checkpoints/emotion1.h5")
classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

img_size = 48
validation_dir = "/home/yash/Desktop/PyImageSearch/deep_survelliance_detector/fer2013/validation/"

validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(validation_dir,
                                                              color_mode="grayscale",
                                                              target_size=(img_size, img_size),
                                                              batch_size=32,
                                                              class_mode="categorical",
                                                              shuffle=False)

# getting class labels
class_labels = validation_generator.class_indices
class_labels = {v: k for v, k in class_labels.items()}

# predicting
test_img = "/home/yash/Downloads/happy1.jpg"


def get_label(prediction):
    for key, val in class_labels.items():
        if prediction == val:
            return key
    return -1


def predict(test_img):
    img = cv2.imread(test_img, cv2.IMREAD_GRAYSCALE)
    faces = classifier.detectMultiScale(img, scaleFactor=1.2, minNeighbors=7)
    face = []
    for (x, y, w, h) in faces:
        roi_gray = img[y:y+h, x:x+w]
        roi = cv2.resize(roi_gray, (img_size, img_size), interpolation=cv2.INTER_AREA)
        face.append(roi)
    num_image = np.array(face, dtype=np.float32)
    num_image /= 255.0
    num_image = num_image.reshape(1, 48, 48, 1)
    predicted = model.predict(num_image)[0]  # returns a list of probabilities of diff classes
    pred = predicted.argmax()  # getting the max value in the list
    label = get_label(pred)
    return label


pred_class = predict(test_img)

original_image = mpimg.imread(test_img)
plt.xlabel("Predicted: {0}".format(str(pred_class)))
plt.imshow(original_image)
plt.show()
theartificialguy/Deep-Learning-Projects
Emotion and Gender Classification/Emotion Classification/recognition.py
recognition.py
py
1,841
python
en
code
2
github-code
6
[ { "api_name": "tensorflow.keras.models.load_model", "line_number": 8, "usage_type": "call" }, { "api_name": "cv2.CascadeClassifier", "line_number": 9, "usage_type": "call" }, { "api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator", "line_number": 15, "usag...
27673629381
import numpy as np
import math
import cv2


# input hsvColor: [h, s, v], ranges between (0-180, 0-255, 0-255)
# and output true hsvColor: [h, s, v]
def get_right_V(hsvColor):
    h = float(hsvColor[0])
    s = float(hsvColor[1])
    s1 = float(hsvColor[1]) / 255
    v1 = float(hsvColor[2])
    h60 = h / 60.0
    h60f = math.floor(h60)
    f = h60 - h60f
    if f < 0.5:
        v = 3 * v1 * (1 - f * s1) / (3 - 2 * s1)
    elif f >= 0.5:
        v = 3 * v1 * (1 - s1 + f * s1) / (3 - 2 * s1)
    return [h, s, min(v, 255)]


if __name__ == "__main__":
    # input and output path for image.
    PATH_TO_WRONG_IMAGE = './wrong_image.jpg'
    PATH_TO_RIGHT_IMAGE = './right_image.jpg'

    image_wrong = cv2.imread(PATH_TO_WRONG_IMAGE)
    image_wrong_hsv = cv2.cvtColor(image_wrong, cv2.COLOR_BGR2HSV)
    shape = image_wrong.shape
    image_right_hsv = np.zeros(shape, dtype=np.uint8)

    # iterate over every pixel to change the V value.
    for row in range(shape[0]):
        for col in range(shape[1]):
            image_right_hsv[row][col] = get_right_V(image_wrong_hsv[row][col])

    image_right = cv2.cvtColor(image_right_hsv, cv2.COLOR_HSV2BGR)
    cv2.imwrite(PATH_TO_RIGHT_IMAGE, image_right)
zznewclear13/Gradient_Colorizing_Fix
Gradient_Colorzing_Fix.py
Gradient_Colorzing_Fix.py
py
1,205
python
en
code
0
github-code
6
[ { "api_name": "math.floor", "line_number": 13, "usage_type": "call" }, { "api_name": "cv2.imread", "line_number": 27, "usage_type": "call" }, { "api_name": "cv2.cvtColor", "line_number": 28, "usage_type": "call" }, { "api_name": "cv2.COLOR_BGR2HSV", "line_numb...
26693866815
import torch import logging from tqdm import tqdm from schnetpack.src.schnetpack import properties __all__ = ["TorchStructureLBFGS"] class TorchStructureLBFGS(torch.optim.LBFGS): """ LBFGS optimizer that allows for relaxation of multiple structures in parallel. The approximation of the inverse hessian is shared across the entire batch (all structures). Hence, it is recommended to use this optimizer preferably for batches of similar structures/compositions. In other cases, please utilize the ASELBFGS optimizer, which is particularly constructed for batches of different structures/compositions. This optimizer is an extension/adaptation of the torch.optim.LBFGS optimizer particularly designed for relaxation of atomic structures. In addition to the inherited features, this optimizer allows for fixing the positions of a set of atoms during the relaxation and a method to run the optimizer. Latter allows for setting a convergence criterium. Furthermore, we implemented a logging method that prints out the largest force in the system after each optimization iteration. """ def __init__( self, model, model_inputs, fixed_atoms_mask, maxstep=None, logging_function=None, lr: float = 1.0, energy_key: str = "energy", position_key: str = properties.R, ): """ Args: model (schnetpack.model.AtomisticModel): ml force field model model_inputs: input batch containing all structures fixed_atoms_mask (list(bool)): list of booleans indicating to atoms with positions fixed in space. maxstep (float): how far is a single atom allowed to move. (default: None) logging_function: function that logs the structure of the systems during the relaxation lr (float): learning rate (default: 1) energy_key (str): name of energies in model (default="energy") position_key (str): name of atomic positions in model (default="_positions") """ self.model = model self.energy_key = energy_key self.position_key = position_key self.fixed_atoms_mask = fixed_atoms_mask self.model_inputs = model_inputs self.logging_function = logging_function self.fmax = None self.maxstep = maxstep R = self.model_inputs[self.position_key] R.requires_grad = True super().__init__(params=[R], lr=lr) def _gather_flat_grad(self): """override this function to allow for keeping atoms fixed during the relaxation""" views = [] for p in self._params: if p.grad is None: view = p.new(p.numel()).zero_() elif p.grad.is_sparse: view = p.grad.to_dense().view(-1) else: view = p.grad.view(-1) views.append(view) flat_grad = torch.cat(views, 0) if self.fixed_atoms_mask is not None: flat_grad[self.fixed_atoms_mask] = 0.0 self.flat_grad = flat_grad return flat_grad def _add_grad(self, step_size, update): offset = 0 if self.maxstep is not None: step_size = self.determine_step_size(step_size, update) for p in self._params: numel = p.numel() # view as to avoid deprecated pointwise semantics p.add_(update[offset : offset + numel].view_as(p), alpha=step_size) offset += numel assert offset == self._numel() def determine_step_size(self, step_size, update): """Determine step to take according to maxstep Normalize all steps as the largest step. This way we still move along the eigendirection. 
""" reshaped_update = update.view(-1, 3) steplengths = ((step_size * reshaped_update) ** 2).sum(1) ** 0.5 longest_step = torch.max(steplengths) # check if any step in entire batch is greater than maxstep if longest_step >= self.maxstep: # rescale all steps logging.info("normalized integration step") step_size *= self.maxstep / longest_step return step_size def closure(self): results = self.model(self.model_inputs) self.zero_grad() loss = results[self.energy_key].sum() loss.backward() return loss def log(self, forces=None): """log relaxation results such as max force in the system""" if forces is None: forces = self.flat_grad.view(-1, 3) if not self.converged(): logging.info("NOT CONVERGED") logging.info( "max. atomic force: {}".format(torch.sqrt((forces**2).sum(axis=1).max())) ) def converged(self, forces=None): """Did the optimization converge?""" if forces is None: forces = self.flat_grad.view(-1, 3) return (forces**2).sum(axis=1).max() < self.fmax**2 def run(self, fmax, max_opt_steps): """run relaxation""" self.fmax = fmax # optimization for opt_step in tqdm(range(max_opt_steps)): self.step(self.closure) # log structure if self.logging_function is not None: self.logging_function(opt_step) # stop optimization if max force is smaller than threshold if self.converged(): break self.log() def get_relaxed_structure(self): return self.model_inputs[self.position_key]
maltefranke/solubility_prediction
schnetpack/src/schnetpack/interfaces/batchwise_optimizer.py
batchwise_optimizer.py
py
5,582
python
en
code
1
github-code
6
[ { "api_name": "torch.optim", "line_number": 9, "usage_type": "attribute" }, { "api_name": "schnetpack.src.schnetpack.properties.R", "line_number": 32, "usage_type": "attribute" }, { "api_name": "schnetpack.src.schnetpack.properties", "line_number": 32, "usage_type": "name...
13015171086
import pickle import torch import argparse from foresight.models import * from foresight.pruners import * from foresight.dataset import * from foresight.weight_initializers import init_net def get_num_classes(args): return 100 if args.dataset == 'cifar100' else 10 if args.dataset == 'cifar10' else 120 def parse_arguments(): parser = argparse.ArgumentParser(description='Zero-cost Metrics for NAS-Bench-201') parser.add_argument('--api_loc', default='data/NAS-Bench-201-v1_0-e61699.pth', type=str, help='path to API') parser.add_argument('--outdir', default='./', type=str, help='output directory') parser.add_argument('--init_w_type', type=str, default='none', help='weight initialization (before pruning) type [none, xavier, kaiming, zero]') parser.add_argument('--init_b_type', type=str, default='none', help='bias initialization (before pruning) type [none, xavier, kaiming, zero]') parser.add_argument('--batch_size', default=64, type=int) parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use [cifar10, cifar100, ImageNet16-120]') parser.add_argument('--gpu', type=int, default=0, help='GPU index to work on') parser.add_argument('--num_data_workers', type=int, default=2, help='number of workers for dataloaders') parser.add_argument('--dataload', type=str, default='random', help='random or grasp supported') parser.add_argument('--dataload_info', type=int, default=1, help='number of batches to use for random dataload or number of samples per class for grasp dataload') parser.add_argument('--seed', type=int, default=42, help='pytorch manual seed') parser.add_argument('--write_freq', type=int, default=1, help='frequency of write to file') parser.add_argument('--start', type=int, default=0, help='start index') parser.add_argument('--end', type=int, default=0, help='end index') parser.add_argument('--noacc', default=False, action='store_true', help='avoid loading NASBench2 api an instead load a pickle file with tuple (index, arch_str)') args = parser.parse_args() args.device = torch.device("cuda:"+str(args.gpu) if torch.cuda.is_available() else "cpu") return args if __name__ == '__main__': args = parse_arguments() if args.noacc: api = pickle.load(open(args.api_loc,'rb')) else: from nas_201_api import NASBench201API as API api = API(args.api_loc) torch.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False train_loader, val_loader = get_cifar_dataloaders(args.batch_size, args.batch_size, args.dataset, args.num_data_workers) cached_res = [] pre='cf' if 'cifar' in args.dataset else 'im' pfn=f'nb2_{pre}{get_num_classes(args)}_seed{args.seed}_dl{args.dataload}_dlinfo{args.dataload_info}_initw{args.init_w_type}_initb{args.init_b_type}.p' op = os.path.join(args.outdir,pfn) args.end = len(api) if args.end == 0 else args.end #loop over nasbench2 archs for i, arch_str in enumerate(api): if i < args.start: continue if i >= args.end: break res = {'i':i, 'arch':arch_str} net = nasbench2.get_model_from_arch_str(arch_str, get_num_classes(args)) net.to(args.device) init_net(net, args.init_w_type, args.init_b_type) arch_str2 = nasbench2.get_arch_str_from_model(net) if arch_str != arch_str2: print(arch_str) print(arch_str2) raise ValueError measures = predictive.find_measures(net, train_loader, (args.dataload, args.dataload_info, get_num_classes(args)), args.device) res['logmeasures']= measures if not args.noacc: info = api.get_more_info(i, 'cifar10-valid' if args.dataset=='cifar10' else args.dataset, iepoch=None, hp='200', is_random=False) 
trainacc = info['train-accuracy'] valacc = info['valid-accuracy'] testacc = info['test-accuracy'] res['trainacc']=trainacc res['valacc']=valacc res['testacc']=testacc #print(res) cached_res.append(res) #write to file if i % args.write_freq == 0 or i == len(api)-1 or i == 10: print(f'writing {len(cached_res)} results to {op}') pf=open(op, 'ab') for cr in cached_res: pickle.dump(cr, pf) pf.close() cached_res = []
SamsungLabs/zero-cost-nas
nasbench2_pred.py
nasbench2_pred.py
py
4,715
python
en
code
137
github-code
6
[ { "api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call" }, { "api_name": "torch.device", "line_number": 33, "usage_type": "call" }, { "api_name": "torch.cuda.is_available", "line_number": 33, "usage_type": "call" }, { "api_name": "torch.cu...
9001276762
from time import time

from gurobipy import Model


class MDSP:

    def __init__(self, d: list, filename: str, optimize=False, time_limit=3600):
        self.D = d
        self.B = sum(d)
        self.k = len(self.D)
        self.D_ = self.get_unique_distances()
        self.M = self.get_mult()
        self.P = list(range(self.B + 1))
        self.filename = filename

        if optimize:
            self.P = self.valid_points()

        self.model = Model()
        self.model.setParam('LogFile', f'{filename}.log')
        self.model.setParam('LogToConsole', 0)
        self.model.setParam('TimeLimit', time_limit)

    def solve(self):
        t1 = time()
        self.model.update()
        self.model.optimize()
        t2 = time()
        self.write_time_file(t1, t2)
        self.model.write(f'{self.filename}.lp')
        if self.model.status == 3:
            print('Infeasible')
        else:
            self.model.write(f'{self.filename}.sol')
            print('Obj: %s' % self.model.ObjVal)

    def get_unique_distances(self):
        d = list(set(self.D))
        d.sort()
        return d

    def get_mult(self):
        m = dict()
        for i in self.D_:
            m[i] = self.D.count(i)
        return m

    def valid_points(self):
        c = {0}
        for d in self.D:
            t = set()
            for p in c:
                t = t.union({p - d, p + d})
            c = c.union(t)
        p = [x for x in c if x >= 0]
        p.sort()
        return p

    def write_time_file(self, t1, t2):
        with open(f'{self.filename}.time', 'w') as file:
            file.write(f'{self.model.Status}\n')
            file.write(f'{t2 - t1}\n')
            file.close()
cleberoli/mdsp
model/mdsp.py
mdsp.py
py
1,706
python
en
code
0
github-code
6
[ { "api_name": "gurobipy.Model", "line_number": 19, "usage_type": "call" }, { "api_name": "time.time", "line_number": 25, "usage_type": "call" }, { "api_name": "time.time", "line_number": 28, "usage_type": "call" } ]
41295805453
import pygame
from pygame.locals import *
import numpy as np
from conway import Life


class GameOfLife(Life):

    def __init__(self, width=1600, height=1000, cell_size=5, speed=10):
        Life.__init__(self, width // 10, height // 10)
        self.width = width
        self.height = height
        self.cell_size = cell_size

        # Set the window size
        self.screen_size = width, height
        # Create a new window
        self.screen = pygame.display.set_mode(self.screen_size)

        # Compute the number of cells vertically and horizontally
        self.cell_width = self.width // self.cell_size
        self.cell_height = self.height // self.cell_size

        # Speed of the game
        self.speed = speed
        self.Mboard()

    def draw_grid(self):
        for x in range(0, self.width, self.cell_size):
            pygame.draw.line(self.screen, pygame.Color('black'), (x, 0), (x, self.height))
        for y in range(0, self.height, self.cell_size):
            pygame.draw.line(self.screen, pygame.Color('black'), (0, y), (self.width, y))

    def cells(self):
        for row in range(self.rows):
            for column in range(self.columns):
                if self.board[row, column] == 1:
                    pygame.draw.rect(self.screen, (57, 255, 20),
                                     ((row * self.cell_size) + 1, (column * self.cell_size) + 1,
                                      self.cell_size - 1, self.cell_size - 1))
                else:
                    pygame.draw.rect(self.screen, (255, 255, 255),
                                     ((row * self.cell_size) + 1, (column * self.cell_size) + 1,
                                      self.cell_size - 1, self.cell_size - 1))

    def run(self):
        pygame.init()
        clock = pygame.time.Clock()
        pygame.display.set_caption('Game of Life')
        self.screen.fill(pygame.Color('white'))

        running = True
        while running:
            for event in pygame.event.get():
                if event.type == QUIT:
                    running = False
            self.draw_grid()
            self.cells()
            self.cell()
            pygame.display.flip()
            clock.tick(self.speed)
        pygame.quit()


if __name__ == '__main__':
    game = GameOfLife()
    game.run()
hatiff/GameLife
Life.py
Life.py
py
2,437
python
en
code
0
github-code
6
[ { "api_name": "conway.Life", "line_number": 6, "usage_type": "name" }, { "api_name": "conway.Life.__init__", "line_number": 8, "usage_type": "call" }, { "api_name": "conway.Life", "line_number": 8, "usage_type": "name" }, { "api_name": "pygame.display.set_mode", ...
25867921082
#import libraries import pandas as pd import numpy as np import bokeh from bokeh.plotting import figure, output_file, show from bokeh.models.tools import HoverTool from bokeh.core.properties import value from bokeh.models import ColumnDataSource, FactorRange from bokeh.plotting import figure import math from bokeh.models import Range1d, LabelSet, Label class Flight_Arrivals(): def __init__(self): pass def flights(self): avion=pd.read_csv("fly_mia.csv",encoding="latin-1") a=avion a['est_arr_time'] = a['est_arr_time'].str.replace('?', '') a['est_arr_time']=a['est_arr_time'].str.replace(r"\(.*\)","") a=a[a.est_arr_time.str.contains('0')] sun1=a[a.est_arr_time.str.contains('Sun')] sun1['est_arr_time'] = sun1['est_arr_time'].str.replace('Sun', '2019-08-18') sun1['dep_time'] = sun1['dep_time'].str.replace('Sun', '2019-08-18') sat1=a[a.est_arr_time.str.contains('Sat')] sat1['est_arr_time'] = sat1['est_arr_time'].str.replace('Sat', '2019-08-17') sat1['dep_time'] = sat1['dep_time'].str.replace('Sat', '2019-08-17') fri1=a[a.est_arr_time.str.contains('Fri')] fri1['est_arr_time'] =fri1['est_arr_time'].str.replace('Fri', '2019-08-16') fri1['dep_time'] =fri1['dep_time'].str.replace('Fri', '2019-08-16') ok2=pd.concat([sun1,sat1,fri1],axis=0) ok2['dep_time'] =ok2['dep_time'].str.replace('Fri', '2019-08-16') ok2['dep_time'] =ok2['dep_time'].str.replace('Sat', '2019-08-17') ok2['dep_time']=pd.to_datetime(ok2['dep_time']) ok2['est_arr_time']=pd.to_datetime(ok2['est_arr_time']) ok2['flight_time']=ok2['est_arr_time']-ok2['dep_time'] ok2['flight_time']=ok2['flight_time'].dt.total_seconds() ok2['flight_time']=ok2['flight_time']/60 #to minutes #airport time zones (departure zones) #1. cest cest=ok2[ok2.origin.str.contains('MAD|ZRH|BRU|MXP|CDG|DUS|FCO|VIE|FRA|Pisa|BCN|ZAZ|WAW|ORY|AMS')] cest['flight_time']=cest['flight_time']+360 cest['flight_time'] = cest['flight_time'].apply(lambda x: 561 if x < 400 else x) #2.south american flights sa=ok2[ok2.origin.str.contains("GIG|FOR|COR|EZE|Dois de|BSB|GRU|REC|MVD|BEL|SNU")] sa['flight_time']=sa['flight_time']+60 sa['flight_time']=sa['flight_time'].apply(lambda x: 451.5 if x<350 else x) otro=ok2[~ok2.origin.str.contains('MAD|ZRH|BRU|MXP|CDG|DUS|FCO|VIE|FRA|Pisa|BCN|ZAZ|WAW|ORY|AMS|GIG|FOR|COR|EZE|Dois de|BSB|GRU|REC|MVD|BEL|SNU')] todos=pd.concat([cest,sa,otro],axis=0) # percent of flights less one hour bins=[0,60,120,180,240,300,360,420,480,540,600,660] todos['flight_bins']=pd.cut(todos['flight_time'], bins) pct_time=todos['flight_bins'].value_counts() pct_time=pd.DataFrame(pct_time) pct_time.reset_index(level=0,inplace=True) pct_time['pct']=pct_time['flight_bins']/todos.shape[0] #ii. variance by origin vaR=todos.groupby('origin')['flight_time'].var() vaR.sort_values() #iii. 
arrives by part of the day tiempo=todos[["origin","est_arr_time"]] t=tiempo t['hours']=t['est_arr_time'].dt.hour t['minutes']=t['est_arr_time'].dt.minute mid_six=t[(t.hours>=0) & (t.hours<=6)] seven_twelve=t[(t.hours>=7) & (t.hours<=12)] one_six=t[(t.hours>=13) & (t.hours<=18)] seven_twelve1=t[(t.hours>=19) & (t.hours<=23)] #percent arrivals by time of the day mid_sixP=mid_six.shape[0]/t.shape[0] seven_twelveP=seven_twelve.shape[0]/t.shape[0] one_sixP=one_six.shape[0]/t.shape[0] seven_twelveP1=seven_twelve1.shape[0]/t.shape[0] #origin counts ori=t['origin'].value_counts() ori=pd.DataFrame(ori) ori.reset_index(level=0,inplace=True) ori.columns=['origin','total'] #time between flights tX=todos tX.sort_values(['origin','dep_time'],inplace=True) tX['diff_dep']=tX['dep_time'].diff() mask=tX.origin !=tX.origin.shift(1) tX['diff_dep'][mask]=np.nan tX['diff_dep']=tX['diff_dep'].dt.total_seconds() tX['diff_dep']=tX['diff_dep']/60 #to minutes tX.iloc[0:10] tX=tX[~(tX.diff_dep==0)] takeoffs=tX.groupby('origin')['diff_dep'].median() takeoffs=takeoffs.sort_values() takeoffs=pd.DataFrame(takeoffs) take=takeoffs take=take[take.diff_dep>=1] take1=take[take.diff_dep<=80] s=t s=s.set_index('est_arr_time') s=s.loc['2019-08-17 00:00:00':'2019-08-17 23:59:59'] #VIZ I #east coast time vs. cst,pdt, and mdt (comparing flight times) west_cent=tX[tX.origin.str.contains('LAX|SFO|LAS|SEA|SAN|SNU|DFW|MEX|MDW|MSY|CMW|MEM|ORD|TUL|MSP|MCI|STL|MID|IAH|VRA|PNS|GDL|MTY|KSAT|BHM|SCU|HOG|TLC|HSV')] east=tX[tX.origin.str.contains('NAS|PHI|Toron|Bahama|DCA|HAV|ORF|TPA|LGA|JAX|SAV|SDF|PIE|GGT|PLS|CVG|PIT|CHS|CLE|JFK|CAP|IND|DTW|KEY|CMH|BUF|RDU|SFB|MYEH|MYAM|CYUL|GSP|PBI|RIC|GSO|FMY|BDL|BWI|KTEB|ZSA|KMLB|KAPF|SGJ')] #length of flights wc=west_cent['flight_bins'].value_counts() wc=pd.DataFrame(wc) wc.columns=['flight_time'] wc.reset_index(level=0,inplace=True) wc=wc.sort_values(by="index") wc=wc.set_index('index') ea=east['flight_bins'].value_counts() ea=pd.DataFrame(ea) ea.columns=['flight_time'] ea.reset_index(level=0,inplace=True) ea=ea.sort_values(by="index") ea=ea.set_index('index') factors=[("0-60"),("60-120"),("120-180"), ("180-240"),("240-300"),("300-360"),("360-420"),("420-480"),("480-540"),("540-600"),("600-660")] regions=['east_time_zone','other_time_zone'] east_data=ea.flight_time.tolist() west_data=wc.flight_time.tolist() source=ColumnDataSource(data=dict(x=factors,east_time_zone=east_data,other_time_zone=west_data,)) p = figure(x_range=FactorRange(*factors), plot_height=250,toolbar_location=None, tools="") p.vbar_stack(regions, x='x', width=0.9, alpha=0.5, color=["orange", "purple"], source=source,legend=[value(x) for x in regions]) p.y_range.start = 0 p.y_range.end = 120 p.x_range.range_padding = 0.1 p.xaxis.major_label_orientation = 1 p.xgrid.grid_line_color = None p.xaxis.axis_label='Flight Time (Minutes)' p.yaxis.axis_label='Frequency' p.legend.location = "top_right" p.legend.orientation = "horizontal" output_file("mia1.html") #show(p) #VIZ II (time between departures) source1=ColumnDataSource(take1) airports=source1.data['origin'].tolist() p1=figure(x_range=airports) p1.vbar_stack(stackers=['diff_dep'],x='origin',source=source1,width=0.5) p1.title.text='Time Between Flight Departures' p1.title.align="center" p1.title.text_color="orange" p1.xaxis.major_label_orientation = math.pi/4.25 p1.xaxis.axis_label='' p1.yaxis.axis_label='Minutes' hover=HoverTool() hover.tooltips=[("Time Between Flights","@diff_dep minutes")] hover.mode='vline' p1.add_tools(hover) output_file("mia2.html") #show(p1) #VIZ III (what time of 
the day do flights arrive?) time_arr=['Midnight to 7 AM','7 AM to 1 PM','1 PM to 7 PM','7 PM to Midnight'] counts=[mid_sixP,seven_twelveP1,one_sixP,seven_twelveP1] palette=['lavender','plum','darkviolet','indigo'] source = ColumnDataSource(data=dict(time_arr=time_arr, counts=counts)) p = figure(x_range=time_arr, plot_height=250, toolbar_location=None, title="When Do Flights to X Arrive?") p.vbar(x='time_arr', top='counts', width=0.5, source=source, color="teal", line_color='white') p.xgrid.grid_line_color = None p.y_range.start = 0.0 p.y_range.end = 0.6 p.xaxis.axis_label="" p.yaxis.major_label_overrides = {0:'0',0.1:'10%',0.2:'20%',0.3:'30%',0.4:'40%',0.5:'50%'} p.yaxis.axis_label="Total Flights" p.legend.orientation = "horizontal" p.legend.location = "top_center" p.title.align="center" output_file("mia3.html") #show(p) #VIZ IV (outlier flights time plot) top_diez=tX['origin'].value_counts() top_diez=pd.DataFrame(top_diez) top_diez.reset_index(level=0,inplace=True) air_names=top_diez.iloc[0:10]["index"] an=air_names an0=an.iloc[0] an1=an.iloc[1] an2=an.iloc[2] an3=an.iloc[3] an4=an.iloc[4] an5=an.iloc[5] an6=an.iloc[6] an7=an.iloc[7] an8=an.iloc[8] an9=an.iloc[9] sub_air=tX[(tX.origin==an0) | (tX.origin==an1) | (tX.origin==an2) | (tX.origin==an3) | (tX.origin==an4) | (tX.origin==an5) | (tX.origin==an6) | (tX.origin==an7) | (tX.origin==an8) | (tX.origin==an9)] df=pd.DataFrame(dict(flight_time=sub_air['flight_time'],group=sub_air['origin'])) originS=df['group'].unique().tolist() groups=df.groupby('group') q1=groups.quantile(q=0.25) q2=groups.quantile(q=0.50) q3=groups.quantile(q=0.75) iqr=q3-q1 upper=q3+1.5*iqr lower=q1-1.5*iqr #find outliers in each group def outliers(group): originS=group.name return group[(group.flight_time > upper.loc[originS]['flight_time']) | (group.flight_time < lower.loc[originS]['flight_time'])]['flight_time'] out=groups.apply(outliers).dropna() #prepare outlier data for plotting if not out.empty: outx=[] outy=[] for keys in out.index: outx.append(keys[0]) outy.append(out.loc[keys[0]].loc[keys[1]]) p = figure(tools="", background_fill_color="#efefef", x_range=originS, toolbar_location=None) #if no outliers, shrink lengths of stems to be no longer than the minimums or maximums qmin=groups.quantile(q=0.00) qmax=groups.quantile(q=1.00) upper.score=[min([x,y]) for (x,y) in zip(list(qmax.loc[:,'flight_time']),upper.flight_time)] lower.score = [max([x,y]) for (x,y) in zip(list(qmin.loc[:,'flight_time']),lower.flight_time)] # stems p.segment(originS, upper.flight_time, originS, q3.flight_time, line_color="black") p.segment(originS, lower.flight_time, originS, q1.flight_time, line_color="black") # boxes p.vbar(originS, 0.7, q2.flight_time, q3.flight_time, fill_color="aqua", line_color="black") p.vbar(originS, 0.7, q1.flight_time, q2.flight_time, fill_color="maroon", line_color="black") # whiskers (almost-0 height rects simpler than segments) p.rect(originS, lower.flight_time, 0.2, 0.01, line_color="black") p.rect(originS,upper.flight_time, 0.2, 0.01, line_color="black") # outliers if not out.empty: p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6) p.xgrid.grid_line_color = None p.ygrid.grid_line_color = "white" p.grid.grid_line_width = 2 p.xaxis.major_label_text_font_size="12pt" p.xaxis.major_label_orientation = 3.5/2 p.xaxis.axis_label = '' p.yaxis.axis_label = 'Flight Time (minutes)' p.title.text='Flights That Are Shorter or Longer Than Average' p.title.align="center" output_file('mia4x.html') #show(p) #VIZ V dep=tX['diff_dep'].tolist() 
time=tX['flight_time'].tolist() airports=tX['origin'].tolist() source=ColumnDataSource(data=dict(dep=dep,time=time,airports=airports)) p=figure(title="Flight Time Vs. Time Between Departures",x_range=Range1d(0,1000)) p.scatter(x="dep",y="time",size=4,source=source) p.xaxis[0].axis_label="Time Between Flights (Minutes)" p.yaxis[0].axis_label="Flight Time (Minutes)" labels = LabelSet(x='dep', y='time', text='airports', level='glyph',x_offset=5, y_offset=5, source=source, render_mode='canvas') p.add_layout(labels) show(p) if __name__=='__main__': flights=Flight_Arrivals() flights.flights()
Fremont28/miami_flights-
flights_viz1.py
flights_viz1.py
py
12,432
python
en
code
0
github-code
6
[ { "api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call" }, { "api_name": "pandas.concat", "line_number": 34, "usage_type": "call" }, { "api_name": "pandas.to_datetime", "line_number": 38, "usage_type": "call" }, { "api_name": "pandas.to_datetime",...
75189166908
import pygame, threading

pygame.init()

white = (255, 255, 255)
green = (0, 255, 0)
blue = (0, 0, 128)

X = 400
Y = 400

display_surface = pygame.display.set_mode((X, Y))
pygame.display.set_caption('Show Text')

font = pygame.font.Font('freesansbold.ttf', 32)
text = font.render('GeeksForGeeks', True, green, blue)
textRect = text.get_rect()
textRect.center = (X // 2, Y // 2)


def updateGUI():
    while True:
        display_surface.fill(white)
        display_surface.blit(text, textRect)

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()

        pygame.display.update()


GUI = threading.Thread(target=updateGUI, args=())
GUI.daemon = True
GUI.start()
ger534/Proyecto2Arqui2
examplePygame.py
examplePygame.py
py
779
python
en
code
0
github-code
6
[ { "api_name": "pygame.init", "line_number": 3, "usage_type": "call" }, { "api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call" }, { "api_name": "pygame.display", "line_number": 10, "usage_type": "attribute" }, { "api_name": "pygame.display...
73025036669
from enum import Enum
from typing import List

import sqlalchemy as sa
from sqlalchemy import orm as so

from .base import BaseMixin, db, IdentityMixin, TimestampMixin

__all__ = ['Chat', 'ChatEntry']


class Chat(BaseMixin, IdentityMixin, TimestampMixin, db.Model):
    """Chat Model.

    Represents a chat conversation in the application.
    """

    __tablename__ = 'chats'

    title: so.Mapped[str] = so.mapped_column(
        sa.String(64),
        unique=False,
        index=True,
        nullable=False,
    )

    entry: so.Mapped[List['ChatEntry']] = so.relationship(
        back_populates='chat',
    )

    def teaser(self, length=None) -> str:
        """Return a teaser for the chat."""
        if not self.entry:
            return ''

        first_entry = min(self.entry, key=lambda x: x.created_at)
        length = length or 150

        if len(first_entry.content) > length:
            return first_entry.content[:length - 3] + '...'
        return first_entry.content[:length]

    @classmethod
    def create_new_chat(cls, title=None):
        """Create a new chat and return the new chat."""
        title = title or 'New chat'
        new_chat = cls.create(title=title)
        new_chat.save()
        return new_chat


class ChatEntry(BaseMixin, IdentityMixin, TimestampMixin, db.Model):
    """ChatEntry Model.

    Represents a single message in a Chat.
    """

    class Role(Enum):
        """Represents the role of a :class:`.ChatEntry`."""

        USER = 'user'
        ASSISTANT = 'assistant'

        def __str__(self) -> str:
            """Get a string representation of this role.

            :return: The name of this role.
            :rtype: str
            """
            return self.name.strip().lower()

    __tablename__ = 'chat_entries'

    content: so.Mapped[str] = so.mapped_column(
        sa.Text,
        nullable=False,
    )

    chat_id: so.Mapped[int] = so.mapped_column(
        sa.ForeignKey('chats.id'),
        nullable=False
    )

    role: so.Mapped[Role] = so.mapped_column(
        sa.Enum(
            Role,
            name='role_types',
            values_callable=lambda obj: [str(item.value) for item in obj]
        ),
        nullable=False,
    )

    chat: so.Mapped['Chat'] = so.relationship(
        back_populates='entry',
    )
sergeyklay/promptly
backend/promptly/models/chat.py
chat.py
py
2,313
python
en
code
1
github-code
6
[ { "api_name": "base.BaseMixin", "line_number": 12, "usage_type": "name" }, { "api_name": "base.IdentityMixin", "line_number": 12, "usage_type": "name" }, { "api_name": "base.TimestampMixin", "line_number": 12, "usage_type": "name" }, { "api_name": "base.db.Model",...
73674519546
'''This script contains the functions used to contruct and train the GAN.''' import numpy as np import pandas as pd import matplotlib.pyplot as plt import os import tensorflow as tf # Change the environment variable TF_CPP_MIN_LOG_LEVEL to 2 to avoid the orderbooks about the compilation of the CUDA code os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' from data_utils import * import argparse import logging # from tensorflow.keras.utils import plot_model from model_utils import * import math import gc from scipy.stats import wasserstein_distance import sys if __name__ == '__main__': parser = argparse.ArgumentParser( description='''Main script used to train the GAN.''') parser.add_argument("-l", "--log", default="info", help=("Provide logging level. Example --log debug', default='info'")) parser.add_argument('-N', '--N_days', type=int, help='Number of the day to consider') parser.add_argument('-d', '--depth', help='Depth of the orderbook', type=int) parser.add_argument('-bs', '--batch_size', help='Batch size', type=int) parser.add_argument('-ld', '--latent_dim', help='Latent dimension', type=int) parser.add_argument('-nlg', '--n_layers_gen', help='Number of generator layers', type=int) parser.add_argument('-nld', '--n_layers_disc', help='Number of discriminator layers', type=int) parser.add_argument('-tg', '--type_gen', help='Type of generator model (conv, lstm, dense)', type=str) parser.add_argument('-td', '--type_disc', help='Type of discriminator model (conv, lstm, dense)', type=str) parser.add_argument('-sc', '--skip_connection', action='store_true', help='Use or not skip connections') parser.add_argument('-Tc', '--T_condition', help='Number of time steps to condition on', type=int, default=2) parser.add_argument('-Tg', '--T_gen', help='Number of time steps to generate', type=int, default=1) parser.add_argument('-ls', '--loss', help='Loss function (original, wasserstein)', type=str, default='original') parser.add_argument('-lo', '--load', help='Load a model. 
The job_id must be provided', type=int, default=0) args = parser.parse_args() levels = {'critical': logging.CRITICAL, 'error': logging.ERROR, 'warning': logging.WARNING, 'info': logging.INFO, 'debug': logging.DEBUG} if os.getenv("PBS_JOBID") != None: job_id = os.getenv("PBS_JOBID") else: job_id = os.getpid() logging.basicConfig(filename=f'train_{job_id}.log', format='%(message)s', level=levels[args.log]) logger = tf.get_logger() logger.setLevel('ERROR') # Set the seed for TensorFlow to the number of the beast tf.random.set_seed(666) # Print the current date and time current_datetime = pd.Timestamp.now() formatted_datetime = current_datetime.strftime("%Y-%m-%d %H:%M:%S") logging.info(f"Current Date and Time:\n\t {formatted_datetime}") # Enable device placement logging tf.debugging.set_log_device_placement(True) # Load the data stock = 'MSFT' date = '2018-04-01_2018-04-30_5' total_depth = 5 N = args.N_days depth = args.depth logging.info(f'Stock:\n\t{stock}') logging.info(f'Number of days:\n\t{N}') physical_devices = tf.config.experimental.list_physical_devices('GPU') if len(physical_devices) == 0: logging.info("No GPUs available.") else: logging.info("Available GPUs:") tf.config.experimental.set_memory_growth(physical_devices[0], True) for device in physical_devices: logging.info(f'\t{device}\n') # Folders creation os.mkdir(f'plots/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}') # Model architecture plots, metrics plots os.mkdir(f'generated_samples/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}') # Generated samples os.mkdir(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}') # Models # Create the orderbook dataframe orderbook_df = create_orderbook_dataframe(N, previos_days=False) # Define the parameters of the GAN. Some of them are set via argparse T_condition = args.T_condition T_gen = args.T_gen window_size = T_condition + T_gen n_features_input = orderbook_df.shape[1] n_features_gen = 2*depth latent_dim = args.latent_dim n_epochs = 5000 batch_size = args.batch_size # Define the parameters for the early stopping criterion best_gen_weights = None best_disc_weights = None best_wass_dist = float('inf') patience_counter = 0 patience = 200 num_pieces = 5 if not os.path.exists(f'../data/input_train_{stock}_{window_size}_day{N}_orderbook.npy'): logging.info('\n[Input] ---------- PREPROCESSING ----------') data_input = orderbook_df.values # data_input = np.load(f'anomaly_data_{N}.npy') # logging.info(f'\nAre anomaly_data and normal_data the same?\n\t{np.all(data_input == data_input_a)}') # exit() # Divide input data into overlapping pieces sub_data, length = divide_into_overlapping_pieces(data_input, window_size, num_pieces) if sub_data[-1].shape[0] < window_size: raise ValueError(f'The last piece has shape {sub_data[-1].shape} and it is smaller than the window size {window_size}.') logging.info(f'Number of windows: {length}') # Create a memmap to store the scaled data. 
final_shape = (length-num_pieces*(window_size-1), window_size, n_features_input) fp = np.memmap("final_data.dat", dtype='float32', mode='w+', shape=final_shape) start_idx = 0 logging.info(f'\nStart scaling the data...') for piece_idx, data in enumerate(sub_data): logging.info(f'\t{piece_idx+1}/{num_pieces}') windows = np.array(divide_into_windows(data, window_size)) logging.info(f'\twindows shape: {windows.shape}') end_idx = start_idx + windows.shape[0] fp[start_idx:end_idx] = windows start_idx = end_idx del windows # Explicit deletion logging.info('Done.') np.save(f'normal_data_{N}.npy', fp) logging.info('\nDividing each window into condition and input...') condition_train, input_train = fp[:, :T_condition, :], fp[:, T_condition:, :n_features_gen] logging.info('Done.') logging.info(f'input_train shape:\n\t{input_train.shape}') logging.info(f'condition_train shape:\n\t{condition_train.shape}') logging.info('\nSave the files...') np.save(f'../data/condition_train_{stock}_{window_size}_day{N}_orderbook.npy', condition_train) np.save(f'../data/input_train_{stock}_{window_size}_day{N}_orderbook.npy', input_train) logging.info('Done.') logging.info('\n[Input] ---------- DONE ----------') else: logging.info('Loading input_train, input_validation and input_test sets...') input_train = np.load(f'../data/input_train_{stock}_{window_size}_{N}days_orderbook.npy', mmap_mode='r') condition_train = np.load(f'../data/condition_train_{stock}_{window_size}_{N}days_orderbook.npy', mmap_mode='r') logging.info(f'input_train shape:\n\t{input_train.shape}') logging.info(f'condition_train shape:\n\t{condition_train.shape}') logging.info(f"\nHYPERPARAMETERS:\n" f"\tstock: {stock}\n" f"\tdepth: {depth}\n" f"\tgenerator: {args.type_gen}\n" f"\tdiscriminator: {args.type_disc}\n" f"\tn_layers_gen: {args.n_layers_gen}\n" f"\tn_layers_disc: {args.n_layers_disc}\n" f"\tskip_connection: {args.skip_connection}\n" f"\tlatent_dim per time: {latent_dim}\n" f"\tn_features_input: {n_features_input}\n" f"\tn_features_gen: {n_features_gen}\n" f"\tfeatures: {orderbook_df.columns}\n" f"\tn_epochs: {n_epochs}\n" f"\tT_condition: {T_condition}\n" f"\tT_gen: {T_gen}\n" f"\tbatch_size: {batch_size} (num_batches: {input_train.shape[0]//batch_size})\n" f"\tloss: {args.loss}\n" f"\tpatience: {patience}\n" f"\tjob_id: {job_id}\n" f"\tLoaded model: {None if args.load==0 else args.load}\n") # Define the optimizers generator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001) discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001) optimizer = [generator_optimizer, discriminator_optimizer] if args.load == 0: # Build the models generator_model = build_generator(args.n_layers_gen, args.type_gen, args.skip_connection, T_gen, T_condition, n_features_input, n_features_gen, latent_dim, True) discriminator_model = build_discriminator(args.n_layers_disc, args.type_disc, args.skip_connection, T_gen, T_condition, n_features_input, n_features_gen, True, args.loss) feature_extractor = build_feature_extractor(discriminator_model, [i for i in range(1, args.n_layers_disc)]) else: prev_job_id = args.load # Load the models generator_model = tf.keras.models.load_model(f'models/{prev_job_id}.pbs01_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/generator_model.h5') discriminator_model = tf.keras.models.load_model(f'models/{prev_job_id}.pbs01_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/discriminator_model.h5') 
feature_extractor = build_feature_extractor(discriminator_model, [i for i in range(1, args.n_layers_disc)]) logging.info('\n[Model] ---------- MODEL SUMMARIES ----------') generator_model.summary(print_fn=logging.info) logging.info('\n') discriminator_model.summary(print_fn=logging.info) logging.info('[Model] ---------- DONE ----------\n') # Define a dictionary to store the metrics metrics = {'discriminator_loss': [], 'gen_loss': [], 'real_disc_out': [], 'fake_disc_out': []} # Train the GAN. logging.info('\n[Training] ---------- START TRAINING ----------') dataset_train = tf.data.Dataset.from_tensor_slices((condition_train, input_train)).batch(batch_size) num_batches = len(dataset_train) logging.info(f'Number of batches:\n\t{num_batches}\n') # Initialize a list to store the mean over all the features of the wasserstein distances at each epoch wass_to_plot = [] for epoch in range(n_epochs): j = 0 W_batch = [] # W_batch will have num_batches elements noises = [[] for _ in range(num_batches)] # noises will have num_batches elements. Each elements is a list containing the noises used for each batch in that epoch for batch_condition, batch_real_samples in dataset_train: j += 1 batch_size = batch_real_samples.shape[0] generator_model, discriminator_model, generated_samples, noise = train_step(batch_real_samples, batch_condition, generator_model, discriminator_model, feature_extractor, optimizer, args.loss, T_gen, T_condition, latent_dim, batch_size, num_batches, j, job_id, epoch, metrics, args) # Append the noise noises[j-1] = noise W_features = [] # W_features will have n_features_gen elements for feature in range(n_features_gen): # Iteration over the features W_samples = [] # W_samples will have batch_size elements for i in range(generated_samples.shape[0]): # Iteration over the samples w = wasserstein_distance(batch_real_samples[i, :, feature], generated_samples[i, :, feature]) W_samples.append(w) W_features.append(np.mean(np.array(W_samples))) # averaged over the samples in a batch W_batch.append(np.mean(np.array(W_features))) # averaged over the features overall_W_mean = np.mean(np.array(W_batch)) # averaged over the batches wass_to_plot.append(overall_W_mean) logging.info(f'Wasserstein distance: {overall_W_mean}') if epoch % 150 == 0: logging.info('Creating a time series with the generated samples...') features = orderbook_df.columns[:n_features_gen] plot_samples(dataset_train, generator_model, noises, features, T_gen, n_features_gen, job_id, epoch, args) logging.info('Saving the models...') generator_model.save(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/generator_model.h5') discriminator_model.save(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/discriminator_model.h5') logging.info('Done') logging.info('Check Early Stopping Criteria...') if epoch > 2500: if overall_W_mean + 5e-4 < best_wass_dist: logging.info(f'Wasserstein distance improved from {best_wass_dist} to {overall_W_mean}') best_wass_dist = overall_W_mean best_gen_weights = generator_model.get_weights() best_disc_weights = discriminator_model.get_weights() patience_counter = 0 np.save(f'generated_samples/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/noise_{epoch}.npy', noises) else: logging.info(f'Wasserstein distance did not improve from {best_wass_dist}') patience_counter += 1 if patience_counter >= 
patience: best_epoch = epoch - patience logging.info(f"Early stopping on epoch {epoch}. Restoring best weights of epoch {best_epoch}...") generator_model.set_weights(best_gen_weights) # restore best weights discriminator_model.set_weights(best_disc_weights) logging.info('Saving the models...') generator_model.save(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/generator_model.h5') discriminator_model.save(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/discriminator_model.h5') logging.info('Done') else: logging.info(f'Early stopping criterion not met. Patience counter:\n\t{patience_counter}') # Plot the wasserstein distance plt.figure(figsize=(10, 6)) plt.plot(wass_to_plot) plt.xlabel('Epoch') plt.ylabel('Wasserstein distance') plt.title(f'Mean over the features of the Wasserstein distances') # add a vertical line at the best epoch plt.axvline(x=epoch-patience_counter, color='r', linestyle='--', alpha=0.8, label=f'Best epoch: {epoch-patience_counter}') plt.legend() plt.savefig(f'plots/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/0_wasserstein_distance.png') plt.close() if patience_counter >= patience: break logging.info('[Training] ---------- DONE ----------\n') logging.info('Plotting the first 2 principal components of the generated and real samples...') # Plot the first 2 principal components of the generated and real samples # Load the best generator generator_model = tf.keras.models.load_model(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/generator_model.h5') generated_samples = [] real_samples = [] k = 0 for batch_condition, batch in dataset_train: gen_sample = generator_model([noises[k], batch_condition]) for i in range(gen_sample.shape[0]): # All the appended samples will be of shape (T_gen, n_features_gen) generated_samples.append(gen_sample[i, -1, :]) real_samples.append(batch[i, -1, :]) k += 1 plot_pca_with_marginals(generated_samples, real_samples, job_id, args) logging.info('Done.') logging.info('Computing the errors on the correlation matrix using bootstrap...') # At the end of the training, compute the errors on the correlation matrix using bootstrap. # In order to do so, I need the best generator and the noises used. correlation_matrix(dataset_train, generator_model, noises, T_gen, n_features_gen, job_id) logging.info('Done.') # Maybe it is not necessary, but I prefer to clear all the memory and exit the script gc.collect() tf.keras.backend.clear_session() sys.exit()
DanieleMDiNosse/GAN_Anomaly_Detection
train.py
train.py
py
17,245
python
en
code
0
github-code
6
[ { "api_name": "os.environ", "line_number": 9, "usage_type": "attribute" }, { "api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call" }, { "api_name": "logging.CRITICAL", "line_number": 41, "usage_type": "attribute" }, { "api_name": "logging....
17372597106
# LinearlyVariableInfill """ Linearly Variable Infill for 3D prints. Author: Barnabas Nemeth Version: 1.5 """ from ..Script import Script from UM.Logger import Logger from UM.Application import Application import re #To perform the search from cura.Settings.ExtruderManager import ExtruderManager from collections import namedtuple from enum import Enum from typing import List, Tuple from UM.Message import Message from UM.i18n import i18nCatalog catalog = i18nCatalog("cura") __version__ = '1.5' ##----------------------------------------------------------------------------------------------------------------------------------------------------------------- Point2D = namedtuple('Point2D', 'x y') Segment = namedtuple('Segment', 'point1 point2') class Infill(Enum): """Enum for infill type.""" LINEAR = 1 # Linear infill like rectilinear or triangles class Section(Enum): """Enum for section type.""" NOTHING = 0 INNER_WALL = 1 OUTER_WALL = 2 INFILL = 3 def dist(segment: Segment, point: Point2D) -> float: """Calculate the distance from a point to a line with finite length. Args: segment (Segment): line used for distance calculation point (Point2D): point used for distance calculation Returns: float: distance between ``segment`` and ``point`` """ px = segment.point2.x - segment.point1.x py = segment.point2.y - segment.point1.y norm = px * px + py * py u = ((point.x - segment.point1.x) * px + (point.y - segment.point1.y) * py) / float(norm) if u > 1: u = 1 elif u < 0: u = 0 x = segment.point1.x + u * px y = segment.point1.y + u * py dx = x - point.x dy = y - point.y return (dx * dx + dy * dy) ** 0.5 def two_points_distance(point1: Point2D, point2: Point2D) -> float: """Calculate the euclidean distance between two points. Args: point1 (Point2D): first point point2 (Point2D): second point Returns: float: euclidean distance between the points """ return ((point1.x - point2.x) ** 2 + (point1.y - point2.y) ** 2) ** 0.5 def min_distance_to_segment(segment: Segment, segments: List[Segment]) -> float: """Calculate the minimum distance from the midpoint of ``segment`` to the nearest segment in ``segments``. Args: segment (Segment): segment to use for midpoint calculation segments (List[Segment]): segments list Returns: float: the smallest distance from the midpoint of ``segment`` to the nearest segment in the list """ middlePoint = Point2D((segment.point1.x + segment.point2.x) / 2, (segment.point1.y + segment.point2.y) / 2) return min(dist(s, middlePoint) for s in segments) def getXY(currentLineINcode: str) -> Point2D: """Create a ``Point2D`` object from a gcode line. Args: currentLineINcode (str): gcode line Raises: SyntaxError: when the regular expressions cannot find the relevant coordinates in the gcode Returns: Point2D: the parsed coordinates """ searchX = re.search(r"X(\d*\.?\d*)", currentLineINcode) searchY = re.search(r"Y(\d*\.?\d*)", currentLineINcode) if searchX and searchY: elementX = searchX.group(1) elementY = searchY.group(1) else: raise SyntaxError('Gcode file parsing error for line {currentLineINcode}') return Point2D(float(elementX), float(elementY)) def mapRange(a: Tuple[float, float], b: Tuple[float, float], s: float) -> float: """Calculate a multiplier for the extrusion value from the distance to the perimeter. 
Args: a (Tuple[float, float]): a tuple containing: - a1 (float): the minimum distance to the perimeter (always zero at the moment) - a2 (float): the maximum distance to the perimeter where the interpolation is performed b (Tuple[float, float]): a tuple containing: - b1 (float): the maximum flow as a fraction - b2 (float): the minimum flow as a fraction s (float): the euclidean distance from the middle of a segment to the nearest perimeter Returns: float: a multiplier for the modified extrusion value """ (a1, a2), (b1, b2) = a, b return b1 + ((s - a1) * (b2 - b1) / (a2 - a1)) def gcode_template(x: float, y: float, extrusion: float) -> str: """Format a gcode string from the X, Y coordinates and extrusion value. Args: x (float): X coordinate y (float): Y coordinate extrusion (float): Extrusion value Returns: str: Gcode line """ return "G1 X{} Y{} E{}".format(round(x, 3), round(y, 3), round(extrusion, 5)) def is_layer(line: str) -> bool: """Check if current line is the start of a layer section. Args: line (str): Gcode line Returns: bool: True if the line is the start of a layer section """ return line.startswith(";LAYER:") def is_innerwall(line: str) -> bool: """Check if current line is the start of an inner wall section. Args: line (str): Gcode line Returns: bool: True if the line is the start of an inner wall section """ return line.startswith(";TYPE:WALL-INNER") def is_outerwall(line: str) -> bool: """Check if current line is the start of an outer wall section. Args: line (str): Gcode line Returns: bool: True if the line is the start of an outer wall section """ return line.startswith(";TYPE:WALL-OUTER") def ez_nyomtatasi_vonal(line: str) -> bool: """Check if current line is a standard printing segment. Args: line (str): Gcode line Returns: bool: True if the line is a standard printing segment """ return "G1" in line and " X" in line and "Y" in line and "E" in line def is_infill(line: str) -> bool: """Check if current line is the start of an infill. 
Args: line (str): Gcode line Returns: bool: True if the line is the start of an infill section """ return line.startswith(";TYPE:FILL") def fill_type(Mode): """Definie the type of Infill pattern Linearly Variable Infill like lineas or triangles = 1 Args: line (Mode): Infill Pattern Returns: Int: the Type of infill pattern """ iMode=0 if Mode == 'grid': iMode=1 if Mode == 'lines': iMode=1 if Mode == 'triangles': iMode=1 if Mode == 'trihexagon': iMode=1 if Mode == 'cubic': iMode=1 if Mode == 'cubicsubdiv': iMode=0 if Mode == 'tetrahedral': iMode=1 if Mode == 'quarter_cubic': iMode=1 if Mode == 'concentric': iMode=0 if Mode == 'zigzag': iMode=0 if Mode == 'cross': iMode=0 if Mode == 'cross_3d': iMode=0 if Mode == 'gyroid': iMode=0 return iMode class LinearlyVariableInfill(Script): def getSettingDataString(self): return """{ "name": "Linearly Variable Infill", "key": "LinearlyVariableInfill", "metadata": {}, "version": 2, "settings": { "variableSegmentLength": { "label": "Valtoztatott szakasz hossza", "description": "Distance of the gradient (max to min) in mm", "unit": "mm", "type": "float", "default_value": 6.0, "minimum_value": 1.0, "minimum_value_warning": 2.0 }, "divisionNR": { "label": "Szakasz felosztasanak szama", "description": "Only applicable for Linearly Variable Infills; number of segments within the gradient(fullSegmentLength=variableSegmentLength / divisionNR); use sensible values to not overload", "type": "int", "default_value": 4, "minimum_value": 1, "minimum_value_warning": 2 }, "variableSpeed": { "label": "Valtozo sebesseg", "description": "Activate also Valtozo sebesseg linked to the gradual flow", "type": "bool", "default_value": false }, "maxSpeedFactor": { "label": "Max sebesseg szorzo", "description": "Maximum over speed factor", "unit": "%", "type": "int", "default_value": 200, "minimum_value": 100, "maximum_value": 400, "minimum_value_warning": 110, "maximum_value_warning": 370, "enabled": "variableSpeed" }, "minSpeedFactor": { "label": "Min sebesseg szorzo", "description": "Minimum over speed factor", "unit": "%", "type": "int", "default_value": 60, "minimum_value": 10, "maximum_value": 100, "minimum_value_warning": 40, "maximum_value_warning": 90, "enabled": "variableSpeed" }, "extruderNR": { "label": "Extruder sorszam", "description": "Define Extruder szam in case of multi extruders", "unit": "", "type": "int", "default_value": 1 } } }""" ## ----------------------------------------------------------------------------- # # Main Prog # ## ----------------------------------------------------------------------------- def execute(self, data): Logger.log('w', 'Plugin is starting ' ) print('naygvera') division_nr = float(self.getSettingValueByKey("divisionNR")) variable_segment_lengh = float(self.getSettingValueByKey("variableSegmentLength")) extruder_nr = self.getSettingValueByKey("extruderNR") extruder_nr = extruder_nr -1 variable_speed= bool(self.getSettingValueByKey("variableSpeed")) max_speed_factor = float(self.getSettingValueByKey("maxSpeedFactor")) max_speed_factor = max_speed_factor /100 min_speed_factor = float(self.getSettingValueByKey("minSpeedFactor")) min_speed_factor = min_speed_factor /100 # machine_extruder_count # extruder_count=Application.getInstance().getGlobalContainerStack().getProperty("machine_extruder_count", "value") # extruder_count = extruder_count-1 # if extruder_nr>extruder_count : # extruder_nr=extruder_count # Deprecation function extrud = list(Application.getInstance().getGlobalContainerStack().extruders.values()) #extrud = 
Application.getInstance().getGlobalContainerStack().extruderList Message('Extrud:{}'.format(extrud), title = catalog.i18nc("@info:title", "Post Processing")).show() infillpattern = extrud[extruder_nr].getProperty("infill_pattern", "value") connectinfill = extrud[extruder_nr].getProperty("zig_zaggify_infill", "value") """Parse Gcode and modify infill portions with an extrusion width gradient.""" currentSection = Section.NOTHING lastPosition = Point2D(-10000, -10000) littleSegmentLength = variable_segment_lengh / division_nr infill_type=fill_type(infillpattern) if infill_type == 0: # Logger.log('d', 'Infill Pattern not supported : ' + infillpattern) Message('Infill Pattern not supported : ' + infillpattern , title = catalog.i18nc("@info:title", "Post Processing")).show() return None if connectinfill == True: # Logger.log('d', 'Connect Infill Lines no supported') Message('Gcode must be generate without Connect Infill Lines mode activated' , title = catalog.i18nc("@info:title", "Post Processing")).show() return None Logger.log('d', "GradientFill Param : " + str(littleSegmentLength) + "/" + str(division_nr)+ "/" + str(variable_segment_lengh) ) #str(max_flow) + "/" + str(min_flow) + "/" + Logger.log('d', "Pattern Param : " + infillpattern + "/" + str(infill_type) ) for layer in data: layer_index = data.index(layer) lines = layer.split("\n") for currentLineINcode in lines: new_Line="" stringFeed = "" line_index = lines.index(currentLineINcode) if is_layer(currentLineINcode): perimeterSegments = [] if is_innerwall(currentLineINcode): currentSection = Section.INNER_WALL # Logger.log('d', 'is_innerwall' ) if is_outerwall(currentLineINcode): currentSection = Section.OUTER_WALL # Logger.log('d', 'is_outerwall' ) if currentSection == Section.INNER_WALL: if ez_nyomtatasi_vonal(currentLineINcode): Logger.log('d', 'Ez sor rossz ' + currentLineINcode) perimeterSegments.append(Segment(getXY(currentLineINcode), lastPosition)) if is_infill(currentLineINcode): # Log Size of perimeterSegments for debuging Logger.log('d', 'PerimeterSegments seg : {}'.format(len(perimeterSegments))) currentSection = Section.INFILL # ! 
Important continue if currentSection == Section.INFILL: if "F" in currentLineINcode and "G1" in currentLineINcode: searchSpeed = re.search(r"F(\d*\.?\d*)", currentLineINcode) if searchSpeed: current_speed=float(searchSpeed.group(1)) new_Line="G1 F{}\n".format(current_speed) else: Logger.log('d', 'Gcode file parsing error for line : ' + currentLineINcode ) if "E" in currentLineINcode and "G1" in currentLineINcode and "X" in currentLineINcode and "Y" in currentLineINcode: currentPosition = getXY(currentLineINcode) splitLine = currentLineINcode.split(" ") # ha lineraris if infill_type == 1: for element in splitLine: if "E" in element: E_inCode = float(element[1:]) fullSegmentLength = two_points_distance(lastPosition, currentPosition) segmentSteps = fullSegmentLength / littleSegmentLength extrudeLengthPERsegment = (0.006584 * fullSegmentLength) / segmentSteps E_inCode_last = E_inCode - (extrudeLengthPERsegment * segmentSteps) littlesegmentDirectionandLength = Point2D((currentPosition.x - lastPosition.x) / fullSegmentLength * littleSegmentLength,(currentPosition.y - lastPosition.y) / fullSegmentLength * littleSegmentLength) speed_deficit = ((current_speed * max_speed_factor + current_speed * min_speed_factor) / division_nr) step_number = 0 last_step_number = 0 if segmentSteps >= 2: # new_Line=new_Line+"; LinearlyVariableInfill segmentSteps >= 2\n" for step in range(int(segmentSteps)): segmentEnd = Point2D(lastPosition.x + littlesegmentDirectionandLength.x, lastPosition.y + littlesegmentDirectionandLength.y) extrudeLength=E_inCode_last+extrudeLengthPERsegment if perimeterSegments==[] : Logger.log('d', 'Itt a hiba ' + currentLineINcode) shortestDistance = min_distance_to_segment(Segment(lastPosition, segmentEnd), perimeterSegments) if shortestDistance < variable_segment_lengh: segmentSpeed = current_speed if variable_speed: if variable_speed: if step_number < division_nr: segmentSpeed = current_speed * min_speed_factor + (speed_deficit * step_number) if step_number >= division_nr: segmentSpeed = current_speed * max_speed_factor if step_number >= segmentSteps - division_nr: segmentSpeed = current_speed * max_speed_factor - (speed_deficit * last_step_number) last_step_number=last_step_number + 1 stringFeed = " F{}".format(int(segmentSpeed)) else: segmentSpeed = current_speed * min_speed_factor if variable_speed: if step_number < division_nr: segmentSpeed = current_speed * min_speed_factor + (speed_deficit * step_number) if step_number >= division_nr: segmentSpeed = current_speed * max_speed_factor if step_number >= segmentSteps - division_nr: segmentSpeed = current_speed * max_speed_factor - (speed_deficit * last_step_number) last_step_number=last_step_number + 1 stringFeed = " F{}".format(int(segmentSpeed)) new_Line=new_Line + gcode_template(segmentEnd.x, segmentEnd.y, extrudeLength) + stringFeed + "\n" #szakaszExtrudalas lastPosition = segmentEnd E_inCode_last = extrudeLength step_number = step_number + 1 segmentSpeed = current_speed * min_speed_factor lastSpeed = " F{}".format(int(segmentSpeed)) new_Line=new_Line + gcode_template(currentPosition.x, currentPosition.y, E_inCode, ) + lastSpeed + "\n" #Original line for finish lines[line_index] = new_Line else : outPutLine = "" # outPutLine = "; LinearlyVariableInfill segmentSteps < 2\n" for element in splitLine: if "E" in element: outPutLine = outPutLine + "E" + str(round(E_inCode, 5)) else: outPutLine = outPutLine + element + " " outPutLine = outPutLine # + "\n" lines[line_index] = outPutLine # writtenToFile = 1 # # comment like ;MESH:NONMESH # if 
";" in currentLineINcode: currentSection = Section.NOTHING lines[line_index] = currentLineINcode # other Comment # # line with move # if "X" in currentLineINcode and "Y" in currentLineINcode and ("G1" in currentLineINcode or "G0" in currentLineINcode): lastPosition = getXY(currentLineINcode) final_lines = "\n".join(lines) data[layer_index] = final_lines return data
vaxbarn/LinearlyVariableInfill
LinearlyVariableInfill.py
LinearlyVariableInfill.py
py
21,725
python
en
code
0
github-code
6
[ { "api_name": "UM.i18n.i18nCatalog", "line_number": 20, "usage_type": "call" }, { "api_name": "collections.namedtuple", "line_number": 26, "usage_type": "call" }, { "api_name": "collections.namedtuple", "line_number": 27, "usage_type": "call" }, { "api_name": "enu...
72492708988
import pytest

from pytest_persistence import plugin

plg = plugin.Plugin()


@pytest.mark.parametrize("scope", ["session", "package", "module", "class", "function"])
@pytest.mark.parametrize("result", ["result", 42])
def test_store_fixture(result, scope):
    fixture_id = ('fixture1', scope, 'tests/test_mock.py')
    plg.store_fixture(result, fixture_id, 'tests/test_mock.py', None)
    if scope == "session":
        assert plg.output[scope] == {"('fixture1', 'session', 'tests/test_mock.py', None)": result}
    else:
        assert plg.output[scope]["tests/test_mock.py"] == {
            f"('fixture1', '{scope}', 'tests/test_mock.py', None)": result}


@pytest.fixture(params=[(x, y) for x in ["session", "package", "module", "class", "function"]
                        for y in ["result", 42]])
def store_fixtures(request):
    scope = request.param[0]
    result = request.param[1]
    fixture_id = ('fixture1', scope, 'tests/test_mock.py')
    plg.store_fixture(result, fixture_id, 'tests/test_mock.py', None)
    plg.input = plg.output
    return scope, result


def test_load_fixture(store_fixtures):
    scope = store_fixtures[0]
    result = store_fixtures[1]
    fixture_id = ('fixture1', scope, 'tests/test_mock.py')
    fixture_result = plg.load_fixture(fixture_id, 'tests/test_mock.py')
    assert fixture_result == result
JaurbanRH/pytest-persistence
tests/test_unit.py
test_unit.py
py
1,367
python
en
code
0
github-code
6
[ { "api_name": "pytest_persistence.plugin.Plugin", "line_number": 5, "usage_type": "call" }, { "api_name": "pytest_persistence.plugin", "line_number": 5, "usage_type": "name" }, { "api_name": "pytest.mark.parametrize", "line_number": 8, "usage_type": "call" }, { "a...
12702052399
""" To render html web pages """ import random from django.http import HttpResponse from django.template.loader import render_to_string from articles.models import Article def home_view(request, id=None, *args, **kwargs): """ Take in a request (Django send request) return HTML as a response (We pick to return the response) """ name = "Artem" # hard coded number = random.randint(1, 2) # pseudo random article_obj = Article.objects.get(id=number) article_queryset = Article.objects.all() context = { "object_list": article_queryset, "title": article_obj.title, "content": article_obj.content, "id": article_obj.id } # Django templates HTML_STRING = render_to_string("home-view.html", context=context) return HttpResponse(HTML_STRING)
L1verly/djproject-private
djproject/views.py
views.py
py
832
python
en
code
0
github-code
6
[ { "api_name": "random.randint", "line_number": 19, "usage_type": "call" }, { "api_name": "articles.models.Article.objects.get", "line_number": 22, "usage_type": "call" }, { "api_name": "articles.models.Article.objects", "line_number": 22, "usage_type": "attribute" }, ...
34097968081
#!/usr/bin/python

import curses
import sys

import RPi.GPIO as GPIO


def main(stdscr):
    # do not wait for input when calling getch
    stdscr.nodelay(1)
    initGPIO()

    while True:
        # get keyboard input, returns -1 if none available
        c = stdscr.getch()
        if c != -1:
            # print numeric value
            stdscr.addstr(str(c))
            stdscr.refresh()
            # return cursor to start position
            stdscr.move(0, 0)
            if c == 97:
                GPIO.cleanup()
                sys.exit('Bye bye')
            elif c == 117:
                lightToggle()
            elif c == 114:
                light()


def initGPIO():
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(8, GPIO.OUT)


def lightToggle():
    GPIO.output(8, not GPIO.input(8))


def light():
    GPIO.output(8, GPIO.HIGH)


def dark():
    GPIO.output(8, GPIO.LOW)


if __name__ == '__main__':
    curses.wrapper(main)
tophsic/gpio
one_led_controled_by_s.py
one_led_controled_by_s.py
py
914
python
en
code
0
github-code
6
[ { "api_name": "RPi.GPIO.cleanup", "line_number": 23, "usage_type": "call" }, { "api_name": "RPi.GPIO", "line_number": 23, "usage_type": "name" }, { "api_name": "sys.exit", "line_number": 24, "usage_type": "call" }, { "api_name": "RPi.GPIO.setmode", "line_numbe...
41584638888
"""Celery를 사용하는 예제""" import random import time from os import path from urllib import parse import requests from celery import Celery from pydub import AudioSegment from my_logging import get_my_logger logger = get_my_logger(__name__) # 크롤링 요청 간격 리스트 정의 RANDOM_SLEEP_TIMES = [x * 0.1 for x in range(10, 40, 5)] # 아티스트 이름 ARTIST_NAME = "Maurice RAVEL " # 앨범 타이틀 ALBUM_NAME = "The Piano Music of Maurice Ravel from archive.org" # 크롤링 대상 URL 리스트 MUSIC_URLS = [ 'https://archive.org/download/ThePianoMusicOfMauriceRavel/01PavanePourUneInfanteDfuntePourPianoMr19.mp3', 'https://archive.org/download/ThePianoMusicOfMauriceRavel/02JeuxDeauPourPianoMr30.mp3', 'https://archive.org/download/ThePianoMusicOfMauriceRavel/03SonatinePourPianoMr40-Modr.mp3', 'https://archive.org/download/ThePianoMusicOfMauriceRavel/04MouvementDeMenuet.mp3', 'https://archive.org/download/ThePianoMusicOfMauriceRavel/05Anim.mp3', ] # Reids의 0번째 DB를 사용하는 예 app = Celery('crawler_with_celery_sample', broker='redis://localhost:6379/0') app.conf.update( # Redis에 태스크 또는 실행 결과를 저장할 때의 형식을 JSON으로 지정 task_serializer='json', accept_content=['json'], result_serializer='json', timezone='Asia/Seoul', enable_utc=True, # Celery 태스크 내부의 시간을 UTC로 다룸 # 1개의 워커는 동시에 1개의 프로세스만 실행하게 함 worker_max_tasks_per_child=1, # Redis에 저장되는 태스크의 실행 결과는 60초가 지나면 제거(파기)함 result_expires=60, # 워커가 표준 출력으로 출력한 내용을 명령어 실행 터미널에 출력하지 않음 worker_redirect_stdouts=False, # 태스크 실행 시간이 180초를 넘으면 자동으로 종료함 task_soft_time_limit=180, # 어떤 태스크를 어떤 워커로 라우팅할지 설정함 task_routes={ 'crawler_with_celery_sample.download': { 'queue': 'download', 'routing_key': 'download', }, 'crawler_with_celery_sample.cut_mp3': { 'queue': 'media', 'routing_key': 'media', }, }, ) # 재시도는 최대 2회까지, 재시도할 때는 10초 간격을 둠 @app.task(bind=True, max_retries=2, default_retry_delay=10) def download(self, url, timeout=180): """파일 내려받기""" try: # mp3 파일 이름을 URL을 기반으로 추출 parsed_url = parse.urlparse(url) file_name = path.basename(parsed_url.path) # 요청 간격을 랜덤하게 선택 sleep_time = random.choice(RANDOM_SLEEP_TIMES) # 내려받기 시작을 로그에 출력 logger.info("[download start] sleep: {time} {file_name}".format(time=sleep_time, file_name=file_name)) # 요청 대기 time.sleep(sleep_time) # 음악 파일 내려받기 r = requests.get(url, timeout=timeout) with open(file_name, 'wb') as fw: fw.write(r.content) # 내려받기 종료를 로그에 출력 logger.info("[download finished] {file_name}".format(file_name=file_name)) cut_mp3.delay(file_name) # cut_mp3 함수 실행을 태스크로 큐에 넣음 except requests.exceptions.RequestException as e: # 예외가 발생하면 로그를 출력하고 재시도 logger.error("[download error - retry] file: {file_name}, e: {e}".format( file_name=file_name, e=e)) raise self.retry(exc=e, url=url) @app.task def cut_mp3(file_name): """앞의 2초를 추출해서 저장하기""" logger.info("[cut_mp3 start] {file_name}".format(file_name=file_name)) # 내려받은 파일을 pydub 데이터 형식으로 변환해서 읽어 들임 music = AudioSegment.from_mp3(file_name) # mp3 파일의 앞 2초만 잘라내기 head_time = 2 * 1000 # milliseconds head_part = music[:head_time] # 잘라냄 root_name, ext = path.splitext(file_name) # 파일 이름을 확장자와 이외의 부분으로 분할 # 저장 # 원래 파일과 구별할 수 있게 확장자 이름 앞에 _head를 붙임 file_handler = head_part.export( root_name + "_head" + ext, format="mp3", tags={ 'title': root_name, 'artist': ARTIST_NAME, 'album': ALBUM_NAME, } ) # 주의: 파일 핸들러 닫기를 잊으면 안 됩니다. file_handler.close() logger.info("[cut_mp3 finished] {file_name}".format(file_name=file_name)) if __name__ == '__main__': logger.info("[main start]") # 크롤링 대상 URL 별로 download() 함수를 태스크로 큐에 넣음 # 큐에 들어간 태스크는 워커에 의해서 자동 실행됨 for music_url in MUSIC_URLS: download.delay(music_url) logger.info("[main finished]")
JSJeong-me/2021-K-Digital-Training
Web_Crawling/python-crawler/chapter_5/crawler_with_celery_sample.py
crawler_with_celery_sample.py
py
5,058
python
ko
code
7
github-code
6
[ { "api_name": "my_logging.get_my_logger", "line_number": 11, "usage_type": "call" }, { "api_name": "celery.Celery", "line_number": 32, "usage_type": "call" }, { "api_name": "urllib.parse.urlparse", "line_number": 68, "usage_type": "call" }, { "api_name": "urllib.p...
32756126137
# !/usr/bin/python

import os
import sys

# Logging configuration
import logging


class logger(logging.Logger):

    def __init__(self):
        """Initializer."""
        # logging.Logger.__init__ requires a logger name
        super().__init__(__name__)
        logging.basicConfig(filename="errlog.log",
                            filemode="a",
                            format="(%(asctime)s) | %(name)s | %(levelname)s:%(message)s",
                            datefmt="%d %B %Y , %H:%M:%S",
                            level=os.environ.get("LOGLEVEL", "INFO"))
MohdFarag/Musical-Instruments-Equalizer
src/logger.py
logger.py
py
478
python
en
code
0
github-code
6
[ { "api_name": "logging.Logger", "line_number": 8, "usage_type": "attribute" }, { "api_name": "logging.basicConfig", "line_number": 13, "usage_type": "call" }, { "api_name": "os.environ.get", "line_number": 17, "usage_type": "call" }, { "api_name": "os.environ", ...
42591021729
import os import numpy as np import pickle import argparse from implicit.bpr import BayesianPersonalizedRanking from implicit.nearest_neighbours import CosineRecommender from scipy.sparse import csr_matrix from methods import consul, oracle, PrivateRank, PrivateWalk np.random.seed(0) def recall(li, gt): if gt in li: return 1 return 0 def nDCG(li, gt): if gt in li: return 1 / np.log2(li.tolist().index(gt) + 2) return 0 def list_minimum_group(li, sensitive): return np.bincount(sensitive[li], minlength=sensitive.max() + 1).min() parser = argparse.ArgumentParser() parser.add_argument('--data', choices=['100k', '1m', 'home', 'hetrec'], default='100k') parser.add_argument('--prov', choices=['cosine', 'bpr'], default='cosine') parser.add_argument('--sensitive', choices=['popularity', 'old'], default='popularity', help='`old` is valid only for MovieLens') parser.add_argument('--split', type=int, default=1, help='Total number of parallel execusion (only for parallel execusion, set 1 otherwise)') parser.add_argument('--block', type=int, default=0, help='Id of the current execusion (only for parallel excecusion, set 0 otherwise)') args = parser.parse_args() assert(args.sensitive == 'popularity' or args.data in ['100k', '1m']) assert(0 <= args.block and args.block < args.split) # # Load Data # if args.data == '100k': n = 943 m = 1682 filename = 'ml-100k/u.data' delimiter = '\t' elif args.data == '1m': n = 6040 m = 3952 filename = 'ml-1m/ratings.dat' delimiter = '::' K = 10 if args.data == '100k' or args.data == '1m': raw_R = np.zeros((n, m)) history = [[] for i in range(n)] with open(filename) as f: for r in f: user, movie, r, t = map(int, r.split(delimiter)) user -= 1 movie -= 1 raw_R[user, movie] = r history[user].append((t, movie)) elif args.data == 'hetrec': raw_R = np.log2(np.load('hetrec.npy') + 1) n, m = raw_R.shape history = [[] for i in range(n)] for i in range(n): for j in np.nonzero(raw_R[i] > 0)[0]: history[i].append((np.random.rand(), j)) elif args.data == 'home': raw_R = np.load('Home_and_Kitchen.npy') n, m = raw_R.shape with open('Home_and_Kitchen_history.pickle', 'br') as f: history = pickle.load(f) if args.sensitive == 'popularity': mask = raw_R > 0 if args.data == '100k': sensitive = mask.sum(0) < 50 elif args.data == '1m': sensitive = mask.sum(0) < 300 elif args.data == 'hetrec': sensitive = mask.sum(0) < 50 elif args.data == 'home': sensitive = mask.sum(0) < 50 sensitive = sensitive.astype('int') elif args.sensitive == 'old': sensitive = np.zeros(m, dtype='int') if args.data == '100k': filename = 'ml-100k/u.item' delimiter = '|' elif args.data == '1m': filename = 'ml-1m/movies.dat' delimiter = '::' with open(filename, encoding='utf8', errors='ignore') as f: for r in f: li = r.strip().split(delimiter) if '(19' in li[1]: year = 1900 + int(li[1].split('(19')[1].split(')')[0]) elif '(20' in li[1]: year = 2000 + int(li[1].split('(20')[1].split(')')[0]) sensitive[int(li[0]) - 1] = year < 1990 # # Data Loaded # damping_factor = 0.01 tau = 5 provider_recall = 0 provider_nDCG = 0 provider_minimum = 0 oracle_recall = 0 oracle_nDCG = 0 oracle_minimum = 0 PR_recall = 0 PR_nDCG = 0 PR_minimum = 0 PW_recall = 0 PW_nDCG = 0 PW_minimum = 0 random_recall = 0 random_nDCG = 0 random_minimum = 0 consul_recall = 0 consul_nDCG = 0 consul_minimum = 0 PW_cnt = np.array(0) consul_cnt = np.array(0) start_index = int(n * args.block / args.split) end_index = int(n * (args.block + 1) / args.split) for i in range(start_index, end_index): gt = sorted(history[i])[-1][1] source = sorted(history[i])[-2][1] used = 
[y for x, y in history[i] if y != gt] R = raw_R.copy() R[i, gt] = 0 mask = R > 0 if args.prov == 'bpr': model = BayesianPersonalizedRanking(num_threads=1, random_state=0) elif args.prov == 'cosine': model = CosineRecommender() sR = csr_matrix(mask.T) model.fit(sR, show_progress=False) if args.prov == 'bpr': score = model.item_factors @ model.item_factors.T else: score = np.zeros((m, m)) for item in range(m): for j, v in model.similar_items(item, m): score[item, j] = v score_remove = score.copy() score_remove[:, used] -= score.max() + 1 score_remove -= np.eye(m) * (score.max() + 1) list_provider = np.argsort(-score_remove[source])[:K] provider_recall += recall(list_provider, gt) provider_nDCG += nDCG(list_provider, gt) provider_minimum += list_minimum_group(list_provider, sensitive) oracle_list = oracle(score_remove[source], sensitive, tau, used, K) oracle_recall += recall(oracle_list, gt) oracle_nDCG += nDCG(oracle_list, gt) oracle_minimum += list_minimum_group(oracle_list, sensitive) # Construct the recsys graph A = np.zeros((m, m)) rank = np.argsort(-score_remove, 1)[:, :K] weight = 1 / np.log2(np.arange(K) + 2) weight /= weight.sum() A[np.arange(m).repeat(K), rank.reshape(-1)] += weight.repeat(m).reshape(K, m).T.reshape(-1) # Consul consul_list = consul(rank, sensitive, tau, source, used, K, access_conuter=consul_cnt) consul_recall += recall(consul_list, gt) consul_nDCG += nDCG(consul_list, gt) consul_minimum += list_minimum_group(consul_list, sensitive) # PrivateRank PR_list = PrivateRank(A, sensitive, tau, source, used, K, damping_factor) PR_recall += recall(PR_list, gt) PR_nDCG += nDCG(PR_list, gt) PR_minimum += list_minimum_group(PR_list, sensitive) # PrivateWalk PW_list = PrivateWalk(rank, sensitive, tau, source, used, K, access_conuter=PW_cnt) PW_recall += recall(PW_list, gt) PW_nDCG += nDCG(PW_list, gt) PW_minimum += list_minimum_group(PW_list, sensitive) # Random np.random.seed(0) random_score = np.random.rand(m) random_list = oracle(random_score, sensitive, tau, used, K) random_recall += recall(random_list, gt) random_nDCG += nDCG(random_list, gt) random_minimum += list_minimum_group(random_list, sensitive) print('#') print('# User {} - {}'.format(start_index, i)) print('#') print('-' * 30) print('provider recall {:.2f}'.format(provider_recall)) print('oracle recall ', oracle_recall) print('consul recall ', consul_recall) print('PrivateRank recall', PR_recall) print('PrivateWalk recall', PW_recall) print('random recall ', random_recall) print('-' * 30) print('provider nDCG {:.2f}'.format(provider_nDCG)) print('oracle nDCG ', oracle_nDCG) print('consul nDCG ', consul_nDCG) print('PrivateRank nDCG', PR_nDCG) print('PrivateWalk nDCG', PW_nDCG) print('random nDCG ', random_nDCG) print('-' * 30) print('provider least count {:.2f}'.format(provider_minimum)) print('oracle least count ', oracle_minimum) print('consul least count ', consul_minimum) print('PrivateRank least count', PR_minimum) print('PrivateWalk least count', PW_minimum) print('random least count ', random_minimum) print('-' * 30) print('consul access ', consul_cnt) print('PrivateWalk access', PW_cnt) print('-' * 30) if not os.path.exists('out'): os.mkdir('out') with open('out/{}-{}-{}-{}.txt'.format(args.data, args.prov, args.sensitive, args.block), 'w') as f: print(provider_recall, file=f) print(provider_nDCG, file=f) print(provider_minimum, file=f) print(oracle_recall, file=f) print(oracle_nDCG, file=f) print(oracle_minimum, file=f) print(consul_recall, file=f) print(consul_nDCG, file=f) print(consul_minimum, file=f) 
print(consul_cnt, file=f) print(PR_recall, file=f) print(PR_nDCG, file=f) print(PR_minimum, file=f) print(PW_recall, file=f) print(PW_nDCG, file=f) print(PW_minimum, file=f) print(PW_cnt, file=f) print(random_recall, file=f) print(random_nDCG, file=f) print(random_minimum, file=f)
joisino/consul
evaluate.py
evaluate.py
py
8,347
python
en
code
5
github-code
6
[ { "api_name": "numpy.random.seed", "line_number": 13, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 13, "usage_type": "attribute" }, { "api_name": "numpy.log2", "line_number": 24, "usage_type": "call" }, { "api_name": "numpy.bincount", "...
5153764381
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
import xlsxwriter

# Reading the file into the system
file1 = pd.read_csv("/home/user/Downloads/portugese bank/bank-full-encoded.csv", sep=";", parse_dates=True)
print(file1.shape)

# Splitting into x, y and train and test data
y = file1["y"].values
x = file1.drop("y", axis=1).values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=25)

# Running the Random Forrest Classifier
rf_classifier = RandomForestClassifier()
rf_classifier.fit(x_train, y_train)
rf_prediction = rf_classifier.predict(x_test)
print("\nThe Confusion Matrix is as follows:\n", confusion_matrix(y_test, rf_prediction))
print("\nThe Classification Report for the random forrest classifier is as follows:\n",
      classification_report(y_test, rf_prediction))

# Writing output to Excel
writer = pd.ExcelWriter(path="/home/user/Downloads/portugese bank/Random Forrest.xlsx", engine='xlsxwriter')
workbook = writer.book
rf_output = []
for i in rf_prediction:
    rf_output.append(i)
df_rfoutput = pd.DataFrame(rf_output)
df_rfoutput.to_excel(writer, sheet_name="RandomForrest", startrow=0, startcol=0)
print(len(rf_prediction))

features = rf_classifier.feature_importances_
print(features)
print(len(features))
feature_list = file1.columns.values.tolist()
print(feature_list)

x = 0
for (i, j) in np.ndenumerate(features):
    x = x + j
print(x)

print("\nThe feature list and its corresponding importance is as follows:")
feature_output = []
for i in range(26):
    print(feature_list[i], "=", features[i]*100, "%")
    feature_output.append(features[i]*100)

df_feature_names = pd.DataFrame(feature_list)
df_feature_values = pd.DataFrame(feature_output)
df_feature_values.to_excel(writer, sheet_name="RandomForrest", startrow=0, startcol=5)
df_feature_names.to_excel(writer, sheet_name="RandomForrest", startrow=0, startcol=4)

workbook.close()
writer.save()
Royston2708/Loan_Defaulter_Project
Models/Decision Trees and Random Forrest.py
Decision Trees and Random Forrest.py
py
2,138
python
en
code
0
github-code
6
[ { "api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call" }, { "api_name": "sklearn.model_selection.train_test_split", "line_number": 18, "usage_type": "call" }, { "api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 21, "usage_type": "call...
24883752413
from django.conf.urls import patterns, include, url

urlpatterns = patterns('',
    url(r'^$', 'informes.views.home', name='i_home'),
    url(r'^pendientes/$', 'informes.views.informes_pendientes', name='i_pend'),
    url(r'^arreglados/$', 'informes.views.informes_arreglados', name='i_fixed'),
    url(r'^noarreglados/$', 'informes.views.informes_wontfix', name='i_wontfix'),
    url(r'^equipo/(?P<equipo_id>\d+)/levantar/$', 'informes.views.levantar_informe_equipo', name='informe_le'),
    url(r'^(?P<informe_id>\d+)/resolver/$', 'informes.views.resolver_informe', name='informe_r'),
)
efylan/ccreservas
informes/urls.py
urls.py
py
592
python
es
code
0
github-code
6
[ { "api_name": "django.conf.urls.patterns", "line_number": 4, "usage_type": "call" }, { "api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call" }, { "api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call" }, { "api_name": "djan...
26023698980
import matplotlib.pyplot as plt
import numpy as np

# plot 1
x = np.arange(-8, 8, 0.1)
y = x**3
plt.subplot(2, 2, 1)
plt.plot(x, y)
plt.title("plot 1")

# plot 2
x = np.linspace(0, 3*np.pi, 400)
y = x/(1+(x**4)*(np.sin(x))**2)
plt.subplot(2, 2, 2)
plt.plot(x, y)
plt.title("plot 2")

# plot 3
x = np.linspace(1, 10, 400)
y = np.sin(1/(x**(1/2)))
plt.subplot(2, 2, 3)
plt.plot(x, y)
plt.title("plot 3")

# plot 4
x = np.linspace(0, 2*np.pi, 400)
y = x**(np.sin(x))
plt.subplot(2, 2, 4)
plt.plot(x, y)
plt.title("plot 4")

plt.show()
suanhaitech/pythonstudy2023
Wangwenbin/Matplotlib4.py
Matplotlib4.py
py
492
python
uk
code
2
github-code
6
[ { "api_name": "numpy.arange", "line_number": 5, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.subplot", "line_number": 7, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name" }, { "api_name": "matplotlib.pypl...
36733301943
import re import json from collections import defaultdict def file_paths(file_path= 'logs_2/postcts.log1'): with open(file_path, 'r') as file: file_data = file.read() return file_data def parse_log_file(): file_contents = file_paths() # Compile regex patterns for improved performance regex_pattern_summary = re.compile( r'\s+opt_design Final Summary(.*?)Routing Overflow:\s*(.*?)\n', re.DOTALL) regex_pattern_setupHold = re.compile( r'\|\s+(?:WNS \(ns\):\|\s*(-?\d+\.?\d+)|TNS \(ns\):\|\s*(-?\d+\.?\d+)|Violating Paths:\|\s*(\d+\.?\d+)\s*)') pattern_DRV = re.compile( r'\|\s+(max_cap|max_tran|max_fanout)\s*\|\s*(\d+\s*\(\d+\))\s*\|\s*(-?\d+\.?\d+)\s*') regex_pattern_density = re.compile(r'Density:\s*(\d+\.?\d+)\s*') # regex_pattern_routing = re.compile( # r'Routing Overflow:\s*(-?\d+\.?\d+)\%\s*H and (-?\d+\.?\d+)\%\s*V') # or regex_pattern_routing = re.compile( r'(-?\d+\.?\d+)\%\s*H and (-?\d+\.?\d+)\%\s*V') regex_pattern_total_power = re.compile( r'^(Total Power\s*\n)[-]*\n(Total Internal Power:\s*(.*?)\s*(\d+\.?\d+)%\n)(Total Switching Power:\s*(.*?)\s*(\d+\.?\d+)%\n)(Total Leakage Power:\s*(.*?)\s*(\d+\.?\d+)%\n)', re.MULTILINE) regex_pattern_instance_design = re.compile( r'Instances in design\s*=\s*(\d+)') regex_pattern_vt = re.compile( r'(LVT|SVT|HVT) : inst = (\d+) \((\d+\.?\d+)%\)') regex_pattern_run_time = re.compile( r'totcpu=(.*?),\s*real=(.*?),\s*mem=(.*?)$') setUpMode = defaultdict(list) holdMode = defaultdict(list) DRV = {} density = [] congestion_overflow = {"H": [], "V": []} VT_dist = defaultdict(list) insts_count = [] power = {"Dynamic": [], "Leakage": []} runTime = [] summary_match = regex_pattern_summary.findall(file_contents) # Finding Total Summary (WNS, TNS, FEP), DRV's, Density, Routing_Overflow if summary_match: summary_data = summary_match[-1][0] # print(summary_data) wns_tns_match = re.findall(regex_pattern_setupHold, summary_data) if len(wns_tns_match) > 4: setUpMode["WNS"].append(wns_tns_match[0][0]) setUpMode["TNS"].append(wns_tns_match[1][1]) setUpMode["FEP"].append(wns_tns_match[2][2]) holdMode["WNS"].append(wns_tns_match[3][0]) holdMode["TNS"].append(wns_tns_match[4][1]) holdMode["FEP"].append(wns_tns_match[5][2]) else: setUpMode["WNS"].append(wns_tns_match[0][0]) setUpMode["TNS"].append(wns_tns_match[1][1]) setUpMode["FEP"].append(wns_tns_match[2][2]) holdMode["WNS"].append("-") holdMode["TNS"].append("-") holdMode["FEP"].append("-") matches = re.findall(pattern_DRV, summary_data) DRV = {key: {"terms": value, "slack": slack} for key, value, slack in matches} density_match = re.search(regex_pattern_density, summary_data) density_val = density_match.group(1) if density_match else "-" density.append(density_val) # routing_overflow_match = re.search(regex_pattern_routing, summary_match[-1]) routing_overflow_match = re.search( regex_pattern_routing, summary_match[-1][1]) routing_overflow_h = routing_overflow_match.group( 1) if routing_overflow_match else "-" routing_overflow_v = routing_overflow_match.group( 2) if routing_overflow_match else "-" congestion_overflow["H"].append(routing_overflow_h) congestion_overflow['V'].append(routing_overflow_v) else: print("Setup value pattern not found") # Finding the Total Power (Switching, Leakage) total_power_match = regex_pattern_total_power.search(file_contents) if total_power_match: power['Dynamic'].append(total_power_match.group(6)) power['Leakage'].append(total_power_match.group(9)) else: print("Pattern not found for Total Power.") # Find Instance count matches = regex_pattern_instance_design.findall(file_contents) if 
matches: instances_in_design = matches[-1].strip() insts_count.append(instances_in_design) else: print("Pattern not found for Instances in Design.") # Find LVT, SVT, HVT and % matches = regex_pattern_vt.findall(file_contents) for design_type, inst_value, percentage in matches[-3:]: VT_dist[design_type].append(inst_value) VT_dist[f"{design_type} %"].append(percentage) # Find Run_Time run_time_match = regex_pattern_run_time.search(file_contents) if run_time_match: runTime.append(run_time_match.group(2)) else: print("Pattern not found for Run Time.") return { "setUpMode": dict(setUpMode), "holdMode": dict(holdMode), "DRV": DRV, "density": density, "congestion_overflow": congestion_overflow, "VT_dist": dict(VT_dist), "insts_count": insts_count, "power": power, "runTime": runTime } # parsed_data = parse_log_file() # output_file_path = 'parsed_data.json' # # Write the data to the JSON file # with open(output_file_path, 'w') as json_file: # json.dump(parsed_data, json_file, indent=4) # def main(): # file_contents = file_paths() # try: # pattern_text_between = r'\s+flow\.cputime\s+flow.realtime\s+timing\.setup\.tns\s+timing\.setup\.wns\s+snapshot\nUM:\s*\d+\.?\d+\s+\d+\.?\d+\s+report_finish' # # Use re.search() to find the pattern in the string # match = re.search(pattern_text_between, file_contents, re.DOTALL | re.MULTILINE) # if match: # print(match) # else: # print("Pattern not found.") # except Exception as e # print(e) # file_path = 'logs_2/postcts.log1' # main(file_path) import os def get_max_numbered_log(logfiles, base_name): pattern = re.compile(fr"{base_name}\.log(\d*)") number_logs = [file for file in log_files if pattern.fullmatch(file)] # print(number_logs) if number_logs: print(number_logs) return False folder_path = 'D:/COE_07/COE-PRO/ZLogParse/logs_1' log_files = os.listdir(folder_path) selected_logs = [] base_names_to_check = ['floorplan', 'cts', 'prects', 'postcts'] for base_name in base_names_to_check: selected_log = get_max_numbered_log(log_files, base_name) # if selected_log: # selected_logs.append(selected_log) # print(selected_logs)
DavidJose2000/Log_parse
Zpharse.py
Zpharse.py
py
6,755
python
en
code
0
github-code
6
[ { "api_name": "re.compile", "line_number": 16, "usage_type": "call" }, { "api_name": "re.DOTALL", "line_number": 17, "usage_type": "attribute" }, { "api_name": "re.compile", "line_number": 18, "usage_type": "call" }, { "api_name": "re.compile", "line_number": ...
6794457250
from __future__ import annotations import typing from dataclasses import dataclass from anchorpy.borsh_extension import EnumForCodegen import borsh_construct as borsh class UninitializedJSON(typing.TypedDict): kind: typing.Literal["Uninitialized"] class ActiveJSON(typing.TypedDict): kind: typing.Literal["Active"] class PostOnlyJSON(typing.TypedDict): kind: typing.Literal["PostOnly"] class PausedJSON(typing.TypedDict): kind: typing.Literal["Paused"] class ClosedJSON(typing.TypedDict): kind: typing.Literal["Closed"] class TombstonedJSON(typing.TypedDict): kind: typing.Literal["Tombstoned"] @dataclass class Uninitialized: discriminator: typing.ClassVar = 0 kind: typing.ClassVar = "Uninitialized" @classmethod def to_json(cls) -> UninitializedJSON: return UninitializedJSON( kind="Uninitialized", ) @classmethod def to_encodable(cls) -> dict: return { "Uninitialized": {}, } @dataclass class Active: discriminator: typing.ClassVar = 1 kind: typing.ClassVar = "Active" @classmethod def to_json(cls) -> ActiveJSON: return ActiveJSON( kind="Active", ) @classmethod def to_encodable(cls) -> dict: return { "Active": {}, } @dataclass class PostOnly: discriminator: typing.ClassVar = 2 kind: typing.ClassVar = "PostOnly" @classmethod def to_json(cls) -> PostOnlyJSON: return PostOnlyJSON( kind="PostOnly", ) @classmethod def to_encodable(cls) -> dict: return { "PostOnly": {}, } @dataclass class Paused: discriminator: typing.ClassVar = 3 kind: typing.ClassVar = "Paused" @classmethod def to_json(cls) -> PausedJSON: return PausedJSON( kind="Paused", ) @classmethod def to_encodable(cls) -> dict: return { "Paused": {}, } @dataclass class Closed: discriminator: typing.ClassVar = 4 kind: typing.ClassVar = "Closed" @classmethod def to_json(cls) -> ClosedJSON: return ClosedJSON( kind="Closed", ) @classmethod def to_encodable(cls) -> dict: return { "Closed": {}, } @dataclass class Tombstoned: discriminator: typing.ClassVar = 5 kind: typing.ClassVar = "Tombstoned" @classmethod def to_json(cls) -> TombstonedJSON: return TombstonedJSON( kind="Tombstoned", ) @classmethod def to_encodable(cls) -> dict: return { "Tombstoned": {}, } MarketStatusKind = typing.Union[ Uninitialized, Active, PostOnly, Paused, Closed, Tombstoned ] MarketStatusJSON = typing.Union[ UninitializedJSON, ActiveJSON, PostOnlyJSON, PausedJSON, ClosedJSON, TombstonedJSON ] def from_decoded(obj: dict) -> MarketStatusKind: if not isinstance(obj, dict): raise ValueError("Invalid enum object") if "Uninitialized" in obj: return Uninitialized() if "Active" in obj: return Active() if "PostOnly" in obj: return PostOnly() if "Paused" in obj: return Paused() if "Closed" in obj: return Closed() if "Tombstoned" in obj: return Tombstoned() raise ValueError("Invalid enum object") def from_json(obj: MarketStatusJSON) -> MarketStatusKind: if obj["kind"] == "Uninitialized": return Uninitialized() if obj["kind"] == "Active": return Active() if obj["kind"] == "PostOnly": return PostOnly() if obj["kind"] == "Paused": return Paused() if obj["kind"] == "Closed": return Closed() if obj["kind"] == "Tombstoned": return Tombstoned() kind = obj["kind"] raise ValueError(f"Unrecognized enum kind: {kind}") layout = EnumForCodegen( "Uninitialized" / borsh.CStruct(), "Active" / borsh.CStruct(), "PostOnly" / borsh.CStruct(), "Paused" / borsh.CStruct(), "Closed" / borsh.CStruct(), "Tombstoned" / borsh.CStruct(), )
Ellipsis-Labs/phoenixpy
phoenix/types/market_status.py
market_status.py
py
4,121
python
en
code
5
github-code
6
[ { "api_name": "typing.TypedDict", "line_number": 8, "usage_type": "attribute" }, { "api_name": "typing.Literal", "line_number": 9, "usage_type": "attribute" }, { "api_name": "typing.TypedDict", "line_number": 12, "usage_type": "attribute" }, { "api_name": "typing....
5897258860
import pickle
import numpy as np
from flask import Flask, request, jsonify

# Load the pickled model
with open('model.pkl', 'rb') as file:
    model = pickle.load(file)

app = Flask(__name__)


# Endpoint for making predictions
@app.route('/predict', methods=['POST'])
def predict():
    try:
        data = request.get_json(force=True)

        # Assuming your input data is in the format of a list of dictionaries
        # where each dictionary represents a row in the CSV
        predictions = []
        for row in data:
            # Preprocess the input data (You may need to adjust this based on your actual data)
            input_data = [
                int(row['AGE']),
                int(row['PackHistory']),
                int(row['MWT1']),
                int(row['MWT2']),
                float(row['FEV1']),
                float(row['FVC']),
                int(row['CAT']),
                int(row['HAD']),
                float(row['SGRQ']),
                int(row['copd']),
                int(row['gender']),
                int(row['smoking'])
            ]

            # Make prediction using the loaded model
            prediction = model.predict([input_data])
            predictions.append(int(prediction[0]))

        return jsonify({'predictions': predictions})

    except Exception as e:
        return jsonify({'error': str(e)}), 400


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)
mdalamin706688/copd-ml-model
app.py
app.py
py
1,327
python
en
code
0
github-code
6
[ { "api_name": "pickle.load", "line_number": 7, "usage_type": "call" }, { "api_name": "flask.Flask", "line_number": 9, "usage_type": "call" }, { "api_name": "flask.request.get_json", "line_number": 15, "usage_type": "call" }, { "api_name": "flask.request", "lin...
38336203002
#!/usr/bin/python
from websocket import create_connection
import unittest
from common import read_info
from common import read_message
from common import check_action as c
import time
import json


class websocket_request(unittest.TestCase):
    """32. Install script"""

    def setUp(self):
        rt = read_info.ReadInfo()
        web = rt.get_device_ip()
        port = rt.get_port()
        url = web + ":" + port
        try:
            self.ws = create_connection(url, timeout=5)  # establish the device connection
            if self.ws.connected:
                print("Service %s connected successfully!" % url)
        except Exception as e:
            print("websocket connection failed: %s" % e)
            pass

    def test_install_script(self):
        """32. Install script / 32.1. Send data"""
        rm = read_message.ReadMessage()
        data_c = rm.get_data("5", "control")
        url = self.ws
        print("step 1. Take control of the device:")
        c.checkAction(url, data_c)
        time.sleep(1)

        data_initialize = rm.get_data("3", "initialize")
        print("step 2. Initialize:")
        c.checkAction(url, data_initialize)
        time.sleep(8)

        data_install_script = rm.get_data("32", "install_script")
        # Reset the name of the file to install
        data_dict = json.loads(data_install_script)
        data_dict["data"]["index"] = 1
        data_dict["data"]["name"] = "test.lua"
        print("Installing script: " + data_dict["data"]["name"])
        data_install_script = json.dumps(data_dict)
        print(data_install_script)
        print("step 3. Install the test.lua file")
        c.checkAction(url, data_install_script)
        time.sleep(2)

        data_script_start = rm.get_data("1", "run_script_start_test")
        print("step 4. Run the script installed in step 3 (test.lua):")
        c.checkAction(url, data_script_start)
        time.sleep(6)

        data_script_stop = rm.get_data("1", "run_script_stop")
        print("step 5. Stop the running script:")
        c.checkAction(url, data_script_stop)

        data_r = rm.get_data("6", "release")
        print("step 6. Release the device:")
        c.checkAction(url, data_r)

    def tearDown(self):
        self.ws.close()


if __name__ == "__main__":
    unittest.main()
leen0910/websocket_api
websocket_api/test_case/test10_InstallScript.py
test10_InstallScript.py
py
2,199
python
en
code
0
github-code
6
[ { "api_name": "unittest.TestCase", "line_number": 14, "usage_type": "attribute" }, { "api_name": "common.read_info.ReadInfo", "line_number": 17, "usage_type": "call" }, { "api_name": "common.read_info", "line_number": 17, "usage_type": "name" }, { "api_name": "web...
10691788495
import logging
from sentry.client.handlers import SentryHandler

logger = logging.getLogger()
# ensure we havent already registered the handler
if SentryHandler not in map(lambda x: x.__class__, logger.handlers):
    logger.addHandler(SentryHandler(logging.WARNING))

    # Add StreamHandler to sentry's default so you can catch missed exceptions
    logger = logging.getLogger('sentry.errors')
    logger.propagate = False
    logger.addHandler(logging.StreamHandler())
8planes/langolab
django/web/sentry_logger.py
sentry_logger.py
py
475
python
en
code
3
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 4, "usage_type": "call" }, { "api_name": "sentry.client.handlers.SentryHandler", "line_number": 6, "usage_type": "name" }, { "api_name": "sentry.client.handlers.SentryHandler", "line_number": 7, "usage_type": "call" }, ...
3885504768
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.html import mark_safe

from rooms.models import Room

from .models import User


# In case of inheriting from admin.ModelAdmin
# @admin.register(User)
# class CustomUserAdmin(admin.ModelAdmin):
#     """ Custom User Admin """
#     list_display = ("username", "email", "gender", "language", "currency", "superhost")
#     list_filter = (
#         "language",
#         "currency",
#         "superhost",
#     )


class RoomInline(admin.TabularInline):
    model = Room


# Method 1
@admin.register(User)  # attaching the decorator means the CustomUserAdmin class is used for models.User
class CustomUserAdmin(UserAdmin):
    """Custom User Admin"""

    inlines = (RoomInline,)

    fieldsets = UserAdmin.fieldsets + (
        (
            "Custom Profile",
            {
                "fields": (
                    "avatar",
                    "gender",
                    "bio",
                    "birthdate",
                    "language",
                    "currency",
                    "superhost",
                    "login_method",
                )
            },
        ),
    )

    list_display = (
        "username",
        # "get_thumbnail",
        "first_name",
        "last_name",
        "email",
        "is_active",
        "language",
        "currency",
        "superhost",
        "is_staff",
        "is_superuser",
        "email_verified",
        "email_secret",
        "login_method",
    )

    list_filter = UserAdmin.list_filter + ("superhost",)

    # def get_thumbnail(self, obj):
    #     return mark_safe(f"<img width='50px' src='{obj.avatar}' />")

    # get_thumbnail.short_description = "avatar"


# admin.site.register(models.User, CustomUserAdmin)  # Method 2
Odreystella/Pinkbnb
users/admin.py
admin.py
py
1,838
python
en
code
0
github-code
6
[ { "api_name": "django.contrib.admin.TabularInline", "line_number": 22, "usage_type": "attribute" }, { "api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name" }, { "api_name": "rooms.models.Room", "line_number": 23, "usage_type": "name" }, { "ap...
11844211331
from flask import Flask, render_template, request
from mbta_helper import find_stop_near


app = Flask(__name__, template_folder="templates")

@app.route("/")
def index():
    """
    This function asks for the user's location
    """
    return render_template("index.html")

@app.route("/POST/nearest", methods=["POST","GET"])
def find():
    """
    This function returns whether the location entered has a station nearby and if there is wheelchair accessibility
    """
    if request.method == "POST":
        place = request.form["location"]
        place = str(place)
        result = find_stop_near(place)
        if result == "MBTA Not Available":
            return render_template("notavailable.html")
        else:
            result = result.split(",")
            return render_template("available.html", location = result[0], wheelchair = result[1])


if __name__ == "__main__":
    app.run(debug=True)
nandini363/Assignment-3
app.py
app.py
py
917
python
en
code
0
github-code
6
[ { "api_name": "flask.Flask", "line_number": 5, "usage_type": "call" }, { "api_name": "flask.render_template", "line_number": 12, "usage_type": "call" }, { "api_name": "flask.request.method", "line_number": 19, "usage_type": "attribute" }, { "api_name": "flask.requ...
18002323535
from hydra import compose, initialize import logging import torch from torch.utils.tensorboard import SummaryWriter from data.dataset import get_dex_dataloader from trainer import Trainer from utils.global_utils import log_loss_summary, add_dict from omegaconf import OmegaConf from omegaconf.omegaconf import open_dict import os from os.path import join as pjoin from tqdm import tqdm import argparse from utils.interrupt_handler import InterruptHandler def process_config(cfg, save=True): root_dir = cfg["exp_dir"] os.makedirs(root_dir, exist_ok=True) with open_dict(cfg): cfg["device"] = f'cuda:{cfg["cuda_id"]}' if torch.cuda.is_available() else "cpu" if save: yaml_path = pjoin(root_dir, "config.yaml") print(f"Saving config to {yaml_path}") with open(yaml_path, 'w') as f: print(OmegaConf.to_yaml(cfg), file=f) return cfg def log_tensorboard(writer, mode, loss_dict, cnt, epoch): for key, value in loss_dict.items(): writer.add_scalar(mode + "/" + key, value / cnt, epoch) writer.flush() def main(cfg): cfg = process_config(cfg) """ Logging """ log_dir = cfg["exp_dir"] os.makedirs(log_dir, exist_ok=True) logger = logging.getLogger("TrainModel") logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') file_handler = logging.FileHandler(f'{log_dir}/log.txt') file_handler.setLevel(logging.INFO) file_handler.setFormatter(formatter) logger.addHandler(file_handler) """ Tensorboard """ writer = SummaryWriter(pjoin(log_dir, "tensorboard")) """ DataLoaders """ train_loader = get_dex_dataloader(cfg, "train") test_loader = get_dex_dataloader(cfg, "test") """ Trainer """ trainer = Trainer(cfg, logger) start_epoch = trainer.resume() """ Test """ def test_all(dataloader, mode, iteration): test_loss = {} for _, data in enumerate(tqdm(dataloader)): _, loss_dict = trainer.test(data) loss_dict["cnt"] = 1 add_dict(test_loss, loss_dict) cnt = test_loss.pop("cnt") log_loss_summary(test_loss, cnt, lambda x, y: logger.info(f'{mode} {x} is {y}')) log_tensorboard(writer, mode, test_loss, cnt, iteration) """ Train """ # Upon SIGINT, it will save the current model before exiting with InterruptHandler() as h: train_loss = {} for epoch in range(start_epoch, cfg["total_epoch"]): for _, data in enumerate(tqdm(train_loader)): loss_dict = trainer.update(data) loss_dict["cnt"] = 1 add_dict(train_loss, loss_dict) if trainer.iteration % cfg["freq"]["plot"] == 0: cnt = train_loss.pop("cnt") log_loss_summary(train_loss, cnt, lambda x, y: logger.info(f"Train {x} is {y}")) log_tensorboard(writer, "train", train_loss, cnt, trainer.iteration) train_loss = {} if trainer.iteration % cfg["freq"]["step_epoch"] == 0: trainer.step_epoch() if trainer.iteration % cfg["freq"]["test"] == 0: test_all(test_loader, "test", trainer.iteration) if trainer.iteration % cfg["freq"]["save"] == 0: trainer.save() if h.interrupted: break if h.interrupted: break trainer.save() def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--config-name", type=str, default="ipdf_config") parser.add_argument("--exp-dir", type=str, help="E.g., './ipdf_train'.") return parser.parse_args() if __name__ == "__main__": args = parse_args() initialize(version_base=None, config_path="../configs", job_name="train") if args.exp_dir is None: cfg = compose(config_name=args.config_name) else: cfg = compose(config_name=args.config_name, overrides=[f"exp_dir={args.exp_dir}"]) main(cfg)
PKU-EPIC/UniDexGrasp
dexgrasp_generation/network/train.py
train.py
py
4,171
python
en
code
63
github-code
6
[ { "api_name": "os.makedirs", "line_number": 22, "usage_type": "call" }, { "api_name": "omegaconf.omegaconf.open_dict", "line_number": 24, "usage_type": "call" }, { "api_name": "torch.cuda.is_available", "line_number": 25, "usage_type": "call" }, { "api_name": "tor...
34465917082
import torch
from torch.utils.data import DataLoader

from .coco_dataset import build_dataset


def batch_collator(batch):
    images, boxmgrs = list(zip(*batch))
    images = torch.stack(images, dim=0)
    return images, boxmgrs


def build_dataloader(cfg, is_train=True):
    dataset = build_dataset(cfg, is_train=is_train)

    batch_size = (
        cfg["DATA"]["BATCH_SIZE_TRAIN"] if is_train else cfg["DATA"]["BATCH_SIZE_VAL"]
    )

    return DataLoader(
        dataset, batch_size, shuffle=is_train, num_workers=10, collate_fn=batch_collator
    )
lmyybh/computer-vision
yolo/yolo/data/dataloader.py
dataloader.py
py
558
python
en
code
0
github-code
6
[ { "api_name": "torch.stack", "line_number": 9, "usage_type": "call" }, { "api_name": "coco_dataset.build_dataset", "line_number": 14, "usage_type": "call" }, { "api_name": "torch.utils.data.DataLoader", "line_number": 20, "usage_type": "call" } ]
12483191239
# reference: J. P. Tignol # "Galois Thoery of Algebraic Equations" chapter 12 import numpy as np from sympy import factorint,root,expand class Period:# Gaussian periods @classmethod def init(cls,p):# p must be prime n = p-1 g = 2 # generator mod p f = factorint(n) while True: for q in f: if pow(g, n//q, p)==1: break else: break g+=1 i = np.empty(p, dtype=np.int) x = np.empty(n, dtype=np.int) a = 1 for j in range(n): x[j] = a i[a] = j a *= g a %= p cls.p = p cls.x = x cls.index = i cls.factor = f @classmethod def SetDim(cls,e):# e must divide p-1 p = cls.p x = cls.x i = cls.index n = p-1 f = n//e # multiplication table w = np.zeros((e,e), dtype=np.int) for j in range(e): for k in range(j,n,e): l = (1 + x[k])%p if l: w[j,i[l]%e] += 1 else: w[j] -= f w = [np.roll(w,j,(0,1)) for j in range(e)] cls.w = np.asarray(w) @classmethod def save_context(cls): if not hasattr(cls, 'p'): return context = {'p':cls.p, 'x':cls.x, 'index':cls.index, 'factor':cls.factor} if hasattr(cls, 'context'): cls.context.append(context) else: cls.context = [context] @classmethod def restore_context(cls): if(not hasattr(cls, 'context') or len(cls.context)==0): return context = cls.context.pop() cls.p = context['p'] cls.x = context['x'] cls.index = context['index'] cls.factor = context['factor'] def __init__(self, c): self.c = c def __mul__(self, p): c = np.dot(self.c, np.dot(p.c, self.w)) return Period(c) def __pow__(self, n): m = (1<<(n.bit_length() - 1))>>1 p = self while m: p *= p if n&m: p *= self m >>= 1 return p class Ring:# ring of cyclotomic polynomial def __init__(self, c):# symbolic representation self.c = c # by array of coefficients def __add__(self, a): if isinstance(a, Ring): return Ring(self.c + a.c) elif a==0: return self else:# never occurs c = self.c.copy() c[0] += a return Ring(c) def __mul__(self, a): if isinstance(a, Ring): n = len(self.c) t = np.convolve(self.c, a.c) c = t[:n] c[:-1] += t[n:] - c[-1] c[-1] = 0 # normalize return Ring(c) else: return Ring(a*self.c) def __rmul__(self, a): return Ring(a*self.c) def cyclo_(p, recur=True): """ solve cyclotomic equation by radicals return p-th roots of unity (except 1) [exp(2pi ij/p) for j=1,2,...,p-1] p must be prime if recur is True, q-th roots of unity (q<p) are recursively replaced by radical expressions """ if p==2: return -1 if recur: Period.save_context() Period.init(p) n = 1 y = np.zeros(p-1, dtype='object') y[0] = -1 for p in list(Period.factor)[::-1]: r = np.eye(p, dtype=np.int64) r = [Ring(x) for x in r] if recur: w = cyclo_(p) else: w = [root(1,p,i) for i in range(1,p)] w = np.insert(w,0,1) i = np.outer(np.r_[:p], np.r_[:p])%p w,z = w[i],w[-i] for _ in range(Period.factor[p]): m = n*p Period.SetDim(m) v = np.zeros(m, dtype='object') v[::n] = r u = (Period(v)**p).c u = [x.c for x in np.r_[u[:n], u[-n:]]] # DFT (a.k.a. 
Lagrange resolvent) u = np.dot(u, w[:,1:]) for k in range(n): t = np.dot(y[:n], u[:n]) y[k+n:m:n] = [root(x,p,0) for x in t] # inverse DFT v[k::n] = np.dot(z, y[k:m:n])/p # cyclic permutation of periods u = np.roll(u, 1, axis=0) n = m y[:n] = [expand(x) for x in v] y = y[Period.index[1:]] if recur: Period.restore_context() return y def cyclo(n, recur=True): """ solve cyclotomic equation by radicals return n-th roots of unity (except 1) [exp(2pi ij/n) for j=1,2,...,n-1] if recur is True, q-th roots of unity (q<n) are recursively replaced by radical expressions """ if n<2: raise RuntimeError("n<2 in cyclo") f = factorint(n) z = np.empty(n, dtype='object') j = n for p in f: k,m = n//p,n z[k::k] = cyclo_(p, recur) for _ in range(1,f[p]): l = k//p a = np.r_[k:n:k][:,np.newaxis] b = np.r_[l:k:l] z[b] = [root(x,p,0) for x in z[k:m:k]] z[a+b] = z[a]*z[b] k,m = l,k if j<n: a = np.r_[j:n:j][:,np.newaxis] b = np.r_[k:n:k] z[(a+b)%n] = z[a]*z[b] j = j*k//n return z[1:]
tt-nakamura/cyclo
cyclo.py
cyclo.py
py
5,447
python
en
code
0
github-code
6
[ { "api_name": "sympy.factorint", "line_number": 13, "usage_type": "call" }, { "api_name": "numpy.empty", "line_number": 21, "usage_type": "call" }, { "api_name": "numpy.int", "line_number": 21, "usage_type": "attribute" }, { "api_name": "numpy.empty", "line_nu...
35160550288
from django.shortcuts import render, redirect from django.contrib.auth.decorators import login_required from django.contrib import messages from django.db.models import Q from .models import Employee from .forms import AddEmployeeForm @login_required(login_url='authapp:login') def index(request): context = dict() context['employees'] = Employee.objects.all().order_by('-joined_date')[:5] return render(request, 'employee/index.html', context) @login_required(login_url='authapp:login') def view_all(request): context = dict() search = '' try: search = request.GET['search'] except: pass context['employees'] = Employee.objects.all().order_by('-joined_date') if search is not None: print(search) context['employees'] = Employee.objects.filter( Q(name__icontains=search) | Q(email__icontains=search) | Q(phone__icontains=search) | Q(department__name__icontains=search) | Q(role__name__icontains=search) ).order_by('-joined_date') return render(request, 'employee/view_all.html', context) @login_required(login_url='authapp:login') def view_single_employee(request, id): context = dict() try: employee = Employee.objects.get(pk=id) except: pass context['employee'] = employee return render(request, 'employee/employee.html', context) @login_required(login_url='authapp:login') def add(request): context = dict() context['form'] = AddEmployeeForm() if request.method == 'POST': form = AddEmployeeForm(request.POST) context['form'] = form if form.is_valid(): form.save() messages.info(request, 'Employee added successfully.') return redirect('index') else: return render(request, 'employee/add.html', context) else: context['form'] = AddEmployeeForm() return render(request, 'employee/add.html', context) @login_required(login_url='authapp:login') def update(request, id): context = dict() try: employee = Employee.objects.get(pk=id) except Employee.DoesNotExist: messages.error(request, f'Empoyee does not exist with id {id}') return render(request, 'employee/update.html', context) form = AddEmployeeForm(instance=employee) context['form'] = form if request.method == 'POST': form = AddEmployeeForm(request.POST, instance=employee) context['form'] = form if form.is_valid(): form.save() messages.info(request, 'Employee updated successfully.') return redirect('employee:view_single_employee', employee.id) print(request.resolver_match.url_name) return render(request, 'employee/update.html', context) @login_required(login_url='authapp:login') def delete(request, id): try: employee = Employee.objects.get(pk=id) except Employee.DoesNotExist: messages.error(request, 'Employee does not exist.') return redirect('employee:view_all') employee.delete() messages.info(request, 'Employee deleted successfully.') return redirect('employee:view_all')
somukhan9/django-employee-management-system
employee/views.py
views.py
py
3,206
python
en
code
0
github-code
6
[ { "api_name": "models.Employee.objects.all", "line_number": 13, "usage_type": "call" }, { "api_name": "models.Employee.objects", "line_number": 13, "usage_type": "attribute" }, { "api_name": "models.Employee", "line_number": 13, "usage_type": "name" }, { "api_name...
41039585752
import logging import random import string import time import sys from decimal import Decimal from typing import Any, Callable, Optional, TypeVar, Union import requests from vega_sim.grpc.client import VegaCoreClient, VegaTradingDataClientV2 from vega_sim.proto.data_node.api.v2.trading_data_pb2 import GetVegaTimeRequest from vega_sim.proto.vega.api.v1.core_pb2 import StatisticsRequest from vega_sim.proto.vega.markets_pb2 import Market from vega_sim.tools.retry import retry T = TypeVar("T") TIME_FORWARD_URL = "{base_url}/api/v1/forwardtime" logger = logging.getLogger(__name__) class DataNodeBehindError(Exception): pass class ProposalNotAcceptedError(Exception): pass def generate_id(n: int) -> str: return "".join(random.choices(string.ascii_lowercase + (2 * string.digits), k=n)) def get_enum(value: Union[str, T, int], enum_class: Any) -> T: return ( value if isinstance(value, (type(enum_class), int)) else getattr(enum_class, value) ) def enum_to_str(e: Any, val: int) -> str: return e.keys()[e.values().index(val)] def num_to_padded_int(to_convert: float, decimals: int) -> int: return int(Decimal(str(to_convert)) * Decimal(10**decimals)) def num_from_padded_int(to_convert: Union[str, int], decimals: int) -> float: if not to_convert: return 0 to_convert = int(to_convert) if isinstance(to_convert, str) else to_convert return float(to_convert) / 10**decimals def wait_for_datanode_sync( trading_data_client: VegaTradingDataClientV2, core_data_client: VegaCoreClient, max_retries: int = 650, ) -> None: """Waits for Datanode to catch up to vega core client. Note: Will wait for datanode 'latest' time to catch up to core time when function is called. This avoids the case where a datanode consistently slightly behind the core client never returns. As such, this ensures that the data node has data from the core *at the time of call* not necessarily the latest data when the function returns. Wait time is exponential with increasing retries (each attempt waits 0.05 * 1.03^attempt_num seconds). """ attempts = 1 core_time = retry( 10, 0.5, lambda: core_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp ) trading_time = retry( 10, 0.5, lambda: trading_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp ) while core_time > trading_time: logging.debug(f"Sleeping in wait_for_datanode_sync for {0.005 * 1.1**attempts}") time.sleep(0.0005 * 1.1**attempts) try: trading_time = retry( 10, 2.0, lambda: trading_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp, ) except Exception as e: logging.warn(e) trading_time = sys.maxsize attempts += 1 if attempts >= max_retries: raise DataNodeBehindError( f"Data Node is behind and not catching up after {attempts} retries" ) def wait_for_core_catchup( core_data_client: VegaCoreClient, max_retries: int = 200, ) -> None: """Waits for core node to fully execute everything in it's backlog. Note that this operates by a rough cut of requesting time twice and checking for it being unchanged, so only works on nullchain where we control time. 
May wait forever in a standard tendermint chain """ attempts = 1 core_time = retry( 10, 0.5, lambda: core_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp ) time.sleep(0.0001) core_time_two = retry( 10, 0.5, lambda: core_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp ) while core_time != core_time_two: logging.debug(f"Sleeping in wait_for_core_catchup for {0.05 * 1.03**attempts}") core_time = retry( 10, 0.5, lambda: core_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp, ) time.sleep(0.0001 * 1.03**attempts) core_time_two = retry( 10, 0.5, lambda: core_data_client.GetVegaTime(GetVegaTimeRequest()).timestamp, ) attempts += 1 if attempts >= max_retries: raise DataNodeBehindError( f"Core Node is behind and not catching up after {attempts} retries" ) def wait_for_acceptance( submission_ref: str, submission_load_func: Callable[[str], T], ) -> T: logger.debug("Waiting for proposal acceptance") submission_accepted = False for i in range(50): try: proposal = submission_load_func(submission_ref) except: time.sleep(0.001 * 1.1**i) continue if proposal: logger.debug("Your proposal has been accepted by the network") submission_accepted = True break time.sleep(0.001 * 1.1**i) if not submission_accepted: raise ProposalNotAcceptedError( "The network did not accept the proposal within the specified time" ) return proposal def forward(time: str, vega_node_url: str) -> None: """Steps chain forward a given amount of time, either with an amount of time or until a specified time. Args: time: str, time argument to use when stepping forwards. Either an increment (e.g. 1s, 10hr etc) or an ISO datetime (e.g. 2021-11-25T14:14:00Z) vega_node_url: str, url for a Vega nullchain node """ payload = {"forward": time} req = requests.post(TIME_FORWARD_URL.format(base_url=vega_node_url), json=payload) req.raise_for_status() def statistics(core_data_client: VegaCoreClient): return retry( 10, 0.5, lambda: core_data_client.Statistics(StatisticsRequest()).statistics ) def get_settlement_asset(market: Market) -> str: return get_product(market).settlement_asset def get_product(market: Market) -> Any: product = market.tradable_instrument.instrument.WhichOneof("product") if product is None: raise Exception(f"product not set for market '{market.id}'") return getattr(market.tradable_instrument.instrument, product)
vegaprotocol/vega-market-sim
vega_sim/api/helpers.py
helpers.py
py
6,261
python
en
code
19
github-code
6
[ { "api_name": "typing.TypeVar", "line_number": 16, "usage_type": "call" }, { "api_name": "logging.getLogger", "line_number": 19, "usage_type": "call" }, { "api_name": "random.choices", "line_number": 31, "usage_type": "call" }, { "api_name": "string.ascii_lowercas...
15018597005
from utils.utils import OS import sys if OS.Linux: import matplotlib matplotlib.use("agg") import json import math import multiprocessing import random from multiprocessing import Pool from threading import Thread from typing import Union, Callable from uuid import UUID import networkx from Model.Computation import Computation from Model.GraphLayoutAlgorithm import GraphLayoutAlgorithm from Model.Network.Edge import Edge from Model.Network.Flow import Flow from Model.Network.Network import Network from Model.Network.Node import Node from Model.Result import Result from utils import utils class ModelFacade: def __init__(self, network: Network = None): self.computation_pools = {} self.pending_results = {} if network is None: network = Network(model=self) self.network = network def add_node(self, x: int = 0, y: int = 0, name: str = None, is_selected=False, uid=None) -> Node: """ Adds a node to the model. :param x: x coordinate :param y: y coordinate :param name: node name :param is_selected: selection state :param uid: UID :return: node object """ if name is None: name = f"S{self.network.node_counter}" node = Node(name=str(name), is_selected=is_selected, x=x, y=y, uid=uid) self.network.node_counter += 1 self.network.add_node(node) self.network.notify((node, "add_node")) return node def add_edge(self, start_node: Node, end_node: Node, name=None, is_selected=False, uid=None) -> Edge: """ Adds an edge to the model. :param start_node: start node :param end_node: end node :param name: edge name :param is_selected: selection state :param uid: UID :return: edge object """ try: if start_node is end_node: raise ValueError("start node equals end node") if end_node in start_node.edges or start_node in end_node.edges: raise ValueError("edge already exists") except ValueError as e: self.notify_error(e) raise e if name is None: name = f"E{self.network.edge_counter}" edge = Edge(name=name, is_selected=is_selected, start_node=start_node, end_node=end_node, uid=uid) self.network.edge_counter += 1 self.network.add_edge(edge) self.network.notify((edge, "add_edge")) start_node.notify((edge, "add_edge")) end_node.notify((edge, "add_edge")) return edge def add_flow(self, path: list[Node], name=None, is_selected=False, uid=None) -> Flow: """ Adds a flow to the model. :param path: path of the flow :param name: flow name :param is_selected: selection state :param uid: UID :return: flow object """ if name is None: name = f"F{self.network.flow_counter}" try: if len(path) < 2: raise ValueError("Path too short") except ValueError as e: self.notify_error(e) raise e self.add_missing_edges_to_path(path, is_selected=is_selected) flow = Flow(name=name, is_selected=is_selected, path=path, uid=uid) self.network.flow_counter += 1 self.network.add_flow(flow) self.network.notify((flow, "add_flow")) for node in path: node.notify((flow, "add_flow")) return flow def add_missing_edges_to_path(self, path: list[Node], is_selected=False): """ Adds edges to the model based on the given path. :param path: flow path :param is_selected: selection state of the edges """ for index, start_node in enumerate(path): if index + 1 == len(path): return end_node = path[index + 1] if end_node not in start_node.edges: self.add_edge(start_node, end_node, is_selected=is_selected) def select_component(self, component: Union[Node, Edge, Flow], activity): """ Sets the selection state of a component. 
:param component: component object :param activity: selection state """ if component.is_selected != activity: if activity: self.network.selected_components.append(component) else: self.network.selected_components.remove(component) component.is_selected = activity component.notify((component, "set_selection")) def unselect_all_components(self): """ Deselects every component of the model. """ for component in self.network.selected_components: component.is_selected = False component.notify((component, "set_selection")) self.network.selected_components.clear() def delete_component(self, component: Union[Node, Edge, Flow]): """ Deletes a component from the model. :param component: component object """ if isinstance(component, Node): self.network.delete_node(component) elif isinstance(component, Edge): self.network.delete_edge(component) elif isinstance(component, Flow): self.network.delete_flow(component) def add_configuration(self, network_configuration_class, name=None) -> type['AbstractNetworkConfiguration']: """ Adds a configuration to the model. :param network_configuration_class: a network configuration class :param name: configuration name :return: instance of a network configuration """ if name is None: name = f"{network_configuration_class.get_configuration_name()}-{self.network.configs_counter}" self.network.configs_counter += 1 flow_ids_of_interest = list(self.network.flows.keys()) network_configuration = network_configuration_class(name=name, flow_ids_of_interest=flow_ids_of_interest, model=self, associated_component=self.network) group_id = network_configuration.group_id self.network.configurations[group_id] = network_configuration self.network.notify((network_configuration, "add_configuration")) node_configuration_class = network_configuration_class.get_node_configuration_class() for node in self.network.nodes.values(): node.configurations[group_id] = node_configuration_class(group_id=group_id, model=self, associated_component=node) node.notify((node.configurations[group_id], "add_configuration")) edge_configuration_class = network_configuration_class.get_edge_configuration_class() for edge in self.network.edges.values(): edge.configurations[group_id] = edge_configuration_class(group_id=group_id, model=self, associated_component=edge) edge.notify((edge.configurations[group_id], "add_configuration")) flow_configuration_class = network_configuration_class.get_flow_configuration_class() for flow in self.network.flows.values(): flow.configurations[group_id] = flow_configuration_class(group_id=group_id, model=self, associated_component=flow) flow.notify((flow.configurations[group_id], "add_configuration")) return network_configuration def delete_configuration(self, configuration): """ Deletes a network configuration from the model. :param configuration: configuration object """ group_id = configuration.group_id if group_id in self.network.configurations: for flow in self.network.flows.values(): del flow.configurations[group_id] flow.notify((group_id, "delete_configuration")) for edge in self.network.edges.values(): del edge.configurations[group_id] edge.notify((group_id, "delete_configuration")) for node in self.network.nodes.values(): del node.configurations[group_id] node.notify((group_id, "delete_configuration")) del self.network.configurations[group_id] self.network.notify((group_id, "delete_configuration")) def update_network_name(self, name: str): """ Updates the network name. 
:param name: new name """ self.network.update_name(name) def update_network_information(self, new_text: str): """ Updates the network information. :param new_text: new information text """ self.network.information_text = new_text self.network.notify((self.network, "information_text")) @staticmethod def update_node_name(node: Node, name: str): """ Updates the name of a node. :param node: node object :param name: new name """ node.name = name node.notify((node, "set_parameter")) @staticmethod def update_node_x_y(node: Node, x: int, y: int): """ Updates the x and y coordinates of a node. :param node: node object :param x: x coordinate :param y: y coordinate """ node.x = x node.y = y node.notify((node, "set_parameter")) @staticmethod def update_edge_name(edge: Edge, name: str): """ Updates the name of an edge. :param edge: edge object :param name: new name """ edge.name = name edge.notify((edge, "set_parameter")) @staticmethod def update_flow_name(flow: Flow, name: str): """ Updates the name of a flow. :param flow: flow object :param name: new name """ flow.name = name flow.notify((flow, "set_parameter")) def insert_node_in_flow_path(self, flow: Flow, node: Node, index: int = None, is_edge_selected=False): """ Inserts a node to an existing path at a specific index. :param flow: flow object :param node: node object :param index: insertion index, use 'None' to append :param is_edge_selected: selection state of the edge """ if index is None: self.append_node_to_flow_path(flow, node, is_edge_selected=is_edge_selected) else: flow.path.insert(index, node) try: self.add_missing_edges_to_path(flow.path, is_selected=is_edge_selected) except Exception as e: flow.path.remove(node) raise e flow.notify((flow, "update_path")) def append_node_to_flow_path(self, flow: Flow, node: Node, is_edge_selected=False): """ Appends a node to an existing flow path. :param flow: flow object :param node: node object :param is_edge_selected: selection state of the edge """ if node not in flow.path[-1].edges: self.add_edge(flow.path[-1], node, is_selected=is_edge_selected) flow.path.append(node) flow.notify((flow, "update_path")) def update_flow_color(self, flow: Flow, color: str): """ Update the color of a flow. :param flow: flow object :param color: color as RGB string (#RRGGBB) """ self.check_valid_color(color) flow.color = color flow.notify((flow, "set_parameter")) def update_flow_highlight_color(self, flow: Flow, highlight_color: str): """ Update the highlight color of a flow. :param flow: flow object :param highlight_color: color as RGB string (#RRGGBB) """ self.check_valid_color(highlight_color) flow.highlight_color = highlight_color flow.notify((flow, "set_parameter")) def run_computation(self, number_of_workers=None) -> Computation: """ Runs the computation with the current configurations. 
:param number_of_workers: number of processes :return: instance of a computation """ active_configurations = list(filter(lambda c: c.is_active, self.network.configurations.values())) sys.setrecursionlimit(900000) active_configurations = list(filter(lambda c: c.is_active, list(self.network.configurations.values()))) if len(active_configurations) == 0 or not self.network.flows: raise ValueError("No active configuration or no flows") def on_error(error, error_result): error_result.error() self.notify_error(error) def make_error_callback(error_result): return lambda error: on_error(error, error_result) if number_of_workers is None: process_count = min(multiprocessing.cpu_count(), len(active_configurations)) else: process_count = number_of_workers computation = Computation() if OS.Linux: try: multiprocessing.set_start_method('spawn') except RuntimeError: pass self.computation_pools[computation] = pool = Pool(processes=process_count) for configuration in active_configurations: result = Result(configuration_name=configuration.name, configuration_id=configuration.group_id) self.pending_results[result.id] = result computation.add_result(result) pool.apply_async(self._compute_configuration, (configuration, result), callback=self._notify_result, error_callback=make_error_callback(result)) computation.start() self.network.notify((computation, "computation_started")) Thread(target=lambda: (pool.close(), pool.join(), self._end_computation(computation))).start() return computation def _compute_configuration(self, configuration, result: Result): """ Computes a configuration. :param configuration: configuration object :param result: result object to store results :return: result object """ result.start() configuration.compute(self.network, result) result.finish() return result def _end_computation(self, computation: Computation): """ Ends the computation. :param computation: computation object """ computation.finish() if computation in self.computation_pools: del self.computation_pools[computation] def cancel_computation(self, computation: Computation): """ Cancels a running computation. :param computation: computation object """ computation.cancel() if computation in self.computation_pools: self.computation_pools[computation].terminate() def _notify_result(self, pickled_result: Result): """ Notify the subscribers of the result object about finishing the computation. :param pickled_result: includes the computation results """ original_result = self.pending_results[pickled_result.id] original_result.__dict__ = original_result.__dict__ | pickled_result.__dict__ original_result.notify((original_result, "finished_result")) del self.pending_results[pickled_result.id] def update_configuration_parameters(self, configuration, dictionary: dict): """ Updates parameters of a configuration. :param configuration: configuration object :param dictionary: parameter dictionary """ try: configuration.update_parameter_dict(dictionary) except Exception as error: self.notify_error(error) raise error finally: configuration.notify((configuration, "set_parameter")) def update_configuration_name(self, group_id: UUID, new_name: str): """ Updates the name of a configuration. 
:param group_id: group ID of the configuration :param new_name: new name """ configuration = self.network.configurations[group_id] configuration.name = new_name configuration.notify((configuration, "set_parameter")) for flow in self.network.flows.values(): configuration = flow.configurations[group_id] configuration.notify((configuration, "set_parameter")) for edge in self.network.edges.values(): configuration = edge.configurations[group_id] configuration.notify((configuration, "set_parameter")) for node in self.network.nodes.values(): configuration = node.configurations[group_id] configuration.notify((configuration, "set_parameter")) def add_flow_of_interest(self, group_id: UUID, flow: Flow): """ Adds a flow of interest to a list of flow of interests. :param group_id: group ID of a configuration :param flow: flow object """ configuration = self.network.configurations[group_id] if flow.id not in configuration.flow_ids_of_interest: configuration.flow_ids_of_interest.append(flow.id) configuration.notify((configuration, "set_parameter")) flow_configuration = flow.configurations[group_id] flow_configuration.is_flow_of_interest = True flow_configuration.notify((flow_configuration, "set_parameter")) def remove_flow_of_interest(self, group_id: UUID, flow: Flow): """ Removes a flow of interest from a list of flow of interests. :param group_id: group ID of a configuration :param flow: flow object """ configuration = self.network.configurations[group_id] if flow.id in configuration.flow_ids_of_interest: configuration.flow_ids_of_interest.remove(flow.id) configuration.notify((configuration, "set_parameter")) flow_configuration = flow.configurations[group_id] flow_configuration.is_flow_of_interest = False flow_configuration.notify((flow_configuration, "set_parameter")) def notify_error(self, error: Exception): """ Notify the subscribers of the network about an error. :param error: error """ utils.print_exception_traceback(error) self.network.notify((error, "error")) def import_network(self, json_file: str): """ Imports a network from a JSON file. :param json_file: JSON file """ try: new_model = ModelFacade.load_network(json_file) except Exception as e: self.notify_error(e) return self.network.notify((new_model, "new_model")) def generate_random_network_in_new_model(self, seed=None, min_num_nodes=100, max_num_nodes=150, min_num_edges=200, max_num_edges=200, delete_not_connected_nodes=True) -> 'ModelFacade': """ Generates a network and returns a new model. 
:param seed: network seed :param min_num_nodes: minimum number of nodes :param max_num_nodes: maximum number of nodes :param min_num_edges: minimum number of edges :param max_num_edges: maximum number of edges :param delete_not_connected_nodes: whether unconnected node should be removed :return: model """ rand = random.Random(seed) model = ModelFacade(network=Network(network_id=UUID(bytes=rand.randbytes(16), version=4))) used_nodes = set() num_nodes = rand.randint(min_num_nodes, max_num_nodes) column_num = int(math.sqrt(num_nodes)) y = -1 for i in range(num_nodes): x = i % column_num if x == 0: y += 1 model.add_node(x * 200, y * 200, uid=UUID(bytes=rand.randbytes(16), version=4)) all_nodes = list(model.network.nodes.values()) num_edges = rand.randint(min_num_edges, max_num_edges) for i in range(num_edges): node_i = rand.choice(all_nodes) node_j = rand.choice(all_nodes) while node_j == node_i: node_j = rand.choice(all_nodes) try: if node_j not in node_i.edges and node_i not in node_j.edges: model.add_edge(node_i, node_j, uid=UUID(bytes=rand.randbytes(16), version=4)) used_nodes.add(node_i) used_nodes.add(node_j) except ValueError: continue if delete_not_connected_nodes: for node in used_nodes.symmetric_difference(model.network.nodes.values()): model.delete_component(node) self.network.notify((model, "new_model")) return model def generate_networkx_network_in_new_model(self, generator: Callable[..., networkx.Graph], uid_seed=None, **kwargs) -> 'ModelFacade': """ Generates a network using NetworkX and returns a new model. :param generator: NetworkX generation function :param uid_seed: seed to recreate UIDs of network components :param kwargs: arguments for the NetworkX generation function :return: model """ uid_rand = random.Random(uid_seed) try: graph: networkx.Graph = generator(**kwargs) scale = ModelFacade._get_layout_scale(graph) pos = networkx.kamada_kawai_layout(graph, scale=scale) model = ModelFacade() mapping = {} for nx_node in graph.nodes: x, y = pos[nx_node] node = model.add_node(x=x, y=y, name=nx_node, uid=UUID(bytes=uid_rand.randbytes(16), version=4)) mapping[nx_node] = node for (i, j) in graph.edges: node_i, node_j = mapping[i], mapping[j] model.add_edge(node_i, node_j, uid=UUID(bytes=uid_rand.randbytes(16), version=4)) except Exception as e: self.notify_error(e) raise e self.network.notify((model, "new_model")) return model def add_random_flows(self, min_num_flows=5, max_num_flows=10, min_num_nodes_in_flow_path=2, max_num_nodes_in_flow_path=10, flow_seed=None): """ Adds random flows to a network. 
:param min_num_flows: minimum number of flows :param max_num_flows: maximum number of flows :param min_num_nodes_in_flow_path: minimum number of nodes in a flow path :param max_num_nodes_in_flow_path: maximum number of nodes in a flow path :param flow_seed: flow seed """ if min_num_nodes_in_flow_path < 2 or max_num_nodes_in_flow_path < 2: raise ValueError("Minimal 2 nodes needed for flow") graph = self.make_networkx_graph() nodes = list(self.network.nodes.values()) rand = random.Random(flow_seed) target_num_flows = rand.randint(min_num_flows, max_num_flows) failed_combinations = set() max_combinations = len(nodes) ** 2 * (max_num_nodes_in_flow_path - min_num_nodes_in_flow_path + 1) added_flows = 0 while added_flows < target_num_flows: start_node = rand.choice(nodes) end_node = rand.choice(nodes) path_length = rand.randint(min_num_nodes_in_flow_path, max_num_nodes_in_flow_path) if start_node == end_node: failed_combinations.add((start_node, end_node, path_length)) if len(failed_combinations) == max_combinations: raise ValueError( f"Cannot find a simple path with length " + f"[{min_num_nodes_in_flow_path}, {max_num_nodes_in_flow_path}]") while (start_node, end_node, path_length) in failed_combinations: start_node = rand.choice(nodes) end_node = rand.choice(nodes) path_length = rand.randint(min_num_nodes_in_flow_path, max_num_nodes_in_flow_path) found_path = None for path in networkx.all_simple_paths(graph, start_node, end_node, cutoff=path_length): if len(path) == path_length: found_path = path break if found_path is None: failed_combinations.add((start_node, end_node, path_length)) continue self.add_flow(path=found_path, uid=UUID(bytes=rand.randbytes(16), version=4)) added_flows += 1 def make_networkx_graph(self) -> networkx.Graph: """ Translates a network into a NetworkX graph. :return: NetworkX graph """ graph = networkx.Graph() graph.add_nodes_from(self.network.nodes.values()) edges = self.network.edges.values() starts = map(lambda e: e.start, edges) ends = map(lambda e: e.end, edges) graph.add_edges_from(zip(starts, ends)) return graph def change_graph_layout(self, layout: GraphLayoutAlgorithm = GraphLayoutAlgorithm.fruchterman_reingold): """ Changes the layout of the network using NetworkX. 
:param layout: NetworkX layout type """ G = networkx.Graph() initial_pos = {} for node_id, node in self.network.nodes.items(): G.add_node(node_id) initial_pos[node_id] = node.x, node.y for edge in self.network.edges.values(): G.add_edge(edge.start.id, edge.end.id) scale = ModelFacade._get_layout_scale(G) try: if layout == GraphLayoutAlgorithm.fruchterman_reingold: pos = networkx.fruchterman_reingold_layout(G, k=scale, pos=initial_pos, scale=scale) elif layout == GraphLayoutAlgorithm.planar: pos = networkx.planar_layout(G, scale=scale * 1.5) elif layout == GraphLayoutAlgorithm.shell: pos = networkx.shell_layout(G, scale=scale) elif layout == GraphLayoutAlgorithm.kamada_kawai: pos = networkx.kamada_kawai_layout(G, pos=initial_pos, scale=scale) elif layout == GraphLayoutAlgorithm.spectral: pos = networkx.spectral_layout(G, scale=scale) elif layout == GraphLayoutAlgorithm.spiral: pos = networkx.spiral_layout(G, scale=scale) elif layout == GraphLayoutAlgorithm.circular: pos = networkx.circular_layout(G, scale=scale) elif layout == GraphLayoutAlgorithm.random: pos = networkx.random_layout(G) for node_id, coord in pos.items(): pos[node_id] = scale * coord else: raise ValueError("Unknown Algorithm") except Exception as e: self.notify_error(e) raise e for node_id, (x, y) in pos.items(): node = self.network.nodes[node_id] self.update_node_x_y(node, int(x), int(y)) @staticmethod def _get_layout_scale(G: networkx.Graph): """ Returns the scale of the NetworkX graph. :param G: NetworkX graph :return: scale """ num_components = G.number_of_nodes() + G.number_of_edges() base = 1.07 if num_components <= 20: x = math.log(num_components, base) elif num_components <= 50: x = math.log(num_components, base - 0.03) elif num_components <= 100: x = math.log(num_components, base - 0.04) elif num_components <= 500: x = math.log(num_components, base - 0.05) else: x = math.log(num_components, base - 0.06) return x * 10 def _get_flows_as_networkx_digraph(self): """ Creates a digraph containing flows. :return: NetworkX digraph """ g = networkx.DiGraph() flow: Flow for flow in self.network.flows.values(): for index, node_i in enumerate(flow.path): if index + 1 == len(flow.path): break g.add_edge(node_i, flow.path[index + 1]) return g def are_flows_cyclic(self): """ Checks whether the network contains cyclic flows. :return: whether flows are cyclic """ g = self._get_flows_as_networkx_digraph() return not networkx.is_directed_acyclic_graph(g) def get_flow_cycles(self): """ Returns cyclic flows. :return: simple cycles as defined by NetworkX """ g = self._get_flows_as_networkx_digraph() return networkx.simple_cycles(g) def export_network(self, path: str): """ Exports the network as JSON file. :param path: path to the JSON file """ with open(path, 'w') as f: json_string = self.network.to_json() f.write(json_string) @staticmethod def load_network(path: str): """ Loads a network from a JSON file. :param path: path to the JSON file :return: the network model """ with open(path) as json_file: json_dict = json.load(json_file) model = ModelFacade() network = Network.from_json(json_dict, model) model.network = network return model def check_valid_color(self, color: str): """ Checks whether a color is in a valid RGB format. :param color: color string :return: whether string is valid """ try: if len(color) != 7: raise ValueError() int(color[1:7], 16) except ValueError: e = ValueError(f"Illegal Color: \"{color}\". 
Expected \"#rrggbb\" format.") self.notify_error(e) raise e def __getstate__(self): """ Used to pickle the object for process communication :return: network dictionary """ return {"network": self.network}
Moni5656/npba
Model/ModelFacade.py
ModelFacade.py
py
30,489
python
en
code
0
github-code
6
[ { "api_name": "utils.utils.OS.Linux", "line_number": 4, "usage_type": "attribute" }, { "api_name": "utils.utils.OS", "line_number": 4, "usage_type": "name" }, { "api_name": "matplotlib.use", "line_number": 7, "usage_type": "call" }, { "api_name": "Model.Network.Ne...
3955950488
# -*- coding: utf-8 -*- """check cache status * check cache status * this file uses standalone """ import sys import os import json import time import subprocess import configparser from glob import glob from state_list import get_error_message, DONE config = configparser.ConfigParser() config.read("config.ini") MAIL_ADDRESS = config.get("general", "mail_address") CACHE_PATH = "./cache" def create_filter_caches(span): """filtered caches to cache list in span Args: span (int): cache check span Returns: filtered_cache_paths (list): cache paths in span """ all_caches = glob(CACHE_PATH + "/*.json") if not all_caches: return False sorted_caches = sorted(all_caches, key=lambda x: os.path.getctime(x), reverse=True) filtered_cache_paths = [] current_time = time.time() for cache in sorted_caches: cache_time = os.path.getctime(cache) if current_time - cache_time < span: filtered_cache_paths.append(cache) else: break return filtered_cache_paths def create_cache_statuses(cache_paths): """create status list from cache_list Args: cache_paths (list): cache paths in span Returns: statuses (list): status list """ statuses = [] try: for cache in cache_paths: ret = json.load(open(cache)) status = ret[-1] statuses.append(str(status)) except FileNotFoundError: # not found cache return False return statuses def create_count_statuses(statuses): """count status appeared times Args: statuses (list): status list Returns: status_counts (list): status key with appeared times """ status_counts = {} for status in statuses: if status in status_counts: # already found, count up status_counts[status] += 1 else: status_counts[status] = 1 return status_counts def create_messages(counts): """status counts to messages Args: counts (collection): status key with appeared times Returns: messages (list): messages """ messages = [] sorted_counts = sorted(counts.items(), key=lambda x: x[1], reverse=True) sorted_keys = [key[0] for key in sorted_counts] if str(DONE) in sorted_keys: messages.append("Good") else: messages.append("*Careful*") for key in sorted_keys: error_message = get_error_message(int(key), 2) messages.append(key + " : " + str(counts[key]) + " " + error_message + "\n") return messages def create_mail_title(span, condition): """time to messages Args: span (int): cache check span condition (str): server condition Returns: title (string): time and conditions to mail title """ span_hour = str(int(span / 3600)) return condition + " " + span_hour + "h" def create_mail_body(messages): """status counts to messages Args: messages (list): messages Returns: body (string): statuses to mail body """ body_message = messages[1:] body = "".join(body_message) return body def create_mail(span): """create mail from caches which made in span Args: span (int): cache check span Returns: title (string): time and conditions to mail title body (string): statuses to mail body """ cache_paths = create_filter_caches(span) if not cache_paths: title = create_mail_title(span, "***Bad***") body = "no cache available" return title, body statuses = create_cache_statuses(cache_paths) if not statuses: title = create_mail_title(span, "***Bad***") body = "may move cache" return title, body counts = create_count_statuses(statuses) messages = create_messages(counts) title = create_mail_title(span, messages[0]) body = create_mail_body(messages) return title, body def main(): """main function Args: Returns: """ # if no args, then exit if not sys.argv[1]: return span = int(sys.argv[1]) title, body = create_mail(span) cmd = 'echo "' + body + '" | mail -s "' + 
title + '" ' + MAIL_ADDRESS subprocess.call(cmd, shell=True) if __name__ == "__main__": main()
Neilsaw/PriLog_web
watchdog_status.py
watchdog_status.py
py
4,426
python
en
code
30
github-code
6
[ { "api_name": "configparser.ConfigParser", "line_number": 19, "usage_type": "call" }, { "api_name": "glob.glob", "line_number": 38, "usage_type": "call" }, { "api_name": "os.path.getctime", "line_number": 42, "usage_type": "call" }, { "api_name": "os.path", "l...
38867577492
from inspect import signature from functools import wraps import werdsazxc from platforms.config import CODE_DICT as config import json import threading import inspect import pickle import time import requests import logging import re logger = logging.getLogger('robot') requests.packages.urllib3.disable_warnings() alert_pattern = re.compile('(?<=alert\([\'\"]).*?(?=[\'\"]\))') default_headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'} BETSLIP_ALLOW_LIST = {'1_13': '棋牌_银河', '1_2': '棋牌_开元', '1_5': '棋牌_JDB', '1_7': '棋牌_MG', '1_9': '棋牌_LEG', '1_15': '棋牌_SGWIN', '1_18': '棋牌_新世界', '1_20': '棋牌_美天', '1_21': '棋牌_百胜', '1_22': '棋牌_FG', '1_34': '棋牌_PS', '2_13': '捕鱼_银河', '2_2': '捕鱼_开元', '2_3': '捕鱼_CQ9', '2_5': '捕鱼_JDB', '2_7': '捕鱼_MG', '2_8': '捕鱼_BBIN', '2_9': '捕鱼_LEG', '2_10': '捕鱼_AG', '2_16': '捕鱼_BG', '2_18': '捕鱼_新世界', '2_20': '捕鱼_美天', '2_21': '捕鱼_百胜', '2_22': '捕鱼_FG', '2_24': '捕鱼_FC', '2_27': '捕鱼_KA', '2_34': '捕鱼_PS', '3_13': '电子_银河', '3_3': '电子_CQ9', '3_5': '电子_JDB', '3_7': '电子_MG', '3_8': '电子_BBIN(旧)', '3_9': '电子_LEG', '3_10': '电子_AG', '3_14': '电子_PG', '3_20': '电子_美天', '3_21': '电子_百胜', '3_22': '电子_FG', '3_24': '电子_FC', '3_27': '电子_KA', '3_33': '电子_BNG', '3_34': '电子_PS', '3_37': '电子_PP','3_75': '电子_BBIN', '4_8': '真人_BBIN', '4_10': '真人_AG', '4_16': '真人_BG', '4_17': '真人_eBET', '5_6': '体育_利记', '5_8': '体育_BBIN', '5_19': '体育_沙巴', '5_36': '体育_三升', '8_8': '彩票_BBIN', '8_11': '彩票_双赢', '8_35': '彩票_云博'} spin_betslip_gamedict = { '140048': '[PG电子]双囍临门', '140050': '[PG电子]嘻游记', '140054': '[PG电子]赏金船长', '140060': '[PG电子]爱尔兰精灵', '140061': '[PG电子]唐伯虎点秋香', '140065': '[PG电子]麻将胡了', '140071': '[PG电子]赢财神', '140074': '[PG电子]麻将胡了2', '140075': '[PG电子]福运象财神', '140079': '[PG电子]澳门壕梦', '140082': '[PG电子]凤凰传奇', '140083': '[PG电子]火树赢花', '140084': '[PG电子]赏金女王', '140087': '[PG电子]寻宝黄金城', '140089': '[PG电子]招财喵', '140091': '[PG电子]冰火双娇', '140095': '[PG电子]宝石传奇', '140100': '[PG电子]糖心风暴', '140104': '[PG电子]亡灵大盗', '140105': '[PG电子]霹雳神偷', '140106': '[PG电子]麒麟送宝', '140119': '[PG电子]百鬼夜行', '140121': '[PG电子]日月星辰', '140122': '[PG电子]神鹰宝石', '30016': '[CQ9电子]六颗扭蛋', '30022': '[CQ9电子]跳高高', '30023': '[CQ9电子]跳起来', '30025': '[CQ9电子]跳高高2', '30026': '[CQ9电子]五福临门', '30028': '[CQ9电子]鸿福齐天', '30029': '[CQ9电子]武圣', '30030': '[CQ9电子]宙斯', '30031': '[CQ9电子]蹦迪', '30032': '[CQ9电子]跳过来', '30033': '[CQ9电子]直式蹦迪', '30034': '[CQ9电子]单手跳高高', '30037': '[CQ9电子]跳起来2', '30038': '[CQ9电子]野狼Disco', '30040': '[CQ9电子]六颗糖', '30041': '[CQ9电子]直式洪福齐天', '30043': '[CQ9电子]发财神2', '30044': '[CQ9电子]金鸡报喜', '30045': '[CQ9电子]东方神起', '30046': '[CQ9电子]火烧连环船2', '30086': '[CQ9电子]血之吻', '8003': '[JDB电子]变脸', '8006': '[JDB电子]台湾黑熊', '8015': '[JDB电子]月光秘宝', '8020': '[JDB电子]芝麻开门', '8044': '[JDB电子]江山美人', '8047': '[JDB电子]变脸2', '8048': '[JDB电子]芝麻开门2', '14006': '[JDB电子]亿万富翁', '14016': '[JDB电子]王牌特工', '14030': '[JDB电子]三倍金刚', '14033': '[JDB电子]飞鸟派对', '14035': '[JDB电子]龙舞', '14041': '[JDB电子]雷神之锤', '14061': '[JDB电子]玛雅金疯狂', '14042': '[JDB电子]聚宝盆', '514079': '[JDB电子]富豪哥2', } spin_betslip_gametype = {'3_14':'PG 电子','3_3':'CQ9 电子','3_5':'JDB 电子'} class ThreadProgress(threading.Thread): def __init__(self, cf, mod_key, detail=True): super().__init__() self.lst = [] self.cf = cf self.mod_key = mod_key self.running = True self.detail = detail def stop(self): self.running = False def run(self): from gui.Apps import return_schedule # 新版进度回传API保留所有进度状态, 全部回传 if self.detail: while self.running or self.lst: try: time.sleep(.1) item = self.lst.pop(0) return_schedule(self.cf, self.mod_key, **item) except IndexError as e: continue # 旧版进度回传API保留最后一个进度状态, 
只回传最后一个
        else:
            while self.running:
                try:
                    time.sleep(1)
                    item = self.lst.pop()
                    self.lst.clear()
                    return_schedule(self.cf, self.mod_key, **item)
                except IndexError as e:
                    continue


# decorator: once applied, liveTime is extended every 60 seconds
def keep_connect(func):
    class Keep(threading.Thread):
        def __init__(self, cf):
            super().__init__()
            self.cf = cf

        def run(self):
            from gui.Apps import keep_connect
            t = threading.current_thread()
            while getattr(t, 'running', True) and self.cf['token']:
                keep_connect(self.cf)
                time.sleep(55)

    @wraps(func)
    def wrapper(*args, **kwargs):
        # read the first line of the docstring; it is used as the function name in later log records
        funcname = func.__doc__.split('\n')[0]
        # map the incoming arguments into a parameter dict
        sig = signature(func)
        bound = sig.bind_partial(*args, **kwargs)
        bound.apply_defaults()
        arguments = bound.arguments
        # start a thread that keeps the connection alive
        cf = arguments['cf']
        t = Keep(cf)
        t.start()
        # run the wrapped function
        result = func(*args, **kwargs)
        # stop the thread
        t.running = False
        return result
    return wrapper


class NotSignError(Exception):
    '''custom exception: when the account has been signed out, log back in to the GPK platform automatically'''
    pass


class NullError(Exception):
    '''custom exception: when the account has been signed out, log back in to the GPK platform automatically'''
    pass


def log_info(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        # read the first line of the docstring; it is used as the function name in later log records
        funcname = func.__doc__.split('\n')[0]
        # map the incoming arguments into a parameter dict
        sig = signature(func)
        bound = sig.bind_partial(*args, **kwargs)
        bound.apply_defaults()
        arguments = bound.arguments
        # parameters excluded from the log record
        exclud_args = ["url", "endpoints", "timeout", "args", "kwargs"]
        # log the parameters
        logger.info(f'{funcname} 网址: {arguments.get("url")}{arguments.get("endpoints")}')
        # logger.info(f'{funcname} 参数: {dict((k, v) for k, v in arguments.items() if k not in exclud_args)}')
        # run the function
        result = func(*args, **kwargs)
        # on error, log the whole returned content, then delete the raw data after logging
        if result["IsSuccess"] is False and result['ErrorCode'] != config.SUCCESS_CODE.code:
            if result.get('RawStatusCode'):
                logger.warning(f"网页原始状态码为: {result.get('RawStatusCode')}")
            if result.get('RawContent'):
                logger.warning(f"网页原始内容为: {result.get('RawContent')}")
            if result.get('RawStatusCode'):
                del result["RawStatusCode"]
            if result.get('RawContent'):
                del result["RawContent"]
        # log the result
        logger.info(f'{funcname} 返回: {werdsazxc.Dict(result)}')
        return result
    return wrapper


# check whether the types of the parameters passed in by the system are valid
def check_type(cls):
    @wraps(cls)
    def wrapper(*args, **kwargs):
        from .mission import BaseFunc
        # map the incoming arguments into a parameter dict
        sig = signature(cls)
        bound = sig.bind_partial(*args, **kwargs)
        bound.apply_defaults()
        arguments = bound.arguments.get('kwargs')
        system_dict = {**BaseFunc.Meta.system_dict, **cls.Meta.return_value['include']}
        [system_dict.pop(i, None) for i in cls.Meta.return_value['exclude']]
        rep = [k for k, v in arguments.items() if k in system_dict and type(v) != system_dict[k]]
        if rep:
            logger.warning(f"型别异常参数: {rep}")
            return_content = {
                'IsSuccess': False,
                'ErrorCode': config.PARAMETER_ERROR.code,
                'ErrorMessage': config.PARAMETER_ERROR.msg
            }
            for i in cls.Meta.return_value['data']:
                if i in arguments:
                    return_content[i] = arguments[i]
                elif i in ['BetAmount', 'AllCategoryCommissionable', 'GameCommissionable', 'SingleCategoryCommissionable']:
                    return_content[i] = '0.00'
                else:
                    return_content[i] = ''
            return return_content
        # run the function
        result = cls(*args, **kwargs)
        return result
    return wrapper


def catch_exception(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        # read the first line of the docstring; it is used as the function name in later log records
        funcname = func.__doc__.split('\n')[0]
        # map the incoming arguments into a parameter dict
        sig = signature(func)
        bound = sig.bind_partial(*args, **kwargs)
        bound.apply_defaults()
        arguments = bound.arguments
        # counter for the number of errors
        count = 1
        cf = arguments['cf']
        while count <= cf['retry_times']:
            try:
                result = func(*args, **kwargs)
                break
            # check whether the schema was provided
            except requests.exceptions.MissingSchema as e:
                logger.debug(f'{e.__class__.__name__} {e}')
                return {
                    'IsSuccess': False,
                    'ErrorCode': config.EXCEPTION_CODE.code,
                    'ErrorMessage': f'平台设定错误, 通讯协定(http或https)未输入',
                }
            # check whether the schema is valid
            except requests.exceptions.InvalidSchema as e:
                logger.debug(f'{e.__class__.__name__} {e}')
                return {
                    'IsSuccess': False,
                    'ErrorCode': config.EXCEPTION_CODE.code,
                    'ErrorMessage': f'平台设定错误, 通讯协定(http或https)无法解析',
                }
            # check whether the URL is valid
            except requests.exceptions.InvalidURL as e:
                logger.debug(f'{e.__class__.__name__} {e}')
                return {
                    'IsSuccess': False,
                    'ErrorCode': config.EXCEPTION_CODE.code,
                    'ErrorMessage': f'平台设定错误, 无法解析',
                }
            # a redirect exception occurred
            except requests.exceptions.TooManyRedirects as e:
                logger.debug(f'{e.__class__.__name__} {e}')
                return {
                    'IsSuccess': False,
                    'ErrorCode': config.EXCEPTION_CODE.code,
                    'ErrorMessage': f'平台设定错误, 发生重导向异常',
                }
            # catch being signed out
            except (NotSignError, json.JSONDecodeError) as e:
                from .module import session
                from .module import login
                if type(e) == json.JSONDecodeError:
                    logger.info(f'json.JSONDecodeError: {e.doc}')
                if (cf['need_backend_otp'] is False
                        and hasattr(session, 'url')
                        and hasattr(session, 'acc')
                        and hasattr(session, 'pw')):
                    login(cf=cf, url=session.url, acc=session.acc, pw=session.pw, otp='')
                    continue
                return {
                    'IsSuccess': False,
                    'ErrorCode': config.SIGN_OUT_CODE.code,
                    'ErrorMessage': config.SIGN_OUT_CODE.msg.format(platform=cf.platform),
                }
            #
            except NullError as e:
                if count < cf['retry_times']:
                    time.sleep(1)
                    continue
                return {
                    'IsSuccess': False,
                    'ErrorCode': config.SIGN_OUT_CODE.code,
                    'ErrorMessage': config.SIGN_OUT_CODE.msg.format(platform=cf.platform),
                }
            # key error
            except KeyError as e:
                logger.debug(f'{e.__class__.__name__} {e}')
                return {
                    'IsSuccess': False,
                    'ErrorCode': config.EXCEPTION_CODE.code,
                    'ErrorMessage': config.EXCEPTION_CODE.msg,
                }
            # catch connection exceptions
            except (requests.exceptions.ConnectionError,
                    requests.exceptions.Timeout,
                    requests.exceptions.ContentDecodingError) as e:
                logger.debug(f'{e.__class__.__name__} {e}')
                logger.debug(f'{e.__class__.__name__} ({count}/3)...')
                if count >= 3:
                    return {
                        'IsSuccess': False,
                        'ErrorCode': config.CONNECTION_CODE.code,
                        'ErrorMessage': config.CONNECTION_CODE.msg,
                    }
                if f'与{cf.get("platform")}连线异常...' not in cf['error_msg']:
                    cf['error_msg'].append(f'与{cf.get("platform")}连线异常...')
                time.sleep(3)
                count += 1
            # field errors, missing permissions, etc.
            except IndexError as e:
                logger.debug(f'{e.__class__.__name__} {e}')
                local_envs = inspect.getinnerframes(e.__traceback__)[-1].frame.f_locals
                resp = local_envs.get('resp')
                if resp:
                    status_code = resp.status_code
                    content = resp.content
                else:
                    status_code = '函数未设定resp变数, 请修改变数命名规则'
                    content = '函数未设定resp变数, 请修改变数命名规则'
                # use the locally resolved values so a missing resp does not raise here
                return {
                    'IsSuccess': False,
                    'ErrorCode': config.HTML_CONTENT_CODE.code,
                    'ErrorMessage': config.HTML_CONTENT_CODE.msg,
                    'RawStatusCode': status_code,
                    'RawContent': content
                }
            # catch program errors
            except Exception as e:
                werdsazxc.log_trackback()
                local_envs = inspect.getinnerframes(e.__traceback__)
                local_envs = [frame for frame in local_envs if frame.function == func.__name__]
                if local_envs:
                    local_envs = local_envs[-1].frame.f_locals
                    resp = local_envs.get('resp')
                    if resp:
                        status_code = resp.status_code
                        content = resp.content
                    else:
                        status_code = '函数未设定resp变数, 请修改变数命名规则'
                        content = '函数未设定resp变数, 请修改变数命名规则'
                else:
                    status_code = ''
                    content = ''
                return {
                    'IsSuccess': False,
                    'ErrorCode': config.EXCEPTION_CODE.code,
                    'ErrorMessage': f'未知异常- {e.__class__.__name__}: {e}',
                    'RawStatusCode': status_code,
                    'RawContent': content
                }
        return result
    return wrapper
gleam542/platforms
platforms/wg/utils.py
utils.py
py
16,213
python
zh
code
0
github-code
6
[ { "api_name": "logging.getLogger", "line_number": 13, "usage_type": "call" }, { "api_name": "requests.packages.urllib3.disable_warnings", "line_number": 14, "usage_type": "call" }, { "api_name": "requests.packages", "line_number": 14, "usage_type": "attribute" }, { ...
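The record above wires retry and error-normalization behaviour into platform calls through decorators. A minimal, self-contained sketch of that pattern follows; the function name fetch_balance, the URL, and the numeric error code are illustrative placeholders and are not part of the repository:

import requests
from functools import wraps

def catch_exception_demo(func):
    # simplified stand-in for a catch_exception-style decorator: retry on
    # connection errors up to cf['retry_times'], otherwise return a normalized dict
    @wraps(func)
    def wrapper(*args, **kwargs):
        cf = kwargs.get('cf', {'retry_times': 3})
        for attempt in range(1, cf['retry_times'] + 1):
            try:
                return func(*args, **kwargs)
            except requests.exceptions.ConnectionError as e:
                if attempt == cf['retry_times']:
                    return {'IsSuccess': False, 'ErrorCode': -1, 'ErrorMessage': str(e)}
    return wrapper

@catch_exception_demo
def fetch_balance(cf, url):
    '''fetch balance demo'''
    resp = requests.get(url, timeout=5)
    return {'IsSuccess': True, 'Balance': resp.json().get('balance')}

print(fetch_balance(cf={'retry_times': 2}, url='http://127.0.0.1:9/unreachable'))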
35560642063
import numpy as np import matplotlib.pyplot as plt import matplotlib from mayavi import mlab from scipy.ndimage import map_coordinates from scipy import signal, interpolate from PIL import Image, ImageDraw from matplotlib.colors import ListedColormap from tqdm import tqdm, trange def create_block_diagram(strat, prop, facies, dx, ve, xoffset, yoffset, scale, ci, plot_strat, plot_contours, plot_sides, color_mode, bottom, topo_min, topo_max, export, opacity): """function for creating a 3D block diagram in Mayavi strat - input array with stratigraphic surfaces facies - property or facies array dx - size of gridcells in the horizontal direction in 'strat' ve - vertical exaggeration offset - offset in the y-direction relative to 0 scale - scaling factor ci - contour interval strat_switch - 1 if you want to plot stratigraphy on the sides; 0 otherwise contour_switch - 1 if you want to plot contours on the top surface; 0 otherwise bottom - elevation value for the bottom of the block""" r,c,ts = np.shape(strat) # if z is increasing downward: if np.max(strat[:, :, -1] - strat[:, :, 0]) < 0: strat = -1 * strat z = scale*strat[:,:,ts-1].T if plot_strat: z1 = strat[:,:,0].T else: z1 = strat[:,:,-1].T X1 = scale*(xoffset + np.linspace(0,c-1,c)*dx) # x goes with c and y with r Y1 = scale*(yoffset + np.linspace(0,r-1,r)*dx) X1_grid , Y1_grid = np.meshgrid(X1, Y1) if export == 1: surf = mlab.surf(X1,Y1,z,warp_scale=ve,colormap='gist_earth',vmin=scale*topo_min,vmax=scale*topo_max, opacity = opacity) # cmapf = matplotlib.cm.get_cmap('Blues_r',256) BluesBig = matplotlib.cm.get_cmap('Blues_r', 512) newcmp = ListedColormap(BluesBig(np.linspace(0.0, 1.0, 256))) normf = matplotlib.colors.Normalize(vmin=scale*topo_min,vmax=scale*topo_max) z_range = np.linspace(scale*topo_min, scale*topo_max, 256) surf.module_manager.scalar_lut_manager.lut.table = (np.array(newcmp(normf(z_range)))*255).astype('uint8') else: # if color_mode == 'property': # mlab.mesh(X1_grid, Y1_grid, ve*z, scalars = prop[:, :, -1], colormap='YlOrBr', vmin=0, vmax=1, opacity = opacity) # if not plot_sides: # mlab.mesh(X1_grid, Y1_grid, ve*scale*strat[:,:,0].T, scalars = facies[:, :, 0], colormap='YlOrBr', vmin=0, vmax=1, opacity = opacity) # else: mlab.surf(X1, Y1, z, warp_scale=ve, colormap='gist_earth', opacity = opacity) #, line_width=5.0, vmin=scale*topo_min,vmax=scale*topo_max, representation='wireframe') if not plot_sides: mlab.surf(X1, Y1, scale*strat[:,:,0].T, warp_scale=ve, colormap='gist_earth', opacity=opacity) #colormap='gist_earth',vmin=scale*topo_min,vmax=scale*topo_max, opacity = opacity) if plot_contours: vmin = scale * topo_min #np.min(strat[:,:,-1]) vmax = scale * topo_max #np.max(strat[:,:,-1]) contours = list(np.arange(vmin, vmax, ci*scale)) # list of contour values mlab.contour_surf(X1, Y1, z, contours=contours, warp_scale=ve, color=(0,0,0), line_width=1.0) if plot_sides: gray = (0.6,0.6,0.6) # color for plotting sides # updip side: vertices, triangles = create_section(z1[:,0],dx,bottom) x = scale*(xoffset + vertices[:,0]) y = scale*(yoffset + np.zeros(np.shape(vertices[:,0]))) z = scale*ve*vertices[:,1] mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity) # downdip side: vertices, triangles = create_section(z1[:,-1],dx,bottom) x = scale*(xoffset + vertices[:,0]) y = scale*(yoffset + (r-1)*dx*np.ones(np.shape(vertices[:,0]))) z = scale*ve*vertices[:,1] mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity) # left edge (looking downdip): vertices, triangles = create_section(z1[0,:],dx,bottom) x 
= scale*(xoffset + np.zeros(np.shape(vertices[:,0]))) y = scale*(yoffset + vertices[:,0]) z = scale*ve*vertices[:,1] mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity) # right edge (looking downdip): vertices, triangles = create_section(z1[-1,:],dx,bottom) x = scale*(xoffset + (c-1)*dx*np.ones(np.shape(vertices[:,0]))) y = scale*(yoffset + vertices[:,0]) z = scale*ve*vertices[:,1] mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity) # bottom face of block: vertices = dx*np.array([[0,0],[c-1,0],[c-1,r-1],[0,r-1]]) triangles = [[0,1,3],[1,3,2]] x = scale*(xoffset + vertices[:,0]) y = scale*(yoffset + vertices[:,1]) z = scale*bottom*np.ones(np.shape(vertices[:,0])) mlab.triangular_mesh(x, y, ve*z, triangles, color=gray, opacity = opacity) def add_stratigraphy_to_block_diagram(strat, prop, facies, dx, ve, xoffset, yoffset, scale, plot_surfs, color_mode, colors, colormap, line_thickness, export, opacity): """function for adding stratigraphy to the sides of a block diagram colors layers by relative age strat - input array with stratigraphic surfaces facies - 1D array of facies codes for layers h - channel depth (height of point bar) thalweg_z - array of thalweg elevations for each layer dx - size of gridcells in the horizontal direction in 'strat' ve - vertical exaggeration offset - offset in the y-direction relative to 0 scale - scaling factor plot_surfs - if equals 1, stratigraphic boundaries will be plotted on the sides as black lines color_mode - determines what kind of plot is created; can be 'property', 'time', or 'facies' colors - colors scheme for facies (list of RGB values) line_thickness - tube radius for plotting layers on the sides export - if equals 1, the display can be saved as a VRML file for use in other programs (e.g., 3D printing)""" r,c,ts=np.shape(strat) if color_mode == 'time': norm = matplotlib.colors.Normalize(vmin=0.0, vmax=ts-1) cmap = matplotlib.cm.get_cmap(colormap) if (color_mode == 'property') | (color_mode == 'facies'): norm = matplotlib.colors.Normalize(vmin=0.0, vmax=0.35) cmap = matplotlib.cm.get_cmap(colormap) for layer_n in trange(ts-1): # main loop vmin = 0.0 vmax = 0.35 top = strat[:,0,layer_n+1] # updip side base = strat[:,0,layer_n] if color_mode == "property": props = prop[:,0,layer_n] if plot_surfs: Y1 = scale*(yoffset + dx*np.arange(0,r)) X1 = scale*(xoffset + np.zeros(np.shape(base))) Z1 = ve*scale*base mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness) if np.max(top-base)>0: Points,Inds = triangulate_layers(top,base,dx) for i in range(len(Points)): vertices = Points[i] triangles, scalars = create_triangles(vertices) Y1 = scale*(yoffset + vertices[:,0]) X1 = scale*(xoffset + dx*0*np.ones(np.shape(vertices[:,0]))) Z1 = scale*vertices[:,1] if color_mode == "property": scalars = props[Inds[i]] else: scalars = [] plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity) top = strat[:,-1,layer_n+1] # downdip side base = strat[:,-1,layer_n] if color_mode == "property": props = prop[:,-1,layer_n] if plot_surfs: Y1 = scale*(yoffset + dx*np.arange(0,r)) X1 = scale*(xoffset + dx*(c-1)*np.ones(np.shape(base))) Z1 = ve*scale*base mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness) if np.max(top-base)>0: Points,Inds = triangulate_layers(top,base,dx) for i in range(len(Points)): vertices = Points[i] triangles, scalars = create_triangles(vertices) Y1 = scale*(yoffset + vertices[:,0]) X1 = scale*(xoffset + 
dx*(c-1)*np.ones(np.shape(vertices[:,0]))) Z1 = scale*vertices[:,1] if color_mode == "property": scalars = props[Inds[i]] else: scalars = [] plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity) top = strat[0,:,layer_n+1] # left edge (looking downdip) base = strat[0,:,layer_n] if color_mode == "property": props = prop[0,:,layer_n] if plot_surfs: Y1 = scale*(yoffset + np.zeros(np.shape(base))) X1 = scale*(xoffset + dx*np.arange(0,c)) Z1 = ve*scale*base mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness) if np.max(top-base)>0: Points,Inds = triangulate_layers(top,base,dx) for i in range(len(Points)): vertices = Points[i] triangles, scalars = create_triangles(vertices) Y1 = scale*(yoffset + dx*0*np.ones(np.shape(vertices[:,0]))) X1 = scale*(xoffset + vertices[:,0]) Z1 = scale*vertices[:,1] if color_mode == "property": scalars = props[Inds[i]] else: scalars = [] plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity) top = strat[-1,:,layer_n+1] # right edge (looking downdip) base = strat[-1,:,layer_n] if color_mode == "property": props = prop[-1,:,layer_n] if plot_surfs: Y1 = scale*(yoffset + dx*(r-1)*np.ones(np.shape(base))) X1 = scale*(xoffset + dx*np.arange(0,c)) Z1 = ve*scale*base mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness) if np.max(top-base)>0: Points,Inds = triangulate_layers(top,base,dx) for i in range(len(Points)): vertices = Points[i] triangles, scalars = create_triangles(vertices) Y1 = scale*(yoffset + dx*(r-1)*np.ones(np.shape(vertices[:,0]))) X1 = scale*(xoffset + vertices[:,0]) Z1 = scale*vertices[:,1] if color_mode == "property": scalars = props[Inds[i]] else: scalars = [] plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity) def create_exploded_view(strat, prop, facies, x0, y0, nx, ny, gap, dx, ve, scale, plot_strat, plot_surfs, plot_contours, plot_sides, color_mode, colors, colormap, line_thickness, bottom,export, topo_min, topo_max, ci, opacity): """function for creating an exploded-view block diagram inputs: strat - stack of stratigraphic surfaces facies - 1D array of facies codes for layers topo - stack of topographic surfaces nx - number of blocks in x direction ny - number of blocks in y direction gap - gap between blocks (number of gridcells) dx - gridcell size ve - vertical exaggeration scale - scaling factor (for whole model) strat_switch - if equals 1, the stratigraphy will be plotted on the sides of the blocks plot_surfs - if equals 1, the stratigraphic surfaces will be plotted on the sides (adds a lot of triangles - not good for 3D printing) contour_swicth - if equals 1, contours will be plotted on the top surface color_mode - determines what kind of plot is created; can be 'property', 'time', or 'facies' colors - colors scheme for facies (list of RGB values) line_thickness - - tube radius for plotting layers on the sides bottom - elevation value for the bottom of the block export - if equals 1, the display can be saved as a VRML file for use in other programs (e.g., 3D printing)""" r,c,ts = np.shape(strat) count = 0 for i in range(nx): for j in range(ny): x1 = i * int(c/nx) x2 = (i+1) * int(c/nx) y1 = j * int(r/ny) y2 = (j+1) * int(r/ny) xoffset = x0 + (x1+i*gap)*dx yoffset = y0 + (y1+j*gap)*dx if color_mode == "property": 
create_block_diagram(strat[y1:y2,x1:x2,:], prop[y1:y2,x1:x2,:], facies[y1:y2,x1:x2,:], dx, ve, xoffset, yoffset, scale, ci, plot_strat, plot_contours, plot_sides, color_mode, bottom, topo_min, topo_max, export, opacity) if plot_strat: add_stratigraphy_to_block_diagram(strat[y1:y2,x1:x2,:], prop[y1:y2,x1:x2,:], facies[y1:y2,x1:x2,:], dx, ve, xoffset, yoffset, scale, plot_surfs, color_mode, colors, colormap, line_thickness, export, opacity) else: create_block_diagram(strat[y1:y2,x1:x2,:], prop, facies[y1:y2,x1:x2,:], dx, ve, xoffset, yoffset, scale, ci, plot_strat, plot_contours, plot_sides, color_mode, bottom, topo_min, topo_max, export, opacity) if plot_strat: add_stratigraphy_to_block_diagram(strat[y1:y2,x1:x2,:], prop, facies[y1:y2,x1:x2,:], dx, ve, xoffset, yoffset, scale, plot_surfs, color_mode, colors, colormap, line_thickness, export, opacity) count = count+1 print("block "+str(count)+" done, out of "+str(nx*ny)+" blocks") def create_fence_diagram(strat, prop, facies, x0, y0, nx, ny, dx, ve, scale, plot_surfs, plot_sides, color_mode, colors, colormap, line_thickness, bottom, export, opacity): """function for creating a fence diagram inputs: strat - stack of stratigraphic surfaces facies - 1D array of facies codes for layers topo - stack of topographic surfaces nx - number of strike sections ny - number of dip sections dx - gridcell size ve - vertical exaggeration scale - scaling factor (for whole model) plot_surfs - if equals 1, the stratigraphic surfaces will be plotted on the sides (adds a lot of triangles - not good for 3D printing) color_mode - determines what kind of plot is created; can be 'property', 'time', or 'facies' colors - colors scheme for facies (list of RGB values) line_thickness - - tube radius for plotting layers on the sides bottom - elevation value for the bottom of the block export - if equals 1, the display can be saved as a VRML file for use in other programs (e.g., 3D printing)""" r,c,ts=np.shape(strat) gray = (0.6,0.6,0.6) norm = matplotlib.colors.Normalize(vmin=0.0, vmax=ts-1) cmap = matplotlib.cm.get_cmap(colormap) vmin = np.min(prop) vmax = np.max(prop) gray = (0.6,0.6,0.6) # color for plotting sides z = scale*strat[:,:,ts-1].T z1 = strat[:,:,0].T xoffset = 0; yoffset = 0 # updip side: vertices, triangles = create_section(z1[:,0],dx,bottom) x = scale*(xoffset + vertices[:,0]) y = scale*(yoffset + np.zeros(np.shape(vertices[:,0]))) z = scale*ve*vertices[:,1] mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity) # downdip side: vertices, triangles = create_section(z1[:,-1],dx,bottom) x = scale*(xoffset + vertices[:,0]) y = scale*(yoffset + (r-1)*dx*np.ones(np.shape(vertices[:,0]))) z = scale*ve*vertices[:,1] mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity) # left edge (looking downdip): vertices, triangles = create_section(z1[0,:],dx,bottom) x = scale*(xoffset + np.zeros(np.shape(vertices[:,0]))) y = scale*(yoffset + vertices[:,0]) z = scale*ve*vertices[:,1] mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity) # right edge (looking downdip): vertices, triangles = create_section(z1[-1,:],dx,bottom) x = scale*(xoffset + (c-1)*dx*np.ones(np.shape(vertices[:,0]))) y = scale*(yoffset + vertices[:,0]) z = scale*ve*vertices[:,1] mlab.triangular_mesh(x, y, z, triangles, color=gray, opacity = opacity) # bottom face of block: vertices = dx*np.array([[0,0],[c-1,0],[c-1,r-1],[0,r-1]]) triangles = [[0,1,3],[1,3,2]] x = scale*(xoffset + vertices[:,0]) y = scale*(yoffset + vertices[:,1]) z = 
scale*bottom*np.ones(np.shape(vertices[:,0])) mlab.triangular_mesh(x, y, ve*z, triangles, color=gray, opacity = opacity) section_inds = np.hstack((0, int(c/(nx+1)) * np.arange(1, nx+1), c-1)) for x1 in tqdm(section_inds): # strike sections if plot_sides: vertices, triangles = create_section(strat[:,x1,0],dx,bottom) y = y0 + scale*(vertices[:,0]) x = x0 + scale*(x1*dx+np.zeros(np.shape(vertices[:,0]))) z = scale*ve*vertices[:,1] mlab.triangular_mesh(x,y,z,triangles,color=gray) for layer_n in range(ts-1): # main loop top = strat[:,x1,layer_n+1] base = strat[:,x1,layer_n] if color_mode == 'property': props = prop[:,x1,layer_n] if plot_surfs: Y1 = y0 + scale*(dx*np.arange(0,r)) X1 = x0 + scale*(x1*dx+np.zeros(np.shape(base))) Z1 = ve*scale*base mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness) if np.max(top-base)>0: Points,Inds = triangulate_layers(top,base,dx) for i in range(len(Points)): vertices = Points[i] triangles, scalars = create_triangles(vertices) Y1 = y0 + scale*(vertices[:,0]) X1 = x0 + scale*(x1*dx+dx*0*np.ones(np.shape(vertices[:,0]))) Z1 = scale*vertices[:,1] if color_mode == 'property': scalars = props[Inds[i]] else: scalars = [] # plot_layers_on_one_side(layer_n,facies,color_mode,colors,X1,Y1,Z1,ve,triangles,vertices,scale*scalars,cmap,norm,vmin,vmax,export) plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity) section_inds = np.hstack((0, int(r/(ny+1)) * np.arange(1, ny+1), r-1)) for y1 in tqdm(section_inds): # dip sections if plot_sides: vertices, triangles = create_section(strat[y1,:,0],dx,bottom) y = y0 + scale*(y1*dx+np.zeros(np.shape(vertices[:,0]))) x = x0 + scale*(vertices[:,0]) z = scale*ve*vertices[:,1] mlab.triangular_mesh(x,y,z,triangles,color=gray) for layer_n in range(ts-1): # main loop top = strat[y1,:,layer_n+1] base = strat[y1,:,layer_n] if color_mode == 'property': props = prop[y1,:,layer_n] if plot_surfs: Y1 = y0 + scale*(y1*dx+np.zeros(np.shape(base))) X1 = x0 + scale*(dx*np.arange(0,c)) Z1 = ve*scale*base mlab.plot3d(X1,Y1,Z1,color=(0,0,0),tube_radius=line_thickness) if np.max(top-base)>0: Points,Inds = triangulate_layers(top,base,dx) for i in range(len(Points)): vertices = Points[i] triangles, scalars = create_triangles(vertices) Y1 = y0 + scale*(y1*dx + dx*0*np.ones(np.shape(vertices[:,0]))) X1 = x0 + scale*(vertices[:,0]) Z1 = scale*vertices[:,1] if color_mode == 'property': scalars = props[Inds[i]] else: scalars = [] # plot_layers_on_one_side(layer_n,facies,color_mode,colors,X1,Y1,Z1,ve,triangles,vertices,scale*scalars,cmap,norm,vmin,vmax,export) plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity) # print('done with section '+str(nsec)+' of '+str(ny)+' dip sections') r,c = np.shape(strat[:,:,-1]) Y1 = scale*(np.linspace(0,r-1,r)*dx) X1 = scale*(np.linspace(0,c-1,c)*dx) topo_min = np.min(strat[:,:,-1]) topo_max = np.max(strat[:,:,-1]) mlab.surf(X1, Y1, scale*strat[:,:,-1].T, warp_scale=ve, colormap='gist_earth', vmin=scale*topo_min, vmax=scale*topo_max, opacity=0.15) def triangulate_layers(top,base,dx): """function for creating vertices of polygons that describe one layer""" x = dx * np.arange(0,len(top)) ind1 = np.argwhere(top-base>0).flatten() ind2 = np.argwhere(np.diff(ind1)>1) ind2 = np.vstack((np.array([[-1]]),ind2)) ind2 = np.vstack((ind2,np.array([[len(top)]]))) Points = [] # list for points to be triangulated Inds = [] for i in 
range(len(ind2)-1): ind3 = ind1[int(ind2[i])+1:int(ind2[i+1])+1] if (ind3[0] != 0) & (ind3[-1] != len(top)-1): ind3 = np.hstack((ind3[0]-1,ind3)) ind3 = np.hstack((ind3,ind3[-1]+1)) top1 = top[ind3][:-1] base1 = base[ind3][1:] x1 = np.concatenate((x[ind3][:-1], x[ind3][::-1][:-1])) inds = np.concatenate((ind3[:-1], ind3[::-1][:-1])) if (ind3[0] == 0) & (ind3[-1] != len(top)-1): ind3 = np.hstack((ind3,ind3[-1]+1)) top1 = top[ind3][:-1] base1 = base[ind3] x1 = np.concatenate((x[ind3][:-1], x[ind3][::-1])) inds = np.concatenate((ind3[:-1], ind3[::-1])) if (ind3[0] != 0) & (ind3[-1] == len(top)-1): ind3 = np.hstack((ind3[0]-1,ind3)) top1 = top[ind3] base1 = base[ind3][1:] x1 = np.concatenate((x[ind3], x[ind3][::-1][:-1])) inds = np.concatenate((ind3, ind3[::-1][:-1])) if (ind3[0] == 0) & (ind3[-1] == len(top)-1): top1 = top[ind3] base1 = base[ind3] x1 = np.concatenate((x[ind3], x[ind3][::-1])) inds = np.concatenate((ind3, ind3[::-1])) npoints = len(top1)+len(base1) y = np.hstack((top1,base1[::-1])) vertices = np.vstack((x1,y)).T Points.append(vertices) Inds.append(inds) return Points,Inds def create_triangles(vertices): """function for creating list of triangles from vertices inputs: vertices - 2 x n array with coordinates of polygon returns: triangles - indices of the 'vertices' array that from triangles (for triangular mesh) scalars - 'fake' elevation values for each vertex of the polygon, used for coloring (relies on the base of the polygon)""" n = len(vertices[:,0]) Z1 = vertices[:,1] triangles = [] if (np.mod(n,2)==0) & (vertices[int((n-1)/2),0] != vertices[int((n-1)/2+1),0]): # if polygon is in the interior of the block triangles.append([0,1,n-1]) for i in range(1,int(n/2-1)): triangles.append([i,i+1,n-i]) triangles.append([i+1,n-i,n-i-1]) triangles.append([int(n/2-1),int(n/2),int(n/2+1)]) scalars = np.hstack((Z1[0],Z1[int(n/2):][::-1],Z1[int(n/2)+1:])) if (np.mod(n,2)==0) & (vertices[int((n-1)/2),0] == vertices[int((n-1)/2+1),0]): # if polygon touches both sides of the block for i in range(0,int(n/2-1)): triangles.append([i,i+1,n-i-1]) triangles.append([i+1,n-i-1,n-i-2]) scalars = np.hstack((Z1[int(n/2):][::-1],Z1[int(n/2):])) if np.mod(n,2)!=0: # if polygon has one segment on the side of the block if vertices[int((n-1)/2),0] == vertices[int((n-1)/2+1),0]: # if polygon touches the right side of the block triangles.append([0,1,n-1]) for i in range(1,int((n-1)/2)): triangles.append([i,i+1,n-i]) triangles.append([i+1,n-i,n-i-1]) scalars = np.hstack((Z1[0],Z1[int((n+1)/2):][::-1],Z1[int((n+1)/2):])) else: for i in range(0,int((n-1)/2)-1): # if polygon touches the left side of the block triangles.append([i,i+1,n-i-1]) triangles.append([i+1,n-i-1,n-i-2]) triangles.append([int((n-1)/2-1),int((n-1)/2),int((n-1)/2+1)]) scalars = np.hstack((Z1[int((n+1)/2)-1:][::-1],Z1[int((n+1)/2):])) return triangles, scalars def create_section(profile,dx,bottom): """function for creating a cross section from a top surface inputs: profile - elevation data for top surface dx - gridcell size bottom - elevation value for the bottom of the block returns: vertices - coordinates of vertices triangles - indices of the 'vertices' array that from triangles (for triangular mesh) """ x1 = dx*np.linspace(0, len(profile)-1, len(profile)) x = np.hstack((x1, x1[::-1])) y = np.hstack((profile, bottom*np.ones(np.shape(x1)))) vertices = np.vstack((x, y)).T n = len(x) triangles = [] for i in range(0,int((n-1)/2)): triangles.append([i,i+1,n-i-1]) triangles.append([i+1,n-i-1,n-i-2]) return vertices, triangles def 
plot_layers_on_one_side(layer_n, facies, color_mode, colors, X1, Y1, Z1, ve, triangles, vertices, scalars, colormap, norm, vmin, vmax, export, opacity): """function for plotting layers on one side of a block inputs: layer_n - layer number facies - 1D array of facies codes for layers color_mode - determines what kind of plot is created; can be 'property', 'time', or 'facies' colors - list of RGB values used if color_mode is 'facies' X1,Y1,Z1 - coordinates of mesh vertices ve - vertical exaggeration triangles - indices of triangles used in mesh vertices - coordinates of the vertices scalars - scalars used for coloring the mesh in 'property' mode (= z-value of the base of current layer) cmap - colormap used for layers in 'time' mode norm - color normalization function used in 'time' mode export - if equals 1, the display can be saved as a VRML file for use in other programs (e.g., 3D printing) """ if color_mode == 'time': cmap = matplotlib.cm.get_cmap(colormap) mlab.triangular_mesh(X1, Y1, ve*Z1, triangles, color = cmap(norm(layer_n))[:3], opacity = opacity) if color_mode == 'property': # color based on property map mlab.triangular_mesh(X1, Y1, ve*Z1, triangles, scalars=scalars, colormap=str(colormap), vmin=vmin, vmax=vmax, opacity = opacity) if color_mode == 'facies': mlab.triangular_mesh(X1,Y1,ve*Z1, triangles, color=tuple(colors[int(facies[0, 0, layer_n])]), opacity = opacity) def create_random_section_2_points(strat,facies,scale,ve,color_mode,colors,colormap,x1,x2,y1,y2,s1,dx,bottom,export,opacity): r, c, ts = np.shape(strat) dist = dx*((x2-x1)**2 + (y2-y1)**2)**0.5 s2 = s1*dx+dist num = int(dist/float(dx)) cmap = matplotlib.cm.get_cmap(colormap) norm = matplotlib.colors.Normalize(vmin=0.0, vmax=ts-1) Xrand, Yrand, Srand = np.linspace(x1,x2,num), np.linspace(y1,y2,num), np.linspace(s1*dx,s2,num) base = map_coordinates(strat[:,:,0], np.vstack((Yrand,Xrand))) vertices, triangles = create_section(base,dx,bottom) gray = (0.6,0.6,0.6) # color for plotting basal part of panel mlab.triangular_mesh(scale*np.hstack((dx*Xrand,dx*Xrand[::-1])),scale*np.hstack((dx*Yrand,dx*Yrand[::-1])),scale*ve*vertices[:,1],triangles,color=gray) for layer_n in trange(0,ts-1): top = map_coordinates(strat[:,:,layer_n+1], np.vstack((Yrand,Xrand))) base = map_coordinates(strat[:,:,layer_n], np.vstack((Yrand,Xrand))) if np.max(top-base)>1e-6: Points, Inds = triangulate_layers(top,base,dx) for i in range(len(Points)): vertices = Points[i] inds = Inds[i] triangles, scalars = create_triangles(vertices) X1 = scale*dx*Xrand[inds] Y1 = scale*dx*Yrand[inds] Z1 = scale*vertices[:,1] mlab.plot3d(X1,Y1,Z1*ve,color=(0,0,0),tube_radius=0.5) vmin = 0; vmax = 1 plot_layers_on_one_side(layer_n,facies,color_mode,colors,X1,Y1,Z1,ve,triangles,vertices,scalars,colormap,norm,vmin,vmax,export,opacity) def create_random_section_n_points(strat,facies,topo,scale,ve,color_mode,colors,colormap,x1,x2,y1,y2,dx,bottom,export,opacity): r, c, ts = np.shape(strat) if len(x1)==1: create_random_section_2_points(strat,facies,scale,ve,color_mode,colors,colormap,x1,x2,y1,y2,0,dx,bottom,export,opacity) else: count = 0 dx1,dy1,ds1,s1 = compute_derivatives(x1,y1) for i in range(len(x1)): create_random_section_2_points(strat,facies,scale,ve,color_mode,colors,colormap,x1[i],x2[i],y1[i],y2[i],s1[i],dx,bottom,export,opacity) count = count+1 # print("panel "+str(count)+" done, out of "+str(len(x1))+" panels") def create_random_cookie(strat,facies,topo,scale,ve,color_mode,colors,colormap,x1,x2,y1,y2,dx,bottom,export,opacity): r, c, ts = np.shape(strat) count = 0 
dx1,dy1,ds1,s1 = compute_derivatives(x1,y1) for i in range(len(x1)): create_random_section_2_points(strat,facies,scale,ve,color_mode,colors,colormap,x1[i],x2[i],y1[i],y2[i],s1[i],dx,bottom,export,opacity) count = count+1 # print("panel "+str(count)+" done, out of "+str(len(x1)+1)+" panels") create_random_section_2_points(strat,facies,scale,ve,color_mode,colors,colormap,x2[-1],x1[0],y2[-1],y1[0],s1[-1]+np.sqrt((x1[0]-x2[-1])**2+(y1[0]-y2[-1])**2),dx,bottom,export,opacity) polygon = [] for i in range(len(x1)): polygon.append((x1[i]+0.5, y1[i]+0.5)) polygon.append((x2[-1]+0.5, y2[-1]+0.5)) img = Image.fromarray(np.zeros(np.shape(strat[:,:,-1]))) ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1) img = np.array(img) mask = np.ones_like(strat[:,:,-1]).astype(bool) mask[img == 1] = False r,c = np.shape(strat[:,:,-1]) Y1 = scale*(np.linspace(0,r-1,r)*dx) X1 = scale*(np.linspace(0,c-1,c)*dx) topo_min = np.min(strat[:,:,-1]) topo_max = np.max(strat[:,:,-1]) mlab.surf(X1, Y1, scale*strat[:,:,-1].T, mask=mask.T, warp_scale=ve, colormap='gist_earth', vmin=scale*topo_min, vmax=scale*topo_max) def compute_derivatives(x,y): dx = np.diff(x) # first derivatives dy = np.diff(y) ds = np.sqrt(dx**2+dy**2) s = np.hstack((0,np.cumsum(ds))) return dx, dy, ds, s class LineBuilder: def __init__(self, line): self.line = line self.xs = list(line.get_xdata()) self.ys = list(line.get_ydata()) self.cid = line.figure.canvas.mpl_connect('button_press_event', self) def __call__(self, event): if event.inaxes!=self.line.axes: return self.xs.append(event.xdata) self.ys.append(event.ydata) self.line.set_data(self.xs, self.ys) self.line.figure.canvas.draw() def select_random_section(strat): fig = plt.figure(figsize=(8,6)) ax = fig.add_subplot(111) ax.imshow(strat[:,:,-1],cmap='viridis') plt.tight_layout() ax.set_title('click to build line segments') line, = ax.plot([], []) # empty line linebuilder = LineBuilder(line) xcoords = linebuilder.xs ycoords = linebuilder.ys return xcoords, ycoords def plot_strat_diagram(time, elevation, time_units, elev_units, end_time, max_elevation): fig = plt.figure(figsize=(9,6)) ax1 = fig.add_axes([0.07, 0.08, 0.85, 0.76]) # [left, bottom, width, height] ax1.set_xlabel('time (' + time_units + ')', fontsize = 12) ax1.set_ylabel('elevation (' + elev_units + ')', fontsize = 12) for tick in ax1.xaxis.get_major_ticks(): tick.label.set_fontsize(10) for tick in ax1.yaxis.get_major_ticks(): tick.label.set_fontsize(10) ax2 = fig.add_axes([0.92, 0.08, 0.05, 0.76]) ax2.set_xticks([]) ax2.set_yticks([]) ax3 = fig.add_axes([0.07, 0.84, 0.85, 0.08]) ax3.set_yticks([]) ax3.set_xticks([]) ax1.set_xlim(0, end_time) elev_range = max_elevation - np.min(elevation) ylim1 = np.min(elevation)# - 0.02 * elev_range ylim2 = max_elevation + 0.02 * elev_range ax1.set_ylim(ylim1, ylim2) ax2.set_xlim(0, 1) ax2.set_ylim(ylim1, ylim2) ax3.set_ylim(0, 1) ax3.set_xlim(0, end_time) ax4 = fig.add_axes([0.07, 0.92, 0.6, 0.08]) ax4.set_xlim(0, 10) ax4.set_ylim(0, 1) ax1.plot(time, elevation, 'xkcd:medium blue', linewidth = 3) strat = np.minimum.accumulate(elevation[::-1])[::-1] # stratigraphic 'elevation' unconf_inds = np.where(strat != elevation)[0] # indices where 'strat' curve is different from elevation inds = np.where(np.diff(unconf_inds)>1)[0] # indices where deposition starts again, after erosion inds = np.hstack((inds, len(unconf_inds)-1)) # add last index if strat[-1] - strat[-2] == 0: inds = np.hstack((inds, len(unconf_inds)-1)) if len(unconf_inds) > 0: strat_tops = strat[unconf_inds[inds]+1] # stratigraphic tops else: 
strat_tops = [] strat_top_ages = [] # ages of the stratigraphic tops for i in range(len(strat_tops)): # generate list of ages of stratigraphic tops strat_top_ages.append(np.min(time[strat >= strat_tops[i]])) loc_max_elev = signal.find_peaks(elevation)[0] loc_min_elev = signal.find_peaks(-elevation)[0] if elevation[-1] < elevation[-2]: loc_min_elev = np.hstack((loc_min_elev, len(elevation)-1)) if (len(loc_min_elev) > 0) & (len(loc_max_elev) > 0): if elevation[1] < elevation[0]: # add first point as a local maximum elevation if the series starts out erosionally loc_max_elev = np.hstack((0, loc_max_elev)) if loc_min_elev[0] < loc_max_elev[0]: ind = np.argmax(elevation[0 : loc_min_elev[0]]) loc_max_elev = np.sort(np.hstack((loc_max_elev, ind))) for i in range(len(loc_min_elev)-1): if len(loc_max_elev[loc_max_elev > loc_min_elev[i]]) > 0: if np.min(loc_max_elev[loc_max_elev > loc_min_elev[i]]) > loc_min_elev[i+1]: ind = np.argmax(elevation[loc_min_elev[i] : loc_min_elev[i+1]]) ind = loc_min_elev[i] + ind loc_max_elev = np.sort(np.hstack((loc_max_elev, ind))) else: ind = np.argmax(elevation[loc_min_elev[i] : loc_min_elev[i+1]]) ind = loc_min_elev[i] + ind loc_max_elev = np.sort(np.hstack((loc_max_elev, ind))) for i in range(len(loc_max_elev)-1): if len(loc_min_elev[loc_min_elev > loc_max_elev[i]]) > 0: if np.min(loc_min_elev[loc_min_elev > loc_max_elev[i]]) > loc_max_elev[i+1]: ind = np.argmin(elevation[loc_max_elev[i] : loc_max_elev[i+1]]) ind = loc_max_elev[i] + ind loc_min_elev = np.sort(np.hstack((loc_min_elev, ind))) else: ind = np.argmin(elevation[loc_max_elev[i] : loc_max_elev[i+1]]) ind = loc_max_elev[i] + ind loc_min_elev = np.sort(np.hstack((loc_min_elev, ind))) erosion_start_times = time[loc_max_elev] # times when erosion starts erosion_end_times = time[loc_min_elev] # times when erosion ends erosion_start_elevations = elevation[loc_max_elev] # elevations when erosion starts erosion_end_elevations = elevation[loc_min_elev] # elevations when erosion ends if (len(loc_min_elev) > 0) & (len(loc_max_elev) > 0): for i in range(len(erosion_end_times)): # plot erosional segments ax1.plot(time[loc_max_elev[i]:loc_min_elev[i]+1], elevation[loc_max_elev[i]:loc_min_elev[i]+1], 'xkcd:red', linewidth=3) if len(erosion_start_times) > len(erosion_end_times): # plot last erosional segment (if needed) ax1.plot(time[loc_max_elev[-1]:], elevation[loc_max_elev[-1]:], 'xkcd:red', linewidth=3) strat_top_labels = ['s' for strat_top in strat_tops] # labels for stratigraphic tops erosion_start_labels = ['es' for erosion_start_time in erosion_start_times] # labels for start of erosion erosion_end_labels = ['ee' for erosion_end_time in erosion_end_times] # labels for end of erosion time_bounds = np.hstack((strat_top_ages, erosion_start_times, erosion_end_times)) # all time boundaries sort_inds = np.argsort(time_bounds) # indices for sorting time_bounds = time_bounds[sort_inds] # sort time boundaries elevation_bounds = np.hstack((strat_tops, erosion_start_elevations, erosion_end_elevations)) # all elevation boundaries elevation_bounds = elevation_bounds[sort_inds] # sort elevation boundaries bound_labels = np.hstack((strat_top_labels, erosion_start_labels, erosion_end_labels)) # all boundary labels bound_labels = bound_labels[sort_inds] # sort boundary labels time_bounds = np.hstack((time[0], time_bounds, time[-1])) # add first and last time step to time boundaries elevation_bounds = np.hstack((elevation[0], elevation_bounds, elevation[-1])) # add first and last elevation values if elevation[-1] - elevation[-2] < 
0: # add first and last boundary labels bound_labels = np.hstack(('s', bound_labels, 'ee')) else: bound_labels = np.hstack(('s', bound_labels, 's')) inds = [] for i in range(len(bound_labels)-1): if (bound_labels[i] == 'es') & (bound_labels[i+1] == 's'): inds.append(i) if len(inds)>0: for i in range(len(inds)): bound_labels[inds[i]] = 's' bound_labels[inds[i]+1] = 'es' time_labels = [] for i in range(len(time_bounds)-1): # plot chronostratigraphic units x = [time_bounds[i], time_bounds[i+1], time_bounds[i+1], time_bounds[i]] y = [0, 0, 1, 1] if (bound_labels[i] == 's') and (bound_labels[i+1] == 'es'): # vacuity ax3.fill(x, y, facecolor='xkcd:light grey', edgecolor='k') time_labels.append('v') elif (bound_labels[i] == 'ee') and (bound_labels[i+1] == 'es'): # vacuity ax3.fill(x, y, facecolor='xkcd:light grey', edgecolor='k') time_labels.append('v') elif (bound_labels[i] == 'es') and (bound_labels[i+1] == 'ee'): # erosion ax3.fill(x, y, facecolor='xkcd:red', edgecolor='k') time_labels.append('e') elif (bound_labels[i] == 's') and (bound_labels[i+1] == 'ee'): # erosion time_labels.append('e') elif (bound_labels[i] == 'ee') and (bound_labels[i+1] == 's'): # deposition ax3.fill(x, y, facecolor='xkcd:medium blue', edgecolor='k') time_labels.append('d') elif (bound_labels[i] == 's') and (bound_labels[i+1] == 's'): # deposition ax3.fill(x, y, facecolor='xkcd:medium blue', edgecolor='k') time_labels.append('d') ax1.plot([time_bounds[i], time_bounds[i]], [elevation_bounds[i], max_elevation + 0.02 * elev_range], 'k--', linewidth=0.5) for i in range(len(strat_tops)): ax2.plot([0, 1], [strat_tops[i], strat_tops[i]], color = 'xkcd:red', linewidth = 3) if len(strat_tops) > 0: if elevation[0] < np.min(strat_tops): strat_tops = np.hstack((elevation[0], strat_tops, elevation[-1])) strat_top_ages = np.hstack((0, strat_top_ages, time[-1])) else: strat_tops = np.hstack((strat_tops, elevation[-1])) strat_top_ages = np.hstack((strat_top_ages, time[-1])) else: strat_tops = np.hstack((elevation[0], strat_tops, elevation[-1])) strat_top_ages = np.hstack((0, strat_top_ages, time[-1])) for i in range(len(strat_tops)-1): # plot stratigraphic units x = [0, 1, 1, 0] y = [strat_tops[i], strat_tops[i], strat_tops[i+1], strat_tops[i+1]] ax2.fill(x, y, facecolor='xkcd:medium blue', edgecolor='k') if i > 0: ax1.plot([strat_top_ages[i], end_time], [strat_tops[i], strat_tops[i]], 'k--', linewidth=0.5) times = np.diff(time_bounds) thicknesses = np.diff(elevation_bounds) deposition_time = np.sum([item[0] for item in zip(times, time_labels) if item[1] == 'd' ]) vacuity_time = np.sum([item[0] for item in zip(times, time_labels) if item[1] == 'v' ]) erosion_time = np.sum([item[0] for item in zip(times, time_labels) if item[1] == 'e' ]) deposition_thickness = np.sum([item[0] for item in zip(thicknesses, time_labels) if item[1] == 'd' ]) vacuity_thickness = np.sum([item[0] for item in zip(thicknesses, time_labels) if item[1] == 'v' ]) eroded_thickness = np.sum([item[0] for item in zip(thicknesses, time_labels) if item[1] == 'e' ]) dve_data = [deposition_time, vacuity_time, erosion_time, deposition_thickness, vacuity_thickness, eroded_thickness] y1 = 0.55 y2 = 0.15 y = [y1, y1, y2, y2] x1 = 0 x2 = 3 * deposition_time/time[-1] x = [x1, x2, x2, x1] ax4.fill(x, y, facecolor='xkcd:medium blue', edgecolor = 'k', zorder = 1000) ax4.axis('off') ax4.text(x1, y1 + 0.07, 'deposition', fontsize = 12) ax4.text(x1 + 0.05, 0.27, str(np.round(deposition_time/time[-1], 3)), fontsize = 10, color = 'w',zorder=2000) x1 = 3 x2 = x1 + 3 * 
erosion_time/time[-1] x = [x1, x2, x2, x1] ax4.fill(x, y, facecolor='xkcd:red', edgecolor = 'k', zorder = 1001) ax4.text(x1, y1 + 0.07, 'erosion', fontsize = 12) ax4.text(x1 + 0.05, 0.27, str(np.round(erosion_time/time[-1], 3)), fontsize = 10, color = 'w',zorder=2000) x1 = 6 x2 = x1 + 3 * vacuity_time/time[-1] x = [x1, x2, x2, x1] ax4.fill(x, y, facecolor='xkcd:light grey', edgecolor = 'k', zorder = 1002) ax4.text(x1, y1 + 0.07, 'vacuity', fontsize = 12) ax4.text(x1 + 0.05, 0.27, str(np.round(vacuity_time/time[-1], 3)), fontsize = 10, color = 'w',zorder=2000) return fig def topostrat(topo): # convert topography to stratigraphy if len(np.shape(topo)) == 2: strat = np.minimum.accumulate(topo[::-1, :], axis=0)[::-1, :] if len(np.shape(topo)) == 3: strat = np.minimum.accumulate(topo[:, :, ::-1], axis=2)[:, :, ::-1] return strat def create_wheeler_diagram(topo): """create Wheeler (chronostratigraphic) diagram from a set of topographic surfaces """ strat = topostrat(topo) # convert topography to stratigraphy wheeler = np.diff(topo, axis=2) # 'normal' Wheeler diagram wheeler_strat = np.diff(strat, axis=2) # array for Wheeler diagram with vacuity blanked out; this array will be a positive number if there is preserved depostion, zero otherwise vacuity = np.zeros(np.shape(wheeler)) # array for vacuity vacuity[(wheeler>0) & (wheeler_strat==0)] = 1 # make the 'vacuity' array 1 where there was deposition (wheeler > 0) but stratigraphy is not preserved (wheeler_strat = 0) wheeler_strat[wheeler<0] = wheeler[wheeler<0] # add erosion to 'wheeler_strat' (otherwise it would only show deposition) return strat, wheeler, wheeler_strat, vacuity def plot_model_cross_section_EW(strat, prop, facies, dx, xsec, color_mode, line_freq = 1, ve = False, map_aspect = 1, flattening_ind = False, units = 'm', list_of_colors = ['lemonchiffon', 'peru', 'sienna']): """Plots an E-W oriented cross section through a stratigraphic model :param WG: well graph :param strat: stratigraphic grid :param prop: property array :param facies: facies array :param dx: gridcell size in the x- and y directions :param xsec: index of cross section to be displayed :param color_mode: determines what kind of plot is created; can be 'property' or 'facies' :param flattening_ind: index of stratigraphic top that should be used for flattening; default is 'False' (= no flattening) :param ve: vertical exaggeration; default is 'False' :param units: units used in the model :param map_aspect: the aspect ratio of the inset map that shows the location of the cross section :param list_of_colors: list of named matplotlib colors that will be used when 'color_mode' is set to 'facies' :return fig: figure handle """ fig = plt.figure(figsize = (10, 6)) ax = fig.add_subplot(111) axin = ax.inset_axes([0.03, 0.03, 0.3, 0.3]) r,c,ts = np.shape(strat) for i in trange(0, ts-1): if flattening_ind: top = (strat[xsec, :, i] - strat[xsec, :, flattening_ind]) base = (strat[xsec, :, i+1] - strat[xsec, :, flattening_ind]) else: top = strat[xsec, :, i] base = strat[xsec, :, i+1] props = prop[xsec, :, i] faciess = facies[xsec, :, i] if np.max(base - top)>0: Points, Inds = triangulate_layers(base,top,dx) for j in range(len(Points)): vertices = Points[j] triangles, scalars = create_triangles(vertices) x = vertices[:,0] y = vertices[:,1] if color_mode == 'property': colors = props[Inds[j]] colors = np.mean(colors[np.array(triangles)], axis = 1) ax.tripcolor(x, y, triangles=triangles, facecolors = colors, cmap = 'YlOrBr_r', edgecolors = 'none', vmin = 0, vmax = 0.35) if color_mode == 
'facies': colors = faciess[Inds[j]] colors = np.median(colors[np.array(triangles)], axis = 1) cmap = ListedColormap(list_of_colors) ax.tripcolor(x, y, triangles=triangles, facecolors = colors, edgecolors = 'none', cmap = cmap, vmin = 0, vmax = len(list_of_colors)) if np.mod(i, line_freq) == 0: ax.plot(np.arange(0, dx*c, dx), top, 'k', linewidth = 0.25) if i == ts-2: ax.plot(np.arange(0, dx*c, dx), base, 'k', linewidth = 0.5) ax.set_xlim(0, dx*(c-1)) if flattening_ind: ax.set_ylim(np.nanmin(strat[:,:,0] - strat[:, :, flattening_ind]), np.nanmax(strat[:,:,-1] - strat[:, :, flattening_ind])) else: ax.set_ylim(np.nanmin(strat), np.nanmax(strat)) ax.set_xlabel('distance (' + units + ')') ax.set_ylabel('depth (' + units + ')') axin.imshow(strat[:, :, -1], cmap='viridis', aspect = map_aspect) axin.set_xticks([]) axin.set_yticks([]) axin.plot([0, c-1], [xsec, xsec], 'k') # axin.set_aspect('equal') if ve: ax.set_aspect(ve, adjustable='datalim') # plt.tight_layout() return fig def plot_model_cross_section_NS(strat, prop, facies, dx, xsec, color_mode, line_freq = 1, ve = False, flattening_ind = False, units = 'm', map_aspect = 1, list_of_colors = ['lemonchiffon', 'peru', 'sienna']): """Plots an E-W oriented cross section through a stratigraphic model :param WG: well graph :param strat: stratigraphic grid :param prop: property array :param facies: facies array :param dx: gridcell size in the x- and y directions :param xsec: index of cross section to be displayed :param color_mode: determines what kind of plot is created; can be 'property' or 'facies' :param flattening_ind: index of stratigraphic top that should be used for flattening; default is 'False' (= no flattening) :param units: units used in the model :param map_aspect: the aspect ratio of the inset map that shows the location of the cross section :param list_of_colors: list of named matplotlib colors that will be used when 'color_mode' is set to 'facies' :return fig: figure handle """ fig = plt.figure(figsize = (10, 6)) ax = fig.add_subplot(111) axin = ax.inset_axes([0.03, 0.03, 0.3, 0.3]) r,c,ts = np.shape(strat) for i in trange(0, ts-1): if flattening_ind: top = (strat[:, xsec, i] - strat[:, xsec, flattening_ind]) base = (strat[:, xsec, i+1] - strat[:, xsec, flattening_ind]) else: top = strat[:, xsec, i] base = strat[:, xsec, i+1] props = prop[:, xsec, i] faciess = facies[:, xsec, i] if np.max(base - top)>0: Points, Inds = triangulate_layers(base,top,dx) for j in range(len(Points)): vertices = Points[j] triangles, scalars = create_triangles(vertices) x = vertices[:,0] y = vertices[:,1] if color_mode == 'property': colors = props[Inds[j]] colors = np.mean(colors[np.array(triangles)], axis = 1) ax.tripcolor(x, y, triangles=triangles, facecolors = colors, cmap = 'YlOrBr_r', edgecolors = 'none', vmin = 0, vmax = 0.35) if color_mode == 'facies': colors = faciess[Inds[j]] colors = np.median(colors[np.array(triangles)], axis = 1) cmap = ListedColormap(list_of_colors) ax.tripcolor(x, y, triangles=triangles, facecolors = colors, edgecolors = 'none', cmap = cmap, vmin = 0, vmax = len(list_of_colors)) if np.mod(i, line_freq) == 0: ax.plot(np.arange(0, dx*r, dx), top, 'k', linewidth = 0.5) if i == ts-2: ax.plot(np.arange(0, dx*r, dx), base, 'k', linewidth = 0.5) ax.set_xlim(0, dx*(r-1)) if flattening_ind: ax.set_ylim(np.nanmin(strat[:,:,0] - strat[:, :, flattening_ind]), np.nanmax(strat[:,:,-1] - strat[:, :, flattening_ind])) else: ax.set_ylim(np.nanmin(strat), np.nanmax(strat)) ax.set_xlabel('distance (' + units + ')') ax.set_ylabel('depth (' + units + 
')') axin.imshow(strat[:, :, -1], cmap='viridis', aspect = map_aspect) axin.set_xticks([]) axin.set_yticks([]) axin.plot([xsec, xsec], [0, r-1], 'k') # plt.tight_layout() if ve: ax.set_aspect(ve, adjustable='datalim') return fig def resample_elevation_spl(time, elevation, sampling_rate): spl = interpolate.splrep(time, elevation, s=0.5) time_new = np.arange(time[0], time[-1]+1, sampling_rate) elevation_new = interpolate.splev(time_new, spl) return time_new, elevation_new def resample_elevation_int1d(time, elevation, sampling_rate): f = interpolate.interp1d(time, elevation) time_new = np.arange(time[0], time[-1]+1, sampling_rate) elevation_new = f(time_new) return time_new, elevation_new
zsylvester/stratigraph
stratigraph/stratigraph.py
stratigraph.py
py
50,938
python
en
code
8
github-code
6
[ { "api_name": "numpy.shape", "line_number": 24, "usage_type": "call" }, { "api_name": "numpy.max", "line_number": 27, "usage_type": "call" }, { "api_name": "numpy.linspace", "line_number": 36, "usage_type": "call" }, { "api_name": "numpy.linspace", "line_numbe...
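A small numeric sketch of the stacking rule used by topostrat() and create_wheeler_diagram() in the record above: each surface is clipped by every younger surface with a reversed minimum.accumulate, and vacuity marks steps where deposition happened but nothing was preserved. The toy elevation series is made up for illustration:

import numpy as np

topo = np.array([0.0, 2.0, 2.5, 1.0, 3.0]).reshape(1, 1, 5)  # one map location, 5 time steps
strat = np.minimum.accumulate(topo[:, :, ::-1], axis=2)[:, :, ::-1]
print(strat.flatten())            # [0. 1. 1. 1. 3.]  the 2.0 and 2.5 surfaces are truncated to 1.0

wheeler = np.diff(topo, axis=2)          # thickness added per step (negative = erosion)
wheeler_strat = np.diff(strat, axis=2)   # thickness actually preserved per step
vacuity = (wheeler > 0) & (wheeler_strat == 0)
print(vacuity.flatten())          # [False  True False False]  deposited, then completely removed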
11557761416
import sys
from classes import *
import pprint
import os


# automatically generates a DFA for a system keyword.
def make_system_dfa(name, keyword):
    digit = "1234567890"
    char = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    length = len(keyword)
    dfa = Dfa(name)
    dfa.set_final_states([length])
    for i in range(0, length):
        dfa.add_rule(i, i + 1, keyword[i])
    return dfa


# builds a DFA for recognizing a single character.
def make_single_dfa(name, char):
    dfa = Dfa(name)
    dfa.set_final_states([1])
    dfa.add_rule(0, 1, char)
    return dfa


# the DFA construction code is long, so it is split into a separate function.
def set_dfa(token_scanner):
    nz = "123456789"
    digit = "1234567890"
    char = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

    # system keyword (Keyword stmt)
    token_scanner.add_dfa(make_system_dfa(Token.IF, "if"))
    token_scanner.add_dfa(make_system_dfa(Token.ELSE, "else"))
    token_scanner.add_dfa(make_system_dfa(Token.WHILE, "while"))
    token_scanner.add_dfa(make_system_dfa(Token.FOR, "for"))
    token_scanner.add_dfa(make_system_dfa(Token.RETURN, "return"))

    # system keyword (Vtype)
    vtype_dfa = Dfa(Token.V_TYPE)
    vtype_dfa.set_final_states([3, 7, 11, 16])
    vtype_dfa.add_rule(0, 1, "i")
    vtype_dfa.add_rule(1, 2, "n")
    vtype_dfa.add_rule(2, 3, "t")
    vtype_dfa.add_rule(0, 4, "c")
    vtype_dfa.add_rule(4, 5, "h")
    vtype_dfa.add_rule(5, 6, "a")
    vtype_dfa.add_rule(6, 7, "r")
    vtype_dfa.add_rule(0, 12, "f")
    vtype_dfa.add_rule(12, 13, "l")
    vtype_dfa.add_rule(13, 14, "o")
    vtype_dfa.add_rule(14, 15, "a")
    vtype_dfa.add_rule(15, 16, "t")
    token_scanner.add_dfa(vtype_dfa)

    # arithmetic operators
    addsub_dfa = Dfa(Token.ADDSUB)
    addsub_dfa.set_final_states([1])
    addsub_dfa.add_rule(0, 1, "-")
    addsub_dfa.add_rule(0, 1, "+")
    token_scanner.add_dfa(addsub_dfa)

    multdiv_dfa = Dfa(Token.MULTDIV)
    multdiv_dfa.set_final_states([1])
    multdiv_dfa.add_rule(0, 1, "*")
    multdiv_dfa.add_rule(0, 1, "/")
    token_scanner.add_dfa(multdiv_dfa)

    # comparison operators
    comp_dfa = Dfa(Token.COMP)
    comp_dfa.set_final_states([1, 2, 3, 4, 6, 8])
    comp_dfa.add_rule(0, 1, "<")
    comp_dfa.add_rule(1, 2, "=")
    comp_dfa.add_rule(0, 3, ">")
    comp_dfa.add_rule(3, 4, "=")
    comp_dfa.add_rule(0, 5, "=")
    comp_dfa.add_rule(5, 6, "=")
    comp_dfa.add_rule(0, 7, "!")
    comp_dfa.add_rule(7, 8, "=")
    token_scanner.add_dfa(comp_dfa)

    # white space
    ws_dfa = Dfa(Token.WHITE_SPACE)
    ws_dfa.set_final_states([1])
    ws_dfa.add_rule(0, 1, "\t")
    ws_dfa.add_rule(0, 1, "\n")
    ws_dfa.add_rule(0, 1, " ")
    ws_dfa.add_rule(1, 1, "\t")
    ws_dfa.add_rule(1, 1, "\n")
    ws_dfa.add_rule(1, 1, " ")
    token_scanner.add_dfa(ws_dfa)

    # assign
    token_scanner.add_dfa(make_single_dfa(Token.ASSIGN, "="))

    # semicolon
    semi_dfa = Dfa(Token.SEMI)
    semi_dfa.set_final_states([1])
    semi_dfa.add_rule(0, 1, ";")
    token_scanner.add_dfa(semi_dfa)

    # brackets
    token_scanner.add_dfa(make_single_dfa(Token.L_PAREN, "("))
    token_scanner.add_dfa(make_single_dfa(Token.R_PAREN, ")"))
    token_scanner.add_dfa(make_single_dfa(Token.L_BRACE, "{"))
    token_scanner.add_dfa(make_single_dfa(Token.R_BRACE, "}"))

    # comma
    token_scanner.add_dfa(make_single_dfa(Token.COMMA, ","))

    # integer
    integer_dfa = Dfa(Token.NUM)
    integer_dfa.set_final_states([1, 3])
    integer_dfa.add_rule(0, 1, "0")
    integer_dfa.add_rule(0, 2, "-")
    integer_dfa.add_rule(0, 3, nz)
    integer_dfa.add_rule(2, 3, nz)
    integer_dfa.add_rule(3, 3, digit)
    token_scanner.add_dfa(integer_dfa)

    # literal
    literal_dfa = Dfa(Token.LITERAL)
    literal_dfa.set_final_states([2])
    literal_dfa.add_rule(0, 1, "\"")
    literal_dfa.add_rule(1, 1, digit)
    literal_dfa.add_rule(1, 1, char)
    literal_dfa.add_rule(1, 1, " ")
    literal_dfa.add_rule(1, 2, "\"")
    token_scanner.add_dfa(literal_dfa)

    # float
    float_dfa = Dfa(Token.FLOAT)
    float_dfa.set_final_states([5])
    float_dfa.add_rule(0, 1, "-")
    float_dfa.add_rule(0, 2, nz)
    float_dfa.add_rule(0, 3, "0")
    float_dfa.add_rule(1, 2, nz)
    float_dfa.add_rule(1, 3, "0")
    float_dfa.add_rule(2, 2, digit)
    float_dfa.add_rule(2, 4, ".")
    float_dfa.add_rule(3, 4, ".")
    float_dfa.add_rule(4, 5, digit)
    float_dfa.add_rule(5, 5, nz)
    float_dfa.add_rule(5, 6, "0")
    float_dfa.add_rule(6, 5, nz)
    float_dfa.add_rule(6, 6, "0")
    token_scanner.add_dfa(float_dfa)

    # id
    id_dfa = Dfa(Token.ID)
    id_dfa.set_final_states([1])
    id_dfa.add_rule(0, 1, char)
    id_dfa.add_rule(0, 1, "_")
    id_dfa.add_rule(1, 1, char)
    id_dfa.add_rule(1, 1, "_")
    id_dfa.add_rule(1, 1, digit)
    token_scanner.add_dfa(id_dfa)


def main(file_path):
    with open(file_path, mode="r") as f:
        literal_list = f.read()
    # print(literal_list)

    # pass the source code that was read in to the Token Scanner for parsing.
    token_scanner = TokenScanner(literal_list)
    # create the DFAs for recognizing tokens and hand them to token_scanner.
    set_dfa(token_scanner)
    # create a list for storing the parsed tokens.
    token_list = []
    while True:
        # try to parse one token.
        ret = token_scanner.parse_token()
        # ret is None -> parsing failed or parsing finished.
        filename, file_extension = os.path.splitext(file_path)
        new_filename = f"{filename}.out"
        if ret is None:
            if token_scanner.parse_end() is True:
                # print("success")  # output when parsing succeeds
                # pprint.pprint(token_list)
                with open(new_filename, "w") as f:
                    import json
                    f.write(json.dumps({"body": token_list, "original": literal_list}))
                    # f.writelines(map(lambda t: f"{t}\n", token_list))
            else:
                end_pos = token_scanner.start_pos
                all_lines = literal_list[0:end_pos + 1]
                line_number = len(all_lines.splitlines())
                literal_list_lines = literal_list.splitlines(keepends=True)
                print(literal_list_lines, literal_list_lines[0:line_number])
                length_line_before = len(''.join(literal_list_lines[0:line_number - 1]))
                print(length_line_before)
                local_pos = end_pos - length_line_before + 1
                print(f"local_pos {local_pos} = end_pos {end_pos} - {length_line_before} + 1")
                str = ""
                str = str + f"error at line number {line_number}, column {local_pos}.\n\n"
                original_line = literal_list_lines[line_number - 1]
                str = str + f"{original_line}\n"
                print(str)
                with open(new_filename, "w") as f:
                    f.write(str)
                pass
            break
        token_list.append(ret)
        if len(token_list) > 1 \
                and (token_list[-1][0] in [Token.NUM, Token.FLOAT] and "-" in token_list[-1][1]):
            # print(1)
            # if a Number immediately precedes it, split the token
            # otherwise keep it as is
            finding_token = None
            for i in range(len(token_list) - 1, 0, -1):
                i = i - 1  # adjust the value from the range iteration.
                # search while skipping blanks.
                if token_list[i][0] == Token.WHITE_SPACE:
                    continue
                finding_token = token_list[i]
                break
            if (finding_token is not None) and finding_token[0] in [Token.NUM, Token.FLOAT]:
                # print(f"split {token_list[-1]}")
                token_list[-1] = (token_list[-1][0], token_list[-1][1].replace("-", ""))
                token_list.insert(-1, (Token.ADDSUB, "-"))


if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("please pass file path")
        sys.exit()
    file_path = sys.argv[1]
    print("File path : " + file_path)
    main(file_path)
pula39/compiler_assignment1
lexical.py
lexical.py
py
8,178
python
en
code
0
github-code
6
[ { "api_name": "os.path.splitext", "line_number": 184, "usage_type": "call" }, { "api_name": "os.path", "line_number": 184, "usage_type": "attribute" }, { "api_name": "json.dumps", "line_number": 193, "usage_type": "call" }, { "api_name": "sys.argv", "line_numb...
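The Dfa and TokenScanner classes used above live in the repository's classes module, which this record does not include. The standalone sketch below reproduces only the table-driven idea behind the integer DFA built in set_dfa(); MiniDfa is a made-up name:

class MiniDfa:
    def __init__(self):
        self.rules = {}        # (state, char) -> next state
        self.finals = {1, 3}   # same accepting states as integer_dfa above

    def add_rule(self, src, dst, chars):
        for ch in chars:
            self.rules[(src, ch)] = dst

    def accepts(self, text):
        state = 0
        for ch in text:
            state = self.rules.get((state, ch))
            if state is None:
                return False
        return state in self.finals

dfa = MiniDfa()
dfa.add_rule(0, 1, "0")
dfa.add_rule(0, 2, "-")
dfa.add_rule(0, 3, "123456789")
dfa.add_rule(2, 3, "123456789")
dfa.add_rule(3, 3, "1234567890")
print(dfa.accepts("-42"), dfa.accepts("0"), dfa.accepts("007"))  # True True False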
1999311786
import os from enum import Enum, auto from random import randint import pygame class Main: @staticmethod def start(): pygame.font.init() os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (400, 100) surface = pygame.display.set_mode((1200, 900)) pygame.display.set_caption('Minesweeper') state = States.running player = Player() grid = Grid(player) running = True clock = pygame.time.Clock() while running: clock.tick(30) for event in pygame.event.get(): if event.type == pygame.QUIT: running = False if event.type == pygame.MOUSEBUTTONDOWN and state == States.running: if pygame.mouse.get_pressed()[0]: pos = pygame.mouse.get_pos() grid.click(pos[0], pos[1]) elif pygame.mouse.get_pressed()[2]: pos = pygame.mouse.get_pos() grid.mark_mine(pos[0] // 30, pos[1] // 30) if grid.check_if_win(): state = States.win if event.type == pygame.KEYDOWN: if event.key == pygame.K_SPACE and (state == States.game_over or state == States.win): grid.reload() state = States.running if event.key == pygame.K_b: grid.show_mines() surface.fill((0, 0, 0)) if player.get_health() == 0: state = States.game_over if state == States.game_over: Stats.draw(surface, 'Game over!', (970, 350)) Stats.draw(surface, 'Press Space to restart', (920, 400)) elif state == States.win: Stats.draw(surface, 'You win!', (1000, 350)) Stats.draw(surface, 'Press Space to restart', (920, 400)) grid.draw(surface) Stats.draw(surface, 'Lives remaining', (950, 100)) Stats.draw(surface, str(player.get_health()), (1020, 200)) pygame.display.flip() class States(Enum): running = auto() game_over = auto() win = auto() class Player: def __init__(self): self.health = 5 def sub_health(self): self.health -= 1 def get_health(self): return self.health class Stats: @staticmethod def draw(surface, label, pos): textsurface = pygame.font.SysFont('Comic Sans MS', 24).render(label, False, (255, 255, 255)) surface.blit(textsurface, (pos[0], pos[1])) class Cell: def __init__(self, pos, random_mine): self.visible = False self.mine = random_mine self.show_mine = False self.size = 30 self.color = (200, 200, 200) self.pos = pos self.label = False self.mine_counter = 0 self.font_color = (0, 0, 0) self.marked = False self.explosion = False self.img_flag = pygame.image.load('../resources/minesweeper/cell-flagged.png') self.img_flag = pygame.transform.scale(self.img_flag, (self.size, self.size)) self.img_explode = pygame.image.load('../resources/minesweeper/mine-exploded.png') self.img_explode = pygame.transform.scale(self.img_explode, (self.size, self.size)) self.img_mine = pygame.image.load('../resources/minesweeper/mine.png') self.img_mine = pygame.transform.scale(self.img_mine, (self.size, self.size)) self.img_cell = [] for i in range(9): _img = pygame.image.load(f'../resources/minesweeper/cell-{i}.png') _img = pygame.transform.scale(_img, (self.size, self.size)) self.img_cell.append(_img) def draw(self, surface): if self.visible and not self.label and not (self.show_mine and self.mine): surface.blit(self.img_cell[0], (self.pos[0], self.pos[1])) elif self.label: self.show_label(surface, self.mine_counter, self.pos) elif self.marked: surface.blit(self.img_flag, (self.pos[0], self.pos[1])) elif self.show_mine and self.mine: surface.blit(self.img_mine, (self.pos[0], self.pos[1])) elif self.explosion: surface.blit(self.img_explode, (self.pos[0], self.pos[1])) else: pygame.draw.rect(surface, (50, 50, 50), (self.pos[0], self.pos[1], self.size, self.size)) def show_label(self, surface, label, pos): # textsurface = pygame.font.SysFont('Comic Sans MS', 18).render(label, False, 
self.font_color) # surface.blit(textsurface, (pos[0] + 10, pos[1] + 4)) surface.blit(self.img_cell[int(label)], (pos[0], pos[1])) class Grid: def __init__(self, player): self.player = player self.cells = [] self.search_dirs = [(0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1)] for y in range(30): self.cells.append([]) for x in range(30): self.cells[y].append(Cell((x * 30, y * 30), self.random_mines())) self.lines = [] for y in range(1, 31, 1): temp = [] temp.append((0, y * 30)) temp.append((900, y * 30)) self.lines.append(temp) for x in range(1, 31, 1): temp = [] temp.append((x * 30, 0)) temp.append((x * 30, 900)) self.lines.append(temp) def random_mines(self): r = randint(0, 10) if r > 9: return True else: return False def draw(self, surface): for row in self.cells: for cell in row: cell.draw(surface) for line in self.lines: pygame.draw.line(surface, (0, 125, 0), line[0], line[1]) def is_within_bounds(self, x, y): return x >= 0 and x < 30 and y >= 0 and y < 30 def search(self, x, y): if not self.is_within_bounds(x, y): return cell = self.cells[y][x] if cell.visible: return if cell.mine: cell.explosion = True self.player.sub_health() return cell.visible = True num_mines = self.num_of_mines(x, y) if num_mines > 0: cell.label = True cell.mine_counter = str(num_mines) return for xx, yy in self.search_dirs: self.search(x + xx, y + yy) def num_of_mines(self, x, y): counter = 0 for xx, yy in self.search_dirs: if self.is_within_bounds(x + xx, y + yy) and self.cells[y + yy][x + xx].mine: counter += 1 return counter def click(self, x, y): grid_x, grid_y = x // 30, y // 30 self.search(grid_x, grid_y) def reload(self): self.player.health = 5 for row in self.cells: for cell in row: cell.visible = False cell.label = False cell.marked = False cell.show_mine = False cell.explosion = False cell.mine = self.random_mines() def check_if_win(self): if self.player.health < 1: return False for row in self.cells: for cell in row: if not cell.visible and not cell.mine: return False return True def show_mines(self): for row in self.cells: for cell in row: if not cell.show_mine: cell.show_mine = True else: cell.show_mine = False def mark_mine(self, x, y): self.cells[y][x].marked = True if __name__ == "__main__": Main.start()
MaximCosta/messy-pypi
messy_pypi/done/main_minesweeper.py
main_minesweeper.py
py
7,807
python
en
code
2
github-code
6
[ { "api_name": "pygame.font.init", "line_number": 11, "usage_type": "call" }, { "api_name": "pygame.font", "line_number": 11, "usage_type": "attribute" }, { "api_name": "os.environ", "line_number": 12, "usage_type": "attribute" }, { "api_name": "pygame.display.set_...
34098196022
import uvicorn from pyroute2 import IPRoute from fastapi import FastAPI ipr = IPRoute() ipr.bind() app = FastAPI() @app.get("/iface/{iface_name}") async def iface_id(iface_name): with IPRoute() as ipr: iface = ipr.link_lookup(ifname=iface_name) return {"iface": iface[0]}
andreagarbugli/iaac-tc-quic
tc-daemon/router.py
router.py
py
293
python
en
code
0
github-code
36
[ { "api_name": "pyroute2.IPRoute", "line_number": 5, "usage_type": "call" }, { "api_name": "fastapi.FastAPI", "line_number": 9, "usage_type": "call" }, { "api_name": "pyroute2.IPRoute", "line_number": 14, "usage_type": "call" } ]
33040837881
import io import struct from typing import Any, BinaryIO class StructStream(int): PACK = "" """ Create a class that can parse and stream itself based on a struct.pack template string. """ def __new__(cls: Any, value: int): value = int(value) try: v1 = struct.unpack(cls.PACK, struct.pack(cls.PACK, value))[0] if value != v1: raise ValueError(f"Value {value} does not fit into {cls.__name__}") except Exception: bits = struct.calcsize(cls.PACK) * 8 raise ValueError( f"Value {value} of size {value.bit_length()} does not fit into " f"{cls.__name__} of size {bits}" ) return int.__new__(cls, value) # type: ignore @classmethod def parse(cls: Any, f: BinaryIO) -> Any: bytes_to_read = struct.calcsize(cls.PACK) read_bytes = f.read(bytes_to_read) assert read_bytes is not None and len(read_bytes) == bytes_to_read return cls(*struct.unpack(cls.PACK, read_bytes)) def stream(self, f): f.write(struct.pack(self.PACK, self)) @classmethod def from_bytes(cls: Any, blob: bytes) -> Any: # type: ignore f = io.BytesIO(blob) result = cls.parse(f) assert f.read() == b"" return result def __bytes__(self: Any) -> bytes: f = io.BytesIO() self.stream(f) return bytes(f.getvalue())
snight1983/chia-rosechain
chia/util/struct_stream.py
struct_stream.py
py
1,440
python
en
code
369
github-code
36
[ { "api_name": "typing.Any", "line_number": 13, "usage_type": "name" }, { "api_name": "struct.unpack", "line_number": 16, "usage_type": "call" }, { "api_name": "struct.pack", "line_number": 16, "usage_type": "call" }, { "api_name": "struct.calcsize", "line_numb...
6198323090
''' 1. A condition per length of the input string + per character? 2. It's brute force, so just write the logic and count up by 1? <- This is fine because the largest case is only about 5^5 words, so it won't time out. Go with approach 2; the real question is how to write the logic. ''' from itertools import product def solution(word): answer = [] for i in range(1,6): for v in product(["A","E","I","O","U"],repeat = i): answer.append("".join(v)) answer.sort() return answer.index(word)+1
byeong-chang/Baekjoon-programmers
프로그래머스/lv2/84512. 모음 사전/모음 사전.py
모음 사전.py
py
518
python
ko
code
2
github-code
36
[ { "api_name": "itertools.product", "line_number": 10, "usage_type": "call" } ]
14007737341
import torch import torch.nn as nn import torch.nn.functional as F import torchvision from torchvision import datasets, transforms from torchvision.utils import save_image import matplotlib.pyplot as plt import numpy as np import random class AutoEncoderNet(torch.nn.Module): def __init__(self, n_channels, dim_last_layer, latent_features): super(AutoEncoderNet, self).__init__() n_flatten = torch.prod(torch.tensor(dim_last_layer)) self.encoder = nn.Sequential( nn.Conv2d(n_channels, 16, 5), nn.ReLU(), nn.Conv2d(16, 32, 5,stride=2), nn.BatchNorm2d(32), nn.ReLU(), nn.Conv2d(32, 64, 5,stride=2), nn.BatchNorm2d(64), nn.ReLU(), nn.Conv2d(64, 64, 5,stride=2), nn.BatchNorm2d(64), nn.ReLU(), nn.Flatten(start_dim=1), nn.Linear(n_flatten, 512), nn.ReLU(), nn.Linear(512, latent_features) ) self.decoder = nn.Sequential( nn.Linear(latent_features, n_flatten), nn.ReLU(), nn.Unflatten(1,dim_last_layer), nn.ConvTranspose2d(64, 64, 5,stride=2,output_padding=1), nn.BatchNorm2d(64), nn.ReLU(), nn.ConvTranspose2d(64, 32, 5,stride=2,output_padding=1), nn.BatchNorm2d(32), nn.ReLU(), nn.ConvTranspose2d(32, 16, 5,stride=2,output_padding=1), nn.BatchNorm2d(16), nn.ReLU(), nn.ConvTranspose2d(16, n_channels, 5), nn.Sigmoid() ) def forward(self, x): latent_space = self.encoder(x) x_reconstruction = self.decoder(latent_space) return latent_space, x_reconstruction
s183920/02582_Computational_Data_Analysis_Case2
autoencoder/ae.py
ae.py
py
1,937
python
en
code
0
github-code
36
[ { "api_name": "torch.nn", "line_number": 11, "usage_type": "attribute" }, { "api_name": "torch.prod", "line_number": 15, "usage_type": "call" }, { "api_name": "torch.tensor", "line_number": 15, "usage_type": "call" }, { "api_name": "torch.nn.Sequential", "line...
39908117264
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient import logging import time import argparse import json import ast AllowedActions = ['both', 'publish', 'subscribe'] file_path = "../History.log" faults = [] fault_type = "" # Read in command-line parameters parser = argparse.ArgumentParser() parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="a28yobe9j1e4my-ats.iot.us-east-2.amazonaws.com") parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="x509root.crt") parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="f9f5eadeff-certificate.pem.crt") parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="f9f5eadeff-private.pem.key") parser.add_argument("-p", "--port", action="store", dest="port", type=int, help="Port number override") parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False, help="Use MQTT over WebSocket") parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicPubSub", help="Targeted client id") parser.add_argument("-t", "--topic", action="store", dest="topic", default="topic/getData", help="Targeted topic") parser.add_argument("-m", "--mode", action="store", dest="mode", default="both", help="Operation modes: %s"%str(AllowedActions)) parser.add_argument("-M", "--message", action="store", dest="message", default="Hello World!", help="Message to publish") args = parser.parse_args() host = args.host rootCAPath = args.rootCAPath certificatePath = args.certificatePath privateKeyPath = args.privateKeyPath port = args.port useWebsocket = args.useWebsocket clientId = args.clientId topic = args.topic if args.mode not in AllowedActions: parser.error("Unknown --mode option %s. Must be one of %s" % (args.mode, str(AllowedActions))) exit(2) if args.useWebsocket and args.certificatePath and args.privateKeyPath: parser.error("X.509 cert authentication and WebSocket are mutual exclusive. 
Please pick one.") exit(2) if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath): parser.error("Missing credentials for authentication.") exit(2) # Port defaults if args.useWebsocket and not args.port: # When no port override for WebSocket, default to 443 port = 443 if not args.useWebsocket and not args.port: # When no port override for non-WebSocket, default to 8883 port = 8883 # Configure logging logger = logging.getLogger("AWSIoTPythonSDK.core") logger.setLevel(logging.DEBUG) streamHandler = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') streamHandler.setFormatter(formatter) logger.addHandler(streamHandler) # Init AWSIoTMQTTClient myAWSIoTMQTTClient = None if useWebsocket: myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId, useWebsocket=True) myAWSIoTMQTTClient.configureEndpoint(host, port) myAWSIoTMQTTClient.configureCredentials(rootCAPath) else: myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId) myAWSIoTMQTTClient.configureEndpoint(host, port) myAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath) # AWSIoTMQTTClient connection configuration myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20) myAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing myAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec myAWSIoTMQTTClient.configureMQTTOperationTimeout(5) # 5 sec # Connect and subscribe to AWS IoT myAWSIoTMQTTClient.connect() time.sleep(5) try: with open(file_path, 'r') as file: idx = 0 while True: file.seek(0) for i in range(idx): file.readline() new_lines = file.readlines() if new_lines: for l in new_lines: idx += 1 l = l.strip() if l.startswith("-"): if len(faults) > 0: if args.mode == 'both' or args.mode == 'publish': data = json.dumps({'deviceID': "OBD-II_Dongle", "data": { 'fault': fault_type, "codes": faults}}) try: myAWSIoTMQTTClient.publish(topic, data, 1) except Exception as error: print('Error while sending data to DB: {}'.format(error)) myAWSIoTMQTTClient.connect() time.sleep(5) myAWSIoTMQTTClient.publish(topic, data, 1) if args.mode == 'publish': print('Published topic %s: %s\n' % (topic, data)) fault_type, faults = "", [] else: sidx = l.find('>') + 1 eidx = l.find(';') if l.startswith('DTC'): dtc = l[sidx:eidx].strip() faults.append(dtc) elif l.startswith('20'): fault_type = l[sidx:eidx].strip() else: time.sleep(0.5) continue except FileNotFoundError: print(f"File '{file_path}' not found.") except IOError as e: print(f"Error reading file: {e}")
ngonza27/ctp-ngv-23
src/py/send_data.py
send_data.py
py
5,270
python
en
code
0
github-code
36
[ { "api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call" }, { "api_name": "logging.getLogger", "line_number": 59, "usage_type": "call" }, { "api_name": "logging.DEBUG", "line_number": 60, "usage_type": "attribute" }, { "api_name": "logging....
22382283158
import shutil import tempfile from django.contrib.auth import get_user_model from django.test import Client, TestCase, override_settings from django.urls import reverse from django import forms from django.conf import settings from django.core.files.uploadedfile import SimpleUploadedFile from django.core.cache import cache from ..models import Group, Post, Follow User = get_user_model() TEST_OF_POST = 13 FIRST_OF_POSTS = 10 TEMP_MEDIA_ROOT = tempfile.mktemp(dir=settings.BASE_DIR) @override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT) class PostViewsTests(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.user = User.objects.create(username='tes') def setUp(self): self.small_gif = ( b'\x47\x49\x46\x38\x39\x61\x02\x00' b'\x01\x00\x80\x00\x00\x00\x00\x00' b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00' b'\x00\x00\x00\x2C\x00\x00\x00\x00' b'\x02\x00\x01\x00\x00\x02\x02\x0C' b'\x0A\x00\x3B' ) self.uploaded = SimpleUploadedFile( name='small.gif', content=self.small_gif, content_type='image/gif' ) self.unauthorized_client = Client() self.authorized_client = Client() self.authorized_client.force_login(self.user) self.group = Group.objects.create( id=1, title='Тестовая группа', slug='slug', description='Тестовое описание', ) self.post = Post.objects.create( author=self.user, text='Тестовая пост какойто', group=self.group, image='posts/small.gif', ) @classmethod def tearDownClass(cls): shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True) super().tearDownClass() def test_pages_uses_correct_template(self): """URL-адрес использует соответствующий шаблон.""" templates_page_names = { 'posts/index.html': reverse('posts:index'), 'posts/group_list.html': ( reverse('posts:group_list', kwargs={'slug': 'slug'}) ), 'posts/profile.html': ( reverse('posts:profile', kwargs={'username': self.user.username}) ), 'posts/post_detail.html': ( reverse('posts:post_detail', kwargs={'post_id': self.post.pk}) ), 'posts/create_post.html': reverse('posts:post_create'), } for template, reverse_name in templates_page_names.items(): with self.subTest(template=template): response = self.authorized_client.get(reverse_name) self.assertTemplateUsed(response, template) def test_index_show_correct_context(self): """Шаблон index сформирован с правильным контекстом.""" response = self.authorized_client.get(reverse('posts:index')) first_object = response.context['posts'][0] post_text_0 = first_object.text post_group_0 = first_object.group post_author_0 = first_object.author post_image_0 = first_object.image self.assertEqual(post_text_0, 'Тестовая пост какойто') self.assertEqual(post_author_0, self.user) self.assertEqual(post_group_0, self.group) self.assertEqual(post_image_0, 'posts/small.gif') def test_group_list_show_correct_context(self): """Шаблон group_list сформирован с правильным контекстом.""" response = self.authorized_client.get(reverse( 'posts:group_list', kwargs={'slug': 'slug'}) ) first_object = response.context['posts'][0] post_group_0 = first_object.group self.assertEqual(post_group_0, self.group) post_image_0 = first_object.image self.assertEqual(post_image_0, 'posts/small.gif') def test_prifile_show_correct_context(self): """Шаблон profile сформирован с правильным контекстом.""" response = self.authorized_client.get(reverse( 'posts:profile', kwargs={'username': self.user.username}) ) first_object = response.context['post_list'][0] post_author_0 = first_object.author post_image_0 = first_object.image self.assertEqual(post_author_0, self.user) self.assertEqual(post_image_0, 'posts/small.gif') def 
test_post_detail_show_correct_context(self): """Шаблон post_detail сформирован с правильным контекстом.""" response = self.authorized_client.get(reverse( 'posts:post_detail', kwargs={'post_id': self.post.pk}) ) first_object = response.context.get('post') post_text_0 = first_object.text post_image_0 = first_object.image self.assertEqual(post_text_0, 'Тестовая пост какойто') self.assertEqual(post_image_0, 'posts/small.gif') def test_post_detail_page_list_is_1(self): """На post_detail передаётся ожидаемое количество объектов""" response = self.authorized_client.get(reverse( 'posts:post_detail', kwargs={'post_id': self.post.pk}) ) self.assertEqual(response.context['post_count'], 1) def test_create_correct_context(self): """Шаблон create сформирован с правильным контекстом.""" response = self.authorized_client.get(reverse('posts:post_create')) form_fields = { 'text': forms.fields.CharField, 'group': forms.fields.ChoiceField, 'image': forms.ImageField, } for value, expected in form_fields.items(): with self.subTest(value=value): form_field = response.context.get('form').fields.get(value) self.assertIsInstance(form_field, expected) def test_post_added_correctly_user(self): """Пост при создании виден на странице выбранной группы, в профайле пользовател и на главной странице""" group2 = Group.objects.create(title='Тестовая группа 2', slug='test_group2') posts_count = Post.objects.filter(group=self.group).count() post = Post.objects.create( text='Тестовый пост от другого автора', author=self.user, group=group2) response_profile = self.authorized_client.get( reverse('posts:profile', kwargs={'username': f'{self.user.username}'})) response_home = self.authorized_client.get( reverse('posts:index')) group = Post.objects.filter(group=self.group).count() profile = response_profile.context['post_list'] home = response_home.context['posts'] self.assertEqual(group, posts_count) self.assertIn(post, profile) self.assertIn(post, home) def test_index_cache_context(self): """Проверка кэширования страницы index""" before_create_post = self.authorized_client.get( reverse('posts:index')) first_item_before = before_create_post.content Post.objects.create( author=self.user, text='Проверка кэша', group=self.group, image=self.uploaded ) after_create_post = self.authorized_client.get(reverse('posts:index')) first_item_after = after_create_post.content self.assertEqual(first_item_after, first_item_before) cache.clear() after_clear = self.authorized_client.get(reverse('posts:index')) self.assertNotEqual(first_item_after, after_clear) class PaginatorViewsTest(TestCase): def setUp(self): self.client = Client() self.guest_client = Client() self.user = User.objects.create_user(username='auth') self.authorized_client = Client() self.authorized_client.force_login(self.user) self.group = Group.objects.create( id=1, title='Тестовая группа', slug='slug', description='Тестовое описание', ) bilk_post: list = [] for i in range(TEST_OF_POST): bilk_post.append(Post(text=f'Тестовая, пост какойто {i}', group=self.group, author=self.user)) Post.objects.bulk_create(bilk_post) def test_correct_page_context_guest_client(self): """Проверка количества постов на первой и второй страницах.""" pages: tuple = (reverse('posts:index'), reverse('posts:profile', kwargs={'username': f'{self.user.username}'}), reverse('posts:group_list', kwargs={'slug': f'{self.group.slug}'})) for page in pages: response1 = self.client.get(page) response2 = self.client.get(page + '?page=2') count_posts1 = len(response1.context['page_obj']) count_posts2 = 
len(response2.context['page_obj']) self.assertEqual(count_posts1, FIRST_OF_POSTS) self.assertEqual(count_posts2, TEST_OF_POST - FIRST_OF_POSTS) class FollowViewsTest(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.user = User.objects.create_user(username='auth1') cls.user2 = User.objects.create_user(username='auth2') cls.author = User.objects.create_user(username='someauthor') def setUp(self): self.guest_client = Client() self.authorized_client = Client() self.authorized_client.force_login(self.user) self.authorized_client2 = Client() self.authorized_client2.force_login(self.user2) def test_user_follower_authors(self): """Посты доступны пользователю, который подписался на автора. Увеличение подписок автора""" count_follow = Follow.objects.filter(user=FollowViewsTest.user).count() data_follow = {'user': FollowViewsTest.user, 'author': FollowViewsTest.author} url_redirect = reverse( 'posts:profile', kwargs={'username': FollowViewsTest.author.username}) response = self.authorized_client.post( reverse('posts:profile_follow', kwargs={ 'username': FollowViewsTest.author.username}), data=data_follow, follow=True) new_count_follow = Follow.objects.filter( user=FollowViewsTest.user).count() self.assertTrue(Follow.objects.filter( user=FollowViewsTest.user, author=FollowViewsTest.author).exists()) self.assertRedirects(response, url_redirect) self.assertEqual(count_follow + 1, new_count_follow) def test_unfollower_no_see_new_post(self): """У не подписчика поста нет""" new_post_follower = Post.objects.create( author=FollowViewsTest.author, text='Текстовый текст') Follow.objects.create(user=FollowViewsTest.user, author=FollowViewsTest.author) response_unfollower = self.authorized_client2.get( reverse('posts:follow_index')) new_post_unfollower = response_unfollower.context['page_obj'] self.assertNotIn(new_post_follower, new_post_unfollower) def test_follower_see_new_post(self): """У подписчика появляется новый пост избранного автора.""" new_post_follower = Post.objects.create( author=FollowViewsTest.author, text='Текстовый текст') Follow.objects.create(user=FollowViewsTest.user, author=FollowViewsTest.author) response_follower = self.authorized_client.get( reverse('posts:follow_index')) new_posts = response_follower.context['page_obj'] self.assertIn(new_post_follower, new_posts)
krankir/Social-network
yatube/posts/tests/test_views.py
test_views.py
py
12,656
python
en
code
0
github-code
36
[ { "api_name": "django.contrib.auth.get_user_model", "line_number": 14, "usage_type": "call" }, { "api_name": "tempfile.mktemp", "line_number": 17, "usage_type": "call" }, { "api_name": "django.conf.settings.BASE_DIR", "line_number": 17, "usage_type": "attribute" }, { ...
73507098983
from django import forms from .Config import EffectType class ChooseEffectRadioForm(forms.Form): def __init__(self, effect_type, effect_label, *args, **kwargs): super(ChooseEffectRadioForm, self).__init__(*args, **kwargs) self.fields["pref-effect"] = forms.BooleanField(label=effect_label, required=True, widget=forms.CheckboxInput(attrs={ "type": "radio", "id": effect_type, "value": effect_type })) radio_list = list() for key, value in EffectType.__members__.items(): radio_list.append(ChooseEffectRadioForm(key, value.value[0]))
gwolan/pic_convolving_website
upload_pic/src/ChooseEffectRadioForm.py
ChooseEffectRadioForm.py
py
932
python
en
code
0
github-code
36
[ { "api_name": "django.forms.Form", "line_number": 5, "usage_type": "attribute" }, { "api_name": "django.forms", "line_number": 5, "usage_type": "name" }, { "api_name": "django.forms.BooleanField", "line_number": 9, "usage_type": "call" }, { "api_name": "django.for...
73256078825
# add CBAM attention module import torch from torch import nn class ChannelAttention(nn.Module): def __init__(self, channel, ratio=16): super(ChannelAttention, self).__init__() self.max_pool = nn.AdaptiveMaxPool2d(1) self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential( nn.Linear(channel,channel//ratio,False), nn.ReLU(), nn.Linear(channel//ratio,channel,False) ) self.sigmoid = nn.Sigmoid() def forward(self, x): b,c,h,w = x.size() max_pool_out = self.max_pool(x).view([b,c]) avg_pool_out = self.avg_pool(x).view([b,c]) max_fc_out = self.fc(max_pool_out) avg_fc_out = self.fc(avg_pool_out) out = max_fc_out + avg_fc_out out = self.sigmoid(out).view([b,c,1,1]) return out*x class SpatialAttention(nn.Module): def __init__(self, kernel_size=7): super(SpatialAttention, self).__init__() assert kernel_size in (3, 7), 'kernel size must be 3 or 7' padding = 3 if kernel_size == 7 else 1 self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): b, c, h, w = x.size() max_pool_out = torch.max(x,dim=1,keepdim=True)[0] mean_pool_out = torch.mean(x, dim=1, keepdim=True) pool_out =torch.cat([max_pool_out, mean_pool_out], dim=1) out = self.conv(pool_out) out = self.sigmoid(out) return out*x class cbam(nn.Module): def __init__(self, channel, ratio=16,kernel_size=7): super(cbam, self).__init__() self.channel_attention = ChannelAttention(channel,ratio) self.spatial_attention = SpatialAttention(kernel_size) def forward(self,x): x = self.channel_attention(x) x = self.spatial_attention(x) return x # model = cbam(512) # print(model)
DickensKP/Yolov3-vehicle-pedestrian-trafficsign-detection-system
CBAM.py
CBAM.py
py
2,002
python
en
code
4
github-code
36
[ { "api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute" }, { "api_name": "torch.nn", "line_number": 5, "usage_type": "name" }, { "api_name": "torch.nn.AdaptiveMaxPool2d", "line_number": 8, "usage_type": "call" }, { "api_name": "torch.nn", ...
17887863275
""" 集成了流式布局、按钮排布的窗口。 """ from PySide2.QtWidgets import QScrollArea, QWidget, QToolButton, QVBoxLayout, QSpacerItem, QSizePolicy from PySide2.QtCore import Qt, QSize from typing import TYPE_CHECKING if TYPE_CHECKING: from PySide2.QtGui import QResizeEvent from widgets import PMFlowLayout class PMFlowAreaWidget(QWidget): def __init__(self): super().__init__() from widgets import PMFlowLayout self.outer_layout = QVBoxLayout() self.flow_layout = PMFlowLayout() self.setMinimumWidth(100) self.outer_layout.addLayout(self.flow_layout) spacer_v = QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Expanding) self.outer_layout.addItem(spacer_v) self.setLayout(self.outer_layout) def add_widget(self, w: 'QWidget'): self.flow_layout.add_widget(w) def setup_ui(self): if hasattr(self.widget(), 'setup_ui'): self.widget().setup_ui() def resizeEvent(self, a0: 'QResizeEvent') -> None: super().resizeEvent(a0) layout: 'PMFlowLayout' = self.flow_layout layout.on_resize() class PMFlowArea(QScrollArea): def __init__(self, parent=None): super().__init__(parent) self.flow_widget = PMFlowAreaWidget() self.widgets_list = self.flow_widget.flow_layout.widgets_list self.setWidget(self.flow_widget) self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn) self.setWidgetResizable(True) def set_layout_content_margins( self, left: int, right: int, up: int, down: int): self.flow_widget.flow_layout.setContentsMargins(left, right, up, down) def add_tool_button(self, name: str, text: str, icon_path: str = ''): from widgets import create_icon b = QToolButton() b.setText(text) icon = create_icon(icon_path) b.setIcon(icon) b.setToolButtonStyle(Qt.ToolButtonTextUnderIcon) b.setIconSize(QSize(40, 40)) b.setMaximumWidth(80) b.setMinimumWidth(80) b.setMinimumHeight(60) b.setMaximumHeight(60) self.add_widget(b) return b def add_widget(self, w: 'QWidget'): self.widget().add_widget(w) return w def setup_ui(self): if hasattr(self.widget(), 'setup_ui'): self.widget().setup_ui() if __name__ == '__main__': from PySide2.QtWidgets import QApplication, QPushButton import sys app = QApplication(sys.argv) sa = PMFlowArea() for i in range(10): w = sa.add_widget(QPushButton('ad%d' % i)) w.setMaximumHeight(60) w.setMinimumHeight(60) w.setMinimumWidth(100) w.setMaximumWidth(100) sa.show() sys.exit(app.exec_())
pyminer/pyminer
pyminer/widgets/widgets/basic/containers/flowarea.py
flowarea.py
py
2,813
python
en
code
77
github-code
36
[ { "api_name": "typing.TYPE_CHECKING", "line_number": 8, "usage_type": "name" }, { "api_name": "PySide2.QtWidgets.QWidget", "line_number": 13, "usage_type": "name" }, { "api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 18, "usage_type": "call" }, { "api_n...
21993890104
from typing import List from unittest import TestCase import torch from torch import Tensor from attnganw import config from attnganw.randomutils import get_vector_interpolation class TestInterpolation(TestCase): def test_get_noise_interpolation(self): batch_size = 1 noise_vector_size = 3 noise_vector_start: Tensor = torch.randn(batch_size, noise_vector_size, dtype=torch.float) noise_vector_end: Tensor = torch.randn(batch_size, noise_vector_size, dtype=torch.float) config.generation['noise_interpolation_steps'] = 4 initial_interpolation: List[Tensor] = get_vector_interpolation(batch_size=batch_size, noise_vector_size=noise_vector_size, noise_vector_start=noise_vector_start, noise_vector_end=noise_vector_end, gpu_id=-1) self.assertEqual(len(initial_interpolation), config.generation['noise_interpolation_steps'] + 1) self.assertTrue(torch.equal(initial_interpolation[0], noise_vector_start)) self.assertTrue(torch.equal(initial_interpolation[-1], noise_vector_end)) config.generation['noise_interpolation_steps'] = config.generation['noise_interpolation_steps'] * 2 second_interpolation: List[Tensor] = get_vector_interpolation(batch_size=batch_size, noise_vector_size=noise_vector_size, noise_vector_start=noise_vector_start, noise_vector_end=noise_vector_end, gpu_id=-1) self.assertEqual(len(second_interpolation), config.generation['noise_interpolation_steps'] + 1) self.assertTrue(torch.equal(second_interpolation[0], noise_vector_start)) self.assertTrue(torch.equal(second_interpolation[-1], noise_vector_end)) self.assertFalse(torch.equal(second_interpolation[1], initial_interpolation[1]))
cptanalatriste/birds-of-british-empire
tests/test_train.py
test_train.py
py
2,304
python
en
code
null
github-code
36
[ { "api_name": "unittest.TestCase", "line_number": 11, "usage_type": "name" }, { "api_name": "torch.Tensor", "line_number": 17, "usage_type": "name" }, { "api_name": "torch.randn", "line_number": 17, "usage_type": "call" }, { "api_name": "torch.float", "line_nu...
33245288481
#!/usr/bin/env python import rospy import matplotlib.pyplot as plt import matplotlib.animation import numpy as np from ti_mmwave_rospkg.msg import RadarScan class MySimpleClass(object): def __init__(self): self.sub = rospy.Subscriber('/ti_mmwave/radar_scan',RadarScan,self.sub_callback) self.tmp_x = [] self.tmp_y = [] self.showflag = 0 #self.fig, ax = plt.subplots() #self.sc = ax.scatter(self.tmp_x,self.tmp_y) def sub_callback(self,msg): if msg.point_id ==0: '''if not self.showflag: self.showflag = 1 plt.show() ''' #plt.scatter(self.tmp_x,self.tmp_y) #self.sc.set_offsets(np.c_[self.tmp_x,self.tmp_y]) #self.fig.canvas.draw_idle() #plt.pause(0.1) self.tmp_x=[] self.tmp_y=[] self.tmp_x.append(msg.x) self.tmp_y.append(msg.y) def animate(i): sc.set_offsets(np.c_[my_simple_class.tmp_x,my_simple_class.tmp_y]) if __name__ =="__main__": rospy.init_node('hello') my_simple_class = MySimpleClass() #plt.show() fig, ax = plt.subplots() x, y = [],[] sc = ax.scatter(x,y) plt.xlim(0,10) plt.ylim(-10,10) ani = matplotlib.animation.FuncAnimation(fig, animate, frames=30, interval=100, repeat=True) plt.show() rospy.spin()
YiShan8787/mm-2sensor
src/micro_doppler_pkg/scripts/test3.py
test3.py
py
1,420
python
en
code
0
github-code
36
[ { "api_name": "rospy.Subscriber", "line_number": 11, "usage_type": "call" }, { "api_name": "ti_mmwave_rospkg.msg.RadarScan", "line_number": 11, "usage_type": "argument" }, { "api_name": "numpy.c_", "line_number": 36, "usage_type": "attribute" }, { "api_name": "ros...
6798077511
import logging from django.core.management.base import BaseCommand from django.core.exceptions import ObjectDoesNotExist from embed_video.backends import detect_backend from ...clients import VimeoClient from ...models import Resource from ...conf import settings logger = logging.getLogger('vimeo') class Command(BaseCommand): def exists_dev_tag(self, video_link): client = VimeoClient() is_dev = False code = detect_backend(video_link).code try: tags = client.get_video_tags(code) for tag in tags: if tag.get('name') == settings.RESOURCE_DEVELOPMENT_TAG_NAME: is_dev = True except Exception as e: logger.error('commands.import_vimeo_resources.get_video_tags.ValueError: {}'.format(e)) is_dev = True return is_dev def create_internal_messages(self): Resource.objects.create_internal_messages( resources=self.resources_updated, level=settings.RESOURCE_CH_MESSAGE_SUCCESS) Resource.objects.create_internal_messages( resources=self.resources_error, level=settings.RESOURCE_CH_MESSAGE_ERROR) def get_resources_links_deleted(self): return Resource.objects.removed().values_list('link', flat=True) def get_resources_links_dev(self): return Resource.objects.get_development_resources().values_list('link', flat=True) def get_video_status(self, video_data): return video_data.get('status') def update_or_create_resource(self, resource, video_data): try: resource, created = Resource.objects.update_or_create(**video_data) try: self.resources_updated[resource.created_by.pk] += 1 except KeyError: self.resources_updated[resource.created_by.pk] = 1 except AttributeError: pass self.stdout.write('Resource with pk {} updated'.format(resource.pk)) except Exception as e: logger.error('commands.import_vimeo_resources.update_or_create.Exception: {}'.format(e)) def set_resource_status_error(self, resource): self.stdout.write('Resource with pk {} error'.format(resource.pk)) resource.set_as_error() try: self.resources_error[resource.created_by.pk] += 1 except KeyError: self.resources_error[resource.created_by.pk] = 1 except AttributeError: pass def set_video_thumbnail(self, video_data): video_pictures = video_data.get('pictures') if video_pictures and len(video_pictures): positions = [0, 1, 2, 3] for position in positions: try: thumbnail = video_pictures.get('sizes')[position] video_data['thumbnail'] = thumbnail.get('link') except IndexError: pass return video_data def is_video_available(self, video_status): return video_status == settings.RESOURCE_PROVIDER_STATUS_AVAILABLE def is_video_error(self, video_status): return video_status == settings.RESOURCE_PROVIDER_STATUS_UPLOADING_ERROR def is_resource_deleted(self, video_link): return video_link in self.resources_deleted def is_resource_dev_local(self, video_link): return video_link in self.resources_dev_local def is_resource_dev_vimeo(self, video_link): return video_link in self.resources_dev_vimeo def handle(self, *args, **kwargs): self.stdout.write('Script init: Import vimeo videos') logger.info('Script init: Import vimeo videos') client = VimeoClient() num_pages = client.get_video_num_pages() self.resources_updated = {} self.resources_error = {} self.resources_dev_vimeo = [] self.resources_dev_local = self.get_resources_links_dev() self.resources_deleted = self.get_resources_links_deleted() for page in range(0, num_pages): videos = client.get_videos_paginated(page + 1) for video_data in videos: video_link = video_data.get('link') try: resource, _ = Resource.objects.get_or_create(link=video_link) if resource.is_draft: status = self.get_video_status(video_data) video_data = 
self.set_video_thumbnail(video_data) if self.exists_dev_tag(video_link): self.resources_dev_vimeo.append(video_link) if self.is_resource_deleted(video_link): continue elif (self.is_resource_dev_local(video_link) or self.is_resource_dev_vimeo(video_link)) \ and settings.UPLOAD_REAL: continue if self.is_video_available(status): self.update_or_create_resource(resource, video_data) elif self.is_video_error(status): self.set_resource_status_error(resource) except ObjectDoesNotExist: logger.error('commands.import_vimeo_resources.ObjectDoesNotExist: {}'.format(video_link)) continue self.create_internal_messages() self.stdout.write('Script finished: Import vimeo videos') logger.info('Script finished: Import vimeo videos')
tomasgarzon/exo-services
service-exo-medialibrary/resource/management/commands/import_vimeo_resources.py
import_vimeo_resources.py
py
5,577
python
en
code
0
github-code
36
[ { "api_name": "logging.getLogger", "line_number": 12, "usage_type": "call" }, { "api_name": "django.core.management.base.BaseCommand", "line_number": 15, "usage_type": "name" }, { "api_name": "clients.VimeoClient", "line_number": 18, "usage_type": "call" }, { "api...
32975944321
#Scripts for the search import mysql.connector import json class carrier(): Name = "" Email = "" Location = [] class seller(): Firstname = "" Surname = "" Email = "" Graduate = False Location = "" Products = [] class product(): menteeName = "" menteeGraduate = False menteeLocation = "" menteeEmail = "" Quantity = "" MenteeEmail = "" Matchs = False #Returns the quantity as a volume in liters def convertToLiters(quantity, units): if units == "liter": return quantity * 1.0 elif units == "milliliter": return quantity/1000.0 elif units == "gallons": return quantity*4.54609 elif units == "pints": return quantity/1.7598 def convertToKilograms(quantity, units): if units == "kilograms": return quantity*1.0 elif units == "grams": return quantity/1000.0 elif units == "tonnes": return quantity * 1000.0 elif units == "stone": return quantity*6.35029 elif units == "pounds": return quantity/2.20462 elif units == "ounces": return quantity/35.274 def search(quantity, units, keywords, endRegion, usertype): unitFamily = "" #Determine if liquid, solid or loose #Convert to corresponding standard measure(liters, kilograms, pieces) if units in ["liters", "milliliters", "gallons", "pints"]: unitFamily="liters" convertToLiters(quantity, units) elif units in ["kilograms", "grams", "tonnes", "stone", "pounds", "ounces"]: unitFamily="kilograms" convertToKilograms(quantity, units) else: unitFamily = "pieces" #function convertToLiters #function convertToKilograms #Access database cnx = mysql.connector.connect(user='root', password='cfg2014!', host='127.0.0.1', database='c4g', port='3306') cursor = cnx.cursor() #carrier query carriers = [] query = ("SELECT Name, Email, Location FROM carrier") cursor.execute(query) for (Name, Email, Location) in cursor: individual = carrier() individual.Name = Name individual.Email = Email individual.Location = split(Location, ",").trim() carriers.append(individual) #Find carriers with end region in range viableCarriers = [] for i in range(len(carriers)): for j in range(len(carriers[i].Location)): if carriers[i].Location[j].lower() == endRegion: viableCarriers.append(viableCarriers[i]) #Find sellers within the ranges of carriers #Get sellers sellers = [] query = ("SELECT Firstname, Surname, Email, Graduate, Location FROM mentee") cursor.execute(query) for Firstname, Surname, Email, Graduate, Location in cursor: individual = seller() individual.Firstname = Firstname individual.Surname = Surname individual.Email = Email individual.Graduate = Graduate individual.Location = Location #Prevents non graduated accounts from being shown if individual.Graduate == True or (usertype == "mentee" or usertype == "mentor"): #Get products of user products = [] query = ("SELECT Name, Quantity, Email FROM products WHERE MenteeEmail = '" + str(individual.Email) + "'") cursor.execute(query) # # # #HERE # # # # for Name, Quantity, Email in cursor: individualProduct = product() individualProduct.Name = Name individualProduct.MenteeEmail = Email individualProduct.Quantity = Quantity individualProduct.menteeName = (individual.Firstname + individual.Lastname) individualProduct.menteeGraduate = individual.Graduate individualProduct.menteeLocation = individual.Location products.append(individualProduct) individual.Products = products #Check for matching with keywords and their products keywordList = split(keywords, " ") for i in range[len(sellers)]: for j in range[len(sellers[i].Products)]: for k in range[len(keywordList)]: if keywordList[k] in sellers[i].Products[j].Name: sellers[i].Products[j].Matchs = True 
#Check for region matching that of viable carriers finalListProducts = [] for i in range[len(sellers)]: for j in range[len(sellers[i].Products)]: for k in range[len(viableCarriers)]: if sellers[i].Products[j].Matchs == True: if sellers[i].Location in viableCarriers.Location: finalListProducts.append(sellers[i].Products[k]) #Organise by quantity finalListProducts.sort() finalListProducts[::-1] #find closest to required quantity targetSum = Quantity suggestList = [] for i in range[len(finalListProducts)]: if finalListProducts[i].Quantity < targetSum: targetSum = targetSum - finalListProducts[i].Quantity suggestList.append(finalListProducts[i]) #Return as JSON #JSON suggestList #JSONfinalListProducts #iterate for i in range[len(suggestList)]: suggestDict = {'unitvalue': suggestList[i].Quantity, 'unit': unitFamily, 'keyword' : suggestList[i].Name, 'meName' : suggestList[i].menteeName, 'region' : suggestList[i].Location, 'carrier' : viableCarrier[0], 'qualitymatch' : 0} for i in range[len(finalListProducts)]: finalDict = {'unitvalue': finalListProducts[i].Quantity, 'unit': unitFamily, 'keyword' : finalListProducts[i].Name, 'meName' : finalListProducts[i].menteeName, 'region' : finalListProducts[i].Location, 'carrier' : viableCarrier[0], 'qualitymatch' : 1} JSONDICT = {'success' : 1, 'data' : [suggestDict + finalDict]} return HttpResponse(json.dump(JSONDICT))
Team-14-CodeForGood2014/Cherie-Blair-Foundation-Marketplace
Django/cbfm/searchEngine/scripts.py
scripts.py
py
6,321
python
en
code
0
github-code
36
[ { "api_name": "mysql.connector.connector.connect", "line_number": 86, "usage_type": "call" }, { "api_name": "mysql.connector.connector", "line_number": 86, "usage_type": "attribute" }, { "api_name": "mysql.connector", "line_number": 86, "usage_type": "name" }, { "...
11352228515
import os clear = lambda : os.system('cls') import datetime from time import process_time_ns x = datetime.datetime.now() ulang = "y" while ulang=="y" or ulang=="Y": kodeGolongan = [1,2,3] gajiPokok = [2500000, 4500000, 6500000] tunjanganIstri = [0.01, 0.03, 0.05] kodeJK =[1,2] JK = ['Laki - Laki','Perempuan'] kodeStsKwn =[1,2] StsKwn = ['Kawin','Belum Kawin'] kodeStsAnk =[1,2] StsAnk = ['Punya','Belum Punya'] iuranPensiun = 15500 iuranOrganisasi = 3500 clear print ("==============================================") print("{:^44}".format("SELAMAT DATANG")) print("{:^44}".format("PERHITUNGAN GAJI KARYAWAN CV.LOGOS")) print("{:^44}".format("TANGGAL = " + x.strftime("%x"))) print ("==============================================") namaKaryawan = input("Masukan Nama = ") inp = 1 while inp < 4: clear() print("==============================================") print("{:^44}".format("PILIHAN GOLONGAN")) print("==============================================") nmr = 1 a = 0 for kodeGol in kodeGolongan : print(str(nmr) + ". Golongan " + str(kodeGol)) a = a + 1 nmr = nmr + 1 print("==============================================") golongan = int(input("Masukan Kode Golongan = ")) clear() inp = golongan if inp <= len(kodeGolongan) : i = 0 while i<len(kodeGolongan): if kodeGolongan[i] == inp: ambilGaji = gajiPokok[i] i+=1 else : break clear() print("==============================================") print("{:^44}".format("PILIHAN JENIS KELAMIN")) print("==============================================") a = 0 for jenisKel in JK : kodeKel = kodeJK[a] print(str(kodeKel) + ". " + str(jenisKel)) a = a + 1 print("==============================================") jenisKelamin = int(input("Masukan Kode Jenis Kelamin = ")) clear() inpJK = jenisKelamin if inpJK <= len(kodeJK) : i = 0 while i<len(kodeJK): if kodeJK[i] == inpJK: ambilJK = JK[i] i+=1 else : break clear() print("==============================================") print("{:^44}".format("PILIHAN STATUS KAWIN")) print("==============================================") a = 0 for jenisSK in StsKwn : kodeSK = kodeStsKwn[a] print(str(kodeSK) + ". " + str(jenisSK)) a = a + 1 print("==============================================") StatusKawin = int(input("Masukan Kode Status Kawin = ")) clear() inpSK = StatusKawin if inpSK <= len(kodeStsKwn) : i = 0 while i<len(kodeStsKwn): if kodeStsKwn[i] == inpSK: ambilSK = StsKwn[i] i+=1 else : break if ambilSK == 'Kawin' : clear() print("==============================================") print("{:^44}".format("PILIHAN STATUS ANAK")) print("==============================================") a = 0 for jenisSA in StsAnk : kodeSA = kodeStsAnk[a] print(str(kodeSA) + ". 
" + str(jenisSA)) a = a + 1 print("==============================================") StatusAnak = int(input("Masukan Kode Status Anak = ")) clear() inpSA = StatusAnak if inpSA <= len(kodeStsAnk) : i = 0 while i<len(kodeStsAnk): if kodeStsAnk[i] == inpSA: ambilSA = StsAnk[i] i+=1 else : break #hitung tunjangan istri if ambilJK == 'Laki - Laki' and ambilSK == 'Kawin' : i = 0 while i<len(kodeGolongan): if kodeGolongan[i] == inp: ambilTunjanganIstri = tunjanganIstri[i] totalTunjanganIstri = ambilGaji * ambilTunjanganIstri i+=1 else : totalTunjanganIstri = 0 #hitung tunjangan anak if ambilSK == 'Kawin' and ambilSA == 'Punya' : totalTunjanganAnak = ambilGaji * 0.02 else : totalTunjanganAnak = 0 #hitung gaji bruto gajiBruto = ambilGaji + totalTunjanganAnak + totalTunjanganIstri #hitung biaya jabatan biayaJabatan = gajiBruto * 0.0005 #hitung gaji netto gajiNetto = gajiBruto - biayaJabatan - iuranPensiun - iuranOrganisasi clear() print("==============================================") print("{:^44}".format("SLIP GAJI")) print("{:^44}".format("KARYAWAN CV.LOGOS")) print("{:^44}".format("TANGGAL = " + x.strftime("%x"))) print("==============================================") print("Nama " + namaKaryawan) print("Golongan " + str(golongan)) print("jenis kelamin " + ambilJK) print("Staus Kawin " + ambilSK) print("Gaji Pokok Rp " + format(ambilGaji,',.2f')) print("Tunjangan istri Rp " + format(totalTunjanganIstri,',.2f')) print("Tunjangan Anak Rp " + format(totalTunjanganAnak,',.2f')) print(">> Gaji bruto Rp " + format(gajiBruto,',.2f')) print("==============================================") print("Biaya Jabatan Rp " + format(biayaJabatan,',.2f')) print("Iuran Pensiun Rp " + format(iuranPensiun,',.2f')) print("Iuran Organisasi Rp " + format(iuranOrganisasi,',.2f')) print(">> Gaji Netto Rp " + format(gajiNetto,',.2f')) print("") #cetak struk (ekstensi : .txt) f=open("SLIPGAJI"+ namaKaryawan.upper() +".txt","w+") f.write("==============================================\r") f.write("{:^44}".format("SLIP GAJI") + "\r") f.write("{:^44}".format("KARYAWAN CV.LOGOS") + "\r") f.write("{:^44}".format("TANGGAL = " + x.strftime("%x")) + "\r") f.write("==============================================\r") f.write("Nama " + namaKaryawan + "\r") f.write("Golongan " + str(golongan) + "\r") f.write("jenis kelamin " + ambilJK + "\r") f.write("Staus Kawin " + ambilSK + "\r") f.write("Gaji Pokok Rp " + format(ambilGaji,',.2f') + "\r") f.write("Tunjangan istri Rp " + format(totalTunjanganIstri,',.2f') + "\r") f.write("Tunjangan Anak Rp " + format(totalTunjanganAnak,',.2f') + "\r") f.write(">> Gaji bruto Rp " + format(gajiBruto,',.2f') + "\r") f.write("==============================================\r") f.write("Biaya Jabatan Rp " + format(biayaJabatan,',.2f') + "\r") f.write("Iuran Pensiun Rp " + format(iuranPensiun,',.2f') + "\r") f.write("Iuran Organisasi Rp " + format(iuranOrganisasi,',.2f') + "\r") f.write(">> Gaji Netto Rp " + format(gajiNetto,',.2f') + "\r") f.write("\r") f.write("{:^44}".format("- TETAP SEMANGAT & SEHAT SELALU -") + "\r") f.write("{:^44}".format("- TERIMA KASIH -") + "\r") ulang = input('Ulangi Cek Gaji? (y/t) : ') clear() break
20083000169RianHudaMaulana/Uas-
UAS_20083000169_Rian Huda Maulana_2G.py
UAS_20083000169_Rian Huda Maulana_2G.py
py
8,062
python
en
code
0
github-code
36
[ { "api_name": "os.system", "line_number": 2, "usage_type": "call" }, { "api_name": "datetime.datetime.now", "line_number": 6, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 6, "usage_type": "attribute" } ]
74605387945
from petalo_calib.tdc_corrections import correct_tfine_wrap_around from petalo_calib.qdc_corrections import correct_efine_wrap_around from petalo_calib.tdc_corrections import apply_tdc_correction_tot from petalo_calib.tdc_corrections import compute_integration_window_size from petalo_calib.tdc_corrections import add_tcoarse_extended_to_df from petalo_calib.clustering import compute_evt_number_combined_with_cluster_id from petalo_calib.io import compute_file_chunks_indices from petalo_calib.io import write_corrected_df_daq from sklearn.cluster import DBSCAN import pandas as pd import numpy as np import sys def compute_tcoarse_wrap_arounds(df): limits = df[df.tcoarse_diff < -20000].index first = df.index[0] last = df.index[-1] limits = np.concatenate([np.array([first]), limits.values, np.array([last])]) return limits def compute_tcoarse_nloops(df): limits = compute_tcoarse_wrap_arounds(df) nloops = np.zeros(df.shape[0], dtype='int32') for i in range(limits.shape[0]-1): start = limits[i] end = limits[i+1] nloops[start:end+1] = i return nloops def compute_extended_tcoarse(df): return df['tcoarse'] + df['nloops'] * 2**16 def add_tcoarse_extended_to_df(df): df['tcoarse'] = df.tcoarse.astype(np.int32) df['tcoarse_diff'] = df.tcoarse.diff() df['nloops'] = compute_tcoarse_nloops(df) df['tcoarse_extended'] = compute_extended_tcoarse(df) def local_sort_tcoarse(df, indices): start = -1 end = -1 window_size = 120 for index in indices: if (index >= start) and (index <= end): #print("Done! ", index) continue start = index - window_size end = index + window_size #print(start, end) df.iloc[start:end] = df.iloc[start:end].sort_values('tcoarse', ascending=False) def local_sort_tcoarse_to_fix_wrap_arounds(df): add_tcoarse_extended_to_df(df) indices = df[df.tcoarse_diff < -20000].index.values local_sort_tcoarse(df, indices) add_tcoarse_extended_to_df(df) #df.drop(columns=['tcoarse_diff', 'nloops'], inplace=True) def compute_tcoarse_extended_with_local_sort(df): df_0 = df[df.tofpet_id == 0].reset_index() df_2 = df[df.tofpet_id == 2].reset_index() local_sort_tcoarse_to_fix_wrap_arounds(df_0) local_sort_tcoarse_to_fix_wrap_arounds(df_2) df_all = pd.concat([df_0, df_2]) df_all_sorted = df_all.sort_values(['evt_number', 'tcoarse_extended']).reset_index(drop=True) return df_all_sorted def compute_clusters(df): values = df.tcoarse_extended.values values = values.reshape(values.shape[0],1) clusters = DBSCAN(eps=10, min_samples=2).fit(values) return clusters.labels_ def process_daq_df_tot(df, df_tdc1_asic0, df_tdc2_asic0, df_tdc1_asic2, df_tdc2_asic2): compute_integration_window_size(df) correct_tfine_wrap_around(df) correct_efine_wrap_around(df) df = compute_tcoarse_extended_with_local_sort(df) df_0 = df[df.tofpet_id == 0] df_2 = df[df.tofpet_id == 2] df_0 = apply_tdc_correction_tot(df_0, df_tdc1_asic0, 'tfine') df_0 = apply_tdc_correction_tot(df_0, df_tdc2_asic0, 'efine') df_2 = apply_tdc_correction_tot(df_2, df_tdc1_asic2, 'tfine') df_2 = apply_tdc_correction_tot(df_2, df_tdc2_asic2, 'efine') df = pd.concat([df_0, df_2]).sort_index() df.drop(columns=['card_id', 'wordtype_id'], inplace=True) df['cluster'] = compute_clusters(df) return df def process_daq_file(filein, fileout, df_tdc1_asic0, df_tdc2_asic0, df_tdc1_asic2, df_tdc2_asic2): chunks = compute_file_chunks_indices(filein) nchunks = chunks.shape[0] for i in range(nchunks-1): print("{}/{}".format(i, nchunks-2)) start = chunks[i] end = chunks[i+1] df = pd.read_hdf(filein, 'data', start=start, stop=end+1) df_corrected = process_daq_df_tot(df, df_tdc1_asic0, 
df_tdc2_asic0, df_tdc1_asic2, df_tdc2_asic2) write_corrected_df_daq(fileout, df_corrected, i, i>0) tdc1_asic0 = '/home/vherrero/CALIBRATION_FILES/tfine_cal_asic0_run11291.h5' tdc2_asic0 = '/home/vherrero/CALIBRATION_FILES/tfine2_cal_asic0_run11291.h5' tdc1_asic2 = '/home/vherrero/CALIBRATION_FILES/tfine_cal_asic2_run11292.h5' tdc2_asic2 = '/home/vherrero/CALIBRATION_FILES/tfine2_cal_asic2_run11292.h5' df_tdc1_asic0 = pd.read_hdf(tdc1_asic0, key='tfine_cal') df_tdc2_asic0 = pd.read_hdf(tdc2_asic0, key='tfine_cal') df_tdc1_asic2 = pd.read_hdf(tdc1_asic2, key='tfine_cal') df_tdc2_asic2 = pd.read_hdf(tdc2_asic2, key='tfine_cal') filein = sys.argv[1] fileout = sys.argv[2] process_daq_file(filein, fileout, df_tdc1_asic0, df_tdc2_asic0, df_tdc1_asic2, df_tdc2_asic2)
jmbenlloch/petalo_calib
petalo_calib/scripts/process_files_tot_new_clusters.py
process_files_tot_new_clusters.py
py
4,761
python
en
code
0
github-code
36
[ { "api_name": "numpy.concatenate", "line_number": 24, "usage_type": "call" }, { "api_name": "numpy.array", "line_number": 24, "usage_type": "call" }, { "api_name": "numpy.zeros", "line_number": 30, "usage_type": "call" }, { "api_name": "numpy.int32", "line_num...
42887542436
from collections import OrderedDict class Cache: """ A python class which used ordered dictionary (OrderedDict) to implement the LRU cache. Each entry of the dictinoary will be a key/value pair. The search would be by key. LRU Cache will have its maximum size defined at initiation. When adding new keys that cause the capacity to be exceed the size defined at initialtion, the oldest items will be removed to make room. The newly added items/last accessed items will be moved to the back of the dictonary (most recently used) and the elements at the begining will correspond to lest recently used. The element at the begining of the dictionary will be the one to discard when the cache if full (least recently used) Methods: get(key) - Get the value corresponding to key put(key,value) - Insert key/value into the cache delKey(key) - Delete the key reset() - Clear the cache dumpCache() - Print the cache contents """ def __init__(self, size, verbose=False): """ Initialize new cache object with the size passed. Args: size (int): The max size of the cache. """ self.printDebug = verbose self.sizeOfCache = size self.cacheLRU = OrderedDict() def get(self, key): """ Returns the value corresponding to the key provided. If the key does not exit, it returns None else it moves the key to the end of the dictionary using move_to_end (method of OrderedDict) Args: key (int): The key to lookup the value Returns: value (int): The value corresponding to the key if found else None """ if self.sizeOfCache == 0: if self.printDebug: print("Zero capacity cache and get request. Raise exception.") raise Exception('Cache is not defined') else: if key not in self.cacheLRU: return None else: self.cacheLRU.move_to_end(key) return self.cacheLRU[key] def put(self, key, value): """ Inserts the key/value pair to the cache. If the cache is already full (max capacity), it will remove the element at the head of the dict (least recently used) using popitem and insert the key/value pair at the end of the dict (most recently used). If the key already exists, it will not fail but mark the item as recently used (move it to the back of the dict) Returns: Nothing or exception if cache capacity is 0 """ if key in self.cacheLRU: if self.printDebug: print("Key {} already exists in cache. Update this and mark as recently used".format(key)) self.cacheLRU[key] = value self.cacheLRU.move_to_end(key) else: if self.sizeOfCache == 0: if self.printDebug: print("Zero capacity cache and put request. Raise exception.") raise Exception('Cache is not defined') else: if len(self.cacheLRU) >= self.sizeOfCache: outKey, outVal = self.cacheLRU.popitem(last = False) if self.printDebug: print("Cache at capacity of {}. Removing LRU key {}".format(self.sizeOfCache,outKey)) self.cacheLRU[key] = value self.cacheLRU.move_to_end(key) def delKey(self, key): """ Delete the key from the cache if it exists. If the key does not exist nothing happens. The delete is treated as a cache hit and the key is moved to the end (most recently used) and then removed. Returns: Nothing or exception if cache capacity is 0 """ if self.sizeOfCache == 0: if self.printDebug: print("Zero capacity cache and del request. Raise exception.") raise Exception('Cache is not defined') else: if key in self.cacheLRU: self.cacheLRU.move_to_end(key) self.cacheLRU.popitem(last = True) else: if self.printDebug: print("Key {} to delete does not exist in the cache. 
Doing nothing.\n".format(self.sizeOfCache)) def reset(self): """ Reset the cache Returns: Nothing or exception if cache capacity is 0 """ if self.sizeOfCache == 0: if self.printDebug: print("Zero capacity cache and reset request. Raise exception.") raise Exception('Cache is not defined') else: self.cacheLRU.clear() def dumpCache(self): """ Print the contents of the cache Returns: Nothing """ print(self.cacheLRU)
harsimrit/task1
cacheLRU.py
cacheLRU.py
py
4,971
python
en
code
0
github-code
36
[ { "api_name": "collections.OrderedDict", "line_number": 29, "usage_type": "call" } ]
21333304207
import unittest from climateeconomics.sos_processes.iam.witness.witness_coarse.usecase_witness_coarse_new import Study from sostrades_core.execution_engine.execution_engine import ExecutionEngine from tempfile import gettempdir from copy import deepcopy from gemseo.utils.compare_data_manager_tooling import delete_keys_from_dict,\ compare_dict import numpy as np class WITNESSParallelTest(unittest.TestCase): def setUp(self): self.name = 'Test' self.root_dir = gettempdir() self.ee = ExecutionEngine(self.name) def test_01_exec_parallel(self): """ 8 proc """ n_proc = 16 repo = 'climateeconomics.sos_processes.iam.witness' self.ee8 = ExecutionEngine(self.name) builder = self.ee8.factory.get_builder_from_process( repo, 'witness_coarse') self.ee8.factory.set_builders_to_coupling_builder(builder) self.ee8.configure() self.ee8.display_treeview_nodes() usecase = Study() usecase.study_name = self.name values_dict = {} for dict_item in usecase.setup_usecase(): values_dict.update(dict_item) values_dict[f'{self.name}.sub_mda_class'] = "GSPureNewtonMDA" values_dict[f'{self.name}.max_mda_iter'] = 50 values_dict[f'{self.name}.n_processes'] = n_proc self.ee8.load_study_from_input_dict(values_dict) self.ee8.execute() dm_dict_8 = deepcopy(self.ee8.get_anonimated_data_dict()) """ 1 proc """ n_proc = 1 builder = self.ee.factory.get_builder_from_process( repo, 'witness_coarse') self.ee.factory.set_builders_to_coupling_builder(builder) self.ee.configure() self.ee.display_treeview_nodes() usecase = Study() usecase.study_name = self.name values_dict = {} for dict_item in usecase.setup_usecase(): values_dict.update(dict_item) values_dict[f'{self.name}.sub_mda_class'] = "GSPureNewtonMDA" values_dict[f'{self.name}.max_mda_iter'] = 50 values_dict[f'{self.name}.n_processes'] = n_proc self.ee.load_study_from_input_dict(values_dict) self.ee.execute() dm_dict_1 = deepcopy(self.ee.get_anonimated_data_dict()) residual_history = self.ee.root_process.sub_mda_list[0].residual_history dict_error = {} # to delete modelorigin and discipline dependencies which are not the # same delete_keys_from_dict(dm_dict_1) delete_keys_from_dict(dm_dict_8) compare_dict(dm_dict_1, dm_dict_8, '', dict_error) residual_history8 = self.ee8.root_process.sub_mda_list[0].residual_history #self.assertListEqual(residual_history, residual_history8) for key, value in dict_error.items(): print(key) print(value) for disc1, disc2 in zip(self.ee.root_process.sos_disciplines, self.ee8.root_process.sos_disciplines): if disc1.jac is not None: # print(disc1) for keyout, subjac in disc1.jac.items(): for keyin in subjac.keys(): comparison = disc1.jac[keyout][keyin].toarray( ) == disc2.jac[keyout][keyin].toarray() try: self.assertTrue(comparison.all()) except: print('error in jac') print(keyout + ' vs ' + keyin) np.set_printoptions(threshold=1e6) for arr, arr2 in zip(disc1.jac[keyout][keyin], disc2.jac[keyout][keyin]): if not (arr.toarray() == arr2.toarray()).all(): print(arr) print(arr2) # The only different value is n_processes self.assertDictEqual(dict_error, { '.<study_ph>.n_processes.value': "1 and 16 don't match"}) if '__main__' == __name__: cls = WITNESSParallelTest() cls.setUp() cls.test_01_exec_parallel()
os-climate/witness-core
climateeconomics/tests/_l1_test_witness_parallel.py
_l1_test_witness_parallel.py
py
4,198
python
en
code
7
github-code
36
[ { "api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute" }, { "api_name": "tempfile.gettempdir", "line_number": 16, "usage_type": "call" }, { "api_name": "sostrades_core.execution_engine.execution_engine.ExecutionEngine", "line_number": 17, "usage_t...
43157370954
#!/usr/bin/python3

import sys, pygame
from pygame.locals import *

black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)

pygame.init()

pygame.display.set_caption("drawing")  # set the title of the window
surface = pygame.display.set_mode((400, 300))  # returns a pygame.Surface
surface.fill(white)  # <=== fill the surface with white

# draw polygon
pygame.draw.polygon(surface, green, ((123, 0), (234, 132), (269, 211), (77, 66)), 0)
# draw line
pygame.draw.line(surface, red, (70, 200), (80, 100), 20)
pygame.draw.circle(surface, black, (30, 50), 15, 10)
pygame.draw.ellipse(surface, black, (30, 50, 100, 60), 10)
pygame.draw.rect(surface, blue, (30, 50, 100, 60))

# event loop: handle events, update game state (variables), render graphics
while True:
    for event in pygame.event.get():
        # QUIT etc. are defined in pygame.locals
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    pygame.display.update()  # render the surface onto the screen

# Question:
# what's the difference if we move the draw calls (Line 14 ~ 18) into the loop
# before pygame.display.update() ?

"""
Surface: 2D rectangle
Color  : (r, g, b, a)
Rect   : (x0, y0, width, height)
"""
minskeyguo/mylib
python-edu/17-pygame-basic/02-geometry.py
02-geometry.py
py
1,223
python
en
code
0
github-code
36
[ { "api_name": "pygame.init", "line_number": 12, "usage_type": "call" }, { "api_name": "pygame.display.set_caption", "line_number": 14, "usage_type": "call" }, { "api_name": "pygame.display", "line_number": 14, "usage_type": "attribute" }, { "api_name": "pygame.dis...
34547412730
from PIL import Image
import cv2
import numpy as np

# select the camera (device 0)
cap = cv2.VideoCapture(0)

while True:
    # capture one frame from the camera
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(frame, 100, 200)

    # img_fc, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # hierarchy = hierarchy[0]
    # found = []
    # for i in range(len(contours)):
    #     k = i
    #     c = 0
    #     while hierarchy[k][2] != -1:
    #         k = hierarchy[k][2]
    #         c = c + 1
    #     if c >= 5:
    #         found.append(i)
    # for i in found:
    #     cv2.drawContours(frame, contours, i, (0, 255, 0), 3)

    # find external contours on the edge map; the original referenced an undefined
    # `blurred` image, so the existing edge map is used here instead. Slicing with
    # [-2:] works with both the OpenCV 3.x and 4.x return signatures.
    (cnts, _) = cv2.findContours(edges.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)[-2:]
    if len(cnts) > 0:
        for cnt in cnts:
            # compute the (rotated) bounding box around the contour and draw it
            rect = np.int32(cv2.boxPoints(cv2.minAreaRect(cnt)))
            cv2.drawContours(frame, [rect], -1, (0, 255, 0), 2)

    # display the image
    cv2.imshow('frame', edges)
    cv2.waitKey(1)
    if cv2.getWindowProperty('frame', cv2.WND_PROP_AUTOSIZE) == -1:
        break

# release the camera
cap.release()

# close all OpenCV windows
cv2.destroyAllWindows()
Amenoimi/Simple_OCR
QR_GET.py
QR_GET.py
py
1,322
python
en
code
0
github-code
36
[ { "api_name": "cv2.VideoCapture", "line_number": 8, "usage_type": "call" }, { "api_name": "cv2.cvtColor", "line_number": 12, "usage_type": "call" }, { "api_name": "cv2.COLOR_BGR2GRAY", "line_number": 12, "usage_type": "attribute" }, { "api_name": "cv2.Canny", ...
73642772264
# ----------------------------------------------------------- # --------- Assignment 4 - PCA analysis with Python ------------------ # ----------------------------------------------------------- # Author: Tomas Milla-Koch # Purpose: The following script is script for clipping a scene to vector boundary and performing a PCA analysis. # Course: REMS 6023 # Date: 30/01/2022 # Disclaimer: This script is for educational purposes only. # ---------------------------------------------------------- # ----------------------------------------------------------- # --------- Importing of required python libraries --------- # ----------------------------------------------------------- import os import shutil import fnmatch # import exceptions module from pci.exceptions import * # import pci modules for project from pci.clip import clip from pci.pcimod import * from pci.nspio import Report, enableDefaultReport from pci.pca import pca from pci.nspio import Report, enableDefaultReport from pci.fexport import * # initializing script time from datetime import datetime as dt, time # start time of script start = dt.now() # ----------------------------------------------------------- # --------- File Management -------------------------------- # ----------------------------------------------------------- print('Obtaining necessary files.') # get root directory root = os.getcwd() # list containing paths where files of different outputs will go files = ['pca', 'reports'] # iterate through files to remove existing data and create new empty folders for i in files: if os.path.exists(root + '\\' + i): shutil.rmtree(root + '\\' + i) os.mkdir(root + '\\' + i) # make new folders # initialize metadata file list input_files = [] # populate list with availible MTL.txt files, in this case just one for r, d, f in os.walk(os.getcwd()): for inFile in fnmatch.filter(f, '*_MTL.txt'): input_files.append(os.path.join(r, inFile)) # create a list of 1 for the vector file to be used as a boundary vector_files = [] for r, d, f in os.walk(os.getcwd()): for inFile in fnmatch.filter(f, '*.shp'): vector_files.append(os.path.join(r, inFile)) print('Finished obtaining necessary files.') # ----------------------------------------------------------- # --------- Clipping Image and Adding Bands to Image--------- # ----------------------------------------------------------- print('Clipping image.') try: clip(fili=input_files[0]+'-MS', # call MTL file from previously populated list of 1 element dbic=[1, 2, 3, 4, 5, 6, 7], # which bands to clip sltype='vec', # what kind of boundary file will image clip to # clip image to vector file provided ... 
any file name for other purposes is fine filo=root + '\\pca\\hal_clip.pix', clipfil=vector_files[0] ) pcimod(file=root+'\\pca\\hal_clip.pix', pciop='ADD', # add raster layers pcival=[0, 0, 3, 0] # add 3 16bit unsigned layers to .pix file ) except Exception as e: print(e) print('Finished clipping image.') # ----------------------------------------------------------- # --------- PCA Image & Report Writing ---------------------- # ----------------------------------------------------------- print('Starting PCA analysis and report writing.') try: # initialize report file and make sure none of it is already in memory Report.clear() enableDefaultReport(root+'\\reports\\PCA_report_1.txt') # pca analysis function pca(file=root + '\\pca\\hal_clip.pix', # which spectral bands to be analysed dbic=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], eign=[1, 2, 3], # what bands to occupy the newly created principal components # setting RGB color channels to 8,9,10 will give PC1,PC2,PC3 in focus dboc=[8, 9, 10], rtype='long') finally: # close the report file enableDefaultReport('term') print('Finished PCA analysis and report writing.') # How long did the script take? scp_time = dt.now() - start print('The script took ' + str(scp_time) + ' to complete.') # ----------------------------------------------------------- # --------- File Exporting ---------------------------------- # ----------------------------------------------------------- fexport(fili=root + '\\pca\\hal_clip.pix', filo=root + '\\pca\\MillaKoch_PCA.pix', dbic=[8, 9, 10]) print("File with PCs has been exported.") # ----------------------------------------------------------- # --------- End of Script ----------------------------------- # -----------------------------------------------------------
tomasmk/Remote-Sensing-Automation
PCA.py
PCA.py
py
4,658
python
en
code
0
github-code
36
[ { "api_name": "datetime.datetime.now", "line_number": 31, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 31, "usage_type": "name" }, { "api_name": "os.getcwd", "line_number": 38, "usage_type": "call" }, { "api_name": "os.path.exists", ...
71116093224
from common.httphandler import HttpHander
from common.yml_util import YmlUtil
import pytest, json

http = HttpHander()
YmlUt = YmlUtil()


class TestCaseSingle:

    def get_case_all(self, case_data, url_map, header):
        if case_data.get("method") == 'get':
            resp = http.get(url=url_map, headers=header)
            print(resp)
            if "assert_data" in case_data.keys():
                assert case_data.get("assert_data") in resp
        elif case_data.get("method") == 'post':
            data = case_data.get("param")
            resp = http.post(url=url_map, json=data, headers=header)
            print(resp)
            if "assert_data" in case_data.keys():
                assert case_data.get("assert_data") in resp
        else:
            return

    @pytest.mark.parametrize('yml_path,case_id', YmlUt.read_yaml_all_tuple("case_single"))
    def test_case_all(self, yml_path, case_id):
        yml_value = YmlUt.read_yaml_values(yml_path)
        case_data = YmlUt.get_value(yml_value, case_id)
        header = YmlUt.read_yaml_values(YmlUt.read_yaml_paths("token_head")[0]).get("headers")
        host_map = YmlUt.read_yaml_values(YmlUt.read_yaml_paths("token_head")[0]).get("host_map")
        url_map = YmlUt.host_map_url(case_data, host_map)  # server mapping
        # print(header)
        self.get_case_all(case_data, url_map, header)
itol220/testapi
testcases/test_single.py
test_single.py
py
1,385
python
en
code
0
github-code
36
[ { "api_name": "common.httphandler.HttpHander", "line_number": 4, "usage_type": "call" }, { "api_name": "common.yml_util.YmlUtil", "line_number": 5, "usage_type": "call" }, { "api_name": "pytest.mark.parametrize", "line_number": 25, "usage_type": "call" }, { "api_n...
25321444319
import pandas as pd from glob import glob from datetime import datetime import os def removeDups(file): df = pd.read_excel(file) # Keep only FIRST record from set of duplicates df_first_record = df.drop_duplicates(subset="Date/Time", keep="first") #creates an excel file with sorted times if glob("noDupsTime.xlsx"): pass else: df_first_record.to_excel("./downloads/noDupsTime.xlsx", index=False) # removeDups() def create_dict(): os.chdir('./downloads') df=pd.read_excel("noDupsTime.xlsx") names_list=list(df['Name']) dates_list=list(df['Date/Time']) custom_dict={}#dictionary of names as keys and al lthe datetime as values modified_dict={} for name,date_time in zip(names_list,dates_list): if name not in custom_dict.keys(): custom_dict[name]=[date_time] else: custom_dict[name].append(date_time) for name in custom_dict.keys(): #for each name go through each date,then split date_time into date and time, #create a dictionary with each day as the key and the values an array of times date_dict={} for dateTime in custom_dict[name]: #iterate over datetimes of each person date = dateTime.split()[0] time = dateTime.split()[-1] if date not in date_dict.keys(): date_dict[date]=[time] else: date_dict[date].append(time) modified_dict[name]=date_dict#create new dictionary with the name as the key print(modified_dict) return modified_dict def create_report(create_dict): data=create_dict() lst_of_names=[] length=0 lst_of_dates=[] lst_of_timein=[] lst_of_timeout=[] lst_of_durations=[] total=0 #iterate over all the names for name in data.keys(): length+=1 days=data[name] dates=data[name].keys()#gets dates for each name print(f'each_day:{type(days)}') no_of_names=len(dates) lst_of_names.extend([name for i in range(no_of_names)])#make name array same size as dates array lst_of_dates.extend(dates) for day in days.values(): print('day:',day) total+=1 first_time = datetime.strptime(day[0], '%H:%M:%S') last_time = datetime.strptime(day[-1], '%H:%M:%S') time_diff_Hours = (last_time - first_time).seconds//3600 rem_minutes = ((last_time-first_time).seconds% 3600)//60 time_diff = str(time_diff_Hours) + ":" + str(rem_minutes) lst_of_timein.append(first_time.time()) lst_of_timeout.append(last_time.time()) lst_of_durations.append(time_diff) #lst_of_durations.extend(data[name].values()[0]) print(f'total:{total}') print(f"old length:{length}\nnew length:{len(lst_of_names)}") print(f"no of dates:{len(lst_of_dates)}") print(f'{len(lst_of_timein)}') print(f'{len(lst_of_timeout)}') df=pd.DataFrame({"Names":lst_of_names,"Date":lst_of_dates,"Time_in":lst_of_timein,"Time_out":lst_of_timeout,"Time Spent":lst_of_durations}) #df2=pd.DataFrame({"Time_in":lst_of_timein,"Time_out":lst_of_timeout}) #df=pd.DataFrame({"Names":lst_of_names,"Date":lst_of_dates}) df.to_excel('report.xlsx') #df2.to_excel('fingers2.xlsx') # create_report(create_dict) def report(file): removeDups(file) create_report(create_dict)
OliverSolomon/flaskExcel
reporter.py
reporter.py
py
3,444
python
en
code
0
github-code
36
[ { "api_name": "pandas.read_excel", "line_number": 7, "usage_type": "call" }, { "api_name": "glob.glob", "line_number": 11, "usage_type": "call" }, { "api_name": "os.chdir", "line_number": 19, "usage_type": "call" }, { "api_name": "pandas.read_excel", "line_num...
17704956797
import numpy as np from PyQt5.QtWidgets import QWidget, QApplication, QVBoxLayout from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg from matplotlib.figure import Figure from SO3 import SO3 from rotation import Ui_Form class My_window(QWidget, Ui_Form): def __init__(self, parent=None, *args, **kwargs): super().__init__(parent, *args, **kwargs) self.setupUi(self) self.retranslateUi(self) self.current_rotation = SO3() self.figure = Figure() self.canvas = FigureCanvasQTAgg(self.figure) self.layout = QVBoxLayout(self.widget_2) self.layout.addWidget(self.canvas) self.display_current_rotation() def slot_matrix2all(self): pass def slot_quat2all(self): q = np.empty(4) q[0] = self.quatw.value() q[1] = self.quatx.value() q[2] = self.quaty.value() q[3] = self.quatz.value() self.current_rotation = SO3.from_quaternion(q) self.display_current_rotation() def slot_anglevec2all(self): vector = np.empty(4) vector[0] = self.vectorx.value() vector[1] = self.vectory.value() vector[2] = self.vectorz.value() vector[3] = self.angle.value() self.current_rotation = SO3.from_axis_angle(vector) self.display_current_rotation() def display_current_rotation(self): # matrix matrix = self.current_rotation.rotation_matrix print(matrix) for i in range(3): for j in range(3): exec("self.matrix{}{}.setValue({})".format(str(i), str(j), matrix[i][j])) # quat quat = self.current_rotation.to_quaternion() self.quatw.setValue(quat[0]) self.unitw.setValue(quat[0]) self.quatx.setValue(quat[1]) self.unitx.setValue(quat[1]) self.quaty.setValue(quat[2]) self.unity.setValue(quat[2]) self.quatz.setValue(quat[3]) self.unitz.setValue(quat[3]) # angle_vector vector = self.current_rotation.to_axis_angle() self.angle.setValue(vector[3]) self.vectorx.setValue(vector[0]) self.vectory.setValue(vector[1]) self.vectorz.setValue(vector[2]) self.textBrowser.setText(str(self.current_rotation)) # plot self.figure.clear() self.current_rotation.plot_coordinate_system(self.figure) self.canvas.draw() if __name__ == '__main__': import sys app = QApplication(sys.argv) window = My_window() window.show() sys.exit(app.exec_())
rollingball-3/Learning-rotation
main.py
main.py
py
2,586
python
en
code
0
github-code
36
[ { "api_name": "PyQt5.QtWidgets.QWidget", "line_number": 11, "usage_type": "name" }, { "api_name": "rotation.Ui_Form", "line_number": 11, "usage_type": "name" }, { "api_name": "SO3.SO3", "line_number": 17, "usage_type": "call" }, { "api_name": "matplotlib.figure.Fi...
40243436461
""" Beautify Images Utils """ import random import operator import heapq import math from scipy.interpolate import UnivariateSpline import cv2 import pilgram from PIL import Image, ImageStat import numpy as np from src.utils.image_process import ( do_we_need_to_sharpen, sharpen_my_image, adjust_contrast_brightness, ) def get_top_frames(scores, num, fps, dispersed=True): """ Returns list of indexes for number frames with the highest scores as specified by the user. Users can define the 'dispersed' function if they wish to have num images taken from different parts of the video. In this instance, we randomly sample 10% of the frames from the video and score these frames. Otherwise the function just returns the best num images from the frames scored. """ if len(scores) <= 1000: dispersed = False if dispersed: tmp = [] while True: if len(tmp) == int(0.1 * len(scores)): break sampled_frame = random.choice(scores) if len(tmp) == 0: tmp.append(sampled_frame) else: flag = False for i in tmp: if i - fps <= sampled_frame <= i + fps: flag = True break if flag == False: tmp.append(sampled_frame) idx = sorted( list(zip(*heapq.nlargest(num, enumerate(tmp), key=operator.itemgetter(1))))[ 0 ] ) return sorted([scores.index(j) for j in [tmp[i] for i in idx]]) else: return sorted( list( zip(*heapq.nlargest(num, enumerate(scores), key=operator.itemgetter(1))) )[0] ) def get_top_n_idx(filtered_scores, filtered_idx, sampling_size=0.1, n=10): """ Random sample from scores and get the indices of the top n scores from original video Args: filtered_scores (np.array): scores filtered from object detection that pass a threshold filtered_idx (np.array): the indices of scores that pass the threshold, from original video sampling_size (float): proportion of samples to choose from num_frames of original video n (int): top n scores to choose from Return: top_n_idx (np.array): indices of top n scores from the sample, corresponding to indices from original video """ # sample from filtered_scores & filtered_idx arrays n_sample = int(np.ceil(len(filtered_scores) * sampling_size)) if n_sample <= n: n_sample = len(filtered_scores) rand_sample = np.random.choice(len(filtered_scores), n_sample, replace=False) rand_sample_scores = filtered_scores[rand_sample] rand_sample_idx = filtered_idx[rand_sample] # get the indices of the top n scores from the sample top_n_idx = rand_sample_idx[rand_sample_scores.argsort()[::-1][: min(n, n_sample)]] return top_n_idx def brightness(im_file): """ Returns perceived brightness of image https://www.nbdtech.com/Blog/archive/2008/04/27/Calculating-the-Perceived-Brightness-of-a-Color.aspx """ stat = ImageStat.Stat(im_file) r, g, b = stat.mean return math.sqrt(0.241 * (r**2) + 0.691 * (g**2) + 0.068 * (b**2)) def LookupTable(x, y): spline = UnivariateSpline(x, y) return spline(range(256)) def Summer(img): increaseLookupTable = LookupTable([0, 64, 128, 256], [0, 80, 160, 256]) decreaseLookupTable = LookupTable([0, 64, 128, 256], [0, 50, 100, 256]) blue_channel, green_channel, red_channel = cv2.split(img) red_channel = cv2.LUT(red_channel, increaseLookupTable).astype(np.uint8) blue_channel = cv2.LUT(blue_channel, decreaseLookupTable).astype(np.uint8) sum = cv2.merge((blue_channel, green_channel, red_channel)) return sum def Winter(img): increaseLookupTable = LookupTable([0, 64, 128, 256], [0, 80, 160, 256]) decreaseLookupTable = LookupTable([0, 64, 128, 256], [0, 50, 100, 256]) blue_channel, green_channel, red_channel = cv2.split(img) red_channel = cv2.LUT(red_channel, 
decreaseLookupTable).astype(np.uint8) blue_channel = cv2.LUT(blue_channel, increaseLookupTable).astype(np.uint8) win = cv2.merge((blue_channel, green_channel, red_channel)) return win def beautify(beauti_img, filter="hudson"): """ Beautifies selected images. Input arguments: 1) beauti_img (np.array) - array of images in cv2/BGR format 2) filter (str) - instagram filter to apply 3) The default filter is Hudson. List of Instagram filters: https://github.com/akiomik/pilgram/tree/master/pilgram """ if filter: try: pilgram_filter = getattr(pilgram, filter.lower()) except: raise ValueError( """ That was not a correct filter. The list of correct filters are: _1977 aden brannan brooklyn clarendon earlybird gingham hudson inkwell kelvin lark lofi maven mayfair moon nashville perpetua reyes rise slumber stinson toaster valencia walden willow xpro2 Here's some showcases of filtered images: https://github.com/akiomik/pilgram/blob/master/screenshots/screenshot.png """ ) for idx, img in enumerate(beauti_img): ## Reduce blue light ## if filter and filter.lower() == "hudson": img = Summer(img) # Adjust brightness and contrast lux = brightness(Image.fromarray(img)) if lux <= 130: beta = 137.5 - lux elif lux > 145: beta = 137.5 - lux else: beta = 0 img = adjust_contrast_brightness(img, contrast=1.2, brightness=beta) ## Check and sharpen ## if do_we_need_to_sharpen(img): img = sharpen_my_image(img) ## Apply instagram filter ## if filter: img = np.array(pilgram_filter(Image.fromarray(img))) beauti_img[idx] = img return beauti_img def check_filter(filter): error_msg = """ That was not a correct filter. The list of correct filters are: \n _1977 aden brannan brooklyn clarendon earlybird gingham hudson inkwell kelvin lark lofi maven mayfair moon nashville perpetua reyes rise slumber stinson toaster valencia walden willow xpro2 \nHere's some showcases of filtered images: https://github.com/akiomik/pilgram/blob/master/screenshots/screenshot.png """ try: pilgram_filter = getattr(pilgram, filter.lower()) except: return False, error_msg return True, error_msg
teyang-lau/you-only-edit-once
src/utils/beautify.py
beautify.py
py
6,911
python
en
code
6
github-code
36
[ { "api_name": "random.choice", "line_number": 46, "usage_type": "call" }, { "api_name": "heapq.nlargest", "line_number": 62, "usage_type": "call" }, { "api_name": "operator.itemgetter", "line_number": 62, "usage_type": "call" }, { "api_name": "heapq.nlargest", ...
75091410342
import tensorflow as tf from transformers import GPT2Tokenizer, TFGPT2LMHeadModel import wikipediaapi # Set up Wikipedia API wiki = wikipediaapi.Wikipedia('en') # Set up tokenizer and model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2LMHeadModel.from_pretrained('gpt2', pad_token_id=tokenizer.eos_token_id) # Define training parameters batch_size = 4 epochs = 2 learning_rate = 1e-4 # Define function to preprocess input text def preprocess(text): text = text.strip().replace('\n', ' ') tokens = tokenizer.encode(text, add_special_tokens=True, max_length=512) input_ids = tf.convert_to_tensor(tokens[:-1], dtype=tf.int32) target_ids = tf.convert_to_tensor(tokens[1:], dtype=tf.int32) return input_ids, target_ids # Define function to fetch training data from Wikipedia def fetch_training_data(): titles = [ 'Artificial intelligence', 'Machine learning', 'Natural language processing', 'Recurrent neural network', 'Transformer (machine learning)', 'Generative Pre-trained Transformer 2' ] text = '' for title in titles: page = wiki.page(title) if page.exists(): text += page.text return text # Fetch training data from Wikipedia training_data = fetch_training_data() # Preprocess the training data and convert it to a TensorFlow dataset dataset = tf.data.Dataset.from_tensor_slices(training_data).map(preprocess).shuffle(10000).batch(batch_size) # Set up optimizer and loss function optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Train the model for epoch in range(epochs): epoch_loss = 0.0 for batch in dataset: with tf.GradientTape() as tape: logits = model(batch[0], training=True)[0] loss = loss_fn(batch[1], logits) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) epoch_loss += loss.numpy() print('Epoch {} Loss: {:.4f}'.format(epoch+1, epoch_loss/len(dataset))) # Save the trained model and tokenizer to files model.save_pretrained('my_chatgpt_model') tokenizer.save_pretrained('my_chatgpt_model')
ethan-haynes/test
train.py
train.py
py
2,291
python
en
code
0
github-code
36
[ { "api_name": "wikipediaapi.Wikipedia", "line_number": 6, "usage_type": "call" }, { "api_name": "transformers.GPT2Tokenizer.from_pretrained", "line_number": 9, "usage_type": "call" }, { "api_name": "transformers.GPT2Tokenizer", "line_number": 9, "usage_type": "name" }, ...
27653523771
import time import utilities.custom_logger as cl import logging from base.basepage import BasePage from base.selenium_driver import SeleniumDriver class Register_courses_page(BasePage): log = cl.customLogger(logging.DEBUG) #Locators _search_box_id = "search-courses" _course_xpath = "/html/body/div/div/div/div[2]/div/div/div[1]/a/div/div[2]" _search_btn_id = "search-course-button" _enroll_button_id = "enroll-button-top" # type is id _cc_id= "payment_method_credit_card" #type is id _credit_card_num_name = "cardnumber" #type is id _cc_exp_name = "exp-date" _cc_cvc_name = "cvc" _postal_field_name = "postal" _submit_enroll_id = "confirm-purchase" #id _enroll_error_message_class = "cc__error alert-danger" # class def __init__(self, driver): super(Register_courses_page, self).__init__(driver) self.driver = driver def enterCourseToEnroll(self, Name): self.sendKeys(Name, self._search_box_id, "id") self.elementClick(self._search_btn_id, "id") #put a wait command here,becoz it will load def selectCourseToEnroll(self): # not sure about the link text self.elementClick(self._course_xpath, "xpath") #put wait statements self.elementClick(self._enroll_button_id, "id") def enterCardNumber(self, cardNumber): #__privateStripeFrame6 self.webScroll("down", self._postal_field_name, "name") #self.webScroll("down") #switch to frame using id time.sleep(1) self.driver.switch_to_frame(self.getElement("__privateStripeFrame3", "name")) time.sleep(2) self.sendKeys(cardNumber, self._credit_card_num_name, "name") self.driver.switch_to_default_content() def enterCardExp(self,exp): #time.sleep(1) self.driver.switch_to_frame(self.getElement("__privateStripeFrame4", "name")) time.sleep(2) self.sendKeys(exp, self._cc_exp_name,"name") self.driver.switch_to_default_content() def enterCardCvc(self,cvc): #time.sleep(1) self.driver.switch_to_frame(self.getElement("__privateStripeFrame5", "name")) time.sleep(2) self.sendKeys(cvc, self._cc_cvc_name,"name") self.driver.switch_to_default_content() def enterpostalcode(self,postalcode): self.driver.switch_to_frame(self.getElement("__privateStripeFrame6", "name")) time.sleep(2) self.sendKeys(postalcode, self._postal_field_name,"name") self.driver.switch_to_default_content() def enrollInCourse(self): self.elementClick(self._submit_enroll_id, "id") def enterCreditCardinformation(self, cardNumber, exp, cvc, postal_code): self.enterCardNumber(cardNumber) self.enterCardExp(exp) self.enterCardCvc(cvc) self.enterpostalcode(postal_code) def captureErrorMsg(self): error_msg = "Hint : check for exception" try: error_msg = self.getElement(self._enroll_error_message_class, "class") except: raise return error_msg
akanksha2306/selenium_python_practice
pages/courses/register_courses_page.py
register_courses_page.py
py
3,129
python
en
code
0
github-code
36
[ { "api_name": "base.basepage.BasePage", "line_number": 9, "usage_type": "name" }, { "api_name": "utilities.custom_logger.customLogger", "line_number": 11, "usage_type": "call" }, { "api_name": "utilities.custom_logger", "line_number": 11, "usage_type": "name" }, { ...
70168571944
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 10, 100)
y = []
up_limit = 0.8
for i in x:
    if i < 6:
        y.append(0)
    elif i < 9:
        y.append((i - 6) / 3 * up_limit)
    else:
        y.append(up_limit)

plt.plot(x, y)
plt.show()
CryptoGamer8/INFO6205-FINAL
Model/main/draw.py
draw.py
py
264
python
en
code
2
github-code
36
[ { "api_name": "numpy.linspace", "line_number": 5, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.plot", "line_number": 16, "usage_type": "call" }, { "api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name" }, { "api_name": "matplotlib.pyp...
8640307873
"""Extract hourly real-time EIA data from the bulk-download zip file.""" import pandas as pd import json from os.path import join import os import zipfile import requests import logging from electricitylci.globals import data_dir def download_EBA(): """Add docstring.""" url = 'http://api.eia.gov/bulk/EBA.zip' print(f"Downloading eia bulk data from {url}...", end="") r = requests.get(url) os.makedirs(join(data_dir, 'bulk_data'), exist_ok=True) output = open(join(data_dir, 'bulk_data', 'EBA.zip'), 'wb') output.write(r.content) output.close() print(f"complete.") path = join(data_dir, 'bulk_data', 'EBA.zip') if __name__=="__main__": try: z = zipfile.ZipFile(path, 'r') with z.open('EBA.txt') as f: raw_txt = f.readlines() except FileNotFoundError: download_EBA() z = zipfile.ZipFile(path, 'r') with z.open('EBA.txt') as f: raw_txt = f.readlines() # REGION_NAMES = [ # 'California', 'Carolinas', 'Central', # 'Electric Reliability Council of Texas, Inc.', 'Florida', # 'Mid-Atlantic', 'Midwest', 'New England ISO', # 'New York Independent System Operator', 'Northwest', 'Southeast', # 'Southwest', 'Tennessee Valley Authority' # ] # # REGION_ACRONYMS = [ # 'TVA', 'MIDA', 'CAL', 'CAR', 'CENT', 'ERCO', 'FLA', # 'MIDW', 'ISNE', 'NYIS', 'NW', 'SE', 'SW', # ] # # TOTAL_INTERCHANGE_ROWS = [ # json.loads(row) for row in raw_txt if b'EBA.TI.H' in row # ] # # NET_GEN_ROWS = [ # json.loads(row) for row in raw_txt if b'EBA.NG.H' in row # ] # # DEMAND_ROWS = [ # json.loads(row) for row in raw_txt if b'EBA.D.H' in row # ] # # EXCHANGE_ROWS = [ # json.loads(row) for row in raw_txt if b'EBA.ID.H' in row # ] # # BA_TO_BA_ROWS = [ # row for row in EXCHANGE_ROWS # if row['series_id'].split('-')[0][4:] not in REGION_ACRONYMS # ] def row_to_df(rows, data_type): """ Turn rows of a single type from the bulk data text file into a dataframe with the region, datetime, and data as columns Parameters ---------- rows : list rows from the EBA.txt file data_type : str name to use for the data column (e.g. demand or total_interchange) Returns ------- dataframe Data for all regions in a single df with datatimes converted and UTC """ tuple_list = [] for row in rows: # "data" is of form: # [['20190214T04Z', -102], # ['20190214T03Z', -107], # ['20190214T02Z', -108], # ['20190214T01Z', -103]] try: datetime = pd.to_datetime([x[0] for x in row['data']], utc=True, format='%Y%m%dT%HZ') except ValueError: try: datetime = pd.to_datetime([x[0]+":00" for x in row['data']], format='%Y%m%dT%H%z') except ValueError: continue data = [x[1] for x in row['data']] region = row['series_id'].split('-')[0][4:] # df_data = { # 'region': region, # 'datetime': datetime, # data_type: data, # } # region_list=[region for x in datetime] # _df = pd.DataFrame(df_data) # tuple_list.append(_df) tuple_data=[x for x in zip([region]*len(datetime), list(datetime), data)] tuple_list.extend(tuple_data) df=pd.DataFrame(tuple_list, columns=["region", "datetime", data_type]) # df = pd.concat(df_list).reset_index(drop=True) return df def ba_exchange_to_df(rows, data_type='ba_to_ba'): """ Turn rows of a single type from the bulk data text file into a dataframe with the region, datetime, and data as columns Parameters ---------- rows : list rows from the EBA.txt file data_type : str name to use for the data column (e.g. 
demand or total_interchange) Returns ------- dataframe Data for all regions in a single df with datatimes converted and UTC """ tuple_list = [] for row in rows: # "data" is of form: # [['20190214T04Z', -102], # ['20190214T03Z', -107], # ['20190214T02Z', -108], # ['20190214T01Z', -103]] try: datetime = pd.to_datetime([x[0] for x in row['data']], utc=True, format='%Y%m%dT%HZ') except ValueError: try: datetime = pd.to_datetime([x[0]+"00" for x in row['data']], format='%Y%m%dT%H%z') except ValueError: continue data = [x[1] for x in row['data']] from_region = row['series_id'].split('-')[0][4:] to_region = row['series_id'].split('-')[1][:-5] # df_data = { # 'from_region': from_region, # 'to_region': to_region, # 'datetime': datetime, # data_type: data, # } tuple_data = [x for x in zip([from_region]*len(datetime), [to_region]*len(datetime), datetime, data)] tuple_list.extend(tuple_data) # _df = pd.DataFrame(df_data) # df_list.append(_df) # df = pd.concat(df_list).reset_index(drop=True) df=pd.DataFrame(tuple_list, columns=["from_region", "to_region", "datetime", data_type]) return df
USEPA/ElectricityLCI
electricitylci/bulk_eia_data.py
bulk_eia_data.py
py
5,340
python
en
code
23
github-code
36
[ { "api_name": "requests.get", "line_number": 17, "usage_type": "call" }, { "api_name": "os.makedirs", "line_number": 18, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 18, "usage_type": "call" }, { "api_name": "electricitylci.globals.data_dir...
7630680769
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('inicio', '0006_auto_20160820_2050'),
    ]

    operations = [
        migrations.CreateModel(
            name='cargo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre_cargo', models.CharField(unique=True, max_length=100)),
                ('fecha', models.DateTimeField(auto_now=True)),
                ('estado', models.IntegerField(default=0)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.RemoveField(
            model_name='adminstrador',
            name='carrera',
        ),
        migrations.AddField(
            model_name='adminstrador',
            name='cargo',
            field=models.ForeignKey(default='1', to='inicio.cargo'),
            preserve_default=False,
        ),
    ]
juanjavierlimachi/sistema-de-Informacion
mipagina/mipagina/apps/inicio/migrations/0007_auto_20160820_2141.py
0007_auto_20160820_2141.py
py
1,065
python
en
code
0
github-code
36
[ { "api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute" }, { "api_name": "django.db.migrations", "line_number": 7, "usage_type": "name" }, { "api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call" }, ...
13790256283
#!/usr/bin/env python import rbd import rados import json import subprocess from itertools import chain from texttable import Texttable, get_color_string, bcolors def f(x): if x=="quota_max_bytes": return str(pool[x]/1024/1024) else: return str(pool[x]) p = subprocess.check_output('ceph osd dump -f json-pretty', shell=True) pools = json.loads(p)['pools'] pools_table = Texttable() header = [ "Id", "Pool", "Size", "Min_size", "Pg_num", "Pgp_num", "Crush","Quota (MB)", "Quota (obj)" ] keys = [ "pool", "pool_name", "size", "min_size", "pg_num", "pg_placement_num", "crush_ruleset","quota_max_bytes","quota_max_objects" ] pools_table.header(map(lambda x: get_color_string(bcolors.YELLOW, x), header)) for pool in pools: pools_table.add_row(map(f, keys)) table = Texttable() table.set_deco(Texttable.BORDER | Texttable.HEADER | Texttable.VLINES) table.set_cols_align( [ "l", "l", "l", "l", "l", "l", "l" ]) table.set_cols_valign([ "m", "m", "m", "m", "m", "m", "m" ]) table.set_cols_width([ "20", "20", "8","8","20","8","8"]) header = [ "Pool", "Image", "Size(Mb)", "Features", "Lockers", "Str_size", "Str_cnt" ] keys = [ "features", "list_lockers", "stripe_unit", "stripe_count" ] table.header(map(lambda x: get_color_string(bcolors.YELLOW, x), header)) with rados.Rados(conffile='/etc/ceph/ceph.conf') as cluster: pool_list = cluster.list_pools() for pool in pool_list: table.add_row([ get_color_string(bcolors.GREEN, pool) , "", "", "", "", "", "" ]) with cluster.open_ioctx(pool) as ioctx: rbd_inst = rbd.RBD() image_list = rbd_inst.list(ioctx) for image_name in image_list: with rbd.Image(ioctx, image_name) as image: image_size = str(image.size()/1024**2) table.add_row(["", image_name, image_size] + map(lambda x: str(getattr(image,x)()), keys)) if pool != pool_list[-1]: table.add_row([ "-"*20, "-"*20,"-"*8,"-"*8,"-"*20,"-"*8,"-"*8 ]) print(pools_table.draw()) print print(table.draw())
angapov/ceph-scripts
ceph.py
ceph.py
py
2,086
python
en
code
0
github-code
36
[ { "api_name": "subprocess.check_output", "line_number": 15, "usage_type": "call" }, { "api_name": "json.loads", "line_number": 16, "usage_type": "call" }, { "api_name": "texttable.Texttable", "line_number": 17, "usage_type": "call" }, { "api_name": "texttable.get_...
15616400542
from typing import Dict from tools.coco_dataset_metrics import COCODatasetMetrics from tools.tools import load_json_data from detectron2_metrics import TrainingMetrics, InferenceMetrics def load_annotations(annotation_paths: Dict): """ Load annotations from path and assign them to corresponding key. :param annotation_paths: Dict :return: Dict - {key: annotations} """ annotations = {} for key, path in annotation_paths.items(): annotations[key] = load_json_data(path) return annotations if __name__ == '__main__': # Setup path variables annotation_org_dataset_path = { 'Original dataset': '../data/testing_annotations/labels_household_object_detection_newest.json' } annotation_split_dataset_path = { 'Split data - train': '../data/labels/train_coco_annotations.json', 'Split data - validation': '../data/labels/val_coco_annotations.json', 'Split data - test': '../data/labels/test_coco_annotations.json' } annotation_generated_split_dataset_path = { 'Generated data - train': '../generated_data/labels/train_coco_annotations.json', 'Generated data - validation': '../generated_data/labels/val_coco_annotations.json', 'Generated data - test': '../generated_data/labels/test_coco_annotations.json' } all_annotations = { 'original_dataset': annotation_org_dataset_path, 'original_split_dataset': annotation_split_dataset_path, 'generated_split_dataset': annotation_generated_split_dataset_path } # Iterate over the annotations and plot corresponding dataset distributions for key, annotation_paths in all_annotations.items(): annotations = load_annotations(annotation_paths) dataset_metrics = COCODatasetMetrics(annotations) dataset_metrics.plot_metrics(key) # Plot training and inference metrics for all training runs training_metrics = TrainingMetrics() inference_metrics = InferenceMetrics() training_metrics.plot_training_metrics() inference_metrics.plot_inference_metrics()
Mathiasn21/household_object_detection
code/plot_metrics.py
plot_metrics.py
py
2,100
python
en
code
0
github-code
36
[ { "api_name": "typing.Dict", "line_number": 8, "usage_type": "name" }, { "api_name": "tools.tools.load_json_data", "line_number": 17, "usage_type": "call" }, { "api_name": "tools.coco_dataset_metrics.COCODatasetMetrics", "line_number": 49, "usage_type": "call" }, { ...
74950126185
#!/usr/bin/env python # coding: utf-8 # In[45]: # Choquet adaptive thresholding: two step algorithm import progressbar from time import sleep import math import cv2 import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg import PIL from skimage import measure from pynverse import inversefunc import time import scipy.misc get_ipython().run_line_magic('matplotlib', 'inline') import sys import warnings import numpy as np #Otsu trhesholding from skimage import data from skimage import filters from skimage import exposure #format the output in a readable format float_formatter = lambda x: "%.2f" % x np.set_printoptions(precision=0,formatter={'float_kind':float_formatter}) if not sys.warnoptions: warnings.simplefilter("ignore") # In[46]: #function section ### import img def namestr(obj, namespace): return [name for name in namespace if namespace[name] is obj] def plot_it(img): plt.figure(figsize = [8,8]) arr = np.asarray(img) plt.imshow(arr, cmap='gray', vmin=0, vmax=arr.max()) plt.title(namestr(img, globals())) plt.show() def import_img(img_path): img = cv2.imread(img_path, 0) img_reverted= cv2.bitwise_not(img) norm_img = img_reverted / 255.0 #plot_it(norm_img) print(norm_img) print(norm_img.shape) print(norm_img.size) return(norm_img) ### cumulative G function (sum-table algorithm) def compute_summed_area_table(image): # image is a 2-dimensional array containing ints or floats, with at least 1 element. height = len(image) width = len(image[0]) new_image = [[0.0] * width for _ in range(height)] # Create an empty summed area table for row in range(0, height): for col in range(0, width): if (row > 0) and (col > 0): new_image[row][col] = image[row][col] + new_image[row][col - 1] + new_image[row - 1][col] - new_image[row - 1][col - 1] elif row > 0: new_image[row][col] = image[row][col] + new_image[row - 1][col] elif col > 0: new_image[row][col] = image[row][col] + new_image[row][col - 1] else: new_image[row][col] = image[row][col] return new_image def get_int_img_m1(input_img): h, w = input_img.shape #integral img int_img = np.zeros_like(input_img, dtype=np.uint32) for col in range(w): for row in range(h): int_img[row,col] = input_img[0:row+1,0:col+1].sum() return int_img def cdf_image(input_img): nh, binn = np.histogram(input_img) cdf = np.cumsum(nh) return([cdf, nh, binn]) # In[ ]: # In[47]: # Adaptive choquet # OPT= 0 Hamacher # OPT= 1 Discrete Choquet # Opt= 2 Discrete Choquet with F1,F2 on the distributive property def compute_choquet(choquet_order, fuzzy_mu, opt=0): C=0 if opt==0: # Choquet Hamacher for i in range(len(choquet_order)-1): j = i +1 C = C + (choquet_order[j] * fuzzy_mu[i])/(choquet_order[j] + fuzzy_mu[i] - (choquet_order[j] * fuzzy_mu[i])) if opt==1: #Choquet for i in range(len(choquet_order)-1): j = i +1 C = C + ((choquet_order[j] - choquet_order[j-1] )*fuzzy_mu[i]) if opt ==2: #Choquet F1 F2 for i in range(len(choquet_order)-1): j = i +1 C = C + (np.sqrt(choquet_order[j]*fuzzy_mu[i]) - max( (choquet_order[j]+fuzzy_mu[i] -1) , 0)) return(C) def compute_sugeno(sugeno_order, fuzzy_mu): S = np.empty((1), float) for i in range(len(sugeno_order)): S = np.append(S, min(sugeno_order[i], fuzzy_mu[i])) #print(S) #print('sugeno: ' + str(choquet_order[j]) + " " + str(fuzzy_mu[i]) + " " + str(max(S))) return(max(S)) ## Integral Choquet and Sugeno image. 
def adaptive_choquet_itegral(input_img, int_img, opt,log=False): h, w = input_img.shape th_mat = np.zeros(input_img.shape) choquet_mat = np.zeros(input_img.shape) sugeno_mat = np.zeros(input_img.shape) count_matrix = np.zeros(input_img.shape) for col in range(w): #i for row in range(h): #j #SxS region y0 = int(max(row-1, 0)) y1 = int(min(row, h-1)) x0 = int(max(col-1, 0)) x1 = int(min(col, w-1)) count = (y1-y0)*(x1-x0) count_matrix[row, col] = count choquet_order = -1 sum_ = -1 fuzzy_mu = -1 if count == 0: if x0 == x1 and y0 == y1: sum_ = int_img[y0, x0] C_ = sum_ S_ = sum_ if x1 == x0 and y0 != y1: sum_ = (int_img[y1, x1] + int_img[y0, x1])/2 choquet_order = np.asarray([0,int_img[y0, x1], int_img[y1, x1]]) sugeno_order = np.asarray([int_img[y0, x1], int_img[y1, x1]]) fuzzy_mu = np.asarray([1, 0.5]) C_ = compute_choquet(choquet_order, fuzzy_mu,opt) S_ = compute_sugeno(sugeno_order, fuzzy_mu) if y1 == y0 and x1 != x0: sum_ = (int_img[y1, x1] + int_img[y1, x0])/2 choquet_order = np.asarray([0,int_img[y1, x0], int_img[y1, x1]]) sugeno_order = np.asarray([int_img[y1, x0], int_img[y1, x1]]) fuzzy_mu = np.asarray([1, 0.5]) C_ = compute_choquet(choquet_order, fuzzy_mu,opt) S_ = compute_sugeno(sugeno_order, fuzzy_mu) else: sum_ = int_img[y1, x1] - int_img[y0, x1] - int_img[y1, x0] + int_img[y0, x0] if(int_img[y0, x1] > int_img[y1, x0] ): choquet_order = np.asarray([0,int_img[y0, x0], int_img[y1, x0], int_img[y0, x1], int_img[y1, x1]]) sugeno_order = np.asarray([int_img[y0, x0], int_img[y1, x0], int_img[y0, x1], int_img[y1, x1]]) else: choquet_order = np.asarray([0,int_img[y0, x0], int_img[y0, x1], int_img[y1, x0], int_img[y1, x1]]) sugeno_order = np.asarray([int_img[y0, x0], int_img[y0, x1], int_img[y1, x0], int_img[y1, x1]]) fuzzy_mu = np.asarray([1, 0.75, 0.50, 0.25]) C_ = compute_choquet(choquet_order, fuzzy_mu,opt) S_ = compute_sugeno(sugeno_order, fuzzy_mu) th_mat[row,col] = sum_ choquet_mat[row,col] = C_ sugeno_mat[row,col] = S_ if(log): coords_window = np.zeros_like(input_img) #coords_window[x0:x1,y0:y1] = 1.0 coords_window[y0, x0] = 0.2 coords_window[y1, x0] = 0.4 coords_window[y0, x1] = 0.6 coords_window[y1, x1] = 0.8 plot_it(coords_window) print("Search_region") print("x0:" + str(x0) + " x1:"+ str(x1) + " y0:" + str(y0) + " y1:" + str(y1) ) print("Row:" + str(row) + " Col:" + str(col)) print("Count: " + str(count)) print("choquet fixed ordered and fuzzy mu") print(choquet_order) print(fuzzy_mu) print("choquet calculus") print(C_) print("sugeno calculus") print(S_) print("Input mat") print(input_img) print("Int img") print(int_img) print("I integral mat: ") print(th_mat) print("C_ choquet") print(choquet_mat) print("S_ sugeno") print(sugeno_mat) print("Count matrix") print(count_matrix) print("-------") return choquet_mat, sugeno_mat, count_matrix # In[ ]: # In[48]: ## Classic Bradley Apprroach def adaptive_thresh(input_img, int_img, a1=8, a2=2, T=0.15): out_img = np.zeros_like(input_img) h, w = input_img.shape S = w/a1 s2 = S/a2 th_mat = np.zeros(input_img.shape) for col in range(w): for row in range(h): #SxS region y0 = int(max(row-s2, 0)) y1 = int(min(row+s2, h-1)) x0 = int(max(col-s2, 0)) x1 = int(min(col+s2, w-1)) count = (y1-y0)*(x1-x0) sum_ = int_img[y1, x1] - int_img[y0, x1] - int_img[y1, x0] + int_img[y0, x0] th_mat[row,col] = sum_/count if input_img[row, col]*count < sum_*(1.-T)/1.: out_img[row,col] = 0 else: out_img[row,col] = 1 return np.asarray(out_img), th_mat #Novel choquet adaptive approach def adaptive_thresh2(input_img, int_img, a1=4, a2=1, T=0, log=False): if T==0: T = 
filters.threshold_otsu(input_img) T = T out_img_choquet = np.zeros_like(input_img) out_img_sugeno = np.zeros_like(input_img) choquet_mat = np.zeros_like(input_img) sugeno_mat = np.zeros_like(input_img) h, w = input_img.shape S = w/a1 s2 = S/a2 for col in range(w): for row in range(h): y0 = int(max(row-s2, 0)) y1 = int(min(row+s2, h-1)) x0 = int(max(col-s2, 0)) x1 = int(min(col+s2, w-1)) count = (y1-y0)*(x1-x0) sum_ = -1 fuzzy_mu = -1 if count == 0: if x0 == x1 and y0 == y1: sum_ = int_img[y0, x0] S_ = sum_ if x1 == x0 and y0 != y1: sum_ = int_img[y1, x1] - int_img[y0, x1] sugeno_order = np.asarray([int_img[y0, x1], int_img[y1, x1]]) fuzzy_mu = np.asarray([1, 0.5]) S_ = compute_sugeno(sugeno_order, fuzzy_mu) if y1 == y0 and x1 != x0: sum_ = int_img[y1, x1] - int_img[y1, x0] sugeno_order = np.asarray([int_img[y1, x0], int_img[y1, x1]]) fuzzy_mu = np.asarray([1, 0.5]) S_ = compute_sugeno(sugeno_order, fuzzy_mu) else: sum_ = int_img[y1, x1] - int_img[y0, x1] - int_img[y1, x0] + int_img[y0, x0] if(int_img[y0, x1] > int_img[y1, x0] ): sugeno_order = np.asarray([int_img[y0, x0], int_img[y1, x0], int_img[y0, x1], int_img[y1, x1]]) else: sugeno_order = np.asarray([int_img[y0, x0], int_img[y0, x1], int_img[y1, x0], int_img[y1, x1]]) fuzzy_mu = np.asarray([1, 0.75, 0.50, 0.25]) S_ = compute_sugeno(sugeno_order, fuzzy_mu) choquet_mat[row,col] = sum_/count if input_img[row, col]*count < sum_ * (1.-T)/1.: out_img_choquet[row,col] = 0 else: out_img_choquet[row,col] = 1 sugeno_mat[row,col] = S_/count #note is not only T if input_img[row, col]*count < S_ * (1.- T)/1.: out_img_sugeno[row,col] = 0 else: out_img_sugeno[row,col] = 1 return out_img_choquet, out_img_sugeno, choquet_mat, sugeno_mat, T # In[ ]: #Qualitative comparisons # Compute the mean squared error and structural similarity # index for the images def compare_images(img1, img2): m = mse(img1, img2) s = measure.compare_ssim(img1, img2, data_range=img2.max() - img2.min(), multichannel=False) ret = np.array([m,s]) #the higher the ssim, the more "similar" return(ret) def mse(img1, img2): err = np.sum((img1.astype("float") - img2.astype("float")) ** 2) err /= float(img1.shape[0] * img2.shape[1]) #the lower the error, the more "similar" return(err) #simple listing class in order to collect the results class results_collector(object): def __init__(self, name, original_img, choquet_mat, sugeno_mat, count_matrix, out_img_adapt_choquet,out_img_sugeno,out_img_bradley, c_m, s_m, T, elapsed_time, mse_choquet, mse_sugeno, mse_bradley, ssim_choquet, ssim_sugeno, ssim_bradley, th, a1, a2): self.name = name, self.img = original_img, self.choquet_mat = choquet_mat, self.sugeno_mat = sugeno_mat, self.count_matrix = count_matrix, self.out_img_adapt_choquet = out_img_adapt_choquet, self.out_img_sugeno = out_img_sugeno, self.out_img_bradley = out_img_bradley self.c_m = c_m, self.s_m = s_m, self.T = T, self.elapsed_time = elapsed_time, self.mse_choquet = mse_choquet, self.mse_sugeno = mse_sugeno, self.mse_bradley = mse_bradley, self.ssim_choquet = ssim_choquet, self.ssim_sugeno = ssim_sugeno, self.ssim_bradley = ssim_bradley, self.th = th, self.a1 = a1, self.a2 = a2 #Embedded method for comparisons between groundtruth and Choquet thresholded images def compute_multi_thresh(test_images, gt_images, opt = 0, T=0, a1=2, a2=2): count=0 resc = [] for i in test_images: test_image = i #plot_it(test_image) S1 = np.asarray(compute_summed_area_table(test_image)) #S1 = get_int_img_m1(test_image) choquet_mat, sugeno_mat, count_matrix = 
adaptive_choquet_itegral(np.asarray(test_image), S1, opt, log=False ) #Choquet Adaptive Thresh out_img_adapt_choquet, out_img_sugeno, c_m, s_m, T = adaptive_thresh2(np.asarray(test_image), np.asarray(choquet_mat), a1 = a1, a2 = a2, T= T, log=False ) #with compute_summed_area table doesn't work. #Bradley Adaptive Thresh S1 = get_int_img_m1(test_image) out_img_bradley, bradley_int_mat = adaptive_thresh(np.asarray(test_image), S1 , a1=a1, a2=a2, T=T) #compare it mse_choquet, ssim_choquet = compare_images(gt_images[count], out_img_adapt_choquet) mse_sugeno, ssim_sugeno = compare_images(gt_images[count], out_img_sugeno) mse_bradley, ssim_bradley = compare_images(gt_images[count], out_img_bradley) # resc.append(results_collector("Comparisons", i, choquet_mat, sugeno_mat,count_matrix, out_img_adapt_choquet, out_img_sugeno, out_img_bradley, c_m, s_m, T, elapsed_time, mse_choquet, mse_sugeno, mse_bradley, ssim_choquet, ssim_sugeno, ssim_bradley, T, a1, a2)) count += 1 return(resc) def add_random_noise(small_image, perc=1): np.random.seed(1) mu, sigma = 0, 1 # mean and standard deviation s = np.random.normal(mu, sigma, small_image.shape) img_n = np.abs(s/s.max()) * perc img_ret = small_image + img_n return(img_ret) # In[82]: ### Testing Grad/Glaze images vs Groundtruth / GT noise vs GT / Test+noise vs GT def test_exp(test_images, gt_images, a1=7, a2=7, opt=0, scale = 0.01, noise_gt = -1, noise_test=-1): resc_a = [] elapsed_time=0 ### Add noise on the GroundTruth if noise_gt > 0: noise_img = [] for i in range(len(gt_images)): noise_img.append(add_random_noise(gt_images[i], noise_gt)) test_images = noise_img #Add noise on the test images if noise_test > 0: noise_img = [] for i in range(len(test_images)): noise_img.append(add_random_noise(test_images[i], noise_test)) test_images = noise_img # Test test_images or noised ones with respect the GT. 
for i in range(a1): for j in range(a2): x = scale if(i >= j ): print("Testing image conf ( i: " + str(i) + " j: " + str(j) + ")") t1 = time.process_time() while(x <= 1.01): resc = compute_multi_thresh(test_images,gt_images, opt = opt, T=x, a1=i+1, a2=j+1) x = x + scale resc_a.append(resc) elapsed_time = time.process_time() - t1 print('Out: {} images processed in {} seconds'.format(str(len(resc)), round(elapsed_time ,3))) return(resc_a) ## Simple testing prints ## It should return the list of the stuff def search_results(resc_b, ssim_th = 0.5, attention_image = -1): count=0 for i in range(len(resc_b)): for j in range(len(resc_b[-1])): if(resc_b[i][j].ssim_choquet[0] > resc_b[i][j].ssim_bradley[0] and resc_b[i][j].ssim_choquet[0] > ssim_th and resc_b[i][j].a1[0] != resc_b[i][j].a2): count= count+1 print('{}-th image -------------------\n mse: C {} S {} B {}, \nssid: C {} S {} B {} \na1: {}, a2: {}, th: {}'.format( str(j), round(resc_b[i][j].mse_choquet[0],3), round(resc_b[i][j].mse_sugeno[0],3), round(resc_b[i][j].mse_bradley[0],3), round(resc_b[i][j].ssim_choquet[0],3), round(resc_b[i][j].ssim_sugeno[0],3), round(resc_b[i][j].ssim_bradley[0],3), str(resc_b[i][j].a1[0]), str(resc_b[i][j].a2), round(resc_b[i][j].th[0], 4) )) if(attention_image >= 0): if(j==attention_image): print("**********************************************************************************") print("Percentage of coverage around all the possible configurations" + str(count/(len(resc_b)*len(resc_b[-1])))) # In[83]: ################################################################################ #### Test on a single image: ################################################################################ small_image = 1.0 - import_img('./original/00.bmp') plot_it(small_image) S1 = np.asarray(compute_summed_area_table(small_image)) cdf_img = cdf_image(small_image) int_img = get_int_img_m1(small_image) # common #int_img2 = get_int_img_m2(small_image, cum_distr) #choquet int img print("Image") print(np.asarray(small_image)) print("summed area table") print(np.asarray(summ_at)) print("integral image") print(int_img) plt.plot(np.asarray(cdf_img[0]), np.asarray( cdf_img[2][0:len(cdf_img[2])-1]), 'r--') print("cumulative distribution of the image") print(np.asarray(cdf_img[0])) print("histogram") print(np.asarray(cdf_img[1])) print("range values") print(np.asarray(cdf_img[2])) choquet_mat, sugeno_mat, count_matrix = adaptive_choquet_itegral(np.asarray(small_image), S1, 1, log=False ) print("C mat") plot_it(choquet_mat) print("S mat") plot_it(sugeno_mat) print("-----------------------------------------------------------------------------------") #Otsu T parameter print("Image thresholded with the choquet integral image and an automatic Otsu threshold") out_img_adapt_choquet, out_img_sugeno, c_m, s_m, T = adaptive_thresh2(np.asarray(small_image), np.asarray(choquet_mat), a1 = 16, a2 = 2, #Leave T = 0 for the Otsu log=False ) #con compute_summed_area table doesn't work. print("Threshold " + str(T)) plot_it(out_img_adapt_choquet) plot_it(out_img_sugeno) plot_it(c_m) plot_it(s_m) print("-----------------------------------------------------------------------------------") #Manual Parameter print("Image thresholded with the choquet integral image and a fixed manual threshold.") out_img_adapt_choquet, out_img_sugeno, c_m, s_m, T = adaptive_thresh2(np.asarray(small_image), np.asarray(choquet_mat), a1 = 16, a2 = 2, T = 0.2, log=False ) #con compute_summed_area table doesn't work. 
print("Threshold " + str(T)) plot_it(out_img_adapt_choquet) plot_it(out_img_sugeno) plot_it(c_m) plot_it(s_m) # In[84]: ################################################################################ #### Toy dataset # Testing complex gradients, glazes, additive noise, smoothness ################################################################################ #Prepare the list data structures #Groundtruth images gt_images = [] # Smothed, glazed GT images test_images = [] # In[85]: ### # Definition of the toy dataset ### small_image1 = [[0, 0, 0, 0, 0, 0,0,0,0], [0, 1, 0, 1, 0, 0,0,0,0], [1, 1, 1, 1, 1, 0,0,0,0], [0, 1, 0, 1, 0, 0,0,0,0], [0, 0, 1, 1, 1, 0,0,0,0], [0, 0, 0, 1, 0, 0,0,0,0], [0, 0, 1, 1, 1, 0,0,0,0], [0, 0, 0, 1, 0, 0,0,0,0], [0, 0, 1, 1, 1, 0,0,0,0]] small_image1 = np.asarray(small_image1, dtype="float32") gt_images.append(small_image1) plot_it(small_image1) small_image1 = [[0.2, 0.2, 0.1, 0.2, 0.15, 0.14,0.13,0.12,0.11], [0.16, 0.6, 0.2, 0.3, 0.15, 0.14,0.13,0.12,0.11], [0.6, 0.5, 0.6, 0.7, 0.8, 0.14,0.13,0.12,0.11], [0.14, 0.5, 0.2, 0.3, 0.15, 0.14,0.13,0.12,0.11], [0.15, 0.12, 0.3, 0.4, 0.3, 0.14,0.13,0.12,0.11], [0.14, 0.13, 0.2, 0.4, 0.15, 0.14,0.13,0.12,0.11], [0.15, 0.12, 0.3, 0.3, 0.3, 0.14,0.13,0.12,0.11], [0.14, 0.13, 0.2, 0.26, 0.1, 0.14,0.13,0.12,0.11], [0.15, 0.12, 0.25, 0.25, 0.25, 0.14,0.13,0.12,0.11]] small_image1 = np.asarray(small_image1, dtype="float32") test_images.append(small_image1) plot_it(small_image1) small_image2 = [[0, 0, 0, 0, 1,0,0,0,0], [0, 0, 0, 1, 0,1,0,0,0], [0, 0, 1, 0, 0,0,1,0,0], [0, 1, 0, 0, 0,0,0,1,0], [0, 1, 0, 0, 0,0,0,1,0], [0, 0, 1, 0, 0,0,1,0,0], [0, 0, 0, 1, 0,1,0,0,0], [0, 0, 0, 0, 1,0,0,0,0]] small_image2 = np.asarray(small_image2, dtype="float32") gt_images.append(small_image2) plot_it(small_image2) small_image2 = [[0.22, 0.19, 0.19, 0.18, 0.5, 0.11, 0.08, 0.06,0.02], [0.22, 0.19, 0.19, 0.5, 0.15,0.6, 0.08, 0.06,0.02], [0.22, 0.19, 0.40, 0.18, 0.15,0.11, 0.7, 0.06,0.02], [0.22, 0.30, 0.19, 0.18, 0.15,0.11, 0.08, 0.8,0.02], [0.22, 0.30, 0.19, 0.18, 0.15,0.11, 0.08, 0.8,0.02], [0.22, 0.19, 0.40, 0.18, 0.15,0.11, 0.7, 0.06,0.02], [0.22, 0.19, 0.19, 0.5, 0.15,0.6, 0.08, 0.06,0.02], [0.22, 0.19, 0.19, 0.18, 1 ,0.11, 0.08, 0.06,0.02]] small_image2 = np.asarray(small_image2, dtype="float32") plot_it(small_image2) test_images.append(small_image2) small_image3 = [[0,0,0, 0, 0, 0, 0,0], [0,0,0, 0, 0, 0, 0,0], [0,0,0, 1, 0, 0, 0,0], [0,0,1, 1, 1, 0, 0,0], [0,0,0, 1, 0, 1, 0,0], [0,0,0, 1, 1, 1, 1,0], [0,0,0, 1, 0, 1, 0,0], [0,0,0, 1, 0, 0, 0,0]] small_image3 = np.asarray(small_image3, dtype="float32") plot_it(small_image3) gt_images.append(small_image3) small_image3 = [[0.18,0.22, 0.15, 0.22, 0.20, 0.17, 0.15,0.14], [0.18,0.22, 0.15, 0.22, 0.20, 0.15, 0.17,0.1], [0.18,0.22, 0.15, 0.45, 0.20, 0.17, 0.15,0.14], [0.17,0.21, 0.35, 0.45, 0.55, 0.15, 0.17,0.1], [0.17,0.20, 0.15, 0.45, 0.20, 0.65, 0.15,0.14], [0.18,0.21, 0.15, 0.45, 0.55, 0.65, 0.75,0.1], [0.19,0.22, 0.15, 0.45, 0.20, 0.65, 0.15,0.14], [0.18,0.22, 0.15, 0.35, 0.20, 0.15, 0.17,0.1]] small_image3 = np.asarray(small_image3, dtype="float32") plot_it(small_image3) test_images.append(small_image3) small_image4 = [[0, 0, 0, 0, 0,0,0,0], [0, 0, 0, 0, 1,1,1,0], [0, 0, 0, 1, 1,1,1,0], [0, 0, 1, 1, 1,1,1,0], [0, 1, 1, 1, 1,1,1,0], [0, 0, 1, 1, 1,1,1,0], [0, 0, 0, 1, 1,1,1,0], [0, 0, 0, 0, 1,1,1,0] ] small_image4 = np.asarray(small_image4, dtype="float32") small_image6 = np.asarray(np.transpose(small_image4), dtype="float32") plot_it(small_image4) gt_images.append(small_image4) 
gt_images.append(small_image6) small_image4 = [[0.1, 0.1, 0.3, 0.2, 0.2,0.1,0, 0], [0.1, 0.15, 0.3, 0.2, 0.4,0.6,0.6,0], [0.1, 0.15, 0.3, 0.5, 0.5,0.5,0.6,0], [0.2, 0.15, 0.6, 0.5, 0.55,0.5,0.6,0], [0.2, 0.8, 0.7, 0.5, 0.55,0.5,0.6,0], [0.2, 0.15, 0.6, 0.5, 0.55,0.5,0.6,0], [0.1, 0.1, 0.3, 0.5, 0.5,0.5,0.6,0], [0.1, 0.1, 0.3, 0.2, 0.5,0.5,0.6,0] ] small_image4 = np.asarray(small_image4, dtype="float32") small_image6 = np.asarray(np.transpose(small_image4), dtype="float32") plot_it(small_image4) test_images.append(small_image4) test_images.append(small_image6) small_image5 = [[1., 0., 0., 0., 0., 0., 0., 0.], [0., 1., 0., 0., 0., 0., 0., 0.], [0., 0., 1., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 1., 0, 0., 0.], [0., 0., 0., 1., 1., 1., 0., 0.], [0., 0., 0., 0., 1., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0.] ] small_image5 = np.asarray(small_image5, dtype="float32") plot_it(small_image5) gt_images.append(small_image5) small_image5 = [[0.4, 0. , 0., 0., 0., 0., 0., 0.], [0. , 0.5, 0., 0., 0., 0., 0., 0.], [0. , 0. , 0.6, 0.1, 0.1, 0.1, 0.1, 0.], [0. , 0. , 0.1, 0.1, 1.0, 0.1, 0.1, 0.], [0. , 0. , 0.1, 0.7, 0.8, 0.9, 0.1, 0.], [0. , 0. , 0.1, 0.1, 0.9, 0.1, 0.1, 0.], [0. , 0. , 0.1, 0.2, 0.1, 0.1, 0.1, 0.], [0. , 0. , 0., 0., 0., 0., 0., 0.] ] small_image5 = np.asarray(small_image5, dtype="float32") plot_it(small_image5) test_images.append(small_image5) small_image7 = [[0,0, 0, 0, 0, 0,0,0], [0,0, 0, 0, 0, 0,0,0], [0,0, 0, 1, 0, 0,0,0], [0,0, 1, 0 , 1, 0,0,0], [0,0, 0, 1, 0, 0,0,0], [0,0, 1, 0, 1, 0,0,0], [0,0, 0, 1, 0, 0,0,0], [0,0, 0, 0, 0, 0,0,0], ] small_image7 = np.asarray(small_image7, dtype="float32") plot_it(small_image7) gt_images.append(small_image7) small_image7 = [[0,0.1, 0.2, 0.2, 0.2, 0.1,0,0], [0,0.1, 0.2, 0.2, 0.2, 0.1,0,0], [0,0.1, 0.2, 0.7, 0.2, 0.1,0,0], [0,0.3, 0.6, 0.2 , 0.6, 0.1,0,0], [0,0.3, 0.2, 0.8, 0.2, 0.25,0,0], [0,0.1, 0.7, 0.2, 0.6, 0.1,0,0], [0,0.1, 0.1, 0.6, 0.2, 0.2,0,0], [0,0.1, 0.1, 0.1, 0.2, 0.2,0,0] ] small_image7 = np.asarray(small_image7, dtype="float32") plot_it(small_image7) test_images.append(small_image7) small_image8 = [ [0, 0, 0, 0, 0,0,1,0], [0, 0, 0, 0, 0,1,0,0], [0, 0, 0, 0, 1,0,0,0], [ 0, 0, 0, 1, 0,0,0,0], [ 0, 0, 1, 0, 0,0,0,0], [ 0, 1, 0, 0, 0,0,0,0], [ 1, 0, 0, 0, 0,0,0,0], [0, 0, 0, 0, 0, 0, 0, 0]] small_image8 = np.asarray(small_image8, dtype="float32") plot_it(small_image8) gt_images.append(small_image8) small_image8 = [[0, 0, 0, 0, 0, 0.4, 1, 0.5], [0, 0, 0, 0, 0.3, 0.95, 0.4, 0.5], [0, 0, 0, 0.3, 0.9, 0.4, 0, 0], [0, 0, 0.3, 0.8, 0.3, 0, 0, 0], [0, 0.2, 0.8, 0.3, 0, 0, 0, 0], [0.2, 0.8, 0.3, 0, 0, 0, 0, 0], [0.8, 0.2, 0, 0, 0, 0, 0, 0], [0.2, 0, 0, 0, 0, 0, 0, 0]] small_image8 = np.asarray(small_image8, dtype="float32") plot_it(small_image8) test_images.append(small_image8) # In[86]: ### OPT 0,1,2 on Testing imgs vs GT test_0a = test_exp(test_images, gt_images, a1=7, a2=7, opt=0, scale = 0.2) test_0b = test_exp(test_images, gt_images, a1=7, a2=7, opt=1, scale = 0.2) test_0c = test_exp(test_images, gt_images, a1=7, a2=7, opt=2, scale = 0.2) search_results(test_0a, ssim_th = 0.3, attention_image = 2) search_results(test_0b, ssim_th = 0.3, attention_image = 2) search_results(test_0c, ssim_th = 0.3, attention_image = 2) ### OPT 0,1,2 on GT noised vs GT +20% test_1a = test_exp(test_images, gt_images, a1=7, a2=7, opt=0, scale = 0.2, noise_gt = 0.2) test_1b = test_exp(test_images, gt_images, a1=7, a2=7, opt=1, scale = 0.2, noise_gt = 0.2) test_1c = test_exp(test_images, gt_images, a1=7, a2=7, opt=2, scale = 
0.2, noise_gt = 0.2) search_results(test_1a, ssim_th = 0.3, attention_image = 2) search_results(test_1b, ssim_th = 0.3, attention_image = 2) search_results(test_1c, ssim_th = 0.3, attention_image = 2) ### OPT 0,1,2 on Testing imgs noised vs GT + 20% test_2a = test_exp(test_images, gt_images, a1=7, a2=7, opt=0, scale = 0.2, noise_test = 0.2) test_2b = test_exp(test_images, gt_images, a1=7, a2=7, opt=1, scale = 0.2, noise_test = 0.2) test_2c = test_exp(test_images, gt_images, a1=7, a2=7, opt=2, scale = 0.2, noise_test = 0.2) search_results(test_2a, ssim_th = 0.3, attention_image = 2) search_results(test_2b, ssim_th = 0.3, attention_image = 2) search_results(test_2c, ssim_th = 0.3, attention_image = 2) # In[23]: ##################### ### Berkeley Dataset imgs = ['./original/00.bmp', './original/01.bmp','./original/02.bmp', './original/03.bmp', './original/04.bmp','./original/05.bmp', './original/06.bmp', './original/07.bmp','./original/08.bmp', './original/09.bmp' ] imgs_gt = [ './gtruth/00.bmp', './gtruth/01.bmp', './gtruth/02.bmp', './gtruth/03.bmp', './gtruth/04.bmp', './gtruth/05.bmp', './gtruth/06.bmp', './gtruth/07.bmp', './gtruth/08.bmp', './gtruth/09.bmp' ] test_images2 = [] test_images2.append(1.0 - import_img(imgs[0])) test_images2.append(1.0 - import_img(imgs[1])) test_images2.append(1.0 - import_img(imgs[2])) test_images2.append(1.0 - import_img(imgs[3])) test_images2.append(1.0 - import_img(imgs[4])) test_images2.append(1.0 - import_img(imgs[5])) test_images2.append(1.0 - import_img(imgs[6])) test_images2.append(1.0 - import_img(imgs[7])) test_images2.append(1.0 - import_img(imgs[8])) test_images2.append(1.0 - import_img(imgs[9])) test_images_gt_2 = [] test_images_gt_2.append(1.0 - import_img(imgs_gt[0])) test_images_gt_2.append(1.0 - import_img(imgs_gt[1])) test_images_gt_2.append(1.0 - import_img(imgs_gt[2])) test_images_gt_2.append(1.0 - import_img(imgs_gt[3])) test_images_gt_2.append(1.0 - import_img(imgs_gt[4])) test_images_gt_2.append(1.0 - import_img(imgs_gt[5])) test_images_gt_2.append(1.0 - import_img(imgs_gt[6])) test_images_gt_2.append(1.0 - import_img(imgs_gt[7])) test_images_gt_2.append(1.0 - import_img(imgs_gt[8])) test_images_gt_2.append(1.0 - import_img(imgs_gt[9])) # In[ ]: ###Berkeley ### Testing Grad/Glaze images vs Groundtruth - t-norm choquet ### From a range from 0 to 1 it requires 2963.407 seconds ### better fixing a 16/2 and not trying all the possible combs. 
brk_resc = [] t1 = time.process_time() x=0 while(x <= 1.00): resc = compute_multi_thresh(test_images2, test_images_gt_2, opt = 0, T=x, a1=16, a2=2) x = x + 0.01 brk_resc.append(resc) elapsed_time = time.process_time() - t1 print(x) print('Images processed in {} seconds'.format(round(elapsed_time ,3))) # In[110]: print(len(brk_resc)) # In[114]: for i in range(len(brk_resc)): for j in range(len(brk_resc[-1])): if(brk_resc[i][j].ssim_choquet[0] > brk_resc[i][j].ssim_bradley[0] and brk_resc[i][j].ssim_choquet[0] >0.3 and brk_resc[i][j].a1[0] != brk_resc[i][j].a2): count= count+1 print('{}-th image -------------------\n mse: C {} S {} B {}, \nssid: C {} S {} B {} \na1: {}, a2: {}, th: {}'.format( str(j), round(brk_resc[i][j].mse_choquet[0],3), round(brk_resc[i][j].mse_sugeno[0],3), round(brk_resc[i][j].mse_bradley[0],3), round(brk_resc[i][j].ssim_choquet[0],3), round(brk_resc[i][j].ssim_sugeno[0],3), round(brk_resc[i][j].ssim_bradley[0],3), str(brk_resc[i][j].a1[0]), str(brk_resc[i][j].a2), round(brk_resc[i][j].th[0], 4) )) # In[176]: #### Example of the chessboard test_image = 1.0 - import_img('./original/chessboard.png') #Choquet Adaptive Thresh choquet_mat, _, _ = adaptive_choquet_itegral(np.asarray(test_image), S1, 0, #t-norm version log=False ) out_img_adapt_choquet, _, _, _, T = adaptive_thresh2(np.asarray(test_image), np.asarray(choquet_mat), a1 = 16, a2 = 2, T= 0.095, log=False ) #con compute_summed_area table doesn't work. #Choquet Adaptive Thresh choquet_mat, _, _ = adaptive_choquet_itegral(np.asarray(test_image), S1, 1, #choquet int version log=False ) out_img_adapt_choquet2, _, _, _, T = adaptive_thresh2(np.asarray(test_image), np.asarray(choquet_mat), a1 = 16, a2 = 2, T= 0.095, log=False ) #con compute_summed_area table doesn't work. #Bradley Adaptive Thresh S1 = get_int_img_m1(test_image) out_img_bradley, bradley_int_mat = adaptive_thresh(np.asarray(test_image), S1 , a1=16, a2=2, T=T) # In[177]: #Choquet Adaptive Thresh plot_it(test_image) plot_it(out_img_adapt_choquet2) plot_it(out_img_adapt_choquet) plot_it(out_img_bradley) print(compare_images(out_img_adapt_choquet, out_img_bradley)) print(compare_images(out_img_adapt_choquet2, out_img_bradley)) # In[ ]: # In[ ]:
lodeguns/FuzzyAdaptiveBinarization
fuzzy_adaptive_bin.py
fuzzy_adaptive_bin.py
py
36,527
python
en
code
3
github-code
36
[ { "api_name": "numpy.set_printoptions", "line_number": 31, "usage_type": "call" }, { "api_name": "sys.warnoptions", "line_number": 34, "usage_type": "attribute" }, { "api_name": "warnings.simplefilter", "line_number": 35, "usage_type": "call" }, { "api_name": "mat...
24390099064
import re

from . import builder, cc, msvc
from .. import log, shell
from .common import choose_builder, guess_command, make_command_converter
from ..languages import known_langs

with known_langs.make('rc') as x:
    x.vars(compiler='RC', flags='RCFLAGS')
    x.exts(source=['.rc'])

_c_to_rc = make_command_converter([
    (re.compile(r'gcc(?:-[\d.]+)?(?:-(?:posix|win32))?'), 'windres'),
])

_posix_cmds = ['windres']
_windows_cmds = ['rc', 'windres']
_builders = (cc.CcRcBuilder, msvc.MsvcRcBuilder)


@builder('rc')
def rc_builder(env):
    langinfo = known_langs['rc']

    cmd = env.getvar(langinfo.var('compiler'))
    if cmd:
        return choose_builder(env, langinfo, _builders, candidates=cmd)

    # We don't have an explicitly-set command from the environment, so try to
    # guess what the right command would be based on the C compiler command.
    candidates = (_windows_cmds if env.host_platform.family == 'windows'
                  else _posix_cmds)
    sibling = env.builder('c').compiler
    guessed_cmd = guess_command(sibling, _c_to_rc)

    # If the guessed command is the same as the first default command
    # candidate, remove it. This will keep us from logging a useless info
    # message that we guessed the default value for the command.
    if guessed_cmd is not None and guessed_cmd != candidates[0]:
        try:
            builder = choose_builder(env, langinfo, _builders,
                                     candidates=guessed_cmd, strict=True)
            log.info('guessed windows rc compiler {!r} from c compiler {!r}'
                     .format(guessed_cmd, shell.join(sibling.command)))
            return builder
        except FileNotFoundError:
            pass

    # Try the default command candidates.
    return choose_builder(env, langinfo, _builders, candidates=candidates)
jimporter/bfg9000
bfg9000/tools/rc.py
rc.py
py
1,858
python
en
code
73
github-code
36
[ { "api_name": "languages.known_langs.make", "line_number": 8, "usage_type": "call" }, { "api_name": "languages.known_langs", "line_number": 8, "usage_type": "name" }, { "api_name": "common.make_command_converter", "line_number": 12, "usage_type": "call" }, { "api_...
13744203488
import glob import os import requests import time import sys import numpy as np import pandas as pd from pandas.core.frame import DataFrame from geocoding_api_extract.utils.progress import Progress def create_geocoding_api_request_str(street, city, state, benchmark='Public_AR_Census2010', vintage='Census2010_Census2010', layers='14', format='json') -> str: """Create geocoding api request str Args: street (str): street address city (str): city state (str): state as 2 digit initial benchmark (str, optional): Defaults to 'Public_AR_Census2010'. vintage (str, optional): Defaults to 'Census2010_Census2010'. layers (str, optional): Defaults to '14'. format (str, optional): Defaults to 'json'. Returns: str: geocoding api request string. """ return 'https://geocoding.geo.census.gov/geocoder/geographies/address?street=' + \ street + '&city=' + city + '&state=' + state + '&benchmark=' + benchmark + \ '&vintage=' + vintage + '&layers=' + layers + '&format=' + format def extract_address_batch(address_batch, city, state, retries=5): """Extract one address batch Args: address_batch (list(str)): list of addresses city (str): City state (str): 2-digit state code retries (int, optional): Number of time to retry the api request. Defaults to 5. Returns: DataFrame: result table from api extract """ result = {'address': address_batch, 'state': [], 'county': [], 'tract': [], 'cent_lat': [], 'cent_lon': [], 'us_zip': []} exception = "" for address in address_batch: request = requests.get( create_geocoding_api_request_str(address, city, state)) for attempt in range(retries): try: if request.status_code == 200 and request.json()['result']['addressMatches'] != []: result['state'].append(request.json()['result']['addressMatches'] [0]['geographies']['Census Blocks'][0]['STATE']) result['county'].append(request.json()['result']['addressMatches'] [0]['geographies']['Census Blocks'][0]['COUNTY']) result['tract'].append(request.json()['result']['addressMatches'] [0]['geographies']['Census Blocks'][0]['TRACT']) result['cent_lat'].append(request.json()['result']['addressMatches'] [0]['geographies']['Census Blocks'][0]['CENTLAT']) result['cent_lon'].append(request.json()['result']['addressMatches'] [0]['geographies']['Census Blocks'][0]['CENTLON']) result['us_zip'].append(request.json()['result']['addressMatches'] [0]['addressComponents']['zip']) else: result['state'].append("not found") result['county'].append("not found") result['tract'].append("not found") result['cent_lat'].append("not found") result['cent_lon'].append("not found") result['us_zip'].append("not found") except Exception as x: print(f'BAD REQUEST: {type(x)} {x} {request}') exception = x # wait incrementally longer each retry wait_time = 30 * (attempt+1)**2 print(f'Waiting {wait_time} seconds.') time.sleep(wait_time) else: break else: # all attempts failed, log this print( f'API REQUEST FAILED AFTER {retries} ATTEMPTS WITH EXCEPTION: {exception} :: {request}') empty_result = pd.DataFrame() return empty_result results = pd.DataFrame(result) return results def extract_address_batches(address_batches, city, state, progress): """Wrapper function to yeild results of each address batch extraction. 
Args: address_batches (list[list[str]]): list of address batches city (str): City state (str): 2 digit state code progress (Progress): Object for keeping track of progress Yields: DataFrame: resulting DataFrame from address extraction """ for i in range(progress.value, len(address_batches)): print('Processing address batch:', i) result = extract_address_batch( address_batches[i], city, state) yield result def extract_address_details(addresses, city, state, tmp_folder, tmp_filename_prefix='', reset=False, clean_result=True) -> DataFrame: """Extract address details from geocoding api Args: addresses (list[str]): a list of addresses city (str): the city where the addresses reside state (str): 2-digit state abbreviation tmp_folder (str): the folder to put partial data extracts in tmp_filename_prefix (str, optional): export filename suffix for temp result chunks reset (bool, optional): if True, the extraction will reset which will delete all temp files and reset progress Returns: DataFrame: A table showing each address and the following extra columns: ['address','state'(id),'county'(id),'tract'(id),'cent_lat','cent_lon','us_zip'] """ if type(addresses) != list: print("Type mismatch: 'addresses' needs to be a list of strings") return pd.DataFrame() if (type(city) != str): print("Type mismatch: 'city' needs to be a string") return pd.DataFrame() if (type(state) != str): print("Type mismatch: 'state' needs to be a string") return pd.DataFrame() if (type(tmp_folder) != str): print("Type mismatch: 'tmp_folder' needs to be a string") return pd.DataFrame() # batch addresses into manageable chunks parts = len(addresses)//50 if parts > 1: address_batches = np.array_split(addresses, parts) else: address_batches = [addresses] path = tmp_folder + tmp_filename_prefix + state + '_' + city + '/' try: if os.path.isdir(path): print("Directory already exists") else: os.mkdir(path) except OSError as e: print("Returning empty DataFrame: There is a problem with the tmp_folder path: %s." 
% path) return pd.DataFrame() else: print("Successfully created the directory %s " % path) export_tmp_fp = path + 'geocoding_api_extract' # initialize progress tracker # TODO: add 'home' directory in a project config file and use that as the root for # this progress filepath progress = Progress('GEOCODING_API', path + 'geocoding_api_progress.cfg') # reset progress if reset: progress.reset() filepaths = glob.glob(export_tmp_fp + "_part*.parquet.gzip") for fp in filepaths: os.remove(fp) # extract and export each batch for result in extract_address_batches(address_batches, city, state, progress): # export batch DataFrame fp = export_tmp_fp + '_part' + str(progress.value) + '.parquet.gzip' result.to_parquet(fp) # record progress progress.increment() # combine all the batch files into a DataFrame filepaths = glob.glob(export_tmp_fp + "_part*.gzip") result_dfs = [pd.read_parquet(filepath) for filepath in filepaths] results = pd.concat(result_dfs, ignore_index=True) if clean_result: results = results[results['tract'] != 'not found'] print("Geocoding api address extract is complete.") return results def remove_tmp_files(city, state, tmp_folder, tmp_filename_prefix='') -> None: """Remove temp files created during an extract Args: city (str): the city where the addresses reside state (str): 2-digit state abbreviation tmp_filename_prefix (str, optional): export filename suffix for temp result chunks """ path = tmp_folder + tmp_filename_prefix + state + '_' + city + '/' if not os.path.isdir(path): return export_tmp_fp = path + 'geocoding_api_extract' filepaths = glob.glob(export_tmp_fp + "_part*.parquet.gzip") for fp in filepaths: os.remove(fp) if os.path.isfile(path + 'geocoding_api_progress.cfg'): os.remove(path + 'geocoding_api_progress.cfg') if os.path.isdir(path): os.rmdir(path)
AndoKalrisian/geocoding_api_extract
src/geocoding_api_extract/__init__.py
__init__.py
py
9,005
python
en
code
0
github-code
36
[ { "api_name": "requests.get", "line_number": 61, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 91, "usage_type": "call" }, { "api_name": "pandas.DataFrame", "line_number": 98, "usage_type": "call" }, { "api_name": "pandas.DataFrame", "line...
31063677025
from ..utils import Object


class LoginUrlInfoRequestConfirmation(Object):
    """
    An authorization confirmation dialog needs to be shown to the user

    Attributes:
        ID (:obj:`str`): ``LoginUrlInfoRequestConfirmation``

    Args:
        url (:obj:`str`):
            An HTTP URL to be opened
        domain (:obj:`str`):
            A domain of the URL
        bot_user_id (:obj:`int`):
            User identifier of a bot linked with the website
        request_write_access (:obj:`bool`):
            True, if the user needs to be requested to give the permission to the bot to send them messages

    Returns:
        LoginUrlInfo

    Raises:
        :class:`telegram.Error`
    """
    ID = "loginUrlInfoRequestConfirmation"

    def __init__(self, url, domain, bot_user_id, request_write_access, **kwargs):
        self.url = url  # str
        self.domain = domain  # str
        self.bot_user_id = bot_user_id  # int
        self.request_write_access = request_write_access  # bool

    @staticmethod
    def read(q: dict, *args) -> "LoginUrlInfoRequestConfirmation":
        url = q.get('url')
        domain = q.get('domain')
        bot_user_id = q.get('bot_user_id')
        request_write_access = q.get('request_write_access')
        return LoginUrlInfoRequestConfirmation(url, domain, bot_user_id, request_write_access)
iTeam-co/pytglib
pytglib/api/types/login_url_info_request_confirmation.py
login_url_info_request_confirmation.py
py
1,365
python
en
code
20
github-code
36
[ { "api_name": "utils.Object", "line_number": 6, "usage_type": "name" } ]
39845550652
""" Iguana (c) by Marc Ammon, Moritz Fickenscher, Lukas Fridolin, Michael Gunselmann, Katrin Raab, Christian Strate Iguana is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. You should have received a copy of the license along with this work. If not, see <http://creativecommons.org/licenses/by-sa/4.0/>. """ from datetime import timedelta from django.utils import timezone import datetime from django.core.exceptions import ValidationError from django.utils.translation import ugettext_lazy as _ def date_is_present_or_past(value): if isinstance(value, datetime.datetime): if value > timezone.now(): raise ValidationError(_("The date entered must be today or lesser.")) else: raise ValidationError(_("The value entered isn't a valid type of date or datetime.")) def logged_time_is_positive(value): if isinstance(value, timedelta): if value <= timedelta(seconds=0): raise ValidationError(_("The logged time must be at least one minute")) else: raise ValidationError(_("The value entered isn't a valid type of timedelta."))
midas66/iguana
src/timelog/validators.py
validators.py
py
1,138
python
en
code
null
github-code
36
[ { "api_name": "datetime.datetime", "line_number": 20, "usage_type": "attribute" }, { "api_name": "django.utils.timezone.now", "line_number": 21, "usage_type": "call" }, { "api_name": "django.utils.timezone", "line_number": 21, "usage_type": "name" }, { "api_name":...
31867884735
from setuptools import setup, find_packages
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

version = {}
with open(path.join(here, "sonocrop", "__version__.py")) as f:
    exec(f.read(), version)

setup(
    name='sonocrop',
    version=version["__version__"],
    description='Prepare ultrasound videos for machine learning-- crop and remove static clutter from ultrasound video.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/davycro/sonocrop',
    author='David Crockett, MD',
    author_email='davycro1@gmail.com',
    license="Apache Software License 2.0",
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Healthcare Industry',
        'Topic :: Multimedia :: Video',
        'Topic :: Scientific/Engineering :: Medical Science Apps.',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8'
    ],
    keywords='ultrasound bedside ultrasound pocus ffmpeg opencv echo cardiac',
    packages=find_packages(),
    python_requires='>=3.6',
    install_requires=[
        'numpy',
        'opencv-python',
        'fire',
        'rich',
        'matplotlib',
    ],
    entry_points={  # Optional
        'console_scripts': [
            'sonocrop=sonocrop.cli:main',
        ],
    },
)
davycro/sonocrop
setup.py
setup.py
py
1,442
python
en
code
6
github-code
36
[ { "api_name": "os.path.abspath", "line_number": 4, "usage_type": "call" }, { "api_name": "os.path", "line_number": 4, "usage_type": "name" }, { "api_name": "os.path.dirname", "line_number": 4, "usage_type": "call" }, { "api_name": "os.path.join", "line_number"...
70585542504
##
# Automated email push -- 20191105 created by terrell
# Configure the related variables
# Set the subject, body and other information
# Add attachments
# Log in and send
#
import time
import os
import smtplib
import email
import datetime
import sys
import traceback
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

year = int(datetime.datetime.now().strftime('%Y'))
month = int(datetime.datetime.now().strftime('%m'))
day = int(datetime.datetime.now().strftime('%d'))
print(day)
print(month)
print(year)

target = " Only " + str((datetime.datetime(2020, 12, 21) - datetime.datetime(year, month, day)).days) + " days left until the exam!"
print(target)

# Configuration variables
sender = "2516234365@qq.com"
qqCode = 'lnxcnjuvmtqmdjif'
receiver = '1203562850@qq.com'
cc = '2516234365@qq.com'
subject = "Keep it up, Xiang ☺ -- let's pass the exam together! " + target
username = "2516234365@qq.com"
password = "yuyali2010970514"

# Email subject and body
massage = MIMEMultipart()
massage['subject'] = subject
massage['to'] = receiver
massage['Cc'] = cc
massage['from'] = 'dongjian.yu@qtdatas.com'
body = '''Dear Xiang:
Did you study yesterday?
Will you study today?
Have you finished your tasks?
① English
② Politics
③ Professional theory
④ Graduation project
⑤ Graduation thesis
------------------
Terrell
QTdatas dongjian.yu
Mobile: +86 15188593321
Email: dongjian.yu@qtdatas.com'''
massage.attach(MIMEText(body, 'plain', 'utf-8'))

# Add attachments
# for i in filekkkk:
#     appendix = MIMEApplication(open(file, 'rb').read())
#     appendix.add_header('content-disposition', 'attachment', filename=file_name)
#     massage.attach(appendix)


def main():
    # smtp_server = 'smtp.exmail.qq.com'
    smtp_server = 'smtp.qq.com'
    server = smtplib.SMTP_SSL(smtp_server, 465)
    # QQ SMTP authenticates with the authorization code, not the account password
    server.login(sender, qqCode)
    server.set_debuglevel(1)
    # server.ehlo()
    # server.starttls()
    # The connection is already authenticated above, so a second login is redundant:
    # server.login(username, password)
    print('Login successful')
    server.sendmail(sender, receiver.split(',') + cc.split(','), massage.as_string())
    print('Email sent')
    # except Exception as e:
    #     print('An error occurred...')
    #     traceback.print_exc()
    #     print(e)
    # else:
    #     server.quit()


main()
yudongjian/remember_word_tkinter
sent_mailToLeo.py
sent_mailToLeo.py
py
2,257
python
en
code
0
github-code
36
[ { "api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call" }, { "api_name": "datetime.datetime", "line_number": 18, "usage_type": "attribute" }, { "api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call" }, { "api_name": "da...
12486383692
'''
Created on Jan 29, 2020

@author: Michal.Busta at gmail.com
'''
import numpy as np
import neptune


class Meter:
    '''A meter to keep track of losses and scores throughout an epoch'''

    def __init__(self, phase, epoch, use_neptune=False, log_interval=100, total_batches=100):
        self.metrics = {}
        self.rmetrics = {}
        self.phase = phase
        self.epoch = epoch
        self.use_neptune = use_neptune
        self.log_interval = log_interval
        self.total_batches = total_batches

    def update(self, **kwargs):
        itr = 0
        for name, value in kwargs.items():
            if name == 'itr':
                itr = value
                continue
            try:
                self.metrics[name].append(value)
                self.rmetrics[name].append(value)
            except KeyError:
                # First value seen for this metric name
                self.metrics[name] = []
                self.metrics[name].append(value)
                self.rmetrics[name] = []
                self.rmetrics[name].append(value)

        if itr % self.log_interval == 0:
            if self.use_neptune:
                for key in self.rmetrics.keys():
                    mean = np.mean(self.rmetrics[key])
                    self.rmetrics[key] = []
                    neptune.log_metric(f'{key}_{self.phase}', itr + self.epoch * self.total_batches, mean)
            else:
                for key in self.rmetrics.keys():
                    mean = np.mean(self.rmetrics[key])
                    self.rmetrics[key] = []
                    print(f' - {key}: {mean}')

    def get_metrics(self):
        ret = {}
        log_str = ''
        for key in self.metrics.keys():
            mean = np.mean(self.metrics[key])
            ret[key] = mean
            log_str = '%s | %s: %0.4f ' % (log_str, key, mean)
            if self.use_neptune:
                neptune.log_metric(f'epoch_{key}_{self.phase}', self.epoch, mean)
        print(log_str)
        return ret
drivendataorg/open-cities-ai-challenge
3rd Place/meter.py
meter.py
py
1,744
python
en
code
113
github-code
36
[ { "api_name": "numpy.mean", "line_number": 42, "usage_type": "call" }, { "api_name": "neptune.log_metric", "line_number": 44, "usage_type": "call" }, { "api_name": "numpy.mean", "line_number": 47, "usage_type": "call" }, { "api_name": "numpy.mean", "line_numbe...
70585526504
import sys, copy from udp_interface import udp_interface import numpy as np import os # from ppo.run import train # from baselines.common import tf_util as U from utils.action_filter import ActionFilterButter from utils.reference_generator import ReferenceMotionGenerator from collections import deque from utils.utility import * from utils.quaternion_function import euler2quat import time from sshkeyboard import listen_keyboard import threading import torch from rsl_rl.modules import ActorCritic from dataclasses import dataclass, field @dataclass class RobotState: trans_vel: np.ndarray trans_acc: np.ndarray rot_quat: np.ndarray rot_vel: np.ndarray motor_pos: np.ndarray motor_vel: np.ndarray NUM_MOTORS=12 class ExpEnv(): def __init__(self, ref_file='../motions/MotionLibrary/LiftingMotion_Simulator.motionlib', model_path=None, cfg=None, recv_IP=None, recv_port=None,send_IP=None,send_port=None): self.robot = udp_interface(recv_IP=recv_IP, recv_port=recv_port, send_IP=send_IP, send_port=send_port) self.dir = os.path.dirname(os.path.abspath(__file__)) self.rt_freq = 1000 self.exp_env_freq = 30 self.num_sims_per_env_step = self.rt_freq // self.exp_env_freq self.secs_per_policy_step = self.num_sims_per_env_step / self.rt_freq self.policy_freq = 1 / self.secs_per_policy_step self.motor_kps = [100.0]*12 self.motor_kds = 4*[1.0,2.0,2.0] self.motor_vel_idx = [i+6 for i in range(NUM_MOTORS)] '''a1, but also applied on mini-cheetah''' self.default_target_positions = [0.0,1.0661,-2.1869, 0.0,1.0661,-2.1869, 0.0,1.0661,-2.1869, 0.0,1.0661,-2.1869] self.action_bounds = np.array([[-0.7767, 0.7767], [-0.3011, 3.7045], [-2.8500, -0.1500], [-0.7767, 0.7767], [-0.3011, 3.7045], [-2.8500, -0.1500], [-0.7767, 0.7767], [-0.3011, 3.7045], [-2.8500, -0.1500], [-0.7767, 0.7767], [-0.3011, 3.7045], [-2.8500, -0.1500]]).T self.history_len = 15 self.action_filter_order = 2 self.action_filter = ActionFilterButter(lowcut=None, highcut=[4], sampling_rate=self.policy_freq,order=self.action_filter_order,num_joints=NUM_MOTORS) self.selected_policy = 0 self.use_planner = True self.ever_jump = False self.policy_running = False self.init_model(model_path) self.init_robot_state() self.previous_obs = deque(maxlen=self.history_len) self.previous_acs = deque(maxlen=self.history_len) self.reference_generator = ReferenceMotionGenerator(ref_file, 2000, self.secs_per_policy_step) self.__reset() if not self.use_planner: self.reference_generator.set_policy(self.selected_policy) self.low_obs_act = [] def init_robot_state(self): trans_vel = np.zeros((3,)) trans_acc = np.zeros((3,)) rot_vel = np.zeros((3,)) rot_quat = np.zeros((4,)) motor_pos = np.zeros((NUM_MOTORS,)) motor_vel = np.zeros((NUM_MOTORS,)) self.obs_robot_state = RobotState(trans_vel=trans_vel, trans_acc=trans_acc, rot_quat=rot_quat, rot_vel=rot_vel, motor_pos=motor_pos, motor_vel=motor_vel) def init_model(self, model_path): self.pi = [] if isinstance(model_path, str): selection = input("Single policy experiment, press 1 or 2 to select policy, any key to exit: ") selection = int(selection) if selection == 1 or selection == 2: self.selected_policy = selection self.use_planner = False path = [None, None] path[self.selected_policy - 1] = model_path model_path = path else: raise NotImplementedError for model in model_path: pi = ActorCritic( 499, 536, NUM_MOTORS, actor_hidden_dims=[512, 256, 128], #499, 536 critic_hidden_dims=[512, 256, 128]) if model is not None: loaded_dict = torch.load(model, map_location=torch.device('cpu')) pi.load_state_dict(loaded_dict['model_state_dict']) 
pi.eval() self.pi.append(pi) def __process_recv_package(self, obs): self._raw_state = obs # Convert quaternion from wxyz to xyzw, which is default for Pybullet. rpy = self._raw_state[0:3] q = euler2quat(rpy[0], rpy[1], rpy[2]) self.obs_robot_state.motor_pos = np.array(self._raw_state[6:18]) self.obs_robot_state.rot_quat = np.copy(np.array([q[1], q[2], q[3], q[0]])) # print(self.obs_robot_state.rot_quat) ''' Thigh and Calf joints are reversed on the real robot ''' self.obs_robot_state.motor_pos[[1,2,4,5,7,8,10,11]] *= -1 def __get_observation(self, acs = np.zeros(NUM_MOTORS), step = False): __acs = np.copy(acs) ref_dict_1 = self.reference_generator.getReferenceMotion(look_forward=1) ref_dict_4 = self.reference_generator.getReferenceMotion(look_forward=4) ref_dict_7 = self.reference_generator.getReferenceMotion(look_forward=7) ob1 = ref_dict_1["joints_rot"] ob4 = ref_dict_4["joints_rot"] ob7 = ref_dict_7["joints_rot"] ob_curr = np.concatenate([self.obs_robot_state.rot_quat, self.obs_robot_state.motor_pos]) bezier_param = np.concatenate([self.reference_generator.get_bezier_coefficients(), self.reference_generator.get_fixed_motion_duration(), self.reference_generator.get_motion_t_norm(), self.reference_generator.get_motion_phase()]) # motion_type: 0,1,2,3,4 feet_pos = np.concatenate([ref_dict_1["foot_pos_bezier"], ref_dict_4["foot_pos_bezier"], ref_dict_7["foot_pos_bezier"]]) if self.timestep == 0: [self.previous_obs.append(ob_curr) for i in range(self.history_len)] [self.previous_acs.append(self.default_target_positions) for i in range(self.history_len)] ob_prev = np.concatenate([np.array(self.previous_obs).flatten(), np.array(self.previous_acs).flatten()]) # print(bezier_param) if step: self.previous_obs.append(ob_curr) self.previous_acs.append(__acs) self.curr_obs = np.concatenate([ob_prev, ob_curr, ob1, ob4, ob7, bezier_param, feet_pos]) def process_send_cmd(self, motor_commands): return motor_commands def acs_norm2actual(self, acs): return self.action_bounds[0] + (acs + 1)/2.0 * (self.action_bounds[1] - self.action_bounds[0]) def acs_actual2norm(self, actual_acs): return (actual_acs - self.action_bounds[0])*2 / (self.action_bounds[1] - self.action_bounds[0]) - 1 def __get_action(self): print(self.selected_policy, self.curr_obs[-15:-9]) if self.selected_policy == 0: acs = np.copy(self.acs_actual2norm(self.default_target_positions)) else: acs = self.pi[self.selected_policy - 1].act_inference(torch.from_numpy(self.curr_obs).to(torch.float32).unsqueeze(0))[0] acs = acs.detach().numpy() acs = np.clip(np.copy(acs), -1, 1) if self.selected_policy == 1 or self.selected_policy == 2: if self.reference_generator.time_in_sec < 0.33: acs[[6,9]] = np.clip(acs[[6,9]], -0.2, 0.2) else: acs[[6,9]] = np.clip(acs[[6,9]], -0.8, 0.8) assert acs.shape[0] == 12 and -1.0 <= acs.all() <= 1.0 if self.timestep == 0: # prevent zero action output default_action = np.array(self.default_target_positions) self.actual_pTs_filtered = default_action self.action_filter.init_history(self.acs_actual2norm(default_action)) pTs_filtered = np.copy(self.action_filter.filter(np.copy(acs))) actual_pTs_filtered = np.copy(self.acs_norm2actual(pTs_filtered)) return actual_pTs_filtered, np.copy(self.curr_obs), np.copy(acs) def __env_update(self): # if self.timestep<3: self.timestep += 1 self.time_in_sec = (self.timestep*self.num_sims_per_env_step) / self.rt_freq self.reference_generator.update_step(self.timestep) def __reset(self): self.action_filter.reset() self.timestep = 0.0 self.est_timestep = 0 self.time_in_sec = 0.0 
self.actual_pTs = np.zeros(NUM_MOTORS) self.actual_pTs_filtered = np.zeros(NUM_MOTORS) def pid_ctrl(self): policy_count = 0 previous_time = time.time() t = threading.currentThread() a1_default_target_positions = np.array([0.0,0.9,-1.8, 0.0,0.9,-1.8, 0.0,0.9,-1.8, 0.0,0.9,-1.8]) while getattr(t, "do_run", True): obs = self.robot.receive_observation() self.__process_recv_package(np.copy(obs)) if policy_count % 1 == 0: self.__get_observation(np.copy(self.default_target_positions), step=False) self.actual_pTs_filtered_sent = np.copy(a1_default_target_positions) for i in range(4): self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1] self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2] cmd=self.process_send_cmd(np.concatenate((self.actual_pTs_filtered_sent,[0.0, 0.0, 0.0],np.zeros((12,)), [0.0,0.0,0.0]))) self.robot.send_command(cmd) policy_count += 1 current_time = time.time() # print("proc", "Frequency: ", 1/(current_time - previous_time + 1e-10)) previous_time = current_time delay = 0.6 _ = time.perf_counter() + delay/1000 while time.perf_counter() < _: pass def pid_ctrl_squat_prep(self): policy_count = 0 previous_time = time.time() t = threading.currentThread() a1_default_target_positions = np.array([0.0,0.9,-1.8, 0.0,0.9,-1.8, 0.0,0.9,-1.8, 0.0,0.9,-1.8]) while getattr(t, "do_run", True): obs = self.robot.receive_observation() self.__process_recv_package(np.copy(obs)) if policy_count > 30: self.actual_pTs_filtered_sent = np.copy(self.default_target_positions) for i in range(4): self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1] self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2] break else: self.__get_observation(np.copy(self.default_target_positions), step=False) self.actual_pTs_filtered_sent = (30 - policy_count) / 30 * a1_default_target_positions + policy_count / 30 * np.array(self.default_target_positions) for i in range(4): self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1] self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2] cmd=self.process_send_cmd(np.concatenate((self.actual_pTs_filtered_sent,[0.0, 0.0, 0.0],np.zeros((12,)), [0.0,0.0,0.0]))) self.robot.send_command(cmd) policy_count += 1 current_time = time.time() # print("proc", "Frequency: ", 1/(current_time - previous_time + 1e-10)) previous_time = current_time delay = 0.6 _ = time.perf_counter() + delay/1000 while time.perf_counter() < _: pass def press(self, key): print("Doing nothing") def set_actions_from_policy(self, planner_actions=None): import time # print("delay: ", time.time()-planner_actions[-1]) planner_actions = planner_actions[:-1] if not self.use_planner: raise NotImplementedError else: if not self.reference_generator.action_enabled: self.selected_policy = int(planner_actions[-1]) if self.selected_policy == 1: self.ever_jump = True # if self.reference_generator.motion_phase == 2: # if self.ever_jump: # self.selected_policy = 1 # else: # self.selected_policy = 2 planner_actions[-1] = self.selected_policy self.reference_generator.set_actions_from_policy(planner_actions, self.timestep) def get_robot_states(self, planner_actions=None): ob_curr = np.concatenate([self.obs_robot_state.rot_quat, self.obs_robot_state.motor_pos]) robot_states = np.concatenate([np.array(self.previous_obs)[-5:].flatten(), ob_curr]) robot_actions = np.concatenate([np.array(self.previous_acs)[-5:].flatten(), self.actual_pTs_filtered]) reference_params = 
np.concatenate([self.reference_generator.get_fixed_motion_duration(), self.reference_generator.get_motion_t_norm(), self.reference_generator.get_motion_phase()]) return robot_states, robot_actions, reference_params def run_policy(self): proc = threading.Thread(target=self.pid_ctrl) proc.start() listen_keyboard(on_press=self.press, until='space') proc.do_run = False previous_time = time.perf_counter() # proc_squat_prep = threading.Thread(target=self.pid_ctrl_squat_prep) # proc_squat_prep.start() # listen_keyboard(on_press=self.press, until='space') # proc_squat_prep.do_run = False self.pid_ctrl_squat_prep() while(True): if not self.policy_running: self.policy_running = True obs = self.robot.receive_observation() self.__process_recv_package(obs) if self.est_timestep % 1 == 0: # print("self.est_timestep", self.est_timestep) # print("self.num_sims_per_env_step", self.num_sims_per_env_step) # if self.timestep < 30: # self.reference_generator.set_policy(0, self.timestep) # self.selected_policy = 0 # else: # self.reference_generator.set_policy(1, self.timestep) # self.selected_policy = 1 if self.timestep == 0: self.actual_pTs_filtered = np.zeros(12) self.__get_observation(np.copy(self.actual_pTs_filtered), step=True) self.actual_pTs_filtered, ob, ac= self.__get_action() self.low_obs_act.append((np.copy(self.curr_obs), np.copy(self.actual_pTs_filtered))) self.actual_pTs_filtered = np.round(self.actual_pTs_filtered,5) self.actual_pTs_filtered_sent = np.copy(self.actual_pTs_filtered) for i in range(4): # self.actual_pTs_filtered_sent[3*i] = -self.actual_pTs_filtered_sent[3*i] self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1] self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2] self.robot.send_command(self.actual_pTs_filtered_sent) self.est_timestep = 0 self.__env_update() else: # send previous action package self.robot.send_command(self.actual_pTs_filtered_sent) time.sleep(0.00001) current_time = time.time() # print("Frequency: ", 1/(current_time - previous_time)) previous_time = current_time self.est_timestep += 1 def pid_ctrl_restore_stand(self): policy_count = 0 a1_default_target_positions = np.array([0.0,0.9,-1.8, 0.0,0.9,-1.8, 0.0,0.9,-1.8, 0.0,0.9,-1.8]) obs = self.robot.receive_observation() self.__process_recv_package(np.copy(obs)) landing_joint_pos = np.array(obs[6:18]) landing_joint_pos[[1,2,4,5,7,8,10,11]] *= -1 restore_duration = 60 while policy_count < 60: obs = self.robot.receive_observation() self.__process_recv_package(np.copy(obs)) if policy_count > restore_duration: self.actual_pTs_filtered_sent = np.copy(a1_default_target_positions) for i in range(4): self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1] self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2] cmd=self.process_send_cmd(np.concatenate((self.actual_pTs_filtered_sent,[0.0, 0.0, 0.0],np.zeros((12,)), [0.0,0.0,0.0]))) else: self.actual_pTs_filtered_sent = (restore_duration - policy_count) / restore_duration * landing_joint_pos + policy_count / restore_duration * np.array(a1_default_target_positions) for i in range(4): self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1] self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2] cmd=self.process_send_cmd(np.concatenate((self.actual_pTs_filtered_sent,[0.0, 0.0, 0.0],np.zeros((12,)), [0.0,0.0,0.0]))) self.robot.send_command(cmd) policy_count += 1 current_time = time.time() # print("proc", "Frequency: ", 1/(current_time - previous_time + 
1e-10)) previous_time = current_time delay = 0.6 _ = time.perf_counter() + delay/1000 while time.perf_counter() < _: pass
yichen928/RSR_Goalkeeper
src/rl_control/env.py
env.py
py
18,624
python
en
code
0
github-code
36
[ { "api_name": "numpy.ndarray", "line_number": 23, "usage_type": "attribute" }, { "api_name": "numpy.ndarray", "line_number": 24, "usage_type": "attribute" }, { "api_name": "numpy.ndarray", "line_number": 25, "usage_type": "attribute" }, { "api_name": "numpy.ndarra...
27435770961
import os
import pathlib

from matplotlib import pyplot as plt
from skimage import io, img_as_float
from skimage.color import rgb2gray
from skimage.filters.edges import sobel
from skimage.segmentation import felzenszwalb, watershed, mark_boundaries, slic, quickshift


def read_boundaries(_img):
    # Read image
    _img = io.imread(_img)

    row = 3
    column = 2
    _img_as_float = img_as_float(_img[::2, ::2])

    # ------------------------------------------------------------------ #
    plt.figure("Segmentation")
    plt.axis("off")
    plt.subplot(row, column, 1, title="Original")
    plt.imshow(_img_as_float)

    # ------------------------------------------------------------------ #
    segments_fz = felzenszwalb(_img_as_float, scale=100, sigma=0.5, min_size=50)
    plt.subplot(row, column, 3, title="Felzenszwalb")
    plt.imshow(mark_boundaries(_img_as_float, segments_fz))

    # ------------------------------------------------------------------ #
    gradient = sobel(rgb2gray(_img_as_float))
    segments_watershed = watershed(gradient, markers=250, compactness=0.001)
    plt.subplot(row, column, 4, title="Watershed")
    plt.imshow(mark_boundaries(_img_as_float, segments_watershed))

    # ------------------------------------------------------------------ #
    _slic = slic(_img_as_float, n_segments=250, compactness=10, sigma=1, start_label=1)
    plt.subplot(row, column, 5, title="SLIC")
    plt.imshow(mark_boundaries(_img_as_float, _slic))

    # ------------------------------------------------------------------ #
    _quick = quickshift(_img_as_float, kernel_size=3, max_dist=6, ratio=0.5)
    plt.subplot(row, column, 6, title="Quick")
    plt.imshow(mark_boundaries(_img_as_float, _quick))

    plt.show(block=True)


if __name__ == "__main__":
    dir_path = os.path.join(pathlib.Path(__file__).parent.parent, "input3", "fish.bmp")
    read_boundaries(dir_path)
206081/psio
Lab3/zad3.py
zad3.py
py
2,134
python
en
code
0
github-code
36
[ { "api_name": "skimage.io.imread", "line_number": 14, "usage_type": "call" }, { "api_name": "skimage.io", "line_number": 14, "usage_type": "name" }, { "api_name": "skimage.img_as_float", "line_number": 17, "usage_type": "call" }, { "api_name": "matplotlib.pyplot.f...
38232459709
""" 获取csdn用户资料 """ import sys import json sys.path.append(r'D:\github\python\python-spider') from csdnTest import * #1.链接本地数据库服务 name = MongoClient('localhost') #2.链接本地数据库 demo 没有会创建 db = name.demo #demo数据库名 # 3.创建,连接集合 emp = db.employees # employees集合名 user = db.csdn_users # page集合名 # 爬取‘前端’模块 category = 'web' headers = getHeader() # 定制请求头 #开启多线程 def openThread(): return False; def getData(proxies,shown_offset): url = 'https://blog.csdn.net/api/articles?type=more&category=' + category+'&shown_offset='+shown_offset print("当前访问的url:" + url + ",访问的proxies:" + str(proxies)) try: request = requests.get(url=url, proxies=proxies, headers=headers, timeout=4) if request.status_code != 200: print('200') return False except: print('3333') return False content = json.loads(request.content) shownOffset = str(content['shown_offset']) print(shownOffset) all = content['articles'] list = [] for each in all: print(each['user_name']) handleData.writeUser(user, { "user_name": each['user_name'], "user_url": each['user_url'], "avatar": each['avatar'] }) sleepTime = random.choice([3,4,5,6]); time.sleep(sleepTime) getData(proxies, shownOffset) def start(): url = 'https://blog.csdn.net/nav/'+category ip = handleData.getIp(emp) # 代理ip proxies = {"http": "http://" + ip, "https": "http://" + ip} print("当前访问的url:" + url + ",访问的ip:" + ip) try: request = requests.get(url=url, proxies=proxies, headers=headers, timeout=4) if request.status_code != 200: return False text = request.text soup = BeautifulSoup(text, 'lxml') content = soup.find('ul', class_='feedlist_mod') shownOffset = str(content['shown-offset']) except: print('失败了,删除ip' + ip) # 删除失效ip handleData.delete(emp, {'ip': ip}) start() return False getData(proxies, shownOffset) if __name__ == '__main__': start()
guosimin/python-spider
csdnTest/getUser.py
getUser.py
py
2,266
python
en
code
6
github-code
36
[ { "api_name": "sys.path.append", "line_number": 7, "usage_type": "call" }, { "api_name": "sys.path", "line_number": 7, "usage_type": "attribute" }, { "api_name": "json.loads", "line_number": 39, "usage_type": "call" } ]
33164677249
""" 2019 La Brachistochrone Réelle Un TIPE réalisé par Gautier BEN AÏM http://tobog.ga """ import numpy as np # # I. Calculs physiques # ====================== # def generer_ligne(longueur, hauteur, nb_points): """ Renvoie le toboggan ligne droite. Un toboggan est représenté par un triplet (longueur, hauteur, liste des hauteurs des points intermédiaires) longueur : flottant, distance horizontale entre le départ et l'arrivée hauteur : flottant, distance verticale nb_points : entier, nombre total de points """ return ( longueur, hauteur, [hauteur * (1. - i / (nb_points - 1)) for i in range(1, nb_points - 1)], ) def calculer_temps_segment(distance, v, deriver_v, limite, pas): """ Renvoie le temps et la vitesse après le parcours d'un segment. distance : flottant, distance à parcourir v : flottant, vitesse intiale deriver_v : fonction, renvoie la dérivée de la vitesse limite : flottant, limite de temps de parcours pas : flottant, intervalle de temps dt """ t = 0. x = 0. # On utilise la méthode d'Euler while x < distance and t < limite and v >= 0.: x += pas * v v += pas * deriver_v(v) t += pas if x >= distance: return t, v return None, None def calculer_temps_toboggan(toboggan, appliquer_pfd, limite, pas): """ Renvoie le temps de parcours du toboggan donné. toboggan : triplet appliquer_pfd : fonction, renvoie deriver_v limite : flottant, limite de temps de parcours pas : flottant, intervalle de temps dt """ points = toboggan[2][:] points.append(0.) # On rajoute l'arrivée l = len(points) section = toboggan[0] / l # Distance horizontale entre deux points section2 = section * section temps_total = 0. vitesse = 0. depart = toboggan[1] for i in range(l): arrivee = points[i] distance = ((depart - arrivee) * (depart - arrivee) + section2) ** 0.5 # On applique le PFD sur le segment deriver_v = appliquer_pfd(section, depart - arrivee) temps, vitesse = calculer_temps_segment( distance, vitesse, deriver_v, limite, pas ) if temps is None: return None temps_total += temps limite -= temps depart = arrivee return temps_total # # II. Algorithme hybride # ======================== # def generer_evaluateur(appliquer_pfd): """ Renvoie une fonction qui calcule le score (le temps de parcours) d'un toboggan. appliquer_pfd : fonction, renvoie deriver_v """ return lambda toboggan, limite, pas: ( calculer_temps_toboggan(toboggan, appliquer_pfd, limite, pas) ) def muter_creuser(toboggan, n): """ Creuse un intervalle choisi au hasard d'une profondeur au hasard. """ _, hauteur, points = toboggan i = np.random.randint(len(points)) j = np.random.randint(len(points)) if i > j: i, j = j, i h = hauteur / (1. + 0.05 * n) v = np.random.uniform(-h, h) for k in range(i, j + 1): points[k] += v def muter_lisser(toboggan, n): """ Prend un point au hasard et en fait la moyenne de ses voisins. """ _, _, points = toboggan i = np.random.randint(len(points) - 2) points[i + 1] = (points[i] + points[i + 2]) / 2. def diviser(toboggan, nb_points): """ Coupe chaque segment pour augmenter le nombre de points. """ longueur, hauteur, anciens_points = toboggan anciens_points = [hauteur] + anciens_points + [0.] ancien_nb_points = len(anciens_points) points = [] for i in range(1, nb_points - 1): x = i * (ancien_nb_points - 1) / (nb_points - 1) j = int(x) t = x % 1 points.append((1 - t) * anciens_points[j] + t * anciens_points[j + 1]) return longueur, hauteur, points def generer_incrementeur(evaluateur, nb_points, facteur_nb_points, pas, facteur_pas): """ Renvoie une fonction qui permet de passer à la génération suivante. 
evaluateur : fonction, renvoyée par generer_evaluateur nb_points : entier, nombre de points initial facteur_nb_points : flottant, coefficient multiplicateur pas : flottant, pas initial facteur_pas : flottant, coefficient multiplicateur """ def premiere_generation(meilleur_candidat): """ Lorsque incrementer_generation est appelée pour la première fois. """ def calculer_score(toboggan, limite): return evaluateur(toboggan, limite, pas) meilleur_score = calculer_score(meilleur_candidat, 10.) if meilleur_score is None: raise Exception("Le candidat proposé ne fonctionne pas") return meilleur_candidat, meilleur_score, calculer_score def incrementer_generation(generation, meilleur_candidat, meilleur_score): """ Passe à la génération suivante. """ if generation == 0: return premiere_generation(meilleur_candidat) nouveau_pas = pas * facteur_pas ** generation def calculer_score(toboggan, limite): return evaluateur(toboggan, limite, nouveau_pas) meilleur_candidat = diviser( meilleur_candidat, (nb_points - 1) * facteur_nb_points ** generation + 1 ) score = calculer_score(meilleur_candidat, 2 * meilleur_score) if not score is None: meilleur_score = score return meilleur_candidat, meilleur_score, calculer_score return incrementer_generation def evoluer( toboggan, nb_generations, generation_suivante, incrementer_generation, periode_lisser, signaler_fin, rafraichir=None, ): """ Améliore itérativement le toboggan donné en argument. toboggan : triplet nb_generations : entier, maximum de modifications des paramètres generation_suivante : entier, individus à tester avant de passer incrementer_generation : fonction, appelée au changement de génération periode_lisser : entier, période entre deux lissages signaler_fin : fonction, commande l'arrêt de la fonction rafraichir : fonction, appelée à chaque amélioration """ generation = 0 meilleur_candidat, meilleur_score, calculer_score = incrementer_generation( generation, toboggan, None ) # Nombre de candidats générés, dernier progrès enregistré n = 0 dernier_progres = 0 nb_progres = 0 print("Initialisation, score : {:f}".format(meilleur_score)) while not signaler_fin(): n += 1 # Si l'algorithme ne progresse plus, on augmente la finesse if ( n - dernier_progres >= generation_suivante and generation < nb_generations - 1 ): generation += 1 dernier_progres = n meilleur_candidat, meilleur_score, calculer_score = incrementer_generation( generation, meilleur_candidat, meilleur_score ) print( "Génération {} ({}), score : {:f}".format(generation, n, meilleur_score) ) # On prend un nouveau candidat candidat = (meilleur_candidat[0], meilleur_candidat[1], meilleur_candidat[2][:]) # On le mute if n % periode_lisser == 0: muter_lisser(candidat, n) else: muter_creuser(candidat, n) # Et enfin on le teste score = calculer_score(candidat, meilleur_score) if not score is None and score < meilleur_score: nb_progres += 1 dernier_progres = n meilleur_candidat = candidat meilleur_score = score if not rafraichir is None: rafraichir(meilleur_candidat, meilleur_score) print(("{} individus testés, {} conservés").format(n, nb_progres)) return meilleur_candidat # # III. Génération d'une cycloïde # ================================ # def generer_cycloide(longueur, hauteur, nb_points): """ Renvoie le toboggan cycloïde. """ def trouver_zero(f, a, b, precision=1e-9): """ Recherche dichotomique du zéro de f entre a et b. """ fa = f(a) while b - a > precision: m = (a + b) / 2. 
fm = f(m) if fm == 0.: return m elif fm * fa > 0.: a = m fa = f(a) else: b = m return m # Valeur de thêta du point d'arrivée theta = trouver_zero( lambda t: hauteur / longueur - (1. - np.cos(t)) / (t - np.sin(t)), 0.001, 2 * np.pi, ) # Rayon de la cycloïde reliant le départ et l'arrivée r = hauteur / (1. - np.cos(theta)) # Points de la courbe paramétrée courbe = [] for i in range(2 * nb_points + 1): t = theta * i / (2 * nb_points) x = r * (t - np.sin(t)) y = r * (np.cos(t) - 1.) + hauteur courbe.append((x, y)) # Points intermédiaires du toboggan points = [] j = 0 for i in range(1, nb_points - 1): x = longueur * i / (nb_points - 1) while courbe[j][0] < x: j += 1 a = (courbe[j][1] - courbe[j - 1][1]) / (courbe[j][0] - courbe[j - 1][0]) b = courbe[j][1] - a * courbe[j][0] points.append(a * x + b) return longueur, hauteur, points # # IV. Génération de la meilleure courbe # ======================================= # if __name__ == "__main__": import sys import matplotlib.pyplot as plt from time import time debut = time() # Paramètres de l'expérience longueur = 1.2 hauteur = 0.5 # Paramètres de l'algorithme nb_points = 121 # Départ + intermédiaires + arrivée pas = 0.000001 # Intervalle de temps dt nb_generations = 4 generation_suivante = 150 periode_lisser = 8 nb_points_initial = 16 facteur_nb_points = 2 pas_initial = 0.0004 facteur_pas = 0.2 temps_de_calcul = int(sys.argv[1]) if len(sys.argv) >= 2 else 60 def appliquer_pfd(x, y): """ PFD au point parcourant le toboggan. """ g_sin_theta = 9.81 * y / (y * y + x * x) ** 0.5 fg_cos_theta = 0.3263 * 9.81 * x / (y * y + x * x) ** 0.5 a = g_sin_theta - fg_cos_theta # Renvoie la dérivée de la vitesse v exprimée en fonction d'elle-même return lambda v: a - 0.0026 * v - 0.4748 * v * v # Calcul pour la cycloïde cycloide = generer_cycloide(longueur, hauteur, nb_points) calculer_score = generer_evaluateur(appliquer_pfd) temps_cycloide = calculer_score(cycloide, 10., pas) # Point de départ de l'algorithme ligne = generer_ligne(longueur, hauteur, nb_points_initial) # Affichage plt.figure("Toboggan", figsize=(8, 6), dpi=72) plt.plot( np.linspace(0., longueur, nb_points), [hauteur] + cycloide[2] + [0.], "#363737", dashes=[3, 2], label="cycloïde" if temps_cycloide is None else "cycloïde ({:f} s)".format(temps_cycloide), ) graphe, = plt.plot( np.linspace(0., longueur, nb_points_initial), [hauteur] + ligne[2] + [0.], "#ef4026", linewidth=2, label="toboggan", ) plt.title("La brachistochrone réelle") plt.xlabel("Longueur (m)") plt.ylabel("Hauteur (m)") plt.axis("equal") plt.legend() plt.draw() plt.pause(0.001) def generer_chronometre(): """ Renvoie toutes les fonctions dépendantes du temps. """ debut = time() def temps_ecoule(): """ Temps écoulé. """ return time() - debut def signaler_fin(): """ Signal de fin. """ return temps_ecoule() > temps_de_calcul def rafraichir(toboggan, temps): """ Met à jour le graphe à chaque amélioration. 
""" t = temps_ecoule() nb_points = len(toboggan[2]) + 2 if len(graphe.get_xdata()) != nb_points: graphe.set_xdata(np.linspace(0., longueur, nb_points)) graphe.set_ydata([hauteur] + toboggan[2] + [0.]) graphe.set_label("toboggan ({:f} s)".format(temps)) plt.title( "La brachistochrone réelle après {:d} min {:0>2d} s de calcul".format( int(t / 60), int(t % 60) ) ) if temps_cycloide is None or temps <= temps_cycloide: graphe.set_color("#0165fc") plt.legend() plt.draw() plt.pause(0.001) return signaler_fin, rafraichir signaler_fin, rafraichir = generer_chronometre() # Appel de l'algorithme hybride toboggan = evoluer( ligne, nb_generations, generation_suivante, generer_incrementeur( calculer_score, nb_points_initial, facteur_nb_points, pas_initial, facteur_pas, ), periode_lisser, signaler_fin, rafraichir, ) temps = calculer_score(toboggan, 10., pas) rafraichir(toboggan, temps) print("Temps sur le toboggan optimisé : {:f} secondes".format(temps)) if not temps_cycloide is None: print( ( "Temps sur la cycloïde ........ : {:f} secondes\n" + "Différence de temps .......... : {:f} secondes" ).format(temps_cycloide, abs(temps_cycloide - temps)) ) else: print("La cycloïde ne permet pas de rejoindre les deux points") # Temps d'exécution print("Calculé en {:f} secondes".format(time() - debut)) if len(sys.argv) >= 3 and sys.argv[2] == "svg": plt.savefig("toboggan.svg") plt.show()
GauBen/Toboggan
toboggan.py
toboggan.py
py
14,074
python
fr
code
1
github-code
36
[ { "api_name": "numpy.random.randint", "line_number": 114, "usage_type": "call" }, { "api_name": "numpy.random", "line_number": 114, "usage_type": "attribute" }, { "api_name": "numpy.random.randint", "line_number": 115, "usage_type": "call" }, { "api_name": "numpy....
10328382240
from django.shortcuts import render,redirect,get_object_or_404 from django.http import HttpResponse from .models import Item,Category import datetime # Create your views here. def index(request): tasks = Item.objects.all() categories = Category.objects.all() context = {"task_list" : tasks, "category_list":categories} return render(request,"index.html" , context) def add_task(request): # time = datetime.now() title = request.POST["task"] iscompleted = False category = request.POST["category_select"] print(category) todo = Item() todo.title= title todo.iscompleted = iscompleted todo.category = Category.objects.get(title = category) todo.save() return redirect('/') def complete_task(request,task_id): todo = Item.objects.get(id=task_id) todo.iscompleted = True todo.save() return redirect('/') def delete_task(request,task_id): Item.objects.get(id=task_id).delete() return redirect('/')
FazalJarral/Notetaker
todo/views.py
views.py
py
1,004
python
en
code
0
github-code
36
[ { "api_name": "models.Item.objects.all", "line_number": 9, "usage_type": "call" }, { "api_name": "models.Item.objects", "line_number": 9, "usage_type": "attribute" }, { "api_name": "models.Item", "line_number": 9, "usage_type": "name" }, { "api_name": "models.Cate...
4409217473
from time import sleep

import requests


class AntiCaptcha:
    def __init__(self, client_key):
        self.base_url = "https://api.anti-captcha.com/"
        self.headers = {"Content-Type": "application/json"}
        self.client_key = client_key

    def _post(self, endpoint: str, data: dict):
        """Make requests to api.anti-captcha.com

        Args:
            endpoint (str): API Endpoint
            data (dict): API Payload

        Raises:
            Exception: API Error

        Returns:
            dict: Parsed JSON response
        """
        url = self.base_url + endpoint
        data.update({"clientKey": self.client_key})
        # The API expects a JSON body, so send the payload via json= rather than form data
        response = requests.post(url, json=data, headers=self.headers)
        # response.json() returns a dict, so use key access instead of attribute access
        result = response.json()
        if result.get("errorId", 0) == 0:
            return result
        else:
            raise Exception(result)

    def create_task(self, data: dict):
        """Create Task

        Args:
            data (dict): createTask Payload

        Returns:
            dict: Parsed JSON response
        """
        return self._post("createTask", data)

    def get_task(self, task_id: str):
        """Get task status/result

        Args:
            task_id (str): API Task ID

        Returns:
            dict: Parsed JSON response
        """
        return self._post("getTaskResult", {"taskId": task_id})

    def get_result(self, task_id: str, sleep_seconds: float = 5):
        """Wait for result

        Args:
            task_id (str): API Task ID
            sleep_seconds (float, optional): Amount of time to sleep between checks. Defaults to 5.

        Raises:
            Exception: API Error

        Returns:
            dict: Parsed JSON response
        """
        # Poll until the task leaves the "processing" state
        result = self.get_task(task_id)
        while result.get("status") == "processing":
            sleep(sleep_seconds)
            result = self.get_task(task_id)

        if result.get("errorId", 0) == 0:
            return result
        else:
            raise Exception(result)

    def get_token(self, task_id: str, sleep_seconds: float = 5):
        """Get result token

        Args:
            task_id (str): API Task ID
            sleep_seconds (float, optional): Amount of time to sleep between checks. Defaults to 5.

        Returns:
            str: API Result Token
        """
        return self.get_result(task_id, sleep_seconds)["solution"]["token"]

    def solve(self, data: dict):
        """All-in-one function to get token

        Args:
            data (dict): createTask Payload

        Returns:
            str: API Token
        """
        result = self.create_task(data)
        token = self.get_token(result["taskId"])
        return token
ShayBox/AntiCaptcha
anticaptcha/main.py
main.py
py
2,693
python
en
code
3
github-code
36
[ { "api_name": "requests.post", "line_number": 27, "usage_type": "call" }, { "api_name": "time.sleep", "line_number": 75, "usage_type": "call" } ]
1398892267
from flask import Flask,render_template from time import time class Blockchain: def __init__(self): self.transactions = [] self.chain = [] self.create_block(0, '00') def create_block(self, nonce, previous_hash): block = { 'block_number': len(self.chain)+1, 'timestamp': time(), 'transactions': self.transactions, 'nonce': nonce, 'previous_hash': previous_hash } self.transactions = [] self.chain.append(block) app = Flask(__name__) @app.route('/') def index(): return render_template("index.html") if __name__ == "__main__": app.run(debug=True)
Thilagavathycse/Block-Chain-learnings
blockchain.py
blockchain.py
py
687
python
en
code
0
github-code
36
[ { "api_name": "time.time", "line_number": 14, "usage_type": "call" }, { "api_name": "flask.Flask", "line_number": 24, "usage_type": "call" }, { "api_name": "flask.render_template", "line_number": 29, "usage_type": "call" } ]
38877688552
import pandas as pd
# For preprocessing the data (label encoding of categorical columns)
from sklearn import preprocessing
# To split the dataset into train and test datasets
from sklearn.model_selection import train_test_split
# To model the Gaussian Naive Bayes classifier
from sklearn.naive_bayes import GaussianNB
# To calculate the accuracy score of the model
from sklearn.metrics import accuracy_score

file = r'/home/deepa/Downloads/adult.csv'
# df = pd.read_csv(file)
# print(df)
# comma delimited is the default
adult_df = pd.read_csv(file, header=None, delimiter=' *, *', engine='python')

adult_df.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
                    'marital_status', 'occupation', 'relationship',
                    'race', 'sex', 'capital_gain', 'capital_loss',
                    'hours_per_week', 'native_country', 'income']

adult_df.isnull().sum()

for value in ['workclass', 'education', 'marital_status', 'occupation',
              'relationship', 'race', 'sex', 'native_country', 'income']:
    print(value, ":", sum(adult_df[value] == '?'))

# Data preprocessing
adult_df_rev = adult_df
adult_df_rev.describe(include='all')

# Replace missing '?' entries with the most frequent (top) value of each column
for value in ['workclass', 'education', 'marital_status', 'occupation',
              'relationship', 'race', 'sex', 'native_country', 'income']:
    adult_df_rev[value].replace(['?'], [adult_df_rev.describe(include='all')[value][2]],
                                inplace=True)

# Encode the categorical columns into numeric *_cat columns used below
le = preprocessing.LabelEncoder()
for col, cat_col in [('workclass', 'workclass_cat'), ('education', 'education_cat'),
                     ('marital_status', 'marital_cat'), ('occupation', 'occupation_cat'),
                     ('relationship', 'relationship_cat'), ('race', 'race_cat'),
                     ('sex', 'sex_cat'), ('native_country', 'native_country_cat')]:
    adult_df_rev[cat_col] = le.fit_transform(adult_df_rev[col])

num_features = ['age', 'workclass_cat', 'fnlwgt', 'education_cat', 'education_num',
                'marital_cat', 'occupation_cat', 'relationship_cat', 'race_cat', 'sex_cat',
                'capital_gain', 'capital_loss', 'hours_per_week', 'native_country_cat']

# Standardise the numeric features
scaled_features = {}
for each in num_features:
    mean, std = adult_df_rev[each].mean(), adult_df_rev[each].std()
    scaled_features[each] = [mean, std]
    adult_df_rev.loc[:, each] = (adult_df_rev[each] - mean) / std

# Keep only the encoded/scaled feature columns plus the target column
adult_df_rev = adult_df_rev[num_features + ['income']]

features = adult_df_rev.values[:, :14]
target = adult_df_rev.values[:, 14]

features_train, features_test, target_train, target_test = train_test_split(
    features, target, test_size=0.33, random_state=10)

clf = GaussianNB()
clf.fit(features_train, target_train)
target_pred = clf.predict(features_test)

d = accuracy_score(target_test, target_pred, normalize=True)
print(d)
XecureBot/DeepikaDS
data.py
data.py
py
2,534
python
en
code
0
github-code
36
[ { "api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call" }, { "api_name": "sklearn.cross_validation.train_test_split", "line_number": 52, "usage_type": "call" }, { "api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 54, "usage_type": "call" }, ...
21011472446
import os from pytest import raises from pydantic.error_wrappers import ValidationError from pathlib import Path from unittest import TestCase from lazy_env_configurator import BaseConfig, BaseEnv from lazy_env_configurator.custom_warnings import EnvWarning class TestInvalidEnv(TestCase): def test_eager_validation(self): with raises(ValidationError) as e: class ContainedEnv(BaseEnv): class Config(BaseConfig): envs = ("FOO", "APP") dot_env_path = Path(__file__).parent / ".env.contained" contained = True validations = { "FOO": { "required": True, "type": str, }, "APP": { "required": True, "type": str } } eagerly_validate = True self.assertIsInstance(e.value, ValidationError) self.assertEqual(e.value.raw_errors[0].loc_tuple()[1], "APP")
satyamsoni2211/lazy_env_configurator
tests/test_eager_validation.py
test_eager_validation.py
py
1,131
python
en
code
2
github-code
36
[ { "api_name": "unittest.TestCase", "line_number": 10, "usage_type": "name" }, { "api_name": "pytest.raises", "line_number": 13, "usage_type": "call" }, { "api_name": "pydantic.error_wrappers.ValidationError", "line_number": 13, "usage_type": "argument" }, { "api_n...
8596743224
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Extract persistent network by removing ephemeral links and adding missing links. Two filters: 1. at least 100 daily views for target video 2. the mean daily views of source video is at least 1% of the target video Usage: python extract_persistent_network.py Input data files: ../data/vevo_forecast_data_60k.tsv, ../data/network_pickle/ Output data files: ../data/persistent_network.csv Time: ~7M """ import sys, os, pickle from datetime import datetime, timedelta sys.path.append(os.path.join(os.path.dirname(__file__), '../')) from utils.data_loader import DataLoader from utils.helper import Timer, obj2str, is_persistent_link, is_same_genre def main(): # == == == == == == Part 1: Set up environment == == == == == == # timer = Timer() timer.start() data_prefix = '../data/' # == == == == == == Part 2: Load video views == == == == == == # data_loader = DataLoader() data_loader.load_video_views() embed_avg_view_dict = data_loader.embed_avg_view_dict num_videos = data_loader.num_videos data_loader.load_embed_content_dict() embed_cid_dict = data_loader.embed_cid_dict embed_genre_dict = data_loader.embed_genre_dict # == == == == == == Part 3: Load dynamic network snapshot == == == == == == # network_dict_list = [] for t in range(T): target_date_str = obj2str(datetime(2018, 9, 1) + timedelta(days=t)) filename = 'network_{0}.p'.format(target_date_str) network_dict = pickle.load(open(os.path.join(data_prefix, 'network_pickle', filename), 'rb')) for embed in network_dict: network_dict[embed] = [x[0] for x in network_dict[embed] if x[1] < NUM_REL] network_dict_list.append(network_dict) persistent_src_embed_set = set() persistent_tar_embed_set = set() existing_edges = set() num_reciprocal_edges = 0 num_same_artist = 0 num_same_genre = 0 with open(os.path.join(data_prefix, 'persistent_network.csv'), 'w') as fout: fout.write('Source,Target\n') for tar_embed in range(num_videos): src_union_set = set() for t in range(T): src_union_set.update(set(network_dict_list[t][tar_embed])) for src_embed in src_union_set: linkage_list = [0] * T for t in range(T): if src_embed in network_dict_list[t][tar_embed]: linkage_list[t] = 1 if is_persistent_link(linkage_list): # filter: at least 100 daily views for target video, # and the mean daily views of source video is at least 1% of the target video src_mean = embed_avg_view_dict[src_embed] tar_mean = embed_avg_view_dict[tar_embed] if tar_mean >= 100 and src_mean >= 0.01 * tar_mean: fout.write('{0},{1}\n'.format(src_embed, tar_embed)) persistent_src_embed_set.add(src_embed) persistent_tar_embed_set.add(tar_embed) if '{1}-{0}'.format(src_embed, tar_embed) in existing_edges: num_reciprocal_edges += 1 if embed_cid_dict[src_embed] == embed_cid_dict[tar_embed]: num_same_artist += 1 if is_same_genre(embed_genre_dict[src_embed], embed_genre_dict[tar_embed]): num_same_genre += 1 existing_edges.add('{0}-{1}'.format(src_embed, tar_embed)) print('{0} edges in the persistent network'.format(len(existing_edges))) print('{0} source videos, {1} target videos, {2} videos appear in both set'.format(len(persistent_src_embed_set), len(persistent_tar_embed_set), len(persistent_src_embed_set.intersection(persistent_tar_embed_set)))) print('{0} pairs of reciprocal edges'.format(num_reciprocal_edges)) print('{0} ({1:.1f}%) edges belong to the same artist'.format(num_same_artist, 100 * num_same_artist / len(existing_edges))) print('{0} ({1:.1f}%) edges belong to the same genre'.format(num_same_genre, 100 * num_same_genre / len(existing_edges))) 
timer.stop() if __name__ == '__main__': NUM_REL = 15 T = 63 main()
avalanchesiqi/networked-popularity
wrangling/extract_persistent_network.py
extract_persistent_network.py
py
4,496
python
en
code
11
github-code
36
[ { "api_name": "sys.path.append", "line_number": 18, "usage_type": "call" }, { "api_name": "sys.path", "line_number": 18, "usage_type": "attribute" }, { "api_name": "os.path.join", "line_number": 18, "usage_type": "call" }, { "api_name": "os.path", "line_number...
27687264440
from WGF import GameWindow, AssetsLoader, shared from os.path import join import logging log = logging.getLogger(__name__) SETTINGS_PATH = join(".", "settings.toml") LEADERBOARD_PATH = join(".", "leaderboard.json") LB_LIMIT = 5 def load_leaderboard(): if getattr(shared, "leaderboard", None) is None: from Game.leaderboard import Leaderboard try: lb = Leaderboard.from_file(LEADERBOARD_PATH, limit=LB_LIMIT) except Exception as e: log.warning(f"Unable to load leaderboard: {e}") # Creating default lb, in case our own doesnt exist board = { # #TODO: for now, entries are placeholders and dont match actual # score/kills values you can get in game "endless": { "slug": "Endless", "entries": [ {"name": "xXx_Gamer_xXx", "score": 720, "kills": 69}, {"name": "amogus", "score": 300, "kills": 50}, {"name": "Gabriel", "score": 100, "kills": 20}, {"name": "Default", "score": 50, "kills": 10}, {"name": "Karen", "score": 10, "kills": 1}, ], }, "time_attack": { "slug": "Time Attack", "entries": [ {"name": "Top_Kek", "score": 300, "kills": 50}, {"name": "loss", "score": 200, "kills": 30}, {"name": "Someone", "score": 150, "kills": 25}, {"name": "Amanda", "score": 75, "kills": 13}, {"name": "123asd123", "score": 10, "kills": 1}, ], }, } lb = Leaderboard( leaderboard=board, path=LEADERBOARD_PATH, limit=LB_LIMIT, ) lb.to_file() shared.leaderboard = lb return shared.leaderboard def make_game() -> GameWindow: """Factory to create custom GameWindow""" mygame = GameWindow("WeirdLand") assets_directory = join(".", "Assets") img_directory = join(assets_directory, join("Sprites")) mygame.assets = AssetsLoader( assets_directory=assets_directory, fonts_directory=join(assets_directory, "Fonts"), sounds_directory=join(assets_directory, "Sounds"), font_extensions=[".ttf"], image_extensions=[".png"], sound_extensions=[".wav"], ) # Overriding some built-in defaults and adding new mygame.settings.set_default("vsync", True) mygame.settings.set_default("show_fps", False) mygame.settings.set_default("camera_speed", 0.8) mygame.settings.set_default( "window_modes", { "double_buffer": True, "hardware_acceleration": True, }, ) mygame.icon_path = join(".", "icon.png") mygame.settings.from_toml(SETTINGS_PATH) mygame.init() mygame.assets.load_all() mygame.assets.spritesheets = {} # from WGF import shared load_leaderboard() # This is kinda janky, but also kinda not? shared.sprite_scale = 4 mygame.assets.load_images( path=join(img_directory, "4x"), scale=shared.sprite_scale, ) shared.extra_scale = 2 mygame.assets.load_images( path=join(img_directory, "2x"), scale=shared.extra_scale, ) # Specifying font as shared variable, since it should be used in all scenes shared.font = mygame.assets.load_font("./Assets/Fonts/romulus.ttf", 36) shared.game_paused = False from WGF.nodes import Align from WGF import Point from Game.ui import make_text fps_counter = make_text( name="fps_counter", text="", pos=Point(mygame.screen.get_rect().width, 0), align=Align.topright, ) @fps_counter.updatemethod def update_fps(): # if not shared.game_paused: fps_counter.text = f"FPS: {mygame.clock.get_fps():2.0f}" from Game.scenes import logo, level, menus mygame.tree.add_child(logo.sc) mygame.tree.add_child(menus.mm_wrapper, show=False) mygame.tree.add_child(level.sc, show=False) level.sc.stop() mygame.tree.add_child(fps_counter, show=mygame.settings["show_fps"]) return mygame
moonburnt/WeirdLand
Game/main.py
main.py
py
4,372
python
en
code
1
github-code
36
[ { "api_name": "logging.getLogger", "line_number": 5, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 7, "usage_type": "call" }, { "api_name": "os.path.join", "line_number": 8, "usage_type": "call" }, { "api_name": "WGF.shared", "line_numbe...
21928404743
# Crawling with CSS selectors
'''
# What is CSS? Cascading Style Sheets
It styles (colour, size, etc.) the skeleton laid out by the HTML.
A structure can be identified by the name of its style (CSS selector).

CSS selector
- Because pages are usually built by naming elements with CSS selectors directly,
  there is a good chance an element can be located with a CSS selector.
- Element type method
  The tag names are the basis of the selector.
- ID method
  If an id value exists inside the tag, the id value is the basis of the selector.
- Class method
  If a class value exists inside the tag, the class value is the basis of the selector.
- Advanced qualifier method
  Used as the basis when there is no id or class, e.g. nth-child
'''

from bs4 import BeautifulSoup as BS  # Makes HTML easy to work with
import requests as req  # Used for HTTP communication

# Workaround for the "module 'collections' has no attribute 'Callable'" error
# The collections.Callable reference moved to collections.abc.Callable in Python 3.10,
# so the removed attribute causes this error
import collections
if not hasattr(collections, 'Callable'):
    collections.Callable = collections.abc.Callable

# ------------------------------------------------------------------

url = "https://finance.naver.com/marketindex/exchangeList.naver"

res = req.get(url)
# print(res.text)

soup = BS(res.text, "html.parser")

# Output test
# print(soup.title)
# print(soup.title.string)

# Find the desired area
tds = soup.find_all("td")

names = []
for td in soup.select("td.tit"):
    names.append(td.get_text(strip=True))

prices = []
for td in soup.select("td.sale"):
    prices.append(td.get_text(strip=True))

print(names)
print(prices)
sh95fit/Python_study
Python_Crawling/Crawling_Static/Static_Study06.py
Static_Study06.py
py
1,717
python
ko
code
1
github-code
36
[ { "api_name": "collections.Callable", "line_number": 31, "usage_type": "attribute" }, { "api_name": "collections.abc", "line_number": 31, "usage_type": "attribute" }, { "api_name": "requests.get", "line_number": 35, "usage_type": "call" }, { "api_name": "bs4.Beaut...
4932293353
from transformers import GPT2Tokenizer import json import matplotlib.pyplot as plt ## This file was used to find the length of our longest input in tokens and visualize the distribution of token length with open('./combined_data.jsonl', 'r') as json_file: json_list = list(json_file) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") longestString = '' longestLength = 0 listOfLengths = [] newList = [] for json_str in json_list: result = json.loads(json_str) string = result['text'] listOfIds = tokenizer(string)['input_ids'] tokens = len(listOfIds) if tokens <= 70: newList.append(result) listOfLengths.append(tokens) # if (length > longestLength): # longestLength = length # longestString = string plt.hist(listOfLengths) plt.show() # print(len(newList)) # with open('combined_and_cut_data.jsonl', 'w') as f: # for entry in newList: # json.dump(entry, f) # f.write('\n')
brennanem/CS324FinalProject
check_tokens.py
check_tokens.py
py
958
python
en
code
0
github-code
36
[ { "api_name": "transformers.GPT2Tokenizer.from_pretrained", "line_number": 11, "usage_type": "call" }, { "api_name": "transformers.GPT2Tokenizer", "line_number": 11, "usage_type": "name" }, { "api_name": "json.loads", "line_number": 18, "usage_type": "call" }, { "...
1767000003
from setuptools import setup, find_packages REQUIREMENTS = [] with open("requirements.txt") as f: for line in f.readlines(): line = line.strip() if len(line) == 0: continue REQUIREMENTS.append(line) setup( name = "wallstreet", version = "0.1", packages = find_packages(exclude=["*.test", "*.test.*", "test.*", "test"]), entry_points = { "console_scripts" : [ 'wallstreet = wallstreet.bin.__main__:main' ] }, install_requires = REQUIREMENTS, setup_requires=['pytest-runner'], tests_require = ['pytest'] )
breakhearts/wallstreet
setup.py
setup.py
py
605
python
en
code
0
github-code
36
[ { "api_name": "setuptools.setup", "line_number": 11, "usage_type": "call" }, { "api_name": "setuptools.find_packages", "line_number": 14, "usage_type": "call" } ]
41076521456
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QVBoxLayout, QLabel


class AlertDialogClass(QDialog):
    """
    This class loads the alert dialog PyQt component
    """

    def __init__(self, title, message, parent=None):
        """
        Alert dialog class constructor
        :param title:   Dialog window title
        :param message: Message displayed in the dialog body
        :param parent:  Optional parent widget
        """
        QDialog.__init__(self, parent)

        self.setWindowTitle(title)

        q_btn = QDialogButtonBox.Ok  # | QDialogButtonBox.Cancel

        self.buttonBox = QDialogButtonBox(q_btn)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)

        self.layout = QVBoxLayout()
        message_label = QLabel(message)
        self.layout.addWidget(message_label)
        self.layout.addWidget(self.buttonBox)
        self.setLayout(self.layout)
samuelterra22/Analysis-of-antenna-coverage
src/main/python/dialogs/alert_dialog_class.py
alert_dialog_class.py
py
855
python
en
code
5
github-code
36
[ { "api_name": "PyQt5.QtWidgets.QDialog", "line_number": 6, "usage_type": "name" }, { "api_name": "PyQt5.QtWidgets.QDialog.__init__", "line_number": 16, "usage_type": "call" }, { "api_name": "PyQt5.QtWidgets.QDialog", "line_number": 16, "usage_type": "name" }, { "a...