hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d317ad165ee915f3ecb315380a7f117fb4e72a72 | 1,211 | py | Python | capstone/game/utils/play.py | davidrobles/mlnd-capstone-code | 19ca88aaa137665af147da9bbd0e510829a14cf1 | ["MIT"] | 2 | 2017-04-13T18:31:39.000Z | 2017-05-06T05:14:12.000Z | capstone/game/utils/play.py | davidrobles/mlnd-capstone-code | 19ca88aaa137665af147da9bbd0e510829a14cf1 | ["MIT"] | null | null | null | capstone/game/utils/play.py | davidrobles/mlnd-capstone-code | 19ca88aaa137665af147da9bbd0e510829a14cf1 | ["MIT"] | null | null | null | from __future__ import print_function

from ...utils import print_header


def play_match(game, players, verbose=True):
    """Plays a match between the given players"""
    if verbose:
        print(game)
    while not game.is_over():
        cur_player = players[game.cur_player()]
        move = cur_player.choose_move(game.copy())
        game.make_move(move)
        if verbose:
            print(game)


def play_series(game, players, n_matches=100, verbose=True):
    """
    Plays a series of 'n_matches' of a 'game' between
    the given 'players'.
    """
    if verbose:
        print_header('Series')
        # print('Game:', game.name)
        print('Players:', players)
        print('No. Matches: %d\n' % n_matches)
    counters = {'W': 0, 'L': 0, 'D': 0}
    for n_match in range(1, n_matches + 1):
        if verbose:
            print('Match %d/%d:' % (n_match, n_matches), end=' ')
        new_game = game() if callable(game) else game.copy()
        play_match(new_game, players, verbose=False)
        outcomes = new_game.outcomes()
        counters[outcomes[0]] += 1
        if verbose:
            print(outcomes)
    if verbose:
        print('\nOutcomes:', counters)
    return counters
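
# --- Editor's usage sketch (not part of the original module) ---
# Assuming hypothetical Game/Player classes that provide the interface used
# above (is_over/cur_player/copy/make_move/outcomes and choose_move):
#
#     from capstone.game.games import TicTacToe      # hypothetical import path
#     from capstone.game.players import RandPlayer   # hypothetical import path
#     results = play_series(TicTacToe, [RandPlayer(), RandPlayer()], n_matches=10)
#     # results is the counters dict, e.g. {'W': 4, 'L': 3, 'D': 3}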
| 30.275 | 65 | 0.593724 | 158 | 1,211 | 4.386076 | 0.341772 | 0.077922 | 0.121212 | 0.049062 | 0.103896 | 0.103896 | 0.103896 | 0 | 0 | 0 | 0 | 0.011377 | 0.274154 | 1,211 | 39 | 66 | 31.051282 | 0.777019 | 0.11313 | 0 | 0.275862 | 0 | 0 | 0.055291 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.068966 | 0 | 0.172414 | 0.344828 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d31c45f4f233234e4306a8d0af8c554db40cfdf6 | 1,701 | py | Python | modules_lib/plugin_models/application.py | hephaestus9/Ironworks | 37be48e37f63530dd7bf82618948ef82522699a0 | ["MIT"] | 1 | 2021-05-17T08:31:07.000Z | 2021-05-17T08:31:07.000Z | modules_lib/plugin_models/application.py | hephaestus9/Ironworks | 37be48e37f63530dd7bf82618948ef82522699a0 | ["MIT"] | null | null | null | modules_lib/plugin_models/application.py | hephaestus9/Ironworks | 37be48e37f63530dd7bf82618948ef82522699a0 | ["MIT"] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys

from ironworks import serverTools


def highest_position(model):
    highest_position = 0
    items = model.query.all()
    for item in items:
        if item.position > highest_position:
            highest_position = item.position
    return highest_position + 1


class Application():

    def __init__(self):
        """Table for one application in the applications module"""
        self.apps = serverTools.getPrefsDb()
        self.apps.beginTransaction()
        self.apps.checkTable("applications", [
            {"name": "id", "type": "integer primary key autoincrement"},
            {"name": "name", "type": "text"},
            {"name": "url", "type": "text"},
            {"name": "description", "type": "text"},
            {"name": "image", "type": "text"},
            {"name": "position", "type": "integer"}])
        self.apps.commitTransaction()

        """if position is None:
            self.position = highest_position(Application)
        else:
            self.position = position"""

    def getApplications(self, orderBy=False):
        cursor = self.apps.select("applications", orderBy)
        return cursor.fetchall()

    def getAppById(self, app):
        cursor = self.apps.select("applications", where={"id": app})
        return cursor.fetchone()

    def setApp(self, name, url, description, image, position):
        data = [
            {"name": name},
            {"url": url},
            {"description": description},
            {"image": image},
            {"position": position}]
        self.apps.beginTransaction()
        self.apps.insertOrUpdate("applications", data)
        self.apps.commitTransaction() | 28.35 | 72 | 0.572604 | 165 | 1,701 | 5.842424 | 0.393939 | 0.093361 | 0.049793 | 0.041494 | 0.06639 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002455 | 0.281599 | 1,701 | 60 | 73 | 28.35 | 0.786416 | 0.044092 | 0 | 0 | 0 | 0 | 0.146703 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.078947 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d31caf04cb9c133ec4d3c2c4da1c9cc2f19a7a62 | 553 | py | Python | module_functions.py | dcazabat/SYNOP_PY | 7a9f1804858d72b1ec2584fed887689161036ad7 | ["MIT"] | null | null | null | module_functions.py | dcazabat/SYNOP_PY | 7a9f1804858d72b1ec2584fed887689161036ad7 | ["MIT"] | null | null | null | module_functions.py | dcazabat/SYNOP_PY | 7a9f1804858d72b1ec2584fed887689161036ad7 | ["MIT"] | null | null | null | import platform
import os


def creation_date(path_to_file):
    if platform.system() == 'Windows':
        return os.path.getctime(path_to_file)
    else:
        stat = os.stat(path_to_file)
        try:
            return stat.st_birthtime
        except AttributeError:
            # We're probably on Linux. No easy way to get creation dates here,
            # so we'll settle for when its content was last modified.
            return stat.st_mtime
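
# Editor's note (illustrative, not in the original file): on Windows this
# returns the true creation time; elsewhere it falls back to st_birthtime
# (macOS/BSD) or the last-modification time (Linux), e.g.:
#
#     creation_date("module_functions.py")  # -> a Unix timestamp such as 1624608000.0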

def clear():
    if os.name == "nt":
        os.system("cls")
    else:
        os.system("clear") | 27.65 | 78 | 0.60217 | 76 | 553 | 4.263158 | 0.618421 | 0.055556 | 0.092593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.305606 | 553 | 20 | 79 | 27.65 | 0.84375 | 0.216998 | 0 | 0.125 | 0 | 0 | 0.039443 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d323c510c079cd6b3286991351384c97bf206346 | 2,731 | py | Python | Display_Analysis_Phyionet_Bio_Signals.py | Philip-M-Schmidt/Physionet_Biosignals_Display_Analysis_Automated | 19d2161f110bf76bf5d6ea396f117ecd7b179d80 | ["Apache-2.0"] | 1 | 2021-12-19T11:19:51.000Z | 2021-12-19T11:19:51.000Z | Display_Analysis_Phyionet_Bio_Signals.py | Philip-M-Schmidt/Physionet_Biosignals_Display_Analysis_Automated | 19d2161f110bf76bf5d6ea396f117ecd7b179d80 | ["Apache-2.0"] | null | null | null | Display_Analysis_Phyionet_Bio_Signals.py | Philip-M-Schmidt/Physionet_Biosignals_Display_Analysis_Automated | 19d2161f110bf76bf5d6ea396f117ecd7b179d80 | ["Apache-2.0"] | null | null | null | import tkinter.filedialog
import tkinter.simpledialog
from tkinter import messagebox
import numpy as np
import matplotlib.pyplot as plt
import wfdb
import peakutils
from scipy import signal
import pandas as pd
# To display any physiological signal from physionet, a dat-File needs to have a complementary hea-File in the same directory.
# Otherwise the display won't work
# awesome tutorial: https://www.youtube.com/watch?v=WyjGCEWU4zY&t=317s
file = tkinter.filedialog.askopenfilename()
data_type = tkinter.simpledialog.askstring('Select Type of File', 'type in: hea, dat or atr ')
n_samples = tkinter.simpledialog.askinteger('Number of samples',
                                            'Type in the number of samples you want to be displayed (example: 3000, 6000, 10000 etc.)')

if file.endswith('.atr'):
    file = file[:-4]
if file.endswith('.dat'):
    file = file[:-4]
if file.endswith('.hea'):
    file = file[:-4]
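# Editor's note: this strips the extension, e.g. '.../record.dat' -> '.../record',
# since wfdb expects the record name without an extension.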
#Define ecg
record = wfdb.rdrecord(file, sampto=n_samples)
ann = wfdb.rdann(file, data_type, sampto=n_samples)
#Filerecord
file_record = record.__dict__
#print(file_record)
wfdb.plot_items(signal=record.p_signal, title='ECG Signal',ann_samp=[ann.sample, ann.sample], time_units='samples', figsize=(10,4))
#Detect R-Peaks
signal_slice = np.ndarray.flatten(record.p_signal[0:n_samples])
smooth_signal = signal.cspline1d(signal_slice, lamb=1000) #smoothing the signal (filtering)
#r_peak_index = peakutils.indexes(smooth_signal, thres = 0.45, min_dist = 0.1) # first peak detection option
peak_index = signal.find_peaks_cwt(smooth_signal, widths= np.arange(60,80)) # second peak detection option
fig, ax = plt.subplots()
ax.set_title('Detect R peak')
ax.plot(signal_slice)
p_min_distance = -20 # marking for p-wave example
p_max_distance = -60
t_min_distance = 20 # marking for t-wave example
t_max_distance = 100
for peak in peak_index:
    ax.axvline(x=peak, color='r')
    # ax.axvspan(peak + p_max_distance, peak + p_min_distance, alpha=0.2)  # mark for p-wave
    # ax.axvspan(peak + t_max_distance, peak + t_min_distance, alpha=0.2)  # mark for t-wave
plt.show()
#Display HR
RR_intervall = np.diff(peak_index) / record.fs
heart_rate = 60 / RR_intervall #BPM
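# Sanity check (editor's note): an RR interval of 0.8 s corresponds to
# 60 / 0.8 = 75 beats per minute.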
fig,ax = plt.subplots()
ax.set_title('Heart Rate Diagram')
ax.plot(heart_rate)
plt.show()
# Display HRV
df = pd.DataFrame(heart_rate, columns=["Heart Rate"], dtype=float)
HRV = [df.describe()]  # the HRV is nothing more than the ordinary standard deviation (std) over many heart-rate values
messagebox.showinfo('Statistical analysis of the ECG', HRV)
#Display ECG-Record
messagebox.showinfo('Properties of the selected file', file_record) | 35.934211 | 134 | 0.706335 | 409 | 2,731 | 4.581907 | 0.415648 | 0.024013 | 0.022412 | 0.01174 | 0.103522 | 0.078975 | 0.054429 | 0 | 0 | 0 | 0 | 0.023799 | 0.184548 | 2,731 | 76 | 135 | 35.934211 | 0.817692 | 0.28561 | 0 | 0.152174 | 0 | 0.021739 | 0.152104 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.195652 | 0 | 0.195652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d32d473a0b25c8a65ddd3bb4653d7114e10ce38b | 15,635 | py | Python | google_patent_scraper/main.py | TroisLiu/google_patent_scraper | 3e157b0d0d357f17ffaeec66afb2e58f5fcde68d | ["MIT"] | null | null | null | google_patent_scraper/main.py | TroisLiu/google_patent_scraper | 3e157b0d0d357f17ffaeec66afb2e58f5fcde68d | ["MIT"] | null | null | null | google_patent_scraper/main.py | TroisLiu/google_patent_scraper | 3e157b0d0d357f17ffaeec66afb2e58f5fcde68d | ["MIT"] | null | null | null | # Scrape #
from urllib.request import Request, urlopen
import urllib.parse
from urllib.error import HTTPError
from bs4 import BeautifulSoup
# json #
import json
# errors #
from .errors import *
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# Create scraper class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
class scraper_class:
    """
    Google scraper class used to scrape data from 'https://patents.google.com/'

    There are two primary ways to use the class:

        (1) Add list of patents to class and scrape all patents at once

            scraper = scraper_class()  # <- Initialize class

            # ~ Add patents to list ~ #
            scraper.add_patents('US2668287A')
            scraper.add_patents('US266827A')

            # ~ Scrape all patents ~ #
            scraper.scrape_all_patents()

            # ~ Get results of scrape ~ #
            patent_1_parsed = scraper.parsed_patents['US2668287A']
            patent_2_parsed = scraper.parsed_patents['US266827A']

        (2) Scrape each patent individually

            scraper = scraper_class()  # <- Initialize class

            # ~~ Scrape patents individually ~~ #
            patent_1 = 'US2668287A'
            patent_2 = 'US266827A'
            err_1, soup_1, url_1 = scraper.request_single_patent(patent_1)
            err_2, soup_2, url_2 = scraper.request_single_patent(patent_2)

            # ~ Parse results of scrape ~ #
            patent_1_parsed = scraper.get_scraped_data(soup_1, patent_1, url_1)
            patent_2_parsed = scraper.get_scraped_data(soup_2, patent_2, url_2)

    Attributes:
        - list_of_patents (list) : patents to be scraped
        - scrape_status (dict)   : status of request using patent
        - parsed_patents (dict)  : result of parsing patent html
        - return_abstract (bool) : boolean for whether the code should return the abstract
    """
    def __init__(self, return_abstract=False):
        self.list_of_patents = []
        self.scrape_status = {}
        self.parsed_patents = {}
        self.return_abstract = return_abstract

    def add_patents(self, patent):
        """Append patent to patent list attribute self.list_of_patents

        Inputs:
            - patent (str) : patent number
        """
        # ~ Check if patent is string ~ #
        if not isinstance(patent, str):
            raise(PatentClassError("'patent' variable must be a string"))
        # ~ Append patent to list to be scraped ~ #
        else:
            self.list_of_patents.append(patent)

    def delete_patents(self, patent):
        """Remove patent from patent list attribute self.list_of_patents

        Inputs:
            - patent (str) : patent number
        """
        # ~ Check if patent is in list ~ #
        if patent in self.list_of_patents:
            self.list_of_patents.pop(self.list_of_patents.index(patent))
        else:
            print('Patent {0} not in patent list'.format(patent))

    def add_scrape_status(self, patent, success_value):
        """Add status of scrape to dictionary self.scrape_status"""
        self.scrape_status[patent] = success_value
    def request_single_patent(self, patent, url=False):
        """Calls request function to retrieve google patent data and parses returned html using BeautifulSoup

        Returns:
            - Status of scrape <- String
            - Html of patent   <- BS4 object

        Inputs:
            - patent (str) : if url == False then patent is patent number
                             elif url == True then patent is google patent url
            - url (bool)   : determines whether patent is treated as patent number
                             or google patent url
        """
        try:
            if not url:
                url = 'https://patents.google.com/patent/{0}'.format(patent)
            else:
                url = patent
            print(url)
            req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
            webpage = urlopen(req).read()
            soup = BeautifulSoup(webpage, features="lxml")
            return(('Success', soup, url))
        except HTTPError as e:
            print('Patent: {0}, Error Status Code : {1}'.format(patent, e.code))
            return(e.code, '', url)

    def parse_citation(self, single_citation):
        """Parses patent citation, returning results as a dictionary

        Returns (variables returned in dictionary, following are key names):
            - patent_number (str) : patent number
            - priority_date (str) : priority date of patent
            - pub_date (str)      : publication date of patent

        Inputs:
            - single_citation (str) : html string from citation section in google patent html
        """
        try:
            patent_number = single_citation.find('span', itemprop='publicationNumber').get_text()
        except:
            patent_number = ''
        # ~ Get priority date ~ #
        try:
            priority_date = single_citation.find('td', itemprop='priorityDate').get_text()
        except:
            priority_date = ''
        # ~ Get publication date ~ #
        try:
            pub_date = single_citation.find('td', itemprop='publicationDate').get_text()
        except:
            pub_date = ''
        return({'patent_number': patent_number,
                'priority_date': priority_date,
                'pub_date': pub_date})
    def process_patent_html(self, soup):
        """ Parse patent html using BeautifulSoup module

        Returns (variables returned in dictionary, following are key names):
            - application_number (str)        : application number
            - inventor_name (json)            : inventors of patent
            - assignee_name_orig (json)       : original assignees to patent
            - assignee_name_current (json)    : current assignees to patent
            - pub_date (str)                  : publication date
            - filing_date (str)               : filing date
            - priority_date (str)             : priority date
            - grant_date (str)                : grant date
            - forward_cites_no_family (json)  : forward citations that are not family-to-family cites
            - forward_cites_yes_family (json) : forward citations that are family-to-family cites
            - backward_cites_no_family (json) : backward citations that are not family-to-family cites
            - backward_cites_yes_family (json): backward citations that are family-to-family cites
            - cpc_level1 (json) : cpc 1-tier id (e.g. A, B, ..., H), same as cpc_section_id in api/query of PatentViews
            - cpc_level2 (json) : cpc 2-tier id (e.g. A01, B01, ..., H99)
            - cpc_level3 (json) : cpc 3-tier id (e.g. A23B, A24F, ..., H01G), same as cpc_group_id in api/query of PatentViews
            - cpc_level4 (json) : cpc 4-tier id (e.g. F04D29, A24F11)
            - cpc_level5 (json) : cpc 5-tier id (e.g. F04D29/38, A24F11/00), same as cpc_subgroup_id in api/query of PatentViews

        Inputs:
            - soup (str) : html string of google patent html
        """
        try:
            inventor_name = [{'inventor_name': x.get_text()} for x in soup.find_all('dd', itemprop='inventor')]
        except:
            inventor_name = []
        # Assignee #
        try:
            assignee_name_orig = [{'assignee_name': x.get_text()} for x in soup.find_all('dd', itemprop='assigneeOriginal')]
        except:
            assignee_name_orig = []
        try:
            assignee_name_current = [{'assignee_name': x.get_text()} for x in soup.find_all('dd', itemprop='assigneeCurrent')]
        except:
            assignee_name_current = []
        # Publication Date #
        try:
            pub_date = soup.find('dd', itemprop='publicationDate').get_text()
        except:
            pub_date = ''
        # Application Number #
        try:
            application_number = soup.find('dd', itemprop="applicationNumber").get_text()
        except:
            application_number = ''
        # Filing Date #
        try:
            filing_date = soup.find('dd', itemprop='filingDate').get_text()
        except:
            filing_date = ''
        # Loop through all events #
        list_of_application_events = soup.find_all('dd', itemprop='events')
        priority_date = ''
        grant_date = ''
        for app_event in list_of_application_events:
            # Get information #
            try:
                title_info = app_event.find('span', itemprop='type').get_text()
                timeevent = app_event.find('time', itemprop='date').get_text()
                if title_info == 'priority':
                    priority_date = timeevent
                if title_info == 'granted':
                    grant_date = timeevent
                if title_info == 'publication' and pub_date == '':
                    pub_date = timeevent
            except:
                continue
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
        #  Citations
        #
        #  All citations are of the same format
        #   - Find all citations
        #   - If there are any citations, parse each individually using "parse_citation"
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #

        # ~~~ Forward Citations (No Family to Family) ~~~ #
        found_forward_cites_orig = soup.find_all('tr', itemprop="forwardReferencesOrig")
        forward_cites_no_family = []
        if len(found_forward_cites_orig) > 0:
            for citation in found_forward_cites_orig:
                forward_cites_no_family.append(self.parse_citation(citation))
        # ~~~ Forward Citations (Yes Family to Family) ~~~ #
        found_forward_cites_family = soup.find_all('tr', itemprop="forwardReferencesFamily")
        forward_cites_yes_family = []
        if len(found_forward_cites_family) > 0:
            for citation in found_forward_cites_family:
                forward_cites_yes_family.append(self.parse_citation(citation))
        # ~~~ Backward Citations (No Family to Family) ~~~ #
        found_backward_cites_orig = soup.find_all('tr', itemprop='backwardReferences')
        backward_cites_no_family = []
        if len(found_backward_cites_orig) > 0:
            for citation in found_backward_cites_orig:
                backward_cites_no_family.append(self.parse_citation(citation))
        # ~~~ Backward Citations (Yes Family to Family) ~~~ #
        found_backward_cites_family = soup.find_all('tr', itemprop='backwardReferencesFamily')
        backward_cites_yes_family = []
        if len(found_backward_cites_family) > 0:
            for citation in found_backward_cites_family:
                backward_cites_yes_family.append(self.parse_citation(citation))
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
        #  Get abstract
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
        abstract_text = ''
        if self.return_abstract:
            # Get abstract #
            abstract = soup.find('meta', attrs={'name': 'DC.description'})
            # Get text
            if abstract:
                abstract_text = abstract['content']

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
        #  Get cpc category
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
        cpc_level1 = []
        cpc_level2 = []
        cpc_level3 = []
        cpc_level4 = []
        cpc_level5 = []
        cpc_data = soup.find_all('span', itemprop="Code")
        # Get text
        if cpc_data:
            for cpc_item in cpc_data:
                cpc_code = cpc_item.contents[0]
                if len(cpc_code) == 1 and cpc_code not in cpc_level1:
                    cpc_level1.append(cpc_code)
                elif len(cpc_code) == 3 and cpc_code not in cpc_level2:
                    cpc_level2.append(cpc_code)
                elif len(cpc_code) == 4 and cpc_code not in cpc_level3:
                    cpc_level3.append(cpc_code)
                elif len(cpc_code) > 4:
                    if '/' in cpc_code:
                        cpc_l4_code = cpc_code[:cpc_code.index('/')]
                        if cpc_code not in cpc_level5:
                            cpc_level5.append(cpc_code)
                        else:
                            pass
                    else:
                        cpc_l4_code = cpc_code
                    if cpc_l4_code not in cpc_level4:
                        cpc_level4.append(cpc_l4_code)
                    else:
                        pass
                else:
                    pass

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
        #  Return data as a dictionary
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
        return({'inventor_name': json.dumps(inventor_name),
                'assignee_name_orig': json.dumps(assignee_name_orig),
                'assignee_name_current': json.dumps(assignee_name_current),
                'pub_date': pub_date,
                'priority_date': priority_date,
                'grant_date': grant_date,
                'filing_date': filing_date,
                'forward_cite_no_family': json.dumps(forward_cites_no_family),
                'forward_cite_yes_family': json.dumps(forward_cites_yes_family),
                'backward_cite_no_family': json.dumps(backward_cites_no_family),
                'backward_cite_yes_family': json.dumps(backward_cites_yes_family),
                'abstract_text': abstract_text,
                'cpc_level1': json.dumps(cpc_level1),
                'cpc_level2': json.dumps(cpc_level2),
                'cpc_level3': json.dumps(cpc_level3),
                'cpc_level4': json.dumps(cpc_level4),
                'cpc_level5': json.dumps(cpc_level5)})
    def get_scraped_data(self, soup, patent, url):
        # ~~ Parse individual patent ~~ #
        parsing_individ_patent = self.process_patent_html(soup)

        # ~~ Add url + patent to dictionary ~~ #
        parsing_individ_patent['url'] = url
        parsing_individ_patent['patent'] = patent

        # ~~ Return patent info ~~ #
        return(parsing_individ_patent)

    def scrape_all_patents(self):
        """ Scrapes all patents in list self.list_of_patents using function "request_single_patent".
            If you want to scrape a single patent without adding it to the class variable,
            use "request_single_patent" function as a method on the class. See the doc string
            in the class module for an example.
        """
        # ~ Check if there are any patents ~ #
        if len(self.list_of_patents) == 0:
            raise(NoPatentsError("no patents to scrape specified in 'patent' variable: add patent using class.add_patents([<PATENTNUMBER>])"))
        # ~ Loop through list of patents and scrape them ~ #
        else:
            for patent in self.list_of_patents:
                error_status, soup, url = self.request_single_patent(patent)
                # ~ Add scrape status variable ~ #
                self.add_scrape_status(patent, error_status)
                if error_status == 'Success':
                    self.parsed_patents[patent] = self.get_scraped_data(soup, patent, url)
                else:
                    self.parsed_patents[patent] = {}
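

# --- Editor's usage sketch (illustrative; mirrors the class docstring above) ---
if __name__ == '__main__':
    scraper = scraper_class(return_abstract=True)
    scraper.add_patents('US2668287A')
    scraper.scrape_all_patents()   # performs one HTTP request per patent
    print(scraper.scrape_status)   # e.g. {'US2668287A': 'Success'}
    print(scraper.parsed_patents['US2668287A'].get('grant_date'))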
| 42.371274 | 145 | 0.550432 | 1,680 | 15,635 | 4.886905 | 0.153571 | 0.014495 | 0.020585 | 0.020706 | 0.31754 | 0.242875 | 0.163703 | 0.090743 | 0.048843 | 0.035201 | 0 | 0.01447 | 0.337 | 15,635 | 368 | 146 | 42.486413 | 0.777542 | 0.368724 | 0 | 0.203297 | 0 | 0 | 0.105934 | 0.024368 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049451 | false | 0.016484 | 0.032967 | 0 | 0.087912 | 0.016484 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d32ec0367097fa0f09e21b517c44535883c2818e | 1,697 | py | Python | pattersonschwartz/pattersonschwartz/spiders/homelistdetails.py | click-here/Scrapy-Talk | b9eaaca7caeeafb7a6cbc4e147ef52dab7733148 | ["MIT"] | 1 | 2020-09-14T17:32:39.000Z | 2020-09-14T17:32:39.000Z | pattersonschwartz/pattersonschwartz/spiders/homelistdetails.py | click-here/Scrapy-Talk | b9eaaca7caeeafb7a6cbc4e147ef52dab7733148 | ["MIT"] | null | null | null | pattersonschwartz/pattersonschwartz/spiders/homelistdetails.py | click-here/Scrapy-Talk | b9eaaca7caeeafb7a6cbc4e147ef52dab7733148 | ["MIT"] | 1 | 2019-09-04T09:56:26.000Z | 2019-09-04T09:56:26.000Z | # -*- coding: utf-8 -*-
import scrapy
from pattersonschwartz.items import ListingItem


class HomelistdetailsSpider(scrapy.Spider):
    name = 'homelistdetails'
    allowed_domains = ['pattersonschwartz.com']
    start_urls = ['http://www.pattersonschwartz.com/forsale/Harford/priceMin_250000/priceMax_650000']

    def parse(self, response):
        for r in response.css('.psr-result'):
            listingurl = r.css('.psr-more-info::attr(href)').get()
            item = ListingItem()
            item['price'] = r.css('.psr-price::text').get()
            item['cdp'] = r.css('.psr-address > span:nth-of-type(1)::text').get()
            item['address'] = r.css('.psr-address > span:nth-of-type(2)::text').get()
            item['listing'] = listingurl
            request = scrapy.Request(url=response.urljoin(listingurl), callback=self.parse_details)
            request.meta['item'] = item
            yield request

    def get_primary_details(self, details):
        all_strongs = details.xpath('.//strong/text()').getall()
        detail_dict = {}
        for s in all_strongs:
            value = details.xpath('//strong[text()=$val]/following-sibling::text()', val=s).get()
            cleaned_strong = s.replace(':', '')
            detail_dict[cleaned_strong] = value.strip()
        return detail_dict

    def parse_details(self, response):
        item = response.meta['item']
        item['fulladdress'] = response.css('.property-location::text').get().strip()
        item['description'] = response.xpath('.//*[@class="secondary-details"]/div/div/p/text()').get()
        item['primarydetails'] = self.get_primary_details(response.css('.primary-details'))
        yield item
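
# Editor's note (not part of the original spider): from the Scrapy project
# root this spider can be run and its items exported with
#     scrapy crawl homelistdetails -o listings.json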
| 41.390244 | 103 | 0.616971 | 195 | 1,697 | 5.282051 | 0.446154 | 0.029126 | 0.027184 | 0.027184 | 0.052427 | 0.052427 | 0.052427 | 0.052427 | 0 | 0 | 0 | 0.011202 | 0.210961 | 1,697 | 40 | 104 | 42.425 | 0.758028 | 0.012375 | 0 | 0 | 0 | 0 | 0.27957 | 0.12963 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.064516 | 0 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d330581c4ead509c0ffddb1d0dcb4f4ba775168c | 3,212 | py | Python | CodingInterviews/python/63_get_median.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | ["MIT"] | null | null | null | CodingInterviews/python/63_get_median.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | ["MIT"] | null | null | null | CodingInterviews/python/63_get_median.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | ["MIT"] | null | null | null | #!/usr/bin/env python3
# coding:utf-8
from random import randrange


class Solution:
    def __init__(self):
        """
        The smaller half of the values goes into a max-heap;
        the larger half goes into a min-heap.
        """
        self.littleValMaxHeap = []
        self.bigValMinHeap = []
        self.maxHeapCount = 0
        self.minHeapCount = 0
    def Insert(self, num):
        def cmpMinHeap(t, p):
            return t < p

        def cmpMaxHeap(t, p):
            return p < t

        if self.minHeapCount < self.maxHeapCount:
            self.minHeapCount += 1
            if num < self.littleValMaxHeap[0]:
                self.createHeap(self.bigValMinHeap,
                                self.littleValMaxHeap[0],
                                cmpMinHeap)
                self.adjustHeap(self.littleValMaxHeap, num, cmpMaxHeap)
            else:
                self.createHeap(self.bigValMinHeap, num, cmpMinHeap)
        else:
            self.maxHeapCount += 1
            if len(self.littleValMaxHeap) == 0:
                self.createHeap(self.littleValMaxHeap, num, cmpMaxHeap)
            elif self.bigValMinHeap[0] < num:
                self.createHeap(self.littleValMaxHeap,
                                self.bigValMinHeap[0],
                                cmpMaxHeap)
                self.adjustHeap(self.bigValMinHeap, num, cmpMinHeap)
            else:
                self.createHeap(self.littleValMaxHeap, num, cmpMaxHeap)

    def GetMedian(self, n=None):  # n is unused; kept because the caller passes an extra argument
        if self.maxHeapCount == 0:
            return -1
        elif self.minHeapCount < self.maxHeapCount:
            return self.littleValMaxHeap[0]
        else:
            return (self.littleValMaxHeap[0] + self.bigValMinHeap[0]) / 2.0
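    # Editor's note: after an odd number of inserts the max-heap holds one more
    # element than the min-heap, so its root is the median; after an even number
    # of inserts the median is the mean of the two heap roots.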
    def createHeap(self, heap, num, cmpFun):
        heap.append(num)
        tmpIdx = len(heap) - 1
        while tmpIdx > 0:
            parentIdx = (tmpIdx - 1) >> 1
            if cmpFun(heap[tmpIdx], heap[parentIdx]):
                heap[tmpIdx], heap[parentIdx] = heap[parentIdx], heap[tmpIdx]
                tmpIdx = parentIdx
            else:
                break

    def adjustHeap(self, heap, num, cmpFun):
        size = len(heap)
        heap[0] = num
        tmpIdx = 0
        while tmpIdx < size:
            leftIdx = tmpIdx*2 + 1
            rightIdx = tmpIdx*2 + 2
            if rightIdx < size:
                if cmpFun(heap[leftIdx], heap[rightIdx]):
                    target = leftIdx
                else:
                    target = rightIdx
            elif leftIdx < size:
                target = leftIdx
            else:
                break
            if cmpFun(heap[target], heap[tmpIdx]):
                heap[target], heap[tmpIdx] = heap[tmpIdx], heap[target]
                tmpIdx = target
            else:
                break

if __name__ == "__main__":
    s = Solution()
    # nums = [randrange(10, 100) for num in range(15)]
    nums = [5, 2, 3, 4, 1, 6, 7, 0, 8]
    print(f">>> nums = {nums}")
    print(f">>> Median = {s.GetMedian()}")
    for num in nums:
        s.Insert(num)
        print(f">>> MinHeap = {s.bigValMinHeap}")
        print(f">>> MaxHeap = {s.littleValMaxHeap}")
    print(f">>> Median = {s.GetMedian()}")
| 32.444444 | 77 | 0.505915 | 315 | 3,212 | 5.120635 | 0.244444 | 0.123993 | 0.065096 | 0.046497 | 0.225666 | 0.140112 | 0 | 0 | 0 | 0 | 0 | 0.022335 | 0.386675 | 3,212 | 98 | 78 | 32.77551 | 0.796447 | 0.04203 | 0 | 0.2125 | 0 | 0 | 0.051611 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0875 | false | 0 | 0.0125 | 0.025 | 0.175 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d33144bb76d08ef23148781e070bd332ae38e8d8 | 1,234 | py | Python | src/utils.py | mac389/LVSI | ecfe762d03f332568ea703b3217952d71967a634 | ["Apache-2.0"] | null | null | null | src/utils.py | mac389/LVSI | ecfe762d03f332568ea703b3217952d71967a634 | ["Apache-2.0"] | null | null | null | src/utils.py | mac389/LVSI | ecfe762d03f332568ea703b3217952d71967a634 | ["Apache-2.0"] | null | null | null | import json, itertools
import numpy as np
import pandas as pd
from statsmodels.stats.inter_rater import cohens_kappa


def kappa(f1, f2, pathologists, cols_to_parse, outname, ratings):
    contingency_tables = {}
    lvsi = {}
    for (pathologist_one, pathologist_two) in itertools.combinations(pathologists, 2):
        KEY = '%s-%s' % (pathologist_one, pathologist_two)
        df_one = pd.read_excel(f1, pathologist_one, parse_cols=cols_to_parse, convert_float=True)
        df_two = pd.read_excel(f2, pathologist_two, parse_cols=cols_to_parse, convert_float=True)
        patho_one_ratings = np.array([i[0][0] if len(i[0]) > 0 else -1 for i in df_one.apply(np.nonzero, axis=1).values]).astype(int)
        patho_two_ratings = np.array([i[0][0] if len(i[0]) > 0 else -1 for i in df_two.apply(np.nonzero, axis=1).values]).astype(int)
        # -1 indicates an invalid value in case the rater forgot to fill the form out
        table = [[np.logical_and(patho_one_ratings == rating_one, patho_two_ratings == rating_two).sum()
                  for rating_one in ratings]
                 for rating_two in ratings]
        contingency_tables[KEY] = table
        lvsi[KEY] = cohens_kappa(table).kappa
    # json.dump writes text, so the file must be opened in text mode
    json.dump(lvsi, open('../data/%s.json' % outname, 'w'))
    return contingency_tables
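
# Editor's usage sketch (hypothetical file names and a 4-point rating scale):
#     tables = kappa('raters.xlsx', 'raters.xlsx',
#                    pathologists=['A', 'B', 'C'],
#                    cols_to_parse='B:E',
#                    outname='lvsi_kappa',
#                    ratings=range(4))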
| 39.806452 | 126 | 0.744733 | 204 | 1,234 | 4.294118 | 0.372549 | 0.063927 | 0.013699 | 0.09589 | 0.315068 | 0.315068 | 0.246575 | 0.246575 | 0.086758 | 0.086758 | 0 | 0.016698 | 0.126418 | 1,234 | 30 | 127 | 41.133333 | 0.795918 | 0.060778 | 0 | 0 | 0 | 0 | 0.023316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.2 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3319c458f622667537db3beeda90e607c800fbc | 4,363 | py | Python | raspberry/Car-obd/fuction tests/test_obdsim.py | dsd-m2m/vehicle-tracking | 7542c5d13bf3e5a3a2fdbf9de8e7dd80a4820a51 | ["MIT"] | 2 | 2018-10-10T12:11:00.000Z | 2018-11-18T12:14:36.000Z | raspberry/Car-obd/fuction tests/test_obdsim.py | dsd-m2m/vehicle-tracking | 7542c5d13bf3e5a3a2fdbf9de8e7dd80a4820a51 | ["MIT"] | 6 | 2018-10-16T21:19:30.000Z | 2018-12-10T15:39:49.000Z | raspberry/Car-obd/fuction tests/test_obdsim.py | dsd-m2m/vehicle-tracking | 7542c5d13bf3e5a3a2fdbf9de8e7dd80a4820a51 | ["MIT"] | 4 | 2018-10-28T18:43:32.000Z | 2018-12-09T18:41:03.000Z |
import time

import pytest
from obd import commands, Unit

# NOTE: This is purposefully tuned slightly higher than the ELM's default
#       message timeout of 200 milliseconds. This prevents us from
#       inadvertently marking the first query of an async connection as
#       null, since it may be the case that the first transaction incurs the
#       ELM's internal timeout.
STANDARD_WAIT_TIME = 0.3


@pytest.fixture(scope="module")
def obd(request):
    """provides an OBD connection object for obdsim"""
    import obd
    port = request.config.getoption("--port")
    return obd.OBD(port)


@pytest.fixture(scope="module")
def asynchronous(request):
    """provides an OBD *Async* connection object for obdsim"""
    import obd
    port = request.config.getoption("--port")
    return obd.Async(port)


def good_rpm_response(r):
    return (not r.is_null()) and \
           (r.value.u == Unit.rpm) and \
           (r.value >= 0.0 * Unit.rpm)
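
# Editor's note (not part of the original file): these tests are skipped
# unless pytest is pointed at the serial port of a running simulator, e.g.
#     pytest test_obdsim.py --port=/dev/pts/7
# where /dev/pts/7 stands for whatever pty your obdsim instance exposes.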

@pytest.mark.skipif(not pytest.config.getoption("--port"),
                    reason="needs --port=<port> to run")
def test_supports(obd):
    assert(len(obd.supported_commands) > 0)
    assert(obd.supports(commands.RPM))


@pytest.mark.skipif(not pytest.config.getoption("--port"),
                    reason="needs --port=<port> to run")
def test_rpm(obd):
    r = obd.query(commands.RPM)
    assert(good_rpm_response(r))


# Async tests

@pytest.mark.skipif(not pytest.config.getoption("--port"),
                    reason="needs --port=<port> to run")
def test_async_query(asynchronous):
    rs = []
    asynchronous.watch(commands.RPM)
    asynchronous.start()

    for i in range(5):
        time.sleep(STANDARD_WAIT_TIME)
        rs.append(asynchronous.query(commands.RPM))

    asynchronous.stop()
    asynchronous.unwatch_all()

    # make sure we got data
    assert(len(rs) > 0)
    assert(all([good_rpm_response(r) for r in rs]))

@pytest.mark.skipif(not pytest.config.getoption("--port"),
                    reason="needs --port=<port> to run")
def test_async_callback(asynchronous):
    rs = []
    asynchronous.watch(commands.RPM, callback=rs.append)
    asynchronous.start()

    time.sleep(STANDARD_WAIT_TIME)

    asynchronous.stop()
    asynchronous.unwatch_all()

    # make sure we got data
    assert(len(rs) > 0)
    assert(all([good_rpm_response(r) for r in rs]))


@pytest.mark.skipif(not pytest.config.getoption("--port"),
                    reason="needs --port=<port> to run")
def test_async_paused(asynchronous):
    assert(not asynchronous.running)

    asynchronous.watch(commands.RPM)
    asynchronous.start()
    assert(asynchronous.running)

    with asynchronous.paused() as was_running:
        assert(not asynchronous.running)
        assert(was_running)

    assert(asynchronous.running)

    asynchronous.stop()
    assert(not asynchronous.running)

@pytest.mark.skipif(not pytest.config.getoption("--port"),
                    reason="needs --port=<port> to run")
def test_async_unwatch(asynchronous):
    watched_rs = []
    unwatched_rs = []

    asynchronous.watch(commands.RPM)
    asynchronous.start()

    for i in range(5):
        time.sleep(STANDARD_WAIT_TIME)
        watched_rs.append(asynchronous.query(commands.RPM))

    with asynchronous.paused():
        asynchronous.unwatch(commands.RPM)

    for i in range(5):
        time.sleep(STANDARD_WAIT_TIME)
        unwatched_rs.append(asynchronous.query(commands.RPM))

    asynchronous.stop()

    # the watched commands
    assert(len(watched_rs) > 0)
    assert(all([good_rpm_response(r) for r in watched_rs]))

    # the unwatched commands
    assert(len(unwatched_rs) > 0)
    assert(all([r.is_null() for r in unwatched_rs]))


@pytest.mark.skipif(not pytest.config.getoption("--port"),
                    reason="needs --port=<port> to run")
def test_async_unwatch_callback(asynchronous):
    a_rs = []
    b_rs = []

    asynchronous.watch(commands.RPM, callback=a_rs.append)
    asynchronous.watch(commands.RPM, callback=b_rs.append)
    asynchronous.start()

    time.sleep(STANDARD_WAIT_TIME)

    with asynchronous.paused():
        asynchronous.unwatch(commands.RPM, callback=b_rs.append)

    time.sleep(STANDARD_WAIT_TIME)

    asynchronous.stop()
    asynchronous.unwatch_all()

    assert(all([good_rpm_response(r) for r in a_rs + b_rs]))
    assert(len(a_rs) > len(b_rs))
| 27.613924 | 76 | 0.66766 | 570 | 4,363 | 4.998246 | 0.189474 | 0.050193 | 0.060021 | 0.046683 | 0.643033 | 0.618112 | 0.541594 | 0.50509 | 0.474201 | 0.436995 | 0 | 0.004338 | 0.207426 | 4,363 | 157 | 77 | 27.789809 | 0.819549 | 0.116892 | 0 | 0.574257 | 0 | 0 | 0.064718 | 0 | 0 | 0 | 0 | 0 | 0.188119 | 1 | 0.09901 | false | 0 | 0.049505 | 0.009901 | 0.178218 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d334711681aa97c717a08b726e722c51a9979e9c | 740 | py | Python | wind.py | RyosukeDTomita/vaisaraPlot | ea0e9f95ece1ee92419b0d6c99558c655ddcc28c | ["MIT"] | 1 | 2021-06-25T08:07:14.000Z | 2021-06-25T08:07:14.000Z | wind.py | RyosukeDTomita/vaisaraPlot | ea0e9f95ece1ee92419b0d6c99558c655ddcc28c | ["MIT"] | null | null | null | wind.py | RyosukeDTomita/vaisaraPlot | ea0e9f95ece1ee92419b0d6c99558c655ddcc28c | ["MIT"] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
# made by tomita
from readcsv import readcsv

fig = plt.figure(figsize=(4.5, 7))
ax = fig.add_subplot(111)

data = "./data/mirai_rs41_20210619_2330.txt"
u, v, h = [], [], []
readcsv = readcsv(data)
df = readcsv.df
# index the extracted DataFrame, df, to pull each wind component
U = df["Ecomp"]
V = df["Ncomp"]
H = df["HeightMSL"]
for i in range(0, len(U), 1):
    if i % 150 == 0:
        u.append(U[i])
        v.append(V[i])
        h.append(H[i])

zero = np.zeros(len(h))
ax.barbs(zero, h, u, v)
ax.tick_params(labelbottom=False,
               labelleft=True,
               labelright=False,
               labeltop=False)
ax.tick_params(bottom=False,
               left=True,
               right=False,
               top=False)
plt.show()
| 22.424242 | 44 | 0.589189 | 112 | 740 | 3.839286 | 0.544643 | 0.009302 | 0.055814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047273 | 0.256757 | 740 | 32 | 45 | 23.125 | 0.734545 | 0.018919 | 0 | 0 | 0 | 0 | 0.074586 | 0.048343 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.107143 | 0 | 0.107143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3349825d7ea4049412af69f489a6488ecdb3b9e | 3,020 | py | Python | data_as_code/_commands.py | Mikuana/data_as_code | 339f0c6bcd4e318ad925ceb4ccd6149f980d4032 | ["MIT"] | 2 | 2021-05-18T22:04:22.000Z | 2021-07-24T19:52:49.000Z | data_as_code/_commands.py | Mikuana/data_as_code | 339f0c6bcd4e318ad925ceb4ccd6149f980d4032 | ["MIT"] | 1 | 2021-03-12T22:56:45.000Z | 2021-03-12T22:56:45.000Z | data_as_code/_commands.py | Mikuana/data_as_code | 339f0c6bcd4e318ad925ceb4ccd6149f980d4032 | ["MIT"] | null | null | null | import argparse
import os
import subprocess
import sys
import venv
from pathlib import Path
from typing import Union

from data_as_code import __version__


def menu(args=None):
    args = _parse_args(args)
    args.func(args)


def _parse_args(args: list = None):
    program = 'data-as-code'
    parser = argparse.ArgumentParser(
        prog=program,
        description="data-as-code utilities"
    )
    parser.add_argument(
        '--version', action='version',
        version=f'{program} version {__version__}'
    )

    commands = parser.add_subparsers(metavar='')

    # init submodule
    cmd_init = commands.add_parser(
        'init', help='initialize a project folder'
    )
    cmd_init.set_defaults(func=initialize_folder)
    cmd_init.add_argument(
        '-d', type=str, default='.',
        help='path to project folder. Defaults to current directory'
    )
    cmd_init.add_argument(
        '-x', action='store_true', default=False,
        help='ignore error if folder or objects already exist'
    )
    cmd_init.add_argument(
        '--git', action='store_true', default=False,
        help='include git artifacts in folder'
    )

    if not len(sys.argv) > 1:  # if no args, print help to stderr
        parser.print_help(sys.stderr)
        sys.exit(1)
    else:
        return parser.parse_args(args)
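
# Editor's note (illustrative, not in the original file): once installed as a
# console script, a project is scaffolded with e.g.
#     data-as-code init -d my_project -x
# where -d picks the target folder and -x tolerates existing files, per the
# arguments registered above.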

def initialize_folder(arg: argparse.Namespace):
    _InitializeFolder(path=arg.d, exist_ok=arg.x)


class _InitializeFolder:
    def __init__(self, path: Union[Path, str] = None, exist_ok=False):
        self.wd = Path(path or '.').absolute()
        self.exist_ok = exist_ok

        self.wd.mkdir(exist_ok=True)
        self.make_folder('data/')
        self.make_folder('metadata/')
        self.make_recipe('recipe.py')
        self.make_gitignore('.gitignore')
        self.make_pipenv()

    def make_folder(self, folder):
        Path(self.wd, folder).mkdir(exist_ok=self.exist_ok)

    def make_venv(self, folder):
        venv.create(Path(self.wd, folder))

    def _make_file(self, x: str, txt: str):
        p = Path(self.wd, x)
        if p.exists() and self.exist_ok is False:
            raise FileExistsError(f"{x} already exists in {self.wd}")
        else:
            if isinstance(txt, bytes):
                p.write_bytes(txt)
            else:
                p.write_text(txt)

    def make_recipe(self, file):
        self._make_file(file, 'this is my recipe')

    def make_pipenv(self):
        cwd = os.getcwd()
        try:
            os.chdir(self.wd)
            Path('Pipfile').touch()
            _pipenv_init()
        finally:
            os.chdir(cwd)

    def make_gitignore(self, file):
        patterns = [
            'data/',
        ]
        txt = '\n'.join(patterns)
        self._make_file(file, txt)


def _pip_freeze() -> bytes:
    return subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])


def _pipenv_init():
    reqs = ['requests', 'tqdm']
    subprocess.check_output([sys.executable, '-m', 'pipenv', 'install'] + reqs) | 26.725664 | 79 | 0.611589 | 383 | 3,020 | 4.634465 | 0.331593 | 0.031549 | 0.016901 | 0.030423 | 0.074366 | 0.074366 | 0 | 0 | 0 | 0 | 0 | 0.000899 | 0.263576 | 3,020 | 113 | 79 | 26.725664 | 0.797212 | 0.015563 | 0 | 0.067416 | 0 | 0 | 0.136991 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134831 | false | 0 | 0.089888 | 0.011236 | 0.258427 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d338d1cde2b451ce153dc7246f3b2ecbf270831f | 1,788 | py | Python | delivery/views.py | wesky93/shipping_system | ac3d5d5998e20b9008c90cb972f6bdfc5d3c43b0 | ["MIT"] | null | null | null | delivery/views.py | wesky93/shipping_system | ac3d5d5998e20b9008c90cb972f6bdfc5d3c43b0 | ["MIT"] | null | null | null | delivery/views.py | wesky93/shipping_system | ac3d5d5998e20b9008c90cb972f6bdfc5d3c43b0 | ["MIT"] | null | null | null | # Create your views here.
from datetime import datetime

from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response

from delivery.serializer import TaskSerializer
from order.models import Order

task_response = openapi.Response('주문 정보', TaskSerializer)  # "order information"


class TaskViewSet(viewsets.ViewSet):
    def get_queryset(self):
        return Order.objects.filter(date=datetime.now().date())

    @swagger_auto_schema(
        operation_id='today_order_list',
        operation_summary='오늘 배송 물량',  # "today's delivery volume"
        responses={"200": task_response}
    )
    def list(self, request):
        orders = self.get_queryset().filter(deliverer__user=request.user)
        serializer = TaskSerializer(orders, many=True)
        return Response(serializer.data)

    @swagger_auto_schema(
        operation_id='pickup',
        operation_summary='픽업', )  # "pickup"
    @action(detail=True, methods=['post'])
    def pickup(self, request, pk=None):
        qs = self.get_queryset().filter(deliverer__user=request.user).filter(pk=pk).first()
        if qs:
            qs.departure()
            qs.save()
            return Response(status=status.HTTP_200_OK)
        return Response(status=status.HTTP_400_BAD_REQUEST)

    @swagger_auto_schema(
        operation_id='deliver_completed',
        operation_summary='배송 완료', )  # "delivery completed"
    @action(detail=True, methods=['post'])
    def done(self, request, pk=None):
        qs = self.get_queryset().filter(deliverer__user=request.user).filter(pk=pk).first()
        if qs:
            qs.finish()
            qs.save()
            return Response(status=status.HTTP_200_OK)
        return Response(status=status.HTTP_400_BAD_REQUEST)
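
# Editor's sketch (illustrative, not in the original file): a typical router
# registration for this ViewSet in urls.py; 'tasks' and the basename are
# assumed names.
#
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register(r'tasks', TaskViewSet, basename='task')
#     urlpatterns = router.urls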
| 33.735849 | 91 | 0.690157 | 221 | 1,788 | 5.384615 | 0.357466 | 0.058824 | 0.057143 | 0.087395 | 0.442857 | 0.372269 | 0.321849 | 0.321849 | 0.284034 | 0.284034 | 0 | 0.010556 | 0.205257 | 1,788 | 52 | 92 | 34.384615 | 0.826882 | 0.012864 | 0 | 0.348837 | 0 | 0 | 0.039705 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.186047 | 0.023256 | 0.44186 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d34286269400aa25ffc4c5c20ffc4d7826caf90e | 7,659 | py | Python | mqtt_pi0_client/pi0_main.py | hengying/mqtt_at_home | ed1cc5dd0a57cc46e57ec5edeb48ae1d019ebae5 | ["BSD-3-Clause"] | null | null | null | mqtt_pi0_client/pi0_main.py | hengying/mqtt_at_home | ed1cc5dd0a57cc46e57ec5edeb48ae1d019ebae5 | ["BSD-3-Clause"] | null | null | null | mqtt_pi0_client/pi0_main.py | hengying/mqtt_at_home | ed1cc5dd0a57cc46e57ec5edeb48ae1d019ebae5 | ["BSD-3-Clause"] | null | null | null |
import time
import os
from queue import Queue

from PIL import Image, ImageDraw, ImageFont

from button_enum import ButtonType
from event import *
from config import Config

ROW_HEIGHT = 16


class App():
    def __init__(self):
        self._input_devices = []
        self._subsystems = []
        self._layers = []
        self._event_queue = Queue()
        self._config = Config()

        self._background_color = 'BLACK'
        self._foreground_color = 'WHITE'

        self.__init_devices()
        self.__init_subsystems()

        self._width = self._display.width
        self._height = self._display.height
        self._row_count = self._height // self.row_height

        self.__init_paint_system()
        self.__active_subsystem(self._root_menu.title)

        self._client_id = str(time.time())
        self._need_refresh = True
        self._no_user_input_count = 0
        self._in_screen_saving_mode = False
    def __init_devices(self):
        if self._config.has_ups_lite:
            from UPS_Lite import UPSLite
            self._upslite = UPSLite()
            self._input_devices.append(self._upslite)

        if self._config.has_lcd_hat_1in3:
            from lcd_hat_1in3 import LCDHat
            self._display = LCDHat()
            self._input_devices.append(self._display)

        if self._config.has_oled_hat_1in3:
            from oled_hat_1in3 import OLEDHat
            self._display = OLEDHat()
            self._input_devices.append(self._display)
            self._background_color = 255  # black and white are inverted on this panel
            self._foreground_color = 0

        if self._config.use_pygame:
            from desktop_win import DesktopWin
            self._display = DesktopWin()
            self._input_devices.append(self._display)

        if self._config.use_mqtt:
            from mqtt_client import MQTTClient
            self._mqtt_client = MQTTClient(self._event_queue)
    def __init_subsystems(self):
        from root_menu import RootMenu
        self._root_menu = RootMenu()
        self._subsystems.append(self._root_menu)

        if self._config.has_music_subsystem:
            from music_subsystem import MusicSubSystem
            self._subsystems.append(MusicSubSystem())

        if self._config.has_alarm_subsystem:
            from alarm_subsystem import AlarmSubSystem
            self._subsystems.append(AlarmSubSystem())

        from about_subsystem import AboutSubsystem
        self._subsystems.append(AboutSubsystem())

        subsystem_name_list = [s.title for s in self._subsystems[1:]]
        self._root_menu.set_subsystems(subsystem_name_list)

    def __init_paint_system(self):
        if self._config.use_rgb_color:
            self.__image = Image.new('RGB', (self._width, self._height))
        else:
            self.__image = Image.new('1', (self._width, self._height))
        self.__image_draw = ImageDraw.Draw(self.__image)
        self.__font16 = ImageFont.truetype("fonts/uni_dzh.ttf", 16)
    def __active_subsystem(self, subsystem_name):
        for subsystem in self._subsystems:
            if subsystem.title == subsystem_name:
                self._layers.append(subsystem.active(self))
                self._need_refresh = True
                self._active_subsystem = subsystem_name

    def is_actived(self, subsystem_name):
        return self._active_subsystem == subsystem_name

    def system_shutdown(self):
        os.system('sudo halt')
        time.sleep(100)

    @property
    def font16(self):
        return self.__font16

    @property
    def width(self):
        return self._width

    @property
    def row_height(self):
        return ROW_HEIGHT

    @property
    def row_count(self):
        return self._row_count

    @property
    def height(self):
        return self._height

    @property
    def client_id(self):
        return self._client_id

    @property
    def background_color(self):
        return self._background_color

    @property
    def foreground_color(self):
        return self._foreground_color

    def refresh_win(self):
        self._event_queue.put_nowait(RefreshWinEvent())

    def add_event(self, event):
        self._event_queue.put_nowait(event)

    def add_layer(self, layer):
        self._layers.append(layer)
        self._need_refresh = True

    def show_message(self, str):
        BIAS_Y = -2
        self.__image_draw.rectangle((0, 0, self._width, self._height), outline=self.background_color,
                                    fill=self.background_color)
        self.__image_draw.text((0, int(self.row_height * 1.5) + BIAS_Y), str,
                               font=self.font16, fill=self.foreground_color)
        self._display.display(self.__image)
    def run(self):
        while True:
            for input_device in self._input_devices:
                input_device.update(self._event_queue)

            self._no_user_input_count += 1

            while not self._event_queue.empty():
                event = self._event_queue.get_nowait()

                if is_user_input(event):
                    if self._in_screen_saving_mode:
                        self._in_screen_saving_mode = False
                        self._need_refresh = True
                        self._no_user_input_count = 0
                        # ignore this input event
                        continue
                    self._need_refresh = True
                    self._no_user_input_count = 0

                if type(event) == PowerLowEvent:
                    print('System shut down!')
                    self.system_shutdown()
                    return
                elif type(event) == QuitEvent:
                    print('Quitting!')
                    return
                elif type(event) == PopLayer:
                    count = event.count
                    if count > len(self._layers):
                        count = len(self._layers) - 1
                    for i in range(count):
                        self._layers.pop()
                    if len(self._layers) == 1:
                        self._active_subsystem = self._root_menu.title
                    self._need_refresh = True
                elif type(event) == ActiveSubsystem:
                    self.__active_subsystem(event.subsystem_name)
                else:
                    for subsystem in self._subsystems:
                        subsystem.handle_event(event)
                    self._layers[-1].handle_event(event)

            if self._need_refresh:
                if not self._in_screen_saving_mode:
                    print('refresh screen...')
                    self.__image_draw.rectangle((0, 0, self._width, self._height), outline=self.background_color, fill=self.background_color)
                    self._layers[-1].paint(self.__image_draw)
                    self._display.display(self.__image)
                self._need_refresh = False

            if self._no_user_input_count > self._config.screen_saver_wait_count:
                if not self._in_screen_saving_mode:
                    print('screen saving mode...')
                    self._in_screen_saving_mode = True
                    self.__image_draw.rectangle((0, 0, self._width, self._height), outline=self.background_color, fill=self.background_color)
                    self._display.display(self.__image)

            if self._in_screen_saving_mode:
                time.sleep(1 / 10.0)
            else:
                time.sleep(1 / 60.0)

try:
    app = App()
    app.run()
except IOError as e:
    print(e)
except KeyboardInterrupt:
    print("ctrl + c:")
    exit()
| 33.445415 | 141 | 0.592375 | 858 | 7,659 | 4.904429 | 0.189977 | 0.019962 | 0.040637 | 0.029943 | 0.27424 | 0.223384 | 0.173954 | 0.153517 | 0.118346 | 0.096958 | 0 | 0.01063 | 0.324455 | 7,659 | 228 | 142 | 33.592105 | 0.802667 | 0.003656 | 0 | 0.196721 | 0 | 0 | 0.014683 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10929 | false | 0 | 0.087432 | 0.04918 | 0.262295 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3443a482a767fc4c1af1d31c45a7e7fe38149c7 | 15,895 | py | Python | test_bank/borrowing/tests/test_views.py | Ursidours/django-tech-test | a96e1d73c9221458ca68b999d817881863a60584 | ["BSD-3-Clause"] | 1 | 2021-11-13T01:30:30.000Z | 2021-11-13T01:30:30.000Z | test_bank/borrowing/tests/test_views.py | arnaudblois/django-tech-test | a96e1d73c9221458ca68b999d817881863a60584 | ["BSD-3-Clause"] | null | null | null | test_bank/borrowing/tests/test_views.py | arnaudblois/django-tech-test | a96e1d73c9221458ca68b999d817881863a60584 | ["BSD-3-Clause"] | null | null | null | from django.test import RequestFactory
from test_plus.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from test_bank.users.tests.factories import UserFactory
from .factories import BorrowerProfileFactory, BusinessFactory, LoanFactory
from decimal import Decimal
from ..views import (
    home_view,
    BorrowerCreateView,
    BusinessCreateView,
    BusinessUpdateView,
    LoanCreateView,
    LoanDetailView,
    cancel_loan_request,
    verify_phone,
)
from test_bank.borrowing.helper_functions import get_verification_code
from test_bank.borrowing.models import BorrowerProfile, Business, Loan

class BaseBorrowingTestCase(TestCase):

    def setUp(self):
        """
        Creates a new user 'user-0' and logs it in on a test client.
        """
        UserFactory.reset_sequence()
        self.user = UserFactory()
        self.client = Client()
        self.client.login(username=self.user.username, password='password')

    def run_test_login_required(self):
        """
        Makes sure that if a user is not logged in,
        she is redirected.
        """
        response = Client().get(self.url)
        self.assertEqual(
            response.status_code, 302
        )

    def run_test_borrower_required(self):
        """
        Checks that a user who does not have a completed borrower profile
        is redirected to borrowing:home.
        """
        response = self.client.get(self.url, follow=True)
        self.assertRedirects(
            response, reverse("borrowing:home"), status_code=302, target_status_code=200
        )
class TestHome(BaseBorrowingTestCase):
""" Test case for the home page of the borrower section """
def setUp(self):
super().setUp()
self.url = reverse('borrowing:home')
def test_login_required(self):
self.run_test_login_required()
def test_new_user_should_apply_to_borrower(self):
""" makes sure a logged-in user with no profile has a link to become borrower"""
response = self.client.get(self.url)
self.assertContains(
response,
'href="{0}"'.format(reverse("borrowing:activate_account")),
status_code=200,
)
def test_borrower_no_business_links(self):
"""
checks a borrower with no business has a link to
create a new business but no link to create a loan
"""
borrower = BorrowerProfileFactory(user=self.user)
response = self.client.get(self.url)
self.assertContains(
response, 'href="{0}"'.format(reverse("borrowing:create_business")),
status_code=200
)
self.assertNotIn('href="{0}"'.format(reverse("borrowing:create_loan")), str(response.content))
def test_borrower_with_business(self):
"""
checks a borrower with businesses has a link to create new loans and
        has all businesses and loans listed
"""
borrower = BorrowerProfileFactory(user=self.user)
business1 = BusinessFactory(owner=borrower)
loan1 = LoanFactory(business=business1, borrower=borrower)
loan2 = LoanFactory(business=business1, borrower=borrower)
business2 = BusinessFactory(owner=borrower)
response = self.client.get(self.url)
self.assertContains(
response,
'href="{0}"'.format(reverse("borrowing:create_business")),
status_code=200,
)
self.assertContains(
response,
'href="{0}"'.format(reverse("borrowing:create_loan"))
)
# Link to update business without loans; no link for those with a loan
self.assertContains(
response,
'href="{0}"'.format(reverse("borrowing:update_business", kwargs={'pk': 2})),
)
self.assertNotContains(
response,
'href="{0}"'.format(reverse("borrowing:update_business", kwargs={'pk': 1})),
)
# Link to see the details of loan 2
self.assertContains(
response,
'href="{0}"'.format(reverse("borrowing:loan_detail", kwargs={'pk': 2})),
)
# ------------------------------------------------
# BORROWER VIEWS
# ------------------------------------------------
class TestBorrowerCreateView(BaseBorrowingTestCase):
def setUp(self):
super().setUp()
self.url = reverse('borrowing:activate_account')
def test_login_required(self):
self.run_test_login_required()
def test_redirect_home_if_already_borrower(self):
"""
once created, this profile can no longer be modified
-> redirect to borrowing:home
"""
borrower = BorrowerProfileFactory(user=self.user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_get_success_url(self):
"""" should redirect to the page to setup a new business """
self.assertEqual(
BorrowerCreateView().get_success_url(),
reverse('borrowing:create_business')
)
def test_creation(self):
"""
checks a new borrower profile has been created upon submission of
        valid data (tests with invalid data are in test.forms)
"""
data = {
'phone_number': "+447123567890",
'code': get_verification_code("+447123567890"),
'has_signed': 'on',
'first_name': 'Jane',
'last_name': 'Doe',
}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
self.user.refresh_from_db()
self.assertEqual(self.user.first_name, 'Jane')
borrower = BorrowerProfile.objects.get(user=self.user)
self.assertEqual(borrower.phone_number, "+447123567890")
# ------------------------------------------------
# BUSINESS VIEWS
# ------------------------------------------------
class TestBusinessCreateView(BaseBorrowingTestCase):
""" Test Case for the CBV Business Create View """
def setUp(self):
super().setUp()
self.view = BusinessCreateView.as_view()
self.url = reverse("borrowing:create_business")
def test_login_required(self):
self.run_test_login_required()
def test_redirect_home_if_not_borrower(self):
self.run_test_borrower_required()
def test_get_success_url(self):
"""" upon success, redirect to borrowing:home"""
self.assertEqual(
BusinessCreateView(object=BusinessFactory()).get_success_url(),
reverse('borrowing:home')
)
def test_creation(self):
"""
checks a new business profile has been created upon submission of
valid data
"""
borrower = BorrowerProfileFactory(user=self.user)
data = {
"name": "Test Business",
"address": "42 test street, London",
"company_number": '01234567',
"sector": "R",
}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
business = borrower.business_set.first()
self.assertEqual(business.name, 'Test Business')
class TestBusinessUpdateView(BaseBorrowingTestCase):
def setUp(self):
super().setUp()
self.borrower = BorrowerProfileFactory(user=self.user)
self.business = BusinessFactory(owner=self.borrower)
self.url = reverse("borrowing:update_business", kwargs={"pk": self.business.pk})
def test_login_required(self):
self.run_test_login_required()
def test_redirect_home_if_not_borrower(self):
""" makes sure access is restricted to approved borrower """
c = Client()
new_user = UserFactory()
c.login(username=new_user.username, password="password")
response = c.post(self.url, {}, follow=True)
self.assertRedirects(
response, reverse('borrowing:home'),
status_code=302,
target_status_code=200
)
def test_get_success_url(self):
"""" should redirect to the page to setup a new business """
"""" upon success, redirect to borrowing:home"""
self.assertEqual(
BusinessUpdateView(object=self.business).get_success_url(),
reverse('borrowing:home')
)
def test_successful_update(self):
"""
posts valid data and checks the Business is updated accordingly
"""
data = {
"name": "Edited Business",
"address": "42 edited street, London",
"company_number": '76543210',
"sector": "R",
}
response = self.client.post(self.url, data)
self.assertRedirects(response, reverse('borrowing:home'), status_code=302, target_status_code=200)
        # Re-fetch the object from the db and check it has been updated
business = Business.objects.get(pk=1)
self.assertEqual(business.name, "Edited Business")
def test_denied_update_to_others_businesses(self):
"""
The user should be denied access to businesses belonging to other
users
"""
other_business = BusinessFactory()
self.url = reverse("borrowing:update_business", kwargs={"pk": other_business.pk})
response = self.client.post(self.url, {})
self.assertEqual(response.status_code, 404)
class TestBusinessDeleteView(BaseBorrowingTestCase):
def setUp(self):
super().setUp()
self.borrower = BorrowerProfileFactory(user=self.user)
self.business = BusinessFactory(owner=self.borrower)
self.url = reverse('borrowing:delete_business', kwargs={"pk": self.business.pk})
def test_login_required(self):
self.run_test_login_required()
def test_redirect_home_if_not_borrower(self):
""" makes sure access is restricted to approved borrower """
c = Client()
new_user = UserFactory()
c.login(username=new_user.username, password="password")
response = c.post(self.url, {}, follow=True)
self.assertEqual(response.status_code, 404)
    def test_successful_deletion(self):
        """
        posts to the delete endpoint and checks the Business has been destroyed as expected
"""
response = self.client.post(self.url, {})
self.assertRedirects(response, reverse('borrowing:home'), status_code=302, target_status_code=200)
with self.assertRaises(Business.DoesNotExist):
Business.objects.get(pk=1)
def test_deny_deletion_to_others_businesses(self):
"""
The user should be denied access to businesses belonging to other
users
"""
other_business = BusinessFactory()
self.url = reverse("borrowing:delete_business", kwargs={"pk": other_business.pk})
response = self.client.post(self.url, {})
self.assertEqual(response.status_code, 404)
# ------------------------------------------------
# LOAN VIEWS
# ------------------------------------------------
class TestLoanCreateView(BaseBorrowingTestCase):
def setUp(self):
super().setUp()
self.url = reverse('borrowing:create_loan')
def test_login_required(self):
self.run_test_login_required()
def test_borrower_required(self):
self.run_test_borrower_required()
def test_get_success_url(self):
"""" should redirect to borrowing:home """
self.assertEqual(
LoanCreateView(object=LoanFactory()).get_success_url(),
reverse('borrowing:home')
)
def test_legit_user_valid_data(self):
"""
checks a new loan is created for a legit user inputting
correct data
"""
borrower = BorrowerProfileFactory(user=self.user)
business = BusinessFactory(owner=borrower)
data = {
'business': business.pk,
'reason': "Test",
'duration': 5,
'interest_rate': '0.05',
'amount': 15000
}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
new_loan = Loan.objects.last()
self.assertEqual(new_loan.interest_rate, Decimal('0.05'))
class TestLoanDetailView(BaseBorrowingTestCase):
"""
Test Case for Loan Details
"""
def setUp(self):
super().setUp()
borrower = BorrowerProfileFactory(user=self.user)
self.loan = LoanFactory(borrower=borrower)
self.url = reverse("borrowing:loan_detail", kwargs={"pk": self.loan.pk})
def test_login_required(self):
self.run_test_login_required()
def test_borrower_required(self):
"""
        redirects to borrowing:home if there is no valid borrower account
"""
c = Client()
new_user = UserFactory()
c.login(username=new_user.username, password="password")
response = c.get(self.url, {}, follow=True)
self.assertRedirects(
response, reverse("borrowing:home"), status_code=302, target_status_code=200
)
def test_legit_access(self):
"""
checks the rightful user can access the page
"""
response = self.client.get(self.url)
self.assertContains(
response,
"Loan of 20000.00 GBP",
status_code=200
)
def test_deny_access_to_others_loans(self):
"""
raise 404 if attempting access to other users' loan
"""
loan2 = LoanFactory()
url = reverse("borrowing:loan_detail", kwargs={"pk": loan2.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
class TestLoanCancelView(BaseBorrowingTestCase):
def setUp(self):
super().setUp()
borrower = BorrowerProfileFactory(user=self.user)
self.loan = LoanFactory(borrower=borrower)
self.url = reverse("borrowing:cancel_loan", kwargs={"pk": self.loan.pk})
def test_login_required(self):
self.run_test_login_required()
def test_borrower_required(self):
"""
if the user doesn't have a valid borrower profile
redirects her to borrowing:home
"""
        c = Client()
        new_user = UserFactory()
        c.login(username=new_user.username, password="password")
        response = c.post(self.url)
self.assertEqual(response.status_code, 302)
def test_deny_cancellation_others_loan(self):
"""
Accessing another loan should yield error 404
"""
loan2 = LoanFactory()
url = reverse("borrowing:cancel_loan", kwargs={"pk": loan2.pk})
response = self.client.post(url, {})
self.assertEqual(response.status_code, 404)
def test_cancelling_valid_loan(self):
"""
A cancelled loan has status == 4
"""
self.client.post(self.url, {})
loan = Loan.objects.get(pk=1)
self.assertEqual(loan.status, 4)
# ------------------------------------------------
# PHONE VIEWS
# ------------------------------------------------
class TestPhoneView(BaseBorrowingTestCase):
def setUp(self):
super().setUp()
self.url = reverse("borrowing:verify_phone")
def test_login_required(self):
self.run_test_login_required()
def test_valid_phone_number(self):
"""
A valid phone number should return a response with status code 200
"""
with self.settings(DEBUG=True):
response = self.client.post(self.url, {'phone_number': '+447123456789'})
self.assertEqual(response.status_code, 200)
def test_invalid_phone_number(self):
"""
An invalid phone number should return a response with status code 400
"""
response = self.client.post(self.url, {'phone_number': '+447189'})
self.assertEqual(response.status_code, 400)
| 33.533755 | 106 | 0.612394 | 1,708 | 15,895 | 5.552108 | 0.144614 | 0.026574 | 0.034061 | 0.039755 | 0.64062 | 0.585891 | 0.558579 | 0.523358 | 0.469577 | 0.432247 | 0 | 0.018441 | 0.259704 | 15,895 | 473 | 107 | 33.604651 | 0.787456 | 0.166342 | 0 | 0.489726 | 0 | 0 | 0.09146 | 0.039197 | 0 | 0 | 0 | 0 | 0.130137 | 1 | 0.164384 | false | 0.013699 | 0.034247 | 0 | 0.232877 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d34bf9dadda6e3b8a530d372c58b2f396d11b505 | 1,634 | py | Python | util/canvas_api_extension.py | Person314159/cs221bot | ac1c768c08105d094d830b1a7cefae3492b2f76e | [
"Unlicense"
] | 33 | 2020-10-03T05:33:19.000Z | 2022-03-26T13:03:51.000Z | util/canvas_api_extension.py | Person314159/cs221bot | ac1c768c08105d094d830b1a7cefae3492b2f76e | [
"Unlicense"
] | 2 | 2020-10-21T04:22:13.000Z | 2020-10-23T01:21:42.000Z | util/canvas_api_extension.py | Person314159/cs221bot | ac1c768c08105d094d830b1a7cefae3492b2f76e | [
"Unlicense"
] | 6 | 2020-10-08T03:07:49.000Z | 2021-02-14T04:40:50.000Z | from canvasapi.course import Course
from canvasapi.requester import Requester
from canvasapi.util import combine_kwargs, get_institution_url
def get_course_stream(course_id: int, base_url: str, access_token: str, **kwargs: dict) -> dict:
"""
Parameters
----------
course_id : `int`
Course id
base_url : `str`
Base URL of the Canvas instance's API
access_token : `str`
API key to authenticate requests with
Returns
-------
`dict`
JSON response for course activity stream
"""
access_token = access_token.strip()
base_url = get_institution_url(base_url)
requester = Requester(base_url, access_token)
response = requester.request(
"GET",
f"courses/{course_id}/activity_stream",
_kwargs=combine_kwargs(**kwargs)
)
return response.json()
def get_course_url(course_id: str, base_url: str) -> str:
"""
Parameters
----------
course_id : `str`
Course id
base_url : `str`
Base URL of the Canvas instance's API
Returns
-------
`str`
URL of course page
"""
base_url = get_institution_url(base_url)
return f"{base_url}/courses/{course_id}"
def get_staff_ids(course: Course) -> list[int]:
"""
Parameters
----------
course : `Course`
The course to get staff IDs for
Returns
-------
    `list[int]`
A list of the IDs of all professors and TAs in the given course.
"""
staff = course.get_users(enrollment_type=["teacher", "ta"])
    staff_ids = [user.id for user in staff]
return staff_ids
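# Editor's usage sketch (not part of the original module): the URL, token, and
# course id below are placeholders; Canvas/get_course come from the canvasapi package.
if __name__ == "__main__":
    from canvasapi import Canvas
    BASE_URL = "https://canvas.example.edu"  # placeholder
    TOKEN = "replace-with-a-real-api-key"    # placeholder
    COURSE_ID = 12345                        # placeholder
    print(get_course_url(str(COURSE_ID), BASE_URL))
    stream = get_course_stream(COURSE_ID, BASE_URL, TOKEN)
    print(f"{len(stream)} activity stream entries")
    course = Canvas(BASE_URL, TOKEN).get_course(COURSE_ID)
    print(get_staff_ids(course))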
| 22.383562 | 96 | 0.618115 | 206 | 1,634 | 4.703884 | 0.296117 | 0.086687 | 0.04128 | 0.03096 | 0.163055 | 0.163055 | 0.163055 | 0.099071 | 0.099071 | 0.099071 | 0 | 0 | 0.262546 | 1,634 | 72 | 97 | 22.694444 | 0.804149 | 0.354957 | 0 | 0.1 | 0 | 0 | 0.085556 | 0.072222 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.15 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d34e67cf20eff8898bbfe5d4cd62cab743664e32 | 778 | py | Python | music_generator/score_to_midi.py | wonsjb/MusicGenerator | 98290721227a6c69bcd186d02c4492b0e4e9fbe1 | [
"Apache-2.0"
] | null | null | null | music_generator/score_to_midi.py | wonsjb/MusicGenerator | 98290721227a6c69bcd186d02c4492b0e4e9fbe1 | [
"Apache-2.0"
] | null | null | null | music_generator/score_to_midi.py | wonsjb/MusicGenerator | 98290721227a6c69bcd186d02c4492b0e4e9fbe1 | [
"Apache-2.0"
] | 1 | 2019-04-08T09:39:58.000Z | 2019-04-08T09:39:58.000Z | import mido
import pickle
from tqdm import tqdm
from music_generator.score_util import score_to_events
def save_events(track, events):
current_time = 0
for this_time in tqdm(sorted(events), "Writing midi events"):
sleep_time = this_time - current_time
current_time = this_time
for msg in events[this_time]:
msg.time = sleep_time
sleep_time = 0
track.append(msg)
def save_to_midi(score, midi_file):
midi = mido.MidiFile()
midi_track = mido.MidiTrack()
midi.tracks.append(midi_track)
save_events(midi_track, score_to_events(score))
midi.save(midi_file)
def main(score_file, midi_file):
score = pickle.load(score_file)
score_file.close()
save_to_midi(score, midi_file.name)
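# Editor's sketch (an assumption, not part of the original module): minimal CLI
# wiring for main(); pickled score files must be opened in binary mode.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Convert a pickled score to a MIDI file")
    parser.add_argument("score_file", type=argparse.FileType("rb"))
    parser.add_argument("midi_file", type=argparse.FileType("wb"))
    args = parser.parse_args()
    main(args.score_file, args.midi_file)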
| 25.096774 | 65 | 0.694087 | 113 | 778 | 4.495575 | 0.300885 | 0.062992 | 0.051181 | 0.059055 | 0.090551 | 0.090551 | 0 | 0 | 0 | 0 | 0 | 0.003306 | 0.222365 | 778 | 30 | 66 | 25.933333 | 0.836364 | 0 | 0 | 0 | 0 | 0 | 0.024422 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.173913 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3580ed4695bebcfc2891b5c39c7a502e5bced9b | 2,083 | py | Python | Ch6/lle.py | jason-168/MLCode | 429c17e004fb41ba16c371416c8f73833ab8fc1d | [
"Xnet",
"X11"
] | 146 | 2016-05-24T02:55:53.000Z | 2022-03-23T14:54:42.000Z | Ch6/lle.py | coky/MarslandMLAlgo | 4277b24db88c4cb70d6b249921c5d21bc8f86eb4 | [
"Xnet",
"X11"
] | 1 | 2017-08-17T23:07:39.000Z | 2017-08-18T08:27:19.000Z | Ch6/lle.py | coky/MarslandMLAlgo | 4277b24db88c4cb70d6b249921c5d21bc8f86eb4 | [
"Xnet",
"X11"
] | 94 | 2016-05-06T12:34:33.000Z | 2022-03-30T03:31:04.000Z |
# Code from Chapter 6 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
# The Locally Linear Embedding algorithm, and the swissroll example
import pylab as pl
import numpy as np
def swissroll():
# Make the swiss roll dataset
N = 1000
noise = 0.05
t = 3*np.pi/2 * (1 + 2*np.random.rand(1,N))
h = 21 * np.random.rand(1,N)
data = np.concatenate((t*np.cos(t),h,t*np.sin(t))) + noise*np.random.randn(3,N)
return np.transpose(data), np.squeeze(t)
def lle(data,nRedDim=2,K=12):
ndata = np.shape(data)[0]
ndim = np.shape(data)[1]
d = np.zeros((ndata,ndata),dtype=float)
# Inefficient -- not matrices
for i in range(ndata):
for j in range(i+1,ndata):
for k in range(ndim):
d[i,j] += (data[i,k] - data[j,k])**2
d[i,j] = np.sqrt(d[i,j])
d[j,i] = d[i,j]
indices = d.argsort(axis=1)
neighbours = indices[:,1:K+1]
W = np.zeros((K,ndata),dtype=float)
for i in range(ndata):
Z = data[neighbours[i,:],:] - np.kron(np.ones((K,1)),data[i,:])
C = np.dot(Z,np.transpose(Z))
C = C+np.identity(K)*1e-3*np.trace(C)
W[:,i] = np.transpose(np.linalg.solve(C,np.ones((K,1))))
W[:,i] = W[:,i]/np.sum(W[:,i])
M = np.eye(ndata,dtype=float)
for i in range(ndata):
w = np.transpose(np.ones((1,np.shape(W)[0]))*np.transpose(W[:,i]))
j = neighbours[i,:]
        # print(np.shape(w), np.shape(np.dot(w,np.transpose(w))), np.shape(M[i,j]))
ww = np.dot(w,np.transpose(w))
for k in range(K):
M[i,j[k]] -= w[k]
M[j[k],i] -= w[k]
for l in range(K):
M[j[k],j[l]] += ww[k,l]
evals,evecs = np.linalg.eig(M)
ind = np.argsort(evals)
y = evecs[:,ind[1:nRedDim+1]]*np.sqrt(ndata)
return evals,evecs,y
data,t = swissroll()
evals,evecs,y = lle(data)
t -= t.min()
t /= t.max()
pl.scatter(y[:,0],y[:,1],s=50,c=t,cmap=pl.cm.gray)
pl.axis('off')
pl.show()
| 27.407895 | 83 | 0.62794 | 408 | 2,083 | 3.205882 | 0.35049 | 0.058869 | 0.009174 | 0.025229 | 0.108563 | 0.074924 | 0.047401 | 0.047401 | 0 | 0 | 0 | 0.027539 | 0.163226 | 2,083 | 75 | 84 | 27.773333 | 0.722892 | 0.264042 | 0 | 0.061224 | 0 | 0 | 0.001972 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.040816 | 0 | 0.122449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3591775c7b698ed72a3fd1c68e5a61ee0d47bde | 1,904 | py | Python | tests/functional/test_session.py | Andrew-Wichmann/pytest-localstack | 83ac2a5e39eb23cad402a3fcdaefd76db903168d | [
"MIT"
] | 63 | 2018-03-14T18:47:06.000Z | 2022-02-01T20:42:27.000Z | tests/functional/test_session.py | Andrew-Wichmann/pytest-localstack | 83ac2a5e39eb23cad402a3fcdaefd76db903168d | [
"MIT"
] | 43 | 2018-03-06T22:37:32.000Z | 2022-02-25T21:42:21.000Z | tests/functional/test_session.py | Andrew-Wichmann/pytest-localstack | 83ac2a5e39eb23cad402a3fcdaefd76db903168d | [
"MIT"
] | 23 | 2018-05-29T14:03:30.000Z | 2021-12-14T20:21:40.000Z | """Functional tests for pytest_localstack.session."""
import pytest
from pytest_localstack import constants, exceptions, service_checks, session
@pytest.mark.parametrize("test_service", sorted(constants.SERVICE_PORTS))
def test_RunningSession_individual_services(test_service, docker_client):
localstack_imagename = "localstack/localstack:latest"
docker_client.images.pull(localstack_imagename)
localstack_container = None
try:
port = constants.SERVICE_PORTS[test_service]
localstack_container = docker_client.containers.run(
localstack_imagename,
name="localstack_test",
detach=True,
auto_remove=True,
ports={port: port},
)
test_session = session.RunningSession("127.0.0.1", services=[test_service])
with test_session:
for service_name, service_check in service_checks.SERVICE_CHECKS.items():
if service_name == test_service:
service_check(test_session)
else:
with pytest.raises(exceptions.ServiceError):
                        test_session.service_hostname(service_name)
finally:
if localstack_container:
localstack_container.stop(timeout=10)
@pytest.mark.parametrize("test_service", sorted(constants.SERVICE_PORTS))
def test_LocalstackSession_individual_services(test_service, docker_client):
"""Test that each service can run individually."""
test_session = session.LocalstackSession(docker_client, services=[test_service])
with test_session:
for service_name, service_check in service_checks.SERVICE_CHECKS.items():
if service_name == test_service:
service_check(test_session)
else:
with pytest.raises(exceptions.ServiceError):
                    test_session.service_hostname(service_name)
| 41.391304 | 85 | 0.686975 | 199 | 1,904 | 6.276382 | 0.296482 | 0.08807 | 0.060849 | 0.040032 | 0.522018 | 0.522018 | 0.456365 | 0.456365 | 0.456365 | 0.456365 | 0 | 0.005483 | 0.233718 | 1,904 | 45 | 86 | 42.311111 | 0.850583 | 0.048319 | 0 | 0.432432 | 0 | 0 | 0.042199 | 0.015547 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.054054 | 0 | 0.108108 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d35a738b7eea5737405cf7a351e38faa5ee30171 | 1,410 | py | Python | 07-dash/src/__init__.py | dushyantkhosla/dataviz | 05a004a390d180d87be2d09873c3f7283c2a2e27 | [
"MIT"
] | null | null | null | 07-dash/src/__init__.py | dushyantkhosla/dataviz | 05a004a390d180d87be2d09873c3f7283c2a2e27 | [
"MIT"
] | 2 | 2021-03-25T22:11:43.000Z | 2022-03-02T22:43:47.000Z | 07-dash/src/__init__.py | dushyantkhosla/viz4ds | 05a004a390d180d87be2d09873c3f7283c2a2e27 | [
"MIT"
] | null | null | null | list_markets = [
'japan', 'portugal', 'russia', 'switzerland', 'romania', 'italy',
'ukraine', 'germany', 'monaco', 'denmark', 'netherlands', 'greece',
'spain', 'uk', 'israel', 'new-zealand', 'kazakhstan', 'canada',
'south-africa', 'lithuania', 'colombia', 'poland', 'turkish-cyprus',
'france', 'czech-republic', 'korea', 'slovak-republic', 'guatemala',
'slovenia', 'palestine', 'bulgaria', 'andorra', 'croatia',
'dominican-republic', 'latvia', 'armenia'
]
dict_pmiMarket_to_ISO = {
'Andorra': 'AD',
'Armenia': 'AM',
'Bulgaria': 'BG',
'Canada': 'CA',
'Switzerland': 'CH',
'Colombia': 'CO',
'Curacao': 'CW',
'Turkish Cyprus': 'CY',
'Cyprus': 'CY',
'Czech Republic': 'CZ',
'Germany': 'DE',
'Denmark': 'DK',
'Dominican Republic': 'DO',
'Spain': 'ES',
'France': 'FR',
'UK': 'GB',
'Greece': 'GR',
'Guatemala': 'GT',
'Croatia': 'HR',
'Israel': 'IL',
'Italy': 'IT',
'Japan': 'JP',
'Korea': 'KR',
'Kazakhstan': 'KZ',
'Lithuania': 'LT',
'Latvia': 'LV',
'Monaco': 'MC',
'Netherlands': 'NL',
'New Zealand': 'NZ',
'Poland': 'PL',
'Palestine': 'PS',
'Portugal': 'PT',
'Romania': 'RO',
'Serbia': 'RS',
'Russia': 'RU',
'Slovenia': 'SI',
'Slovak Republic': 'SK',
'Ukraine': 'UA',
'South Africa': 'ZA'
}
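# Editor's sketch (an assumption, not part of the original module): bridges the
# hyphenated slugs in list_markets to the title-case keys of dict_pmiMarket_to_ISO.
def market_to_iso(market_slug):
    # 'uk' is the one slug whose title-case form does not match its dict key
    name = 'UK' if market_slug == 'uk' else market_slug.replace('-', ' ').title()
    return dict_pmiMarket_to_ISO.get(name)
# e.g. market_to_iso('turkish-cyprus') -> 'CY', market_to_iso('uk') -> 'GB'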
| 27.115385 | 72 | 0.500709 | 132 | 1,410 | 5.318182 | 0.651515 | 0.02849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.249645 | 1,410 | 51 | 73 | 27.647059 | 0.663516 | 0 | 0 | 0 | 0 | 0 | 0.485816 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d35daef25397aae02fca1e7c5ccdd6b6b3650e3b | 4,530 | py | Python | seed_services_cli/identity_store.py | praekeltfoundation/seed-services-cli | 943fca5e70be086d4f29fd580103d7647a81f99a | [
"BSD-3-Clause"
] | null | null | null | seed_services_cli/identity_store.py | praekeltfoundation/seed-services-cli | 943fca5e70be086d4f29fd580103d7647a81f99a | [
"BSD-3-Clause"
] | null | null | null | seed_services_cli/identity_store.py | praekeltfoundation/seed-services-cli | 943fca5e70be086d4f29fd580103d7647a81f99a | [
"BSD-3-Clause"
] | null | null | null | import click
import json
import csv
import sys
from seed_services_client.identity_store import IdentityStoreApiClient
from demands import HTTPServiceError
if sys.version_info.major == 2:
file_open_mode = 'rb'
else:
file_open_mode = 'r'
def get_api_client(url, token):
return IdentityStoreApiClient(
api_url=url,
auth_token=token
)
@click.option(
'--address_type', '-t',
help='Address Type (e.g. msisdn)')
@click.option(
'--address', '-a',
help='Address (e.g. 27812345678)')
@click.pass_context
def search(ctx, address_type, address):
""" Find an identity
"""
api = get_api_client(ctx.obj.identity_store.api_url,
ctx.obj.identity_store.token)
if not all((address_type, address)):
raise click.UsageError(
"Please specify address type and address. See --help.")
click.echo("Looking for %s of %s." % (address_type, address))
results = list(
api.get_identity_by_address(address_type, address)['results'])
click.echo("Found %s results:" % len(results))
for result in results:
click.echo(result["id"])
@click.option(
'--identity', '-i',
help='Identity UUID')
@click.pass_context
def get_identity(ctx, identity):
""" Find a specific identity
"""
api = get_api_client(ctx.obj.identity_store.api_url,
ctx.obj.identity_store.token)
if identity:
# get a very particular identity
try:
result = api.get_identity(identity=identity)
except HTTPServiceError:
click.echo("Identity not found")
ctx.abort()
else:
raise click.UsageError(
"Please specify identity UUID. See --help.")
click.echo(json.dumps(result))
@click.option(
'--csv', type=click.File(file_open_mode),
help=('CSV file with columns for the endpoint'))
@click.option(
'--json', type=click.File(file_open_mode),
help=('JSON objects, one per line for the endpoint'))
@click.pass_context
def identities_import(ctx, csv, json):
""" Import to the Identity Store service.
"""
if not any((csv, json)):
raise click.UsageError("Please specify either --csv or --json.")
api = get_api_client(ctx.obj.identity_store.api_url,
ctx.obj.identity_store.token)
if csv:
for identity in identities_from_csv(csv):
result = api.create_identity(identity)
click.echo(result["id"])
if json:
for identity in identities_from_json(json):
result = api.create_identity(identity)
click.echo(result["id"])
click.echo("Completed importing identities.")
def identities_from_csv(csv_file):
reader = csv.DictReader(csv_file)
for data in reader:
identity = {
"communicate_through": data["communicate_through"],
"details": {
"addresses": {
data["address_type"]: {
data["address"]: {}
}
},
"default_addr_type": data["address_type"]
}
}
        for key, value in data.items():  # .items() works on both Python 2 and 3; iteritems() breaks on 3
if key not in ("address_type", "address", "communicate_through"):
identity["details"][key] = value
yield identity
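# Editor's note (assumed layout, inferred from the keys read above): a matching
# CSV for `identities_import --csv` would start like this; the 'language' column
# is hypothetical and, like any extra column, ends up under "details".
#   communicate_through,address_type,address,language
#   ,msisdn,27812345678,eng_ZA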
def identities_from_json(json_file):
for line in json_file:
data = json.loads(line.rstrip("\n"))
if not isinstance(data, dict):
raise click.UsageError(
"JSON file lines must be objects.")
yield data
@click.option(
'--json-file', type=click.File(file_open_mode),
help=('JSON objects, details that will be updated'))
@click.pass_context
def identities_details_update(ctx, json_file):
""" Update identities details fields.
"""
if not json_file:
raise click.UsageError("Please specify --json_file.")
api = get_api_client(ctx.obj.identity_store.api_url,
ctx.obj.identity_store.token)
update_data = json.load(json_file)
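    # Editor's note: the expected JSON shape, inferred from the loop below, is
    # e.g. {"preferred_language": [{"old": "zul_ZA", "new": "eng_ZA"}]} --
    # the key and values shown are hypothetical.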
for key, patches in update_data.items():
for patch in patches:
identities = api.search_identities(
"details__{}".format(key), patch["old"])
for identity in identities['results']:
identity["details"][key] = patch["new"]
api.update_identity(
identity["id"], {"details": identity["details"]})
click.echo("Completed updating identity details.")
| 30.608108 | 77 | 0.608389 | 537 | 4,530 | 4.970205 | 0.242086 | 0.048707 | 0.041963 | 0.05695 | 0.282503 | 0.180967 | 0.180967 | 0.170101 | 0.170101 | 0.104159 | 0 | 0.003637 | 0.271744 | 4,530 | 147 | 78 | 30.816327 | 0.805396 | 0.036424 | 0 | 0.241379 | 0 | 0 | 0.178341 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060345 | false | 0.034483 | 0.068966 | 0.008621 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d362e720bb3d218affebcec43cea8d2928edf0c3 | 7,899 | py | Python | executables/plot_cands.py | UCBerkeleySETI/blipss | dba5c6724701abcc95b59aa016adf2582ecc69f3 | [
"MIT"
] | null | null | null | executables/plot_cands.py | UCBerkeleySETI/blipss | dba5c6724701abcc95b59aa016adf2582ecc69f3 | [
"MIT"
] | null | null | null | executables/plot_cands.py | UCBerkeleySETI/blipss | dba5c6724701abcc95b59aa016adf2582ecc69f3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
Plot candidate verification plots including periodograms, average pulse profiles and phase-time diagrams.
Run using the following syntax.
python plot_cands.py -i <Configuration script of inputs> | tee <Log file>
'''
from __future__ import print_function
from __future__ import absolute_import
# Custom imports
from modules.general_utils import setup_logger_stdout, create_dir
from modules.read_config import read_config
from modules.read_data import read_watfile
from modules.plotting import candverf_plot
# Standard packages
from argparse import ArgumentParser
from riptide import TimeSeries, ffa_search
import os, logging, time, sys
import numpy as np
import pandas as pd
##############################################################
def myexecute(inputs_cfg):
"""
Primary function that handles script execution.
Parameters
----------
inputs_cfg : str
configuration script of inputs
"""
# Profile code execution.
prog_start_time = time.time()
# Read inputs from config file and set default parameter values, if applicable.
hotpotato = read_config(inputs_cfg)
hotpotato = set_defaults(hotpotato)
logger = setup_logger_stdout() # Set logger output to stdout().
# Read .csv file of periodicity candidates.
logger.info('Reading file: %s'% (hotpotato['csvfile']))
df = pd.read_csv(hotpotato['csvfile'], dtype={'Code':'string'})
cand_codes = np.array(df['Code'])
# Find indices of candidates to plot.
idx = np.array([])
for code in hotpotato['codes_plot']:
idx = np.append(idx, np.where(cand_codes==code)[0])
idx = np.array(idx, dtype=int)
N_cands = len(idx)
print('No. of candidates selected for plotting = %d\n'% (N_cands))
# Properties of chosen candidates
chosencand_chans = np.array(df['Channel'], dtype=int)[idx]
chosencand_periods = np.array(df['Period (s)'], dtype=np.float64)[idx]
chosencand_bins = np.array(df['Bins'], dtype=int)[idx]
chosencand_codes = cand_codes[idx]
# Read data.
N_datafiles = len(hotpotato['datafile_list'])
start_mjds = [] # Store start MJDs (UTC) of data sets
all_data = [] # Store 2D dynamic spectra arrays.
for i in range(N_datafiles):
logger.info('Reading data from %s'% (hotpotato['datafile_list'][i]))
wat = read_watfile(hotpotato['DATA_DIR'] + '/' + hotpotato['datafile_list'][i], hotpotato['mem_load'])
data = wat.data[:,0,:].T
if wat.header['foff']<0:
data = np.flip(data,axis=0)
# Store data arrays.
all_data.append(data)
# Store start MJDs.
start_mjds.append(wat.header['tstart'])
create_dir(hotpotato['PLOT_DIR'])
# Produce candidate plots one by one.
for i in range(N_cands):
chan = chosencand_chans[i]
period = chosencand_periods[i]
bins = chosencand_bins[i]
code = chosencand_codes[i]
logger.info('Working with candidate %d'% (i+1))
print('Channel = %d'% (chan))
print('Period = %s s'% (period))
print('Bins = %d'% (bins))
print('Code = %s'% (code))
# Compute periodogram of relevant time series from each data file.
periodograms = [] # Store periodograms from different data files.
detrended_ts = [] # Store detrended time series from different data files.
max_snrs = [] # Max S/N in periodograms from each data file
for j in range(N_datafiles):
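            # Editor's note: 'wat' below is the last file read in the loop above;
            # its tsamp is reused here, i.e. all inputs are assumed to share a
            # common sampling time.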
raw_ts = TimeSeries.from_numpy_array(all_data[j][chan], tsamp = wat.header['tsamp'])
dts, pgram = ffa_search(raw_ts, period_min=hotpotato['min_period'], period_max=hotpotato['max_period'],
fpmin=hotpotato['fpmin'], bins_min=hotpotato['bins_min'], bins_max=hotpotato['bins_max'],
ducy_max=hotpotato['ducy_max'], deredden=hotpotato['do_deredden'], rmed_width=hotpotato['rmed_width'],
already_normalised=False)
periodograms.append(pgram)
max_snrs.append(pgram.snrs.max())
detrended_ts.append(dts)
# Maximum S/N to be shown on periodogram plot
snr_max = 1.25*np.max(max_snrs)
# Produce candidate plot and save plot to disk.
plot_name = hotpotato['PLOT_DIR'] + '/' + hotpotato['basename'] + '_ch%d'% (chan) + '_code%s'% (code) +'_period%.5f'% (period)
candverf_plot(period, bins, detrended_ts, periodograms, hotpotato['beam_labels'],
start_mjds, snr_max, hotpotato['periodaxis_log'], plot_name, hotpotato['plot_formats'])
# Calculate total run time for the code.
prog_end_time = time.time()
run_time = (prog_end_time - prog_start_time)/60.0
logger.info('Code run time = %.3f minutes'% (run_time))
##############################################################
def set_defaults(hotpotato):
"""
Set default values for keys in a dictionary of input parameters.
Parameters
----------
hotpotato : dictionary
Dictionary of input parameters gathered from a configuration script
Returns
-------
hotpotato : dictionary
Input dictionary with keys set to default values
"""
# Default annotation labels
if hotpotato['beam_labels']=='' or hotpotato['beam_labels']==[]:
hotpotato['beam_labels'] = ['']*len(hotpotato['datafile_list'])
# Default plot format = ['.png']
if hotpotato['plot_formats']=='' or hotpotato['plot_formats']==[]:
hotpotato['plot_formats'] = ['.png']
# Default output directory = DATA_DIR
if hotpotato['PLOT_DIR']=='':
hotpotato['PLOT_DIR'] = hotpotato['DATA_DIR']
# Default log scale for period axis in periodogram = True
if hotpotato['periodaxis_log']=='':
hotpotato['periodaxis_log'] = True
# Default minimum period covered in FFA search = 10 s
if hotpotato['min_period']=='':
hotpotato['min_period'] = 10.0
# Default maximum period covered in FFA search = 100 s
if hotpotato['max_period']=='':
hotpotato['max_period'] = 100.0
# Default fpmin = 3
if hotpotato['fpmin']=='':
hotpotato['fpmin'] = 3
# Default S/N threshold = 8.0
if hotpotato['SNR_threshold']=='':
hotpotato['SNR_threshold'] = 8.0
    # Default bins_min = 10
if hotpotato['bins_min']=='':
hotpotato['bins_min'] = 10
# Default bins_max = 11
if hotpotato['bins_max']=='':
hotpotato['bins_max'] = 11
# Default max duty cycle = 0.5
if hotpotato['ducy_max']=='':
hotpotato['ducy_max'] = 0.5
# Detrending flag
if hotpotato['do_deredden']=='':
hotpotato['do_deredden'] = False
# Default running median window width = 12 s
if hotpotato['rmed_width']=='':
hotpotato['rmed_width'] = 12.0
# Default memory load size = 1 GB
if hotpotato['mem_load']=='':
hotpotato['mem_load'] = 1.0
return hotpotato
##############################################################
def main():
""" Command line tool for running plot_cands.py """
parser = ArgumentParser(description="Produce candidate verification plots.")
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-i', action='store', required=True, dest='inputs_cfg', type=str,
help="Configuration script of inputs")
parser._action_groups.append(optional)
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
parse_args = parser.parse_args()
# Initialize parameter values
inputs_cfg = parse_args.inputs_cfg
# Run task using inputs from configuration script.
myexecute(inputs_cfg)
##############################################################
if __name__=='__main__':
main()
##############################################################
| 41.356021 | 138 | 0.622484 | 960 | 7,899 | 4.948958 | 0.275 | 0.032414 | 0.007577 | 0.017049 | 0.055567 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008901 | 0.217749 | 7,899 | 190 | 139 | 41.573684 | 0.759994 | 0.251551 | 0 | 0 | 0 | 0 | 0.163165 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026087 | false | 0 | 0.095652 | 0 | 0.130435 | 0.06087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d363c2f764722f8256c8d75fcbfe318c21740559 | 401 | py | Python | ignore.py | VolVox99/Paraphrasing-Tool | 2ef59d23a06ed1d1f3e9f88c23e95aa9e6e1c9a8 | [
"MIT"
] | 3 | 2021-02-17T03:22:35.000Z | 2021-12-08T04:54:32.000Z | ignore.py | VolVox99/Paraphrasing-Tool | 2ef59d23a06ed1d1f3e9f88c23e95aa9e6e1c9a8 | [
"MIT"
] | null | null | null | ignore.py | VolVox99/Paraphrasing-Tool | 2ef59d23a06ed1d1f3e9f88c23e95aa9e6e1c9a8 | [
"MIT"
] | null | null | null |
def should_ignore(word: str) -> bool:
with open('ignorelist.txt') as file:
        # readlines() keeps trailing newlines, which would defeat the membership test below
        words_to_ignore = [line.strip() for line in file]
conditions = [
#if all chars are not letters from alphabet
not word.isalpha(),
word.lower() in words_to_ignore,
word.isnumeric(),
#if first letter is capital = proper noun
        word[:1].isupper()  # slicing avoids an IndexError on empty strings
]
return any(conditions) | 23.588235 | 51 | 0.605985 | 50 | 401 | 4.76 | 0.76 | 0.084034 | 0.109244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003521 | 0.291771 | 401 | 17 | 52 | 23.588235 | 0.834507 | 0.204489 | 0 | 0 | 0 | 0 | 0.044164 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d36649a36f426f9099f11bf5367c4a8a8f4ec2b2 | 2,071 | py | Python | Python/Tkinter/Tech-Gram Academy/PhoneBook App.py | omkarsutar1255/Python-Data | 169d0c54b23d9dd5a7f1aea41ab385121c3b3c63 | [
"CC-BY-3.0"
] | null | null | null | Python/Tkinter/Tech-Gram Academy/PhoneBook App.py | omkarsutar1255/Python-Data | 169d0c54b23d9dd5a7f1aea41ab385121c3b3c63 | [
"CC-BY-3.0"
] | null | null | null | Python/Tkinter/Tech-Gram Academy/PhoneBook App.py | omkarsutar1255/Python-Data | 169d0c54b23d9dd5a7f1aea41ab385121c3b3c63 | [
"CC-BY-3.0"
] | null | null | null | from tkinter import *
import datetime
from view_people import Mypeople
date = datetime.datetime.now().date()
date = str(date)
class Application(object): # inheritance from object is by default in python 3
def __init__(self, master):
self.master = master # this allow to use master in other def function as self.master
# Frame
self.top = Frame(master, height=150, bg='white')
self.top.pack(fill=X)
self.bottom = Frame(master, height=500, bg='#34baeb')
self.bottom.pack(fill=X)
# top frame design in frame
self.top_image = PhotoImage(file='C:\\Users\\dell\\Omkar Programme\\Python Files\\newimage.png')
self.top_image_label = Label(self.top, image=self.top_image) # giving image as label on it
self.top_image_label.place(x=130, y=25)
# heading design in frame
self.heading = Label(self.top, text='My Phonebook App', font='arial 15 bold', bg='white', fg='#ebb434')
self.heading.place(x=230, y=60)
# date design in frame
self.date_lbl = Label(self.top, text="Date : " + date, font='arial 12 bold', fg='#ebb434', bg='white')
self.date_lbl.place(x=450, y=110)
# View people button
self.viewbutton = Button(self.bottom, text=' My People ', font='arial 12 bold', fg='white', bg='black',
command=self.my_people)
self.viewbutton.place(x=250, y=70)
# Add people button
self.addbutton = Button(self.bottom, text='Add People', font='arial 12 bold', fg='white', bg='black')
self.addbutton.place(x=250, y=130)
# About us
self.aboutbutton = Button(self.bottom, text=' About Us ', font='arial 12 bold', fg='white', bg='black')
self.aboutbutton.place(x=250, y=190)
def my_people(self):
people = Mypeople()
def main():
root = Tk()
app = Application(root)
    root.title('Phonebook App')
root.geometry('650x550+350+200')
root.resizable(False, False)
root.mainloop()
if __name__ == '__main__':
main()
| 35.101695 | 113 | 0.623853 | 291 | 2,071 | 4.360825 | 0.364261 | 0.049645 | 0.047281 | 0.047281 | 0.097715 | 0.084318 | 0.084318 | 0.084318 | 0.084318 | 0 | 0 | 0.044388 | 0.238532 | 2,071 | 58 | 114 | 35.706897 | 0.760304 | 0.126509 | 0 | 0 | 0 | 0 | 0.156841 | 0.012236 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0.027027 | 0.081081 | 0 | 0.189189 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3670d7e4f25769b7a27109b32a3982276760c06 | 478 | py | Python | LBP06.py | Anandgowda18/LogicBasedPrograms | 25baa9fbf19cd45229c87e099877e97281b0e76b | [
"MIT"
] | null | null | null | LBP06.py | Anandgowda18/LogicBasedPrograms | 25baa9fbf19cd45229c87e099877e97281b0e76b | [
"MIT"
] | null | null | null | LBP06.py | Anandgowda18/LogicBasedPrograms | 25baa9fbf19cd45229c87e099877e97281b0e76b | [
"MIT"
] | null | null | null | '''For every 6 coffee cups I buy, I get a 7th cup free; so buying 6 gives me 7 cups in total. Implement a program that takes n, the number of cups bought, and prints the total number of cups I would get as an integer.
Input Format
n number of cups from user
Constraints
n>0
Output Format
number of cups present have
Sample Input 0
13
Sample Output 0
15
Sample Input 1
6
Sample Output 1
7'''
#solution
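# worked example: n = 13 buys 13 cups and earns 13 // 6 = 2 free ones -> 15 total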
n = int(input())
total_cups = 0
if n > 0:
total_cups = n + (n//6)
print(total_cups) | 14.9375 | 192 | 0.709205 | 94 | 478 | 3.574468 | 0.489362 | 0.071429 | 0.107143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046196 | 0.230126 | 478 | 32 | 193 | 14.9375 | 0.866848 | 0.792887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d36c21fe0133ee21a1ab3addd4c1f6e8a3228f92 | 3,695 | py | Python | cloudmesh/analytics/OpenAPIServer.py | cloudmesh/cloudmesh-analytics | 26e8ab8e718730cbbc5b99ac71395c22686ae698 | [
"Apache-2.0"
] | null | null | null | cloudmesh/analytics/OpenAPIServer.py | cloudmesh/cloudmesh-analytics | 26e8ab8e718730cbbc5b99ac71395c22686ae698 | [
"Apache-2.0"
] | null | null | null | cloudmesh/analytics/OpenAPIServer.py | cloudmesh/cloudmesh-analytics | 26e8ab8e718730cbbc5b99ac71395c22686ae698 | [
"Apache-2.0"
] | 2 | 2019-10-21T03:58:57.000Z | 2020-02-12T00:07:06.000Z | """To create a flask app
The method definition to create a flask app by call ing the create_app function
Example:
create_app(test_config)
"""
import os
import connexion
import sys
import textwrap
from cloudmesh.common.util import writefile
from cloudmesh.common.util import path_expand
from cloudmesh.common.util import banner
from pathlib import Path
class OpenAPIServer:
"""
    This is a convenient class for creating an OpenAPI server with the
    ability to upload files.
    Usage
from cloudmesh.analytics.OpenAPIServer import OpenAPIServer
server = OpenAPIServer(
port = "8000"
host = "127.0.0.1",
path = ".",
spec = "server.yaml",
key = "dev")
server.run()
    If you would like to generate a program that contains such a server, you
    can use
server = OpenAPIServer(
port = "8000"
host = "127.0.0.1",
path = ".",
spec = "server.yaml",
key = "dev")
server.write("server.py")
"""
def __init__(self,
port=8000,
host="127.0.0.1",
path=".",
spec="server.yaml",
key='dev'):
self.spec = spec
self.key = key
self.host = host
self.port = port
if path == ".":
self.path = "."
else:
self.path = path_expand(path)
print(" Server Path:", self.path)
print()
sys.path.append(self.path)
# banner(self.path)
def create_app(self, config=None):
"""
        Creates the Flask app, using the given config mapping if provided. In
        addition, some configuration parameters defined at instantiation time
        are used.
        :param config: parameters passed to the Flask app
        :return: a Flask app
"""
if self.path == ".":
self.path = os.getcwd()
# ensure the file folder exists
try:
os.makedirs(self.path)
except OSError:
pass
# Setup the server
_app = connexion.App(__name__, specification_dir=self.path)
_app.add_api(self.spec)
_app.app.config.from_mapping(
SECRET_KEY=self.key,
UPLOAD_FOLDER=f"{self.path}/data"
)
if config is None:
# load the instance config, if it exists, when not testing
_app.app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
_app.app.config.from_mapping(config)
return _app.app
def app(self):
"""
Starts the server
"""
self.create_app().run(host=self.host, port=self.port)
def __str__(self):
"""
returns the Server as a python program string
:return:
"""
program = textwrap.dedent(
f"""
from cloudmesh.analytics.OpenAPIServer import OpenAPIServer
server = OpenAPIServer(
port = f"{self.port}",
host = f"{self.host}",
path = f"{self.path}",
spec = f"{self.path}/{self.spec}",
key = f"{self.key}")
server.app()
""")
return program
def write(self, filename, path="."):
"""
Writes a python program into the filename that contains the server
        details. This file can be started and will run an OpenAPI server
:param filename:
:return:
"""
        self.path = path
        content = self.__str__()  # generate after updating the path so the argument takes effect
writefile(filename, content)
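# Editor's usage sketch (hedged; mirrors the class docstring, all values are placeholders):
if __name__ == "__main__":
    server = OpenAPIServer(port=8000, host="127.0.0.1", path=".",
                           spec="server.yaml", key="dev")
    server.write("server.py")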
| 25.308219 | 79 | 0.539107 | 420 | 3,695 | 4.657143 | 0.330952 | 0.05317 | 0.029141 | 0.035276 | 0.242331 | 0.156953 | 0.156953 | 0.156953 | 0.156953 | 0.08998 | 0 | 0.012782 | 0.364817 | 3,695 | 145 | 80 | 25.482759 | 0.820622 | 0.36184 | 0 | 0.031746 | 0 | 0 | 0.202194 | 0.028612 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079365 | false | 0.015873 | 0.142857 | 0 | 0.269841 | 0.031746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d36ce0a150d51a1118f75e69b494546bb51688b1 | 1,610 | py | Python | migrations/versions/v3_create_specialization_schema.py | GCETTB-HYLAND-Hackathon2012-22/Miracurol-Rebuild | a6748e93b45872efdbaca013388dd6277824a9a5 | [
"Apache-2.0"
] | null | null | null | migrations/versions/v3_create_specialization_schema.py | GCETTB-HYLAND-Hackathon2012-22/Miracurol-Rebuild | a6748e93b45872efdbaca013388dd6277824a9a5 | [
"Apache-2.0"
] | null | null | null | migrations/versions/v3_create_specialization_schema.py | GCETTB-HYLAND-Hackathon2012-22/Miracurol-Rebuild | a6748e93b45872efdbaca013388dd6277824a9a5 | [
"Apache-2.0"
] | null | null | null | """Create Specialization Schema
Create a table 'specialization' which represents doctors specializations and have a many-to-many relationship with the
doctor table.
-- SQL
CREATE TABLE specialization (
id SERIAL,
specialization_name TEXT NOT NULL,
CONSTRAINT pk_specialization PRIMARY KEY (id)
);
CREATE TABLE doctor_specialization (
id SERIAL,
doctor_id INTEGER NOT NULL,
specialization_id INTEGER NOT NULL,
CONSTRAINT pk_doctor_specialization PRIMARY KEY (id)
);
"""
import sqlalchemy as sa
from alembic import op
revision = 'v3'
down_revision = 'v2'
def upgrade():
# Create new 'specialization' table
op.create_table(
'specialization', # Table Name
sa.Column('id', sa.Integer, autoincrement=True),
sa.Column('specialization_name', sa.Text, nullable=False),
# Constraints Definition
        sa.PrimaryKeyConstraint('id', name='pk_specialization') # Primary Key
)
# Create many-to-many relation between 'doctor' and 'specialization' table
op.create_table(
'doctor_specialization', # Table Name
sa.Column('id', sa.Integer, autoincrement=True),
sa.Column('doctor_id', sa.Integer, nullable=False),
sa.Column('specialization_id', sa.Integer, nullable=False),
# Constraints Definition
sa.PrimaryKeyConstraint('id', name='pk_doctor_specialization') # Primary Key
)
def downgrade():
# Drop 'specialization' table
op.drop_table('specialization')
# Drop many-to-many relation between 'doctor' and 'specialization' table
op.drop_table('doctor_specialization')
| 27.288136 | 118 | 0.701863 | 187 | 1,610 | 5.935829 | 0.294118 | 0.102703 | 0.075676 | 0.034234 | 0.501802 | 0.345946 | 0.345946 | 0.345946 | 0.345946 | 0.345946 | 0 | 0.001555 | 0.201242 | 1,610 | 58 | 119 | 27.758621 | 0.861586 | 0.491925 | 0 | 0.190476 | 0 | 0 | 0.19975 | 0.082397 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d36e069713d8c3e52684121fe2690f4962a2708a | 2,515 | py | Python | tools/fixup_io_tilegrid.py | mfkiwl/prjoxide | 318331f8b30c2e2a31cc41d51f104b671e180a8a | [
"0BSD"
] | 80 | 2019-12-10T21:06:12.000Z | 2021-02-06T09:12:37.000Z | tools/fixup_io_tilegrid.py | mfkiwl/prjoxide | 318331f8b30c2e2a31cc41d51f104b671e180a8a | [
"0BSD"
] | 13 | 2021-03-18T12:59:25.000Z | 2022-03-30T11:35:51.000Z | tools/fixup_io_tilegrid.py | mfkiwl/prjoxide | 318331f8b30c2e2a31cc41d51f104b671e180a8a | [
"0BSD"
] | 4 | 2020-10-04T22:23:15.000Z | 2021-01-29T21:51:25.000Z | import database
import tiles
import json
from os import path
"""
Despite Lattice assigning them the same tile type, "odd" and "even" top/left/right IO
locations have slightly different routing - swapped output tristate and data.
This script fixes this by patching tile names.
"""
for f, d in [("LIFCL", "LIFCL-40"), ("LIFCL", "LFD2NX-40"), ("LFCPNX", "LFCPNX-100")]:
tgp = path.join(database.get_db_root(), f, d, "tilegrid.json")
with open(tgp, "r") as infile:
tg = json.load(infile)["tiles"]
tiles_by_xy = [[]]
max_row = 0
max_col = 0
for tile in sorted(tg.keys()):
r, c = tiles.pos_from_name(tile)
max_row = max(r, max_row)
max_col = max(c, max_col)
while r >= len(tiles_by_xy):
tiles_by_xy.append([])
while c >= len(tiles_by_xy[r]):
tiles_by_xy[r].append([])
tiles_by_xy[r][c].append(tile)
# Top tiles
is_odd = False
for col in tiles_by_xy[0]:
for tile in col:
tt = tiles.type_from_fullname(tile)
if not tt.startswith("SYSIO"):
continue
# Don't rename special or already-renamed tiles
if tt[-1].isdigit():
new_name = tile + ("_ODD" if is_odd else "_EVEN")
assert new_name not in tg
tg[new_name] = dict(tg[tile])
tg[new_name]["tiletype"] = tg[new_name]["tiletype"] + ("_ODD" if is_odd else "_EVEN")
del tg[tile]
is_odd = not is_odd
# Left/right tiles
for tc in (0, max_col):
is_odd = False
bank = ""
for row in tiles_by_xy:
for tile in row[tc]:
tt = tiles.type_from_fullname(tile)
if not tt.startswith("SYSIO"):
continue
if tt.endswith("REM"):
continue
tile_bank = tt[tt.find("B")+1]
if tile_bank != bank:
is_odd = False
bank = tile_bank
if tt[-1].isdigit():
new_name = tile + ("_ODD" if is_odd else "_EVEN")
assert new_name not in tg
tg[new_name] = dict(tg[tile])
tg[new_name]["tiletype"] = tg[new_name]["tiletype"] + ("_ODD" if is_odd else "_EVEN")
del tg[tile]
is_odd = not is_odd
with open(tgp, "w") as outfile:
json.dump({"tiles": tg}, outfile, sort_keys=True, indent=4)
| 34.930556 | 105 | 0.525249 | 345 | 2,515 | 3.643478 | 0.315942 | 0.043755 | 0.057279 | 0.031822 | 0.33572 | 0.33572 | 0.33572 | 0.33572 | 0.33572 | 0.33572 | 0 | 0.00987 | 0.355467 | 2,515 | 71 | 106 | 35.422535 | 0.765577 | 0.028628 | 0 | 0.428571 | 0 | 0 | 0.067507 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d36e8ca439eb0a47f4ed9a65eb100752706fe147 | 1,425 | py | Python | tests/test_html_table.py | gp2kumar/jsonconverter | e90a989c8fe45c466179a9e202f301d1c495668c | [
"MIT"
] | 1 | 2021-02-16T10:09:55.000Z | 2021-02-16T10:09:55.000Z | tests/test_html_table.py | gp2kumar/jsonconverter | e90a989c8fe45c466179a9e202f301d1c495668c | [
"MIT"
] | null | null | null | tests/test_html_table.py | gp2kumar/jsonconverter | e90a989c8fe45c466179a9e202f301d1c495668c | [
"MIT"
] | null | null | null | from unittest import TestCase
from jsonConverter import jsonConverter
from jsonConverter.errors import EmptyJson
from bs4 import BeautifulSoup
import os
import json
class TestHtmlTable(TestCase):
def __init__(self, *args, **kwargs):
current_directory = os.path.dirname(os.path.realpath(__file__))
self.test_data_dir = os.path.join(current_directory, "test_data")
self.json = None
super().__init__(*args, **kwargs)
def test_html_table_json_1(self):
json_path = os.path.join(self.test_data_dir, "json_1.json")
converter = jsonConverter.from_file(json_path)
html_table = converter.get_html_table()
        soup = BeautifulSoup(html_table, "html.parser")  # explicit parser avoids a bs4 warning
headers = [header.text for header in soup.find("table").findAll("th")]
self.assertEqual(len(self._get_json(json_path)[0]), len(soup.find("table").find_all("tr")))
self.assertListEqual(headers, [header for header in self._get_json(json_path)[0].keys()])
def test_html_table_with_empty_json(self):
json_path = os.path.join(self.test_data_dir, "json_2.json")
self.assertRaises(EmptyJson, jsonConverter.from_file(json_path).get_html_table)
def _get_json(self, file_path: str, reload: bool = False):
if reload or not self.json:
with open(file_path) as fp:
self.json = json.load(fp)
return self.json
return self.json
| 39.583333 | 99 | 0.689123 | 196 | 1,425 | 4.729592 | 0.341837 | 0.06041 | 0.038835 | 0.048544 | 0.194175 | 0.131607 | 0.088457 | 0.088457 | 0.088457 | 0.088457 | 0 | 0.005268 | 0.200702 | 1,425 | 35 | 100 | 40.714286 | 0.808604 | 0 | 0 | 0.068966 | 0 | 0 | 0.031579 | 0 | 0 | 0 | 0 | 0 | 0.103448 | 1 | 0.137931 | false | 0 | 0.206897 | 0 | 0.448276 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3719221e4d0e1a5fd57d85ca24e8b4d7705fe8c | 15,915 | py | Python | git-sync.py | wikimedia-bg/git-sync | de047324e34595bfd8b6eb80c09aa8a22674ea12 | [
"MIT"
] | null | null | null | git-sync.py | wikimedia-bg/git-sync | de047324e34595bfd8b6eb80c09aa8a22674ea12 | [
"MIT"
] | 7 | 2020-05-22T21:39:28.000Z | 2020-07-02T12:25:13.000Z | git-sync.py | wikimedia-bg/git-sync | de047324e34595bfd8b6eb80c09aa8a22674ea12 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import os.path
import re
import signal
import sys
import time
from datetime import datetime as dt
from datetime import timedelta as td
from pathlib import Path
import git
import pywikibot as pwb
import yaml
class SignalHandler:
def __init__(self):
self._is_sleeping = False
self._exit_requested = False
signal.signal(signal.SIGINT, self._request_exit)
signal.signal(signal.SIGTERM, self._request_exit)
def _request_exit(self, signal, frame):
if self._is_sleeping:
self._exit_now()
else:
self._exit_requested = True
def _exit_now(self):
print('SIGINT or SIGTERM received, exiting...')
sys.exit(0)
def sleep(self, seconds):
if self._exit_requested:
self._exit_now()
else:
self._is_sleeping = True
time.sleep(seconds)
self._is_sleeping = False
class GitSync:
def __init__(self):
self._base_path = Path(__file__).parent
self._config_file = self._base_path / 'config.yml'
self.config = {}
self.repos = []
self.usermap_email_list = []
def init_repos(self):
for repo in self.config['repos']:
file_regex = re.compile(
repo['file_regex'],
re.I if repo['regex_nocase'] else 0)
repo_path = os.path.join(self.config['repositories_root'], repo['name'])
git_repo = git.Repo(repo_path)
site = pwb.Site(
code=repo['project']['code'],
fam=repo['project']['family'],
user=self.config['mediawiki_username'])
self.repos.append(GitRepo(repo['name'], git_repo, site,
repo['namespace'], file_regex, repo['force_extension'],
repo['ignore_list'] + self.config['global_ignore_list'],
self.config['usermap'], self.usermap_email_list))
def read_config(self):
self.config = yaml.load(self._config_file.read_text(), Loader=yaml.FullLoader)
if not self.config:
print('Error: Configuration file not found or empty.', file=sys.stderr)
sys.exit(1)
else:
self.usermap_email_list = [_['email'] for _ in self.config['usermap'].values()]
class GitRepo:
def __init__(self, name, repo, site, namespace, title_regex, force_ext, ignores, usermap, usermap_email_list):
self.name = name
self.repo = repo
self.site = site
self.namespace = namespace
self.title_regex = title_regex
self.force_ext = force_ext
if force_ext:
self.re_force_ext = re.compile(r'\.' + force_ext + '$')
self.ignores = ignores
self._need_resync = False
self._pending_commits = {}
self._usermap = usermap
self._usermap_email_list = usermap_email_list
def _create_summary(self, author_name, author_email, repo_name, commit_sha, message):
base_url = 'https://github.com/wikimedia-bg'
message = message.replace('\n', ' ')
# Try finding the commit author email in the usermap dictionary. For this we have two lists:
# * the top-level keys, i.e. the Wikimedia usernames (list(self._usermap.keys()));
# * the email subkeys in the _same_ order (created in GitSync.read_config()).
# Because the two lists have the same order, the username in index N in the first list corresponds to the email
# in index N in the second list. Thus, if we find the commit author email in the second list at index N, we know
# that the corresponding Wikimedia username will be at index N in the first list.
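        # Illustration with hypothetical data: usermap = {'Alice': {'email': 'a@x'},
        # 'Bob': {'email': 'b@x'}} gives the email list ['a@x', 'b@x']; finding
        # 'b@x' at index 1 maps back to list(usermap)[1] == 'Bob'.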
try:
wiki_user = list(self._usermap.keys())[self._usermap_email_list.index(author_email)]
# If there's no match, just mention the commit author's name in the edit summary, but don't create a user page
# link.
except ValueError:
wiki_user_mention = author_name
# If there is a match, create a user page link, since this must be a valid Wikimedia user.
else:
wiki_user_mention = '[[User:{user}|{user}]]'.format(user=wiki_user)
return '{user_mention} | {base_url}/{repo}/commit/{sha} | {message}'.format(
user_mention=wiki_user_mention,
base_url=base_url,
repo=repo_name,
sha=commit_sha,
message=message[:400] + (message[400:] and '..'))
def _pagelist(self):
return [_ for _ in self.site.allpages(namespace=self.namespace)
if self.title_regex.search(_.title(with_ns=False))]
def _last_changed(self):
return dt.utcfromtimestamp(self.repo.commit('master').committed_date) + td(seconds=1)
def _pending_revs(self):
pending_revs = []
for page in self._pagelist():
try:
revs_since_last_sync = page.revisions(endtime=self._last_changed(), content=True)
candidate_revs = [(page.title(with_ns=False), _, 'edit') for _ in revs_since_last_sync]
if self._need_resync:
# Full re-sync requested, so get the latest revision of _all_ pages in the repo.
last_rev = page.latest_revision
candidate_revs.append(
(
page.title(with_ns=False),
{
'user': 'syncbot',
'comment': 'forced resync from wiki',
'text': last_rev['text'],
'timestamp': dt.utcnow(),
},
'resync',
)
)
except pwb.exceptions.NoPage:
                # Apparently, the page got deleted on-wiki during our processing,
                # so scrap the candidate_revs.
pass
else:
pending_revs += candidate_revs
# If a resync has been requested, it's done.
self._need_resync = False
# We need to also check for deleted pages that we keep track of.
repo_files = [_.path for _ in self.repo.tree().traverse() if _.type != 'tree']
page_files = set(repo_files) - set(self.ignores)
if self.force_ext:
repo_pages = [self.re_force_ext.sub('', _.replace('.d/', '/')) for _ in page_files]
else:
repo_pages = [_.replace('.d/', '/') for _ in page_files]
existing_pages = [_.title(with_ns=False) for _ in self._pagelist()]
deleted_pages = set(repo_pages) - set(existing_pages)
for page_name in deleted_pages:
for event in self.site.logevents(page=self.namespace + ':' + page_name):
if event.type() in ['delete', 'move']:
pending_revs.append(
(
page_name,
{
'user': event.user(),
'comment': event.comment(),
'timestamp': event.timestamp(),
},
event.type(),
)
)
# We need only the first (chronologically last) delete or move event.
break
pending_revs.sort(key=lambda rev: rev[1]['timestamp'])
return pending_revs
def _pull(self):
old_master = self.repo.commit('master')
self.repo.git.pull()
new_master = self.repo.commit('master')
if new_master == old_master:
return
pull_commits_newest_first = self.repo.iter_commits(
old_master.hexsha + '...' + self.repo.commit('master').hexsha)
pull_commits = reversed([_ for _ in pull_commits_newest_first])
# This requires Python 3.7+ to keep the insertion order of the dictionary.
for commit in pull_commits:
self._pending_commits[commit] = self.repo.git.diff_tree(
'--no-commit-id', '--name-only', '-r', '-z',
commit.parents[0], commit
).split('\0')[:-1]
def _wiki2git(self):
revs = self._pending_revs()
synced_files = []
for rev in revs:
#
# Summary/commit message parsing.
#
            # Bulgarian for "*** empty summary ***" -- fallback when the wiki
            # edit had no summary.
            git_commit_message = rev[1]['comment'] or '*** празно резюме ***'
#
# User parsing.
#
wiki_user = rev[1]['user']
# Ignore our own sync edits in the wiki.
if wiki_user == self.site.username():
continue
try:
author = self._usermap[wiki_user]['author']
email = self._usermap[wiki_user]['email']
except KeyError:
author = wiki_user
email = ''
git_author = git.Actor(author, email)
git_committer = git.Actor(author, email)
#
# Page/file parsing.
#
# We cannot have both a file and a directory with the same name, so where we have
# 'Page' and 'Page/doc', the latter gets converted to 'Page.d/doc'.
file_name = rev[0].replace('/', '.d/')
# If we've configured a file extension for syntax highlighting, add it, but only for
# files in the root of the namespace/repository (the rest will likely be 'Page/doc').
if self.force_ext and '.d/' not in file_name:
file_name = file_name + '.' + self.force_ext
file_path = os.path.join(self.repo.working_dir, file_name)
#
# Committing.
#
# To avoid conflicts as much as possible, perform git pull right before we apply the
# change and commit it.
self._pull()
if rev[2] in ['edit', 'resync']:
os.makedirs(os.path.dirname(file_path), exist_ok=True)
if rev[2] == 'resync' and os.path.exists(file_path):
with open(file_path, 'r') as f:
if rev[1]['text'] == f.read().rstrip('\n'):
# The on-wiki and Git versions are the same. No need to resync.
continue
with open(file_path, 'w') as f:
f.write(rev[1]['text'] + '\n')
self.repo.index.add([file_path])
elif rev[2] in ['delete', 'move']:
self.repo.index.remove([file_path], working_tree=True)
else:
print('Error: Unknown revision type: "{}"'.format(rev[2]))
continue
print('Syncing to Git: {}'.format(file_name))
self.repo.index.commit(
git_commit_message,
author=git_author,
committer=git_committer,
author_date=dt.isoformat(rev[1]['timestamp'], timespec='seconds'),
commit_date=dt.isoformat(rev[1]['timestamp'], timespec='seconds'))
# Push after each commit. It's inefficient, but should minimize possible conflicts.
self.repo.git.push()
synced_files.append(file_name)
return synced_files
def _git2wiki(self, synced_from_wiki):
# Iterate over a list of the keys, instead of directly on the dictionary. This allows to
# delete the pending commits from the latter once they are processed.
commit_list = list(self._pending_commits)
for commit in commit_list:
if re.search(r'\bDO\s+NOT\s+(MERGE|SYNC)\b', commit.message):
print('Ignoring commit {} because of DO NOT MERGE/SYNC.'.format(commit.hexsha))
del self._pending_commits[commit]
continue
for file_name in self._pending_commits[commit]:
# We cannot have both a file and a directory with the same name, so where we have
# 'Page' and 'Page/doc', the latter was converted to 'Page.d/doc'.
page_name = self.namespace + ':' + file_name.replace('.d/', '/')
# If we've configured a file extension for syntax highlighting, remove it, but only
# for files in the root of the namespace/repo (the rest will likely be 'Page/doc').
if self.force_ext and '/' not in page_name:
page_name = self.re_force_ext.sub('', page_name)
page = pwb.Page(self.site, page_name)
summary = self._create_summary(
commit.author.name,
commit.author.email,
self.name,
commit.hexsha,
commit.message)
if file_name in synced_from_wiki:
# This page has been updated on the wiki in this sync run. To be on the safe
# side, we'll discard the possibly conflicting changes from the Git repo.
print('Ignoring possibly conflicting changes in {}'.format(file_name))
# Sometimes this might lead to out-of-sync situations, so schedule a resync.
self._need_resync = True
continue
file_removed = False
try:
file_git_blob = self.repo.commit(commit).tree.join(file_name)
except KeyError as e:
if str(e).endswith('\'{file}\' not found"'.format(file=file_name)):
file_removed = True
else:
raise
if not file_removed:
file_contents_at_commit = b''.join(file_git_blob.data_stream[3].readlines())
page.text = file_contents_at_commit.decode('utf-8').rstrip('\n')
print('Saving {}'.format(page.title()))
try:
page.save(summary=summary, botflag=True, quiet=True)
except pwb.data.api.APIError as e:
print('APIError exception: {}'.format(str(e)), file=sys.stderr)
else:
print('Deleting {}'.format(page.title()))
try:
page.delete(reason=summary, prompt=False)
except pwb.data.api.APIError as e:
print('APIError exception: {}'.format(str(e)), file=sys.stderr)
# When all files in a commit have been processed, remove it from the pending list.
del self._pending_commits[commit]
def sync(self, resync=False):
if resync:
self._need_resync = True
w2g_synced_files = self._wiki2git()
self._pull()
if self._pending_commits:
self._git2wiki(w2g_synced_files)
def main(argv):
sig_handler = SignalHandler()
git_sync = GitSync()
git_sync.read_config()
git_sync.init_repos()
    '''
    Disabled temporarily.
    if argv:
        if argv.pop() in ['resync', 'force']:
            for repo in git_sync.repos:
                print('Resyncing repo "{}"...'.format(repo.repo.git_dir))
                repo.sync(resync=True)
    '''
while True:
for repo in git_sync.repos:
print('Syncing repo "{}"...'.format(repo.repo.git_dir))
repo.sync()
# Sleep for a second between repos to catch requests to shutdown faster.
sig_handler.sleep(1)
print('Sleeping...')
sig_handler.sleep(git_sync.config['daemon_sleep_seconds'])
if __name__ == '__main__':
main(sys.argv[1:])
| 43.964088 | 120 | 0.543827 | 1,852 | 15,915 | 4.488121 | 0.212203 | 0.014437 | 0.013474 | 0.012031 | 0.168311 | 0.106112 | 0.08999 | 0.08999 | 0.071944 | 0.071944 | 0 | 0.003701 | 0.354885 | 15,915 | 361 | 121 | 44.085873 | 0.805883 | 0.170342 | 0 | 0.139706 | 0 | 0 | 0.077274 | 0.006117 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.003676 | 0.044118 | 0.007353 | 0.139706 | 0.044118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d37289938c883e0f5b2c05d45673f8a8f5f65c59 | 2,230 | py | Python | eve/flaskapp.py | nrcmedia/eve | fa45b0fd34e0fd2a6e201b71edd382fe0d72e86f | [
"BSD-3-Clause"
] | null | null | null | eve/flaskapp.py | nrcmedia/eve | fa45b0fd34e0fd2a6e201b71edd382fe0d72e86f | [
"BSD-3-Clause"
] | null | null | null | eve/flaskapp.py | nrcmedia/eve | fa45b0fd34e0fd2a6e201b71edd382fe0d72e86f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
eve.flaskapp
~~~~~~~~~~~~
This module implements the central WSGI application object as a Flask
subclass.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import eve
from flask import Blueprint
from werkzeug.routing import BaseConverter
from werkzeug.serving import WSGIRequestHandler
from eve import default_settings
from eve.io.mongo import Mongo, Validator
from eve.methods.common import ApiView
from flask_pymongo import PyMongo  # the flask.ext.* import shim was removed in Flask 1.0
class Api(object):
""" The main Api object, on init it will add all the configured urls to create the
endpoints (wrapped in a Blueprint) """
def __init__(self, url_prefix, app=None):
self.app = app
if app is not None:
self.init_app(app, url_prefix)
def init_app(self, app, url_prefix):
self.driver = PyMongo(app)
# Set default api configuration, if not already set by users config
for key in dir(default_settings):
app.config.setdefault(key, getattr(default_settings, key))
blueprint = Blueprint('eve', 'eve', url_prefix=url_prefix)
resources = {}
urls = {}
datasources = {}
#blueprint.add_url_rule('/', 'home', view_func=home_endpoint)
def register_api(resource, endpoint, url, pk='_id', pk_type='ObjectId'):
view_func = ApiView.as_view(endpoint, self.driver, resource)
blueprint.add_url_rule(url, defaults={pk: None},
view_func=view_func, methods=['GET',])
blueprint.add_url_rule(url, view_func=view_func, methods=['POST',])
blueprint.add_url_rule('%s<%s:%s>' % (url, pk_type, pk), view_func=view_func,
methods=['GET', 'PATCH', 'DELETE', 'PUT'])
for resource, settings in app.config['DOMAIN'].items():
base_url = "/%s/" % resource
register_api(resource, "%s_api" % resource, base_url, pk='_id')
self.blueprint = blueprint
app.config['RESOURCES'] = resources
app.config['URLS'] = urls
app.config['SOURCES'] = datasources
app.register_blueprint(blueprint)
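
# Hypothetical usage sketch (the app name, DOMAIN contents and URL prefix are
# assumptions, not part of this module):
# from flask import Flask
# app = Flask(__name__)
# app.config['DOMAIN'] = {'people': {}}   # exposes GET/POST /api/people/ etc.
# api = Api('/api', app=app)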
| 33.787879 | 89 | 0.626457 | 277 | 2,230 | 4.891697 | 0.382671 | 0.047232 | 0.04428 | 0.056089 | 0.087823 | 0.038376 | 0 | 0 | 0 | 0 | 0 | 0.003019 | 0.257399 | 2,230 | 65 | 90 | 34.307692 | 0.815217 | 0.204484 | 0 | 0 | 0 | 0 | 0.051445 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.222222 | 0 | 0.333333 | 0.194444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d37462d1ff2d389bfad4c0d668e447663a2bbb4d | 2,606 | py | Python | ethosu/mlw_codec/test/test_mlw_codec.py | DemonGiggle/vela | 298e383d2dbab465ddd4995a6ebc6f8bc07c8719 | [
"Apache-2.0"
] | 1 | 2021-12-01T17:16:14.000Z | 2021-12-01T17:16:14.000Z | ethosu/mlw_codec/test/test_mlw_codec.py | DemonGiggle/vela | 298e383d2dbab465ddd4995a6ebc6f8bc07c8719 | [
"Apache-2.0"
] | null | null | null | ethosu/mlw_codec/test/test_mlw_codec.py | DemonGiggle/vela | 298e383d2dbab465ddd4995a6ebc6f8bc07c8719 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Simple example of the usage of mlw_codec.
import pytest
from ethosu import mlw_codec
class TestMLWCodec:
""" This class is responsible to test the mlw_codec library
It mainly tests the two methods encode() and decode() with different inputs"""
weights = [0, 2, 3, 0, -1, -2, -3, 0, 0, 0, 1, -250, 240] * 3
compressed_weights = bytearray(
b"\xb8\x00\\q^\x1f\xfc\x01\x03\x05\x08\x0c\x10\x908\x12\xd7\x99:\xd2\x99$\xae#\x9d\xa9#\x00\xf0\xff\xff\xff"
)
empty_decoded = bytearray(b"\xfe\xffC\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff")
# Generate parameters lists for the tests below
encode_testdata = [
(mlw_codec.encode, weights, compressed_weights),
pytest.param(mlw_codec.encode, ["a"], empty_decoded, marks=pytest.mark.xfail), # cannot accept strings
]
decode_testdata = [(mlw_codec.decode, compressed_weights, weights)]
codec_testdata = [
(weights, weights),
([1] * 10, [1] * 10),
pytest.param(["a"], ["a"], marks=pytest.mark.xfail), # cannot accept strings
]
@pytest.mark.parametrize("function_under_test,test_input,expected", encode_testdata)
def test_mlw_codec(self, function_under_test, test_input, expected):
self._call_mlw_codec_method(function_under_test, test_input, expected)
@pytest.mark.parametrize("function_under_test,test_input,expected", decode_testdata)
def test_mlw_decode(self, function_under_test, test_input, expected):
self._call_mlw_codec_method(function_under_test, test_input, expected)
@pytest.mark.parametrize("test_input,expected", codec_testdata)
def test_mlw_encode_decode(self, test_input, expected):
output = mlw_codec.decode(mlw_codec.encode(test_input))
assert output == expected
def _call_mlw_codec_method(self, method_name, test_input, expected):
output = method_name(test_input)
assert output == expected
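
# Hypothetical direct round-trip outside pytest (the weight values are arbitrary):
# compressed = mlw_codec.encode([0, 1, -2, 3] * 4)
# assert mlw_codec.decode(compressed) == [0, 1, -2, 3] * 4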
| 41.365079 | 116 | 0.720645 | 379 | 2,606 | 4.788918 | 0.406332 | 0.052893 | 0.049587 | 0.052893 | 0.280441 | 0.236915 | 0.236915 | 0.193939 | 0.193939 | 0.144904 | 0 | 0.031974 | 0.171911 | 2,606 | 62 | 117 | 42.032258 | 0.809082 | 0.344973 | 0 | 0.129032 | 0 | 0.064516 | 0.158617 | 0.145498 | 0 | 0 | 0 | 0 | 0.064516 | 1 | 0.129032 | false | 0 | 0.064516 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d377969c4f87184fc09ab88ee20863bc245c4b23 | 646 | py | Python | plot_testing.py | MorganR/gaussian-processes | bf8e1c8453b5c07a525b1393bbe8013b4c10d9fb | [
"MIT"
] | null | null | null | plot_testing.py | MorganR/gaussian-processes | bf8e1c8453b5c07a525b1393bbe8013b4c10d9fb | [
"MIT"
] | null | null | null | plot_testing.py | MorganR/gaussian-processes | bf8e1c8453b5c07a525b1393bbe8013b4c10d9fb | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import GPflow

plt.style.use('ggplot')

# Toy 1-D regression data: noisy samples of a sum of sinusoids.
N = 16
X = np.random.rand(N, 1)
Y = np.sin(12 * X) + 0.66 * np.cos(25 * X) + np.random.randn(N, 1) * 0.1 + 3

# GP regression with a Matern-5/2 kernel; initialise the noise variance,
# then optimise the hyperparameters.
k = GPflow.kernels.Matern52(1, lengthscales=0.3)
m = GPflow.gpr.GPR(X, Y, kern=k)
m.likelihood.variance = 0.01
m.optimize()

# Predict mean and variance on a dense grid and plot a 2-sigma band.
xx = np.linspace(-0.1, 1.1, 100)[:, None]
mean, var = m.predict_y(xx)
plt.figure(figsize=(12, 6))
plt.plot(X, Y, 'kx', mew=2)
plt.plot(xx, mean, 'b', lw=2)
plt.fill_between(xx[:, 0], mean[:, 0] - 2 * np.sqrt(var[:, 0]),
                 mean[:, 0] + 2 * np.sqrt(var[:, 0]), color='blue', alpha=0.2)
plt.xlim(-0.1, 1.1)
print(m)  # show the optimised kernel/likelihood parameters
plt.show()
| 20.1875 | 116 | 0.631579 | 135 | 646 | 3.007407 | 0.466667 | 0.019704 | 0.044335 | 0.019704 | 0.081281 | 0.081281 | 0.081281 | 0.081281 | 0 | 0 | 0 | 0.083481 | 0.128483 | 646 | 31 | 117 | 20.83871 | 0.637655 | 0 | 0 | 0 | 0 | 0 | 0.020124 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.190476 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d378df2e0e879fe284c4b24445531bfc9a8d0706 | 2,092 | py | Python | 1089.duplicate-zeros.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 1089.duplicate-zeros.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 1089.duplicate-zeros.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | # coding=utf-8
#
# @lc app=leetcode id=1089 lang=python
#
# [1089] Duplicate Zeros
#
# https://leetcode.com/problems/duplicate-zeros/description/
#
# algorithms
# Easy (58.99%)
# Likes: 118
# Dislikes: 94
# Total Accepted: 15.1K
# Total Submissions: 25.6K
# Testcase Example: '[1,0,2,3,0,4,5,0]'
#
# Given a fixed length array arr of integers, duplicate each occurrence of
# zero, shifting the remaining elements to the right.
#
# Note that elements beyond the length of the original array are not written.
#
# Do the above modifications to the input array in place, do not return
# anything from your function.
#
#
#
# Example 1:
#
#
# Input: [1,0,2,3,0,4,5,0]
# Output: null
# Explanation: After calling your function, the input array is modified to:
# [1,0,0,2,3,0,0,4]
#
#
# Example 2:
#
#
# Input: [1,2,3]
# Output: null
# Explanation: After calling your function, the input array is modified to:
# [1,2,3]
#
#
#
#
# Note:
#
#
# 1 <= arr.length <= 10000
# 0 <= arr[i] <= 9
#
#
class Solution(object):
def _duplicateZeros(self, arr):
"""
:type arr: List[int]
:rtype: None Do not return anything, modify arr in-place instead.
"""
        # First attempt: inserts zeros but never pops, so the array grows
        # past its original length (kept for reference, hence the underscore).
index = 0
actual = 0
length = len(arr)
while index < length:
if arr[actual] == 0:
arr.insert(actual+1, 0)
index += 1
actual += 2
else:
index += 1
actual += 1
def duplicateZeros(self, arr):
"""
:type arr: List[int]
:rtype: None Do not return anything, modify arr in-place instead.
"""
        # In-place version: every insert is balanced by a pop, so the array
        # never grows past its original length.
actual = 0
length = len(arr)
while actual < length:
if arr[actual] == 0:
arr.insert(actual+1, 0)
actual += 2
arr.pop()
else:
actual += 1
# if __name__ == "__main__":
# s = Solution()
# a = [1,0,2,3,0,4,5,0]
# s.duplicateZeros(a)
# print a
| 21.56701 | 77 | 0.546845 | 281 | 2,092 | 4.039146 | 0.402135 | 0.010573 | 0.010573 | 0.014097 | 0.442291 | 0.442291 | 0.4 | 0.4 | 0.378855 | 0.378855 | 0 | 0.061571 | 0.32457 | 2,092 | 96 | 78 | 21.791667 | 0.741684 | 0.589388 | 0 | 0.695652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d379100771febdd3cd71e6437f6cbe231c5f4030 | 2,841 | py | Python | scripts/motion/preprocess.py | isi-vista/adam-visual-perception | 8ad6ed883b184b5407a1bf793617b226c78b3a13 | [
"MIT"
] | 1 | 2020-07-21T10:52:26.000Z | 2020-07-21T10:52:26.000Z | scripts/motion/preprocess.py | isi-vista/adam-visual-perception | 8ad6ed883b184b5407a1bf793617b226c78b3a13 | [
"MIT"
] | null | null | null | scripts/motion/preprocess.py | isi-vista/adam-visual-perception | 8ad6ed883b184b5407a1bf793617b226c78b3a13 | [
"MIT"
] | 2 | 2020-07-21T15:30:42.000Z | 2021-01-20T21:54:09.000Z | """ Preprocess
This script loads the videos, removes the audio track, cuts each video from
its start point to its end point as given by the dataset fragments, and saves
them in a new directory. A new `tsv` file is generated with the corresponding
filenames and ground-truth labels.
Parameters
----------
data_dir : str, optional
Directory path to raw videos (default is "/path/to/raw/videos")
tsv_path : str, optional
Path to tsv file containing dataset information (default is "benchmarks/motion_raw.tsv")
target_dir : str, optional
Where to save the preprocessed videos (default is "data/videos_motion")
target_tsv : str, optional
    Where to write information on preprocessed videos (default is "benchmarks/motion.tsv")
base_name : str, optional
    Base name of preprocessed videos (default is "video")
audio : bool, optional
Whether to include the audio in the video files (default is False)
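
Example
-------
A hypothetical invocation, overriding the defaults through Sacred's
command-line config updates:

    python preprocess.py with data_dir=/data/raw_videos audio=True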
"""
# External imports
from sacred import Experiment
from sacred.observers import FileStorageObserver
from collections import namedtuple
from adam_visual_perception import preprocess
import pandas as pd
import numpy as np
import os
ex = Experiment()
#ex.observers.append(FileStorageObserver.create('sacred'))
@ex.config
def my_config():
data_dir = "/path/to/raw/videos"
tsv_path = "benchmarks/motion_raw.tsv"
target_dir = "data/videos_motion"
target_tsv = "benchmarks/motion.tsv"
base_name = "video"
audio = False
@ex.automain
def main(_config):
args = namedtuple('GenericDict', _config.keys())(**_config)
    # Set the random seed (Sacred injects a `seed` entry into the config
    # automatically, so args.seed exists even though my_config() does not set it).
    np.random.seed(args.seed)
# Check data dir
if not os.path.isdir(args.data_dir):
raise Exception("Data dir {} is invalid.".format(args.data_dir))
# Load tsv
if not os.path.isfile(args.tsv_path):
raise Exception("The path to tsv file cannot be found at {}.".format(args.tsv_path))
df = pd.read_csv(args.tsv_path, sep='\t')
# Create the target dir
if not os.path.isdir(args.target_dir):
os.makedirs(args.target_dir)
# Go through all video files, clip them and save as new files
df_new = pd.DataFrame(columns = ['path', 'label'], index=range(0, len(df)))
for index, row in df.iterrows():
# Get the necessary information
filename, start, end, label = row
filename = os.path.join(args.data_dir, filename)
target_name = os.path.join(args.target_dir, args.base_name + str(index) + ".mp4")
# Preprocessing
preprocess(filename, start, end, target_name, args.audio)
# Save the entry in the target tsv file
df_new.loc[index] = [target_name, label]
# Dump the tsv file post-preprocessing
df_new.to_csv(args.target_tsv, sep='\t', index=False)
print("Dumped the tsv file of preprocessed videos at {}.".format(args.target_tsv))
| 33.821429 | 92 | 0.708905 | 413 | 2,841 | 4.779661 | 0.346247 | 0.024823 | 0.013678 | 0.022796 | 0.129686 | 0.077001 | 0.023303 | 0 | 0 | 0 | 0 | 0.00087 | 0.190778 | 2,841 | 83 | 93 | 34.228916 | 0.857764 | 0.431538 | 0 | 0 | 0 | 0 | 0.144646 | 0.028804 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.194444 | 0 | 0.25 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d379ebabfe6a453a1a327feab9114e7908641286 | 391 | py | Python | 2022-04-28-mean-shift.py | joaco18/maia-italy-aia | 74032ea590c2a29fe5a38b539cee5c748c7ba063 | [
"MIT"
] | null | null | null | 2022-04-28-mean-shift.py | joaco18/maia-italy-aia | 74032ea590c2a29fe5a38b539cee5c748c7ba063 | [
"MIT"
] | null | null | null | 2022-04-28-mean-shift.py | joaco18/maia-italy-aia | 74032ea590c2a29fe5a38b539cee5c748c7ba063 | [
"MIT"
] | null | null | null | import cv2
import utils


def main():
    input_img = cv2.imread(str(utils.EXAMPLES_DIR / 'retina.png'))
    # Mean-shift filtering: spatial window radius sp=2, colour window radius
    # sr=30, maxLevel=0 (no pyramid) -- smooths colours while preserving edges.
    result = cv2.pyrMeanShiftFiltering(input_img, 2, 30, 0)

    cv2.imshow('Original', input_img)
    cv2.waitKey(0)
    cv2.imshow('Mean Shift result', result)
    cv2.waitKey(0)
    cv2.imwrite(str(utils.EXAMPLES_DIR / 'retina.MS.png'), result)


if __name__ == '__main__':
    main()
| 19.55 | 64 | 0.675192 | 55 | 391 | 4.563636 | 0.490909 | 0.095618 | 0.087649 | 0.151394 | 0.199203 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 0.176471 | 391 | 19 | 65 | 20.578947 | 0.736025 | 0 | 0 | 0.166667 | 0 | 0 | 0.143223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d37ad7f9f7b6e3bd8100c785d3bb4dcf55ed0deb | 400 | py | Python | Scripts/Python/Utils/resample.py | TheSchilk/PmodADC | 7c79507034d3c8e1f475fa4ca5fec741bf46c17e | [
"MIT"
] | 1 | 2022-02-15T06:53:36.000Z | 2022-02-15T06:53:36.000Z | Scripts/Python/Utils/resample.py | TheSchilk/PmodADC | 7c79507034d3c8e1f475fa4ca5fec741bf46c17e | [
"MIT"
] | null | null | null | Scripts/Python/Utils/resample.py | TheSchilk/PmodADC | 7c79507034d3c8e1f475fa4ca5fec741bf46c17e | [
"MIT"
] | null | null | null | import scipy.signal as sps
import numpy as np
def resample_audio(audio, fs_from, fs_to):
number_of_samples = round(len(audio) * float(fs_to) / fs_from)
audio = sps.resample(audio, number_of_samples)
# Ensure re-sampling did not create samples outside of [-1,1]:
max_amplitude = np.abs(audio).max()
if max_amplitude > 1:
audio = audio / max_amplitude
return audio
| 25 | 66 | 0.695 | 62 | 400 | 4.290323 | 0.516129 | 0.135338 | 0.112782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009494 | 0.21 | 400 | 15 | 67 | 26.666667 | 0.832278 | 0.15 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d37b259e6b4975ce0d8db6b86cb732283a09517f | 3,849 | py | Python | feder/main/urls.py | dzemeuksis/feder | 32ef7793af6256d4ecada61505c7baf334b34419 | [
"MIT"
] | 16 | 2015-08-11T17:20:26.000Z | 2022-02-11T20:15:41.000Z | feder/main/urls.py | dzemeuksis/feder | 32ef7793af6256d4ecada61505c7baf334b34419 | [
"MIT"
] | 534 | 2015-08-04T00:10:54.000Z | 2022-03-17T10:44:47.000Z | feder/main/urls.py | dzemeuksis/feder | 32ef7793af6256d4ecada61505c7baf334b34419 | [
"MIT"
] | 10 | 2017-08-30T13:34:32.000Z | 2022-02-18T13:00:35.000Z | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps.views import index, sitemap
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView
from rest_framework import routers
from teryt_tree.rest_framework_ext.viewsets import JednostkaAdministracyjnaViewSet
from feder.cases.sitemaps import CaseSitemap
from feder.cases.viewsets import CaseViewSet, CaseReportViewSet
from feder.institutions.sitemaps import InstitutionSitemap, TagSitemap
from feder.institutions.viewsets import InstitutionViewSet, TagViewSet
from feder.letters.sitemaps import LetterSitemap
from feder.main.sitemaps import StaticSitemap
from feder.monitorings.sitemaps import MonitoringPagesSitemap, MonitoringSitemap
from feder.monitorings.viewsets import MonitoringViewSet
from feder.records.viewsets import RecordViewSet
from feder.teryt.sitemaps import JSTSitemap
from feder.monitorings.views import MultiCaseTagManagement
from . import views
handler500 = views.handler500 # required to have exception id
router = routers.DefaultRouter()
router.register(r"institutions", InstitutionViewSet, basename="institution")
router.register(r"tags", TagViewSet)
router.register(r"teryt", JednostkaAdministracyjnaViewSet)
router.register(r"records", RecordViewSet)
router.register(r"cases/report", CaseReportViewSet, basename="case-report")
router.register(r"cases", CaseViewSet)
router.register(r"monitorings", MonitoringViewSet)
urlpatterns = [url(_(r"^$"), views.HomeView.as_view(), name="home")]
urlpatterns += [
url(
_(r"^about/$"),
TemplateView.as_view(template_name="pages/about.html"),
name="about",
),
# Django Admin
url(r"^admin/", admin.site.urls),
# User management
url(_(r"^users/"), include("feder.users.urls", namespace="users")),
url(r"^accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
url(
_(r"^institutions/"),
include("feder.institutions.urls", namespace="institutions"),
),
url(
_(r"^monitorings/"), include("feder.monitorings.urls", namespace="monitorings")
),
url(_(r"^cases/"), include("feder.cases.urls", namespace="cases")),
url(_(r"^cases/tags/"), include("feder.cases_tags.urls", namespace="cases_tags")),
url(_(r"^alerts/"), include("feder.alerts.urls", namespace="alerts")),
url(_(r"^letters/"), include("feder.letters.urls", namespace="letters")),
url(_(r"^teryt/"), include("feder.teryt.urls", namespace="teryt")),
url(_(r"^letters/logs/"), include("feder.letters.logs.urls", namespace="logs")),
url(_(r"^parcels/"), include("feder.parcels.urls", namespace="parcels")),
url(_(r"^virus_scan/"), include("feder.virus_scan.urls", namespace="virus_scan")),
url(
r"^api/monitorings/(?P<monitoring_pk>\d+)/case-tags/update/$",
MultiCaseTagManagement.as_view(),
name="monitoring-case-tags-update",
),
url(r"^api/", include(router.urls)),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
sitemaps = {
"cases": CaseSitemap,
"institutions": InstitutionSitemap,
"institutions_tags": TagSitemap,
"letters": LetterSitemap,
"main": StaticSitemap,
"monitorings": MonitoringSitemap,
"monitorings_pages": MonitoringPagesSitemap,
"teryt": JSTSitemap,
}
urlpatterns += [
url(
r"^sitemap\.xml$", index, {"sitemaps": sitemaps, "sitemap_url_name": "sitemaps"}
),
url(
r"^sitemap-(?P<section>.+)\.xml$",
sitemap,
{"sitemaps": sitemaps},
name="sitemaps",
),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [url(r"^__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 38.108911 | 88 | 0.719408 | 430 | 3,849 | 6.34186 | 0.251163 | 0.029336 | 0.038504 | 0.013201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001802 | 0.1351 | 3,849 | 100 | 89 | 38.49 | 0.817363 | 0.025721 | 0 | 0.16092 | 0 | 0 | 0.218425 | 0.06008 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.252874 | 0 | 0.252874 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3800894268187dfb1384e045a327952f3559d7b | 4,972 | py | Python | myscripts/projects/thesis/mean_profile.py | LSaffin/scripts | 100fc442229ea11f8766a6d78b4db8790c607326 | [
"MIT"
] | 1 | 2020-03-16T13:54:28.000Z | 2020-03-16T13:54:28.000Z | myscripts/projects/thesis/mean_profile.py | LSaffin/scripts | 100fc442229ea11f8766a6d78b4db8790c607326 | [
"MIT"
] | null | null | null | myscripts/projects/thesis/mean_profile.py | LSaffin/scripts | 100fc442229ea11f8766a6d78b4db8790c607326 | [
"MIT"
] | 1 | 2021-01-16T04:54:53.000Z | 2021-01-16T04:54:53.000Z | import matplotlib.pyplot as plt
import iris
from irise import plot
from irise.plot.util import multilabel, legend
from systematic_forecasts import second_analysis
from myscripts.projects.thesis import plotdir
def main():
# Parameters same for all plots
mappings = ['pv_full', 'pv_main', 'pv_phys']
domains = ['full', 'sea', 'no_coast'] # , 'ridges', 'troughs']
title = ['Forecast', 'PV budget', 'Physics PV tracers']
xlabel = 'PV (PVU)'
# Ground relative
"""
coord = 'altitude'
ylabel = 'Height (km)'
xlims = [(0, 15), (-0.12, 0.12), (-0.12, 0.12)]
ylims = (1, 17)
profile(coord, mappings, domains, title, xlabel, ylabel, xlims, ylims)
#plt.savefig(plotdir + 'ch7_low/height_profile.pdf')
plt.show()
"""
# Tropopause relative
"""
coord = 'distance_from_dynamical_tropopause'
ylabel = 'Height (km)'
xlims = [(-0.5, 0.2), (-0.2, 0.3), (-0.2, 0.3)]
ylims = (-2, 2)
profile(coord, mappings, domains, title, xlabel, ylabel, xlims, ylims)
#plt.savefig(plotdir + 'ch6_tropopause/trop_profile_full.pdf')
plt.show()
"""
# BL relative
coord = 'distance_from_boundary_layer_top'
ylabel = 'Vertical distance from boundary-layer top (km)'
xlims = [(0, 0.8), (-0.75, 0.75), (-0.75, 0.75)]
ylims = (-1, 1)
profile(coord, mappings, domains, title, xlabel, ylabel, xlims, ylims)
plt.savefig(plotdir + '../bl_profile_60h.pdf')
plt.show()
return
def profile(coord, mappings, domains, title, xlabel, ylabel, xlims, ylims):
ncols = len(mappings)
nrows = len(domains)
# Initialise the plot
fig = plt.figure(figsize=(18, 25))
# Loop over mappings
for m, domain in enumerate(domains):
cubes = second_analysis.get_data(coord, domain)
for n, mapping in enumerate(mappings):
mapping = second_analysis.mappings[mapping]
ax = plt.subplot2grid((nrows, ncols), (m, n))
profile_multi(cubes, ax, mapping, coord)
ax.set_xlim(*xlims[n])
ax.set_ylim(*ylims)
if m == 0:
ax.set_title(title[n])
else:
ax.set_title('')
if m == nrows - 1:
legend(ax, key=second_analysis.get_idx, loc='upper left',
ncol=2, bbox_to_anchor=(0.05, -0.25))
else:
ax.get_xaxis().set_ticklabels([])
if n == 0:
if m == 1:
ax.set_ylabel(ylabel)
else:
ax.set_ylabel('')
else:
ax.set_ylabel('')
ax.get_yaxis().set_ticklabels([])
if m == nrows - 1 and n == 1:
ax.set_xlabel(xlabel)
else:
ax.set_xlabel('')
ax.axvline(color='k')
ax.axhline(color='k')
if coord == 'air_pressure':
ax.set_ylim(ax.get_ylim()[::-1])
multilabel(ax, n + m * ncols)
fig.subplots_adjust(bottom=0.4)
return
def profile_multi(cubes, axis, mapping, coord):
for variable in mapping:
print(variable)
# Extract the plot styles for the variable
c = mapping[variable]
# Load the cube
cube = cubes.extract(variable)[0]
cube = cube.extract(iris.Constraint(forecast_lead_time=60))
nice_units(cube, coord)
# Plot tropopause gradient vs lead time
mean, std_err = second_analysis.extract_statistics(
cube, 'forecast_index')
plot.errorbar(mean, mean.coord(coord), xerr=std_err,
linestyle=c.linestyle, color=c.color, label=c.symbol)
return
def profile_error(cubes, axis, mapping, coord):
for variable in mapping:
# Extract the plot styles for the variable
c = mapping[variable]
# Load the cube
cube = cubes.extract(variable)[0]
nice_units(cube, coord)
# Analysis (exclude first forecast)
analysis = cube[1:].extract(iris.Constraint(forecast_lead_time=0))
# 24h lead time (exclude last forecast)
forecast = cube[:-1].extract(iris.Constraint(forecast_lead_time=24))
# Take the difference between the 48h forecast and the 24h forecast for
# the same verification time
diff = forecast.data - analysis.data
diff = forecast.copy(data=diff)
# Take the mean difference
mean, std_err = second_analysis.extract_statistics(
diff, 'forecast_index')
plot.errorbar(mean[1:], mean.coord(coord)[1:], xerr=std_err[1:],
linestyle=c.linestyle, color=c.color, label=c.symbol)
return
def nice_units(cube, coord):
# Nice units
if cube.coord(coord).units == 'Pa':
cube.coord(coord).convert_units('hPa')
elif cube.coord(coord).units == 'm':
cube.coord(coord).convert_units('km')
if __name__ == '__main__':
main()
| 29.951807 | 79 | 0.580853 | 620 | 4,972 | 4.535484 | 0.287097 | 0.017781 | 0.02845 | 0.038407 | 0.388336 | 0.302276 | 0.280583 | 0.251422 | 0.19239 | 0.173186 | 0 | 0.026107 | 0.291231 | 4,972 | 165 | 80 | 30.133333 | 0.77185 | 0.098954 | 0 | 0.25 | 0 | 0 | 0.063648 | 0.013713 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054348 | false | 0 | 0.065217 | 0 | 0.163043 | 0.01087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d380729626394d54cdb858039c05f741e3b17ed1 | 359 | py | Python | warmup/sherlockbeast/sherlockbeast.py | antfarmar/hackerrank | 87aa6ec8abd35746f209efdaa29b1799fd03baaa | [
"Unlicense"
] | 1 | 2021-09-09T02:02:53.000Z | 2021-09-09T02:02:53.000Z | warmup/sherlockbeast/sherlockbeast.py | antfarmar/hackerrank | 87aa6ec8abd35746f209efdaa29b1799fd03baaa | [
"Unlicense"
] | null | null | null | warmup/sherlockbeast/sherlockbeast.py | antfarmar/hackerrank | 87aa6ec8abd35746f209efdaa29b1799fd03baaa | [
"Unlicense"
] | null | null | null | from sys import stdin
# file = stdin  # toggle to read from standard input instead of the data file
file = open(r".\data\sherlockbeast.txt")  # stdin
data = list(map(int, file.read().strip().split()))[1:]
file.close()

# A "decent" number uses only the digits 5 and 3: the count of 5s must be
# divisible by 3 and the count of 3s by 5. With k = 3x + 5y digits, maximize
# x (the count of leading 5s) to get the largest such number,
# e.g. k=11 -> x=2, y=1 -> '5'*6 + '3'*5 = 55555533333.
for k in data:
    for x in range(k // 3, -1, -1):
        if (k - 3 * x) % 5 == 0:
            y = (k - 3 * x) // 5
            print('5' * 3 * x + '3' * 5 * y)
            break
        elif x == 0:
            print(-1)
| 19.944444 | 54 | 0.512535 | 66 | 359 | 2.787879 | 0.515152 | 0.032609 | 0.032609 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064886 | 0.270195 | 359 | 17 | 55 | 21.117647 | 0.637405 | 0.119777 | 0 | 0 | 0 | 0 | 0.083333 | 0.076923 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d383caabaf3ad3e599d36baea06fb49c58be08ee | 208 | py | Python | atcoder/other/lang_update_202001/abc083_b.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | atcoder/other/lang_update_202001/abc083_b.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | atcoder/other/lang_update_202001/abc083_b.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | def main() -> None:
    N, A, B = map(int, input().split())
    ans = 0
    # Sum every i in [1, N] whose decimal digit sum lies in [A, B]; the boolean
    # comparison acts as a 0/1 multiplier. e.g. input "20 2 5" -> 84.
    for i in range(1, N + 1):
        ans += i * (A <= sum(map(int, str(i))) <= B)
    print(ans)
if __name__ == '__main__':
main()
| 18.909091 | 52 | 0.466346 | 34 | 208 | 2.617647 | 0.647059 | 0.134831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020833 | 0.307692 | 208 | 10 | 53 | 20.8 | 0.597222 | 0 | 0 | 0 | 0 | 0 | 0.038462 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d383fefb1598ef6ec1a75585c0aa27732689abcd | 1,601 | py | Python | quadruped_spring/env/env_randomizers/env_randomizer_collection.py | francescovezzi/quadruped_spring | 23848496ac7a4508e8a0f527e961c7956fd12f95 | [
"MIT"
] | 3 | 2022-02-21T22:30:21.000Z | 2022-03-03T12:59:25.000Z | quadruped_spring/env/env_randomizers/env_randomizer_collection.py | francescovezzi/quadruped_spring | 23848496ac7a4508e8a0f527e961c7956fd12f95 | [
"MIT"
] | 1 | 2022-03-28T09:22:50.000Z | 2022-03-28T16:44:46.000Z | quadruped_spring/env/env_randomizers/env_randomizer_collection.py | francescovezzi/quadruped_spring | 23848496ac7a4508e8a0f527e961c7956fd12f95 | [
"MIT"
] | null | null | null | from quadruped_spring.env.env_randomizers.env_randomizer import (
EnvRandomizerDisturbance,
EnvRandomizerInitialConfiguration,
EnvRandomizerMasses,
EnvRandomizerSprings,
)
from quadruped_spring.utils.base_collection import CollectionBase
# Implemented environment randomizers for deep reinforcement learning:
# "MASS_RANDOMIZER": randomly changes the mass values of the robot links and
#                    adds a mass to the trunk to vary its COM
# "DISTURBANCE_RANDOMIZER": applies a random external force to the robot trunk
#                           once per episode
# "SETTLING_RANDOMIZER": adds some noise to the robot settling configuration
# "SPRING_RANDOMIZER": changes spring stiffness and damping
class EnvRandomizerCollection(CollectionBase):
"""Utility to collect all the implemented environment randomizers."""
def __init__(self):
super().__init__()
self._MASS_RANDOMIZER = EnvRandomizerMasses
self._DISTURBANCE = EnvRandomizerDisturbance
self._SETTLING = EnvRandomizerInitialConfiguration
self._SPRINGS = EnvRandomizerSprings
self._dict = {
"MASS_RANDOMIZER": [self._MASS_RANDOMIZER],
"DISTURBANCE_RANDOMIZER": [self._DISTURBANCE],
"SETTLING_RANDOMIZER": [self._SETTLING],
"MASS_SETTLING_RANDOMIZER": [self._MASS_RANDOMIZER, self._SETTLING],
"SPRING_RANDOMIZER": [self._SPRINGS],
"ALL_RANDOMIZERS": [self._MASS_RANDOMIZER, self._DISTURBANCE, self._SETTLING],
}
self._element_type = "env randomizer"
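
# Hypothetical lookup sketch ("MASS_SETTLING_RANDOMIZER" is one of the keys
# defined above; the _dict attribute is assumed to be how CollectionBase
# resolves names to randomizer classes):
# randomizer_classes = EnvRandomizerCollection()._dict["MASS_SETTLING_RANDOMIZER"]
# randomizers = [cls() for cls in randomizer_classes]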
| 44.472222 | 90 | 0.712055 | 152 | 1,601 | 7.210526 | 0.460526 | 0.089416 | 0.065693 | 0.051095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.217364 | 1,601 | 35 | 91 | 45.742857 | 0.874701 | 0.339163 | 0 | 0 | 0 | 0 | 0.120459 | 0.043977 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.086957 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d385a78415912448a87e4e43be4dcba83acd9566 | 17,144 | py | Python | training/common/dataset_training_deploy_pipeline.py | ivanmkc/vertex-ai-project-example | 070c8d5f534a1fc9f235eb041337592af074de7f | [
"MIT"
] | 1 | 2021-08-24T07:56:10.000Z | 2021-08-24T07:56:10.000Z | training/common/dataset_training_deploy_pipeline.py | ivanmkc/vertex-ai-project-example | 070c8d5f534a1fc9f235eb041337592af074de7f | [
"MIT"
] | null | null | null | training/common/dataset_training_deploy_pipeline.py | ivanmkc/vertex-ai-project-example | 070c8d5f534a1fc9f235eb041337592af074de7f | [
"MIT"
] | 1 | 2022-03-30T04:17:58.000Z | 2022-03-30T04:17:58.000Z | from typing import Any, Callable, Dict, Optional, Sequence, Tuple
import abc
import dataclasses
from google_cloud_pipeline_components import aiplatform as gcc_aip
import kfp
from kfp.v2.dsl import component, importer, Condition, Dataset, Model
import training.common.managed_dataset_pipeline as managed_dataset_pipeline
from google.cloud.aiplatform import explain
@dataclasses.dataclass
class ExportInfo:
"""Info for exporting a trained, exportable Model to a location specified by the user.
A Model is considered to be exportable if it has at least one `supported_export_formats`.
Either `artifact_destination` or `image_destination` must be provided.
Usage:
ExportInfo(
export_format_id='tf-saved-model'
artifact_destination='gs://my-bucket/models/'
)
or
ExportInfo(
export_format_id='custom-model'
image_destination='us-central1-docker.pkg.dev/projectId/repo/image'
)
Args:
export_format_id (str):
Required. The ID of the format in which the Model must be exported.
The list of export formats that this Model supports can be found
by calling `Model.supported_export_formats`.
artifact_destination (str):
The Cloud Storage location where the Model artifact is to be
written to. Under the directory given as the destination a
new one with name
"``model-export-<model-display-name>-<timestamp-of-export-call>``",
where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
format, will be created. Inside, the Model and any of its
supporting files will be written.
This field should only be set when, in [Model.supported_export_formats],
the value for the key given in `export_format_id` contains ``ARTIFACT``.
image_destination (str):
The Google Container Registry or Artifact Registry URI where
the Model container image will be copied to. Accepted forms:
- Google Container Registry path. For example:
``gcr.io/projectId/imageName:tag``.
- Artifact Registry path. For example:
``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``.
This field should only be set when, in [Model.supported_export_formats],
the value for the key given in `export_format_id` contains ``IMAGE``.
sync (bool):
Whether to execute this export synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
output_info (Dict[str, str]):
Details of the completed export with output destination paths to
the artifacts or container image.
Raises:
ValueError if model does not support exporting.
ValueError if invalid arguments or export formats are provided.
"""
export_format_id: str
artifact_destination: Optional[str] = None
image_destination: Optional[str] = None
@dataclasses.dataclass
class DeployInfo:
"""Info for deploying a model to endpoint. Endpoint will be created if unspecified.
Args:
endpoint ("Endpoint"):
Optional. Endpoint to deploy model to. If not specified, endpoint
display name will be model display name+'_endpoint'.
deployed_model_display_name (str):
Optional. The display name of the DeployedModel. If not provided
upon creation, the Model's display_name is used.
traffic_percentage (int):
Optional. Desired traffic to newly deployed model. Defaults to
0 if there are pre-existing deployed models. Defaults to 100 if
there are no pre-existing deployed models. Negative values should
not be provided. Traffic of previously deployed models at the endpoint
will be scaled down to accommodate new deployed model's traffic.
Should not be provided if traffic_split is provided.
traffic_split (Dict[str, int]):
Optional. A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives
no traffic. The traffic percentage values must add up to 100, or
map must be empty if the Endpoint is to not accept any traffic at
the moment. Key for model being deployed is "0". Should not be
provided if traffic_percentage is provided.
machine_type (str):
Optional. The type of machine. Not specifying machine type will
result in model to be deployed with automatic resources.
min_replica_count (int):
Optional. The minimum number of machine replicas this deployed
model will be always deployed on. If traffic against it increases,
it may dynamically be deployed onto more replicas, and as traffic
decreases, some of these extra replicas may be freed.
max_replica_count (int):
Optional. The maximum number of replicas this deployed model may
be deployed on when the traffic against it increases. If requested
value is too large, the deployment will error, but if deployment
succeeds then the ability to scale the model to that many replicas
is guaranteed (barring service outages). If traffic against the
deployed model increases beyond what its replicas at maximum may
handle, a portion of the traffic will be dropped. If this value
is not provided, the smaller value of min_replica_count or 1 will
be used.
accelerator_type (str):
Optional. Hardware accelerator type. Must also set accelerator_count if used.
One of ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4
accelerator_count (int):
Optional. The number of accelerators to attach to a worker replica.
service_account (str):
The service account that the DeployedModel's container runs as. Specify the
email address of the service account. If this service account is not
specified, the container runs as a service account that doesn't have access
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
Both `explanation_metadata` and `explanation_parameters` must be
passed together when used. For more details, see
`Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Model and all sub-resources of this Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
endpoint ("Endpoint"):
Endpoint with the deployed model.
"""
# endpoint: Optional["Endpoint"] = (None,)
deployed_model_display_name: Optional[str] = None
traffic_percentage: Optional[int] = 0
traffic_split: Optional[Dict[str, int]] = None
machine_type: Optional[str] = None
min_replica_count: int = 1
max_replica_count: int = 1
accelerator_type: Optional[str] = None
accelerator_count: Optional[int] = None
service_account: Optional[str] = None
explanation_metadata: Optional[explain.ExplanationMetadata] = None
explanation_parameters: Optional[explain.ExplanationParameters] = None
metadata: Optional[Sequence[Tuple[str, str]]] = ()
encryption_spec_key_name: Optional[str] = None
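
    # Hypothetical example (all values are placeholders): serve on one to three
    # n1-standard-4 replicas and route all traffic to the new model.
    # DeployInfo(machine_type="n1-standard-4", traffic_percentage=100,
    #            min_replica_count=1, max_replica_count=3)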
class DatasetTrainingDeployPipeline(managed_dataset_pipeline.ManagedDatasetPipeline):
"""
    Creates a new Vertex AI managed dataset and trains an arbitrary AutoML or custom model.
"""
def __init__(
self,
name: str,
managed_dataset: managed_dataset_pipeline.ManagedDataset,
metric_key_for_comparison: str,
is_metric_greater_better: bool,
deploy_info: Optional[DeployInfo],
export_info: Optional[ExportInfo],
):
super().__init__(name=name, managed_dataset=managed_dataset)
self.metric_key_for_comparison = metric_key_for_comparison
self.is_metric_greater_better = is_metric_greater_better
self.deploy_info = deploy_info
self.export_info = export_info
@abc.abstractmethod
    def create_training_op(
        self, project: str, pipeline_root: str, dataset: Dataset
    ) -> Callable:
pass
@abc.abstractmethod
def create_get_metric_op(
self,
project: str,
location: str,
pipeline_root: str,
model: Model,
metric_name: str,
) -> Optional[Callable]:
pass
@abc.abstractmethod
def create_get_incumbent_metric_op(
self, project: str, pipeline_root: str, model: Model, metric_name: str
) -> Optional[Callable]:
pass
def create_pipeline_metric_comparison_op(
self,
project: str,
location: str,
pipeline_root: str,
metric: float,
incumbent_metric: float,
) -> Optional[Callable]:
@component(
packages_to_install=[
"google-cloud-storage",
"google-cloud-aiplatform",
"pandas",
]
)
def pipeline_metric_comparison_op(
project: str,
location: str,
metric: float,
incumbent_metric: float,
is_greater_better: bool = True,
) -> bool:
            # With no incumbent metric to beat, accept the candidate model.
            if incumbent_metric is None:
                return True
            # A missing candidate metric can never beat an incumbent.
            elif metric is None:
                return False
else:
if is_greater_better:
return metric >= incumbent_metric
else:
return metric <= incumbent_metric
return pipeline_metric_comparison_op(
project=project,
location=location,
metric=metric,
incumbent_metric=incumbent_metric,
is_greater_better=self.is_metric_greater_better,
)
def create_confusion_matrix_op(
self, project: str, pipeline_root: str
) -> Optional[Callable]:
return None
def create_classification_report_op(
self, project: str, pipeline_root: str
) -> Optional[Callable]:
return None
def create_model_history_op(
self, project: str, pipeline_root: str
) -> Optional[Callable]:
return None
def create_model_history_test_op(
self, project: str, pipeline_root: str
) -> Optional[Callable]:
return None
def create_pipeline(
self, project: str, location: str, pipeline_run_name: str, pipeline_root: str
) -> Callable[..., Any]:
@kfp.dsl.pipeline(name=self.name, pipeline_root=pipeline_root)
def pipeline():
dataset_op = self.managed_dataset.as_kfp_op(
project=project, location=location
)
training_op = self.create_training_op(
project=project, pipeline_root=pipeline_root, dataset=dataset_op.output
)
confusion_matrix_op = self.create_confusion_matrix_op(
project=project,
pipeline_root=pipeline_root,
)
if confusion_matrix_op:
confusion_matrix_op = confusion_matrix_op.after(training_op)
classification_report_op = self.create_classification_report_op(
project=project,
pipeline_root=pipeline_root,
)
if classification_report_op:
classification_report_op = classification_report_op.after(training_op)
model_history_op = self.create_model_history_op(
project=project,
pipeline_root=pipeline_root,
)
if model_history_op:
model_history_op = model_history_op.after(training_op)
model_history_test_op = self.create_model_history_test_op(
project=project,
pipeline_root=pipeline_root,
)
if model_history_test_op:
model_history_test_op = model_history_test_op.after(training_op)
get_metric_op = self.create_get_metric_op(
project=project,
location=location,
pipeline_root=pipeline_root,
model=training_op.output,
metric_name=self.metric_key_for_comparison,
)
if get_metric_op:
get_metric_op = get_metric_op.after(training_op)
get_incumbent_metric_op = self.create_get_incumbent_metric_op(
project=project,
pipeline_root=pipeline_root,
model=training_op.output,
metric_name=self.metric_key_for_comparison,
)
if get_incumbent_metric_op:
get_incumbent_metric_op = get_incumbent_metric_op.after(training_op)
            # Initialise so the guard below is safe when either metric op is missing.
            pipeline_metric_comparison_op = None
            if get_metric_op and get_incumbent_metric_op:
pipeline_metric_comparison_op = (
self.create_pipeline_metric_comparison_op(
project=project,
location=location,
pipeline_root=pipeline_root,
metric=get_metric_op.output,
incumbent_metric=get_incumbent_metric_op.output,
)
)
            if pipeline_metric_comparison_op and (self.deploy_info or self.export_info):
with Condition(
pipeline_metric_comparison_op.output == "true",
name="post_train_decision",
):
# if self.deploy_info:
# deploy_op = gcc_aip.ModelDeployOp(
# project=project,
# model=training_op.output,
# # endpoint=self.deploy_info.endpoint,
# deployed_model_display_name=self.deploy_info.deployed_model_display_name,
# # traffic_percentage=self.deploy_info.traffic_percentage,
# traffic_split=self.deploy_info.traffic_split,
# machine_type=self.deploy_info.machine_type,
# min_replica_count=self.deploy_info.min_replica_count,
# max_replica_count=self.deploy_info.max_replica_count,
# accelerator_type=self.deploy_info.accelerator_type,
# accelerator_count=self.deploy_info.accelerator_count,
# service_account=self.deploy_info.service_account,
# explanation_metadata=self.deploy_info.explanation_metadata,
# explanation_parameters=self.deploy_info.explanation_parameters,
# # metadata=self.deploy_info.metadata,
# encryption_spec_key_name=self.deploy_info.encryption_spec_key_name,
# )
if self.export_info:
export_op = gcc_aip.ModelExportOp(
model=training_op.output,
export_format_id=self.export_info.export_format_id,
artifact_destination=self.export_info.artifact_destination,
image_destination=self.export_info.image_destination,
)
return pipeline
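
# Hypothetical compile step with the KFP v2 compiler ("pipeline.json" is an
# arbitrary output path; `my_pipeline` stands for a concrete subclass instance):
# from kfp.v2 import compiler
# compiler.Compiler().compile(
#     pipeline_func=my_pipeline.create_pipeline(project, location, run_name, root),
#     package_path="pipeline.json",
# )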
| 43.292929 | 103 | 0.631941 | 1,954 | 17,144 | 5.332651 | 0.195496 | 0.029942 | 0.022841 | 0.020729 | 0.288388 | 0.229846 | 0.175432 | 0.162092 | 0.136852 | 0.114779 | 0 | 0.002878 | 0.310954 | 17,144 | 395 | 104 | 43.402532 | 0.879201 | 0.491601 | 0 | 0.342105 | 0 | 0 | 0.008835 | 0.002822 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063158 | false | 0.015789 | 0.042105 | 0.021053 | 0.257895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3860320a8f96b2cb4f1e64f815a4818f17ba4f3 | 1,293 | py | Python | solutions/107.py | abawchen/leetcode | 41d3b172a7694a46a860fbcb0565a3acccd000f2 | [
"MIT"
] | null | null | null | solutions/107.py | abawchen/leetcode | 41d3b172a7694a46a860fbcb0565a3acccd000f2 | [
"MIT"
] | null | null | null | solutions/107.py | abawchen/leetcode | 41d3b172a7694a46a860fbcb0565a3acccd000f2 | [
"MIT"
] | null | null | null | """
https://leetcode.com/problems/binary-tree-level-order-traversal-ii/description
Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its bottom-up level order traversal as:
[
[15,7],
[9,20],
[3]
]
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if root is None:
return []
from collections import defaultdict
level = 0
levels = defaultdict(list)
levels[0] = [root]
ans = []
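        # BFS level by level; prepending each completed level yields bottom-up order.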
while len(levels[level]) != 0:
tmp = []
for node in levels[level]:
tmp.append(node.val)
if node.left is not None:
levels[level+1].append(node.left)
if node.right is not None:
levels[level+1].append(node.right)
ans.insert(0, tmp)
level += 1
return ans
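
# Hypothetical usage sketch (mirrors the example in the header comment):
# root = TreeNode(3); root.left = TreeNode(9); root.right = TreeNode(20)
# root.right.left = TreeNode(15); root.right.right = TreeNode(7)
# Solution().levelOrderBottom(root)  # -> [[15, 7], [9, 20], [3]]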
| 24.396226 | 145 | 0.54215 | 167 | 1,293 | 4.173653 | 0.419162 | 0.057389 | 0.081779 | 0.05165 | 0.166428 | 0.088953 | 0.088953 | 0.088953 | 0 | 0 | 0 | 0.033175 | 0.347254 | 1,293 | 52 | 146 | 24.865385 | 0.792654 | 0.368136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.04 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d38e659f1b9f7b0e2e90d4b321084e607d07d859 | 834 | py | Python | extraResources/electron-backend/routes/toc_pdf.py | vilaj46/ad1-ad2-briefs | 8bd5de28315a0525b28adb4cf8f1a7d22eefef25 | [
"MIT"
] | null | null | null | extraResources/electron-backend/routes/toc_pdf.py | vilaj46/ad1-ad2-briefs | 8bd5de28315a0525b28adb4cf8f1a7d22eefef25 | [
"MIT"
] | null | null | null | extraResources/electron-backend/routes/toc_pdf.py | vilaj46/ad1-ad2-briefs | 8bd5de28315a0525b28adb4cf8f1a7d22eefef25 | [
"MIT"
] | null | null | null | import fitz
import os
from classes.Table_Of_Contents import get_my_toc
from classes.Brief import get_my_brief
def toc_pdf():
BRIEF = get_my_brief()
doc = BRIEF.data['document']
TABLE_OF_CONTENTS = get_my_toc()
page_number_start = TABLE_OF_CONTENTS.data['pageNumberStartForMe']
page_number_end = TABLE_OF_CONTENTS.data['pageNumberEndForMe']
    # the TOC data stores False when a page number is unknown, hence the
    # stringified comparison below
    if str(page_number_start) != 'False' and str(page_number_end) != 'False':
toc_doc = fitz.open()
toc_doc.insertPDF(doc, from_page=page_number_start,
to_page=page_number_end)
file_path = BRIEF.data['filePath']
        last_slash = file_path.rindex('\\')  # NOTE: assumes Windows-style paths; os.path.dirname would be portable
output_path = file_path[0:last_slash + 1] + 'tocPDF.pdf'
toc_doc.save(output_path)
toc_doc.close()
return output_path
return ''
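# Standalone sketch of the same PyMuPDF page-extraction technique (comments
# only, since this module imports app-specific singletons). "input.pdf" and
# the page range are placeholders:
#
#   src = fitz.open("input.pdf")
#   excerpt = fitz.open()
#   excerpt.insertPDF(src, from_page=0, to_page=2)  # same legacy API as above
#   excerpt.save("tocPDF.pdf")
#   excerpt.close()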
| 32.076923 | 77 | 0.678657 | 115 | 834 | 4.547826 | 0.373913 | 0.114723 | 0.114723 | 0.072658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003077 | 0.220624 | 834 | 25 | 78 | 33.36 | 0.801538 | 0 | 0 | 0 | 0 | 0 | 0.091127 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.190476 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d390ad35f1f75a5024b0e3c28f3a3904dc282f20 | 1,555 | py | Python | utils.py | ksajan/Dominant_color-HexCode- | 4e5487e5e534222a77fd8279ffbbb7b676a97dea | [
"MIT"
] | null | null | null | utils.py | ksajan/Dominant_color-HexCode- | 4e5487e5e534222a77fd8279ffbbb7b676a97dea | [
"MIT"
] | null | null | null | utils.py | ksajan/Dominant_color-HexCode- | 4e5487e5e534222a77fd8279ffbbb7b676a97dea | [
"MIT"
] | null | null | null | # import the necessary packages
import numpy as np
import cv2
def centroid_histogram(clt):
# grab the number of different clusters and create a histogram
# based on the number of pixels assigned to each cluster
print(clt)
numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
(hist, _) = np.histogram(clt.labels_, bins = numLabels)
# normalize the histogram, such that it sums to one
hist = hist.astype("float")
hist /= hist.sum()
# return the histogram
return hist
#print(hist)
def plot_colors(hist, centroids):
# initialize the bar chart representing the relative frequency
# of each of the colors
bar = np.zeros((50, 300, 3), dtype = "uint8")
#print(centroids)
#print(hist)
startX = 0
# loop over the percentage of each cluster and the color of
# each cluster
for (percent, color) in zip(hist, centroids):
#normcolor = color.astype("uint8")
#print(normcolor)
# plot the relative percentage of each cluster
endX = startX + (percent * 300)
        # print(percent)
cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),
color.tolist(), -1)
startX = endX
# return the bar chart
return bar
#color.astype("uint8")
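# Example pipeline (a sketch; assumes scikit-learn is available and that
# "image.jpg" is a placeholder for a real local image): fit k-means on the
# pixels, then use the helpers above to summarize the dominant colors.
if __name__ == "__main__":
    from sklearn.cluster import KMeans
    image = cv2.cvtColor(cv2.imread("image.jpg"), cv2.COLOR_BGR2RGB)
    pixels = image.reshape((-1, 3))
    clt = KMeans(n_clusters=3).fit(pixels)
    bar = plot_colors(centroid_histogram(clt), clt.cluster_centers_)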
"""
if [255,255,255] in normcolor:
normcolor = np.delete(normcolor, [255,255,255])
return normcolor
if [0,0,0] in normcolor:
normcolor = np.delete(normcolor, [0,0,0])
return normcolor
if [128,128,128] in normcolor:
normcolor = np.delete(normcolor, [128,128,128])
return normcolor
else:
return normcolor
""" | 28.796296 | 64 | 0.669453 | 218 | 1,555 | 4.752294 | 0.37156 | 0.042471 | 0.037645 | 0.063707 | 0.107143 | 0.107143 | 0 | 0 | 0 | 0 | 0 | 0.051682 | 0.216077 | 1,555 | 54 | 65 | 28.796296 | 0.798195 | 0.362058 | 0 | 0 | 0 | 0 | 0.017094 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3914a636ecdc5d216e629807b7d61aa41084744 | 3,031 | py | Python | html2txt/converters/html2markdown.py | renesugar/html2txt | 069ff7048417737f9072dea86dd6a33b31049b2a | [
"MIT"
] | null | null | null | html2txt/converters/html2markdown.py | renesugar/html2txt | 069ff7048417737f9072dea86dd6a33b31049b2a | [
"MIT"
] | null | null | null | html2txt/converters/html2markdown.py | renesugar/html2txt | 069ff7048417737f9072dea86dd6a33b31049b2a | [
"MIT"
] | 2 | 2021-09-20T21:47:22.000Z | 2021-12-10T03:59:58.000Z | import os
import sys
import argparse
from html2txt import parsers
#from importlib import reload # reload
#reload(parsers)
#!/usr/bin/env python3
# Copyright (c) 2020 Rene Sugar.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
class Html2Markdown(object):
  def __init__(self):  # fixed: was "__init", which never ran as the constructor
self.root_ = None
def convert(self, data):
p = parsers.HtmlParser()
self.root_ = p.parse(data)
m = parsers.MarkdownVisitor()
m.visit(self.root_)
return m.text
@property
def root(self):
return self.root_
def html_to_markdown(data):
hmd = Html2Markdown()
return hmd.convert(data)
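# Quick usage sketch (hedged; the exact Markdown produced depends on the
# html2txt parser rules, so shown as comments only):
#
#   md = html_to_markdown("<h1>Title</h1><p>Hello <em>world</em></p>")
#   print(md)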
def checkExtension(file, exts):
name, extension = os.path.splitext(file)
extension = extension.lstrip(".")
processFile = 0
if len(extension) == 0:
processFile = 0
elif len(exts) == 0:
processFile = 1
elif extension in exts:
processFile = 1
else:
processFile = 0
return processFile
def checkExclusion(dir, rootPath, excludePaths):
processDir = 0
if (dir[0:1] == "."):
processDir = 0
elif os.path.join(rootPath,dir) in excludePaths:
processDir = 0
else:
processDir = 1
return processDir
def filelist(dir, excludePaths, exts):
allfiles = []
for path, subdirs, files in os.walk(dir):
files = [os.path.join(path,x) for x in files if checkExtension(x, exts)]
# "[:]" alters the list of subdirectories walked by os.walk
# https://stackoverflow.com/questions/10620737/efficiently-removing-subdirectories-in-dirnames-from-os-walk
subdirs[:] = [os.path.join(path,x) for x in subdirs if checkExclusion(x, path, excludePaths)]
allfiles.extend(files)
for x in subdirs:
allfiles.extend(filelist(x, excludePaths, exts))
return allfiles
def main():
parser = argparse.ArgumentParser(description="mdtext")
parser.add_argument("--path", help="Base path of the project to be scanned", default=".")
parser.add_argument("--root", help="Root path of the project to be scanned", default="/")
parser.add_argument("--prefix", help="Replace root path with this prefix", default="/")
parser.add_argument("--extensions", help="File extensions that are processed", default=".html.svg")
parser.add_argument("--exclude", nargs='*', help="Paths of folders to exclude", default=[])
args = vars(parser.parse_args())
basePath = os.path.abspath(os.path.expanduser(args['path']))
rootPath = args['root']
rootPrefix = args['prefix']
fileExtensions = args['extensions'].lstrip(".").split(".")
excludePaths = args['exclude']
# Remove trailing path separator from each exclude path
excludePaths[:] = [x.rstrip(os.sep) for x in excludePaths]
files = filelist(basePath, excludePaths, fileExtensions)
# Read each file
for file in files:
file_canonical = file.replace(rootPath, rootPrefix, 1)
print("file = %s" % (file_canonical,))
with open(file, 'r') as f:
lines = f.readlines()
data = ''.join(lines)
md = html_to_markdown(data)
print(md)
if __name__ == "__main__":
main()
| 27.0625 | 111 | 0.684922 | 395 | 3,031 | 5.189873 | 0.367089 | 0.017561 | 0.041463 | 0.035122 | 0.070244 | 0.070244 | 0.070244 | 0.070244 | 0.049756 | 0.049756 | 0 | 0.012053 | 0.178819 | 3,031 | 111 | 112 | 27.306306 | 0.811571 | 0.132959 | 0 | 0.133333 | 0 | 0 | 0.10848 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106667 | false | 0 | 0.053333 | 0.013333 | 0.253333 | 0.026667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d391ef9e203cd6b88ad46e17124f330475edc703 | 6,311 | py | Python | models/GAN_D.py | pkulwj1994/AdversarialConsistentScoreMatching | f439f242f004ce06382ed72f2aa7daf9c262abfa | [
"MIT"
] | 119 | 2020-09-09T13:59:28.000Z | 2022-03-17T17:04:10.000Z | models/GAN_D.py | pkulwj1994/AdversarialConsistentScoreMatching | f439f242f004ce06382ed72f2aa7daf9c262abfa | [
"MIT"
] | 2 | 2020-11-13T03:26:22.000Z | 2021-03-19T23:04:33.000Z | models/GAN_D.py | pkulwj1994/AdversarialConsistentScoreMatching | f439f242f004ce06382ed72f2aa7daf9c262abfa | [
"MIT"
] | 19 | 2020-09-14T05:56:51.000Z | 2021-12-28T15:53:34.000Z | import torch
import torch.nn.utils.spectral_norm as spectral_norm
class Activation(torch.nn.Module):
def __init__(self, arch):
super().__init__()
self.act = torch.nn.LeakyReLU(0.1 if arch == 1 else .02, inplace=True)
def forward(self, x):
return self.act(x)
def weights_init(m):
classname = m.__class__.__name__
if "Conv" in classname:
m.weight.data.normal_(0.0, 0.02)
elif "BatchNorm" in classname:
m.weight.data.normal_(1.0, 0.02) # Estimated variance, musst be around 1
m.bias.data.fill_(0) # Estimated mean, must be around 0
class DCGAN_D1(torch.nn.Module):
def __init__(self, a_config):
super().__init__()
self.dense = torch.nn.Linear(512 * 4 * 4, 1)
act = lambda: Activation(a_config.arch)
def addbn(m, size):
if a_config.no_batch_norm_D:
return
m.append(torch.nn.BatchNorm2d(size))
if a_config.spectral:
model = [spectral_norm(torch.nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=True)), act(),
spectral_norm(torch.nn.Conv2d(64, 64, kernel_size=4, stride=2, padding=1, bias=True)), act(),
spectral_norm(torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True)), act(),
spectral_norm(torch.nn.Conv2d(128, 128, kernel_size=4, stride=2, padding=1, bias=True)), act(),
spectral_norm(torch.nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True)), act(),
spectral_norm(torch.nn.Conv2d(256, 256, kernel_size=4, stride=2, padding=1, bias=True)), act(),
spectral_norm(torch.nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True)), act()]
else:
model = [torch.nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=True)]
addbn(model, 64)
model += [act()]
model += [torch.nn.Conv2d(64, 64, kernel_size=4, stride=2, padding=1, bias=True)]
addbn(model, 64)
model += [act()]
model += [torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True)]
addbn(model, 128)
model += [act()]
model += [torch.nn.Conv2d(128, 128, kernel_size=4, stride=2, padding=1, bias=True)]
addbn(model, 128)
model += [act()]
model += [torch.nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True)]
addbn(model, 256)
model += [act()]
model += [torch.nn.Conv2d(256, 256, kernel_size=4, stride=2, padding=1, bias=True)]
addbn(model, 256)
model += [act()]
model += [torch.nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True)]
model += [act()]
self.model = torch.nn.Sequential(*model)
def forward(self, inpt):
output = self.dense(self.model(inpt).view(-1, 512 * 4 * 4)).view(-1)
return output
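# Smoke test for DCGAN_D1 (a sketch, not part of the original module). The
# config fields are assumptions inferred from the attribute accesses above;
# spectral=True takes the spectral-norm branch, and a 32x32 input matches the
# 512 * 4 * 4 dense layer.
if __name__ == "__main__":
    from types import SimpleNamespace
    cfg = SimpleNamespace(arch=1, spectral=True, no_batch_norm_D=False)
    disc = DCGAN_D1(cfg)
    scores = disc(torch.randn(2, 3, 32, 32))
    print(scores.shape)  # expected: torch.Size([2])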
# TODO can i kill this
class DCGAN_D0(torch.nn.Module):
def __init__(self, a_config):
super().__init__()
main = torch.nn.Sequential()
### Start block
# Size = n_colors x image_size x image_size
if a_config.spectral:
main.add_module('Start-SpectralConv2d', torch.nn.utils.spectral_norm(
torch.nn.Conv2d(3, a_config.D_h_size, kernel_size=4, stride=2, padding=1, bias=False)))
else:
main.add_module('Start-Conv2d',
torch.nn.Conv2d(3, a_config.D_h_size, kernel_size=4, stride=2, padding=1, bias=False))
        # NOTE: fixed from the original "self.args.*": this class never sets
        # self.args, and the rest of __init__ (e.g. a_config.Tanh_GD below)
        # reads these flags from a_config.
        if a_config.SELU:
            main.add_module('Start-SELU', torch.nn.SELU(inplace=True))
        else:
            if a_config.Tanh_GD:
                main.add_module('Start-Tanh', torch.nn.Tanh())
            else:
                main.add_module('Start-LeakyReLU', Activation(a_config.arch))
        image_size_new = a_config.image_size // 2
# Size = D_h_size x image_size/2 x image_size/2
### Middle block (Done until we reach ? x 4 x 4)
mult = 1
ii = 0
while image_size_new > 4:
if a_config.spectral:
main.add_module('Middle-SpectralConv2d [%d]' % ii, torch.nn.utils.spectral_norm(
torch.nn.Conv2d(a_config.D_h_size * mult, a_config.D_h_size * (2 * mult), kernel_size=4, stride=2,
padding=1, bias=False)))
else:
main.add_module('Middle-Conv2d [%d]' % ii,
torch.nn.Conv2d(a_config.D_h_size * mult, a_config.D_h_size * (2 * mult),
kernel_size=4, stride=2, padding=1, bias=False))
            if a_config.SELU:  # fixed from self.args (see note above)
main.add_module('Middle-SELU [%d]' % ii, torch.nn.SELU(inplace=True))
else:
if not a_config.no_batch_norm_D and not a_config.spectral:
main.add_module('Middle-BatchNorm2d [%d]' % ii,
torch.nn.BatchNorm2d(a_config.D_h_size * (2 * mult)))
if a_config.Tanh_GD:
main.add_module('Start-Tanh [%d]' % ii, torch.nn.Tanh())
else:
main.add_module('Middle-LeakyReLU [%d]' % ii, Activation(a_config.arch))
# Size = (D_h_size*(2*i)) x image_size/(2*i) x image_size/(2*i)
image_size_new = image_size_new // 2
mult *= 2
ii += 1
### End block
# Size = (D_h_size * mult) x 4 x 4
if a_config.spectral:
main.add_module('End-SpectralConv2d', torch.nn.utils.spectral_norm(
torch.nn.Conv2d(a_config.D_h_size * mult, 1, kernel_size=4, stride=1, padding=0, bias=False)))
else:
main.add_module('End-Conv2d',
torch.nn.Conv2d(a_config.D_h_size * mult, 1, kernel_size=4, stride=1, padding=0,
bias=False))
# Size = 1 x 1 x 1 (Is a real cat or not?)
self.main = main
def forward(self, inpt):
output = self.main(inpt)
# Convert from 1 x 1 x 1 to 1 so that we can compare to given label (cat or not?)
return output.view(-1)
| 44.758865 | 118 | 0.555855 | 883 | 6,311 | 3.799547 | 0.137033 | 0.077198 | 0.077496 | 0.066766 | 0.685246 | 0.661103 | 0.600596 | 0.524292 | 0.49687 | 0.481371 | 0 | 0.055364 | 0.310252 | 6,311 | 140 | 119 | 45.078571 | 0.715369 | 0.073206 | 0 | 0.309091 | 0 | 0 | 0.038937 | 0.003602 | 0 | 0 | 0 | 0.007143 | 0 | 1 | 0.072727 | false | 0 | 0.018182 | 0.009091 | 0.154545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d39260545154fd2d5d15f18cd19472a84271771d | 4,038 | py | Python | rpxdock/tests/body/test_body.py | quecloud/rpxdock | 41f7f98f5dacf24fc95897910263a0bec2209e59 | [
"Apache-2.0"
] | null | null | null | rpxdock/tests/body/test_body.py | quecloud/rpxdock | 41f7f98f5dacf24fc95897910263a0bec2209e59 | [
"Apache-2.0"
] | null | null | null | rpxdock/tests/body/test_body.py | quecloud/rpxdock | 41f7f98f5dacf24fc95897910263a0bec2209e59 | [
"Apache-2.0"
] | 1 | 2020-04-13T20:07:52.000Z | 2020-04-13T20:07:52.000Z | import _pickle
from time import perf_counter
import numpy as np, rpxdock as rp, rpxdock.homog as hm
from rpxdock.body import Body
def test_body(C2_3hm4, C3_1nza, sym1=2, sym2=3):
body1 = Body(C2_3hm4, sym1)
body2 = Body(C3_1nza, sym2)
assert body1.bvh_bb.max_id() == body1.nres - 1
assert body1.bvh_cen.max_id() == body1.nres - 1
assert body2.bvh_bb.max_id() == body2.nres - 1
assert body2.bvh_cen.max_id() == body2.nres - 2 # GLY
resl = 5
samp1 = range(0, 360 // sym1, resl)
samp2 = range(0, 360 // sym2, resl)
   samp3 = range(-10, 11, resl)
   # unit slide directions in the xz plane, tilted up to +-10 degrees off x
   samp3 = [[np.cos(d / 180 * np.pi), 0, np.sin(d / 180 * np.pi)] for d in samp3]
r1 = hm.hrot([0, 0, 1], 1, degrees=True)
best, bestpos = -9e9, None
t = perf_counter()
totslide = 0
nsamp, nhit = 0, 0
for a1 in samp1:
for a2 in samp2:
for dirn in samp3:
body1.move_to_center()
body2.move_to_center()
tmp = perf_counter()
d = body1.slide_to(body2, dirn)
totslide += perf_counter() - tmp
nsamp += 1
if d < 9e8:
nhit += 1
p = body1.contact_pairs(body2, 8.0)
if len(p) > 0:
p2 = body1.positioned_cen()[p[:, 0]]
p3 = body2.positioned_cen()[p[:, 1]]
assert np.max(np.linalg.norm(p3 - p2, axis=1)) < 8
if len(p) > best:
best = len(p)
bestpos = body1.pos.copy(), body2.pos.copy()
body2.move_by(r1)
body1.move_by(r1)
t = perf_counter() - t
print("best", best, "time", t, "rate", nsamp / t, "hitfrac", nhit / nsamp)
print(
"totslide",
totslide,
"slide/s",
nsamp / totslide,
"sqrt(npair)",
np.sqrt(len(body1.ss) * len(body2.ss)),
len(body1.ss),
len(body2.ss),
)
# print(bestpos[0])
# print(bestpos[1])
body1.move_to(bestpos[0])
body2.move_to(bestpos[1])
# body1.dump_pdb_from_bodies("body1.pdb")
# body2.dump_pdb_from_bodies("body2.pdb")
def test_body_pickle(C3_1nza, tmpdir):
b = Body(C3_1nza)
with open(tmpdir + "/a", "wb") as out:
_pickle.dump(b, out)
with open(tmpdir + "/a", "rb") as inp:
b2 = _pickle.load(inp)
assert np.allclose(b.coord, b2.coord)
assert np.allclose(b.pos, b2.pos)
assert np.allclose(b.cen, b2.cen)
assert b.sym == b2.sym
assert b.nfold == b2.nfold
assert np.all(b.seq == b2.seq)
assert np.all(b.ss == b2.ss)
assert np.allclose(b.chain, b2.chain)
assert np.allclose(b.resno, b2.resno)
assert np.allclose(b.bvh_bb.centers(), b2.bvh_bb.centers())
assert np.allclose(b.bvh_cen.centers(), b2.bvh_cen.centers())
def test_body_copy_sym(body_tiny):
c2 = body_tiny.copy_with_sym('C2')
rot = hm.hrot([0, 0, 1], np.pi)
rotated = rot @ body_tiny.coord[:, :, :, None]
# assert np.allclose(rotated.squeeze(), c2.coord[14:28])
assert np.allclose(rotated.squeeze(), c2.coord[21:])
def test_body_copy_xform(body_tiny):
x = hm.hrot([1, 1, 1], np.pi / 3) @ hm.htrans([1, 0, 0])
b2 = body_tiny.copy_xformed(x)
rotated = x @ body_tiny.coord[:, :, :, None]
assert np.allclose(rotated.squeeze(), b2.coord)
if __name__ == "__main__":
# from rpxdock.rosetta.triggers_init import get_pose_cached
# from tempfile import mkdtemp
# f1 = "rpxdock/data/pdb/C2_3hm4_1.pdb.gz"
# f2 = "rpxdock/data/pdb/C3_1nza_1.pdb.gz"
# f1 = "/home/sheffler/scaffolds/big/C2_3jpz_1.pdb"
# f2 = "/home/sheffler/scaffolds/big/C3_3ziy_1.pdb"
# f1 = "/home/sheffler/scaffolds/wheel/C3.pdb"
# f2 = "/home/sheffler/scaffolds/wheel/C5.pdb"
# pose1 = get_pose_cached(f1)
# pose2 = get_pose_cached(f2)
# test_body(pose1, pose2)
# test_body_pickle(f2, mkdtemp())
b = rp.data.get_body('tiny')
test_body_copy_sym(b)
test_body_copy_xform(b)
# nres 306 309 sqnpair 307 new 17743/s orig 13511/s
# nres 728 1371 sqnpair 999 new 8246/s orig 4287/s
# nres 6675 8380 sqnpair 7479 new 8629/s orig 627/s
| 33.65 | 81 | 0.603269 | 637 | 4,038 | 3.675039 | 0.268446 | 0.044425 | 0.068347 | 0.050833 | 0.152499 | 0.093977 | 0.058949 | 0.040154 | 0.040154 | 0 | 0 | 0.077705 | 0.244676 | 4,038 | 119 | 82 | 33.932773 | 0.689836 | 0.199108 | 0 | 0 | 0 | 0 | 0.020859 | 0 | 0 | 0 | 0 | 0 | 0.206897 | 1 | 0.045977 | false | 0 | 0.045977 | 0 | 0.091954 | 0.022989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d39b5b5c2f8b7b36185feda5514fb6b6d986e3a3 | 5,153 | py | Python | JumpscaleCore/clients/threebot/ThreebotClient.py | grimpy/jumpscaleX_core | c24d6d47fccc0801e578fedb376ef110f7a00bad | [
"Apache-2.0"
] | null | null | null | JumpscaleCore/clients/threebot/ThreebotClient.py | grimpy/jumpscaleX_core | c24d6d47fccc0801e578fedb376ef110f7a00bad | [
"Apache-2.0"
] | null | null | null | JumpscaleCore/clients/threebot/ThreebotClient.py | grimpy/jumpscaleX_core | c24d6d47fccc0801e578fedb376ef110f7a00bad | [
"Apache-2.0"
] | null | null | null | import nacl
from Jumpscale import j
import binascii
JSConfigBase = j.baseclasses.object_config
from nacl.signing import VerifyKey
from nacl.public import PrivateKey, PublicKey, SealedBox
from Jumpscale.clients.gedis.GedisClient import GedisClientActors
class ThreebotClient(JSConfigBase):
_SCHEMATEXT = """
@url = jumpscale.threebot.client
name** = "" #is the bot dns
tid** = 0 (I) #threebot id
host = "127.0.0.1" (S) #for caching purposes
port = 8901 (ipport) #for caching purposes
pubkey = "" #for caching purposes
"""
def _init(self, **kwargs):
self._pubkey_obj = None
self._verifykey_obj = None
self._sealedbox_ = None
self._gedis_connections = {}
assert self.name != ""
@property
def actors_base(self):
cl = j.clients.gedis.get(name=self.name, host=self.host, port=self.port, package_name="zerobot.base")
return cl.actors
def client_get(self, packagename):
        if packagename not in self._gedis_connections:
key = "%s__%s" % (self.name, packagename.replace(".", "__"))
cl = j.clients.gedis.get(name=key, port=8901, package_name=packagename)
self._gedis_connections[packagename] = cl
return self._gedis_connections[packagename]
def actors_get(self, package_name=None, status="installed"):
"""Get actors for package_name given. If status="all" then all the actors will be returned
:param package_name: name of package to be loaded that has the actors needed. If value is "all" then all actors from all packages are retrieved
:type package_name: str
:return: actors of package(s)
:type return: GedisClientActors (contains all the actors as properties)
"""
if not package_name:
actors = GedisClientActors()
package_manager_actor = j.clients.gedis.get(
name="packagemanager", host=self.host, port=self.port, package_name="zerobot.packagemanager"
).actors.package_manager
for package in package_manager_actor.packages_list(status=status).packages:
name = package.name
if name not in self._gedis_connections:
g = j.clients.gedis.get(
name=f"{name}_{self.name}", host=self.host, port=self.port, package_name=name
)
self._gedis_connections[name] = g
for k, v in self._gedis_connections[name].actors._ddict.items():
setattr(actors, k, v)
return actors
else:
if package_name not in self._gedis_connections:
g = j.clients.gedis.get(
name=f"{package_name}_{self.name}", host=self.host, port=self.port, package_name=package_name
)
self._gedis_connections[package_name] = g
return self._gedis_connections[package_name].actors
def reload(self):
for key, g in self._gedis_connections.items():
g.reload()
@property
def actors_all(self):
return self.actors_get(status="installed")
def encrypt_for_threebot(self, data, hex=False):
"""
Encrypt data using the public key of the remote threebot
        :param data: data to be encrypted; str input is encoded to bytes first
        :return: encrypted data, hex-encoded if hex=True, otherwise binary
"""
if isinstance(data, str):
data = data.encode()
res = self._sealedbox.encrypt(data)
if hex:
res = binascii.hexlify(res)
return res
def verify_from_threebot(self, data, signature, data_is_hex=False):
"""
        :param data: hex string (or binary, with data_is_hex=True) to verify against the verification key of the threebot that sent it
        :param signature: signature bytes, or a 128-character hex string
        :return: the verified message; raises nacl.exceptions.BadSignatureError on failure
"""
if isinstance(data, str) or data_is_hex:
data = binascii.unhexlify(data)
if len(signature) == 128:
signature = binascii.unhexlify(signature)
return self.verifykey_obj.verify(data, signature=signature)
@property
def _sealedbox(self):
if not self._sealedbox_:
self._sealedbox_ = SealedBox(self.pubkey_obj)
return self._sealedbox_
@property
def pubkey_obj(self):
if not self._pubkey_obj:
self._pubkey_obj = self.verifykey_obj.to_curve25519_public_key()
return self._pubkey_obj
@property
def verifykey_obj(self):
if not self._verifykey_obj:
assert self.pubkey
verifykey = binascii.unhexlify(self.pubkey)
assert len(verifykey) == 32
self._verifykey_obj = VerifyKey(verifykey)
return self._verifykey_obj
def test_auth(self, bot_id):
nacl_cl = j.data.nacl.get()
nacl_cl._load_singing_key()
epoch = str(j.data.time.epoch)
signed_message = nacl_cl.sign(epoch.encode()).hex()
cmd = "auth {} {} {}".format(bot_id, epoch, signed_message)
        return self._gedis._redis.execute_command(cmd)  # NOTE: assumes a _gedis connection was attached to this client elsewhere
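# Sketch of the crypto round-trip behind encrypt_for_threebot /
# verify_from_threebot (comments only, since importing this module requires a
# Jumpscale environment). All key material here would be freshly generated:
#
#   sk = nacl.signing.SigningKey.generate()
#   verify_key = sk.verify_key                 # its hex encoding is what .pubkey stores
#   sealed = SealedBox(verify_key.to_curve25519_public_key()).encrypt(b"hello")
#   signed = sk.sign(b"payload")
#   verify_key.verify(b"payload", signature=signed.signature)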
| 37.889706 | 151 | 0.619445 | 618 | 5,153 | 4.980583 | 0.236246 | 0.05718 | 0.071475 | 0.025991 | 0.144899 | 0.107862 | 0.094867 | 0.094867 | 0.094867 | 0.078947 | 0 | 0.006818 | 0.288376 | 5,153 | 135 | 152 | 38.17037 | 0.832561 | 0.126334 | 0 | 0.070707 | 0 | 0 | 0.102418 | 0.016651 | 0 | 0 | 0 | 0 | 0.030303 | 1 | 0.121212 | false | 0 | 0.060606 | 0.010101 | 0.313131 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d39bc4b7aec9047b6724d025f5c54216daae68b1 | 3,669 | py | Python | f5/bigip/tm/analytics/test/unit/test_dos_vis_common.py | hixio-mh/f5-common-python | 53038d44afa381b70f6e2bb459f7b9b943f3172d | [
"Apache-2.0"
] | 272 | 2016-02-23T06:05:44.000Z | 2022-02-20T02:09:32.000Z | f5/bigip/tm/analytics/test/unit/test_dos_vis_common.py | hixio-mh/f5-common-python | 53038d44afa381b70f6e2bb459f7b9b943f3172d | [
"Apache-2.0"
] | 1,103 | 2016-02-11T17:48:03.000Z | 2022-02-15T17:13:37.000Z | f5/bigip/tm/analytics/test/unit/test_dos_vis_common.py | hixio-mh/f5-common-python | 53038d44afa381b70f6e2bb459f7b9b943f3172d | [
"Apache-2.0"
] | 167 | 2016-02-11T17:48:21.000Z | 2022-01-17T20:13:05.000Z | # Copyright 2018 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.bigip import ManagementRoot
from f5.bigip.resource import OrganizingCollection
from f5.bigip.tm.analytics.dos_vis_common import Generate_Report
from f5.bigip.tm.analytics.dos_vis_common import Report_Results
from f5.sdk_exception import MissingRequiredCreationParameter
from f5.sdk_exception import UnsupportedOperation
import mock
import pytest
from six import iterkeys
@pytest.fixture
def FakeGenerateReport():
fake_analytics = mock.MagicMock()
fake_genrep = Generate_Report(fake_analytics)
fake_genrep._meta_data['bigip'].tmos_version = '13.1.0'
return fake_genrep
@pytest.fixture
def FakeReportResults():
fake_analytics = mock.MagicMock()
fake_repres = Report_Results(fake_analytics)
return fake_repres
class TestDosVisCommonOC(object):
def test_collection(self, fakeicontrolsession):
b = ManagementRoot('192.168.1.1', 'admin', 'admin')
t1 = b.tm.analytics.dos_vis_common
assert isinstance(t1, OrganizingCollection)
assert hasattr(t1, 'generate_reports')
assert hasattr(t1, 'report_results_s')
class TestGenerateReport(object):
def test_modify_raises(self, FakeGenerateReport):
with pytest.raises(UnsupportedOperation):
FakeGenerateReport.modify()
def test_create_no_args(self, FakeGenerateReport):
with pytest.raises(MissingRequiredCreationParameter):
FakeGenerateReport.create()
def test_create_two(self, fakeicontrolsession):
b = ManagementRoot('192.168.1.1', 'admin', 'admin')
t1 = b.tm.analytics.dos_vis_common.generate_reports.generate_report
t2 = b.tm.analytics.dos_vis_common.generate_reports.generate_report
assert t1 is t2
def test_collection(self, fakeicontrolsession):
b = ManagementRoot('192.168.1.1', 'admin', 'admin')
t = b.tm.analytics.dos_vis_common.generate_reports
test_meta = t._meta_data['attribute_registry']
test_meta2 = t._meta_data['allowed_lazy_attributes']
kind = 'tm:analytics:dos-vis-common:generate-report:avrgeneratereporttaskitemstate'
assert kind in list(iterkeys(test_meta))
assert Generate_Report in test_meta2
assert t._meta_data['object_has_stats'] is False
class TestReportResults(object):
def test_create_raises(self, FakeReportResults):
with pytest.raises(UnsupportedOperation):
FakeReportResults.create()
def test_modify_raises(self, FakeReportResults):
with pytest.raises(UnsupportedOperation):
FakeReportResults.modify()
def test_collection(self, fakeicontrolsession):
b = ManagementRoot('192.168.1.1', 'admin', 'admin')
t = b.tm.analytics.dos_vis_common.report_results_s
test_meta = t._meta_data['attribute_registry']
test_meta2 = t._meta_data['allowed_lazy_attributes']
kind = 'tm:analytics:dos-vis-common:report-results:avrreportresultitemstate'
assert kind in list(iterkeys(test_meta))
assert Report_Results in test_meta2
assert t._meta_data['object_has_stats'] is False
| 37.824742 | 91 | 0.738348 | 458 | 3,669 | 5.722707 | 0.303493 | 0.037772 | 0.048073 | 0.058375 | 0.508966 | 0.426555 | 0.423502 | 0.413583 | 0.317818 | 0.287295 | 0 | 0.020847 | 0.176342 | 3,669 | 96 | 92 | 38.21875 | 0.846459 | 0.15045 | 0 | 0.34375 | 0 | 0 | 0.123146 | 0.060284 | 0 | 0 | 0 | 0 | 0.15625 | 1 | 0.15625 | false | 0 | 0.140625 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d39c8a12749bed6d31aa0a5acbe582b417d5d34f | 2,822 | py | Python | clustering_methods/kmeans.py | AlgoLab/celluloid | 7d4e9d20563a1c140879433b371fd10a1708b362 | [
"MIT"
] | null | null | null | clustering_methods/kmeans.py | AlgoLab/celluloid | 7d4e9d20563a1c140879433b371fd10a1708b362 | [
"MIT"
] | null | null | null | clustering_methods/kmeans.py | AlgoLab/celluloid | 7d4e9d20563a1c140879433b371fd10a1708b362 | [
"MIT"
] | 1 | 2019-11-09T02:50:03.000Z | 2019-11-09T02:50:03.000Z | import numpy as np
from sklearn.cluster import KMeans
import argparse, os, sys, errno
parser = argparse.ArgumentParser(description='K-means clustering')
parser.add_argument('-f', '--file', type=str, required=True,
help='SCS matrix')
parser.add_argument('-k', type=int, required=True,
help='K value of k-means')
parser.add_argument('-c', '--cluster', required=True,
choices=['cells', 'mutations', 'both'],
help='Cluster either cells or mutations')
parser.add_argument('-o', '--outdir', type=str, required=True,
help='output path')
args = parser.parse_args()
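# Example invocation (file names are placeholders):
#   python kmeans.py -f scs_input.matrix -k 10 -c mutations -o results/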
scs_matrix_input = np.loadtxt(args.file, dtype='d', delimiter=' ')
# create output directory if it doesn't already exists
try:
os.makedirs(args.outdir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(args.outdir):
pass
else:
raise
def cluster_and_output(k, matrix, clust_type, inputpath, outdir):  # clust_type is accepted for symmetry but not used below
kmeans = KMeans(n_clusters=k)
kmeans.fit(matrix)
from collections import defaultdict
cluster_groups = defaultdict(list)
for j in range(matrix.shape[0]):
cluster_groups[kmeans.labels_[j]].append(j)
tot_rows = 0
for cluster in cluster_groups:
tot_rows += len(cluster_groups[cluster])
filename = os.path.splitext(os.path.basename(inputpath))[0]
outfile = os.path.join(outdir, filename)
centroids = kmeans.cluster_centers_
out_matrix = list()
for c in centroids:
x = list(map(int, list(map(round, c))))
out_matrix.append(x)
out_matrix = np.transpose(np.array(out_matrix))
np.savetxt('{0}_kmeans.matrix'.format(outfile), out_matrix, fmt='%d', delimiter=' ')
with open('{0}_kmeans_clusters.txt'.format(outfile), 'w+') as file_out:
for cluster in sorted(cluster_groups):
file_out.write('{0}\t"{1}"\n'.format(
cluster, ','.join([ str(x+1) for x in cluster_groups[cluster]])
))
with open('{0}_kmeans.mutations'.format(outfile), 'w+') as file_out:
for cluster in sorted(cluster_groups):
file_out.write('{0}\n'.format(
','.join([ str(x+1) for x in cluster_groups[cluster]])
))
print('Done.')
if args.cluster == 'cells':
# print('Clustering cells')
# cluster_and_output(args.k, scs_matrix_input, args.cluster, args.file, args.outdir)
print('not fully implemented yet')
elif args.cluster == 'mutations':
scs_matrix_input = np.transpose(scs_matrix_input)
print('Clustering mutations')
cluster_and_output(args.k, scs_matrix_input, args.cluster, args.file, args.outdir)
elif args.cluster == 'both':
print('not implemented yet')
else:
sys.exit('Something very wrong happened.')
| 32.813953 | 88 | 0.644578 | 376 | 2,822 | 4.707447 | 0.332447 | 0.058757 | 0.039548 | 0.021469 | 0.213559 | 0.187571 | 0.187571 | 0.187571 | 0.187571 | 0.187571 | 0 | 0.004966 | 0.215096 | 2,822 | 85 | 89 | 33.2 | 0.794131 | 0.057052 | 0 | 0.096774 | 0 | 0 | 0.129518 | 0.00866 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016129 | false | 0.016129 | 0.064516 | 0 | 0.080645 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d39cc52a8b68a5236fbfd51eb2a40568ace59d48 | 9,725 | py | Python | uai_tools/uai_tool.py | soar-zhengjian/uai-sdk | e195bd3fb2b97aca7dac6722d332c25b7070481f | [
"Apache-2.0"
] | 38 | 2017-04-26T04:00:09.000Z | 2022-02-10T02:51:05.000Z | uai_tools/uai_tool.py | soar-zhengjian/uai-sdk | e195bd3fb2b97aca7dac6722d332c25b7070481f | [
"Apache-2.0"
] | 17 | 2017-11-20T20:47:09.000Z | 2022-02-09T23:48:46.000Z | uai_tools/uai_tool.py | soar-zhengjian/uai-sdk | e195bd3fb2b97aca7dac6722d332c25b7070481f | [
"Apache-2.0"
] | 28 | 2017-07-08T05:23:13.000Z | 2020-08-18T03:12:27.000Z | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
from uai.utils.utils import param_filter
from uai.utils.logger import uai_logger
from uai.operation.tar.caffe_tar_op import UaiServiceCaffeTarOp
from uai.operation.tar.keras_tar_op import UaiServiceKerasTarOp
from uai.operation.tar.mxnet_tar_op import UaiServiceMxnetTarOp
from uai.operation.tar.tf_tar_op import UaiServiceTFTarOp
from uai.operation.pack.caffe_pack_op import UaiServiceCaffePackOp
from uai.operation.pack.keras_pack_op import UaiServiceKerasPackOp
from uai.operation.pack.mxnet_pack_op import UaiServiceMxnetPackOp
from uai.operation.pack.tf_pack_op import UaiServiceTFPackOp
from uai.operation.packdocker.caffe_packdocker_op import UaiServiceCaffeDockerPackOp
from uai.operation.packdocker.keras_packdocker_op import UaiServiceKerasDockerPackOp
from uai.operation.packdocker.mxnet_packdocker_op import UaiServiceMxnetDockerPackOp
from uai.operation.packdocker.tf_packdocker_op import UaiServiceTFDockerPackOp
from uai.operation.packdocker.self_define_packdokcer_op import UaiServiceSelfDockerPackOp
from uai.operation.create_uaiservice.create_uaiservice import UaiServiceCreateOp
from uai.operation.delete_uaiservice.delete_uaiservice import UaiServiceDeleteOp
from uai.operation.deploy_uaiservice.deploy_uaiservice import UaiServiceDeployByUfileOp
from uai.operation.deploy_uaiservice_docker.deploy_uaiservice_docker import UaiServiceDeployByDockerOp
from uai.operation.list_uaiservice.list_uaiservice import UaiServiceListServiceOp
from uai.operation.list_uaiversion.list_uaiversion import UaiServiceListSrvVersionOp
from uai.operation.modify_service_name.modify_service_name import UaiServiceModifyServiceNameOp
from uai.operation.modify_version_memo.modify_version_memo import UaiServiceModifySrvVersionMemoOp
from uai.operation.modify_version_weight.modify_version_weight import UaiServiceModifySrvVersionWeightOp
from uai.operation.modify_node_count.modify_node_count import UaiServiceModifySrvVersionNodeCountOp
from uai.operation.start_uaiservice.start_uaiservice import UaiServiceStartServiceOp
from uai.operation.stop_uaiservice.stop_uaiservice import UaiServiceStopServiceOp
from uai.operation.get_real_time_metric.get_real_time_metric import UaiServiceGetUAISrvRealTimeMetricOp
def parse_args(subparser):
create_parser = subparser.add_parser('create', help='Create UAI Service')
delete_parser = subparser.add_parser('delete', help='Delete UAI Service')
deploy_parser = subparser.add_parser('deploy', help='Deploy UAI Service by Ufile')
deploy_docker_parser = subparser.add_parser('deploydocker', help='Deploy UAI Service by Docker')
list_service_parser = subparser.add_parser('listservice', help='List UAI Service')
list_verison_parser = subparser.add_parser('listversion', help='List UAI Service Version')
modify_name_parser = subparser.add_parser('modifyname', help='Modify UAI Service Name')
modify_memo_parser = subparser.add_parser('modifymemo', help='Modify UAI Service Memo')
modify_weight_parser = subparser.add_parser('modifyweight', help='Modify UAI Service Version Weight')
modify_node_count_parser = subparser.add_parser('modifynodecount', help='Set UAI Service Node Count')
start_parser = subparser.add_parser('start', help='Start UAI Service')
stop_parser = subparser.add_parser('stop', help='Stop UAI Service')
metric_parser = subparser.add_parser('metric', help='Get real-time metric of UAI Service')
tar_parser = subparser.add_parser('tar', help='Tar User Files for UAI Service')
ai_tar_parser = tar_parser.add_subparsers(dest='ai_arch_type', help='ai_arch_type')
caffe_tar_parser = ai_tar_parser.add_parser('caffe', help='Tar Caffe User Files for UAI Service')
keras_tar_parser = ai_tar_parser.add_parser('keras', help='Tar Keras User Files for UAI Service')
mxnet_tar_parser = ai_tar_parser.add_parser('mxnet', help='Tar Mxnet User Files for UAI Service')
tf_tar_parser = ai_tar_parser.add_parser('tf', help='Tar Tensorflow User Files for UAI Service')
pack_parser = subparser.add_parser('pack', help='Pack User Files for UAI Service')
ai_pack_parser = pack_parser.add_subparsers(dest='ai_arch_type', help='ai_arch_type')
caffe_pack_parser = ai_pack_parser.add_parser('caffe', help='Pack Caffe User Files for UAI Service')
keras_pack_parser = ai_pack_parser.add_parser('keras', help='Pack Keras User Files for UAI Service')
mxnet_pack_parser = ai_pack_parser.add_parser('mxnet', help='Pack MXNet User Files for UAI Service')
tf_pack_parser = ai_pack_parser.add_parser('tf', help='Pack TF User Files for UAI Service')
packdocker_parser = subparser.add_parser('packdocker', help='Packdocker User Files for UAI Service')
ai_packdocker_parser = packdocker_parser.add_subparsers(dest='ai_arch_type', help='ai_arch_type')
caffe_packdocker_parser = ai_packdocker_parser.add_parser('caffe', help='Pack Docker of Caffe for UAI Service')
keras_packdocker_parser = ai_packdocker_parser.add_parser('keras', help='Pack Docker of Keras for UAI Service')
mxnet_packdocker_parser = ai_packdocker_parser.add_parser('mxnet', help='Pack Docker of MXNet for UAI Service')
tf_packdocker_parser = ai_packdocker_parser.add_parser('tf', help='Pack Docker of TF for UAI Service')
self_packdocker_parser = ai_packdocker_parser.add_parser('self', help='Pack Self-Defined Docker for UAI Service')
create_op = UaiServiceCreateOp(create_parser)
delete_op = UaiServiceDeleteOp(delete_parser)
deploy_op = UaiServiceDeployByUfileOp(deploy_parser)
docker_deploy_op = UaiServiceDeployByDockerOp(deploy_docker_parser)
    list_service_op = UaiServiceListServiceOp(list_service_parser)
list_version_op = UaiServiceListSrvVersionOp(list_verison_parser)
modify_name_op = UaiServiceModifyServiceNameOp(modify_name_parser)
modify_memo_op = UaiServiceModifySrvVersionMemoOp(modify_memo_parser)
modify_weight_op = UaiServiceModifySrvVersionWeightOp(modify_weight_parser)
modify_node_count_op = UaiServiceModifySrvVersionNodeCountOp(modify_node_count_parser)
start_op = UaiServiceStartServiceOp(start_parser)
stop_op = UaiServiceStopServiceOp(stop_parser)
metric_op = UaiServiceGetUAISrvRealTimeMetricOp(metric_parser)
caffe_tar_op = UaiServiceCaffeTarOp(caffe_tar_parser)
keras_tar_op = UaiServiceKerasTarOp(keras_tar_parser)
mxnet_tar_op = UaiServiceMxnetTarOp(mxnet_tar_parser)
tf_tar_op = UaiServiceTFTarOp(tf_tar_parser)
caffe_pack_op = UaiServiceCaffePackOp(caffe_pack_parser)
keras_pack_op = UaiServiceKerasPackOp(keras_pack_parser)
mxnet_pack_op = UaiServiceMxnetPackOp(mxnet_pack_parser)
tf_pack_op = UaiServiceTFPackOp(tf_pack_parser)
caffe_packdocker_op = UaiServiceCaffeDockerPackOp(caffe_packdocker_parser)
keras_packdocker_op = UaiServiceKerasDockerPackOp(keras_packdocker_parser)
mxnet_packdocker_op = UaiServiceMxnetDockerPackOp(mxnet_packdocker_parser)
tf_packdocker_op = UaiServiceTFDockerPackOp(tf_packdocker_parser)
self_packdocker_op = UaiServiceSelfDockerPackOp(self_packdocker_parser)
tar_op_dic = {
"caffe": caffe_tar_op,
"keras": keras_tar_op,
"mxnet": mxnet_tar_op,
"tf": tf_tar_op,
}
pack_op_dic = {
"caffe": caffe_pack_op,
"keras": keras_pack_op,
"mxnet": mxnet_pack_op,
"tf": tf_pack_op,
}
docker_pack_op_dic = {
"caffe": caffe_packdocker_op,
"keras": keras_packdocker_op,
"mxnet": mxnet_packdocker_op,
"tf": tf_packdocker_op,
"self": self_packdocker_op,
}
cmd_op_dic = {
"create": create_op,
"delete": delete_op,
"deploy": deploy_op,
"deploydocker": docker_deploy_op,
"listservice": list_servie_op,
"listversion": list_version_op,
"modifyname": modify_name_op,
"modifymemo": modify_memo_op,
"modifyweight": modify_weight_op,
"modifynodecount": modify_node_count_op,
"start": start_op,
"stop": stop_op,
"metric": metric_op,
"tar": tar_op_dic,
"pack": pack_op_dic,
"packdocker": docker_pack_op_dic,
}
return cmd_op_dic
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='UAI Inference Platform Commander',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
subparser = parser.add_subparsers(dest='commands', help='commands')
cmd_op_dic = parse_args(subparser)
cmd_args = param_filter(vars(parser.parse_args()))
uai_logger.info("cmd_args: {0}".format(cmd_args))
if cmd_args['commands'] == 'packdocker':
cmd_op_dic.get('packdocker').get(cmd_args['ai_arch_type']).cmd_run(cmd_args)
elif cmd_args['commands'] == 'pack':
cmd_op_dic.get('pack').get(cmd_args['ai_arch_type']).cmd_run(cmd_args)
elif cmd_args['commands'] == 'tar':
cmd_op_dic.get('tar').get(cmd_args['ai_arch_type']).cmd_run(cmd_args)
else:
cmd_op_dic.get(cmd_args['commands']).cmd_run(cmd_args)
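# Example invocations (a sketch; each sub-command's remaining flags are
# defined by the corresponding Uai*Op class):
#   python uai_tool.py listservice ...
#   python uai_tool.py pack tf ...
#   python uai_tool.py deploy ...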
| 54.027778 | 117 | 0.778303 | 1,241 | 9,725 | 5.771958 | 0.144239 | 0.036437 | 0.058076 | 0.053609 | 0.193355 | 0.154125 | 0.130392 | 0.040905 | 0.040905 | 0.040905 | 0 | 0.001067 | 0.132648 | 9,725 | 179 | 118 | 54.329609 | 0.848133 | 0.067558 | 0 | 0 | 0 | 0 | 0.164807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006993 | false | 0 | 0.202797 | 0 | 0.216783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3a31c5c3c5461ce7d67c2eebcd521d785635f6d | 815 | py | Python | QUIS SISTER 1/Muhammad Syiarul Amrullah/Semaphore.py | muhammadarl/Python-Pararel_SISTER | c38c694b059c545f66c7f4dfa50746bffc6e4e4f | [
"MIT"
] | null | null | null | QUIS SISTER 1/Muhammad Syiarul Amrullah/Semaphore.py | muhammadarl/Python-Pararel_SISTER | c38c694b059c545f66c7f4dfa50746bffc6e4e4f | [
"MIT"
] | 2 | 2022-03-23T13:29:15.000Z | 2022-03-23T15:55:47.000Z | QUIS SISTER 1/Muhammad Syiarul Amrullah/Semaphore.py | muhammadarl/Python-Pararel_SISTER | c38c694b059c545f66c7f4dfa50746bffc6e4e4f | [
"MIT"
] | 2 | 2022-03-23T03:36:52.000Z | 2022-03-23T03:41:55.000Z | import logging
import threading
import time
import random
LOG_FORMAT = '%(asctime)s %(threadName)-17s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
semaphore = threading.Semaphore(0)
item = 0
def pelanggan():  # "customer": waits for and receives a queue number
    logging.info('Pelanggan meminta antrian')  # customer requests a queue number
    semaphore.acquire()
    logging.info('Pelanggan menerima: antrian no {}'.format(item))  # customer receives: queue number {}
def antrian():  # "queue": generates a number, then releases the semaphore
    global item
    time.sleep(3)
    item = random.randint(0, 1000)
    logging.info('antrian generate: no antrian {}'.format(item))  # queue generates: queue number {}
    semaphore.release()
def main():
for i in range(3):
t1 = threading.Thread(target=pelanggan)
t2 = threading.Thread(target=antrian)
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == "__main__":
main()
| 19.404762 | 72 | 0.655215 | 100 | 815 | 5.24 | 0.47 | 0.083969 | 0.076336 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027907 | 0.208589 | 815 | 41 | 73 | 19.878049 | 0.784496 | 0 | 0 | 0 | 0 | 0 | 0.188957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.142857 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3a46ac9b50468413362c945c49af04d8ae59b76 | 10,094 | py | Python | customtkinter/customtkinter_entry.py | pythub-project/CustomTkinter | bb1fe2562517928dde5a98e0dce650b9a3ef2380 | [
"CC0-1.0"
] | null | null | null | customtkinter/customtkinter_entry.py | pythub-project/CustomTkinter | bb1fe2562517928dde5a98e0dce650b9a3ef2380 | [
"CC0-1.0"
] | null | null | null | customtkinter/customtkinter_entry.py | pythub-project/CustomTkinter | bb1fe2562517928dde5a98e0dce650b9a3ef2380 | [
"CC0-1.0"
] | null | null | null | import tkinter
import sys
from .customtkinter_tk import CTk
from .customtkinter_frame import CTkFrame
from .appearance_mode_tracker import AppearanceModeTracker
from .customtkinter_theme_manager import CTkThemeManager
from .customtkinter_canvas import CTkCanvas
from .customtkinter_settings import CTkSettings
from .customtkinter_draw_engine import DrawEngine
class CTkEntry(tkinter.Frame):
def __init__(self, *args,
master=None,
bg_color=None,
fg_color="default_theme",
text_color="default_theme",
placeholder_text_color="default_theme",
text_font="default_theme",
placeholder_text=None,
corner_radius=8,
border_width=0,
border_color="default_theme",
width=120,
height=30,
**kwargs):
if master is None:
super().__init__(*args)
else:
super().__init__(*args, master=master)
# overwrite configure methods of master when master is tkinter widget, so that bg changes get applied on child CTk widget too
if isinstance(self.master, (tkinter.Tk, tkinter.Frame)) and not isinstance(self.master, (CTk, CTkFrame)):
master_old_configure = self.master.config
def new_configure(*args, **kwargs):
if "bg" in kwargs:
self.configure(bg_color=kwargs["bg"])
elif "background" in kwargs:
self.configure(bg_color=kwargs["background"])
# args[0] is dict when attribute gets changed by widget[<attribut>] syntax
elif len(args) > 0 and type(args[0]) == dict:
if "bg" in args[0]:
self.configure(bg_color=args[0]["bg"])
elif "background" in args[0]:
self.configure(bg_color=args[0]["background"])
master_old_configure(*args, **kwargs)
self.master.config = new_configure
self.master.configure = new_configure
AppearanceModeTracker.add(self.change_appearance_mode, self)
self.appearance_mode = AppearanceModeTracker.get_mode() # 0: "Light" 1: "Dark"
self.configure_basic_grid()
self.bg_color = self.detect_color_of_master() if bg_color is None else bg_color
self.fg_color = CTkThemeManager.theme["color"]["entry"] if fg_color == "default_theme" else fg_color
self.text_color = CTkThemeManager.theme["color"]["text"] if text_color == "default_theme" else text_color
self.placeholder_text_color = CTkThemeManager.theme["color"]["entry_placeholder_text"] if placeholder_text_color == "default_theme" else placeholder_text_color
self.text_font = (CTkThemeManager.theme["text"]["font"], CTkThemeManager.theme["text"]["size"]) if text_font == "default_theme" else text_font
self.border_color = CTkThemeManager.theme["color"]["entry_border"] if border_color == "default_theme" else border_color
self.placeholder_text = placeholder_text
self.placeholder_text_active = False
self.pre_placeholder_arguments = {} # some set arguments of the entry will be changed for placeholder and then set back
self.width = width
self.height = height
self.corner_radius = CTkThemeManager.theme["shape"]["button_corner_radius"] if corner_radius == "default_theme" else corner_radius
self.border_width = CTkThemeManager.theme["shape"]["entry_border_width"] if border_width == "default_theme" else border_width
if self.corner_radius*2 > self.height:
self.corner_radius = self.height/2
elif self.corner_radius*2 > self.width:
self.corner_radius = self.width/2
super().configure(width=self.width, height=self.height)
self.canvas = CTkCanvas(master=self,
highlightthickness=0,
width=self.width,
height=self.height)
self.canvas.grid(column=0, row=0, sticky="we")
self.entry = tkinter.Entry(master=self,
bd=0,
width=1,
highlightthickness=0,
font=self.text_font,
**kwargs)
self.entry.grid(column=0, row=0, sticky="we", padx=self.corner_radius if self.corner_radius >= 6 else 6)
self.draw_engine = DrawEngine(self.canvas, CTkSettings.preferred_drawing_method)
super().bind('<Configure>', self.update_dimensions)
self.entry.bind('<FocusOut>', self.set_placeholder)
self.entry.bind('<FocusIn>', self.clear_placeholder)
self.draw()
self.set_placeholder()
def destroy(self):
AppearanceModeTracker.remove(self.change_appearance_mode)
super().destroy()
def configure_basic_grid(self):
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
def detect_color_of_master(self):
if isinstance(self.master, CTkFrame):
return self.master.fg_color
else:
            try:
                return self.master.cget("bg")
            except Exception:
                # master widget has no "bg" option (e.g. some ttk widgets);
                # implicitly returns None in that case
                pass
            # print(self.master["style"])
            # return self.master.cget("background")
def update_dimensions(self, event):
# only redraw if dimensions changed (for performance)
if self.width != event.width or self.height != event.height:
# print(event.x, event.width, self.width)
self.width = event.width
self.height = event.height
self.draw()
def set_placeholder(self, event=None):
if self.placeholder_text is not None:
if not self.placeholder_text_active and self.entry.get() == "":
self.placeholder_text_active = True
self.pre_placeholder_arguments = {"show": self.entry.cget("show")}
self.entry.config(fg=CTkThemeManager.single_color(self.placeholder_text_color, self.appearance_mode), show="")
self.entry.delete(0, tkinter.END)
self.entry.insert(0, self.placeholder_text)
def clear_placeholder(self, event=None):
if self.placeholder_text_active:
self.placeholder_text_active = False
self.entry.config(fg=CTkThemeManager.single_color(self.text_color, self.appearance_mode))
self.entry.delete(0, tkinter.END)
for argument, value in self.pre_placeholder_arguments.items():
self.entry[argument] = value
def draw(self):
self.canvas.configure(bg=CTkThemeManager.single_color(self.bg_color, self.appearance_mode))
self.entry.configure(bg=CTkThemeManager.single_color(self.fg_color, self.appearance_mode),
highlightcolor=CTkThemeManager.single_color(self.fg_color, self.appearance_mode),
fg=CTkThemeManager.single_color(self.text_color, self.appearance_mode),
insertbackground=CTkThemeManager.single_color(self.text_color, self.appearance_mode))
requires_recoloring = self.draw_engine.draw_rounded_rect_with_border(self.width, self.height, self.corner_radius, self.border_width)
self.canvas.itemconfig("inner_parts",
fill=CTkThemeManager.single_color(self.fg_color, self.appearance_mode),
outline=CTkThemeManager.single_color(self.fg_color, self.appearance_mode))
self.canvas.itemconfig("border_parts",
fill=CTkThemeManager.single_color(self.border_color, self.appearance_mode),
outline=CTkThemeManager.single_color(self.border_color, self.appearance_mode))
def bind(self, *args, **kwargs):
self.entry.bind(*args, **kwargs)
def config(self, *args, **kwargs):
self.configure(*args, **kwargs)
def configure(self, *args, **kwargs):
require_redraw = False # some attribute changes require a call of self.draw() at the end
if "bg_color" in kwargs:
self.bg_color = kwargs["bg_color"]
del kwargs["bg_color"]
require_redraw = True
if "fg_color" in kwargs:
self.fg_color = kwargs["fg_color"]
del kwargs["fg_color"]
require_redraw = True
if "text_color" in kwargs:
self.text_color = kwargs["text_color"]
del kwargs["text_color"]
require_redraw = True
if "corner_radius" in kwargs:
self.corner_radius = kwargs["corner_radius"]
if self.corner_radius * 2 > self.height:
self.corner_radius = self.height / 2
elif self.corner_radius * 2 > self.width:
self.corner_radius = self.width / 2
self.entry.grid(column=0, row=0, sticky="we", padx=self.corner_radius if self.corner_radius >= 6 else 6)
del kwargs["corner_radius"]
require_redraw = True
self.entry.configure(*args, **kwargs)
if require_redraw is True:
self.draw()
def delete(self, *args, **kwargs):
self.entry.delete(*args, **kwargs)
self.set_placeholder()
return
def insert(self, *args, **kwargs):
self.clear_placeholder()
return self.entry.insert(*args, **kwargs)
def get(self):
if self.placeholder_text_active:
return ""
else:
return self.entry.get()
def change_appearance_mode(self, mode_string):
if mode_string.lower() == "dark":
self.appearance_mode = 1
elif mode_string.lower() == "light":
self.appearance_mode = 0
if isinstance(self.master, (CTkFrame, CTk)):
self.bg_color = self.master.fg_color
else:
self.bg_color = self.master.cget("bg")
self.draw()
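# Minimal usage sketch (comments only, since this module uses relative imports
# and is meant to be used through the customtkinter package):
#
#   import customtkinter
#   app = customtkinter.CTk()
#   entry = customtkinter.CTkEntry(master=app, placeholder_text="type here")
#   entry.pack(padx=20, pady=20)
#   app.mainloop()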
| 42.953191 | 167 | 0.614821 | 1,151 | 10,094 | 5.191138 | 0.149435 | 0.045188 | 0.040167 | 0.05523 | 0.373389 | 0.259414 | 0.2159 | 0.200669 | 0.161841 | 0.082678 | 0 | 0.006521 | 0.285912 | 10,094 | 234 | 168 | 43.136752 | 0.82242 | 0.051417 | 0 | 0.176796 | 0 | 0 | 0.055201 | 0.0023 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088398 | false | 0.005525 | 0.049724 | 0 | 0.176796 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3a6c62c5a61e17b49724465a6921872c6a131d6 | 694 | py | Python | dorsual/urls.py | thodel/heraldik | 5be41c9a63f971723f8601757d554a4759eba48e | [
"MIT"
] | null | null | null | dorsual/urls.py | thodel/heraldik | 5be41c9a63f971723f8601757d554a4759eba48e | [
"MIT"
] | 1 | 2020-06-16T10:12:48.000Z | 2020-06-17T11:58:14.000Z | dorsual/urls.py | thodel/heraldik | 5be41c9a63f971723f8601757d554a4759eba48e | [
"MIT"
] | 1 | 2020-06-16T09:50:58.000Z | 2020-06-16T09:50:58.000Z | from django.urls import path
from . import views
urlpatterns = [
path("", views.IndexView.as_view(), name="index"),
path('loginTranskribus/', views.loginTranskribus, name='loginTranskribus'),
path('getCollectionList/', views.getCollectionList, name='getCollectionList'),
path('getDocumentList/', views.getDocumentList, name='getDocumentList'),
path('getDocument/', views.getDocument, name='getDocument'),
path('submitJudgement/', views.submitJudgement, name='submitJudgement'),
path('checkFilter/', views.checkFilter, name='checkFilter'),
path('changeDorsualType/', views.changeDorsualType, name='changeDorsualType'),
]
| 46.266667 | 86 | 0.693084 | 58 | 694 | 8.275862 | 0.327586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.158501 | 694 | 14 | 87 | 49.571429 | 0.821918 | 0 | 0 | 0 | 0 | 0 | 0.311239 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3aad7914ff38df51594d2a2859994f30a2abace | 15,233 | py | Python | attendance_gui.py | romiembaye/wireless-student-attendance-system | 6609ee61e8b5d3be9938c94dbd2159e87d4a3198 | [
"MIT"
] | null | null | null | attendance_gui.py | romiembaye/wireless-student-attendance-system | 6609ee61e8b5d3be9938c94dbd2159e87d4a3198 | [
"MIT"
] | null | null | null | attendance_gui.py | romiembaye/wireless-student-attendance-system | 6609ee61e8b5d3be9938c94dbd2159e87d4a3198 | [
"MIT"
] | null | null | null | import os
import csv
import smtplib
import datetime
import openpyxl
import threading
from tkinter import *
from email import encoders
from tkinter import messagebox
import paho.mqtt.client as mqtt
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
class AttendanceSystemGUI:
clientName = "Attendance-Station"
mosquitoBrokerIP = "127.0.0.1"
mosquitoBrokerPort = 1883
mainWindowTitle = "Wireless Student Attendance System"
courseCode = ""
attendanceFolder = ""
    attendanceDate = datetime.datetime.now()  # NOTE: evaluated once at class creation, not per attendance session
listOfAttendees = {}
listOfNamesAndID = {}
def __init__(self):
"""DOCUMENTATION GOES HERE"""
self.mainWindow = Tk()
self.mosquitoClient = mqtt.Client(self.clientName)
frmTittle = Frame()
frmTittle.pack(side=TOP)
picTittle = PhotoImage(file="logo.png")
lblTittle = Label(frmTittle, image=picTittle)
lblTittle.image = picTittle
lblTittle.pack(side=TOP)
frmCourse = Frame(height=10)
frmCourse.pack(side=TOP)
Label(frmCourse, text="Course Code").pack(side=LEFT)
self.eCourse = Entry(frmCourse, bd=6, relief=RIDGE,
width=40, justify=CENTER)
self.eCourse.bind("<Return>", self.checkAttendanceFile)
self.eCourse.pack(side=LEFT, padx=10, pady=1)
self.eCourse.focus()
frmStatus = Frame(height=15)
frmStatus.pack(side=TOP)
picStart = PhotoImage(file="start.png")
self.btnStart = Button(frmStatus, relief=FLAT, image=picStart,
state=DISABLED, command=lambda:
[threading.Thread(target=self.startAttendance, daemon=True).start(), self.updateButtons(1)])
self.btnStart.image = picStart
self.btnStart.pack(side=LEFT)
picStop = PhotoImage(file="stop.png")
self.btnStop = Button(frmStatus, relief=FLAT, image=picStop,
state=DISABLED, command=self.stopAttendance)
self.btnStop.image = picStop
self.btnStop.pack(side=LEFT)
picExport = PhotoImage(file="export.png")
self.btnExport = Button(frmStatus, relief=FLAT, image=picExport,
state=DISABLED, command=self.updateAttendance)
self.btnExport.image = picExport
self.btnExport.pack(side=LEFT)
picEmail = PhotoImage(file="email.png")
self.btnEmail = Button(frmStatus, relief=FLAT, image=picEmail,
state=DISABLED, command=self.emailAttendance)
self.btnEmail.image = picEmail
self.btnEmail.pack(side=LEFT)
self.mainWindow.resizable(False, False)
self.mainWindow.title(self.mainWindowTitle)
self.mainWindow.geometry(
'+' + str(int(self.mainWindow.winfo_screenwidth() / 2) -
int(self.mainWindow.winfo_screenwidth() / 6)) +
'+' + str(int(self.mainWindow.winfo_screenheight() / 2) -
int(self.mainWindow.winfo_screenheight() / 6)))
self.mainWindow.protocol("WM_DELETE_WINDOW", self.exitProgram)
self.mainWindow.mainloop()
def updateButtons(self, whichButton):
if whichButton == 1:
self.btnStart.config(state=DISABLED)
self.btnStop.config(state=ACTIVE)
self.btnExport.config(state=DISABLED)
self.btnEmail.config(state=DISABLED)
print("Started Taking Attendance")
elif whichButton == 2:
self.btnStart.config(state=ACTIVE)
self.btnStop.config(state=DISABLED)
self.btnExport.config(state=ACTIVE)
self.btnEmail.config(state=DISABLED)
elif whichButton == 3:
self.btnEmail.config(state=ACTIVE)
elif whichButton == 4:
pass
elif whichButton == 5:
self.btnStart.config(state=ACTIVE)
def startAttendance(self):
"""DOCUMENTATION GOES HERE"""
"""
This function sets up and starts the Mosquito MQTT client connection
"""
self.mosquitoClient.connect(self.mosquitoBrokerIP, port=self.mosquitoBrokerPort)
self.mosquitoClient.on_message = self.readTagID
self.mosquitoClient.subscribe("ATTENDANCE")
self.mosquitoClient.publish("STATION", "1")
self.mosquitoClient.loop_forever()
def stopAttendance(self):
"""DOCUMENTATION GOES HERE"""
"""
This function stops the Mosquito MQTT client connection
"""
self.updateButtons(2)
self.mosquitoClient.publish("STATION", "0")
self.mosquitoClient.unsubscribe("ATTENDANCE")
self.mosquitoClient.loop_stop()
self.mosquitoClient.disconnect()
print("Stooped Taking Attendance")
def updateAttendance(self):
"""DOCUMENTATION GOES HERE"""
"""
This function either creates or updates the excel file with attendance information
"""
self.updateButtons(3)
if not os.path.isdir(self.attendanceFolder[0:6]):
os.mkdir(self.attendanceFolder[0:6])
with open(self.attendanceFolder + self.courseCode + '_Student_Names.csv', 'w', newline='') as csvfile:
fieldnames = ['Id', 'Name']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for items in self.listOfNamesAndID.keys():
writer.writerow({'Id': items, 'Name': self.listOfNamesAndID[items]})
print("Name and ID file Created")
if not os.path.isfile(self.attendanceFolder + self.courseCode + '_Attendance.xlsx'):
# openpyxl.Workbook() takes no path; the file is written by save() below.
attendanceFileWorkBook = openpyxl.Workbook()
attendanceFileSheet = attendanceFileWorkBook.active
attendanceFileSheet.title = 'Attendance'
attendanceFileSheet.append(["ID", "Name", str(self.attendanceDate.date())])
for records in self.listOfAttendees.keys():
attendanceFileSheet.append([records, self.listOfAttendees[records], "P"])
else:
attendanceFileWorkBook = openpyxl.load_workbook(self.attendanceFolder +
self.courseCode + '_Attendance.xlsx')
attendanceFileSheet = attendanceFileWorkBook.active
newColumn = attendanceFileSheet.max_column+1
newRow = attendanceFileSheet.max_row+1
attendanceFileSheet.cell(1, newColumn, str(self.attendanceDate.date()))
for col in attendanceFileSheet.iter_cols(1, 1, 2):
for records in self.listOfAttendees.keys():
isNewStudent = True
for currentCell in col:
if currentCell.value == records:
isNewStudent = False
attendanceFileSheet.cell(currentCell.row, newColumn, "P")
if isNewStudent:
    attendanceFileSheet.cell(newRow, 1, records)
    attendanceFileSheet.cell(newRow, 2, self.listOfAttendees[records])
    attendanceFileSheet.cell(newRow, newColumn, "P")
    newRow += 1  # move to a fresh row for the next unseen student
attendanceFileWorkBook.save(self.attendanceFolder + self.courseCode + '_Attendance.xlsx')
print("Attendance file Created")
messagebox.showinfo("Export File", "The attendance file has been created/updated successfully!")
def emailAttendance(self):
"""DOCUMENTATION GOES HERE"""
def sendEmail(emailFrom, emailPassword, emailTo, emailAttachment, emailSubject = "", emailBody = ""):
"""DOCUMENTATION GOES HERE"""
emailWindow.update()
try:
msg = MIMEMultipart()
msg['From'] = emailFrom
msg['To'] = emailTo
msg['Subject'] = emailSubject
msg.attach(MIMEText(emailBody, 'plain'))
attachment = open(emailAttachment, "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % emailAttachment.split("/")[1])
msg.attach(part)
server = smtplib.SMTP('outlook.office365.com', 587)
server.starttls()
server.login(emailFrom, emailPassword)
text = msg.as_string()
server.sendmail(emailFrom, emailTo, text)
server.quit()
except Exception:
print("Not Emailed!!!!!")
messagebox.showerror("Error", "Email could not be sent! Please try again.")
return False
else:
print("Emailed")
messagebox.showinfo("Success", "Email has been sent to " + eTo.get())
emailWindow.destroy()
return True
self.updateButtons(4)
emailWindow = Toplevel()
emailWindow.resizable(False, False)
emailWindow.title("Email Attendance")
emailWindow.grab_set()
emailWindow.geometry(
'+' + str(int(emailWindow.winfo_screenwidth() / 2) -
int(emailWindow.winfo_screenwidth() / 9)) +
'+' + str(int(emailWindow.winfo_screenheight() / 2) -
int(emailWindow.winfo_screenheight() / 14)))
Label(emailWindow, text="Email Information", font=("Courier", 16), fg="red", bg="black").pack(fill=BOTH)
frmFrom = Frame(emailWindow)
frmFrom.pack(side=TOP)
Label(frmFrom, text=" From").pack(side=LEFT)
eFrom = Entry(frmFrom, bd=6, relief=RIDGE, width=40)
eFrom.pack(side=LEFT, padx=10, pady=1)
frmPassword = Frame(emailWindow)
frmPassword.pack(side=TOP)
Label(frmPassword, text="Password").pack(side=LEFT)
ePassword = Entry(frmPassword, bd=6, relief=RIDGE, show="*", width=40)
ePassword.pack(side=LEFT, padx=10, pady=1)
frmTo = Frame(emailWindow)
frmTo.pack(side=TOP)
Label(frmTo, text=" To").pack(side=LEFT)
eTo = Entry(frmTo, bd=6, relief=RIDGE, width=40)
eTo.pack(side=LEFT, padx=10, pady=1)
frmAttachment = Frame(emailWindow)
frmAttachment.pack(side=TOP)
Label(frmAttachment, text="Attachment").pack(side=LEFT)
Label(frmAttachment, text=self.courseCode + "_Attendance.xlsx", bd=6,
bg="#3fccb5", relief=RIDGE, width=32).pack(side=LEFT, padx=10, pady=1)
frmSubject = Frame(emailWindow)
frmSubject.pack(side=TOP)
Label(frmSubject, text=" Subject").pack(side=LEFT)
eSubject = Entry(frmSubject, bd=6, relief=RIDGE, width=40)
eSubject.pack(side=LEFT, padx=10, pady=1)
picSendEmail = PhotoImage(file="email.png")
btnSendEmail = Button(emailWindow, image=picSendEmail,
command=lambda: sendEmail(eFrom.get(), ePassword.get(),
eTo.get(), self.attendanceFolder + self.courseCode +
"_Attendance.xlsx", eSubject.get()))
btnSendEmail.image = picSendEmail
btnSendEmail.pack()
emailWindow.update()
self.mainWindow.update()
def checkAttendanceFile(self, event=None):
"""DOCUMENTATION GOES HERE"""
self.courseCode = str(self.eCourse.get()).upper()
# Entry has no 'text' option; rewrite the widget contents instead
self.eCourse.delete(0, END)
self.eCourse.insert(0, self.courseCode)
# expected course code format: 9 chars - letters, separator, two digits, separator, letters
if not (len(self.courseCode) == 9) or not (self.courseCode[0:2].isalpha() and
self.courseCode[3:5].isdigit() and
self.courseCode[6:].isalpha()):
self.eCourse.config(bg="red", fg="white")
self.courseCode = ""
else:
self.eCourse.config(bg="green", fg="white")
self.attendanceFolder = self.courseCode[0:6] + '/'
# self.updateAttendance()
if not os.path.isdir(self.attendanceFolder[0:6]):
os.mkdir(self.attendanceFolder[0:6])
if os.path.isfile(self.attendanceFolder + self.courseCode + '_Student_Names.csv'):
with open(self.attendanceFolder + self.courseCode + '_Student_Names.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
self.listOfNamesAndID[row['Id']] = row['Name']
print(self.listOfNamesAndID)
self.updateButtons(5)
def readTagID(self, client, data, uid):
"""DOCUMENTATION GOES HERE"""
def newStudent():
"""DOCUMENTATION GOES HERE"""
def addStudent(event=None):
"""DOCUMENTATION GOES HERE"""
if eName.get():
self.listOfAttendees[tagID] = eName.get()
self.listOfNamesAndID[tagID] = eName.get()
print(self.listOfAttendees)
self.mosquitoClient.publish("STATION", "S")
newStudentWindow.destroy()
newStudentWindow = Toplevel()
newStudentWindow.resizable(False, False)
newStudentWindow.title("New Student")
newStudentWindow.grab_set()
newStudentWindow.geometry(
'+' + str(int(newStudentWindow.winfo_screenwidth() / 2) -
int(newStudentWindow.winfo_screenwidth() / 9)) +
'+' + str(int(newStudentWindow.winfo_screenheight() / 2) -
int(newStudentWindow.winfo_screenheight() / 14)))
Label(newStudentWindow, text="Add New", font=("Courier", 16), fg="red", bg="black").pack(fill=BOTH)
frmName = Frame(newStudentWindow)
frmName.pack(side=TOP)
Label(frmName, text="Student Name").pack(side=LEFT)
eName = Entry(frmName, bd=6, relief=RIDGE, width=40)
eName.bind("<Return>", addStudent)
eName.pack(side=LEFT, padx=10, pady=1)
picAddStudent = PhotoImage(file="add.png")
btnAdd = Button(newStudentWindow, image=picAddStudent, command=addStudent)
btnAdd.image = picAddStudent
btnAdd.pack(fill=BOTH)
eName.focus()
tagID = str(uid.payload.decode("utf-8"))
if tagID == "1":
self.stopAttendance()
elif tagID not in self.listOfAttendees:
if tagID not in self.listOfNamesAndID:
newStudent()
else:
self.listOfAttendees[tagID] = self.listOfNamesAndID[tagID]
print(self.listOfAttendees)
self.mosquitoClient.publish("STATION", "S")
else:
self.mosquitoClient.publish("STATION", "A")
print(self.listOfAttendees)
def exitProgram(self):
    """Stop the MQTT client (if it is running) and close the main window."""
    try:
        self.stopAttendance()
    except Exception:
        pass  # the client may never have been connected
    self.mainWindow.destroy()
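# A minimal test-helper sketch (not part of the original app): publish a
# fake tag read so the GUI above can be exercised without RFID hardware.
# The broker address and the "ATTENDANCE" topic match the constants in
# AttendanceSystemGUI; the tag ID value is a made-up example.
def publish_test_tag(tag_id="04A1B2C3"):
    testClient = mqtt.Client("Test-Reader")
    testClient.connect("127.0.0.1", port=1883)
    testClient.publish("ATTENDANCE", tag_id)
    testClient.disconnect()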
if __name__ == "__main__":
    attendanceSystem = AttendanceSystemGUI()
| 45.471642 | 114 | 0.590297 | 1,446 | 15,233 | 6.188105 | 0.231674 | 0.025034 | 0.024139 | 0.034198 | 0.219379 | 0.152436 | 0.100358 | 0.070407 | 0.057667 | 0.023245 | 0 | 0.011487 | 0.297052 | 15,233 | 334 | 115 | 45.607784 | 0.82415 | 0.018906 | 0 | 0.091228 | 0 | 0 | 0.066521 | 0.001437 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042105 | false | 0.031579 | 0.045614 | 0 | 0.129825 | 0.035088 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3ab1ebd9e7f99007d2c559d8136040e04d0a3aa | 1,626 | py | Python | recordpump.py | rsanger/heatpump-controller | 5007e4abf8d4f3b0be5c212663f219209324f8ec | [
"MIT"
] | 3 | 2018-02-07T17:51:56.000Z | 2019-01-31T01:47:41.000Z | recordpump.py | rsanger/heatpump-controller | 5007e4abf8d4f3b0be5c212663f219209324f8ec | [
"MIT"
] | null | null | null | recordpump.py | rsanger/heatpump-controller | 5007e4abf8d4f3b0be5c212663f219209324f8ec | [
"MIT"
] | 3 | 2017-03-22T18:53:48.000Z | 2021-12-28T13:53:51.000Z | # Copyright (c) 2017 Richard Sanger
#
# Licensed under MIT
#
# A simple debugging script to record and decode mode2 messages LIRC
# and decodes them
import os
import struct
import select
from heatpump import HeatPump
PULSE_BIT = 0x01000000
PULSE_MASK = 0x00FFFFFF
f = os.open("/dev/lirc0", os.O_RDONLY)
assert f > 0
grabbed = []
wait_pulse = True
last = None
cur = None
def decode(values):
    """Try to decode a captured pulse train into heat pump state bytes."""
global cur
global last
hp = HeatPump()
last = cur
try:
print(len(values))
cur = HeatPump.decode(values)
except Exception:
return
print("Done it!!!!!!!!!")
print(cur)
try:
hp.load_bytes(cur)
except Exception as e:
print(e)
print("Failed decode")
print(str(hp))
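# A small sketch of the mode2 framing handled below, assuming the same
# bit layout as the PULSE_BIT/PULSE_MASK constants above: each 32-bit
# value packs a pulse/space flag and a duration in microseconds.
def split_mode2(raw):
    is_pulse = bool(raw & PULSE_BIT)
    duration_us = raw & PULSE_MASK
    return is_pulse, duration_us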
while True:
s_res = select.select([f], [], [], 0.1)
if len(s_res[0]) and s_res[0][0] == f:
# Have data
bytes = os.read(f, 4)
assert len(bytes) == 4
as_int = struct.unpack('i', bytes)[0]
as_int = as_int & PULSE_MASK
if as_int > 1000000:
print("biff")
continue
#print(as_int, len(grabbed))
grabbed.append(as_int)
if len(grabbed) == 583:  # a complete capture
print("good good")
decode(grabbed)
grabbed = []
else: # timeout
if len(grabbed) >= 291:
decode(grabbed)
grabbed = []
| 25.40625 | 68 | 0.473555 | 179 | 1,626 | 4.217877 | 0.47486 | 0.039735 | 0.013245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042918 | 0.426814 | 1,626 | 63 | 69 | 25.809524 | 0.767167 | 0.111316 | 0 | 0.142857 | 0 | 0 | 0.036934 | 0 | 0 | 0 | 0.013937 | 0 | 0.040816 | 1 | 0.020408 | false | 0 | 0.081633 | 0 | 0.122449 | 0.163265 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3adcd548eceda1d220a8df2a63b93a12edcfa89 | 15,139 | py | Python | seqvec/seqvec.py | Rostlab/SeqVec | 0f10e187b7160f63c5f2d38f1e12425889638179 | [
"MIT"
] | 71 | 2019-05-08T18:57:46.000Z | 2022-03-23T17:15:45.000Z | seqvec/seqvec.py | Rostlab/SeqVec | 0f10e187b7160f63c5f2d38f1e12425889638179 | [
"MIT"
] | 13 | 2019-11-26T13:51:11.000Z | 2022-01-12T14:46:38.000Z | seqvec/seqvec.py | Rostlab/SeqVec | 0f10e187b7160f63c5f2d38f1e12425889638179 | [
"MIT"
] | 10 | 2019-11-08T12:56:04.000Z | 2022-01-16T04:39:18.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import gzip
import json
import logging
import urllib.request
from pathlib import Path
from typing import Dict, List, Tuple, Generator
import h5py
import numpy as np
import torch
from allennlp.commands.elmo import ElmoEmbedder
from tqdm import tqdm
logger = logging.getLogger(__name__)
EmbedderReturnType = Generator[Tuple[str, np.ndarray], None, None]
def get_elmo_model(model_dir: Path, cpu: bool) -> ElmoEmbedder:
weights_path = model_dir / "weights.hdf5"
options_path = model_dir / "options.json"
# if no pre-trained model is available, yet --> download it
if not (weights_path.exists() and options_path.exists()):
logger.info(
"No existing model found. Start downloading pre-trained SeqVec (~360MB)..."
)
Path.mkdir(model_dir, exist_ok=True)
repo_link = "http://rostlab.org/~deepppi/embedding_repo/embedding_models/seqvec"
options_link = repo_link + "/options.json"
weights_link = repo_link + "/weights.hdf5"
urllib.request.urlretrieve(options_link, str(options_path))
urllib.request.urlretrieve(weights_link, str(weights_path))
cuda_device = 0 if torch.cuda.is_available() and not cpu else -1
logger.info("Loading the model")
# The string casting comes from a typing bug in allennlp
# https://github.com/allenai/allennlp/pull/3358
return ElmoEmbedder(
weight_file=str(weights_path),
options_file=str(options_path),
cuda_device=cuda_device,
)
def read_fasta(
sequences: Dict[str, str], fasta_path: Path, split_char: str, id_field: int
):
""" Reads in fasta file containing multiple sequences.
Adds all sequences to the `sequences` dictionary.
"""
if fasta_path.suffix == ".gz":
handle = gzip.open(str(fasta_path), "rt")
else:
handle = fasta_path.open()
with handle as fasta_f:
for line in fasta_f:
# get uniprot ID from header and create new entry
if line.startswith(">"):
if id_field == -1:
uniprot_id = line.replace(">", "").strip()
else:
uniprot_id = (
line.replace(">", "").strip().split(split_char)[id_field]
)
sequences[uniprot_id] = ""
else:
# repl. all white-space chars and join seqs spanning multiple lines
sequences[uniprot_id] += "".join(line.split()).upper()
def read_fasta_file(seq_dir: Path, split_char: str, id_field: int) -> Dict[str, str]:
""" Read in FASTA file """
seq_dict = dict()
if seq_dir.is_file(): # if single fasta file should be processed
read_fasta(seq_dict, seq_dir, split_char, id_field)
else: # if a directory was provided: read all files
assert seq_dir.is_dir(), f"'{seq_dir}' is neither a file nor a directory"
for seq_path in seq_dir.glob("**/*fasta*"):
read_fasta(seq_dict, seq_path, split_char, id_field)
return seq_dict
def process_embedding(
embedding: np.ndarray, per_protein: bool, layer: str
) -> np.ndarray:
"""
Direct output of ELMo has shape (3,L,1024), with L being the protein's
length, 3 being the number of layers used to train SeqVec (1 CharCNN, 2 LSTMs)
and 1024 being a hyperparameter chosen to describe each amino acid.
When a representation on residue level is required, you can sum
over the first dimension, resulting in a tensor of size (L,1024),
or just extract a specific layer.
If you want to reduce each protein to a fixed-size vector, regardless of its
length, you can average over dimension L.
"""
if layer == "sum":
# sum over residue-embeddings of all layers (3k->1k)
embedding = embedding.sum(axis=0)
elif layer == "CNN":
embedding = embedding[0]
elif layer == "LSTM1":
embedding = embedding[1]
elif layer == "LSTM2":
embedding = embedding[2]
else:
# Stack the layer (3,L,1024) -> (L,3072)
embedding = np.concatenate(embedding, axis=1)
if per_protein: # if embeddings are required on the level of whole proteins
embedding = embedding.mean(axis=0)
return embedding
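# A quick illustration (dummy array, shapes only) of the reductions the
# docstring above describes:
# >>> emb = np.zeros((3, 7, 1024))
# >>> process_embedding(emb, per_protein=False, layer="sum").shape
# (7, 1024)
# >>> process_embedding(emb, per_protein=True, layer="sum").shape
# (1024,)
# >>> process_embedding(emb, per_protein=False, layer="all").shape
# (7, 3072)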
_cpu_elmo_model = None # Cache the CPU model across invocations
def embed_with_fallback(
batch: List[Tuple[str, str]], model: ElmoEmbedder, model_dir: Path,
) -> EmbedderReturnType:
""" Tries to get the embeddings in this order:
* Full batch GPU
* Single Sequence GPU
* Single Sequence CPU
Single sequence processing is done in case of runtime error due to
a) very long sequence or b) too large batch size
If this fails, you might want to consider lowering batchsize and/or
cutting very long sequences into smaller chunks
Returns unprocessed embeddings
"""
global _cpu_elmo_model
# create List[List[str]] for batch-processing of ELMo
tokens = [list(seq) for _, seq in batch]
batch_ids = [identifier for identifier, _ in batch]
try: # try to get the embedding for the current sequence
with torch.no_grad():
embeddings = model.embed_batch(tokens)
assert len(batch) == len(embeddings)
for sequence_id, embedding in zip(batch_ids, embeddings):
yield sequence_id, embedding
except RuntimeError as e:
logger.error("Error processing batch of {} sequences: {}".format(len(batch), e))
logger.error("Sequences in the failing batch: {}".format(batch_ids))
logger.error("Starting single sequence processing")
for sample_id, seq in batch:
try:
with torch.no_grad():
embedding = model.embed_sentence(list(seq))
yield sample_id, embedding
except RuntimeError as e:
logger.error(
"RuntimeError for {} with {} residues: {}".format(
sample_id, len(seq), e
)
)
logger.error(
"Single sequence processing failed. Switching to CPU now. "
+ "This slows down the embedding process."
)
if not _cpu_elmo_model:
_cpu_elmo_model = get_elmo_model(model_dir, cpu=True)
with torch.no_grad():
embedding = _cpu_elmo_model.embed_sentence(list(seq))
yield sample_id, embedding
def get_embeddings(
seq_dir: Path,
model_dir: Path,
split_char: str = "|",
id_field: int = 1,
cpu: bool = False,
layer: str = "sum",
batchsize: int = 15000,
per_protein: bool = False,
) -> EmbedderReturnType:
""" Lazily generate all embeddings.
You can use this function if you want to do postprocessing or need a custom output format.
"""
seq_dict = read_fasta_file(seq_dir, split_char, id_field)
# Sort sequences
# Sorting sequences according to length is crucial for speed as batches
# of proteins with similar size increase throughput.
seq_dict = sorted(seq_dict.items(), key=lambda kv: len(seq_dict[kv[0]]))
logger.info("Total number of sequences: {}".format(len(seq_dict)))
model = get_elmo_model(model_dir, cpu)
batch = list()
length_counter = 0
for index, (identifier, sequence) in enumerate(
tqdm(seq_dict)
): # for all sequences in the set
# append sequence to batch and sum amino acids over proteins in batch
batch.append((identifier, sequence))
length_counter += len(sequence)
# Transform list of batches to embeddings
# if a) max. number of chars. for a batch is reached,
# if b) sequence is longer than half batchsize (avoids RuntimeError for very long seqs.)
# if c) the last sequence is reached
if not (
length_counter > batchsize
or len(sequence) > batchsize / 2
or index == len(seq_dict) - 1
):
continue
# Actually compute embeddings and postprocess
for sequence_id, embedding in embed_with_fallback(batch, model, model_dir):
yield sequence_id, process_embedding(embedding, per_protein, layer)
# Reset batch
batch = list()
length_counter = 0
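# A minimal usage sketch (paths are hypothetical): consume the generator
# directly instead of writing the embeddings to disk.
# for sequence_id, emb in get_embeddings(Path("seqs.fasta"), Path("model"), per_protein=True):
#     print(sequence_id, emb.shape)  # (1024,) with the default layer="sum"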
def save_from_generator(
emb_path: Path,
per_protein: bool,
the_generator: Generator[Tuple[str, np.ndarray], None, None],
):
if emb_path.suffix == ".h5":
with h5py.File(str(emb_path), "w") as hf:
for sequence_id, embedding in the_generator:
    hf.create_dataset(sequence_id, data=embedding)
elif emb_path.suffix == ".npz" or emb_path.suffix == ".npy":
if emb_path.suffix == ".npy" and not per_protein:
raise RuntimeError(
"You need to sum up per protein (`--protein True`) to save as .npy array"
)
emb_dict = dict()
for sequence_id, embedding in the_generator:
if embedding is None:
# The generator code already showed an error
continue
emb_dict[sequence_id] = embedding
if not emb_dict:
raise RuntimeError("Embedding dictionary is empty!")
logger.info("Total number of embeddings: {}".format(len(emb_dict)))
if emb_path.suffix == ".npy":
label_file = emb_path.with_suffix(".json")
logger.info(f"Writing embeddings to {emb_path} and the ids to {label_file}")
# save elmo representations
with label_file.open("w") as id_file:
json.dump(list(emb_dict.keys()), id_file)
# noinspection PyTypeChecker
np.save(emb_path, np.asarray(list(emb_dict.values())))
else:
logger.info(f"Writing embeddings to {emb_path}")
# We checked above that the suffix can only be .npz here
np.savez(emb_path, **emb_dict)
else:
raise RuntimeError(
f"The output file must end with .npz, .npy or .h5,"
f"but the path you provided ends with '{emb_path.suffix}'"
)
def create_arg_parser():
""" Creates and returns the ArgumentParser object. """
# Instantiate the parser
parser = argparse.ArgumentParser(
description=(
"seqvec.py creates ELMo embeddings for a given text "
+ " file containing sequence(s) in FASTA-format."
)
)
# Path to fasta file (required)
# noinspection PyTypeChecker
parser.add_argument(
"-i",
"--input",
required=True,
type=Path,
help="A path to a fasta-formatted text file containing protein sequence(s)."
+ "Can also be a directory holding multiple fasta files.",
)
# Path for writing embeddings (required)
# noinspection PyTypeChecker
parser.add_argument(
"-o",
"--output",
required=True,
type=Path,
help="A path to a file for saving the created embeddings. "
+ "By default, a HDF (.h5) file will be written which should also be indicated by the chosen filename."
+ "Only if you create per-protein embeddings, you can also write to numpy formats, i.e. .npy or .npz, which again should be indicated by the chosen filename."
+ "If you choose to write a .npy file, a .json file with the sequence ids will be created next to the .npy file.",
)
# Path to model (optional)
# noinspection PyTypeChecker
parser.add_argument(
"--model",
type=Path,
default=Path.cwd() / "model",
help="A path to a directory holding a pre-trained ELMo model. "
+ "If the model is not found in this path, it will be downloaded automatically."
+ "The file containing the weights of the model must be named weights.hdf5."
+ "The file containing the options of the model must be named options.json",
)
# Create embeddings for a single protein or for all residues within a protein
parser.add_argument(
"--protein",
action="store_true",
default=False,
help="Flag (no arguments needed) for summarizing embeddings from residue level to protein level "
+ "via averaging. Default: Not set (False)",
)
# Number of residues within one batch
parser.add_argument(
"--batchsize",
type=int,
default=15000,
help="Number of residues which need to be accumulated before starting batch "
+ "processing. If you encounter an OutOfMemoryError, lower this value. Default: 15000",
)
# Character for splitting fasta header
parser.add_argument(
"--split-char",
type=str,
default="|",
help="The character for splitting the FASTA header in order to retrieve "
+ "the protein identifier. Should be used in conjunction with --id. "
+ "Default: '|' ",
)
# Field index for protein identifier in fasta header after splitting with --split-char
parser.add_argument(
"--id",
type=int,
default=1,
help="The zero based index for the uniprot identifier field after splitting the "
+ "FASTA header after each symbole in ['|', '#', ':', ' ']. "
+ "Use -1 to deactivate splitting. "
+ "Default: 1",
)
# Whether to use CPU or GPU
parser.add_argument(
"--cpu",
action="store_true",
default=False,
help="Flag for using CPU to compute embeddings. Default: False",
)
parser.add_argument(
"--layer",
dest="layer",
choices=["sum", "all", "CNN", "LSTM1", "LSTM2"],
default="sum",
help="Decide whether to `sum` up the layers (1024 dimensions), concatenate `all` of them (3072 dimensions) or "
"select a specific layer (`CNN`, `LSTM1` or `LSTM2`). Defaults to `sum`",
)
parser.add_argument(
"--silent",
action="store_true",
default=False,
help="Embedder gives some information while processing. Default: True",
)
return parser
def main():
parser = create_arg_parser()
args = parser.parse_args()
seq_dir = args.input
emb_path = args.output
model_dir = args.model
split_char = args.split_char
id_field = args.id
cpu_flag = args.cpu
per_prot = args.protein
batchsize = args.batchsize
verbose = not args.silent
layer = args.layer
if verbose:
# Otherwise the default level is warning
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
device_name = "GPU" if torch.cuda.is_available() and not cpu_flag else "CPU"
logger.info(f"Running on the {device_name}")
embeddings_generator = get_embeddings(
seq_dir, model_dir, split_char, id_field, cpu_flag, layer, batchsize, per_prot,
)
save_from_generator(emb_path, per_prot, embeddings_generator)
if __name__ == "__main__":
main()
| 35.874408 | 166 | 0.625867 | 1,937 | 15,139 | 4.769231 | 0.234383 | 0.012124 | 0.018402 | 0.010392 | 0.163564 | 0.108249 | 0.078264 | 0.054449 | 0.017103 | 0 | 0 | 0.008443 | 0.28027 | 15,139 | 421 | 167 | 35.95962 | 0.839391 | 0.199221 | 0 | 0.202055 | 0 | 0.013699 | 0.245327 | 0 | 0 | 0 | 0 | 0 | 0.006849 | 1 | 0.030822 | false | 0 | 0.041096 | 0 | 0.085616 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3add85756a4fe737520f67737899256ef1e17ce | 3,261 | py | Python | grslra/tools.py | clemenshage/grslra | 00f61b4ef08208d12e8e803d10f8ebbe16d8614a | [
"MIT"
] | null | null | null | grslra/tools.py | clemenshage/grslra | 00f61b4ef08208d12e8e803d10f8ebbe16d8614a | [
"MIT"
] | null | null | null | grslra/tools.py | clemenshage/grslra | 00f61b4ef08208d12e8e803d10f8ebbe16d8614a | [
"MIT"
] | null | null | null | import numpy as np
import json
import os
def innerprod(A, B):
# This function computes the standard inner product between two matrices via vectorization
if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):
return np.dot(A.flatten(), B.flatten())
elif isinstance(A, tuple) and isinstance(B, tuple):
return np.dot(A[0].flatten(), B[0].flatten()) * np.dot(A[1].flatten(), B[1].flatten())
else:
return False
def qr_positive(A, mode='reduced'):
# This function computes a QR decomposition of a matrix with positive pivot elements in R
(m, n) = A.shape
Q, R = np.linalg.qr(A, mode=mode)
if n > 1:
dvec = np.sign(np.diag(R))
else:
dvec = np.sign(R[0, 0])
dvec = dvec[:, np.newaxis]
d = dvec.shape[0]
R[0: d, :] = R[0: d, :] * dvec
Q[:, 0: d] = Q[:, 0: d] * dvec.T
return Q, R
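# A quick sanity sketch for qr_positive: the pivots of R become
# non-negative while Q @ R still reconstructs A (random A assumed).
# >>> A = np.random.randn(5, 3)
# >>> Q, R = qr_positive(A)
# >>> bool((np.diag(R) >= 0).all() and np.allclose(Q @ R, A))
# True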
def mcos(X):
Y = np.zeros(X.shape)
d = np.diag(X)
d = np.cos(d)
Y[:, :] = np.diag(d)
return Y
def msin(X):
Y = np.zeros(X.shape)
d = np.diag(X)
d = np.sin(d)
Y[:, :] = np.diag(d)
return Y
def subspace_angle(U_A, U_B):
U_A_U_B_orth = U_B - np.dot(U_A, np.dot(U_A.T, U_B))
theta = np.rad2deg(np.arcsin(np.minimum(np.linalg.norm(U_A_U_B_orth, 2), 1.0)))
return theta
def rmse(U_0, Y_0, U, Y):
m = U.shape[0]
n = Y.shape[1]
_, R1 = np.linalg.qr(np.hstack((U_0, U)))
_, R2 = np.linalg.qr(np.hstack((Y_0.T, -Y.T)))
return np.linalg.norm(np.dot(R1, R2.T), 'fro') / np.sqrt(m * n)
def orthogonality_check(U):
R = None
diff1 = np.abs(1.0 - np.dot(U[:, 0], U[:, 0]))
diff2 = np.abs(np.dot(U[:, 0], U[:, 1]))
if diff1 > 1e-12 or diff2 > 1e-12:
# print "Re-orthogonalizing U"
U, R = qr_positive(U)
return U, R
def incoherence(L, k):
m = L.shape[0]
n = L.shape[1]
U, _, V_T = np.linalg.svd(L, full_matrices=False)
U = U[:, :k]
V = V_T.T[:, :k]
rownorms_U = np.sqrt(np.sum(U.T * U.T, axis=0))
mu_U = np.sqrt(m) * np.amax(rownorms_U) / np.sqrt(k)
rownorms_V = np.sqrt(np.sum(V.T * V.T, axis=0))
mu_V = np.sqrt(n) * np.amax(rownorms_V) / np.sqrt(k)
return mu_U, mu_V
def load_params(name):
basefolder, _ = os.path.split(os.path.abspath(os.path.join(__file__, os.pardir)))
filename = basefolder + os.sep + 'params' + os.sep + name + '.json'
with open(filename, 'r') as f:
params = json.load(f)
for key, value in params.items():  # dict.iteritems() is Python 2 only
if value == "None" or value == "none":
params[key] = None
if value == "False" or value == "false":
params[key] = False
if value == "True" or value == "true":
params[key] = True
return params
def parse_params(params, defaultfile):
params_default = load_params(defaultfile)
if params is None:
return params_default
else:
for key in params_default:
if key not in params:
params[key] = params_default[key]
return params
def forecasting_preprocess(x, m, r):
omega = np.ones_like(x)
omega = np.hstack((omega, np.zeros(r, )))
N = omega.size
Omega = np.where(omega)[0]
n = N - m + 1
x_Omega = x[Omega]
return x_Omega, Omega, n
| 25.677165 | 94 | 0.567924 | 557 | 3,261 | 3.231598 | 0.24237 | 0.022222 | 0.013333 | 0.006667 | 0.087778 | 0.05 | 0.05 | 0.05 | 0.028889 | 0.028889 | 0 | 0.020025 | 0.264949 | 3,261 | 126 | 95 | 25.880952 | 0.730914 | 0.062864 | 0 | 0.141304 | 0 | 0 | 0.015727 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119565 | false | 0 | 0.032609 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3ae82623bff425e28af5f552b9b90f5490745b4 | 780 | py | Python | flattentool/tests/test_cli.py | Xtuden-com/flatten-tool | c67c918daf9b940b08cb43bdd9c00371ba5ef2f1 | [
"MIT"
] | 86 | 2015-07-16T10:23:47.000Z | 2022-03-29T08:11:40.000Z | flattentool/tests/test_cli.py | Xtuden-com/flatten-tool | c67c918daf9b940b08cb43bdd9c00371ba5ef2f1 | [
"MIT"
] | 275 | 2015-03-31T14:51:31.000Z | 2022-03-07T14:54:05.000Z | flattentool/tests/test_cli.py | Xtuden-com/flatten-tool | c67c918daf9b940b08cb43bdd9c00371ba5ef2f1 | [
"MIT"
] | 16 | 2015-11-06T15:41:30.000Z | 2021-07-16T00:18:32.000Z | # hint: test_argparse is provided by libpythonX.Y-testsuite on ubuntu
from test.test_argparse import ArgumentParserError, stderr_to_parser_error
import pytest
from flattentool import cli
def test_create_parser():
"""
Command line arguments that should be acceptable
"""
parser = cli.create_parser()
args = parser.parse_args("create-template -s schema.json".split())
assert args.schema == "schema.json"
def test_create_parser_missing_required_options():
"""
If you do not supply certain arguments
you should be warned
"""
parser = cli.create_parser()
with pytest.raises(ArgumentParserError) as excinfo:
stderr_to_parser_error(parser.parse_args, "create-template".split())
assert "required" in excinfo.value.stderr
| 27.857143 | 76 | 0.735897 | 101 | 780 | 5.49505 | 0.534653 | 0.086486 | 0.05045 | 0.068468 | 0.104505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.176923 | 780 | 27 | 77 | 28.888889 | 0.864486 | 0.226923 | 0 | 0.166667 | 0 | 0 | 0.112676 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.166667 | false | 0 | 0.25 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3af03d0b8d4cec429e097fd2c982c0b1aff5a01 | 681 | py | Python | geo_to_csv.py | simberaj/mobilib | ae350d095a34f53704bd4aaaf7f45e573bda779a | [
"MIT"
] | null | null | null | geo_to_csv.py | simberaj/mobilib | ae350d095a34f53704bd4aaaf7f45e573bda779a | [
"MIT"
] | null | null | null | geo_to_csv.py | simberaj/mobilib | ae350d095a34f53704bd4aaaf7f45e573bda779a | [
"MIT"
] | null | null | null | """Transform a geospatial file to a GeoCSV (WKT)."""
import operator
import geopandas as gpd
import mobilib.argparser
parser = mobilib.argparser.default(__doc__)
parser.add_argument('in_file', help='GDAL-compatible file')
parser.add_argument('out_file', help='path to output CSV')
if __name__ == '__main__':
args = parser.parse_args()
in_gdf = gpd.read_file(args.in_file)
if (in_gdf.geometry.geom_type == 'Point').all():
in_gdf['X'] = in_gdf.geometry.x
in_gdf['Y'] = in_gdf.geometry.y
else:
in_gdf['WKT'] = in_gdf.geometry.map(operator.attrgetter('wkt'))
in_gdf.drop('geometry', axis=1).to_csv(args.out_file, sep=';', index=False)
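# Example invocation (file names are hypothetical):
#   python geo_to_csv.py districts.gpkg districts.csv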
| 29.608696 | 79 | 0.688693 | 103 | 681 | 4.252427 | 0.485437 | 0.10274 | 0.118721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001736 | 0.154185 | 681 | 22 | 80 | 30.954545 | 0.758681 | 0.067548 | 0 | 0 | 0 | 0 | 0.131955 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3af6e41ab30d1ce91c30bacdb82ca34f8dd79a5 | 1,751 | py | Python | modify-subjects/utils.py | cacosandon/are-you-looking | be25d799eed452ddfd1ff393fddd53897ef94a4a | [
"MIT"
] | null | null | null | modify-subjects/utils.py | cacosandon/are-you-looking | be25d799eed452ddfd1ff393fddd53897ef94a4a | [
"MIT"
] | null | null | null | modify-subjects/utils.py | cacosandon/are-you-looking | be25d799eed452ddfd1ff393fddd53897ef94a4a | [
"MIT"
] | null | null | null | import json
import sys
def load_dataset(split):
data = {}
if split == 'synthetic':
with open('tasks/R2R-pano/data/R2R_literal_speaker_data_augmentation_paths.json') as f:
data = json.load(f)
else:
with open('tasks/R2R-pano/data/R2R_%s.json' % split) as f:
data = json.load(f)
# Return the dictionary
return data
def save_dataset(split, data, folder):
if split == 'synthetic':
with open(f'modify-subjects/{folder}/R2R_literal_speaker_data_augmentation_paths.json', "w") as f:
f.write(json.dumps(data, indent=4))
else:
with open(f'modify-subjects/{folder}/R2R_{split}.json', "w") as f:
    f.write(json.dumps(data, indent=4))
print(f"Correctly saved in {folder} split {split}")
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
| 36.479167 | 106 | 0.61679 | 228 | 1,751 | 4.635965 | 0.350877 | 0.030274 | 0.008515 | 0.037843 | 0.302744 | 0.272469 | 0.242195 | 0.056764 | 0.056764 | 0.056764 | 0 | 0.012121 | 0.246145 | 1,751 | 47 | 107 | 37.255319 | 0.787879 | 0.244432 | 0 | 0.285714 | 0 | 0 | 0.232394 | 0.162754 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.071429 | 0 | 0.214286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3afd7ece6c84d77e23173308d21099c79031ed6 | 7,969 | py | Python | server/app.py | Algoanna/goanna-run | 4c5c0d83238cfa0b7cf552a2fabeaadd23def22d | [
"MIT"
] | null | null | null | server/app.py | Algoanna/goanna-run | 4c5c0d83238cfa0b7cf552a2fabeaadd23def22d | [
"MIT"
] | null | null | null | server/app.py | Algoanna/goanna-run | 4c5c0d83238cfa0b7cf552a2fabeaadd23def22d | [
"MIT"
] | null | null | null | import os, csv
import numpy as np
import urllib.request, json
from flask import Flask, escape, request, render_template, jsonify
from leaderboard.leaderboard import Leaderboard
import requests
import lxml.html as lh
import pandas as pd
from algosdk.v2client import algod
from algosdk import account, mnemonic
from config import *
from algosdk.future.transaction import AssetConfigTxn, AssetTransferTxn
from algosdk.transaction import write_to_file
from util import balance_formatter, sign_and_send
#asset_id = 15823566
asset_id = 40711649
import time, sys, random
import logging, json
# Setup HTTP client w/guest key provided by PureStake
class Connect():
def __init__(self):
# declaring the third party API
self.algod_address = "https://testnet-algorand.api.purestake.io/ps2"
self.algod_token = os.environ.get('PURESTAKE_API_KEY') # Sign up
# PURESTAKE.COM to get your personal token
self.headers = {"X-API-Key": self.algod_token}
def connectToNetwork(self):
# establish connection
return algod.AlgodClient(self.algod_token, self.algod_address, self.headers)
client = Connect().connectToNetwork()
def check_holdings(asset_id, address):
"""
Checks the asset balance for the specific address and asset id.
"""
account_info = client.account_info(address)
assets = account_info.get("assets")
if assets:
asset_holding = None
for i in account_info["assets"]:
if i['asset-id'] == asset_id:
return i
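# A quick sketch of what check_holdings returns (the address is a
# placeholder; the dict layout follows the algod v2 account_info format):
# check_holdings(asset_id, "SOME_ALGORAND_ADDRESS")
# -> {'amount': 1, 'asset-id': 40711649, 'is-frozen': False}, or None if absent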
def revoke(asset_id, address, receiver_address, passphrase=None, target=None):
"""
Creates an unsigned transfer transaction for the specified asset id, to the
specified address, for the specified amount.
"""
params = client.suggested_params()
params.fee = 1000
params.flat_fee = True
amount = 1
txn = AssetTransferTxn(sender=address,
sp=params,
receiver=receiver_address,
amt=amount,
index=asset_id,
revocation_target=target)
#data = add_network_params(transfer_data, client)
#txn = AssetTransferTxn(**data)
if passphrase:
txinfo = sign_and_send(txn, passphrase, client)
#formatted_amount = balance_formatter(amount, asset_id, client)
print("Transferred {} from {} to {}".format(str(amount),
address, receiver_address))
print("Transaction ID Confirmation: {}".format(txinfo.get("txn")))
else:
write_to_file([txn], "clawback"+address+".txn")
def transfer(asset_id, address, receiver_address, passphrase=None):
"""
Creates an unsigned transfer transaction for the specified asset id, to the
specified address, for the specified amount.
"""
params = client.suggested_params()
params.fee = 1000
params.flat_fee = True
txn = AssetTransferTxn(sender=address,
sp=params,
receiver=receiver_address,
amt=1,
index=asset_id)
#data = add_network_params(transfer_data, client)
#txn = AssetTransferTxn(**data)
if passphrase:
txinfo = sign_and_send(txn, passphrase, client)
amount = 1  # matches the amt used in the transaction above
formatted_amount = balance_formatter(amount, asset_id, client)
print("Transferred {} from {} to {}".format(formatted_amount,
address, receiver_address))
print("Transaction ID Confirmation: {}".format(txinfo.get("tx")))
else:
write_to_file([txn], "transfer"+address+".txn")
highscore_lb = Leaderboard('highscores')
app = Flask(__name__)
@app.route('/score', methods=['POST'])
def score():
name = request.form.get('name', '')
address = request.form.get('address', '')
txid = request.form.get('txid', '')
if (not name or not address):
return 'name and address is required'
score = float(request.form.get('score', 0))
print(name, score)
def highscore_check(
self,
member,
current_score,
score,
member_data,
leaderboard_options):
if (current_score is None):
return True
if (score > current_score):
return True
return False
key = name.replace('@','') + '@' + address
oscore = highscore_lb.member_at(1)
if (not txid):
highscore_lb.rank_member_if(highscore_check, key, score)
nscore = highscore_lb.score_for(key)
result = 'saved'
beaten = ''
if oscore is None:
result = 'high'
print('asset goes to you')
revoke(asset_id, goanna_address, address, goanna_passphrase, goanna_address)
else:
omember = oscore['member'].decode('ascii')
oname = omember.split('@')
beaten = oname[0] + '#' + oname[1][-4:]
oaddress = omember.split('@')[1]
oscore = oscore['score']
print('txid', txid, 'scores', oscore, nscore)
if (oscore < nscore):
print('{} beat high score with {}, the highest was {} who scored {}'
.format(name, str(nscore), omember, str(oscore)))
if (key != omember):
print("transfer asset to", address, "from", oaddress)
revoke(asset_id, goanna_address, address, goanna_passphrase, oaddress)
result = 'high'
elif (nscore > score):
print('high score for you {} is {}, the highest is {} who scored {}'
.format(name, str(nscore), omember, str(oscore)))
else:
print('new high score for you {} is {}, the highest is {} who scored {}'
.format(name, str(nscore), omember, str(oscore)))
response = jsonify({'score': result, 'note': beaten})
response.headers.add('Access-Control-Allow-Origin', '*')
return response
P = 50111
def encrypt(x):
a = np.random.randint(-P,P)
b = np.random.randint(-P,P)
#print(a, b)
c = (x - a - b) % P
#print(x, [a, b, c], decrypt(np.array((a, b, c))))
return np.array((a, b, c))
def decrypt(x):
return np.sum(x) % P
def multiply(x, y):
u1 = (x[1]*y[1] + x[1]*y[2] + x[2]*y[1])%P
u2 = (x[2]*y[2] + x[0]*y[2] + x[2]*y[0])%P
u3 = (x[0]*y[0] + x[0]*y[1] + x[1]*y[0])%P
return decrypt((u1,u2,u3))
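# A quick sketch of the 3-share scheme above (values are arbitrary):
# decrypting the shares recovers the input, and multiply() recombines
# share products into the plain product mod P.
# >>> x, y = 7, 6
# >>> decrypt(encrypt(x)) == x
# True
# >>> multiply(encrypt(x), encrypt(y)) == (x * y) % P
# True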
@app.route('/encrypt', methods=['POST'])
def enc():
i = request.form.get('input', '0')
result = []
if ("," in i):
ss = i.split(',')
for s in ss:
result.append(encrypt(int(s)).tolist())
print(s, decrypt(encrypt(int(s)).tolist()))
else:
result = encrypt(int(i)).tolist()
response = jsonify({'result': result})
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/top10')
def index():
leaders = highscore_lb.leaders(1)
for i in range(0, len(leaders)):
leader = leaders[i]['member'].decode('ascii').split('@')
leaders[i]['playerName'] = leader[0] + '#' + leader[1][-4:]
leaders[i]['score'] = str(int(leaders[i]['score']))
del leaders[i]['member']
del leaders[i]['rank']
response = jsonify(leaders)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/check')
def check():
address = request.args.get('address', '')
print(address)
if (not address):
return 'address is required'
holdings = check_holdings(asset_id, address)
response = jsonify(holdings)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
#return render_template('index.html', leaders=leaders)
| 31.876 | 94 | 0.583762 | 936 | 7,969 | 4.870727 | 0.237179 | 0.026102 | 0.015354 | 0.021057 | 0.383198 | 0.347445 | 0.347445 | 0.329458 | 0.307962 | 0.273744 | 0 | 0.01248 | 0.286109 | 7,969 | 249 | 95 | 32.004016 | 0.788891 | 0.11093 | 0 | 0.218391 | 0 | 0 | 0.113993 | 0.015389 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074713 | false | 0.045977 | 0.091954 | 0.011494 | 0.252874 | 0.074713 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6c8f1368b4b2faf32d859d74436012deac4e2f27 | 2,827 | py | Python | Code/IPA/dijkstra.py | Babdus/Protolanguage | 050aeed5e7ac5905515a887dcbab434457ae2f47 | [
"MIT"
] | 4 | 2019-06-14T09:31:51.000Z | 2019-11-14T22:45:36.000Z | Code/IPA/dijkstra.py | Babdus/Protolanguage | 050aeed5e7ac5905515a887dcbab434457ae2f47 | [
"MIT"
] | 2 | 2019-04-25T14:09:42.000Z | 2021-06-11T12:55:42.000Z | Code/IPA/dijkstra.py | Babdus/Protolanguage | 050aeed5e7ac5905515a887dcbab434457ae2f47 | [
"MIT"
] | null | null | null | from fibonacci_heap_mod import Fibonacci_heap as fh
from IPA.IPAData import *
def is_valid_sound(tup):
features = set(tup)
return (len(features & places) < 2 or (len(features & {'AL', 'PA'}) == 2 and len(features & places) == 2)) and len(features & manners) < 2
def neighbours(vertex, whole_set, distances):
ns = {}
if vertex == 'X':
    vertex = ()  # 'X' is the sentinel for the empty (featureless) sound
for elem in vertex:
if ('X', elem) in distances:
tup = tuple(e for e in vertex if e != elem)
if len(tup) == 0:
tup = 'X'
if is_valid_sound(tup):
ns[tup] = distances[('X', elem)]
for other_elem in whole_set - set(vertex):
if (other_elem, elem) in distances:
tup = tuple(sorted([other_elem if e == elem else e for e in vertex]))
if is_valid_sound(tup):
ns[tup] = distances[(other_elem, elem)]
for other_elem in whole_set - set(vertex):
if (other_elem, 'X') in distances:
tup = tuple(sorted(list(vertex + (other_elem,))))
if is_valid_sound(tup):
ns[tup] = distances[(other_elem, 'X')]
return ns
def add_intermediate_features(vertex, whole_set):
features1 = set(vertex)
features2 = whole_set - features1
vowel_list = ['CL', 'NC', 'MC', 'MI', 'MO', 'NO', 'OP']
if len(features1 & vowels) > 0 and len(features2 & vowels) > 0:
vowel1 = (features1 & vowels).pop()
vowel2 = (features2 & vowels).pop()
index1 = vowel_list.index(vowel1)
index2 = vowel_list.index(vowel2)
if index1 > index2:
adding_sublist = vowel_list[index2+1:index1]
else:
adding_sublist = vowel_list[index1+1:index2]
whole_set |= set(adding_sublist)
if len(features1 & vowelable_places) > 0 and len(features2 & vowelable_places) > 0:
whole_set.add('NE')
return whole_set
def dijkstra(vertex, whole_set, distances):
whole_set = add_intermediate_features(vertex, whole_set)
entries = {}
prev = {}
Q = fh()
entries[vertex] = Q.enqueue(vertex, 0)
while len(Q) > 0:
    min_vertex_entry = Q.dequeue_min()  # settle the closest vertex
    neighbour_distances = neighbours(min_vertex_entry.get_value(), whole_set, distances)
    for neighbour in neighbour_distances:
        # relax the edge if it gives a shorter path to the neighbour
        alt = min_vertex_entry.get_priority() + neighbour_distances[neighbour]
        if neighbour not in entries or alt < entries[neighbour].get_priority():
            if neighbour in entries:
                Q.decrease_key(entries[neighbour], alt)
            else:
                entries[neighbour] = Q.enqueue(neighbour, alt)
            prev[neighbour] = min_vertex_entry.get_value()
dist = {key: entries[key].get_priority() for key in entries}
return dist, prev
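# A minimal usage sketch (a toy cost table; real feature distances come
# from the data this module is used with):
# dist, prev = dijkstra(('AL',), {'AL', 'PA', 'NE'}, {('X', 'AL'): 1.0, ('AL', 'PA'): 0.5})
# dist maps each reachable feature tuple to its cheapest edit cost.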
| 41.573529 | 142 | 0.597099 | 361 | 2,827 | 4.501385 | 0.232687 | 0.059077 | 0.029538 | 0.036923 | 0.262154 | 0.187692 | 0.124923 | 0.124923 | 0.105846 | 0.105846 | 0 | 0.016857 | 0.286523 | 2,827 | 67 | 143 | 42.19403 | 0.788795 | 0 | 0 | 0.111111 | 0 | 0 | 0.009197 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063492 | false | 0 | 0.031746 | 0 | 0.15873 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6c8f6f5b606269573ee9e3bfd8fed2b6b02cb54b | 637 | py | Python | reptile/urls.py | kartoza/kbims-reptile | 4273857d7a2b8f373622f8e06149581bd43703c5 | [
"MIT"
] | null | null | null | reptile/urls.py | kartoza/kbims-reptile | 4273857d7a2b8f373622f8e06149581bd43703c5 | [
"MIT"
] | null | null | null | reptile/urls.py | kartoza/kbims-reptile | 4273857d7a2b8f373622f8e06149581bd43703c5 | [
"MIT"
] | null | null | null | # coding=utf-8
from django.conf.urls import url
from reptile.api_views.reptile_collection_record import (
ReptileCollectionList,
ReptileCollectionDetail,
)
from django.contrib.auth.decorators import login_required
from reptile.views.csv_upload import CsvUploadView
api_urls = [
url(r'^api/reptile-collections/$', ReptileCollectionList.as_view()),
url(r'^api/reptile-collections/(?P<pk>[0-9]+)/$',
ReptileCollectionDetail.as_view()),
url(r'^reptile/upload/$',
login_required(CsvUploadView.as_view()),
name='reptile-csv-upload'),
]
urlpatterns = [
# Add custom URL paths here
] + api_urls
| 25.48 | 72 | 0.722135 | 77 | 637 | 5.831169 | 0.493506 | 0.026726 | 0.03118 | 0.062361 | 0.111359 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005525 | 0.147567 | 637 | 24 | 73 | 26.541667 | 0.821363 | 0.059655 | 0 | 0 | 0 | 0 | 0.171429 | 0.112605 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.235294 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6c91e45aa3f7f410f20888f0c707dfdcc826ab8f | 5,359 | py | Python | EvalBox/Attack/AdvAttack/blb.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
] | 32 | 2020-10-20T06:12:48.000Z | 2022-03-30T03:31:24.000Z | EvalBox/Attack/AdvAttack/blb.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
] | 2 | 2021-03-24T13:54:50.000Z | 2021-10-11T13:37:31.000Z | EvalBox/Attack/AdvAttack/blb.py | Yzx835/AISafety | eb09551814898c7f6d86641b47faf7845c948640 | [
"MIT"
] | 19 | 2020-10-22T05:42:51.000Z | 2022-02-04T07:07:39.000Z | #!/usr/bin/env python
# coding=UTF-8
"""
@Author: Tao Hang
@LastEditors: Tao Hang
@Description: Box-constrained L-BFGS (BLB) adversarial attack
@Date: 2019-03-29 10:41:16
@LastEditTime: 2019-04-15 09:25:43
"""
import numpy as np
import torch
from torch.autograd import Variable
from EvalBox.Attack.AdvAttack.attack import Attack
class BLB(Attack):
def __init__(self, model=None, device=None, IsTargeted=None, **kwargs):
"""
@description: Box-constrained L-BFGS attack
@param {
model: the model under attack
device: device to run the attack on
kwargs: extra attack hyperparameters (see _parse_params)
}
@return: None
"""
super(BLB, self).__init__(model, device, IsTargeted)
self.criterion = torch.nn.CrossEntropyLoss(reduction="none")
self._parse_params(**kwargs)
def _parse_params(self, **kwargs):
"""
@description: parse the attack hyperparameters
@param {
init_const: initial trade-off constant c in the attack loss
binary_search_steps: number of binary-search steps over c
max_iter: maximum number of L-BFGS iterations
}
@return: None
"""
#
self.init_const = float(kwargs.get("init_const", 0.01))
#
self.max_iter = int(kwargs.get("max_iter", 1000))
#
self.binary_search_steps = int(kwargs.get("binary_search_steps", 5))
def generate(self, xs=None, ys=None):
"""
@description: generate adversarial examples for a batch of inputs
@param {
xs: clean input samples
ys: the corresponding labels
}
@return: adv_xs
"""
device = self.device
targeted = self.IsTargeted
batch_size = xs.shape[0]
copy_xs = np.copy(xs.numpy())
copy_ys = np.copy(ys.numpy())
var_xs = Variable(
torch.from_numpy(copy_xs).float().to(device), requires_grad=True
)
var_ys = Variable(torch.LongTensor(copy_ys).to(device))
const_origin = np.ones(shape=batch_size, dtype=float) * self.init_const
c_upper_bound = [1e10] * batch_size
c_lower_bound = np.zeros(batch_size)
best_l2 = [1e10] * batch_size
best_perturbation = np.zeros(var_xs.shape)
current_prediction_class = [-1] * batch_size
def attack_achieved(pre_softmax, target_class):
targeted = self.IsTargeted
if targeted:
return np.argmax(pre_softmax) == target_class
else:
return np.argmax(pre_softmax) != target_class
for search_for_c in range(self.binary_search_steps):
# the perturbation
r = torch.zeros_like(var_xs).float()
r = Variable(r.to(device), requires_grad=True)
# use LBFGS to optimize the perturbation r, with default learning rate parameter and other parameters
optimizer = torch.optim.LBFGS([r], max_iter=self.max_iter)
var_const = Variable(torch.FloatTensor(const_origin).to(device))
print("\tbinary search step {}:".format(search_for_c))
# The steps to be done when doing optimization iteratively.
def closure():
perturbed_images = torch.clamp(var_xs + r, min=0.0, max=1.0)
prediction = self.model(perturbed_images)
l2dist = torch.sum((perturbed_images - var_xs) ** 2, [1, 2, 3])
constraint_loss = -self.criterion(prediction, var_ys)
if targeted:
constraint_loss = self.criterion(prediction, var_ys)
loss_f = var_const * constraint_loss
loss = (
l2dist.sum() + loss_f.sum()
) # minimize c|r| + loss_f(x+r,l), l is the target label, r is the perturbation
optimizer.zero_grad()
loss.backward(retain_graph=True)
return loss
optimizer.step(closure)
perturbed_images = torch.clamp(var_xs + r, min=0.0, max=1.0)
prediction = self.model(perturbed_images)
l2dist = torch.sum((perturbed_images - var_xs) ** 2, [1, 2, 3])
# the following is analogy to CW2 attack
for i, (dist, score, perturbation) in enumerate(
zip(
l2dist.data.cpu().numpy(),
prediction.data.cpu().numpy(),
perturbed_images.data.cpu().numpy(),
)
):
if dist < best_l2[i] and attack_achieved(score, copy_ys[i]):
best_l2[i] = dist
current_prediction_class[i] = np.argmax(score)
best_perturbation[i] = perturbation
# update the best constant c for each sample in the batch
for i in range(batch_size):
if (
current_prediction_class[i] == copy_ys[i]
and current_prediction_class[i] != -1
):
c_upper_bound[i] = min(c_upper_bound[i], const_origin[i])
if c_upper_bound[i] < 1e10:
const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
else:
c_lower_bound[i] = max(c_lower_bound[i], const_origin[i])
if c_upper_bound[i] < 1e10:
const_origin = (c_lower_bound[i] + c_upper_bound[i]) / 2
else:
const_origin[i] *= 10
adv_xs = torch.from_numpy(best_perturbation).float()
return adv_xs
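# A minimal usage sketch (model and tensors are placeholders, not part of
# this module):
# attack = BLB(model=net, device=torch.device("cuda"), IsTargeted=False,
#              init_const=0.01, max_iter=1000, binary_search_steps=5)
# adv_xs = attack.generate(xs=clean_images, ys=labels)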
| 35.726667 | 113 | 0.54861 | 632 | 5,359 | 4.436709 | 0.276899 | 0.021398 | 0.027461 | 0.025678 | 0.208987 | 0.191869 | 0.191869 | 0.136947 | 0.136947 | 0.119116 | 0 | 0.023701 | 0.34652 | 5,359 | 149 | 114 | 35.966443 | 0.776985 | 0.141258 | 0 | 0.195402 | 0 | 0 | 0.014844 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057471 | false | 0 | 0.045977 | 0 | 0.16092 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6c966a95b20a1ce63d9f131af2e82012975b3710 | 7,252 | py | Python | q1/search_problem.py | dyrnade/ai_course | 32ef66ba3f8a733da3ac4010266f7868bff56a7f | [
"BSD-2-Clause"
] | 2 | 2017-02-05T20:03:29.000Z | 2017-12-25T22:47:14.000Z | q1/search_problem.py | dyrnade/ai_course | 32ef66ba3f8a733da3ac4010266f7868bff56a7f | [
"BSD-2-Clause"
] | null | null | null | q1/search_problem.py | dyrnade/ai_course | 32ef66ba3f8a733da3ac4010266f7868bff56a7f | [
"BSD-2-Clause"
] | null | null | null | # Cem GURESCI 200201027
from __future__ import print_function
from collections import deque # you will use it for breadth-first search
class State:
def __init__(self, name):
self.name = name
self.actions = []
self.is_goal = False
def add_action(self, action):
self.actions.append(action)
class Action:
def __init__(self, name, next_state, step_cost):
self.name = name
self.next_state = next_state
self.step_cost = step_cost
class Node:
#Class that represents nodes in the search algorithm. Use the
#constructor to initialize the state, parent node, action at the parent
#node and the path cost. In this homework, heuristic value is not used.
def __init__(self, state, parent, action, path_cost):
self.state = state
self.parent = parent
self.action = action
self.path_cost = path_cost
class Problem:
def __init__(self, name):
self.__name = name
self.__init_state = None
self.__frontier = None
self.__explored = None
self.__goal_states = None
def depth_first_search(self, init_state, goal_states):
#Computes the depth-first search result from the initial state to the
#goal state. When a goal node is drawn from the frontier list it is checked
#if it is a goal node. If node.state is not a goal state it is explored
#If it is a goal state it should return the value of self.__solution(goal_node).
#Depth-first search needs a LIFO queue (stack) for the frontier variable.
node = Node(init_state, None, init_state.actions, 0)
if node.state == goal_states[0]:
return self.__solution(node)
self.__frontier = [node]
self.__explored = []
while self.__frontier:
node = self.__frontier.pop() # pop the node from frontier list
self.__explored.append(node.state.name) # ADD node to explored list
if node.state.name == goal_states[0].name: # if it is G return the solution
return self.__solution(node)
for each in node.action: # for all children
child = Node(each.next_state, node, each.next_state.actions, each.step_cost + node.path_cost) # create the child
if child.state.name not in self.__explored and child not in self.__frontier: # check the child if it is not in explored and frontier list
self.__frontier.append(child) # add it to frontier list
if child.state == goal_states[0]:
self.__explored.append(child.state.name) # ADD the last item G even it is not explored.
self.__print_diagnostics(node) # print the explored node's diagnostics
def breadth_first_search(self, init_state, goal_states):
# Computes the breadth-first search result from the initial state to the
# goal state. When a goal node is drawn from the frontier list it is checked
# if it is a goal node. If node.state is not a goal state it is explored
# If it is a goal state it should return the value of self.__solution(goal_node).
# Breadth-first search needs a FIFO queue for the frontier variable.
node = Node(init_state, None, init_state.actions, 0)
self.__frontier = deque()
self.__frontier.append(node)
self.__explored = []
while self.__frontier:
node = self.__frontier.popleft() # pop the node from frontier list
if node.state.name not in self.__explored: # ADD node to explored list
self.__explored.append(node.state.name)
if node.state.name == goal_states[0].name: # if it is G return the solution
return self.__solution(node)
for each in sorted(node.action, key=lambda obj: obj.next_state.name): # for all children (alphabetical order)
child = Node(each.next_state, node, each.next_state.actions, each.step_cost + node.path_cost) # create the child
if child.state.name not in self.__explored and child not in self.__frontier: # check the child if it is not in explored and frontier list
self.__frontier.append(child)
self.__explored.append(child.state.name)
self.__print_diagnostics(node) # print the explored node's diagnostics
def __solution(self, goal_node):
# Returns a string representation of the solution containing the
# state names starting from the initial state to the given goal node.
# It should also contain information about the path cost although the
# search methods implemented here do not use the cost while finding the goal.
path = []
path.append(goal_node) # first add node G
while goal_node.parent != None: # until there is no parent (which is A in this case) do loop
path.append(goal_node.parent) # add node's parent to path list
goal_node = goal_node.parent # make new node goal_node's parent
return '->'.join([node.state.name for node in reversed(path)]) + " path_cost " + str(path[0].path_cost) # return the string PATH and PATH_COST of G
def __print_diagnostics(self, node):
print('Explored node ({0},{1})'.format(node.state.name, node.path_cost)) # Explored node's name and path_cost
print(' Frontier: {}'.format([(i.state.name, i.path_cost) for i in self.__frontier])) # Frontier list
@staticmethod
def connect_states(source_state, dest_state, step_cost):
source_initial = source_state.name[0].lower()
dest_initial = dest_state.name[0].lower()
action_name = source_initial + dest_initial
source_state.add_action(Action(action_name, dest_state, step_cost))
@staticmethod
def connect_states_both_ways(state0, state1, step_cost):
Problem.connect_states(state0, state1, step_cost)
Problem.connect_states(state1, state0, step_cost)
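# For example, Problem.connect_states(a, b, 10) registers an Action named
# 'ab' on state a; connect_states_both_ways additionally registers 'ba' on b.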
if __name__ == '__main__':
a = State('A')
b = State('B')
c = State('C')
d = State('D')
e = State('E')
f = State('F')
g = State('G')
Problem.connect_states_both_ways(a, b, 10)
Problem.connect_states_both_ways(a, d, 15)
Problem.connect_states_both_ways(b, c, 120)
Problem.connect_states_both_ways(c, e, 70)
Problem.connect_states_both_ways(c, g, 10)
Problem.connect_states_both_ways(d, e, 40)
Problem.connect_states_both_ways(e, f, 140)
Problem.connect_states_both_ways(f, g, 20)
problem = Problem('p1')
print('*** Depth-First Search algorithm from a to g ***')
print(problem.depth_first_search(a, [g]))
print('*** Breadth-First Search algorithm from a to g ***')
print(problem.breadth_first_search(a, [g]))
print(' -------- A new link between a and f is added ----------')
# Add a link between a and f with cost 50, then perform depth-first search
# again for 'a' node to 'g' node.
Problem.connect_states_both_ways(a, f, 50)
print(problem.depth_first_search(a, [g]))
| 43.42515 | 156 | 0.644926 | 1,023 | 7,252 | 4.364614 | 0.162268 | 0.030235 | 0.049272 | 0.047032 | 0.516013 | 0.475028 | 0.399104 | 0.354759 | 0.340873 | 0.280851 | 0 | 0.00924 | 0.268753 | 7,252 | 166 | 157 | 43.686747 | 0.832736 | 0.292609 | 0 | 0.283019 | 0 | 0 | 0.044928 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103774 | false | 0 | 0.018868 | 0 | 0.198113 | 0.113208 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6c9842a0e63f1e0fc07d93d96a3cc727ac82586d | 15,495 | py | Python | hyperformer/finetune_mt5_trainer.py | acsets/hyperformer_for_mmt | 883a825f77b76a4bff292660392e8e37755c5ed6 | [
"Apache-2.0"
] | null | null | null | hyperformer/finetune_mt5_trainer.py | acsets/hyperformer_for_mmt | 883a825f77b76a4bff292660392e8e37755c5ed6 | [
"Apache-2.0"
] | null | null | null | hyperformer/finetune_mt5_trainer.py | acsets/hyperformer_for_mmt | 883a825f77b76a4bff292660392e8e37755c5ed6 | [
"Apache-2.0"
] | null | null | null | import sys
import torch
import datasets
import json
import logging
import os
from pathlib import Path
from transformers import AutoTokenizer, HfArgumentParser, set_seed
from transformers.trainer_utils import EvaluationStrategy
from hyperformer.third_party.models import T5Config, T5ForConditionalGeneration
from hyperformer.third_party.trainers import T5Trainer
from hyperformer.adapters import AdapterController, AutoAdapterConfig
from hyperformer.data import AutoTask
from hyperformer.third_party.utils import TaskCollator, check_output_dir
from hyperformer.metrics import build_compute_metrics_fn
from hyperformer.training_args import Seq2SeqTrainingArguments, ModelArguments, DataTrainingArguments, \
AdapterTrainingArguments
from hyperformer.utils import freezing_params, get_last_checkpoint_path, create_dir,\
handle_metrics, get_training_args
logger = logging.getLogger(__name__)
print('checkpoint: packages loaded')
def remove_rank_info_from_argv(args):
extra_parameters = {}
if args[1].startswith("--local_rank"):
extra_parameters.update({'local_rank': int(args[1].split('=')[-1])})
del args[1]
return extra_parameters
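# For example (hypothetical argv): ['train.py', '--local_rank=2', 'cfg.json']
# is mutated in place to ['train.py', 'cfg.json'] and {'local_rank': 2} is returned.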
def main():
# See all possible arguments in src/transformers/training_args.py or by passing
# the --help flag to this script. We now keep distinct sets of args, for a cleaner
# separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments, AdapterTrainingArguments))
print('checkpoint: parser loaded')
# When running on multiple GPUs with torch.distributed.launch, a local_rank parameter is added.
# To let the parser still use the config file, we merge the local_rank into the parsed config.
if len(sys.argv) > 2 and sys.argv[1].startswith("--local_rank") and (sys.argv[2].endswith(".json")):
rank_info = remove_rank_info_from_argv(sys.argv)
args_dict = json.loads(Path(sys.argv[1]).read_text())
args_dict.update(rank_info)
model_args, data_args, training_args, adapter_args = parser.parse_dict(args_dict)
elif len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
logger.warning("config path: %s", sys.argv[1])
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args, adapter_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args, adapter_args = parser.parse_args_into_dataclasses()
check_output_dir(training_args)
print(f'model_args: {model_args}')
print(f'data_args: {data_args}')
print(f'training_args: {training_args}')
print(f'adapter_args: {adapter_args}')
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = T5Config.from_pretrained(
model_args.config_name if model_args.config_name else \
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout",
"attention_dropout", "train_adapters")
for p in extra_model_params:
if getattr(training_args, p, None):
assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
setattr(config, p, getattr(training_args, p))
# Gets the adapter config and updates the specified parameters.
if training_args.train_adapters:
adapter_config = AutoAdapterConfig.get(adapter_args.adapter_config_name)
adapter_config.input_dim = config.d_model
adapter_config.tasks = data_args.tasks
adapter_config.task_to_adapter = {task:adapter for task, adapter in zip(data_args.tasks, data_args.adapters)} if data_args.adapters is not None else None
# This mapping makes sense for parametric task embeddings; when pretrained
# task embeddings are used, no mapping is needed.
adapter_config.task_to_embeddings = {task:embedding for task, embedding in zip(data_args.tasks, data_args.task_embeddings)}\
if (data_args.task_embeddings is not None) else None
extra_adapter_params = ("task_embedding_dim",
"add_layer_norm_before_adapter",
"add_layer_norm_after_adapter",
"reduction_factor",
"hidden_dim",
"non_linearity",
"train_task_embeddings",
"projected_task_embedding_dim",
"task_hidden_dim",
"conditional_layer_norm",
"train_adapters_blocks",
"unique_hyper_net",
"unique_hyper_net_layer_norm",
"efficient_unique_hyper_net")
for p in extra_adapter_params:
if hasattr(adapter_args, p) and hasattr(adapter_config, p):
setattr(adapter_config, p, getattr(adapter_args, p))
else:
logger.warning(f"({adapter_config.__class__.__name__}) doesn't have a `{p}` attribute")
adapter_config.device = training_args.device
else:
adapter_config = None
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else \
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
if model_args.not_load_t5_checkpoint:
model = T5ForConditionalGeneration(config=config, adapter_config=adapter_config)
else:
last_checkpoint_path = training_args.output_dir
model_path = model_args.model_name_or_path if ((training_args.optimize_from_scratch and not training_args.optimize_from_scratch_with_loading_model) or not os.path.exists(os.path.join(last_checkpoint_path, 'pytorch_model.bin')))\
else last_checkpoint_path
logger.warning("model path loaded from : %s", model_path)
model = T5ForConditionalGeneration.from_pretrained(
model_path,
from_tf=".ckpt" in model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
adapter_config=adapter_config
)
# set num_beams for evaluation
if data_args.eval_beams is None:
data_args.eval_beams = model.config.num_beams
# freezing the parameters.
if training_args.do_train:
freezing_params(model, training_args, model_args, adapter_args)
if training_args.print_num_parameters:
logger.info(model)
for name, param in model.named_parameters():
if param.requires_grad:
logger.info("Parameter name %s", name)
total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
total_params = sum(p.numel() for p in model.parameters())
logger.info("Total trainable parameters %s", total_trainable_params)
logger.info("Total parameters %s", total_params)
# Gets the training/test/validation datasets.
dataset_class = AutoTask
if training_args.do_train:
print('trying to load train data')
train_datasets = [dataset_class.get(task, seed=data_args.data_seed).get_dataset(
split="train", n_obs=data_args.n_train, add_prefix=False if training_args.train_adapters else True)
for task in data_args.tasks]
print('train_datasets loaded')
dataset_sizes = [len(train_dataset) for train_dataset in train_datasets]
print(f'dataset_sizes: {dataset_sizes}')
train_dataset = datasets.concatenate_datasets(train_datasets)
print('train_dataset loaded')
training_args.remove_unused_columns = False
print(f'train_datasets: {train_datasets}')
print(f'train_dataset: {train_dataset}')
print('train data loaded')
print('trying to load dev data')
eval_datasets = ({task: dataset_class.get(task, seed=data_args.data_seed).get_dataset(
split="validation", n_obs=data_args.n_val,
add_prefix=False if training_args.train_adapters else True,
split_validation_test=training_args.split_validation_test)
for task in data_args.eval_tasks}
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None)
print('dev data loaded')
print('trying to load test data')
test_dataset = (
{task: dataset_class.get(task, seed=data_args.data_seed).get_dataset(
split="test", n_obs=data_args.n_test,
add_prefix=False if training_args.train_adapters else True,
split_validation_test=training_args.split_validation_test)
for task in data_args.eval_tasks} if training_args.do_test else None
)
print('test data loaded')
print('checkpoint: dataset loaded')
# Defines the metrics for evaluation.
compute_metrics_fn = (
build_compute_metrics_fn(data_args.eval_tasks, tokenizer) if training_args.predict_with_generate else None
)
# Defines the trainer.
trainer = T5Trainer(
model=model,
config=config,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_datasets,
data_collator=TaskCollator(tokenizer, data_args, tpu_num_cores=training_args.tpu_num_cores),
compute_metrics=None,
multi_task_compute_metrics=compute_metrics_fn,
data_args=data_args,
dataset_sizes=dataset_sizes if training_args.do_train else None,
adapter_config=adapter_config
)
if trainer.is_world_process_zero():
arguments = get_training_args([model_args, data_args, training_args, adapter_args])
handle_metrics("arguments", arguments, training_args.output_dir)
# Trains the model.
if training_args.do_train:
if trainer.is_world_process_zero():
last_checkpoint_path = training_args.output_dir
model_path = model_args.model_name_or_path if (training_args.optimize_from_scratch or not os.path.exists(os.path.join(last_checkpoint_path, 'pytorch_model.bin')))\
else last_checkpoint_path
if training_args.compute_time:
torch.cuda.synchronize() # wait for move to complete
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
trainer.train(
#get_last_checkpoint_path(training_args.output_dir) \
model_path=model_path \
if (os.path.exists(training_args.output_dir) and not training_args.optimize_from_scratch) else None,
)
if training_args.compute_time:
torch.cuda.synchronize() # wait for all_reduce to complete
end.record()
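# Note: torch.cuda.Event.elapsed_time reports the elapsed time in milliseconds.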
total_time = {"total_time": start.elapsed_time(end)}
print("###### total_time ", total_time)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
all_metrics = {}
if training_args.do_eval or training_args.do_test:
if trainer.is_world_process_zero():
# By default we load the model from the last checkpoint path.
# When saving the model with the best metrics, make sure to set
# save_total_limit = 1 so that the best model is loaded here.
# If no checkpoint exists, get_last_checkpoint_path returns the output_dir.
last_checkpoint_path = get_last_checkpoint_path(training_args.output_dir)
config = T5Config.from_pretrained(
last_checkpoint_path,
cache_dir=model_args.cache_dir)
model = T5ForConditionalGeneration.from_pretrained(
last_checkpoint_path,
from_tf=".ckpt" in training_args.output_dir,
config=config,
cache_dir=model_args.cache_dir,
adapter_config=adapter_config
)
# NOTE: if the trainer is not re-defined here, there is a bug where the
# HuggingFace code does not use the best checkpoint.
trainer = T5Trainer(
model=model,
config=config,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_datasets,
data_collator=TaskCollator(tokenizer, data_args, tpu_num_cores=training_args.tpu_num_cores),
compute_metrics=None,
multi_task_compute_metrics=compute_metrics_fn,
data_args=data_args,
dataset_sizes=dataset_sizes if training_args.do_train else None,
adapter_config=adapter_config
)
if training_args.train_adapters:
if adapter_args.adapter_config_name == "adapter" and data_args.adapters is not None:
for name, sub_module in model.named_modules():
task_to_adapter = {eval_task: adapter for eval_task, adapter in
zip(data_args.eval_tasks, data_args.adapters)}
if isinstance(sub_module, AdapterController):
sub_module.set_task_to_adapter_map(task_to_adapter)
if training_args.do_eval:
metrics = trainer.evaluate()
if trainer.is_world_process_zero():
handle_metrics("val", metrics, training_args.output_dir)
all_metrics.update(metrics)
if training_args.do_test:
metrics = trainer.evaluate(test_dataset)
if trainer.is_world_process_zero():
handle_metrics("test", metrics, training_args.output_dir)
all_metrics.update(metrics)
if torch.cuda.is_available() and training_args.compute_memory:
peak_memory = torch.cuda.max_memory_allocated()/1024**2
print(
"Memory utilization",
peak_memory,
"MB"
)
memory_usage = {"peak_memory": peak_memory}
return all_metrics
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 47.824074 | 236 | 0.671765 | 1,947 | 15,495 | 5.045198 | 0.177196 | 0.084292 | 0.034205 | 0.019546 | 0.373511 | 0.32658 | 0.285962 | 0.270182 | 0.243612 | 0.228952 | 0 | 0.003265 | 0.248854 | 15,495 | 323 | 237 | 47.972136 | 0.840708 | 0.110681 | 0 | 0.266917 | 0 | 0.003759 | 0.104708 | 0.019501 | 0 | 0 | 0 | 0 | 0.003759 | 1 | 0.011278 | false | 0 | 0.06391 | 0 | 0.082707 | 0.078947 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6c9ecfa28a220e7ab85d4b37494ab5624209e1ee | 330 | py | Python | data-set-without-time-with-negative_2.py | baruaranojoy/Mini-Project-3rd-Sem | 8ae699f1e52764b744179d84d14ffde95d70eea7 | [
"MIT"
] | null | null | null | data-set-without-time-with-negative_2.py | baruaranojoy/Mini-Project-3rd-Sem | 8ae699f1e52764b744179d84d14ffde95d70eea7 | [
"MIT"
] | null | null | null | data-set-without-time-with-negative_2.py | baruaranojoy/Mini-Project-3rd-Sem | 8ae699f1e52764b744179d84d14ffde95d70eea7 | [
"MIT"
] | null | null | null | fobj = open("mini_project_data_set_with_time_only.txt")
f = open("mini_project_dynamic_graph_data_set_without_time.txt", "w")
for line in fobj:
a, b, c = map(int, line.rstrip().split()) # the first field (a timestamp) is intentionally dropped
final = str(b) + ' ' + str(c) + '\n'
f.write(final)
# print(final)
fobj.close()
f.close() # also close the output file so the writes are flushed
| 25.384615 | 70 | 0.587879 | 56 | 330 | 3.232143 | 0.642857 | 0.088398 | 0.165746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02381 | 0.236364 | 330 | 12 | 71 | 27.5 | 0.694444 | 0.021212 | 0 | 0 | 0 | 0 | 0.309677 | 0.296774 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6c9f54bb218aacec1eeb0a5b1ef242f9d63757ed | 5,723 | py | Python | bin/make_dataset.py | htwangtw/nkicap | fa270a71d8e527560da791b18263ee8b63c8c25f | [
"MIT"
] | 1 | 2021-05-18T09:08:00.000Z | 2021-05-18T09:08:00.000Z | bin/make_dataset.py | htwangtw/nkicap | fa270a71d8e527560da791b18263ee8b63c8c25f | [
"MIT"
] | 13 | 2021-03-12T16:21:01.000Z | 2021-06-01T16:54:25.000Z | bin/make_dataset.py | htwangtw/nkicap | fa270a71d8e527560da791b18263ee8b63c8c25f | [
"MIT"
] | 1 | 2021-05-17T18:30:59.000Z | 2021-05-17T18:30:59.000Z | """
Get CAP data and MRIQ of the current sample.
Only need to be ran once for tidying things up, but keep it here for book keeping.
"""
import json
import os
import numpy as np
import pandas as pd
from scipy import io
from nkicap import get_project_path, read_tsv
SOURCE_MAT = "sourcedata/CAP_results_organized_toHaoTing.mat"
SOURCE_MRIQ = "sourcedata/ses-BAS1_mriq.csv"
PARTICIPANTS = "enhanced_nki/participants.tsv"
MRIQ = "enhanced_nki/mriq.tsv"
LABEL = "enhanced_nki/desg.tsv"
SCHAEFER = "parcellations/Schaefer2018_1000Parcels_7Networks_order.txt"
TIAN = "parcellations/Tian_Subcortex_S4_3T_label.txt"
CAP_OCC = "enhanced_nki/desc-cap_occurence.tsv"
CAP_DUR = "enhanced_nki/desc-cap_duration.tsv"
CAP_GROUP = "enhanced_nki/desc-cap_groupmap.tsv"
CAP_ROI = "enhanced_nki/desg.tsv"
data_dir = get_project_path() / "data"
def roi_labels():
"""Make ROI label refereces."""
shaefer = pd.read_table(data_dir / SCHAEFER, header=None, index_col=0)
tian = pd.read_table(data_dir / TIAN, header=None)
shaefer = shaefer.rename(columns={1: "name"})
tian = tian.rename(columns={0: "name"})
shaefer["index"] = range(1, 1001)
tian["index"] = range(1001, 1055)
label = pd.concat(
[shaefer.loc[:, ["index", "name"]], tian.loc[:, ["index", "name"]]],
ignore_index=True,
)
label.to_csv(data_dir / LABEL, index=False, sep="\t")
def source2raw():
"""Parse .mat file to txt."""
cap_results = io.loadmat(
data_dir / SOURCE_MAT, squeeze_me=True, simplify_cells=True
)["CAP_results"]
mriq_source = pd.read_csv(data_dir / SOURCE_MRIQ, index_col=0).dropna()
mriq_source["mriq"] = np.ones(mriq_source.shape[0])
# cap summary stats
occ = pd.DataFrame(
cap_results["occurence_rate"],
index=[f"occ_cap_{i+1:02d}" for i in range(8)],
columns=cap_results["subjects"],
).T
occ.index.name = "participant_id"
occ.to_csv(data_dir / CAP_OCC, sep="\t")
dur = pd.DataFrame(
cap_results["duration"],
index=[f"dur_cap_{i+1:02d}" for i in range(8)],
columns=cap_results["subjects"],
).T
dur.index.name = "participant_id"
dur.to_csv(data_dir / CAP_DUR, sep="\t")
# cap map and transition matrix
cap_labels = [f"cap_{i+1:02d}" for i in range(8)]
for transit, capmap, sub in zip(
cap_results["transition"],
cap_results["map_sub"],
cap_results["subjects"],
):
# create subject dir
sub_path = f"enhanced_nki/sub-{sub}/"
if not (data_dir / sub_path).exists():
os.makedirs(str(data_dir / sub_path))
transit = pd.DataFrame(transit, index=cap_labels, columns=cap_labels)
capmap = pd.DataFrame(capmap, columns=cap_labels, index=range(1, 1055))
transit.to_csv(
data_dir / f"enhanced_nki/sub-{sub}/sub-{sub}_desc-transition.tsv",
sep="\t",
)
capmap.to_csv(
data_dir / f"enhanced_nki/sub-{sub}/sub-{sub}_desc-capmap_bold.tsv",
sep="\t",
)
cap_group = pd.DataFrame(
cap_results["map_group"], columns=cap_labels, index=range(1, 1055)
)
cap_group.to_csv(data_dir / CAP_GROUP, sep="\t")
# create participants info file
participants = pd.DataFrame(
[cap_results["age"], cap_results["sex"]],
columns=cap_results["subjects"],
index=["age", "sex"],
).T
participants.index.name = "participant_id"
participants = pd.concat([participants, mriq_source["mriq"]], axis=1).dropna(
thresh=2
)
participants["mriq"] = participants["mriq"].fillna(0)
participants.to_csv(data_dir / PARTICIPANTS, sep="\t")
# save mriq
mriq = mriq_source.loc[participants[participants["mriq"] == 1].index, :]
mriq = mriq.drop(columns=["mriq"])
mriq = mriq.rename(columns={"rmiq_26": "mriq_26"})
mriq = mriq.loc[:, [f"mriq_{i+1:02d}" for i in range(31)]]
mriq.to_csv(data_dir / MRIQ, sep="\t")
def fetch_dataset():
"""
Get CAP data and MRIQ of the current sample.
Returns
-------
dataset: dict
A dictionary that contains paths to the CAP maps
master: pd.DataFrame
all individual differences data in one place,
including CAP derivatives, mriq and basic demographics.
"""
participants = read_tsv(data_dir / PARTICIPANTS, index_col=0).replace(
{"sex": {0: "F", 1: "M"}}
)
mriq = read_tsv(data_dir / MRIQ, index_col=0).replace({"MD": np.nan}).dropna()
occ = read_tsv(data_dir / CAP_OCC, index_col=0)
dur = read_tsv(data_dir / CAP_DUR, index_col=0)
roi = read_tsv(data_dir / CAP_ROI, index_col=0)
master = pd.concat([participants, mriq, occ, dur], axis=1, join="inner")
dataset = {
"group": f"data/{CAP_GROUP}",
"subject": {},
"roi": roi.values.squeeze().tolist(),
}
for subject in master.index.tolist():
sub_cap = f"data/enhanced_nki/sub-{subject}/sub-{subject}_desc-capmap_bold.tsv"
dataset["subject"][subject] = sub_cap
return dataset, master
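# Example of the returned dataset mapping (hypothetical subject id):
# {"group": "data/enhanced_nki/desc-cap_groupmap.tsv",
#  "subject": {"A00008326": "data/enhanced_nki/sub-A00008326/sub-A00008326_desc-capmap_bold.tsv"},
#  "roi": [...]}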
if __name__ == "__main__":
source2raw()
roi_labels()
dataset, master = fetch_dataset()
master.to_csv(get_project_path() / "data" / "enhanced_nki.tsv", sep="\t")
with open(get_project_path() / "data" / "cap.json", "w") as fp:
json.dump(dataset, fp, indent=2)
def test_fetch_dataset():
"""
This test is only suitable to run locally with the full data dir
"""
dataset, master = fetch_dataset()
assert len(dataset["subject"]) == 711
assert len(dataset["roi"]) == 1054
assert dataset["group"] == "data/enhanced_nki/desc-cap_groupmap.tsv"
assert master.shape[0] == 711
assert type(dataset["roi"]) == list
| 33.863905 | 87 | 0.643544 | 803 | 5,723 | 4.383562 | 0.246575 | 0.041761 | 0.025568 | 0.027273 | 0.155682 | 0.113068 | 0.096591 | 0.074432 | 0.074432 | 0.048864 | 0 | 0.019201 | 0.208282 | 5,723 | 168 | 88 | 34.065476 | 0.757669 | 0.10519 | 0 | 0.082645 | 0 | 0 | 0.201349 | 0.119818 | 0 | 0 | 0 | 0 | 0.041322 | 1 | 0.033058 | false | 0 | 0.049587 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6c9fe2985c452a0c234c3052c6dde474ab221968 | 4,770 | py | Python | apps/combine-service/src/handlers/combine/validate.py | biosimulations/Biosimulations | 3b55968d810110459a126deabe6d36d8d2a6e1ae | [
"MIT"
] | 7 | 2020-09-30T16:57:55.000Z | 2021-07-15T12:24:03.000Z | apps/combine-service/src/handlers/combine/validate.py | biosimulations/Biosimulations | 3b55968d810110459a126deabe6d36d8d2a6e1ae | [
"MIT"
] | 1,884 | 2020-08-23T17:40:26.000Z | 2021-09-01T16:29:20.000Z | apps/combine-service/src/handlers/combine/validate.py | biosimulations/Biosimulations | 3b55968d810110459a126deabe6d36d8d2a6e1ae | [
"MIT"
] | 2 | 2020-10-07T14:20:15.000Z | 2021-06-25T04:19:18.000Z | from ...exceptions import BadRequestException
from ...utils import get_temp_dir, make_validation_report
from biosimulators_utils.combine.data_model import CombineArchiveContentFormat
from biosimulators_utils.combine.io import CombineArchiveReader
from biosimulators_utils.combine.validation import validate
from biosimulators_utils.config import Config
from biosimulators_utils.omex_meta.data_model import OmexMetadataInputFormat, OmexMetadataSchema
import os
import requests
import requests.exceptions
def handler(body, file=None):
''' Validate a COMBINE/OMEX archive
Args:
body (:obj:`dict`): dictionary in schema ``ValidateCombineArchiveFileOrUrl`` with keys
* ``url`` whose value has schema ``Url`` with the URL for a COMBINE/OMEX archive
* ``omexMetadataFormat`` (:obj:`str`): format of the OMEX Metadata files
* ``omexMetadataSchema`` (:obj:`str`): schema for validating the OMEX Metadata files
* ``validateOmexManifest`` (:obj:`bool`, optional): Whether to validate the OMEX manifest file in the archive
* ``validateSedml`` (:obj:`bool`, optional): Whether to validate the SED-ML files in the archive
* ``validateSedmlModels`` (:obj:`bool`, optional): Whether to validate the sources of the models in the SED-ML files in the archive
* ``validateOmexMetadata`` (:obj:`bool`, optional): Whether to validate the OMEX metadata files in the archive according to
`BioSimulators' conventions <https://biosimulators.org/conventions/metadata>`_
* ``validateImages`` (:obj:`bool`, optional): Whether to validate the image (BMP, GIF, JPEG, PNG, TIFF, WEBP) files in the archive
file (:obj:`werkzeug.datastructures.FileStorage`): COMBINE/OMEX archive file
Returns:
``ValidationReport``: information about the validity or
lack thereof of a COMBINE/OMEX archive
'''
try:
omexMetadataInputFormat = OmexMetadataInputFormat(body['omexMetadataFormat'])
except ValueError as exception:
raise BadRequestException(title='`omexMetadataFormat` must be a recognized format.', exception=exception)
try:
omexMetadataSchema = OmexMetadataSchema(body['omexMetadataSchema'])
except ValueError as exception:
raise BadRequestException(title='`omexMetadataSchema` must be a recognized schema.', exception=exception)
config = Config(
OMEX_METADATA_INPUT_FORMAT=omexMetadataInputFormat,
OMEX_METADATA_SCHEMA=omexMetadataSchema,
VALIDATE_OMEX_MANIFESTS=body.get('validateOmexManifest', True),
VALIDATE_SEDML=body.get('validateSedml', True),
VALIDATE_SEDML_MODELS=body.get('validateSedmlModels', True),
VALIDATE_OMEX_METADATA=body.get('validateOmexMetadata', True),
VALIDATE_IMAGES=body.get('validateImages', True),
)
archive_file = file
archive_url = body.get('url', None)
if archive_url and archive_file:
raise BadRequestException(
title='Only one of `file` or `url` can be used at a time.',
instance=ValueError(),
)
if not archive_url and not archive_file:
raise BadRequestException(
title='One of `file` or `url` must be used.',
instance=ValueError(),
)
# create temporary working directory
temp_dirname = get_temp_dir()
archive_filename = os.path.join(temp_dirname, 'archive.omex')
# get COMBINE/OMEX archive
if archive_file:
archive_file.save(archive_filename)
else:
try:
response = requests.get(archive_url)
response.raise_for_status()
except requests.exceptions.RequestException as exception:
title = 'COMBINE/OMEX archive could not be loaded from `{}`'.format(
archive_url)
raise BadRequestException(
title=title,
instance=exception,
)
# save archive to local temporary file
with open(archive_filename, 'wb') as file:
file.write(response.content)
# read archive
archive_dirname = os.path.join(temp_dirname, 'archive')
reader = CombineArchiveReader()
errors = []
warnings = []
try:
archive = reader.run(archive_filename, archive_dirname, config=config)
except Exception as exception:
errors = [['The file could not be parsed as a COMBINE/OMEX archive.', [[str(exception)]]]]
if not errors:
errors, warnings = validate(
archive, archive_dirname,
formats_to_validate=list(CombineArchiveContentFormat.__members__.values()),
config=config,
)
return make_validation_report(errors, warnings, filenames=[archive_filename])
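# A minimal usage sketch (hypothetical URL and enum strings) for local testing:
#
# report = handler({
#     'url': 'https://example.com/archive.omex',
#     'omexMetadataFormat': 'rdfxml',
#     'omexMetadataSchema': 'BioSimulations',
# })
#
# The accepted strings are whichever values OmexMetadataInputFormat and
# OmexMetadataSchema define; the two above are assumptions, not a guarantee.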
| 43.363636 | 143 | 0.685744 | 512 | 4,770 | 6.273438 | 0.289063 | 0.023973 | 0.039228 | 0.034247 | 0.157534 | 0.12391 | 0.106476 | 0.024284 | 0 | 0 | 0 | 0 | 0.224528 | 4,770 | 109 | 144 | 43.761468 | 0.868343 | 0.299161 | 0 | 0.152778 | 0 | 0 | 0.133109 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013889 | false | 0 | 0.138889 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ca1d97f0fbfd4bb8f6635c24a81cb79a9e200fe | 2,433 | py | Python | chazhi/newton.py | Tigeraus/MA | 45ff4e09e7dbf0b556922a13f4e42e928f996b80 | [
"MIT"
] | null | null | null | chazhi/newton.py | Tigeraus/MA | 45ff4e09e7dbf0b556922a13f4e42e928f996b80 | [
"MIT"
] | null | null | null | chazhi/newton.py | Tigeraus/MA | 45ff4e09e7dbf0b556922a13f4e42e928f996b80 | [
"MIT"
] | null | null | null | # coding: utf-8
import matplotlib.pyplot as plt
"""
@brief: 计算n阶差商 f[x0, x1, x2 ... xn]
@param: xi 所有插值节点的横坐标集合 o
@param: fi 所有插值节点的纵坐标集合 / \
@return: 返回xi的i阶差商(i为xi长度减1) o o
@notice: a. 必须确保xi与fi长度相等 / \ / \
b. 由于用到了递归,所以留意不要爆栈了. o o o o
c. 递归减递归(每层递归包含两个递归函数), 每层递归次数呈二次幂增长,总次数是一个满二叉树的所有节点数量(所以极易栈溢出)
"""
def get_order_diff_quot(xi = [], fi = []):
if len(xi) > 2 and len(fi) > 2:
return (get_order_diff_quot(xi[:len(xi) - 1], fi[:len(fi) - 1]) - get_order_diff_quot(xi[1:len(xi)], fi[1:len(fi)])) / float(xi[0] - xi[-1])
return (fi[0] - fi[1]) / float(xi[0] - xi[1])
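# A quick sanity check (hypothetical nodes):
# get_order_diff_quot([1, 2], [3, 7]) = (3 - 7) / (1 - 2) = 4.0,
# i.e. the first-order divided difference f[x0, x1].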
"""
@brief: builds the Wi(x) function;
        examples of Wi: W1 = (x - x0); W2 = (x - x0)(x - x1); W3 = (x - x0)(x - x1)(x - x2)
@param: i   the order i (a degree-i polynomial)
@param: xi  x coordinates of all interpolation nodes
@return: the Wi(x) function
"""
def get_Wi(i = 0, xi = []):
def Wi(x):
result = 1.0
for each in range(i):
result *= (x - xi[each])
return result
return Wi
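# For example, get_Wi(2, [0, 1])(3) evaluates (3 - 0) * (3 - 1) = 6.0.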
"""
@brief: builds the Newton interpolation function
"""
def get_Newton_inter(xi = [], fi = []):
def Newton_inter(x):
result = fi[0]
for i in range(2, len(xi) + 1): # up to and including the highest-order divided-difference term
result += (get_order_diff_quot(xi[:i], fi[:i]) * get_Wi(i-1, xi)(x))
return result
return Newton_inter
def get_xishu(xlist, ylist):
xishulist = [] # first-order divided differences (slopes) between consecutive nodes
for i in range(len(xlist) - 1):
xishulist.append(float(ylist[i + 1] - ylist[i]) / (xlist[i + 1] - xlist[i]))
return xishulist
"""
demo:
"""
if __name__ == '__main__':
''' Interpolation nodes: generated here from a quadratic function, with consecutive nodes 10 apart on the x axis '''
sr_x = [i for i in range(-50, 51, 10)]
sr_fx = [i**2 for i in sr_x]
Nx = get_Newton_inter(sr_x, sr_fx) # build the interpolation function
tmp_x = [i for i in range(-50, 51)] # test inputs
tmp_y = [Nx(i) for i in tmp_x] # y values of the test inputs from the interpolation function
''' plot '''
plt.figure("I love china")
ax1 = plt.subplot(111)
plt.sca(ax1)
plt.plot(sr_x, sr_fx, linestyle = '', marker='o', color='b')
plt.plot(tmp_x, tmp_y, linestyle = '--', color='r')
plt.show() | 27.965517 | 157 | 0.452117 | 314 | 2,433 | 3.372611 | 0.33121 | 0.022663 | 0.033994 | 0.060434 | 0.120869 | 0.032106 | 0.032106 | 0.032106 | 0 | 0 | 0 | 0.036005 | 0.394986 | 2,433 | 87 | 158 | 27.965517 | 0.683424 | 0.017263 | 0 | 0.055556 | 0 | 0 | 0.016938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.027778 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ca2c09182cc5e54b66ed55e21933f11d1311b83 | 687 | py | Python | editpe.py | CyberSecLabBS/AdversarialMLMalwareSI2021 | a2d9d4e6049bcf8f11d412a85c40fc68a33d8643 | [
"CC0-1.0",
"MIT"
] | null | null | null | editpe.py | CyberSecLabBS/AdversarialMLMalwareSI2021 | a2d9d4e6049bcf8f11d412a85c40fc68a33d8643 | [
"CC0-1.0",
"MIT"
] | null | null | null | editpe.py | CyberSecLabBS/AdversarialMLMalwareSI2021 | a2d9d4e6049bcf8f11d412a85c40fc68a33d8643 | [
"CC0-1.0",
"MIT"
] | null | null | null | import lief
path_benign_exe = 'good.exe'
path_strings_exe = 'good.txt'
path_bad_exe = 'jigsaw_compress.exe'
path_angel_exe = 'angel.exe'
with open(path_bad_exe, 'rb') as f:
bytez = f.read()
# APPEND THE BENIGN BINARY
with open(path_benign_exe, 'rb') as f:
good_binary = f.read()
bytez += good_binary
# APPEND BENIGN STRINGS
with open(path_strings_exe, 'r') as f:
good_strings = f.read()
# for i in range(10): # append the strings 10 times
bytez += bytes(good_strings, encoding='ascii')
# MODIFY THE TIMESTAMP
binary = lief.PE.parse(list(bytez))
binary.header.time_date_stamps = 0 # zero out the PE build timestamp
builder = lief.PE.Builder(binary)
builder.build_imports(False)
builder.build()
builder.write(path_angel_exe)
| 22.16129 | 49 | 0.740902 | 110 | 687 | 4.409091 | 0.454545 | 0.049485 | 0.074227 | 0.03299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006711 | 0.13246 | 687 | 30 | 50 | 22.9 | 0.807047 | 0.142649 | 0 | 0 | 0 | 0 | 0.092624 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ca3529ff35cdf5a1181dde039a07d3a4c603eca | 2,657 | py | Python | test.py | shaun95/variational-diffwave | b3edc2f1c3dc13fb72c068fb1dd0b24a2b9b423a | [
"Apache-2.0"
] | 1 | 2022-01-29T15:22:41.000Z | 2022-01-29T15:22:41.000Z | test.py | shaun95/variational-diffwave | b3edc2f1c3dc13fb72c068fb1dd0b24a2b9b423a | [
"Apache-2.0"
] | null | null | null | test.py | shaun95/variational-diffwave | b3edc2f1c3dc13fb72c068fb1dd0b24a2b9b423a | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import torch
import torchaudio
import models as module_arch
from utils.utils import get_instance
from inference import *
def main(config, ckpt, infile, outfile, T, amp, deterministic):
device = torch.device('cuda')
trainer_config = config['trainer']
ckpt_dict = torch.load(ckpt, map_location=device)
n_fft = trainer_config['n_fft']
hop_length = trainer_config['hop_length']
n_mels = trainer_config['n_mels']
sr = trainer_config['sr']
train_T = trainer_config['train_T']
model = get_instance(module_arch, config['arch']).to(device)
mel_spec = module_arch.MelSpec(sr, n_fft, hop_length=hop_length,
f_min=20, f_max=8000, n_mels=n_mels).to(device)
model.load_state_dict(ckpt_dict['ema_model'])
if 'noise_scheduler' in ckpt_dict:
noise_scheduler = module_arch.NoiseScheduler().to(device)
noise_scheduler.load_state_dict(
ckpt_dict['noise_scheduler'], strict=False)
noise_scheduler.eval()
else:
max_log_snr = trainer_config['max_log_snr']
min_log_snr = trainer_config['min_log_snr']
noise_scheduler = module_arch.CosineScheduler(
gamma0=-max_log_snr, gamma1=-min_log_snr).to(device)
model.eval()
y, sr = torchaudio.load(infile)
y = y.mean(0, keepdim=True).to(device)
mels = mel_spec(y)
z_1 = torch.randn_like(y)
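# z_1 is the terminal Gaussian latent of the diffusion process; the reverse
# process below denoises it step by step, conditioned on the mel spectrogram.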
if train_T:
steps = torch.linspace(0, train_T, T + 1,
device=device).round().long()
gamma, steps = noise_scheduler(steps / train_T)
else:
steps = torch.linspace(0, 1, T + 1, device=device)
gamma, steps = noise_scheduler(steps)
with torch.no_grad():
if deterministic:
z_0 = reverse_process_ddim(z_1, mels, gamma, steps, model, with_amp=amp)
else:
z_0 = reverse_process_new(z_1, mels, gamma, steps, model, with_amp=amp)
x = z_0.squeeze().clip(-0.99, 0.99)
torchaudio.save(outfile, x.unsqueeze(0).cpu(), sr)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inferencer')
parser.add_argument('config', type=str, help='config file')
parser.add_argument('ckpt', type=str)
parser.add_argument('infile', type=str)
parser.add_argument('outfile', type=str)
parser.add_argument('-T', type=int, default=20)
parser.add_argument('--amp', action='store_true')
parser.add_argument('--ddim', action='store_true')
args = parser.parse_args()
config = json.load(open(args.config))
main(config, args.ckpt, args.infile, args.outfile, args.T, args.amp, args.ddim)
| 35.426667 | 84 | 0.663154 | 370 | 2,657 | 4.5 | 0.294595 | 0.062462 | 0.071471 | 0.028829 | 0.140541 | 0.037237 | 0.037237 | 0.037237 | 0.037237 | 0 | 0 | 0.01381 | 0.209635 | 2,657 | 74 | 85 | 35.905405 | 0.779048 | 0 | 0 | 0.04918 | 0 | 0 | 0.071886 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016393 | false | 0 | 0.114754 | 0 | 0.131148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ca361412d77a53e9f88dd91d91611b188b04118 | 5,471 | py | Python | app.py | darshil-IBI/ibi-jeopardy | dbaaf5ec3139d734a99681c95af6e14210aa98ba | [
"Apache-2.0"
] | null | null | null | app.py | darshil-IBI/ibi-jeopardy | dbaaf5ec3139d734a99681c95af6e14210aa98ba | [
"Apache-2.0"
] | null | null | null | app.py | darshil-IBI/ibi-jeopardy | dbaaf5ec3139d734a99681c95af6e14210aa98ba | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import random
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
jepData = {}
currentQuestion = {}
@app.route('/webhook', methods=['POST'])
def webhook():
print('in webhook')
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
result = req.get("result")
parameters = result.get("parameters")
speech = ""
if(parameters.get("playJeopardy")):
speech = processHelloRequest(parameters)
elif(parameters.get('suggestCategories')):
speech = processSuggestionRequest(parameters)
elif(parameters.get('answer')):
speech = processAnswerRequest(parameters)
else:
speech = processQuestionRequest(parameters)
res = makeWebhookResult(speech)
res = json.dumps(res, indent=4)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(speech):
print("in makeWebHookResult")
print(speech)
if (speech == ''):
return {
"speech": "Hi. I'm the Jeopardy bot. Want to play?"
}
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
def processQuestionRequest(parameters):
print('processQuestionRequest')
global jepData
global currentQuestion
question_query = makeQuery(parameters)
currentQuestion = selectQuestion(question_query, jepData)
print (currentQuestion)
if(currentQuestion is not None):
return "Category: " + currentQuestion['category'] + " Round: " + currentQuestion['round'] + (" Value: " + currentQuestion['value'] if (currentQuestion['round'] != 'Final Jeopardy!') else '') + "\nAnswer: " + currentQuestion['question']
return "Couldn't find anything with those requirements."
def processHelloRequest(parameters):
print ('in processHelloRequest')
global currentQuestion
currentQuestion = {}
return "Ready when you are!"
def processSuggestionRequest(parameters):
print('in processSuggestionRequest')
global jepData
suggested = []
for question in jepData:
suggested.append(question['category'])
return ", ".join(random.sample(set(suggested), 5))
def processAnswerRequest(parameters):
print('in processAnswerRequest')
global currentQuestion
answer = parameters.get("answer")
if(answer.endswith("?")):
answer = answer[:-1]
if not currentQuestion: # covers both None and the initial empty dict
return "There is no question to answer!"
#elif(answer.upper() in currentQuestion['answer'].upper()):
# currentQuestion = {}
return "You are Correct!" if (answer.upper() in currentQuestion['answer'].upper()) else "Incorrect answer!"
def makeQuery(parameters):
print('in makeQuery')
jsonFilter = {}
category = parameters.get("category")
value = parameters.get("value")
qround = parameters.get("round")
air_date = parameters.get("air_date")
print("Category:" + category)
print("Value:" + value)
if(category is not None and category != ""):
jsonFilter['category'] = category
if (value is not None and value != ""):
if(not value.startswith("$")):
value = "$" + value
jsonFilter['value'] = { "value": value, "range": "exact" }
if (qround is not None and qround != ""):
jsonFilter['round'] = qround
if (air_date is not None and air_date != ""):
jsonFilter['air_date'] = air_date
print("jsonfilter")
print(jsonFilter)
return jsonFilter
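# Example (hypothetical parameters): {"category": "SCIENCE", "value": "200"}
# yields {"category": "SCIENCE", "value": {"value": "$200", "range": "exact"}}.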
def selectQuestion(jsonFilter, data):
print('in selectQuestion')
if('category' in jsonFilter):
print("in category")
data = [x for x in data if jsonFilter['category'].upper() in x['category'].upper()]
if('value' in jsonFilter):
print("in value")
if(jsonFilter['value']['range'] == 'higher'):
print("in higher")
data = [x for x in data if x['value'] >= jsonFilter['value']['value']]
elif(jsonFilter['value']['range'] == 'lower'):
print("in lower")
data = [x for x in data if x['value'] <= jsonFilter['value']['value']]
else:
print("in exact")
data = [x for x in data if x['value'] == jsonFilter['value']['value']]
if('round' in jsonFilter):
print("in round")
data = [x for x in data if x['round'] == jsonFilter['round']]
if('air_date' in jsonFilter):
print("in question")
data = [x for x in data if jsonFilter['air_date'] in x['air_date'].upper()]
if('show_number' in jsonFilter):
data = [x for x in data if x['show_number'] == jsonFilter['show_number']]
if(data):
return random.choice(data)
return None
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
with open("JEOPARDY_QUESTIONS.json") as contents:
jepData = json.load(contents)
print('Jeopardy data loaded. Ready to rock!')
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
| 29.896175 | 243 | 0.634984 | 609 | 5,471 | 5.642036 | 0.247947 | 0.028522 | 0.016298 | 0.018335 | 0.083527 | 0.083527 | 0.060827 | 0.060827 | 0.034633 | 0.034633 | 0 | 0.002838 | 0.227198 | 5,471 | 182 | 244 | 30.06044 | 0.809839 | 0.027966 | 0 | 0.080882 | 0 | 0 | 0.191453 | 0.018261 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.080882 | 0 | 0.227941 | 0.191176 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ca90bc02d88cb1363f2ce7f4466d8d966e749f6 | 228 | py | Python | flask_reddit/subreddits/constants.py | bgigous/flask-labs-project4 | 46a62fd2b28d66acc2de7e89488f4750de8ab0df | [
"MIT"
] | null | null | null | flask_reddit/subreddits/constants.py | bgigous/flask-labs-project4 | 46a62fd2b28d66acc2de7e89488f4750de8ab0df | [
"MIT"
] | null | null | null | flask_reddit/subreddits/constants.py | bgigous/flask-labs-project4 | 46a62fd2b28d66acc2de7e89488f4750de8ab0df | [
"MIT"
] | null | null | null | # For simplicity, these values are shared among both threads and comments.
MAX_THREADS = 1000
MAX_NAME = 50
MAX_DESCRIPTION = 3000
MAX_ADMINS = 2
# status
DEAD = 0
ALIVE = 1
STATUS = {
DEAD: 'dead',
ALIVE: 'alive',
}
| 14.25 | 74 | 0.684211 | 33 | 228 | 4.606061 | 0.727273 | 0.131579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073446 | 0.223684 | 228 | 15 | 75 | 15.2 | 0.785311 | 0.346491 | 0 | 0 | 0 | 0 | 0.062069 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6caa67bed589d7b7cb2df1a1cf71d7085f927d76 | 689 | py | Python | src/web/dates/admin.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 5 | 2018-03-08T17:22:27.000Z | 2018-03-11T14:20:53.000Z | src/web/dates/admin.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 263 | 2018-03-08T18:05:12.000Z | 2022-03-11T23:26:20.000Z | src/web/dates/admin.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 6 | 2018-03-12T19:48:19.000Z | 2022-01-14T04:58:52.000Z | from django.contrib import admin
from dates import models
class GroupKeyDateExceptionInline(admin.StackedInline):
model = models.GroupKeyDateException
extra = 0
autocomplete_fields = ('group',)
class UserKeyDateExceptionInline(admin.StackedInline):
model = models.UserKeyDateException
extra = 0
autocomplete_fields = ('user',)
@admin.register(models.KeyDate)
class KeyDateAdmin(admin.ModelAdmin):
list_display = (
'id',
'school',
'datetime',
'name',
)
list_filter = ('school',)
inlines = (GroupKeyDateExceptionInline, UserKeyDateExceptionInline)
search_fields = ('=id', 'name', 'school__name', 'datetime')
| 23.758621 | 71 | 0.690856 | 60 | 689 | 7.816667 | 0.55 | 0.076759 | 0.098081 | 0.123667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003623 | 0.198839 | 689 | 28 | 72 | 24.607143 | 0.846014 | 0 | 0 | 0.095238 | 0 | 0 | 0.089985 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cabc970a9b88e0dc2f995b14b61a394cc817f90 | 4,870 | py | Python | baseStation/src/vision/infrastructure/cvGoalFinder.py | olgam4/design3 | 6e05d123a24deae7dda646df535844a158ef5cc0 | [
"WTFPL"
] | null | null | null | baseStation/src/vision/infrastructure/cvGoalFinder.py | olgam4/design3 | 6e05d123a24deae7dda646df535844a158ef5cc0 | [
"WTFPL"
] | null | null | null | baseStation/src/vision/infrastructure/cvGoalFinder.py | olgam4/design3 | 6e05d123a24deae7dda646df535844a158ef5cc0 | [
"WTFPL"
] | null | null | null | import logging
from math import pi
from typing import Tuple, List
import cv2
import numpy as np
from pathfinding.domain.angle import Angle
from vision.domain.iGoalFinder import IGoalFinder
from vision.domain.image import Image
from vision.domain.rectangle import Rectangle
from vision.domain.visionError import VisionError
from vision.infrastructure.cvCamera import CvCamera
from vision.infrastructure.cvImageDisplay import CvImageDisplay
from vision.infrastructure.cvPlayAreaFinder import CvPlayAreaFinder
from vision.infrastructure.cvVisionException import GoalCouldNotBeFound
class CvGoalFinder(IGoalFinder):
def __init__(self) -> None:
self._goal: Rectangle = None
self._orientation: Angle = None
self._play_area_finder = CvPlayAreaFinder()
self._image_display = CvImageDisplay()
def find(self, image: Image) -> Tuple[Rectangle, Angle]:
play_area = self._play_area_finder.find(image)
image.crop(play_area).process(self._process)
self._goal = Rectangle(self._goal.top_left_corner.x + play_area.top_left_corner.x,
self._goal.top_left_corner.y + play_area.top_left_corner.y, self._goal.width,
self._goal.height)
return self._goal, self._orientation
def _process(self, image: np.ndarray) -> None:
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
self._image_display.display_debug_image('[CvGoalFinder] grey', grey)
canny = cv2.Canny(grey, 100, 200)
self._image_display.display_debug_image('[CvGoalFinder] canny', canny)
contours, _ = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [CvGoalFinder._approximate_contour(c) for c in contours]
contours = [c for c in contours if CvGoalFinder._does_contour_fit_goal(c)]
if len(contours) == 0:
raise GoalCouldNotBeFound
goal_contour = CvGoalFinder._get_brightest_area(grey, contours)
self._goal = Rectangle(*cv2.boundingRect(goal_contour))
image_height, image_width, _ = image.shape
self._compute_orientation(image_width, image_height)
self._image_display.display_debug_contours('[CvGoalFinder] goal_contour', image, contours, [goal_contour])
@staticmethod
def _does_contour_fit_goal(contour: np.ndarray) -> bool:
is_contour_rectangle = len(contour) == 4
rectangle = Rectangle(*cv2.boundingRect(contour))
# goal area is 27cm * 7.5cm, which gives a width/height ratio of 3.6 (or about 0.28 when rotated)
ratio = rectangle.width_height_ratio
does_ratio_fit = 2.6 < ratio < 4.6 or 2.6 < (1.0 / ratio) < 4.6
# From experimentation, we know that the goal has an area of around 1650 pixels
does_area_fit = 650 < rectangle.area < 2650
return is_contour_rectangle and does_ratio_fit and does_area_fit
@staticmethod
def _approximate_contour(contour: np.ndarray) -> np.ndarray:
epsilon = 0.05 * cv2.arcLength(contour, True)
return cv2.approxPolyDP(contour, epsilon, True)
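# approxPolyDP applies the Ramer-Douglas-Peucker simplification; an epsilon of
# 5% of the contour perimeter keeps roughly rectangular shapes as 4 points.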
@staticmethod
def _get_brightest_area(grey: np.ndarray, contours: List[np.ndarray]) -> np.ndarray:
highest_mean_value = -1
brightest_area_contour = None
for contour in contours:
mask = np.zeros(grey.shape, np.uint8)
cv2.drawContours(mask, [contour], 0, 255, -1)
current_mean_value, _, _, _ = cv2.mean(grey, mask=mask)
if current_mean_value > highest_mean_value:
highest_mean_value = current_mean_value
brightest_area_contour = contour
return brightest_area_contour
def _compute_orientation(self, image_width, image_height) -> None:
goal_center = self._goal.get_center()
if self._goal.width_height_ratio > 1.0: # target is horizontal
if goal_center.y > image_height / 2: # target is on bottom
self._orientation = Angle(pi)
else:
self._orientation = Angle(0)
else: # target is vertical
if goal_center.x > image_width / 2: # target is on the right
self._orientation = Angle(pi / 2)
else:
self._orientation = Angle(3 * pi / 2)
def main():
camera = CvCamera(1)
goal_finder = CvGoalFinder()
logging.basicConfig(level=logging.DEBUG)
while True:
image = camera.take_picture()
try:
goal, orientation = goal_finder.find(image)
goal_center = goal.get_center()
print('Goal found. x: {}, y: {}, Area: {}, orientation: {}'.format(goal_center.x, goal_center.y, goal.area,
orientation))
except VisionError as e:
print(e.message)
if __name__ == '__main__':
main()
| 38.96 | 119 | 0.660986 | 591 | 4,870 | 5.192893 | 0.252115 | 0.026067 | 0.032584 | 0.022483 | 0.088954 | 0.029326 | 0.029326 | 0 | 0 | 0 | 0 | 0.019489 | 0.251951 | 4,870 | 124 | 120 | 39.274194 | 0.822948 | 0.048255 | 0 | 0.064516 | 0 | 0 | 0.02701 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086022 | false | 0 | 0.150538 | 0 | 0.290323 | 0.021505 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cacbe3591ce7a5a39f6923fe754eaa23866efc3 | 1,980 | py | Python | Chapter25/forest_random.py | haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 1cbdff216fdc5cec02cc0da8664b788941f025c1 | [
"MIT"
] | 621 | 2019-07-27T19:24:56.000Z | 2022-03-31T14:19:52.000Z | Chapter25/forest_random.py | haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 1cbdff216fdc5cec02cc0da8664b788941f025c1 | [
"MIT"
] | 40 | 2019-09-01T09:45:22.000Z | 2022-03-24T13:13:00.000Z | Chapter25/forest_random.py | haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 1cbdff216fdc5cec02cc0da8664b788941f025c1 | [
"MIT"
] | 346 | 2019-07-26T15:16:56.000Z | 2022-03-30T15:33:20.000Z | #!/usr/bin/env python3
import os
import sys
sys.path.append(os.path.join(os.getcwd(), "MAgent/python"))
import magent
from magent.builtin.rule_model import RandomActor
MAP_SIZE = 64
if __name__ == "__main__":
env = magent.GridWorld("forest", map_size=MAP_SIZE)
env.set_render_dir("render")
# two groups of animals: deer and tigers
deer_handle, tiger_handle = env.get_handles()
# init two models
models = [
RandomActor(env, deer_handle),
RandomActor(env, tiger_handle),
]
env.reset()
env.add_walls(method="random", n=MAP_SIZE * MAP_SIZE * 0.04)
env.add_agents(deer_handle, method="random", n=5)
env.add_agents(tiger_handle, method="random", n=2)
v = env.get_view_space(tiger_handle)
r = env.get_feature_space(tiger_handle)
print("Tiger view: %s, features: %s" % (v, r))
vv = env.get_view_space(deer_handle)
rr = env.get_feature_space(deer_handle)
print("Deer view: %s, features: %s" % (vv, rr))
done = False
step_idx = 0
while not done:
deer_obs = env.get_observation(deer_handle)
tiger_obs = env.get_observation(tiger_handle)
if step_idx == 0:
print("Tiger obs: %s, %s" % (
tiger_obs[0].shape, tiger_obs[1].shape))
print("Deer obs: %s, %s" % (
deer_obs[0].shape, deer_obs[1].shape))
print("%d: HP deers: %s" % (
step_idx, deer_obs[0][:, 1, 1, 2]))
print("%d: HP tigers: %s" % (
step_idx, tiger_obs[0][:, 4, 4, 2]))
deer_act = models[0].infer_action(deer_obs)
tiger_act = models[1].infer_action(tiger_obs)
env.set_action(deer_handle, deer_act)
env.set_action(tiger_handle, tiger_act)
env.render()
done = env.step()
env.clear_dead()
t_reward = env.get_reward(tiger_handle)
d_reward = env.get_reward(deer_handle)
print("Rewards: deer %s, tiger %s" % (d_reward, t_reward))
step_idx += 1
| 31.428571 | 66 | 0.612626 | 288 | 1,980 | 3.947917 | 0.291667 | 0.047493 | 0.034301 | 0.024626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016801 | 0.248485 | 1,980 | 62 | 67 | 31.935484 | 0.747312 | 0.029293 | 0 | 0 | 0 | 0 | 0.1037 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.081633 | 0 | 0.081633 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cad0222fc45b37be1b4bf924700e5712a233db1 | 809 | py | Python | sorting algorithms/quickSort.py | luizgfalqueto/algoritmosPython | a5f3d4728dbcce61c06e978f0aed7540d84eca54 | [
"MIT"
] | null | null | null | sorting algorithms/quickSort.py | luizgfalqueto/algoritmosPython | a5f3d4728dbcce61c06e978f0aed7540d84eca54 | [
"MIT"
] | null | null | null | sorting algorithms/quickSort.py | luizgfalqueto/algoritmosPython | a5f3d4728dbcce61c06e978f0aed7540d84eca54 | [
"MIT"
] | null | null | null | def swap(vet, i, j):
aux = vet[i]
vet[i] = vet[j]
vet[j] = aux
def partition(vet, left, right):
i = left + 1
j = right
pivot = vet[left]
while i <= j:
if vet[i] <= pivot:
i += 1
else:
if vet[j] >= pivot:
j -= 1
else:
if i <= j:
swap(vet, i, j)
i += 1
j -= 1
swap(vet, left, j)
return j
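# For example, partition([3, 1, 4, 2], 0, 3) pivots on vet[0] = 3, leaves the
# list as [2, 1, 3, 4], and returns 2, the pivot's final index.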
def quicksort(vet, left, right):
if left < right:
index = partition(vet, left, right)
quicksort(vet, left, index - 1)
quicksort(vet, index + 1, right)
return vet
def main():
vet = [6, 3, 4, 5, 2, 7, 1, 9, 8, 0, 10]
print(quicksort(vet, 0, len(vet) - 1))
if __name__ == '__main__':
main()
| 18.386364 | 44 | 0.42398 | 112 | 809 | 2.991071 | 0.276786 | 0.125373 | 0.107463 | 0.053731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045852 | 0.433869 | 809 | 43 | 45 | 18.813953 | 0.68559 | 0 | 0 | 0.1875 | 0 | 0 | 0.009889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.1875 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cafeb9db3437527f31f0b9bf6699f3b1024f814 | 2,124 | py | Python | model/layer.py | gzn00417/RandomResidualGCN | 0e5c1df07657cfc02451b20fe5293aefbed655c3 | [
"Apache-2.0"
] | null | null | null | model/layer.py | gzn00417/RandomResidualGCN | 0e5c1df07657cfc02451b20fe5293aefbed655c3 | [
"Apache-2.0"
] | null | null | null | model/layer.py | gzn00417/RandomResidualGCN | 0e5c1df07657cfc02451b20fe5293aefbed655c3 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import math
class GCN(nn.Module):
# Layer initialization: input features, output features, weight, bias
def __init__(self, in_features, out_features, bias=True):
super(GCN, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features)) # FloatTensor allocates the weight tensor
# Common usage: self.v = torch.nn.Parameter(torch.FloatTensor(hidden_size)).
# Parameter can be understood as a type conversion: it turns a non-trainable Tensor
# into a trainable Parameter and binds it to this module, so self.v becomes part of
# the model and can be updated during training. The point is to let certain
# variables keep adjusting their values during learning to reach the optimum.
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter("bias", None)
# Both Parameter attributes and register_parameter add entries to the module's parameters, but the latter supports naming by string
# self.reset_parameters()
# # Initialize the weights
# def reset_parameters(self):
# stdv = 1.0 / math.sqrt(self.weight.size(1))
# # size() counts the elements of a matrix, or of one of its dimensions; size(1) is the size of dimension 1 (the columns)
# self.weight.data.uniform_(-stdv, stdv) # uniform_() fills the tensor with random reals in the range [-stdv, stdv]
# if self.bias is not None:
# self.bias.data.uniform_(-stdv, stdv)
"""
前馈运算 即计算A~ X W(0)
input X与权重W相乘,然后adj矩阵与他们的积稀疏乘
直接输入与权重之间进行torch.mm操作,得到support,即XW
support与adj进行torch.spmm操作,得到output,即AXW选择是否加bias
"""
def forward(self, input, adj):
support = torch.mm(input.cpu(), self.weight.cpu())
# torch.mm(a, b) is matrix multiplication of a and b; torch.mul(a, b) is element-wise multiplication, where a and b must have equal shapes
output = torch.spmm(adj.cpu(), support.cpu())
if self.bias is not None:
return output + self.bias.cpu()
else:
return output
# By setting a breakpoint you can see that output looks like [0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.94]; the values are the probabilities of each label for this x, so it can be converted to [0, 0, 0, 0, 0, 0, 1], matching the seventh class after one-hot encoding the labels
def __repr__(self):
return (self.__class__.__name__ + " (" + str(self.in_features) + " -> " + str(self.out_features) + ")")
| 39.333333 | 129 | 0.64548 | 239 | 2,124 | 5.577406 | 0.426778 | 0.049512 | 0.018755 | 0.022506 | 0.046512 | 0.042011 | 0.013503 | 0.013503 | 0.013503 | 0.013503 | 0 | 0.02037 | 0.237288 | 2,124 | 53 | 130 | 40.075472 | 0.802469 | 0.397834 | 0 | 0.086957 | 0 | 0 | 0.010547 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.173913 | 0.043478 | 0.478261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cb15b5fa4c388b5d318004d7ed72dd97abb681e | 3,887 | py | Python | dashboard/forms.py | vulture990/Django-MangementApp | eafd2219bae7f565510d7aeefa64167329060582 | [
"Apache-2.0"
] | null | null | null | dashboard/forms.py | vulture990/Django-MangementApp | eafd2219bae7f565510d7aeefa64167329060582 | [
"Apache-2.0"
] | null | null | null | dashboard/forms.py | vulture990/Django-MangementApp | eafd2219bae7f565510d7aeefa64167329060582 | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.forms import widgets
from django.utils.safestring import mark_safe
from decimal import Decimal
from bootstrap_modal_forms.forms import BSModalForm
from .models import SmartLinks, Earnings, Payments, SupportManager, Balance
from profiles.models import User
TYPE_CHOICES = [
('regular', 'Regular'),
('commercial', 'Commercial'),
('progress', 'Progress'),
]
STATUS_CHOICES = [
('paid', 'Paid'),
('unpaid', 'Unpaid'),
]
class SmartLinksForm(BSModalForm):
customer = forms.ModelChoiceField(queryset=User.objects.all().order_by('email'), widget=forms.Select(),
empty_label=None)
smart_link = forms.CharField(
max_length=255,
widget=forms.TextInput(attrs={'class': 'form-control m-input m-input--square', 'placeholder': 'Smart Link'})
)
class Meta:
model = SmartLinks
fields = ('customer', 'smart_link')
class DateTimePickerWidget(widgets.TextInput):
def render(self, name, value, attrs=None, renderer=None):
return mark_safe(u'''<div class="input-group date" id="id_%s" data-target-input="nearest">%s<div class=
"input-group-append" data-target="#id_%s" data-toggle="datetimepicker"><div class="input-group-text">
<i class="far fa-calendar-alt"></i></div></div></div>''' %
(name, super(DateTimePickerWidget, self).render(name, value, attrs), name))
class EarningsForm(BSModalForm):
customer = forms.ModelChoiceField(queryset=User.objects.all().order_by('email'), widget=forms.Select(),
empty_label=None)
earning_date = forms.DateTimeField(
widget=DateTimePickerWidget(attrs={
'class': 'form-control datetimepicker-input',
'id': 'earning_date',
'data-target': '#id_earning_date'
})
)
hits = forms.IntegerField(widget=forms.NumberInput(attrs={
'class': 'form-control',
'min': '0'
}))
leads = forms.IntegerField(widget=forms.NumberInput(attrs={
'class': 'form-control',
'min': '0'
}))
money = forms.DecimalField(widget=forms.NumberInput(attrs={
'class': 'form-control'
}), min_value=Decimal('0.00'))
class Meta:
model = Earnings
fields = ('customer', 'earning_date', 'hits', 'leads', 'money')
class PaymentsForm(BSModalForm):
customer = forms.ModelChoiceField(queryset=User.objects.all().order_by('email'), widget=forms.Select(),
empty_label=None)
payment_date = forms.DateTimeField(
widget=DateTimePickerWidget(attrs={
'class': 'form-control',
'id': 'payment_date',
'data-target': '#id_payment_date'
})
)
invoice_id = forms.IntegerField(widget=forms.NumberInput(attrs={
'class': 'form-control',
'min': '0'
}))
type = forms.ChoiceField(choices=TYPE_CHOICES, widget=forms.Select())
amount = forms.DecimalField(widget=forms.NumberInput(attrs={
'class': 'form-control'
}), min_value=Decimal('0.00'))
status = forms.ChoiceField(choices=STATUS_CHOICES, widget=forms.Select())
class Meta:
model = Payments
fields = ('customer', 'payment_date', 'invoice_id', 'type', 'amount', 'status')
class SupportManagerForm(BSModalForm):
class Meta:
model = SupportManager
fields = ('name', 'email', 'avatar', 'phone_number', 'skype', 'website')
class UserForm(BSModalForm):
class Meta:
model = User
fields = ('email', 'first_name', 'last_name', 'is_active', 'is_superuser')
class BalanceForm(BSModalForm):
class Meta:
model = Balance
fields = ('balance',)
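
# A hedged illustration (my addition): what the custom widget above renders to. This is
# meant for a `python manage.py shell` session, since this module's relative imports keep
# it from running standalone; the field name "earning_date" mirrors EarningsForm above.
#
#   >>> from dashboard.forms import DateTimePickerWidget
#   >>> DateTimePickerWidget().render("earning_date", "")
#   '<div class="input-group date" id="id_earning_date" data-target-input="nearest">...'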
| 32.663866 | 117 | 0.605351 | 395 | 3,887 | 5.860759 | 0.298734 | 0.052268 | 0.04838 | 0.07257 | 0.346004 | 0.346004 | 0.346004 | 0.346004 | 0.346004 | 0.286393 | 0 | 0.004124 | 0.251351 | 3,887 | 118 | 118 | 32.940678 | 0.791409 | 0 | 0 | 0.333333 | 0 | 0.022222 | 0.220748 | 0.034757 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011111 | false | 0 | 0.077778 | 0.011111 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cb18bde8984f3d41ba8709bb51f9df5446e4811 | 5,316 | py | Python | alphastarmini/core/sl/sl_train_by_tensor.py | liuruoze/mini-AlphaStar | cf9de2507d526a5fb8ef67676aab2ffb92738640 | [
"Apache-2.0"
] | 108 | 2021-02-10T13:24:56.000Z | 2022-03-21T09:58:28.000Z | alphastarmini/core/sl/sl_train_by_tensor.py | liuruoze/Raw-vs-Human-in-AlphaStar | 99acae772eb5c93000dca87b78d6acdf7699f331 | [
"Apache-2.0"
] | 21 | 2021-04-09T18:46:05.000Z | 2022-03-29T02:44:15.000Z | alphastarmini/core/sl/sl_train_by_tensor.py | liuruoze/Raw-vs-Human-in-AlphaStar | 99acae772eb5c93000dca87b78d6acdf7699f331 | [
"Apache-2.0"
] | 19 | 2021-08-03T01:49:02.000Z | 2022-03-30T10:21:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
" Train from the replay .pt files through pytorch tensor"
import os
USED_DEVICES = "7"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
import sys
import time
import traceback
import argparse
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torch.optim import Adam, RMSprop
from absl import flags
from absl import app
from tqdm import tqdm
from alphastarmini.core.arch.agent import Agent
from alphastarmini.core.sl.feature import Feature
from alphastarmini.core.sl.label import Label
from alphastarmini.core.sl.dataset import SC2ReplayData, SC2ReplayDataset
from alphastarmini.lib.hyper_parameters import Arch_Hyper_Parameters as AHP
from alphastarmini.lib.hyper_parameters import SL_Training_Hyper_Parameters as SLTHP
__author__ = "Ruo-Ze Liu"
debug = False
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path", default="./data/replay_data/", help="The path where data stored")
parser.add_argument("-r", "--restore", action="store_true", default=False, help="whether to restore model or not")
parser.add_argument("-t", "--type", choices=["val", "test", "deploy"], default="val", help="Train type")
parser.add_argument("-m", "--model", choices=["lstm", "gru", "fc"], default="lstm", help="Choose policy network")
parser.add_argument("-n", "--norm", choices=[True, False], default=False, help="Use norm for data")
args = parser.parse_args()
# training parameters
PATH = args.path
MODEL = args.model
TYPE = args.type
RESTORE = args.restore
NORM = args.norm
# hyper parameters
BATCH_SIZE = AHP.batch_size
SEQ_LEN = AHP.sequence_length
NUM_EPOCHS = SLTHP.num_epochs
LEARNING_RATE = SLTHP.learning_rate
WEIGHT_DECAY = SLTHP.weight_decay
CLIP = SLTHP.clip
# set random seed
torch.manual_seed(SLTHP.seed)
np.random.seed(SLTHP.seed)
# gpu setting
ON_GPU = torch.cuda.is_available()
DEVICE = torch.device("cuda:0" if ON_GPU else "cpu")
torch.autograd.set_detect_anomaly(True)
# model path
MODEL_PATH = "./model/"
if not os.path.exists(MODEL_PATH):
os.mkdir(MODEL_PATH)
SAVE_PATH = os.path.join(MODEL_PATH, MODEL + "_" + time.strftime("%y-%m-%d_%H-%M-%S", time.localtime()))
def train_for_val(feature, replay_data):
train_feature = SC2ReplayData.get_training_for_val_data(feature)
val_feature = SC2ReplayData.get_val_data(feature)
train_set = SC2ReplayDataset(train_feature, seq_length=SEQ_LEN, training=NORM)
val_set = SC2ReplayDataset(val_feature, seq_length=SEQ_LEN, training=False)
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, num_workers=1, shuffle=False)
val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, num_workers=1, shuffle=False)
#model = load_latest_model() if RESTORE else choose_model(MODEL)
# model.to(DEVICE)
agent = Agent()
agent.to(DEVICE)
optimizer = Adam(agent.model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
train_loss = 0
for epoch in range(NUM_EPOCHS):
agent.model.train()
loss_sum = 0.0
i = 0
for traj in train_loader:
traj = traj.to(DEVICE).float()
with torch.autograd.detect_anomaly():
loss = agent.get_sl_loss(traj, replay_data)
optimizer.zero_grad()
loss.backward() # note, we don't need retain_graph=True if we set hidden_state.detach()
# add a grad clip
parameters = [p for p in agent.model.parameters() if p is not None and p.requires_grad]
torch.nn.utils.clip_grad_norm_(parameters, CLIP)
optimizer.step()
loss_sum += loss.item()
i += 1
train_loss = loss_sum / (i + 1e-9)
#val_loss = eval(model, criterion, val_loader, train_set, val_set)
print("Train loss: {:.6f}.".format(train_loss))
#print("Train loss: {:.6f}, Val loss: {:.6f}.".format(train_loss, val_loss))
torch.save(agent.model, SAVE_PATH + "_val" + ".pkl")
def eval(model, criterion, data_loader, train_set, val_set):
model.eval()
n_samples = len(val_set)
loss_sum = 0.0
for feature, target in data_loader:
feature = feature.to(DEVICE).float()
target = target.to(DEVICE).float()
output = model.unroll(feature)
if debug:
print("feature.size(): ", feature.size())
print("target.size(): ", target.size())
print("output.size(): ", output.size())
break
loss = criterion(output, target)
loss_sum += output.size(0) * loss.item()
return loss_sum / n_samples
def test(on_server):
# get all the data
# Note: The feature here is actually feature+label
replay_data = SC2ReplayData()
features = replay_data.get_trainable_data(PATH)
#print('out_features:', features)
if TYPE == 'val':
train_for_val(features, replay_data) # select the best hyper-parameters
elif TYPE == 'test':
train_for_test(features, replay_data) # for test the performance in real life
elif TYPE == 'deploy':
train_for_deploy(features, replay_data) # only used for production
else:
train_for_val(features, replay_data)
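# Hypothetical invocation (my addition; the flags come from the argparse setup above, the
# path is a placeholder, and train_for_test/train_for_deploy plus the call site of test()
# are assumed to be defined further down in the original file):
#   python sl_train_by_tensor.py --path ./data/replay_data/ --type val --model lstm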
| 31.455621 | 114 | 0.68924 | 742 | 5,316 | 4.746631 | 0.292453 | 0.025554 | 0.024134 | 0.019591 | 0.105054 | 0.081772 | 0.024986 | 0.024986 | 0.024986 | 0.024986 | 0 | 0.006042 | 0.190557 | 5,316 | 168 | 115 | 31.642857 | 0.812456 | 0.126975 | 0 | 0.055046 | 0 | 0 | 0.098781 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027523 | false | 0 | 0.174312 | 0 | 0.211009 | 0.036697 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cb8388b7958b4b4de72f977446539bd94faa728 | 1,265 | py | Python | pycroft/model/alembic/versions/ae8f4b63876a_add_confirmed_status_to_transaction.py | agdsn/pycroft | ea771141d59c88fdb8a782eafbe106240550a33a | [
"Apache-2.0"
] | 18 | 2016-04-20T19:00:56.000Z | 2021-12-19T16:43:57.000Z | pycroft/model/alembic/versions/ae8f4b63876a_add_confirmed_status_to_transaction.py | agdsn/pycroft | ea771141d59c88fdb8a782eafbe106240550a33a | [
"Apache-2.0"
] | 461 | 2016-07-20T00:42:59.000Z | 2022-03-25T17:03:07.000Z | pycroft/model/alembic/versions/ae8f4b63876a_add_confirmed_status_to_transaction.py | agdsn/pycroft | ea771141d59c88fdb8a782eafbe106240550a33a | [
"Apache-2.0"
] | 15 | 2016-07-15T18:46:43.000Z | 2021-03-17T20:08:39.000Z | """Add confirmed status to transaction

Revision ID: ae8f4b63876a
Revises: 7c1927c937af
Create Date: 2019-05-31 18:08:19.462983

"""
from alembic import op
import sqlalchemy as sa
import pycroft

# revision identifiers, used by Alembic.
revision = 'ae8f4b63876a'
down_revision = '7c1927c937af'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('transaction', sa.Column('confirmed', sa.Boolean(), nullable=False, server_default=sa.schema.DefaultClause("1")))
    # ### end Alembic commands ###
    op.add_column('config',
                  sa.Column('treasurer_group_id', sa.Integer(), nullable=False,
                            server_default='13'))
    op.create_foreign_key(None, 'config', 'property_group',
                          ['treasurer_group_id'], ['id'])
    op.alter_column('config', 'treasurer_group_id', server_default=None)


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('transaction', 'confirmed')
    # ### end Alembic commands ###
    op.drop_constraint('config_treasurer_group_id_fkey', 'config',
                       type_='foreignkey')
    op.drop_column('config', 'treasurer_group_id')
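
# Typical Alembic CLI invocations for this revision (standard Alembic commands, my
# addition; they are not part of this module):
#   alembic upgrade ae8f4b63876a
#   alembic downgrade 7c1927c937af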
| 29.418605 | 131 | 0.665613 | 147 | 1,265 | 5.537415 | 0.462585 | 0.085995 | 0.09828 | 0.081081 | 0.176904 | 0.108108 | 0.108108 | 0.108108 | 0 | 0 | 0 | 0.052579 | 0.203162 | 1,265 | 42 | 132 | 30.119048 | 0.75496 | 0.250593 | 0 | 0 | 0 | 0 | 0.246981 | 0.032931 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.15 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cbad915ffd2da94701b301e5b98fdc0b1cbc5ae | 4,592 | py | Python | TempoFinder.py | b0nz0/TwisterTempo | fc975af4095509d8ec4fe2f84313fe152577bed2 | [
"MIT"
] | null | null | null | TempoFinder.py | b0nz0/TwisterTempo | fc975af4095509d8ec4fe2f84313fe152577bed2 | [
"MIT"
] | null | null | null | TempoFinder.py | b0nz0/TwisterTempo | fc975af4095509d8ec4fe2f84313fe152577bed2 | [
"MIT"
] | null | null | null | """
class TempoFinder
"""
from time import time, perf_counter, sleep
import logging
import numpy
import sounddevice
from aubio import tempo, pitch, sink
WRITE_WAV = False
WAV_NAME = "out.wav"
class TempoFinder(object):
MAX_LISTEN_PER_RECORD_MS = 20. * 1000.
def __init__(self, win_size=512, samplerate=44100):
# total number of audio frames read
self._total_frames = 0
# list of beats, in seconds since start
self._beats = []
# recording window and hop size
self._win_size = win_size
self._hop_size = win_size // 2 # hop size
# audio sample rate
self._samplerate = samplerate
# tempo finder algorithm instance
self._tempo_alg = tempo("default", self._win_size, self._hop_size, self._samplerate)
logging.info("Tempo silence=%d" % self._tempo_alg.get_silence())
self._tempo_alg.set_silence(-40)
self.on_pause = False
# pitch algorithm instance
# self.pitch_alg = pitch("default", self.win_size, self.hop_size, self.samplerate)
self._mono_vec = numpy.array([], dtype=numpy.float32)
self._tempo_found_callback = TempoFinder.default_tempo_found_callback
self._starting_millis = time() * 1000.
self._stream = sounddevice.InputStream(
channels=1, samplerate=float(self._samplerate), dtype='float32',
latency='low', callback=self.audio_callback)
if WRITE_WAV:
self._out_file = sink(samplerate=int(self._samplerate))
def start(self):
self._stream.start()
def end(self):
if WRITE_WAV:
self._out_file.close()
def set_tempo_found_callback(self, tempo_found_callback):
assert callable(tempo_found_callback)
self._tempo_found_callback = tempo_found_callback
@staticmethod
def default_tempo_found_callback(seconds, millis, confidence):
logging.debug("Beat found at: %d:%d.%d, confidence=%.2f" %
(seconds // 60, seconds % 60, millis % 1000, confidence))
def audio_callback(self, indata, frames, timez, status):
"""This is called (from a separate thread) for each audio block."""
if status:
logging.info("Input status: %s. Read %d blocks" % (status, len(indata)))
# mix down to mono and append data
self._mono_vec = numpy.append(self._mono_vec, indata)
logging.debug("In record_hop")
def increase_sensibility(self):
self._tempo_alg.set_silence(self._tempo_alg.get_silence() - 10)
def decrease_sensibility(self):
self._tempo_alg.set_silence(self._tempo_alg.get_silence() + 10)
def record_hop(self, nd):
# record some audio
now = perf_counter()
logging.debug("in record_hop with %d blocks, @ %f" % (len(self._mono_vec), now))
if len(self._mono_vec) >= self._hop_size:
# consider as many slices of size hop_size as possible
# within a boxed timeframe of MAX_LISTEN_PER_RECORD_MS
rec_start_millis = time() * 1000.
while len(self._mono_vec) >= self._hop_size \
and time() * 1000. - rec_start_millis < TempoFinder.MAX_LISTEN_PER_RECORD_MS:
compute_vec = self._mono_vec[0:self._hop_size]
self._mono_vec = self._mono_vec[self._hop_size:]
if WRITE_WAV:
self._out_file(compute_vec, len(compute_vec))
# algorithm found a beat?
is_beat = self._tempo_alg(compute_vec)
if is_beat and not self.on_pause:
logging.debug("Beat found") # . Latency: %.2f" % self.mic.latency)
this_beat_s = self._tempo_alg.get_last_s()
this_beat_ms = self._tempo_alg.get_last_ms()
this_beat_confidence = self._tempo_alg.get_confidence()
act_millis = time() * 1000. - self._starting_millis
self._tempo_found_callback(act_millis // 1000, act_millis, this_beat_confidence)
self._beats.append(act_millis // 1000)
self._total_frames = self._total_frames + self._hop_size
logging.debug("@ %d after while delta = %f" % ((self._total_frames // self._samplerate),
(perf_counter() - now)))
def get_bpms(self):
return self._tempo_alg.get_bpm()
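# A small helper sketch (my addition, not part of the original file): route detected beats
# to stdout through the callback hook instead of the default logger. The name and output
# formatting are illustrative only.
def demo_beat_printer(finder):
    def on_beat(seconds, millis, confidence):
        print("beat @ %d:%02d (confidence %.2f)" % (seconds // 60, seconds % 60, confidence))
    finder.set_tempo_found_callback(on_beat)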
if __name__ == '__main__':
# for test purposes
tf = TempoFinder(samplerate=8000)
tf.start()
while True:
tf.record_hop(0)
sleep(0.1)
| 37.032258 | 100 | 0.624347 | 578 | 4,592 | 4.622837 | 0.268166 | 0.053892 | 0.053892 | 0.039296 | 0.23765 | 0.160554 | 0.128743 | 0.08009 | 0.08009 | 0.047904 | 0 | 0.018385 | 0.277439 | 4,592 | 123 | 101 | 37.333333 | 0.78692 | 0.12696 | 0 | 0.037975 | 0 | 0 | 0.051231 | 0 | 0 | 0 | 0 | 0 | 0.012658 | 1 | 0.126582 | false | 0 | 0.063291 | 0.012658 | 0.227848 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cbc4c893bebb0e0fdc746c032f287d046336644 | 1,116 | py | Python | CeaserCipher.py | suresh021/ISA | 90430babd72137e98c0554657ba91284755f64fc | [
"MIT"
] | null | null | null | CeaserCipher.py | suresh021/ISA | 90430babd72137e98c0554657ba91284755f64fc | [
"MIT"
] | null | null | null | CeaserCipher.py | suresh021/ISA | 90430babd72137e98c0554657ba91284755f64fc | [
"MIT"
] | null | null | null |
"""Ceaser Cipher Implementation"""
def encrypt(text, k):
    cipher = []
    for character in text:
        # convert the character to a 0-25 index (assuming the message is in lowercase)
        temp = ord(character) - 97
        # encipher by shifting k positions, wrapping around the alphabet
        temp = (temp + k) % 26
        # convert the index back to a character
        cipher.append(chr(temp + 97))
    return cipher


def decrypt(text, k):
    plaintext = []
    for number in text:
        # decipher by shifting back k positions (assuming lowercase)
        character = (ord(number) - 97 - k) % 26
        # convert the index back to a character
        plaintext.append(chr(character + 97))
    return plaintext


original_text = "hellopassworld"
# k is the number of characters to shift, i.e. if k=1, a -> b
k = 1
print("Original message: ", original_text)
# encrypt
cipher = encrypt(original_text, k)
print("Encrypted message: ", ''.join(cipher))
# decrypt
plaintext = decrypt(cipher, k)
# join characters to make a string
plaintext = "".join(plaintext)
print("Decrypted message: ", plaintext)
| 22.32 | 88 | 0.640681 | 142 | 1,116 | 5.014085 | 0.359155 | 0.021067 | 0.050562 | 0.064607 | 0.11236 | 0.11236 | 0.11236 | 0 | 0 | 0 | 0 | 0.020556 | 0.258961 | 1,116 | 49 | 89 | 22.77551 | 0.840387 | 0.333333 | 0 | 0 | 0 | 0 | 0.096419 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.045455 | 0 | 0 | 0.181818 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cbc7e8c90e67810830495fd2b86b4fdef9cc60e | 5,294 | py | Python | grpc4bmi/bmi_client_docker.py | cffbots/grpc4bmi | 4924f4155e78d4d1215ece36d49c88b3eccd2d58 | [
"Apache-2.0"
] | 4 | 2019-04-17T12:52:01.000Z | 2021-03-25T09:22:27.000Z | grpc4bmi/bmi_client_docker.py | cffbots/grpc4bmi | 4924f4155e78d4d1215ece36d49c88b3eccd2d58 | [
"Apache-2.0"
] | 78 | 2018-06-05T08:00:54.000Z | 2021-11-11T08:32:38.000Z | grpc4bmi/bmi_client_docker.py | cffbots/grpc4bmi | 4924f4155e78d4d1215ece36d49c88b3eccd2d58 | [
"Apache-2.0"
] | 2 | 2019-04-17T12:52:04.000Z | 2022-02-03T08:56:36.000Z | import os
import errno
import time
import docker
from grpc4bmi.bmi_grpc_client import BmiClient
from grpc4bmi.utils import stage_config_file
class LogsException(Exception):
pass
class DeadDockerContainerException(ChildProcessError):
"""
Exception for when a Docker container has died.
Args:
message (str): Human readable error message
exitcode (int): The non-zero exit code of the container
logs (str): Logs the container produced
"""
def __init__(self, message, exitcode, logs, *args):
super().__init__(message, *args)
#: Exit code of container
self.exitcode = exitcode
#: Stdout and stderr of container
self.logs = logs
class BmiClientDocker(BmiClient):
"""
BMI gRPC client for dockerized server processes: the initialization launches the docker container which should have the
run-bmi-server as its command. Also, it should expose the tcp port 55555 for communication with this client. Upon
destruction, this class terminates the corresponding docker server.
Args:
image (str): Docker image name of grpc4bmi wrapped model
image_port (int): Port of server inside the image
host (str): Host on which the image port is published on a random port
input_dir (str): Directory for input files of model
output_dir (str): Directory for output files of model
user (str): Username or UID of Docker container
remove (bool): Automatically remove the container and logs when it exits.
delay (int): Seconds to wait for Docker container to startup, before connecting to it
timeout (int): Seconds to wait for gRPC client to connect to server
extra_volumes (Dict[str,Dict]): Extra volumes to attach to Docker container.
The key is either the hosts path or a volume name and the value is a dictionary with the keys:
- ``bind`` The path to mount the volume inside the container
- ``mode`` Either ``rw`` to mount the volume read/write, or ``ro`` to mount it read-only.
For example:
.. code-block:: python
{'/data/shared/forcings/': {'bind': '/forcings', 'mode': 'ro'}}
"""
input_mount_point = "/data/input"
output_mount_point = "/data/output"
def __init__(self, image, image_port=55555, host=None,
input_dir=None, output_dir=None,
user=os.getuid(), remove=False,
delay=5, timeout=None, extra_volumes=None):
port = BmiClient.get_unique_port()
client = docker.from_env()
volumes = {}
if extra_volumes is not None:
volumes.update(extra_volumes)
self.input_dir = None
if input_dir is not None:
self.input_dir = os.path.abspath(input_dir)
if not os.path.isdir(self.input_dir):
raise NotADirectoryError(input_dir)
volumes[self.input_dir] = {"bind": BmiClientDocker.input_mount_point, "mode": "rw"}
self.output_dir = None
if output_dir is not None:
self.output_dir = os.path.abspath(output_dir)
try:
# Create output dir ourselves, otherwise Docker will create it as root user, resulting in permission
# errors
os.mkdir(self.output_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
volumes[self.output_dir] = {"bind": BmiClientDocker.output_mount_point, "mode": "rw"}
self.container = client.containers.run(image,
ports={str(image_port) + "/tcp": port},
volumes=volumes,
user=user,
remove=remove,
detach=True)
time.sleep(delay)
if not remove:
# Only able to reload, read logs when container is not in auto remove mode
self.container.reload()
if self.container.status == 'exited':
exitcode = self.container.attrs["State"]["ExitCode"]
logs = self.container.logs()
msg = f'Failed to start Docker container with image {image}, Container log: {logs}'
raise DeadDockerContainerException(msg, exitcode, logs)
super(BmiClientDocker, self).__init__(BmiClient.create_grpc_channel(port=port, host=host), timeout=timeout)
def __del__(self):
if hasattr(self, "container"):
self.container.stop()
def initialize(self, filename):
fn = stage_config_file(filename, self.input_dir, self.input_mount_point)
super(BmiClientDocker, self).initialize(fn)
def get_value_ptr(self, var_name):
raise NotImplementedError("Cannot exchange memory references across process boundary")
def logs(self):
"""Logs of the Docker container"""
try:
return self.container.logs()
except docker.errors.APIError as e:
raise LogsException("Unable to fetch logs, try pass remove=False to BmiClientDocker constructor, so logs are retained after container dies") from e
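
# A hedged usage sketch (my addition): the image name, directories and config file below
# are placeholders, and running this requires a local Docker daemon plus a grpc4bmi-enabled
# model image that publishes port 55555.
if __name__ == "__main__":
    client = BmiClientDocker(image="example/model-grpc4bmi:latest", image_port=55555,
                             input_dir="./input", output_dir="./output")
    client.initialize("config.yml")
    print(client.logs())
    del client  # __del__ stops the container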
| 41.685039 | 159 | 0.618247 | 637 | 5,294 | 5.029827 | 0.326531 | 0.024969 | 0.018727 | 0.011236 | 0.05618 | 0.021848 | 0.021848 | 0.021848 | 0 | 0 | 0 | 0.003786 | 0.301473 | 5,294 | 126 | 160 | 42.015873 | 0.862628 | 0.350963 | 0 | 0.029412 | 0 | 0.014706 | 0.09782 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0.029412 | 0.088235 | 0 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cbdfec8b49dc333209a13496c7bf2001116ce84 | 13,023 | py | Python | GA/Individual.py | rexzhang2014/geneticalgorithm | bd48924511c64285bb6f5ab3540ee8a0bd2e6874 | [
"MIT"
] | null | null | null | GA/Individual.py | rexzhang2014/geneticalgorithm | bd48924511c64285bb6f5ab3540ee8a0bd2e6874 | [
"MIT"
] | null | null | null | GA/Individual.py | rexzhang2014/geneticalgorithm | bd48924511c64285bb6f5ab3540ee8a0bd2e6874 | [
"MIT"
] | null | null | null | #params aaaaa
import numpy as np
from copy import copy, deepcopy
from collections.abc import Iterable, Collection
import pandas as pd
import random
DEBUG = False
class InvalidArguments(Exception) :
def __init__(self, err="invalid arguments") :
self.err = err
def __str__(self) :
return self.err
class Individual() :
class KeyValueError(Exception) :
def __init__(self, err_msg) :
self.err_msg = err_msg
def __str__(self) :
return str(self.err_msg)
def __init__(self, *args, **kwargs) :
if args is not None and isinstance(args[0], Individual) and len(args) == 1:
tmp = args[0]
self.chromosome = tmp.chromosome.copy()
self.generation = tmp.generation
self.age = tmp.age
self.fitness = 0
elif len(args) + len(kwargs) >= 3 :
self.chromosome = args[0] if args[0] is not None else kwargs["chromosome"]
self.generation = args[1] if args[1] is not None else kwargs["generation"]
self.age = args[2] if args[2] is not None else kwargs["age"]
self.fitness = 0
else :
raise Exception("non sufficient arguments")
def mutate(self, t = 0.1, prob=None) :
if prob is None :
prob = np.random.rand(len(self.chromosome))
factor = prob < t
xor = lambda x, y : (x != y).astype(np.int32)
chromosome = xor(np.array(self.chromosome), factor)
generation = self.generation #+ 1
age = 0
self.grow()
return Individual(chromosome.tolist(), generation, age)
def grow(self) :
self.age += 1
def __setitem__(self,k,v) :
if type(k) == int :
self.chromosome[k] = v
elif type(k) == slice :
self.chromosome[k] = v
elif isinstance(k, Collection) :
s = pd.Series(self.chromosome)
s[k] = v
self.chromosome = s.values.tolist()
else :
raise Individual.KeyValueError("Cannot set chromosome with a key type is not int, slice or Collection")
def __getitem__(self,k) :
if type(k) == int :
return Individual(self.chromosome[k],self.generation,self.age)
elif type(k) == slice :
return Individual(self.chromosome[k],self.generation,self.age)
elif isinstance(k, Collection) :
s = pd.Series(self.chromosome)
return Individual(s.values.tolist(), self.generation, self.age)
else :
raise Individual.KeyValueError("Cannot get chromosome with a key type is not int, slice or Collection")
def __str__(self) :
if DEBUG == True :
return str([self.chromosome, self.generation, self.age])
else :
return str(self.chromosome)
# def __eq__(self, obj) :
# return self.chromosome == obj.chromosome
def __add__(self, another) :
# implement concatenation of two chromosome so it is not mutable addition.
chromosome = self.chromosome + another.chromosome
generation = self.generation + 1
age = 0
self.grow()
another.grow()
return Individual(chromosome, generation, age)
def __truediv__(self, div) :
# div : a number or a np.ndarray
chromosome = (np.array(self.chromosome) / div).astype(float).tolist()
generation = self.generation
age = self.age
return Individual(chromosome, generation, age)
def __mul__(self, mul) :
# mul : a number or a np.ndarray
chromosome = (np.array(self.chromosome) * mul).astype(float).tolist()
generation = self.generation
age = self.age
return Individual(chromosome, generation, age)
def indexOf(self, vals) :
if isinstance(vals, Collection) :
indices = []
for v in vals :
indices.append(self.chromosome.index(v))
return indices
else :
indices = []
for i in range(len(self.chromosome)) :
if self.chromosome[i] == vals :
indices.append(i)
return indices
def indexOfPositive(self) :
indices = []
for i in range(len(self.chromosome)) :
if self.chromosome[i] > 0 :
indices.append(i)
return indices
def copy(self) :
return deepcopy(self)
def sum(self) :
return sum(self.chromosome)
def __len__(self) :
return len(self.chromosome)
# def __eq__(self, another) :
# return "".join(self.chromosome) == "".join(another.chromosome)
class WeightedIndividual(Individual) :
def __init__(self, *args, **kwargs) :
Individual.__init__(self, *args, **kwargs)
self.cost = kwargs["cost"]
# self.reweigh()
def weights(self) :
indices = self.indexOfPositive()
w = [self.chromosome[i] for i in indices]
# for i in indices :
# w.append(self.chromosome[i])
return indices, np.array(w)
def reweigh(self) :
self.chromosome = (np.array(self.chromosome) / sum(self.chromosome) * self.cost).tolist()
def mutate(self, t = 0.1, prob=None) :
if prob is None :
prob = np.random.normal(0, 1, len(self.chromosome))
# prob = (prob - np.mean(prob)) / (np.max(prob) - np.min(prob))
prob = 0 + (1 - (0)) * (prob - np.min(prob)) / (np.max(prob) - np.min(prob))
# prob = np.random.rand(len(self.chromosome))
factor = []
for p in prob :
if abs(p) <= t :
factor.append(0)
else :
factor.append(p)
factor = np.array(factor)
# factor = prob if prob < t else 1
action = lambda x, p : (1 - p ) * x + p * ( self.cost - x )
chromosome = action(np.array(self.chromosome), factor)
# Equal Rights: make every gene has the proportion of cost as they own the weights
chromosome = (chromosome / sum(chromosome) * self.cost).tolist()
generation = self.generation + 1
age = 0
self.grow()
return WeightedIndividual(chromosome, generation, age, cost=self.cost)
def __getitem__(self,k) :
if type(k) == int :
return WeightedIndividual(self.chromosome[k],self.generation,self.age, cost=self.cost)
if type(k) == slice :
return WeightedIndividual(self.chromosome[k],self.generation,self.age, cost=self.cost)
elif isinstance(k, Collection) :
s = pd.Series(self.chromosome)
return WeightedIndividual(s.values.tolist(), self.generation, self.age)
else :
raise Individual.KeyValueError("Cannot get chromosome with a key type is not int, slice or Collection")
def __add__(self, another) :
# by default, chromosome is a list. In weighted individual , it is arithmetically added as vector(np.ndarray)
chromosome = (np.array(self.chromosome) + np.array(another.chromosome)).tolist()
generation = self.generation + 1
age = 0
self.grow()
another.grow()
return WeightedIndividual(chromosome, generation, age, cost=self.cost)
def __truediv__(self, div) :
# div : a number or a np.ndarray
chromosome = (np.array(self.chromosome) / div).astype(float).tolist()
generation = self.generation
age = self.age
return WeightedIndividual(chromosome, generation, age, cost=self.cost)
def __mul__(self, mul) :
# mul : a number or a np.ndarray
chromosome = (np.array(self.chromosome) * mul).astype(float).tolist()
generation = self.generation
age = self.age
return WeightedIndividual(chromosome, generation, age, cost=self.cost)
class IntegerIndividual(Individual) :
def __init__(self, *args, **kwargs) :
Individual.__init__(self, *args, **kwargs)
if "domain" not in kwargs :
raise InvalidArgs("")
self.domain = kwargs["domain"] # a list of integers
self.upper = max(self.domain)
self.lower = min(self.domain)
# def weights(self) :
# indices = self.indexOfPositive()
# w = [self.chromosome[i] for i in indices]
# # for i in indices :
# # w.append(self.chromosome[i])
# return indices, np.array(w)
# def reweigh(self) :
# self.chromosome = (np.array(self.chromosome) / sum(self.chromosome) * self.cost).tolist()
def mutate(self, t = 0.1, prob=None) :
# if prob is None :
# prob = random.sample(self.domain, 1)[0]
chromosome = self.chromosome.copy()
chr_len = len(self.chromosome)
if prob is None :
prob = np.random.rand(chr_len)
factor = prob < t
# if prob < t :
# idx = random.sample(range(len(self.chromosome)), 1)[0]
for i in range(chr_len) :
if factor[i] :
chr_lst = self.domain.copy()
chr_lst.remove(chromosome[i])
chromosome[i] = random.choice(chr_lst)
# fit = InSetFitness()
# chromosome = [ random.choice(- chromosome[i] if factor[i] else chromosome[i] for i in range(chr_len) ]
generation = self.generation + 1
age = 0
self.grow()
return IntegerIndividual(chromosome, generation, age, domain=self.domain)
def __getitem__(self,k) :
if type(k) == int :
return self.chromosome[k] #IntegerIndividual(self.chromosome[k],self.generation,self.age, domain=self.domain)
if type(k) == slice :
# return self.chromosome[k]
return IntegerIndividual(self.chromosome[k],self.generation,self.age, domain=self.domain)
elif isinstance(k, Collection) :
s = pd.Series(self.chromosome)
return IntegerIndividual(s.values.tolist(), self.generation, self.age)
else :
raise Individual.KeyValueError("Cannot get chromosome with a key type is not int, slice or Collection")
class Portfolio(Individual) :
def __init__(self, *args, **kwargs) :
Individual.__init__(self, *args, **kwargs)
self.cost = kwargs["cost"]
# self.total = kwargs["total"]
def weights(self) :
indices = self.indexOfPositive()
w = []
for i in indices :
w.append(self.chromosome[i])
return indices, np.array(w)
def mutate(self, t = 0.1, alpha = 0.01, prob=None) :
if prob is None :
prob = np.random.rand(len(self.chromosome))
factor = []
for p in prob :
if p < t :
factor.append(p)
else :
factor.append(1)
factor = np.array(factor)
# factor = prob if prob < t else 1
action = lambda x, p : (1 - alpha) * x * p + alpha * self.cost
chromosome = action(np.array(self.chromosome), factor)
chromosome = (chromosome / sum(chromosome) * self.cost).tolist()
generation = self.generation #+ 1
age = 0
self.grow()
return Portfolio(chromosome, generation, age, cost=self.cost)
def __getitem__(self,k) :
if type(k) == int :
return Portfolio(self.chromosome[k],self.generation,self.age, cost=self.cost)
if type(k) == slice :
return Portfolio(self.chromosome[k],self.generation,self.age, cost=self.cost)
def __add__(self, another) :
chromosome = self.chromosome + another.chromosome
generation = self.generation + 1
age = 0
self.grow()
another.grow()
return Portfolio(chromosome, generation, age, cost=self.cost)
def __truediv__(self, div) :
# div : a number or a np.ndarray
chromosome = (np.array(self.chromosome) / div).astype(float).tolist()
generation = self.generation
age = self.age
return Portfolio(chromosome, generation, age, cost=self.cost)
def __mul__(self, mul) :
# mul : a number or a np.ndarray
chromosome = (np.array(self.chromosome) * mul).astype(float).tolist()
generation = self.generation
age = self.age
return Portfolio(chromosome, generation, age, cost=self.cost)
if __name__ == '__main__' :
# np.random.seed(1)
ind = IntegerIndividual([1,1,0,0,0],0,0,domain=list(range(84)))
ind1 = ind.copy()
ind2 = ind.mutate()
ind1[0] = 100
ind3 = ind1 + ind2
print("ind: {}".format(ind))
print("ind1: {}".format(ind1))
print("ind2: {}".format(ind2))
print("ind3: {}".format(ind3))
print("indexOf 1: {}".format(ind3.indexOfPositive()))
print("ind3 / 2: {}".format(ind3/2))
print("ind3 * 2: {}".format(ind3*2)) | 34.452381 | 121 | 0.574215 | 1,537 | 13,023 | 4.777489 | 0.113208 | 0.114395 | 0.032412 | 0.034318 | 0.67629 | 0.630805 | 0.606019 | 0.583277 | 0.572927 | 0.546643 | 0 | 0.010209 | 0.307994 | 13,023 | 378 | 122 | 34.452381 | 0.804594 | 0.124088 | 0 | 0.611538 | 0 | 0 | 0.038367 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138462 | false | 0 | 0.019231 | 0.015385 | 0.315385 | 0.026923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cbec43cc046b63a3a8368a3e6c0c6785dde4c1c | 652 | py | Python | setup.py | garonfok/Scorechive | 925ca50f91fa18a61f8e18425d0480d968112ee6 | [
"MIT"
] | null | null | null | setup.py | garonfok/Scorechive | 925ca50f91fa18a61f8e18425d0480d968112ee6 | [
"MIT"
] | null | null | null | setup.py | garonfok/Scorechive | 925ca50f91fa18a61f8e18425d0480d968112ee6 | [
"MIT"
] | null | null | null | from scorechive._version import __version__
from setuptools import setup

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="scorechive",
    version=__version__,
    author="Garon Fok",
    author_email="fokgaron@gmail.com",
    packages=["example_pkg"],
    description="Scorechive is a fast and lightweight CLI program that is designed to keep track of your music scores using SQLite.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license='MIT',
    python_requires='>=3.9.2',
    install_requires=[
        "click>=7.1.2",
        "columnize>=0.3.10"
    ]
)
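
# Hypothetical install commands (standard setuptools/pip usage, my addition; not part of
# this file):
#   pip install .
#   python -m build   # requires the 'build' package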
| 28.347826 | 133 | 0.687117 | 84 | 652 | 5.107143 | 0.75 | 0.13986 | 0.088578 | 0.13986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018939 | 0.190184 | 652 | 22 | 134 | 29.636364 | 0.793561 | 0 | 0 | 0 | 0 | 0.05 | 0.340491 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cbf411962fa4328504fcd5ad3302c946a121d0c | 12,071 | py | Python | volatility/framework/symbols/__init__.py | fengjixuchui/volatility3 | 8d3cc1dc1e6e301798aac1072b77cadecfc16091 | [
"Linux-OpenIB"
] | 3 | 2021-05-31T19:43:14.000Z | 2021-08-02T00:09:26.000Z | volatility/framework/symbols/__init__.py | fengjixuchui/volatility3 | 8d3cc1dc1e6e301798aac1072b77cadecfc16091 | [
"Linux-OpenIB"
] | null | null | null | volatility/framework/symbols/__init__.py | fengjixuchui/volatility3 | 8d3cc1dc1e6e301798aac1072b77cadecfc16091 | [
"Linux-OpenIB"
] | 1 | 2021-04-08T03:02:20.000Z | 2021-04-08T03:02:20.000Z | # This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
import collections
import collections.abc
import enum
import logging
from typing import Any, Dict, Iterable, Iterator, TypeVar, List
from volatility.framework import constants, exceptions, interfaces, objects
vollog = logging.getLogger(__name__)
SymbolSpaceReturnType = TypeVar("SymbolSpaceReturnType", interfaces.objects.Template,
interfaces.symbols.SymbolInterface, Dict[str, Any])
class SymbolType(enum.Enum):
TYPE = 1
SYMBOL = 2
ENUM = 3
class SymbolSpace(interfaces.symbols.SymbolSpaceInterface):
"""Handles an ordered collection of SymbolTables.
This collection is ordered so that resolution of symbols can proceed
down through the ranks if a namespace isn't specified.
"""
def __init__(self) -> None:
super().__init__()
self._dict = collections.OrderedDict() # type: Dict[str, interfaces.symbols.BaseSymbolTableInterface]
# Permanently cache all resolved symbols
self._resolved = {} # type: Dict[str, interfaces.objects.Template]
self._resolved_symbols = {} # type: Dict[str, interfaces.objects.Template]
def clear_symbol_cache(self, table_name: str = None) -> None:
"""Clears the symbol cache for the specified table name. If no table
name is specified, the caches of all symbol tables are cleared."""
table_list = list() # type: List[interfaces.symbols.BaseSymbolTableInterface]
if table_name is None:
table_list = list(self._dict.values())
else:
table_list.append(self._dict[table_name])
for table in table_list:
table.clear_symbol_cache()
def free_table_name(self, prefix: str = "layer") -> str:
"""Returns an unused table name to ensure no collision occurs when
inserting a symbol table."""
count = 1
while prefix + str(count) in self:
count += 1
return prefix + str(count)
### Symbol functions
def get_symbols_by_type(self, type_name: str) -> Iterable[str]:
"""Returns all symbols based on the type of the symbol."""
for table in self._dict:
for symbol_name in self._dict[table].get_symbols_by_type(type_name):
yield table + constants.BANG + symbol_name
def get_symbols_by_location(self, offset: int, size: int = 0, table_name: str = None) -> Iterable[str]:
"""Returns all symbols that exist at a specific relative address."""
table_list = self._dict.values() # type: Iterable[interfaces.symbols.BaseSymbolTableInterface]
if table_name is not None:
if table_name in self._dict:
table_list = [self._dict[table_name]]
else:
table_list = []
for table in table_list:
for symbol_name in table.get_symbols_by_location(offset = offset, size = size):
yield table.name + constants.BANG + symbol_name
### Space functions
def __len__(self) -> int:
"""Returns the number of tables within the space."""
return len(self._dict)
def __getitem__(self, i: str) -> Any:
"""Returns a specific table from the space."""
return self._dict[i]
def __iter__(self) -> Iterator[str]:
"""Iterates through all available tables in the symbol space."""
return iter(self._dict)
def append(self, value: interfaces.symbols.BaseSymbolTableInterface) -> None:
"""Adds a symbol_list to the end of the space."""
if not isinstance(value, interfaces.symbols.BaseSymbolTableInterface):
raise TypeError(value)
if value.name in self._dict:
self.remove(value.name)
self._dict[value.name] = value
def remove(self, key: str) -> None:
"""Removes a named symbol_list from the space."""
# Reset the resolved list, since we're removing some symbols
self._resolved = {}
del self._dict[key]
### Resolution functions
class UnresolvedTemplate(objects.templates.ReferenceTemplate):
"""Class to highlight when missing symbols are present.
This class is identical to a reference template, but differentiable by its classname.
It will output a debug log to indicate when it has been instantiated and with what name.
This class is designed to be output ONLY as part of the SymbolSpace resolution system.
Individual SymbolTables that cannot resolve a symbol should still return a SymbolError to
indicate this failure in resolution.
"""
def __init__(self, type_name: str, **kwargs) -> None:
vollog.debug("Unresolved reference: {}".format(type_name))
super().__init__(type_name = type_name, **kwargs)
def _weak_resolve(self, resolve_type: SymbolType, name: str) -> SymbolSpaceReturnType:
"""Takes a symbol name and resolves it with ReferentialTemplates."""
if resolve_type == SymbolType.TYPE:
get_function = 'get_type'
elif resolve_type == SymbolType.SYMBOL:
get_function = 'get_symbol'
elif resolve_type == SymbolType.ENUM:
get_function = 'get_enumeration'
else:
raise TypeError("Weak_resolve called without a proper SymbolType")
name_array = name.split(constants.BANG)
if len(name_array) == 2:
table_name = name_array[0]
component_name = name_array[1]
try:
return getattr(self._dict[table_name], get_function)(component_name)
except KeyError as e:
raise exceptions.SymbolError(component_name, table_name,
'Type {} references missing Type/Symbol/Enum: {}'.format(name, e))
raise exceptions.SymbolError(name, None, "Malformed name: {}".format(name))
def _iterative_resolve(self, traverse_list):
"""Iteratively resolves a type, populating linked child
ReferenceTemplates with their properly resolved counterparts."""
replacements = set()
# Whole Symbols that still need traversing
while traverse_list:
template_traverse_list, traverse_list = [self._resolved[traverse_list[0]]], traverse_list[1:]
# Traverse a single symbol looking for any ReferenceTemplate objects
while template_traverse_list:
traverser, template_traverse_list = template_traverse_list[0], template_traverse_list[1:]
for child in traverser.children:
if isinstance(child, objects.templates.ReferenceTemplate):
# If we haven't seen it before, subresolve it and also add it
# to the "symbols that still need traversing" list
if child.vol.type_name not in self._resolved:
traverse_list.append(child.vol.type_name)
try:
self._resolved[child.vol.type_name] = self._weak_resolve(
SymbolType.TYPE, child.vol.type_name)
except exceptions.SymbolError:
self._resolved[child.vol.type_name] = self.UnresolvedTemplate(child.vol.type_name)
# Stash the replacement
replacements.add((traverser, child))
elif child.children:
template_traverse_list.append(child)
for (parent, child) in replacements:
parent.replace_child(child, self._resolved[child.vol.type_name])
def get_type(self, type_name: str) -> interfaces.objects.Template:
"""Takes a symbol name and resolves it.
This method ensures that all referenced templates (including
self-referential templates) are satisfied as ObjectTemplates
"""
# Traverse down any resolutions
if type_name not in self._resolved:
self._resolved[type_name] = self._weak_resolve(SymbolType.TYPE, type_name) # type: ignore
self._iterative_resolve([type_name])
if isinstance(self._resolved[type_name], objects.templates.ReferenceTemplate):
table_name = None
index = type_name.find(constants.BANG)
if index > 0:
table_name, type_name = type_name[:index], type_name[index + 1:]
raise exceptions.SymbolError(type_name, table_name, "Unresolvable symbol requested: {}".format(type_name))
return self._resolved[type_name]
def get_symbol(self, symbol_name: str) -> interfaces.symbols.SymbolInterface:
"""Look-up a symbol name across all the contained symbol spaces."""
retval = self._weak_resolve(SymbolType.SYMBOL, symbol_name)
if symbol_name not in self._resolved_symbols and retval.type is not None:
self._resolved_symbols[symbol_name] = self._subresolve(retval.type)
if not isinstance(retval, interfaces.symbols.SymbolInterface):
table_name = None
index = symbol_name.find(constants.BANG)
if index > 0:
table_name, symbol_name = symbol_name[:index], symbol_name[index + 1:]
raise exceptions.SymbolError(symbol_name, table_name, "Unresolvable Symbol: {}".format(symbol_name))
return retval
def _subresolve(self, object_template: interfaces.objects.Template) -> interfaces.objects.Template:
"""Ensure an ObjectTemplate doesn't contain any ReferenceTemplates"""
for child in object_template.children:
if isinstance(child, objects.templates.ReferenceTemplate):
new_child = self.get_type(child.vol.type_name)
else:
new_child = self._subresolve(child)
object_template.replace_child(old_child = child, new_child = new_child)
return object_template
def get_enumeration(self, enum_name: str) -> interfaces.objects.Template:
"""Look-up a set of enumeration choices from a specific symbol
table."""
retval = self._weak_resolve(SymbolType.ENUM, enum_name)
if not isinstance(retval, interfaces.objects.Template):
table_name = None
index = enum_name.find(constants.BANG)
if index > 0:
table_name, enum_name = enum_name[:index], enum_name[index + 1:]
raise exceptions.SymbolError(enum_name, table_name, "Unresolvable Enumeration: {}".format(enum_name))
return retval
def _membership(self, member_type: SymbolType, name: str) -> bool:
"""Test for membership of a component within a table."""
name_array = name.split(constants.BANG)
if len(name_array) == 2:
table_name = name_array[0]
component_name = name_array[1]
else:
return False
if table_name not in self:
return False
table = self[table_name]
if member_type == SymbolType.TYPE:
return component_name in table.types
elif member_type == SymbolType.SYMBOL:
return component_name in table.symbols
elif member_type == SymbolType.ENUM:
return component_name in table.enumerations
return False
def has_type(self, name: str) -> bool:
return self._membership(SymbolType.TYPE, name)
def has_symbol(self, name: str) -> bool:
return self._membership(SymbolType.SYMBOL, name)
def has_enumeration(self, name: str) -> bool:
return self._membership(SymbolType.ENUM, name)
def symbol_table_is_64bit(context: interfaces.context.ContextInterface, symbol_table_name: str) -> bool:
"""Returns a boolean as to whether a particular symbol table within a
context is 64-bit or not."""
return context.symbol_space.get_type(symbol_table_name + constants.BANG + "pointer").size == 8
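
# A hedged mini-demo (my addition, not part of the original module): with an empty
# SymbolSpace, name generation and membership checks behave as follows; the symbol name
# "ntkrnlmp!_EPROCESS" is purely illustrative.
if __name__ == "__main__":
    space = SymbolSpace()
    print(space.free_table_name("layer"))                              # -> "layer1"
    print(space.has_type("ntkrnlmp" + constants.BANG + "_EPROCESS"))   # -> False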
| 46.072519 | 118 | 0.648993 | 1,422 | 12,071 | 5.326301 | 0.203938 | 0.035648 | 0.026406 | 0.0169 | 0.23277 | 0.144045 | 0.109982 | 0.056113 | 0.038289 | 0.023237 | 0 | 0.003954 | 0.266672 | 12,071 | 261 | 119 | 46.249042 | 0.851672 | 0.227322 | 0 | 0.190476 | 0 | 0 | 0.031487 | 0.002312 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130952 | false | 0 | 0.035714 | 0.017857 | 0.315476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cbfae888700fbf15186058110371fc5dde3d25e | 2,578 | py | Python | Notebooks/mnist_loader.py | sashakarimi/ML-Hult-Summer-2019 | 12d4204914760c05951314e3a8de3e1af3cf8cc8 | [
"MIT"
] | 2 | 2019-07-06T20:14:49.000Z | 2019-11-21T17:50:16.000Z | Notebooks/mnist_loader.py | sashakarimi/ML-Hult-Summer-2019 | 12d4204914760c05951314e3a8de3e1af3cf8cc8 | [
"MIT"
] | null | null | null | Notebooks/mnist_loader.py | sashakarimi/ML-Hult-Summer-2019 | 12d4204914760c05951314e3a8de3e1af3cf8cc8 | [
"MIT"
] | 4 | 2019-07-06T20:14:54.000Z | 2019-12-10T05:25:28.000Z | # Adapted from https://github.com/sorki/python-mnist/blob/master/mnist/loader.py
import os
import struct
from array import array

import numpy as np
from matplotlib import pyplot as plt


class MNIST(object):
    def __init__(self, path=os.path.join('..', 'DataSets')):
        self.path = path

        self.test_img_fname = 't10k-images-idx3-ubyte'
        self.test_lbl_fname = 't10k-labels-idx1-ubyte'

        self.train_img_fname = 'train-images-idx3-ubyte'
        self.train_lbl_fname = 'train-labels-idx1-ubyte'

        self.test_images = []
        self.test_labels = []

        self.train_images = []
        self.train_labels = []

    def load_testing(self):
        ims, labels = self.load(os.path.join(self.path, self.test_img_fname),
                                os.path.join(self.path, self.test_lbl_fname))

        self.test_images = ims
        self.test_labels = labels

        # return all the images and the labels
        return ims, labels

    def load_training(self):
        ims, labels = self.load(os.path.join(self.path, self.train_img_fname),
                                os.path.join(self.path, self.train_lbl_fname))

        self.train_images = ims
        self.train_labels = labels

        # return all the images and labels
        return ims, labels

    @classmethod
    def load(cls, path_img, path_lbl):
        with open(path_lbl, 'rb') as file:
            magic, size = struct.unpack(">II", file.read(8))
            if magic != 2049:
                raise ValueError('Magic number mismatch, expected 2049, '
                                 'got {}'.format(magic))

            labels = array("B", file.read())

        with open(path_img, 'rb') as file:
            magic, size, rows, cols = struct.unpack(">IIII", file.read(16))
            if magic != 2051:
                raise ValueError('Magic number mismatch, expected 2051, '
                                 'got {}'.format(magic))

            image_data = array("B", file.read())

        images = []
        for i in range(size):
            images.append([0] * rows * cols)

        for i in range(size):
            images[i][:] = image_data[i * rows * cols:(i + 1) * rows * cols]

        return images, labels

    @classmethod
    def display(cls, image):
        # input is a single image from the output of the load function
        # MNIST images are 28x28 each
        img_h_px = 28
        img_w_px = 28
        return plt.imshow(np.array(image).reshape(img_h_px, img_w_px), interpolation='nearest')
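
# A hedged usage sketch (my addition; it assumes the four MNIST idx files are present
# under ../DataSets, as the default path above suggests):
if __name__ == "__main__":
    mn = MNIST()
    images, labels = mn.load_testing()
    print("loaded {} test images; first label: {}".format(len(images), labels[0]))
    MNIST.display(images[0])
    plt.show()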
| 30.690476 | 95 | 0.567494 | 328 | 2,578 | 4.32622 | 0.317073 | 0.045102 | 0.035236 | 0.039464 | 0.288936 | 0.250881 | 0.162086 | 0.102889 | 0.060606 | 0.060606 | 0 | 0.021155 | 0.321567 | 2,578 | 84 | 96 | 30.690476 | 0.790166 | 0.091932 | 0 | 0.150943 | 0 | 0 | 0.088613 | 0.038527 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09434 | false | 0 | 0.09434 | 0 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cc1feb632fd2d7254708480eb59522de73462c8 | 29,678 | py | Python | core/feature/phone_screen_touch_features/phone_screen_touch_features_all_app.py | MD2Korg/CerebralCortex-DataAnalysis | 73f5ea2430bc7c23de422dccb7b65ef9f8917595 | [
"BSD-2-Clause"
] | 1 | 2018-04-24T18:11:24.000Z | 2018-04-24T18:11:24.000Z | core/feature/phone_screen_touch_features/phone_screen_touch_features_all_app.py | Boris69bg/CerebralCortex-DataAnalysis | 49565bdff348d69153bd5d3a37e73f1645f82b32 | [
"BSD-2-Clause"
] | 10 | 2018-03-13T19:04:09.000Z | 2018-05-12T01:40:03.000Z | core/feature/phone_screen_touch_features/phone_screen_touch_features_all_app.py | Boris69bg/CerebralCortex-DataAnalysis | 49565bdff348d69153bd5d3a37e73f1645f82b32 | [
"BSD-2-Clause"
] | 42 | 2017-12-07T17:08:14.000Z | 2019-06-02T08:25:12.000Z | # Copyright (c) 2018, MD2K Center of Excellence
# - Md Shiplu Hawlader <shiplu.cse.du@gmail.com; mhwlader@memphis.edu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from cerebralcortex.core.datatypes.datastream import DataStream
from cerebralcortex.core.datatypes.datastream import DataPoint
from cerebralcortex.core.datatypes.stream_types import StreamTypes
from core.computefeature import ComputeFeatureBase
import numpy as np
from datetime import timedelta
import datetime
import traceback
import copy
from sklearn.mixture import GaussianMixture
from typing import List, Callable, Any
feature_class_name = 'PhoneScreenTouchFeaturesAllApp'
class PhoneScreenTouchFeaturesAllApp(ComputeFeatureBase):
"""
Compute all features related to the phone touch screen that need all days of a user's data and therefore cannot be parallelized.
"""
def get_filtered_data(self, data: List[DataPoint],
admission_control: Callable[[Any], bool] = None) -> List[DataPoint]:
"""
Return the filtered list of DataPoints according to the admission control provided
:param List(DataPoint) data: Input data list
:param Callable[[Any], bool] admission_control: Admission control lambda function, which accepts the sample and
returns a bool based on the data sample validity
:return: Filtered list of DataPoints
:rtype: List(DataPoint)
"""
if admission_control is None:
return data
filtered_data = []
for d in data:
if admission_control(d.sample):
filtered_data.append(d)
elif type(d.sample) is list and len(d.sample) == 1 and admission_control(d.sample[0]):
d.sample = d.sample[0]
filtered_data.append(d)
return filtered_data
def get_data_by_stream_name(self, stream_name: str, user_id: str, day: str,
localtime: bool=True) -> List[DataPoint]:
"""
Combines data from multiple stream instances of the same stream, selected by stream name.
:param str stream_name: Name of the stream
:param str user_id: UUID of the stream owner
:param str day: The day (YYYYMMDD) on which to operate
:param bool localtime: The way to structure time, True for operating in participant's local time, False for UTC
:return: Combined stream data if there are multiple stream id
:rtype: List(DataPoint)
"""
stream_ids = self.CC.get_stream_id(user_id, stream_name)
data = []
for stream in stream_ids:
if stream is not None:
ds = self.CC.get_stream(stream['identifier'], user_id=user_id, day=day, localtime=localtime)
if ds is not None:
if ds.data is not None:
data += ds.data
if len(stream_ids)>1:
data = sorted(data, key=lambda x: x.start_time)
return data
def inter_event_time_list(self, data: List[DataPoint]) -> List[float]:
"""
Helper function to compute inter-event times
:param List(DataPoint) data: A list of DataPoints
:return: Time deltas between DataPoints in seconds
:rtype: list(float)
"""
if not data:
return None
last_end = data[0].end_time
ret = []
flag = False
for cd in data:
if flag == False:
flag = True
continue
dif = cd.start_time - last_end
ret.append(max(0, dif.total_seconds()))
last_end = max(last_end, cd.end_time)
return list(filter(lambda x: x != 0.0, ret))
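# Worked illustration (my annotation): for DataPoints spanning [0s-2s], [5s-6s] and
# [6.5s-7s], the inter-event gaps are 3.0s and 0.5s; zero gaps from overlapping points
# are filtered out by the final filter().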
def get_screen_touch_variance_hourly(self, data: List[DataPoint], typing_episodes: List) -> List[DataPoint]:
"""
This method returns the hourly variance of the time between two consecutive touches in a typing episode.
In case of multiple typing episodes, the variance is calculated for each episode and the results are
combined using the standard formula for pooling multiple variances.
:param List(DataPoint) data: screen touch stream data points
:param List(Tuple) typing_episodes: (start_time, end_time) for each item in the list, the starting and end time
of a typing episode
:return: A list of variances for each hour (if there is input data for this hour) of a day.
:rtype: List(DataPoint)
"""
if len(data) <= 1:
return None
combined_data = copy.deepcopy(data)
for s in combined_data:
s.end_time = s.start_time
new_data = []
tmp_time = copy.deepcopy(combined_data[0].start_time)
tmp_time = tmp_time.replace(hour=0, minute=0, second=0, microsecond=0)
for h in range(0, 24):
datalist = []
start = tmp_time.replace(hour=h)
end = start + datetime.timedelta(minutes=59)
for d in combined_data:
if start <= d.start_time <= end or start <= d.end_time <= end:
datalist.append(d)
if len(datalist) <= 1:
continue
            # [[]] * n would alias one list n times; build distinct lists instead
            splitted_data = [[] for _ in typing_episodes]
for i, ep in enumerate(typing_episodes):
for d in datalist:
if ep[0]<= d.start_time and d.end_time <= ep[1]:
splitted_data[i].append(d)
splitted_data = list(filter(lambda x: len(x)>1, splitted_data))
if not splitted_data:
continue
            episode_data = list(map(self.inter_event_time_list, splitted_data))
            episode_data = [ep for ep in episode_data if ep]  # drop empty gap lists
            if not episode_data:
                continue
            # np.mean over a ragged list of lists is undefined; take the grand mean
            # over the flattened samples instead
            Xc = np.mean(np.hstack(episode_data))
            var = 0
            n = 0
            for L in episode_data:
                X = np.mean(L)
                V = np.var(L)
                var += len(L) * (V + (X - Xc) * (X - Xc))
                n += len(L)
            var /= n
if np.isnan(var):
continue
new_data.append(DataPoint(start_time=start, end_time=end, offset=combined_data[0].offset,
sample=var))
return new_data
def get_screen_touch_rate(self, data: List[DataPoint], typing_episodes: List) -> List[DataPoint]:
"""
Average screen touch rate for a whole day during typing episodes (only productivity and communication apps are
considered during calculation)
:param List(DataPoint) data: screen touch stream data points
:param List(Tuple) typing_episodes: (start_time, end_time) for each item in the list, the starting and end time
of a typing episode
:return: A list with single data point containing the average screen touch rate.
:rtype: List(DataPoint)
"""
if not data:
return None
total_touch_count = 0
total_typing_time = 0
for ep in typing_episodes:
total_typing_time += (ep[1] - ep[0]).total_seconds()
for d in data:
if ep[0] <= d.start_time <= ep[1]:
total_touch_count += 1
if total_typing_time == 0 or total_touch_count == 0:
return None
start_time = copy.deepcopy(data[0].start_time)
start_time = start_time.replace(hour=0, minute=0, second=0, microsecond=0)
end_time = datetime.datetime.combine(start_time.date(), datetime.time.max)
end_time = end_time.replace(tzinfo=data[0].start_time.tzinfo)
return [DataPoint(start_time=start_time, end_time=end_time, offset=data[0].offset,
sample=total_touch_count/total_typing_time)]
def get_typing_episodes(self, typing_episodes: List) -> List[DataPoint]:
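        """
        Convert (start_time, end_time) tuples into DataPoints whose sample is the
        episode duration in seconds.
        """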
new_data = []
for d in typing_episodes:
new_data.append(DataPoint(d[0], d[1], 0, (d[1] - d[0]).total_seconds()))
return new_data
def process_screentouch_type_day_data(self, user_id, touchtypedata, touchscreendata, input_touchtype_stream,
input_touchscreen_stream):
"""
        Calculates and stores the screen-touch-related features, for example the hourly
        variance of the gap between screen touches.
:param user_id: UUID of the stream owner
:param touchtypedata: screen touch type stream data points
:param touchscreendata: screen touch time stream data points
:param input_touchtype_stream: touch type stream object
:param input_touchscreen_stream: touch time stream object
:return:
"""
typing_episodes = []
pos = 0
while pos < len(touchtypedata):
while pos < len(touchtypedata):
t = touchtypedata[pos]
if t.sample in ["typing", "pause", "reading"]:
break
pos += 1
if pos == len(touchtypedata):
break
start = pos
pos += 1
while pos < len(touchtypedata):
t = touchtypedata[pos]
if t.sample not in ["typing", "pause", "reading"]:
break
t1 = touchtypedata[pos-1]
if t1.end_time != t.start_time:
break
pos += 1
typing_episodes.append((touchtypedata[start].start_time, touchtypedata[pos-1].start_time))
try:
data = self.get_typing_episodes(typing_episodes)
self.store_stream(filepath="phone_typing_episode.json",
input_streams=[input_touchtype_stream, input_touchscreen_stream], user_id=user_id,
data=data, localtime=False)
except Exception as e:
self.CC.logging.log("Exception:", str(e))
self.CC.logging.log(str(traceback.format_exc()))
try:
data = self.get_screen_touch_variance_hourly(touchscreendata, typing_episodes)
self.store_stream(filepath="phone_touch_response_time_variance.json",
input_streams=[input_touchtype_stream, input_touchscreen_stream], user_id=user_id,
data=data, localtime=False)
except Exception as e:
self.CC.logging.log("Exception:", str(e))
self.CC.logging.log(str(traceback.format_exc()))
try:
data = self.get_screen_touch_rate(touchscreendata, typing_episodes)
self.store_stream(filepath="phone_screen_touch_rate.json",
input_streams=[input_touchtype_stream, input_touchscreen_stream], user_id=user_id,
data=data, localtime=False)
except Exception as e:
self.CC.logging.log("Exception:", str(e))
self.CC.logging.log(str(traceback.format_exc()))
def get_appusage_duration_by_category(self, appdata: List[DataPoint], categories: List[str],
appusage_gap_threshold_seconds: float=120) -> List:
"""
        Given a list of app categories, return the intervals during which apps of those
        categories were used. Two consecutive data points for the same app are assumed
        to belong to the same usage session when the gap between them is at most
        appusage_gap_threshold_seconds.
:param List(DataPoint) appdata: App category data stream
:param List(str) categories: List of app categories of which the usage duration should be calculated
:param float appusage_gap_threshold_seconds: Threshold in seconds, which is the gap allowed between two
consecutive DataPoint of same app
:return: A list of intervals of the given apps (categories) usage [start_time, end_time, category]
:rtype: List
"""
appdata = sorted(appdata, key=lambda x: x.start_time)
appusage = []
i = 0
threshold = timedelta(seconds=appusage_gap_threshold_seconds)
        while i < len(appdata):
d = appdata[i]
category = d.sample[1]
if category not in categories:
i += 1
continue
j = i+1
            # two consecutive points for the same app stay in one session while the
            # gap between them is within the threshold
            while j < len(appdata) and d.sample == appdata[j].sample \
                    and appdata[j].start_time - appdata[j-1].start_time <= threshold:
                j += 1
if j > i+1:
appusage.append([d.start_time, appdata[j-1].start_time, category])
i = j-1
i += 1
return appusage
def appusage_interval_list(self, data: List[DataPoint]) -> List[int]:
"""
        Helper function to compute the gaps between consecutive screen touches
        :param List(DataPoint) data: Phone screen touch data stream
        :return: A list of screen touch gaps in the touch-screen timestamp unit (milliseconds)
        :rtype: List(int)
"""
ret = []
for i in range(1, len(data)):
ret.append(data[i].sample - data[i-1].sample)
return ret
def label_appusage_intervals(self, data: List[DataPoint], intervals: List,
interval_label: List[str]) -> List[DataPoint]:
"""
        Helper function to label screen touches within app usage by gap length
        :param List(DataPoint) data: Phone touch screen data stream
        :param List intervals: List of (min, max) touch-gap intervals, one per gap cluster
        :param List(str) interval_label: Labels for the gap clusters: [typing, pause, reading, unknown]
        :return: Labelled touch intervals
:rtype: List(DataPoint)
"""
ret = []
for i in range(1, len(data)):
last = data[i-1].start_time
diff = (data[i].start_time - last).total_seconds()
for j in range(len(interval_label)):
if intervals[j][0] <= diff <= intervals[j][1]:
if len(ret) > 0:
last_entry = ret.pop()
if last_entry.end_time == last and last_entry.sample == interval_label[j]:
ret.append(DataPoint(start_time = last_entry.start_time,
end_time = data[i].start_time, offset = last_entry.offset,
sample = last_entry.sample))
else:
ret.append(last_entry)
ret.append(DataPoint(start_time = last, end_time = data[i].start_time,
offset = data[i].offset, sample=interval_label[j]))
else:
ret.append(DataPoint(start_time = last, end_time = data[i].start_time,
offset = data[i].offset, sample=interval_label[j]))
break
return ret
def process_phonescreen_all_day_data(self, user_id: str, all_days: List[str],
touchescreen_stream_name: str, input_touchstream: DataStream) \
-> GaussianMixture:
"""
        This method creates an unsupervised model from the screen touch gaps across all apps.
        :param str user_id: UUID of the user.
        :param List(str) all_days: List of days with format 'YYYYMMDD'
        :param str touchescreen_stream_name: Phone touch screen stream name
        :param DataStream input_touchstream: DataStream object of the phone touch screen stream
        :return: GaussianMixture object of the created model
"""
MIN_TAP_DATA = 100
td = []
appd = []
for day in all_days:
touchstream = self.get_data_by_stream_name(touchescreen_stream_name, user_id, day)
touchstream = self.get_filtered_data(touchstream, lambda x: (type(x) is float and x>1000000000.0))
td += touchstream
# appcategorystream = self.get_data_by_stream_name(appcategory_stream_name, user_id, day)
# appcategorystream = self.get_filtered_data(appcategorystream, lambda x: (type(x) is list and len(x)==4))
# appd += appcategorystream
td = sorted(td, key=lambda x: x.start_time)
# appusage = self.get_appusage_duration_by_category(appd, ["Communication", "Productivity"])
# tapping_gap = self.appusage_interval_list(td, appusage)
tapping_gap = []
for i in range(1, len(td)):
tapping_gap.append(td[i].sample - td[i-1].sample)
if len(tapping_gap) < MIN_TAP_DATA:
self.CC.logging.log("Not enough screen touch data")
return None
tapping_gap = sorted(tapping_gap)
X = (np.array(tapping_gap)/1000).reshape(-1, 1)
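        # Model selection: fit GMMs with k = 4..9 components and keep the one with
        # the lowest Bayesian Information Criterion, which trades off likelihood
        # against model complexity.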
        best_model = None
        min_bic = float('inf')
        best_k = 4
for k in range(4, 10):
gm = GaussianMixture(n_components = k, max_iter = 500)#, covariance_type = 'spherical')
gm.fit(X)
bic = gm.bic(X)
if bic < min_bic:
min_bic = bic
best_model = gm
best_k = k
P = best_model.predict(X)
mx = np.zeros(best_k)
mn = np.full(best_k, np.inf)
for i in range(len(P)):
x = P[i]
mx[x] = max(mx[x], X[i][0])
mn[x] = min(mn[x], X[i][0])
intervals = []
for i in range(len(mx)):
intervals.append((mn[i], mx[i]))
intervals = sorted(intervals)
labels = ["typing", "pause", "reading", "unknown"]
while len(labels) < len(intervals):
labels.append("unknown")
values = [ [] for _ in range(len(labels))]
for idx in range(len(X)):
values[P[idx]].append(X[idx][0])
parameters = []
        # component ids are arbitrary, so gather the statistics of every component
        # and rely on the sort below to order the clusters by mean gap
        for i in range(len(values)):
            print(np.mean(values[i]), np.std(values[i]))
            parameters.append((np.mean(values[i]), np.std(values[i])))
parameters.sort()
try:
data = []
for day in all_days:
start_time = datetime.datetime.strptime(day,"%Y%m%d")
start_time = start_time.replace(tzinfo=datetime.timezone.utc)
end_time = datetime.datetime.combine(start_time.date(), datetime.time.max)
end_time = end_time.replace(tzinfo=datetime.timezone.utc)
data.append(DataPoint(start_time, end_time, 0, [parameters[0][0], parameters[0][1]]))
if data:
self.store_stream(filepath="phone_active_typing_parameters_all_app.json",
input_streams=[input_touchstream],
user_id=user_id, data=data, localtime=False)
except Exception as e:
self.CC.logging.log("Exception:", str(e))
self.CC.logging.log(str(traceback.format_exc()))
try:
data = []
for day in all_days:
start_time = datetime.datetime.strptime(day,"%Y%m%d")
start_time = start_time.replace(tzinfo=datetime.timezone.utc)
end_time = datetime.datetime.combine(start_time.date(), datetime.time.max)
end_time = end_time.replace(tzinfo=datetime.timezone.utc)
data.append(DataPoint(start_time, end_time, 0, [parameters[1][0], parameters[1][1]]))
if data:
self.store_stream(filepath="phone_typing_pause_parameters_all_app.json",
input_streams=[input_touchstream],
user_id=user_id, data=data, localtime=False)
except Exception as e:
self.CC.logging.log("Exception:", str(e))
self.CC.logging.log(str(traceback.format_exc()))
try:
data = []
for day in all_days:
start_time = datetime.datetime.strptime(day,"%Y%m%d")
start_time = start_time.replace(tzinfo=datetime.timezone.utc)
end_time = datetime.datetime.combine(start_time.date(), datetime.time.max)
end_time = end_time.replace(tzinfo=datetime.timezone.utc)
data.append(DataPoint(start_time, end_time, 0, [parameters[2][0], parameters[2][1]]))
if data:
self.store_stream(filepath="phone_reading_in_typing_parameters_all_app.json",
input_streams=[input_touchstream],
user_id=user_id, data=data, localtime=False)
except Exception as e:
self.CC.logging.log("Exception:", str(e))
self.CC.logging.log(str(traceback.format_exc()))
        # return the model selected by BIC
        return best_model
def process_phonescreen_day_data(self, user_id: str, touchstream: List[DataPoint],
input_touchstream: DataStream, gm: GaussianMixture):
"""
        Analyzes the phone touch screen gaps to find typing, pause-between-typing, reading
        and unknown sessions. It uses a Gaussian Mixture model to separate the different
        peaks in the distribution of screen touch gaps.
        :param str user_id: UUID of the stream owner
        :param List(DataPoint) touchstream: Phone touch screen stream data
        :param DataStream input_touchstream: DataStream object of phone touch screen
        :param GaussianMixture gm: GaussianMixture object created from the user's all-day data
:return:
"""
touchstream = sorted(touchstream, key=lambda x: x.start_time)
# appusage = self.get_appusage_duration_by_category(categorystream, ["Communication", "Productivity"])
# tapping_gap = self.appusage_interval_list(touchstream, appusage)
# if len(tapping_gap) < 50:
# self.CC.logging.log("Not enough screen touch data")
# return
tapping_gap = []
for i in range(1, len(touchstream)):
tapping_gap.append(touchstream[i].sample - touchstream[i-1].sample)
tapping_gap = sorted(tapping_gap)
if len(tapping_gap)==0:
self.CC.logging.log("Not enough screen touch data")
return
#gm = GaussianMixture(n_components = 4, max_iter = 500)#, covariance_type = 'spherical')
X = (np.array(tapping_gap)/1000).reshape(-1, 1)
#gm.fit(X)
P = gm.predict(X)
mx = np.zeros(gm.get_params()['n_components'])
mn = np.full(gm.get_params()['n_components'], np.inf)
for i in range(len(P)):
x = P[i]
mx[x] = max(mx[x], X[i][0])
mn[x] = min(mn[x], X[i][0])
intervals = []
for i in range(len(mx)):
intervals.append((mn[i], mx[i]))
intervals = sorted(intervals)
try:
data = self.label_appusage_intervals(touchstream, intervals,
["typing", "pause", "reading", "unknown"])
if data:
self.store_stream(filepath="phone_touch_type_all_app.json",
input_streams=[input_touchstream],
user_id=user_id, data=data, localtime=False)
except Exception as e:
self.CC.logging.log("Exception:", str(e))
self.CC.logging.log(str(traceback.format_exc()))
def process_data(self, user_id: str, all_user_streams: dict, all_days: List[str]):
"""
Getting all the necessary input datastreams for a user
and run all feature processing modules for all the days
of the user.
:param str user_id: UUID of the stream owner
:param dict all_user_streams: Dictionary containing all the user streams, where key is the stream name, value
is the stream metadata
:param List(str) all_days: List of all days for the processing in the format 'YYYYMMDD'
:return:
"""
input_touchscreenstream = None
touchescreen_stream_name = "TOUCH_SCREEN--org.md2k.phonesensor--PHONE"
streams = all_user_streams
days = None
        if not streams:
self.CC.logging.log('No streams found for user %s for feature %s'
% (str(user_id), self.__class__.__name__))
return
for stream_name, stream_metadata in streams.items():
if stream_name == touchescreen_stream_name:
input_touchscreenstream = stream_metadata
if not input_touchscreenstream:
self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
"USERID %s" %
(self.__class__.__name__, touchescreen_stream_name,
str(user_id)))
else:
gm = self.process_phonescreen_all_day_data(user_id, all_days, touchescreen_stream_name,
input_touchscreenstream)
if gm:
for day in all_days:
touchstream = self.get_data_by_stream_name(touchescreen_stream_name, user_id, day)
touchstream = self.get_filtered_data(touchstream, lambda x: (type(x) is float and x>=0))
self.process_phonescreen_day_data(user_id, touchstream, input_touchscreenstream, gm)
        input_touchtype_stream = None
        input_touchscreen_stream = None  # initialize to avoid a NameError when the stream is absent
touchtype_stream_name = 'org.md2k.data_analysis.feature.phone.all_app.touch_type'
streams = self.CC.get_user_streams(user_id)
for stream_name, stream_metadata in streams.items():
if stream_name == touchtype_stream_name:
input_touchtype_stream = stream_metadata
elif stream_name == touchescreen_stream_name:
input_touchscreen_stream = stream_metadata
if not input_touchtype_stream:
self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
"USERID %s" %
(self.__class__.__name__, touchtype_stream_name,
str(user_id)))
elif not input_touchscreen_stream:
self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
"USERID %s" %
(self.__class__.__name__, touchescreen_stream_name,
str(user_id)))
else:
for day in all_days:
touchtypedata = self.get_data_by_stream_name(touchtype_stream_name, user_id, day, localtime=False)
touchtypedata = self.get_filtered_data(touchtypedata, lambda x: (type(x) is str and
x in ["typing", "pause", "reading", "unknown"]))
touchscreendata = self.get_data_by_stream_name(touchescreen_stream_name, user_id, day, localtime=False)
touchscreendata = self.get_filtered_data(touchscreendata, lambda x: (type(x) is float or
(type(x) is list and len(x)==1 and type(x[0]) is float)))
for d in touchscreendata:
if type(d.sample) is list:
d.sample = d.sample[0]
self.process_screentouch_type_day_data(user_id, touchtypedata, touchscreendata,
input_touchtype_stream, input_touchscreen_stream)
def process(self, user_id: str, all_days: List[str]):
"""
        Main processing function inherited from ComputeFeatureBase
:param str user_id: UUID of the user
:param List(str) all_days: List of days with format 'YYYYMMDD'
:return:
"""
if self.CC is not None:
self.CC.logging.log("Processing PhoneTouchScreenFeatures")
streams = self.CC.get_user_streams(user_id)
self.process_data(user_id, streams, all_days)
| 46.663522 | 120 | 0.593335 | 3,603 | 29,678 | 4.724951 | 0.125173 | 0.028548 | 0.0168 | 0.020677 | 0.429276 | 0.369831 | 0.340637 | 0.326598 | 0.292411 | 0.278548 | 0 | 0.008232 | 0.320574 | 29,678 | 635 | 121 | 46.737008 | 0.836044 | 0.263731 | 0 | 0.411911 | 0 | 0 | 0.043532 | 0.0193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034739 | false | 0 | 0.027295 | 0 | 0.109181 | 0.002481 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
# === src/pybedforms/new_dunes.py (repo: RichardScottOZ/pybedfroms, license: BSD-3-Clause) ===
import argparse
import numpy as np
import configparser
from .dune_topo import DuneTopo
def main(color, k, n, f, r, t, w, erosion, d, *infiles):
c = configparser.ConfigParser()
for file in infiles:
with open(file) as src:
c.read_file(src)
config = c.defaults()
make_movie(color, k, n, f, r, t, w, erosion, d, config)
def make_movie(color, k, n, f, r, t, w, erosion, d, config):
m = k + n
q = m + f
j = r / f
s = q + t
v = s + w
b = -erosion * w
p = v + d
if color in ('color','c','Color','COLOR'):
Total_Frames_Per_Movie = p
#
dT = np.arange(0, k)
dT[k + 1: m] = k
dT[m + 1: p] = k
dTrend = np.arange(0, m)
dTrend[m + 1: q] = np.arange(j, j, r)
dTrend[q + 1: s] = r
dTrend[s + 1: p] = r
dZHO = np.arange(0,s)
dZHO[s + 1: v] = np.arange(-erosion, -erosion, b)
dZHO[v + 1: p] = b
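    # The slice assignments above that start past each array's end are empty
    # slices, so NumPy treats them as silent no-ops (slice assignment never
    # grows an array). If a piecewise frame schedule is intended, it would need
    # explicit concatenation, e.g. (hypothetical sketch):
    #   dT = np.concatenate([np.arange(0, k), np.full(p - k, k)])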
NumberOfFrames = len(dT)
# make the dune topo
dune = DuneTopo(**config)
if __name__ == '__main__':
p = argparse.ArgumentParser(description='New Dune Tool')
"""
How many files do you want to run?
Name of input parameter file?
Name of input parameter file?
Enter “color” for movies/tiffs or “bw” for single image post-scripts : color
How many frames showing deposition? : 100
How many pause frames after deposition? : 5
How many frames showing rotation? : 90
How many rotational degrees? : 45
How many pause frames after rotation? : 5
How many frames showing erosion? : 100
How much erosion between frames? : 0.025
How many pause frames after erosion?
"""
    # a positional argument may not take an explicit dest (argparse raises ValueError)
    p.add_argument('color', default='bw', type=str)
p.add_argument('k', type=int, help='How many frames showing deposition?')
p.add_argument('n', type=int, help='How many pause frames after deposition?')
p.add_argument('f', type=int, help='How many frames showing rotation?')
p.add_argument('r', type=int, help='How much total rotation in degrees?')
p.add_argument('t', type=int, help='How many pause frames after rotation?')
p.add_argument('w', type=int, help='How many frames showing erosion?')
p.add_argument('erosion', type=float, help='How much erosion between frames? (0.025 suggested)', default=0.025)
p.add_argument('d', type=int, help='How many pause frames at the end?')
p.add_argument('infiles', nargs='+')
args = p.parse_args()
    # argparse.Namespace is not iterable; unpack the parsed fields explicitly
    main(args.color, args.k, args.n, args.f, args.r, args.t, args.w,
         args.erosion, args.d, *args.infiles)
# === tableau.py (repo: frank4466/inkscape-tableau, license: FSFAP) ===
#!/usr/bin/env python
# coding: utf-8
# all strings are unicode (including the docstrings)
from __future__ import unicode_literals
"""
tableau.py
Création de tableau simple pour Inkscape
Codé par Frank SAURET - http://www.electropol.fr -
License : Public Domain
"""
import inkex
__version__ = '2020.2'
# Version validated for Inkscape 1.02
inkex.localization.localize()
class Tableau(inkex.GenerateExtension):
def add_arguments(self, pars):
        # Retrieve the parameters
        pars.add_argument("--rows", type=int, dest="NbLigne", default=2, help="Number of rows")
        pars.add_argument("--cols", type=int, dest="NbColonne", default=3, help="Number of columns")
        pars.add_argument("--units", type=str, dest="Unitee", default="mm", help="Unit for the cell")
        pars.add_argument("--width", type=float, dest="CelL", default=10.0, help="Cell width")
        pars.add_argument("--height", type=float, dest="CelH", default=20.0, help="Cell height")
        pars.add_argument("--weight", type=float, dest="ETrait", default=0.1, help="Stroke width")
        pars.add_argument("--color", type=inkex.Color, dest="Couleur", default=inkex.Color(0), help="Stroke color")
        pars.add_argument("--round", type=float, dest="Arrondi", default=10.0, help="Corner radius")
        pars.add_argument("--active-tab", type=str, dest="active_tab", default="options", help="Active tab.")
def generate(self):
        # Get the cell dimensions according to the chosen unit
HauteurCellule=self.svg.unittouu( str(self.options.CelH) + self.options.Unitee )
LargeurCellule=self.svg.unittouu( str(self.options.CelL) + self.options.Unitee )
EpaisseurTrait=self.svg.unittouu( str(self.options.ETrait) + self.options.Unitee )
rf=self.svg.unittouu(str(self.options.Arrondi)+ self.options.Unitee )
r=str(rf)
        # Drawing building blocks (rounded-corner arcs and straight segments)
Ahd=' a '+r+','+r+' 0 0 1 '+r+','+r
Abd=' a '+r+','+r+' 0 0 1 -'+r+','+r
Abg=' a '+r+','+r+' 0 0 1 -'+r+',-'+r
Ahg=' a '+r+','+r+' 0 0 1 '+r+',-'+r
BordGauche=Ahg+Abg
BordDroit=' m '+r+',-'+r+Abd+Ahd
Croisillon=Ahd+Ahg+Abg+Abd
SegmentH=' h '+ str(self.svg.unittouu( str(self.options.CelL-2*self.options.Arrondi) + self.options.Unitee ))
SegmentV=' v '+ str(self.svg.unittouu( str(self.options.CelH-2*self.options.Arrondi) + self.options.Unitee ))
SegmentHNeg=' h -'+ str(self.svg.unittouu( str(self.options.CelL-2*self.options.Arrondi) + self.options.Unitee ))
DeplacementV=str(self.svg.unittouu( str(self.options.CelH-2*self.options.Arrondi) + self.options.Unitee ))
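        # For reference, the SVG arc command used above is
        #   a rx,ry x-axis-rotation large-arc-flag sweep-flag dx,dy
        # Every arc here is a quarter circle of radius r (sweep-flag 1).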
        # Draw the table, compensating for stroke width and rounded corners
        ## Position the start of the table
y=str(rf)
x=str(0)
tableau_path=' M '+x+','+y
        ## Draw the first row (ordered to minimise laser head travel)
for i in range (0,self.options.NbColonne):
tableau_path=tableau_path+Ahg+SegmentH+Ahd
        ## Draw the middle rows (ordered to minimise laser head travel)
DecalageD=' m '+r+','+r
DecalageM=' m '+str(2*rf)+',0'
for i in range (1,self.options.NbLigne):
y=str(rf+i*HauteurCellule)
            # First cell
tableau_path=tableau_path+'M 0,'+y+ BordGauche+DecalageD+SegmentH
            # Following cells
for j in range (0,self.options.NbColonne-1):
x=str(j*LargeurCellule)
tableau_path=tableau_path+Croisillon+DecalageM+SegmentH
            # Last cell
tableau_path=tableau_path+BordDroit
        ## Draw the last row (in the opposite direction to minimise laser head travel)
tableau_path=tableau_path+'m 0,'+DeplacementV
for i in range (0,self.options.NbColonne):
tableau_path=tableau_path+Abd+SegmentHNeg+Abg
        # Draw the columns
for i in range (0,self.options.NbColonne+1):
x=str(i*LargeurCellule)
for j in range (0,self.options.NbLigne):
y=str(j*HauteurCellule+rf)
tableau_path=tableau_path+' M '+x+','+y+SegmentV
        # Build and write the path
style = inkex.Style({
'fill' : 'none',
'stroke' : self.options.Couleur,
'stroke-width' : EpaisseurTrait
})
return inkex.PathElement(d=tableau_path, style=str(style))
if __name__ == '__main__':
    Tableau().run()
6cc6e2f2075b25aafd82cb80821c29d8bb2bae8b | 2,196 | py | Python | crop_to_train.py | watcharap0n/dlib-dc | 0c6dce2618686f4ae2ee41213da9e073c8b62825 | [
"MIT"
] | 2 | 2021-06-16T07:16:15.000Z | 2021-07-29T03:22:38.000Z | crop_to_train.py | watcharap0n/FaceRecognition-DC | 0c6dce2618686f4ae2ee41213da9e073c8b62825 | [
"MIT"
] | null | null | null | crop_to_train.py | watcharap0n/FaceRecognition-DC | 0c6dce2618686f4ae2ee41213da9e073c8b62825 | [
"MIT"
] | null | null | null | import cv2
from os import listdir
import os
import dlib
import time
import pickle
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('model_image/shape_predictor_68_face_landmarks.dat')
model = dlib.face_recognition_model_v1('model_image/dlib_face_recognition_resnet_model_v1.dat')
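# dlib pipeline used below: frontal face detector -> 68-point landmark predictor
# (face alignment) -> ResNet model mapping each aligned face to a 128-D
# descriptor used for recognition.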
path = 'datasets/crop_train'
img_pixel = 128
scale = 0.5
FACE_DETS = []
FACE_NAME = []
time_avg = []
stff = time.time()
for fn in listdir(path):
if not fn.startswith('.'):
path_fn = os.path.join(path, fn)
path_enter = os.path.join(path_fn, 'label')
if not path_enter.startswith('.'):
os.makedirs(path_enter, exist_ok=True)
for i in listdir(path_fn):
path_img = os.path.join(path_fn, i)
if i.endswith('.jpg') or i.endswith('.png') or i.endswith('.jpeg') or i.endswith('.JPG'):
image_color = cv2.imread(path_img)
image = cv2.resize(image_color, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
gray_scale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
dets = detector(gray_scale, 1)
for k, d in enumerate(dets):
x, y = d.left(), d.top()
w, h = d.right(), d.bottom()
                        cropped_image = image[y:h, x:w]  # crop the detected face region
image = cv2.resize(cropped_image, (img_pixel, img_pixel), interpolation=cv2.INTER_AREA)
cv2.imwrite(os.path.join(path_enter, i), image)
print('finishing cropped...')
for e in listdir(path_enter):
path_label = os.path.join(path_enter, e)
                # cv2.imread expects an IMREAD_* flag as its second argument, not a
                # colour-conversion code; read normally and convert BGR -> RGB
                img = cv2.imread(path_label)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
dets = detector(img, 1)
for k, d in enumerate(dets):
shape = sp(img, d)
face_desc = model.compute_face_descriptor(img, shape, 1)
FACE_DETS.append(face_desc)
FACE_NAME.append(fn)
                # running average: cumulative elapsed time divided by faces processed so far
                sec = time.time() - stff
                time_avg.append(sec)
                avg = time_avg[-1] / len(time_avg)
print('avg: {} sec '.format(str(round(avg, 2))))
pickle.dump((FACE_DETS, FACE_NAME), open('trainingset_dc.pk', 'wb'))
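# Loading the stored training set later (usage sketch):
#   with open('trainingset_dc.pk', 'rb') as f:
#       FACE_DETS, FACE_NAME = pickle.load(f)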
# === video_demo.py (repo: zhreshold/mxnet-yolo, license: MIT) ===
import os
import cv2
import numpy as np
import random
from moviepy.editor import *
import mxnet as mx
from detect.detector import Detector
class video_generator:
def __init__(self,video_path,fps,output_path='./result.mp4'):
self.clip = VideoFileClip(video_path)
self.output_path = output_path
self.fps = fps
self.record = None
def set_record(self,record):
self.record = record
def commit(self):
def draw(img,bboxes):
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for b in bboxes:
xmin,ymin,xmax,ymax = b[:]
cv2.rectangle(img, (xmin,ymin), (xmax,ymax),(255,255,0) ,thickness=2)
return img
def make_frame(t):
idx = t*(self.clip.fps/self.fps)
frm = self.clip.get_frame(t)
height ,width = frm.shape[:2]
for t,bboxes in self.record:
if t==idx:
frm = draw(frm,bboxes)
else:
pass
return frm
        new_clip = VideoClip(make_frame, duration=self.clip.duration)
new_clip.fps=self.clip.fps
new_clip.to_videofile(self.output_path)
def get_mxnet_detector(net, prefix, epoch, data_shape, mean_pixels, ctx,batch_size = 1):
detector = Detector(net, prefix, epoch, data_shape, mean_pixels, ctx=ctx,batch_size = 1)
return detector
def img_preprocessing(img,data_shape):
img = cv2.resize(img,(data_shape,data_shape))
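    # HWC -> CHW: MXNet NDArray inputs expect the channel axis first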
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2)
# img = img[np.newaxis, :]
return [mx.nd.array([img])]
def get_bboxes(img,dets,thresh = 0.5 ):
height = img.shape[0]
width = img.shape[1]
bboxes = []
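    # Each detection row is [class_id, score, xmin, ymin, xmax, ymax], with the
    # box coordinates normalised to [0, 1]; they are rescaled to pixels below.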
for i in range(dets.shape[0]):
cls_id = int(dets[i, 0])
if cls_id >= 0:
score = dets[i, 1]
if score > thresh:
xmin = int(dets[i, 2] * width)
ymin = int(dets[i, 3] * height)
xmax = int(dets[i, 4] * width)
ymax = int(dets[i, 5] * height)
bboxes.append([xmin,ymin,xmax,ymax])
# cv2.rectangle(img, (xmin,ymin), (xmax,ymax),(255,255,0) ,thickness=2)
# cv2.imwrite('./img.jpg',img)
return bboxes
def main():
#args
net = None
# prefix = os.path.join(os.getcwd(), 'model', 'yolo2_darknet19_416')
# epoch = 240
prefix = os.path.join(os.getcwd(), 'model', 'resnet50_yolov2_resnet50_416')
epoch = 158
data_shape = 416
mean_pixels = (123,117,104)
ctx = mx.gpu(0)
detector = get_mxnet_detector(net, prefix, epoch, data_shape, mean_pixels, ctx=ctx,batch_size = 1)
video_path = '/home/share/test_video/a1004s101_ch0.mp4'
clip = VideoFileClip(video_path)
record = []
frames = clip.iter_frames(fps=clip.fps ,with_times = True)
for t,frm in frames:
data = img_preprocessing(frm,data_shape)
det_batch = mx.io.DataBatch(data,[])
detector.mod.forward(det_batch, is_train=False)
detections = detector.mod.get_outputs()[0].asnumpy()
result = []
for i in range(detections.shape[0]):
det = detections[i, :, :]
res = det[np.where(det[:, 0] >= 0)[0]]
result.append(res)
        # batch size is 1, so there is exactly one filtered detection set
        bboxes = get_bboxes(frm, result[0])
record.append([t,bboxes])
vg = video_generator(video_path,fps = clip.fps)
vg.set_record(record)
vg.commit()
if __name__ == '__main__':
    main()
6cc863462beb8552be03ca2688c76f0614e388b1 | 3,135 | py | Python | aws_quality_of_life/route53_record.py | opentokix/yolo-dangerzone | 7ea5ab54e3369997c9715fb762cc5604023b3aee | [
"Unlicense"
] | 2 | 2020-02-13T02:38:25.000Z | 2022-01-24T15:02:29.000Z | aws_quality_of_life/route53_record.py | opentokix/yolo-dangerzone | 7ea5ab54e3369997c9715fb762cc5604023b3aee | [
"Unlicense"
] | null | null | null | aws_quality_of_life/route53_record.py | opentokix/yolo-dangerzone | 7ea5ab54e3369997c9715fb762cc5604023b3aee | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# Very simple tool to add A or AAAA records in a route53 hosted zone from the command line.
import boto3
import click
import os
import logging
import ipaddress
logger = logging.getLogger('route53_tool')
logger.setLevel(logging.ERROR)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S")
ch.setFormatter(formatter)
logger.addHandler(ch)
def get_available_zones(route53):
"""Get zones handled by this account, for some basic checks. Put result in a dict"""
z = {}
response = route53.list_hosted_zones()
zones = response['HostedZones']
for i in range(len(zones)):
z[zones[i]['Name']] = zones[i]['Id']
if not z:
logger.error("No zones possible")
exit(1)
else:
logger.debug(z)
return z
def sanitycheck_of_zone(zones, fqdn):
"""Make sure you own the zone you are trying to add records in."""
if fqdn.count('.') < 2:
logger.error("You need a hostname. ie: hostname.domain.tld, not only domain.tld")
exit(1)
wip = fqdn.split('.')
zone = wip[-2:]
zone = f"{zone[0]}.{zone[1]}."
for key in zones:
if zone == key:
logger.debug(f"Zone ID: {zones[zone]}")
return str(zones[zone])
logger.error(f"What are you doing? You don't control {zone}")
exit(1)
def add_rrset(route53, zone_id, fqdn, ip):
"""Adding the resource record, also check if its A or AAAA to be added."""
    try:
        address = ipaddress.ip_address(ip)
    except ValueError:
        logger.error(f"[ {ip} ] is not a valid ipaddress")
        exit(1)
logger.debug(f"{address} is ip version {address.version}")
if address.version == 4:
record_type = "A"
elif address.version == 6:
record_type = "AAAA"
else:
logger.error("Unknown IP")
exit(1)
response = route53.change_resource_record_sets(
HostedZoneId=zone_id, ChangeBatch={'Comment': 'Autoupdated record', 'Changes': [
{'Action': 'UPSERT', 'ResourceRecordSet': {
'Name': fqdn, 'Type': record_type, 'TTL': 300, 'ResourceRecords':
[
{'Value': ip},
],
}
},
]})
return response
@click.command()
@click.option('--ip', '-i', required=True, default=None, help="The ip-number your A or AAAA record should point to")
@click.option('--fqdn', required=True, help="Fully qualitifed domain name of the A or AAAA record to be added.")
@click.option('--verbose', '-V', is_flag=True, default=False, help="Verbose flag to get debug info")
def main(ip, fqdn, verbose):
"""This is where the magic happens."""
if verbose:
logger.setLevel(logging.DEBUG)
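    # Credentials come from the environment; the script raises a KeyError if
    # ROUTE53_ACCESS / ROUTE53_SECRET are not exported.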
AWS_ACCESS = os.environ['ROUTE53_ACCESS']
AWS_SECRET = os.environ['ROUTE53_SECRET']
logger.debug(fqdn)
route53 = boto3.client('route53', aws_access_key_id=AWS_ACCESS, aws_secret_access_key=AWS_SECRET)
zone_id = sanitycheck_of_zone(get_available_zones(route53), fqdn)
logger.info(add_rrset(route53, zone_id, fqdn, ip))
if __name__ == '__main__':
main()
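# Example invocation (hypothetical values):
#   ROUTE53_ACCESS=... ROUTE53_SECRET=... ./route53_record.py --ip 192.0.2.10 --fqdn host.example.com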
# === scripts/download_deps_linux.py (repo: MikuAuahDark/love-fuser, licenses: Xnet, X11) ===
import argparse
import os
def run_command(command: str):
ret = os.system(command)
if ret != 0:
raise RuntimeError(f"Command returned {ret}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("url", type=str, help="tarball url")
parser.add_argument("dest", type=str, help="tarball new folder name")
args = parser.parse_args()
# Get filename
filename: str = os.path.basename(args.url)
query = filename.find('?')
if query != -1:
filename = filename[:query]
    # Get the folder name produced when extracting (assumes the archive name contains ".tar")
    basename = filename[:filename.find(".tar")]
# tar flags
tarflags = 'xzf' if filename[-3:] == '.gz' else 'xf'
# Execute
run_command(f"curl -Lfo {filename} {args.url}")
run_command(f"tar {tarflags} {filename}")
run_command(f"mv {basename} {args.dest}") | 33.076923 | 73 | 0.644186 | 112 | 860 | 4.8125 | 0.455357 | 0.074212 | 0.061224 | 0.06679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004392 | 0.205814 | 860 | 26 | 74 | 33.076923 | 0.784773 | 0.072093 | 0 | 0 | 0 | 0 | 0.207809 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
# === ProtectionDomains/scripts/Profile_Nutanix_Action_ListVMSnapshots_Task_ListSnapshot.py (repo: kingsleyck/NutanixCalm, license: MIT) ===
# REST API call to Prism Central to list all available VM snapshots
# setup common variables
uri = 'localhost:9440'
cluster_uuid = '@@{platform.status.cluster_reference.uuid}@@'
vm_uuid = '@@{id}@@'
hostname = '@@{calm_application_name}@@'
remote_cluster = '@@{remote_protection_domain_cluster}@@'
# setup credentials
username = '@@{Creds_PrismCentral.username}@@'
username_secret = '@@{Creds_PrismCentral.secret}@@'
###################### DEFINE FUNCTIONS ######################
def rest_call( url, method, username=username, username_secret=username_secret, payload="" ):
headers = { 'content-type': 'application/json' }
if payload:
resp = urlreq(
url,
verb=method,
params=json.dumps(payload),
auth='BASIC',
user=username,
passwd=username_secret,
headers=headers,
verify=False
)
else:
resp = urlreq(
url,
verb=method,
auth='BASIC',
user=username,
passwd=username_secret,
headers=headers,
verify=False
)
if resp.ok:
try:
return json.loads(resp.content)
except:
return resp.content
else:
print('Request failed')
print('Headers: {}'.format(headers))
print('Payload: {}'.format(json.dumps(payload)))
print('Status code: {}'.format(resp.status_code))
print(resp.content)
exit(1)
################ LIST PROTECTION DOMAIN SNAPSHOTS ###################
url = 'https://{}/PrismGateway/services/rest/v2.0/protection_domains/{}/dr_snapshots?proxyClusterUuid={}'.format(
uri,
hostname,
cluster_uuid
)
method = 'GET'
response = rest_call(url=url,method=method)
entities = response['entities']
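# Each entity in the v2 dr_snapshots response carries at least 'snapshot_id' and
# 'snapshot_create_time_usecs', which are tabulated below.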
# quick and dirty tabular output
print('{0:25}' '{1}'.format('SnapshotID','Time'))
for entity in entities:
snapshot_id = entity['snapshot_id']
    # snapshot_create_time_usecs is microseconds since the epoch; convert to seconds
    time = int(entity['snapshot_create_time_usecs']) // 1000000
    time = _datetime.datetime.fromtimestamp(time).strftime('%x %X')
    print('{0:25}' '{1}'.format(snapshot_id, time))
# === src/methods_saxs/ensemble.py (repo: spirit01/ensemble-fit_docker_version, license: MIT) ===
# Script must have these three parts:
# prepare_data()
# make_experiment()
# collect_results()
import logging
import pathlib
import shutil
import subprocess
import sys
from saxs_experiment import Colors
from saxs_experiment import LogPipe
def prepare_data(all_files, tmpdir, mydirvariable):
pathlib.Path(f'{tmpdir}/pdbs/ensembles').mkdir(parents=True, exist_ok=True)
for i, file in enumerate(all_files, start=1):
shutil.copy(f'{mydirvariable}/{file}.pdb', f'{tmpdir}/pdbs/ensembles/{i:02d}.pdb')
def make_experiment(all_files, tmpdir, verbose, verbose_logfile, path, mydirvariable):
# RUN ensemble
command = f'{path} -L -p {tmpdir}/pdbs/ensembles/ -n {len(all_files)} -m {tmpdir}/method/curve.modified.dat'
if verbose == 3:
print(f'{Colors.OKBLUE} Command for ensemble fit \n {Colors.ENDC, command} \n')
if verbose_logfile:
logpipe = LogPipe(logging.DEBUG)
logpipe_err = LogPipe(logging.ERROR)
logging.info(f'Command for ensemble fit \n {command}')
call = subprocess.run([f'{path}', '-L', '-p', f'{tmpdir}/pdbs/ensembles/', '-n', f'{len(all_files)}',
'-m', f'{tmpdir}/method/curve.modified.dat'],
cwd=f'{tmpdir}/results/', stdout=logpipe, stderr=logpipe_err)
logpipe.close()
logpipe_err.close()
else:
call = subprocess.run(
[f'{path}', '-L', '-p', f'{tmpdir}/pdbs/ensembles/', '-n', f'{len(all_files)}', '-m',
f'{tmpdir}/method/curve.modified.dat'],
cwd=f'{tmpdir}/results/', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if call.returncode:
print(f'ERROR: ensemble failed', file=sys.stderr)
logging.error(f'Ensemble failed.')
sys.exit(1)
def collect_results(tmpdir, all_files):
# Process with result from ensemble
result_chi_and_weights_ensemble = []
# 5000
# 1.08e+01,0.952,2.558,4.352610,0.000,0.300,0.000,0.000,0.000,0.000,0.000,0.092,0.000,0.908
# 1.08e+01,0.950,2.558,4.752610,0.000,0.100,0.000,0.000,0.000,0.000,0.000,0.092,0.000,0.908
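    # Column layout, per the sample rows above: index 3 is the chi^2 of the fit,
    # indices 4 onwards are the per-structure weights of the ensemble.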
with open(f'{tmpdir}/results/result', 'r') as f:
next(f)
for line in f:
line = line.rstrip()
value_of_chi2 = float(line.split(',')[3])
values_of_index_result = [float(value) for value in line.split(',')[4:]]
result_chi_and_weights_ensemble.append((value_of_chi2, values_of_index_result))
ensemble_results = []
    for chi2, weights in result_chi_and_weights_ensemble:
        # reset per result row; a single shared list would accumulate across rows
        structure = []
for i, weight in enumerate(weights):
if weight >= 0.001:
structure.append((f'{all_files[i]}.pdb', weight))
ensemble_results.append((chi2, structure))
return ensemble_results
# ((chi2, [('mod10.pdb', 0.3), ('mod15.pdb', 0.7)]),(chi2(strucutre, weight),(strucutre, weight)))