hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0446287e343b809baec3c4682452b35d3c243b0d | 566 | py | Python | tests/test_math.py | JosephMontoya-TRI/monty | facef1776c7d05c941191a32a0b93f986a9761dd | [
"MIT"
] | null | null | null | tests/test_math.py | JosephMontoya-TRI/monty | facef1776c7d05c941191a32a0b93f986a9761dd | [
"MIT"
] | null | null | null | tests/test_math.py | JosephMontoya-TRI/monty | facef1776c7d05c941191a32a0b93f986a9761dd | [
"MIT"
] | null | null | null | # coding: utf-8
#!/usr/bin/env python
from __future__ import division, unicode_literals
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2014, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '1/24/14'
import unittest
from monty.math import nCr, nPr
class FuncTest(unittest.TestCase):
def test_nCr(self):
self.assertEqual(nCr(4, 2), 6)
def test_deprecated_property(self):
self.assertEqual(nPr(4, 2), 12)
if __name__ == "__main__":
unittest.main()
| 18.866667 | 59 | 0.704947 | 77 | 566 | 4.662338 | 0.714286 | 0.050139 | 0.066852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040773 | 0.176678 | 566 | 29 | 60 | 19.517241 | 0.729614 | 0.058304 | 0 | 0 | 0 | 0 | 0.190566 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.1875 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
044734cfe1196714dd666049793f24e1b681b8b3 | 4,450 | py | Python | exercises/fit_gaussian_estimators.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
] | null | null | null | exercises/fit_gaussian_estimators.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
] | null | null | null | exercises/fit_gaussian_estimators.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
] | null | null | null | from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.io as pio
import plotly.express as px
import pandas as pd
pio.templates.default = "simple_white"
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
"""Create a sample of size 1000 of N(10,1) and print the expectaton and
variance estimations of the univariate gaussian estimator"""
mu_, stdev_ = 10, 1
var_ = stdev_ * stdev_
samples_ = np.random.normal(loc=mu_, scale=stdev_, size=1000)
estimator = UnivariateGaussian()
estimator.fit(samples_)
print(estimator.mu_, estimator.var_)
# Question 2 - Empirically showing sample mean is consistent
"""using samples of increasing sizes(10,20,...,1000), plot the difference
of the estimated and real expectation as a function of sample size."""
sample_estimator = UnivariateGaussian()
estimations = list()
for size in range(10, 1010, 10):
sample_estimator.fit(samples_[:size])
estimations.append(np.abs(mu_ - sample_estimator.mu_))
# Plot:
df_sample_size = pd.DataFrame(
np.array([list(range(10, 1010, 10)), estimations]).transpose(),
columns=["Sample Size", "Estimation Error"])
fig_1 = px.scatter(df_sample_size, x="Sample Size", y="Estimation Error")
fig_1.update_layout(
title_text='Univariate Gaussian Estimator<br><sup> Error in'
' expectancy as a function of sample size</sup>'
, title_x=0.5, title_font_size=25)
fig_1.show()
# Question 3: Plot the PDF using fitted model
"""Compute the PDF of the previously drawn samples using the model fitted
in question 1. Plot the empirical PDF function under the fitted model"""
# Create:
samples_.sort()
pdfs_ = estimator.pdf(samples_)
fig_2 = px.scatter(x=samples_, y=pdfs_)
fig_2.update_layout(title_text="Univariate Gaussian Estimator<br><sup>"
" sample density plotted on empirical PDF</sup>",
xaxis_title="Sample Value",
yaxis_title="Empirical PDF", title_x=0.5,
title_font_size=25)
fig_2.update_traces(marker=dict(size=2))
fig_2.show()
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
"""Fit a multivariate Gaussian and print the estimated expectation
and covariance matrix."""
mu_ = np.array([0, 0, 4, 0])
cov_ = np.array([[1, 0.2, 0, 0.5], [0.2, 2, 0, 0],
[0, 0, 1, 0], [0.5, 0, 0, 1]])
samples_ = np.random.multivariate_normal(mu_, cov_, size=1000)
estimator = MultivariateGaussian()
estimator.fit(samples_)
print(estimator.mu_)
print(estimator.cov_)
# Question 5 - Likelihood evaluation
"""Using the samples drawn in the question above calculate the log-likelihood
for models with expectation µ = [f1,0,f3,0]. Plot a heatmap of f1 values
as rows, f3 values as columns and the color being the calculated log likelihood."""
sample_count = 200 # needs to be 200
f1 = np.linspace(-10, 10, sample_count)
f3 = np.linspace(-10, 10, sample_count)
func = lambda x, y: MultivariateGaussian.log_likelihood(
np.array([x, 0, y, 0]), cov_, samples_)
func_vec = np.vectorize(func)
res = func_vec(f1[:, np.newaxis], f3)
labels_dict = {"x": "f3", "y": "f1", "color": "Log-likelihood"}
fig = px.imshow(res, x=f1, y=f3, labels=labels_dict)
fig.update_layout(title_text="Multivariate Gaussian Estimator<br><sup>"
"Log-likelihood of expectation µ = [f1,0,f3,0] and known covariance, "
"values drawn with expectation [0,0,4,0]</sup>",
title_x=0.5, title_font_size=25,
legend_x=0)
fig.show()
# Question 6 - Maximum likelihood
"""Of all values tested in question 5, which model (pair of values for
feature 1 and 3) achieved the maximum log-likelihood value? Round to 3 decimal places"""
argmax_tup = np.unravel_index(res.argmax(), res.shape)
print("Max value achieved: {0}".format(res.max()))
print("Argmax: f1 = {0}, f3 = {1}".format(f1[argmax_tup[0]],
f3[argmax_tup[1]]))
if __name__ == '__main__':
np.random.seed(0)
test_univariate_gaussian()
test_multivariate_gaussian()
| 43.203883 | 103 | 0.644045 | 611 | 4,450 | 4.540098 | 0.288052 | 0.005768 | 0.0292 | 0.022711 | 0.16186 | 0.16186 | 0.067412 | 0.067412 | 0.067412 | 0 | 0 | 0.043375 | 0.243596 | 4,450 | 102 | 104 | 43.627451 | 0.780749 | 0.115056 | 0 | 0.028986 | 0 | 0 | 0.15704 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0 | 0.072464 | 0 | 0.101449 | 0.072464 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0448e36555008095af9e18f603a31c577ffa7f06 | 22,847 | py | Python | matilda/data_pipeline/data_scapers/financial_statements_scraper/xbrl_scraper_sec_edgar.py | AlainDaccache/Quantropy | 6cfa06ed2b764471382ebf94d40af867f10433bb | [
"MIT"
] | 45 | 2021-01-28T04:12:21.000Z | 2022-02-24T13:15:50.000Z | matilda/data_pipeline/data_scapers/financial_statements_scraper/xbrl_scraper_sec_edgar.py | AlainDaccache/Quantropy | 6cfa06ed2b764471382ebf94d40af867f10433bb | [
"MIT"
] | 32 | 2021-03-02T18:45:16.000Z | 2022-03-12T00:53:10.000Z | matilda/data_pipeline/data_scapers/financial_statements_scraper/xbrl_scraper_sec_edgar.py | AlainDaccache/Quantropy | 6cfa06ed2b764471382ebf94d40af867f10433bb | [
"MIT"
] | 10 | 2020-12-25T15:02:40.000Z | 2021-12-30T11:40:15.000Z | import traceback
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
from pprint import pprint
import requests
import re
from bs4 import BeautifulSoup, NavigableString
from zope.interface import implementer
import numpy as np
from matilda.data_pipeline.data_preparation_helpers import flatten_dict
from matilda.data_pipeline.data_scapers.financial_statements_scraper import financial_statements_scraper
def get_company_cik(ticker):
URL = 'http://www.sec.gov/cgi-bin/browse-edgar?CIK={}&Find=Search&owner=exclude&action=getcompany'.format(ticker)
response = requests.get(URL)
CIK_RE = re.compile(r'.*CIK=(\d{10}).*')
cik = CIK_RE.findall(response.text)[0]
print('Company CIK for {} is {}'.format(ticker, cik))
return cik
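# Collect the links to each filing's index page for the given CIK and filing type (e.g. 10-K, 10-Q) from the EDGAR browse results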
def get_filings_urls_first_layer(cik, filing_type):
base_url = "https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK={}&type={}".format(cik, filing_type)
edgar_resp = requests.get(base_url).text
print(base_url)
soup = BeautifulSoup(edgar_resp, 'html.parser')
table_tag = soup.find('table', class_='tableFile2')
rows = table_tag.find_all('tr')
doc_links = []
for row in rows[1:]:
cells = row.find_all('td')
doc_links.append('https://www.sec.gov' + cells[1].a['href'])
return doc_links
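# For each filing index page, extract the period of report and the link to the XBRL instance document (XML)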
def get_filings_urls_second_layer(doc_links):
dates_and_links = []
for doc_link in doc_links:
doc_resp = requests.get(doc_link).text # Obtain HTML for document page
soup = BeautifulSoup(doc_resp, 'html.parser') # Find the XBRL link
head_divs = soup.find_all('div', class_='infoHead') # first, find period of report
cell_index = next((index for (index, item) in enumerate(head_divs) if item.text == 'Period of Report'), -1)
period_of_report = ''
try:
siblings = head_divs[cell_index].next_siblings
for sib in siblings:
if isinstance(sib, NavigableString):
continue
else:
period_of_report = sib.text
break
except:
traceback.print_exc()
# first, try finding an XML document
table_tag = soup.find('table', class_='tableFile', summary='Data Files')
if table_tag is not None:
rows = table_tag.find_all('tr')
for row_index, row in enumerate(rows[1:]):
cells = row.find_all('td')
link = 'https://www.sec.gov' + cells[2].a['href']
if 'XML' in cells[3].text or 'INS' in cells[3].text:
dates_and_links.append((period_of_report, link))
return dates_and_links
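# Parser that maps raw us-gaap XBRL tags onto a normalized financial-statement structure using the regex patterns defined below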
@implementer(financial_statements_scraper.FinancialStatementsParserInterface)
class XbrlParser:
regex_patterns = {
'Balance Sheet': {
'Assets': {
'Current Assets': {
'Cash and Short Term Investments': {
'Cash and Cash Equivalents': r'^Cash And Cash Equivalents At Carrying Value$',
'Marketable Securities Current': r'(^Available For Sale Securities Current'
r'|Available For Sale Securities Debt Securities'
r'|Marketable Securities Current$)',
'Cash and Short Term Investments': r'$^',
},
'Accounts Receivable': {
'Allowance for Doubtful Accounts': r'^Allowance For Doubtful Accounts Receivable Current$',
'Net Accounts Receivable': r'^Accounts Receivable Net Current$',
'Vendor Nontrade Receivables, Current': r'^Nontrade Receivables Current$'
},
'Prepaid Expense, Current': r'$^',
'Inventory, Net': r'^Inventory Net$',
'Income Taxes Receivable, Current': r'$^',
'Assets Held-for-sale': r'$^',
# taxes that have already been paid despite not yet having been incurred
'Deferred Tax Assets, Current': r'$^',
'Other Current Assets': r'^Other Assets Current$',
'Total Current Assets': r'^Assets Current$'
},
'Non Current Assets': {
'Marketable Securities Non Current': r'^Marketable Securities Noncurrent$',
'Restricted Cash Non Current': r'$^',
'Property, Plant and Equipment': {
'Gross Property, Plant and Equipment': r'^Property Plant And Equipment Gross$',
'Accumulated Depreciation and Amortization': r'^Accumulated Depreciation Depletion And Amortization Property Plant And Equipment$',
'Property, Plant and Equipment, Net': r'^Property Plant And Equipment Net$',
},
'Operating Lease Right-of-use Assets': r'$^',
'Deferred Tax Assets Non Current': r'$^',
'Intangible Assets': {
'Goodwill': r'^Goodwill$',
'Intangible Assets, Net (Excluding Goodwill)': r'^Intangible Assets Net Excluding Goodwill$',
'Total Intangible Assets': r'$^',
},
'Other Non Current Assets': r'^Other Assets Noncurrent$',
'Total Non Current Assets': r'^Assets Noncurrent$'
},
'Total Assets': r'^Assets$'
},
"Liabilities and Shareholders\' Equity": {
'Liabilities': {
'Current Liabilities': {
# this is the short-term debt, i.e. the amount of a loan that is payable to the lender within one year.
'Long-term Debt, Current Maturities': r'^LongTermDebtCurrent$',
'Accounts Payable': r'^Accounts Payable Current$',
# accounts payable is always a current liability anyway
'Other Accounts Payable': r'^Accounts Payable Other Current$',
'Operating Lease, Liability, Current': r'$^',
'Employee-related Liabilities, Current': r'$^',
'Accrued Income Taxes': r'$^',
'Accrued Liabilities, Current': r'^Accrued Liabilities Current$',
'Deferred Revenue, Current': r'^Contract With Customer Liability Current$',
'Commercial Paper': r'^Commercial Paper$',
'Income Taxes Payable': r'$^',
'Other Current Liabilities': r'^Other Liabilities Current$',
'Total Current Liabilities': r'^Liabilities Current$',
},
'Non Current Liabilities': {
'Deferred Tax Liabilities': r'$^',
# this debt is due after one year in contrast to current maturities which are due within this year
'Long-term Debt, Noncurrent Maturities': r'^Long Term Debt Noncurrent$',
'Operating Lease, Liability, Noncurrent': r'$^',
'Liability, Defined Benefit Plan, Noncurrent': r'$^',
'Accrued Income Taxes, Noncurrent': r'$^',
'Deferred Revenue, Noncurrent': r'$^',
'Long-Term Unearned Revenue': r'$^',
'Other Liabilities, Noncurrent': r'^Other Liabilities Noncurrent$',
'Total Non Current Liabilities': r'^Liabilities Noncurrent$'
},
'Total Liabilities': r'^Liabilities$'
# sometimes the bottom rows use two indentation levels, which our code can't catch, e.g. Other non-current liabilities, then an indented Total non-current liabilities, then an indented Total liabilities
},
"Shareholders' Equity": {
'Preferred Stock, Value, Issued': r'$^',
'Common Stock and Additional Paid in Capital': {
'Common Stock, Value, Issued': r'^Common Stock Value$',
'Additional Paid in Capital': r'^Additional Paid In Capital$',
'Common Stocks, Including Additional Paid in Capital': r'^Common Stocks Including Additional Paid In Capital$',
'Weighted Average Number of Shares Outstanding, Basic': r'^Weighted Average Number Of Shares Outstanding Basic$ ',
'Weighted Average Number Diluted Shares Outstanding Adjustment': r'$^',
'Weighted Average Number of Shares Outstanding, Diluted': r'^Weighted Average Number Of Diluted Shares Outstanding$',
},
'Treasury Stock, Value': r'$^',
'Retained Earnings (Accumulated Deficit)': r'^Retained Earnings Accumulated Deficit$',
'Accumulated Other Comprehensive Income (Loss)': r'^Accumulated Other Comprehensive Income Loss Net Of Tax$',
'Deferred Stock Compensation': r'$^',
'Stockholders\' Equity Attributable to Parent': r'$^',
'Minority Interest': r'$^',
'Stockholders\' Equity, Including Portion Attributable to Noncontrolling Interest': '(?!.*Before)(?=.*Noncontrolling interest)(?=.*Equity(?!.*[_]))(?!.*Liabilities(?!.*[_]))'
},
'Total Liabilities and Shareholders\' Equity': r'^Liabilities And Stockholders Equity$'
},
},
'Income Statement': {
'Revenues': {
'Service Sales': r'$^',
'Product Sales': r'$^',
'Net Sales': r'^(Revenue From Contract With Customer Excluding Assessed Tax|Sales Revenue Net)$'
},
'Cost of Goods and Services Sold': {
'Cost of Products': r'^(Cost Of Goods And Services Sold ProductMember|Revenue From Contract With Customer Excluding Assessed Tax ProductMember)$',
'Cost of Services': r'^(Cost Of Goods And Services Sold ServiceMember|Revenue From Contract With Customer Excluding Assessed Tax ServiceMember)$',
'Cost of Goods and Services Sold': r'^(Cost Of Revenue|Cost Of Goods And Services Sold)$',
'Gross Margin': r'^Gross Profit$',
},
'Provision for Loan, Lease, and Other Losses': r'$^',
'Operating Expenses': {
'Research and Development Expense': r'^Research And Development Expense$',
'Selling, General and Administrative': {
'Marketing Expense': r'$^',
'Selling and Marketing Expense': r'^Selling And Marketing Expense$',
'General and Administrative Expense': r'^General And Administrative Expense$',
'Selling, General and Administrative Expense': r'^Selling General And Administrative Expense$'
},
'Other Operating Expenses': r'$^', # TODO
'EBITDA': r'$^',
'Total Operating Expenses': r'^Operating Expenses$'
},
'Costs and Expenses': r'^Costs And Expenses$',
'Operating Income (Loss) / EBIT': r'^Operating Income Loss$',
'Other (Non-Operating) Income (Expense)': {
'Interest Income': r'^Investment Income Interest$',
'Interest and Dividend Income': r'$^',
'Interest Expense': r'^Interest Expense$',
'Interest Income (Expense), Net': r'$^',
'Foreign Currency Transaction Gain (Loss)': r'^Foreign Currency Transaction Gain Loss Before Tax$',
'Other Nonoperating Income (Expense)': '^Other Nonoperating Income Expense$',
# below is for 'Interest and other income, net' and 'Total other income/(expense), net'
'Non-Operating Income (Expense)': r'^Nonoperating Income Expense$'
},
'Income (Loss) before Income Taxes, Noncontrolling Interest': r'^Income Loss From Continuing Operations Before Income Taxes Extraordinary Items Noncontrolling Interest$',
'Income Tax Expense (Benefit)': r'^Income Tax Expense Benefit$',
'Net Income (Loss), Including Portion Attributable to Noncontrolling Interest': r'$^',
'Net Income (Loss) Attributable to Noncontrolling (Minority) Interest': r'$^',
'Net Income (Loss) Attributable to Parent': r'^Net Income Loss$',
'Undistributed Earnings (Loss) Allocated to Participating Securities, Basic': r'^Undistributed Earnings Loss Allocated To Participating Securities Basic$',
'Preferred Stock Dividends': r'$^',
'Net Income (Loss) Available to Common Stockholders, Basic': r'^Net Income Loss Available To Common Stockholders Basic$',
'Other Comprehensive Income (Loss)': r'$^',
'Comprehensive Income (Loss), Net of Tax, Attributable to Parent': r'$^',
'Earnings Per Share, Basic': '^Earnings Per Share Basic$',
'Earnings Per Share, Diluted': '^Earnings Per Share Diluted$',
},
'Cash Flow Statement': {
'Cash, Cash Equivalents, Restricted Cash and Restricted Cash Equivalents, Beginning Balance':
'$^',
'Operating Activities': {
'Net Income (Loss) Attributable to Parent': r'$^',
'Adjustments to Reconcile Net Income': {
'Depreciation, Depletion and Amortization': r'$^',
'Share-based Payment Arrangement, Noncash Expense': r'$^',
'Deferred Income Tax Expense (Benefit)': r'$^',
'Other Noncash Income (Expense)': r'$^'
},
'Change in Assets and Liabilities': {
'Increase (Decrease) in Accounts Receivable': r'$^',
'Increase (Decrease) in Inventories': r'$^',
'Increase (Decrease) in Other Receivables': r'$^',
'Increase (Decrease) in Prepaid Expense and Other Assets': r'$^',
'Increase (Decrease) in Other Operating Assets': r'$^',
'Increase (Decrease) in Accounts Payable': r'$^',
'Increase (Decrease) in Other Accounts Payable': r'$^',
'Increase (Decrease) in Accrued Liabilities': r'$^',
'Increase (Decrease) in Deferred Revenue, Liability': r'$^',
'Increase (Decrease) in Other Operating Liabilities': r'$^'
},
'Net Cash Provided by (Used in) Operating Activities': r'$^'
},
'Investing Activities': {
'Payments to Acquire Marketable Securities, Available-for-sale': r'$^',
'Proceeds from Maturities, Prepayments and Calls of Debt Securities, Available-for-sale': r'$^',
'Proceeds from Sale of Debt Securities, Available-for-sale': r'$^',
'Payments to Acquire Property, Plant, and Equipment': r'$^',
'Payments to Acquire Businesses, Net of Cash Acquired': r'$^',
'Payments to Acquire Other Investments': r'$^',
'Proceeds from Sale and Maturity of Other Investments': r'$^',
'Payments for (Proceeds from) Other Investing Activities': r'$^',
'Net Cash Provided by (Used in) Investing Activities': r'$^'
},
'Financing Activities': {
'Proceeds from Issuance of Common Stock': r'$^',
'Payment, Tax Withholding, Share-based Payment Arrangement': r'$^',
'Payments of Dividends': r'$^',
'Payments for Repurchase of Common Stock': r'$^',
'Proceeds from Issuance of Long-term Debt': r'$^',
'Repayments of Long-term Debt': r'$^',
'Finance Lease, Principal Payments': r'$^',
'Proceeds from (Repayments of) Bank Overdrafts': r'$^',
'Proceeds from (Repayments of) Commercial Paper': r'$^',
'Proceeds from (Payments for) Other Financing Activities': r'$^',
'Net Cash Provided by (Used in) Financing Activities': r'$^'
},
'Effect of Exchange Rate on Cash, Cash Equivalents, Restricted Cash and Restricted Cash Equivalents': r'$^',
'Cash, Cash Equivalents, Restricted Cash and Restricted Cash Equivalents, Period Increase (Decrease), Including Exchange Rate Effect': r'$^',
# we are hardcoding the Ending balance to be Cash, Cash Equivalents, Restricted Cash and Restricted Cash Equivalents in XBRL because we filtered the beginning balance (which can be taken from previous year)
'Cash, Cash Equivalents, Restricted Cash and Restricted Cash Equivalents, Ending Balance': r'$^',
'Supplemental': {}
}
}
def load_data_source(self, ticker: str) -> dict:
"""Load in the file links"""
cik = get_company_cik(ticker)
doc_links_yearly = get_filings_urls_first_layer(cik, '10-K')
doc_links_quarterly = get_filings_urls_first_layer(cik, '10-Q')
filings_dictio_yearly = get_filings_urls_second_layer(doc_links_yearly)
filings_dictio_quarterly = get_filings_urls_second_layer(doc_links_quarterly)
return {'Yearly': filings_dictio_yearly, 'Quarterly': filings_dictio_quarterly}
def scrape_tables(self, url: str, filing_date: datetime, filing_type: str) -> dict:
"""Extract tables from the currently loaded file."""
current_quarter = ''
response = requests.get(url).text
elements = ET.fromstring(response)
all_in_one_dict = {'Yearly': {filing_date: {'': {}}}, 'Quarterly': {filing_date: {'': {}}},
'6 Months': {filing_date: {'': {}}}, '9 Months': {filing_date: {'': {}}}}
# First, get all the us-gaap xbrl tags (that correspond to the current year or quarter)
found_current_quarter = False
for element in elements.iter():
if 'context' in element.tag and not found_current_quarter:
pattern = re.search('(Q\d)', element.attrib['id'])
if pattern:
current_quarter = pattern.groups()[-1]
found_current_quarter = True
if 'contextRef' in element.attrib.keys():
tag_name = re.sub(r"(\w)([A-Z])", r"\1 \2", element.tag.split('}')[1])
try:
tag_value = int(element.text)
except:
continue
axis_pattern = re.search(r'ProductOrServiceAxis_us-gaap_(.*)', element.attrib['contextRef'],
re.IGNORECASE)
if axis_pattern:
tag_name = tag_name + ' ' + axis_pattern.groups()[-1]
if ('Axis' not in element.attrib['contextRef']) or axis_pattern:
period = filing_type
# first pattern for date and period
date_pattern_yyyymmdd = re.search(r'(........)-{}'.format(filing_date.strftime('%Y%m%d')),
element.attrib['contextRef'])
if date_pattern_yyyymmdd:
prior_date = date_pattern_yyyymmdd.groups()[-1]
prior_date = datetime.strptime(prior_date, '%Y%m%d')
if filing_date > prior_date + timedelta(days=270):
period = '9 Months'
elif filing_date > prior_date + timedelta(days=180):
period = '6 Months'
all_in_one_dict[period][filing_date][''][tag_name] = tag_value
# second pattern for date and period
date_pattern_FDYYYYQd = re.search(r'FD{}{}(...)?'.format(filing_date.year, current_quarter),
element.attrib['contextRef'])
if date_pattern_FDYYYYQd and found_current_quarter:
period = date_pattern_FDYYYYQd.groups()[-1]
if period == 'YTD':
period = 'Yearly'
elif period == 'QTD':
period = 'Quarterly'
all_in_one_dict[period][filing_date][''][tag_name] = tag_value
# third pattern for date and period
date_pattern_FIYYYQd = re.search(r'FI{}{}'.format(filing_date.year, current_quarter),
element.attrib['contextRef'])
if date_pattern_FIYYYQd and found_current_quarter:
period = 'Yearly'
all_in_one_dict[period][filing_date][''][tag_name] = tag_value
# fourth pattern for date and period i.e. 'STD_364_20150926'
date_pattern_STD_delta_yyymmdd = re.search(r'STD_(\d+)_{}'.format(filing_date.strftime('%Y%m%d')),
element.attrib['contextRef'])
if date_pattern_STD_delta_yyymmdd:
period_unformatted = date_pattern_STD_delta_yyymmdd.groups()[-1]
if period_unformatted == '364' or period_unformatted == '0':
period = 'Yearly'
else:
period = 'Quarterly'
all_in_one_dict[period][filing_date][''][tag_name] = tag_value
return all_in_one_dict
def normalize_tables(self, filing_date, input_dict, visited_data_names) -> (dict, dict):
"""Standardize tables to match across years and companies"""
master_dict = {}
for normalized_category, pattern_string in flatten_dict(self.regex_patterns).items():
master_dict[normalized_category] = np.nan
for title, table in input_dict.items():
for scraped_name, scraped_value in flatten_dict(table).items():
for normalized_category, pattern_string in flatten_dict(self.regex_patterns).items():
if re.search(pattern_string, scraped_name, re.IGNORECASE):
master_dict[normalized_category] = scraped_value
break
pprint(master_dict)
return {}, master_dict
#
# if __name__ == '__main__':
# facebook = DataView('FB', '2019-12-31', '10-K')
# facebook.traverse_tree('StatementOfFinancialPositionClassified')
| 59.497396 | 218 | 0.557798 | 2,306 | 22,847 | 5.415438 | 0.205117 | 0.012812 | 0.014414 | 0.013693 | 0.355221 | 0.26057 | 0.195788 | 0.108584 | 0.080637 | 0.048527 | 0 | 0.003882 | 0.334792 | 22,847 | 383 | 219 | 59.652742 | 0.817805 | 0.060971 | 0 | 0.086567 | 0 | 0.008955 | 0.411009 | 0.004482 | 0 | 0 | 0 | 0.002611 | 0 | 1 | 0.01791 | false | 0 | 0.032836 | 0 | 0.074627 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
044acfb56a61857481ffe8d49421650a3a0564af | 412 | py | Python | py/mbib.py | michaelpalmeruw/mbib | 2f9a076cce10fda821f9d2b9f60b79fab84c7d6d | [
"Apache-2.0"
] | null | null | null | py/mbib.py | michaelpalmeruw/mbib | 2f9a076cce10fda821f9d2b9f60b79fab84c7d6d | [
"Apache-2.0"
] | null | null | null | py/mbib.py | michaelpalmeruw/mbib | 2f9a076cce10fda821f9d2b9f60b79fab84c7d6d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
batch_arg = os.getenv('mbib_batch')
# print("batch_arg", batch_arg)
if batch_arg is None:
from bibapp import BibApp
from hub import hub
hub.is_batch = False
_app = BibApp()
hub.register('app', _app)
hub.register('tree', _app.tree)
hub.register('exit', _app.exit)
hub.app()
else:
from batchmode import BatchMode
BatchMode(batch_arg)()
| 18.727273 | 35 | 0.667476 | 60 | 412 | 4.4 | 0.4 | 0.151515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003067 | 0.208738 | 412 | 21 | 36 | 19.619048 | 0.806748 | 0.123786 | 0 | 0 | 0 | 0 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
044bdba3231e4ede68a14c679aac47d587768f13 | 2,800 | py | Python | metaci/plan/views.py | sebastianocostanzo/MetaCI | a880a8b1caa7cf1445f220b6c2e4f83fe8d38312 | [
"BSD-3-Clause"
] | null | null | null | metaci/plan/views.py | sebastianocostanzo/MetaCI | a880a8b1caa7cf1445f220b6c2e4f83fe8d38312 | [
"BSD-3-Clause"
] | null | null | null | metaci/plan/views.py | sebastianocostanzo/MetaCI | a880a8b1caa7cf1445f220b6c2e4f83fe8d38312 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from metaci.build.utils import view_queryset
from metaci.plan.models import Plan, PlanRepository
from metaci.plan.forms import RunPlanForm
from metaci.repository.models import Repository
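# Staff users see every plan; all other users only see public plans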
def plan_list(request):
if request.user.is_staff:
plans = Plan.objects.all()
else:
plans = Plan.objects.filter(public=True)
context = {
'plans': plans,
}
return render(request, 'plan/list.html', context=context)
def plan_detail(request, plan_id):
query = {'id': plan_id}
if not request.user.is_staff:
query['public'] = True
plan = get_object_or_404(Plan, **query)
query = {'plan': plan}
builds = view_queryset(request, query)
context = {
'builds': builds,
'plan': plan,
}
return render(request, 'plan/detail.html', context=context)
def plan_detail_repo(request, plan_id, repo_owner, repo_name):
query = {'id': plan_id}
if not request.user.is_staff:
query['public'] = True
plan = get_object_or_404(Plan, **query)
repo = get_object_or_404(Repository, owner=repo_owner, name=repo_name)
query = {'plan': plan, 'repo': repo}
builds = view_queryset(request, query)
context = {
'builds': builds,
'plan': plan,
}
return render(request, 'plan/detail.html', context=context)
@staff_member_required
def plan_run(request, plan_id):
plan = get_object_or_404(Plan, id=plan_id)
context = {'plan': plan}
return render(request, 'plan/run_select_repo.html', context=context)
@staff_member_required
def plan_run_repo(request, plan_id, repo_owner, repo_name):
plan = get_object_or_404(Plan, id=plan_id)
repo = get_object_or_404(Repository, owner=repo_owner, name=repo_name)
if request.method == 'POST':
form = RunPlanForm(plan, repo, request.user, request.POST)
if form.is_valid():
build = form.create_build()
return HttpResponseRedirect(build.get_absolute_url())
else:
form = RunPlanForm(plan, repo, request.user, request.GET)
context = {
'form': form,
'plan': plan,
'repo': repo,
}
return render(request, 'plan/run.html', context=context)
@staff_member_required
def new_org_please(request):
plans = Plan.objects.filter(public=False, active=True, type='org').prefetch_related('repos')
plan_repos = PlanRepository.objects.filter(plan__in=plans).order_by('repo__name','plan__name')
context = {
'plans': plans,
'plan_repos': plan_repos,
}
return render(request, 'plan/new_org_please.html', context=context) | 32.941176 | 98 | 0.683214 | 366 | 2,800 | 5.010929 | 0.202186 | 0.059978 | 0.041985 | 0.053435 | 0.522356 | 0.476009 | 0.435115 | 0.368593 | 0.331516 | 0.257361 | 0 | 0.009362 | 0.198929 | 2,800 | 85 | 99 | 32.941176 | 0.808292 | 0 | 0 | 0.452055 | 0 | 0 | 0.079971 | 0.017494 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082192 | false | 0 | 0.109589 | 0 | 0.287671 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
044d5417447d12920bf78a08d7241650d58ce648 | 7,743 | py | Python | imdb/spiders/movieCrawler.py | pawanmsr/imdb-scraper | 74fa529a19c4965667060b7158d35aabfe197993 | [
"MIT"
] | 2 | 2018-12-12T15:54:10.000Z | 2019-05-16T03:00:32.000Z | imdb/spiders/movieCrawler.py | pawanmsr/imdb-scraper | 74fa529a19c4965667060b7158d35aabfe197993 | [
"MIT"
] | null | null | null | imdb/spiders/movieCrawler.py | pawanmsr/imdb-scraper | 74fa529a19c4965667060b7158d35aabfe197993 | [
"MIT"
] | null | null | null | import scrapy
import json
import os
import sys
import time
movies_directory = "movies/"
links_directory = "links/"
if not os.path.exists(movies_directory):
os.makedirs(movies_directory)
if not os.path.exists(links_directory):
os.makedirs(links_directory)
linkfile = "links/imdbLinks.json"
links_dict = {}
try:
with open(linkfile,'r') as f:
links_dict = json.load(f)
except:
print("unable to open links file, movieCrawler will not run")
pass
links = []
for key in links_dict:
links.append("https://www.imdb.com" + links_dict[key])
class movieCrawler(scrapy.Spider):
name = "movieCrawler"
allowed_domains = ['imdb.com']
start_urls = links
people_links = {}
detail_fields = ["Taglines:", "Country:", "Language:", "Budget:", "Cumulative Worldwide Gross:", "Production Co:"]
director_fields = ["Director:", "Writers:"]
illegalChars = {'<':'', '>':'', ':':'', '"':'', '/':' ', '\\':' ', '|':'', '?':'', '*':' '}
def parse(self,response):
movie = {}
self.people_links = {}
doNotSave = False
saveName = response.request.url.split('/')[4]
movie['Id'] = saveName
title = response.xpath('//div[@class="title_wrapper"]/h1/text()').extract_first()
if title!=None:
title = ' '.join(title.split())
'''
for key in self.illegalChars:
if key in title: title = title.replace(key,self.illegalChars[key])
'''
movie["Title"] = title
else: doNotSave=True
film_rating = response.xpath('//div[@class="subtext"]/text()').extract_first()
if film_rating!=None: movie["Film_rating"] = ' '.join(film_rating.split())
else: movie["Film_rating"] = 'NA'
duration = response.xpath('//div[@class="subtext"]/time/text()').extract_first()
if duration!=None: movie["Duration"] = ' '.join(duration.split())
else: movie["Duration"] = 'NA'
description = response.xpath('//div[@class="summary_text"]/text()').extract_first()
if description!=None: movie["Description"] = ' '.join(description.split())
else: movie["Description"] = "NA"
imdb_rating = response.xpath('//span[@itemprop="ratingValue"]/text()').extract_first()
if imdb_rating!=None: movie["IMDB_rating"] = ' '.join(imdb_rating.split())
else: doNotSave=True
rating_count = response.xpath('//span[@itemprop="ratingCount"]/text()').extract_first()
if rating_count!=None: movie["IMDB_rating_count"] = ' '.join(rating_count.split())
else: doNotSave=True
movie["Genre"], movie["release_date"] = self.getGenreReleaseDate(response.xpath('//div[@class="subtext"]/a'))
movie["Storyline"] = self.getStoryline(response.xpath('//div[@id="titleStoryLine"]/div[1]/p'))
directors = self.getDirectors(response.xpath('//div[@class="credit_summary_item"]'))
movie['Cast'] = self.getCastList(response.xpath('//table[@class="cast_list"]/tr'))
movie['Taglines'] = self.getTagline(response.xpath('//div[@class="txt-block"]'))
details = self.getDetails(response.xpath('//div[@id="titleDetails"]'))
for key in directors:
movie[key] = directors[key]
for key in details:
movie[key] = details[key]
movie['url'] = response.request.url
if not doNotSave and not os.path.isfile(movies_directory+saveName+".json"):
with open(movies_directory+saveName+".json", 'w') as f:
json.dump(movie, f)
'''
if not doNotSave and not os.path.isfile(links_directory+saveName+" people"+'.json'):
with open(links_directory+saveName+" people"+'.json', 'w') as f:
json.dump(self.people_links, f)
'''
for anchor in response.xpath('//div[@class="rec-title"]'):
url = "https://www.imdb.com" + anchor.xpath('./a/@href').extract_first()
if url!=None or url!="":
#time.sleep(0.1)
yield response.follow(url, callback=self.parse)
def getGenreReleaseDate(self,subtext):
vals = []
for text in subtext:
vals.append(text.xpath('./text()').extract_first())
if vals!=None: release_date = ' '.join(vals[-1].split())
else: release_date = "NA"
genre = []
if vals!=None:
for val in vals[:-1]:
for element in val.split():
genre.append(element)
return genre, release_date
def getDirectors(self,csis):
directors = {"Director:":[], "Writers:":[]}
for csi in csis:
field = csi.xpath('./h4/text()').extract_first()
if field==None:
continue
field = ' '.join(csi.xpath('./h4/text()').extract_first().split())
if field in self.director_fields:
lst = []
for val in csi.xpath('./a'):
person = ' '.join(val.xpath('./text()').extract_first().split())
if "credits" not in person and "credit" not in person:
lst.append(person)
self.people_links[person] = val.xpath('./@href').extract_first()
directors[field] = lst
return directors
def getCastList(self,casts):
cast_list = []
for row in casts:
link = row.xpath('./td[not(@*)]/a')
people = link.xpath('./text()').extract_first()
if people != None:
people = ' '.join(people.split())
if "credits" not in people and "credit" not in people:
cast_list.append(people)
self.people_links[people] = link.xpath('./@href').extract_first()
return cast_list
def getTagline(self,txts):
taglines = ""
for txt in txts:
text = txt.xpath('./h4/text()').extract_first()
if text==None:
continue
text = ' '.join(txt.xpath('./h4/text()').extract_first().split())
if text == "Taglines:":
taglines = ' '.join(txt.xpath('./text()').extract()[1].split())
return taglines
def getDetails(self,titleDetails):
details = {"Budget":"", "Revenue":""}
for detail in titleDetails.xpath('./div[@class="txt-block"]'):
text = detail.xpath('./h4/text()').extract_first()
if text==None:
continue
text = ' '.join(detail.xpath('./h4/text()').extract_first().split())
if text=="Country:":
countryList = []
for country in detail.xpath('./a'):
countryList.append(' '.join(country.xpath('./text()').extract_first().split()))
details["Country"] = countryList
if text=="Language:":
languageList = []
for language in detail.xpath('./a'):
languageList.append(' '.join(language.xpath('./text()').extract_first().split()))
details["Language"] = languageList
if text=="Budget:":
details["Budget"] = ' '.join(detail.xpath('./text()').extract()[1].split())
if text=="Cumulative Worldwide Gross:":
details["Revenue"] = ' '.join(detail.xpath('./text()').extract()[1].split())
return details
def getStoryline(self,tsl):
texts = tsl.xpath('./span/text()').extract()
storyline = ""
for text in texts:
text = ' '.join(text.split()).replace(" (", "").replace(")","")
storyline += text
return storyline | 41.854054 | 118 | 0.546687 | 826 | 7,743 | 5.039952 | 0.194915 | 0.055489 | 0.065338 | 0.047562 | 0.199375 | 0.121307 | 0.077828 | 0.055249 | 0.023541 | 0.023541 | 0 | 0.002865 | 0.278703 | 7,743 | 185 | 119 | 41.854054 | 0.742525 | 0.001937 | 0 | 0.052632 | 0 | 0 | 0.163515 | 0.059992 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046053 | false | 0.006579 | 0.032895 | 0 | 0.171053 | 0.006579 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
044e65053c5708bfb9b6629befce98fe72cc5f1b | 1,748 | py | Python | samples-python/logbook/logbook/app.py | bracoe/ctrlx-automation-sdk | 6b2e61e146c557488125baf941e4d64c6fa6d0fb | [
"MIT"
] | 16 | 2021-08-23T13:07:12.000Z | 2022-02-21T13:29:21.000Z | samples-python/logbook/logbook/app.py | bracoe/ctrlx-automation-sdk | 6b2e61e146c557488125baf941e4d64c6fa6d0fb | [
"MIT"
] | null | null | null | samples-python/logbook/logbook/app.py | bracoe/ctrlx-automation-sdk | 6b2e61e146c557488125baf941e4d64c6fa6d0fb | [
"MIT"
] | 10 | 2021-09-29T09:58:33.000Z | 2022-01-13T07:20:00.000Z | # MIT License
#
# Copyright (c) 2021 Bosch Rexroth AG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from systemd.journal import JournaldLogHandler
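# Attach a journald handler so the messages below end up in the system journal (and hence in the ctrlX logbook)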
def run():
print("Simple Snap for insert different loglevels with Python")
log = logging.getLogger()
log.setLevel(logging.DEBUG)
log.addHandler(JournaldLogHandler())
log.exception("I am an exception message")
log.critical("I am a critical message")
log.error("I am an error")
log.warning("I am a warning")
log.info("I am an info message")
log.debug("I am a debug message")
#<timestamp>|<userId>|<mainDiagnosisCode>|<mainTitle>|<detailedDiagnosisCode>|<detailedTitle>|<entity>|<dynamicSource>|<dynamicDescription>
| 42.634146 | 143 | 0.75 | 248 | 1,748 | 5.28629 | 0.540323 | 0.067124 | 0.011442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002768 | 0.173341 | 1,748 | 41 | 144 | 42.634146 | 0.904498 | 0.689931 | 0 | 0 | 0 | 0 | 0.325 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.230769 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
044fa9836068fd9e9ce4671f7fecacdfe22d4b05 | 4,585 | py | Python | SVC-project-with-EDA-&-Feature-Selecction/code.py | Sadique96645/ga-learner-dsmp-repo | 155f039083c22755b32c7dead39bfd86aff4c157 | [
"MIT"
] | null | null | null | SVC-project-with-EDA-&-Feature-Selecction/code.py | Sadique96645/ga-learner-dsmp-repo | 155f039083c22755b32c7dead39bfd86aff4c157 | [
"MIT"
] | null | null | null | SVC-project-with-EDA-&-Feature-Selecction/code.py | Sadique96645/ga-learner-dsmp-repo | 155f039083c22755b32c7dead39bfd86aff4c157 | [
"MIT"
] | 1 | 2020-09-02T04:25:13.000Z | 2020-09-02T04:25:13.000Z | # --------------
import pandas as pd
from collections import Counter
# Load dataset
data = pd.read_csv(path)
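# Check each column for missing values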
data.isnull().sum()
# --------------
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style(style='darkgrid')
# Store the label values
label = data.iloc[:,-1]
label.head(5)
sns.countplot(data=data,x='Activity')
# plot the countplot
# --------------
import numpy as np
# make the copy of dataset
data_copy = data.copy()
mask = ('WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS')
# Create an empty column
data_copy['duration'] = ''
# Calculate the duration
duration_df = data_copy.groupby([label.mask(label!= 'WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS'), 'subject'])['duration'].count() * 1.28
duration_df = pd.DataFrame(duration_df)
plot_data = duration_df.sort_values(by='duration',ascending= False)
plot_data.reset_index(inplace=True)
replaced_value = {'WALKING_UPSTAIRS':'Upstairs','WALKING_DOWNSTAIRS':'Downstairs'}
plot_data['Activity'] = plot_data['Activity'].map(replaced_value)
sns.barplot(data=plot_data,x='subject',y='duration')
# Sort the values of duration
# --------------
#exclude the Activity column and the subject column
feature_cols = data.select_dtypes(exclude=['object','int']).columns
#Calculate the correlation values
correlated_values = data[feature_cols].corr().stack().reset_index()
#stack the data and convert to a dataframe
correlated_values = pd.DataFrame(correlated_values)
correlated_values.rename(columns = {'level_0':'Feature_1','level_1':'Feature_2',0:'Correlation_score'},inplace=True)
#create an abs_correlation column
correlated_values['abs_correlation'] = correlated_values['Correlation_score'].abs()
#Picking most correlated features without having self correlated pairs
s_corr_list = correlated_values.sort_values(by='abs_correlation',ascending=False)
top_corr_fields = s_corr_list[(s_corr_list['abs_correlation']>0.8)]
top_corr_fields = top_corr_fields[(top_corr_fields['Feature_1'])!=(top_corr_fields['Feature_2'])]
print(top_corr_fields.head())
# --------------
# importing necessary libraries
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import precision_recall_fscore_support as error_metric
from sklearn.metrics import confusion_matrix, accuracy_score
# Encoding the target variable
le = LabelEncoder()
le.fit(data['Activity'])
data['Activity'] = le.transform(data['Activity'])
# split the dataset into train and test
X = data.drop('Activity',1)
y = data['Activity']
X_train, X_test, y_train ,y_test = train_test_split(X,y,test_size=0.3,random_state=40)
classifier = SVC()
clf = classifier.fit(X_train,y_train)
y_pred = clf.predict(X_test)
precision, recall, f_score, _ = error_metric(y_test,y_pred,average = 'weighted')
model1_score = accuracy_score(y_test,y_pred)
print(precision)
print(recall)
print(f_score)
print(model1_score)
# --------------
# importing libraries
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
# Feature selection using Linear SVC
lsvc = LinearSVC(C=0.01,penalty = 'l1',dual = False,random_state =42)
lsvc.fit(X_train,y_train)
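# SelectFromModel keeps only the features that received non-zero coefficients from the L1-penalised linear SVC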
model_2 = SelectFromModel(lsvc,prefit=True)
new_train_features= model_2.transform(X_train)
new_test_features = model_2.transform(X_test)
classifier_2 = SVC()
clf_2 = classifier_2.fit(new_train_features,y_train)
y_pred_new = clf_2.predict(new_test_features)
model2_score = accuracy_score(y_test,y_pred_new)
precision, recall, f_score, _ = error_metric(y_test,y_pred_new,average = 'weighted')
# model building on reduced set of features
# --------------
# Importing Libraries
from sklearn.model_selection import GridSearchCV
# Set the hyperparameters
parameters = {'kernel':['linear','rbf'],'C':[100, 20, 1, 0.1]}
# Use grid search to select the best hyperparameters
svc = SVC()
selector = GridSearchCV(svc,parameters,scoring='accuracy')
selector.fit(new_train_features,y_train)
print(selector.best_params_)
print(selector.cv_results_)
means = selector.cv_results_['mean_test_score']
stds = selector.cv_results_['std_test_score']
params = selector.cv_results_['params']
print(means,stds,params)
classifier_3 = SVC(C=20,kernel='rbf')
clf_3 = classifier_3.fit(new_train_features,y_train)
y_pred_final = clf_3.predict(new_test_features)
model3_score = accuracy_score(y_test,y_pred_final)
precision,recall,f_score,_ = error_metric(y_test,y_pred_final,average='weighted')
print(precision)
print(recall)
print(f_score)
print(model3_score)
# Model building after Hyperparameter tuning
| 31.190476 | 133 | 0.770556 | 665 | 4,585 | 5.049624 | 0.285714 | 0.013401 | 0.023228 | 0.017868 | 0.142644 | 0.096188 | 0.077129 | 0.052114 | 0.026206 | 0.026206 | 0 | 0.012274 | 0.093784 | 4,585 | 146 | 134 | 31.40411 | 0.795909 | 0.178844 | 0 | 0.04878 | 0 | 0 | 0.124397 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.158537 | 0 | 0.158537 | 0.146341 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04521b79fa9e62c6a912d3c7017abfc3f6883ad5 | 8,354 | py | Python | recipes/Python/580640_Scrolling_frame_mouse_wheel/recipe-580640.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/580640_Scrolling_frame_mouse_wheel/recipe-580640.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/580640_Scrolling_frame_mouse_wheel/recipe-580640.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | # Version: 0.22
# Author: Miguel Martinez Lopez
# Uncomment the next line to see my email
# print("Author's email: ", "61706c69636163696f6e616d656469646140676d61696c2e636f6d".decode("hex"))
try:
from Tkinter import Canvas, Frame
from ttk import Scrollbar
from Tkconstants import *
except ImportError:
from tkinter import Canvas, Frame
from tkinter.ttk import Scrollbar
from tkinter.constants import *
import platform
OS = platform.system()
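# The mouse wheel event names differ between Linux (<4>/<5>) and Windows/macOS (<MouseWheel>), so remember the platform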
class Mousewheel_Support(object):
# implementation of the singleton pattern
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self, root, horizontal_factor = 2, vertical_factor=2):
self._active_area = None
if isinstance(horizontal_factor, int):
self.horizontal_factor = horizontal_factor
else:
raise Exception("Vertical factor must be an integer.")
if isinstance(vertical_factor, int):
self.vertical_factor = vertical_factor
else:
raise Exception("Horizontal factor must be an integer.")
if OS == "Linux" :
root.bind_all('<4>', self._on_mousewheel, add='+')
root.bind_all('<5>', self._on_mousewheel, add='+')
else:
# Windows and MacOS
root.bind_all("<MouseWheel>", self._on_mousewheel, add='+')
def _on_mousewheel(self,event):
if self._active_area:
self._active_area.onMouseWheel(event)
def _mousewheel_bind(self, widget):
self._active_area = widget
def _mousewheel_unbind(self):
self._active_area = None
def add_support_to(self, widget=None, xscrollbar=None, yscrollbar=None, what="units", horizontal_factor=None, vertical_factor=None):
if xscrollbar is None and yscrollbar is None:
return
if xscrollbar is not None:
horizontal_factor = horizontal_factor or self.horizontal_factor
xscrollbar.onMouseWheel = self._make_mouse_wheel_handler(widget,'x', self.horizontal_factor, what)
xscrollbar.bind('<Enter>', lambda event, scrollbar=xscrollbar: self._mousewheel_bind(scrollbar) )
xscrollbar.bind('<Leave>', lambda event: self._mousewheel_unbind())
if yscrollbar is not None:
vertical_factor = vertical_factor or self.vertical_factor
yscrollbar.onMouseWheel = self._make_mouse_wheel_handler(widget,'y', self.vertical_factor, what)
yscrollbar.bind('<Enter>', lambda event, scrollbar=yscrollbar: self._mousewheel_bind(scrollbar) )
yscrollbar.bind('<Leave>', lambda event: self._mousewheel_unbind())
main_scrollbar = yscrollbar if yscrollbar is not None else xscrollbar
if widget is not None:
if isinstance(widget, list) or isinstance(widget, tuple):
list_of_widgets = widget
for widget in list_of_widgets:
widget.bind('<Enter>',lambda event: self._mousewheel_bind(widget))
widget.bind('<Leave>', lambda event: self._mousewheel_unbind())
widget.onMouseWheel = main_scrollbar.onMouseWheel
else:
widget.bind('<Enter>',lambda event: self._mousewheel_bind(widget))
widget.bind('<Leave>', lambda event: self._mousewheel_unbind())
widget.onMouseWheel = main_scrollbar.onMouseWheel
@staticmethod
def _make_mouse_wheel_handler(widget, orient, factor = 1, what="units"):
view_command = getattr(widget, orient+'view')
if OS == 'Linux':
def onMouseWheel(event):
if event.num == 4:
view_command("scroll",(-1)*factor, what)
elif event.num == 5:
view_command("scroll",factor, what)
elif OS == 'Windows':
def onMouseWheel(event):
view_command("scroll",(-1)*int((event.delta/120)*factor), what)
elif OS == 'Darwin':
def onMouseWheel(event):
view_command("scroll",event.delta, what)
return onMouseWheel
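# A Frame wrapping a Canvas with optional scrollbars and mouse wheel support; place widgets inside .innerframe and call update_viewport() after changing its contents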
class Scrolling_Area(Frame, object):
def __init__(self, master, width=None, anchor=N, height=None, mousewheel_speed = 2, scroll_horizontally=True, xscrollbar=None, scroll_vertically=True, yscrollbar=None, background=None, inner_frame=Frame, **kw):
Frame.__init__(self, master, class_="Scrolling_Area", background=background)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self._width = width
self._height = height
self.canvas = Canvas(self, background=background, highlightthickness=0, width=width, height=height)
self.canvas.grid(row=0, column=0, sticky=N+E+W+S)
if scroll_vertically:
if yscrollbar is not None:
self.yscrollbar = yscrollbar
else:
self.yscrollbar = Scrollbar(self, orient=VERTICAL)
self.yscrollbar.grid(row=0, column=1,sticky=N+S)
self.canvas.configure(yscrollcommand=self.yscrollbar.set)
self.yscrollbar['command']=self.canvas.yview
else:
self.yscrollbar = None
if scroll_horizontally:
if xscrollbar is not None:
self.xscrollbar = xscrollbar
else:
self.xscrollbar = Scrollbar(self, orient=HORIZONTAL)
self.xscrollbar.grid(row=1, column=0, sticky=E+W)
self.canvas.configure(xscrollcommand=self.xscrollbar.set)
self.xscrollbar['command']=self.canvas.xview
else:
self.xscrollbar = None
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.innerframe = inner_frame(self.canvas, **kw)
self.innerframe.pack(anchor=anchor)
self.canvas.create_window(0, 0, window=self.innerframe, anchor='nw', tags="inner_frame")
self.canvas.bind('<Configure>', self._on_canvas_configure)
Mousewheel_Support(self).add_support_to(self.canvas, xscrollbar=self.xscrollbar, yscrollbar=self.yscrollbar)
@property
def width(self):
return self.canvas.winfo_width()
@width.setter
def width(self, width):
self.canvas.configure(width= width)
@property
def height(self):
return self.canvas.winfo_height()
@height.setter
def height(self, height):
self.canvas.configure(height = height)
def set_size(self, width, height):
self.canvas.configure(width=width, height = height)
def _on_canvas_configure(self, event):
width = max(self.innerframe.winfo_reqwidth(), event.width)
height = max(self.innerframe.winfo_reqheight(), event.height)
self.canvas.configure(scrollregion="0 0 %s %s" % (width, height))
self.canvas.itemconfigure("inner_frame", width=width, height=height)
def update_viewport(self):
self.update()
window_width = self.innerframe.winfo_reqwidth()
window_height = self.innerframe.winfo_reqheight()
if self._width is None:
canvas_width = window_width
else:
canvas_width = min(self._width, window_width)
if self._height is None:
canvas_height = window_height
else:
canvas_height = min(self._height, window_height)
self.canvas.configure(scrollregion="0 0 %s %s" % (window_width, window_height), width=canvas_width, height=canvas_height)
self.canvas.itemconfigure("inner_frame", width=window_width, height=window_height)
if __name__== '__main__':
try:
from Tkinter import Tk, Label
except ImportError:
from tkinter import Tk, Label
root = Tk()
scrolling_area = Scrolling_Area(root)
scrolling_area.pack(expand=1, fill=BOTH)
for i in range(20):
rowFrame = Frame(scrolling_area.innerframe)
rowFrame.pack()
for j in range(8):
Label(rowFrame, text="Label %s, %s" % (str(i), str(j))).pack(side="left")
root.mainloop()
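# A minimal sketch (not part of the original demo) of the optional constructor
# arguments accepted above; the sizes are illustrative:
#
#     area = Scrolling_Area(root, width=400, height=300, scroll_horizontally=False)
#     Label(area.innerframe, text="content").pack()
#     area.update_viewport()  # refresh the scrollregion after adding children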
| 36.480349 | 214 | 0.627484 | 936 | 8,354 | 5.411325 | 0.194444 | 0.037512 | 0.025271 | 0.029615 | 0.259625 | 0.157552 | 0.121224 | 0.071076 | 0.071076 | 0.054886 | 0 | 0.014126 | 0.271247 | 8,354 | 228 | 215 | 36.640351 | 0.817838 | 0.028011 | 0 | 0.191358 | 0 | 0 | 0.040429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.067901 | 0.012346 | 0.228395 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
045b11de118c2d3ad2b59923763b0a8d674e2677 | 2,231 | py | Python | examples/realtime_1kHz_scope.py | benjie-git/pyFirmata2 | b5c0cbd4822db2906b328a989709dd799b1aed3b | [
"MIT"
] | 33 | 2018-11-04T04:03:22.000Z | 2022-03-22T00:13:15.000Z | examples/realtime_1kHz_scope.py | benjie-git/pyFirmata2 | b5c0cbd4822db2906b328a989709dd799b1aed3b | [
"MIT"
] | 8 | 2021-03-06T23:11:10.000Z | 2022-03-17T08:10:14.000Z | examples/realtime_1kHz_scope.py | benjie-git/pyFirmata2 | b5c0cbd4822db2906b328a989709dd799b1aed3b | [
"MIT"
] | 20 | 2018-12-04T07:34:04.000Z | 2021-10-01T15:50:05.000Z | #!/usr/bin/python3
"""
Plots channel zero at 1kHz. Requires pyqtgraph.
Copyright (c) 2018-2021, Bernd Porr <mail@berndporr.me.uk>
see LICENSE file.
"""
import sys
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
from pyfirmata2 import Arduino
PORT = Arduino.AUTODETECT
# create a global QT application object
app = QtGui.QApplication(sys.argv)
# signals to all threads in endless loops that we'd like to run these
running = True
class QtPanningPlot:
def __init__(self,title):
self.win = pg.GraphicsLayoutWidget()
self.win.setWindowTitle(title)
self.plt = self.win.addPlot()
self.plt.setYRange(-1,1)
self.plt.setXRange(0,500)
self.curve = self.plt.plot()
self.data = []
        # any additional initialisation code goes here (filters etc)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update)
self.timer.start(100)
self.layout = QtGui.QGridLayout()
self.win.setLayout(self.layout)
self.win.show()
def update(self):
self.data=self.data[-500:]
if self.data:
self.curve.setData(np.hstack(self.data))
def addData(self,d):
self.data.append(d)
# Let's create an instance of a plot window
qtPanningPlot1 = QtPanningPlot("Arduino 1st channel")
# sampling rate: 1kHz
samplingRate = 1000
# called for every new sample at channel 0 which has arrived from the Arduino
# "data" contains the new sample
def callBack(data):
# filter your channel 0 samples here:
# data = self.filter_of_channel0.dofilter(data)
# send the sample to the plotwindow
qtPanningPlot1.addData(data)
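# A minimal sketch of the optional smoothing hinted at in the comments inside
# callBack above: a causal moving-average filter. The class name and window
# length are illustrative assumptions, not pyFirmata2 API; to use it, call
# data = channel0_filter.dofilter(data) at the top of callBack.
class MovingAverageFilter:
    def __init__(self, window=10):
        self.window = window
        self.buffer = []
    def dofilter(self, sample):
        # keep only the most recent `window` samples and return their mean
        self.buffer.append(sample)
        self.buffer = self.buffer[-self.window:]
        return sum(self.buffer) / len(self.buffer)
channel0_filter = MovingAverageFilter(window=10)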
# Get the Arduino board.
board = Arduino(PORT)
# Set the sampling rate in the Arduino
board.samplingOn(1000 / samplingRate)
# Register the callback which adds the data to the animated plot
# The function "callBack" (see above) is called when data has
# arrived on channel 0.
board.analog[0].register_callback(callBack)
# Enable the callback
board.analog[0].enable_reporting()
board.analog[1].enable_reporting()
# showing all the windows
app.exec_()
# needs to be called to close the serial port
board.exit()
print("Finished")
| 25.352273 | 77 | 0.699686 | 312 | 2,231 | 4.971154 | 0.509615 | 0.030948 | 0.015474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024775 | 0.203944 | 2,231 | 87 | 78 | 25.643678 | 0.848536 | 0.39758 | 0 | 0 | 0 | 0 | 0.020501 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.121951 | 0 | 0.243902 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
045ecad18e64ea527f95697217787aaa821d67c1 | 5,684 | py | Python | timeutil.py | tanupoo/timeutil | 9ee236a680db323f416c7af151a257039936ee08 | [
"MIT"
] | null | null | null | timeutil.py | tanupoo/timeutil | 9ee236a680db323f416c7af151a257039936ee08 | [
"MIT"
] | null | null | null | timeutil.py | tanupoo/timeutil | 9ee236a680db323f416c7af151a257039936ee08 | [
"MIT"
] | 1 | 2019-06-24T17:27:56.000Z | 2019-06-24T17:27:56.000Z | #!/usr/bin/env python
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from os import environ
from datetime_util import *
default_tzname = environ.get("TZ", "GMT")
usage = '''
%(prog)s [options] STR
This command converts STR into the format specified.
STR is a datetime string.
%(prog)s [options] STR1 STR2
This command shows the difference of time between STR1 and STR2.
STR1 is a datetime string as same as STR2.
"epoch" is acceptable as STR2, which means 1970-01-01T00:00:00
%(prog)s [options] STR1 (+|-|/|x) STR2
This command shows the result where the operand is adopted.
STR1 is a datetime string. STR2 is a string, which is the arguments
of the timedelta object in python. The STR2 format is like below:
days[,seconds[,microseconds[,milliseconds[,minutes[,hours[,weeks]]]]]]
'''
desc = '''
description:
    yet another kitchen knife for datetime.
if STR is "now", current time is used.
"opt" of the -m option may be used to specify the output format.
It is one of the following string.
iso: iso8601. (default)
ctime: ctime(3). e.g. Sat Jul 29 16:37:02 JST 2017
day: days from 1970-01-01T00:00:00 (epoch).
hour: hours from epoch.
min: minutes from epoch.
sec: seconds from epoch.
        msec: milliseconds from epoch.
e.g. 1546919546426 if datetime object is like below:
datetime.datetime(2019, 1, 8, 12, 52, 26, 426765)
usec: seconds with microseconds from epoch.
e.g. 1546919546.426765 if datetime object is like below:
datetime.datetime(2019, 1, 8, 12, 52, 26, 426765)
hex: microseconds in a hex string of the big endian.
if an operand is not specified, "-" is used.
e.g.
'''
ap = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description=desc,
usage=usage,
epilog="")
ap.add_argument("args", metavar="ARGs [...]", type=str, nargs="*",
help="a datetime string such as iso8601, ctime,"
" timestamp, etc.")
ap.add_argument("-m", action="store", dest="output_format",
help="specify the output format. default is 'iso'.")
ap.add_argument("-r",
action="store_true", dest="output_rounded",
help="specify to round the output.")
ap.add_argument("--input-tz", action="store", dest="input_tzname",
help="specify the timezone name for the input string"
" in case the datetime string doesn't have any timezone."
f" default is {default_tzname}")
ap.add_argument("--replace-tz", action="store_true", dest="replace_tz",
help="replace the timezone in the input string"
" into the one specified by --input-tz"
" even when the datetime has a timezone.")
ap.add_argument("--output-tz", action="store", dest="output_tzname",
help="specify the timezone to show."
" default is same as the one specified by"
" the --input-tz option")
ap.add_argument("-v", action="store_true", dest="verbose",
help="enable verbose mode.")
opt = ap.parse_args()
if opt.output_tzname is None:
opt.output_tzname = opt.input_tzname
#
if opt.verbose:
print("Input Timezone:", opt.input_tzname)
print("Output Timezone:", opt.output_tzname)
print("Replace Timezone:", opt.replace_tz)
# conversion
if len(opt.args) == 1:
#
if opt.output_format is None:
opt.output_format = "iso"
dt1 = datestr_to_datetime(opt.args[0], default_tzname=opt.input_tzname,
replace_tz=opt.replace_tz)
if opt.verbose:
print("STR1:", dt1)
result = datetime_to_datestr(dt1, output_form=opt.output_format,
output_rounded=opt.output_rounded,
output_tzname=opt.output_tzname)
elif len(opt.args) == 2:
#
if opt.output_format is None:
opt.output_format = "sec"
dt1 = datestr_to_datetime(opt.args[0], default_tzname=opt.input_tzname,
replace_tz=opt.replace_tz)
if opt.args[1] in ["epoch", "EPOCH"]:
arg2 = "1970-01-01T00:00:00"
else:
arg2 = opt.args[1]
dt2 = datestr_to_datetime(arg2, default_tzname=opt.input_tzname,
replace_tz=opt.replace_tz)
if opt.verbose:
print("STR1:", dt1)
print("STR2:", dt2)
result = timedelta_to_datestr(dt1 - dt2, output_form=opt.output_format,
output_tzname=opt.output_tzname)
elif len(opt.args) == 3:
#
if opt.output_format is None:
opt.output_format = "iso"
dt1 = datestr_to_datetime(opt.args[0], default_tzname=opt.input_tzname,
replace_tz=opt.replace_tz)
op = opt.args[1]
time_delta = datestr_to_timedelta(opt.args[2])
if opt.verbose:
print("STR1:", dt1)
print("STR2:", time_delta)
if op == "+":
result = datetime_to_datestr(dt1 + time_delta,
output_form=opt.output_format,
output_rounded=opt.output_rounded,
output_tzname=opt.output_tzname)
elif op == "-":
result = datetime_to_datestr(dt1 - time_delta,
output_form=opt.output_format,
output_rounded=opt.output_rounded,
output_tzname=opt.output_tzname)
else:
ap.print_help()
exit(1)
else:
ap.print_help()
exit(1)
#
print(result)
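# Illustrative invocations of the three modes documented in `usage` above;
# actual output depends on the TZ environment variable and on the helpers
# imported from datetime_util:
#
#     timeutil.py now
#     timeutil.py 2019-01-08T12:52:26 epoch          # seconds since the epoch
#     timeutil.py -m iso 2019-01-08T12:52:26 + 1,0,0,0,30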
| 37.394737 | 76 | 0.603976 | 738 | 5,684 | 4.52439 | 0.249322 | 0.053908 | 0.044924 | 0.029949 | 0.382749 | 0.315663 | 0.296196 | 0.296196 | 0.283618 | 0.256963 | 0 | 0.042779 | 0.288529 | 5,684 | 151 | 77 | 37.642384 | 0.782888 | 0.005454 | 0 | 0.307087 | 0 | 0 | 0.411442 | 0.020546 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.023622 | 0 | 0.023622 | 0.086614 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f086d4ed3018b4e65a14abd8edc1f77c15caba1d | 545 | py | Python | alien_invasion/game_stats.py | faizkhan12/Alien_invasion | fe1b72aabb40ae4b2e61c5d31936f65709df425c | [
"MIT"
] | 4 | 2018-07-15T17:53:19.000Z | 2021-07-30T12:40:47.000Z | alien_invasion/game_stats.py | faizkhan12/Alien_invasion | fe1b72aabb40ae4b2e61c5d31936f65709df425c | [
"MIT"
] | null | null | null | alien_invasion/game_stats.py | faizkhan12/Alien_invasion | fe1b72aabb40ae4b2e61c5d31936f65709df425c | [
"MIT"
] | 1 | 2018-07-15T17:55:05.000Z | 2018-07-15T17:55:05.000Z | class GameStats():
"""Track statistics for Alien Invasion"""
def __init__(self,ai_settings):
"""Initialise statistics"""
self.ai_settings=ai_settings
self.reset_stats()
        # High score should never be reset
self.high_score=0
        # Start Alien Invasion in an inactive state
self.game_active=False
def reset_stats(self):
"""Initialise statistics that can change during the game"""
self.ship_remaining=self.ai_settings.ship_limit
self.score=0
self.level=1
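# A minimal usage sketch (ai_settings is expected to expose ship_limit, as
# reset_stats above implies):
#
#     stats = GameStats(ai_settings)
#     stats.reset_stats()
#     if stats.game_active: ...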
| 28.684211 | 67 | 0.649541 | 69 | 545 | 4.927536 | 0.565217 | 0.117647 | 0.123529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007481 | 0.26422 | 545 | 18 | 68 | 30.277778 | 0.840399 | 0.33578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f089d2d0cb1571a790213ee4285268f680d729e0 | 3,448 | py | Python | api/licences/tests/test_register_open_general_licence.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 3 | 2019-05-15T09:30:39.000Z | 2020-04-22T16:14:23.000Z | api/licences/tests/test_register_open_general_licence.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 85 | 2019-04-24T10:39:35.000Z | 2022-03-21T14:52:12.000Z | api/licences/tests/test_register_open_general_licence.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 1 | 2021-01-17T11:12:19.000Z | 2021-01-17T11:12:19.000Z | from django.urls import reverse
from django.utils import timezone
from parameterized import parameterized
from rest_framework import status
from api.cases.enums import CaseTypeEnum
from api.cases.models import CaseType
from api.licences.enums import LicenceStatus
from api.licences.models import Licence
from api.open_general_licences.enums import OpenGeneralLicenceStatus
from api.open_general_licences.models import OpenGeneralLicenceCase
from api.open_general_licences.tests.factories import OpenGeneralLicenceFactory, OpenGeneralLicenceCaseFactory
from test_helpers.clients import DataTestClient
class RegisterOpenGeneralLicenceTests(DataTestClient):
def setUp(self):
super().setUp()
self.url = reverse("licences:open_general_licences")
self.open_general_licence = OpenGeneralLicenceFactory(case_type=CaseType.objects.get(id=CaseTypeEnum.OGTCL.id))
self.exporter_user.set_role(self.organisation, self.exporter_super_user_role)
def test_register_open_general_licence_success(self):
data = {
"open_general_licence": str(self.open_general_licence.id),
}
response = self.client.post(self.url, data, **self.exporter_headers)
response_data = response.json()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqualIgnoreType(response_data["open_general_licence"], self.open_general_licence.id)
self.assertEqual(response_data["registrations"], [str(OpenGeneralLicenceCase.objects.get().id)])
self.assertEqual(OpenGeneralLicenceCase.objects.count(), 1)
ogl_case = OpenGeneralLicenceCase.objects.get()
self.assertTrue(
Licence.objects.filter(
reference_code=ogl_case.reference_code,
case=ogl_case,
status=LicenceStatus.ISSUED,
start_date=timezone.now().date(),
duration__isnull=False,
).exists()
)
@parameterized.expand(
[
("status", OpenGeneralLicenceStatus.DEACTIVATED), # Can't register deactivated OGLs
("registration_required", False), # Can't register OGLs that don't require registration
]
)
def test_register_open_general_licence_failure(self, param, value):
setattr(self.open_general_licence, param, value)
self.open_general_licence.save()
data = {
"open_general_licence": str(self.open_general_licence.id),
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(OpenGeneralLicenceCase.objects.count(), 0)
def test_register_existing_open_general_licence_does_nothing(self):
OpenGeneralLicenceCaseFactory(
open_general_licence=self.open_general_licence,
site=self.organisation.primary_site,
organisation=self.organisation,
)
data = {
"open_general_licence": str(self.open_general_licence.id),
}
response = self.client.post(self.url, data, **self.exporter_headers)
response_data = response.json()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqualIgnoreType(response_data["open_general_licence"], self.open_general_licence.id)
self.assertEqual(OpenGeneralLicenceCase.objects.count(), 1)
| 42.04878 | 119 | 0.718387 | 373 | 3,448 | 6.396783 | 0.27882 | 0.101425 | 0.135792 | 0.082984 | 0.422464 | 0.369237 | 0.341576 | 0.270327 | 0.270327 | 0.270327 | 0 | 0.004324 | 0.195186 | 3,448 | 81 | 120 | 42.567901 | 0.855496 | 0.024072 | 0 | 0.253731 | 0 | 0 | 0.050565 | 0.01517 | 0 | 0 | 0 | 0 | 0.149254 | 1 | 0.059701 | false | 0 | 0.179104 | 0 | 0.253731 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f08a350375764531781d37f668e6c988dd05358e | 1,530 | py | Python | tests/unit/dataactvalidator/test_c4_award_financial_2.py | brianherman/data-act-broker-backend | 80eb055b9d245046192f7ad4fd0be7d0e11d2dec | [
"CC0-1.0"
] | null | null | null | tests/unit/dataactvalidator/test_c4_award_financial_2.py | brianherman/data-act-broker-backend | 80eb055b9d245046192f7ad4fd0be7d0e11d2dec | [
"CC0-1.0"
] | null | null | null | tests/unit/dataactvalidator/test_c4_award_financial_2.py | brianherman/data-act-broker-backend | 80eb055b9d245046192f7ad4fd0be7d0e11d2dec | [
"CC0-1.0"
] | 1 | 2020-07-17T23:50:56.000Z | 2020-07-17T23:50:56.000Z | from tests.unit.dataactcore.factories.staging import AwardFinancialFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'c4_award_financial_2'
def test_column_headers(database):
expected_subset = {'row_number', 'obligations_delivered_orde_fyb', 'ussgl490100_delivered_orde_fyb', 'difference',
'uniqueid_TAS', 'uniqueid_PIID', 'uniqueid_FAIN', 'uniqueid_URI'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_success(database):
""" ObligationsDeliveredOrdersUnpaidTotal in File C = USSGL 4901 + 4981 in File C for the same date context
(FYB)
"""
af = AwardFinancialFactory(obligations_delivered_orde_fyb=None, ussgl490100_delivered_orde_fyb=None)
assert number_of_errors(_FILE, database, models=[af]) == 0
af = AwardFinancialFactory(obligations_delivered_orde_fyb=1, ussgl490100_delivered_orde_fyb=1)
assert number_of_errors(_FILE, database, models=[af]) == 0
def test_failure(database):
""" ObligationsDeliveredOrdersUnpaidTotal in File C = USSGL 4901 + 4981 in File C for the same date context
(FYB)
"""
af = AwardFinancialFactory(obligations_delivered_orde_fyb=1, ussgl490100_delivered_orde_fyb=None)
assert number_of_errors(_FILE, database, models=[af]) == 1
af = AwardFinancialFactory(obligations_delivered_orde_fyb=1, ussgl490100_delivered_orde_fyb=2)
assert number_of_errors(_FILE, database, models=[af]) == 1
| 37.317073 | 118 | 0.75817 | 186 | 1,530 | 5.897849 | 0.322581 | 0.118505 | 0.145852 | 0.123063 | 0.615314 | 0.615314 | 0.615314 | 0.615314 | 0.615314 | 0.538742 | 0 | 0.043948 | 0.152288 | 1,530 | 40 | 119 | 38.25 | 0.80185 | 0.143137 | 0 | 0.222222 | 0 | 0 | 0.117925 | 0.04717 | 0 | 0 | 0 | 0 | 0.277778 | 1 | 0.166667 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f08d564fadd2e70edb8c834bad59039c008f1ce7 | 988 | py | Python | main.py | sebastianpikand/docker_demo | 5c836547c17485835fdda492dc8978c838eb1b40 | [
"MIT"
] | null | null | null | main.py | sebastianpikand/docker_demo | 5c836547c17485835fdda492dc8978c838eb1b40 | [
"MIT"
] | null | null | null | main.py | sebastianpikand/docker_demo | 5c836547c17485835fdda492dc8978c838eb1b40 | [
"MIT"
] | null | null | null | from sp_calculator import Calculator
def main():
calc = Calculator()
allowed_operations = ['*','/','+','-', 'nth_root', 'reset']
while True:
operation = input('Choose an operation [*,/,+,-, nth_root, reset] or [q] for quitting: ')
if operation == 'q':
break
if (operation in allowed_operations):
if (operation != 'reset'):
value = input('Choose a value (int or float): ')
match operation:
case '*':
res = calc.multiply(value)
print(res)
case '/':
res = calc.divide(value)
print(res)
case '+':
res = calc.add(value)
print(res)
case '-':
res = calc.subtract(value)
print(res)
case 'nth_root':
res = calc.nth_root(value)
print(res)
else:
res = calc.reset()
print(res)
if __name__ == '__main__':
main() | 23.52381 | 93 | 0.474696 | 98 | 988 | 4.632653 | 0.397959 | 0.092511 | 0.143172 | 0.14978 | 0.15859 | 0.15859 | 0 | 0 | 0 | 0 | 0 | 0 | 0.38664 | 988 | 42 | 94 | 23.52381 | 0.749175 | 0 | 0 | 0.1875 | 0 | 0 | 0.143579 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.03125 | 0 | 0.0625 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f08f5526732f9aed9c895a04ad39d7a0ecc751df | 6,877 | py | Python | Work/Codes/DataAugmentation/main.py | jeevanpuchakay/BTP | 33f372bda2859a9b41207766d6e1bc043c6b2aeb | [
"Apache-2.0"
] | null | null | null | Work/Codes/DataAugmentation/main.py | jeevanpuchakay/BTP | 33f372bda2859a9b41207766d6e1bc043c6b2aeb | [
"Apache-2.0"
] | null | null | null | Work/Codes/DataAugmentation/main.py | jeevanpuchakay/BTP | 33f372bda2859a9b41207766d6e1bc043c6b2aeb | [
"Apache-2.0"
] | null | null | null | import os
import json
from PIL import Image
from pathlib import Path
import torch
from torchvision import transforms
def rotate_images(input_folder, output_folder, rotation_angle, old_annotations, new_annotations):
    failed_files = []
    for file_name in os.listdir(input_folder):
        if not file_name.endswith(".jpg"):
            continue
        img = Image.open(input_folder + file_name)
        width, height = img.width, img.height
        rotated_image = img.rotate(rotation_angle, expand=False)
        rotated_image.save(output_folder + file_name)
        x_center, y_center = width / 2, height / 2
        try:
            old_annotation = old_annotations["imgs"][file_name.split('.')[0]]
            # Shallow copy so clearing "objects" below does not also wipe the
            # list we still need to iterate over.
            new_annotation = dict(old_annotation)
            new_annotation["objects"] = []
            for obj in old_annotation["objects"]:
                bbox = obj["bbox"]
                # Translate so the image centre is the origin.
                bbox["xmin"] -= x_center
                bbox["ymin"] -= y_center
                bbox["xmax"] -= x_center
                bbox["ymax"] -= y_center
                # Rotate the corner coordinates around the origin.
                if rotation_angle == -90:
                    bbox["xmin"], bbox["ymin"] = bbox["ymin"], -bbox["xmin"]
                    bbox["xmax"], bbox["ymax"] = bbox["ymax"], -bbox["xmax"]
                elif rotation_angle == -180:
                    bbox["xmin"], bbox["ymin"] = -bbox["xmin"], -bbox["ymin"]
                    bbox["xmax"], bbox["ymax"] = -bbox["xmax"], -bbox["ymax"]
                elif rotation_angle == -270:
                    # Inverse of the -90 case (the original code repeated the
                    # -90 mapping here).
                    bbox["xmin"], bbox["ymin"] = -bbox["ymin"], bbox["xmin"]
                    bbox["xmax"], bbox["ymax"] = -bbox["ymax"], bbox["xmax"]
                # Translate back to image coordinates.
                bbox["xmin"], bbox["ymin"] = bbox["xmin"] + x_center, bbox["ymin"] + y_center
                bbox["xmax"], bbox["ymax"] = bbox["xmax"] + x_center, bbox["ymax"] + y_center
                new_annotation["objects"].append(obj)
            new_annotations["imgs"][file_name.split('.')[0]] = new_annotation
        except KeyError:
            print(file_name + " File details not found in source annotation.")
            failed_files.append(file_name)
    return new_annotations, failed_files
def change_brightness(new_brightness_level, input_folder, output_folder):
for file_name in os.listdir(input_folder):
if not file_name.endswith(".jpg"):
continue
img = Image.open(input_folder + file_name)
new_image = transforms.ColorJitter(brightness=new_brightness_level)(img)
new_image.save(output_folder + file_name)
return
def change_contrast(new_contrast, input_folder, output_folder):
for file_name in os.listdir(input_folder):
if not file_name.endswith(".jpg"):
continue
img = Image.open(input_folder + file_name)
new_image = transforms.ColorJitter(contrast=new_contrast)(img)
new_image.save(output_folder + file_name)
return
def change_hue(new_hue, input_folder, output_folder):
for file_name in os.listdir(input_folder):
if not file_name.endswith(".jpg"):
continue
img = Image.open(input_folder + file_name)
new_image = transforms.ColorJitter(hue=new_hue)(img)
new_image.save(output_folder + file_name)
return
def resize_pictures(new_size, input_folder, output_folder):
for file_name in os.listdir(input_folder):
if not file_name.endswith(".jpg"):
continue
img = Image.open(input_folder + file_name)
resized_image = img.resize(new_size)
resized_image.save(output_folder + file_name)
def read_json_file(file_path):
with open(file_path, "r") as file:
return json.load(file)
def makedir(path):
try:
path = Path(path)
path.mkdir(parents=True)
print("Directory created")
except FileExistsError as e:
print("Output directory already exists.")
def write_to_json(new_annotations, new_annotation_path):
with open(new_annotation_path, 'w') as f:
f.write(json.dumps(new_annotations))
def write_to_txt(array, txt_file_path):
with open(txt_file_path, 'w') as f:
json.dump(array, f)
if __name__ == "__main__":
base_path = "/mnt/g/Drive/BTP/TSD"
input_folder = base_path + "/tt100k_2021/TSD/"
dataset_name = "tt100k_2021_hu_0_4"
output_folder = base_path + "/HueVariations/" + dataset_name + "/TSD/"
annotations_file_path = base_path + "/tt100k_2021/annotations_all.json"
new_annotations_file_path = base_path + "/HueVariations/" + dataset_name + "/annotations_all.json"
failed_files_list_path = base_path + "/HueVariations/" + dataset_name + "/failed_files.txt"
old_annotations = read_json_file(annotations_file_path)
makedir(output_folder)
new_annotations = {"types": old_annotations["types"], "imgs": {}}
# resize_pictures((1024,1024),input_folder=input_folder, output_folder=output_folder)
change_hue(new_hue=0.4, input_folder=input_folder, output_folder=output_folder)
# change_contrast(new_contrast=2.5, input_folder=input_folder, output_folder=output_folder)
# change_brightness(new_brightness_level=3.5, input_folder=input_folder, output_folder=output_folder)
# new_annotations, failed_files = rotate_images(input_folder, output_folder, -180, old_annotations, new_annotations)
# write_to_json(new_annotations, new_annotations_file_path)
# print(failed_files)
# write_to_txt(failed_files, failed_files_list_path)
| 41.932927 | 120 | 0.610295 | 827 | 6,877 | 4.783555 | 0.14994 | 0.052578 | 0.063701 | 0.05814 | 0.64636 | 0.607937 | 0.536148 | 0.536148 | 0.526289 | 0.465875 | 0 | 0.01068 | 0.264796 | 6,877 | 163 | 121 | 42.190184 | 0.771756 | 0.080413 | 0 | 0.536 | 0 | 0 | 0.102138 | 0.008551 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072 | false | 0 | 0.048 | 0 | 0.16 | 0.024 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f091ec957e06a07d8edc7a3b7208748d886e4532 | 3,148 | py | Python | Filters/Points/Testing/Python/TestPointInterpolator2D.py | satya-arjunan/vtk8 | ee7ced57de6d382a2d12693c01e2fcdac350b25f | [
"BSD-3-Clause"
] | 1,755 | 2015-01-03T06:55:00.000Z | 2022-03-29T05:23:26.000Z | Filters/Points/Testing/Python/TestPointInterpolator2D.py | satya-arjunan/vtk8 | ee7ced57de6d382a2d12693c01e2fcdac350b25f | [
"BSD-3-Clause"
] | 29 | 2015-04-23T20:58:30.000Z | 2022-03-02T16:16:42.000Z | Filters/Points/Testing/Python/TestPointInterpolator2D.py | satya-arjunan/vtk8 | ee7ced57de6d382a2d12693c01e2fcdac350b25f | [
"BSD-3-Clause"
] | 1,044 | 2015-01-05T22:48:27.000Z | 2022-03-31T02:38:26.000Z | #!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Parameters for debugging
NPts = 1000000
math = vtk.vtkMath()
# create pipeline: use terrain dataset
#
# Read the data: a height field results
demReader = vtk.vtkDEMReader()
demReader.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
demReader.Update()
lo = demReader.GetOutput().GetScalarRange()[0]
hi = demReader.GetOutput().GetScalarRange()[1]
geom = vtk.vtkImageDataGeometryFilter()
geom.SetInputConnection(demReader.GetOutputPort())
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(geom.GetOutputPort())
warp.SetNormal(0, 0, 1)
warp.UseNormalOn()
warp.SetScaleFactor(2)
warp.Update()
bds = warp.GetOutput().GetBounds()
center = warp.GetOutput().GetCenter()
# A randomized point cloud, whose attributes are set via implicit function
points = vtk.vtkPoints()
points.SetDataTypeToFloat()
points.SetNumberOfPoints(NPts)
for i in range(0,NPts):
points.SetPoint(i,math.Random(bds[0],bds[1]),math.Random(bds[2],bds[3]),math.Random(bds[4],bds[5]))
source = vtk.vtkPolyData()
source.SetPoints(points)
sphere = vtk.vtkSphere()
sphere.SetCenter(center[0],center[1]-7500,center[2])
attr = vtk.vtkSampleImplicitFunctionFilter()
attr.SetInputData(source)
attr.SetImplicitFunction(sphere)
attr.Update()
# Gaussian kernel-------------------------------------------------------
gaussianKernel = vtk.vtkGaussianKernel()
gaussianKernel.SetSharpness(4)
gaussianKernel.SetRadius(50)
voronoiKernel = vtk.vtkVoronoiKernel()
interpolator1 = vtk.vtkPointInterpolator2D()
interpolator1.SetInputConnection(warp.GetOutputPort())
interpolator1.SetSourceConnection(attr.GetOutputPort())
#interpolator1.SetKernel(gaussianKernel)
interpolator1.SetKernel(voronoiKernel)
interpolator1.SetNullPointsStrategyToClosestPoint()
# Time execution
timer = vtk.vtkTimerLog()
timer.StartTimer()
interpolator1.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Terrain Points (Voronoi): {0}".format(time))
scalarRange = attr.GetOutput().GetScalarRange()
intMapper1 = vtk.vtkPolyDataMapper()
intMapper1.SetInputConnection(interpolator1.GetOutputPort())
intMapper1.SetScalarRange(scalarRange)
intActor1 = vtk.vtkActor()
intActor1.SetMapper(intMapper1)
# Create an outline
outline1 = vtk.vtkOutlineFilter()
outline1.SetInputConnection(warp.GetOutputPort())
outlineMapper1 = vtk.vtkPolyDataMapper()
outlineMapper1.SetInputConnection(outline1.GetOutputPort())
outlineActor1 = vtk.vtkActor()
outlineActor1.SetMapper(outlineMapper1)
# Create the RenderWindow, Renderer and both Actors
#
ren0 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren0)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren0.AddActor(intActor1)
ren0.AddActor(outlineActor1)
ren0.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(250, 250)
cam = ren0.GetActiveCamera()
cam.SetFocalPoint(center)
fp = cam.GetFocalPoint()
cam.SetPosition(fp[0]+.2,fp[1]+.1,fp[2]+1)
ren0.ResetCamera()
iren.Initialize()
# render the image
#
renWin.Render()
iren.Start()
| 25.803279 | 103 | 0.775413 | 348 | 3,148 | 7.002874 | 0.454023 | 0.028314 | 0.016003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028028 | 0.081957 | 3,148 | 121 | 104 | 26.016529 | 0.815225 | 0.147078 | 0 | 0 | 0 | 0 | 0.023979 | 0.008243 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025974 | 0 | 0.025974 | 0.012987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f092fdc888832d78dce05a7e9c60b5fe4f8e29cd | 1,797 | py | Python | quadpy/e2r/stroud.py | gdmcbain/quadpy | c083d500027d7c1b2187ae06ff2b7fbdd360ccc7 | [
"MIT"
] | 1 | 2019-01-02T19:04:42.000Z | 2019-01-02T19:04:42.000Z | quadpy/e2r/stroud.py | gdmcbain/quadpy | c083d500027d7c1b2187ae06ff2b7fbdd360ccc7 | [
"MIT"
] | null | null | null | quadpy/e2r/stroud.py | gdmcbain/quadpy | c083d500027d7c1b2187ae06ff2b7fbdd360ccc7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
"""
Arthur Stroud,
Approximate Calculation of Multiple Integrals,
Prentice Hall, 1971.
"""
from __future__ import division
import numpy
import sympy
from . import rabinowitz_richter
from . import stroud_secrest
from ..helpers import untangle
def _gen4_1(symbolic):
frac = sympy.Rational if symbolic else lambda x, y: x / y
sqrt = numpy.vectorize(sympy.sqrt) if symbolic else numpy.sqrt
cos = numpy.vectorize(sympy.cos) if symbolic else numpy.cos
sin = numpy.vectorize(sympy.sin) if symbolic else numpy.sin
pi = sympy.pi if symbolic else numpy.pi
pts = (
2
* sqrt(5)
* numpy.array(
[
[cos(2 * i * pi / 5) for i in range(5)],
[sin(2 * i * pi / 5) for i in range(5)],
]
).T
)
data = [(frac(7, 10), numpy.array([[0, 0]])), (frac(3, 50), pts)]
return 4, data
# The boolean tells whether the factor 2*pi is already in the weights
_gen = {
"4-1": (_gen4_1, False),
"5-1": (stroud_secrest.v, False),
"7-1": (stroud_secrest.vi, False),
"9-1": (rabinowitz_richter.gen1, True),
"11-1": (rabinowitz_richter.gen2, True),
"11-2": (rabinowitz_richter.gen3, True),
# ERR misprint in Stroud copied from original article
# '13-1': (rabinowitz_richter.gen4,
"15-1": (rabinowitz_richter.gen5, True),
}
class Stroud(object):
keys = _gen.keys()
def __init__(self, key, symbolic=False):
self.name = "Stroud_E2r({})".format(key)
self.degree, data = _gen[key][0](symbolic)
weights_contain_2pi = _gen[key][1]
self.points, self.weights = untangle(data)
pi = sympy.pi if symbolic else numpy.pi
if not weights_contain_2pi:
self.weights *= 2 * pi
return
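# A minimal construction sketch using the "4-1" generator defined above; doing
# the actual quadrature with the resulting points/weights is left to the caller:
#
#     scheme = Stroud("4-1")
#     scheme.degree              # -> 4
#     scheme.points, scheme.weights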
| 27.227273 | 69 | 0.609905 | 251 | 1,797 | 4.247012 | 0.386454 | 0.095685 | 0.078799 | 0.089118 | 0.08818 | 0.08818 | 0.08818 | 0.08818 | 0.031895 | 0 | 0 | 0.043446 | 0.257095 | 1,797 | 65 | 70 | 27.646154 | 0.755056 | 0.144129 | 0 | 0.045455 | 0 | 0 | 0.024902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f093c60ce6164b4582a2d6f0d5cb7a2a32083d64 | 5,524 | py | Python | build/lib/comptools/similarity.py | pedrofariacomposer/comptools | 71f540fa6e109365cb7faf57b354fd0603b4d396 | [
"MIT"
] | 2 | 2022-01-12T19:47:53.000Z | 2022-02-17T00:39:00.000Z | comptools/similarity.py | pedrofariacomposer/comptools | 71f540fa6e109365cb7faf57b354fd0603b4d396 | [
"MIT"
] | null | null | null | comptools/similarity.py | pedrofariacomposer/comptools | 71f540fa6e109365cb7faf57b354fd0603b4d396 | [
"MIT"
] | null | null | null | """
Module with the similarity tools of the Comp_Tools library.
For more information, see:
Isaacson - Similarity of Interval-Class Content Between Pitch-Class Sets: The IcVSIM Relation
"""
from .basic_tools import interval_vector, prime_form
from ._all_classes import allClasses
from numpy import sqrt, reshape, array
from typing import Sequence, List
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
def forte(
pcset1: Sequence,
pcset2: Sequence,
) -> str:
"""Returns the Forte similarity relation between two pitch class sets.
If the sets don't have the same cardinality, the function returns None.
"""
if len(pcset1) != len(pcset2):
forte_relation = None
else:
v1 = interval_vector(pcset1)
v2 = interval_vector(pcset2)
common_entries = [i for i in range(6) if v1[i] == v2[i]]
if len(common_entries) == 0:
forte_relation = "R0"
elif len(common_entries) == 4:
diff_entries = [i for i in range(6) if i not in common_entries]
pair1 = [v1[x] for x in diff_entries]
pair2 = [v2[x] for x in diff_entries]
if sorted(pair1) == sorted(pair2):
forte_relation = "R1"
else:
forte_relation = "R2"
return forte_relation
def morris(
pcset1: Sequence,
pcset2: Sequence,
) -> int:
"""Returns the Morris similarity relation between two pitch class sets.
"""
vector1 = interval_vector(pcset1)
vector2 = interval_vector(pcset2)
result = 0
for i in range(len(vector1)):
result += abs(vector1[i]-vector2[i])
return result
def lord(
pcset1: Sequence,
pcset2: Sequence,
) -> float:
"""Returns the Lord similarity relation between two pitch class sets.
"""
return morris(pcset1,pcset2) / 2
def rahn(
pcset1: Sequence,
pcset2: Sequence,
) -> int:
"""Returns the Rahn similarity relation between two pitch class sets.
"""
vector1 = interval_vector(pcset1)
vector2 = interval_vector(pcset2)
result = 0
for i in range(6):
if (vector1[i] == 0 or vector2[i] == 0):
pass
else:
result += vector1[i] + vector2[i]
return result
def rahn_mod(
pcset1: Sequence,
pcset2: Sequence,
) -> float:
"""Returns the modified Rahn similarity relation between two pitch class sets.
"""
original_rahn = rahn(pcset1, pcset2)
return (1/2) * original_rahn / ((len(pcset1) * (len(pcset1) - 1)) +(len(pcset2) * (len(pcset2) - 1)))
def lewin(
pcset1: Sequence,
pcset2: Sequence,
) -> float:
"""Returns the Lewin similarity relation between two pitch class sets.
"""
vector1 = interval_vector(pcset1)
vector2 = interval_vector(pcset2)
factor1 = 0
for i in range(6):
factor1 += sqrt(vector1[i] * vector2[i])
factor2 = ((len(pcset1) * (len(pcset1) - 1)) *(len(pcset2) * (len(pcset2) - 1)))
return (2 * factor1) / sqrt(factor2)
def teitelbaum(
pcset1: Sequence,
pcset2: Sequence,
) -> float:
"""Returns the Teitelbaum similarity relation between two pitch class sets.
"""
vector1 = interval_vector(pcset1)
vector2 = interval_vector(pcset2)
factor1 = 0
for i in range(6):
factor1 += (vector1[i]-vector2[i]) ** 2
return sqrt(factor1)
def isaacson(
pcset1: Sequence,
pcset2: Sequence,
) -> float:
"""Returns the Isaacson similarity relation between two pitch class sets.
"""
vector1 = interval_vector(pcset1)
vector2 = interval_vector(pcset2)
idv = [vector2[i]-vector1[i] for i in range(6)]
idv_mean = sum(idv) / 6
factor1 = 0
for i in range(6):
factor1 += (idv[i] - idv_mean) ** 2
return sqrt(factor1/6)
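# A minimal usage sketch of the interval-vector based measures defined above;
# the two pitch-class sets are arbitrary illustrative choices:
#
#     a, b = [0, 1, 4, 6], [0, 1, 3, 7]
#     morris(a, b), lord(a, b), rahn(a, b), lewin(a, b), isaacson(a, b)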
def text_set_class(
set_class: Sequence,
) -> str:
"""Converts a set class into a string representing its interval vector.
"""
id_dict = {0: "one",
1: "two",
2: "three",
3: "four",
4: "five",
5: "six"}
result = ""
for i, el in enumerate(interval_vector(set_class)):
for _ in range(el):
result += id_dict[i] + " "
return result.rstrip()
def text_sim(
sc1: Sequence,
sc2: Sequence,
) -> float:
"""Returns the Text_Sim similarity measure between two pitch class sets.
"""
sc1 = prime_form(sc1)
sc2 = prime_form(sc2)
corpus = [text_set_class(x) for x in sorted(allClasses)]
vectorizer = TfidfVectorizer()
trsfm = vectorizer.fit_transform(corpus)
text_similarity = cosine_similarity(trsfm)
names = [str(x) for x in sorted(allClasses)]
df = pd.DataFrame(text_similarity.round(3), columns=names, index=names)
return df[str(sc1)][str(sc2)]
def simile_table(
pitch_class_sets1: Sequence,
pitch_class_sets2: Sequence,
simile_function,
) -> array :
"""Creates a numpy array with the similarities between two sets of pitch-class sets.
"""
mod = ['X'] + pitch_class_sets2
new = [mod]
for i in pitch_class_sets1:
new2 = [i]
for j in pitch_class_sets2:
a = simile_function(i, j)
new2.append(a)
new.append(new2)
k = len(pitch_class_sets1) + 1
n = len(pitch_class_sets2) + 1
m = reshape(new, (k, n))
return m | 24.551111 | 105 | 0.611694 | 697 | 5,524 | 4.74462 | 0.223816 | 0.05443 | 0.046568 | 0.05443 | 0.402782 | 0.395525 | 0.365891 | 0.231932 | 0.195343 | 0.195343 | 0 | 0.03619 | 0.279689 | 5,524 | 225 | 106 | 24.551111 | 0.794923 | 0.195873 | 0 | 0.359712 | 0 | 0 | 0.006868 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079137 | false | 0.007194 | 0.05036 | 0 | 0.208633 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0949414173bc974ed0bfc16a4d388d9b02e4b7d | 8,370 | py | Python | sphinxcontrib/openapi.py | diegodelemos/sphinxcontrib-openapi | eba539802bc270c818f2aa2a246a137688a6f2f4 | [
"BSD-2-Clause"
] | null | null | null | sphinxcontrib/openapi.py | diegodelemos/sphinxcontrib-openapi | eba539802bc270c818f2aa2a246a137688a6f2f4 | [
"BSD-2-Clause"
] | null | null | null | sphinxcontrib/openapi.py | diegodelemos/sphinxcontrib-openapi | eba539802bc270c818f2aa2a246a137688a6f2f4 | [
"BSD-2-Clause"
] | 1 | 2018-10-12T15:11:46.000Z | 2018-10-12T15:11:46.000Z | """
sphinxcontrib.openapi
---------------------
The OpenAPI spec renderer for Sphinx. It's a new way to document your
RESTful API. Based on ``sphinxcontrib-httpdomain``.
:copyright: (c) 2016, Ihor Kalnytskyi.
:license: BSD, see LICENSE for details.
"""
import io
import itertools
import collections
import yaml
import jsonschema
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles
from sphinxcontrib import httpdomain
# Dictionaries do not guarantee to preserve the keys order so when we load
# JSON or YAML - we may lose the order. In most cases it's not important
# because we're interested in data. However, in case of OpenAPI spec it'd
# be really nice to preserve them since, for example, endpoints may be
# grouped logically and that improves readability.
class _YamlOrderedLoader(yaml.SafeLoader):
pass
_YamlOrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
lambda loader, node: collections.OrderedDict(loader.construct_pairs(node))
)
def _resolve_refs(uri, spec):
"""Resolve JSON references in a given dictionary.
OpenAPI spec may contain JSON references to its nodes or external
sources, so any attempt to rely that there's some expected attribute
in the spec may fail. So we need to resolve JSON references before
we use it (i.e. replace with referenced object). For details see:
https://tools.ietf.org/html/draft-pbryan-zyp-json-ref-02
The input spec is modified in-place despite being returned from
the function.
"""
resolver = jsonschema.RefResolver(uri, spec)
def _do_resolve(node):
if isinstance(node, collections.Mapping) and '$ref' in node:
with resolver.resolving(node['$ref']) as resolved:
return resolved
elif isinstance(node, collections.Mapping):
for k, v in node.items():
node[k] = _do_resolve(v)
elif isinstance(node, (list, tuple)):
for i in range(len(node)):
node[i] = _do_resolve(node[i])
return node
return _do_resolve(spec)
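# Illustrative only (not from the original module): given a spec such as
#
#     spec = {
#         "definitions": {"Pet": {"type": "object"}},
#         "paths": {"/pets": {"get": {"responses": {"200": {
#             "schema": {"$ref": "#/definitions/Pet"}}}}}},
#     }
#
# _resolve_refs("", spec) replaces the {"$ref": ...} node with the referenced
# "Pet" object in place, so later code never has to dereference "$ref" itself.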
def _httpresource(endpoint, method, properties):
parameters = properties.get('parameters', [])
responses = properties['responses']
indent = ' '
yield '.. http:{0}:: {1}'.format(method, endpoint)
yield ' :synopsis: {0}'.format(properties.get('summary', 'null'))
yield ''
if 'summary' in properties:
for line in properties['summary'].splitlines():
yield '{indent}**{line}**'.format(**locals())
yield ''
if 'description' in properties:
for line in properties['description'].splitlines():
yield '{indent}{line}'.format(**locals())
yield ''
# print request's route params
for param in filter(lambda p: p['in'] == 'path', parameters):
yield indent + ':param {type} {name}:'.format(**param)
for line in param.get('description', '').splitlines():
yield '{indent}{indent}{line}'.format(**locals())
# print request's query params
for param in filter(lambda p: p['in'] == 'query', parameters):
yield indent + ':query {type} {name}:'.format(**param)
for line in param.get('description', '').splitlines():
yield '{indent}{indent}{line}'.format(**locals())
# print response status codes
for status, response in responses.items():
yield '{indent}:status {status}:'.format(**locals())
for line in response['description'].splitlines():
yield '{indent}{indent}{line}'.format(**locals())
# print request header params
for param in filter(lambda p: p['in'] == 'header', parameters):
yield indent + ':reqheader {name}:'.format(**param)
for line in param.get('description', '').splitlines():
yield '{indent}{indent}{line}'.format(**locals())
# print response headers
for status, response in responses.items():
for headername, header in response.get('headers', {}).items():
yield indent + ':resheader {name}:'.format(name=headername)
for line in header['description'].splitlines():
yield '{indent}{indent}{line}'.format(**locals())
yield ''
def _normalize_spec(spec, **options):
# OpenAPI spec may contain JSON references, so we need resolve them
# before we access the actual values trying to build an httpdomain
# markup. Since JSON references may be relative, it's crucial to
# pass a document URI in order to properly resolve them.
spec = _resolve_refs(options.get('uri', ''), spec)
# OpenAPI spec may contain common endpoint's parameters top-level.
# In order to do not place if-s around the code to handle special
# cases, let's normalize the spec and push common parameters inside
# endpoints definitions.
for endpoint in spec['paths'].values():
parameters = endpoint.pop('parameters', [])
for method in endpoint.values():
method.setdefault('parameters', [])
method['parameters'].extend(parameters)
def openapi2httpdomain(spec, **options):
generators = []
# OpenAPI spec may contain JSON references, common properties, etc.
# Trying to render the spec "As Is" will require to put multiple
# if-s around the code. In order to simplify flow, let's make the
# spec to have only one (expected) schema, i.e. normalize it.
_normalize_spec(spec, **options)
# If 'paths' are passed we've got to ensure they exist within an OpenAPI
# spec; otherwise raise error and ask user to fix that.
if 'paths' in options:
if not set(options['paths']).issubset(spec['paths']):
raise ValueError(
'One or more paths are not defined in the spec: %s.' % (
', '.join(set(options['paths']) - set(spec['paths'])),
)
)
for endpoint in options.get('paths', spec['paths']):
for method, properties in spec['paths'][endpoint].items():
generators.append(_httpresource(endpoint, method, properties))
return iter(itertools.chain(*generators))
class OpenApi(Directive):
required_arguments = 1 # path to openapi spec
final_argument_whitespace = True # path may contain whitespaces
option_spec = {
'encoding': directives.encoding, # useful for non-ascii cases :)
'paths': lambda s: s.split(), # endpoints to be rendered
}
def run(self):
env = self.state.document.settings.env
relpath, abspath = env.relfn2path(directives.path(self.arguments[0]))
# Add OpenAPI spec as a dependency to the current document. That means
# the document will be rebuilt if the spec is changed.
env.note_dependency(relpath)
# Read the spec using encoding passed to the directive or fallback to
# the one specified in Sphinx's config.
encoding = self.options.get('encoding', env.config.source_encoding)
with io.open(abspath, 'rt', encoding=encoding) as stream:
spec = yaml.load(stream, _YamlOrderedLoader)
# URI parameter is crucial for resolving relative references. So
# we need to set this option properly as it's used later down the
# stack.
self.options.setdefault('uri', 'file://%s' % abspath)
        # reStructuredText DOM manipulation is a pretty tricky task. It requires
# passing dozen arguments which is not easy without well-documented
# internals. So the idea here is to represent OpenAPI spec as
# reStructuredText in-memory text and parse it in order to produce a
# real DOM.
viewlist = ViewList()
for line in openapi2httpdomain(spec, **self.options):
viewlist.append(line, '<openapi>')
# Parse reStructuredText contained in `viewlist` and return produced
# DOM nodes.
node = nodes.section()
node.document = self.state.document
nested_parse_with_titles(self.state, viewlist, node)
return node.children
def setup(app):
if 'http' not in app.domains:
httpdomain.setup(app)
app.add_directive('openapi', OpenApi)
| 38.045455 | 78 | 0.653166 | 1,048 | 8,370 | 5.180344 | 0.328244 | 0.024314 | 0.013262 | 0.028366 | 0.160987 | 0.154172 | 0.111254 | 0.095782 | 0.085835 | 0.068153 | 0 | 0.002192 | 0.236918 | 8,370 | 219 | 79 | 38.219178 | 0.847816 | 0.339068 | 0 | 0.121739 | 0 | 0 | 0.115108 | 0.020291 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06087 | false | 0.008696 | 0.095652 | 0 | 0.243478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f095110e765b2827d7fdc2f784891c575cdab197 | 1,274 | py | Python | hypernets/hyperctl/dao.py | lyhue1991/Hypernets | d726bd297869eacb0cba84376fbac30206bbb60a | [
"Apache-2.0"
] | 3 | 2022-03-25T23:27:44.000Z | 2022-03-27T01:32:28.000Z | hypernets/hyperctl/dao.py | lyhue1991/Hypernets | d726bd297869eacb0cba84376fbac30206bbb60a | [
"Apache-2.0"
] | null | null | null | hypernets/hyperctl/dao.py | lyhue1991/Hypernets | d726bd297869eacb0cba84376fbac30206bbb60a | [
"Apache-2.0"
] | null | null | null | import os
from pathlib import Path
from hypernets.hyperctl import get_context
from hypernets.hyperctl.batch import ShellJob
def get_job_by_name(job_name):
for job in get_context().batch.jobs:
if job.name == job_name:
return job
return None
def get_jobs():
return get_context().batch.jobs
def change_job_status(job: ShellJob, next_status):
current_status = job.status
target_status_file = job.status_file_path(next_status)
if next_status == job.STATUS_INIT:
raise ValueError(f"can not change to {next_status} ")
elif next_status == job.STATUS_RUNNING:
if current_status != job.STATUS_INIT:
raise ValueError(f"only job in {job.STATUS_INIT} can change to {next_status}")
elif next_status in job.FINAL_STATUS:
if current_status != job.STATUS_RUNNING:
raise ValueError(f"only job in {job.STATUS_RUNNING} can change to "
f"{next_status} but now is {current_status}")
# delete running status file
running_status_file = job.status_file_path(job.STATUS_RUNNING)
os.remove(running_status_file)
else:
raise ValueError(f"unknown status {next_status}")
with open(target_status_file, 'w') as f:
pass
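# Transition rules enforced by change_job_status above (sketch):
#   STATUS_INIT    -> STATUS_RUNNING
#   STATUS_RUNNING -> any status in ShellJob.FINAL_STATUS (this also removes
#                     the STATUS_RUNNING marker file)
#   any other target raises ValueError; the final status is recorded by
#   creating an empty file at job.status_file_path(next_status).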
| 30.333333 | 90 | 0.685243 | 180 | 1,274 | 4.605556 | 0.277778 | 0.119421 | 0.09047 | 0.079614 | 0.329312 | 0.289505 | 0.224367 | 0.082027 | 0 | 0 | 0 | 0 | 0.235479 | 1,274 | 41 | 91 | 31.073171 | 0.851129 | 0.020408 | 0 | 0 | 0 | 0 | 0.165329 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0.034483 | 0.137931 | 0.034483 | 0.344828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0990d409066675bc8254e738cd19e11296ac77d | 19,592 | py | Python | optbinning/binning/mip.py | jensgk/optbinning | 5ccd892fa4ee0a745ab539cee10a2069b35de6da | [
"Apache-2.0"
] | 207 | 2020-01-23T21:32:59.000Z | 2022-03-30T06:33:21.000Z | optbinning/binning/mip.py | jensgk/optbinning | 5ccd892fa4ee0a745ab539cee10a2069b35de6da | [
"Apache-2.0"
] | 133 | 2020-01-23T22:14:35.000Z | 2022-03-29T14:05:04.000Z | optbinning/binning/mip.py | jensgk/optbinning | 5ccd892fa4ee0a745ab539cee10a2069b35de6da | [
"Apache-2.0"
] | 50 | 2020-01-27T15:37:08.000Z | 2022-03-30T06:33:25.000Z | """
Generalized assignment problem: solve the constrained optimal binning problem.
Mixed-Integer programming implementation.
"""
# Guillermo Navas-Palencia <g.navas.palencia@gmail.com>
# Copyright (C) 2019
import numpy as np
from ortools.linear_solver import pywraplp
from .model_data import model_data
class BinningMIP:
def __init__(self, monotonic_trend, min_n_bins, max_n_bins, min_bin_size,
max_bin_size, min_bin_n_event, max_bin_n_event,
min_bin_n_nonevent, max_bin_n_nonevent, min_event_rate_diff,
max_pvalue, max_pvalue_policy, gamma, user_splits_fixed,
mip_solver, time_limit):
self.monotonic_trend = monotonic_trend
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.min_bin_n_event = min_bin_n_event
self.max_bin_n_event = max_bin_n_event
self.min_bin_n_nonevent = min_bin_n_nonevent
self.max_bin_n_nonevent = max_bin_n_nonevent
self.min_event_rate_diff = min_event_rate_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.gamma = gamma
self.user_splits_fixed = user_splits_fixed
self.mip_solver = mip_solver
self.time_limit = time_limit
self.solver_ = None
self._n = None
self._x = None
def build_model(self, divergence, n_nonevent, n_event, trend_change):
# Parameters
D, V, pvalue_violation_indices = model_data(divergence, n_nonevent,
n_event, self.max_pvalue,
self.max_pvalue_policy)
n = len(n_nonevent)
n_records = n_nonevent + n_event
# Initialize solver
if self.mip_solver == "bop":
solver = pywraplp.Solver(
'BinningMIP', pywraplp.Solver.BOP_INTEGER_PROGRAMMING)
elif self.mip_solver == "cbc":
solver = pywraplp.Solver(
'BinningMIP', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
# Decision variables
x, y, t, d, u, bin_size_diff = self.decision_variables(solver, n)
# Objective function
if self.gamma:
total_records = int(n_records.sum())
regularization = self.gamma / total_records
pmax = solver.IntVar(0, total_records, "pmax")
pmin = solver.IntVar(0, total_records, "pmin")
solver.Maximize(solver.Sum([(V[i][i] * x[i, i]) +
solver.Sum([(V[i][j] - V[i][j+1]) * x[i, j]
for j in range(i)])
for i in range(n)]) -
regularization * (pmax - pmin))
else:
solver.Maximize(solver.Sum([(V[i][i] * x[i, i]) +
solver.Sum([(V[i][j] - V[i][j+1]) * x[i, j]
for j in range(i)])
for i in range(n)]))
# Constraint: unique assignment
self.add_constraint_unique_assignment(solver, n, x)
# Constraint: continuity
self.add_constraint_continuity(solver, n, x)
# Constraint: min / max bins
self.add_constraint_min_max_bins(solver, n, x, d)
# Constraint: min / max bin size
self.add_constraint_min_max_bin_size(solver, n, x, u, n_records,
bin_size_diff)
# Constraint: min / max n_nonevent per bin
if (self.min_bin_n_nonevent is not None or
self.max_bin_n_nonevent is not None):
for i in range(n):
bin_ne_size = solver.Sum([n_nonevent[j] * x[i, j]
for j in range(i + 1)])
if self.min_bin_n_nonevent is not None:
solver.Add(bin_ne_size >= self.min_bin_n_nonevent*x[i, i])
if self.max_bin_n_nonevent is not None:
solver.Add(bin_ne_size <= self.max_bin_n_nonevent*x[i, i])
# Constraint: min / max n_event per bin
if (self.min_bin_n_event is not None or
self.max_bin_n_event is not None):
for i in range(n):
bin_e_size = solver.Sum([n_event[j] * x[i, j]
for j in range(i + 1)])
if self.min_bin_n_event is not None:
solver.Add(bin_e_size >= self.min_bin_n_event * x[i, i])
if self.max_bin_n_event is not None:
solver.Add(bin_e_size <= self.max_bin_n_event * x[i, i])
# Constraints: monotonicity
if self.monotonic_trend == "ascending":
self.add_constraint_monotonic_ascending(solver, n, D, x)
elif self.monotonic_trend == "descending":
self.add_constraint_monotonic_descending(solver, n, D, x)
elif self.monotonic_trend == "concave":
self.add_constraint_monotonic_concave(solver, n, D, x)
elif self.monotonic_trend == "convex":
self.add_constraint_monotonic_convex(solver, n, D, x)
elif self.monotonic_trend in ("peak", "valley"):
for i in range(n):
solver.Add(t >= i - n * (1 - y[i]))
solver.Add(t <= i + n * y[i])
if self.monotonic_trend == "peak":
self.add_constraint_monotonic_peak(solver, n, D, x, y)
else:
self.add_constraint_monotonic_valley(solver, n, D, x, y)
elif self.monotonic_trend == "peak_heuristic":
self.add_constraint_monotonic_peak_heuristic(
solver, n, D, x, trend_change)
elif self.monotonic_trend == "valley_heuristic":
self.add_constraint_monotonic_valley_heuristic(
solver, n, D, x, trend_change)
# Constraint: reduction of dominating bins
if self.gamma:
for i in range(n):
bin_size = solver.Sum([n_records[j] * x[i, j]
for j in range(i + 1)])
solver.Add(pmin <= total_records * (1 - x[i, i]) + bin_size)
solver.Add(pmax >= bin_size)
solver.Add(pmin <= pmax)
# Constraint: max-pvalue
self.add_max_pvalue_constraint(solver, x, pvalue_violation_indices)
# Constraint: fixed splits
self.add_constraint_fixed_splits(solver, n, x)
self.solver_ = solver
self._n = n
self._x = x
def solve(self):
self.solver_.SetTimeLimit(self.time_limit * 1000)
status = self.solver_.Solve()
if status in (pywraplp.Solver.OPTIMAL, pywraplp.Solver.FEASIBLE):
if status == pywraplp.Solver.OPTIMAL:
status_name = "OPTIMAL"
else:
status_name = "FEASIBLE"
solution = np.array([self._x[i, i].solution_value()
for i in range(self._n)]).astype(bool)
else:
if status == pywraplp.Solver.ABNORMAL:
status_name = "ABNORMAL"
elif status == pywraplp.Solver.INFEASIBLE:
status_name = "INFEASIBLE"
elif status == pywraplp.Solver.UNBOUNDED:
status_name = "UNBOUNDED"
else:
status_name = "UNKNOWN"
solution = np.zeros(self._n).astype(bool)
solution[-1] = True
return status_name, solution
def decision_variables(self, solver, n):
x = {}
for i in range(n):
for j in range(i + 1):
x[i, j] = solver.BoolVar("x[{}, {}]".format(i, j))
y = None
t = None
d = None
u = None
bin_size_diff = None
if self.monotonic_trend in ("peak", "valley"):
# Auxiliary binary variables
y = {}
for i in range(n):
y[i] = solver.BoolVar("y[{}]".format(i))
# Change point
t = solver.IntVar(0, n, "t")
if self.min_n_bins is not None and self.max_n_bins is not None:
n_bin_diff = self.max_n_bins - self.min_n_bins
# Range constraints auxiliary variables
d = solver.IntVar(0, n_bin_diff, "n_bin_diff")
if self.min_bin_size is not None and self.max_bin_size is not None:
bin_size_diff = self.max_bin_size - self.min_bin_size
# Range constraints auxiliary variables
u = {}
for i in range(n):
u[i] = solver.IntVar(0, bin_size_diff, "u[{}]".format(i))
return x, y, t, d, u, bin_size_diff
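    # Sketch of the decision-variable semantics implied by the constraints
    # below: x[i, j] == 1 assigns pre-bin j to the merged bin whose last
    # pre-bin is i, so the diagonal x[i, i] flags which pre-bins close a bin
    # and its trace equals the number of final bins.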
def add_constraint_unique_assignment(self, solver, n, x):
for j in range(n):
solver.Add(solver.Sum([x[i, j] for i in range(j, n)]) == 1)
def add_constraint_continuity(self, solver, n, x):
for i in range(n):
for j in range(i):
solver.Add(x[i, j] - x[i, j+1] <= 0)
def add_constraint_min_max_bins(self, solver, n, x, d):
if self.min_n_bins is not None or self.max_n_bins is not None:
trace = solver.Sum([x[i, i] for i in range(n)])
if self.min_n_bins is not None and self.max_n_bins is not None:
solver.Add(d + trace - self.max_n_bins == 0)
elif self.min_n_bins is not None:
solver.Add(trace >= self.min_n_bins)
elif self.max_n_bins is not None:
solver.Add(trace <= self.max_n_bins)
def add_constraint_min_max_bin_size(self, solver, n, x, u, n_records,
bin_size_diff):
if self.min_bin_size is not None or self.max_bin_size is not None:
for i in range(n):
bin_size = solver.Sum([n_records[j] * x[i, j]
for j in range(i + 1)])
if (self.min_bin_size is not None and
self.max_bin_size is not None):
solver.Add(u[i] + bin_size -
self.max_bin_size * x[i, i] == 0)
solver.Add(u[i] <= bin_size_diff * x[i, i])
elif self.min_bin_size is not None:
solver.Add(bin_size >= self.min_bin_size * x[i, i])
elif self.max_bin_size is not None:
solver.Add(bin_size <= self.max_bin_size * x[i, i])
def add_constraint_monotonic_ascending(self, solver, n, D, x):
for i in range(1, n):
for z in range(i):
solver.Add(
solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j]
for j in range(z)]) +
D[z][z] * x[z, z] - 1 - (D[i][i] - 1) * x[i, i] -
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j]
for j in range(i)]) +
self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0)
# Preprocessing
if self.min_event_rate_diff == 0:
for i in range(n - 1):
if D[i+1][i] - D[i+1][i+1] > 0:
solver.Add(x[i, i] == 0)
for j in range(n - i - 1):
if D[i+1+j][i] - D[i+1+j][i+1+j] > 0:
solver.Add(x[i+j, i+j] == 0)
def add_constraint_monotonic_descending(self, solver, n, D, x):
for i in range(1, n):
for z in range(i):
solver.Add(
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j]
for j in range(i)]) + D[i][i] * x[i, i] -
1 - (D[z][z] - 1) * x[z, z] -
solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j]
for j in range(z)]) +
self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0)
# Preprocessing
if self.min_event_rate_diff == 0:
for i in range(n - 1):
if D[i+1][i] - D[i+1][i+1] < 0:
solver.Add(x[i, i] == 0)
for j in range(n - i - 1):
if D[i+1+j][i] - D[i+1+j][i+1+j] < 0:
solver.Add(x[i+j, i+j] == 0)
def add_constraint_monotonic_concave(self, solver, n, D, x):
for i in range(2, n):
for j in range(1, i):
for k in range(j):
solver.Add(
-(solver.Sum([(D[i][z] - D[i][z+1]) * x[i, z]
for z in range(i)]) + D[i][i]*x[i, i]) +
2 * (solver.Sum([(D[j][z] - D[j][z+1]) * x[j, z]
for z in range(j)])
+ D[j][j] * x[j, j]) -
(solver.Sum([(D[k][z] - D[k][z+1]) * x[k, z]
for z in range(k)]) +
D[k][k] * x[k, k]) >= (
x[i, i] + x[j, j] + x[k, k] - 3))
def add_constraint_monotonic_convex(self, solver, n, D, x):
for i in range(2, n):
for j in range(1, i):
for k in range(j):
solver.Add(
(solver.Sum([(D[i][z] - D[i][z+1]) * x[i, z]
for z in range(i)]) + D[i][i] * x[i, i]) -
2 * (solver.Sum([(D[j][z] - D[j][z+1]) * x[j, z]
for z in range(j)]) +
D[j][j] * x[j, j]) +
(solver.Sum([(D[k][z] - D[k][z+1]) * x[k, z]
for z in range(k)]) +
D[k][k] * x[k, k]) >= (
x[i, i] + x[j, j] + x[k, k] - 3))
def add_constraint_monotonic_peak(self, solver, n, D, x, y):
for i in range(1, n):
for z in range(i):
solver.Add(
y[i] + y[z] + 1 + (D[z][z] - 1) * x[z, z] +
solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j]
for j in range(z)]) -
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j]
for j in range(i)]) -
D[i][i] * x[i, i] >= 0)
solver.Add(
2 - y[i] - y[z] + 1 + (D[i][i] - 1) * x[i, i] +
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j]
for j in range(i)]) -
solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j]
for j in range(z)]) -
D[z][z] * x[z, z] >= 0)
def add_constraint_monotonic_valley(self, solver, n, D, x, y):
for i in range(1, n):
for z in range(i):
solver.Add(
y[i] + y[z] + 1 + (D[i][i] - 1) * x[i, i] +
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j]
for j in range(i)]) -
solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j]
for j in range(z)]) -
D[z][z] * x[z, z] >= 0)
solver.Add(
2 - y[i] - y[z] + 1 + (D[z][z] - 1) * x[z, z] +
solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j]
for j in range(z)]) -
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j]
for j in range(i)]) -
D[i][i] * x[i, i] >= 0)
def add_constraint_monotonic_peak_heuristic(self, solver, n, D, x, tc):
for i in range(1, tc):
for z in range(i):
solver.Add(
solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j]
for j in range(z)]) +
D[z][z] * x[z, z] - 1 - (D[i][i] - 1) * x[i, i] -
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j]
for j in range(i)]) +
self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0)
# Preprocessing
if self.min_event_rate_diff == 0:
for i in range(tc - 1):
if D[i+1][i] - D[i+1][i+1] > 0:
solver.Add(x[i, i] == 0)
for j in range(tc - i - 1):
if D[i+1+j][i] - D[i+1+j][i+1+j] > 0:
solver.Add(x[i+j, i+j] == 0)
for i in range(tc, n):
for z in range(tc, i):
solver.Add(
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j]
for j in range(i)]) + D[i][i] * x[i, i] -
1 - (D[z][z] - 1) * x[z, z] -
solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j]
for j in range(z)]) +
self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0)
# Preprocessing
if self.min_event_rate_diff == 0:
for i in range(tc, n - 1):
if D[i+1][i] - D[i+1][i+1] < 0:
solver.Add(x[i, i] == 0)
for j in range(tc, n - i - 1):
if D[i+1+j][i] - D[i+1+j][i+1+j] < 0:
solver.Add(x[i+j, i+j] == 0)
def add_constraint_monotonic_valley_heuristic(self, solver, n, D, x, tc):
for i in range(1, tc):
for z in range(i):
solver.Add(
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j]
for j in range(i)]) + D[i][i] * x[i, i] -
1 - (D[z][z] - 1) * x[z, z] -
solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j]
for j in range(z)]) +
self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0)
# Preprocessing
if self.min_event_rate_diff == 0:
for i in range(tc - 1):
if D[i+1][i] - D[i+1][i+1] < 0:
solver.Add(x[i, i] == 0)
for j in range(tc - i - 1):
if D[i+1+j][i] - D[i+1+j][i+1+j] < 0:
solver.Add(x[i+j, i+j] == 0)
for i in range(tc, n):
for z in range(tc, i):
solver.Add(
solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j]
for j in range(z)]) +
D[z][z] * x[z, z] - 1 - (D[i][i] - 1) * x[i, i] -
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j]
for j in range(i)]) +
self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0)
# Preprocessing
if self.min_event_rate_diff == 0:
for i in range(tc, n - 1):
if D[i+1][i] - D[i+1][i+1] > 0:
solver.Add(x[i, i] == 0)
for j in range(tc, n - i - 1):
if D[i+1+j][i] - D[i+1+j][i+1+j] > 0:
solver.Add(x[i+j, i+j] == 0)
def add_max_pvalue_constraint(self, solver, x, pvalue_violation_indices):
for ind1, ind2 in pvalue_violation_indices:
solver.Add(x[ind1[0], ind1[1]] + x[ind2[0], ind2[1]] <= 1)
def add_constraint_fixed_splits(self, solver, n, x):
if self.user_splits_fixed is not None:
for i in range(n - 1):
if self.user_splits_fixed[i]:
solver.Add(x[i, i] == 1)
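# --- Illustrative sketch (not part of the original module) ---
# Minimal, self-contained example of the x[i, j] assignment scheme the solver
# above is built on: every pre-bin j must be absorbed by exactly one selected
# bin i >= j, and the continuity constraint forces each selected bin to cover a
# contiguous block of pre-bins. All names and numbers below are made up.
if __name__ == "__main__":
    from ortools.linear_solver import pywraplp
    n = 4                        # number of pre-bins
    n_records = [30, 5, 5, 60]   # records per pre-bin (toy data)
    min_bin_size = 20
    toy_solver = pywraplp.Solver.CreateSolver("CBC")
    assert toy_solver is not None, "CBC backend unavailable in this ortools build"
    x = {(i, j): toy_solver.BoolVar("x[{},{}]".format(i, j))
         for i in range(n) for j in range(i + 1)}
    for j in range(n):           # unique assignment of every pre-bin
        toy_solver.Add(toy_solver.Sum([x[i, j] for i in range(j, n)]) == 1)
    for i in range(n):           # continuity of merged pre-bins
        for j in range(i):
            toy_solver.Add(x[i, j] - x[i, j + 1] <= 0)
    for i in range(n):           # minimum size, enforced only for selected bins
        bin_size = toy_solver.Sum([n_records[j] * x[i, j] for j in range(i + 1)])
        toy_solver.Add(bin_size >= min_bin_size * x[i, i])
    toy_solver.Maximize(toy_solver.Sum([x[i, i] for i in range(n)]))
    if toy_solver.Solve() == pywraplp.Solver.OPTIMAL:
        print([bool(x[i, i].solution_value()) for i in range(n)])   # selected bins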
| 41.596603 | 79 | 0.448857 | 2,809 | 19,592 | 2.974368 | 0.054112 | 0.070377 | 0.014363 | 0.048713 | 0.649791 | 0.559785 | 0.501017 | 0.474686 | 0.443208 | 0.403471 | 0 | 0.017177 | 0.414608 | 19,592 | 470 | 80 | 41.685106 | 0.711309 | 0.038842 | 0 | 0.491667 | 0 | 0 | 0.010586 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.008333 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f09e777349a3fb2b5e4eefb367e93e2d2b1b6306 | 3,125 | py | Python | aws_log_collector/converters/cloudwatch.py | theletterf/aws-log-collector | ac4201d43fde6b1e4631b279c1e5d11019b8488d | [
"Apache-2.0"
] | 1 | 2021-07-09T15:56:23.000Z | 2021-07-09T15:56:23.000Z | aws_log_collector/converters/cloudwatch.py | theletterf/aws-log-collector | ac4201d43fde6b1e4631b279c1e5d11019b8488d | [
"Apache-2.0"
] | 15 | 2021-06-29T09:40:38.000Z | 2022-03-29T20:23:43.000Z | aws_log_collector/converters/cloudwatch.py | theletterf/aws-log-collector | ac4201d43fde6b1e4631b279c1e5d11019b8488d | [
"Apache-2.0"
] | 2 | 2021-11-17T14:17:19.000Z | 2021-12-20T08:33:17.000Z | # Copyright 2021 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import gzip
import json
from io import BytesIO, BufferedReader
from aws_log_collector.converters.converter import Converter
from aws_log_collector.enrichers.cloudwatch import CloudWatchLogsEnricher
from aws_log_collector.metric import size_of_json
class CloudWatchLogsConverter(Converter):
def __init__(self, logs_enricher: CloudWatchLogsEnricher):
self._logs_enricher = logs_enricher
def supports(self, log_event):
try:
data = log_event["awslogs"]["data"]
return len(data) > 0
except KeyError:
return False
def _convert_to_hec(self, log_event, context, sfx_metrics):
aws_logs_base64 = log_event["awslogs"]["data"]
aws_logs_compressed = base64.b64decode(aws_logs_base64)
aws_logs = self._read_logs(aws_logs_compressed)
metadata = self._logs_enricher.get_metadata(aws_logs, context, sfx_metrics)
sfx_metrics.namespace(metadata["source"])
self._send_input_metrics(sfx_metrics, aws_logs_base64, aws_logs_compressed, aws_logs)
return self._enriched_logs_to_hec(aws_logs, metadata)
@staticmethod
def _read_logs(aws_logs):
with gzip.GzipFile(fileobj=BytesIO(aws_logs)) as decompress_stream:
data = b"".join(BufferedReader(decompress_stream))
return json.loads(data)
@staticmethod
def _enriched_logs_to_hec(logs, metadata):
def _get_fields():
result = dict(metadata)
del result["host"]
del result["source"]
del result["sourcetype"]
return result
fields = _get_fields()
for item in logs["logEvents"]:
timestamp_as_string = str(item['timestamp'])
hec_item = {"event": item["message"],
"fields": fields,
"host": metadata["host"],
"source": metadata["source"],
"sourcetype": metadata["sourcetype"],
"time": timestamp_as_string[0:-3] + "." + timestamp_as_string[-3:],
}
yield hec_item
@staticmethod
def _send_input_metrics(sfx_metrics, aws_logs_base64, aws_logs_compressed, logs):
sfx_metrics.counters(
("sf.org.awsLogCollector.num.inputBase64Bytes", len(aws_logs_base64)),
("sf.org.awsLogCollector.num.inputCompressedBytes", len(aws_logs_compressed)),
("sf.org.awsLogCollector.num.inputUncompressedBytes", size_of_json(logs))
)
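# --- Illustrative sketch (not part of the original module) ---
# CloudWatch Logs subscriptions deliver a base64-encoded, gzip-compressed JSON
# payload; the round trip below mirrors what supports()/_read_logs() expect.
# The payload contents are made up for illustration.
if __name__ == "__main__":
    sample = {"logGroup": "/aws/lambda/example",
              "logStream": "2021/01/01/[$LATEST]abcdef",
              "logEvents": [{"id": "1", "timestamp": 1609459200123, "message": "hello"}]}
    compressed = gzip.compress(json.dumps(sample).encode("utf-8"))
    log_event = {"awslogs": {"data": base64.b64encode(compressed).decode("ascii")}}
    data = base64.b64decode(log_event["awslogs"]["data"])
    with gzip.GzipFile(fileobj=BytesIO(data)) as stream:
        decoded = json.loads(b"".join(BufferedReader(stream)))
    ts = str(decoded["logEvents"][0]["timestamp"])
    print(ts[0:-3] + "." + ts[-3:])   # HEC "time" field: seconds with a ms fraction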
| 37.650602 | 94 | 0.66528 | 369 | 3,125 | 5.384824 | 0.384824 | 0.056366 | 0.032713 | 0.028686 | 0.078007 | 0.056366 | 0.056366 | 0.056366 | 0.056366 | 0.056366 | 0 | 0.012712 | 0.2448 | 3,125 | 82 | 95 | 38.109756 | 0.829237 | 0.1744 | 0 | 0.054545 | 0 | 0 | 0.104443 | 0.05417 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0 | 0.127273 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0a187e8bd9b56832ad3e091b3ee4c6bde8bf101 | 10,099 | py | Python | source_document.py | thesecretlab/snippet-expander | 70db56a7d45fdec3808ea593f846a7c06841b6b3 | [
"MIT"
] | 2 | 2016-11-17T04:41:07.000Z | 2018-04-30T06:52:29.000Z | source_document.py | thesecretlab/snippet-expander | 70db56a7d45fdec3808ea593f846a7c06841b6b3 | [
"MIT"
] | null | null | null | source_document.py | thesecretlab/snippet-expander | 70db56a7d45fdec3808ea593f846a7c06841b6b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import re
import itertools
import os
import logging
from fuzzywuzzy import process
SNIP_PREFIX="// snip"
SNIP_FILE_PREFIX="// snip-file"
TAG_PREFIX="// tag"
# The virtual "ref" that represents the current state of the files on disk,
# and may not necessarily be stored in the index or in a commit. Uses a
# space because these are very uncommon in tags or branch names, and not
# seen in commit hashes.
WORKSPACE_REF = "working-copy"
class SourceDocument(object):
"""A document, containing snippets that refer to tagged code."""
def __init__ (self, path):
self.path = path
with open(path, "r") as source_file:
self.contents = source_file.read()
@property
def filename(self):
        return os.path.splitext(os.path.basename(self.path))[0]
@staticmethod
def find(base_path, extensions):
assert isinstance(base_path, str)
assert isinstance(extensions, list)
documents = []
starting_dir = base_path
for (path, dirs, files) in os.walk(starting_dir):
for filename in files:
for extension in extensions:
if filename.endswith("."+extension):
file_path = path+os.path.sep+filename
if ".git" in file_path:
continue
documents.append(SourceDocument(file_path))
return documents
@property
def cleaned_contents(self):
"""Returns a version of 'text' that has no expanded snippets."""
snip_with_code = re.compile("(//.*snip(\-file)*:?.*\n)(\+\n)?(\[.*\]\n)*----\n(.*\n)*?----\n", flags=re.IGNORECASE)
cleaned = re.sub(snip_with_code, r'\1', self.contents)
return cleaned
@property
def snippets(self):
"""Returns the list of snippets in this document, as a TagQuery."""
queries = []
from tagged_document import TagQuery
# start with a version of ourself that has no expanded snippets
source_lines = self.cleaned_contents.split("\n")
# the list of lines we're working with
output_lines = []
# default to working with files at the current state on disk; this
# can change to specific refs when a // tag: instruction is
# encountered in the document
current_ref = WORKSPACE_REF
        tag_regex = re.compile(r"^//\s*tag:?\s*(.*)$", flags=re.IGNORECASE)
        snip_regex = re.compile(r"^//\s*snip:?\s*(.*)$", flags=re.IGNORECASE)
for line in source_lines:
output_lines.append(line)
# change which tag we're looking at if we hit an instruction to
# do so
tag = tag_regex.match(line)
if tag:
                current_ref = tag.group(1).strip()
# is this a snippet?
snippet = snip_regex.match(line)
if snippet:
# figure out what tags we're supposed to be using here
                query_text = snippet.group(1)
# build the tag query from this
query = TagQuery(query_text, ref=current_ref)
queries.append(query)
return queries
@property
def tags_used(self):
"""Returns the set of all tags referred to in this document."""
return set([query.all_referenced_tags for query in self.snippets])
def render_snippet(self, query, tagged_documents):
from tagged_document import TagQuery
assert isinstance(query, TagQuery)
# get the list of documents that actually exist at this point
documents_at_current_tag = filter(None, [document[query.ref] for document in tagged_documents])
# get the tagged lines that apply from these documents
rendered_content = [document.query(query.query_string) for document in documents_at_current_tag]
# any document that produced no lines will have returned None;
# remove those
rendered_content = filter(None, rendered_content)
rendered_content = [content.split("\n") for content in rendered_content]
# we now have a list of list of lines; we want to flatten this to a
# plain list of lines
rendered_lines = list(itertools.chain.from_iterable(rendered_content))
rendered_lines = "\n".join(rendered_lines)
# finally, identify and remove any chain of 2 or more empty lines,
# replacing it with a single empty line
empty_lines = re.compile(r"(\s*?\n){2,}")
rendered_lines = re.sub(empty_lines, "\n\n", rendered_lines)
return rendered_lines
def render(self, tagged_documents, language=None, clean=False, show_query=True, file_getter=None, as_inline_list_items=False):
"""Returns a tuple of (string,bool): a version of itself after expanding snippets with code found in 'tagged_documents', and True if any snippets were rendered"""
assert isinstance(tagged_documents, list)
assert isinstance(language, str) or language is None
if clean:
return self.cleaned_contents, True
# start with a version of ourself that has no expanded snippets
source_lines = self.cleaned_contents.split("\n")
# the list of lines we're working with
output_lines = []
# default to working with files at HEAD
current_ref = WORKSPACE_REF
# true if this file rendered any snippets
dirty = False
all_tags_at_current_tag = list({tag for doc in tagged_documents for tag in doc[current_ref].tags})
snippet_count = 0
for line in source_lines:
output_lines.append(line)
# change which tag we're looking at if we hit an instruction to do so
if line.startswith(TAG_PREFIX):
current_ref = line[len(TAG_PREFIX)+1:].strip()
all_valid_docs_at_current_ref = [doc for doc in tagged_documents if doc[current_ref]]
all_tags_at_current_tag = list({tag for doc in all_valid_docs_at_current_ref for tag in doc[current_ref].tags})
# expand file snippets as we encounter them
if line.startswith(SNIP_FILE_PREFIX):
if not file_getter:
logging.warn("snip-file command used, but no file getter was provided")
continue
dirty = True
filename = line[len(SNIP_FILE_PREFIX)+1:].strip()
file_contents = file_getter(filename)
output_lines.append("----")
output_lines.append(file_contents)
output_lines.append("----")
# expand snippets as we encounter them
if line.startswith(SNIP_PREFIX):
dirty = True
# figure out what tags we're supposed to be using here
query_text = line[len(SNIP_PREFIX)+1:]
# get the list of documents that actually exist at this
# point
documents_at_current_tag = filter(None, [document[current_ref] for document in tagged_documents])
# get the tagged lines that apply from these documents
rendered_content = [document.query(query_text) for document in documents_at_current_tag]
# any document that produced no lines will have returned
# None; remove those
rendered_content = filter(None, rendered_content)
rendered_content = [content.split("\n") for content in rendered_content]
# we now have a list of list of lines; we want to flatten
# this to a plain list of lines
rendered_lines = list(itertools.chain.from_iterable(rendered_content))
if show_query:
from tagged_document import TagQuery
query_obj = TagQuery(query_text)
description = "// Snippet: {}-{}\n".format(snippet_count, query_obj.as_filename)
rendered_lines = [description] + rendered_lines
if not rendered_lines:
# if we got no lines, we log a warning and also render
# out that warning in the final output (so that a
# proofreader can spot it)
# try and find some potential tags that could fit
from tagged_document import TagQuery
query = TagQuery(query_text)
bests = [result[0] for result in process.extractBests(query.include[0], all_tags_at_current_tag, score_cutoff=80)]
import textwrap
warning = "No code found for query '{}' at ref '{}'. Possible replacement tags include: {}".format(query_text, current_ref, ", ".join(bests))
warning = textwrap.fill(warning, 80)
logging.warn("%s: %s", self.path, warning)
exclamations = "!" * 8
rendered_lines = [exclamations, warning, exclamations]
# time to produce our output!
if as_inline_list_items:
output_lines.append("+")
# add the language tag if one was specified
if language:
output_lines.append("[source,{}]".format(language))
# and output the snippet
output_lines.append("----")
output_lines += rendered_lines
output_lines.append("----")
snippet_count += 1
# render the output into a string
output = "\n".join(output_lines)
# finally, identify and remove any chain of 2 or more empty lines,
# replacing it with a single empty line
empty_lines = re.compile(r"(\s*?\n){2,}")
output = re.sub(empty_lines, "\n\n", output)
return output, dirty
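# --- Illustrative sketch (not part of the original module) ---
# How cleaned_contents collapses an already-expanded snippet back to its bare
# "// snip:" instruction line; the sample text and temp file are made up.
if __name__ == "__main__":
    import tempfile
    sample = ("// snip: my-tag\n"
              "[source,swift]\n"
              "----\n"
              "let x = 1\n"
              "----\n")
    with tempfile.NamedTemporaryFile("w", suffix=".asciidoc", delete=False) as handle:
        handle.write(sample)
    doc = SourceDocument(handle.name)
    print(doc.cleaned_contents)   # the expanded code block is stripped, "// snip: my-tag" stays
    os.remove(handle.name)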
| 35.435088 | 170 | 0.592138 | 1,233 | 10,099 | 4.707218 | 0.209246 | 0.024638 | 0.026361 | 0.01654 | 0.427636 | 0.391799 | 0.364921 | 0.356306 | 0.356306 | 0.340799 | 0 | 0.002939 | 0.326171 | 10,099 | 284 | 171 | 35.559859 | 0.849963 | 0.247648 | 0 | 0.253731 | 0 | 0 | 0.051057 | 0.013961 | 0 | 0 | 0 | 0 | 0.037313 | 1 | 0.059701 | false | 0 | 0.074627 | 0.007463 | 0.201493 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0a5c3fd2b17d5ab1a135bbe1cf07f52dd327cd8 | 4,173 | py | Python | middleware/legato/templates/legato_gfx_pda_tm4301b/Support_BSP_PIC32MZ_EF_Starter_Kit_MEB2.py | automaate/gfx3.8 | 55bf94302f00c8d513c84d910185cef2ca6b5be2 | [
"0BSD"
] | null | null | null | middleware/legato/templates/legato_gfx_pda_tm4301b/Support_BSP_PIC32MZ_EF_Starter_Kit_MEB2.py | automaate/gfx3.8 | 55bf94302f00c8d513c84d910185cef2ca6b5be2 | [
"0BSD"
] | null | null | null | middleware/legato/templates/legato_gfx_pda_tm4301b/Support_BSP_PIC32MZ_EF_Starter_Kit_MEB2.py | automaate/gfx3.8 | 55bf94302f00c8d513c84d910185cef2ca6b5be2 | [
"0BSD"
] | null | null | null | # coding: utf-8
##############################################################################
# Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
#
# Subject to your compliance with these terms, you may use Microchip software
# and any derivatives exclusively with Microchip products. It is your
# responsibility to comply with third party license terms applicable to your
# use of third party software (including open source software) that may
# accompany Microchip software.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
# PARTICULAR PURPOSE.
#
# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
# WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
# BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
# FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
# ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
# THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
##############################################################################
############ LCC + TOUCH I2C CONFIG ######################################################
bsp_pic32mzef_sk_meb_ActivateList = ["le_gfx_driver_lcc", "i2c_bb", "tmr2", "drv_i2c", "drv_i2c0", "core_timer", "sys_time", "ebi"]
bsp_pic32mzef_sk_meb_AutoConnectList = [["gfx_legato", "gfx_driver", "le_gfx_driver_lcc", "gfx_driver_lcc"],
["le_gfx_driver_lcc", "Graphics Display", "gfx_disp_pdatm4301b_480x272", "gfx_display"],
["drv_i2c_0", "drv_i2c_I2C_dependency", "i2c_bb", "I2C"],
["i2c_bb", "TMR", "tmr2", "TMR2_TMR"],
["gfx_maxtouch_controller", "i2c", "drv_i2c_0", "drv_i2c"],
["sys_time", "sys_time_TMR_dependency", "core_timer", "CORE_TIMER_TMR"],
["le_gfx_driver_lcc", "EBI_CS", "ebi", "ebi_cs0"]]
bsp_pic32mzef_sk_meb_PinConfig = [{"pin": 23, "name": "BSP_MAXTOUCH_CHG", "type": "GPIO", "direction": "In", "latch": "", "abcd": ""}, #RE8
{"pin": 26, "name": "GFX_DISP_INTF_PIN_DE", "type": "GPIO", "direction": "Out", "latch": "Low", "abcd": ""}, #RB4
{"pin": 35, "name": "GFX_DISP_INTF_PIN_HSYNC", "type": "GPIO", "direction": "Out", "latch": "Low", "abcd": ""}, #RB1
{"pin": 39, "name": "GFX_DISP_INTF_PIN_VSYNC", "type": "GPIO", "direction": "Out", "latch": "Low", "abcd": ""}, #RA9
{"pin": 57, "name": "GFX_DISP_INTF_PIN_BACKLIGHT", "type": "GPIO", "direction": "Out", "latch": "Low", "abcd": ""}, #RF13
{"pin": 117, "name": "GFX_DISP_INTF_PIN_RESET", "type": "GPIO", "direction": "Out", "latch": "High", "abcd": ""},
{"pin": 95, "name": "I2C_BB_SCL", "type": "GPIO", "direction": "In", "latch": "", "abcd": ""}, #RA14 - GPIO input for I2C BB SCL
{"pin": 96, "name": "I2C_BB_SDA", "type": "GPIO", "direction": "In", "latch": "", "abcd": ""}] #RA15 - GPIO input for I2C BB SDA
##################################################################################
def bsp_pic32mzef_sk_meb_EventHandler(event):
global pinConfigureFxn
if (event == "configure"):
#Override default pin configur function w/ PIC32M specific one
pinConfigureFxn = configurePinsPIC32M
try:
### Configure I2C BB driver
Database.setSymbolValue("i2c_bb", "I2C_CLOCK_SPEED", 50000, 1)
Database.setSymbolValue("i2c_bb", "I2CBB_SCL_PIN", 10, 1) #RA14
Database.setSymbolValue("i2c_bb", "I2CBB_SDA_PIN", 11, 1) #RA15
except:
return
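# --- Illustrative sketch (not part of the original BSP script) ---
# The pin table above is plain Python data; for example, the GPIO outputs that
# this display configuration drives can be listed like this. Uncalled helper,
# illustration only.
def _bsp_pic32mzef_sk_meb_output_pins():
    return [entry["name"] for entry in bsp_pic32mzef_sk_meb_PinConfig
            if entry["direction"] == "Out"]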
bsp_pic32mzef_sk_meb_DisplayInterfaceList = ["LCC"]
bsp_pic32mzef_sk_meb_obj = bspSupportObj(bsp_pic32mzef_sk_meb_PinConfig,
bsp_pic32mzef_sk_meb_ActivateList,
None,
bsp_pic32mzef_sk_meb_AutoConnectList,
bsp_pic32mzef_sk_meb_EventHandler)
addDisplayIntfSupport("BSP_PIC32MZ_EF_Starter_Kit", bsp_pic32mzef_sk_meb_DisplayInterfaceList)
addBSPSupport("BSP_PIC32MZ_EF_Starter_Kit", "LCC", bsp_pic32mzef_sk_meb_obj) | 63.227273 | 140 | 0.639588 | 524 | 4,173 | 4.830153 | 0.410305 | 0.056895 | 0.066377 | 0.080601 | 0.33465 | 0.101936 | 0.050573 | 0 | 0 | 0 | 0 | 0.034278 | 0.154086 | 4,173 | 66 | 141 | 63.227273 | 0.68272 | 0.319434 | 0 | 0 | 0 | 0 | 0.391875 | 0.099713 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0 | 0 | 0.058824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0a5fca6a56c6ea38565e62b6423e3ad0179eeac | 4,618 | py | Python | torchdet3d/losses/regression_losses.py | phi-wol/3d-object-detection.pytorch | 9437e289ba878da2dbf03e7e7d4d7ae1eb9da486 | [
"MIT"
] | 6 | 2021-06-10T11:53:24.000Z | 2022-03-31T19:34:59.000Z | torchdet3d/losses/regression_losses.py | phi-wol/3d-object-detection.pytorch | 9437e289ba878da2dbf03e7e7d4d7ae1eb9da486 | [
"MIT"
] | 6 | 2021-03-15T11:01:27.000Z | 2021-09-25T16:58:16.000Z | torchdet3d/losses/regression_losses.py | phi-wol/3d-object-detection.pytorch | 9437e289ba878da2dbf03e7e7d4d7ae1eb9da486 | [
"MIT"
] | 2 | 2021-07-29T08:05:54.000Z | 2022-02-22T16:14:06.000Z | import math
import torch
from torch.nn.modules.loss import _Loss
__all__ = ['DiagLoss', 'ADD_loss', 'WingLoss', 'LossManager']
class DiagLoss(_Loss):
__constants__ = ['reduction']
def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super().__init__(size_average, reduce, reduction)
self.l1_loss = torch.nn.SmoothL1Loss(beta=.4)
def forward(self, input_: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
diag_pr = compute_diag(input_)
diag_tr = compute_diag(target)
diag_diff = self.l1_loss(diag_pr, diag_tr)
return diag_diff
class ADD_loss(_Loss):
def forward(self, input_: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
# find distance between each point of the input and target. Sum it for each
# instance and mean it over all instances
return torch.mean(torch.sum(torch.linalg.norm(input_-target, dim=2), dim=1))
class WingLoss(_Loss):
def __init__(self, size_average=None, reduce=None, w=0.05, eps=2, reduction: str = 'mean') -> None:
super().__init__(size_average, reduce, reduction)
self.w = w
self.eps = eps
def forward(self, input_: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
wing_const = self.w - self.wing_core(self.w, self.w, self.eps)
loss = torch.abs(input_ - target)
loss[loss < self.w] = self.wing_core(loss[loss < self.w], self.w, self.eps)
loss[loss >= self.w] -= wing_const
# diag_dist = compute_diag(target)
# loss /= diag_dist.view(input_.size(0),1,1)
return torch.mean(loss)
@staticmethod
def wing_core(x, w, eps):
"""Calculates the wing function from https://arxiv.org/pdf/1711.06753.pdf"""
if isinstance(x, float):
return w*math.log(1. + x / eps)
return w*torch.log(1. + x / eps)
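# --- Illustrative sketch (not part of the original module) ---
# The wing loss is w * log(1 + |x| / eps) for |x| < w and |x| - C otherwise,
# with C = w - w * log(1 + w / eps) so the two branches meet at |x| = w.
# Uncalled helper with made-up tensor shapes, illustration only.
def _wing_loss_example():
    torch.manual_seed(0)
    criterion = WingLoss(w=0.05, eps=2)
    pred = torch.rand(4, 9, 2)      # e.g. 4 samples, 9 keypoints, (x, y)
    target = torch.rand(4, 9, 2)
    assert criterion(target, target).item() == 0.0   # perfect prediction -> zero loss
    return criterion(pred, target)                   # scalar loss tensor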
def compute_diag(input_: torch.Tensor):
x0 = torch.min(input_[:,:,0], dim=1).values
y0 = torch.min(input_[:,:,1], dim=1).values
x1 = torch.max(input_[:,:,0], dim=1).values
y1 = torch.max(input_[:,:,1], dim=1).values
diag = torch.sqrt((x1 - x0)**2 + (y1 - y0)**2)
return diag
class LossManager:
def __init__(self, criterions, coefficients, alwa):
self.reg_criterions, self.class_criterions = criterions
self.reg_coeffs, self.class_coeffs = coefficients
assert len(self.reg_coeffs) == len(self.reg_criterions)
assert len(self.class_coeffs) == len(self.class_criterions)
assert self.reg_criterions
self.use_alwa = alwa.use
if alwa.use:
assert self.class_criterions
assert self.reg_coeffs[0] == self.class_coeffs[0] == 1.
# init lambdas for alwa algorithm
self.lam_cls = alwa.lam_cls
self.lam_reg = alwa.lam_reg
self.s_cls = list()
self.s_reg = list()
self.C = alwa.C
self.alwa_version = 'ver_1' if alwa.compute_std else 'ver_2'
def parse_losses(self, pred_kp, gt_kp,
pred_cats, gt_cats, iter_):
class_loss = []
regress_loss = []
# compute losses
if self.class_criterions:
for k, cr in zip(self.class_coeffs, self.class_criterions):
class_loss.append(cr(pred_cats, gt_cats) * k)
else:
class_loss = torch.zeros(1, requires_grad=True)
for k, cr in zip(self.reg_coeffs, self.reg_criterions):
regress_loss.append(cr(pred_kp, gt_kp) * k)
reg_loss = sum(regress_loss)
cls_loss = sum(class_loss)
# compute alwa algo or just return sum of losses
if not self.use_alwa:
return sum(regress_loss) + sum(class_loss)
self.s_cls.append(self.lam_cls*cls_loss)
self.s_reg.append(self.lam_reg*reg_loss)
if iter_ % self.C == 0 and iter_ != 0:
cls_mean = torch.mean(torch.stack(self.s_cls))
cls_std = torch.std(torch.stack(self.s_cls))
reg_mean = torch.mean(torch.stack(self.s_reg))
reg_std = torch.std(torch.stack(self.s_reg))
self.s_cls.clear()
self.s_reg.clear()
if self.alwa_version == 'ver_1':
cls = cls_mean + cls_std
reg = reg_mean + reg_std
else:
cls = cls_mean
reg = reg_mean
if cls > reg:
self.lam_cls = (1 - (cls - reg)/cls).item()
print(f"classification coefficient changed : {self.lam_cls}")
return self.lam_reg * sum(regress_loss) + self.lam_cls * sum(class_loss)
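# --- Illustrative sketch (not part of the original module) ---
# Minimal LossManager usage with regression criteria only (ALWA disabled); the
# alwa config object is mocked with SimpleNamespace and all shapes are made up.
if __name__ == "__main__":
    from types import SimpleNamespace
    alwa = SimpleNamespace(use=False, lam_cls=1.0, lam_reg=1.0, C=100, compute_std=True)
    manager = LossManager(criterions=([ADD_loss(), WingLoss()], []),
                          coefficients=([1.0, 0.5], []),
                          alwa=alwa)
    pred_kp, gt_kp = torch.rand(4, 9, 2), torch.rand(4, 9, 2)
    total = manager.parse_losses(pred_kp, gt_kp, pred_cats=None, gt_cats=None, iter_=0)
    print(total)   # single scalar tensor combining both regression losses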
| 39.810345 | 103 | 0.61217 | 652 | 4,618 | 4.102761 | 0.211656 | 0.041122 | 0.020187 | 0.02243 | 0.288598 | 0.22729 | 0.19215 | 0.136075 | 0.109159 | 0.109159 | 0 | 0.01533 | 0.265483 | 4,618 | 115 | 104 | 40.156522 | 0.77329 | 0.076873 | 0 | 0.076923 | 0 | 0 | 0.027758 | 0 | 0 | 0 | 0 | 0 | 0.054945 | 1 | 0.098901 | false | 0 | 0.032967 | 0.010989 | 0.274725 | 0.010989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0aa15f7cbb7355fda96e634f5fd7046680597d8 | 474 | py | Python | example/model/tests/__init__.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 18 | 2015-04-07T14:28:39.000Z | 2020-02-08T14:03:38.000Z | example/model/tests/__init__.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 7 | 2016-10-05T05:14:06.000Z | 2021-05-20T02:07:22.000Z | example/model/tests/__init__.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 11 | 2015-12-15T09:49:39.000Z | 2021-09-06T18:38:21.000Z | # -*- coding: utf-8 -*-
from dp_tornado.engine.model import Model as dpModel
class TestsModel(dpModel):
def assert_tuple(self, a, b, comp=True):
if comp is True:
if list(a) != list(b):
print('A > %s' % list(a))
print('B > %s' % list(b))
assert(list(a) == list(b))
elif comp is False:
assert(list(a) == list(b))
else:
assert(list(a) == comp and list(b) == comp)
| 24.947368 | 55 | 0.489451 | 65 | 474 | 3.538462 | 0.461538 | 0.108696 | 0.117391 | 0.130435 | 0.13913 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003257 | 0.352321 | 474 | 18 | 56 | 26.333333 | 0.745928 | 0.044304 | 0 | 0.166667 | 0 | 0 | 0.026608 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0ab19e1a358fb99ef04574c61702c57419daa1d | 18,964 | py | Python | src/clarinet/train_student.py | roberthoenig/VQ-VAE-Speech | 3c537c17465bf59855f0b81d9265354f65016563 | [
"MIT"
] | 241 | 2019-03-27T09:08:14.000Z | 2022-03-12T07:19:01.000Z | src/clarinet/train_student.py | roberthoenig/VQ-VAE-Speech | 3c537c17465bf59855f0b81d9265354f65016563 | [
"MIT"
] | 5 | 2019-06-29T14:22:31.000Z | 2019-11-17T21:24:45.000Z | src/clarinet/train_student.py | roberthoenig/VQ-VAE-Speech | 3c537c17465bf59855f0b81d9265354f65016563 | [
"MIT"
] | 49 | 2019-05-27T07:43:27.000Z | 2022-03-21T16:37:05.000Z | #####################################################################################
# MIT License #
# #
# Copyright (C) 2018 Sungwon Kim #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
from clarinet.data import LJspeechDataset, collate_fn, collate_fn_synthesize
from clarinet.modules import ExponentialMovingAverage, KL_Loss, STFT
from clarinet.wavenet import Wavenet
from clarinet.wavenet_iaf import Wavenet_Student
import torch
from torch import optim
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.distributions.normal import Normal
import numpy as np
import librosa
import os
import argparse
import json
import time
def build_model():
model_t = Wavenet(out_channels=2,
num_blocks=args.num_blocks_t,
num_layers=args.num_layers_t,
residual_channels=args.residual_channels,
gate_channels=args.gate_channels,
skip_channels=args.skip_channels,
kernel_size=args.kernel_size,
cin_channels=args.cin_channels,
upsample_scales=[16, 16])
return model_t
def build_student():
model_s = Wavenet_Student(num_blocks_student=[1, 1, 1, 4],
num_layers=args.num_layers_s)
return model_s
def clone_as_averaged_model(model_s, ema):
assert ema is not None
averaged_model = build_student()
averaged_model.to(device)
averaged_model.load_state_dict(model_s.state_dict())
for name, param in averaged_model.named_parameters():
if name in ema.shadow:
            param.data = ema.shadow[name].clone()
return averaged_model
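# --- Illustrative sketch (not part of the original training script) ---
# Rough picture of the ExponentialMovingAverage interface as it is used in this
# file: register() seeds the shadow value, update() moves it towards the latest
# parameter, and clone_as_averaged_model() copies ema.shadow back into a model.
# Uncalled helper with made-up values, illustration only.
def _ema_usage_example(decay=0.9999):
    ema = ExponentialMovingAverage(decay)
    param = torch.zeros(3)
    ema.register("w", param.clone())
    for _ in range(5):
        param += 1.0                  # stand-in for an optimizer step
        ema.update("w", param)        # shadow drifts slowly towards param
    return ema.shadow["w"]            # the smoothed copy used at synthesis time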
def train(epoch, model_t, model_s, optimizer, ema):
global global_step
epoch_loss = 0.0
running_loss = [0.0, 0.0, 0.0, 0.0]
model_t.eval()
model_s.train()
start_time = time.time()
display_step = 100
for batch_idx, (x, y, c, _) in enumerate(train_loader):
global_step += 1
        if global_step in (200000, 400000, 600000):
            # PyTorch optimizer param groups keep the learning rate under the 'lr' key
            for param_group in optimizer.param_groups:
                param_group['lr'] *= 0.5
                state['learning_rate'] = param_group['lr']
x, y, c = x.to(device), y.to(device), c.to(device)
q_0 = Normal(x.new_zeros(x.size()), x.new_ones(x.size()))
z = q_0.sample()
optimizer.zero_grad()
c_up = model_t.upsample(c)
x_student, mu_s, logs_s = model_s(z, c_up) # q_T ~ N(mu_tot, logs_tot.exp_())
mu_logs_t = model_t(x_student, c)
if args.KL_type == 'pq':
loss_t, loss_KL, loss_reg = criterion_t(mu_logs_t[:, 0:1, :-1], mu_logs_t[:, 1:, :-1], mu_s, logs_s)
elif args.KL_type == 'qp':
loss_t, loss_KL, loss_reg = criterion_t(mu_s, logs_s, mu_logs_t[:, 0:1, :-1], mu_logs_t[:, 1:, :-1])
stft_student, _ = stft(x_student[:, :, 1:])
stft_truth, _ = stft(x[:, :, 1:])
loss_frame = criterion_frame(stft_student, stft_truth)
loss_tot = loss_t + loss_frame
loss_tot.backward()
nn.utils.clip_grad_norm_(model_s.parameters(), 10)
optimizer.step()
if ema is not None:
for name, param in model_s.named_parameters():
if name in ema.shadow:
ema.update(name, param.data)
running_loss[0] += loss_tot.item() / display_step
running_loss[1] += loss_KL.item() / display_step
running_loss[2] += loss_reg.item() / display_step
running_loss[3] += loss_frame.item() / display_step
epoch_loss += loss_tot.item()
if (batch_idx + 1) % display_step == 0:
end_time = time.time()
print('Global Step : {}, [{}, {}] [Total Loss, KL Loss, Reg Loss, Frame Loss] : {}'
.format(global_step, epoch, batch_idx + 1, np.array(running_loss)))
print('{} Step Time : {}'.format(display_step, end_time - start_time))
start_time = time.time()
running_loss = [0.0, 0.0, 0.0, 0.0]
del loss_tot, loss_frame, loss_KL, loss_reg, loss_t, x, y, c, c_up, stft_student, stft_truth, q_0, z
del x_student, mu_s, logs_s, mu_logs_t
print('{} Epoch Training Loss : {:.4f}'.format(epoch, epoch_loss / (len(train_loader))))
return epoch_loss / len(train_loader)
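# --- Illustrative sketch (not part of the original training script) ---
# The frame-level term above compares student and teacher audio in the STFT
# magnitude domain. This uncalled toy reproduces the idea with torch.stft and
# made-up waveforms instead of the project's STFT module.
def _frame_loss_example():
    x_true = torch.randn(2, 16000)                      # fake target audio
    x_gen = x_true + 0.01 * torch.randn_like(x_true)    # fake student output
    window = torch.hann_window(1024)
    mag_true = torch.stft(x_true, n_fft=1024, hop_length=256, window=window,
                          return_complex=True).abs()
    mag_gen = torch.stft(x_gen, n_fft=1024, hop_length=256, window=window,
                         return_complex=True).abs()
    return nn.MSELoss()(mag_gen, mag_true)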
def evaluate(model_t, model_s, ema=None):
if ema is not None:
model_s_ema = clone_as_averaged_model(model_s, ema)
model_t.eval()
model_s_ema.eval()
running_loss = [0., 0., 0., 0.]
epoch_loss = 0.
display_step = 100
for batch_idx, (x, y, c, _) in enumerate(test_loader):
x, y, c = x.to(device), y.to(device), c.to(device)
q_0 = Normal(x.new_zeros(x.size()), x.new_ones(x.size()))
z = q_0.sample()
c_up = model_t.upsample(c)
x_student, mu_s, logs_s = model_s_ema(z, c_up)
mu_logs_t = model_t(x_student, c)
if args.KL_type == 'pq':
loss_t, loss_KL, loss_reg = criterion_t(mu_logs_t[:, 0:1, :-1], mu_logs_t[:, 1:, :-1], mu_s, logs_s)
elif args.KL_type == 'qp':
loss_t, loss_KL, loss_reg = criterion_t(mu_s, logs_s, mu_logs_t[:, 0:1, :-1], mu_logs_t[:, 1:, :-1])
stft_student, _ = stft(x_student[:, :, 1:])
stft_truth, _ = stft(x[:, :, 1:])
loss_frame = criterion_frame(stft_student, stft_truth.detach())
loss_tot = loss_t + loss_frame
running_loss[0] += loss_tot.item() / display_step
running_loss[1] += loss_KL.item() / display_step
running_loss[2] += loss_reg.item() / display_step
running_loss[3] += loss_frame.item() / display_step
epoch_loss += loss_tot.item()
if (batch_idx + 1) % display_step == 0:
print('{} [Total, KL, Reg, Frame Loss] : {}'.format(batch_idx + 1, np.array(running_loss)))
running_loss = [0., 0., 0., 0.]
del loss_tot, loss_frame, loss_KL, loss_reg, loss_t, x, y, c, c_up, stft_student, stft_truth, q_0, z
del x_student, mu_s, logs_s, mu_logs_t
epoch_loss /= len(test_loader)
print('Evaluation Loss : {:.4f}'.format(epoch_loss))
del model_s_ema
return epoch_loss
def synthesize(model_t, model_s, ema=None):
global global_step
if ema is not None:
model_s_ema = clone_as_averaged_model(model_s, ema)
model_s_ema.eval()
for batch_idx, (x, y, c, _) in enumerate(synth_loader):
if batch_idx == 0:
x, c = x.to(device), c.to(device)
q_0 = Normal(x.new_zeros(x.size()), x.new_ones(x.size()))
z = q_0.sample()
wav_truth_name = '{}/{}/{}/generate_{}_{}_truth.wav'.format(args.sample_path, args.teacher_name,
args.model_name, global_step, batch_idx)
librosa.output.write_wav(wav_truth_name, y.squeeze().numpy(), sr=22050)
print('{} Saved!'.format(wav_truth_name))
torch.cuda.synchronize()
start_time = time.time()
c_up = model_t.upsample(c)
with torch.no_grad():
y_gen = model_s_ema.generate(z, c_up).squeeze()
torch.cuda.synchronize()
print('{} seconds'.format(time.time() - start_time))
wav = y_gen.to(torch.device("cpu")).data.numpy()
wav_name = '{}/{}/{}/generate_{}_{}.wav'.format(args.sample_path, args.teacher_name,
args.model_name, global_step, batch_idx)
librosa.output.write_wav(wav_name, wav, sr=22050)
print('{} Saved!'.format(wav_name))
del y_gen, wav, x, y, c, c_up, z, q_0
del model_s_ema
def save_checkpoint(model, optimizer, global_step, global_epoch, ema=None):
checkpoint_path = os.path.join(args.save, args.teacher_name, args.model_name, "checkpoint_step{:09d}.pth".format(global_step))
optimizer_state = optimizer.state_dict()
torch.save({"state_dict": model.state_dict(),
"optimizer": optimizer_state,
"global_step": global_step,
"global_epoch": global_epoch}, checkpoint_path)
if ema is not None:
averaged_model = clone_as_averaged_model(model, ema)
checkpoint_path = os.path.join(args.save, args.teacher_name, args.model_name, "checkpoint_step{:09d}_ema.pth".format(global_step))
torch.save({"state_dict": averaged_model.state_dict(),
"optimizer": optimizer_state,
"global_step": global_step,
"global_epoch": global_epoch}, checkpoint_path)
def load_checkpoint(step, model_s, optimizer, ema=None):
global global_step
global global_epoch
checkpoint_path = os.path.join(args.save, args.teacher_name, args.model_name, "checkpoint_step{:09d}.pth".format(step))
print("Load checkpoint from: {}".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path)
model_s.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
global_step = checkpoint["global_step"]
global_epoch = checkpoint["global_epoch"]
if ema is not None:
checkpoint_path = os.path.join(args.save, args.teacher_name, args.model_name, "checkpoint_step{:09d}_ema.pth".format(step))
checkpoint = torch.load(checkpoint_path)
averaged_model = build_student()
averaged_model.to(device)
averaged_model.load_state_dict(checkpoint["state_dict"])
for name, param in averaged_model.named_parameters():
if param.requires_grad:
ema.register(name, param.data)
return model_s, optimizer, ema
def load_teacher_checkpoint(path, model_t):
print("Load checkpoint from: {}".format(path))
checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
model_t.load_state_dict(checkpoint["state_dict"])
return model_t
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
np.set_printoptions(precision=4)
parser = argparse.ArgumentParser(description='Train WaveNet of LJSpeech',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_path', type=str, default='../DATASETS/ljspeech/', help='Dataset Path')
parser.add_argument('--sample_path', type=str, default='../samples', help='Sample Path')
parser.add_argument('--save', '-s', type=str, default='../params', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='../params', help='Checkpoint path to resume / test.')
parser.add_argument('--loss', type=str, default='../loss', help='Folder to save loss')
parser.add_argument('--log', type=str, default='../log', help='Log folder.')
parser.add_argument('--teacher_name', type=str, default='wavenet_gaussian_01', help='Model Name')
parser.add_argument('--model_name', type=str, default='wavenet_student_gaussian_01', help='Model Name')
parser.add_argument('--teacher_load_step', type=int, default=0, help='Teacher Load Step')
parser.add_argument('--load_step', type=int, default=0, help='Student Load Step')
parser.add_argument('--KL_type', type=str, default='qp', help='KL_pq vs KL_qp')
parser.add_argument('--epochs', '-e', type=int, default=1000, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=4, help='Batch size.')
parser.add_argument('--learning_rate', '-lr', type=float, default=1e-3, help='The Learning Rate.')
parser.add_argument('--ema_decay', type=float, default=0.9999, help='Exponential Moving Average Decay')
parser.add_argument('--num_blocks_t', type=int, default=4, help='Number of blocks (Teacher)')
parser.add_argument('--num_layers_t', type=int, default=6, help='Number of layers (Teacher)')
parser.add_argument('--num_layers_s', type=int, default=6, help='Number of layers (Student)')
parser.add_argument('--residual_channels', type=int, default=128, help='Residual Channels')
parser.add_argument('--gate_channels', type=int, default=256, help='Gate Channels')
parser.add_argument('--skip_channels', type=int, default=128, help='Skip Channels')
parser.add_argument('--kernel_size', type=int, default=3, help='Kernel Size')
parser.add_argument('--cin_channels', type=int, default=80, help='Cin Channels')
parser.add_argument('--num_workers', type=int, default=3, help='Number of workers')
args = parser.parse_args()
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
# Checkpoint dir
if not os.path.isdir(args.save):
os.makedirs(args.save)
if not os.path.isdir(args.loss):
os.makedirs(args.loss)
if not os.path.isdir(os.path.join(args.save, args.teacher_name)):
os.makedirs(os.path.join(args.save, args.teacher_name))
if not os.path.isdir(os.path.join(args.save, args.teacher_name, args.model_name)):
os.makedirs(os.path.join(args.save, args.teacher_name, args.model_name))
if not os.path.isdir(os.path.join(args.sample_path, args.teacher_name)):
os.makedirs(os.path.join(args.sample_path, args.teacher_name))
if not os.path.isdir(os.path.join(args.sample_path, args.teacher_name, args.model_name)):
os.makedirs(os.path.join(args.sample_path, args.teacher_name, args.model_name))
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# LOAD DATASETS
train_dataset = LJspeechDataset(args.data_path, True, 0.1)
test_dataset = LJspeechDataset(args.data_path, False, 0.1)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, collate_fn=collate_fn,
num_workers=args.num_workers, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=collate_fn,
num_workers=args.num_workers)
synth_loader = DataLoader(test_dataset, batch_size=1, collate_fn=collate_fn_synthesize,
num_workers=args.num_workers, pin_memory=True)
teacher_step = args.teacher_load_step
path = os.path.join(args.load, args.teacher_name, "checkpoint_step{:09d}_ema.pth".format(teacher_step))
model_t = build_model()
model_t = load_teacher_checkpoint(path, model_t)
model_s = build_student()
stft = STFT(filter_length=1024, hop_length=256)
model_t.to(device)
model_s.to(device)
stft.to(device)
optimizer = optim.Adam(model_s.parameters(), lr=args.learning_rate)
criterion_t = KL_Loss()
criterion_frame = nn.MSELoss()
ema = ExponentialMovingAverage(args.ema_decay)
for name, param in model_s.named_parameters():
if param.requires_grad:
ema.register(name, param.data)
for name, param in model_t.named_parameters():
if param.requires_grad:
param.requires_grad = False
global_step, global_epoch = 0, 0
load_step = args.load_step
log = open(os.path.join(args.log, '{}.txt'.format(args.model_name)), 'w')
state = {k: v for k, v in args._get_kwargs()}
if load_step == 0:
list_train_loss, list_loss = [], []
log.write(json.dumps(state) + '\n')
test_loss = 100.0
else:
model_s, optimizer, ema = load_checkpoint(load_step, model_s, optimizer, ema)
list_train_loss = np.load('{}/{}_train.npy'.format(args.loss, args.model_name)).tolist()
list_loss = np.load('{}/{}.npy'.format(args.loss, args.model_name)).tolist()
list_train_loss = list_train_loss[:global_epoch]
list_loss = list_loss[:global_epoch]
test_loss = np.min(list_loss)
for epoch in range(global_epoch + 1, args.epochs + 1):
training_epoch_loss = train(epoch, model_t, model_s, optimizer, ema)
with torch.no_grad():
test_epoch_loss = evaluate(model_t, model_s, ema)
state['training_loss'] = training_epoch_loss
state['eval_loss'] = test_epoch_loss
state['epoch'] = epoch
list_train_loss.append(training_epoch_loss)
list_loss.append(test_epoch_loss)
if test_loss > test_epoch_loss:
test_loss = test_epoch_loss
save_checkpoint(model_s, optimizer, global_step, epoch, ema)
print('Epoch {} Model Saved! Loss : {:.4f}'.format(epoch, test_loss))
synthesize(model_t, model_s, ema)
np.save('{}/{}_train.npy'.format(args.loss, args.model_name), list_train_loss)
np.save('{}/{}.npy'.format(args.loss, args.model_name), list_loss)
log.write('%s\n' % json.dumps(state))
log.flush()
print(state)
log.close()
| 46.940594 | 138 | 0.620755 | 2,564 | 18,964 | 4.333853 | 0.134165 | 0.018359 | 0.036717 | 0.017639 | 0.518089 | 0.445734 | 0.37653 | 0.358531 | 0.325504 | 0.305616 | 0 | 0.014016 | 0.247574 | 18,964 | 403 | 139 | 47.057072 | 0.764735 | 0.09576 | 0 | 0.353698 | 0 | 0.003215 | 0.102298 | 0.014471 | 0 | 0 | 0 | 0 | 0.003215 | 1 | 0.028939 | false | 0 | 0.048232 | 0 | 0.099678 | 0.041801 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0ae863ab9a27628499b3a102924f3d1a73f6ab0 | 5,647 | py | Python | _GTW/_OMP/_SWP/Picture.py | Tapyr/tapyr | 4235fba6dce169fe747cce4d17d88dcf4a3f9f1d | [
"BSD-3-Clause"
] | 6 | 2016-12-10T17:51:10.000Z | 2021-10-11T07:51:48.000Z | _GTW/_OMP/_SWP/Picture.py | Tapyr/tapyr | 4235fba6dce169fe747cce4d17d88dcf4a3f9f1d | [
"BSD-3-Clause"
] | null | null | null | _GTW/_OMP/_SWP/Picture.py | Tapyr/tapyr | 4235fba6dce169fe747cce4d17d88dcf4a3f9f1d | [
"BSD-3-Clause"
] | 3 | 2020-03-29T07:37:03.000Z | 2021-01-21T16:08:40.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2015 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
# This module is part of the package GTW.OMP.SWP.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# GTW.OMP.SWP.Picture
#
# Purpose
# Model a picture that can be displayed on a web page
#
# Revision Dates
# 22-Mar-2010 (CT) Creation
# 13-Oct-2010 (CT) `example` added
# 5-Sep-2011 (CT) `width.max_value` increased from 1000 to 1200
# 22-Sep-2011 (CT) s/C_Type/P_Type/ for _A_Composite_ attributes
# 18-Nov-2011 (CT) Import `unicode_literals` from `__future__`
# 30-Jan-2013 (MG) Make `extension` changeable, change min values for
# width and height
# 31-Jan-2013 (MG) change kind of `extension` to `Optional`
# 15-May-2013 (CT) Replace `auto_cache` by `rev_ref_attr_name`
# 22-May-2013 (CT) Change `max_value` of `height` and `width` to 1280
# 30-Oct-2013 (CT) Remove unnecessary `Picture.left.rev_ref_attr_name`
# 25-Nov-2015 (CT) Change `_Pic_.path` from `A_String` to `A_Text`
# * don't want a restrictive `max_length`
# ««revision-date»»···
#--
from _MOM.import_MOM import *
from _GTW import GTW
import _GTW._OMP._SWP.Gallery
from _MOM._Attr.A_2D import A_2D_Int, D2_Value_Int
from _TFL import sos
from _TFL.I18N import _, _T, _Tn
_Ancestor_Essence = D2_Value_Int
class _Pic_ (_Ancestor_Essence) :
"""Model a picture"""
class _Attributes (_Ancestor_Essence._Attributes) :
_Ancestor = _Ancestor_Essence._Attributes
class dir (A_String) :
"""Directory in gallery holding pictures."""
kind = Attr.Const
default = "im"
# end class dir
class extension (A_String) :
"""Extension of file holding picture."""
kind = Attr.Optional
Kind_Mixins = (Attr.Init_Only_Mixin, )
max_length = 10
default = ".jpg"
# end class extension
class height (_Ancestor.y) :
"""Height of picture."""
max_value = 1280
min_value = 200
# end class height
class path (A_Text) :
"""Path of file holding picture."""
kind = Attr.Computed
def computed (self, obj) :
owner = obj.owner
if owner :
p = sos.path.join \
(owner.gallery.directory, obj.dir, owner.name)
return p + obj.extension
# end def computed
# end class path
class width (_Ancestor.x) :
"""Width of picture."""
max_value = 1280
min_value = 200
# end class width
# end class _Attributes
# end class _Pic_
_Ancestor_Essence = _Pic_
class _Thumb_ (_Ancestor_Essence) :
"""Model a thumbnail of a picture."""
class _Attributes (_Ancestor_Essence._Attributes) :
_Ancestor = _Ancestor_Essence._Attributes
class dir (_Ancestor.dir) :
"""Directory in gallery holding thumbnails."""
default = "th"
example = "th"
# end class dir
class height (_Ancestor.height) :
max_value = 200
min_value = 50
# end class height
class width (_Ancestor.width) :
max_value = 200
min_value = 50
# end class width
# end class _Attributes
# end class _Thumb_
_Ancestor_Essence = GTW.OMP.SWP.Link1
class Picture (_Ancestor_Essence) :
"""Model a picture that can be displayed on a web page."""
class _Attributes (_Ancestor_Essence._Attributes) :
_Ancestor = _Ancestor_Essence._Attributes
### Primary attributes
class left (_Ancestor.left) :
"""Gallery to which this picture belongs."""
role_type = GTW.OMP.SWP.Gallery
# end class left
class number (A_Int) :
"""Number of picture in gallery."""
kind = Attr.Primary
check = ("value >= 0", )
# end class number
### Non-primary attributes
class name (A_String) :
kind = Attr.Optional
Kind_Mixins = (Attr.Computed_Set_Mixin, )
max_length = 100
def computed (self, obj) :
if obj.number is not None :
return "%4.4d" % obj.number
# end def computed
# end class name
class photo (A_2D_Int) :
"""Picture."""
kind = Attr.Necessary
P_Type = _Pic_
typ = "Picture"
# end class photo
class thumb (A_2D_Int) :
"""Thumbnail"""
kind = Attr.Necessary
P_Type = _Thumb_
typ = "Thumbnail"
# end class thumb
# end class _Attributes
# end class Picture
if __name__ != "__main__" :
GTW.OMP.SWP._Export ("*")
### __END__ GTW.OMP.SWP.Picture
| 27.149038 | 78 | 0.518682 | 615 | 5,647 | 4.538211 | 0.312195 | 0.054461 | 0.022573 | 0.022573 | 0.283053 | 0.229667 | 0.190971 | 0.190971 | 0.147976 | 0.12182 | 0 | 0.037731 | 0.366389 | 5,647 | 207 | 79 | 27.280193 | 0.740358 | 0.383212 | 0 | 0.28169 | 0 | 0 | 0.014903 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028169 | false | 0 | 0.084507 | 0 | 0.408451 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0b371551aa30a140c9ee0200648fe4590ea0606 | 2,432 | py | Python | util/common/readParams.py | xinglun/TestFramework | c262599cd563d1aed4ddf79a1860748fb498fdd4 | [
"MIT"
] | null | null | null | util/common/readParams.py | xinglun/TestFramework | c262599cd563d1aed4ddf79a1860748fb498fdd4 | [
"MIT"
] | null | null | null | util/common/readParams.py | xinglun/TestFramework | c262599cd563d1aed4ddf79a1860748fb498fdd4 | [
"MIT"
] | null | null | null | import re
from util.yaml.yaml_util import YamlUtil
from util.randomData import choiceData,getTime,randomInt,randomFloat,randomString
def read_param(value):
# re
int_list = re.findall('\\$randomInt\\(([0-9]*,[0-9]*?)\\)\\$', value)
string_list = re.findall('\\$randomString\\(([0-9]*?)\\)\\$', value)
float_list = re.findall("\\$randomFloat\\(([0-9]*,[0-9]*,[0-9]*)\\)\\$", value)
time_list = re.findall("\\$getTime\\(time_type=(.*?),layout=(.*?),unit=([0-9],[0-9],[0-9],[0-9],[0-9])\\)\\$", value)
choice_list = re.findall("\\$choiceData\\(((?!\\$Choice\\().*?)\\)\\$", value)
config_list = re.findall("\\$getConfigData\\((.*?)\\)\\$", value)
# init var
if len(int_list):
for i in int_list:
pattern = re.compile('\\$randomInt\\(' + i + '\\)\\$')
k = str(randomInt.random_int(i))
value = re.sub(pattern, k, value, count=1)
value = read_param(value)
elif len(string_list):
        # random string replacement
        for j in string_list:
            pattern = re.compile('\\$randomString\\(' + j + '\\)\\$')
k = randomString.random_string(j)
value = re.sub(pattern, k, value, count=1)
value = read_param(value)
elif len(float_list):
        # random float replacement
        for n in float_list:
            if len(n.split(",")) == 3:
                pattern = re.compile('\\$randomFloat\\(' + n + '\\)\\$')
k = str(randomFloat.random_float(n))
value = re.sub(pattern, k, value, count=1)
value = read_param(value)
elif len(time_list):
        # timestamp replacement
        for n in time_list:
            if len(n[0]) and len(n[1]):
                pattern = re.compile('\\$getTime\\(time_type=' + n[0] + ',layout=' + n[1] + ',unit=' + n[2] + '\\)\\$')
k = str(getTime.get_time(n[0], n[1], n[2]))
value = re.sub(pattern, k, value, count=1)
value = read_param(value)
elif len(choice_list):
        # choice data replacement
        for n in choice_list:
            pattern = re.compile('\\$choiceData\\(' + n + '\\)\\$')
k = str(choiceData.choice_data(n))
value = re.sub(pattern, k, value, count=1)
value = read_param(value)
else:
for n in config_list:
pattern = re.compile('\\$getConfigData\\(' + n + '\\)\\$')
k = YamlUtil().read_config_yaml_item(n)
print(k)
value = k
return value
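# --- Illustrative sketch (not part of the original module) ---
# Template tokens are written as $name(args)$ inside parameter strings; the
# findall patterns above pull out the arguments and read_param() substitutes
# the generated values recursively. Sample template made up for illustration.
if __name__ == "__main__":
    template = "user_$randomInt(1,10)$_$randomString(6)$"
    print(re.findall('\\$randomInt\\(([0-9]*,[0-9]*?)\\)\\$', template))    # ['1,10']
    print(re.findall('\\$randomString\\(([0-9]*?)\\)\\$', template))        # ['6']
    print(read_param(template))   # tokens replaced by generated random values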
| 39.868852 | 121 | 0.515625 | 303 | 2,432 | 4.026403 | 0.20462 | 0.018033 | 0.017213 | 0.022951 | 0.25 | 0.242623 | 0.242623 | 0.229508 | 0.221311 | 0.221311 | 0 | 0.020362 | 0.273026 | 2,432 | 60 | 122 | 40.533333 | 0.669683 | 0.017681 | 0 | 0.204082 | 0 | 0.020408 | 0.182773 | 0.12395 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0 | 0.061224 | 0 | 0.102041 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0b5158379c142f1483c71cf1930f5a16ca17ba6 | 9,367 | py | Python | rl/augmentations/augmentations.py | Luca96/carla-driving-rl-agent | 00ae9ec6dc61f82ecd19e96b6c1a5e1903911e62 | [
"MIT"
] | 26 | 2021-01-27T21:42:17.000Z | 2022-03-31T08:46:30.000Z | rl/augmentations/augmentations.py | Martje55555/carla-rl-agent | 0d38cc3080cab900f4eaa3cd4735918c5868103a | [
"MIT"
] | 9 | 2021-05-21T14:50:57.000Z | 2022-03-25T17:50:03.000Z | rl/augmentations/augmentations.py | Martje55555/carla-rl-agent | 0d38cc3080cab900f4eaa3cd4735918c5868103a | [
"MIT"
] | 10 | 2021-03-23T14:10:14.000Z | 2022-03-24T17:49:12.000Z | """Data augmentations based on tf's functions"""
import tensorflow as tf
from typing import Union, List, Tuple
from rl import utils
Size = Union[List[int], Tuple[int, ...], tf.TensorShape]
# -------------------------------------------------------------------------------------------------
# -- Geometric/Spatial Augmentations
# -------------------------------------------------------------------------------------------------
def tf_resize(image, size: Size):
return tf.image.resize(image, size)
def tf_crop(image, size: Size, resize=False, seed=None):
cropped = tf.image.random_crop(image, size, seed=seed)
if resize:
return tf_resize(cropped, size=image.shape[:2])
return cropped
def tf_flip(image, horizontal=True, vertical=False, seed=None):
if horizontal:
image = tf.image.random_flip_left_right(image, seed=seed)
if vertical:
image = tf.image.random_flip_up_down(image, seed=seed)
return image
def tf_quality(image, min_quality: int, max_quality: int, seed=None):
return tf.image.random_jpeg_quality(image, min_jpeg_quality=min_quality, max_jpeg_quality=max_quality, seed=seed)
@tf.function
def tf_cutout(image, size=5, seed=None):
cut_mask = tf.random.normal(shape=(size, size), seed=seed)
cut_mask = tf.where(condition=cut_mask == tf.reduce_max(cut_mask), x=0.0, y=1.0)
cut_mask = tf.stack((cut_mask,) * 3, axis=-1)
cut_mask = tf.image.resize([cut_mask], size=image.shape[:2],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)[0]
return image * cut_mask
@tf.function
def tf_cutout_batch(images, size=5, seed=None):
masks = []
for _ in range(images.shape[0]):
cut_mask = tf.random.normal(shape=(size, size), seed=seed)
cut_mask = tf.where(condition=cut_mask == tf.reduce_max(cut_mask), x=0.0, y=1.0)
cut_mask = tf.stack((cut_mask,) * 3, axis=-1)
masks.append(cut_mask)
masks = tf.stack(masks, axis=0)
# keep one resized mask per image; indexing [0] here would apply only the first mask to the whole batch
masks = tf.image.resize(masks, size=images.shape[1:3],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return images * masks
@tf.function
def tf_coarse_dropout(image, size=25, amount=0.1, seed=None):
drop_mask = tf.keras.backend.random_binomial((size, size), p=1.0 - amount, seed=seed)
drop_mask = tf.stack((drop_mask,) * 3, axis=-1)
drop_mask = tf.image.resize([drop_mask], size=image.shape[:2],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)[0]
return image * drop_mask
@tf.function
def tf_coarse_dropout_batch(images, size=25, amount=0.1, seed=None):
masks = []
for _ in range(images.shape[0]):
drop_mask = tf.keras.backend.random_binomial((size, size), p=1.0 - amount, seed=seed)
drop_mask = tf.stack((drop_mask,) * 3, axis=-1)
masks.append(drop_mask)
masks = tf.stack(masks, axis=0)
# keep one resized mask per image; indexing [0] here would apply only the first mask to the whole batch
masks = tf.image.resize(masks, size=images.shape[1:3],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return images * masks
def tf_rotate(image, degrees=90):
assert degrees % 90 == 0
return tf.image.rot90(image, k=degrees // 90)
# -------------------------------------------------------------------------------------------------
# -- Appearance Augmentations
# -------------------------------------------------------------------------------------------------
def tf_saturation(image, lower=0.5, upper=1.5, seed=None):
return tf.image.random_saturation(image, lower, upper, seed=seed)
def tf_contrast(image, lower=0.4, upper=1.6, seed=None):
return tf.image.random_contrast(image, lower, upper, seed=seed)
def tf_brightness(image, delta=0.75, seed=None):
return tf.image.random_brightness(image, max_delta=delta, seed=seed)
def tf_hue(image, delta=0.5, seed=None):
return tf.image.random_hue(image, max_delta=delta, seed=seed)
def tf_grayscale(rgb_image):
return tf.image.rgb_to_grayscale(rgb_image)
def tf_rgb(gray_image):
return tf.image.grayscale_to_rgb(gray_image)
@tf.function
def tf_gaussian_noise(image, amount=0.25, std=0.2, seed=None):
mask_select = tf.keras.backend.random_binomial(image.shape[:2], p=amount, seed=seed)
mask_select = tf.stack((mask_select,) * 3, axis=-1)
mask_noise = tf.random.normal(shape=image.shape, stddev=std, seed=seed)
mask_noise = tf.clip_by_value(mask_noise, 0.0, 1.0)
return image + (mask_select * mask_noise)
@tf.function
def tf_gaussian_noise_batch(images, amount=0.25, std=0.2, seed=None):
masks = []
for _ in range(images.shape[0]):
mask_select = tf.keras.backend.random_binomial(images.shape[1:3], p=amount, seed=seed)
mask_select = tf.stack((mask_select,) * 3, axis=-1)
mask_noise = tf.random.normal(shape=images.shape[1:], stddev=std, seed=seed)
masks.append(tf.clip_by_value(mask_select * mask_noise, 0.0, 1.0))
return images + tf.stack(masks, axis=0)
@tf.function
def tf_salt_and_pepper(image, amount=0.1, prob=0.5, seed=None):
# source: https://stackoverflow.com/questions/55653940/how-do-i-implement-salt-pepper-layer-in-keras
mask_select = tf.keras.backend.random_binomial(image.shape[:2], p=amount / 10, seed=seed)
mask_select = tf.stack((mask_select,) * 3, axis=-1)
mask_noise = tf.keras.backend.random_binomial(image.shape[:2], p=prob, seed=seed)
mask_noise = tf.stack((mask_noise,) * 3, axis=-1)
return image * (1 - mask_select) + mask_noise * mask_select
@tf.function
def tf_salt_and_pepper_batch(images, amount=0.1, prob=0.5, seed=None):
# source: https://stackoverflow.com/questions/55653940/how-do-i-implement-salt-pepper-layer-in-keras
masks_select = []
masks_noise = []
for _ in range(images.shape[0]):
mask_select = tf.keras.backend.random_binomial(images.shape[1:3], p=amount / 10, seed=seed)
mask_select = tf.stack((mask_select,) * 3, axis=-1)
masks_select.append(mask_select)
mask_noise = tf.keras.backend.random_binomial(images.shape[1:3], p=prob, seed=seed)
mask_noise = tf.stack((mask_noise,) * 3, axis=-1)
masks_noise.append(mask_noise)
mask_select = tf.stack(masks_select, axis=0)
mask_noise = tf.stack(masks_noise, axis=0)
return images * (1 - mask_select) + mask_noise * mask_select
@tf.function
def tf_gaussian_blur(image, size=5, std=0.25, seed=None):
# source: https://gist.github.com/blzq/c87d42f45a8c5a53f5b393e27b1f5319
gaussian_kernel = tf.random.normal(shape=(size, size, image.shape[-1], 1), mean=1.0, stddev=std, seed=seed)
if len(image.shape) == 3:
image = tf.expand_dims(image, axis=0)
image = tf.nn.depthwise_conv2d(image, gaussian_kernel, [1, 1, 1, 1], padding='SAME',
data_format='NHWC')[0]
else:
image = tf.nn.depthwise_conv2d(image, gaussian_kernel, [1, 1, 1, 1], padding='SAME',
data_format='NHWC')
return image
@tf.function
def tf_median_blur(image, size=5):
median_kernel = tf.ones((size, size, image.shape[-1], 1))
if len(image.shape) == 3:
image = tf.expand_dims(image, axis=0)
image = tf.nn.depthwise_conv2d(image, median_kernel, [1, 1, 1, 1], padding='SAME',
data_format='NHWC')[0]
else:
image = tf.nn.depthwise_conv2d(image, median_kernel, [1, 1, 1, 1], padding='SAME',
data_format='NHWC')
return image
@tf.function
def tf_multiply_channels(image, strength=1.0, seed=None):
"""Channel-wise multiplication of the given image by random scalars. The scalars are a
softmax-normalized vector scaled by `strength` (so they sum to `strength`); each scalar
multiplies an entire channel.
"""
assert len(image.shape) == 3
logits = tf.random.uniform(shape=(image.shape[2],), minval=-1, maxval=1, seed=seed)
alpha = tf.nn.softmax(logits) * strength
return tf_normalize(image * alpha)
@tf.function
def tf_sobel(image, grayscale=False, restore_depth=True, normalize=True):
"""Applies Sobel filtering"""
if grayscale:
depth = image.shape[2]
image = tf_grayscale(image)
image = tf.image.sobel_edges(tf.expand_dims(image, axis=0))
dx, dy = tf.unstack(image[0], axis=-1)
result = dx + dy
if grayscale and restore_depth:
result = tf_repeat_channels(result, n=depth)
if normalize:
return tf_normalize(result)
return result
# -------------------------------------------------------------------------------------------------
@tf.function
def tf_normalize(image, eps=utils.EPSILON):
"""Scales the given image in range [0.0, 1.0]"""
image -= tf.reduce_min(image)
image /= tf.reduce_max(image) + eps
return image
@tf.function
def tf_normalize_batch(images):
return tf.map_fn(fn=tf_normalize, elems=images)
def tf_chance(seed=None):
"""Use to get a single random number between 0 and 1"""
return tf.random.uniform(shape=(1,), minval=0.0, maxval=1.0, seed=seed)
@tf.function
def tf_repeat_channels(image, n=3):
if len(image.shape) == 2:
return tf.stack((image,) * n, axis=-1)
return tf.concat((image,) * n, axis=-1)
def tf_scale_shape(image, scale: Tuple[float, float]):
h, w, d = image.shape
return utils.to_int((h * scale[0], w * scale[1], d))
def tf_size(image):
return image.shape[:2]
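# Minimal usage sketch (assumption: `image` is a float32 HxWx3 tensor in [0, 1];
# only helpers defined above are used):
#
#   image = tf_flip(tf_crop(image, size=(64, 64, 3), resize=True))
#   image = tf_gaussian_noise(tf_contrast(image))
#   image = tf_normalize(image)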
| 33.33452 | 117 | 0.62891 | 1,352 | 9,367 | 4.210059 | 0.150148 | 0.025474 | 0.034259 | 0.039529 | 0.569396 | 0.527407 | 0.462755 | 0.421293 | 0.404076 | 0.382994 | 0 | 0.02987 | 0.185118 | 9,367 | 280 | 118 | 33.453571 | 0.715839 | 0.118288 | 0 | 0.388235 | 0 | 0 | 0.003897 | 0 | 0 | 0 | 0 | 0 | 0.011765 | 1 | 0.170588 | false | 0 | 0.017647 | 0.058824 | 0.376471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0b6251dcba60014a19dc34f004de3d228f64666 | 17,771 | py | Python | invsolve/measure.py | danassutula/biomech-inverse | 4ee415f181e815085660dfe722bd861c99da0cd9 | [
"MIT"
] | 2 | 2020-08-09T08:48:28.000Z | 2020-10-07T22:05:51.000Z | invsolve/measure.py | danassutula/biomech-inverse | 4ee415f181e815085660dfe722bd861c99da0cd9 | [
"MIT"
] | null | null | null | invsolve/measure.py | danassutula/biomech-inverse | 4ee415f181e815085660dfe722bd861c99da0cd9 | [
"MIT"
] | 1 | 2019-11-25T17:25:23.000Z | 2019-11-25T17:25:23.000Z | '''
For converting a sequence of measurements into a measurement expression.
'''
import numpy as np
from dolfin import Function
from dolfin import UserExpression
from matplotlib.tri import Triangulation
from matplotlib.tri import LinearTriInterpolator
SEQUENCE_TYPES = (tuple, list)
def make_measurement_setter_with_time_as_argument(*args):
'''Make a measurement setter function for expressions.
Parameters
----------
args : MeasurementExpressionBase
Measurement expressions.
Returns
-------
measurement_setter : function(t:float)
Measurement setting function.
'''
if not all(isinstance(arg, MeasurementExpressionBase) for arg in args):
raise TypeError('`args` must have base type `MeasurementExpressionBase`.')
def measurement_setter(t:float):
'''Set all measurements at time.'''
for arg in args: arg.at_time(t)
return measurement_setter
def make_measurement_setter_with_index_as_argument(*args):
'''Make a measurement setter function for expressions.
Parameters
----------
args : MeasurementExpressionBase
Measurement expressions.
Returns
-------
measurement_setter : function(i:int)
Measurement setting function.
'''
if not all(isinstance(arg, MeasurementExpressionBase) for arg in args):
raise TypeError('`args` must have base type `MeasurementExpressionBase`.')
def measurement_setter(i:int):
'''Set all measurements at index.'''
for arg in args: arg.at_index(i)
return measurement_setter
def measurement_expression(f_msr, t_msr=None, degree=None):
'''Return a suitable measurement expression for the type of parameters.
Parameters
----------
f_msr : sequence of dolfin.Function's or numpy.ndarray's
Sequence of measurement snapshots.
t_msr : a sequence of ascending values or a single value (optional)
Measurement times. Could be a sequence of values for the measurement
snapshots, or a sequence of two values for the first time and the
last time of the snapshots, or a single value for the last time.
'''
if not isinstance(f_msr, (list, tuple, np.ndarray)):
raise TypeError('Expecting parameter `f_msr` to be a '
'sequence of measurement snapshots.')
if all(isinstance(f_msr_i, Function) for f_msr_i in f_msr):
return MeasurementExpressionFromFunctions(f_msr, t_msr, degree= \
f_msr[0].ufl_element().degree() if degree is None else degree)
elif all(isinstance(f_msr_i, np.ndarray) for f_msr_i in f_msr):
return MeasurementExpressionFromArrays(f_msr, t_msr, degree=0)
elif all(isinstance(f_msr_i, (float, int)) for f_msr_i in f_msr):
return MeasurementExpressionFromScalars(f_msr, t_msr, degree=0)
else:
raise TypeError('Expected parameter `f_msr` to be a sequence of '
'either `dolfin.Function`s or `numpy.ndarray`s.')
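# Usage sketch (hedged): `u_snapshots` stands in for a list of dolfin Functions sampled
# between t=0 and t=1; the factory above picks the matching expression subclass.
#
#   u_msr = measurement_expression(u_snapshots, t_msr=(0.0, 1.0))
#   set_measurements = make_measurement_setter_with_time_as_argument(u_msr)
#   set_measurements(0.5)  # linearly interpolates every measurement to t = 0.5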
class MeasurementExpressionBase(UserExpression):
_msr_rtol = 1e-14
def __new__(cls, *args, **kwargs):
'''Must be extended by deriving class.'''
if 'degree' not in kwargs and 'element' not in kwargs:
raise TypeError('Require `degree` or `element` as keyword argument.')
self = super().__new__(cls)
self._ufl_shape = None
return self
def __init__(self, f_msr, t_msr=None, **kwargs):
'''Must be extended by deriving class.
Parameters
----------
f_msr : sequence of object's
Sequence of measurement snapshots.
t_msr : a sequence of ascending values or a single value (optional)
Measurement times. Could be a sequence of values for the measurement
snapshots, or a sequence of two values for the first time and the
last time of the snapshots, or a single value for the last time.
Keyword Parameters
------------------
degree : int
The `degree` must be given if no `element` is given.
element : dolfin.Element (optional)
The `element` must be given if no `degree` is given.
'''
# Must initialize base class
super().__init__(**kwargs)
n_msr = len(f_msr)
if t_msr is None:
t_msr = tuple(np.linspace(0, 1, n_msr, dtype=float))
elif not hasattr(t_msr, '__getitem__'):
t_msr = tuple(np.linspace(0, t_msr, n_msr, dtype=float))
else:
if not all(t_i < t_j for t_i, t_j in zip(t_msr[:-1], t_msr[1:])):
raise TypeError('Parameter `t_msr` must be an ascending sequence.')
if len(t_msr) == 2:
t_msr = tuple(np.linspace(t_msr[0], t_msr[1], n_msr, dtype=float))
elif len(t_msr) != n_msr:
raise TypeError('Parameter `t_msr` is incompatible with `f_msr`.')
if len(t_msr) > 1:
self._msr_atol = self._msr_rtol * (t_msr[-1] - t_msr[0])
else:
self._msr_atol = self._msr_rtol
self._msr_f_msr = f_msr if isinstance(f_msr, tuple) else tuple(f_msr)
self._msr_t_msr = t_msr if isinstance(t_msr, tuple) else tuple(t_msr)
self._msr_n_msr = n_msr
self._msr_f_cur = None
self._msr_t_cur = t_msr[0]
self._msr_i_cur = 0
def __repr__(self):
return f'<{self.__class__.__name__} at {hex(id(self))}>'
def _msr_index_from_time(self, t, i_start=0):
'''Find the index `i` that corresponds to the left of (or at) time `t`.
`i_start` can be specified to start the search around index `i_start`,
otherwise `i_start=0` and the search starts from the beginning.'''
while i_start < 0:
i_start += self._msr_n_msr
if t >= self._msr_t_msr[i_start]:
# search to the right of `i_start`
if t >= self._msr_t_msr[-2]: # edge case
return self._msr_n_msr-2
# NOTE: t >= self._msr_t_msr[i_start] and t < self._msr_t_msr[-2]
# hence, first lesser between `i_start+1` and `end-1`
return next(i for i, t_j in enumerate(
self._msr_t_msr[i_start+1:-1], i_start) if t < t_j)
else: # t < self._msr_t_msr[i_start]:
# search to the left of `i_start`
if t <= self._msr_t_msr[1]: # edge case
return 0
# NOTE: t < self._msr_t_msr[i_start] and t > self._msr_t_msr[1]
# hence, first greater between `i_start-1` and `0`
return next(i_start-i for i, t_j in enumerate(
self._msr_t_msr[i_start-1:0:-1], start=1) if t > t_j)
def _msr_index_and_weight_from_time(self, t):
'''Index and weight of the adjacent left measurement for time `t`.'''
if (t < self._msr_t_msr[0]-self._msr_atol or
t > self._msr_t_msr[-1]+self._msr_atol):
raise ValueError('Measurement time `t` out of range.')
i = self._msr_index_from_time(t, self._msr_i_cur)
assert (0 <= i < self._msr_n_msr-1), f'i = {i}'
w = (self._msr_t_msr[i+1]-t)/(self._msr_t_msr[i+1]-self._msr_t_msr[i])
assert (-self._msr_rtol < w < 1.0 + self._msr_rtol), f'w = {w}'
return i, w
@property
def n_msr(self):
'''Number of measurements.'''
return self._msr_n_msr
@property
def t_msr(self):
'''All measurement times.'''
return self._msr_t_msr
@property
def f_msr(self):
'''All measurement values.'''
return self._msr_f_msr
def at_index(self, i):
'''Set measurement at index.'''
raise NotImplementedError
def at_time(self, t):
'''Set measurement at time.'''
raise NotImplementedError
def get_index(self):
'''Current measurement index.'''
return self._msr_i_cur
def get_time(self):
'''Current measurement time.'''
return self._msr_t_cur
def get_value(self, copy=True):
'''Current measurement value.'''
raise NotImplementedError
def eval(self, value, x):
raise NotImplementedError
def value_shape(self):
return self._ufl_shape
class MeasurementExpressionFromFunctions(MeasurementExpressionBase):
def __new__(cls, f_msr, *args, **kwargs):
self = super().__new__(cls, **kwargs)
if not isinstance(f_msr, SEQUENCE_TYPES) or \
not all(isinstance(f, Function) for f in f_msr):
raise TypeError('Parameter `f_msr` must be a '
'sequence of `dolfin.Function`s.')
self._ufl_shape = f_msr[0].ufl_shape
return self
def __init__(self, f_msr, t_msr=None, **kwargs):
'''
Parameters
----------
f_msr : sequence of dolfin.Function's.
Sequence of measurement snapshots.
t_msr : a sequence of ascending values or a single value (optional)
Measurement times. Could be a sequence of values for the measurement
snapshots, or a sequence of two values for the first time and the
last time of the snapshots, or a single value for the last time.
Keyword Parameters
------------------
degree : int
The `degree` must be given if no `element` is given.
element : dolfin.Element (optional)
The `element` must be given if no `degree` is given.
'''
super().__init__(f_msr, t_msr, **kwargs)
self._msr_f_cur = Function.copy(f_msr[0], deepcopy=True)
def at_index(self, i):
'''Set measurement at index `i`.'''
if i < 0:
i += self._msr_n_msr
try:
self._msr_f_cur.vector()[:] = self._msr_f_msr[i].vector()
self._msr_i_cur, self._msr_t_cur = i, self._msr_t_msr[i]
except IndexError:
raise IndexError('Measurement index `i` out of range.')
return self
def at_time(self, t):
'''Set measurement at time `t`.'''
# Adjacent left measurement index and weight
i, w = self._msr_index_and_weight_from_time(t)
self._msr_f_cur.vector()[:] = self._msr_f_msr[i].vector()*w \
+ self._msr_f_msr[i+1].vector()*(1.0-w)
self._msr_t_cur = t
self._msr_i_cur = i
return self
def get_value(self, copy=True):
'''Current measurement value.'''
return self._msr_f_cur.copy(True) if copy else self._msr_f_cur
def eval(self, value, x):
self._msr_f_cur.eval(value, x)
class MeasurementExpressionFromArrays(MeasurementExpressionBase):
def __new__(cls, f_msr, *args, **kwargs):
self = super().__new__(cls, **kwargs)
if not hasattr(f_msr, '__getitem__') or \
not all(isinstance(f, np.ndarray) for f in f_msr):
raise TypeError('Parameter `f_msr` must be a '
'sequence of `numpy.ndarray`s.')
self._ufl_shape = f_msr[0].shape
return self
def __init__(self, f_msr, t_msr=None, **kwargs):
'''
Parameters
----------
f_msr : sequence of numpy.ndarray's.
Sequence of measurement snapshots.
t_msr : a sequence of ascending values or a single value (optional)
Measurement times. Could be a sequence of values for the measurement
snapshots, or a sequence of two values for the first time and the
last time of the snapshots, or a single value for the last time.
Keyword Parameters
------------------
degree : int
The `degree` must be given if no `element` is given.
element : dolfin.Element (optional)
The `element` must be given if no `degree` is given.
'''
super().__init__(f_msr, t_msr, **kwargs)
self._msr_f_cur = np.array(f_msr[0], float)
def at_index(self, i):
'''Set measurement at index `i`.'''
if i < 0:
i += self._msr_n_msr
try:
self._msr_f_cur[:] = self._msr_f_msr[i]
self._msr_t_cur = self._msr_t_msr[i]
self._msr_i_cur = i
except IndexError:
raise IndexError('Measurement index `i` out of range.')
return self
def at_time(self, t):
'''Set measurement at time `t` by linear interpolation.'''
# Adjacent left measurement index and weight
i, w = self._msr_index_and_weight_from_time(t)
self._msr_f_cur[:] = self._msr_f_msr[i]*w \
+ self._msr_f_msr[i+1]*(1.0-w)
self._msr_t_cur = t
self._msr_i_cur = i
return self
def get_value(self, copy=True):
'''Current measurement value.'''
return self._msr_f_cur.copy() if copy else self._msr_f_cur
def eval(self, value, x):
value[:] = self._msr_f_cur
class MeasurementExpressionFromScalars(MeasurementExpressionBase):
def __new__(cls, f_msr, *args, **kwargs):
self = super().__new__(cls, **kwargs)
if not hasattr(f_msr, '__getitem__') or \
not all(isinstance(f, (float, int)) for f in f_msr):
raise TypeError('Parameter `f_msr` must be a '
'sequence of float\'s or int\'s.')
self._ufl_shape = ()
return self
def __init__(self, f_msr, t_msr=None, **kwargs):
'''
Parameters
----------
f_msr : sequence of reals.
Sequence of measurement snapshots.
t_msr : a sequence of ascending values or a single value (optional)
Measurement times. Could be a sequence of values for the measurement
snapshots, or a sequence of two values for the first time and the
last time of the snapshots, or a single value for the last time.
Keyword Parameters
------------------
degree : int
The `degree` must be given if no `element` is given.
element : dolfin.Element (optional)
The `element` must be given if no `degree` is given.
'''
super().__init__(f_msr, t_msr, **kwargs)
self._msr_f_cur = f_msr[0]
def at_index(self, i):
'''Set measurement at index `i`.'''
if i < 0:
i += self._msr_n_msr
try:
self._msr_f_cur = self._msr_f_msr[i]
self._msr_t_cur = self._msr_t_msr[i]
self._msr_i_cur = i
except IndexError:
raise IndexError('Measurement index `i` out of range.')
return self
def at_time(self, t):
'''Set measurement at time `t` by linear interpolation.'''
# Adjacent left measurement index and weight
i, w = self._msr_index_and_weight_from_time(t)
self._msr_f_cur = self._msr_f_msr[i]*w \
+ self._msr_f_msr[i+1]*(1.0-w)
self._msr_t_cur = t
self._msr_i_cur = i
return self
def get_value(self, copy=True):
'''Current measurement value.'''
return self._msr_f_cur
def eval(self, value, x):
value[:] = self._msr_f_cur
# class MeasurementExpressionFromScatters(MeasurementExpressionFromArrays):
#
# def __new__(cls, x_msr, f_msr, *args, **kwargs):
# self = super().__new__(cls, f_msr, *args, **kwargs)
#
# if any(f_i.ndim != 2 for f_i in f_msr):
# raise TypeError('Parameter `f_msr` must contain 2D `numpy.ndarray`s.')
#
# self._ufl_shape = f_msr[0][0].shape
#
# return self
#
# def __init__(self, x_msr, f_msr, t_msr=None, **kwargs):
# '''
#
# Parameters
# ----------
# x_msr : numpy.ndarray (2D)
# Coordinates of measurement points.
# f_msr : sequence of numpy.ndarray's.
# Sequence of measurement snapshots.
# t_msr : a sequence of ascending values or a single value (optional)
# Measurement times. Could be a sequence of values for the measurement
# snapshots, or a sequence of two values for the first time and the
# last time of the snapshots, or a single value for the last time.
#
# Keyword Parameters
# ------------------
# degree : int
# The `degree` must be given if no `element` is given.
# element : dolfin.Element (optional)
# The `element` must be given if no `degree` is given.
#
# '''
#
# super().__init__(f_msr, t_msr, **kwargs)
# tri = Triangulation(x_msr[:,0], x_msr[:,1])
# self._msr_z_cur = np.empty((len(f_msr[0]),), float)
# self.interp = LinearTriInterpolator(tri, self._msr_z_cur)
#
#
# def __repr__(self):
# return f'<{self.__class__.__name__} at {hex(id(self))}>'
#
# def eval(self, value, x):
# for i in range(self._ufl_shape[0]):
# self._msr_z_cur[:] = self._msr_f_cur[:,i]
# value[i] = self.interp(*x).data
#
#
# def LinearInterpolator2D(self, xk, fk):
#
# self._xk = np.array(xk, order='C')
# self._fk = np.array(fk, order='F')
# self._zk = np.empty((len(fk),))
#
# tri = Triangulation(self._xk[:,0], self._xk[:,1])
# self.interpolator = LinearTriInterpolator(tri, self._zk)
#
# mesh = dolfin.Mesh()
# editor = dolfin.MeshEditor()
#
# editor.open(mesh, 'triangle', tdim=xk.shape[1], gdim=xk.shape[1])
# editor.init_vertices(len(self._xk))
# editor.init_cells(len(tri.triangles))
#
# for i, v_i in enumerate(self._xk):
# editor.add_vertex(i, v_i.tolist())
#
# for i, c_i in enumerate(tri.triangles):
# editor.add_cell(i, c_i.tolist())
#
# editor.close()
| 31.84767 | 84 | 0.595127 | 2,437 | 17,771 | 4.086172 | 0.087813 | 0.063266 | 0.0239 | 0.022093 | 0.681964 | 0.636373 | 0.600422 | 0.582045 | 0.562161 | 0.540972 | 0 | 0.006122 | 0.292274 | 17,771 | 557 | 85 | 31.904847 | 0.78564 | 0.402847 | 0 | 0.440191 | 0 | 0 | 0.084829 | 0.008411 | 0 | 0 | 0 | 0 | 0.009569 | 1 | 0.181818 | false | 0 | 0.023923 | 0.009569 | 0.37799 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0b7f2b80875da9580a1f340ffc38e5131c56a51 | 1,834 | py | Python | legacy/legacy_scripts/legacy/.ipynb_checkpoints/clean_all_data-checkpoint.py | tomkimpson/ML4L | ffa8360cb80df25bd6af4fa5cc39b42bd6f405cd | [
"MIT"
] | 1 | 2022-02-23T12:31:56.000Z | 2022-02-23T12:31:56.000Z | legacy/legacy_scripts/legacy/.ipynb_checkpoints/clean_all_data-checkpoint.py | tomkimpson/ML4L | ffa8360cb80df25bd6af4fa5cc39b42bd6f405cd | [
"MIT"
] | null | null | null | legacy/legacy_scripts/legacy/.ipynb_checkpoints/clean_all_data-checkpoint.py | tomkimpson/ML4L | ffa8360cb80df25bd6af4fa5cc39b42bd6f405cd | [
"MIT"
] | null | null | null | import pandas as pd
import xarray as xr
import numpy as np
import sys
from config import *
def index_level_dtypes(df):
return [f"{df.index.names[i]}: {df.index.get_level_values(n).dtype}"
for i, n in enumerate(df.index.names)]
def process_x_df(df):
#Convert longitude to the [-180, 180] range and set the index
df['latitude'] = np.round(df.index.get_level_values('latitude').values,3)
df['longitude'] = np.round((df.index.get_level_values('longitude').values +180.0) %360.0 - 180.0,3)
df['time'] = df.index.get_level_values('time').values
print ('X time:')
print (np.unique(df['time']))
df = df.set_index(['latitude', 'longitude','time'], drop=True)
return df.dropna()
def process_y_df(df):
#Reindex dfy via a linear shift
#---ATTENTION---!> We add a linear shift of 0.0250 such that the coordinates match between the X and Y data
# We need to clarify the proper way to deal with this. Perhaps some interpolation method?
df['latitude'] = np.round(df.index.get_level_values('y').values - 0.0250,3)
df['longitude'] = np.round(df.index.get_level_values('x').values - 0.0250,3)
print ('Y time:')
print (np.unique(df['time']))
df = df.set_index(['latitude', 'longitude','time'], drop=True)
selected_y_columns = ['LST_Day_CMG'] #only use these columns, drop the others
df = df[selected_y_columns]
return df.dropna()
#Load the data
cds_xarray = xr.open_dataset(data_root+"xdata.nc")
df_x = cds_xarray.to_dataframe()
df_y = pd.read_pickle(data_root+'modis.pkl')
#Process and clean the data
df_x_clean = process_x_df(df_x)
df_y_clean = process_y_df(df_y)
#Process the X data
df_merged = pd.merge(df_x_clean,df_y_clean,how='inner',left_index=True, right_index=True)
df_merged.to_pickle(data_root+"df_clean.pkl")
print (df_merged)
| 22.641975 | 111 | 0.685387 | 308 | 1,834 | 3.896104 | 0.340909 | 0.046667 | 0.05 | 0.075 | 0.271667 | 0.236667 | 0.236667 | 0.236667 | 0.236667 | 0.173333 | 0 | 0.021053 | 0.17121 | 1,834 | 80 | 112 | 22.925 | 0.768421 | 0.193021 | 0 | 0.181818 | 0 | 0 | 0.155693 | 0.024691 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.151515 | 0.030303 | 0.333333 | 0.151515 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0b7f2d97fdba8a6a8b62217d5ef8ab4b19d2f17 | 2,788 | py | Python | lib/sphinx_exhibit/_offset_annotator.py | anntzer/sphinx-exhibit | 5bdb0c41ef5bde3aea72b48e5aebe292696c53c1 | [
"MIT"
] | null | null | null | lib/sphinx_exhibit/_offset_annotator.py | anntzer/sphinx-exhibit | 5bdb0c41ef5bde3aea72b48e5aebe292696c53c1 | [
"MIT"
] | null | null | null | lib/sphinx_exhibit/_offset_annotator.py | anntzer/sphinx-exhibit | 5bdb0c41ef5bde3aea72b48e5aebe292696c53c1 | [
"MIT"
] | null | null | null | import ast
import bisect
import itertools
import tokenize
def iter_attribute_tokens(fname):
with open(fname, "rb") as file:
# The call to filter handles cases where an attribute access dot is at
# the end of a line and the attribute itself on the next one.
tokens = filter(lambda token: token.string != "\n",
tokenize.tokenize(file.readline))
for token in tokens:
if token.string == ".":
yield next(tokens) # Also catches submodule imports :/
def parse(fname, code_line_idxs):
attr_tokens = iter_attribute_tokens(fname)
with tokenize.open(fname) as file:
source = file.read()
lines = source.splitlines(keepends=True)
skipped_line_idxs = {*range(1, len(lines) + 1)}.difference(code_line_idxs)
for idx in skipped_line_idxs:
lines[idx - 1] = ""
line_start_offsets = [
0, *itertools.accumulate(len(line) for line in lines)]
def to_offset(lineno, col_offset):
return line_start_offsets[lineno - 1] + col_offset
class OffsetAnnotator(ast.NodeVisitor):
def visit_Name(self, node):
self.generic_visit(node)
# NOTE: For decorators, this will miss the "@" just before. This
# is taken into account at the annotation embedding stage.
# NOTE: Something funky is going on with whether @foo.bar is
# highlighted fully as a decorator or only partially...
node.offset = to_offset(node.lineno, node.col_offset)
def visit_Attribute(self, node):
self.generic_visit(node)
while True:
# Skip spurious ".foo" coming from submodule imports.
token = next(attr_tokens)
if node.attr == token.string:
break
node.offset = to_offset(*token.start)
# These are only necessary to handle fields in the order in which they
# appear in the source, rather than the order they appear in the node.
def visit_FunctionDef(self, node):
for expr in node.decorator_list:
self.visit(expr)
self.visit(node.args)
if node.returns:
self.visit(node.returns)
for stmt in node.body:
self.visit(stmt)
visit_AsyncFunctionDef = visit_FunctionDef
def visit_ClassDef(self, node):
for expr in node.decorator_list:
self.visit(expr)
for expr in node.bases:
self.visit(expr)
for keyword in node.keywords:
self.visit(keyword)
for stmt in node.body:
self.visit(stmt)
mod = ast.parse(source)
OffsetAnnotator().visit(mod)
return mod
| 35.74359 | 78 | 0.601506 | 347 | 2,788 | 4.73487 | 0.391931 | 0.043822 | 0.016433 | 0.023737 | 0.161899 | 0.127815 | 0.093731 | 0.093731 | 0.057212 | 0.057212 | 0 | 0.002633 | 0.318867 | 2,788 | 77 | 79 | 36.207792 | 0.862559 | 0.210187 | 0 | 0.2 | 0 | 0 | 0.002282 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0 | 0.072727 | 0.018182 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0ba3d67bb844650e0e8a241e604236780948e92 | 8,242 | py | Python | interface/github.py | ubclaunchpad/rocket2 | 7a4f05f46229d1c9a900aac1694b3d822f9d6b0f | [
"MIT"
] | 14 | 2019-01-20T21:54:36.000Z | 2021-10-09T21:06:23.000Z | interface/github.py | ubclaunchpad/rocket2 | 7a4f05f46229d1c9a900aac1694b3d822f9d6b0f | [
"MIT"
] | 510 | 2018-11-18T20:07:51.000Z | 2022-02-01T15:34:03.000Z | interface/github.py | ubclaunchpad/rocket2.0 | 7a4f05f46229d1c9a900aac1694b3d822f9d6b0f | [
"MIT"
] | 9 | 2019-08-20T16:57:21.000Z | 2021-05-04T12:51:47.000Z | """Utility classes for interacting with Github API via PyGithub."""
from github import Github, GithubException
from github.NamedUser import NamedUser
from github.Team import Team
from interface.exceptions.github import GithubAPIException
from interface.github_app import GithubAppInterface, \
DefaultGithubAppAuthFactory
from app.model import Team as ModelTeam
from typing import cast, List
from functools import wraps
import logging
def handle_github_error(func):
"""Github error handler that updates Github App API token if necessary."""
@wraps(func)
def wrapper(self, *arg, **kwargs):
try:
return func(self, *arg, **kwargs)
except GithubException as e:
logging.warning(f"GithubException raised with message {e.data}"
f" and error code {e.status}")
if e.status == 401:
logging.warning(
"Attempting to create new instance of pygithub interface")
self.github = self.github_factory.create()
logging.warning(
"Attempting to create new instance of organization object")
self.org = self.github.get_organization(self.org_name)
try:
return func(self, *arg, **kwargs)
except GithubException as e:
logging.error("Second attempt of using pygithub interface"
f" failed with message {e.data} and error "
f"code {e.status}")
raise GithubAPIException(e.data)
else:
logging.error(f"Unable to handle error code {e.status}")
raise GithubAPIException(e.data)
return wrapper
class DefaultGithubFactory:
"""Default factory for creating interface to Github API."""
def __init__(self, app_id: str, private_key: str):
"""
Init factory.
:param app_id: Github Apps ID
:param private_key: Private key provided by Github Apps registration
"""
self.auth = GithubAppInterface(
DefaultGithubAppAuthFactory(app_id, private_key))
self.github = Github
def create(self) -> Github:
"""Create instance of pygithub interface with Github Apps API token."""
logging.info("Creating new instance of pygithub interface")
return self.github(self.auth.create_api_token())
class GithubInterface:
"""Utility class for interacting with Github API."""
def __init__(self,
github_factory: DefaultGithubFactory,
org: str):
"""Initialize bot by creating Github object and get organization."""
logging.info("Creating rocket's Github interface")
self.org_name = org
self.github_factory = github_factory
self.github = github_factory.create()
try:
self.org = self.github.get_organization(org)
logging.info(f"Successfully fetched {org} Github organization")
except GithubException as e:
logging.error(f"Failed to fetch {org} Github organization with "
f"error message {e.data} and error code {e.status}")
raise GithubAPIException(e.data)
@handle_github_error
def org_add_member(self, username: str) -> str:
"""
Add or update the member with the given username in the organization.
If the user is already in the organization, don't do anything.
"""
user = cast(NamedUser, self.github.get_user(username))
if not self.org.has_in_members(user):
self.org.add_to_members(user, "member")
return str(user.id)
@handle_github_error
def org_add_admin(self, username: str):
"""Add member with given username as admin to organization."""
user = cast(NamedUser, self.github.get_user(username))
self.org.add_to_members(user, "admin")
@handle_github_error
def org_remove_member(self, username: str):
"""Remove member with given username from organization."""
user = cast(NamedUser, self.github.get_user(username))
self.org.remove_from_membership(user)
@handle_github_error
def org_has_member(self, username: str) -> bool:
"""Return true if user with username is member of organization."""
user = cast(NamedUser, self.github.get_user(username))
return cast(bool, self.org.has_in_members(user))
@handle_github_error
def org_get_team(self, id: int) -> Team:
"""Given Github team ID, return team from organization."""
return self.org.get_team(id)
@handle_github_error
def org_create_team(self, name: str) -> int:
"""
Create team with given name and add to organization.
:param name: name of team
:return: Github team ID
"""
team = self.org.create_team(name, privacy="closed")
return cast(int, team.id)
@handle_github_error
def org_delete_team(self, id: int):
"""Get team with given ID and delete it from organization."""
team = self.org_get_team(id)
team.delete()
@handle_github_error
def org_edit_team(self,
key: int,
name: str,
description: str = None):
"""
Get team with given ID and edit name and description.
:param key: team's Github ID
:param name: new team name
:param description: new team description
"""
team = self.org_get_team(key)
if description is not None:
team.edit(name, description)
else:
team.edit(name)
@handle_github_error
def org_get_teams(self) -> List[ModelTeam]:
"""Return array of teams associated with organization."""
teams = self.org.get_teams()
team_array = []
for team in teams:
# convert PaginatedList to List
team_model = ModelTeam(str(team.id), team.name, "")
team_model.members = set(str(user.id)
for user in
self.list_team_members(team.id))
team_array.append(team_model)
return team_array
# ---------------------------------------------------------------
# --------------- methods related to team members ---------------
# ---------------------------------------------------------------
@handle_github_error
def list_team_members(self, team_id: str) -> List[NamedUser]:
"""Return a list of users in the team of id team_id."""
team = self.org.get_team(int(team_id))
return list(team.get_members())
@handle_github_error
def get_team_member(self, username: str, team_id: str) -> NamedUser:
"""Return a team member with a username of username."""
try:
team = self.org.get_team(int(team_id))
team_members = team.get_members()
return next(member for member in team_members
if member.name == username)
except StopIteration:
raise GithubAPIException(
f"User \"{username}\" does not exist in team \"{team_id}\"!")
@handle_github_error
def add_team_member(self, username: str, team_id: str):
"""Add user with given username to team with id team_id."""
team = self.org.get_team(int(team_id))
new_member = cast(NamedUser, self.github.get_user(username))
team.add_membership(new_member)
@handle_github_error
def has_team_member(self, username: str, team_id: str) -> bool:
"""Check if team with team_id contains user with username."""
team = self.org.get_team(int(team_id))
member = cast(NamedUser, self.github.get_user(username))
return cast(bool, team.has_in_members(member))
@handle_github_error
def remove_team_member(self, username: str, team_id: str):
"""Remove user with given username from team with id team_id."""
team = self.org.get_team(int(team_id))
to_be_removed_member = cast(NamedUser, self.github.get_user(username))
team.remove_membership(to_be_removed_member)
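# Usage sketch (APP_ID, PRIVATE_KEY and the organization name are assumed to come from
# the deployment configuration; they are not defined in this module):
#
#   factory = DefaultGithubFactory(APP_ID, PRIVATE_KEY)
#   gh = GithubInterface(factory, "ubclaunchpad")
#   team_id = gh.org_create_team("rocket2")
#   gh.add_team_member("some-username", str(team_id))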
| 39.435407 | 79 | 0.609561 | 993 | 8,242 | 4.906344 | 0.149043 | 0.027094 | 0.05234 | 0.057471 | 0.385878 | 0.303161 | 0.222906 | 0.202997 | 0.117611 | 0.094622 | 0 | 0.000508 | 0.283184 | 8,242 | 208 | 80 | 39.625 | 0.824137 | 0.203349 | 0 | 0.282609 | 0 | 0 | 0.092232 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137681 | false | 0 | 0.065217 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0be5283bf687e9f7f7e680dad449395b66740f6 | 1,612 | py | Python | director/commands/celery.py | LiniusAustPty/celery-director | 5308c49e1f8502e244765025eb75b45bbe3c2d45 | [
"BSD-3-Clause"
] | null | null | null | director/commands/celery.py | LiniusAustPty/celery-director | 5308c49e1f8502e244765025eb75b45bbe3c2d45 | [
"BSD-3-Clause"
] | 4 | 2021-12-07T19:31:20.000Z | 2022-03-10T10:17:22.000Z | director/commands/celery.py | LiniusAustPty/celery-director | 5308c49e1f8502e244765025eb75b45bbe3c2d45 | [
"BSD-3-Clause"
] | null | null | null | import os
import click
from urllib.parse import urlparse
from director.context import pass_ctx
@click.group()
def celery():
"""Celery commands"""
@celery.command(name="beat", context_settings=dict(ignore_unknown_options=True))
@click.option("--dev", "dev_mode", default=False, is_flag=True, type=bool)
@click.argument("beat_args", nargs=-1, type=click.UNPROCESSED)
def beat(dev_mode, beat_args):
"""Start the beat instance"""
args = [
"celery",
"-A",
"director._auto:cel",
"beat",
]
if dev_mode:
args += [
"--loglevel",
"INFO",
]
args += list(beat_args)
os.execvp(args[0], args)
@celery.command("worker", context_settings=dict(ignore_unknown_options=True))
@click.option("--dev", "dev_mode", default=False, is_flag=True, type=bool)
@click.argument("worker_args", nargs=-1, type=click.UNPROCESSED)
def worker(dev_mode, worker_args):
"""Start a Celery worker instance"""
args = [
"celery",
"-A",
"director._auto:cel",
"worker",
]
if dev_mode:
args += [
"--loglevel",
"INFO",
]
args += list(worker_args)
os.execvp(args[0], args)
@celery.command(name="flower", context_settings=dict(ignore_unknown_options=True))
@click.argument("flower_args", nargs=-1, type=click.UNPROCESSED)
@pass_ctx
def flower(ctx, flower_args):
"""Start the flower instance"""
broker = ctx.app.config["CELERY_CONF"]["broker_url"]
args = ["celery", "flower", "-b", broker] + list(flower_args)
os.execvp(args[0], args[1:])
| 26 | 82 | 0.620968 | 202 | 1,612 | 4.79703 | 0.287129 | 0.043344 | 0.058824 | 0.077399 | 0.599587 | 0.599587 | 0.546956 | 0.408669 | 0.220846 | 0.220846 | 0 | 0.005512 | 0.212159 | 1,612 | 61 | 83 | 26.42623 | 0.75748 | 0.059553 | 0 | 0.425532 | 0 | 0 | 0.132441 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0.042553 | 0.085106 | 0 | 0.170213 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0c0cafb1abfa54ba047b3d42c073b1ab5bb4969 | 462 | py | Python | PyPoll/main.py | d-jenkins/python-challenge | e45d85d7d4df9543b522718e688784d5bcd1e354 | [
"MIT"
] | null | null | null | PyPoll/main.py | d-jenkins/python-challenge | e45d85d7d4df9543b522718e688784d5bcd1e354 | [
"MIT"
] | null | null | null | PyPoll/main.py | d-jenkins/python-challenge | e45d85d7d4df9543b522718e688784d5bcd1e354 | [
"MIT"
] | null | null | null | #modules
import os
import csv
csvpath = os.path.join('Resources', 'election_data.csv')
#Print header with separator
print('Election Results')
print("----------------------------")
#open csv
with open(csvpath) as csv_file:
#call csv reader
csv_reader = csv.reader(csv_file,delimiter=',')
csv_header = next(csv_reader)
total_votes = len(list(csv_reader))
print("Total Votes: ",
total_votes)
| 19.25 | 57 | 0.608225 | 57 | 462 | 4.77193 | 0.473684 | 0.165441 | 0.132353 | 0.132353 | 0.110294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.225108 | 462 | 23 | 58 | 20.086957 | 0.759777 | 0.142857 | 0 | 0 | 0 | 0 | 0.214286 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0c23188163e8da685c4778386fca45ff71610b9 | 1,985 | py | Python | raspi_shutdown_daemon.py | NAKKA-K/raspi-shutdown-daemon | df624134f968b5233e5abacca39fc339e1ad076b | [
"MIT"
] | null | null | null | raspi_shutdown_daemon.py | NAKKA-K/raspi-shutdown-daemon | df624134f968b5233e5abacca39fc339e1ad076b | [
"MIT"
] | null | null | null | raspi_shutdown_daemon.py | NAKKA-K/raspi-shutdown-daemon | df624134f968b5233e5abacca39fc339e1ad076b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
import RPi.GPIO as GPIO
import time
import pygame.mixer
import argparse
import tempfile
# Holding the switch for 5 seconds (25 polls x 0.2 s) shuts the Pi down
# The LED is lit while the switch is held
# default: LED1(PIN21) & SW1(PIN7)
def raspi_shutdown_unit(SW1=7, LED1=21):
GPIO.setmode(GPIO.BCM)
GPIO.setup(SW1, GPIO.IN)
GPIO.setup(LED1, GPIO.OUT)
cnt = 0
while 1:
if GPIO.input(SW1) == 1:
cnt += 1
else:
cnt = 0
if cnt >= 25:
info_shutdown_daemon('!!!!! This raspi shutdown !!!!!')
os.system('sudo shutdown -h now')
GPIO.output(LED1, cnt)
time.sleep(0.2)
# Logging for this daemon
def info_shutdown_daemon(info):
print(info)
args = get_flags()
if args.alert:
alert_message_at_shutdown("シャットダウンします")  # "Shutting down" (kept in Japanese for the open_jtalk voice)
# Parse command-line arguments
def get_flags():
parser = argparse.ArgumentParser()
parser.add_argument("--alert", help="Alert message at shutdown.", action="store_true")
return parser.parse_args()
def alert_message_at_shutdown(message):
temp_wav = '/tmp/temp_damemon.wav'
try:
make_alert_wav(temp_wav, message)
pygame.mixer.init()
pygame.mixer.music.load(temp_wav)
pygame.mixer.music.play()
finally:
os.remove(temp_wav)
def make_alert_wav(file_name, message):
jtalk_option="\
-m /usr/share/hts-voice/mei/mei_normal.htsvoice \
-x /var/lib/mecab/dic/open-jtalk/naist-jdic \
-ow {}".format(file_name)
os.system("echo {} | open_jtalk {}".format(message, jtalk_option))
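# For reference, the composed shell command looks roughly like:
#   echo <message> | open_jtalk -m /usr/share/hts-voice/mei/mei_normal.htsvoice \
#       -x /var/lib/mecab/dic/open-jtalk/naist-jdic -ow /tmp/temp_damemon.wav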
# Fork the process and exit the parent
def fork():
pid = os.fork()
if pid:
write_pid(pid)
sys.exit()
# Write the pid file
def write_pid(pid):
with open('/var/run/raspi_shutdown_daemon.pid', 'w') as pid_file:
pid_file.write(str(pid)+"\n")
# Start the daemon process
def daemon():
fork() # Exit the parent, orphaning the child process
os.setsid()
fork() # Exit the session leader so the daemon is fully detached
raspi_shutdown_unit()
if __name__ == '__main__':
daemon()
| 23.081395 | 90 | 0.63728 | 262 | 1,985 | 4.645038 | 0.473282 | 0.036154 | 0.034511 | 0.054232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016361 | 0.230227 | 1,985 | 85 | 91 | 23.352941 | 0.780105 | 0.104282 | 0 | 0.065574 | 0 | 0 | 0.109225 | 0.031126 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131148 | false | 0 | 0.114754 | 0 | 0.262295 | 0.016393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0c4d0d98821b73c57cf31ce6c154f663684fffa | 603 | py | Python | scapy_port_scanner.py | corentinmusard/scapy_port_scanner | 8c41c1c1f6bb1899222c49548d49eb9e01c41516 | [
"MIT"
] | 4 | 2017-10-31T17:39:51.000Z | 2018-08-21T18:37:43.000Z | scapy_port_scanner.py | corentinmusard/scapy_port_scanner | 8c41c1c1f6bb1899222c49548d49eb9e01c41516 | [
"MIT"
] | 2 | 2021-04-20T19:38:54.000Z | 2021-06-02T01:11:44.000Z | scapy_port_scanner.py | corentinmusard/scapy_port_scanner | 8c41c1c1f6bb1899222c49548d49eb9e01c41516 | [
"MIT"
] | 1 | 2018-07-21T21:58:33.000Z | 2018-07-21T21:58:33.000Z | #!/usr/bin/env python
"""
scapy_port_scanner.py
"""
from src import ScanFinder
def main() -> None:
"""Main function of scapy_port_scanner"""
scan_finder = ScanFinder()
scan_name = scan_finder.find_type()
if scan_name is None:
print("Scan's name not found.")
exit()
scan = scan_finder.get_scan(scan_name)
if scan is None:
print("Scan not found.")
exit()
scan.start() # Start the scan with thread
scan.join() # Wait the end of all thread
scan.info() # Print some info about the scan
if __name__ == "__main__":
main()
| 19.451613 | 49 | 0.620232 | 85 | 603 | 4.164706 | 0.482353 | 0.084746 | 0.090395 | 0.084746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.263682 | 603 | 30 | 50 | 20.1 | 0.797297 | 0.270315 | 0 | 0.125 | 0 | 0 | 0.107143 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0c523a76b4f98a35781377d46fb51baa9a01a0f | 2,387 | py | Python | optonome_V1/optonome.py | dyyyni/fluorometerCodes | 8f192ce656ba492e42cb80b6e6a655ea926418a4 | [
"MIT"
] | 1 | 2021-12-08T14:12:43.000Z | 2021-12-08T14:12:43.000Z | optonome_V1/optonome.py | dyyyni/fluorometerCodes | 8f192ce656ba492e42cb80b6e6a655ea926418a4 | [
"MIT"
] | null | null | null | optonome_V1/optonome.py | dyyyni/fluorometerCodes | 8f192ce656ba492e42cb80b6e6a655ea926418a4 | [
"MIT"
] | null | null | null | # globals
task = None
exit_flag = False
wrt_file = None
counts_prev = None
counts_now = None
interval = 1
n_measurements = 0
def clear_screen():
import os
os.system('cls')
return
def set_interval():
import sys
global interval
if sys.argv.__len__() < 2:
return
interval = float(sys.argv[1])
return
def prepare_file():
import os
import sys
global wrt_file
save_path = os.getcwd() + '\\12hsadfTesti'
sys.stdout.write('Saving data to \'' + save_path + '\'\nTo save data and exit the program hit Enter \nInitialising...')
wrt_file = open(save_path, 'w')
return
def start_device():
import sys
import nidaqmx as ni
global task
devices = ni.system.system.System.local().devices
if devices.__len__() < 1:
print('No NI device detected. Aborting program execution.')
sys.exit(1)
name = devices[0].name + '/ctr1'
if devices.__len__() > 1:
print('Multiple NI devices detected. Using device/channel \'' + name + '\'')
task = ni.Task('digital readout')
task.ci_channels.add_ci_count_edges_chan(name)
task.start()
return
def abort_acquisition():
global exit_flag
input()
exit_flag = True
return
def enable_user_input_abortion():
import threading
thread = threading.Thread(target=abort_acquisition)
thread.start()
return
def read_counts():
import time
global n_measurements
global counts_now
counts_now = task.ci_channels[0].ci_count
time.sleep(interval)
return
def write_counts():
import sys
import datetime
global n_measurements
global counts_prev
counts = counts_now - counts_prev
n_measurements += 1
sys.stdout.write('\r\033[KCounts at ' + '{:.2f}'.format(n_measurements * interval) + 's: ' + str(counts))
wrt_file.write(str(datetime.datetime.now()) + ' ' + str(counts) + '\n')
return
def stop_device():
task.stop()
task.close()
wrt_file.close()
return
def main():
global counts_prev
clear_screen()
set_interval()
prepare_file()
start_device()
enable_user_input_abortion()
while not exit_flag:
read_counts()
if counts_prev is not None:
write_counts()
counts_prev = counts_now
stop_device()
return
if __name__ == '__main__': main() | 16.692308 | 123 | 0.639715 | 306 | 2,387 | 4.738562 | 0.339869 | 0.055862 | 0.02069 | 0.017931 | 0.067586 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00954 | 0.253456 | 2,387 | 143 | 124 | 16.692308 | 0.804153 | 0.002933 | 0 | 0.238636 | 0 | 0 | 0.118537 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0 | 0.113636 | 0 | 0.352273 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0c8cd99962986b595c6a3341057ee8c726d9975 | 2,083 | py | Python | prep_train_data.py | Sinpex-GmbH/pygaggle | 9d0c18f5ececdb07e80270afb26d0805503961d3 | [
"Apache-2.0"
] | null | null | null | prep_train_data.py | Sinpex-GmbH/pygaggle | 9d0c18f5ececdb07e80270afb26d0805503961d3 | [
"Apache-2.0"
] | null | null | null | prep_train_data.py | Sinpex-GmbH/pygaggle | 9d0c18f5ececdb07e80270afb26d0805503961d3 | [
"Apache-2.0"
] | null | null | null | """
Data Prep
https://github.com/castorini/pygaggle/blob/master/docs/experiments-monot5-tpu.md
Gives an example dataset and shows how to convert it for T5 model training
"""
import json
import csv
fpath = "/Users/nikolettatoth/T5_ranking/pygaggle/test_files/labels_for_training_q5q6q17/train_ar_tf_q5q6q17_v1.json"
with open(fpath, "r") as infile:
train_data = json.load(infile)
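# The labels file is assumed to be SQuAD-style JSON (inferred from the lookups below):
#   {"data": [{"paragraphs": [{"context": "...", "qas": [{"question": "..."}]}]}]}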
collect_dict = {}
for label in train_data["data"]:
question = label["paragraphs"][0]["qas"][0]["question"]
context = label["paragraphs"][0]["context"]
if question in collect_dict:
list_pf_context = collect_dict[question]
else:
list_pf_context = []
list_pf_context.append(context)
collect_dict.update({question: list_pf_context})
question_par_pairs = []
for query, context_list in collect_dict.items():
# remove doubled question-paragraph pairs
context_list = list(set(context_list))
print(query + " " + str(len(context_list)))
for context_i in context_list:
question_par_pairs.append([query, context_i])
output_path = fpath.replace(".json", "_pairs.tsv")
# finally we have 505 query - context pairs
# What is the identification number of the company? 166
# Which commercial register is the company registered in? 124
# How much is the capital share? 215
"""
This script creates monoT5 input files for training.
Each line in the monoT5 input file follows the format:
f'Query: {query} Document: {document} Relevant:\t{label}\n')
"""
from tqdm import tqdm
# input file should be a tsv with the following lines:
# NOTE: we should add negative examples !!!!!!!!!!!
# <query> \t <positive_document> \t <negative_document>"
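# Example of one emitted line (illustrative document text; \t is a literal tab before the label):
#   Query: How much is the capital share? Document: The share capital amounts to EUR 25,000. Relevant:\ttrue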
with open(output_path, 'w') as fout_t5:
for item_id, item in enumerate(tqdm(question_par_pairs)):
# item = ['query', 'positive_context']
query, positive_document = item[0], item[1]
fout_t5.write(f'Query: {query} Document: {positive_document} Relevant:\ttrue\n')
#fout_t5.write(f'Query: {query} Document: {negative_document} Relevant:\tfalse\n')
print('Done!')
| 32.546875 | 117 | 0.713874 | 297 | 2,083 | 4.838384 | 0.457912 | 0.045929 | 0.036187 | 0.039666 | 0.041754 | 0.041754 | 0.041754 | 0 | 0 | 0 | 0 | 0.01954 | 0.164666 | 2,083 | 63 | 118 | 33.063492 | 0.806322 | 0.324532 | 0 | 0 | 0 | 0 | 0.194536 | 0.088576 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.107143 | 0 | 0.107143 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0c9f121e87959133c7bed647a70db229bdfd15d | 4,324 | py | Python | python/ray/serve/tests/storage_tests/test_kv_store.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | 21,382 | 2016-09-26T23:12:52.000Z | 2022-03-31T21:47:45.000Z | python/ray/serve/tests/storage_tests/test_kv_store.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | 19,689 | 2016-09-17T08:21:25.000Z | 2022-03-31T23:59:30.000Z | python/ray/serve/tests/storage_tests/test_kv_store.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | 4,114 | 2016-09-23T18:54:01.000Z | 2022-03-31T15:07:32.000Z | import os
import tempfile
import sys
from typing import Optional
import pytest
from ray.serve.constants import DEFAULT_CHECKPOINT_PATH
from ray.serve.storage.checkpoint_path import make_kv_store
from ray.serve.storage.kv_store import (RayInternalKVStore, RayLocalKVStore,
RayS3KVStore)
from ray.serve.storage.kv_store_base import KVStoreBase
def test_ray_internal_kv(serve_instance): # noqa: F811
with pytest.raises(TypeError):
RayInternalKVStore(namespace=1)
RayInternalKVStore(namespace=b"")
kv = RayInternalKVStore()
with pytest.raises(TypeError):
kv.put(1, b"1")
with pytest.raises(TypeError):
kv.put("1", 1)
with pytest.raises(TypeError):
kv.put("1", "1")
kv.put("1", b"2")
assert kv.get("1") == b"2"
kv.put("2", b"4")
assert kv.get("2") == b"4"
kv.put("1", b"3")
assert kv.get("1") == b"3"
assert kv.get("2") == b"4"
def test_ray_internal_kv_collisions(serve_instance): # noqa: F811
kv1 = RayInternalKVStore()
kv1.put("1", b"1")
assert kv1.get("1") == b"1"
kv2 = RayInternalKVStore("namespace")
assert kv2.get("1") is None
kv2.put("1", b"-1")
assert kv2.get("1") == b"-1"
assert kv1.get("1") == b"1"
def _test_operations(kv_store):
# Trival get & put
kv_store.put("1", b"1")
assert kv_store.get("1") == b"1"
kv_store.put("2", b"2")
assert kv_store.get("1") == b"1"
assert kv_store.get("2") == b"2"
# Overwrite same key
kv_store.put("1", b"-1")
assert kv_store.get("1") == b"-1"
# Get non-existing key
assert kv_store.get("3") is None
# Delete existing key
kv_store.delete("1")
kv_store.delete("2")
assert kv_store.get("1") is None
assert kv_store.get("2") is None
# Delete non-existing key
kv_store.delete("3")
def test_external_kv_local_disk():
kv_store = RayLocalKVStore(
"namespace", os.path.join(tempfile.gettempdir(), "test_kv_store.db"))
_test_operations(kv_store)
@pytest.mark.skip(reason="Need to figure out credentials for testing")
def test_external_kv_aws_s3():
kv_store = RayS3KVStore(
"namespace",
bucket="jiao-test",
s3_path="/checkpoint",
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID", None),
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY", None),
aws_session_token=os.environ.get("AWS_SESSION_TOKEN", None),
)
_test_operations(kv_store)
class MyNonCompliantStoreCls:
pass
class MyCustomStorageCls(KVStoreBase):
def __init__(self, namespace, **kwargs):
self.namespace = namespace
self.kwargs = kwargs
def delete(self, key: str) -> None:
return super().delete(key)
def get(self, key: str) -> Optional[bytes]:
return super().get(key)
def get_storage_key(self, key: str) -> str:
return super().get_storage_key(key)
def put(self, key: str, val: bytes) -> bool:
return super().put(key, val)
@pytest.mark.skipif(sys.platform == "win32", reason="Using tmp dir.")
def test_make_kv_store(serve_instance):
namespace = "ns"
assert isinstance(
make_kv_store(DEFAULT_CHECKPOINT_PATH, namespace), RayInternalKVStore)
assert isinstance(
make_kv_store("file:///tmp/deep/dir/my_path", namespace),
RayLocalKVStore)
assert isinstance(
make_kv_store("s3://object_store/my_path", namespace), RayS3KVStore)
with pytest.raises(ValueError, match="shouldn't be empty"):
# Empty path
make_kv_store("file://", namespace)
with pytest.raises(ValueError, match="must be one of"):
# Wrong prefix
make_kv_store("s4://some_path", namespace)
module_name = "ray.serve.tests.storage_tests.test_kv_store"
with pytest.raises(ValueError, match="doesn't inherit"):
make_kv_store(
f"custom://{module_name}.MyNonCompliantStoreCls",
namespace=namespace)
store = make_kv_store(
f"custom://{module_name}.MyCustomStorageCls?arg1=val1&arg2=val2",
namespace=namespace)
assert store.namespace == namespace
assert store.kwargs == {"arg1": "val1", "arg2": "val2"}
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| 28.077922 | 78 | 0.646392 | 591 | 4,324 | 4.529611 | 0.226734 | 0.081061 | 0.012327 | 0.041838 | 0.266716 | 0.150542 | 0.10721 | 0.061636 | 0.061636 | 0.023907 | 0 | 0.024354 | 0.211841 | 4,324 | 153 | 79 | 28.261438 | 0.76115 | 0.033765 | 0 | 0.17757 | 0 | 0 | 0.127639 | 0.053503 | 0 | 0 | 0 | 0 | 0.186916 | 1 | 0.102804 | false | 0.009346 | 0.093458 | 0.037383 | 0.252336 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0cbf7d65b4f209f86204912736057b7423145a0 | 7,258 | py | Python | lib/sqlalchemy_json_querybuilder/querybuilder/operators.py | suyash248/sqlalchemy-json-querybuilder | 3deefd8935ce49c484a4936a751e9ccb5eb574a6 | [
"MIT"
] | 32 | 2018-06-21T17:07:57.000Z | 2021-11-29T14:04:40.000Z | lib/sqlalchemy_json_querybuilder/querybuilder/operators.py | suyash248/sqlalchemy-json-querybuilder | 3deefd8935ce49c484a4936a751e9ccb5eb574a6 | [
"MIT"
] | null | null | null | lib/sqlalchemy_json_querybuilder/querybuilder/operators.py | suyash248/sqlalchemy-json-querybuilder | 3deefd8935ce49c484a4936a751e9ccb5eb574a6 | [
"MIT"
] | 10 | 2018-12-11T10:00:16.000Z | 2022-02-12T14:07:31.000Z | __author__ = "Suyash Soni"
__email__ = "suyash.soni248@gmail.com"
from ..commons.error_handlers.exceptions.exceptions import ExceptionBuilder, SqlAlchemyException
from ..constants.error_codes import ErrorCode
class OperatorEvaluator(object):
"""Represents an operator"""
def __init__(self, model_cls, field_name, field_value):
self.model_cls = model_cls
self.field_name = field_name
self.field_value = field_value
@staticmethod
def obj(operator_name, model_cls, field_name, field_value):
operator_name = operator_name.lower()
try:
op_eval_cls = __OPERATORS_MAPPING__[operator_name]
return op_eval_cls(model_cls, field_name, field_value)
        except KeyError:  # unknown/unsupported operator name
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_OPERATOR, operator_name,
message="Invalid operator: {}".format(operator_name)).throw()
def expr(self):
"""Evaluates criterion and returns expression to be used inside `model_cls.query.filter(*expressions)` method.
Concrete operator classes must override this method."""
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_OPERATOR, self.field_name,
message="Invalid operator").throw()
@property
def model_field(self):
try:
return getattr(self.model_cls, self.field_name)
        except AttributeError:  # `field_name` is not an attribute of `model_cls`
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_FIELD, self.field_name,
message="Couldn't find {} under {}".format(self.field_name, self.model_cls.__name__)).throw()
class __Equals__(OperatorEvaluator):
def expr(self):
return self.model_field == self.field_value
class __NotEquals__(OperatorEvaluator):
def expr(self):
return self.model_field != self.field_value
class __LessThan__(OperatorEvaluator):
def expr(self):
return self.model_field < self.field_value
class __LessThanEq__(OperatorEvaluator):
def expr(self):
return self.model_field <= self.field_value
class __GreaterThan__(OperatorEvaluator):
def expr(self):
return self.model_field > self.field_value
class __GreaterThanEq__(OperatorEvaluator):
def expr(self):
return self.model_field >= self.field_value
class __IN__(OperatorEvaluator):
def expr(self):
try:
iter(self.field_value)
except TypeError as te:
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_DATA_TYPE, self.field_name,
message="field_value must be iterable").throw()
return self.model_field.in_(self.field_value)
class __NotIn__(OperatorEvaluator):
def expr(self):
try:
iter(self.field_value)
except TypeError as te:
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_DATA_TYPE, self.field_name,
message="field_value must be iterable").throw()
return ~self.model_field.in_(self.field_value)
class __IsNull__(OperatorEvaluator):
def expr(self):
return self.model_field.is_(None)
class __IsNotNull__(OperatorEvaluator):
def expr(self):
return self.model_field.isnot(None)
class __Like__(OperatorEvaluator):
def expr(self):
return self.model_field.like(self.field_value)
class __ILike__(OperatorEvaluator):
def expr(self):
return self.model_field.ilike(self.field_value)
class __StartsWith__(__Like__):
def expr(self):
if type(self.field_value) == str:
self.field_value = self.field_value + '%'
return super(__StartsWith__, self).expr()
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_DATA_TYPE, self.field_name,
message="field_value must be string").throw()
class __IStartsWith__(__ILike__):
def expr(self):
if type(self.field_value) == str:
self.field_value = self.field_value + '%'
return super(__IStartsWith__, self).expr()
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_DATA_TYPE, self.field_name,
message="field_value must be string").throw()
class __EndsWith__(__Like__):
def expr(self):
if type(self.field_value) == str:
self.field_value = '%' + self.field_value
return super(__EndsWith__, self).expr()
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_DATA_TYPE, self.field_name,
message="field_value must be string").throw()
class __IEndsWith__(__ILike__):
def expr(self):
if type(self.field_value) == str:
self.field_value = '%' + self.field_value
return super(__IEndsWith__, self).expr()
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_DATA_TYPE, self.field_name,
message="field_value must be string").throw()
class __Contains__(__Like__):
def expr(self):
if type(self.field_value) == str:
self.field_value = '%' + self.field_value + '%'
return super(__Contains__, self).expr()
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_DATA_TYPE, self.field_name,
message="field_value must be string").throw()
class __IContains__(__ILike__):
def expr(self):
if type(self.field_value) == str:
self.field_value = '%' + self.field_value + '%'
return super(__IContains__, self).expr()
ExceptionBuilder(SqlAlchemyException).error(ErrorCode.INVALID_DATA_TYPE, self.field_name,
message="field_value must be string").throw()
class __Match__(OperatorEvaluator):
def expr(self):
return self.model_field.match(self.field_value)
class __Any__(OperatorEvaluator):
def expr(self):
return self.model_field.any(self.field_value)
class __Has__(OperatorEvaluator):
def expr(self):
return self.model_field.has(self.field_value)
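# Usage sketch (hedged): `Account` and `session` are hypothetical SQLAlchemy model and
# session objects; only OperatorEvaluator.obj() and .expr() come from this module.
#
#   criterion = OperatorEvaluator.obj('startswith', Account, 'name', 'Jo').expr()
#   rows = session.query(Account).filter(criterion).all()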
# Maps `operator_name` to the corresponding operator class.
__OPERATORS_MAPPING__ = {
'equals': __Equals__,
'eq': __Equals__,
'==': __Equals__,
'notequals': __NotEquals__,
'not_equals': __NotEquals__,
'ne': __NotEquals__,
'!=': __NotEquals__,
'~=': __NotEquals__,
'less_than': __LessThan__,
'lt': __LessThan__,
'<': __LessThan__,
'less_than_equals': __LessThanEq__,
'lte': __LessThanEq__,
'<=': __LessThanEq__,
'greater_than': __GreaterThan__,
'gt': __GreaterThan__,
'>': __GreaterThan__,
'greater_than_equals': __GreaterThanEq__,
'gte': __GreaterThanEq__,
'>=': __GreaterThanEq__,
'like': __Like__,
'ilike': __ILike__,
'startswith': __StartsWith__,
'istartswith': __IStartsWith__,
'endswith': __EndsWith__,
'iendswith': __IEndsWith__,
'contains': __Contains__,
'icontains': __IContains__,
'match': __Match__,
'in': __IN__,
'notin': __NotIn__,
'isnull': __IsNull__,
'isnotnull': __IsNotNull__,
'any': __Any__,
'has': __Has__
} | 35.753695 | 118 | 0.656793 | 750 | 7,258 | 5.786667 | 0.157333 | 0.097465 | 0.109677 | 0.096774 | 0.624424 | 0.614747 | 0.550922 | 0.550922 | 0.473502 | 0.473502 | 0 | 0.000543 | 0.239322 | 7,258 | 203 | 119 | 35.753695 | 0.785546 | 0.033067 | 0 | 0.37037 | 0 | 0 | 0.074286 | 0.003429 | 0 | 0 | 0 | 0 | 0 | 1 | 0.154321 | false | 0 | 0.012346 | 0.080247 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0cdfb201fe62710a14b49c8128fd0ea49481d0c | 1,018 | py | Python | admin.py | ax-rwnd/E-dot | b1b64fcce43c5d6f54dc38959498cdba95e757b1 | [
"BSD-3-Clause"
] | null | null | null | admin.py | ax-rwnd/E-dot | b1b64fcce43c5d6f54dc38959498cdba95e757b1 | [
"BSD-3-Clause"
] | 1 | 2015-12-05T02:04:35.000Z | 2015-12-11T02:47:28.000Z | admin.py | ax-rwnd/E-dot | b1b64fcce43c5d6f54dc38959498cdba95e757b1 | [
"BSD-3-Clause"
] | null | null | null | from flask import abort, g
from flask.ext.login import current_user
from access_levels import access
## Admin Table
# pk(user_id), level
# user_id - fk, int(11)
# level, unsigned int(4)
def test_access (uid, access):
db = getattr(g, 'db', None)
with db as cursor:
query = "select level from tbl_admin where user_id = %s;"
if cursor.execute(query, (uid,)) <= 0:
#That user has no clearance.
            return False
elif cursor.fetchone()[0] > access:
#That user has insufficient clearance.
            return False
else:
#That user has sufficient clearance.
return True
#check if current_user actually has access
def perimeter_check (access_str):
if not test_access(current_user.uid, access[access_str]):
abort(403)
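# Usage sketch (hedged): 'admin' is assumed to be a key of the imported `access` dict.
#   perimeter_check('admin')  # aborts the request with HTTP 403 unless current_user has clearance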
def admin_config (uid, newaccess):
db = getattr(g, 'db', None)
with db as cursor:
query = "insert into tbl_admin (user_id, level) values (\
(select id from tbl_user where id = %s), %s) on\
duplicate key update level=values(level);"
cursor.execute(query, (uid, newaccess))
| 25.45 | 59 | 0.704322 | 157 | 1,018 | 4.458599 | 0.414013 | 0.034286 | 0.047143 | 0.034286 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0 | 0.009627 | 0.183694 | 1,018 | 39 | 60 | 26.102564 | 0.832732 | 0.211198 | 0 | 0.26087 | 0 | 0 | 0.064313 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.130435 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0ce476bae592fd658b7dfce1f2bf351c00bb04e | 4,563 | py | Python | index.py | hongfs/python-douyin | 2cf63ce71158b03c0f08d873bcad1a5865ca8d03 | [
"MIT"
] | 165 | 2018-06-27T08:21:48.000Z | 2022-03-18T06:27:41.000Z | index.py | zhutieing/python-douyin | 2cf63ce71158b03c0f08d873bcad1a5865ca8d03 | [
"MIT"
] | 3 | 2018-09-09T23:30:39.000Z | 2019-01-11T12:16:59.000Z | index.py | zhutieing/python-douyin | 2cf63ce71158b03c0f08d873bcad1a5865ca8d03 | [
"MIT"
] | 104 | 2018-06-27T08:52:32.000Z | 2022-03-25T17:28:40.000Z | import requests, json, re, os, sys, time
from urllib.parse import urlparse
from contextlib import closing
class DY(object):
def __init__(self):
self.headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'
}
self.domain = ['www.douyin.com', 'v.douyin.com', 'www.snssdk.com', 'www.amemv.com', 'www.iesdouyin.com', 'aweme.snssdk.com']
def hello(self):
print('*' * 60)
        print('\t\tDouyin watermark-free video downloader')
        print('\tAuthor: hongfs (https://github.com/hongfs)')
print('*' * 60)
self.run()
def run(self):
        self.share_url = input('Please enter the share link: ')
# self.share_url = 'http://v.douyin.com/7CDeV/'
if not self.share_url:
return self.run()
self.share_url = self.getLocation()
share_url_parse = urlparse(self.share_url)
if not share_url_parse.scheme in ['http', 'https'] or not share_url_parse.netloc in self.domain:
return self.run()
uid = re.findall(r'\/share\/user\/(\d*)', share_url_parse.path)
if uid:
self.uid = uid[0]
else:
vid = re.findall(r'\/share\/video\/(\d*)', share_url_parse.path)
if vid:
self.getUid(self.share_url)
else:
                print('Unrecognized link, please open an issue')
return self.run()
self.count = 0
self.getUserData(self.uid)
def getLocation(self):
response = requests.get(self.share_url, headers=self.headers, allow_redirects=False)
if 'Location' in response.headers.keys():
return response.headers['Location']
else:
return self.share_url
def getUid(self, url):
response = requests.get(url, headers=self.headers)
if not response.status_code == 200:
return False
uid = re.findall(r'uid?: \"(\d*)"', response.text)
if uid:
self.uid = uid[0]
else:
return False
def getUserData(self, uid, cursor = 0):
url = 'https://www.douyin.com/aweme/v1/aweme/favorite/?user_id=%s&max_cursor=%s&count=35' % (uid, cursor)
response = requests.get(url, headers=self.headers)
if not response.status_code == 200:
            return print('Request failed')
data = response.json()
if 'status_code' not in data.keys():
            return print('Failed to fetch data')
        if len(data['aweme_list']) == 0:
            return print('\nDone')
self.nickname = data['aweme_list'][0]['author']['nickname']
if cursor == 0 and self.nickname not in os.listdir():
os.mkdir(self.nickname)
for item in data['aweme_list']:
if not 'video' in item.keys():
continue
if not self.nickname == item['author']['nickname']:
                return print('\nDone')
video_id = item['video']['play_addr']['uri']
video_name = item['desc'] if item['desc'] else video_id
for c in r'\/:*?"<>|/':
video_name = video_name.replace(c, '')
path = os.path.join(self.nickname, video_name) + '.mp4'
self.count = self.count + 1
            print('No. ' + str(self.count) + ':')
            if os.path.isfile(path):
                print(video_name + ' -- already exists')
                continue
            print(video_name + ' -- downloading')
self.download(video_id, path)
self.getUserData(self.uid, data['max_cursor'])
def download(self, vid, path):
time.sleep(1)
url = 'https://aweme.snssdk.com/aweme/v1/play/?video_id=%s&line=0' % str(vid)
with closing(requests.get(url, headers=self.headers, stream=True)) as response:
chunk_size = 1024
content_size = int(response.headers['content-length'])
if response.status_code == 200:
                print(' [File size]: %0.2f MB\n' % (content_size / chunk_size / 1024))
with open(r'' + path, 'wb') as file:
for data in response.iter_content(chunk_size = chunk_size):
file.write(data)
file.flush()
if __name__ == '__main__':
dy = DY()
dy.hello()
| 34.052239 | 163 | 0.550953 | 577 | 4,563 | 4.253033 | 0.313692 | 0.04238 | 0.03912 | 0.03423 | 0.103504 | 0.103504 | 0.07498 | 0.05868 | 0.05868 | 0.05868 | 0 | 0.02193 | 0.30046 | 4,563 | 133 | 164 | 34.308271 | 0.746867 | 0.009862 | 0 | 0.227723 | 0 | 0.039604 | 0.205492 | 0.029008 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069307 | false | 0 | 0.029703 | 0 | 0.217822 | 0.128713 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0cf8fc7ef6e693e4d36f40c69aaf2886cad2131 | 808 | py | Python | base/config.py | joeportela/tinyAPI | f2469c38a605b00519acd0b79af17d0041f5ae7b | [
"MIT"
] | 6 | 2016-11-18T22:32:44.000Z | 2021-04-01T17:02:13.000Z | base/config.py | joeportela/tinyAPI | f2469c38a605b00519acd0b79af17d0041f5ae7b | [
"MIT"
] | 1 | 2018-12-20T23:07:52.000Z | 2018-12-20T23:07:52.000Z | base/config.py | joeportela/tinyAPI | f2469c38a605b00519acd0b79af17d0041f5ae7b | [
"MIT"
] | 10 | 2018-02-23T00:08:21.000Z | 2020-10-01T03:06:12.000Z | # ----- Info ------------------------------------------------------------------
__author__ = 'Michael Montero <mcmontero@gmail.com>'
# ----- Imports ---------------------------------------------------------------
from .exception import ConfigurationException
import tinyAPI_config
__all__ = ['ConfigManager']
# ----- Public Classes --------------------------------------------------------
class ConfigManager(object):
'''Handles retrieval and validation of configuration settings.'''
@staticmethod
def value(key):
'''Retrieves the configuration value named by key.'''
if key in tinyAPI_config.values:
return tinyAPI_config.values[key]
else:
raise ConfigurationException(
'"' + key + '" is not configured in tinyAPI_config')
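# Usage sketch (hedged): 'db_host' is a hypothetical key that would have to exist in
# tinyAPI_config.values.
#
#   ConfigManager.value('db_host')   # returns the configured value
#   ConfigManager.value('missing')   # raises ConfigurationException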
| 31.076923 | 79 | 0.502475 | 61 | 808 | 6.459016 | 0.721311 | 0.13198 | 0.076142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.184406 | 808 | 25 | 80 | 32.32 | 0.597876 | 0.423267 | 0 | 0 | 0 | 0 | 0.19426 | 0.046358 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0d1a950996390512955a5f285ae00148e3d2ae1 | 5,735 | py | Python | snakewm/wm.py | Krobix/dioware | f9577eba58f26af6c609815b6d0ef706ffab044b | [
"MIT"
] | null | null | null | snakewm/wm.py | Krobix/dioware | f9577eba58f26af6c609815b6d0ef706ffab044b | [
"MIT"
] | null | null | null | snakewm/wm.py | Krobix/dioware | f9577eba58f26af6c609815b6d0ef706ffab044b | [
"MIT"
] | null | null | null | """
Snake Window Manager
"""
TESTMODE = __name__ == '__main__'
import os
import sys
import importlib
import pygame, pygame_gui
if TESTMODE:
from appmenu.appmenupanel import AppMenuPanel
else:
from snakewm.appmenu.appmenupanel import AppMenuPanel
class SnakeWM:
SCREEN = None
DIMS = None
BG = None
MANAGER = None
BG_COLOR = (0, 128, 128)
# currently focused window
FOCUS = None
# dict that will contain the apps directory structure
APPS = {}
# reference to the root app menu object
APPMENU = None
def __init__(self):
# populate the apps tree
apps_path = os.path.dirname(os.path.abspath(__file__)) + '/apps'
SnakeWM.iter_dir(self.APPS, apps_path)
pygame.init()
# initialize pygame to framebuffer
os.putenv('SDL_FBDEV', '/dev/fb0')
pygame.display.init()
# get screen dimensions
self.DIMS = (
pygame.display.Info().current_w,
pygame.display.Info().current_h
)
# init screen
self.SCREEN = pygame.display.set_mode(
self.DIMS,
pygame.FULLSCREEN
)
# init background
self.BG = pygame.Surface((self.DIMS))
self.BG.fill(self.BG_COLOR)
# init UI manager
self.MANAGER = pygame_gui.UIManager(self.DIMS)
pygame.mouse.set_visible(True)
pygame.display.update()
    @staticmethod
    def iter_dir(tree, path):
"""
Static function that recursively populates dict 'tree' with the
app directory structure starting at 'path'.
"""
for f in os.listdir(path):
if os.path.isfile(path + '/' + f + '/__init__.py'):
tree[f] = None
elif os.path.isdir(path + '/' + f):
tree[f] = {}
SnakeWM.iter_dir(tree[f], path + '/' + f)
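    # Resulting tree sketch (hedged; the app names are hypothetical):
    # {'clock': None, 'games': {'snake': None}} - app packages (directories with an
    # __init__.py) become leaves mapped to None, plain sub-directories become nested dicts.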
def loadapp(self, app, params=None):
"""
Load and run a Python module as an app (ie "apps.test.HelloWorld").
Apps are basically just Python packages. The loaded app package must
contain an __init__.py with a load() function that accepts a UIManager
parameter and a params list parameter.
The load() function should create an instance of the app to load and
add the app UI to the passed UIManager object. See existing apps for
examples.
"""
if not TESTMODE:
app = 'snakewm.' + app
_app = importlib.import_module(app)
_app.load(self.MANAGER, params)
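    # Minimal app package sketch (hedged; "hello" and HelloWorld are hypothetical,
    # following the contract described in the docstring above):
    #
    #   # apps/hello/__init__.py
    #   def load(manager, params):
    #       HelloWorld(manager)  # the app adds its own UI elements to the manager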
def appmenu_load(self, app):
"""
This function is passed to AppMenuPanel objects to be called when
an app is selected to be opened.
The root app menu is destroyed, and the app is loaded.
"""
if self.APPMENU is not None:
self.APPMENU.destroy()
self.APPMENU = None
self.loadapp(app)
def set_bg_color(self, color):
"""
Set the desktop background to 'color', where color is an RGB tuple.
"""
self.BG = pygame.Surface((self.DIMS))
self.BG_COLOR = color
self.BG.fill(self.BG_COLOR)
def set_bg_image(self, file):
"""
Sets the desktop background to an image.
"""
filename, file_extension = os.path.splitext(file)
if file_extension == ".jpg" or file_extension == ".png":
self.BG = pygame.transform.scale(pygame.image.load(file), self.DIMS)
def run(self):
clock = pygame.time.Clock()
running = True
while running:
delta = clock.tick(60) / 1000.0
pressed = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LSUPER:
if self.APPMENU is None:
# open app menu
self.APPMENU = AppMenuPanel(
self.MANAGER,
(0, 0),
'apps',
self.APPS,
self.appmenu_load
)
else:
# close app menu
self.APPMENU.destroy()
self.APPMENU = None
if pressed[pygame.K_LALT]:
if event.key == pygame.K_ESCAPE:
running = False
return pygame.quit()
elif event.type == pygame.USEREVENT:
if event.user_type == 'window_selected':
# focus selected window
if self.FOCUS is not None:
self.FOCUS.unfocus()
self.FOCUS = event.ui_element
self.FOCUS.focus()
elif event.user_type == pygame_gui.UI_COLOUR_PICKER_COLOUR_PICKED:
if event.ui_object_id == '#desktop_colour_picker':
# set desktop background color - no alpha channel
self.set_bg_color(event.colour[:-1])
elif event.user_type == pygame_gui.UI_FILE_DIALOG_PATH_PICKED:
if event.ui_object_id == '#background_picker':
self.set_bg_image(event.text)
self.MANAGER.process_events(event)
self.MANAGER.update(delta)
self.SCREEN.blit(self.BG, (0, 0))
self.MANAGER.draw_ui(self.SCREEN)
pygame.display.update()
if TESTMODE:
wm = SnakeWM()
wm.run()
| 31.510989 | 86 | 0.526417 | 636 | 5,735 | 4.61478 | 0.303459 | 0.018399 | 0.01431 | 0.025213 | 0.103578 | 0.091993 | 0.041567 | 0.022487 | 0 | 0 | 0 | 0.005682 | 0.386225 | 5,735 | 181 | 87 | 31.685083 | 0.828125 | 0.19599 | 0 | 0.132075 | 0 | 0 | 0.027248 | 0.004995 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066038 | false | 0 | 0.066038 | 0 | 0.226415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0d20e65dbb954b1d329d0b6c224a54a1e459a35 | 2,877 | py | Python | convert_dict_to_json.py | blackCheetah/tvshortcut.herokuapp.com | 923b130802351a3c716cc6ef5121cd6cb1b6175d | [
"MIT"
] | 6 | 2017-08-06T08:46:56.000Z | 2020-04-07T20:09:51.000Z | convert_dict_to_json.py | blackCheetah/tvshortcut.herokuapp.com | 923b130802351a3c716cc6ef5121cd6cb1b6175d | [
"MIT"
] | 3 | 2021-03-19T22:21:02.000Z | 2021-12-13T19:44:44.000Z | convert_dict_to_json.py | blackCheetah/tvshortcut.herokuapp.com | 923b130802351a3c716cc6ef5121cd6cb1b6175d | [
"MIT"
] | null | null | null | """
# Structure
{
"tvShows": [
        {
            "name": "Arrow",
            "shortcut": "arrow",
"new": ""
},
        {
            "name": "something",
            "shortcut": "something",
"new": ""
}
]
}
"""
import json
import os
tv_shows = {
"Arrow":
"arrow",
"Agents of S.H.I.E.L.D.":
"agents-of-shield",
"Better Call Saul":
"better-call-saul",
"Daredevil":
"daredevil",
"Fear the Walking Dead":
"fear-the-walking-dead",
"Game of Thrones":
"game-of-thrones",
"Gotham":
"gotham",
"Iron Fist":
"iron-fist",
"Jessica Jones":
"jessica-jones",
"Legends of Tomorrow":
"legends-of-tomorrow",
"Luke Cage":
"luke-cage",
"Marco Polo":
"marco-polo",
"Mr. Robot":
"mr-robot",
"Supergirl":
"supergirl",
"Vikings":
"vikings",
"The Flash":
"the-flash",
"The Walking Dead":
"walking-dead",
"Prison break":
"prison-break",
"American Gods":
"american-gods",
"Narcos":
"narcos",
"House of Cards":
"house-of-cards",
"Peaky Blinders":
"peaky-blinders",
"West World":
"westworld",
"Homeland":
"homeland",
"The 100":
"the-hundred",
"SouthPark":
"south-park",
"Defenders":
"defenders"
}
def create_a_json_file(location, file_name, json_data):
try:
with open(os.path.join(location, file_name), "w", encoding='utf-8') as output_file:
output_file.write(json_data)
#json.dump(json_data, output_file, indent=4, sort_keys=True, ensure_ascii=False)
except FileNotFoundError as fNot:
#except IOError as e:
print("Jezuz christ!!! File not found!! \n{0}".format(fNot))
new_tv_shows = {'tvShows' : []}
default = {"name": "", "shortcut": "", "new" : ""}
# Note: the same `default` dict object is appended on every iteration here; the
# json.dumps()/json.loads() round trip below is what turns those shared references
# into independent dicts, so the per-index assignments further down work as intended.
for iterator in range(0, len(tv_shows.items())):
    new_tv_shows.get('tvShows').append(default)
json_string = json.dumps(new_tv_shows)
jdict = json.loads(json_string)
iterator = 0
for key, value in tv_shows.items():
jdict["tvShows"][iterator]["name"] = key
jdict["tvShows"][iterator]["shortcut"] = value
jdict["tvShows"][iterator]["new"] = ""
iterator += 1
new_json_string = json.dumps(jdict, indent=4, sort_keys=True, ensure_ascii=False)
# print(new_json_string)
create_a_json_file("data", "tvshows.json", new_json_string)
def load_json(location, file_name):
with open(os.path.join(location + "/", file_name), "r", encoding='utf-8', errors='ignore') as output_file:
json_load_file = json.load(output_file)
#print(json_load_file)
return json_load_file
loaded_json = load_json("data", "tvshows.json")
for i in range(0, len(loaded_json['tvShows'])):
print(loaded_json['tvShows'][i]['name'] + ' ' + loaded_json['tvShows'][i]['shortcut'])
| 18.927632 | 110 | 0.576295 | 345 | 2,877 | 4.657971 | 0.373913 | 0.026136 | 0.039826 | 0.022402 | 0.085874 | 0.085874 | 0.085874 | 0.085874 | 0 | 0 | 0 | 0.005558 | 0.249566 | 2,877 | 151 | 111 | 19.05298 | 0.738768 | 0.140076 | 0 | 0 | 0 | 0 | 0.322096 | 0.00853 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023529 | false | 0 | 0.023529 | 0 | 0.058824 | 0.023529 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0d5ec44af0b7fd99da7def80b7579ceb6efab44 | 943 | py | Python | setup.py | expobrain/json-schema-codegen | e22b386333c6230e5d6f5984fd947fdd7b947e82 | [
"MIT"
] | 21 | 2018-06-15T16:08:57.000Z | 2022-02-11T16:16:11.000Z | setup.py | expobrain/json-schema-codegen | e22b386333c6230e5d6f5984fd947fdd7b947e82 | [
"MIT"
] | 14 | 2018-08-09T18:02:19.000Z | 2022-01-24T18:04:17.000Z | setup.py | expobrain/json-schema-codegen | e22b386333c6230e5d6f5984fd947fdd7b947e82 | [
"MIT"
] | 4 | 2018-11-30T18:19:10.000Z | 2021-11-18T04:04:36.000Z | import setuptools
from pkg_resources import parse_version
SETUPTOOLS_MIN_VER = "40.1.0"
if parse_version(setuptools.__version__) < parse_version(SETUPTOOLS_MIN_VER):
raise RuntimeError("setuptools minimum required version: %s" % SETUPTOOLS_MIN_VER)
from setuptools import setup, find_packages
setup(
name="json_codegen",
version="0.4.6",
keywords="python javascript json-schema codegen",
author="Daniele Esposti",
author_email="daniele.esposti@gmail.com",
url="https://github.com/expobrain/json-schema-codegen",
packages=find_packages(exclude=["tests", "tests.*"]),
entry_points={"console_scripts": ["json_codegen = json_codegen.cli:main"]},
python_requires=">=3",
license="MIT",
install_requires=["astor>=0.7.1", "setuptools>={}".format(SETUPTOOLS_MIN_VER)],
scripts=["bin/ast_to_js"],
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
)
| 34.925926 | 86 | 0.729586 | 120 | 943 | 5.466667 | 0.583333 | 0.079268 | 0.097561 | 0.07622 | 0.085366 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013285 | 0.121951 | 943 | 26 | 87 | 36.269231 | 0.778986 | 0 | 0 | 0 | 0 | 0 | 0.336161 | 0.04878 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.136364 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0d7fc7ede4a8bc9347dce8d8fc42b285d73ea63 | 13,142 | py | Python | cogs/musik/listener.py | noaione/naoTimes | 39f3f1ae434baf4ff9f3ed4a19cbfd69f76f881d | [
"MIT"
] | 5 | 2019-06-14T01:29:46.000Z | 2021-02-08T08:21:24.000Z | cogs/musik/listener.py | naoTimesdev/naoTimes | 39f3f1ae434baf4ff9f3ed4a19cbfd69f76f881d | [
"MIT"
] | 21 | 2021-03-26T08:31:45.000Z | 2022-03-26T10:15:25.000Z | cogs/musik/listener.py | noaione/naoTimes | 39f3f1ae434baf4ff9f3ed4a19cbfd69f76f881d | [
"MIT"
] | 4 | 2019-06-26T14:18:09.000Z | 2021-02-08T08:21:39.000Z | import asyncio
import logging
import random
import traceback
from typing import TYPE_CHECKING, Dict, List, Optional, Union
import arrow
import discord
import wavelink
from discord.backoff import ExponentialBackoff
from discord.ext import commands
try:
from sentry_sdk import push_scope
except ImportError:
pass
from naotimes.bot import naoTimesBot
from naotimes.music import TrackEntry, TrackRepeat
from naotimes.utils import quote
if TYPE_CHECKING:
from cogs.botbrain.error import BotBrainErrorHandler
VocalChannel = Union[discord.VoiceChannel, discord.StageChannel]
class MusikPlayerListener(commands.Cog):
def __init__(self, bot: naoTimesBot):
self.bot = bot
self.logger = logging.getLogger("MusicP.Listener")
self.error_backoff: Dict[str, ExponentialBackoff] = {}
def delay_next(self, guild_id: str):
guild_id = str(guild_id)
if guild_id not in self.error_backoff:
# Dont delay first try
self.error_backoff[guild_id] = ExponentialBackoff()
return None
delay = self.error_backoff[guild_id].delay()
return delay
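    # The first failure for a guild returns None (no delay); subsequent failures get an
    # exponentially growing delay from ExponentialBackoff until clean_delay() resets the state.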
def clean_delay(self, guild_id: int):
guild_id = str(guild_id)
if guild_id in self.error_backoff:
try:
del self.error_backoff[guild_id]
except KeyError:
pass
@commands.Cog.listener("on_wavelink_node_ready")
async def on_node_ready(self, node: wavelink.Node):
self.logger.info(f"Node: <{node.identifier}> [{node.region.name}] is ready!")
@commands.Cog.listener("on_wavelink_track_end")
async def on_track_end(self, player: wavelink.Player, track: wavelink.Track, reason: str):
ctime = self.bot.now().int_timestamp
current = self.bot.ntplayer.get(player)
current_track = player.source or track
if current.current:
current_track = current.current.track
node = player.node
track_title = None
if current_track:
track_title = current_track.title
self.logger.info(
f"Player: <{player.guild}> [{node.identifier}] track [{track_title}] has ended with: {reason}"
)
# Dispatch task
self.bot.loop.create_task(
self.bot.ntplayer.play_next(player),
name=f"naotimes-track-end-{player.guild.id}_{ctime}_{reason}",
)
@commands.Cog.listener("on_wavelink_track_exception")
async def on_track_exception(self, player: wavelink.Player, track: wavelink.Track, error: Exception):
node = player.node
real_track = player.source or track
self.logger.warning(
f"Player: <{player.guild}> [{node.identifier}] track [{real_track.title}] has exception: {error}"
)
vc_player = self.bot.ntplayer.get(player)
channel = None
determine_announce = True
# Determine if we should announce error
        # If the current position is within 5 seconds of the track's end, don't announce it.
if vc_player.current:
channel = vc_player.current.channel
cpos = player.position
duration = vc_player.current.track.duration
grace_period = duration - 5
if cpos >= grace_period:
determine_announce = False
await self._push_error_to_sentry(player, vc_player.current, error, "track-exc")
if channel and determine_announce:
try:
                await channel.send(
                    f"An error occurred while playing `{track.title}`, please contact the bot owner!"
                )
except (discord.Forbidden, discord.HTTPException):
pass
@commands.Cog.listener("on_wavelink_track_start")
async def on_track_start(self, player: wavelink.Player, track: wavelink.Track):
instance = self.bot.ntplayer.get(player)
self.clean_delay(player.guild.id)
# Temporary update the position.
last_update = arrow.utcnow().datetime
player.last_position = 1
player.last_update = last_update
current = instance.current
track = current.track
track_title = track.title
self.logger.info(
f"Player: <{player.guild}> [{player.node.identifier}]: <{track_title}> has started playing!"
)
embed = self.bot.ntplayer.generate_track_embed(current)
try:
await current.channel.send(embed=embed)
except (discord.Forbidden, discord.HTTPException):
pass
async def _dispatch_playback_next_later(
self, player: wavelink.Player, delay: Optional[float], ctime: int
):
self.logger.info(f"Player: Delaying playback of next track by {delay} seconds")
if delay:
await asyncio.sleep(delay)
self.bot.loop.create_task(
self.bot.ntplayer.play_next(player),
name=f"naotimes-playback-retries-{player.guild.id}_{ctime}_{delay}",
)
@commands.Cog.listener("on_naotimes_playback_failed")
async def on_playback_failed(self, player: wavelink.Player, entry: TrackEntry, exception: Exception):
ctime = self.bot.now().int_timestamp
instance = self.bot.ntplayer.get(player)
self.logger.warning(
f"Player: <{player.guild}> failed to play track: {entry.track}", exc_info=exception
)
delay_next = self.delay_next(player.guild.id)
if instance.repeat != TrackRepeat.single:
delay_next = None
        # Dispatch play_next again
self.bot.loop.create_task(
self._dispatch_playback_next_later(player, delay_next, ctime),
name=f"naotimes-playback-retries-delayed-{player.guild.id}_{ctime}_{str(exception)}",
)
channel = entry.channel
if channel:
            error_msg_delay = f"The track `{entry.track.title}` could not be played, the bot will skip it!"
            if delay_next:
                error_msg_delay += (
                    f"\nThe bot will try to play the next track in {round(delay_next, 2)} seconds"
                )
try:
await channel.send(error_msg_delay)
except (discord.Forbidden, discord.HTTPException, Exception):
pass
_do_not_log = (wavelink.errors.LoadTrackError, wavelink.errors.BuildTrackError)
if isinstance(exception, _do_not_log):
return
# Push to log channel
embed = discord.Embed(
title="🎵 Music Error Log",
colour=0xFF253E,
            description="An error occurred while trying to play music!",
timestamp=self.bot.now().datetime,
)
track = entry.track
_source = getattr(track, "source", "Unknown")
        track_info = f"**Title**: `{track.title}`\n**Artist**: `{track.author}`"
        track_info += f"\n**Link**: [Link]({track.uri})\n**Source**: `{_source}`"
        embed.add_field(name="Track", value=track_info, inline=False)
peladen_info = f"{player.guild.name} ({player.guild.id})"
author_info = f"{str(entry.requester)} ({entry.requester.id})"
        embed.add_field(
            name="Player", value=f"**Guild**: {peladen_info}\n**Requested by**: {author_info}", inline=False
        )
        error_info = [
            f"Track: {track.author} - {track.title}",
            f"URL: {track.uri} ({_source})",
            f"Guild: {peladen_info}",
            f"Requested by: {author_info}",
        ]
tb = traceback.format_exception(type(exception), exception, exception.__traceback__)
tb_fmt = "".join(tb).replace("`", "")
tb_fmt_quote = quote(tb_fmt, True, "py")
        full_pesan = "**An error occurred in the music player**\n\n"
full_pesan += quote("\n".join(error_info), True, "py") + "\n\n"
full_pesan += tb_fmt_quote
embed.add_field(name="Traceback", value=tb_fmt_quote, inline=False)
error_cog: BotBrainErrorHandler = self.bot.get_cog("BotBrainErrorHandler")
await error_cog._push_bot_log_or_cdn(embed, full_pesan)
await self._push_error_to_sentry(player, entry, exception)
async def _push_error_to_sentry(
self, player: wavelink.Player, track: TrackEntry, e: Exception, handler: str = "playback"
):
if self.bot._use_sentry:
with push_scope() as scope:
scope.user = {
"id": track.requester.id,
"username": str(track.requester),
}
scope.set_tag("cog", "music-backend")
scope.set_tag("command", f"music-{handler}-handler")
track_src = getattr(track.track, "source", "Unknown")
scope.set_context(
"track",
{
"title": track.track.title,
"artist": track.track.author,
"source": track_src,
"link": track.track.uri,
},
)
scope.set_tag("command_type", "music")
scope.set_tag("guild_id", str(player.guild.id))
scope.set_tag("channel_id", str(player.channel.id))
self.logger.error(
f"Player: <{player.guild}> failed to play track: <{track.track}>", exc_info=e
)
def _select_members(
self, members: List[discord.Member], id_check: int = None
) -> Optional[discord.Member]:
# Select one member
# Use priority, so if the member an admin, pick them
# then check if they have specific permissions
# if none of them match, get random person.
administrator = []
moderators = []
normal_members = []
for member in members:
if member.bot:
continue
if member.id == id_check:
continue
if member.guild_permissions.administrator:
administrator.append(member)
elif member.guild_permissions.manage_guild:
moderators.append(member)
else:
normal_members.append(member)
if administrator:
return random.choice(administrator)
if moderators:
return random.choice(moderators)
if not normal_members:
# Mark no delegate, if someone joined, mark them as
# the new delegation later.
return None
return random.choice(normal_members)
async def _delegate_on_bot_new_channel(self, guild: discord.Guild, after_channel: Optional[VocalChannel]):
if after_channel is None:
self.logger.info(f"Player: Bot got kicked from <{guild.id}> VC, deleting queue...")
self.bot.ntplayer.delete(guild)
return
vc_members = after_channel.members
delegated = self._select_members(vc_members)
if delegated is None:
self.logger.info(f"Player<{guild.id}>: No delegate found, no one to delegate to.")
self.bot.ntplayer.change_dj(guild, None)
return
self.bot.ntplayer.change_dj(guild, delegated)
@commands.Cog.listener("on_voice_state_update")
async def _auto_voice_delegation(
self,
member: discord.Member,
before: discord.VoiceState,
after: discord.VoiceState,
):
"""Automatically delegate the DJ of the current music player"""
guild = member.guild
has_instance = self.bot.ntplayer.has(guild)
if not has_instance:
return
if member.id == self.bot.user.id:
return await self._delegate_on_bot_new_channel(guild, after.channel)
if member.bot:
return
vc_check = guild.voice_client
if not vc_check:
self.bot.ntplayer.delete(guild)
return
instance = self.bot.ntplayer.get(guild)
if instance.host is None:
self.logger.info(f"Player: <{guild.id}> no host set, using <{member}> as host")
self.bot.ntplayer.change_dj(guild, member)
return
if instance.host.id != member.id:
return
if before.channel is not None and before.channel.id == instance.channel.id:
if after.channel is None or after.channel.id != instance.channel.id:
channel = instance.channel
self.logger.info(f"Player: <{guild.id}> host left VC, trying to delegate...")
new_host = self._select_members(channel.members, member.id)
if new_host is None:
# No one to delegate to, mark as none while we wait for a new one.
self.logger.info(f"Player: <{guild.id}> no delegate found, marking as None")
self.bot.ntplayer.change_dj(guild, None)
return
self.logger.info(f"Player: <{guild.id}> delegate found, <{new_host}> is the new host.")
self.bot.ntplayer.change_dj(guild, new_host)
def setup(bot: naoTimesBot):
bot.add_cog(MusikPlayerListener(bot))
| 39.584337 | 110 | 0.607442 | 1,530 | 13,142 | 5.060784 | 0.188235 | 0.024409 | 0.030996 | 0.019372 | 0.249387 | 0.184554 | 0.133669 | 0.070257 | 0.054501 | 0.032804 | 0 | 0.000857 | 0.290062 | 13,142 | 331 | 111 | 39.703927 | 0.828939 | 0.040253 | 0 | 0.185455 | 0 | 0.014545 | 0.169126 | 0.042361 | 0 | 0 | 0.000638 | 0 | 0 | 1 | 0.018182 | false | 0.018182 | 0.058182 | 0 | 0.138182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0d85ff21b9583ecc358a110a43fecddd61cee4d | 20,658 | py | Python | apis_core/apis_entities/forms.py | acdh-oeaw/apis-core | f7ece05eec46c820321fd28d3e947653dcb98ae7 | [
"MIT"
] | 11 | 2018-07-11T18:11:40.000Z | 2022-03-25T11:07:12.000Z | apis_core/apis_entities/forms.py | acdh-oeaw/apis-core | f7ece05eec46c820321fd28d3e947653dcb98ae7 | [
"MIT"
] | 309 | 2018-06-11T08:38:50.000Z | 2022-03-31T13:45:22.000Z | apis_core/apis_entities/forms.py | acdh-oeaw/apis-core | f7ece05eec46c820321fd28d3e947653dcb98ae7 | [
"MIT"
] | 5 | 2017-08-21T10:37:07.000Z | 2021-09-27T19:08:47.000Z | # -*- coding: utf-8 -*-
from crispy_forms.bootstrap import Accordion, AccordionGroup
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset
from crispy_forms.layout import Submit
from dal import autocomplete
from django import forms
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.validators import URLValidator
from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms import ModelMultipleChoiceField, ModelChoiceField
from django.urls import reverse
from apis_core.apis_metainfo.models import Text, Uri, Collection
from apis_core.apis_vocabularies.models import TextType
from apis_core.helper_functions import DateParser
from apis_core.helper_functions.RDFParser import RDFParser
from .fields import ListSelect2, Select2Multiple
from .models import AbstractEntity
if "apis_highlighter" in settings.INSTALLED_APPS:
from apis_highlighter.models import AnnotationProject
class SearchForm(forms.Form):
search = forms.CharField(label="Search")
@property
def helper(self):
helper = FormHelper()
helper.form_id = "searchForm"
helper.form_tag = False
helper.add_input(Submit("fieldn", "search"))
helper.form_method = "GET"
return helper
def get_entities_form(entity):
# TODO __sresch__ : consider moving this class outside of the function call to avoid redundant class definitions
class GenericEntitiesForm(forms.ModelForm):
class Meta:
model = AbstractEntity.get_entity_class_of_name(entity)
exclude = [
"start_date",
"start_start_date",
"start_end_date",
"start_date_is_exact",
"end_date",
"end_start_date",
"end_end_date",
"end_date_is_exact",
"text",
"source",
"published",
]
exclude.extend(model.get_related_entity_field_names())
exclude.extend(model.get_related_relationtype_field_names())
def __init__(self, *args, **kwargs):
super(GenericEntitiesForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = entity.title() + "Form"
self.helper.form_tag = False
self.helper.help_text_inline = True
acc_grp1 = Fieldset("Metadata {}".format(entity.title()))
acc_grp2 = AccordionGroup("MetaInfo", "references", "notes", "review")
attrs = {
"data-placeholder": "Type to get suggestions",
"data-minimum-input-length": getattr(settings, "APIS_MIN_CHAR", 3),
"data-html": True,
}
# list to catch all fields that will not be inserted into accordion group acc_grp2
fields_list_unsorted = []
for f in self.fields.keys():
if isinstance(
self.fields[f], (ModelMultipleChoiceField, ModelChoiceField)
):
v_name_p = str(self.fields[f].queryset.model.__name__)
if isinstance(self.fields[f], ModelMultipleChoiceField):
widget1 = Select2Multiple
else:
widget1 = ListSelect2
if (
ContentType.objects.get(
app_label__in=[
"apis_entities",
"apis_metainfo",
"apis_relations",
"apis_vocabularies",
"apis_labels",
],
model=v_name_p.lower(),
).app_label.lower()
== "apis_vocabularies"
):
self.fields[f].widget = widget1(
url=reverse(
"apis:apis_vocabularies:generic_vocabularies_autocomplete",
kwargs={"vocab": v_name_p.lower(), "direct": "normal"},
),
attrs=attrs,
)
if self.instance:
res = []
if isinstance(self.fields[f], ModelMultipleChoiceField):
try:
for x in getattr(self.instance, f).all():
res.append((x.pk, x.label))
except ValueError:
pass
self.fields[f].initial = res
self.fields[f].choices = res
else:
try:
res = getattr(self.instance, f)
if res is not None:
self.fields[f].initial = (res.pk, res.label)
self.fields[f].choices = [
(res.pk, res.label),
]
except ValueError:
res = ""
if f not in acc_grp2:
# append to unsorted list, so that it can be sorted and afterwards attached to accordion group acc_grp1
fields_list_unsorted.append(f)
def sort_fields_list(list_unsorted, entity_label):
"""
Sorts a list of model fields according to a defined order.
:param list_unsorted: list
The unsorted list of fields.
:param entity_label: str
The string representation of entity type, necessary to find the entity-specific ordering (if it is defined)
:return: list
The sorted list if entity-specific ordering was defined, the same unordered list if not.
"""
entity_settings = getattr(settings, 'APIS_ENTITIES', None)
if entity_settings is None:
return list_unsorted
sort_preferences = entity_settings[entity_label].get('form_order', None)
sort_preferences_used = []
if sort_preferences is None:
return list_unsorted
else:
# list of tuples to be sorted later
field_rank_pair_list = []
for field in list_unsorted:
if field in sort_preferences:
# if this succeeds, then the field has been given a priorites ordering above
ranking_by_index = sort_preferences.index(field)
sort_preferences_used.append(field)
field_rank_pair = (field, ranking_by_index)
else:
# if no ordering for the field was found, then give it 'Inf'
# so that it will be attached at the end.
field_rank_pair = (field, float('Inf'))
field_rank_pair_list.append(field_rank_pair)
# Make a check if all items of sort_preferences were used. If not, this indicates an out of sync setting
# if len(sort_preferences) > 0:
if len(sort_preferences_used) != len(sort_preferences):
differences = []
for p in sort_preferences_used:
if p not in sort_preferences:
differences.append(p)
for p in sort_preferences:
if p not in sort_preferences_used:
differences.append(p)
                        raise Exception(
                            "An item of the entity setting 'form_order' list was not used. \n"
                            "This probably indicates that the 'form_order' setting is out of sync with the effective django models.\n"
f"The relevant entity is: {entity_label}\n"
f"And the differences between used list and settings list are: {differences}"
)
# sort the list according to the second element in each tuple
# and then take the first elements from it and return as list
return [ t[0] for t in sorted(field_rank_pair_list, key=lambda x: x[1]) ]
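            # Settings sketch the sorter would pick up (hedged - "Person" and the
            # field names below are only an example):
            #   APIS_ENTITIES = {"Person": {"form_order": ["name", "first_name", "start_date_written"]}}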
# sort field list, iterate over it and append each element to the accordion group
for f in sort_fields_list(fields_list_unsorted, entity):
acc_grp1.append(f)
self.helper.layout = Layout(Accordion(acc_grp1, acc_grp2))
self.fields["status"].required = False
self.fields["collection"].required = False
self.fields["start_date_written"].required = False
self.fields["end_date_written"].required = False
instance = getattr(self, "instance", None)
            if instance is not None:
if instance.start_date_written:
self.fields[
"start_date_written"
].help_text = DateParser.get_date_help_text_from_dates(
single_date=instance.start_date,
single_start_date=instance.start_start_date,
single_end_date=instance.start_end_date,
single_date_written=instance.start_date_written,
)
else:
self.fields[
"start_date_written"
].help_text = DateParser.get_date_help_text_default()
if instance.end_date_written:
self.fields[
"end_date_written"
].help_text = DateParser.get_date_help_text_from_dates(
single_date=instance.end_date,
single_start_date=instance.end_start_date,
single_end_date=instance.end_end_date,
single_date_written=instance.end_date_written,
)
else:
self.fields[
"end_date_written"
].help_text = DateParser.get_date_help_text_default()
def save(self, *args, **kwargs):
obj = super(GenericEntitiesForm, self).save(*args, **kwargs)
if obj.collection.all().count() == 0:
col_name = getattr(
settings, "APIS_DEFAULT_COLLECTION", "manually created entity"
)
col, created = Collection.objects.get_or_create(name=col_name)
obj.collection.add(col)
return obj
return GenericEntitiesForm
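# Usage sketch (hedged - "Person" is only an example entity name; the instance and
# POST data are placeholders):
#
#   PersonForm = get_entities_form("Person")
#   form = PersonForm(request.POST or None, instance=person_instance)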
class GenericEntitiesStanbolForm(forms.Form):
def save(self, *args, **kwargs):
cd = self.cleaned_data
entity = RDFParser(cd["entity"], self.entity.title()).get_or_create()
return entity
def __init__(self, entity, *args, **kwargs):
attrs = {
"data-placeholder": "Type to get suggestions",
"data-minimum-input-length": getattr(settings, "APIS_MIN_CHAR", 3),
"data-html": True,
"style": "width: auto",
}
ent_merge_pk = kwargs.pop("ent_merge_pk", False)
super(GenericEntitiesStanbolForm, self).__init__(*args, **kwargs)
self.entity = entity
self.helper = FormHelper()
form_kwargs = {"entity": entity}
url = reverse(
"apis:apis_entities:generic_entities_autocomplete",
args=[entity.title(), "remove"],
)
label = "Create {} from reference resources".format(entity.title())
button_label = "Create"
if ent_merge_pk:
form_kwargs["ent_merge_pk"] = ent_merge_pk
url = reverse(
"apis:apis_entities:generic_entities_autocomplete",
args=[entity.title(), ent_merge_pk],
)
label = "Search for {0} in reference resources or db".format(entity.title())
button_label = "Merge"
self.helper.form_action = reverse(
"apis:apis_entities:generic_entities_stanbol_create", kwargs=form_kwargs
)
self.helper.add_input(Submit("submit", button_label))
self.fields["entity"] = autocomplete.Select2ListCreateChoiceField(
label=label,
widget=ListSelect2(url=url, attrs=attrs),
validators=[URLValidator],
)
class FullTextForm(forms.Form):
def save(self, entity):
cd = self.cleaned_data
text = None
for f in cd.keys():
text_type = TextType.objects.get(pk=f.split("_")[1])
text = Text.objects.filter(tempentityclass=entity, kind=text_type)
if text.count() == 1:
text = text[0]
text.text = cd[f]
text.save()
elif text.count() == 0:
text = Text(text=cd[f], kind=text_type)
text.save()
entity.text.add(text)
return text
def __init__(self, *args, **kwargs):
if "entity" in kwargs.keys():
entity = kwargs.pop("entity", None)
else:
entity = None
if "instance" in kwargs.keys():
instance = kwargs.pop("instance", None)
else:
instance = None
super(FullTextForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = "FullTextForm"
self.helper.form_tag = False
self.helper.help_text_inline = True
collections = []
if instance:
for i in instance.collection.all():
collections.append(i)
try:
if len(collections) > 0:
q = TextType.objects.filter(
entity__iexact=entity, collections__in=collections
)
else:
q = TextType.objects.filter(entity__iexact=entity)
for txt in q:
self.fields["text_" + str(txt.pk)] = forms.CharField(
label=txt.name,
help_text=txt.description,
required=False,
widget=forms.Textarea,
)
if instance:
for t in instance.text.all():
if "text_" + str(t.kind.pk) in self.fields.keys():
self.fields["text_" + str(t.kind.pk)].initial = t.text
        except Exception:
            # Keep the form usable even if the TextType lookup or text loading fails.
            pass
class PersonResolveUriForm(forms.Form):
# person = forms.CharField(label=False, widget=al.TextWidget('PersonAutocomplete'))
person = forms.CharField(label=False)
person_uri = forms.CharField(required=False, widget=forms.HiddenInput())
def save(self, site_instance, instance=None, commit=True):
cd = self.cleaned_data
if cd["person"].startswith("http"):
uri = Uri.objects.create(uri=cd["person"], entity=site_instance)
else:
uri = Uri.objects.create(uri=cd["person_uri"], entity=site_instance)
return uri
def __init__(self, *args, **kwargs):
entity_type = kwargs.pop("entity_type", False)
self.request = kwargs.pop("request", False)
super(PersonResolveUriForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
def clean(self):
cleaned_data = super(PersonResolveUriForm, self).clean()
if Uri.objects.filter(uri=cleaned_data["person_uri"]).exists():
self.add_error("person", "This Person has already been added to the DB.")
elif cleaned_data["person"].startswith("http"):
if Uri.objects.filter(uri=cleaned_data["person"]).exists():
self.add_error("person", "This URI has already been added to the DB.")
class NetworkVizFilterForm(forms.Form):
ann_include_all = forms.BooleanField(
required=False,
label="Include general relations",
help_text="""Not all relations are connected to an annotation.\
If checked, relations that are not attached to an annotation are included.\
This setting is only used when an Annotation project is specified.""",
)
start_date = forms.CharField(
label="Start date",
required=False,
widget=forms.TextInput(
attrs={"data-provide": "datepicker", "data-date-format": "dd.mm.yyyy"}
),
)
end_date = forms.CharField(
label="End date",
required=False,
widget=forms.TextInput(
attrs={"data-provide": "datepicker", "data-date-format": "dd.mm.yyyy"}
),
)
def __init__(self, *args, **kwargs):
rel_attrs = {
"data-placeholder": "Type to get suggestions",
"data-minimum-input-length": getattr(settings, "APIS_MIN_CHAR", 3),
"data-html": True,
}
attrs = {
"data-placeholder": "Type to get suggestions",
"data-minimum-input-length": getattr(settings, "APIS_MIN_CHAR", 3),
"data-html": True,
}
super(NetworkVizFilterForm, self).__init__(*args, **kwargs)
self.fields["select_relation"] = forms.ChoiceField(
label="Relation type",
choices=list(
("-".join(x.name.split()), x.name)
for x in ContentType.objects.filter(app_label="apis_relations")
),
help_text="Include only relations related to this annotation project \
(See the include general relations checkbox)",
)
self.fields["select_relation"].initial = ("person-place", "person place")
self.fields["search_source"] = autocomplete.Select2ListCreateChoiceField(
label="Search source",
widget=ListSelect2(
url=reverse(
"apis:apis_entities:generic_network_entities_autocomplete",
kwargs={"entity": "person"},
),
attrs=attrs,
),
)
self.fields["search_target"] = autocomplete.Select2ListCreateChoiceField(
label="Search target",
widget=ListSelect2(
url=reverse(
"apis:apis_entities:generic_network_entities_autocomplete",
kwargs={"entity": "place"},
),
attrs=attrs,
),
)
self.fields["select_kind"] = autocomplete.Select2ListCreateChoiceField(
label="Select kind",
widget=ListSelect2(
url=reverse(
"apis:apis_vocabularies:generic_vocabularies_autocomplete",
kwargs={"vocab": "personplacerelation", "direct": "normal"},
),
attrs=rel_attrs,
),
)
if "apis_highlighter" in settings.INSTALLED_APPS:
self.fields["annotation_proj"] = forms.ChoiceField(
label="Annotation Project",
choices=BLANK_CHOICE_DASH
+ list((x.pk, x.name) for x in AnnotationProject.objects.all()),
required=False,
help_text="Include only relations related to this annotation project \
(See the include general relations checkbox)",
)
self.helper = FormHelper()
self.helper.form_class = "FilterNodesForm"
self.helper.form_action = reverse("apis:apis_core:NetJson-list")
self.helper.add_input(Submit("Submit", "Add nodes"))
self.order_fields(
(
"select_relation",
"ann_include_all",
"annotation_proj",
"search_source",
"select_kind",
"search_target",
)
)
class GenericFilterFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super(GenericFilterFormHelper, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.form_class = "genericFilterForm"
self.form_method = "GET"
self.add_input(Submit("Filter", "Filter"))
| 42.506173 | 135 | 0.539742 | 2,043 | 20,658 | 5.263338 | 0.173275 | 0.026039 | 0.009207 | 0.010044 | 0.337952 | 0.261322 | 0.213057 | 0.177997 | 0.167023 | 0.167023 | 0 | 0.002923 | 0.370607 | 20,658 | 485 | 136 | 42.593814 | 0.824104 | 0.063268 | 0 | 0.290557 | 0 | 0 | 0.139628 | 0.027082 | 0 | 0 | 0 | 0.002062 | 0 | 1 | 0.033898 | false | 0.004843 | 0.046005 | 0 | 0.135593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0d8ddfb1dd3917ee1c7754ff4a656b8aa207cb2 | 3,154 | py | Python | Python Programs/ROCK-o-DRUM-master/ROCK-o-DRUM-master/drum_player.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | [
"MIT"
] | 77 | 2020-10-01T10:06:59.000Z | 2021-11-08T08:57:18.000Z | Python Programs/ROCK-o-DRUM-master/ROCK-o-DRUM-master/drum_player.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | [
"MIT"
] | 46 | 2020-09-27T04:55:36.000Z | 2021-05-14T18:49:06.000Z | Python Programs/ROCK-o-DRUM-master/ROCK-o-DRUM-master/drum_player.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | [
"MIT"
] | 327 | 2020-09-26T17:06:03.000Z | 2021-10-09T06:04:39.000Z | import cv2 #importing modules
import numpy as np
from drum_styles import draw,drum_press
cap=cv2.VideoCapture(0)
while True:
ret,frame=cap.read() #accessing the frames
frame=cv2.flip(frame,1)
frame=cv2.GaussianBlur(frame,(9,9),0)
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#_, binary = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)
hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV) #converting to Hue_Saturation_Vue format
#mask=cv2.inRange(hsv,lower_red,upper_red)
#kernel=np.ones((5,5),np.float32)/25
#mask=cv2.filter2D(mask,-1,kernel)
#mask=cv2.blur(mask,(3,3))
draw(frame) #creating the rectangular drums
kernel1=np.ones((4,4),np.uint8) #kernels for smoothing the frames
kernel2=np.ones((15,15),np.uint8)
lower_red=np.array([132,90,120]) #creating the mask for red color
upper_red=np.array([179,255,255])
mask1=cv2.inRange(hsv, lower_red,upper_red)
lower_red=np.array([0,110,100])
upper_red= np.array([3,255,255])
mask2=cv2.inRange(hsv, lower_red,upper_red)
mask_r=mask1+mask2 #final red mask
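    # red wraps around the hue axis in OpenCV's 0-179 HSV range, so a high-hue
    # range (132-179) and a near-zero range (0-3) are combined into one red mask above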
mask_r=cv2.erode(mask_r,kernel1,iterations = 1)
mask_r=cv2.morphologyEx(mask_r,cv2.MORPH_CLOSE,kernel2)
xr,yr,wr,hr=0,0,0,0
contours_r,hierarchy=cv2.findContours(mask_r,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #getting the contours in the mask
try:
for i in range (0,10):
xr,yr,wr,hr=cv2.boundingRect(contours_r[i])
if(wr*hr)>2000: #checking for a proper area to avoid noisy disturbances
break
    except IndexError:
        pass #passes if no masks are there in the image (fewer contours than expected)
lower_b=np.array([38,86,0]) #blue color range
upper_b= np.array([121,255,255])
mask_b=cv2.inRange(hsv, lower_b,upper_b) #final blue mask
mask_b=cv2.erode(mask_b,kernel1,iterations=1)
mask_b=cv2.morphologyEx(mask_b,cv2.MORPH_CLOSE,kernel2)
xb,yb,wb,hb=0,0,0,0
contours_r,hierarchy=cv2.findContours(mask_b,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #getting the contours in the mask
try:
for i in range (0,10):
xb,yb,wb,hb=cv2.boundingRect(contours_r[i]) #getting the coordinates of the contour
if(wb*hb)>2000: #checking for a proper area to avoid noisy disturbances
break
    except IndexError:
        pass #passes if no masks are there in the image
cv2.rectangle(frame,(xr,yr),(xr+wr,yr+hr),(255,255,255),2) #drawing a rectangle around the red object
cv2.rectangle(frame,(xb,yb),(xb+wb,yb+hb),(255,255,255),2)
drum_press(frame,xr,yr,wr,hr) #checking the drums it hits
drum_press(frame,xb,yb,wb,hb)
frame=cv2.resize(frame,(800,600))
cv2.imshow('ROCK-o-DRUM',frame) #displaying the frames
#cv2.imshow('MASK_red',mask)
if cv2.waitKey(1)==ord('q'):
break
cap.release()
cv2.destroyAllWindows() | 39.425 | 122 | 0.603361 | 465 | 3,154 | 3.984946 | 0.329032 | 0.022666 | 0.021587 | 0.038856 | 0.295197 | 0.24231 | 0.24231 | 0.195359 | 0.195359 | 0.195359 | 0 | 0.076282 | 0.276791 | 3,154 | 80 | 123 | 39.425 | 0.736081 | 0.260938 | 0 | 0.203704 | 0 | 0 | 0.005202 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.037037 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0d99f2f6f98885e72a2e5530207ddd70dec3390 | 5,252 | py | Python | client.py | devilcius/funkwhale-predatum | cac9ea671275dd1d66d9ead7a1773047ef4324de | [
"Unlicense"
] | null | null | null | client.py | devilcius/funkwhale-predatum | cac9ea671275dd1d66d9ead7a1773047ef4324de | [
"Unlicense"
] | null | null | null | client.py | devilcius/funkwhale-predatum | cac9ea671275dd1d66d9ead7a1773047ef4324de | [
"Unlicense"
] | null | null | null | import json
import ssl
import time
from http.client import HTTPSConnection
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import URLError, HTTPError
from http.client import BadStatusLine
from .funkwhale_startup import PLUGIN
import hashlib
LOGIN_SCROBBLER_URL = "https://api.predatum.com/api/login"
HOST_NAME = "api.predatum.com"
PATH_SUBMIT = "/api/scrobble"
SSL_CONTEXT = ssl.create_default_context()
class Track:
def __init__(self, artist_name, track_title, release_name=None, additional_info={}):
self.artist_name = artist_name
self.track_title = track_title
self.release_name = release_name
self.additional_info = additional_info
@staticmethod
def from_dict(data):
return Track(
data["artist_name"],
data["track_title"],
data.get("release_name", None),
data.get("additional_info", {}),
)
def to_dict(self):
return {
"artist_name": self.artist_name,
"track_title": self.track_title,
"release_name": self.release_name,
"additional_info": self.additional_info,
}
def __repr__(self):
return "Track(%s, %s)" % (self.artist_name, self.track_title)
class PredatumScrobbler:
def __init__(self, username, password):
self.__next_request_time = 0
self.logger = PLUGIN["logger"]
hashedAuth = hashlib.md5(
(username + " " + password).encode("utf-8")
).hexdigest()
self.token_cache_key = "predatum:sessionkey:{}".format(hashedAuth)
self.username = username
self.password = password
self.setToken()
def submit(self, listened_at, track):
payload = _get_payload(track, listened_at)
return self._submit("single", payload)
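# _submit posts the listen payload to the scrobble endpoint; it retries up to
# 5 times on HTTP 429 (Too Many Requests) and on HTTP 401 (after calling
# setToken() to refresh the session token). Rate-limit headers from each
# response feed _wait_for_ratelimit() before the next request is sent.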
def _submit(self, listen_type, payload, retry=0):
self._wait_for_ratelimit()
self.logger.info("ListenPredatum %s: %r", listen_type, payload)
headers = {
"Authorization": "Bearer %s" % self.token,
"Accept": "application/json",
"Content-Type": "application/json"
}
body = json.dumps(payload)
conn = HTTPSConnection(HOST_NAME, context=SSL_CONTEXT)
conn.request("POST", PATH_SUBMIT, body, headers)
response = conn.getresponse()
response_text = response.read()
try:
response_data = json.loads(response_text)
except json.decoder.JSONDecodeError:
response_data = response_text
self._handle_ratelimit(response)
log_msg = "Response %s: %r" % (response.status, response_data)
if response.status == 429 and retry < 5: # Too Many Requests
self.logger.warning(log_msg)
return self._submit(listen_type, payload, retry + 1)
elif response.status == 401 and retry < 5:
self.logger.warning(log_msg)
self.setToken()
return self._submit(listen_type, payload, retry + 1)
elif response.status == 201:
self.logger.debug(log_msg)
else:
self.logger.error(log_msg)
return response
def _wait_for_ratelimit(self):
now = time.time()
if self.__next_request_time > now:
delay = self.__next_request_time - now
self.logger.debug("Rate limit applies, delay %d", delay)
time.sleep(delay)
def _handle_ratelimit(self, response):
remaining = int(response.getheader("X-RateLimit-Remaining", 0))
reset_in = int(response.getheader("X-RateLimit-Reset-In", 0))
self.logger.debug("X-RateLimit-Remaining: %i", remaining)
self.logger.debug("X-RateLimit-Reset-In: %i", reset_in)
if remaining == 0:
self.__next_request_time = time.time() + reset_in
def setToken(self, renew = False):
token = PLUGIN["cache"].get(self.token_cache_key)
if not token or renew:
token = self.login()
self.token = token
def login(self):
logger = PLUGIN["logger"]
params = dict(username=self.username,
password=self.password,
remember='1',
submit='Submit')
data = urllib.parse.urlencode(params).encode('utf-8')
try:
request = urllib.request.Request(LOGIN_SCROBBLER_URL, data)
response = urllib.request.urlopen(request)
jsonResponse = json.loads(response.read().decode('utf-8'))
return jsonResponse['token']
except HTTPError as e:
logger.info('The server couldn\'t fulfill the authentication request.')
logger.info('Error code: {} - {}'.format(e.code, e.read()))
except URLError as e:
logger.info('We failed to reach the server. Reason: {}'.format(e.reason))
except BadStatusLine as e:
logger.info('The status line could not be parsed as a valid HTTP/1.0 or 1.1 status line: {}'.format(e.line))
def _get_payload(track, listened_at=None):
data = {"track_metadata": track.to_dict()}
if listened_at is not None:
data["listened_at"] = listened_at
return data
| 35.248322 | 105 | 0.612148 | 609 | 5,252 | 5.087028 | 0.272578 | 0.032279 | 0.018076 | 0.024532 | 0.148483 | 0.036798 | 0.036798 | 0.036798 | 0.036798 | 0.036798 | 0 | 0.007126 | 0.278561 | 5,252 | 148 | 106 | 35.486486 | 0.810504 | 0.003237 | 0 | 0.063492 | 0 | 0.007937 | 0.122683 | 0.016434 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0.031746 | 0.087302 | 0.02381 | 0.269841 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0dc38ecbe730e8453f2a6c493fc69be4e6d68ae | 12,654 | py | Python | src/panel/panel_props.py | KeithPinson/cityvilleburg | d002645c3fb6738e0406aded338f16efb75532eb | [
"MIT"
] | null | null | null | src/panel/panel_props.py | KeithPinson/cityvilleburg | d002645c3fb6738e0406aded338f16efb75532eb | [
"MIT"
] | null | null | null | src/panel/panel_props.py | KeithPinson/cityvilleburg | d002645c3fb6738e0406aded338f16efb75532eb | [
"MIT"
] | null | null | null | """Properties of the N-Key-Panel"""
#
# The properties of the N-Key-Panel. We want to store these
# in the Blend file and to facilitate this we need to
# put the properties in a class derived from the
# PropertyGroup.
#
# It is not obvious but if we attach the properties to
# the bpy.types.Scene object then Blender will keep
# the properties in the bpy.context.scene object and
# the properties will be saved in the file.
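# A minimal sketch of that pattern (illustrative names only, not part of this
# add-on; it mirrors what cvb_panel_register() does at the bottom of this file):
#
#   class MyProps(bpy.types.PropertyGroup):
#       count: bpy.props.IntProperty(default=0)
#
#   bpy.utils.register_class(MyProps)
#   bpy.types.Scene.my_props = bpy.props.PointerProperty(type=MyProps)
#   # ...then bpy.context.scene.my_props.count persists with the .blend file.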
#
# Copyright (c) 2021 Keith Pinson
from math import isclose
import bpy
from bpy.types import PropertyGroup
from bpy.props import (
PointerProperty, StringProperty, IntProperty, BoolProperty, EnumProperty)
# pylint: disable=relative-beyond-top-level
from ..terrain.terrain_props import CVB_TerrainProperties
from .citysketchname_props import CVB_CityNameProperties, is_sketch_list_empty
from ..utils.collection_utils import viewlayer_collections, collection_sibling_names
from ..utils.object_utils import object_get, object_get_or_add_empty, object_parent_all
from ..utils.fass_grid import fassGrid
from ..addon.preferences import cvb_icon, cvb_prefs
def _mini_factor(t, n):
"""For a square scale vector: factor will result in a NxN x, y geometry"""
# This is intended for tiles. As the lengths of x and y diverge, the
# resulting geometry will approach 2Nx0
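# Worked example: _mini_factor((1000, 1000), 10) == 0.01, which scales a
# 1000 x 1000 footprint down to 10 x 10; _mini_factor((1500, 500), 10) is
# also 0.01, giving 15 x 5 (same total budget, different aspect ratio).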
f = 1
if len(t) > 1:
a = t[0]
b = t[1]
f = 2*n / (a + b) if (a + b) > 0 else 1
return f
class CVB_PanelProperties(PropertyGroup):
# pylint: disable=invalid-name, line-too-long
"""Panel properties saved to the blend file"""
_grid = fassGrid()
def decode_style(self, coded_style):
styles_found = \
[s[0] for s in self.sketch_style_list if s[0].startswith(coded_style)]
if styles_found:
style = styles_found[0]
else:
style = self.sketch_style_list[0][0]
return style
def encode_style(self, style):
result = style[0] if style else self.sketch_style_list[0][0][0]
return result
def get_mini_sketch(self):
"""Check the size of the Transform empty to determine if mini sketch"""
is_full = True
cvb = bpy.context.scene.CVB
sketch_name = cvb.city_props.get_sketch_name() if \
(not len(cvb.import_name_prop) > 0) and \
(not cvb.city_props.is_get_sketch_name_pending()) \
else ""
if sketch_name:
# Get the empty
transform_object = object_get("/CVB/{0}/{0} Transform".format(sketch_name))
if transform_object and hasattr(transform_object, "scale") and transform_object.scale:
is_full = isclose(1.0, transform_object.scale[0], abs_tol=0.0001)
return not is_full
def mini_sketch_add_or_toggle(self, is_mini=True):
"""Adds or modifies the Transform empty to change the size of the sketch"""
#
# Requirements:
# 1. Shrink the size of the sketch and related map, terrain, city
# to a footprint factor of 10x10 to 20x~0 depending on aspect ratio
# 2. Center sketch to the tile zero position
# 3. Hide any other sketches and related maps,terrains,cities
#
cvb = bpy.context.scene.CVB
sketch_name = cvb.city_props.get_sketch_name() if \
(not len(cvb.import_name_prop) > 0) and \
(not cvb.city_props.is_get_sketch_name_pending()) \
else ""
if not sketch_name:
return
# Make sure the view layers are in sync
scene = viewlayer_collections("/CVB/{0}".format(sketch_name))
if scene:
scene.exclude = False
self.sketch_visibility_toggle(cvb.sketch_visible_prop)
#
# 1. Shrink Sketch
#
# Use the props for size rather than extracting size from sketch name
size = (cvb.sketch_xy_linked_prop, cvb.sketch_xy_linked_prop) if \
cvb.using_tile_id_prop else (cvb.sketch_x_prop, cvb.sketch_y_prop)
factor = _mini_factor(size, 10)
empty = self.parent_to_sketch(sketch_name)
if empty:
empty.scale = (factor, factor, factor) if is_mini else (1, 1, 1)
#
# 2. Center to Tile Zero
#
tile_position = 0 if is_mini else cvb.tile_id_prop
self.move_tile_position(empty, tile_position)
#
# 3. Hide other Sketches
#
sketch_path = "/CVB/{0}".format(sketch_name)
sibling_sketch_names = collection_sibling_names(sketch_path)
for sibling_sketch_name in sibling_sketch_names:
scene = viewlayer_collections("/CVB/{0}".format(sibling_sketch_name))
if scene:
scene.exclude = is_mini
# To keep everything toggling in sync make sure these are not mini
empty = self.parent_to_sketch(sibling_sketch_name)
if empty:
empty.scale = (1, 1, 1)
def move_tile_position(self, empty, tile_id):
if empty:
# TODO: Get position based off tile_id
pass
def parent_to_sketch(self, sketch_name):
sketch_path = "/CVB/{0}".format(sketch_name)
empty_name = "{0} Transform".format(sketch_name)
empty = object_get_or_add_empty(
sketch_path, empty_name, radius=0.12, display_type='CUBE')
if empty:
object_parent_all(empty, "/CVB/{0}/Sketch ~ {0}".format(sketch_name))
object_parent_all(empty, "/CVB/{0}/Map ~ {0}".format(sketch_name))
object_parent_all(empty, "/CVB/{0}/Terrain ~ {0}".format(sketch_name))
object_parent_all(empty, "/CVB/{0}/City ~ {0}".format(sketch_name))
return empty
def set_mini_sketch(self, value):
"""Toggle the mini sketch"""
self.mini_sketch_add_or_toggle(value)
def set_seed(self, value):
"""Keeps the addon preference seed in sync"""
if cvb_prefs(bpy.context):
if cvb_prefs(bpy.context).cvb_seed:
cvb_prefs(bpy.context).cvb_seed = value
def sketch_visibility_toggle(self, is_visible=True):
"""Turns the visibility of the sketch off or on"""
cvb = bpy.context.scene.CVB
sketch_name = cvb.city_props.get_sketch_name() if \
(not len(cvb.import_name_prop) > 0) and \
(not cvb.city_props.is_get_sketch_name_pending()) \
else ""
if sketch_name:
scene = viewlayer_collections("/CVB/{0}/Sketch ~ {0}".format(sketch_name))
if scene:
scene.exclude = not is_visible
def update_seed(self, context):
"""Seed update"""
cvb = context.scene.CVB
cvb.city_props.refresh_sketch_list(cvb)
self.set_seed(cvb.seed_prop)
def update_sketch_style(self, context):
"""Sketch style update"""
cvb = context.scene.CVB
cvb.city_props.refresh_sketch_list(cvb)
def update_sketch_visibility(self, context):
"""Toggle visibility of sketch layer"""
cvb = context.scene.CVB
self.sketch_visibility_toggle(cvb.sketch_visible_prop)
def update_sketch_xy_linked(self, context):
"""Sketch xy linked update"""
cvb = context.scene.CVB
cvb.city_props.refresh_sketch_list(cvb)
def update_sketch_x(self, context):
"""Sketch x update"""
cvb = context.scene.CVB
cvb.city_props.refresh_sketch_list(cvb)
def update_sketch_y(self, context):
"""Sketch y update"""
cvb = context.scene.CVB
cvb.city_props.refresh_sketch_list(cvb)
def update_tile_id(self, context):
"""Impacts the file name """
cvb = context.scene.CVB
(x,y) = self._grid.get_tile_xy(cvb.tile_id_prop)
# The default font bfont.ttf (DejaVu Sans) renders a plain hyphen poorly when used as a minus sign
coords = "{0:+04d} {1:+04d}".format(x,y)
coords = coords.replace("-", "\u2212") # replace hyphen with minus sign
cvb.tile_position_prop = coords
cvb.city_props.refresh_sketch_list(cvb)
# def update_tile_position(self, context):
# """Translation of tile id to position"""
# cvb = context.scene.CVB
# cvb.sketch_xy_linked_prop =
# (x,y) = self._grid.get_tile_xy(cvb.tile_id_prop)
# print(cvb.tile_id_prop, "{0:+04d} {1:+04d}".format(x,y))
city_props: PointerProperty(type=CVB_CityNameProperties)
terrain_props: PointerProperty(type=CVB_TerrainProperties)
import_name_prop: StringProperty(
name="",
description="""Imported Sketch""",
default="")
seed_prop: IntProperty(
name="Seed",
description="""Reproducible random sketch id""",
default=1, min=1, max=32_767,
update=update_seed)
sketch_minimized_prop: BoolProperty(
name="Mini Sketch Toggle",
description="""Toggle Sketch Size""" if
not is_sketch_list_empty() else "Inactive until New Sketch",
get=get_mini_sketch,
set=set_mini_sketch)
# First letter of first element must be unique (it is used in city filename)
sketch_style_list = [
('grid', "Grid Plan City", "A city map modeled after the planned grid system"),
('medieval', "Medieval City Style", "A layout from years ago when cities formed inside a defensive wall"),
('skyscrapers', "Skyscraper City Style", "A city map modeled on the if you can't build out, build up"),
('western', "Western City Style", "A town built along a thoroughfare; water, rail, or road")
]
sketch_style_prop: EnumProperty(
name="",
description="""Style hint that affects map sketch""",
default='grid',
items=sketch_style_list,
update=update_sketch_style)
sketch_visible_prop: BoolProperty(
name="Sketch Visibility",
description="""Toggle Sketch Visibility""" if
not is_sketch_list_empty() else "Inactive until New Sketch",
default=True,
update=update_sketch_visibility)
sketch_xy_linked_prop: IntProperty(
name="Sketch XY",
description="""Sketch XY size""",
min=1,
max=10_000,
step=100,
default=1000,
update=update_sketch_xy_linked)
sketch_x_prop: IntProperty(
name="Sketch X",
description="""Sketch X size""",
min=1,
max=10_000,
step=100,
default=1000,
update=update_sketch_x)
sketch_y_prop: IntProperty(
name="Sketch Y",
description="""Sketch Y size""",
min=1,
max=10_000,
step=100,
default=1000,
update=update_sketch_y)
tile_id_prop: IntProperty(
name="",
description="""Unique ID of tile""",
default=0, min=0, max=_grid.get_last_tile(),
update=update_tile_id)
tile_position_prop: StringProperty(
name="",
description="""Matrix position from central tile""",
default="+000 +000")
# update=update_tile_position)
using_tile_id_prop: BoolProperty(
name="Multi-file Renders",
description="""Facilitates rendering across multiple files for one single city""",
default=False)
# Internal number, typically incremented when new sketch is added
variant_prop: IntProperty(
name="Variant",
description="""Sketch variant""",
default=0, min=0, max=999)
visible_city_sketch_prop: BoolProperty(
name="City Sketch Visible",
description="""Is City Sketch Visible""",
default=False)
visible_sketch_settings_prop: BoolProperty(
name="Sketch Settings Visible",
description="""Are Sketch Settings Visible""",
default=True)
visible_terrain_editor_prop: BoolProperty(
name="Terrain Editor Visible",
description="""Is Terrain Editor Visible""",
default=False)
def cvb_panel_register():
"""Panel properties to register"""
bpy.utils.register_class(CVB_TerrainProperties)
bpy.utils.register_class(CVB_CityNameProperties)
bpy.utils.register_class(CVB_PanelProperties)
# pylint: disable=assignment-from-no-return
bpy.types.Scene.CVB = PointerProperty(name='CVB', type=CVB_PanelProperties)
# pylint: enable=assignment-from-no-return
def cvb_panel_unregister():
"""Panel properties for unregistering"""
if bpy.types.Scene.CVB is not None:
del bpy.types.Scene.CVB
bpy.utils.unregister_class(CVB_PanelProperties)
bpy.utils.unregister_class(CVB_CityNameProperties)
bpy.utils.unregister_class(CVB_TerrainProperties)
| 33.654255 | 114 | 0.643038 | 1,652 | 12,654 | 4.719734 | 0.192494 | 0.035911 | 0.018469 | 0.017443 | 0.30345 | 0.231884 | 0.192638 | 0.178274 | 0.15724 | 0.15724 | 0 | 0.015963 | 0.257389 | 12,654 | 375 | 115 | 33.744 | 0.81377 | 0.186107 | 0 | 0.273128 | 0 | 0 | 0.109209 | 0 | 0 | 0 | 0 | 0.002667 | 0 | 1 | 0.0837 | false | 0.004405 | 0.066079 | 0 | 0.264317 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0dc9dcef3186a2cff574f5ec1ff43c473d8d93f | 218 | py | Python | ind1.py | LokiTheGodOfBitchez/Lab_6 | bd599514b39cea278b6563c3c3ff0b7cf6c463ad | [
"MIT"
] | null | null | null | ind1.py | LokiTheGodOfBitchez/Lab_6 | bd599514b39cea278b6563c3c3ff0b7cf6c463ad | [
"MIT"
] | null | null | null | ind1.py | LokiTheGodOfBitchez/Lab_6 | bd599514b39cea278b6563c3c3ff0b7cf6c463ad | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == '__main__':
s = input("Enter the text: ")
i = s.count('+')
j = s.count('-')
counter = i + j
print("The number of '-' and '+': ", counter)
| 19.818182 | 45 | 0.509174 | 30 | 218 | 3.433333 | 0.766667 | 0.116505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012195 | 0.247706 | 218 | 10 | 46 | 21.8 | 0.615854 | 0.197248 | 0 | 0 | 0 | 0 | 0.306358 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0e000f1d7c3aa0289cff7dbc52536879580c2d3 | 9,931 | py | Python | bocce/responses.py | brianjpetersen/bocce | 20a4845400e8759173c5391ce52f18dafbf4c678 | [
"MIT"
] | null | null | null | bocce/responses.py | brianjpetersen/bocce | 20a4845400e8759173c5391ce52f18dafbf4c678 | [
"MIT"
] | null | null | null | bocce/responses.py | brianjpetersen/bocce | 20a4845400e8759173c5391ce52f18dafbf4c678 | [
"MIT"
] | null | null | null | # standard libraries
import os
import abc
import collections
import copy
import io
import gzip
import mimetypes
import datetime
# third party libraries
import numpy
import werkzeug
# first party libraries
from . import (surly, utils, headers, cookies, )
__where__ = os.path.dirname(os.path.abspath(__file__))
mimetypes._winreg = None # do not load mimetypes from windows registry
mimetypes.add_type('text/javascript', '.js') # stdlib default is application/x-javascript
mimetypes.add_type('image/x-icon', '.ico') # not among defaults
class FileIterator:
def __init__(self, filename, mode='rb', block_size=None):#, delete_after=False):
self.filename = filename
if block_size is None:
stats = self.stats = os.stat(filename)
block_size = getattr(stats, 'st_blksize', 4096)
self.file_ = open(filename, mode)
self.block_size = block_size
#self.delete_after = delete_after
def __iter__(self):
block_size = self.block_size
file_ = self.file_
try:
while True:
block = file_.read(block_size)
if not block:
break
yield block
finally:
file_.close()
"""
if self.delete_after:
try:
os.remove(self.filename)
except:
pass
"""
class BodyIterable:
def __init__(self, iterable, content_length, content_type, content_encoding):
self.iterable = iterable
self.content_length = content_length
self.content_type = content_type
self.content_encoding = content_encoding
@property
def wsgi(self):
return self.iterable
class BodyFile(BodyIterable):
def __init__(self, filename, mimetype=None, charset=None, is_compressed=False):
self.filename = filename
self.iterable = FileIterator(self.filename)
self.content_length = getattr(self.iterable.stats, 'st_size', None)
if mimetype is None:
mimetype, _ = mimetypes.guess_type(self.filename)
if mimetype is None:
self.content_type = None
else:
if charset is None:
self.content_type = mimetype
else:
self.content_type = '{}; charset={}'.format(mimetype, charset)
self.is_compressed = is_compressed
if self.is_compressed:
self.content_encoding = 'gzip'
else:
self.content_encoding = None
"""
def compress(self, level=2, threshold=128):
if len(self._content) < threshold:
return
"""
@property
def wsgi(self):
return self.iterable
class BodyContent:
def __init__(self, content, content_type):
self._content = content
self.content_type = content_type
self.content_encoding = None
@property
def content(self):
if self.content_encoding == 'gzip':
return self._compressed_content
else:
return self._content
@property
def content_length(self):
return len(self.content)
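# compress() gzips the stored body in memory; bodies shorter than `threshold`
# bytes are left as-is, since the compression overhead would outweigh the gain.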
def compress(self, level=2, threshold=128):
if len(self._content) < threshold:
return
compressed_content = io.BytesIO()
with gzip.GzipFile(fileobj=compressed_content, mode='wb', compresslevel=level) as f:
f.write(self._content)
self._compressed_content = compressed_content.getvalue()
self.content_encoding = 'gzip'
@property
def wsgi(self):
return [self.content, ]
class JsonBody(collections.OrderedDict):
def __init__(self, charset='utf-8', indent=None, serializers=None):
super(JsonBody, self).__init__()
if serializers is None:
serializers = {}
serializers[surly.Url] = lambda url: str(url)
serializers[tuple] = lambda tuple: list(tuple)
serializers[set] = lambda set: list(set)
serializers[datetime.date] = lambda date: date.isoformat()
serializers[datetime.datetime] = lambda datetime: datetime.isoformat()
serializers[bytes] = lambda bytes: bytes.decode(charset)
serializers[numpy.ndarray] = lambda array: array.tolist()
self.serializers = serializers
self.indent = indent
self.charset = charset
self.content_type = 'application/json; charset={}'.format(charset)
self.content_encoding = None
self._cached_content = None
self._compression_level = None
self._compression_threshold = None
self._compression_requested = False
def __setitem__(self, *args, **kwargs):
self._cached_content = None
super(JsonBody, self).__setitem__(*args, **kwargs)
def __getitem__(self, *args, **kwargs):
self._cached_content = None
return super(JsonBody, self).__getitem__(*args, **kwargs)
@property
def content_length(self):
return len(self.content)
@property
def content(self):
if self._cached_content is not None:
return self._cached_content
encoder = utils.JsonEncoder(self.indent, self.serializers)
content = encoder.encode(self).encode(self.charset)
# attempt to compress
compression_threshold = getattr(self, '_compression_threshold', 128)
compression_level = getattr(self, '_compression_level', 2)
compression_requested = getattr(self, '_compression_requested', False)
if compression_requested and len(content) >= compression_threshold:
compressed_content = io.BytesIO()
with gzip.GzipFile(fileobj=compressed_content, mode='wb', compresslevel=compression_level) as f:
f.write(content)
content = compressed_content.getvalue()
self.content_encoding = 'gzip'
self._cached_content = content
return content
def compress(self, level=2, threshold=128):
self._cached_content = None
self._compression_level = level
self._compression_threshold = threshold
self._compression_requested = True
@property
def wsgi(self):
return [self.content, ]
class Body:
def __init__(self):
self.set_content(bytes())
@property
def content_length(self):
content_length = getattr(self._iterable, 'content_length', None)
if content_length is not None:
content_length = str(content_length)
return content_length
@property
def content_type(self):
content_type = getattr(self._iterable, 'content_type', None)
if content_type is not None:
content_type = str(content_type)
return content_type
@property
def content_encoding(self):
content_encoding = getattr(self._iterable, 'content_encoding', None)
if content_encoding is not None:
content_encoding = str(content_encoding)
return content_encoding
def compress(self, *args):
self._iterable.compress(*args)
def set_file(self):
raise NotImplementedError
def get_json(self):
if not isinstance(self._iterable, JsonBody):
self.set_json({})
return self._iterable
def set_json(self, value=None, charset='utf-8', indent=None, serializers=None):
previous_json_body = getattr(self, '_iterable', None)
if value is None:
value = {}
self._iterable = JsonBody(charset, indent, serializers)
if previous_json_body is not None:
if isinstance(previous_json_body, JsonBody):
level = previous_json_body._compression_level
threshold = previous_json_body._compression_threshold
requested = previous_json_body._compression_requested
self._iterable._compression_level = level
self._iterable._compression_threshold = threshold
self._iterable._compression_requested = requested
self._iterable.update(value)
def set_html(self, html, charset='utf-8'):
self.set_text(html, mimetype='text/html', charset=charset)
def set_text(self, text, mimetype='text/plain', charset='utf-8'):
if mimetype is None:
content_type = None
else:
if charset is None:
content_type = mimetype
else:
content_type = '{}; charset={}'.format(mimetype, charset)
self.set_content(text.encode(charset), content_type)
def set_content(self, content, content_type=None):
self._iterable = BodyContent(content, content_type)
def set_iterable(self, iterable, content_length=None, content_type=None,
content_encoding=None):
self._iterable = BodyIterable(iterable, content_length, content_type, content_encoding)
def __iter__(self):
return iter(self._iterable.wsgi)
content = property(fset=set_content)
text = property(fset=set_text)
json = property(fget=get_json, fset=set_json)
html = property(fset=set_html)
#file = property(fset=set_file)
class Response:
def __init__(self):
self.status_code = 200
self.body = Body()
self.cookies = cookies.ResponseCookies()
self.headers = headers.DelegatedResponseHeaders(
headers.CookiesResponseHeadersView(self.cookies),
headers.BodyResponseHeadersView(self.body),
)
@property
def status(self):
status_code = self.status_code
return '{} {}'.format(
status_code, werkzeug.http.HTTP_STATUS_CODES[status_code]
)
def start(self, start_response):
start_response(self.status, list(self.headers))
return self.body
| 32.993355 | 108 | 0.628436 | 1,067 | 9,931 | 5.597001 | 0.159325 | 0.055258 | 0.028634 | 0.014735 | 0.254521 | 0.232083 | 0.198593 | 0.145345 | 0.065975 | 0.0499 | 0 | 0.003791 | 0.282751 | 9,931 | 300 | 109 | 33.103333 | 0.83462 | 0.027389 | 0 | 0.263158 | 0 | 0 | 0.028806 | 0.004694 | 0 | 0 | 0 | 0 | 0 | 1 | 0.149123 | false | 0 | 0.048246 | 0.030702 | 0.328947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0e3484f5109e28245966fa572c5449b1c193808 | 3,042 | py | Python | jesse/routes/__init__.py | b1nhm1nh/jesse-ctf | 98e519ba6a08af5dd8dd5bae09617a6056f3b5e4 | [
"MIT"
] | 3 | 2021-09-26T15:55:00.000Z | 2022-01-17T08:04:21.000Z | jesse/routes/__init__.py | b1nhm1nh/jesse-ctf | 98e519ba6a08af5dd8dd5bae09617a6056f3b5e4 | [
"MIT"
] | 26 | 2021-10-31T07:04:04.000Z | 2022-03-24T04:24:21.000Z | jesse/routes/__init__.py | b1nhm1nh/jesse | 98e519ba6a08af5dd8dd5bae09617a6056f3b5e4 | [
"MIT"
] | null | null | null | import sys
from typing import List, Any
import jesse.helpers as jh
from jesse import exceptions
from jesse.services import logger
from jesse.models import Route
class RouterClass:
def __init__(self) -> None:
self.routes = []
self.extra_candles = []
self.market_data = []
def _reset(self) -> None:
self.routes = []
self.extra_candles = []
self.market_data = []
@property
def formatted_routes(self) -> list:
"""
Example:
[{'exchange': 'Binance', 'strategy': 'A1', 'symbol': 'BTC-USDT', 'timeframe': '1m'}]
"""
return [
{
'exchange': r.exchange,
'symbol': r.symbol,
'timeframe': r.timeframe,
'strategy': r.strategy_name,
}
for r in self.routes
]
@property
def formatted_extra_routes(self) -> list:
"""
Example:
[{'exchange': 'Binance', 'symbol': 'BTC-USD', 'timeframe': '3m'}]
"""
return [{
'exchange': r['exchange'], 'symbol': r['symbol'], 'timeframe': r['timeframe']
} for r in self.extra_candles]
def initiate(self, routes: list, extra_routes: list = None):
if extra_routes is None:
extra_routes = []
self.set_routes(routes)
self.set_extra_candles(extra_routes)
from jesse.store import store
store.reset(force_install_routes=jh.is_unit_testing())
def set_routes(self, routes: List[Any]) -> None:
self._reset()
self.routes = []
for r in routes:
# validate that the strategy file exists (if the strategy was sent as a string)
if isinstance(r["strategy"], str):
strategy_name = r["strategy"]
if jh.is_unit_testing():
path = sys.path[0]
# live plugin
if path.endswith('jesse-live'):
strategies_dir = f'{sys.path[0]}/tests/strategies'
# main framework
else:
strategies_dir = f'{sys.path[0]}/jesse/strategies'
exists = jh.file_exists(f"{strategies_dir}/{strategy_name}/__init__.py")
else:
exists = jh.file_exists(f'strategies/{strategy_name}/__init__.py')
else:
exists = True
if not exists and isinstance(r["strategy"], str):
raise exceptions.InvalidRoutes(
f'A strategy with the name of "{r["strategy"]}" could not be found.')
self.routes.append(Route(r["exchange"], r["symbol"], r["timeframe"], r["strategy"], None))
def set_market_data(self, routes: List[Any]) -> None:
self.market_data = []
for r in routes:
self.market_data.append(Route(*r))
def set_extra_candles(self, extra_candles: list) -> None:
self.extra_candles = extra_candles
router: RouterClass = RouterClass()
| 32.361702 | 102 | 0.539119 | 331 | 3,042 | 4.791541 | 0.271903 | 0.050441 | 0.050441 | 0.022699 | 0.302648 | 0.302648 | 0.129887 | 0.129887 | 0.129887 | 0.129887 | 0 | 0.002979 | 0.337936 | 3,042 | 93 | 103 | 32.709677 | 0.784508 | 0.087442 | 0 | 0.227273 | 0 | 0 | 0.128735 | 0.052379 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.106061 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0e5fa4bea32ee401cfc25d86926f7410166e17c | 2,978 | py | Python | LeetCode/contest-2018-11-6/decode_at_index.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | LeetCode/contest-2018-11-6/decode_at_index.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | LeetCode/contest-2018-11-6/decode_at_index.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2018/11/6 22:23
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description : 880. Decoded String at Index
Virtual judge stats: users passed 7
Virtual judge stats: user attempts 92
Virtual judge stats: accepted submissions 7
Virtual judge stats: total submissions 92
Difficulty: Medium
You are given an encoded string S. To decode it onto a tape, read one character of the encoded string at a time and take the following steps:
If the character read is a letter, write that letter onto the tape.
If the character read is a digit (say d), the entire current tape is repeated d-1 more times in total.
Now, for the given encoded string S and index K, find and return the K-th letter of the decoded string.
Example 1:
Input: S = "leet2code3", K = 10
Output: "o"
Explanation:
The decoded string is "leetleetcodeleetleetcodeleetleetcode".
The 10th letter of the string is "o".
Example 2:
Input: S = "ha22", K = 5
Output: "h"
Explanation:
The decoded string is "hahahaha". The 5th letter is "h".
Example 3:
Input: S = "a2345678999999999999999", K = 1
Output: "a"
Explanation:
The decoded string is "a" repeated 8301530446056247680 times. The 1st letter is "a".
Constraints:
2 <= S.length <= 100
S contains only lowercase letters and digits 2 to 9.
S starts with a letter.
1 <= K <= 10^9
The decoded string is guaranteed to have fewer than 2^63 letters.
-------------------------------------------------
"""
import time
import re
__author__ = 'Max_Pengjb'
start = time.time()
# Code block starts below
def decode_at_index(S, K):
if re.match("[2-9]", S[0]):
return False
record = []
i = 0
count = 0
while count < K and i < K:
if not S[i].isdigit():
count += 1
else:
count *= int(S[i])
record.append((S[i], count))
i += 1
j = len(record) - 1
K = K % record[j][1]
print(record)
while K != 0:
# if re.match("[2-9]", record[j][0]):
# j -= 1
# K %= record[j][1]
# else:
# j -= 1
# K -= 1
# print(K, j, record[j][1])
while K < record[j][1]:
j -= 1
K %= record[j][1]
print("while K != 0:的时候", K, j, record[j][1])
print("While K=0 de j ", j)
while re.match("[2-9]", record[j][0]):
j -= 1
print(record[j][0])
return record
# A cleaner approach, borrowed from top community solutions
def decodeAtIndex(S, K):
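# Idea: first accumulate the total decoded length, then walk S backwards;
# taking K modulo the current decoded size "undoes" each repeat digit, and
# when K becomes 0 while standing on a letter, that letter is the answer.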
size = 0
for i in S:
if i.isdigit():
size *= int(i)
else:
size += 1
for index in reversed(S):
K %= size
if K == 0 and index.isalpha():
return index
if index.isdigit():
size /= int(index)
else:
size -= 1
return
# s_in = "a2345678999999999999999"
# k_in = 1
# s_in = "ha22"
# k_in = 5
# s_in = "leet2code3"
# k_in = 10
# s_in = "y959q969u3hb22odq595"
# k_in = 222280369
# s_in = "vk6u5xhq9v"
# k_in = 554
s_in = "vzpp636m8y"
k_in = 2920
print(decode_at_index(s_in, k_in))
# To check whether a character is a digit:
# the regex approach is
# import re
# return re.match('\d',x)
# Python also provides the built-in isdigit() and isalpha() methods
def is_digit(x):
try:
x = int(x)
return isinstance(x, int)
except ValueError:
return False
# Code block ends above
end = time.time()
print('Running time: %s Seconds' % (end - start))
| 19.212903 | 53 | 0.496978 | 406 | 2,978 | 3.586207 | 0.359606 | 0.013736 | 0.032967 | 0.024725 | 0.088599 | 0.061813 | 0.053571 | 0.026099 | 0.026099 | 0 | 0 | 0.092647 | 0.314976 | 2,978 | 154 | 54 | 19.337662 | 0.621078 | 0.508395 | 0 | 0.122807 | 0 | 0 | 0.059441 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.035088 | 0 | 0.192982 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0e71e20f553f75129fec573500727dc91adb11d | 6,109 | py | Python | Fusion/modules/Fusion/Utils/WinUT.py | roadnarrows-robotics/rnr-sdk | aee20c65b49fb3eedf924c5c2ec9f19f4f1a1b29 | [
"MIT"
] | null | null | null | Fusion/modules/Fusion/Utils/WinUT.py | roadnarrows-robotics/rnr-sdk | aee20c65b49fb3eedf924c5c2ec9f19f4f1a1b29 | [
"MIT"
] | null | null | null | Fusion/modules/Fusion/Utils/WinUT.py | roadnarrows-robotics/rnr-sdk | aee20c65b49fb3eedf924c5c2ec9f19f4f1a1b29 | [
"MIT"
] | null | null | null | ################################################################################
#
# WinUT.py
#
""" Unit Test Window Module
Simple and Handy Unit Test Window Harness for Fusion Module Unit
Testing.
Author: Robin D. Knight
Email: robin.knight@roadnarrowsrobotics.com
URL: http://www.roadnarrowsrobotics.com
Date: 2006.12.05
Copyright (C) 2006. RoadNarrows LLC.
"""
#
# All Rights Reserved
#
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that
# (1) The above copyright notice and the following two paragraphs
# appear in all copies of the source code and (2) redistributions
# including binaries reproduces these notices in the supporting
# documentation. Substantial modifications to this software may be
# copyrighted by their authors and need not follow the licensing terms
# described here, provided that the new terms are clearly indicated in
# all files where they apply.
#
# IN NO EVENT SHALL THE AUTHOR, ROADNARROWS LLC, OR ANY MEMBERS/EMPLOYEES
# OF ROADNARROW LLC OR DISTRIBUTORS OF THIS SOFTWARE BE LIABLE TO ANY
# PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
# DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
# EVEN IF THE AUTHORS OR ANY OF THE ABOVE PARTIES HAVE BEEN ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHOR AND ROADNARROWS LLC SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN
# "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
################################################################################
import tkinter as tk
import Fusion.Utils.IVTimer as IVTimer
import Fusion.Gui.GuiTextBar as GuiTextBar
#-------------------------------------------------------------------------------
# CLASS: WinUT
#-------------------------------------------------------------------------------
class WinUT:
""" Handy and Simple Unit Test Window Harness Class.
Derive from this class and add specific UT functions.
"""
#--
def __init__(self, title="Unit Test Window", ut={}):
""" Initialize the Window.
Parameters:
title - Title of this window.
ut - Unit test dictionary: {'Menu name': callback, ... }
"""
if not ut:
ut = {'<dummy test>': self.utDummyStart}
root = tk.Tk()
root.wm_title(title)
mb = tk.Menubutton(root, text="Select a Unit Test", bg="#00cccc",
relief=tk.RAISED)
mb.grid(row=0, column=0, stick=tk.W)
mb.menu = tk.Menu(mb, tearoff=0)
for k,v in ut.items():
mb.menu.add_command(label=k, command=v)
mb.config(menu=mb.menu)
b = tk.Button(root, text="Quit", bg="#990000", fg="#ffffff",
command=root.destroy)
b.grid(row=0, column=1, stick=tk.E)
frame = tk.Frame(root, relief=tk.SUNKEN)
frame.grid(row=1, column=0, columnspan=2,
padx=3, pady=5, sticky=tk.W+tk.E)
self.mStatusPane = GuiTextBar.GuiTextBar(frame, width=100, height=4,
maxHistory=1000)
self.mStatusPane.TagAdd("blue", foreground='blue')
self.mStatusPane.TagAdd("black", foreground='black')
self.mStatusPane.TagAdd("red", foreground='red')
self.mStatusPane.TagAdd("green", foreground='#009900')
self.mStatusPane.TagAdd("orange", foreground='#996600')
self.mRoot = root # this Unit Test Window's 'widget'
self.mSut = None # System Under Test
self.mIvt = None # handy interval timer
self.wut_showstatus("Ready", fg='green')
#--
def wut_this(self):
""" Return this UT window's widget. """
return self.mRoot
#--
def wut_mark_sut(self, sut):
""" Mark UT window's SUT window. """
self.mSut = sut
self.mRoot.tkraise()
#--
def wut_cancel(self):
""" Cancel any unit test residules. """
if self.mIvt:
self.mIvt.cancel()
#--
def wut_showstatus(self, msg, fg='black'):
""" Show UT status message. """
self.mStatusPane.ShowStatus(msg, tag=fg)
#--
def utDummyStart(self):
""" Dummy UT Start ."""
self.wut_showstatus("Started dummy UT.")
self.mIvt = IVTimer.IVTimer(0.5, 0.5, self.utDummyIter, cnt=0)
self.mIvt.start()
#--
def utDummyIter(self, ivt):
""" Dummy UT Iterator. """
self.wut_showstatus("Dummy UT: pass %d" % ivt.cnt)
ivt.cnt += 1
#-------------------------------------------------------------------------------
# Unit Test Code
#-------------------------------------------------------------------------------
if __name__ == '__main__':
import Fusion.Gui.GuiWinText as GuiWinText
class MyWinUT(WinUT):
def __init__(self):
ut = {
'My Test 1': self.utTest1Start,
'My Test 2': self.utTest2Start
}
WinUT.__init__(self, title="My Unit Test Window", ut=ut)
def utTest1Start(self):
self.wut_showstatus("Nothing to run for my test 1", fg='red')
def utTest2Start(self):
self.wut_showstatus("Started my test 2.")
self.mIvt = IVTimer.IVTimer(0.5, 0.5, self.utTest2Iter,
firsttime=True, cnt=0)
self.mIvt.start()
def utTest2Iter(self, ivt):
if ivt.firsttime:
msg = "First time for my test 2."
ivt.firsttime = False
else:
msg = "my test 2, pass #%d" % ivt.cnt
self.wut_showstatus(msg)
if self.mSut:
self.mSut.TextAdd(msg+'\n')
ivt.cnt += 1
#--
def main(how='base'):
""" WinUT Unit Test Main. """
if how == 'sut':
winUT = MyWinUT()
winSut = GuiWinText.GuiWinText(winUT.wut_this(), title="Text Window UT")
winUT.wut_mark_sut(winSut)
elif how == 'derived':
winUT = MyWinUT()
else: # 'base':
winUT = WinUT("WinUT Unit Test Window")
winUT.wut_this().mainloop()
winUT.wut_cancel()
# run unit test
#main(how='base')
#main(how='derived')
main(how='sut')
| 31.65285 | 80 | 0.610902 | 781 | 6,109 | 4.729834 | 0.363636 | 0.028154 | 0.02653 | 0.01137 | 0.043855 | 0.027071 | 0.016243 | 0.016243 | 0.016243 | 0 | 0 | 0.015539 | 0.199378 | 6,109 | 192 | 81 | 31.817708 | 0.739726 | 0.4202 | 0 | 0.090909 | 0 | 0 | 0.112005 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0.022727 | 0.045455 | 0 | 0.215909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0e8efa9862a958716652bef31005c1dfcb312ee | 2,241 | py | Python | src/main/interfaces/ui_ppt.py | NLGS2907/Alg1-Lector-de-Ejercicios | bb7e44bd8e5fd7420a61108e5ecb246b510b396b | [
"MIT"
] | 4 | 2021-09-23T16:06:18.000Z | 2021-09-23T23:17:32.000Z | src/main/interfaces/ui_ppt.py | NLGS2907/Alg1-Lector-de-Ejercicios | bb7e44bd8e5fd7420a61108e5ecb246b510b396b | [
"MIT"
] | null | null | null | src/main/interfaces/ui_ppt.py | NLGS2907/Alg1-Lector-de-Ejercicios | bb7e44bd8e5fd7420a61108e5ecb246b510b396b | [
"MIT"
] | null | null | null | """
Interface for a game of \"Piedra, Papel o Tijeras\" (rock, paper, scissors).
"""
from discord import Interaction
from discord import PartialEmoji as Emoji
from discord.enums import ButtonStyle
from discord.ui import Button, button
from ..archivos import DiccionarioStats
from ..ppt import jugar_partida_ppt
from .ui_general import VistaGeneral
class JuegoPPT(VistaGeneral):
"""
Interface for playing 'Piedra, Papel o Tijeras' (rock, paper, scissors).
"""
def __init__(self, stats: DiccionarioStats) -> None:
"""
Creates an instance of 'JuegoPPT'.
"""
super().__init__()
self.stats_juego = stats
@button(style=ButtonStyle.blurple,
custom_id="rock",
label="Piedra",
emoji=Emoji.from_str("\N{rock}"))
async def elegir_piedra(self, interaccion: Interaction, _boton: Button) -> None:
"""
The user chose 'Piedra' (rock) in a game of 'Piedra, Papel o Tijeras'.
"""
await jugar_partida_ppt("PIEDRA",
str(interaccion.user.id),
self.stats_juego,
interaccion)
@button(style=ButtonStyle.blurple,
custom_id="paper",
label="Papel",
emoji=Emoji.from_str("\N{roll of paper}"))
async def elegir_papel(self, interaccion: Interaction, _boton: Button) -> None:
"""
The user chose 'Papel' (paper) in a game of 'Piedra, Papel o Tijeras'.
"""
await jugar_partida_ppt("PAPEL",
str(interaccion.user.id),
self.stats_juego,
interaccion)
@button(style=ButtonStyle.blurple,
custom_id="scissors",
label="Tijeras",
emoji=Emoji.from_str("\N{Black Scissors}"))
async def elegir_tijeras(self, interaccion: Interaction, _boton: Button) -> None:
"""
The user chose 'Tijeras' (scissors) in a game of 'Piedra, Papel o Tijeras'.
"""
await jugar_partida_ppt("TIJERAS",
str(interaccion.user.id),
self.stats_juego,
interaccion)
| 32.478261 | 85 | 0.562249 | 230 | 2,241 | 5.334783 | 0.291304 | 0.02445 | 0.052975 | 0.06194 | 0.533007 | 0.488998 | 0.458843 | 0.458843 | 0.422168 | 0.422168 | 0 | 0 | 0.338242 | 2,241 | 68 | 86 | 32.955882 | 0.827377 | 0.062026 | 0 | 0.315789 | 0 | 0 | 0.054639 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.184211 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0e9c98fae6ec582844793ea4f404f630174755a | 3,626 | py | Python | coramin/utils/mpi_utils.py | dilr/Coramin | 22187e5f9e1631867c29f981ff6dc035341bd23d | [
"BSD-3-Clause"
] | 11 | 2019-04-03T21:33:29.000Z | 2022-02-28T06:07:03.000Z | coramin/utils/mpi_utils.py | dilr/Coramin | 22187e5f9e1631867c29f981ff6dc035341bd23d | [
"BSD-3-Clause"
] | 50 | 2019-04-01T18:48:14.000Z | 2022-03-04T21:51:27.000Z | coramin/utils/mpi_utils.py | dilr/Coramin | 22187e5f9e1631867c29f981ff6dc035341bd23d | [
"BSD-3-Clause"
] | 9 | 2019-03-31T21:29:35.000Z | 2021-09-02T02:33:40.000Z | from mpi4py import MPI
import numpy as np
import sys
import os
class MPISyncError(Exception):
pass
class MPIInterface:
def __init__(self):
self._comm = MPI.COMM_WORLD
self._size = self._comm.Get_size()
self._rank = self._comm.Get_rank()
@property
def comm(self):
return self._comm
@property
def rank(self):
return self._rank
@property
def size(self):
return self._size
class MPIAllocationMap:
def __init__(self, mpi_interface, global_N):
self._mpi_interface = mpi_interface
self._global_N = global_N
rank = self._mpi_interface.rank
size = self._mpi_interface.size
# there must be a better way to do this
# find which entries in global correspond
# to this process (want them to be contiguous
# for the MPI Allgather calls later
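# Worked example: global_N=5 on 2 ranks gives local_N=[3, 2], so rank 0
# owns global indices [0, 1, 2] and rank 1 owns [3, 4] (contiguous blocks).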
local_N = [0 for i in range(self._mpi_interface.size)]
for i in range(global_N):
process_i = i % size
local_N[process_i] += 1
start = 0
end = None
for i,v in enumerate(local_N):
if i == self._mpi_interface.rank:
end = start + v
break
else:
start += v
self._local_map = list(range(start, end))
def local_allocation_map(self):
return list(self._local_map)
def local_list(self, global_data):
local_data = list()
assert(len(global_data) == self._global_N)
for i in self._local_map:
local_data.append(global_data[i])
return local_data
def global_list_float64(self, local_data_float64):
assert(len(local_data_float64) == len(self._local_map))
global_data_numpy = np.zeros(self._global_N, dtype='d')*np.nan
local_data_numpy = np.asarray(local_data_float64, dtype='d')
comm = self._mpi_interface.comm
comm.Allgatherv([local_data_numpy, MPI.DOUBLE],
[global_data_numpy, MPI.DOUBLE])
return global_data_numpy.tolist()
def activate_mpi_printing(style='rank-0-console', rank_0_filename='output_rank_0.txt'):
"""
Redirect standard output based on process rank.
Parameters
----------
style: str
Can be set to one of:
* 'ignore-all': ignore all printing (actually, redirect all printing to os.devnull)
* 'rank-0-console': printing from rank 0 will go to the console,
printing from other processes will be ignored
* 'rank-0-console-x-files': printing from rank 0 will go to the console,
printing from other processes will go to a separate file ('output_rank_x.txt')
* 'rank-0-file': printing from rank 0 will go to 'output_rank_0.txt'
* 'separate-files': printing from each processor will be redirected to a separate
file for each process ('output_rank_x.txt')
"""
rank = MPIInterface().rank
if style == 'ignore-all':
sys.stdout = open(os.devnull, 'w')
elif style == 'rank-0-console':
if rank != 0:
sys.stdout = open(os.devnull, 'w')
elif style == 'rank-0-file':
if rank == 0:
sys.stdout = open(rank_0_filename, 'w')
else:
sys.stdout = open(os.devnull, 'w')
elif style == 'rank-0-console-x-files':
if rank != 0:
sys.stdout = open('output_rank_{0}.txt'.format(str(MPIInterface().rank)), 'w')
elif style == 'separate-files':
sys.stdout = open('output_rank_{0}.txt'.format(str(MPIInterface().rank)), 'w')
| 32.375 | 94 | 0.60535 | 487 | 3,626 | 4.301848 | 0.2423 | 0.045346 | 0.053461 | 0.02673 | 0.229117 | 0.200477 | 0.184248 | 0.172315 | 0.172315 | 0.172315 | 0 | 0.012062 | 0.29123 | 3,626 | 111 | 95 | 32.666667 | 0.803113 | 0.239934 | 0 | 0.166667 | 0 | 0 | 0.055121 | 0.008194 | 0 | 0 | 0 | 0 | 0.027778 | 1 | 0.125 | false | 0.013889 | 0.055556 | 0.055556 | 0.305556 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0ed48db9dedc8fe3209aa95d92dad8965be38ea | 6,281 | py | Python | ai/src/guest_identification.py | huonguy/dataspire-lite | e6953c5e3ece41373b66f50c8908eff60c1d3d66 | [
"MIT"
] | 12 | 2021-03-31T14:32:39.000Z | 2022-02-14T01:49:49.000Z | ai/src/guest_identification.py | huonguy/dataspire-lite | e6953c5e3ece41373b66f50c8908eff60c1d3d66 | [
"MIT"
] | 1 | 2021-09-11T06:02:12.000Z | 2021-09-11T06:02:12.000Z | ai/src/guest_identification.py | huonguy/dataspire-lite | e6953c5e3ece41373b66f50c8908eff60c1d3d66 | [
"MIT"
] | 8 | 2021-04-26T07:05:12.000Z | 2021-12-31T17:42:30.000Z | import numpy as np
import pandas as pd
from pandas import DataFrame
import fuzzymatcher
import traceback
from datetime import datetime
def load_data_file(file_path:str, nrows:int = 10e10):
"""
Load the data file from the given path and return it as a pandas DataFrame for processing
Parameters
----------
file_path : str
Path to load the data file
"""
print(f"Start Loading Data File")
df_input = pd.DataFrame()
try:
df_input = pd.read_csv(file_path, nrows = nrows)
except Exception as error_sum:
print("___")
print("Error summary: \n", error_sum)
error_log = traceback.format_exc()
print("Error Details: \n", str(error_log))
print("___")
print(f"Loading File With Total {len(df_input)} Observations")
print(f"__________________________")
return df_input
def preprocess_data_file(df_input: DataFrame):
"""
Preprocess the input DataFrame for the fuzzy matching algorithm in the next step. The output is the processed DataFrame.
Parameters
----------
df_input : DataFrame
Input DataFrame
"""
print(f"Start Preprocess Data File")
start_time = datetime.now()
df_input_processed = df_input.copy()
df_input_processed['RoomNo'] = df_input_processed['RoomNo'].fillna("")
df_input_processed['Children'] = df_input_processed['Children'].fillna(0)
try:
df_input_processed["orderid"] = df_input_processed.index
df_input_processed["LastName"] = df_input_processed["LastName"].fillna("")
df_input_processed["LastName"] = df_input_processed["LastName"].str.lower()
df_input_processed["LastName"] = df_input_processed["LastName"].str.strip()
df_input_processed["FirstName"] = df_input_processed["FirstName"].fillna("")
df_input_processed["FirstName"] = df_input_processed["FirstName"].str.lower()
df_input_processed["FirstName"] = df_input_processed["FirstName"].str.strip()
df_input_processed["tmp_name"] = df_input_processed["FirstName"] + " " + df_input_processed["LastName"]
df_input_processed["Email"] = df_input_processed["Email"].fillna("")
df_input_processed["Email"] = df_input_processed["Email"].str.lower()
df_input_processed["Email"] = df_input_processed["Email"].str.strip()
except Exception as error_sum:
print("___")
print("Error summary: \n", error_sum)
error_log = traceback.format_exc()
print("Error Details: \n", str(error_log))
print("___")
end_time = datetime.now()
process_time = str(end_time - start_time)
print(f"Preprocess Data File Time Consuming: {process_time}")
print(f"__________________________")
return df_input_processed
def fuzzy_matching_algorithm(df_input_processed: DataFrame):
"""
Run the fuzzy matching algorithm and return the DataFrame with a GuestID column
Parameters
----------
df_input_processed : DataFrame
Input DataFrame
"""
cols_on_matching = ['tmp_name', 'Email']
print(f"Start Apply Fuzzy Matching Algorithm")
start_time = datetime.now()
df_output = pd.DataFrame()
try:
DF = dict()
id_features = cols_on_matching + ['orderid']
DF['guest_id_left'] = df_input_processed[id_features]
DF['guest_id_right'] = df_input_processed[id_features]
matched_results = fuzzymatcher.fuzzy_left_join(DF['guest_id_left'],
DF['guest_id_right'],
cols_on_matching,
cols_on_matching,
left_id_col='orderid',
right_id_col='orderid')
print(f"Guest Identification Output")
print(matched_results.sort_values(by="best_match_score", ascending=False).head(10))
print(f"__________________________")
df_matched = matched_results[matched_results["best_match_score"]>=0.05].copy().sort_values(by="best_match_score", ascending=True)
df_matched = df_matched[["__id_left", "__id_right"]]
df_output = pd.merge(df_input_processed, df_matched, how="left", left_on="orderid", right_on="__id_left")
df_output['__id_right'] = df_output['__id_right'].mask(pd.isnull, df_output['orderid'])
df_output = df_output.drop(columns = ["__id_left", "tmp_name", "orderid"])
df_output = df_output.rename(columns={"__id_right": "GuestID"})
df_output['GuestID'] = df_output['GuestID'].astype(int).astype(str)
except Exception as error_sum:
print("___")
print("Error summary: \n", error_sum)
error_log = traceback.format_exc()
print("Error Details: \n", str(error_log))
print("___")
end_time = datetime.now()
process_time = str(end_time - start_time)
print(f"Fuzzy Matching Algorithm Time Consuming: {process_time}")
print(f"__________________________")
return df_output
def guest_identification_process(data_input: str, folder_path: str, force_run: int=0):
try:
df_input = pd.read_json(data_input)
except:
df_input = pd.DataFrame(data_input)
print(f"\nInput DataFrame with total {len(df_input)} observations")
print(df_input.columns)
print(df_input.head(10))
print(f"__________________________")
if len(df_input) < 1000 and force_run == 0:
return df_input
elif len(df_input) > 75000 and force_run == 0:
return df_input
elif len(df_input) < 300:
return df_input
df_input_processed = preprocess_data_file(df_input)
df_output = fuzzy_matching_algorithm(df_input_processed)
df_output.to_csv(str(folder_path) + "data_input_with_guest_id.csv", index = False)
return df_output
if __name__ == "__main__":
file_path = "../sincq-dataset/SINCQ_merged_without_id.csv"
df_input = load_data_file(file_path, 10000)
df_input_processed = preprocess_data_file(df_input)
df_output = fuzzy_matching_algorithm(df_input_processed)
print(df_output.head(10)) | 38.29878 | 137 | 0.647827 | 758 | 6,281 | 4.833773 | 0.1781 | 0.112718 | 0.165939 | 0.045852 | 0.485262 | 0.396015 | 0.385644 | 0.333515 | 0.274563 | 0.193231 | 0 | 0.007131 | 0.240885 | 6,281 | 164 | 138 | 38.29878 | 0.761326 | 0.078491 | 0 | 0.405405 | 0 | 0 | 0.197326 | 0.036007 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036036 | false | 0 | 0.054054 | 0 | 0.153153 | 0.261261 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0ed79fac549ead768c886ae922c556e689cb9a0 | 1,297 | py | Python | journal/views/utilities.py | kevinlee12/cas | 1284d5a05731e441d523a4894a28e8a194c491f0 | [
"Apache-2.0"
] | null | null | null | journal/views/utilities.py | kevinlee12/cas | 1284d5a05731e441d523a4894a28e8a194c491f0 | [
"Apache-2.0"
] | 3 | 2015-04-19T03:00:57.000Z | 2015-04-19T03:02:20.000Z | journal/views/utilities.py | kevinlee12/cas | 1284d5a05731e441d523a4894a28e8a194c491f0 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2015 The iU Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from actstream.models import user_stream
from django.http import HttpResponseRedirect
from django.shortcuts import render
import itertools
def unread_notifications_count(request):
"""Returns the number of unseen notifications"""
count = len(list(itertools.filterfalse(lambda x: x.data['seen'],
user_stream(request.user))))
return render(request, 'journal/unread_count.html',
{'unread_count': count})
def reset_notifications_count(request):
def set_seen(action):
action.data['seen'] = True
action.save()
list(map(set_seen, user_stream(request.user)))
return HttpResponseRedirect('/activities')
| 34.131579 | 74 | 0.728604 | 176 | 1,297 | 5.306818 | 0.596591 | 0.06424 | 0.027837 | 0.034261 | 0.066381 | 0.066381 | 0 | 0 | 0 | 0 | 0 | 0.008491 | 0.182729 | 1,297 | 37 | 75 | 35.054054 | 0.872642 | 0.487278 | 0 | 0 | 0 | 0 | 0.086822 | 0.03876 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.266667 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0eda1c7981350260ce730649b40e3ee0edd88c1 | 4,691 | py | Python | Maze.py | SheffieldCao/MazeSolver | 78dbeb93d414e0cc821be638ed0c4776e3ef0274 | [
"MIT"
] | null | null | null | Maze.py | SheffieldCao/MazeSolver | 78dbeb93d414e0cc821be638ed0c4776e3ef0274 | [
"MIT"
] | 3 | 2021-09-08T03:41:18.000Z | 2022-03-12T01:00:58.000Z | Maze.py | SheffieldCao/MazeSolver | 78dbeb93d414e0cc821be638ed0c4776e3ef0274 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import utils
class MazeMap:
'''define Pixel Point class'''
class Point:
def __init__(self, position):
# Coordinate of Point
self.Position = position
# 4 neighbours, by default go straight
self.Neighbours = [None, None, None, None]
def __gt__(self, node):
# By default, the priority of the compared nodes is lower than self
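# (presumably so that tie-breaks in a priority queue, e.g. heapq, never
# raise TypeError when two nodes compare with equal priority)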
return True
def __init__(self, im):
if np.any(im[0,:]) == True:
self.rotate = False
# don't rotate the img
elif np.any(im[:,0]) == True:
self.rotate = True
im = utils.rotate(im,-90)
width = im.shape[1]
height = im.shape[0]
data = list(im.ravel().astype(int)/255)
self.start = None
self.end = None
# initialize start Point
toppoints = [None] * width
count = 0
for x in range (1, width - 1):
# border of maze is wall, grayscale value:0 ,x from 1 to width-2
if data[x] > 0:
self.start = MazeMap.Point((0,x))
toppoints[x] = self.start
count += 1
break
for y in range (1, height - 1):
# border of maze is wall, grayscale value:0 ,y from 1 to height-2
row_offset = y * width
rowup_offset = row_offset - width
rowdown_offset = row_offset + width
# Initialize previous, current and next values
prv = False
cur = False
nxt = data[row_offset + 1] > 0 # initialize nxt = data[i*width+1] for y = i
leftnode = None
for x in range (1, width - 1):
# Step by step, Move prev, current and next towards right.
# read all internal points of row_y
prv = cur
cur = nxt
nxt = data[row_offset + x + 1] > 0
n = None
if cur == False:
# cur is on wall, do nothing continue move
continue
if prv == True:
if nxt == True:
# prv, cur, nxt = road, road, road
# Create node only if paths above or below
if data[rowup_offset + x] > 0 or data[rowdown_offset + x] > 0:
n = MazeMap.Point((y,x))
leftnode.Neighbours[1] = n
n.Neighbours[3] = leftnode
leftnode = n
else:
# prv, cur, nxt = road, road, wall
# Create path at end of corridor
n = MazeMap.Point((y,x))
leftnode.Neighbours[1] = n
n.Neighbours[3] = leftnode
leftnode = None
else:
if nxt == True:
# prv, cur, nxt = wall, road, road
# Create path at start of corridor
n = MazeMap.Point((y,x))
leftnode = n
else:
# prv, cur, nxt = wall, road, wall
# Create node only if in dead end
if (data[rowup_offset + x] == 0) or (data[rowdown_offset + x] == 0):
#print ("Create Node in dead end")
n = MazeMap.Point((y,x))
# If node isn't none, we can assume we can connect N-S somewhere
if n != None:
# If clear above, connect to the waiting top node
if (data[rowup_offset + x] > 0):
t = toppoints[x]
t.Neighbours[2] = n
n.Neighbours[0] = t
# If clear below, put this new node in the top row for the next connection
if (data[rowdown_offset + x] > 0):
toppoints[x] = n
else:
toppoints[x] = None
count += 1
# Last row
row_offset = (height - 1) * width
for x in range (1, width - 1):
if data[row_offset + x] > 0:
self.end = MazeMap.Point((height - 1,x))
t = toppoints[x]
t.Neighbours[2] = self.end
self.end.Neighbours[0] = t
count += 1
break
self.count = count
self.width = width
self.height = height | 37.528 | 94 | 0.433596 | 525 | 4,691 | 3.820952 | 0.253333 | 0.007976 | 0.027916 | 0.027916 | 0.317547 | 0.274676 | 0.224327 | 0.158026 | 0.13659 | 0.102692 | 0 | 0.021892 | 0.483905 | 4,691 | 125 | 95 | 37.528 | 0.806691 | 0.219996 | 0 | 0.329412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035294 | false | 0 | 0.035294 | 0.011765 | 0.105882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0eece2bd0d0d30e357612d5e1a6f584f087d50f | 71,193 | py | Python | lib/ugrid_checks/check.py | bjlittle/ugrid-checks | c9247a3ab2412c6e2eaaf0fd1401e9c35a530595 | [
"BSD-3-Clause"
] | null | null | null | lib/ugrid_checks/check.py | bjlittle/ugrid-checks | c9247a3ab2412c6e2eaaf0fd1401e9c35a530595 | [
"BSD-3-Clause"
] | 2 | 2022-02-28T19:47:47.000Z | 2022-03-01T19:39:38.000Z | lib/ugrid_checks/check.py | bjlittle/ugrid-checks | c9247a3ab2412c6e2eaaf0fd1401e9c35a530595 | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
import re
from typing import AnyStr, Dict, List, Set, Tuple, Union
import numpy as np
from .nc_dataset_scan import NcFileSummary, NcVariableSummary, scan_dataset
from .scan_utils import (
property_as_single_name,
property_namelist,
vars_w_props,
)
from .ugrid_logger import CheckLoggingInterface
__all__ = ["Checker", "check_dataset"]
_VALID_UGRID_LOCATIONS = [
"node",
"edge",
"face", # Not supporting 'volume' at present
]
_VALID_CONNECTIVITY_ROLES = [
"edge_node_connectivity",
"face_node_connectivity",
"face_edge_connectivity",
"edge_face_connectivity",
"face_face_connectivity",
"boundary_node_connectivity",
]
_VALID_UGRID_CF_ROLES = [
"mesh_topology",
"location_index_set",
] + _VALID_CONNECTIVITY_ROLES
_VALID_MESHCOORD_ATTRS = [
f"{location}_coordinates" for location in _VALID_UGRID_LOCATIONS
]
_VALID_CF_CF_ROLES = [
"timeseries_id",
"profile_id",
"trajectory_id",
]
# Valid cf varname regex : copied from iris.common.metadata code.
_VALID_NAME_REGEX = re.compile(r"""^[a-zA-Z][a-zA-Z0-9]*[\w.+\-@]*$""")
class Checker:
"""
Object to perform UGRID checking on a file.
Scans a file on creation, and records the checking messages on its
'self.logger', which is a :class:`CheckLoggingInterface`.
Can produce text reports for a checking summary, and a file structure
summary.
Could also be used programmatically to aid file analysis, but the way the
information is stored is not currently designed with external use in mind.
"""
def __init__(
self,
file_scan: NcFileSummary,
logger: CheckLoggingInterface = None,
do_data_checks: bool = False,
ignore_warnings=False,
ignore_codes: Union[List[str], None] = None,
):
self.file_scan = file_scan
if logger is None:
logger = CheckLoggingInterface()
self.logger = logger
self.do_data_checks = do_data_checks
if ignore_codes is None:
ignore_codes = []
self.ignore_codes = ignore_codes
self.ignore_warnings = ignore_warnings
# A shortcut for all the variables
self._all_vars = file_scan.variables
# Note: the following are filled in by 'dataset_identify_containers'
self._meshdata_vars: Dict[str, NcVariableSummary] = {}
self._mesh_vars: Dict[str, NcVariableSummary] = {}
self._lis_vars: Dict[str, NcVariableSummary] = {}
self._mesh_referrers: Dict[str, str] = {}
self._lis_referrers: Dict[str, str] = {}
# Note: these are filled by 'dataset_check_containers_and_map_dims'
self._all_mesh_dims: Dict[str, Dict[str, Union[None, str]]] = {}
self._allowed_cfrole_varnames: List[str]
self._orphan_connectivities: Dict[str, NcVariableSummary] = {}
# Initialise
self.check_dataset()
def state(self, errcode: str, vartype: str, varname: str, msg: str):
"""
Log a checking statement.
Interface as for :meth:`CheckLoggingInterface.state`.
"""
if errcode not in self.ignore_codes:
if not self.ignore_warnings or not errcode.startswith("A"):
self.logger.state(errcode, vartype, varname, msg)
def check_mesh_attr_is_varlist(
self, meshvar: NcVariableSummary, attrname: str
):
"""
Check that a mesh-var attribute, if it exists, is a valid varlist.
Parameters
----------
meshvar : class:`NcVariableSummary`
mesh variable
attrname : str
name of the attribute of 'meshvar' to check
Returns
-------
ok : bool
True iff no problems were found
"""
value = meshvar.attributes.get(attrname)
if value is None:
# Missing is ok. But NB *not* an empty string (see below).
success = True
else:
success = value.dtype.kind == "U"
if not success:
msg = (
f"attribute '{attrname}' has type \"{value.dtype}\", "
"which is not a string type."
)
self.state("R105", "Mesh", meshvar.name, msg)
if success:
varnames = property_namelist(value)
if not varnames:
# Empty is *not* a valid content.
# N.B. this includes non-string contents.
self.state(
"R105",
"Mesh",
meshvar.name,
f'has {attrname}="{value}", '
"which is not a valid list of netcdf variable names.",
)
success = False
if success:
for varname in varnames:
if not varname: # skip any extra blanks
continue
if not _VALID_NAME_REGEX.match(varname):
self.state(
"R105",
"Mesh",
meshvar.name,
f'has {attrname}="{varname}", '
"which is not a valid netcdf variable name.",
)
success = False
elif varname not in self._all_vars:
self.state(
"R106",
"Mesh",
meshvar.name,
f"attribute '{attrname}' refers to a variable "
f'"{varname}", but there is no such variable '
"in the dataset.",
)
success = False
return success
def var_ref_problem(self, attr_value: np.ndarray) -> str:
"""
Make a text description of any problems of a single-variable reference.
Check that the input contains a single, valid name, referring to an
existing variable.
If no problem, returns an empty string.
"""
succeed = True
if attr_value.dtype.kind != "U":
result = "is not a string value"
succeed = False
if succeed:
names = property_namelist(attr_value)
if len(names) != 1:
result = "is not a single variable name"
succeed = False
if succeed:
ref_name = property_as_single_name(attr_value)
if not _VALID_NAME_REGEX.match(ref_name):
result = "is not a valid netcdf variable name"
succeed = False
if succeed:
ref_var = self._all_vars.get(ref_name)
if ref_var is None:
result = "is not a variable in the dataset"
succeed = False
if succeed:
result = ""
return result
def check_coord_bounds(self, coord: NcVariableSummary) -> List[Tuple[str]]:
"""
Validity-check the bounds of a coordinate (if any).
Ok for _no_ bounds-attribute, but not if it is an empty string.
Check: existence, n-dims, parent dimension, standard-name and units.
Note: this method does not log messages directly, but returns results
for the caller to log them with added context.
Returns
codes_and_messages : List[tuple(str, str)]
a list of codes and messages, to be logged in the context of the
parent coordinate variable.
"""
bounds_name = coord.attributes.get("bounds")
result_codes_and_messages = []
def log_bounds_statement(code, msg):
msg = f'has bounds="{bounds_name}", which {msg}'
result_codes_and_messages.append((code, msg))
has_bounds = bounds_name is not None
if has_bounds:
msg = self.var_ref_problem(bounds_name)
if msg != "":
log_bounds_statement("R203", f"{msg}.") # NB full stop !
has_bounds = False
if has_bounds:
# NB from the above check, we do have a bounds variable.
bounds_var = self._all_vars[str(bounds_name)]
bounds_dims = bounds_var.dimensions
(coord_dim,) = coord.dimensions # NB always has exactly 1
if coord_dim not in bounds_dims:
msg = (
f"has dimensions {bounds_dims!r}, which does not include "
f'the parent variable dimension, "{coord_dim}".'
)
log_bounds_statement("R203", msg)
n_bounds_dims = len(bounds_dims)
if n_bounds_dims != 2:
msg = (
f"has dimensions {bounds_dims!r}, of which there should "
f"be 2, instead of {n_bounds_dims}."
)
log_bounds_statement("R203", msg)
#
# Advisory checks
#
def check_attr_mismatch(attr_name):
coord_attr, bounds_attr = [
var.attributes.get(attr_name)
for var in (coord, bounds_var)
]
if bounds_attr is not None and bounds_attr != coord_attr:
if coord_attr is None:
coord_attr = "<none>"
msg = (
f'has {attr_name}="{bounds_attr}", which does not '
f"match the parent '{attr_name}' of \"{coord_attr}\"."
)
log_bounds_statement("R203", msg)
check_attr_mismatch("standard_name")
check_attr_mismatch("units")
# Do the data-values check. This is potentially costly.
if self.do_data_checks:
# TODO: enable data-value checks by attaching lazy data arrays
# to scan variables.
assert bounds_var.data is not None
raise ValueError("Not ready for data-value checks.")
log_bounds_statement("A205", "???")
return result_codes_and_messages
def check_mesh_coordinates(
self,
meshvar: NcVariableSummary,
attr_name: str,
):
"""Validity-check a coordinate attribute of a mesh-variable."""
# Note: the content of the coords attribute was already checked
# Elements which change as we scan the various coords.
coord = None
common_msg_prefix = ""
# Function to emit a statement message, adding context as to the
# specific coord variable.
def log_coord(code, msg):
self.state(
code, "Mesh coordinate", coord.name, common_msg_prefix + msg
)
coord_names = property_namelist(meshvar.attributes.get(attr_name))
for coord_name in coord_names:
if coord_name not in self._all_vars:
# This problem will already have been detected + logged.
continue
coord = self._all_vars[coord_name]
common_msg_prefix = f"within {meshvar.name}:{attr_name} "
coord_ndims = len(coord.dimensions)
if coord_ndims != 1:
msg = (
f"should have exactly one dimension, but has "
f"{coord_ndims} dimensions : {coord.dimensions!r}."
)
log_coord("R201", msg)
else:
# Check the dimension is the correct one according to location.
(coord_dim,) = coord.dimensions
location = attr_name.split("_")[0]
mesh_dim = self._all_mesh_dims[meshvar.name][location]
if coord_dim != mesh_dim:
msg = (
f'has dimension "{coord_dim}", but the parent mesh '
f'{location} dimension is "{mesh_dim}".'
)
log_coord("R202", msg)
# Check coord bounds (if any)
# N.B. this *also* assumes a single dim for the primary var
codes_and_messages = self.check_coord_bounds(coord)
for code, msg in codes_and_messages:
log_coord(code, msg)
#
# Advisory notes..
#
# A201 should have 1-and-only-1 parent mesh : this is handled by
# 'check_dataset', as it involves multiple meshes.
# A202 floating-point type
dtype = coord.dtype
if dtype.kind != "f":
log_coord(
"A202",
f'has type "{dtype}", which is not a floating-point type.',
)
# A203 standard-name : has+valid (can't handle fully ??)
stdname = coord.attributes.get("standard_name")
if not stdname:
log_coord("A203", "has no 'standard_name' attribute.")
# A204 units : has+valid (can't handle fully ??)
stdname = coord.attributes.get("units")
if not stdname:
log_coord("A204", "has no 'units' attribute.")
# A205 bounds data values match derived ones
# - did this already above, within "check_coord_bounds"
def check_connectivity(
self,
conn_var: NcVariableSummary,
meshvar: Union[NcVariableSummary, None] = None,
role_name: Union[str, None] = None,
):
"""
Validity-check a connectivity variable.
This is either in the context of a containing 'meshvar', **or** with
no containing mesh (so-called "orphan connectivity").
In the 'orphan' case, both meshvar and role_name are None.
"""
# Add to our list of variables 'allowed' to have a UGRID cf-role.
conn_name = conn_var.name
self._allowed_cfrole_varnames.append(conn_name)
if meshvar:
msg_prefix = f'of mesh "{meshvar.name}" '
else:
msg_prefix = ""
def log_conn(errcode, msg):
self.state(
errcode, "Mesh connectivity", conn_name, msg_prefix + msg
)
cf_role = conn_var.attributes.get("cf_role")
if cf_role is None:
log_conn("R301", "has no 'cf_role' attribute.")
elif cf_role not in _VALID_CONNECTIVITY_ROLES:
msg = (
f'has cf_role="{cf_role}", '
"which is not a valid UGRID connectivity attribute."
)
log_conn("R302", msg)
elif role_name and cf_role != role_name:
msg = (
f'has cf_role="{cf_role}", which is different from its '
f'role in the parent mesh, which is "{role_name}".'
)
log_conn("R303", msg)
if meshvar:
# In the context of a meshvar, take 'role_name' as the definition.
# -- we will then check the 'cf_role' attribute against that.
assert role_name
else:
# With no meshvar, use the 'cf_role' attribute as our role
# definition -- if there is one.
role_name = str(cf_role) if cf_role else None
conn_dims = conn_var.dimensions
dims_msg = f"has dimensions {conn_dims!r}"
if len(conn_dims) != 2:
msg = (
f"{dims_msg}, of which there are "
f"{len(conn_dims)}, instead of 2."
)
log_conn("R304", msg)
if meshvar:
# Check dims : can only be checked against a parent mesh
mesh_dims = self._all_mesh_dims[meshvar.name]
is_parent_dim = [dim in mesh_dims.values() for dim in conn_dims]
n_parent_dims = sum(is_parent_dim)
if n_parent_dims == 0:
msg = (
f"{dims_msg}, which does not contain any element "
f"dimension of the parent mesh."
)
log_conn("R305", msg)
elif n_parent_dims == len(conn_dims):
msg = (
f"{dims_msg}, which does not contain any dimension "
f"which is not an element dimension of the parent mesh."
)
log_conn("R306", msg)
else:
# Some are parent mesh-dims, and some not.
# Just check that the *expected* mesh-dim is there.
location = role_name.split("_")[0]
parent_dim = mesh_dims[location]
if parent_dim not in conn_dims:
msg = (
f"{dims_msg}, which does not include the expected "
f"{location} dimension of the parent mesh, "
f'"{parent_dim}".'
)
log_conn("R307", msg)
edgelike_conns = (
"edge_node_connectivity",
"boundary_node_connectivity",
)
if role_name in edgelike_conns and n_parent_dims == 1:
(conn_nonmesh_dim,) = (
dim
for dim, in_parent in zip(conn_dims, is_parent_dim)
if not in_parent
)
nonmesh_dim = self.file_scan.dimensions[conn_nonmesh_dim]
nonmesh_length = nonmesh_dim.length
if nonmesh_length != 2:
msg = (
f"{dims_msg}, which contains the non-mesh "
f'dimension "{conn_nonmesh_dim}", but this has '
f"length {nonmesh_length} instead of 2."
)
log_conn("R308", msg)
index_value = conn_var.attributes.get("start_index")
if index_value is not None:
# Note: check value, converted to int.
# This avoids an extra warning for strings like "0", "1",
# since a non-integral type triggers an A302 warning anyway.
if int(index_value) not in (0, 1):
msg = (
f'has start_index="{index_value}", which is not '
"either 0 or 1."
)
log_conn("R309", msg)
if role_name and self.do_data_checks:
if role_name.endswith("_node_connectivity"):
# Check for missing values
msg = "may have missing indices (NOT YET CHECKED)."
log_conn("R310", msg)
#
# Advisory checks
#
# A301 1-and-only-1 parent mesh
# In 'dataset_detect_multiple_refs', since it involves multiple meshes
if conn_var.dtype.kind != "i":
msg = (
f'has type "{conn_var.dtype}", '
"which is not an integer type."
)
log_conn("A302", msg)
if index_value is not None and index_value.dtype != conn_var.dtype:
msg = (
f"has a 'start_index' of type \"{index_value.dtype}\", "
"which is different from the variable type, "
f'"{conn_var.dtype}".'
)
log_conn("A303", msg)
fill_value = conn_var.attributes.get("_FillValue")
if (
role_name
and role_name.endswith("_node_connectivity")
and fill_value is not None
):
msg = (
f"has a '_FillValue' attribute, which should not be present "
f'on a "{role_name}" connectivity.'
)
log_conn("A304", msg)
if self.do_data_checks:
# check for missing indices
msg = "may have missing indices (NOT YET CHECKED)."
log_conn("A305", msg)
if fill_value is not None and fill_value.dtype != conn_var.dtype:
msg = (
f"has a '_FillValue' of type \"{fill_value.dtype}\", "
"which is different from the variable type, "
f'"{conn_var.dtype}".'
)
log_conn("A306", msg)
if fill_value is not None and fill_value >= 0:
msg = f'has _FillValue="{fill_value}", which is not negative.'
log_conn("A307", msg)
if meshvar and self.do_data_checks:
# check for missing indices
msg = (
"may have indices which exceed the length of the element "
"dimension (NOT YET CHECKED)."
)
log_conn("A308", msg)
def check_mesh_connectivity(
self,
meshvar: NcVariableSummary,
attr_name: str,
):
"""Validity-check a connectivity attribute of a mesh-variable."""
attr_value = meshvar.attributes.get(attr_name)
ok = attr_value is not None
if ok:
conn_name = property_as_single_name(attr_value)
ok = conn_name is not None
if ok:
conn_var = self._all_vars.get(conn_name)
ok = conn_var is not None
if ok:
# Remove from the orphan list
self._orphan_connectivities.pop(conn_name, None)
# Check it, in the context of the containing mesh
self.check_connectivity(conn_var, meshvar, attr_name)
def check_mesh_var(self, meshvar: NcVariableSummary) -> Dict[str, str]:
"""
Validity-check a mesh variable.
Parameters
----------
meshvar : :class:`NcVariableSummary`
meshvar to check
"""
def log_meshvar(code, msg):
self.state(code, "Mesh", meshvar.name, msg)
# First check for bad 'cf_role' :
# if wrong, meshvar can only have been identified by reference.
cfrole_prop = meshvar.attributes.get("cf_role", None)
if cfrole_prop != "mesh_topology":
# This variable does not have the expected 'cf_role', so if we are
# checking it, it must be referred to as 'mesh' by some variable.
referring_var_name = self._mesh_referrers[meshvar.name]
# Either there is no 'cf_role', or it is "wrong".
msg = (
f"appears to be a mesh, "
f'since it is the value of "{referring_var_name}:mesh". '
"But it has "
)
if cfrole_prop is None:
msg += "no 'cf_role' property,"
errcode = "R101"
else:
msg += f'cf_role="{cfrole_prop}",'
errcode = "R102"
msg += ' which should be "mesh_topology".'
# N.B. do not identify as a Mesh, statement just says "variable"
self.state(errcode, "", meshvar.name, msg)
# Also, if the 'cf_role' was something else, then check it is a
# valid option + emit an additional message if needed.
if (
cfrole_prop is not None
and cfrole_prop not in _VALID_UGRID_CF_ROLES
):
msg = (
f'has cf_role="{cfrole_prop}", '
"which is not a valid UGRID cf_role."
)
log_meshvar("A905", msg)
topology_dimension = meshvar.attributes.get("topology_dimension")
if topology_dimension is None:
log_meshvar("R103", "has no 'topology_dimension' attribute.")
else:
# Check the topology dimension.
# In principle, this controls which other connectivity properties
# may appear : In practice, it is better to parse those
# independently, and then cross-check.
if topology_dimension not in (0, 1, 2):
msg = (
f'has topology_dimension="{topology_dimension}", '
"which is not 0, 1 or 2."
)
log_meshvar("R104", msg)
# Handle this subsequently as if it was missing
topology_dimension = None
# Work out what topology-dimension is implied by the available mesh
# properties, which we will use *instead* of the declared one in
# subsequent tests (and check the declared one against it).
highest_connectivity = None
appropriate_dim = 0
if "face_node_connectivity" in meshvar.attributes:
appropriate_dim = 2
highest_connectivity = "face_node_connectivity"
elif "edge_node_connectivity" in meshvar.attributes:
appropriate_dim = 1
highest_connectivity = "edge_node_connectivity"
if topology_dimension is not None:
# Emit an error if the attributes present don't match the stated
# topology-dimension. If *no* topology-dimension, skip this : we
# already flagged that it was missing, above.
if topology_dimension != appropriate_dim:
if topology_dimension == 0:
if appropriate_dim == 1:
errcode = "R111" # unexpected edge-node
else:
assert appropriate_dim == 2
errcode = "R113" # unexpected face-node
elif topology_dimension == 1:
if appropriate_dim == 0:
errcode = "R112" # missing edge-node
else:
assert appropriate_dim == 2
errcode = "R113" # unexpected face-node
else:
assert topology_dimension == 2
errcode = "R113" # missing face-node
if topology_dimension < appropriate_dim:
# something is extra
msg = (
f'has topology_dimension="{topology_dimension}", '
f"but the presence of a '{highest_connectivity}' "
f"attribute implies it should be {appropriate_dim}."
)
else:
# something is missing
topology_required_attribute = {
0: "face_node",
1: "edge_node_connectivity",
2: "face_node_connectivity",
}[int(topology_dimension)]
msg = (
f'has topology_dimension="{topology_dimension}", '
f"but it has no '{topology_required_attribute}' "
f"attribute."
)
log_meshvar(errcode, msg)
# Check all coordinate and connectivity attributes are valid "varlists"
varlist_names = _VALID_MESHCOORD_ATTRS + _VALID_CONNECTIVITY_ROLES
for attr in varlist_names:
is_conn = attr in _VALID_CONNECTIVITY_ROLES
attr_value = meshvar.attributes.get(attr)
if attr_value is not None:
ok = self.check_mesh_attr_is_varlist(meshvar, attr)
var_names = property_namelist(attr_value)
if not ok:
errcode = "R109" if is_conn else "R108"
msg = (
f'has {attr}="{attr_value}", which is not '
"a list of variables in the dataset."
)
log_meshvar(errcode, msg)
elif is_conn and len(var_names) != 1:
msg = (
f'has {attr}="{attr_value}", which contains '
f"{len(var_names)} names, instead of 1."
)
log_meshvar("R107", msg)
# Work out the actual mesh dimensions.
mesh_dims = {
name: None for name in ("face", "edge", "node", "boundary")
}
self._all_mesh_dims[meshvar.name] = mesh_dims
if "node_coordinates" not in meshvar.attributes:
log_meshvar(
"R110", "does not have a 'node_coordinates' attribute."
)
else:
# Note: if a 'node_coordinates' attribute exists, then we already
# checked that it is a valid varlist.
# So don't re-raise any problems here, just press on.
coord_names = property_namelist(
meshvar.attributes["node_coordinates"]
)
if coord_names:
coord_var = self._all_vars.get(coord_names[0])
if coord_var:
# Answer is the first dimension, if any.
if len(coord_var.dimensions) > 0:
mesh_dims["node"] = coord_var.dimensions[0]
def deduce_element_dim(location):
# Identify the dim, and check consistency of relevant attributes.
# If found, set it in 'mesh_dims'
dimattr_name = f"{location}_dimension"
connattr_name = f"{location}_node_connectivity"
dimension_name = property_as_single_name(
meshvar.attributes.get(dimattr_name)
)
if location in ("boundary", "node"):
# No 'node_dimension' or 'boundary_dimension' attribute is supported.
if dimension_name:
dimension_name = None
msg = (
f"has an attribute '{dimattr_name}', which is not "
"a valid UGRID term, and may be a mistake."
)
log_meshvar("A106", msg)
if dimension_name:
# There is an explicit 'xxx_dimension' property.
if connattr_name not in meshvar.attributes:
errcode = {"edge": "R123", "face": "R122"}[location]
msg = (
f"has an attribute '{dimattr_name}', "
"which is not valid "
f"since there is no '{connattr_name}'."
)
log_meshvar(errcode, msg)
elif dimension_name in self.file_scan.dimensions:
mesh_dims[location] = dimension_name
else:
errcode = {"edge": "R115", "face": "R117"}[location]
msg = (
f'has {dimattr_name}="{dimension_name}", which is not '
"a dimension in the dataset."
)
log_meshvar(errcode, msg)
elif connattr_name in meshvar.attributes:
# No "xxx_dimension" attribute, but we *do* have
# "xxx_node_connectivity", so mesh does _have_ this location.
connvar_name = property_as_single_name(
meshvar.attributes[connattr_name]
)
conn_var = self._all_vars.get(connvar_name)
if conn_var:
# Answer is the first dimension, if any.
if len(conn_var.dimensions) > 0:
mesh_dims[location] = conn_var.dimensions[0]
deduce_element_dim("node")
deduce_element_dim("boundary")
deduce_element_dim("edge")
deduce_element_dim("face")
# Check that, if any connectivities have non-standard dim order, then a
# dimension attribute exists.
def var_has_nonfirst_dim(varname, dimname):
conn_var = self._all_vars.get(varname)
result = conn_var is not None
if result:
result = dimname in conn_var.dimensions
if result:
result = conn_var.dimensions[0] != dimname
return result
location_altordered_conns = {}
for attr in _VALID_CONNECTIVITY_ROLES:
maindim_location = attr.split("_")[0]
assert maindim_location != "node" # no such connectivities
maindim_name = mesh_dims[maindim_location]
for conn_name in property_namelist(meshvar.attributes.get(attr)):
if var_has_nonfirst_dim(conn_name, maindim_name):
# We found a connectivity with a nonstandard dim order
dim_attr = f"{maindim_location}_dimension"
if dim_attr not in meshvar.attributes:
# There is no corresponding 'xxx_dimension', so warn.
conns = location_altordered_conns.get(
maindim_location, set()
)
conns.add(conn_name)
location_altordered_conns[maindim_location] = conns
for location, conns in location_altordered_conns.items():
# Found connectivities with a nonstandard dim order for this dim.
assert location in ("face", "edge")
errcode = {"edge": "R116", "face": "R118"}[location]
conn_names = [f'"{name}"' for name in conns]
conn_names_str = ", ".join(conn_names)
msg = (
f"has no '{dim_attr}' attribute, but there are "
f"{location} connectivities "
f"with non-standard dimension order : {conn_names_str}."
)
log_meshvar(errcode, msg)
# Check that all existing coordinates are valid.
for coords_name in _VALID_MESHCOORD_ATTRS:
location = coords_name.split("_")[0]
# Only check coords of locations present in the mesh.
# This avoids complaints about coords disconnected by problems
# with the topology identification.
if mesh_dims[location]:
self.check_mesh_coordinates(meshvar, coords_name)
# Check that all existing connectivities are valid.
for attr in _VALID_CONNECTIVITY_ROLES:
self.check_mesh_connectivity(meshvar, attr)
# deal with the optional elements (connectivities)
def check_requires(errcode, attrname, location_1, location_2=None):
exist = attrname in meshvar.attributes
if exist:
elems = [location_1]
if location_2:
elems.append(location_2)
required_elements = [
f"{name}_node_connectivity" for name in elems
]
missing_elements = [
f"'{name}'"
for name in required_elements
if name not in meshvar.attributes
]
if missing_elements:
err_msg = (
f"has a '{attrname}' attribute, which is not valid "
f"since there is no "
)
err_msg += "or ".join(missing_elements)
err_msg += " attribute present."
log_meshvar(errcode, err_msg)
check_requires("R114", "boundary_node_connectivity", "face")
check_requires("R119", "face_face_connectivity", "face")
check_requires("R120", "face_edge_connectivity", "face", "edge")
check_requires("R121", "edge_face_connectivity", "face", "edge")
# Advisory checks.
if meshvar.dimensions:
log_meshvar("A101", "has dimensions.")
if "standard_name" in meshvar.attributes:
log_meshvar("A102", "has a 'standard_name' attribute.")
if "units" in meshvar.attributes:
log_meshvar("A103", "has a 'units' attribute.")
# NOTE: "A104" relates to multiple meshvars, so is handled in caller.
return mesh_dims
def check_meshdata_var(self, datavar: NcVariableSummary):
"""Validity-check a mesh data variable."""
def log_meshdata(errcode, msg):
self.state(errcode, "Mesh data", datavar.name, msg)
lis_name = datavar.attributes.get("location_index_set")
mesh_name = datavar.attributes.get("mesh")
location = datavar.attributes.get("location")
# At least one of these is true, or we would not have identified this
# as a mesh-data var.
assert mesh_name is not None or lis_name is not None
# Decide whether to check this as a lis-datavar or a mesh-datavar
# This is designed to produce 3 possible "clash" errors:
# lis & mesh & ~location --> R506
# lis & location & ~mesh --> R507
# mesh & lis --> R501
treat_as_lis = lis_name is not None and (
mesh_name is None or location is None
)
# Initialise reference used for the generic parent dimension check
parent_varname = None # Can be either a meshvar or a lis
parent_location = None
if treat_as_lis:
# Treat the datavar as a 'lis-datavar'
# --> has "location_index_set", but no "mesh" or "location"
ref_msg = self.var_ref_problem(lis_name)
if ref_msg:
# Invalid 'location_index_set' reference
msg = f'has location_index_set="{lis_name}", which {ref_msg}.'
log_meshdata("R508", msg)
else:
# We have a valid lis var. Take this as the 'parent' for
# the generic dimension test R510
parent_varname = str(lis_name)
lis_var = self._lis_vars[parent_varname]
# Also set the parent-location.
# NOTE: we are not checking the lis-var here, only the datavar,
# so just get a value that works if the lis is valid.
parent_location = str(lis_var.attributes.get("location", ""))
if parent_location not in _VALID_UGRID_LOCATIONS:
parent_location = None
if mesh_name is not None:
msg = (
"has a 'mesh' attribute, which is invalid since it is "
"based on a 'location_index_set' attribute."
)
log_meshdata("R506", msg)
if location is not None:
msg = (
"has a 'location' attribute, which is invalid since it is "
"based on a 'location_index_set' attribute."
)
log_meshdata("R507", msg)
else:
# Treat the datavar as a 'mesh-datavar'
# --> has "mesh" and "location", but no "location_index_set"
ref_msg = self.var_ref_problem(mesh_name)
if ref_msg:
# Invalid 'mesh' reference
msg = f'has mesh="{mesh_name}", which {ref_msg}.'
log_meshdata("R502", msg)
if lis_name is not None:
msg = (
"has a 'location_index_set' attribute, which is invalid "
"since it is based on a 'mesh' attribute."
)
log_meshdata("R501", msg)
if location is None:
log_meshdata("R503", "has no 'location' attribute.")
elif str(location) not in _VALID_UGRID_LOCATIONS:
msg = (
f'has location="{location}", which is not one of '
f'"face", "edge" or "node".'
)
log_meshdata("R504", msg)
else:
# Given a valid location, check that it exists in the parent
if not ref_msg:
parent_varname = str(mesh_name)
parent_location = str(location)
assert parent_varname in self._all_mesh_dims
mesh_dims = self._all_mesh_dims[parent_varname]
parent_dim = mesh_dims.get(parent_location)
if parent_dim is None:
msg = (
f'has location="{location}", which is a location '
"that does not exist in the parent mesh, "
f'"{parent_varname}".'
)
log_meshdata("R505", msg)
# Generic dimension testing, for either lis- or mesh-type datavars
# First check there is only 1 mesh-dim
data_dims = datavar.dimensions
data_mesh_dims = [
dim
for dim in data_dims
if any(
dim in self._all_mesh_dims[some_mesh_name].values()
for some_mesh_name in self._all_mesh_dims
)
]
n_data_mesh_dims = len(data_mesh_dims)
if n_data_mesh_dims != 1:
msg = (
f"has dimensions {data_dims}, of which {n_data_mesh_dims} "
"are mesh dimensions, instead of 1."
)
log_meshdata("R509", msg)
data_meshdim = None # cannot check against parent
else:
# We have a single element-dim : check against a parent mesh or lis
(data_meshdim,) = data_mesh_dims
if parent_varname and parent_location and data_meshdim:
# If we have a valid parent ref, and single mesh dimension of the
# datavar, check that they match
mesh_dims = self._all_mesh_dims[parent_varname]
parent_dim = mesh_dims[parent_location]
if parent_dim is not None and data_meshdim != parent_dim:
# Warn only if the parent_dim *exists*, but does not match
# N.B. missing parent dim is checked elsewhere : R505 or R404
if parent_varname in self._lis_vars:
typename = "location_index_set"
else:
typename = "mesh"
msg = (
f'has the element dimension "{data_meshdim}", which does '
f"not match the {parent_location} dimension of the "
f'"{parent_varname}" {typename}, which is "{parent_dim}".'
)
log_meshdata("R510", msg)
def check_lis_var(self, lis_var: NcVariableSummary):
"""Validity-check a location-index-set variable."""
# Add the lis element dimension into self._all_mesh_dims
dims = lis_var.dimensions
if len(dims) == 1:
# The lis has a single dim,
# so we can record 'our' dim as an element-dim
(lis_dim,) = dims
# Note: record this under **all** locations.
# Since we want to recognise this as a 'mesh dim', even if the lis
# has an invalid mesh or location, and we don't use this to check
# it against the parent element dim.
self._all_mesh_dims[lis_var.name] = {
name: lis_dim for name in _VALID_UGRID_LOCATIONS
}
def log_lis(errcode, msg):
self.state(errcode, "location-index-set", lis_var.name, msg)
cf_role = lis_var.attributes.get("cf_role")
if cf_role is None:
log_lis("R401", "has no 'cf_role' attribute.")
elif cf_role != "location_index_set":
msg = f'has cf_role="{cf_role}", instead of "location_index_set".'
log_lis("R401", msg)
mesh_var = None # Used to skip additional checks when mesh is bad
mesh_name = lis_var.attributes.get("mesh")
if mesh_name is None:
log_lis("R402", "has no 'mesh' attribute.")
else:
msg_ref = self.var_ref_problem(mesh_name)
if msg_ref:
msg = f'has mesh="{mesh_name}", which {msg_ref}.'
log_lis("R402", msg)
else:
mesh_name = str(mesh_name)
mesh_var = self._mesh_vars.get(mesh_name)
if mesh_var is None:
msg = (
f'has mesh="{mesh_name}", '
"which is not a valid mesh variable."
)
log_lis("R402", msg)
location = lis_var.attributes.get("location")
parent_dim = None
if location is None:
log_lis("R403", "has no 'location' attribute.")
elif str(location) not in _VALID_UGRID_LOCATIONS:
msg = (
f'has location="{location}", which is not one of '
'"face", "edge" or "node".'
)
log_lis("R403", msg)
elif mesh_var:
# check the location exists in the parent mesh
location = str(location)
mesh_dims = self._all_mesh_dims[mesh_name]
parent_dim = mesh_dims[location]
if parent_dim is None:
msg = (
f'has location="{location}", which is a location '
"that does not exist in the parent mesh, "
f'"{mesh_name}".'
)
log_lis("R404", msg)
# Don't attempt any further checks against the mesh
mesh_var = None
lis_dims = lis_var.dimensions
n_lis_dims = len(lis_dims)
if n_lis_dims != 1:
msg = (
f"has dimensions {lis_dims!r}, of which there are "
f"{n_lis_dims} instead of 1."
)
log_lis("R405", msg)
lis_dim = None
else:
(lis_dim,) = lis_dims
index_value = lis_var.attributes.get("start_index")
if index_value is not None:
# Note: check value, converted to int.
# This avoids an extra warning for strings like "0", "1",
# since a non-integral type triggers an A407 warning anyway.
if int(index_value) not in (0, 1):
msg = (
f'has start_index="{index_value}", which is not '
"either 0 or 1."
)
log_lis("R406", msg)
#
# Advisory checks
#
if lis_var.dtype.kind != "i":
msg = f'has type "{lis_var.dtype}", which is not an integer type.'
log_lis("A401", msg)
if self.do_data_checks:
# TODO: data checks
log_lis("A402", "contains missing indices.")
if "_FillValue" in lis_var.attributes:
msg = (
"has a '_FillValue' attribute, which should not be present "
"on a location-index-set."
)
log_lis("A403", msg)
if mesh_var and lis_dim and parent_dim:
len_lis = self.file_scan.dimensions[lis_dim].length
len_parent = self.file_scan.dimensions[parent_dim].length
if len_lis >= len_parent:
msg = (
f'has dimension "{lis_dim}", length {len_lis}, which is '
f"longer than the {location} dimension of the parent "
f'mesh "{mesh_name}" : '
f'"{parent_dim}", length {len_parent}.'
)
log_lis("A404", msg)
if self.do_data_checks:
# TODO: data checks
msg = "contains repeated index values."
log_lis("A405", msg)
if mesh_var:
msg = (
"contains index values which are outside the range of the "
f'parent mesh "{mesh_name}" {location} dimension, '
f' : "{parent_dim}", range 1..{len_parent}.'
)
log_lis("A406", msg)
if index_value is not None and index_value.dtype != lis_var.dtype:
msg = (
f"has a 'start_index' of type \"{index_value.dtype}\", "
"which is different from the variable type, "
f'"{lis_var.dtype}".'
)
log_lis("A407", msg)
def dataset_identify_containers(self):
"""
Find "mesh" , "mesh data", and "location index set" variables,
Also include possibles due to mesh/lis references from data variables.
Results set as self properties :
self._meshdata_vars
self._mesh_vars
self._lis_vars
self._mesh_referrers
self._lis_referrers
"""
# Location index sets are those with a cf_role of 'location_index_set'
self._lis_vars = vars_w_props(
self._all_vars, cf_role="location_index_set"
)
# Mesh data variables are those with either a 'mesh' or
# 'location_index_set' attribute, but excluding the lis-vars.
self._meshdata_vars = {
varname: var
for varname, var in self._all_vars.items()
if (
varname not in self._lis_vars
and (
"mesh" in var.attributes
or "location_index_set" in var.attributes
)
)
}
# Mesh vars are those with cf_role="mesh_topology".
self._mesh_vars = vars_w_props(self._all_vars, cf_role="mesh_topology")
# Scan for any meshvars referred to by 'mesh' or 'location_index_set'
# properties in mesh-data vars.
# These are included among potential meshdata- and lis- variables
# (so they are detected + checked even without the correct cf_role)
self._mesh_referrers = {}
self._lis_referrers = {}
for referrer_name, referrer_var in list(self._meshdata_vars.items()):
# Note: taking a copy as we may modify _meshdata_vars in the loop
meshprop = referrer_var.attributes.get("mesh")
meshvar_name = property_as_single_name(meshprop)
if (
meshvar_name is not None
and meshvar_name in self._all_vars
and meshvar_name not in self._mesh_vars
):
# Add this reference to our list of all meshvars
self._mesh_vars[meshvar_name] = self._all_vars[meshvar_name]
# Record name of referring var.
# N.B. potentially this can overwrite a previous referrer,
# but "any one of several" will be OK for our purpose.
self._mesh_referrers[meshvar_name] = referrer_name
# Do something similar with lis references.
meshprop = referrer_var.attributes.get("location_index_set")
lisvar_name = property_as_single_name(meshprop)
if (
lisvar_name is not None
and lisvar_name in self._all_vars
and lisvar_name not in self._lis_vars
):
# Add this reference to our list of all meshvars
self._lis_vars[lisvar_name] = self._all_vars[lisvar_name]
# Also remove it from the meshdata-vars if it was there
# N.B. this could only happen if it has a wrong cf_role, but
# that is just the kind of error we are dealing with here.
self._meshdata_vars.pop(lisvar_name, None)
# Record name of referring var.
self._lis_referrers[lisvar_name] = referrer_name
def dataset_check_containers_and_map_dims(self):
"""
Check all putative mesh + lis variables and collect dimension maps.
Writes self._all_mesh_dims: {<mesh or lis name>: {location: dim-name}}
Note: in checking the individual mesh variables, we also check all
the coordinates and connectivities.
This routine also sets self._allowed_cfrole_varnames
"""
# Build a map of the dimensions of all the meshes,
# all_meshes_dims: {meshname: {location: dimname}}
self._all_mesh_dims = {}
# This list of "UGRID variables" is used by 'dataset_global_checks' to
# find any vars with a UGRID-style 'cf_role' that should not have one.
# N.B. we don't include meshdata-variables, or coordinate variables,
# which should *not* have a 'cf_role' anyway.
# After this, all connectivities will be added by 'check_connectivity'.
self._allowed_cfrole_varnames = list(self._mesh_vars.keys()) + list(
self._lis_vars.keys()
)
# Find all connectivity variables and, initially, put them all on the
# "orphan connectivities" list : Those attached to meshes will be
# removed when we check the meshes (next).
self._orphan_connectivities = {
var_name: var
for var_name, var in self._all_vars.items()
if (
"cf_role" in var.attributes
and (
str(var.attributes.get("cf_role"))
in _VALID_CONNECTIVITY_ROLES
)
)
}
# Check all mesh vars
# Note: this call also fills in 'self._all_mesh_dims', and checks all
# the attached coordinates and connectivites for each mesh.
for meshvar in self._mesh_vars.values():
self.check_mesh_var(meshvar)
# Check all lis-vars
# Note: this call also fills in 'self._all_mesh_dims'.
for lis_var in self._lis_vars.values():
self.check_lis_var(lis_var)
def dataset_detect_shared_dims(self):
"""
Check for any dimensions shared between meshes - an advisory warning.
"""
# Convert all_meshes_dims: {meshname: {location: dimname}}
# .. to dim_meshes: {dimname: [meshnames]}
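# e.g. {"mesh_a": {"node": "nNode", "face": "nFace"}, "mesh_b": {"node": "nNode"}}
#   --> {"nNode": {"mesh_a", "mesh_b"}, "nFace": {"mesh_a"}}   (illustrative names)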
dim_meshes = {}
for mesh, location_dims in self._all_mesh_dims.items():
for location, dim in location_dims.items():
# Fetch list
meshnames = dim_meshes.get(dim, set())
if dim:
# TODO: what if a dim is used by 2 different locations
# of the same mesh ?
meshnames.add(mesh)
# Write list back
dim_meshes[dim] = meshnames
# Check for any dims which are used more than once.
for dim, meshnames in dim_meshes.items():
if len(meshnames) > 1:
# TODO: what if a dim is used by 2 different locations
# of the same mesh ?
# We would get a repeated meshname here...
meshnames = sorted(meshnames)
other_meshes, last_mesh = meshnames[:-1], meshnames[-1]
if len(other_meshes) == 1:
other_mesh = other_meshes[0]
msg = (
f'Dimension "{dim}" is mapped by both '
f'mesh "{other_mesh}" and mesh "{last_mesh}".'
)
else:
msg = f'Dimension "{dim}" is mapped by multiple meshes : '
msg += ", ".join(f'"{mesh}"' for mesh in other_meshes)
msg += f' and "{last_mesh}".'
self.state("A104", None, None, msg)
def dataset_detect_multiple_refs(self):
"""
Check for any coords and conns referenced by multiple meshes.
N.B. relevant errors are :
* A201 coord should have 1-and-only-1 parent mesh
* A301 connectivity should have 1-and-only-1 parent mesh
"""
var_refs_meshes_attrs = {}
all_ref_attrs = _VALID_MESHCOORD_ATTRS + _VALID_CONNECTIVITY_ROLES
for some_meshname in sorted(self._mesh_vars):
some_meshvar = self._mesh_vars[some_meshname]
for some_refattr in all_ref_attrs:
is_coord = some_refattr in _VALID_MESHCOORD_ATTRS
attrval = some_meshvar.attributes.get(some_refattr, None)
somevar_names = property_namelist(attrval)
for somevar_name in somevar_names:
# NB only collect valid refs (to real variables)
if somevar_name in self._all_vars:
meshes = var_refs_meshes_attrs.get(somevar_name, set())
meshes.add((some_meshname, some_refattr))
var_refs_meshes_attrs[somevar_name] = meshes
for some_varname, meshes_and_attrs in var_refs_meshes_attrs.items():
some_var = self._all_vars[some_varname] # NB have only 'real' refs
if len(meshes_and_attrs) > 1:
meshes_and_attrs = sorted(
meshes_and_attrs, key=lambda pair: pair[0]
)
refs_msg = ", ".join(
[
f"{some_mesh}:{attr_name}"
for some_mesh, attr_name in meshes_and_attrs
]
)
msg = f"is referenced by multiple mesh variables : {refs_msg}."
# Structurally, a var *could* be referenced as both a coord
# *and* a connectivity. But they have different required
# numbers of dims, so we use that to decide what to call it.
is_coord = len(some_var.dimensions) == 1
if is_coord:
vartype = "Mesh coordinate"
code = "A201"
else:
vartype = "Mesh connectivity"
code = "A301"
self.state(code, vartype, some_varname, msg)
def dataset_global_checks(self):
"""Do file-level checks not based on any particular variable type."""
def log_dataset(errcode, msg):
self.state(errcode, "", "", msg)
# A901 "dataset contents should also be CF compliant" -- not checkable,
# unless we integrate this code with cf-checker.
# Check the global Conventions attribute for a UGRID version.
conventions = self.file_scan.attributes.get("Conventions")
if conventions is None:
log_dataset("A902", "dataset has no 'Conventions' attribute.")
else:
conventions = str(conventions)
re_conventions = re.compile(r"UGRID-[0-9]+\.[0-9]+")
if not re_conventions.search(conventions):
# NOTE: just search. Don't attempt to split, as usage of
# comma/space/semicolon might be inconsistent, and we don't
# need to care about that here.
msg = (
f'dataset has Conventions="{conventions}", which does not '
"contain a UGRID convention statement of the form "
'"UGRID-<major>.<minor>".'
)
log_dataset("A903", msg)
# Check for any unexpected 'cf_role' usages.
# N.B. the logic here is that
# 1) if it has a UGRID-type cf-role, then *either* it was already
# identified (and checked), *or* it generates a A904 warning
# 2) if it has a CF cf-role, we don't comment
# 3) if it has some other cf-role, this is unrecognised -> A905
for var_name, var in self._all_vars.items():
if (
"cf_role" in var.attributes
and var_name not in self._allowed_cfrole_varnames
):
cf_role = str(var.attributes["cf_role"])
if cf_role in _VALID_UGRID_CF_ROLES:
msg = (
f'has cf_role="{cf_role}", which is a UGRID defined '
"cf_role term, but the variable is not recognised as "
"a UGRID mesh, location-index-set or connectivity "
"variable."
)
self.state("A904", "netcdf", var_name, msg)
elif cf_role not in _VALID_CF_CF_ROLES:
msg = (
f'has cf_role="{cf_role}", which is not a recognised '
"cf-role value defined by either CF or UGRID."
)
self.state("A905", "netcdf", var_name, msg)
def check_dataset(self):
"""
Run all conformance checks on the contained file scan.
All results logged via `self.state`.
"""
self.dataset_identify_containers()
self.dataset_check_containers_and_map_dims()
# Check any orphan connectivities.
for var_name, var in self._orphan_connectivities.items():
self.check_connectivity(var)
# Always flag these as a possible problem.
self.state("A301", "connectivity", var_name, "has no parent mesh.")
# Check all the mesh-data vars
for meshdata_var in self._meshdata_vars.values():
self.check_meshdata_var(meshdata_var)
# Do the checks which cut across different meshes
self.dataset_detect_shared_dims()
self.dataset_detect_multiple_refs()
# Do the miscellaneous dataset-level checks
self.dataset_global_checks()
def checking_report(self) -> str:
"""Produce a text summary of the checking results."""
report_lines = []
def line(msg: str):
report_lines.append(msg)
log = self.logger
logs = log.report_statement_logrecords()
line("")
line("UGRID conformance checks complete.")
line("")
if log.N_FAILURES + log.N_WARNINGS == 0:
line("No problems found.")
else:
if logs:
line("List of checker messages :")
for log_record in logs:
line(" " + log_record.msg)
line("")
line(
f"Total of {log.N_WARNINGS + log.N_FAILURES} "
"problems logged :"
)
line(f" {log.N_FAILURES} Rxxx requirement failures")
line(f" {log.N_WARNINGS} Axxx advisory recommendation warnings")
line("")
line("Done.")
return "\n".join(report_lines)
def structure_report(self, include_nonmesh: bool = False) -> str:
"""
Produce a text summary of the dataset UGRID structure.
Parameters
----------
include_nonmesh : bool, default False
If set, also output a list of file dimensions and variables *not*
relating to the UGRID meshes contained.
"""
result_lines = []
indent = " "
def line(msg, n_indent=0):
result_lines.append(indent * n_indent + msg)
def varlist_str(var: NcVariableSummary, attr_name: str) -> str:
names_attr = var.attributes.get(attr_name)
if not names_attr:
result = "<none>"
else:
names = str(names_attr).split(" ")
result = ", ".join(f'"{name}"' for name in names)
return result
if not self._mesh_vars:
line("Meshes : <none>")
else:
line("Meshes")
for mesh_name, mesh_var in self._mesh_vars.items():
line(f'"{mesh_name}"', 1)
dims = self._all_mesh_dims[mesh_name]
# Nodes are a bit 'special'
dim = dims["node"]
if not dim:
line("<? no node coordinates or dimension ?>", 2)
else:
line(f'node("{dim}")', 2)
coords = varlist_str(mesh_var, "node_coordinates")
line(f"coordinates : {coords}", 3)
# Other dims all reported in the same way
for location in ("edge", "face", "boundary"):
dim = dims[location]
if dim:
line(f'{location}("{dim}")', 2)
attr_name = f"{location}_node_connectivity"
conn_str = varlist_str(mesh_var, attr_name)
line(f"{attr_name} : {conn_str}", 3)
coord_name = f"{location}_coordinates"
if coord_name in mesh_var.attributes:
coords = varlist_str(mesh_var, coord_name)
line(f"coordinates : {coords}", 3)
if self._lis_vars:
line("")
line("Location Index Sets")
for lis_name, lis_var in self._lis_vars.items():
# Every location of a lis maps to its single element dimension,
# so report any one of them (it may be missing if the lis was invalid).
lis_dims = self._all_mesh_dims.get(lis_name, {})
dim = next(iter(lis_dims.values()), "<unknown>")
line(f'{lis_name}("{dim}")', 2)
mesh = varlist_str(lis_var, "mesh")
line(f"mesh : {mesh}", 3)
loc = varlist_str(lis_var, "location")
line(f"location : {loc}", 3)
if self._orphan_connectivities:
line("")
line("?? Connectivities with no mesh ??")
for conn_name, conn_var in self._orphan_connectivities.items():
dims = ", ".join(f'"{dim}"' for dim in conn_var.dimensions)
line(f'"{conn_name}" ( {dims} )', 1)
cf_role = varlist_str(conn_var, "cf_role")
line(f"cf_role = {cf_role}", 2)
if self._meshdata_vars:
line("")
line("Mesh Data Variables")
for var_name, var in self._meshdata_vars.items():
line(f'"{var_name}"', 1)
attrs = {
attr_name: var.attributes.get(attr_name)
for attr_name in ("mesh", "location", "location_index_set")
}
# 'treat as' mirrors logic in 'check_meshdata_var'
treat_as_lis = attrs["location_index_set"] and (
not attrs["mesh"] or not attrs["location"]
)
if treat_as_lis:
order_and_expected = [
("location_index_set", True),
("mesh", False),
("location", False),
]
else:
order_and_expected = [
("mesh", True),
("location", True),
("location_index_set", False),
]
for attr_name, expected in order_and_expected:
attr = attrs[attr_name]
value = None
if attr:
value = varlist_str(var, attr_name)
if not expected:
value = f"? {value}"
elif expected:
value = "? <none>"
if value:
line(f"{attr_name} : {value}", 2)
if include_nonmesh:
# A non-mesh var is one that isn't referred to
# by any UGRID mesh components.
def var_names_set(vars: List[NcVariableSummary]) -> Set[str]:
return set([var.name for var in vars])
all_mesh_varnames = (
var_names_set(self._mesh_vars.values())
| var_names_set(self._lis_vars.values())
| var_names_set(self._meshdata_vars.values())
| var_names_set(self._orphan_connectivities.values())
)
nonmesh_vars = set(self._all_vars.keys()) - all_mesh_varnames
# A mesh dimension is one that is a location dim of any
# mesh, or any connectivity (e.g. includes dims used for
# nodes of a face).
nonmesh_dims = set(self.file_scan.dimensions.keys())
# Exclude from 'nonmesh' : all dims and vars of each mesh.
for meshvar in self._mesh_vars.values():
# Exclude all mesh location dims.
mesh_dims = self._all_mesh_dims[meshvar.name]
nonmesh_dims -= set(mesh_dims.values())
# Exclude all location coordinates, and their bounds vars.
for location in _VALID_UGRID_LOCATIONS:
attrname = f"{location}_coordinates"
attr = meshvar.attributes.get(attrname)
location_coord_names = property_namelist(attr)
nonmesh_vars -= set(location_coord_names)
for coord_name in location_coord_names:
coord_var = self._all_vars.get(coord_name)
bounds_attr = coord_var.attributes.get("bounds")
bounds_varname = property_as_single_name(bounds_attr)
if bounds_varname:
nonmesh_vars.discard(bounds_varname)
# Exclude all connectivities, and all their dims.
for attrname in _VALID_CONNECTIVITY_ROLES:
conn_attr = meshvar.attributes.get(attrname)
conn_name = property_as_single_name(conn_attr)
if conn_name:
nonmesh_vars.discard(conn_name)
conn_var = self._all_vars.get(conn_name)
if conn_var:
nonmesh_dims -= set(conn_var.dimensions)
# Also exclude all dimensions of 'orphan' connectivities.
for conn_var in self._orphan_connectivities.values():
nonmesh_dims -= set(conn_var.dimensions)
# Add report section, if any nonmesh found.
if nonmesh_dims or nonmesh_vars:
line("")
line("Non-mesh variables and/or dimensions")
if nonmesh_dims:
line("dimensions:", 1)
for dim in sorted(nonmesh_dims):
line(f'"{dim}"', 2)
if nonmesh_vars:
line("variables:", 1)
for var in sorted(nonmesh_vars):
line(f'"{var}"', 2)
return "\n".join(result_lines)
def check_dataset(
file: Union[NcFileSummary, AnyStr, Path],
print_summary: bool = True,
omit_advisories: bool = False,
ignore_codes: Union[List[str], None] = None,
) -> Checker:
"""
Run UGRID conformance checks on a file.
Optionally print a result summary.
Optionally ignore advisory messages and/or specific error codes.
Returns a checker object with a file analysis and checking log records.
Parameters
----------
file : string, Path or :class:`NcFileSummary`
path to, or representation of a netcdf input file
print_summary : bool, default=True
print a results summary at the end
omit_advisories : bool, default False
If set, log only 'requirements' Rxxx statements, and ignore the
advisory 'Axxx' ones.
ignore_codes : list(str) or None, default None
A list of error codes to ignore.
Returns
-------
checker : Checker
A checker for the file.
"""
if isinstance(file, str):
file_path = Path(file)
elif isinstance(file, Path):
file_path = file
if isinstance(file, NcFileSummary):
file_scan = file
else:
file_scan = scan_dataset(file_path)
checker = Checker(
file_scan, ignore_codes=ignore_codes, ignore_warnings=omit_advisories
)
if print_summary:
# Print the results : this is the default action
print(checker.checking_report())
return checker
| 40.939045 | 79 | 0.533999 | 8,056 | 71,193 | 4.520606 | 0.088878 | 0.009885 | 0.008265 | 0.008238 | 0.273299 | 0.196991 | 0.146823 | 0.114476 | 0.100253 | 0.074826 | 0 | 0.010424 | 0.384195 | 71,193 | 1,738 | 80 | 40.962601 | 0.82026 | 0.205399 | 0 | 0.215654 | 0 | 0 | 0.165444 | 0.025409 | 0 | 0 | 0 | 0.001726 | 0.007494 | 1 | 0.029142 | false | 0 | 0.005828 | 0.000833 | 0.04413 | 0.002498 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0ef62d4a86e0b113ee61b8371810694544d577c | 731 | py | Python | Tests/Comms/TestRxChars.py | Simulators/PiBusRaider | ec091f3c74ea25c3287d26d990ff5d1b90e97e92 | [
"MIT"
] | 7 | 2021-01-23T04:37:18.000Z | 2022-01-08T04:44:00.000Z | Tests/Comms/TestRxChars.py | Simulators/PiBusRaider | ec091f3c74ea25c3287d26d990ff5d1b90e97e92 | [
"MIT"
] | 3 | 2021-04-01T11:28:31.000Z | 2021-05-10T09:56:05.000Z | Tests/Comms/TestRxChars.py | robdobsn/BusRaider | 691e7882a06408208ca2abece5e7c4bcb4b4fa45 | [
"MIT"
] | null | null | null | import serial
import threading
import keyboard
import time
# Read data from the serial port and echo each received byte as hex
def serialRead():
global serialIsClosing, serPort
while True:
if serialIsClosing:
break
if serPort.isOpen():
val = serPort.read()
if len(val) == 0:
continue
for v in val:
print("{:02x} ".format(v), end="")
print()
serPort = serial.Serial('COM5', 115200)
serialIsClosing = False
# Thread for reading from port
thread = threading.Thread(target=serialRead, args=())
thread.start()
while True:
if keyboard.is_pressed(' '):
serialIsClosing = True
time.sleep(1.0)
break
serPort.close()
| 20.885714 | 53 | 0.589603 | 81 | 731 | 5.308642 | 0.555556 | 0.04186 | 0.051163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02381 | 0.310534 | 731 | 34 | 54 | 21.5 | 0.829365 | 0.087551 | 0 | 0.153846 | 0 | 0 | 0.018072 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.153846 | 0 | 0.192308 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0f2f34a9ad73740c5bb4f118b59d33e045bc0bb | 1,150 | py | Python | apps/diana-cli/diana_cli/mock.py | thomasyi17/diana2 | 2167053dfe15b782d96cb1e695047433f302d4dd | [
"MIT"
] | 15 | 2019-02-12T23:26:09.000Z | 2021-12-21T08:53:58.000Z | apps/diana-cli/diana_cli/mock.py | thomasyi17/diana2 | 2167053dfe15b782d96cb1e695047433f302d4dd | [
"MIT"
] | 2 | 2019-01-23T21:13:12.000Z | 2019-06-28T15:45:51.000Z | apps/diana-cli/diana_cli/mock.py | thomasyi17/diana2 | 2167053dfe15b782d96cb1e695047433f302d4dd | [
"MIT"
] | 6 | 2019-01-23T20:22:50.000Z | 2022-02-03T03:27:04.000Z | import click
import yaml
from diana.apis import Orthanc
from diana.daemons import MockSite
from diana.daemons.mock_site import sample_site_desc
epilog = """
DESC must be a mock-site description in yaml format.
\b
---
- name: Example Hospital
services:
- name: Main CT
modality: CT
devices: 3
studies_per_hour: 15
- name: Main MR
modality: MR
devices: 2
studies_per_hour: 4
...
"""
@click.command(epilog=epilog, short_help="Generate mock DICOM traffic")
@click.argument('desc', required=False)
@click.option('--dest', help="Destination DICOM service")
@click.pass_context
def mock(ctx, desc, dest):
"""Generate synthetic studies on a schedule according to a site
description DESC. Studies are optionally forwarded to an endpoint DEST."""
services = ctx.obj.get('services')
click.echo(click.style('Generate mock DICOM data', underline=True, bold=True))
if not desc:
desc = sample_site_desc
desc = yaml.safe_load(desc)
H = MockSite.Factory.create(desc=desc)
O = None
if dest:
_desc = services[dest]
O = Orthanc(**_desc)
for h in H:
h.run(pacs=O)
| 23 | 82 | 0.682609 | 163 | 1,150 | 4.736196 | 0.509202 | 0.034974 | 0.041451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005507 | 0.210435 | 1,150 | 49 | 83 | 23.469388 | 0.844714 | 0.115652 | 0 | 0 | 0 | 0 | 0.343595 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0.026316 | 0.131579 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0f3345ae4e7d0fc3d248e4e59a1c69d38a3932d | 3,415 | py | Python | alphafold_components/cls_components/jackhmmer.py | aburdenko/alphafold-inference-pipeline | b48dc5dba162d02450ce111fe9d52a09d03a0236 | [
"Apache-2.0"
] | 4 | 2022-02-14T17:54:18.000Z | 2022-02-25T12:58:58.000Z | alphafold_components/cls_components/jackhmmer.py | jarokaz/alphafold-inference-pipeline | 846c90fa05f3f1b2f0ac03c4a43a34a0142987e9 | [
"Apache-2.0"
] | 1 | 2022-03-18T18:23:42.000Z | 2022-03-18T18:23:42.000Z | alphafold_components/cls_components/jackhmmer.py | aburdenko/alphafold-inference-pipeline | b48dc5dba162d02450ce111fe9d52a09d03a0236 | [
"Apache-2.0"
] | 1 | 2022-03-05T22:54:24.000Z | 2022-03-05T22:54:24.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from kfp.v2 import dsl
from kfp.v2.dsl import Output, Input, Artifact, Dataset
import config
@dsl.component(
    base_image=config.CLS_WRAPPERS_IMAGE,
    output_component_file='component_msa_search.yaml'
)
def jackhmmer(
    project: str,
    region: str,
    database: str,
    reference_databases: Input[Dataset],
    sequence: Input[Dataset],
    msa: Output[Dataset],
    cls_logging: Output[Artifact],
    maxseq: int = 10_000,
    machine_type: str = 'n1-standard-8',
    boot_disk_size: int = 100,
    n_cpu: int = 8,
):
    """Searches the specified database using jackhmmer.

    This is a simple prototype using dsub to submit a Cloud Life Sciences pipeline.
    We are using CLS as KFP does not support attaching pre-populated disks or preemptible VMs.
    GCSFuse does not perform well with genetic database search tools.
    The prototype also lacks job control. If a pipeline step fails, the CLS job can get
    orphaned.
    """
    import logging
    import os
    import sys
    import time

    from alphafold.data import parsers
    from dsub_wrapper import run_dsub_job

    _SUPPORTED_DATABASES = ['uniref90', 'mgnify']
    _DSUB_PROVIDER = 'google-cls-v2'
    _LOG_INTERVAL = '30s'
    _ALPHAFOLD_RUNNER_IMAGE = 'gcr.io/jk-mlops-dev/alphafold'
    _SCRIPT = '/scripts/alphafold_runners/jackhmmer_runner.py'

    logging.basicConfig(format='%(asctime)s - %(message)s',
                        level=logging.INFO,
                        datefmt='%d-%m-%y %H:%M:%S',
                        stream=sys.stdout)

    if not (str(database) in _SUPPORTED_DATABASES):
        raise RuntimeError(f'Jackhmmer cannot be used with {database} database.')

    job_params = [
        '--machine-type', machine_type,
        '--boot-disk-size', str(boot_disk_size),
        '--logging', cls_logging.uri,
        '--log-interval', _LOG_INTERVAL,
        '--image', _ALPHAFOLD_RUNNER_IMAGE,
        '--env', f'PYTHONPATH=/app/alphafold',
        '--mount', f'DB_ROOT={reference_databases.metadata["disk_image"]}',
        '--input', f'INPUT_PATH={sequence.uri}',
        '--output', f'OUTPUT_PATH={msa.uri}',
        '--env', f'DB_PATH={reference_databases.metadata[database]}',
        '--env', f'N_CPU={n_cpu}',
        '--env', f'MAXSEQ={maxseq}',
        '--script', _SCRIPT
    ]

    t0 = time.time()
    logging.info('Starting database search...')
    result = run_dsub_job(
        provider=_DSUB_PROVIDER,
        project=project,
        regions=region,
        params=job_params,
    )
    t1 = time.time()
    logging.info(f'Search completed. Elapsed time: {t1-t0}')

    with open(msa.path) as f:
        msa_str = f.read()
    parsed_msa = parsers.parse_stockholm(msa_str)
    msa.metadata['data_format'] = 'sto'
    msa.metadata['num of sequences'] = len(parsed_msa.sequences)
| 29.95614 | 92 | 0.658565 | 448 | 3,415 | 4.881696 | 0.475446 | 0.027435 | 0.016461 | 0.014632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011398 | 0.229283 | 3,415 | 113 | 93 | 30.221239 | 0.819529 | 0.272328 | 0 | 0.029412 | 0 | 0 | 0.262188 | 0.11102 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014706 | false | 0 | 0.147059 | 0 | 0.161765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0f3af4c9714fab190218b538b99ee23af39c663 | 1,169 | py | Python | reservas/utils/models.py | hello-alf/reservas | b5569fd92da62ecd2e26c6756c170de69b8afa0b | [
"MIT"
] | null | null | null | reservas/utils/models.py | hello-alf/reservas | b5569fd92da62ecd2e26c6756c170de69b8afa0b | [
"MIT"
] | null | null | null | reservas/utils/models.py | hello-alf/reservas | b5569fd92da62ecd2e26c6756c170de69b8afa0b | [
"MIT"
] | null | null | null | """Django models.utilities"""
from django.db import models
class BookingAudit(models.Model):
    """Booking audit base model (attribute template).

    BookingAudit acts as an abstract base class from which every
    other model in the project will inherit. This class provides
    every table with the following attributes:

        + created (DateTime): Stores the datetime the object was created
        + modified (DateTime): Stores the last datetime the object was modified
    """

    created = models.DateTimeField(
        'created at',
        auto_now_add=True,
        help_text='Date time on which the object was created.')
    modified = models.DateTimeField(
        'modified at',
        auto_now=True,
        help_text='Date time on which the object was modified.')

    class Meta:
        """Meta options.

        We set the abstract attribute so the database does not create a
        physical table for this class/model.
        """
        abstract = True
        # To add extra functionality to the object without mapping it in the
        # database, set proxy = True
        # proxy = True
        get_latest_by = 'created'
        ordering = ['-created', '-modified']
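

# Illustrative example (not part of the original app): any concrete model that inherits
# from BookingAudit automatically gains the created/modified audit fields. The model name
# and fields below are hypothetical.
class Reservation(BookingAudit):
    """A booking record that inherits the audit timestamps."""

    guest_name = models.CharField(max_length=120)
    check_in = models.DateField()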
| 32.472222 | 106 | 0.663815 | 145 | 1,169 | 5.303448 | 0.537931 | 0.058518 | 0.041612 | 0.054616 | 0.10143 | 0.10143 | 0.10143 | 0.10143 | 0.10143 | 0.10143 | 0 | 0 | 0.266039 | 1,169 | 35 | 107 | 33.4 | 0.89627 | 0.502139 | 0 | 0 | 0 | 0 | 0.250482 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0f4971c45d20418a287e24a19c39a4e09365b02 | 310 | py | Python | Python/13 - Regex and Parsing/Validating phone numbers.py | sohammanjrekar/HackerRank | 1f5010133a1ac1e765e855a086053c97d9e958be | [
"MIT"
] | null | null | null | Python/13 - Regex and Parsing/Validating phone numbers.py | sohammanjrekar/HackerRank | 1f5010133a1ac1e765e855a086053c97d9e958be | [
"MIT"
] | null | null | null | Python/13 - Regex and Parsing/Validating phone numbers.py | sohammanjrekar/HackerRank | 1f5010133a1ac1e765e855a086053c97d9e958be | [
"MIT"
] | null | null | null | import re
N = int(input())
for i in range(N):
    number = input()
    if 2 <= len(number) <= 15 and number.isdigit():
        output = re.findall(r"^[789]\d{9}$", number)
        if len(output) == 1:
            print("YES")
        else:
            print("NO")
    else:
        print("NO")
| 20.666667 | 53 | 0.448387 | 40 | 310 | 3.475 | 0.675 | 0.129496 | 0.158273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041451 | 0.377419 | 310 | 14 | 54 | 22.142857 | 0.678756 | 0 | 0 | 0.333333 | 0 | 0 | 0.064189 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0f8fcb01b70468adeeb63a76de6b7573aecfbe3 | 434 | py | Python | exam_at_home/1/bracket_matching.py | jamie-jjd/110_spring_IDS | 7f15c0c73b9d663373b791b9ddcc836957dcc3d2 | [
"MIT"
] | 2 | 2022-02-21T10:37:22.000Z | 2022-03-02T01:43:30.000Z | exam_at_home/1/bracket_matching.py | jamie-jjd/110_spring_IDS | 7f15c0c73b9d663373b791b9ddcc836957dcc3d2 | [
"MIT"
] | null | null | null | exam_at_home/1/bracket_matching.py | jamie-jjd/110_spring_IDS | 7f15c0c73b9d663373b791b9ddcc836957dcc3d2 | [
"MIT"
] | 3 | 2022-02-21T05:06:19.000Z | 2022-03-27T07:58:11.000Z | #
# author: wang-yang
# email: tnst92002@gmail.com
#
N = int(input())
def check(s: str):
    st = []
    for c in s:
        if c == '(':
            st.append(')')
        elif c == '[':
            st.append(']')
        else:
            if len(st) == 0 or st[-1] != c:
                return False
            st.pop()
    return len(st) == 0
for _ in range(N):
s = input()
st = []
print("Yes" if check(s) else "No") | 18.869565 | 43 | 0.410138 | 58 | 434 | 3.051724 | 0.568966 | 0.067797 | 0.101695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030888 | 0.403226 | 434 | 23 | 44 | 18.869565 | 0.65251 | 0.101382 | 0 | 0.117647 | 0 | 0 | 0.023316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.176471 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0fbc18a3420f689083c5d29cf438daccff37a32 | 1,327 | py | Python | deepaugment/examples/run_full_model_on_pawprint_images.py | abcp4/deepaugment | dd45cdcbd00ca7dfb7c8035252e45ecaed05bfbd | [
"MIT"
] | 221 | 2019-02-22T06:48:41.000Z | 2022-03-30T11:34:03.000Z | deepaugment/examples/run_full_model_on_pawprint_images.py | abcp4/deepaugment | dd45cdcbd00ca7dfb7c8035252e45ecaed05bfbd | [
"MIT"
] | 16 | 2019-04-02T11:33:05.000Z | 2021-05-13T07:47:28.000Z | deepaugment/examples/run_full_model_on_pawprint_images.py | abcp4/deepaugment | dd45cdcbd00ca7dfb7c8035252e45ecaed05bfbd | [
"MIT"
] | 43 | 2019-02-14T00:53:06.000Z | 2022-03-23T10:25:52.000Z | import numpy as np
import os
from keras.preprocessing import image
from sklearn.model_selection import train_test_split
import sys
from os.path import dirname, realpath
file_path = realpath(__file__)
dir_of_file = dirname(file_path)
parent_dir_of_file = dirname(dir_of_file)
sys.path.insert(0, parent_dir_of_file)
from run_full_model import run_full_model
def load_images(image_dir_path):
    subfolders = next(os.walk(image_dir_path))[1]
    img_class = 0
    X_list = []
    y_list = []
    for subfolder in subfolders:
        subfolder_path = os.path.join(image_dir_path, subfolder)
        print(subfolder_path)
        for f in os.listdir(subfolder_path):
            if f.startswith("."):  # don't load hidden files such as .DS_Store
                print(f)
                continue
            img = image.load_img(os.path.join(subfolder_path, f), target_size=(100, 100))
            img_arr = image.img_to_array(img)
            X_list.append(img_arr)
            y_list.append(img_class)
        img_class += 1
    X = np.array(X_list)
    y = np.array(y_list)
    return X, y
X, y = load_images("../../data/raw/pawprints/images")
# policies_path = "../../reports/experiments/pawprints_02-14_19-22/top20_policies.csv"
policies_path = "random"
run_full_model(X, y, epochs=200, batch_size=32, policies_path=policies_path)
| 23.280702 | 88 | 0.681236 | 199 | 1,327 | 4.236181 | 0.39196 | 0.023725 | 0.042705 | 0.03796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023992 | 0.21477 | 1,327 | 56 | 89 | 23.696429 | 0.785029 | 0.078372 | 0 | 0 | 0 | 0 | 0.031199 | 0.025452 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.205882 | 0 | 0.264706 | 0.088235 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0fcc6e422c29a91a03c1fc40f3b97ab36d8a918 | 4,373 | py | Python | phylotoast/test/test_otu_calc.py | bhawan1/phylotoast | 87d4b00f5da30855b9eb05398f2f605dcf61de38 | [
"MIT"
] | 19 | 2015-07-11T18:22:45.000Z | 2022-02-05T19:57:49.000Z | phylotoast/test/test_otu_calc.py | bhawan1/phylotoast | 87d4b00f5da30855b9eb05398f2f605dcf61de38 | [
"MIT"
] | 16 | 2015-12-24T22:11:54.000Z | 2021-12-18T20:26:17.000Z | phylotoast/test/test_otu_calc.py | bhawan1/phylotoast | 87d4b00f5da30855b9eb05398f2f605dcf61de38 | [
"MIT"
] | 7 | 2016-01-07T02:34:26.000Z | 2019-10-24T22:03:54.000Z | #!/usr/bin/env python
"""
:Author: Akshay Paropkari
:Date Created: 10/22/2014
:Abstract: Automated Tests for OTU calculations.
"""
import biom
import unittest
from phylotoast import otu_calc as oc
class otu_calc_Test(unittest.TestCase):
    def setUp(self):
        """
        Setting up the test module. Initializing BIOM format file.
        """
        pass

    def test_otu_name(self):
        """
        Testing the otu_name() function of otu_calc.py.

        :return: Returns OK if the test goals were achieved, otherwise
                 raises error.
        """
        self.taxa = {
            "Unclassified_Methanosarcinales":
                ["k__Archaea", "p__Euryarchaeota", "c__Methanomicrobia",
                 "o__Methanosarcinales", "f__",
                 "g__", "s__concilii"],
            "Campylobacter_gracilis":
                ["k__Bacteria", "p__Proteobacteria", "c__Epsilonproteobacteria",
                 "o__Campylobacterales", "f__Campylobacteraceae", "g__Campylobacter",
                 "s__gracilis"],
            "Escherichia_spp.":
                ["k__Bacteria", "p__Proteobacteria", "c__Gammaproteobacteria",
                 "o__Enterobacteriales", "f__Enterobacteriaceae", "g__Escherichia", "s__"],
            "Fusobacterium_nucleatum":
                ["k__Bacteria", "p__Fusobacteria", "c__Fusobacteria", "o__",
                 "f__", "g__Fusobacterium", "s__nucleatum"],
            "Fusobacterium_spp.":
                ["k__Bacteria", "p__Fusobacteria", "c__Fusobacteria", "o__",
                 "f__", "g__Fusobacterium", "s__"]
        }
        for expected, test in self.taxa.items():
            self.result = oc.otu_name(test)
            # Testing the validity of the otu_name() function
            self.assertEqual(
                self.result, expected,
                msg="Error!\nExpected result: {}.\notu_name() result: {}".
                format(expected, self.result)
            )

    def test_load_core_file(self):
        """
        Testing the load_core_file() function of otu_calc.py

        :return: Returns OK if the test goals were achieved, otherwise
                 raises error.
        """
        result = oc.load_core_file("phylotoast/test/test_core.txt")
        hand_calc = {"Actinomyces_spp.", "Campylobacter_spp.", "Capnocytophaga_spp.",
                     "Catonella_spp.", "Corynebacterium_spp.", "Dialister_spp.",
                     "Eikenella_spp.", "Filifactor_spp.", "Fusobacterium_spp.",
                     "Gemella_spp.", "Granulicatella_spp.", "Kingella_spp.",
                     "Leptotrichia_spp.", "Megasphaera_spp.", "Parvimonas_spp.",
                     "Prevotella_melaninogenica", "Prevotella_spp.", "Selenomonas_noxia",
                     "Selenomonas_spp.", "Streptococcus_anginosus", "Streptococcus_equi",
                     "Streptococcus_infantis", "Streptococcus_spp.",
                     "Unclassified_Lachnospiraceae", "Unclassified_TM7-3",
                     "Unclassified_[Mogibacteriaceae]", "Veillonella_dispar",
                     "Veillonella_parvula", "Veillonella_spp."}
        # Testing if all core OTU's samples were in the output.
        self.assertSetEqual(
            result, hand_calc,
            msg="Error! Genus-species names not calculated as expected."
        )

    def test_assign_otu_membership(self):
        """
        Testing assign_otu_membership() function of otu_calc.py.

        :return: Returns OK if the test goals were achieved, otherwise
                 raises error.
        """
        self.biomf = biom.load_table("phylotoast/test/test.biom")
        self.result = oc.assign_otu_membership(self.biomf)
        # Obtaining the values to be tested
        hand_calc = {"S9": ["GG_OTU_2", "GG_OTU_3", "GG_OTU_5"],
                     "S3": ["GG_OTU_1", "GG_OTU_2", "GG_OTU_4", "GG_OTU_5"],
                     "S6": ["GG_OTU_1", "GG_OTU_2", "GG_OTU_3", "GG_OTU_4", "GG_OTU_5"]}
        # Testing the validity of assign_otu_membership() function
        for sid in ["S3", "S6", "S9"]:
            self.assertListEqual(
                sorted(hand_calc[sid]), sorted(self.result[sid]),
                msg="Error! OTU membership calculations are inaccurate!"
            )

    def tearDown(self):
        """
        Tearing down of this unittest framework.
        """
        pass


if __name__ == "__main__":
    unittest.main()
| 39.044643 | 89 | 0.583124 | 446 | 4,373 | 5.336323 | 0.363229 | 0.02521 | 0.016807 | 0.021429 | 0.19874 | 0.177731 | 0.171849 | 0.171849 | 0.147899 | 0.147899 | 0 | 0.009135 | 0.299108 | 4,373 | 111 | 90 | 39.396396 | 0.767374 | 0.190258 | 0 | 0.061538 | 0 | 0 | 0.418301 | 0.102793 | 0 | 0 | 0 | 0 | 0.046154 | 1 | 0.076923 | false | 0.030769 | 0.046154 | 0 | 0.138462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f0fd958fcbe4a518387af5bdb0b4ac10d866d7d0 | 4,086 | py | Python | old_python_scripts/pagelinks-creator.py | wikitools/wikigraph-controller | 9ceba7ee49259a7f65001b1c8e76cc5aaa087ea5 | [
"MIT"
] | null | null | null | old_python_scripts/pagelinks-creator.py | wikitools/wikigraph-controller | 9ceba7ee49259a7f65001b1c8e76cc5aaa087ea5 | [
"MIT"
] | null | null | null | old_python_scripts/pagelinks-creator.py | wikitools/wikigraph-controller | 9ceba7ee49259a7f65001b1c8e76cc5aaa087ea5 | [
"MIT"
] | null | null | null | import json
import time
import re
import pickle
import sys
import os
DUMPS_PATH = sys.argv[1] if len(sys.argv) >= 2 else '/'
if not DUMPS_PATH.endswith('/'):
    DUMPS_PATH = DUMPS_PATH + '/'
DATA_FILES_FOLDER = 'data-files/'
DUMPS_VERSION = sys.argv[2] if len(sys.argv) >= 3 else 'latest'
TITLE_ID_MAP_FILE_NAME = 'title_to_id.map'
TITLE_ID_MAP_PATH = DATA_FILES_FOLDER + TITLE_ID_MAP_FILE_NAME
OUTFILE = 'links.map'
def create_title_to_id_map():
    title_to_id = {}
    start = time.time()
    with open(DUMPS_PATH + 'enwiki-' + DUMPS_VERSION + '-pages-articles-multistream-index.txt', encoding="UTF-8") as f:
        for line in f:
            line = line[:-1]
            parts = line.split(':', maxsplit=2)
            if not parts[2].startswith('Category') and parts[2].__contains__(':'):
                continue
            title_to_id[parts[2]] = int(parts[1])
    print('Article title to id map created in: ' + str(time.time() - start) + 's.')
    return title_to_id


def save_title_to_id_map():
    title_to_id = create_title_to_id_map()
    with open(TITLE_ID_MAP_PATH, 'wb+') as map:
        pickle.dump(title_to_id, map)


def load_title_to_id_map():
    start = time.time()
    with open(TITLE_ID_MAP_PATH, 'rb') as map:
        title_to_id = pickle.load(map)
    print('Article title to id map loaded in: ' + str(time.time() - start) + 's.')
    return title_to_id


def create_page_links_map(lines_to_proccess=-1, inserts_per_line_to_proccess=-1):
    start = time.time()
    links = {}
    with open(DUMPS_PATH + 'enwiki-' + DUMPS_VERSION + '-pagelinks.sql', encoding="UTF-8", errors='ignore') as f:
        # opening pagelinks file - encoding errors
        line_no = 0
        for line in f:
            temp_pageid = 0  # used for printing progress every x lines
            if not line.startswith('INSERT INTO'):  # ignoring create statements and headers
                continue
            if len(line.split(' ')) != 5:  # line has to have minimum 5 parts when it's an insert
                print('Inserts line ' + str(line_no) + ' has unusual number of spaces ' + str(len(line.split(' '))))
            inserts = line.split(' ')[4:]  # extracting part with values to insert in one string
            inserts = ''.join(inserts)
            value_list = inserts.split('),(')
            # extracting string with 4 values, separated by comma and backslashes
            for i in range(len(value_list)):
                # if 0 <= inserts_per_line_to_proccess < i:
                #     break
                values = value_list[i].split('\'')  # splitting each of the 4 values separately
                if (len(values) == 3):
                    try:
                        title = values[1]  # trying to make title look like in indexes file
                    except:
                        title = 'Wrong splitting of values!!!'
                        print('bad title! - Line number' + str(line_no) + ', value: ' + value_list[i])
                    id_getter = values[0].split(',')
                    if i == 0:
                        page_id = int(id_getter[0][1:])  # deleting bracket in first occurrence
                    else:
                        try:
                            page_id = int(id_getter[0])
                        except:
                            page_id = 0
                            print('bad id! - Line number' + str(line_no) + ', inserted data number: ' + str(i))
                    temp_pageid = page_id
                    if not page_id in links:
                        links[page_id] = []  # if it occurs for the first time, create an empty value
                    if title in title_to_id:
                        links[page_id].append(
                            title_to_id[title])  # if managed to find id based on title, append it as child to page
            if line_no % 100 == 0:
                print(str(line_no))
            line_no += 1
            # if 0 <= lines_to_proccess < line_no:
            #     break
    print('Article links map created in: ' + str(time.time() - start) + 's.')
    return links


def createJSON(links):
    print('Creation of map started...')
    json_object = {}
    json_object['pagelinks'] = []
    for parent, child in links.items():
        pagelinks = str(parent)
        for el in child:
            pagelinks += "," + str(el)  # creating a long string consisting of the ID and children IDs based on the links dict
        json_object['pagelinks'].append({
            'pl': pagelinks
        })
    with open(DUMPS_PATH + OUTFILE, 'w') as outfile:
        json.dump(json_object, outfile)


if not os.path.isfile(TITLE_ID_MAP_PATH):
    if not os.path.exists("data-files/"):
        os.makedirs(os.path.dirname(TITLE_ID_MAP_PATH))
    save_title_to_id_map()

title_to_id = load_title_to_id_map()
createJSON(create_page_links_map(400))
print('Completed.') | 33.219512 | 116 | 0.672785 | 646 | 4,086 | 4.049536 | 0.26161 | 0.050841 | 0.065367 | 0.045872 | 0.231651 | 0.147171 | 0.099388 | 0.064602 | 0.045489 | 0.028287 | 0 | 0.011829 | 0.193098 | 4,086 | 123 | 117 | 33.219512 | 0.78162 | 0.163975 | 0 | 0.132653 | 0 | 0 | 0.142353 | 0.010882 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05102 | false | 0 | 0.061224 | 0 | 0.142857 | 0.091837 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b00751199a21103bbd2d9de1bbc3315e858f87a | 2,476 | py | Python | switch_inputs/bioenergy_clean.py | Switch-Mexico/switch-inputs | e2afa96c40b516435c350d525119e4594f1b7eca | [
"MIT"
] | 1 | 2020-07-14T21:50:28.000Z | 2020-07-14T21:50:28.000Z | switch_inputs/bioenergy_clean.py | Switch-Mexico/switch-inputs | e2afa96c40b516435c350d525119e4594f1b7eca | [
"MIT"
] | 14 | 2018-12-14T23:21:09.000Z | 2019-05-10T21:42:36.000Z | switch_inputs/bioenergy_clean.py | Switch-Mexico/switch-inputs | e2afa96c40b516435c350d525119e4594f1b7eca | [
"MIT"
] | 1 | 2020-07-14T21:50:37.000Z | 2020-07-14T21:50:37.000Z | """
Clean bioenergy data from AZEL
"""
import os
import json
import itertools
import geopandas as gpd
import pandas as pd
os.makedirs('data', exist_ok=True)
projection = 'epsg:4326'
name = ['pecuarios', 'forestales', 'industriales', 'urbanos']
scenario = ['E3', 'E1']
for scenario, name in itertools.product(scenario, name):
    # Load bioenergy shape file
    print ('Reading file: {}_R{}.shp'.format(scenario, name))
    df = gpd.read_file('../data/interim/shapes/FBio_{0}_R{1}.shp'.format(scenario,
                                                                         name))
    df = df[df.geometry.notnull()].to_crs({'init': projection})

    # Load transmission region dictionary
    with open(os.path.join('../data/interim/', 'trans-regions.json'), 'r') as fp:
        trans_regions = json.load(fp)

    # Load transmission region shapefiles
    lz = gpd.read_file('../data/interim/shapes/Mask_T.shp')
    lz = lz.to_crs({'init': projection})
    lz.loc[:, 'trans-region'] = (lz['ID'].astype(int)
                                 .map('{0:02}'.format)
                                 .map(trans_regions))

    assert lz.crs == df.crs

    if not 'forestal' in name:
        join = gpd.sjoin(df, lz, op='within')
    else:
        join = gpd.overlay(lz, df, how='intersection')

    # Get specific columns for output data
    try:
        columns = ['trans-region', 'X', 'Y', 'CLASIFICAC', 'TIPO', 'PROCESO',
                   'GENE_GWha', 'CAPINST_MW', 'FP']
        bio = join[columns].copy()
    except KeyError:
        columns = ['trans-region', 'CLASIFICAC', 'TIPO', 'PROCESO',
                   'GENE_GWha', 'CAPINST_MW', 'FP']
        bio = join[columns].copy()

    bio['CLASIFICAC'] = bio.CLASIFICAC.map(str.lower).str.replace(' ', '_')
    bio['TIPO'] = bio.TIPO.map(str.lower).str.replace(' ', '_')
    bio['PROCESO'] = bio.PROCESO.map(str.lower).str.replace(' ', '_')

    if 'E3' in scenario:
        scenario = 'high'
    else:
        scenario = 'low'

    bio.loc[:, 'scenario'] = scenario
    bio.loc[:, 'id'] = name

    bio = bio.rename(columns={'X': 'lng', 'Y': 'lat', 'CLASIFICAC': 'source',
                              'TIPO': 'category', 'FP': 'cf',
                              'GENE_GWha': 'gen_GWha', 'CAPINST_MW': 'cap_MW',
                              'PROCESO': 'fuel_type'})

    print ('Saving data: {0}_{1}'.format(scenario, name))
    bio.to_csv('data/bioenergy_{0}_{1}.csv'.format(scenario, name), index=False)
| 38.6875 | 82 | 0.550485 | 293 | 2,476 | 4.559727 | 0.409556 | 0.053892 | 0.053892 | 0.031437 | 0.21482 | 0.164671 | 0.086826 | 0.086826 | 0.086826 | 0.086826 | 0 | 0.008874 | 0.271809 | 2,476 | 63 | 83 | 39.301587 | 0.732113 | 0.067044 | 0 | 0.122449 | 0 | 0 | 0.231808 | 0.043137 | 0 | 0 | 0 | 0 | 0.020408 | 1 | 0 | false | 0 | 0.102041 | 0 | 0.102041 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b01eb37721be4134c94f63340c71ce5f1825746 | 11,010 | py | Python | src/preprocess.py | hongsups/blog | 01e416fa040ed021d85cd89beff69d72c8972e32 | [
"Apache-2.0"
] | null | null | null | src/preprocess.py | hongsups/blog | 01e416fa040ed021d85cd89beff69d72c8972e32 | [
"Apache-2.0"
] | 3 | 2021-02-13T23:21:48.000Z | 2021-11-04T13:04:54.000Z | src/preprocess.py | hongsups/blog | 01e416fa040ed021d85cd89beff69d72c8972e32 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
incident_causes_list = ['Traffic Stop', 'Emergency/Request for Assistance',
                        'Execution of a Warrant', 'Hostage/Barricade/Other Emergency', 'Other']
age_names = ['1-4', '5-14', '15-24', '25-34', '35-44', '45-54', '55-64', '65-74', '75+']

# report delay
report_delay_days_bins = [0, 7, 14, 30, 60, 90, 180, 360, 720]
report_delay_days_binnames = ['Same Day'] + ['{} to {} Days'.format(report_delay_days_bins[i]+1, report_delay_days_bins[i+1]) for i in range(len(report_delay_days_bins)-1)] + ['More than 720 Days']


def convert_date_cols(df, col_date='date'):
    """
    Convert string format of date to numpy datetime (replace the old columns)
    :param pd.DataFrame df:
    :param str col_date: substring to identify the columns to convert
    :return: dataframe with new replaced columns
    """
    cols_date = df.columns[df.columns.str.contains(col_date)]
    for col in cols_date:
        df[col] = pd.to_datetime(df[col])
    return df


def get_duplicates_from_cols(df, cols_to_use, what_to_keep='first'):
    """
    Check duplicated rows by using the combination of multiple columns
    This is a workaround in the case where one doesn't have unique identifiers for a dataset
    :param pd.DataFrame df:
    :param list cols_to_use: columns to use to create unique combination
    :param str what_to_keep: arguments needed for the duplicated function of pandas,
                             decide which instance to keep
    :return:
    """
    # drop na to avoid confusion
    df_non_na = df[cols_to_use].dropna().copy()
    inds_duplicates_to_drop = df_non_na[df_non_na[cols_to_use].duplicated(keep=what_to_keep)].index
    df_duplicates = df.loc[inds_duplicates_to_drop, cols_to_use]
    df_unique = df.drop(index=inds_duplicates_to_drop)
    return df_unique, df_duplicates


def crosstab_by_topN_cities(df, col_interest, col_incident_loc='incident_county',
                            N=5, ratio=False):
    """
    Select top N location(county, city, etc.) based on the total number of incidents
    Sort them by total number (descending) and then compute the crosstab (pd.crosstab)
    for the column of interest (col_interest). df_pop_county has index as county names
    and a single column that shows population of each county
    :param pd.DataFrame df:
    :param str col_interest: column names to visualize
    :param col_incident_loc: column names that have county (location) information
    :param int N: no. of top counties to compute
    :param bool ratio: if True, normalize the data and return the ratio. Otherwise, integer counts
    :return:
    """
    # get the index of the locations based on its total counts
    topN_loc_indices = df.groupby(col_incident_loc)[col_interest].count().sort_values(
        ascending=False)[:N].index
    # transpose so that our interest becomes columns
    df_crosstab = pd.crosstab(df[col_interest], df.loc[df[col_incident_loc].isin(topN_loc_indices), col_incident_loc]).T
    df_crosstab['TOTAL'] = df_crosstab.sum(axis=1)

    if 'race' in col_interest:
        col_list = ['WHITE', 'BLACK', 'HISPANIC', 'OTHER']
        # some category might be missing
        if df_crosstab.shape[1] < len(col_list)+1:
            missing_cols = list(set(col_list) - set(df_crosstab.columns))
            for col in missing_cols:
                df_crosstab[col] = 0
    else:
        col_list = list(np.sort(df[col_interest].unique()))

    df_crosstab = df_crosstab.loc[:, col_list + ['TOTAL']]

    if ratio:
        df_crosstab = df_crosstab.sort_values(by='TOTAL', ascending=False)
        df_crosstab_ratio = df_crosstab.apply(lambda x: x/x['TOTAL'], axis=1).drop('TOTAL', axis=1)
        return df_crosstab_ratio
    else:
        return df_crosstab.sort_values(by='TOTAL', ascending=False)


def pct(df, axis):
    """
    Compute percentage by normalizing based on the total sum
    :param pd.DataFrame df:
    :param int axis: 0 for rows, 1 for columns
    :return: normalized dataframe
    """
    if axis == 1:
        return df.apply(lambda x: x/df.sum(axis=axis))*100
    if axis == 0:
        return df.apply(lambda x: x/df.sum(axis=axis), axis=1)*100


def count_agencies_by_year_type(df, agency_names, N=5):
    """
    Count the number of agencies by agency type (police, sheriff, and others)
    by year and county.
    :param pd.DataFrame df: officer or civilian dataset
    :param list or np.array agency_names: list of columns that have agency names,
                                          e.g., 'agency_name_1'
    :param int N: number of counties to visualize
    :return:
    """
    # select the agency names and remove empty values
    df_agency_names = df[agency_names].values.ravel()
    df_agency_names = df_agency_names[~pd.isnull(df_agency_names)]

    # categorize agency names based on substring
    dict_agency_names_all = dict()
    dict_agency_names_all['police'] = [s for s in df_agency_names if 'POLICE' in s]
    dict_agency_names_all['sheriff'] = [s for s in df_agency_names if 'SHER' in s]
    dict_agency_names_all['other'] = [s for s in df_agency_names
                                      if 'POLICE' not in s and 'SHER' not in s]

    # select the top N agencies
    dict_agency_topN = dict()
    for key, val in dict_agency_names_all.items():
        dict_agency_topN[key] = pd.Series(val).value_counts()[:N].index

    # count the agency names by year and focus on the top N agencies
    years = sorted(df['year'].unique())
    df_agency_count = dict()
    for year in years:
        df_year = df[df['year']==year]
        df_agency_names = df_year[agency_names].values.ravel()
        df_agency_names = df_agency_names[pd.isnull(df_agency_names) == False]

        dict_agency_names = dict()
        dict_agency_names['police'] = [s for s in df_agency_names if 'POLICE' in s]
        dict_agency_names['sheriff'] = [s for s in df_agency_names if 'SHER' in s]
        dict_agency_names['other'] = [s for s in df_agency_names if 'POLICE' not in s and 'SHER' not in s]

        dict_results = dict()
        for key, val in dict_agency_names.items():
            temp = pd.Series(val).value_counts()
            temp_topN = temp[temp.index.isin(dict_agency_topN[key])]
            dict_results['n_' + key] = len(np.unique(dict_agency_names[key]))
            dict_results[key + '_top'] = temp_topN
        df_agency_count[year] = dict_results
    df_agency_count = pd.DataFrame(df_agency_count).T

    # Using this information, create a dataframe for plotting
    df_agency_count_plot = dict()
    for key in dict_agency_names.keys():  # agency types
        temp = pd.concat(df_agency_count[key + '_top'].values, axis=1).fillna(0).T
        temp.index = years
        if key == 'police':
            temp.columns = [s.split('POLICE')[0].strip() for s in temp.columns]
        elif key == 'sheriff':
            temp.columns = [s.split('SHER')[0].strip() for s in temp.columns]
        df_agency_count_plot[key] = temp
    return df_agency_count, df_agency_count_plot


def clean_incident_causes(s):
    if 'EMERGENCY' in s:
        return 'Emergency/Request for Assistance'
    elif 'HOSTAGE' in s:
        return 'Hostage/Barricade/Other Emergency'
    elif 'OTHER' in s:
        return 'Other'
    elif 'TRAFFIC STOP' in s:
        return 'Traffic Stop'
    elif 'WARRANT' in s:
        return 'Execution of a Warrant'
    else:
        raise ValueError('Double check the string from incident causes.')


class Preprocess:
    """Preprocess the raw OIS data (csv file from the TJI website) and return a preprocessed dataframe
    """
    def __init__(
            self,
            df,
            correct_county_names,
            years = [2016, 2017, 2018, 2019, 2020]
    ):
        self.df = df
        self.correct_county_names = correct_county_names
        self.years = years

    def add_date_cols(self):
        self.df = convert_date_cols(self.df, 'date')
        self.df.loc[:, 'year'] = self.df['date_incident'].dt.year.values
        self.df.loc[:, 'month'] = self.df['date_incident'].dt.month.values
        return self.df

    def select_rows_by_year(self):
        self.df = self.df.loc[self.df['year'].isin(self.years)]

    def check_county_names(self):
        non_existent_counties = set(self.df['incident_county']) - set(self.correct_county_names)
        if len(non_existent_counties) > 0:
            raise ValueError("Incorrect county names exist: {}".format(non_existent_counties))

    def remove_duplicates(self):
        df_civilian_unique, _ = get_duplicates_from_cols(
            self.df,
            ['civilian_name_full', 'date_incident'],
            what_to_keep='first'
        )
        self.df = df_civilian_unique

    def add_death_indicator_col(self, death_injury_col_name):
        self.df['died'] = self.df[death_injury_col_name]=='DEATH'

    def clean_incident_cause_str(self):
        self.df.loc[self.df['incident_result_of']=='EMERGENCY', 'incident_result_of'] = 'EMERGENCY CALL OR REQUEST FOR ASSISTANCE'
        self.df.loc[self.df['incident_result_of']=='EMERGENCY CALL OR REQUEST FOR ASSISTANCE, TRAFFIC STOP', 'incident_result_of'] = \
            'EMERGENCY CALL OR REQUEST FOR ASSISTANCE; TRAFFIC STOP'
        self.df['incident_result_of'] = self.df['incident_result_of'].str.strip()
        df_causes_list = self.df['incident_result_of'].str.split(';')
        df_causes_list_clean = df_causes_list.apply(lambda x: [clean_incident_causes(s) for s in x]).apply(pd.Series)
        df_causes_list_clean_separated = df_causes_list_clean.stack().str.get_dummies().sum(level=0)[incident_causes_list]
        self.df = pd.concat([self.df, df_causes_list_clean_separated], axis=1)

    def add_age_groups(self):
        bins = [5, 15, 25, 35, 45, 55, 65, 75, 100]
        self.df['civilian_age_binned'] = np.digitize(self.df['civilian_age'], bins)

    def compute_report_delay(self):
        self.df.loc[:, 'delay_days'] = (self.df['date_ag_received'] - self.df['date_incident']).dt.days
        self.df.loc[self.df['delay_days']<0, 'delay_days'] = np.nan

        # bin the report delay
        bins = [0, 7, 14, 30, 60, 90, 180, 360, 720]
        delay_bins = np.digitize(self.df['delay_days'].values, bins, right=True)
        nan_inds = np.argwhere(pd.isnull(self.df['delay_days']).values).ravel()
        delay_bins[nan_inds] = -1
        self.df.loc[:, 'delay_bin_label'] = delay_bins

    def get_civilian_data(self):
        self.check_county_names()
        self.add_date_cols()
        self.select_rows_by_year()
        self.remove_duplicates()
        self.add_death_indicator_col(death_injury_col_name='civilian_died')
        self.clean_incident_cause_str()
        self.add_age_groups()
        self.compute_report_delay()
        return self.df

    def get_officer_data(self):
        self.check_county_names()
        self.add_date_cols()
        self.select_rows_by_year()
        self.add_death_indicator_col(death_injury_col_name='officer_harm')
        self.compute_report_delay()
        return self.df
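

# Illustrative usage (not part of the original module). The CSV name and the way the
# county list is built below are hypothetical placeholders; the class only needs the raw
# TJI OIS dataframe plus a list of valid county names.
if __name__ == '__main__':
    raw = pd.read_csv('tji_civilians_shot.csv')          # hypothetical raw OIS export
    county_names = raw['incident_county'].dropna().unique()
    civilians = Preprocess(raw, correct_county_names=county_names).get_civilian_data()
    print(civilians[['date_incident', 'died', 'delay_bin_label']].head())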
| 40.036364 | 197 | 0.66812 | 1,631 | 11,010 | 4.275291 | 0.185776 | 0.033558 | 0.029829 | 0.007027 | 0.262297 | 0.205507 | 0.189302 | 0.164635 | 0.144271 | 0.117596 | 0 | 0.016575 | 0.221889 | 11,010 | 275 | 198 | 40.036364 | 0.797362 | 0.198819 | 0 | 0.08589 | 0 | 0 | 0.132104 | 0.005335 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104294 | false | 0 | 0.01227 | 0 | 0.214724 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b070a6623cb430f75477957caa002003f205d14 | 1,745 | py | Python | spotify.py | themoat/spotify-dl | 67af7b25ed847c8432d37d5230ea93715409242b | [
"MIT"
] | 1 | 2020-06-28T23:05:44.000Z | 2020-06-28T23:05:44.000Z | spotify.py | themoat/spotify-dl | 67af7b25ed847c8432d37d5230ea93715409242b | [
"MIT"
] | null | null | null | spotify.py | themoat/spotify-dl | 67af7b25ed847c8432d37d5230ea93715409242b | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import spotipy.util as util
from scaffold import *
from tokens import *
import youtube_dl
def authenticate():
    return util.prompt_for_user_token(username, scope, CLIENT_ID, CLIENT_SECRET, REDIRECT_URL)


def fetch_saved_tracks(sp):
    log.debug('Fetching saved tracks')
    offset = 0
    songs = []
    while True:
        results = sp.current_user_saved_tracks(limit=50, offset=offset)
        log.debug('Got result json {}'.format(results))
        for item in results['items']:
            track = item['track']
            log.debug('Appending {} to songs list'.format(track['name'] + ' - ' + track['artists'][0]['name']))
            songs.append(track['name'] + ' - ' + track['artists'][0]['name'])
            offset += 1
        if results.get('next') is None:
            log.info('All pages fetched, time to leave. Added {} songs in total'.format(offset))
            break
    return songs


def save_songs_to_file(songs):
    with open('songs.txt', 'w') as f:
        f.write('\n'.join(songs))
        f.close()


def download_songs(songs, download_directory):
    ydl_opts = {
        'format': 'bestaudio/best',
        'download_archive': 'downloaded_songs.txt',
        'outtmpl': download_directory + '%(title)s.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
    }
    log.debug('Songs to download: {}'.format(songs))
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        for item in songs:
            try:
                ydl.download([item])
            except Exception:
                print('Failed to download: {}'.format(item))
                continue
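

# Illustrative glue code (not part of the original file): wires the helpers together.
# It assumes the spotipy package is importable, that username/scope and the client
# credentials come from the starred imports above, and that a 'downloads/' directory exists.
if __name__ == '__main__':
    import spotipy

    token = authenticate()
    sp = spotipy.Spotify(auth=token)
    songs = fetch_saved_tracks(sp)
    save_songs_to_file(songs)
    download_songs(songs, 'downloads/')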
| 29.083333 | 111 | 0.590831 | 203 | 1,745 | 4.940887 | 0.53202 | 0.031904 | 0.017946 | 0.041874 | 0.051844 | 0.051844 | 0 | 0 | 0 | 0 | 0 | 0.007855 | 0.270487 | 1,745 | 59 | 112 | 29.576271 | 0.780047 | 0 | 0 | 0 | 0 | 0 | 0.216743 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.108696 | 0.021739 | 0.23913 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b0806474d34ea185831d946d0369770520ca74c | 8,585 | py | Python | 22_GScan/lib/plugins/File_Check.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | 22_GScan/lib/plugins/File_Check.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | 22_GScan/lib/plugins/File_Check.py | hemuke/python | bc99f2b5aee997083ae31f59a2b33db48c8255f3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
# author: 咚咚呛
# Monitor important system directories and log any files that have been modified or created.
# Changes made by the prelink service to binaries are excluded so they do not skew the
# results; every scan skips operations recorded in prelink's logs.
from __future__ import print_function
import os, sys, time, hashlib
from lib.core.globalvar import *
from lib.core.common import *


class File_Check:
    def __init__(self):
        # List of suspicious (changed) files
        self.file_malware = []
        self.CHECK_DIR = ['/bin/', '/sbin/', '/usr/bin/', '/usr/sbin/', '/usr/local/sbin/', '/usr/local/bin/']
        # Whether to monitor only specific files
        self.HIGH_FILTER = True
        # List of monitored file names
        self.HEIGH_FILE_ALARM = ["depmod", "fsck", "fuser", "ifconfig", "ifdown", "ifup", "init", "insmod", "ip",
                                 "lsmod", "modinfo", "modprobe", "nologin", "rmmod", "route", "rsyslogd", "runlevel",
                                 "sulogin", "sysctl", "awk", "basename", "bash", "cat", "chmod", "chown", "cp", "cut",
                                 "date", "df", "dmesg", "echo", "egrep", "env", "fgrep", "find", "grep", "kill",
                                 "logger", "login", "ls", "mail", "mktemp", "more", "mount", "mv", "netstat", "ping",
                                 "ps", "pwd", "readlink", "rpm", "sed", "sh", "sort", "su", "touch", "uname", "gawk",
                                 "mailx", "adduser", "chroot", "groupadd", "groupdel", "groupmod", "grpck", "lsof",
                                 "pwck", "sestatus", "sshd", "useradd", "userdel", "usermod", "vipw", "chattr", "curl",
                                 "diff", "dirname", "du", "file", "groups", "head", "id", "ipcs", "killall", "last",
                                 "lastlog", "ldd", "less", "lsattr", "md5sum", "newgrp", "passwd", "perl", "pgrep",
                                 "pkill", "pstree", "runcon", "sha1sum", "sha224sum", "sha256sum", "sha384sum",
                                 "sha512sum", "size", "ssh", "stat", "strace", "strings", "sudo", "tail", "test", "top",
                                 "tr", "uniq", "users", "vmstat", "w", "watch", "wc", "wget", "whereis", "which", "who",
                                 "whoami", "test"]
        # System execution path
        self.SYS_PATH = get_value('SYS_PATH')
        self.HASH_DB = get_value('SYS_PATH') + '/db/hash_db.txt'
        # The prelink service modifies binaries; these are the paths of prelink's log files
        self.PRELINK_LOG_PATH = ['/var/log/prelink/prelink.log', '/var/log/prelink.log']
        # Start scanning
        self.check_dir_hash()

    # Compute the hash of a single file
    # Returns the hash as a string
    def file_hash(self, file_path):
        try:
            md5obj = hashlib.md5()
            size = 102400
            fp = open(file_path, 'rb')
            while True:
                content = fp.read(size)
                if not content:
                    break
                md5obj.update(content)
            fp.close()
            return md5obj.hexdigest()
        except:
            return "error"

    # Compute the hash of every file under a directory
    # Returns hash_list_content, a list of [file path, hash] pairs
    def dir_hash(self, path):
        hash_list_content = []
        for root, dirs, files in os.walk(path, topdown=True):
            for filename in files:
                # If only important file names are monitored, skip hashing everything else
                if self.HIGH_FILTER:
                    if filename in self.HEIGH_FILE_ALARM:
                        # A symlink may point to a real file that no longer exists
                        if os.path.exists(os.path.join(root, filename)):
                            hash_list = []
                            hash_list.append(os.path.join(root, filename))  # store the absolute file path
                            if 'error' == self.file_hash(os.path.join(root, filename)): continue
                            hash_list.append(self.file_hash(os.path.join(root, filename)))  # store the file hash
                            hash_list_content.append(hash_list)
                else:
                    # A symlink may point to a real file that no longer exists
                    if os.path.exists(os.path.join(root, filename)):
                        hash_list = []
                        hash_list.append(os.path.join(root, filename))  # store the absolute file path
                        hash_list.append(self.file_hash(os.path.join(root, filename)))  # store the file hash
                        hash_list_content.append(hash_list)
        return hash_list_content

    # Load the stored hash database
    # Returns history_hash_list_content ([[path, hash], ...]) and the list of known paths
    def get_history_hash_list(self):
        if not os.path.exists(self.HASH_DB):
            self.write_hash_db("Initialization")
            return "", ""
        if os.path.getsize(self.HASH_DB) == 0:
            self.write_hash_db("Initialization")
            return "", ""
        # Load the hash file contents into a list
        history_hash_list_content = []
        # Collect the absolute file paths into a list
        history_file_path_list = []
        for line in open(self.HASH_DB):
            if line != "" or line != None:
                tmp_hash = []
                tmp_hash.append(line.split('||')[0].split('\n')[0])  # absolute file path
                tmp_hash.append(line.split('||')[1].split('\n')[0])  # file hash
                history_hash_list_content.append(tmp_hash)
                history_file_path_list.append(line.split('||')[0].split('\n')[0])
        return history_hash_list_content, history_file_path_list

    # Write the hash database file
    # The parameter is the operation type:
    # "Initialization" creates the initial hash file,
    # "Coverage" overwrites the hash file after files have changed
    def write_hash_db(self, type):
        time_string = time.time()
        if type == "Initialization":
            if not os.path.exists(self.HASH_DB):
                f = open(self.HASH_DB, "w")
                f.truncate()
                f.close()
            if os.path.getsize(self.HASH_DB) == 0:
                f = open(self.HASH_DB, 'w')
                for check_dir in self.CHECK_DIR:
                    for hash_list in self.dir_hash(check_dir):
                        f.write(hash_list[0] + "||" + hash_list[1] + "||" + str(time_string) + "\n")
                f.close()
        if type == "Coverage":
            if os.path.exists(self.HASH_DB):
                os.remove(self.HASH_DB)
            f = open(self.HASH_DB, 'w')
            for check_dir in self.CHECK_DIR:
                for hash_list in self.dir_hash(check_dir):
                    f.write(hash_list[0] + "||" + hash_list[1] + "||" + str(time_string) + "\n")
            f.close()

    # Determine the operation type when a file change is detected
    # True means the file was modified (its path already exists in the database)
    # False means the file was created
    def check_operation_type(self, file_path, history_file_path_list):
        return True if file_path in history_file_path_list else False

    # Check whether the prelink service exists
    # Returns whether it exists and the contents of its log
    def check_prelink_server(self):
        for path in self.PRELINK_LOG_PATH:
            if os.path.exists(path):
                file_object = open(path)
                try:
                    all_the_text = file_object.read()
                finally:
                    file_object.close()
                return True, all_the_text
        return False, ""

    # Check whether the hashes of the monitored directories have changed
    def check_dir_hash(self):
        # Whether any file change has been detected
        HASH_FILE_TYPE = False
        # Latest hash list
        current_hash_list_content = []
        # Load the stored hash database
        history_hash_list_content, history_file_path_list = self.get_history_hash_list()
        if len(history_hash_list_content) == 0 or len(history_file_path_list) == 0:
            return
        # Check for the prelink service and fetch its log contents
        PRELINK_SERVER, prelingk_log = self.check_prelink_server()
        # Start checking the monitored directories
        for check_dir in self.CHECK_DIR:
            try:
                current_hash_list_content = self.dir_hash(check_dir)
                for hash_list in current_hash_list_content:
                    # Check whether a matching hash record already exists
                    if not hash_list in history_hash_list_content:
                        HASH_FILE_TYPE = True
                        # Check whether the change was made by the prelink service
                        if PRELINK_SERVER:
                            if len(prelingk_log) > 0:
                                # Skip the file if it appears in prelink's log
                                if prelingk_log.find(hash_list[0]) > 0: continue
                        # Record the changed file
                        self.file_malware.append({'file': hash_list[0],
                                                  'action': 'Edit' if self.check_operation_type(hash_list[0],
                                                                                                history_file_path_list) else 'Create',
                                                  'newMD5': hash_list[1]})
            except:
                continue
        # Files have changed, so overwrite the hash database
        if HASH_FILE_TYPE: self.write_hash_db("Coverage")


if __name__ == '__main__':
    info = File_Check().file_malware
    for i in info:
        print(i)
| 44.252577 | 134 | 0.508561 | 886 | 8,585 | 4.69526 | 0.328442 | 0.069231 | 0.050481 | 0.036538 | 0.271154 | 0.250481 | 0.223317 | 0.20625 | 0.155288 | 0.146635 | 0 | 0.008442 | 0.365288 | 8,585 | 193 | 135 | 44.481865 | 0.755001 | 0.091322 | 0 | 0.255474 | 0 | 0 | 0.114407 | 0.003612 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058394 | false | 0.007299 | 0.029197 | 0.007299 | 0.167883 | 0.014599 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b087fc92a5c2ccca05a7e240d35465e0d13304d | 2,960 | py | Python | src/lyxnotebook/entry_points.py | abarker/lyxNotebook | c458b21e1b183b94172414e14dea671e1f3d4b22 | [
"MIT"
] | 12 | 2015-07-16T13:39:04.000Z | 2022-02-14T15:36:10.000Z | src/lyxnotebook/entry_points.py | abarker/lyxNotebook | c458b21e1b183b94172414e14dea671e1f3d4b22 | [
"MIT"
] | 4 | 2020-03-11T00:33:50.000Z | 2020-05-21T22:05:13.000Z | src/lyxnotebook/entry_points.py | abarker/lyxNotebook | c458b21e1b183b94172414e14dea671e1f3d4b22 | [
"MIT"
] | 3 | 2015-07-16T13:39:06.000Z | 2020-04-15T19:17:45.000Z | """
These are the entry points to run LyxNotebook. They are set up in `setup.py`
to become command-line commands.
"""
import os
import sys
import argparse
from . import config_file_processing
script_path = os.path.abspath(__file__)
script_dir = os.path.dirname(script_path)
def parse_args():
    """Parse the command-line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--install", action="store_true", help=
        "Install files in the LyX user directory specified by the '--user-dir'"
        " argument, if one is given. By default files are installed in '~/.lyx'.")
    parser.add_argument("--no-editable-insets", action="store_true", help=
        "Whether or not the LyX version has editable insets (4.0 or greater)."
        " Needed to generate the layout module files since a new, incompatible,"
        " property was added (EditExternal). Only meaningful with '--install';"
        " otherwise use the setting in the 'lyxnotebook.cfg' file.")
    parser.add_argument("--user-dir", nargs=1, help=
        "The LyX user directory in which to find or install LyX Notebook layout and"
        " binding files and the 'lyxnotebook.cfg' config file. The default is"
        " '~/.lyx'.")
    parser.add_argument("--ensure-tty", action="store_true", help=
        "Passed to run LyX Notebook from a LyX LFUN, or any other situation where "
        "there is no obvious tty to associate with LyX Notebook. This checks first, "
        "and opens a new tty if necessary. (Running the interpreters requires a tty "
        "to be associated with them, and the LyX needs a place to write its stdout.)")
    args = parser.parse_args()
    return args


def run_lyxnotebook():
    """Run LyxNotebook in the ordinary way from a terminal."""
    args = parse_args()

    lyx_user_dir = args.user_dir[0] if args.user_dir else "~/.lyx"
    lyx_user_dir = os.path.abspath(os.path.expanduser(lyx_user_dir))

    if args.install:
        from . import install
        # Pass lfun script command to set up a key binding.
        install.run_setup(lyx_user_dir, "\\\"lyxnotebook --ensure-tty --user-dir {}\\\"".format(lyx_user_dir),
                          has_editable_insets=not args.no_editable_insets)
        return

    from . import config_file_processing
    config_file_processing.initialize_config_data(lyx_user_dir)

    if args.ensure_tty:
        cmd_string = "lyxnotebook " + " ".join(sys.argv[1:])
        cmd_string = cmd_string.replace(" --ensure-tty", "")  # Avoid recursive call.
        print("\nCommand to start lyxnotebook, with terminal output:\n ", cmd_string)
        from . import run_lyxnotebook_from_LFUN
        run_lyxnotebook_from_LFUN.main(cmd_string)  # Pass regular script name to call after setup.
    else:
        config_file_processing.initialize_config_data(lyx_user_dir)
        from . import run_lyxnotebook
        run_lyxnotebook.main()
| 41.690141 | 110 | 0.673649 | 406 | 2,960 | 4.751232 | 0.364532 | 0.043546 | 0.036288 | 0.029549 | 0.094349 | 0.05184 | 0.05184 | 0.05184 | 0.05184 | 0 | 0 | 0.002187 | 0.227703 | 2,960 | 70 | 111 | 42.285714 | 0.841645 | 0.106757 | 0 | 0.083333 | 0 | 0 | 0.394275 | 0 | 0.020833 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0.020833 | 0.166667 | 0 | 0.25 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b088883b5dd7c121ec2fcac21cf028f979c4272 | 17,435 | py | Python | _kaggle/_render/heart-diseases-modeling/nb.py | jramirez857/soorgeon | 54ab679f72be38731f5b43c6835f9a14921c396d | [
"Apache-2.0"
] | 26 | 2021-12-01T10:00:31.000Z | 2022-03-24T18:21:58.000Z | _kaggle/_render/heart-diseases-modeling/nb.py | jramirez857/soorgeon | 54ab679f72be38731f5b43c6835f9a14921c396d | [
"Apache-2.0"
] | 31 | 2021-12-20T03:20:37.000Z | 2022-03-15T01:14:40.000Z | _kaggle/_render/heart-diseases-modeling/nb.py | jramirez857/soorgeon | 54ab679f72be38731f5b43c6835f9a14921c396d | [
"Apache-2.0"
] | 4 | 2022-02-03T21:40:55.000Z | 2022-03-26T21:55:33.000Z | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# %%
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import numpy as np
import pandas as pd
import statsmodels.api as sm
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale, StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score, mean_squared_error, r2_score, roc_auc_score, roc_curve, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
import numpy as np
from sklearn.neighbors import LocalOutlierFactor
from sklearn import neighbors
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn import preprocessing
from sklearn.preprocessing import scale
from sklearn.metrics import mean_squared_log_error
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
import random
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
models = [
LogisticRegression, KNeighborsClassifier, SVC, MLPClassifier,
DecisionTreeClassifier, RandomForestClassifier, GradientBoostingClassifier,
XGBClassifier, LGBMClassifier
] #,CatBoostClassifier
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 10)
pd.set_option('display.float_format', lambda x: '%.5f' % x)
# %% [markdown]
# ## Adding Functions
# %%
def degisken_tiplerine_ayirma(data, cat_th, car_th):
    """
    Classify the variables of the dataset passed in through the `data` parameter.

    Parameters
    ----------
    data: pandas.DataFrame
        Dataset to process
    cat_th: int
        Threshold value for categorical variables
    car_th: int
        Threshold value for cardinal variables

    Returns
    -------
    cat_deg: list
        List of categorical variables
    num_deg: list
        List of numeric variables
    car_deg: list
        List of categorical-but-cardinal variables

    Examples
    -------
    df = dataset_yukle("breast_cancer")
    cat, num, car = degisken_tiplerine_ayirma(df, 10, 20)

    Notes
    -------
    cat_deg + num_deg + car_deg = total number of variables
    """
    num_but_cat = [
        i for i in data.columns
        if data[i].dtypes != "O" and data[i].nunique() < cat_th
    ]
    car_deg = [
        i for i in data.columns
        if data[i].dtypes == "O" and data[i].nunique() > car_th
    ]
    num_deg = [
        i for i in data.columns
        if data[i].dtypes != "O" and i not in num_but_cat
    ]
    cat_deg = [
        i for i in data.columns if data[i].dtypes == "O" and i not in car_deg
    ]
    cat_deg = cat_deg + num_but_cat

    print(f"Number of columns/variables in the dataset: {data.shape[1]}")
    print(f"Number of rows/records in the dataset: {data.shape[0]}")
    print("********************************************")
    print(f"Number of numeric variables: {len(num_deg)}")
    print(f"Numeric variables: {num_deg}")
    print("********************************************")
    print(f"Number of categorical variables: {len(cat_deg)}")
    print(f"Categorical variables: {cat_deg}")
    print("********************************************")
    print(f"Number of cardinal variables: {len(car_deg)}")
    print(f"Cardinal variables: {car_deg}")
    print("********************************************")
    return cat_deg, num_deg, car_deg
def categoric_ozet(data, degisken, plot=False, null_control=False):
    """
    Task
    ----------
    Compute the counts of the levels of a categorical variable in the dataset and their
    ratio to the total. Optionally plot the distribution of the variable and report the
    number of null values it contains.

    Parameters
    ----------
    data: pandas.DataFrame
        Dataset containing the categorical variable.
    degisken: String
        Name of the categorical variable.
    plot: bool
        Optional flag to plot the distribution of the categorical variable.
    null_control: bool
        Optional flag to check for null values inside the variable.

    Returns
    -------
    tablo: pandas.DataFrame
        Ratio table of the unique values of the variable

    Examples
    -------
    df = dataset_yukle("titanic")
    cat_deg, num_deg, car_deg = degisken_tiplerine_ayirma(df, 10, 20)
    for i in cat_deg:
        tablo = categoric_ozet(df, i, True, True)
    """
    print(
        pd.DataFrame({
            degisken: data[degisken].value_counts(),
            "Ratio": 100 * data[degisken].value_counts() / len(data)
        }))
    tablo = pd.DataFrame({
        degisken: data[degisken].value_counts(),
        "Ratio": 100 * data[degisken].value_counts() / len(data)
    })
    print("##########################################")
    if plot:
        sns.countplot(x=data[degisken], data=data)
        plt.show(block=True)
    if null_control:
        print(f"Number of null values: {data[degisken].isnull().sum()}")
    return tablo
def dataset_ozet(data, head=5):
    print("##################### Shape #####################")
    print(f"Number of rows: {data.shape[0]}")
    print(f"Number of columns: {data.shape[1]}")
    print("##################### Types #####################")
    print(data.dtypes)
    print("##################### Head #####################")
    print(data.head(head))
    print("##################### Tail #####################")
    print(data.tail(head))
    print("##################### NA Check #####################")
    print(data.isnull().sum())
    print("##################### Quantiles #####################")
    print(data.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
    print("##################### Describe Table #####################")
    print(data.describe().T)
def outlier_threshold(data, degisken):
    Q1 = data[degisken].quantile(0.01)
    Q3 = data[degisken].quantile(0.99)
    Q_Inter_Range = Q3 - Q1
    alt_limit = Q1 - 1.5 * Q_Inter_Range
    ust_limit = Q3 + 1.5 * Q_Inter_Range
    return alt_limit, ust_limit


def threshold_degisimi(data, degisken):
    alt_limit, ust_limit = outlier_threshold(data, degisken)
    data.loc[(data[degisken] < alt_limit), degisken] = alt_limit
    data.loc[(data[degisken] > ust_limit), degisken] = ust_limit
    # data[data[degisken] < alt_limit][degisken] = alt_limit
    # data[data[degisken] > ust_limit][degisken] = ust_limit
    return data
def numeric_ozet(data, degisken, plot=False, null_control=False):
    """
    Task
    ----------
    Summarize the distribution of a numeric variable in the dataset with descriptive
    statistics and quantiles. Optionally plot the distribution of the variable and
    report the number of null values it contains.

    Parameters
    ----------
    data: pandas.DataFrame
        Dataset containing the numeric variable.
    degisken: String
        Name of the numeric variable.
    plot: bool
        Optional flag to plot the distribution of the variable.
    null_control: bool
        Optional flag to check for null values inside the variable.

    Examples
    -------
    df = dataset_yukle("titanic")
    cat_deg, num_deg, car_deg = degisken_tiplerine_ayirma(df, 10, 20)
    for i in num_deg:
        numeric_ozet(df, i, True, True)
    """
    quantiles = [
        0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99
    ]
    print(data[degisken].describe(quantiles).T)
    if plot:
        data[degisken].hist(bins=20)
        plt.xlabel(degisken)
        plt.title(degisken)
        plt.show(block=True)
    print("##########################################")
    if null_control:
        print(f"Number of null values: {data[degisken].isnull().sum()}")
def missing_values_table(dataframe, na_name=False):
    na_columns = [
        col for col in dataframe.columns if dataframe[col].isnull().sum() > 0
    ]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] *
             100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)],
                           axis=1,
                           keys=['n_miss', 'ratio'])
    print(missing_df, end="\n")
    if na_name:
        return na_columns
def one_hot_encoder(dataframe, categorical_cols, drop_first=True):
dataframe = pd.get_dummies(dataframe,
columns=categorical_cols,
drop_first=drop_first)
return dataframe
def model_karsilastirma(df, model, target):
X = df.drop(columns=target)
y = df[target]
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.15,
random_state=42)
model_fit = model().fit(X_train, y_train)
y_pred = model_fit.predict(X_test)
acc = accuracy_score(y_test, y_pred)
print(model, "için sonuç doğruluk değeri:", acc)
return acc
def target_analyser(dataframe, target, num_deg, cat_deg):
for degisken in dataframe.columns:
if degisken in cat_deg:
print(degisken, ":", len(dataframe[degisken].value_counts()))
print(pd.DataFrame({
"COUNT":
dataframe[degisken].value_counts(),
"RATIO":
dataframe[degisken].value_counts() / len(dataframe),
"TARGET_MEAN":
dataframe.groupby(degisken)[target].mean()
}),
end="\n\n\n")
if degisken in num_deg:
print(pd.DataFrame(
{"TARGET_MEAN": dataframe.groupby(target)[degisken].mean()}),
end="\n\n\n")
# %% [markdown]
# ## Some image
# 
# %%
#loading dataset
df = pd.read_csv("../input/heart-disease-uci/heart.csv")
df.head()
# %% [markdown]
# ## Some info
# * age: The person's age in years
# * sex: The person's sex (1 = male, 0 = female)
# * cp: The chest pain experienced (Value 1: typical angina, Value 2: atypical angina, Value 3: non-anginal pain, Value 4: asymptomatic)
# * trestbps: The person's resting blood pressure (mm Hg on admission to the hospital)
# * chol: The person's cholesterol measurement in mg/dl
# * fbs: The person's fasting blood sugar (> 120 mg/dl, 1 = true; 0 = false)
# * restecg: Resting electrocardiographic measurement (0 = normal, 1 = having ST-T wave abnormality, 2 = showing probable or definite left ventricular hypertrophy by Estes' criteria)
# * thalach: The person's maximum heart rate achieved
# * exang: Exercise induced angina (1 = yes; 0 = no)
# * oldpeak: ST depression induced by exercise relative to rest ('ST' relates to positions on the ECG plot. See more here)
# * slope: the slope of the peak exercise ST segment (Value 1: upsloping, Value 2: flat, Value 3: downsloping)
# * ca: The number of major vessels (0-3)
# * thal: A blood disorder called thalassemia (3 = normal; 6 = fixed defect; 7 = reversable defect)
# * target: Heart disease (0 = no, 1 = yes)
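# %% [markdown]
# Illustrative cell (not part of the original analysis): the coded columns described above can be
# mapped to readable labels for quick inspection. The mappings below follow the descriptions in the
# list above, and the helper frame `df_labeled` is a hypothetical name used only in this example;
# the original `df` is left untouched.
# %%
# Decode a few of the integer-coded columns into readable labels (illustrative only)
label_maps = {
    "sex": {1: "male", 0: "female"},
    "fbs": {1: "> 120 mg/dl", 0: "<= 120 mg/dl"},
    "exang": {1: "yes", 0: "no"},
    "target": {1: "heart disease", 0: "no heart disease"},
}
df_labeled = df.copy()
for col, mapping in label_maps.items():
    df_labeled[col] = df_labeled[col].map(mapping)
df_labeled.head()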
# %%
#Analysis of Dataset
dataset_ozet(df)
cat_deg, num_deg, car_deg = degisken_tiplerine_ayirma(df, 10, 20)
# %%
#EDA of Dataset
for i in cat_deg:
categoric_ozet(df, i, True, True)
for i in num_deg:
numeric_ozet(df, i, True, True)
# %%
#All columns analysis based on target column
target_analyser(df, "target", num_deg, cat_deg)
# %%
#Filling missing values
null_cols = missing_values_table(df, True)
for i in null_cols:
df[i] = df[i].fillna(df[i].mean())
#There are no missing values
# %%
#Outlier processing
for i in num_deg:
df = threshold_degisimi(df, i)
# %%
#Data Extraction
df.age.describe()
df.loc[(df["age"] < 40), 'NEW_AGE_CAT'] = 'Young'
df.loc[(df["age"] >= 40) & (df["age"] < 50), 'NEW_AGE_CAT'] = 'Middle Age'
df.loc[(df["age"] >= 50) & (df["age"] < 60), 'NEW_AGE_CAT'] = 'Pre-Old'
df.loc[(df["age"] >= 60), 'NEW_AGE_CAT'] = 'Old'
df.groupby('NEW_AGE_CAT')["target"].mean()
# %%
df.trestbps.describe()
df.loc[(df["trestbps"] < 90), 'NEW_RBP_CAT'] = 'Low'
df.loc[(df["trestbps"] >= 90) & (df["trestbps"] < 120),
'NEW_RBP_CAT'] = 'Ideal'
df.loc[(df["trestbps"] >= 120) & (df["trestbps"] < 140),
'NEW_RBP_CAT'] = 'Pre-HIGH'
df.loc[(df["trestbps"] >= 140), 'NEW_RBP_CAT'] = 'Hypertension'
df.groupby('NEW_RBP_CAT')["target"].mean()
# %%
df.chol.describe()
df.loc[(df["chol"] < 200), 'NEW_CHOL_CAT'] = 'Ideal'
df.loc[(df["chol"] >= 200) & (df["chol"] < 240), 'NEW_CHOL_CAT'] = 'HIGH'
df.loc[(df["chol"] >= 240), 'NEW_CHOL_CAT'] = 'Very Risky'
df.groupby('NEW_CHOL_CAT')["target"].mean()
# %%
#Encoding of categoric columns
cat_deg, num_deg, car_deg = degisken_tiplerine_ayirma(df, 10, 20)
cat_deg = [i for i in cat_deg if i != "target"]
df = one_hot_encoder(df, cat_deg)
df.head()
# %%
#Scaling of numeric columns
scaler = StandardScaler()
df[num_deg] = scaler.fit_transform(df[num_deg])
# %%
#Comparing all models
for mod in models:
model_karsilastirma(df, mod, "target")
# %% [markdown]
# ## SVM Tuning
# %%
X = df.drop(columns="target")
y = df["target"]
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.15,
random_state=42)
svm = SVC()
svm_tuned = SVC(C=1, kernel="linear").fit(X_train, y_train)
y_pred = svm_tuned.predict(X_test)
acc = accuracy_score(y_test, y_pred)
print("SVM accuracy: ", acc)
# %% [markdown]
# ## Logistic Regression Tuning
# %%
X = df.drop(columns="target")
y = df["target"]
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.15,
random_state=42)
loj_model = LogisticRegression(solver="liblinear").fit(X_train, y_train)
y_pred = loj_model.predict(X_test)
acc = accuracy_score(y_test, y_pred)
print("Lojistic_model accuracy: ", acc)
# %% [markdown]
# ## Light GBM Model Tuning
# %%
X = df.drop(columns="target")
y = df["target"]
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.15,
random_state=42)
lgbm_tuned = LGBMClassifier(learning_rate=0.01, max_depth=5,
n_estimators=250).fit(X_train, y_train)
y_pred = lgbm_tuned.predict(X_test)
acc = accuracy_score(y_test, y_pred)
print("LGBM accuracy: ", acc)
| 33.272901 | 182 | 0.643877 | 2,235 | 17,435 | 4.882774 | 0.246532 | 0.032255 | 0.006048 | 0.016494 | 0.390635 | 0.314671 | 0.292404 | 0.266563 | 0.251168 | 0.223861 | 0 | 0.019458 | 0.212962 | 17,435 | 523 | 183 | 33.33652 | 0.775762 | 0.291425 | 0 | 0.242754 | 0 | 0 | 0.151718 | 0.054501 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036232 | false | 0 | 0.192029 | 0 | 0.253623 | 0.152174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b0a88a328801934f25fa6c0a8307d1870201f48 | 6,892 | py | Python | light_field_neural_rendering/src/utils/config_utils.py | ParikhKadam/google-research | 00a282388e389e09ce29109eb050491c96cfab85 | [
"Apache-2.0"
] | 2 | 2022-01-21T18:15:34.000Z | 2022-01-25T15:21:34.000Z | light_field_neural_rendering/src/utils/config_utils.py | ParikhKadam/google-research | 00a282388e389e09ce29109eb050491c96cfab85 | [
"Apache-2.0"
] | 110 | 2021-10-01T18:22:38.000Z | 2021-12-27T22:08:31.000Z | light_field_neural_rendering/src/utils/config_utils.py | admariner/google-research | 7cee4b22b925581d912e8d993625c180da2a5a4f | [
"Apache-2.0"
] | 1 | 2022-02-10T10:43:10.000Z | 2022-02-10T10:43:10.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config utilities."""
import dataclasses
from typing import Any, Callable, Optional
from jax import lax
#-------------------------------------------------------
# MLP parameters
#-------------------------------------------------------
@dataclasses.dataclass
class MLPParams:
"""Parameters for NeRF MLP."""
net_depth: int
net_width: int
net_activation: Callable[Ellipsis, Any]
num_rgb_channels: int
skip_layer: int
def get_mlp_config(config, net_activation):
return MLPParams(
net_depth=config.model.net_depth,
net_width=config.model.net_width,
net_activation=net_activation,
num_rgb_channels=config.model.num_rgb_channels,
skip_layer=config.model.skip_layer,
)
#-------------------------------------------------------
# Rendering parameters
#-------------------------------------------------------
@dataclasses.dataclass
class RenderParams:
"""Parameters related to rendering"""
near: float
far: float
lindisp: bool
white_bkgd: bool
num_coarse_samples: int
num_fine_samples: int
use_viewdirs: bool
noise_std: float
num_rgb_channels: int
rgb_activation: Callable
sigma_activation: Optional[Callable] = None
def get_render_params(config, rgb_activation, sigma_activation=None):
return RenderParams(
near=config.model.near,
far=config.model.far,
white_bkgd=config.model.white_bkgd,
lindisp=config.model.lindisp,
num_coarse_samples=config.model.num_coarse_samples,
num_fine_samples=config.model.num_fine_samples,
use_viewdirs=config.model.use_viewdirs,
noise_std=config.model.noise_std,
rgb_activation=rgb_activation,
sigma_activation=sigma_activation,
num_rgb_channels=config.model.num_rgb_channels,
)
#-------------------------------------------------------
# Position Encoding parameters
#-------------------------------------------------------
@dataclasses.dataclass
class EncodingParams:
"""Parameters for positional encoding"""
name: str
min_deg_point: int
max_deg_point: int
deg_view: int
def get_encoding_params(config):
return EncodingParams(
name=config.model.mapping_type,
min_deg_point=config.model.min_deg_point,
max_deg_point=config.model.max_deg_point,
deg_view=config.model.deg_view,
)
#-------------------------------------------------------
# LightField parameters
#-------------------------------------------------------
@dataclasses.dataclass
class LightFieldParams:
"""Parameters of the lightfield representation"""
name: str
# Light Slab parameters
st_plane: float
uv_plane: float
# Encoding parameters
encoding_name: bool
min_deg_point: int
max_deg_point: int
def get_lightfield_params(config):
config.lightfield.st_plane = config.model.near
config.lightfield.uv_plane = config.model.far
return LightFieldParams(
name=config.lightfield.name,
st_plane=config.lightfield.st_plane,
uv_plane=config.lightfield.uv_plane,
encoding_name=config.lightfield.encoding_name,
min_deg_point=config.lightfield.min_deg_point,
max_deg_point=config.lightfield.max_deg_point,
)
#-------------------------------------------------------
# Transformer parameters
#-------------------------------------------------------
@dataclasses.dataclass
class TransformerParams:
"""Parameters for Transformer."""
num_layers: int
attention_heads: int
qkv_params: Optional[int] = None
mlp_params: Optional[int] = None
dropout_rate: float = 0.
def __post_init__(self):
assert self.dropout_rate == 0, "Dropout not supported yet."
def get_epipolar_transformer_params(config):
return TransformerParams(
num_layers=config.model.transformer_layers,
attention_heads=config.model.transformer_heads,
qkv_params=config.model.qkv_dim,
mlp_params=config.model.transformer_mlp_dim,
dropout_rate=0.)
def get_view_transformer_params(config):
return TransformerParams(
num_layers=config.model.transformer_layers,
attention_heads=config.model.transformer_heads,
qkv_params=config.model.qkv_dim,
mlp_params=config.model.transformer_mlp_dim,
dropout_rate=0.)
#-------------------------------------------------------
# Epipolar Projection parameters
#-------------------------------------------------------
@dataclasses.dataclass
class EpipolarParams:
"""Parameters for epipolar projection"""
use_pixel_centers: bool
min_depth: int
max_depth: int
image_height: int
image_width: int
num_projections: int
num_train_views: int
use_learned_embedding: bool
learned_embedding_mode: str
mask_invalid_projection: bool
use_conv_features: bool
conv_feature_dim: int
ksize1: int
ksize2: int
interpolation_type: str
precision: lax.Precision
def __post_init__(self):
if self.interpolation_type == "linear":
assert (self.use_pixel_centers == False
), "Cannot use pixel center with linear interpolation"
def get_epipolar_params(config):
assert config.dataset.image_height != -1, ("Image height for dataset was not "
"set")
assert config.dataset.image_width != -1, "Image width for dataset was not set"
assert config.model.near != 0, "0 depth projections can lead to error"
assert config.dataset.num_train_views != -1, ("Number of train views should "
"be set")
return EpipolarParams(
use_pixel_centers=config.dataset.use_pixel_centers,
min_depth=config.model.near,
max_depth=config.model.far,
image_height=config.dataset.image_height,
image_width=config.dataset.image_width,
num_projections=config.model.num_projections,
num_train_views=config.dataset.num_train_views,
use_learned_embedding=config.model.use_learned_embedding,
learned_embedding_mode=config.model.learned_embedding_mode,
mask_invalid_projection=config.model.mask_invalid_projection,
use_conv_features=config.model.use_conv_features,
conv_feature_dim=config.model.conv_feature_dim,
ksize1=config.model.ksize1,
ksize2=config.model.ksize2,
interpolation_type=config.model.interpolation_type,
precision=getattr(lax.Precision, config.model.init_final_precision),
)
| 31.327273 | 80 | 0.67397 | 812 | 6,892 | 5.45936 | 0.238916 | 0.099256 | 0.040605 | 0.047372 | 0.162869 | 0.151139 | 0.151139 | 0.124521 | 0.111888 | 0.089781 | 0 | 0.004131 | 0.156994 | 6,892 | 219 | 81 | 31.47032 | 0.758864 | 0.237086 | 0 | 0.204082 | 0 | 0 | 0.043168 | 0 | 0 | 0 | 0 | 0 | 0.040816 | 1 | 0.061224 | false | 0 | 0.020408 | 0.034014 | 0.489796 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b0adb19e81258e1fbdcea69959d95afd67c7522 | 758 | py | Python | tests/test_xp_style.py | TNThieding/exif | 2e59701aec7416fbb3b2db76e7d090f166f1f132 | [
"MIT"
] | 51 | 2018-12-28T19:48:40.000Z | 2021-12-10T00:35:41.000Z | tests/test_xp_style.py | TNThieding/exif | 2e59701aec7416fbb3b2db76e7d090f166f1f132 | [
"MIT"
] | 33 | 2019-02-08T10:15:25.000Z | 2022-02-11T18:37:45.000Z | tests/test_xp_style.py | TNThieding/exif | 2e59701aec7416fbb3b2db76e7d090f166f1f132 | [
"MIT"
] | 11 | 2019-10-24T14:03:02.000Z | 2020-12-10T04:07:20.000Z | """Test special behavior for accessing Windows XP style EXIF attribute."""
import os
import pytest
from exif import Image
read_attributes = [
("xp_author", "XP-Style Author"),
("xp_comment", "XP-Style Comment ⛷"),
("xp_keywords", "XP-Style Keywords"),
("xp_subject", "XP-Style Subject 🤓"),
("xp_title", "XP-Style Title"),
]
@pytest.mark.parametrize(
"attribute, value", read_attributes, ids=[params[0] for params in read_attributes]
)
def test_read(attribute, value):
"""Test reading tags and compare to known baseline values."""
with open(
os.path.join(os.path.dirname(__file__), "windows_xp_tags.jpg"), "rb"
) as image_file:
image = Image(image_file)
assert getattr(image, attribute) == value
| 26.137931 | 86 | 0.671504 | 102 | 758 | 4.843137 | 0.5 | 0.08502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001618 | 0.184697 | 758 | 28 | 87 | 27.071429 | 0.794498 | 0.163588 | 0 | 0 | 0 | 0 | 0.268058 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b0ebdaa54e0ceda206e818639c1c7b8395d9b82 | 3,713 | py | Python | tornado/modules/api/api.py | maqg/wcrobot | 7d026c1a34362c5434105c27c5bd25f08c6fabe2 | [
"MIT"
] | null | null | null | tornado/modules/api/api.py | maqg/wcrobot | 7d026c1a34362c5434105c27c5bd25f08c6fabe2 | [
"MIT"
] | null | null | null | tornado/modules/api/api.py | maqg/wcrobot | 7d026c1a34362c5434105c27c5bd25f08c6fabe2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from conf.dbconfig import TB_APITRACE
from core import dbmysql
from core.err_code import SEGMENT_NOT_EXIST, OCT_SUCCESS, err_desc_ch
from core.log import ERROR, WARNING
from models.Api import Api, API_STATE_NEW, API_STATE_FINISHED
from utils.commonUtil import CRC32
from models.Common import DEFAULT_ACCOUNT_ID
def getApiCount(db, cond=""):
return db.rowcount(TB_APITRACE, cond=cond)
def addTask(db, arg, taskParas):
api = Api(db)
api.accountId = arg["paras"].get("accountId")
api.user = arg["env"].get("USERNAME")
api.state = API_STATE_NEW
api.apiId = arg["api"]
api.type = "task"
api.name = arg.get("apiName") or ""
if (taskParas.get("object")):
api.name = api.name + "[%s]" % str(taskParas.get("object"))
api.request = taskParas
ret = api.add()
return (ret, api.myId)
def addApi(db, arg, taskParas):
api = Api(db)
api.accountId = arg["paras"].get("accountId")
api.user = arg["session"].get("username")
api.state = API_STATE_NEW
api.apiId = arg["api"]
api.name = arg.get("apiName") or ""
if (taskParas.get("object")):
api.name = api.name + "[%s]" % str(taskParas.get("object"))
api.request = arg
ret = api.add()
return (ret, api.simpleObj())
def buildApiResult(res):
errorNo = res["RetCode"]
return {
"errorObj": {
"errorNo": errorNo,
"errorMsg": err_desc_ch.get(errorNo),
},
"data": res["RetObj"]
}
def addApiResult(db, env, arg, result=None):
api = Api(db)
api.accountId = arg["paras"].get("accountId")
api.user = env["USERNAME"]
api.state = API_STATE_FINISHED
api.apiId = arg["api"]
api.name = arg.get("apiName") or ""
api.request = arg
api.reply = buildApiResult(result)
ret = api.add()
return (ret, api.simpleObj())
def deleteApi(db, arg):
apiId = arg["paras"].get("id")
api = getApi(db, apiId=apiId)
if (not api):
WARNING("api %s not exist" % apiId)
return SEGMENT_NOT_EXIST
return api.delete()
def updateApiReply(db, arg):
apiId = arg.get("id")
api = getApi(db, apiId=apiId)
if (not api):
WARNING("api %s not exist" % apiId)
return SEGMENT_NOT_EXIST
# TBD
return api.updateReply()
def getApis(db, arg):
listObj = {
"data": [],
"total": 0
}
cond = "WHERE 1=1 "
accountId = arg["paras"].get("accountId")
start = arg["paras"].get("start") or 0
limit = arg["paras"].get("limit") or 10
keyword = arg["paras"].get("keyword") or ""
type = arg["paras"].get("type")
apiName = arg["paras"].get("apiName")
serverTaskId = arg["paras"].get("serverTaskId")
if (accountId and accountId != DEFAULT_ACCOUNT_ID):
cond += "AND AT_AccountId='%s' " % (accountId)
if (type):
cond += "AND AT_Type='%s' " % type
if (apiName):
cond += "AND AT_Name LIKE '%%%s%%' " % (apiName)
if (keyword):
cond += "AND AT_ApiId LIKE '%%%s%%' " % (keyword)
if (serverTaskId):
cond += "AND AT_ServerTaskId='%s' " % serverTaskId
cond += "ORDER BY AT_StartTime DESC"
ret = db.select(TB_APITRACE, cond=cond, limit=int(limit), offset=int(start))
if ret == -1:
ERROR("get modules list error")
return (OCT_SUCCESS, listObj)
hashStr = ""
for dur in db.cur:
obj = dbmysql.row_to_dict(TB_APITRACE, dur)
api = Api(db, dbObj=obj)
api.loadFromObj()
obj = api.toObj()
hashStr += api.myId
hashStr += api.state
listObj["data"].append(obj)
listObj["total"] = getApiCount(db, cond=cond)
listObj["hashValue"] = CRC32(hashStr)
return (OCT_SUCCESS, listObj)
def getApi(db, apiId=None, apiName=None, submoduleId=None):
if (not apiId):
return None
cond = "WHERE ID='%s'" % (apiId)
dbObj = db.fetchone(TB_APITRACE, cond=cond)
if (not dbObj):
WARNING("module %s not exist" % cond)
return None
api = Api(db, dbObj=dbObj)
api.loadFromObj()
return api
| 22.233533 | 77 | 0.660382 | 540 | 3,713 | 4.464815 | 0.235185 | 0.036499 | 0.050187 | 0.033181 | 0.321858 | 0.299876 | 0.291165 | 0.291165 | 0.263791 | 0.263791 | 0 | 0.003863 | 0.16348 | 3,713 | 166 | 78 | 22.36747 | 0.772376 | 0.011312 | 0 | 0.327731 | 0 | 0 | 0.147492 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07563 | false | 0 | 0.058824 | 0.008403 | 0.252101 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b0fef11f33991a8404426d2f39ce4db0fc17bdf | 1,157 | py | Python | app/httprpc.py | zhangpanyi/btsmonitor | d61599300f929753fcf8385cc37f6ed99726732c | [
"MIT"
] | 5 | 2018-02-18T14:35:50.000Z | 2019-07-10T13:53:33.000Z | app/httprpc.py | zhangpanyi/btsmonitor | d61599300f929753fcf8385cc37f6ed99726732c | [
"MIT"
] | 4 | 2018-02-18T23:37:59.000Z | 2021-11-26T14:23:24.000Z | app/httprpc.py | zhangpanyi/btsmonitor | d61599300f929753fcf8385cc37f6ed99726732c | [
"MIT"
] | 4 | 2018-02-18T14:35:10.000Z | 2019-05-17T10:35:58.000Z | # -*- coding:utf-8 -*-
import json
import aiohttp
from .asyncrpc import RPCError
from bitsharesbase.chains import known_chains
class HttpRPC(object):
''' Short-lived (per-request) HTTP RPC client
'''
def __init__(self, access, loop=None):
self._url = 'https://' + access
self._loop = loop
async def _rpc(self, method, params):
''' Remote procedure call
'''
# Build the request payload
request = {'id': 1, 'method': method, 'params': params}
# Send the request asynchronously
async with aiohttp.ClientSession() as session:
async with session.post(self._url, json=request) as resp:
# Parse the response body
ret = json.loads(await resp.text())
if 'error' in ret:
if 'detail' in ret['error']:
raise RPCError(ret['error']['detail'])
else:
raise RPCError(ret['error']['message'])
return ret['result']
def __getattr__(self, name):
''' Simplify method invocation via attribute access
'''
async def method(*args, **kwargs):
return await self._rpc(name, [*args])
return method
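# Illustrative usage sketch (not part of the original module). The node address and the RPC
# method name below are placeholders, not values defined in this project; __getattr__ above
# turns any attribute access into a remote call with the positional arguments as params.
if __name__ == '__main__':
    import asyncio

    async def _demo():
        client = HttpRPC('node.example.com/rpc')  # hypothetical endpoint
        info = await client.get_dynamic_global_properties()  # hypothetical RPC method name
        print(info)

    asyncio.run(_demo())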
| 28.925 | 70 | 0.499568 | 114 | 1,157 | 4.947368 | 0.517544 | 0.042553 | 0.056738 | 0.074468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002782 | 0.378565 | 1,157 | 39 | 71 | 29.666667 | 0.781641 | 0.063959 | 0 | 0 | 0 | 0 | 0.0666 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.173913 | 0 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b1070cbf18c29a6ad2654bf66152a86ebfd2a4a | 1,199 | py | Python | checkbook/loader.py | monospacesoftware/checkbook | b8e3ea498dc6b4d0c3fddedabfb208de5b516361 | [
"MIT"
] | null | null | null | checkbook/loader.py | monospacesoftware/checkbook | b8e3ea498dc6b4d0c3fddedabfb208de5b516361 | [
"MIT"
] | null | null | null | checkbook/loader.py | monospacesoftware/checkbook | b8e3ea498dc6b4d0c3fddedabfb208de5b516361 | [
"MIT"
] | null | null | null | import re
from os import listdir
from os.path import isfile
from checkbook.chase import Chase
from checkbook.database import Database
from checkbook.psecu import Psecu
from checkbook.transaction_source import TransactionSource
class Loader:
@classmethod
def load_incoming(cls, db: Database):
chase = Chase()
psecu = Psecu()
for path in cls.list_incoming_files():
if re.match(".*chase.*", path, re.IGNORECASE):
cls.load_trans(path, chase, db, "2_Amazon Credit Card")
elif re.match(".*psecu.*", path, re.IGNORECASE):
cls.load_trans(path, psecu, db, "1_PSECU Joint Checking")
else:
print(f"Skipping unrecognized file {path}")
@classmethod
def load_trans(cls, path: str, source: TransactionSource, db: Database, acct_name: str):
for tran in source.load(path, acct_name):
db.add(tran)
@classmethod
def list_incoming_files(cls):
paths = []
for file_name in listdir('incoming'):
path = f"incoming/{file_name}"
if not isfile(path):
continue
paths.append(path)
return paths
| 30.74359 | 92 | 0.621351 | 146 | 1,199 | 5 | 0.369863 | 0.071233 | 0.049315 | 0.052055 | 0.087671 | 0.087671 | 0.087671 | 0 | 0 | 0 | 0 | 0.002326 | 0.282736 | 1,199 | 38 | 93 | 31.552632 | 0.846512 | 0 | 0 | 0.09375 | 0 | 0 | 0.100917 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.21875 | 0 | 0.375 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b119d780ac562813940597b495ba869ce5283d0 | 1,281 | py | Python | deprecated/pelicanconf.py | IEEEComputerSocietyUNB/ieee-computer-society-unb | 49b4226a95450c359c4ddd1266b9af1fa7fe6bda | [
"MIT"
] | 1 | 2019-10-01T01:56:48.000Z | 2019-10-01T01:56:48.000Z | deprecated/pelicanconf.py | IEEEComputerSocietyUNB/ieee-computer-society-unb | 49b4226a95450c359c4ddd1266b9af1fa7fe6bda | [
"MIT"
] | 31 | 2019-09-02T12:53:30.000Z | 2019-10-19T20:34:14.000Z | deprecated/pelicanconf.py | IEEEComputerSocietyUNB/ieee-computer-society-unb | 49b4226a95450c359c4ddd1266b9af1fa7fe6bda | [
"MIT"
] | 2 | 2019-09-13T15:47:46.000Z | 2019-09-28T04:50:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'IEEE Computer Society UnB'
SITENAME = 'IEEE Computer Society UnB'
SITEURL = ''
# Customized settings
THEME = 'bulrush/bulrush'
LOAD_CONTENT_CACHE = False
PATH = 'content'
TIMEZONE = 'America/Sao_Paulo'
DEFAULT_LANG = 'pt-br'
ARTICLE_PATHS = ['articles', ]
ARTICLE_URL = 'articles/{slug}.html'
ARTICLE_SAVE_AS = 'articles/{slug}.html'
PAGE_URL = 'pages/{slug}/'
PAGE_SAVE_AS = 'pages/{slug}.html'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Social
GITHUB_URL = 'http://getpelican.com/'
TWITTER_URL = 'http://getpelican.com/'
FACEBOOK_URL = 'http://getpelican.com/'
# Blogroll
LINKS = (('Facebook', 'http://getpelican.com/'),
('Github', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Facebook', 'http://getpelican.com/'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
| 24.634615 | 77 | 0.69555 | 166 | 1,281 | 5.180723 | 0.566265 | 0.081395 | 0.098837 | 0.069767 | 0.095349 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0037 | 0.156128 | 1,281 | 51 | 78 | 25.117647 | 0.791859 | 0.191257 | 0 | 0 | 0 | 0 | 0.424951 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034483 | 0 | 0.034483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b15d62e42779dc542f62eb75c22bdace6503fd8 | 7,690 | py | Python | vispy/ext/_bundled/husl.py | hmaarrfk/vispy | 7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2 | [
"BSD-3-Clause"
] | 3 | 2019-02-28T16:05:33.000Z | 2020-05-03T21:29:03.000Z | vispy/ext/_bundled/husl.py | hmaarrfk/vispy | 7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2 | [
"BSD-3-Clause"
] | 1 | 2021-06-04T13:48:46.000Z | 2021-06-05T10:57:33.000Z | vispy/ext/_bundled/husl.py | hmaarrfk/vispy | 7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2 | [
"BSD-3-Clause"
] | 1 | 2019-04-03T12:49:18.000Z | 2019-04-03T12:49:18.000Z | """
HUSL colors python implementation.
Source: https://github.com/husl-colors/husl.py
Copyright (c) 2015 Alexei Boronine
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import operator
import math
__version__ = "4.0.2"
m = [
[3.240969941904521, -1.537383177570093, -0.498610760293],
[-0.96924363628087, 1.87596750150772, 0.041555057407175],
[0.055630079696993, -0.20397695888897, 1.056971514242878],
]
m_inv = [
[0.41239079926595, 0.35758433938387, 0.18048078840183],
[0.21263900587151, 0.71516867876775, 0.072192315360733],
[0.019330818715591, 0.11919477979462, 0.95053215224966],
]
refX = 0.95045592705167
refY = 1.0
refZ = 1.089057750759878
refU = 0.19783000664283
refV = 0.46831999493879
kappa = 903.2962962
epsilon = 0.0088564516
# Public API
def husl_to_rgb(h, s, l):
return lch_to_rgb(*husl_to_lch([h, s, l]))
def husl_to_hex(h, s, l):
return rgb_to_hex(husl_to_rgb(h, s, l))
def rgb_to_husl(r, g, b):
return lch_to_husl(rgb_to_lch(r, g, b))
def hex_to_husl(hex):
return rgb_to_husl(*hex_to_rgb(hex))
def huslp_to_rgb(h, s, l):
return lch_to_rgb(*huslp_to_lch([h, s, l]))
def huslp_to_hex(h, s, l):
return rgb_to_hex(huslp_to_rgb(h, s, l))
def rgb_to_huslp(r, g, b):
return lch_to_huslp(rgb_to_lch(r, g, b))
def hex_to_huslp(hex):
return rgb_to_huslp(*hex_to_rgb(hex))
def lch_to_rgb(l, c, h):
return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))
def rgb_to_lch(r, g, b):
return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))
def get_bounds(L):
sub1 = ((L + 16.0) ** 3.0) / 1560896.0
sub2 = sub1 if sub1 > epsilon else L / kappa
ret = []
for [m1, m2, m3] in m:
for t in [0, 1]:
top1 = (284517.0 * m1 - 94839.0 * m3) * sub2
top2 = ((838422.0 * m3 + 769860.0 * m2 + 731718.0 * m1)
* L * sub2 - 769860.0 * t * L)
bottom = (632260.0 * m3 - 126452.0 * m2) * sub2 + 126452.0 * t
ret.append((top1 / bottom, top2 / bottom))
return ret
def intersect_line_line(line1, line2):
return (line1[1] - line2[1]) / (line2[0] - line1[0])
def distance_from_pole(point):
return math.sqrt(point[0] ** 2 + point[1] ** 2)
def length_of_ray_until_intersect(theta, line):
m1, b1 = line
length = b1 / (math.sin(theta) - m1 * math.cos(theta))
if length < 0:
return None
return length
def max_safe_chroma_for_L(L):
lengths = []
for [m1, b1] in get_bounds(L):
x = intersect_line_line((m1, b1), (-1.0 / m1, 0.0))
lengths.append(distance_from_pole((x, b1 + x * m1)))
return min(lengths)
def max_chroma_for_LH(L, H):
hrad = H / 360.0 * math.pi * 2.0
lengths = []
for line in get_bounds(L):
ray_length = length_of_ray_until_intersect(hrad, line)
if ray_length is not None:
lengths.append(ray_length)
return min(lengths)
def dot_product(a, b):
return sum(map(operator.mul, a, b))
def f(t):
if t > epsilon:
return 116 * math.pow((t / refY), 1.0 / 3.0) - 16.0
else:
return (t / refY) * kappa
def f_inv(t):
if t > 8:
return refY * math.pow((t + 16.0) / 116.0, 3.0)
else:
return refY * t / kappa
def from_linear(c):
if c <= 0.0031308:
return 12.92 * c
else:
return (1.055 * math.pow(c, 1.0 / 2.4) - 0.055)
def to_linear(c):
a = 0.055
if c > 0.04045:
return (math.pow((c + a) / (1.0 + a), 2.4))
else:
return (c / 12.92)
def rgb_prepare(triple):
ret = []
for ch in triple:
ch = round(ch, 3)
if ch < -0.0001 or ch > 1.0001:
raise Exception("Illegal RGB value %f" % ch)
if ch < 0:
ch = 0
if ch > 1:
ch = 1
# Fix for Python 3, which by default rounds 4.5 down to 4.0,
# whereas Python 2 rounds it to 5.0; this caused
# a couple of off-by-one errors in the tests. Tests now all pass
# in Python 2 and Python 3.
ret.append(round(ch * 255 + 0.001, 0))
return ret
def hex_to_rgb(hex):
if hex.startswith('#'):
hex = hex[1:]
r = int(hex[0:2], 16) / 255.0
g = int(hex[2:4], 16) / 255.0
b = int(hex[4:6], 16) / 255.0
return [r, g, b]
def rgb_to_hex(triple):
[r, g, b] = triple
return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))
def xyz_to_rgb(triple):
xyz = map(lambda row: dot_product(row, triple), m)
return list(map(from_linear, xyz))
def rgb_to_xyz(triple):
rgbl = list(map(to_linear, triple))
return list(map(lambda row: dot_product(row, rgbl), m_inv))
def xyz_to_luv(triple):
X, Y, Z = triple
if X == Y == Z == 0.0:
return [0.0, 0.0, 0.0]
varU = (4.0 * X) / (X + (15.0 * Y) + (3.0 * Z))
varV = (9.0 * Y) / (X + (15.0 * Y) + (3.0 * Z))
L = f(Y)
# Black will create a divide-by-zero error
if L == 0.0:
return [0.0, 0.0, 0.0]
U = 13.0 * L * (varU - refU)
V = 13.0 * L * (varV - refV)
return [L, U, V]
def luv_to_xyz(triple):
L, U, V = triple
if L == 0:
return [0.0, 0.0, 0.0]
varY = f_inv(L)
varU = U / (13.0 * L) + refU
varV = V / (13.0 * L) + refV
Y = varY * refY
X = 0.0 - (9.0 * Y * varU) / ((varU - 4.0) * varV - varU * varV)
Z = (9.0 * Y - (15.0 * varV * Y) - (varV * X)) / (3.0 * varV)
return [X, Y, Z]
def luv_to_lch(triple):
L, U, V = triple
C = (math.pow(math.pow(U, 2) + math.pow(V, 2), (1.0 / 2.0)))
hrad = (math.atan2(V, U))
H = math.degrees(hrad)
if H < 0.0:
H = 360.0 + H
return [L, C, H]
def lch_to_luv(triple):
L, C, H = triple
Hrad = math.radians(H)
U = (math.cos(Hrad) * C)
V = (math.sin(Hrad) * C)
return [L, U, V]
def husl_to_lch(triple):
H, S, L = triple
if L > 99.9999999:
return [100, 0.0, H]
if L < 0.00000001:
return [0.0, 0.0, H]
mx = max_chroma_for_LH(L, H)
C = mx / 100.0 * S
return [L, C, H]
def lch_to_husl(triple):
L, C, H = triple
if L > 99.9999999:
return [H, 0.0, 100.0]
if L < 0.00000001:
return [H, 0.0, 0.0]
mx = max_chroma_for_LH(L, H)
S = C / mx * 100.0
return [H, S, L]
def huslp_to_lch(triple):
H, S, L = triple
if L > 99.9999999:
return [100, 0.0, H]
if L < 0.00000001:
return [0.0, 0.0, H]
mx = max_safe_chroma_for_L(L)
C = mx / 100.0 * S
return [L, C, H]
def lch_to_huslp(triple):
L, C, H = triple
if L > 99.9999999:
return [H, 0.0, 100.0]
if L < 0.00000001:
return [H, 0.0, 0.0]
mx = max_safe_chroma_for_L(L)
S = C / mx * 100.0
return [H, S, L]
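# Illustrative round-trip sketch (not part of the upstream husl module); the HUSL values below
# are arbitrary example inputs.
if __name__ == "__main__":
    r, g, b = husl_to_rgb(250.0, 75.0, 60.0)  # hue, saturation, lightness -> RGB in [0, 1]
    print([round(c, 3) for c in (r, g, b)])
    print(rgb_to_husl(r, g, b))               # approximately recovers (250.0, 75.0, 60.0)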
| 23.162651 | 78 | 0.586606 | 1,333 | 7,690 | 3.273068 | 0.216804 | 0.016502 | 0.013752 | 0.011918 | 0.22347 | 0.189778 | 0.149438 | 0.12904 | 0.118726 | 0.075636 | 0 | 0.147863 | 0.272692 | 7,690 | 331 | 79 | 23.232628 | 0.632219 | 0.181014 | 0 | 0.265 | 0 | 0 | 0.006205 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 | false | 0 | 0.01 | 0.065 | 0.43 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b16807157db29b3659c0af09e72504b46a2a4be | 3,330 | py | Python | src/incorrect_disambiguation_detection/concatenate_subgraph_entity_embedding_with_cluster_connected_kg_embedding.py | mainuliitkgp/AR-BERT | d6d5e8542a3a1c76edac49cec9e99ebda6395725 | [
"MIT"
] | 4 | 2022-03-06T17:41:57.000Z | 2022-03-22T08:42:58.000Z | src/incorrect_disambiguation_detection/concatenate_subgraph_entity_embedding_with_cluster_connected_kg_embedding.py | mainuliitkgp/AR-BERT | d6d5e8542a3a1c76edac49cec9e99ebda6395725 | [
"MIT"
] | null | null | null | src/incorrect_disambiguation_detection/concatenate_subgraph_entity_embedding_with_cluster_connected_kg_embedding.py | mainuliitkgp/AR-BERT | d6d5e8542a3a1c76edac49cec9e99ebda6395725 | [
"MIT"
] | 1 | 2022-03-19T14:04:42.000Z | 2022-03-19T14:04:42.000Z | import sys
import numpy as np
import json
import pickle
unique_entity_fp = sys.argv[1]
sub_graph_embedding_fp = sys.argv[2]
sub_graph_embedding_indices_fp = sys.argv[3]
connected_entity_to_idx_dict_fp = sys.argv[4]
node_to_cluster_id_dict_fp = sys.argv[5]
cluster_kg_embedding_fp = sys.argv[6]
cluster_kg_embedding_indices_fp = sys.argv[7]
ds_name = sys.argv[8]
# prepare unique entity list
unique_entity_list = []
with open(unique_entity_fp) as fp:
for line in fp:
unique_entity_list.append(line.strip())
# read sub-graph entity embedding and corresponding indices
sub_graph_entity_embedding = np.load(sub_graph_embedding_fp)
sub_graph_entity_embedding_indices = []
with open(sub_graph_embedding_indices_fp) as fp:
for line in fp:
sub_graph_entity_embedding_indices.append(int(line.strip()))
# re-arrange sub-graph entity embedding wrt. unique entities
rearranged_sub_graph_entity_embedding = []
for i, entity in enumerate(unique_entity_list):
idx = sub_graph_entity_embedding_indices.index(i)
emd = sub_graph_entity_embedding[idx]
rearranged_sub_graph_entity_embedding.append(emd)
rearranged_sub_graph_entity_embedding = np.array(rearranged_sub_graph_entity_embedding, dtype = np.float32)
# read connected KG entity to id dictionary and map sub-graph entity index to connected KG entity index
connected_entity_to_idx_dict = json.load(open(connected_entity_to_idx_dict_fp))
sub_graph_entity_idx_to_connected_kg_idx = {}
for i, entity in enumerate(unique_entity_list):
try:
sub_graph_entity_idx_to_connected_kg_idx[int(i)] = connected_entity_to_idx_dict[entity.strip()]
except:
sub_graph_entity_idx_to_connected_kg_idx[int(i)] = -1 # for not mapped sub-graph entities in connected KG
# map sub-graph entity index to cluster id
with open(node_to_cluster_id_dict_fp, "rb") as fp:
node_to_cluster_id_dict = pickle.load(fp)
sub_graph_entity_idx_to_cluster_id_dict = {}
for i, entity in enumerate(unique_entity_list):
node_id_in_kg = sub_graph_entity_idx_to_connected_kg_idx[int(i)]
if node_id_in_kg != -1:
sub_graph_entity_idx_to_cluster_id_dict[int(i)] = node_to_cluster_id_dict[int(node_id_in_kg)]
else:
sub_graph_entity_idx_to_cluster_id_dict[int(i)] = -1 # no cluster membership
# read cluster KG embedding and corresponding indices
cluster_kg_embedding = np.load(cluster_kg_embedding_fp)
cluster_kg_embedding_indices = []
with open(cluster_kg_embedding_indices_fp) as fp:
for line in fp:
cluster_kg_embedding_indices.append(int(line.strip()))
# re-arrange cluster KG embedding wrt. unique entities
rearranged_cluster_kg_entity_embedding = np.zeros((len(unique_entity_list), 50), dtype = np.float32)
for i, entity in enumerate(unique_entity_list):
cluster_id = sub_graph_entity_idx_to_cluster_id_dict[int(i)]
if cluster_id != -1:
idx = cluster_kg_embedding_indices.index(cluster_id)
emd = cluster_kg_embedding[idx]
rearranged_cluster_kg_entity_embedding[i] = emd
# concatenate sub-graph entity embedding and cluster KG embedding
concatenated_embedding = np.concatenate((rearranged_sub_graph_entity_embedding, rearranged_cluster_kg_entity_embedding), axis = 1)
# save concatenated embedding
np.save(concatenated_embedding, 'concatenated_embedding_'+ds_name+'.npy')
| 37 | 130 | 0.791592 | 526 | 3,330 | 4.595057 | 0.148289 | 0.092677 | 0.133223 | 0.123707 | 0.590815 | 0.327265 | 0.248655 | 0.241622 | 0.117915 | 0.090608 | 0 | 0.00657 | 0.131532 | 3,330 | 89 | 131 | 37.41573 | 0.829184 | 0.167267 | 0 | 0.122807 | 0 | 0 | 0.010519 | 0.008342 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.070175 | 0 | 0.070175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b16ad70e459e73dd2b22da02fbb9c36e130e787 | 20,678 | py | Python | metattack/utils.py | AndreamApp/gnn-meta-attack | 8391a5a477a0d19be9755237f8e236d854a9811c | [
"MIT"
] | null | null | null | metattack/utils.py | AndreamApp/gnn-meta-attack | 8391a5a477a0d19be9755237f8e236d854a9811c | [
"MIT"
] | null | null | null | metattack/utils.py | AndreamApp/gnn-meta-attack | 8391a5a477a0d19be9755237f8e236d854a9811c | [
"MIT"
] | null | null | null | """
Implementation of the method proposed in the paper:
'Adversarial Attacks on Graph Neural Networks via Meta Learning'
by Daniel Zügner, Stephan Günnemann
Published at ICLR 2019 in New Orleans, USA.
Copyright (C) 2019
Daniel Zügner
Technical University of Munich
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
from sklearn.model_selection import train_test_split
import scipy.sparse as sp
from scipy.sparse.csgraph import connected_components
def load_npz(file_name):
"""Load a SparseGraph from a Numpy binary file.
Parameters
----------
file_name : str
Name of the file to load.
Returns
-------
sparse_graph : gust.SparseGraph
Graph in sparse matrix format.
"""
if not file_name.endswith('.npz'):
file_name += '.npz'
with np.load(file_name, allow_pickle=True) as loader:
loader = dict(loader)
adj_matrix = sp.csr_matrix((loader['adj_data'], loader['adj_indices'],
loader['adj_indptr']), shape=loader['adj_shape'])
if 'attr_data' in loader:
attr_matrix = sp.csr_matrix((loader['attr_data'], loader['attr_indices'],
loader['attr_indptr']), shape=loader['attr_shape'])
else:
attr_matrix = None
labels = loader.get('labels')
return adj_matrix, attr_matrix, labels
def largest_connected_components(adj, n_components=1):
"""Select the largest connected components in the graph.
Parameters
----------
adj : gust.SparseGraph
Input graph.
n_components : int, default 1
Number of largest connected components to keep.
Returns
-------
sparse_graph : gust.SparseGraph
Subgraph of the input graph where only the nodes in largest n_components are kept.
"""
_, component_indices = connected_components(adj)
component_sizes = np.bincount(component_indices)
components_to_keep = np.argsort(component_sizes)[::-1][:n_components] # reverse order to sort descending
nodes_to_keep = [
idx for (idx, component) in enumerate(component_indices) if component in components_to_keep
]
print("Selecting {0} largest connected components".format(n_components))
return nodes_to_keep
def train_val_test_split_tabular(*arrays, train_size=0.5, val_size=0.3, test_size=0.2, stratify=None,
random_state=None):
"""
Split the arrays or matrices into random train, validation and test subsets.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays or scipy-sparse matrices.
train_size : float, default 0.5
Proportion of the dataset included in the train split.
val_size : float, default 0.3
Proportion of the dataset included in the validation split.
test_size : float, default 0.2
Proportion of the dataset included in the test split.
stratify : array-like or None, default None
If not None, data is split in a stratified fashion, using this as the class labels.
random_state : int or None, default None
Random_state is the seed used by the random number generator;
Returns
-------
splitting : list, length=3 * len(arrays)
List containing train-validation-test split of inputs.
"""
if len(set(array.shape[0] for array in arrays)) != 1:
raise ValueError("Arrays must have equal first dimension.")
idx = np.arange(arrays[0].shape[0])
idx_train_and_val, idx_test = train_test_split(idx,
random_state=random_state,
train_size=(train_size + val_size),
test_size=test_size,
stratify=stratify)
if stratify is not None:
stratify = stratify[idx_train_and_val]
idx_train, idx_val = train_test_split(idx_train_and_val,
random_state=random_state,
train_size=(train_size / (train_size + val_size)),
test_size=(val_size / (train_size + val_size)),
stratify=stratify)
result = []
for X in arrays:
result.append(X[idx_train])
result.append(X[idx_val])
result.append(X[idx_test])
return result
def preprocess_graph(adj):
"""
Perform the processing of the adjacency matrix proposed by Kipf et al. 2017.
Parameters
----------
adj: sp.spmatrix
Input adjacency matrix.
Returns
-------
The matrix (D+1)^(-0.5) (adj + I) (D+1)^(-0.5)
"""
adj_ = adj + sp.eye(adj.shape[0])
rowsum = adj_.sum(1).A1
degree_mat_inv_sqrt = sp.diags(np.power(rowsum, -0.5))
adj_normalized = adj_.dot(degree_mat_inv_sqrt).T.dot(degree_mat_inv_sqrt).tocsr()
return adj_normalized
def unravel_index_tf(ix, shape):
"""
Unravels the input index similar to np.unravel_index. That is, given the "flat"
(i.e. between 0 and shape[0] * shape[1] - 1) input index and a 2D shape computes
the 2D index corresponding to the input index.
Parameters
----------
ix: tf.int32
The input index.
shape: tuple or list of ints with length 2
2D shape (e.g. adjacency matrix dimensions).
Returns
-------
tf.Tensor, dtype int, shape (2,)
The index in the 2D shape corresponding to the "flat" input index ix.
"""
output_list = []
output_list.append(ix // (shape[1]))
output_list.append(ix % (shape[1]))
return tf.stack(output_list)
def ravel_index(ix, shape):
"""
"Flattens" the 2D input index into a single index on the flattened matrix, similar to np.ravel_multi_index.
Parameters
----------
ix: array or list of ints of shape (2,)
The 2D input index.
shape: list or tuple of ints of length 2
The shape of the corresponding matrix.
Returns
-------
int between 0 and shape[0]*shape[1]-1
The index on the flattened matrix corresponding to the 2D input index.
"""
return ix[0]*shape[1] + ix[1]
def ravel_multiple_indices(ixs, shape):
"""
"Flattens" multiple 2D input indices into indices on the flattened matrix, similar to np.ravel_multi_index.
Does the same as ravel_index but for multiple indices at once.
Parameters
----------
ixs: array of ints shape (n, 2)
The array of n indices that will be flattened.
shape: list or tuple of ints of length 2
The shape of the corresponding matrix.
Returns
-------
array of n ints between 0 and shape[0]*shape[1]-1
The indices on the flattened matrix corresponding to the 2D input indices.
"""
return ixs[:, 0] * shape[1] + ixs[:, 1]
def compute_log_likelihood(n, alpha, sum_log_degrees, d_min):
"""
Computes the log likelihood of the observed Powerlaw distribution given the Powerlaw exponent alpha.
Parameters
----------
n: int
The number of samples in the observed distribution whose value is >= d_min.
alpha: float
The Powerlaw exponent for which the log likelihood is to be computed.
sum_log_degrees: float
The sum of the logs of samples in the observed distribution whose values are >= d_min.
d_min: int
The minimum degree to be considered in the Powerlaw computation.
Returns
-------
float
The log likelihood of the given observed Powerlaw distribution and exponent alpha.
"""
return n * tf.log(alpha) + n * alpha * tf.log(d_min) + (alpha + 1) * sum_log_degrees
def update_sum_log_degrees(sum_log_degrees_before, n_old, d_old, d_new, d_min):
"""
Compute the sum of the logs of samples in the observed distribution whose values are >= d_min for a single edge
changing in the graph. That is, given that two degrees in the graph change from d_old to d_new respectively
(resulting from adding or removing a single edge), compute the updated sum of log degrees >= d_min.
Parameters
----------
sum_log_degrees_before: tf.Tensor of floats of length n
The sum of log degrees >= d_min before the change.
n_old: tf.Tensor of ints of length n
The number of degrees >= d_min before the change.
d_old: tf.Tensor of ints, shape [n, 2]
The old (i.e. before change) degrees of the two nodes affected by an edge to be inserted/removed. n corresponds
to the number of edges for which this will be computed in a vectorized fashion.
d_new: tf.Tensor of ints, shape [n,2]
The new (i.e. after the change) degrees of the two nodes affected by an edge to be inserted/removed.
n corresponds to the number of edges for which this will be computed in a vectorized fashion.
d_min: int
The minimum degree considered in the Powerlaw distribution.
Returns
-------
sum_log_degrees_after: tf.Tensor of floats shape (n,)
The updated sum of log degrees whose values are >= d_min after a potential edge being added/removed.
new_n: tf.Tensor dtype int shape (n,)
The updated number of degrees which are >= d_min after a potential edge being added/removed.
"""
# Find out whether the degrees before and after the change are above the threshold d_min.
old_in_range = d_old >= d_min
new_in_range = d_new >= d_min
# Mask out the degrees whose values are below d_min by multiplying them by 0.
d_old_in_range = tf.multiply(d_old, tf.cast(old_in_range, tf.float32))
d_new_in_range = tf.multiply(d_new, tf.cast(new_in_range, tf.float32))
# Update the sum by subtracting the old values and then adding the updated logs of the degrees.
sum_log_degrees_after = sum_log_degrees_before - tf.reduce_sum(tf.log(tf.maximum(d_old_in_range, 1)),
axis=1) + tf.reduce_sum(
tf.log(tf.maximum(d_new_in_range, 1)), axis=1)
# Update the number of degrees >= d_min
new_n = tf.cast(n_old, tf.int64) - tf.count_nonzero(old_in_range, axis=1) + tf.count_nonzero(new_in_range, axis=1)
return sum_log_degrees_after, new_n
def compute_alpha(n, sum_log_degrees, d_min):
"""
Compute the maximum likelihood value of the Powerlaw exponent alpha of the degree distribution.
Parameters
----------
n: int
The number of degrees >= d_min
sum_log_degrees: float
The sum of log degrees >= d_min
d_min: int
The minimum degree considered in the Powerlaw distribution.
Returns
-------
alpha: float
The maximum likelihood estimate of the Powerlaw exponent alpha.
"""
return n / (sum_log_degrees - n * tf.log(d_min - 0.5)) + 1
def degree_sequence_log_likelihood(degree_sequence, d_min):
"""
Compute the (maximum) log likelihood of the Powerlaw distribution fit on a degree distribution.
Parameters
----------
degree_sequence: tf.Tensor dtype int shape (N,)
Observed degree distribution.
d_min: int
The minimum degree considered in the Powerlaw distribution.
Returns
-------
ll: tf.Tensor dtype float, (scalar)
The log likelihood under the maximum likelihood estimate of the Powerlaw exponent alpha.
alpha: tf.Tensor dtype float (scalar)
The maximum likelihood estimate of the Powerlaw exponent.
n: int
The number of degrees in the degree sequence that are >= d_min.
sum_log_degrees: tf.Tensor dtype float (scalar)
The sum of the log of degrees in the distribution which are >= d_min.
"""
# Determine which degrees are to be considered, i.e. >= d_min.
in_range = tf.greater_equal(degree_sequence, d_min)
# Sum the log of the degrees to be considered
sum_log_degrees = tf.reduce_sum(tf.log(tf.boolean_mask(degree_sequence, in_range)))
# Number of degrees >= d_min
n = tf.cast(tf.count_nonzero(in_range), tf.float32)
# Maximum likelihood estimate of the Powerlaw exponent
alpha = compute_alpha(n, sum_log_degrees, d_min)
# Log likelihood under alpha
ll = compute_log_likelihood(n, alpha, sum_log_degrees, d_min)
return ll, alpha, n, sum_log_degrees
def updated_log_likelihood_for_edge_changes(node_pairs, adjacency_matrix, d_min):
"""
Compute the change of the log likelihood of the Powerlaw distribution fit on the input adjacency matrix's degree
distribution that results when adding/removing edges for the input node pairs. Assumes an undirected unweighted
graph.
Parameters
----------
node_pairs: tf.Tensor, shape (e, 2) dtype int
The e node pairs to consider, where each node pair consists of the two indices of the nodes.
adjacency_matrix: tf.Tensor shape (N,N) dtype int
The input adjacency matrix. Assumed to be unweighted and symmetric.
d_min: int
The minimum degree considered in the Powerlaw distribution.
Returns
-------
new_ll: tf.Tensor of shape (e,) and dtype float
The log likelihoods for node pair in node_pairs obtained when adding/removing the edge for that node pair.
new_alpha: tf.Tensor of shape (e,) and dtype float
For each node pair, contains the maximum likelihood estimates of the Powerlaw distributions obtained when
adding/removing the edge for that node pair.
new_n: tf.Tensor of shape (e,) and dtype float
The updated number of degrees which are >= d_min for each potential edge being added/removed.
sum_log_degrees_after: tf.Tensor of floats shape (e,)
The updated sum of log degrees whose values are >= d_min for each of the e potential edges being added/removed.
"""
# For each node pair find out whether there is an edge or not in the input adjacency matrix.
edge_entries_before = tf.cast(tf.gather_nd(adjacency_matrix, tf.cast(node_pairs, tf.int32)), tf.float32)
# Compute the degree for each node
degree_seq = tf.reduce_sum(adjacency_matrix, 1)
# Determine which degrees are to be considered, i.e. >= d_min.
in_range = tf.greater_equal(degree_seq, d_min)
# Sum the log of the degrees to be considered
sum_log_degrees = tf.reduce_sum(tf.log(tf.boolean_mask(degree_seq, in_range)))
# Number of degrees >= d_min
n = tf.cast(tf.count_nonzero(in_range), tf.float32)
# The changes to the edge entries to add an edge if none was present and remove it otherwise.
# i.e., deltas[ix] = -1 if edge_entries[ix] == 1 else 1
deltas = -2 * edge_entries_before + 1
# The degrees of the nodes in the input node pairs
d_edges_before = tf.gather(degree_seq, tf.cast(node_pairs, tf.int32))
# The degrees of the nodes in the input node pairs after performing the change (i.e. adding the respective value of
# delta).
d_edges_after = tf.gather(degree_seq, tf.cast(node_pairs, tf.int32)) + deltas[:, None]
# Sum the log of the degrees after the potential changes which are >= d_min
sum_log_degrees_after, new_n = update_sum_log_degrees(sum_log_degrees, n, d_edges_before, d_edges_after, d_min)
# Update the number of degrees >= d_min
new_n = tf.cast(new_n, tf.float32)
# Updated estimates of the Powerlaw exponents
new_alpha = compute_alpha(new_n, sum_log_degrees_after, d_min)
# Updated log likelihood values for the Powerlaw distributions
new_ll = compute_log_likelihood(new_n, new_alpha, sum_log_degrees_after, d_min)
return new_ll, new_alpha, new_n, sum_log_degrees_after
def likelihood_ratio_filter(node_pairs, modified_adjacency, original_adjacency, d_min, threshold=0.004):
"""
Filter the input node pairs based on the likelihood ratio test proposed by Zügner et al. 2018, see
https://dl.acm.org/citation.cfm?id=3220078. In essence, for each node pair return 1 if adding/removing the edge
between the two nodes does not violate the unnoticeability constraint, and return 0 otherwise. Assumes unweighted
and undirected graphs.
Parameters
----------
node_pairs: tf.Tensor, shape (e, 2) dtype int
The e node pairs to consider, where each node pair consists of the two indices of the nodes.
modified_adjacency: tf.Tensor shape (N,N) dtype int
The input (modified) adjacency matrix. Assumed to be unweighted and symmetric.
original_adjacency: tf.Tensor shape (N,N) dtype int
The input (original) adjacency matrix. Assumed to be unweighted and symmetric.
d_min: int
The minimum degree considered in the Powerlaw distribution.
threshold: float, default 0.004
Cutoff value for the unnoticeability constraint. Smaller means stricter constraint. 0.004 corresponds to a
p-value of 0.95 in the Chi-square distribution with one degree of freedom.
Returns
-------
allowed_mask: tf.Tensor, shape (e,), dtype bool
For each node pair p return True if adding/removing the edge p does not violate the
cutoff value, False otherwise.
current_ratio: tf.Tensor, shape (), dtype float
The current value of the log likelihood ratio.
"""
N = int(modified_adjacency.shape[0])
original_degree_sequence = tf.cast(tf.reduce_sum(original_adjacency, axis=1), tf.float32)
current_degree_sequence = tf.cast(tf.reduce_sum(modified_adjacency, axis=1), tf.float32)
# Concatenate the degree sequences
concat_degree_sequence = tf.concat((current_degree_sequence[None, :], original_degree_sequence[None, :]), axis=1)
# Compute the log likelihood values of the original, modified, and combined degree sequences.
ll_orig, alpha_orig, n_orig, sum_log_degrees_original = degree_sequence_log_likelihood(original_degree_sequence,
d_min)
ll_current, alpha_current, n_current, sum_log_degrees_current = degree_sequence_log_likelihood(
current_degree_sequence, d_min)
ll_comb, alpha_comb, n_comb, sum_log_degrees_combined = degree_sequence_log_likelihood(concat_degree_sequence,
d_min)
# Compute the log likelihood ratio
current_ratio = -2 * ll_comb + 2 * (ll_orig + ll_current)
# Compute new log likelihood values that would arise if we add/remove the edges corresponding to each node pair.
new_lls, new_alphas, new_ns, new_sum_log_degrees = updated_log_likelihood_for_edge_changes(node_pairs,
tf.cast(
modified_adjacency,
tf.float32), d_min)
# Combination of the original degree distribution with the distributions corresponding to each node pair.
n_combined = n_orig + new_ns
new_sum_log_degrees_combined = sum_log_degrees_original + new_sum_log_degrees
alpha_combined = compute_alpha(n_combined, new_sum_log_degrees_combined, d_min)
new_ll_combined = compute_log_likelihood(n_combined, alpha_combined, new_sum_log_degrees_combined, d_min)
new_ratios = -2 * new_ll_combined + 2 * (new_lls + ll_orig)
# Allowed edges are only those for which the resulting likelihood ratio measure is < than the threshold
allowed_edges = new_ratios < threshold
filtered_edges = tf.boolean_mask(node_pairs, allowed_edges)
# Get the flattened indices for the allowed edges [e,2] -> [e,], similar to np.ravel_multi_index
flat_ixs = ravel_multiple_indices(tf.cast(filtered_edges, tf.int32), modified_adjacency.shape)
# Also for the reverse direction (we assume unweighted graphs).
flat_ixs_reverse = ravel_multiple_indices(tf.reverse(tf.cast(filtered_edges, tf.int32), [1]),
modified_adjacency.shape)
# Construct a [N * N] array with ones at the admissible node pair locations and 0 everywhere else.
indices_1 = tf.scatter_nd(flat_ixs[:, None], tf.ones_like(flat_ixs, dtype=tf.float32), shape=[N * N])
indices_2 = tf.scatter_nd(flat_ixs_reverse[:, None], tf.ones_like(flat_ixs_reverse, dtype=tf.float32),
shape=[N * N])
# Add both directions
allowed_mask = tf.clip_by_value(indices_1 + indices_2, 0, 1)
return allowed_mask, current_ratio
| 40.78501 | 119 | 0.667231 | 2,969 | 20,678 | 4.471876 | 0.138094 | 0.015666 | 0.03427 | 0.010846 | 0.417338 | 0.360925 | 0.304135 | 0.274008 | 0.222867 | 0.180161 | 0 | 0.011937 | 0.25457 | 20,678 | 506 | 120 | 40.865613 | 0.849423 | 0.539172 | 0 | 0.06015 | 0 | 0 | 0.021505 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097744 | false | 0 | 0.037594 | 0 | 0.233083 | 0.007519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b17ab164469ffccaa291796f82605247060ae56 | 4,714 | py | Python | Embedded/test/test_readcsv.py | omnisci/mapd-core | cde582ebc3edba3fb86bacefa5bd9b3418a367b4 | [
"Apache-2.0"
] | 266 | 2018-09-27T06:11:36.000Z | 2019-05-10T15:03:55.000Z | Embedded/test/test_readcsv.py | omnisci/mapd-core | cde582ebc3edba3fb86bacefa5bd9b3418a367b4 | [
"Apache-2.0"
] | 96 | 2018-10-01T18:30:31.000Z | 2019-05-13T14:41:11.000Z | Embedded/test/test_readcsv.py | omnisci/mapd-core | cde582ebc3edba3fb86bacefa5bd9b3418a367b4 | [
"Apache-2.0"
] | 38 | 2018-10-04T01:02:54.000Z | 2019-05-09T04:23:35.000Z | import os
import io
import datetime
import pytest
import pyarrow as pa
from pyarrow import csv
import heavydbe as dbe
import ctypes
ctypes._dlopen('libDBEngine.so', ctypes.RTLD_GLOBAL)
root = os.path.join(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
"Tests/Import/datafiles"
)
def test_init():
global engine
engine = dbe.PyDbEngine(
enable_union=1,
enable_columnar_output=1,
enable_lazy_fetch=0,
null_div_by_zero=1,
)
assert bool(engine.closed) == False
engine = None
def test_santander():
table = csv.read_csv(root + "/santander_top1000.csv")
assert table
engine.importArrowTable("santander", table)
assert bool(engine.closed) == False
r = engine.executeDML("select * from santander")
assert r
assert r.colCount() == 202
assert r.rowCount() == 999
def test_usecols_csv():
target = {
'a': [1, 2, 3, 4, 5, 6],
'b': [2, 3, 4, 5, 6, 7],
'c': [3, 4, 5, 6, 7, 8],
'd': [4, 5, 6, 7, 8, 9],
'e': ['5', '6', '7', '8', '9', '0']
}
fp = io.BytesIO(
b'a,b,c,d,e\n1,2,3,4,5\n2,3,4,5,6\n3,4,5,6,7\n4,5,6,7,8\n5,6,7,8,9\n6,7,8,9,0'
)
fp.seek(0)
table = csv.read_csv(
fp,
convert_options=csv.ConvertOptions(
column_types={
'a': pa.int32(),
'b': pa.int64(),
'c': pa.int64(),
'd': pa.int64(),
'e': pa.string(),
}
)
)
assert table
engine.importArrowTable("usecols", table)
assert bool(engine.closed) == False
cursor = engine.executeDML("select * from usecols")
assert cursor
batch = cursor.getArrowRecordBatch()
assert batch
assert batch.to_pydict() == target
def test_time_parsing():
target = {
'timestamp': [datetime.datetime(2010, 4, 1, 0, 0), datetime.datetime(2010, 4, 1, 0, 30), datetime.datetime(2010, 4, 1, 1, 0)],
'symbol': ['USD/JPY', 'USD/JPY', 'USD/JPY'],
'high': [93.526, 93.475, 93.421],
'low': [93.361, 93.352, 93.326],
'open': [93.518, 93.385, 93.391],
'close': [93.382, 93.391, 93.384],
'spread': [0.005, 0.006, 0.006],
'volume': [3049, 2251, 1577]
}
fp = io.BytesIO(
b'timestamp,symbol,high,low,open,close,spread,volume\n'
b'2010-04-01 00:00:00,USD/JPY,93.52600,93.36100,93.51800,93.38200,0.00500,3049\n'
b'2010-04-01 00:30:00,USD/JPY,93.47500,93.35200,93.38500,93.39100,0.00600,2251\n'
b'2010-04-01 01:00:00,USD/JPY,93.42100,93.32600,93.39100,93.38400,0.00600,1577\n'
)
fp.seek(0)
table = csv.read_csv(fp)
assert table
engine.importArrowTable("time_parsing", table)
assert bool(engine.closed) == False
cursor = engine.executeDML("select * from time_parsing")
assert cursor
batch = cursor.getArrowRecordBatch()
assert batch
assert batch.to_pydict() == target
def test_csv_fillna():
target = {
'CRIM': [0.00632],
'ZN': [18.0],
'INDUS': [2.31],
'CHAS': [0.0],
'NOX': [0.538],
'RM': [6.575],
'AGE': [65.2],
'DIS': [4.09],
'RAD': [1.0],
'TAX': [296.0],
'PTRATIO': [15.3],
'B': [396.9],
'LSTAT': [4.98],
'PRICE': [24.0]
}
fp = io.BytesIO(
b',CRIM,ZN,INDUS,CHAS,NOX,RM,AGE,DIS,RAD,TAX,PTRATIO,B,LSTAT,PRICE\n'
b'0,0.00632,18.0,2.31,0.0,0.538,6.575,65.2,4.09,1.0,296.0,15.3,396.9,4.98,24.0\n'
)
fp.seek(0)
table = csv.read_csv(fp)
assert table
engine.importArrowTable("csv_fillna", table)
assert bool(engine.closed) == False
cursor = engine.executeDML("select CRIM,ZN,INDUS,CHAS,NOX,RM,AGE,DIS,RAD,TAX,PTRATIO,B,LSTAT,PRICE from csv_fillna")
assert cursor
batch = cursor.getArrowRecordBatch()
assert batch
assert batch.to_pydict() == target
def test_null_col():
target = {'a': [1, 2, 3], 'b': [1, 2, 3], 'c': [None, None, None]}
fp = io.BytesIO(b'a,b,c\n1,1,\n2,2,\n3,3,\n')
fp.seek(0)
table = csv.read_csv(
fp,
convert_options=csv.ConvertOptions(
column_types={
'a': pa.int32(),
'b': pa.int64(),
'c': pa.int64(),
}
)
)
assert table
engine.importArrowTable("test_null_col", table)
assert bool(engine.closed) == False
cursor = engine.executeDML("select * from test_null_col")
assert cursor
batch = cursor.getArrowRecordBatch()
assert batch
assert batch.to_pydict() == target
if __name__ == "__main__":
pytest.main(["-v", __file__])
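# Usage note (illustrative, not part of the original test file): the
# ctypes._dlopen('libDBEngine.so', ctypes.RTLD_GLOBAL) call near the top
# preloads the engine shared library with global symbol visibility so that the
# heavydbe extension module can resolve its symbols; the library must
# therefore be discoverable by the dynamic loader (e.g. via LD_LIBRARY_PATH).
# Assuming that, the tests can be run directly with pytest:
#
#     pytest -v Embedded/test/test_readcsv.py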
| 29.647799 | 134 | 0.558125 | 687 | 4,714 | 3.745269 | 0.256186 | 0.006218 | 0.037311 | 0.051302 | 0.507579 | 0.450447 | 0.410805 | 0.399145 | 0.381656 | 0.381267 | 0 | 0.134027 | 0.265592 | 4,714 | 158 | 135 | 29.835443 | 0.609185 | 0 | 0 | 0.360544 | 0 | 0.047619 | 0.205389 | 0.128156 | 0 | 0 | 0 | 0 | 0.176871 | 1 | 0.040816 | false | 0 | 0.095238 | 0 | 0.136054 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b18f94f71d17a6b43b3acd30defa7fd83e9b6e1 | 317 | py | Python | app/healthcheck/healthcheck.py | biancarosa/todo-list | 9602c14b1f5d60b6010921b4918131d495b9ba69 | [
"MIT"
] | 1 | 2022-02-21T14:17:21.000Z | 2022-02-21T14:17:21.000Z | app/healthcheck/healthcheck.py | biancarosa/todo-list | 9602c14b1f5d60b6010921b4918131d495b9ba69 | [
"MIT"
] | 1 | 2022-02-21T13:34:49.000Z | 2022-02-21T13:34:49.000Z | app/healthcheck/healthcheck.py | biancarosa/todo-list | 9602c14b1f5d60b6010921b4918131d495b9ba69 | [
"MIT"
] | null | null | null | """app.healthcheck.healthcheck
Module that deals with HealthCheck route."""
from flask import jsonify
import logging
logger = logging.getLogger(__name__)
def healthcheck():
"""Returns health information"""
    logger.info("Healthcheck endpoint hit")
return jsonify({
"message": "I feel good."
})
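# Minimal wiring sketch (an assumption, not taken from this project): the
# healthcheck() view returns a Flask response, so it would typically be
# registered on an app or blueprint, e.g.:
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.add_url_rule("/healthcheck", "healthcheck", healthcheck)
#
# A GET on /healthcheck would then return {"message": "I feel good."}.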
| 21.133333 | 44 | 0.687697 | 35 | 317 | 6.114286 | 0.771429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.195584 | 317 | 15 | 45 | 21.133333 | 0.839216 | 0.305994 | 0 | 0 | 0 | 0 | 0.171429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b194325430617ab5cb1adcd99354c592ca983d4 | 4,364 | py | Python | lib/utils_files.py | minimada/openbmc-test-automation | 1982e7b88a80690202d0f68f4bab978c1675e37f | [
"Apache-2.0"
] | 67 | 2016-12-06T17:52:06.000Z | 2022-01-17T22:12:37.000Z | lib/utils_files.py | minimada/openbmc-test-automation | 1982e7b88a80690202d0f68f4bab978c1675e37f | [
"Apache-2.0"
] | 2,181 | 2016-01-12T05:14:25.000Z | 2022-03-31T17:29:12.000Z | lib/utils_files.py | minimada/openbmc-test-automation | 1982e7b88a80690202d0f68f4bab978c1675e37f | [
"Apache-2.0"
] | 75 | 2015-12-21T06:23:46.000Z | 2021-12-31T15:05:53.000Z | #!/usr/bin/env python3
r"""
This module contains file functions such as file_diff.
"""
import time
import os
import re
from gen_cmd import cmd_fnc_u
robot_env = 1
try:
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries import DateTime
except ImportError:
robot_env = 0
def file_diff(file1_path,
file2_path,
diff_file_path,
skip_string):
r"""
Compare the contents of two text files. The comparison uses the Unix
'diff' command. Differences can be selectively ignored by use of
the skip_string parameter. The output of diff command is written
to a user-specified file and is also written (logged) to the console.
Description of arguments:
file1_path File containing text data.
file2_path Text file to compare to file1.
diff_file_path Text file which will contain the diff output.
skip_string To allow for differences which may expected or immaterial,
skip_string parameter is a word or a string of comma
separated words which specify what should be ignored.
For example, "size,speed". Any line containing the word
size or the word speed will be ignored when the diff is
performed. This parameter is optional.
Returns:
0 if both files contain the same information or they differ only in
items specified by the skip_string.
2 if FILES_DO_NOT_MATCH.
3 if INPUT_FILE_DOES_NOT_EXIST.
4 if IO_EXCEPTION_READING_FILE.
5 if IO_EXCEPTION_WRITING_FILE.
6 if INPUT_FILE_MALFORMED
"""
FILES_MATCH = 0
FILES_DO_NOT_MATCH = 2
INPUT_FILE_DOES_NOT_EXIST = 3
IO_EXCEPTION_READING_FILE = 4
IO_EXCEPTION_WRITING_FILE = 5
INPUT_FILE_MALFORMED = 6
# The minimum size in bytes a file must be.
min_file_byte_size = 1
now = time.strftime("%Y-%m-%d %H:%M:%S")
if (not os.path.exists(file1_path) or (not os.path.exists(file2_path))):
return INPUT_FILE_DOES_NOT_EXIST
try:
with open(file1_path, 'r') as file:
initial = file.readlines()
with open(file2_path, 'r') as file:
final = file.readlines()
except IOError:
file.close()
return IO_EXCEPTION_READING_FILE
except ValueError:
file.close()
return INPUT_FILE_MALFORMED
else:
file.close()
    # Must have more than a trivial number of lines.
if len(initial) < min_file_byte_size:
return INPUT_FILE_MALFORMED
if (initial == final):
try:
file = open(diff_file_path, 'w')
except IOError:
            file.close()
            return IO_EXCEPTION_WRITING_FILE
line_to_print = "Specified skip (ignore) string = " + \
skip_string + "\n\n"
file.write(line_to_print)
line_to_print = now + " found no difference between file " + \
file1_path + " and " + \
file2_path + "\n"
file.write(line_to_print)
file.close()
return FILES_MATCH
# Find the differences and write difference report to diff_file_path file
try:
file = open(diff_file_path, 'w')
except IOError:
file.close()
return IO_EXCEPTION_WRITING_FILE
# Form a UNIX diff command and its parameters as a string. For example,
# if skip_string="size,capacity", command = 'diff -I "size"
# -I "capacity" file1_path file2_path'.
skip_list = filter(None, re.split(r"[ ]*,[ ]*", skip_string))
ignore_string = ' '.join([("-I " + '"' + x + '"') for x in skip_list])
command = ' '.join(filter(None, ["diff", ignore_string, file1_path,
file2_path]))
line_to_print = now + " " + command + "\n"
file.write(line_to_print)
# Run the command and get the differences
rc, out_buf = cmd_fnc_u(command, quiet=0, print_output=0, show_err=0)
# Write the differences to the specified diff_file and console.
if robot_env == 1:
BuiltIn().log_to_console("DIFF:\n" + out_buf)
else:
print("DIFF:\n", out_buf)
file.write(out_buf)
file.close()
if rc == 0:
# Any differences found were on the skip_string.
return FILES_MATCH
else:
# We have at least one difference not in the skip_string.
return FILES_DO_NOT_MATCH
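# Example usage of file_diff() (illustrative only; the paths and skip words
# below are assumptions, not values used by the test suites):
#
#     rc = file_diff("/tmp/before.txt", "/tmp/after.txt",
#                    "/tmp/diff_report.txt", "size,speed")
#     # rc == 0: the files match apart from lines containing "size" or "speed"
#     # rc == 2: they genuinely differ; other codes signal input/IO problems
#     # as documented in the docstring above.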
| 32.81203 | 79 | 0.63451 | 611 | 4,364 | 4.327332 | 0.306056 | 0.037821 | 0.024962 | 0.020424 | 0.121785 | 0.079803 | 0.055976 | 0.034796 | 0.034796 | 0.034796 | 0 | 0.011594 | 0.288497 | 4,364 | 132 | 80 | 33.060606 | 0.839936 | 0.401008 | 0 | 0.337662 | 0 | 0 | 0.055266 | 0 | 0.012987 | 0 | 0 | 0 | 0 | 1 | 0.012987 | false | 0 | 0.090909 | 0 | 0.207792 | 0.103896 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b1bd0979218d8a0b24010e5cf993177e766314c | 1,055 | py | Python | examples/blocking/block_io_operations.py | ckxz105/rltk | 2d08269002c00c0218421c8c2dc0cc7c4f677131 | [
"MIT"
] | 98 | 2017-03-07T22:59:41.000Z | 2022-02-02T16:10:40.000Z | examples/blocking/block_io_operations.py | ckxz105/rltk | 2d08269002c00c0218421c8c2dc0cc7c4f677131 | [
"MIT"
] | 26 | 2017-04-25T17:25:22.000Z | 2021-09-10T16:57:05.000Z | examples/blocking/block_io_operations.py | ckxz105/rltk | 2d08269002c00c0218421c8c2dc0cc7c4f677131 | [
"MIT"
] | 31 | 2017-03-09T22:40:40.000Z | 2022-03-11T16:28:23.000Z | import rltk
b1 = rltk.Block()
b1.add('001', '1', '1')
b1.add('001', '2', 'a')
b1.add('002', '1', '2')
b1.add('002', '2', 'b')
b1.add('002', '2', 'c')
print('--- block1 ---')
for bb in b1:
print(bb)
b2 = rltk.Block()
b2.add('001', '1', '1')
b2.add('001', '2', 'a')
b2.add('001', '2', 'd')
b2.add('002', '1', '1')
b2.add('002', '2', 'c')
b2.add('002', '3', 'k')
print('--- block2 (pairwise) ---')
for bb in b2.pairwise('1', '2'):
print(bb)
print('--- block2 (pairwise, single dataset) ---')
for bb in b2.pairwise('2'):
print(bb)
b1_inverted = rltk.BlockingHelper.generate_inverted_indices(b1)
b2_inverted = rltk.BlockingHelper.generate_inverted_indices(b2)
b3 = rltk.BlockingHelper.union(b1, b1_inverted, b2, b2_inverted)
print('--- union ---')
for bb in b3:
print(bb)
print('--- union raw ---')
for rr in b3.key_set_adapter:
print(rr)
b4 = rltk.BlockingHelper.intersect(b1, b1_inverted, b2, b2_inverted)
print('--- intersect --')
for bb in b4:
print(bb)
print('--- intersect raw --')
for rr in b4.key_set_adapter:
print(rr)
| 23.977273 | 68 | 0.603791 | 172 | 1,055 | 3.622093 | 0.22093 | 0.057785 | 0.05618 | 0.025682 | 0.369181 | 0.250401 | 0.093098 | 0 | 0 | 0 | 0 | 0.097669 | 0.145972 | 1,055 | 43 | 69 | 24.534884 | 0.593785 | 0 | 0 | 0.179487 | 0 | 0 | 0.193365 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025641 | 0 | 0.025641 | 0.358974 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b1d82735f58ce19af7bfb48924b511d6c453095 | 5,058 | py | Python | posts/views.py | promokk/hw05_final | a8e31e9dde904e3e396740c2c9edf773a54865df | [
"BSD-3-Clause"
] | null | null | null | posts/views.py | promokk/hw05_final | a8e31e9dde904e3e396740c2c9edf773a54865df | [
"BSD-3-Clause"
] | null | null | null | posts/views.py | promokk/hw05_final | a8e31e9dde904e3e396740c2c9edf773a54865df | [
"BSD-3-Clause"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.shortcuts import redirect
from django.views.generic import CreateView
from django.urls import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from .models import Post, Group, User, Comment, Follow
from .forms import PostForm, CommentForm
def index(request):
post_list = Post.objects.order_by("-pub_date").all()
paginator = Paginator(post_list, 10)
page_number = request.GET.get("page")
page = paginator.get_page(page_number)
return render(
request,
"index.html",
{"page": page, "paginator": paginator}
)
def group_posts(request, slug):
group = get_object_or_404(Group, slug=slug)
post_list = group.posts.all()
paginator = Paginator(post_list, 10)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return render(
request,
"group.html",
{"group": group, 'page': page, 'paginator': paginator}
)
@login_required
def new_post(request):
form = PostForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
form.save()
return redirect("index")
return render(request, "new_post.html", {
"form": form,
"flag": True
})
def profile(request, username):
author = get_object_or_404(User, username=username)
post_list = author.posts.all()
paginator = Paginator(post_list, 10)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
is_follow = author.following.filter(user=request.user.id).exists()
return render(
request,
"profile.html",
{
"author": author,
"user": request.user,
"page": page,
"paginator": paginator,
"following": is_follow
}
)
def post_view(request, username, post_id):
post = get_object_or_404(Post, author__username=username, id=post_id)
comments = post.comments.all()
form = CommentForm(request.POST or None)
return render(
request,
"post.html",
{
"author": post.author,
"post": post,
"comments": comments,
"form": form
}
)
def post_edit(request, username, post_id):
post = get_object_or_404(Post, author__username=username, id=post_id)
form = PostForm(request.POST or None,
files=request.FILES or None,
instance=post)
if request.method == "POST":
if form.is_valid():
post.text = form.cleaned_data["text"]
post.group = form.cleaned_data["group"]
post.save()
return redirect(
"post",
username=username,
post_id=post_id
)
return render(
request,
"new_post.html",
{
"form": form,
"post": post,
"flag": False
}
)
@login_required
def add_comment(request, username, post_id):
post = get_object_or_404(Post, author__username=username, id=post_id)
form = CommentForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.author = request.user
comment.save()
return redirect(
"post",
username=username,
post_id=post_id
)
return render(
request,
"post.html",
{
"form": form,
"post": post
}
)
def page_not_found(request, exception):
return render(
request,
"misc/404.html",
{"path": request.path},
status=404
)
def server_error(request):
return render(request, "misc/500.html", status=500)
@login_required
def follow_index(request):
post_list = Post.objects.filter(author__following__user=request.user)
paginator = Paginator(post_list, 10)
page_number = request.GET.get("page")
page = paginator.get_page(page_number)
return render(
request,
"follow.html",
{
"page": page,
"paginator": paginator
}
)
@login_required
def profile_follow(request, username):
author = get_object_or_404(User, username=username)
if author != request.user:
Follow.objects.get_or_create(user=request.user, author=author)
return redirect("profile", username=username)
@login_required
def profile_unfollow(request, username):
author = get_object_or_404(User, username=username)
Follow.objects.filter(user=request.user, author=author).delete()
return redirect("profile", username=username)
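# Illustrative URL configuration (an assumption, not this project's actual
# urls.py): the views above rely on named routes such as "index", "post" and
# "profile" via redirect(), so the wiring would look roughly like:
#
#     urlpatterns = [
#         path("", views.index, name="index"),
#         path("group/<slug:slug>/", views.group_posts, name="group_posts"),
#         path("new/", views.new_post, name="new_post"),
#         path("follow/", views.follow_index, name="follow_index"),
#         path("<str:username>/", views.profile, name="profile"),
#         path("<str:username>/<int:post_id>/", views.post_view, name="post"),
#     ]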
| 27.639344 | 73 | 0.589363 | 561 | 5,058 | 5.144385 | 0.158645 | 0.033264 | 0.065835 | 0.038808 | 0.550243 | 0.484754 | 0.424463 | 0.395357 | 0.375606 | 0.361746 | 0 | 0.012479 | 0.302887 | 5,058 | 182 | 74 | 27.791209 | 0.806012 | 0 | 0 | 0.425806 | 0 | 0 | 0.062475 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077419 | false | 0 | 0.051613 | 0.012903 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b1edafe645cabe70b17100f13c91eeb991c939b | 1,694 | py | Python | gubernator/update_config.py | rodtreweek/test-infra | 5f2755189ee01b13ff4da26cbf70d0582545868a | [
"Apache-2.0"
] | 6 | 2018-01-31T07:36:42.000Z | 2019-06-17T21:47:39.000Z | gubernator/update_config.py | rodtreweek/test-infra | 5f2755189ee01b13ff4da26cbf70d0582545868a | [
"Apache-2.0"
] | 67 | 2017-07-14T08:18:28.000Z | 2020-11-23T08:59:51.000Z | gubernator/update_config.py | Acidburn0zzz/test-infra | ad19d04798049201a82c70639900bba593e740d6 | [
"Apache-2.0"
] | 8 | 2017-08-15T12:37:14.000Z | 2021-08-23T17:52:37.000Z | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Updates the Gubernator configuration from the Prow configuration."""
import argparse
import yaml
def main(prow_config, gubernator_config):
with open(prow_config) as prow_file:
prow_data = yaml.load(prow_file)
default_presubmits = []
for job in prow_data['presubmits']['kubernetes/kubernetes']:
if job.get('always_run'):
default_presubmits.append(job['name'])
with open(gubernator_config) as gubernator_file:
gubernator_data = yaml.load(gubernator_file)
gubernator_data['jobs']['kubernetes-jenkins/pr-logs/directory/'] = default_presubmits
with open(gubernator_config, 'w+') as gubernator_file:
yaml.dump(gubernator_data, gubernator_file, default_flow_style=False, explicit_start=True)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('prow_config', help="Path to Prow configuration YAML.")
PARSER.add_argument('gubernator_config', help="Path to Gubernator configuration YAML.")
ARGS = PARSER.parse_args()
main(ARGS.prow_config, ARGS.gubernator_config)
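# Example invocation (paths are assumptions):
#
#     ./update_config.py prow/config.yaml gubernator/config.yaml
#
# Every job under presubmits['kubernetes/kubernetes'] with always_run set is
# collected and written to jobs['kubernetes-jenkins/pr-logs/directory/'] in
# the Gubernator YAML. Note that yaml.load() without an explicit Loader is
# deprecated in newer PyYAML releases; yaml.safe_load() is the usual
# replacement if this script is modernized.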
| 37.644444 | 98 | 0.743211 | 229 | 1,694 | 5.331878 | 0.484716 | 0.04914 | 0.021294 | 0.026208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005634 | 0.161747 | 1,694 | 44 | 99 | 38.5 | 0.854225 | 0.381936 | 0 | 0 | 0 | 0 | 0.188716 | 0.05642 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b1fa845a5312447d4861e8c9f56fdff7415fd4e | 3,865 | py | Python | rlcard/envs/tienlen.py | xiviu123/rlcard | 2a5273dff6c9dd49a3d4ab84a952fed9a387955b | [
"MIT"
] | null | null | null | rlcard/envs/tienlen.py | xiviu123/rlcard | 2a5273dff6c9dd49a3d4ab84a952fed9a387955b | [
"MIT"
] | null | null | null | rlcard/envs/tienlen.py | xiviu123/rlcard | 2a5273dff6c9dd49a3d4ab84a952fed9a387955b | [
"MIT"
] | null | null | null | from collections import OrderedDict
from rlcard.envs import Env
import numpy as np
from rlcard.games.tienlen.game import TienlenGame as Game
from rlcard.games.tienlen.utils import encode_cards, encode_players_round_active, get_one_hot_array
class TienlenEnv(Env):
def __init__(self, config):
self.name = 'tienlen'
self.game = Game()
super().__init__(config)
self.state_shape = [[100] for _ in range(self.num_players)]
self.action_shape = [[54] for _ in range(self.num_players)]
def _extract_state(self, state):
        '''
        Encode the raw state into a flat observation vector.

        The observation is the concatenation of (in this order):
            current_hand
            current_played_cards
            players_round_active
            unknown_cards
            up_opponent_played_cards
            up_opponent_num_cards_left
            down_opponent_played_cards
            down_opponent_num_cards_left
            op_opponent_played_cards
            op_opponent_num_cards_left
        The down-opponent entries are included only for three or more players,
        and the opposite-opponent entries only for four players.
        '''
player_id = state['player_id']
current_hand = encode_cards(state['current_hand'])
players_round_active = encode_players_round_active(state['players_round_active'])
unknown_cards = encode_cards(state['unknown_cards'])
num_cards_left = state['num_cards_left']
played_cards = state['played_cards']
current_played_cards = encode_cards(played_cards[player_id])
up_opponent_id = ( player_id + 1) % self.game.num_players
up_opponent_played_cards = encode_cards(played_cards[up_opponent_id])
up_opponent_num_cards_left = get_one_hot_array(num_cards_left[up_opponent_id], 13)
if self.game.num_players >= 2:
down_opponent_id = self.game.num_players - player_id - 1
down_opponent_played_cards = encode_cards(played_cards[down_opponent_id])
down_opponent_num_cards_left = get_one_hot_array(num_cards_left[down_opponent_id], 13)
if self.game.num_players == 4:
op_opponent_id = ( player_id + 2) % self.game.num_players
op_opponent_played_cards = encode_cards(played_cards[op_opponent_id])
op_opponent_num_cards_left = get_one_hot_array(num_cards_left[op_opponent_id], 13)
if self.game.num_players == 2:
obs = np.concatenate((current_hand,
current_played_cards,
players_round_active,
unknown_cards,
up_opponent_played_cards,
up_opponent_num_cards_left))
elif self.game.num_players == 3:
obs = np.concatenate((current_hand,
current_played_cards,
players_round_active,
unknown_cards,
up_opponent_played_cards,
up_opponent_num_cards_left,
down_opponent_played_cards,
down_opponent_num_cards_left))
elif self.game.num_players == 4:
obs = np.concatenate((current_hand,
current_played_cards,
players_round_active,
unknown_cards,
up_opponent_played_cards,
up_opponent_num_cards_left,
down_opponent_played_cards,
down_opponent_num_cards_left,
op_opponent_played_cards,
op_opponent_num_cards_left))
legal_actions = OrderedDict({action_id: None for action_id in state['actions']})
extracted_state = OrderedDict({'obs': obs, 'legal_actions': legal_actions})
extracted_state['raw_obs'] = state
extracted_state['raw_legal_actions'] = list(legal_actions.keys())
return extracted_state
def _decode_action(self, action):
return action
def get_payoffs(self):
''' Get the payoffs of players. Must be implemented in the child class.
Returns:
payoffs (list): a list of payoffs for each player
'''
return self.game.judger.get_payoffs()
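# Minimal usage sketch (assumes this environment is registered under the name
# 'tienlen' in rlcard's env registry, which is not shown in this file):
#
#     import random
#     import rlcard
#     env = rlcard.make('tienlen')
#     state, player_id = env.reset()
#     action = random.choice(list(state['legal_actions'].keys()))
#     # env.step() advances the game and returns the next state and player.
#     next_state, next_player_id = env.step(action)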
| 37.163462 | 99 | 0.656662 | 478 | 3,865 | 4.841004 | 0.171548 | 0.109334 | 0.088159 | 0.103717 | 0.524201 | 0.504322 | 0.469317 | 0.416162 | 0.402334 | 0.354797 | 0 | 0.006754 | 0.272186 | 3,865 | 103 | 100 | 37.524272 | 0.815855 | 0.093402 | 0 | 0.283582 | 0 | 0 | 0.040156 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.074627 | 0.014925 | 0.19403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bc2141debdb3afcc0f8f3217d1f2d229c76d00a | 50,869 | py | Python | vbox/src/VBox/ValidationKit/testmanager/webui/wuitestresult.py | Nurzamal/rest_api_docker | a9cc01dfc235467d490d9663755b33ef6990bdd8 | [
"MIT"
] | null | null | null | vbox/src/VBox/ValidationKit/testmanager/webui/wuitestresult.py | Nurzamal/rest_api_docker | a9cc01dfc235467d490d9663755b33ef6990bdd8 | [
"MIT"
] | null | null | null | vbox/src/VBox/ValidationKit/testmanager/webui/wuitestresult.py | Nurzamal/rest_api_docker | a9cc01dfc235467d490d9663755b33ef6990bdd8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# $Id: wuitestresult.py 69111 2017-10-17 14:26:02Z vboxsync $
"""
Test Manager WUI - Test Results.
"""
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 69111 $"
# Python imports.
import datetime;
# Validation Kit imports.
from testmanager.webui.wuicontentbase import WuiContentBase, WuiListContentBase, WuiHtmlBase, WuiTmLink, WuiLinkBase, \
WuiSvnLink, WuiSvnLinkWithTooltip, WuiBuildLogLink, WuiRawHtml, \
WuiHtmlKeeper;
from testmanager.webui.wuimain import WuiMain;
from testmanager.webui.wuihlpform import WuiHlpForm;
from testmanager.webui.wuiadminfailurereason import WuiFailureReasonAddLink, WuiFailureReasonDetailsLink;
from testmanager.webui.wuitestresultfailure import WuiTestResultFailureDetailsLink;
from testmanager.core.failurereason import FailureReasonData, FailureReasonLogic;
from testmanager.core.report import ReportGraphModel, ReportModelBase;
from testmanager.core.testbox import TestBoxData;
from testmanager.core.testcase import TestCaseData;
from testmanager.core.testset import TestSetData;
from testmanager.core.testgroup import TestGroupData;
from testmanager.core.testresultfailures import TestResultFailureData;
from testmanager.core.build import BuildData;
from testmanager.core import db;
from testmanager import config;
from common import webutils, utils;
class WuiTestSetLink(WuiTmLink):
""" Test set link. """
def __init__(self, idTestSet, sName = WuiContentBase.ksShortDetailsLink, fBracketed = False):
WuiTmLink.__init__(self, sName, WuiMain.ksScriptName,
{ WuiMain.ksParamAction: WuiMain.ksActionTestResultDetails,
TestSetData.ksParam_idTestSet: idTestSet, }, fBracketed = fBracketed);
self.idTestSet = idTestSet;
class WuiTestResult(WuiContentBase):
"""Display test case result"""
def __init__(self, fnDPrint = None, oDisp = None):
WuiContentBase.__init__(self, fnDPrint = fnDPrint, oDisp = oDisp);
# Cyclic import hacks.
from testmanager.webui.wuiadmin import WuiAdmin;
self.oWuiAdmin = WuiAdmin;
def _toHtml(self, oObject):
"""Translate some object to HTML."""
if isinstance(oObject, WuiHtmlBase):
return oObject.toHtml();
if db.isDbTimestamp(oObject):
return webutils.escapeElem(self.formatTsShort(oObject));
if db.isDbInterval(oObject):
return webutils.escapeElem(self.formatIntervalShort(oObject));
if utils.isString(oObject):
return webutils.escapeElem(oObject);
return webutils.escapeElem(str(oObject));
def _htmlTable(self, aoTableContent):
"""Generate HTML code for table"""
sHtml = u' <table class="tmtbl-testresult-details" width="100%%">\n';
for aoSubRows in aoTableContent:
if not aoSubRows:
                continue; # Can happen if there is no test suite.
oCaption = aoSubRows[0];
sHtml += u' \n' \
u' <tr class="tmtbl-result-details-caption">\n' \
u' <td colspan="2">%s</td>\n' \
u' </tr>\n' \
% (self._toHtml(oCaption),);
iRow = 0;
for aoRow in aoSubRows[1:]:
iRow += 1;
sHtml += u' <tr class="%s">\n' % ('tmodd' if iRow & 1 else 'tmeven',);
if len(aoRow) == 1:
sHtml += u' <td class="tmtbl-result-details-subcaption" colspan="2">%s</td>\n' \
% (self._toHtml(aoRow[0]),);
else:
sHtml += u' <th scope="row">%s</th>\n' % (webutils.escapeElem(aoRow[0]),);
if len(aoRow) > 2:
sHtml += u' <td>%s</td>\n' % (aoRow[2](aoRow[1]),);
else:
sHtml += u' <td>%s</td>\n' % (self._toHtml(aoRow[1]),);
sHtml += u' </tr>\n';
sHtml += u' </table>\n';
return sHtml
def _highlightStatus(self, sStatus):
"""Return sStatus string surrounded by HTML highlight code """
sTmp = '<font color=%s><b>%s</b></font>' \
% ('red' if sStatus == 'failure' else 'green', webutils.escapeElem(sStatus.upper()))
return sTmp
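    # For example (illustrative): self._highlightStatus('failure') yields
    # '<font color=red><b>FAILURE</b></font>', while any other status is
    # rendered in green.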
def _anchorAndAppendBinaries(self, sBinaries, aoRows):
""" Formats each binary (if any) into a row with a download link. """
if sBinaries is not None:
for sBinary in sBinaries.split(','):
if not webutils.hasSchema(sBinary):
sBinary = config.g_ksBuildBinUrlPrefix + sBinary;
aoRows.append([WuiLinkBase(webutils.getFilename(sBinary), sBinary, fBracketed = False),]);
return aoRows;
def _formatEventTimestampHtml(self, tsEvent, tsLog, idEvent, oTestSet):
""" Formats an event timestamp with a main log link. """
tsEvent = db.dbTimestampToZuluDatetime(tsEvent);
#sFormattedTimestamp = u'%04u\u2011%02u\u2011%02u\u00a0%02u:%02u:%02uZ' \
# % ( tsEvent.year, tsEvent.month, tsEvent.day,
# tsEvent.hour, tsEvent.minute, tsEvent.second,);
sFormattedTimestamp = u'%02u:%02u:%02uZ' \
% ( tsEvent.hour, tsEvent.minute, tsEvent.second,);
sTitle = u'#%u - %04u\u2011%02u\u2011%02u\u00a0%02u:%02u:%02u.%06uZ' \
% ( idEvent, tsEvent.year, tsEvent.month, tsEvent.day,
tsEvent.hour, tsEvent.minute, tsEvent.second, tsEvent.microsecond, );
tsLog = db.dbTimestampToZuluDatetime(tsLog);
sFragment = u'%02u_%02u_%02u_%06u' % ( tsLog.hour, tsLog.minute, tsLog.second, tsLog.microsecond);
return WuiTmLink(sFormattedTimestamp, '',
{ WuiMain.ksParamAction: WuiMain.ksActionViewLog,
WuiMain.ksParamLogSetId: oTestSet.idTestSet, },
sFragmentId = sFragment, sTitle = sTitle, fBracketed = False, ).toHtml();
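    # For example (illustrative): a log timestamp of 12:34:56.123456Z produces
    # the fragment id '12_34_56_123456', which is passed as the URL fragment
    # of the main-log link returned above.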
def _recursivelyGenerateEvents(self, oTestResult, sParentName, sLineage, iRow,
iFailure, oTestSet, iDepth): # pylint: disable=R0914
"""
Recursively generate event table rows for the result set.
oTestResult is an object of the type TestResultDataEx.
"""
# Hack: Replace empty outer test result name with (pretty) command line.
if iRow == 1:
sName = '';
sDisplayName = sParentName;
else:
sName = oTestResult.sName if sParentName == '' else '%s, %s' % (sParentName, oTestResult.sName,);
sDisplayName = webutils.escapeElem(sName);
# Format error count.
sErrCnt = '';
if oTestResult.cErrors > 0:
sErrCnt = ' (1 error)' if oTestResult.cErrors == 1 else ' (%d errors)' % oTestResult.cErrors;
# Format bits for adding or editing the failure reason. Level 0 is handled at the top of the page.
sChangeReason = '';
if oTestResult.cErrors > 0 and iDepth > 0 and self._oDisp is not None and not self._oDisp.isReadOnlyUser():
dTmp = {
self._oDisp.ksParamAction: self._oDisp.ksActionTestResultFailureAdd if oTestResult.oReason is None else
self._oDisp.ksActionTestResultFailureEdit,
TestResultFailureData.ksParam_idTestResult: oTestResult.idTestResult,
};
sChangeReason = ' <a href="?%s" class="tmtbl-edit-reason" onclick="addRedirectToAnchorHref(this)">%s</a> ' \
% ( webutils.encodeUrlParams(dTmp), WuiContentBase.ksShortEditLinkHtml );
        # Format the include-in-graph checkboxes.
sLineage += ':%u' % (oTestResult.idStrName,);
sResultGraph = '<input type="checkbox" name="%s" value="%s%s" title="Include result in graph."/>' \
% (WuiMain.ksParamReportSubjectIds, ReportGraphModel.ksTypeResult, sLineage,);
sElapsedGraph = '';
if oTestResult.tsElapsed is not None:
sElapsedGraph = '<input type="checkbox" name="%s" value="%s%s" title="Include elapsed time in graph."/>' \
% ( WuiMain.ksParamReportSubjectIds, ReportGraphModel.ksTypeElapsed, sLineage);
if not oTestResult.aoChildren \
and len(oTestResult.aoValues) + len(oTestResult.aoMsgs) + len(oTestResult.aoFiles) == 0:
# Leaf - single row.
tsEvent = oTestResult.tsCreated;
if oTestResult.tsElapsed is not None:
tsEvent += oTestResult.tsElapsed;
sHtml = ' <tr class="%s tmtbl-events-leaf tmtbl-events-lvl%s tmstatusrow-%s" id="S%u">\n' \
' <td id="E%u">%s</td>\n' \
' <td>%s</td>\n' \
' <td>%s</td>\n' \
' <td>%s</td>\n' \
' <td colspan="2"%s>%s%s%s</td>\n' \
' <td>%s</td>\n' \
' </tr>\n' \
% ( 'tmodd' if iRow & 1 else 'tmeven', iDepth, oTestResult.enmStatus, oTestResult.idTestResult,
oTestResult.idTestResult,
self._formatEventTimestampHtml(tsEvent, oTestResult.tsCreated, oTestResult.idTestResult, oTestSet),
sElapsedGraph,
webutils.escapeElem(self.formatIntervalShort(oTestResult.tsElapsed)) if oTestResult.tsElapsed is not None
else '',
sDisplayName,
' id="failure-%u"' % (iFailure,) if oTestResult.isFailure() else '',
webutils.escapeElem(oTestResult.enmStatus), webutils.escapeElem(sErrCnt),
sChangeReason if oTestResult.oReason is None else '',
sResultGraph );
iRow += 1;
else:
# Multiple rows.
sHtml = ' <tr class="%s tmtbl-events-first tmtbl-events-lvl%s ">\n' \
' <td>%s</td>\n' \
' <td></td>\n' \
' <td></td>\n' \
' <td>%s</td>\n' \
' <td colspan="2">%s</td>\n' \
' <td></td>\n' \
' </tr>\n' \
% ( 'tmodd' if iRow & 1 else 'tmeven', iDepth,
self._formatEventTimestampHtml(oTestResult.tsCreated, oTestResult.tsCreated,
oTestResult.idTestResult, oTestSet),
sDisplayName,
'running' if oTestResult.tsElapsed is None else '', );
iRow += 1;
            # Recurse into the children. Check if our error count is just reflecting that of our children.
cErrorsBelow = 0;
for oChild in oTestResult.aoChildren:
(sChildHtml, iRow, iFailure) = self._recursivelyGenerateEvents(oChild, sName, sLineage,
iRow, iFailure, oTestSet, iDepth + 1);
sHtml += sChildHtml;
cErrorsBelow += oChild.cErrors;
# Messages.
for oMsg in oTestResult.aoMsgs:
sHtml += ' <tr class="%s tmtbl-events-message tmtbl-events-lvl%s">\n' \
' <td>%s</td>\n' \
' <td></td>\n' \
' <td></td>\n' \
' <td colspan="3">%s: %s</td>\n' \
' <td></td>\n' \
' </tr>\n' \
% ( 'tmodd' if iRow & 1 else 'tmeven', iDepth,
self._formatEventTimestampHtml(oMsg.tsCreated, oMsg.tsCreated, oMsg.idTestResultMsg, oTestSet),
webutils.escapeElem(oMsg.enmLevel),
webutils.escapeElem(oMsg.sMsg), );
iRow += 1;
# Values.
for oValue in oTestResult.aoValues:
sHtml += ' <tr class="%s tmtbl-events-value tmtbl-events-lvl%s">\n' \
' <td>%s</td>\n' \
' <td></td>\n' \
' <td></td>\n' \
' <td>%s</td>\n' \
' <td class="tmtbl-events-number">%s</td>\n' \
' <td class="tmtbl-events-unit">%s</td>\n' \
' <td><input type="checkbox" name="%s" value="%s%s:%u" title="Include value in graph."></td>\n' \
' </tr>\n' \
% ( 'tmodd' if iRow & 1 else 'tmeven', iDepth,
self._formatEventTimestampHtml(oValue.tsCreated, oValue.tsCreated, oValue.idTestResultValue, oTestSet),
webutils.escapeElem(oValue.sName),
utils.formatNumber(oValue.lValue).replace(' ', ' '),
webutils.escapeElem(oValue.sUnit),
WuiMain.ksParamReportSubjectIds, ReportGraphModel.ksTypeValue, sLineage, oValue.idStrName, );
iRow += 1;
# Files.
for oFile in oTestResult.aoFiles:
if oFile.sMime in [ 'text/plain', ]:
aoLinks = [
WuiTmLink('%s (%s)' % (oFile.sFile, oFile.sKind), '',
{ self._oDisp.ksParamAction: self._oDisp.ksActionViewLog,
self._oDisp.ksParamLogSetId: oTestSet.idTestSet,
self._oDisp.ksParamLogFileId: oFile.idTestResultFile, },
sTitle = oFile.sDescription),
WuiTmLink('View Raw', '',
{ self._oDisp.ksParamAction: self._oDisp.ksActionGetFile,
self._oDisp.ksParamGetFileSetId: oTestSet.idTestSet,
self._oDisp.ksParamGetFileId: oFile.idTestResultFile,
self._oDisp.ksParamGetFileDownloadIt: False, },
sTitle = oFile.sDescription),
]
else:
aoLinks = [
WuiTmLink('%s (%s)' % (oFile.sFile, oFile.sKind), '',
{ self._oDisp.ksParamAction: self._oDisp.ksActionGetFile,
self._oDisp.ksParamGetFileSetId: oTestSet.idTestSet,
self._oDisp.ksParamGetFileId: oFile.idTestResultFile,
self._oDisp.ksParamGetFileDownloadIt: False, },
sTitle = oFile.sDescription),
]
aoLinks.append(WuiTmLink('Download', '',
{ self._oDisp.ksParamAction: self._oDisp.ksActionGetFile,
self._oDisp.ksParamGetFileSetId: oTestSet.idTestSet,
self._oDisp.ksParamGetFileId: oFile.idTestResultFile,
self._oDisp.ksParamGetFileDownloadIt: True, },
sTitle = oFile.sDescription));
sHtml += ' <tr class="%s tmtbl-events-file tmtbl-events-lvl%s">\n' \
' <td>%s</td>\n' \
' <td></td>\n' \
' <td></td>\n' \
' <td>%s</td>\n' \
' <td></td>\n' \
' <td></td>\n' \
' <td></td>\n' \
' </tr>\n' \
% ( 'tmodd' if iRow & 1 else 'tmeven', iDepth,
self._formatEventTimestampHtml(oFile.tsCreated, oFile.tsCreated, oFile.idTestResultFile, oTestSet),
'\n'.join(oLink.toHtml() for oLink in aoLinks),);
iRow += 1;
# Done?
if oTestResult.tsElapsed is not None:
tsEvent = oTestResult.tsCreated + oTestResult.tsElapsed;
sHtml += ' <tr class="%s tmtbl-events-final tmtbl-events-lvl%s tmstatusrow-%s" id="E%d">\n' \
' <td>%s</td>\n' \
' <td>%s</td>\n' \
' <td>%s</td>\n' \
' <td>%s</td>\n' \
' <td colspan="2"%s>%s%s%s</td>\n' \
' <td>%s</td>\n' \
' </tr>\n' \
% ( 'tmodd' if iRow & 1 else 'tmeven', iDepth, oTestResult.enmStatus, oTestResult.idTestResult,
self._formatEventTimestampHtml(tsEvent, tsEvent, oTestResult.idTestResult, oTestSet),
sElapsedGraph,
webutils.escapeElem(self.formatIntervalShort(oTestResult.tsElapsed)),
sDisplayName,
' id="failure-%u"' % (iFailure,) if oTestResult.isFailure() else '',
webutils.escapeElem(oTestResult.enmStatus), webutils.escapeElem(sErrCnt),
sChangeReason if cErrorsBelow < oTestResult.cErrors and oTestResult.oReason is None else '',
sResultGraph);
iRow += 1;
# Failure reason.
if oTestResult.oReason is not None:
sReasonText = '%s / %s' % ( oTestResult.oReason.oFailureReason.oCategory.sShort,
oTestResult.oReason.oFailureReason.sShort, );
sCommentHtml = '';
if oTestResult.oReason.sComment and oTestResult.oReason.sComment.strip():
sCommentHtml = '<br>' + webutils.escapeElem(oTestResult.oReason.sComment.strip());
sCommentHtml = sCommentHtml.replace('\n', '<br>');
sDetailedReason = ' <a href="?%s" class="tmtbl-show-reason">%s</a>' \
% ( webutils.encodeUrlParams({ self._oDisp.ksParamAction:
self._oDisp.ksActionTestResultFailureDetails,
TestResultFailureData.ksParam_idTestResult:
oTestResult.idTestResult,}),
WuiContentBase.ksShortDetailsLinkHtml,);
sHtml += ' <tr class="%s tmtbl-events-reason tmtbl-events-lvl%s">\n' \
' <td>%s</td>\n' \
' <td colspan="2">%s</td>\n' \
' <td colspan="3">%s%s%s%s</td>\n' \
' <td>%s</td>\n' \
' </tr>\n' \
% ( 'tmodd' if iRow & 1 else 'tmeven', iDepth,
webutils.escapeElem(self.formatTsShort(oTestResult.oReason.tsEffective)),
oTestResult.oReason.oAuthor.sUsername,
webutils.escapeElem(sReasonText), sDetailedReason, sChangeReason,
sCommentHtml,
'todo');
iRow += 1;
if oTestResult.isFailure():
iFailure += 1;
return (sHtml, iRow, iFailure);
def _generateMainReason(self, oTestResultTree, oTestSet):
"""
Generates the form for displaying and updating the main failure reason.
oTestResultTree is an instance TestResultDataEx.
oTestSet is an instance of TestSetData.
"""
_ = oTestSet;
sHtml = ' ';
if oTestResultTree.isFailure() or oTestResultTree.cErrors > 0:
sHtml += ' <h2>Failure Reason:</h2>\n';
oData = oTestResultTree.oReason;
# We need the failure reasons for the combobox.
aoFailureReasons = FailureReasonLogic(self._oDisp.getDb()).fetchForCombo('Test Sheriff, you figure out why!');
assert aoFailureReasons;
# For now we'll use the standard form helper.
sFormActionUrl = '%s?%s=%s' % ( self._oDisp.ksScriptName, self._oDisp.ksParamAction,
WuiMain.ksActionTestResultFailureAddPost if oData is None else
WuiMain.ksActionTestResultFailureEditPost )
fReadOnly = not self._oDisp or self._oDisp.isReadOnlyUser();
oForm = WuiHlpForm('failure-reason', sFormActionUrl,
sOnSubmit = WuiHlpForm.ksOnSubmit_AddReturnToFieldWithCurrentUrl, fReadOnly = fReadOnly);
oForm.addTextHidden(TestResultFailureData.ksParam_idTestResult, oTestResultTree.idTestResult);
oForm.addTextHidden(TestResultFailureData.ksParam_idTestSet, oTestSet.idTestSet);
if oData is not None:
oForm.addComboBox(TestResultFailureData.ksParam_idFailureReason, oData.idFailureReason, 'Reason',
aoFailureReasons,
sPostHtml = u' ' + WuiFailureReasonDetailsLink(oData.idFailureReason).toHtml()
+ (u' ' + WuiFailureReasonAddLink('New', fBracketed = False).toHtml()
if not fReadOnly else u''));
oForm.addMultilineText(TestResultFailureData.ksParam_sComment, oData.sComment, 'Comment')
oForm.addNonText(u'%s (%s), %s'
% ( oData.oAuthor.sUsername, oData.oAuthor.sUsername,
self.formatTsShort(oData.tsEffective),),
'Sheriff',
sPostHtml = ' ' + WuiTestResultFailureDetailsLink(oData.idTestResult, "Show Details").toHtml() )
oForm.addTextHidden(TestResultFailureData.ksParam_tsEffective, oData.tsEffective);
oForm.addTextHidden(TestResultFailureData.ksParam_tsExpire, oData.tsExpire);
oForm.addTextHidden(TestResultFailureData.ksParam_uidAuthor, oData.uidAuthor);
oForm.addSubmit('Change Reason');
else:
oForm.addComboBox(TestResultFailureData.ksParam_idFailureReason, -1, 'Reason', aoFailureReasons,
sPostHtml = ' ' + WuiFailureReasonAddLink('New').toHtml() if not fReadOnly else '');
oForm.addMultilineText(TestResultFailureData.ksParam_sComment, '', 'Comment');
oForm.addTextHidden(TestResultFailureData.ksParam_tsEffective, '');
oForm.addTextHidden(TestResultFailureData.ksParam_tsExpire, '');
oForm.addTextHidden(TestResultFailureData.ksParam_uidAuthor, '');
oForm.addSubmit('Add Reason');
sHtml += oForm.finalize();
return sHtml;
def showTestCaseResultDetails(self, # pylint: disable=R0914,R0915
oTestResultTree,
oTestSet,
oBuildEx,
oValidationKitEx,
oTestBox,
oTestGroup,
oTestCaseEx,
oTestVarEx):
"""Show detailed result"""
def getTcDepsHtmlList(aoTestCaseData):
"""Get HTML <ul> list of Test Case name items"""
if aoTestCaseData:
sTmp = '<ul>'
for oTestCaseData in aoTestCaseData:
sTmp += '<li>%s</li>' % (webutils.escapeElem(oTestCaseData.sName),);
sTmp += '</ul>'
else:
sTmp = 'No items'
return sTmp
def getGrDepsHtmlList(aoGlobalResourceData):
"""Get HTML <ul> list of Global Resource name items"""
if aoGlobalResourceData:
sTmp = '<ul>'
for oGlobalResourceData in aoGlobalResourceData:
sTmp += '<li>%s</li>' % (webutils.escapeElem(oGlobalResourceData.sName),);
sTmp += '</ul>'
else:
sTmp = 'No items'
return sTmp
asHtml = []
from testmanager.webui.wuireport import WuiReportSummaryLink;
tsReportEffectiveDate = None;
if oTestSet.tsDone is not None:
tsReportEffectiveDate = oTestSet.tsDone + datetime.timedelta(days = 4);
if tsReportEffectiveDate >= self.getNowTs():
tsReportEffectiveDate = None;
# Test result + test set details.
aoResultRows = [
WuiHtmlKeeper([ WuiTmLink(oTestCaseEx.sName, self.oWuiAdmin.ksScriptName,
{ self.oWuiAdmin.ksParamAction: self.oWuiAdmin.ksActionTestCaseDetails,
TestCaseData.ksParam_idTestCase: oTestCaseEx.idTestCase,
self.oWuiAdmin.ksParamEffectiveDate: oTestSet.tsConfig, },
fBracketed = False),
WuiReportSummaryLink(ReportModelBase.ksSubTestCase, oTestCaseEx.idTestCase,
tsNow = tsReportEffectiveDate, fBracketed = False),
]),
];
if oTestCaseEx.sDescription:
aoResultRows.append([oTestCaseEx.sDescription,]);
aoResultRows.append([ 'Status:', WuiRawHtml('<span class="tmspan-status-%s">%s</span>'
% (oTestResultTree.enmStatus, oTestResultTree.enmStatus,))]);
if oTestResultTree.cErrors > 0:
aoResultRows.append(( 'Errors:', oTestResultTree.cErrors ));
aoResultRows.append([ 'Elapsed:', oTestResultTree.tsElapsed ]);
cSecCfgTimeout = oTestCaseEx.cSecTimeout if oTestVarEx.cSecTimeout is None else oTestVarEx.cSecTimeout;
cSecEffTimeout = cSecCfgTimeout * oTestBox.pctScaleTimeout / 100;
aoResultRows.append([ 'Timeout:',
'%s (%s sec)' % (utils.formatIntervalSeconds(cSecEffTimeout), cSecEffTimeout,) ]);
if cSecEffTimeout != cSecCfgTimeout:
aoResultRows.append([ 'Cfg Timeout:',
'%s (%s sec)' % (utils.formatIntervalSeconds(cSecCfgTimeout), cSecCfgTimeout,) ]);
aoResultRows += [
( 'Started:', WuiTmLink(self.formatTsShort(oTestSet.tsCreated), WuiMain.ksScriptName,
{ WuiMain.ksParamAction: WuiMain.ksActionResultsUnGrouped,
WuiMain.ksParamEffectiveDate: oTestSet.tsCreated, },
fBracketed = False) ),
];
if oTestSet.tsDone is not None:
aoResultRows += [ ( 'Done:',
WuiTmLink(self.formatTsShort(oTestSet.tsDone), WuiMain.ksScriptName,
{ WuiMain.ksParamAction: WuiMain.ksActionResultsUnGrouped,
WuiMain.ksParamEffectiveDate: oTestSet.tsDone, },
fBracketed = False) ) ];
else:
aoResultRows += [( 'Done:', 'Still running...')];
aoResultRows += [( 'Config:', oTestSet.tsConfig )];
if oTestVarEx.cGangMembers > 1:
aoResultRows.append([ 'Member No:', '#%s (of %s)' % (oTestSet.iGangMemberNo, oTestVarEx.cGangMembers) ]);
aoResultRows += [
( 'Test Group:',
WuiHtmlKeeper([ WuiTmLink(oTestGroup.sName, self.oWuiAdmin.ksScriptName,
{ self.oWuiAdmin.ksParamAction: self.oWuiAdmin.ksActionTestGroupDetails,
TestGroupData.ksParam_idTestGroup: oTestGroup.idTestGroup,
self.oWuiAdmin.ksParamEffectiveDate: oTestSet.tsConfig, },
fBracketed = False),
WuiReportSummaryLink(ReportModelBase.ksSubTestGroup, oTestGroup.idTestGroup,
tsNow = tsReportEffectiveDate, fBracketed = False),
]), ),
];
if oTestVarEx.sTestBoxReqExpr is not None:
aoResultRows.append([ 'TestBox reqs:', oTestVarEx.sTestBoxReqExpr ]);
elif oTestCaseEx.sTestBoxReqExpr is not None or oTestVarEx.sTestBoxReqExpr is not None:
aoResultRows.append([ 'TestBox reqs:', oTestCaseEx.sTestBoxReqExpr ]);
if oTestVarEx.sBuildReqExpr is not None:
aoResultRows.append([ 'Build reqs:', oTestVarEx.sBuildReqExpr ]);
elif oTestCaseEx.sBuildReqExpr is not None or oTestVarEx.sBuildReqExpr is not None:
aoResultRows.append([ 'Build reqs:', oTestCaseEx.sBuildReqExpr ]);
if oTestCaseEx.sValidationKitZips is not None and oTestCaseEx.sValidationKitZips != '@VALIDATIONKIT_ZIP@':
aoResultRows.append([ 'Validation Kit:', oTestCaseEx.sValidationKitZips ]);
if oTestCaseEx.aoDepTestCases:
aoResultRows.append([ 'Prereq. Test Cases:', oTestCaseEx.aoDepTestCases, getTcDepsHtmlList ]);
if oTestCaseEx.aoDepGlobalResources:
aoResultRows.append([ 'Global Resources:', oTestCaseEx.aoDepGlobalResources, getGrDepsHtmlList ]);
# Builds.
aoBuildRows = [];
if oBuildEx is not None:
aoBuildRows += [
WuiHtmlKeeper([ WuiTmLink('Build', self.oWuiAdmin.ksScriptName,
{ self.oWuiAdmin.ksParamAction: self.oWuiAdmin.ksActionBuildDetails,
BuildData.ksParam_idBuild: oBuildEx.idBuild,
self.oWuiAdmin.ksParamEffectiveDate: oTestSet.tsCreated, },
fBracketed = False),
WuiReportSummaryLink(ReportModelBase.ksSubBuild, oBuildEx.idBuild,
tsNow = tsReportEffectiveDate, fBracketed = False), ]),
];
self._anchorAndAppendBinaries(oBuildEx.sBinaries, aoBuildRows);
aoBuildRows += [
( 'Revision:', WuiSvnLinkWithTooltip(oBuildEx.iRevision, oBuildEx.oCat.sRepository,
fBracketed = False) ),
( 'Product:', oBuildEx.oCat.sProduct ),
( 'Branch:', oBuildEx.oCat.sBranch ),
( 'Type:', oBuildEx.oCat.sType ),
( 'Version:', oBuildEx.sVersion ),
( 'Created:', oBuildEx.tsCreated ),
];
if oBuildEx.uidAuthor is not None:
aoBuildRows += [ ( 'Author ID:', oBuildEx.uidAuthor ), ];
if oBuildEx.sLogUrl is not None:
aoBuildRows += [ ( 'Log:', WuiBuildLogLink(oBuildEx.sLogUrl, fBracketed = False) ), ];
aoValidationKitRows = [];
if oValidationKitEx is not None:
aoValidationKitRows += [
WuiTmLink('Validation Kit', self.oWuiAdmin.ksScriptName,
{ self.oWuiAdmin.ksParamAction: self.oWuiAdmin.ksActionBuildDetails,
BuildData.ksParam_idBuild: oValidationKitEx.idBuild,
self.oWuiAdmin.ksParamEffectiveDate: oTestSet.tsCreated, },
fBracketed = False),
];
self._anchorAndAppendBinaries(oValidationKitEx.sBinaries, aoValidationKitRows);
aoValidationKitRows += [ ( 'Revision:', WuiSvnLink(oValidationKitEx.iRevision, fBracketed = False) ) ];
if oValidationKitEx.oCat.sProduct != 'VBox TestSuite':
aoValidationKitRows += [ ( 'Product:', oValidationKitEx.oCat.sProduct ), ];
if oValidationKitEx.oCat.sBranch != 'trunk':
aoValidationKitRows += [ ( 'Product:', oValidationKitEx.oCat.sBranch ), ];
if oValidationKitEx.oCat.sType != 'release':
aoValidationKitRows += [ ( 'Type:', oValidationKitEx.oCat.sType), ];
if oValidationKitEx.sVersion != '0.0.0':
aoValidationKitRows += [ ( 'Version:', oValidationKitEx.sVersion ), ];
aoValidationKitRows += [
( 'Created:', oValidationKitEx.tsCreated ),
];
if oValidationKitEx.uidAuthor is not None:
aoValidationKitRows += [ ( 'Author ID:', oValidationKitEx.uidAuthor ), ];
if oValidationKitEx.sLogUrl is not None:
aoValidationKitRows += [ ( 'Log:', WuiBuildLogLink(oValidationKitEx.sLogUrl, fBracketed = False) ), ];
# TestBox.
aoTestBoxRows = [
WuiHtmlKeeper([ WuiTmLink(oTestBox.sName, self.oWuiAdmin.ksScriptName,
{ self.oWuiAdmin.ksParamAction: self.oWuiAdmin.ksActionTestBoxDetails,
TestBoxData.ksParam_idGenTestBox: oTestSet.idGenTestBox, },
fBracketed = False),
WuiReportSummaryLink(ReportModelBase.ksSubTestBox, oTestSet.idTestBox,
tsNow = tsReportEffectiveDate, fBracketed = False), ]),
];
if oTestBox.sDescription:
aoTestBoxRows.append([oTestBox.sDescription, ]);
aoTestBoxRows += [
( 'IP:', oTestBox.ip ),
#( 'UUID:', oTestBox.uuidSystem ),
#( 'Enabled:', oTestBox.fEnabled ),
#( 'Lom Kind:', oTestBox.enmLomKind ),
#( 'Lom IP:', oTestBox.ipLom ),
( 'OS/Arch:', '%s.%s' % (oTestBox.sOs, oTestBox.sCpuArch) ),
( 'OS Version:', oTestBox.sOsVersion ),
( 'CPUs:', oTestBox.cCpus ),
];
if oTestBox.sCpuName is not None:
aoTestBoxRows.append(['CPU Name', oTestBox.sCpuName.replace(' ', ' ')]);
if oTestBox.lCpuRevision is not None:
sMarch = oTestBox.queryCpuMicroarch();
if sMarch is not None:
aoTestBoxRows.append( ('CPU Microarch', sMarch) );
uFamily = oTestBox.getCpuFamily();
uModel = oTestBox.getCpuModel();
uStepping = oTestBox.getCpuStepping();
aoTestBoxRows += [
( 'CPU Family', '%u (%#x)' % ( uFamily, uFamily, ) ),
( 'CPU Model', '%u (%#x)' % ( uModel, uModel, ) ),
( 'CPU Stepping', '%u (%#x)' % ( uStepping, uStepping, ) ),
];
asFeatures = [ oTestBox.sCpuVendor, ];
if oTestBox.fCpuHwVirt is True: asFeatures.append(u'HW\u2011Virt');
if oTestBox.fCpuNestedPaging is True: asFeatures.append(u'Nested\u2011Paging');
if oTestBox.fCpu64BitGuest is True: asFeatures.append(u'64\u2011bit\u2011Guest');
if oTestBox.fChipsetIoMmu is True: asFeatures.append(u'I/O\u2011MMU');
aoTestBoxRows += [
( 'Features:', u' '.join(asFeatures) ),
( 'RAM size:', '%s MB' % (oTestBox.cMbMemory,) ),
( 'Scratch Size:', '%s MB' % (oTestBox.cMbScratch,) ),
( 'Scale Timeout:', '%s%%' % (oTestBox.pctScaleTimeout,) ),
( 'Script Rev:', WuiSvnLink(oTestBox.iTestBoxScriptRev, fBracketed = False) ),
( 'Python:', oTestBox.formatPythonVersion() ),
( 'Pending Command:', oTestBox.enmPendingCmd ),
];
aoRows = [
aoResultRows,
aoBuildRows,
aoValidationKitRows,
aoTestBoxRows,
];
asHtml.append(self._htmlTable(aoRows));
#
        # Convert the tree to a list of events, values, messages and files.
#
sHtmlEvents = '';
sHtmlEvents += '<table class="tmtbl-events" id="tmtbl-events" width="100%">\n';
sHtmlEvents += ' <tr class="tmheader">\n' \
' <th>When</th>\n' \
' <th></th>\n' \
' <th>Elapsed</th>\n' \
' <th>Event name</th>\n' \
' <th colspan="2">Value (status)</th>' \
' <th></th>\n' \
' </tr>\n';
sPrettyCmdLine = ' \\<br> \n'.join(webutils.escapeElem(oTestCaseEx.sBaseCmd
+ ' '
+ oTestVarEx.sArgs).split() );
(sTmp, _, cFailures) = self._recursivelyGenerateEvents(oTestResultTree, sPrettyCmdLine, '', 1, 0, oTestSet, 0);
sHtmlEvents += sTmp;
sHtmlEvents += '</table>\n'
#
# Put it all together.
#
sHtml = '<table class="tmtbl-testresult-details-base" width="100%">\n';
sHtml += ' <tr>\n'
sHtml += ' <td valign="top" width="20%%">\n%s\n</td>\n' % ' <br>\n'.join(asHtml);
sHtml += ' <td valign="top" width="80%" style="padding-left:6px">\n';
sHtml += self._generateMainReason(oTestResultTree, oTestSet);
sHtml += ' <h2>Events:</h2>\n';
sHtml += ' <form action="#" method="get" id="graph-form">\n' \
' <input type="hidden" name="%s" value="%s"/>\n' \
' <input type="hidden" name="%s" value="%u"/>\n' \
' <input type="hidden" name="%s" value="%u"/>\n' \
' <input type="hidden" name="%s" value="%u"/>\n' \
' <input type="hidden" name="%s" value="%u"/>\n' \
% ( WuiMain.ksParamAction, WuiMain.ksActionGraphWiz,
WuiMain.ksParamGraphWizTestBoxIds, oTestBox.idTestBox,
WuiMain.ksParamGraphWizBuildCatIds, oBuildEx.idBuildCategory,
WuiMain.ksParamGraphWizTestCaseIds, oTestSet.idTestCase,
WuiMain.ksParamGraphWizSrcTestSetId, oTestSet.idTestSet,
);
if oTestSet.tsDone is not None:
sHtml += ' <input type="hidden" name="%s" value="%s"/>\n' \
% ( WuiMain.ksParamEffectiveDate, oTestSet.tsDone, );
sHtml += ' <p>\n';
sFormButton = '<button type="submit" onclick="%s">Show graphs</button>' \
% ( webutils.escapeAttr('addDynamicGraphInputs("graph-form", "main", "%s", "%s");'
% (WuiMain.ksParamGraphWizWidth, WuiMain.ksParamGraphWizDpi, )) );
sHtml += ' ' + sFormButton + '\n';
sHtml += ' %s %s %s\n' \
% ( WuiTmLink('Log File', '',
{ WuiMain.ksParamAction: WuiMain.ksActionViewLog,
WuiMain.ksParamLogSetId: oTestSet.idTestSet,
}),
WuiTmLink('Raw Log', '',
{ WuiMain.ksParamAction: WuiMain.ksActionGetFile,
WuiMain.ksParamGetFileSetId: oTestSet.idTestSet,
WuiMain.ksParamGetFileDownloadIt: False,
}),
WuiTmLink('Download Log', '',
{ WuiMain.ksParamAction: WuiMain.ksActionGetFile,
WuiMain.ksParamGetFileSetId: oTestSet.idTestSet,
WuiMain.ksParamGetFileDownloadIt: True,
}),
);
sHtml += ' </p>\n';
if cFailures == 1:
sHtml += ' <p>%s</p>\n' % ( WuiTmLink('Jump to failure', '#failure-0'), )
elif cFailures > 1:
sHtml += ' <p>Jump to failure: ';
if cFailures <= 13:
for iFailure in range(0, cFailures):
sHtml += ' ' + WuiTmLink('#%u' % (iFailure,), '#failure-%u' % (iFailure,)).toHtml();
else:
for iFailure in range(0, 6):
sHtml += ' ' + WuiTmLink('#%u' % (iFailure,), '#failure-%u' % (iFailure,)).toHtml();
sHtml += ' ... ';
for iFailure in range(cFailures - 6, cFailures):
sHtml += ' ' + WuiTmLink('#%u' % (iFailure,), '#failure-%u' % (iFailure,)).toHtml();
sHtml += ' </p>\n';
sHtml += sHtmlEvents;
sHtml += ' <p>' + sFormButton + '</p>\n';
sHtml += ' </form>\n';
sHtml += ' </td>\n';
sHtml += ' </tr>\n';
sHtml += '</table>\n';
return ('Test Case result details', sHtml)
class WuiGroupedResultList(WuiListContentBase):
"""
WUI results content generator.
"""
def __init__(self, aoEntries, cEntriesCount, iPage, cItemsPerPage, tsEffective, fnDPrint, oDisp,
aiSelectedSortColumns = None):
"""Override initialization"""
WuiListContentBase.__init__(self, aoEntries, iPage, cItemsPerPage, tsEffective,
sTitle = 'Ungrouped (%d)' % cEntriesCount, sId = 'results',
fnDPrint = fnDPrint, oDisp = oDisp, aiSelectedSortColumns = aiSelectedSortColumns);
self._cEntriesCount = cEntriesCount
self._asColumnHeaders = [
'Start',
'Product Build',
'Kit',
'Box',
'OS.Arch',
'Test Case',
'Elapsed',
'Result',
'Reason',
];
self._asColumnAttribs = ['align="center"', 'align="center"', 'align="center"',
'align="center"', 'align="center"', 'align="center"',
'align="center"', 'align="center"', 'align="center"',
'align="center"', 'align="center"', 'align="center"',
'align="center"', ];
# Prepare parameter lists.
self._dTestBoxLinkParams = self._oDisp.getParameters();
self._dTestBoxLinkParams[WuiMain.ksParamAction] = WuiMain.ksActionResultsGroupedByTestBox;
self._dTestCaseLinkParams = self._oDisp.getParameters();
self._dTestCaseLinkParams[WuiMain.ksParamAction] = WuiMain.ksActionResultsGroupedByTestCase;
self._dRevLinkParams = self._oDisp.getParameters();
self._dRevLinkParams[WuiMain.ksParamAction] = WuiMain.ksActionResultsGroupedByBuildRev;
def _formatListEntry(self, iEntry):
"""
Format *show all* table entry
"""
        oEntry = self._aoEntries[iEntry];

        from testmanager.webui.wuiadmin import WuiAdmin;
        from testmanager.webui.wuireport import WuiReportSummaryLink;

        oValidationKit = None;
        if oEntry.idBuildTestSuite is not None:
            oValidationKit = WuiTmLink('r%s' % (oEntry.iRevisionTestSuite,),
                                       WuiAdmin.ksScriptName,
                                       { WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildDetails,
                                         BuildData.ksParam_idBuild: oEntry.idBuildTestSuite },
                                       fBracketed = False);

        aoTestSetLinks = [];
        aoTestSetLinks.append(WuiTmLink(oEntry.enmStatus,
                                        WuiMain.ksScriptName,
                                        { WuiMain.ksParamAction: WuiMain.ksActionTestResultDetails,
                                          TestSetData.ksParam_idTestSet: oEntry.idTestSet },
                                        fBracketed = False));
        if oEntry.cErrors > 0:
            aoTestSetLinks.append(WuiRawHtml('-'));
            aoTestSetLinks.append(WuiTmLink('%d error%s' % (oEntry.cErrors, '' if oEntry.cErrors == 1 else 's', ),
                                            WuiMain.ksScriptName,
                                            { WuiMain.ksParamAction: WuiMain.ksActionTestResultDetails,
                                              TestSetData.ksParam_idTestSet: oEntry.idTestSet },
                                            sFragmentId = 'failure-0', fBracketed = False));

        self._dTestBoxLinkParams[WuiMain.ksParamGroupMemberId] = oEntry.idTestBox;
        self._dTestCaseLinkParams[WuiMain.ksParamGroupMemberId] = oEntry.idTestCase;
        self._dRevLinkParams[WuiMain.ksParamGroupMemberId] = oEntry.iRevision;

        sTestBoxTitle = u'';
        if oEntry.sCpuVendor is not None:
            sTestBoxTitle += 'CPU vendor:\t%s\n' % ( oEntry.sCpuVendor, );
        if oEntry.sCpuName is not None:
            sTestBoxTitle += 'CPU name:\t%s\n' % ( ' '.join(oEntry.sCpuName.split()), );
        if oEntry.sOsVersion is not None:
            sTestBoxTitle += 'OS version:\t%s\n' % ( oEntry.sOsVersion, );
        asFeatures = [];
        if oEntry.fCpuHwVirt is True: asFeatures.append(u'HW\u2011Virt');
        if oEntry.fCpuNestedPaging is True: asFeatures.append(u'Nested\u2011Paging');
        if oEntry.fCpu64BitGuest is True: asFeatures.append(u'64\u2011bit\u2011Guest');
        #if oEntry.fChipsetIoMmu is True: asFeatures.append(u'I/O\u2011MMU');
        sTestBoxTitle += u'CPU features:\t' + u', '.join(asFeatures);

        # Testcase
        if oEntry.sSubName:
            sTestCaseName = '%s / %s' % (oEntry.sTestCaseName, oEntry.sSubName,);
        else:
            sTestCaseName = oEntry.sTestCaseName;

        # Reason:
        aoReasons = [];
        for oIt in oEntry.aoFailureReasons:
            sReasonTitle = 'Reason: \t%s\n' % ( oIt.oFailureReason.sShort, );
            sReasonTitle += 'Category:\t%s\n' % ( oIt.oFailureReason.oCategory.sShort, );
            sReasonTitle += 'Assigned:\t%s\n' % ( self.formatTsShort(oIt.tsFailureReasonAssigned), );
            sReasonTitle += 'By User: \t%s\n' % ( oIt.oFailureReasonAssigner.sUsername, );
            if oIt.sFailureReasonComment:
                sReasonTitle += 'Comment: \t%s\n' % ( oIt.sFailureReasonComment, );
            if oIt.oFailureReason.iTicket is not None and oIt.oFailureReason.iTicket > 0:
                sReasonTitle += 'xTracker:\t#%s\n' % ( oIt.oFailureReason.iTicket, );
            for i, sUrl in enumerate(oIt.oFailureReason.asUrls):
                sUrl = sUrl.strip();
                if sUrl:
                    sReasonTitle += 'URL#%u: \t%s\n' % ( i, sUrl, );
            aoReasons.append(WuiTmLink(oIt.oFailureReason.sShort, WuiAdmin.ksScriptName,
                                       { WuiAdmin.ksParamAction: WuiAdmin.ksActionFailureReasonDetails,
                                         FailureReasonData.ksParam_idFailureReason: oIt.oFailureReason.idFailureReason },
                                       sTitle = sReasonTitle));

        return [
            oEntry.tsCreated,
            [ WuiTmLink('%s %s (%s)' % (oEntry.sProduct, oEntry.sVersion, oEntry.sType,),
                        WuiMain.ksScriptName, self._dRevLinkParams, sTitle = '%s' % (oEntry.sBranch,), fBracketed = False),
              WuiSvnLinkWithTooltip(oEntry.iRevision, 'vbox'), ## @todo add sRepository TestResultListingData
              WuiTmLink(self.ksShortDetailsLink, WuiAdmin.ksScriptName,
                        { WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildDetails,
                          BuildData.ksParam_idBuild: oEntry.idBuild },
                        fBracketed = False),
            ],
            oValidationKit,
            [ WuiTmLink(oEntry.sTestBoxName, WuiMain.ksScriptName, self._dTestBoxLinkParams, fBracketed = False,
                        sTitle = sTestBoxTitle),
              WuiTmLink(self.ksShortDetailsLink, WuiAdmin.ksScriptName,
                        { WuiAdmin.ksParamAction: WuiAdmin.ksActionTestBoxDetails,
                          TestBoxData.ksParam_idTestBox: oEntry.idTestBox },
                        fBracketed = False),
              WuiReportSummaryLink(ReportModelBase.ksSubTestBox, oEntry.idTestBox, fBracketed = False), ],
            '%s.%s' % (oEntry.sOs, oEntry.sArch),
            [ WuiTmLink(sTestCaseName, WuiMain.ksScriptName, self._dTestCaseLinkParams, fBracketed = False,
                        sTitle = (oEntry.sBaseCmd + ' ' + oEntry.sArgs) if oEntry.sArgs else oEntry.sBaseCmd),
              WuiTmLink(self.ksShortDetailsLink, WuiAdmin.ksScriptName,
                        { WuiAdmin.ksParamAction: WuiAdmin.ksActionTestCaseDetails,
                          TestCaseData.ksParam_idTestCase: oEntry.idTestCase },
                        fBracketed = False),
              WuiReportSummaryLink(ReportModelBase.ksSubTestCase, oEntry.idTestCase, fBracketed = False), ],
            oEntry.tsElapsed,
            aoTestSetLinks,
            aoReasons
        ];
| 55.71632 | 130 | 0.521673 | 3,979 | 50,869 | 6.634079 | 0.187736 | 0.005342 | 0.00644 | 0.005455 | 0.337879 | 0.266962 | 0.223775 | 0.218585 | 0.186612 | 0.127742 | 0 | 0.007742 | 0.367768 | 50,869 | 912 | 131 | 55.777412 | 0.813034 | 0.043012 | 0 | 0.273224 | 0 | 0.010929 | 0.105962 | 0.018414 | 0 | 0 | 0 | 0.001096 | 0.001366 | 1 | 0.019126 | false | 0 | 0.028689 | 0 | 0.072404 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bc514a61a5a549beb8528bf4d495c4be510163e | 1,246 | py | Python | tests/tests.py | willmycroft/po10-api | 2b7839ed8404d2a482995cf251b5dd9dd5e7ceb7 | ["MIT"] | null | null | null | tests/tests.py | willmycroft/po10-api | 2b7839ed8404d2a482995cf251b5dd9dd5e7ceb7 | ["MIT"] | null | null | null | tests/tests.py | willmycroft/po10-api | 2b7839ed8404d2a482995cf251b5dd9dd5e7ceb7 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 18:32:47 2018
@author: pmp13wm
"""
import unittest
from poweroften import PowerOfTen

class TestStringMethods(unittest.TestCase):

    def test_search(self):
        po10 = PowerOfTen()
        df = po10.search('Will', 'Mycroft')
        self.assertTrue(7172 in df.index)

    def test_get_athlete(self):
        po10 = PowerOfTen()
        athlete_info, yearly_info, seasons_bests, results = po10.get_athlete(7172)
        self.assertEqual(athlete_info['Name'], 'William Mycroft')
        self.assertEqual(athlete_info['Gender'], 'Male')
        self.assertTrue('Oxford Uni' in yearly_info[2010]['clubs'])
        self.assertEqual(seasons_bests.loc['3000SC', '2017'], '9:01.89')
        self.assertEqual(results[results.MeetingId == 199052].iloc[0].Event, '3000SC')

    def test_get_rankings(self):
        po10 = PowerOfTen()
        df = po10.get_rankings('3000SC', 'SEN', 'M', 2017)
        self.assertEqual(df.loc[7172].Rank, 17)

    def test_get_results(self):
        po10 = PowerOfTen()
        df = po10.get_results(199052)['3000SC B']
        self.assertEqual(df[df.AthleteId == 7172].iloc[0].Perf, '9:01.89')


if __name__ == '__main__':
    unittest.main()
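
# Usage sketch (an editorial note, not part of the original file): with the
# `poweroften` package importable, the suite runs directly via the __main__ guard above:
#   python tests/tests.py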
| 31.948718 | 86 | 0.632424 | 154 | 1,246 | 4.954545 | 0.454545 | 0.117955 | 0.094364 | 0.078637 | 0.102228 | 0.070773 | 0 | 0 | 0 | 0 | 0 | 0.102987 | 0.220706 | 1,246 | 38 | 87 | 32.789474 | 0.682801 | 0.060995 | 0 | 0.16 | 0 | 0 | 0.095525 | 0 | 0 | 0 | 0 | 0 | 0.32 | 1 | 0.16 | false | 0 | 0.08 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bc6f4f3184c06b4722c241c5be229e97e002b91 | 5,297 | py | Python | utils/io_module.py | cioppaanthony/context-aware-loss | cc42187d49794d63845c4a277398cba094a52268 | ["Apache-2.0"] | 27 | 2020-03-19T16:09:44.000Z | 2022-01-03T07:26:44.000Z | utils/io_module.py | cioppaanthony/context-aware-loss | cc42187d49794d63845c4a277398cba094a52268 | ["Apache-2.0"] | 4 | 2020-03-20T06:01:17.000Z | 2021-03-26T14:37:04.000Z | utils/io_module.py | cioppaanthony/context-aware-loss | cc42187d49794d63845c4a277398cba094a52268 | ["Apache-2.0"] | 7 | 2020-04-24T03:24:25.000Z | 2021-02-08T07:24:32.000Z |
"""
----------------------------------------------------------------------------------------
Copyright (c) 2020 - see AUTHORS file
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
----------------------------------------------------------------------------------------
"""
import numpy as np
import json
import sys
import os
from tqdm import tqdm
import h5py
import time
import utils.constants as C
import utils.preprocessing
import random
import cv2
from utils.argument_parser import args

def readLabels(game_folder, sequence_length_first_half, sequence_length_second_half, framerate=2, num_classes=3):

    json_data = json.load(open(game_folder + C.LABEL_NAME))

    return utils.preprocessing.labelToCategorical(json_data, sequence_length_first_half, sequence_length_second_half, framerate, num_classes)


def readFeatures(game_folder, feature_type):

    feature_1 = None
    feature_2 = None

    if os.path.exists(game_folder + "/1_" + feature_type) and os.path.exists(game_folder + "/2_" + feature_type):
        feature_1 = np.load(game_folder + "/1_" + feature_type)
        feature_2 = np.load(game_folder + "/2_" + feature_type)
    else:
        print("Warning... missing at least one half of the game: ", game_folder)

    return feature_1, feature_2
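

# Minimal usage sketch (an editorial addition; the game path and feature-file name are
# assumptions, not files shipped with this repository):
#
#   game = "/path/to/SoccerNet/england_epl/2016-2017/some_game/"
#   feat_1, feat_2 = readFeatures(game, "ResNET.npy")   # loads 1_<feature_type> / 2_<feature_type>
#   if feat_1 is not None and feat_2 is not None:
#       labels_1, labels_2 = readLabels(game, feat_1.shape[0], feat_2.shape[0])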


class Dataset:
    """
    Dataset class

    This class deals with the loading of the dataset to be able to access the different elements.
    The code loads the features extracted from args.featurestype and returns it.
    It then creates the labels from the json files based on the number of frames where the features were extracted.
    If the preprocessed features are passed as argument, then it will simply load them. Otherwise, it loads everything
    from the original SoccerNet dataset.
    """

    def __init__(self, dataset_path, set_type, feature_type = None, framerate=2, num_classes=3):

        # Get the list of folders to read
        self.dataset_path = dataset_path
        self.datatype = set_type
        self.num_classes = num_classes
        self.input_shape = None
        self.framerate = framerate

        self.set_path = None
        self.game_list = None
        self.max_index = None
        self.next_index = 0
        self.max_index = 0

        self.features = list()
        self.labels = list()
        self.feature_type = feature_type

    def randomize(self):
        random.shuffle(self.game_list)
        self.next_index = 0

    def nextFeatures(self):

        # Get the features
        feature_1, feature_2 = readFeatures(self.dataset_path + self.game_list[self.next_index], self.feature_type)

        # Get the labels if the features for this game exist
        label_1, label_2 = (None, None)
        if feature_1 is not None and feature_2 is not None:
            label1, label2 = readLabels(self.dataset_path + self.game_list[self.next_index], feature_1.shape[0], feature_2.shape[0], self.framerate, self.num_classes)

            # Transform the labels to the Time Shift Encodings
            label_1 = utils.preprocessing.oneHotToShifts(label1, C.K_MATRIX)
            label_2 = utils.preprocessing.oneHotToShifts(label2, C.K_MATRIX)

        # Reading order management
        self.next_index += 1
        if self.next_index >= self.max_index:
            self.randomize()
            return feature_1, feature_2, label_1, label_2, False

        return feature_1, feature_2, label_1, label_2, True

    def storeFeatures(self):

        # Loading from the preprocessed .npy files if available (faster)
        file_path_features = self.dataset_path + self.datatype[0:-4] + "_" + args.featuretype[0:-4] + "_features.npy"
        file_path_labels = self.dataset_path + self.datatype[0:-4] + "_" + args.featuretype[0:-4] + "_labels.npy"

        if os.path.exists(file_path_features) and os.path.exists(file_path_labels):
            self.features = np.load(file_path_features, allow_pickle=True)
            self.labels = np.load(file_path_labels, allow_pickle=True)
            self.input_shape = (args.chunksize * args.framerate, self.features[0].shape[1], 1)
            return

        self.set_path = os.path.join(self.dataset_path + self.datatype)
        self.game_list = np.load(self.set_path)
        self.max_index = len(self.game_list)

        # Otherwise, load the dataset from the original SoccerNet
        ret = True
        pbar = tqdm(total=len(self.game_list))
        while ret:
            feature_1, feature_2, label_1, label_2, ret = self.nextFeatures()
            if feature_1 is not None:
                self.features.append(feature_1)
                self.labels.append(label_1)
            if feature_2 is not None:
                self.features.append(feature_2)
                self.labels.append(label_2)
            pbar.update(1)
        pbar.close()

        self.input_shape = (args.chunksize * args.framerate, self.features[0].shape[1], 1)

        self.features = np.array(self.features)
        self.labels = np.array(self.labels)

        # Save the preregistered features for faster loading next time
        # Check if the folder exists, otherwise create it
        #if not os.path.isdir(self.dataset_path + "/preregistered/"):
        #    os.mkdir(self.dataset_path + "/preregistered/")
        #np.save(file_path_features, self.features)
        #np.save(file_path_labels, self.labels)
| 35.07947 | 157 | 0.730036 | 794 | 5,297 | 4.693955 | 0.258186 | 0.023611 | 0.036222 | 0.021465 | 0.236383 | 0.16689 | 0.152401 | 0.134156 | 0.126643 | 0.056882 | 0 | 0.017043 | 0.147064 | 5,297 | 151 | 158 | 35.07947 | 0.80788 | 0.336039 | 0 | 0.05 | 0 | 0 | 0.025266 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.15 | 0 | 0.3 | 0.0125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bc71507643f71645ffd0790bda8c2ca6b565c64 | 5,007 | py | Python | backend/app.py | trunghc97/fa | 66ebf8c43c6023b5e0a4da2debc61f8d04b7ad5f | ["MIT"] | null | null | null | backend/app.py | trunghc97/fa | 66ebf8c43c6023b5e0a4da2debc61f8d04b7ad5f | ["MIT"] | 11 | 2021-03-10T00:56:18.000Z | 2022-03-31T00:15:29.000Z | backend/app.py | hct97/fa | 66ebf8c43c6023b5e0a4da2debc61f8d04b7ad5f | ["MIT"] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from flask import Flask
from flask import render_template, request, json
from flask_cors import CORS, cross_origin
from flask_sqlalchemy import SQLAlchemy
import tensorflow.compat.v1 as tf
import src.facenet
import pickle
import src.align.detect_face
import src.align_dataset_mtcnn
import src.classifier
import numpy as np
import cv2
import base64
import os
import pdb;
tf.disable_v2_behavior()
MINSIZE = 20
THRESHOLD = [0.6, 0.7, 0.7]
FACTOR = 0.709
IMAGE_SIZE = 182
INPUT_IMAGE_SIZE = 160
CLASSIFIER_PATH = 'Models/facemodel.pkl'
FACENET_MODEL_PATH = 'Models/20180402-114759.pb'
# Load The Custom Classifier
with open(CLASSIFIER_PATH, 'rb') as file:
    model, class_names = pickle.load(file)
print("Custom Classifier, Successfully loaded")
tf.Graph().as_default()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
# Load the model
print('Loading feature extraction model')
src.facenet.load_model(FACENET_MODEL_PATH)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
embedding_size = embeddings.get_shape()[1]
pnet, rnet, onet = src.align.detect_face.create_mtcnn(sess, "src/align")
app = Flask(__name__)
CORS(app)
@app.route('/')
@cross_origin()
def index():
    return "OK!"
@app.route('/attendances', methods=['POST'])
@cross_origin()
def upload_img_file():
    if request.method == 'POST':
        # base 64
        if 'image' in request.files:
            f = request.files['image'].read()
        else:
            f = request.form.get('image').split(',')[1]
            f = base64.b64decode(f)
        # w = int(request.form.get('w'))
        # h = int(request.form.get('h'))
        # decoded_string = base64.b64decode(f)
        frame = np.fromstring(f, dtype=np.uint8)
        # frame = frame.reshape(,,3)
        frame = cv2.imdecode(frame, cv2.IMREAD_ANYCOLOR)  # cv2.IMREAD_COLOR in OpenCV 3.1

        bounding_boxes, _ = src.align.detect_face.detect_face(frame, MINSIZE, pnet, rnet, onet, THRESHOLD, FACTOR)
        faces_found = bounding_boxes.shape[0]
        name = []
        if faces_found > 0:
            det = bounding_boxes[:, 0:4]
            bb = np.zeros((faces_found, 4), dtype=np.int32)
            for i in range(faces_found):
                bb[i][0] = det[i][0]
                bb[i][1] = det[i][1]
                bb[i][2] = det[i][2]
                bb[i][3] = det[i][3]

                # cropped = frame
                cropped = frame[bb[i][1]:bb[i][3], bb[i][0]:bb[i][2], :]
                if cropped.any():
                    scaled = cv2.resize(cropped, (INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE),
                                        interpolation=cv2.INTER_CUBIC)
                    scaled = src.facenet.prewhiten(scaled)
                    scaled_reshape = scaled.reshape(-1, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3)
                    feed_dict = {images_placeholder: scaled_reshape, phase_train_placeholder: False}
                    emb_array = sess.run(embeddings, feed_dict=feed_dict)
                    predictions = model.predict_proba(emb_array)
                    best_class_indices = np.argmax(predictions, axis=1)
                    best_class_probabilities = predictions[
                        np.arange(len(best_class_indices)), best_class_indices]
                    best_name = class_names[best_class_indices[0]]
                    print("Name: {}, Probability: {}".format(best_name, best_class_probabilities))

                    if best_class_probabilities > 0.6:
                        # name = class_names[best_class_indices[0]]
                        name.append(class_names[best_class_indices[0]])
                    else:
                        # name = "Unknown"
                        name.append("Unknown")

        return json_response(name)
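
# Request sketch (an editorial addition; host, port and file name are assumptions):
# '/attendances' accepts either a multipart upload in the 'image' field or a base64
# data-URI string in the 'image' form field, and replies with a JSON list of the
# recognised names (or "Unknown").
#
#   import requests
#   with open('face.jpg', 'rb') as fh:
#       r = requests.post('http://localhost:5000/attendances', files={'image': fh})
#   print(r.json())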
@app.route('/train', methods=['GET'])
@cross_origin()
def train_model():
    result = src.align_dataset_mtcnn.main()
    src.classifier.main()
    return json_response(result)
@app.route('/upload-images', methods=['POST'])
@cross_origin()
def upload_images():
    if request.method == "POST":
        files = request.files.getlist("image")
        userID = request.form.get('userID')
        folder = "Dataset/FaceData/raw/" + userID
        if not os.path.exists(folder):
            os.mkdir(folder)
        for file in files:
            file.save(os.path.join(folder, file.filename))
        return json_response("Upload success %d images" % (len(files)))


def json_response(payload, status=200):
    return json.dumps(payload), status, {'content-type': 'application/json'}


if __name__ == '__main__':
    app.run(debug=True)
| 34.770833 | 114 | 0.63591 | 647 | 5,007 | 4.690881 | 0.330757 | 0.026689 | 0.031631 | 0.017792 | 0.107084 | 0.107084 | 0.059308 | 0.03888 | 0.028336 | 0 | 0 | 0.024532 | 0.24286 | 5,007 | 143 | 115 | 35.013986 | 0.776049 | 0.061913 | 0 | 0.055556 | 0 | 0 | 0.075363 | 0.009821 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046296 | false | 0 | 0.166667 | 0.018519 | 0.259259 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |