| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
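For reference, a minimal sketch of how rows with this schema could be loaded and inspected. It assumes the records live in a local Parquet export; the file name `code_files.parquet` is a placeholder, not part of the dataset.

```python
# Minimal sketch: read rows that follow the schema above and inspect one record.
# Assumes a local Parquet export; "code_files.parquet" is a placeholder name.
import pandas as pd

df = pd.read_parquet("code_files.parquet")

row = df.iloc[0]
print(row["hexsha"], row["lang"], row["size"])
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["max_stars_repo_licenses"])
print(row["content"][:200])  # first 200 characters of the stored source file
```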
hexsha: 4a6b2e5b7cf0173afb424be4c44105af0dae9900 | size: 7,577 | ext: py | lang: Python
max_stars_repo: mozilla-releng/staging-mozilla-vpn-client @ f31d3762a607ccf2d7c6a016f7b800305fbf0113 | path: scripts/utils/import_languages.py | licenses: ["Apache-2.0"] | max_stars_count: null | stars_event_min/max_datetime: null / null
max_issues_repo: mozilla-releng/staging-mozilla-vpn-client @ f31d3762a607ccf2d7c6a016f7b800305fbf0113 | path: scripts/utils/import_languages.py | licenses: ["Apache-2.0"] | max_issues_count: null | issues_event_min/max_datetime: null / null
max_forks_repo: mozilla-releng/staging-mozilla-vpn-client @ f31d3762a607ccf2d7c6a016f7b800305fbf0113 | path: scripts/utils/import_languages.py | licenses: ["Apache-2.0"] | max_forks_count: null | forks_event_min/max_datetime: null / null
content:
#! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import xml.etree.ElementTree as ET
import os
import sys
import shutil
import atexit
import subprocess
# Use the project root as the working directory
prevdir = os.getcwd()
workdir = os.path.join(os.path.dirname(__file__), '..', '..')
os.chdir(workdir)
atexit.register(os.chdir, prevdir)
# Include only locales above this threshold (e.g. 70%) in production
l10n_threshold = 0.70
parser = argparse.ArgumentParser()
parser.add_argument(
'-m', '--macos', default=False, action="store_true", dest="ismacos",
help='Include the MacOS bundle data')
parser.add_argument(
'-q', '--qt_path', default=None, dest="qtpath",
help='The QT binary path. If not set, we try to guess.')
args = parser.parse_args()
stepnum = 1
def title(text):
global stepnum
print(f"\033[96m\033[1mStep {stepnum}\033[0m: \033[97m{text}\033[0m")
stepnum = stepnum+1
# Step 0
title("Find the Qt localization tools...")
def qtquery(qmake, propname):
try:
qtquery = os.popen(f'{qmake} -query {propname}')
qtpath = qtquery.read().strip()
if len(qtpath) > 0:
return qtpath
finally:
pass
return None
qtbinpath = args.qtpath
if qtbinpath is None:
qtbinpath = qtquery('qmake', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake6', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake5', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake-qt5', 'QT_INSTALL_BINS')
if qtbinpath is None:
print('Unable to locate qmake tool.')
sys.exit(1)
if not os.path.isdir(qtbinpath):
print(f"QT path is not a diretory: {qtbinpath}")
sys.exit(1)
lupdate = os.path.join(qtbinpath, 'lupdate')
lconvert = os.path.join(qtbinpath, 'lconvert')
lrelease = os.path.join(qtbinpath, 'lrelease')
# Step 0
# Let's update the i18n repo
os.system(f"git submodule init")
os.system(f"git submodule update --remote --depth 1 i18n")
# Step 1
# Go through the i18n repo, check each XLIFF file and take
# note which locale is complete above the minimum threshold.
# Adds path of .xliff and .ts to l10n_files.
title("Validate the XLIFF file...")
l10n_files = []
for locale in os.listdir('i18n'):
# Skip non folders
if not os.path.isdir(os.path.join('i18n', locale)):
continue
# Skip hidden folders
if locale.startswith('.'):
continue
xliff_path = os.path.join('i18n', locale, 'mozillavpn.xliff')
# If it's the source locale (en), ignore parsing for completeness and
# add it to the list.
if locale == 'en':
print(f'OK\t- en added (reference locale)')
l10n_files.append({
'locale': 'en',
'ts': os.path.join('translations', 'generated', 'mozillavpn_en.ts'),
'xliff': xliff_path
})
continue
tree = ET.parse(xliff_path)
root = tree.getroot()
sources = 0
translations = 0
for element in root.iter('{urn:oasis:names:tc:xliff:document:1.2}source'):
sources += 1
for element in root.iter('{urn:oasis:names:tc:xliff:document:1.2}target'):
translations += 1
completeness = translations/(sources*1.0)
# Ignore locale with less than 70% of completeness
if completeness < l10n_threshold:
print(f'KO\t- {locale} is translated at {round(completeness*100, 2)}%, at least {l10n_threshold*100}% is needed')
continue # Not enough translations next file please
print(f'OK\t- {locale} added ({round(completeness*100, 2)}% translated)')
l10n_files.append({
'locale': locale,
'ts': os.path.join('translations', 'generated', f'mozillavpn_{locale}.ts'),
'xliff': xliff_path
})
# Step 2
title("Create folders and localization files for the languages...")
for file in l10n_files:
locdirectory = os.path.join('translations', 'generated', file['locale'])
os.makedirs(locdirectory, exist_ok=True)
locversion = os.path.join(locdirectory, f'locversion.plist')
with open(locversion, 'w') as locversion_file:
locversion_file.write(f"""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\"
\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">
<plist version=\"1.0\">
<dict>
<key>LprojCompatibleVersion</key>
<string>123</string>
<key>LprojLocale</key>
<string>{file['locale']}</string>
<key>LprojRevisionLevel</key>
<string>1</string>
<key>LprojVersion</key>
<string>123</string>
</dict>
</plist>""")
with open(os.path.join('translations', 'generated', 'macos.pri'), 'w') as macospri:
macospri.write('### AUTOGENERATED! DO NOT EDIT!! ###\n')
for file in l10n_files:
macospri.write(f"LANGUAGES_FILES_{file['locale']}.files += $$PWD/{file['locale']}/locversion.plist\n")
macospri.write(f"LANGUAGES_FILES_{file['locale']}.path = Contents/Resources/{file['locale']}.lproj\n")
macospri.write(f"QMAKE_BUNDLE_DATA += LANGUAGES_FILES_{file['locale']}\n\n")
# Step 3
title("Write resource file to import the locales that are ready...")
with open('translations/generated/translations.qrc', 'w') as qrcfile:
qrcfile.write('<!-- AUTOGENERATED! DO NOT EDIT!! -->\n')
qrcfile.write('<RCC>\n')
qrcfile.write(' <qresource prefix="/i18n">\n')
for file in l10n_files:
qrcfile.write(f' <file>mozillavpn_{file["locale"]}.qm</file>\n')
qrcfile.write(' </qresource>\n')
qrcfile.write('</RCC>\n')
# Step 4
title("Generate the Js/C++ string definitions...")
try:
subprocess.call([sys.executable, os.path.join('scripts', 'utils', 'generate_strings.py'),
'-o', os.path.join('translations', 'generated'),
os.path.join('translations', 'strings.yaml')])
except Exception as e:
print("generate_strings.py failed. Try with:\n\tpip3 install -r requirements.txt --user")
print(e)
exit(1)
# Build a dummy project to glob together everything that might contain strings.
title("Scanning for new strings...")
def scan_sources(projfile, dirpath):
projfile.write(f"HEADERS += $$files({dirpath}/*.h, true)\n")
projfile.write(f"SOURCES += $$files({dirpath}/*.cpp, true)\n")
projfile.write(f"RESOURCES += $$files({dirpath}/*.qrc, true)\n\n")
with open('translations/generated/dummy.pro', 'w') as dummyproj:
dummyproj.write('### AUTOGENERATED! DO NOT EDIT!! ###\n')
dummyproj.write(f"HEADERS += l18nstrings.h\n")
dummyproj.write(f"SOURCES += l18nstrings_p.cpp\n")
dummyproj.write(f"SOURCES += ../l18nstrings.cpp\n\n")
for l10n_file in l10n_files:
dummyproj.write(f"TRANSLATIONS += {os.path.basename(l10n_file['ts'])}\n")
dummyproj.write("\n")
scan_sources(dummyproj, '../../src')
scan_sources(dummyproj, '../../nebula')
# Step 5
title("Generate translation resources...")
for l10n_file in l10n_files:
os.system(f"{lconvert} -if xlf -i {l10n_file['xliff']} -o {l10n_file['ts']}")
os.system(f"{lupdate} translations/generated/dummy.pro")
for l10n_file in l10n_files:
os.system(f"{lrelease} -idbased {l10n_file['ts']}")
print(f'Imported {len(l10n_files)} locales')
git = os.popen(f'git submodule status i18n')
git_commit_hash = git.read().strip().replace("+","").split(' ')[0]
print(f'Current commit: https://github.com/mozilla-l10n/mozilla-vpn-client-l10n/commit/{git_commit_hash}')
avg_line_length: 35.57277 | max_line_length: 121 | alphanum_fraction: 0.665171
qsc_*_quality_signal values (41, schema order): 1,057 | 7,577 | 4.705771 | 0.305582 | 0.021713 | 0.028146 | 0.026538 | 0.205468 | 0.145959 | 0.090269 | 0.060314 | 0.060314 | 0.020105 | 0 | 0.025207 | 0.17276 | 7,577 | 212 | 122 | 35.740566 | 0.768347 | 0.112973 | 0 | 0.18239 | 0 | 0.018868 | 0.440794 | 0.121696 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0.006289 | 0.056604 | 0 | 0.08805 | 0.062893
qsc_* values (second block, 41, schema order): 0 for every column except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
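Several of the per-file statistics stored alongside `content` (line counts, line-length statistics, whitespace and alphanumeric fractions) can be approximated directly from the source text. The sketch below is illustrative only; the exact definitions behind the stored quality signals may differ.

```python
# Rough, illustrative recomputation of a few of the simpler per-file statistics.
# These formulas are assumptions, not the reference implementation behind the
# stored quality signals.
def simple_code_stats(content: str) -> dict:
    lines = content.splitlines()
    line_lengths = [len(line) for line in lines] or [0]
    n_chars = max(len(content), 1)
    return {
        "num_lines": len(lines),
        "max_line_length": max(line_lengths),
        "mean_line_length": sum(line_lengths) / len(line_lengths),
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n_chars,
        "alphanum_fraction": sum(c.isalnum() for c in content) / n_chars,
    }

# Example: stats = simple_code_stats(row["content"]) for the record above.
```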
hexsha: 4a6e93c38ff63c100497bb656432f8f40340791b | size: 1,026 | ext: py | lang: Python
max_stars_repo: Velgaster/Discord-User-Vote @ 4aacc0bf01a11b948fa5355a3775ef8c7ae9751e | path: cogs/filter.py | licenses: ["MIT"] | max_stars_count: null | stars_event_min/max_datetime: null / null
max_issues_repo: Velgaster/Discord-User-Vote @ 4aacc0bf01a11b948fa5355a3775ef8c7ae9751e | path: cogs/filter.py | licenses: ["MIT"] | max_issues_count: null | issues_event_min/max_datetime: null / null
max_forks_repo: Velgaster/Discord-User-Vote @ 4aacc0bf01a11b948fa5355a3775ef8c7ae9751e | path: cogs/filter.py | licenses: ["MIT"] | max_forks_count: null | forks_event_min/max_datetime: null / null
content:
from discord.ext import commands
import discord
def setup(client):
client.add_cog(KeyWordFilter(client))
class KeyWordFilter(commands.Cog):
def __init__(self, client):
self.client = client
self.log_ch = self.client.get_channel(int(self.client.SETTINGS.LOG_CHANNEL))
@commands.Cog.listener()
async def on_message(self, msg):
if any(x in msg.content.split() for x in self.client.SETTINGS.BLACKLIST):
ctx = await self.client.get_context(msg)
await self.event_log(ctx, msg, "A blacklisted phrase was used!")
await msg.delete()
async def event_log(self, ctx, msg, event):
embed = discord.Embed()
embed.colour = discord.Colour.red()
embed.title = event
embed.add_field(name='User', value=msg.author, inline=True)
embed.add_field(name='Channel', value=msg.channel.name, inline=True)
embed.add_field(name='Message', value=f"> {msg.content}", inline=False)
await self.log_ch.send(embed=embed)
avg_line_length: 35.37931 | max_line_length: 84 | alphanum_fraction: 0.665692
qsc_*_quality_signal values (41, schema order): 141 | 1,026 | 4.730496 | 0.404255 | 0.089955 | 0.058471 | 0.076462 | 0.08096 | 0.08096 | 0 | 0 | 0 | 0 | 0 | 0 | 0.210526 | 1,026 | 28 | 85 | 36.642857 | 0.823457 | 0 | 0 | 0 | 0 | 0 | 0.061404 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.227273 | 0
qsc_* values (second block, 41, schema order): 0 for every column except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
hexsha: 4a6fe4cb292136ed5cb190cbef1dbace08d2c9c3 | size: 1,975 | ext: py | lang: Python
max_stars_repo: sai-krishna-msk/KickAssist @ 7fb256e3ef4beff231332f6491ebb975f3fe4b43 | path: api/app.py | licenses: ["MIT"] | max_stars_count: null | stars_event_min/max_datetime: null / null
max_issues_repo: sai-krishna-msk/KickAssist @ 7fb256e3ef4beff231332f6491ebb975f3fe4b43 | path: api/app.py | licenses: ["MIT"] | max_issues_count: 7 | issues_event_min/max_datetime: 2021-06-08T21:18:49.000Z / 2022-03-12T00:24:33.000Z
max_forks_repo: sai-krishna-msk/KickAssist @ 7fb256e3ef4beff231332f6491ebb975f3fe4b43 | path: api/app.py | licenses: ["MIT"] | max_forks_count: null | forks_event_min/max_datetime: null / null
content:
from ml_model.model import KickModel
import numpy as np
import pandas as pd
import eli5
import joblib
import flask
from flask import Flask, render_template, request, jsonify
app = Flask(__name__)
model_oh = joblib.load('ml_model/estimators/model_oh.sav')
model_hel = joblib.load('ml_model/estimators/model_hel.sav')
encoder_oh = joblib.load('ml_model/estimators/encoder_oh.sav')
encoder_hel = joblib.load('ml_model/estimators/encoder_hel.sav')
encoder_label = joblib.load('ml_model/estimators/encoder_label.sav')
def get_predict(launch_date , deadline_date , goal , subcategory , category , currency , country , description, rewards):
pred_dict={
"launched_at":launch_date,
"deadline":deadline_date,
"goal":int(goal),
"sub_category":subcategory,
"category":category,
"currency":currency,
"location_country":country,
"blurb":description,
"rewards":[]
}
try:
for reward in rewards.split(","):
pred_dict["rewards"].append(int(reward))
except Exception as e:
raise Exception(f"Error sanatizing rewards with {e} error")
return pred_dict
@app.route('/predict/<launch_date>/<deadline_date>/<goal>/<subcategory>/<category>/<currency>/<country>/<description>/<rewards>')
def GetURL(launch_date , deadline_date , goal , subcategory , category , currency , country , description, rewards):
pred_dict = get_predict(launch_date , deadline_date , goal , subcategory , category , currency , country , description, rewards)
obj = KickModel(model_oh , model_hel , encoder_oh , encoder_hel , encoder_label)
obj.load_data(pred_dict)
obj.pred()
oh_pred = float(obj.pred_oh[0][1])
hel_pred = float(obj.pred_hel[0][1])
response = {
"prediction_oh":oh_pred,
"prediction_hel":hel_pred,
"prediction_oh_df":obj.pred_oh_intr.to_dict(),
"prediction_hel_intr":obj.pred_hel_intr.to_dict()
}
return response
if __name__=="__main__":
app.run(debug =True)
avg_line_length: 33.474576 | max_line_length: 132 | alphanum_fraction: 0.716456
qsc_*_quality_signal values (41, schema order): 261 | 1,975 | 5.141762 | 0.302682 | 0.031297 | 0.044709 | 0.063338 | 0.395678 | 0.395678 | 0.264531 | 0.264531 | 0.264531 | 0.264531 | 0 | 0.003005 | 0.157468 | 1,975 | 59 | 133 | 33.474576 | 0.803486 | 0 | 0 | 0 | 0 | 0.020833 | 0.243927 | 0.144737 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.145833 | 0 | 0.229167 | 0
qsc_* values (second block, 41, schema order): 0 for every column except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
hexsha: 4a70669d9d055da240cf688e557bf0a87257569e | size: 2,810 | ext: py | lang: Python
max_stars_repo: littleK0i/SnowDDL @ b24cb3676e41fec8876d61a101ba242e7272a18f | path: snowddl/resolver/primary_key.py | licenses: ["Apache-2.0"] | max_stars_count: 21 | stars_event_min/max_datetime: 2022-02-10T16:52:03.000Z / 2022-03-18T15:27:18.000Z
max_issues_repo: littleK0i/SnowDDL @ b24cb3676e41fec8876d61a101ba242e7272a18f | path: snowddl/resolver/primary_key.py | licenses: ["Apache-2.0"] | max_issues_count: null | issues_event_min/max_datetime: null / null
max_forks_repo: littleK0i/SnowDDL @ b24cb3676e41fec8876d61a101ba242e7272a18f | path: snowddl/resolver/primary_key.py | licenses: ["Apache-2.0"] | max_forks_count: 1 | forks_event_min/max_datetime: 2022-03-05T11:02:42.000Z / 2022-03-05T11:02:42.000Z
content:
from snowddl.blueprint import PrimaryKeyBlueprint
from snowddl.resolver.abc_schema_object_resolver import AbstractSchemaObjectResolver, ResolveResult, ObjectType
class PrimaryKeyResolver(AbstractSchemaObjectResolver):
def get_object_type(self) -> ObjectType:
return ObjectType.PRIMARY_KEY
def get_existing_objects_in_schema(self, schema: dict):
existing_objects = {}
constraints_by_name = {}
cur = self.engine.execute_meta("SHOW PRIMARY KEYS IN SCHEMA {database:i}.{schema:i}", {
"database": schema['database'],
"schema": schema['schema'],
})
for r in cur:
if r['constraint_name'] not in constraints_by_name:
constraints_by_name[r['constraint_name']] = {
"database": r['database_name'],
"schema": r['schema_name'],
"table": r['table_name'],
"columns": {r['key_sequence']: r['column_name']}
}
else:
constraints_by_name[r['constraint_name']]['columns'][r['key_sequence']] = r['column_name']
for c in constraints_by_name.values():
columns_list = [c['columns'][k] for k in sorted(c['columns'])]
full_name = f"{c['database']}.{c['schema']}.{c['table']}({','.join(columns_list)})"
existing_objects[full_name] = {
"database": c['database'],
"schema": c['schema'],
"table": c['table'],
"columns": columns_list,
}
return existing_objects
def get_blueprints(self):
return self.config.get_blueprints_by_type(PrimaryKeyBlueprint)
def create_object(self, bp: PrimaryKeyBlueprint):
self.engine.execute_safe_ddl("ALTER TABLE {table_name:i} ADD PRIMARY KEY ({columns:i})", {
"table_name": bp.table_name,
"columns": bp.columns,
})
return ResolveResult.CREATE
def compare_object(self, bp: PrimaryKeyBlueprint, row: dict):
if [str(c) for c in bp.columns] == row['columns']:
return ResolveResult.NOCHANGE
self.engine.execute_safe_ddl("ALTER TABLE {table_name:i} DROP PRIMARY KEY", {
"table_name": bp.table_name,
})
self.engine.execute_safe_ddl("ALTER TABLE {table_name:i} ADD PRIMARY KEY ({columns:i})", {
"table_name": bp.table_name,
"columns": bp.columns,
})
return ResolveResult.ALTER
def drop_object(self, row: dict):
self.engine.execute_safe_ddl("ALTER TABLE {database:i}.{schema:i}.{table:i} DROP PRIMARY KEY", {
"database": row['database'],
"schema": row['schema'],
"table": row['table'],
})
return ResolveResult.DROP
avg_line_length: 36.973684 | max_line_length: 111 | alphanum_fraction: 0.5879
qsc_*_quality_signal values (41, schema order): 307 | 2,810 | 5.175896 | 0.218241 | 0.056639 | 0.053493 | 0.052863 | 0.293266 | 0.28068 | 0.24292 | 0.221523 | 0.178729 | 0.178729 | 0 | 0 | 0.279359 | 2,810 | 75 | 112 | 37.466667 | 0.784691 | 0 | 0 | 0.206897 | 0 | 0 | 0.238434 | 0.044128 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.034483 | 0.034483 | 0.275862 | 0.086207
qsc_* values (second block, 41, schema order): 0 for every column except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
hexsha: 4a713700e9c156f74125bcaeca0299290201d914 | size: 675 | ext: py | lang: Python
max_stars_repo: tetov/ITA19 @ 1af68a8885caf83acd98f4136d0286539ccbe63b | path: modules/module0/02_datastructures_and_geometry/datastructures_2b.py | licenses: ["MIT"] | max_stars_count: 7 | stars_event_min/max_datetime: 2019-11-13T20:29:54.000Z / 2020-02-26T14:30:54.000Z
max_issues_repo: GeneKao/ITA19 @ c4b10dc183599eed4ed60d922b6ef5922d173bdb | path: modules/module0/02_datastructures_and_geometry/datastructures_2b.py | licenses: ["MIT"] | max_issues_count: 4 | issues_event_min/max_datetime: 2019-11-07T20:57:51.000Z / 2020-03-04T11:43:18.000Z
max_forks_repo: GeneKao/ITA19 @ c4b10dc183599eed4ed60d922b6ef5922d173bdb | path: modules/module0/02_datastructures_and_geometry/datastructures_2b.py | licenses: ["MIT"] | max_forks_count: 6 | forks_event_min/max_datetime: 2019-10-30T13:25:54.000Z / 2020-02-14T14:06:09.000Z
content:
import os
import compas
from compas.datastructures import Mesh
from compas_rhino.artists import MeshArtist
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, 'data')
FILE = os.path.join(DATA, 'faces.obj')
mesh = Mesh.from_obj(FILE)
artist = MeshArtist(mesh, layer="Mesh")
artist.draw_vertices(
color={key: (255, 0, 0) for key in mesh.vertices_on_boundary()})
artist.draw_vertexlabels(
text={key: str(mesh.vertex_degree(key)) for key in mesh.vertices()})
artist.draw_edges(
keys=list(mesh.edges_on_boundary()),
color=(255, 0, 0))
artist.draw_faces(
color={key: (150, 255, 150) for key in mesh.faces() if not mesh.is_face_on_boundary(key)})
avg_line_length: 25 | max_line_length: 94 | alphanum_fraction: 0.722963
qsc_*_quality_signal values (41, schema order): 107 | 675 | 4.392523 | 0.392523 | 0.085106 | 0.051064 | 0.076596 | 0.085106 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032534 | 0.134815 | 675 | 26 | 95 | 25.961538 | 0.77226 | 0 | 0 | 0 | 0 | 0 | 0.025185 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0
qsc_* values (second block, 41, schema order): 0 for every column except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
hexsha: 4a73c0e8a1979c239e091749b325602ad4a40468 | size: 5,620 | ext: py | lang: Python
max_stars_repo: IntuitionEngineeringTeam/RedBlackPy @ 99630408153bea7494415c402eb2d9881f3168ee | path: setup.py | licenses: ["Apache-2.0"] | max_stars_count: 12 | stars_event_min/max_datetime: 2018-08-24T20:46:38.000Z / 2022-01-20T16:25:23.000Z
max_issues_repo: IntuitionEngineeringTeam/RedBlackPy @ 99630408153bea7494415c402eb2d9881f3168ee | path: setup.py | licenses: ["Apache-2.0"] | max_issues_count: 1 | issues_event_min/max_datetime: 2019-04-02T04:19:58.000Z / 2019-04-02T04:19:58.000Z
max_forks_repo: IntuitionEngineeringTeam/RedBlackPy @ 99630408153bea7494415c402eb2d9881f3168ee | path: setup.py | licenses: ["Apache-2.0"] | max_forks_count: 3 | forks_event_min/max_datetime: 2018-07-05T22:47:27.000Z / 2019-05-25T06:40:40.000Z
content:
#
# Created by Soldoskikh Kirill.
# Copyright 2018 Intuition. All rights reserved.
#
import os
import platform
from setuptools import setup
from setuptools.command.build_ext import build_ext
from distutils.extension import Extension
from Cython.Build import cythonize
from rbp_setup_tools.code_generation import generate_from_cython_src
from rbp_setup_tools.types import TYPES
if platform.system() == 'Darwin':
compile_opts = [ '-std=c++11',
'-mmacosx-version-min={:}'.format( platform.mac_ver()[0] ),
'-Ofast' ]
elif platform.system() == 'Linux':
compile_opts = [ '-std=c++11',
'-Ofast' ]
elif platform.system() == 'Windows':
compile_opts = [ '-std=c++11',
'-Ofast' ]
else:
raise EnvironmentError( 'Not supported platform: {plat}'.format(plat=platform.system()) )
#--------------------------------------------------------------------------------------------
# Generate cython code for all supporting types
#--------------------------------------------------------------------------------------------
src_1 = './redblackpy/cython_source/__dtype_tree_processing.pxi'
src_2 = './redblackpy/cython_source/__tree_series_dtype.pxi'
src_3 = './redblackpy/cython_source/__interpolation.pxi'
src_4 = './redblackpy/cython_source/__arithmetic.pxi'
src_1 = open(src_1, 'r')
src_2 = open(src_2, 'r')
src_3 = open(src_3, 'r')
src_4 = open(src_4, 'r')
output_1 = open('./redblackpy/cython_source/dtype_tree_processing.pxi', 'w')
output_2 = open('./redblackpy/cython_source/tree_series_dtype.pxi', 'w')
output_3 = open('./redblackpy/cython_source/interpolation.pxi', 'w')
output_4 = open('./redblackpy/cython_source/arithmetic.pxi', 'w')
generate_from_cython_src(src_1, output_1, TYPES[:-1], 0)
generate_from_cython_src(src_2, output_2, TYPES, 14)
generate_from_cython_src(src_3, output_3, TYPES, 0)
generate_from_cython_src(src_4, output_4, TYPES, 0)
src_1.close()
src_2.close()
src_3.close()
src_4.close()
output_1.close()
output_2.close()
output_3.close()
output_4.close()
#--------------------------------------------------------------------------------------------
ext_modules=[ Extension( "redblackpy.series.tree_series",
sources=["redblackpy/series/tree_series.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'],
depends=[ 'core/tree/tree.hpp',
'core/tree/rb_tree.tpp',
'core/tree/rb_node.tpp',
'core/tree/rb_node_valued.tpp',
'core/trees_iterator/iterator.hpp',
'core/trees_iterator/iterator.tpp' ], ),
Extension( "redblackpy.series.series_iterator",
sources=["redblackpy/series/series_iterator.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'],
depends=[ 'core/tree/tree.hpp',
'core/tree/rb_tree.tpp',
'core/tree/rb_node.tpp',
'core/tree/rb_node_valued.tpp',
'core/trees_iterator/iterator.hpp',
'core/trees_iterator/iterator.tpp' ], ),
Extension( "redblackpy.benchmark.timer",
sources=["redblackpy/benchmark/timer.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'] ) ]
setup( name='redblackpy',
ext_modules = cythonize(ext_modules),
version='0.1.3.0',
author='Solodskikh Kirill',
author_email='hypo@intuition.engineering',
maintainer='Intuition',
maintainer_email='dev@intuition.engineering',
install_requires=['cython'],
description='Data structures based on red-black trees.',
url='https://intuitionengineeringteam.github.io/RedBlackPy/',
download_url='https://github.com/IntuitionEngineeringTeam/RedBlackPy/archive/master.zip',
zip_safe=False,
packages=[ 'redblackpy', 'redblackpy.series',
'redblackpy.benchmark', 'redblackpy.tree_cython_api'],
package_data={'redblackpy.series': ['*.pxd']},
include_package_data=True,
license='Apache License 2.0',
long_description='RedBlackPy is a light Python library that provides data structures \
aimed to fast insertion, removal and self sorting to manipulating ordered data in efficient way.\
The core part of the library had been written on C++ and then was wrapped in Cython. \
Hope that many would find the primary data structures of this library very handy in working \
with time series. One of the main feature of this structures is an access by arbitrary \
key using interpolation, what makes processing of multiple non synchronized time series very simple.\
All data structures based on red black trees.',
classifiers = [ 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3' ] )
avg_line_length: 44.251969 | max_line_length: 108 | alphanum_fraction: 0.57242
qsc_*_quality_signal values (41, schema order): 604 | 5,620 | 5.10596 | 0.31457 | 0.032101 | 0.057069 | 0.034047 | 0.357977 | 0.289559 | 0.259079 | 0.182555 | 0.182555 | 0.182555 | 0 | 0.014856 | 0.269395 | 5,620 | 126 | 109 | 44.603175 | 0.73624 | 0.070996 | 0 | 0.29 | 0 | 0 | 0.287085 | 0.179428 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0
qsc_* values (second block, 41, schema order): 0 for every column except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
hexsha: 4a7405fc354c53785ef8307b7ce20355175f5c8f | size: 7,320 | ext: py | lang: Python
max_stars_repo: INDElab/conversationkg @ 8bfe09b0afb4954f633a9287f723c61dcd21ce46 | path: conversationkg/kgs/writers.py | licenses: ["Apache-2.0"] | max_stars_count: 3 | stars_event_min/max_datetime: 2021-01-18T10:07:44.000Z / 2021-05-27T07:39:35.000Z
max_issues_repo: INDElab/conversationkg @ 8bfe09b0afb4954f633a9287f723c61dcd21ce46 | path: conversationkg/kgs/writers.py | licenses: ["Apache-2.0"] | max_issues_count: 3 | issues_event_min/max_datetime: 2020-12-09T23:20:27.000Z / 2021-03-06T11:08:24.000Z
max_forks_repo: INDElab/conversationkg @ 8bfe09b0afb4954f633a9287f723c61dcd21ce46 | path: conversationkg/kgs/writers.py | licenses: ["Apache-2.0"] | max_forks_count: 1 | forks_event_min/max_datetime: 2021-02-19T12:10:11.000Z / 2021-02-19T12:10:11.000Z
content:
from ..conversations.corpus import Conversation
from ..conversations.emails import Email
from collections import Counter
import matplotlib
import pandas as pd
import json
class JSONWriter:
def __init__(self, kg):
self.kg = kg
self.entities = kg.entities()
self.triples = kg.triples
self.provenances = kg.provenances
def store(self, name, save_mapping=True):
with open(f"{name}.json", "w") as handle:
json.dump(self.translated, handle)
with open(f"{name}.provenances.json", "w") as handle:
json.dump(self.provenances, handle)
if save_mapping:
reversed_d = self.reverse_mapping(self.entity2ind)
json_d = {i:e.to_json() for i, e in reversed_d.items()}
with open(f"{name}.ind2entity.json", "w") as handle:
json.dump(json_d, handle)
reverse_d = self.reverse_mapping(self.pred2ind)
with open(f"{name}.ind2pred.json", "w") as handle:
json.dump(reverse_d, handle)
@classmethod
def restore(cls, name, load_mapping_of=None):
def get_class(cls_name):
for mod in conversations_modules:
try:
cls = getattr(mod, cls_name)
return cls
except AttributeError:
pass
raise AttributeError(f"{cls_name} could not be found in any of the modules!")
def json_to_entity(json_dict):
try:
json_dict["class"]
except KeyError:
print(json_dict.keys())
raise
cls_name = json_dict["class"]
cls = get_class(cls_name)
return cls.from_json(json_dict)
if load_mapping_of is None:
load_mapping_of = name
with open(f"{load_mapping_of}.ind2entity.json") as handle:
loaded_entity_mapping = {int(i): d for i, d in json.load(handle).items()}
ind2entity = {i:json_to_entity(d) for i, d in loaded_entity_mapping.items()}
ind2entity = {i: (Person(x) if type(x) is WholePerson else x)
for i, x in ind2entity.items()}
with open(f"{load_mapping_of}.ind2pred.json") as handle:
ind2pred = {int(i): d for i, d in json.load(handle).items()}
with open(f"{name}.json") as handle:
loaded = json.load(handle)
restored_triples = [(ind2entity[s],
ind2pred[p],
ind2entity[o]) for s, p, o in loaded]
with open(f"{name}.provenances.json") as handle:
provenances = json.load(handle)
kg = KG(restored_triples, provenances)
kg.translated = loaded
kg.entity2ind = kg.reverse_mapping(ind2entity)
kg.pred2ind = kg.reverse_mapping(ind2pred)
return kg
@staticmethod
def reverse_mapping(d):
rev_d = {}
for k, v in d.items():
if not v in rev_d:
rev_d[v] = k
else:
print("duplicate:", v)
if not type(v) is Person:
raise ValueError("Non-bijective mapping!")
return rev_d
class CSVWriter:
def __init__(self, kg):
self.kg = kg
self.entities = kg.entities()
self.triples = kg.triples
self.provenances = kg.provenances
def get_node_df(self):
records = []
sorted_ents = sorted(self.entities, key=lambda x: (str(type(x)), str(x)))
for i, e in enumerate(sorted_ents):
node_id = i # hash(e)
node_t = str(e)
node_type = type(e).__name__
node_u = f"icons/{node_type.lower()}.png"
type_ = "LinkChart" if i == 0 else "0"
if type(e) in {Conversation, Email}:
node_dtopic = e.topic.topic.index
node_dtopic_rate = round(e.topic.score, 5)
else:
node_dtopic = -1
node_dtopic_rate = 1.0
lat = lng = 0.0
records.append(
(
type_, node_type, node_id, node_u, node_t,
node_dtopic, node_dtopic_rate, lat, lng
)
)
return pd.DataFrame.from_records(records,
columns= ['type',
'node_type',
'node_id',
'node_u',
'node_t',
'node_dtopic',
'node_dtopic_rate',
'lat',
'lng']
)
def get_link_df(self):
link_types = {p for s, p, o in self.triples}
link_counts = Counter(self.triples)
colours = dict(zip(link_types, list(matplotlib.colors.cnames.values())))
sorted_ents = dict(zip(sorted(self.entities, key=str),
range(len(self.entities))))
records = []
for i, ((s, p, o), prov) in enumerate(zip(self.triples, self.provenances)):
linkId = i # hash((s, p, o)) # s.time.timestamp()
end1 = sorted_ents[s] # hash(s)
end2 = sorted_ents[o] # hash(o)
linkcount = link_counts[(s,p,o)]
linkcolor = colours[p]
linktype = p
itemID = prov
rec = [linkId,
end1,
end2,
linkcount,
linkcolor,
itemID,
linktype]
records.append(rec)
return pd.DataFrame.from_records(records,
columns=['linkId', 'end1', 'end2', 'linkcount', 'linkcolor', 'itemID', 'linktype'])
def to_csv(self, save_path):
node_df = self.get_node_df()
link_df = self.get_link_df()
node_df.to_csv(save_path + ".nodes.csv",
index=False)
link_df.to_csv(save_path + ".links.csv",
index=False)
from neo4j import GraphDatabase
class Neo4jWriter:
def __init__(self, kg):
self.kg = kg
def to_neo4j(self):
pass
def run(self, clear=True):
self.driver = GraphDatabase.driver("bolt://localhost:7687",
auth=("neo4j", "pwd"), encrypted=False)
if clear:
tx.run("""MATCH (x)
DETACH DELETE x""")
avg_line_length: 30.247934 | max_line_length: 124 | alphanum_fraction: 0.454645
qsc_*_quality_signal values (41, schema order): 747 | 7,320 | 4.287818 | 0.228916 | 0.019981 | 0.022479 | 0.024352 | 0.280674 | 0.234468 | 0.191383 | 0.114268 | 0.114268 | 0.114268 | 0 | 0.009716 | 0.451639 | 7,320 | 242 | 125 | 30.247934 | 0.788241 | 0.008197 | 0 | 0.150943 | 0 | 0 | 0.070572 | 0.025086 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081761 | false | 0.012579 | 0.044025 | 0 | 0.18239 | 0.012579
qsc_* values (second block, 41, schema order): 0 for every column except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
hexsha: 4a74f67398645a5ea142cd4ebc8cc51cbdd14233 | size: 590 | ext: py | lang: Python
max_stars_repo: shikew/Handwriting-calculator @ 5e0da9f8ceac6dcc815139c6855dfc6fb5af909f | path: model-test.py | licenses: ["Apache-2.0"] | max_stars_count: null | stars_event_min/max_datetime: null / null
max_issues_repo: shikew/Handwriting-calculator @ 5e0da9f8ceac6dcc815139c6855dfc6fb5af909f | path: model-test.py | licenses: ["Apache-2.0"] | max_issues_count: null | issues_event_min/max_datetime: null / null
max_forks_repo: shikew/Handwriting-calculator @ 5e0da9f8ceac6dcc815139c6855dfc6fb5af909f | path: model-test.py | licenses: ["Apache-2.0"] | max_forks_count: 1 | forks_event_min/max_datetime: 2019-09-11T11:48:47.000Z / 2019-09-11T11:48:47.000Z
content:
import numpy as np
from PIL import Image
from keras.models import load_model
img_gray = Image.open('1002.png')
number = np.array(img_gray)
print(number.shape)
print('shape of the prepared image:', number.flatten().shape)
print('original number:', number)
number = number.astype('float32')
number = number/255  # normalize to [0, 1]
number = number.flatten()
print('number.shape after processing:', number.shape)
model = load_model('mnist-dnn.h5')
# model.load_weights('mnist.model.best.hdf5')
# def recognize(photo_data):
# return clf.predict(photo_data)
print(model.predict_classes(np.array([number])))
# print('test label:', test_target[8000])
avg_line_length: 28.095238 | max_line_length: 48 | alphanum_fraction: 0.749153
qsc_*_quality_signal values (41, schema order): 86 | 590 | 5.034884 | 0.534884 | 0.110855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027933 | 0.089831 | 590 | 21 | 49 | 28.095238 | 0.778399 | 0.240678 | 0 | 0 | 0 | 0 | 0.146396 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0.357143
qsc_* values (second block, 41, schema order): 0 for every column except qsc_code_frac_words_unique: null and qsc_code_frac_lines_string_concat: null
effective: 1 | hits: 0
hexsha: 4a75b7b70277fd3cd807924be5321a95f06ea318 | size: 72,121 | ext: py | lang: Python
max_stars_repo: nantille/iblviewer @ a5dad67e8f4b99a535297ba0803caf07b1107ca1 | path: iblviewer/volume.py | licenses: ["MIT"] | max_stars_count: null | stars_event_min/max_datetime: null / null
max_issues_repo: nantille/iblviewer @ a5dad67e8f4b99a535297ba0803caf07b1107ca1 | path: iblviewer/volume.py | licenses: ["MIT"] | max_issues_count: null | issues_event_min/max_datetime: null / null
max_forks_repo: nantille/iblviewer @ a5dad67e8f4b99a535297ba0803caf07b1107ca1 | path: iblviewer/volume.py | licenses: ["MIT"] | max_forks_count: null | forks_event_min/max_datetime: null / null
content:
from dataclasses import dataclass, field
from typing import Mapping, List, Any
from datetime import datetime
import logging
import pandas as pd
import glob
import numpy as np
import logging
import os
from collections import OrderedDict
import nrrd
import vtk
import vedo
from vtk.util.numpy_support import numpy_to_vtk
from iblviewer.collection import Collection
import iblviewer.objects as obj
import iblviewer.utils as utils
@dataclass
class VolumeModel:
RAW = 'raw'
SEGMENTED = 'segmented'
NORMALIZED_SUFFIX = '_norm'
DATA_TYPE = {RAW:0, SEGMENTED:1}
PREFIX = 'Volume'
__count = 0
def unique_name():
VolumeModel.__count += 1
return f'{VolumeModel.PREFIX}_{VolumeModel.__count}'
name: str = field(default_factory=unique_name)
file_path: str = None
scalars: Collection = field(default_factory=Collection)
axes: List = field(default_factory=lambda: [1, 1, 1])
data_min: float = None
data_max: float = None
data_map_step: float = 1.0
data: np.ndarray = None
data_type: str = RAW
resolution: int = 1
# Default units are microns.
units: float = 1e-06
base_color_map: Any = None
# At IBL, volume mappings are used from ibllib: ibllib.atlas.regions.mappings
mapping_name: str = None
lateralized: bool = False
# Mapping function. If None, the volume will be given as it is.
mapping: Any = None
luts: Collection = field(default_factory=Collection)
slicers: Collection = field(default_factory=Collection)
isosurfaces: Collection = field(default_factory=Collection)
interactive_subsampling: bool = True
volume_visible: bool = True
slices_visible: bool = True
transpose_shape: Any = None
dimensions: np.ndarray = np.zeros(3).astype(float)
center: np.ndarray = np.zeros(3).astype(float)
def compute_size(self):
"""
Compute volume size
"""
if self.data is None:
return
self.dimensions = np.array(self.data.shape)[:3]
if self.resolution is None:
return
self.resolution = int(self.resolution) # TODO: move this to constructor or init
self.dimensions *= self.resolution
self.center = np.ones(3) * self.resolution / 2 + self.dimensions / 2
def compute_range(self, force=False):
"""
Compute min and max range in the volume
:return: Min and max values
"""
if self.data_min is not None and self.data_max is not None and not force:
return self.data_min, self.data_max
self.data_min = np.min(self.data)
self.data_max = np.max(self.data)
#print('Volume min-max', self.data_min, self.data_max)
return self.data_min, self.data_max
def guess_volume_type(self):
"""
Infer the volume type when it was not specified by the user.
We assume here that typical values between -1 and 1 are raw volumes.
"""
if self.data_type is None:
if self.data_min is None or self.data_max is None:
self.compute_range()
if self.data_min >= -1 and self.data_max <= 1:
guess = VolumeModel.RAW
else:
guess = VolumeModel.SEGMENTED
self.data_type = guess
def is_segmented(self, auto_guess=True):
"""
Get whether current volume/image is segmented
:return: Boolean
"""
if self.data_type is None and auto_guess:
self.guess_volume_type()
return self.data_type == VolumeModel.SEGMENTED
def read_volume(self, file_path):
"""
Read local volume. Downloads the file first if it's remote.
:param file_path: Volume path
:return: 3D array
"""
if file_path.startswith('http') or file_path.startswith('ftp'):
downloaded_temp_file_path = vedo.download(file_path, verbose=False)
if file_path.endswith('nrrd'):
data, header = nrrd.read(downloaded_temp_file_path)
else:
data = vedo.loadImageData(downloaded_temp_file_path)
else:
if file_path.endswith('nrrd'):
data, header = nrrd.read(file_path, index_order='C')
else:
data = vedo.loadImageData(file_path)
return data
def load_volume(self, file_path, remap_scalars=False, mapping=None, make_current=True):
"""
Load a volume data file. Supports NRRD and many other formats thanks to vedo/VTK
:param file_path: Volume file path. Could support other file types easily.
:param remap_scalars: Whether scalar values in the volume are replaced by
their row id from a mapping that stores. This is necessary in the case of segmented
volumes with regions that have a discontinuous id.
:param mapping: Pandas Series or a Dictionary
:param make_current: Set the volume data as the current one
:return: 3D array
"""
data = None
if not remap_scalars or mapping is None:
data = self.import_volume(file_path)
else:
time = datetime.now()
new_file_path = utils.change_file_name(file_path, None, None, VolumeModel.NORMALIZED_SUFFIX)
if os.path.exists(new_file_path):
data = self.import_volume(new_file_path)
else:
data = self.import_volume(file_path)
data, mapping = self.remap_slow(data, mapping, new_file_path)
logging.info('Remapped scalar values in: ' + str(utils.time_diff(time)) + 's')
'''
if volume is not None:
logging.info('Opened atlas ' + new_file_path + ' in ' + str(utils.time_diff(time)) + 's')
min_value, max_value = np.amin(data), np.amax(data)
logging.info('Min max scalar values in volume ' + str(min_value) + ' -> ' + str(max_value))
else:
logging.error('Failed to open atlas ' + new_file_path)
'''
if make_current and data is not None:
self.data = data
return data, mapping
def transpose(self, shape=None):
"""
Transpose the volume for visualization in VTK
:param shape: The new shape. If None, will default to self.transpose_shape
"""
if shape is None:
shape = self.transpose_shape
if shape is None:
return
self.data = np.transpose(self.data, shape)
def remap_slow(self, data, mapping=None, write_path=None):
"""
Reassign volume values (slow on large volumes!) so that they're continuous
:param data: Volume ndarray
:param write_path: Where the modified volume will be stored
(to spare going through this method next time)
:param mapping: Pandas Series or a Dictionary that maps raw volume scalars to new ones
:return: Modified volume data
"""
logging.info('\nBuilding appropriate volume from Allen data source...')
#volume = np.vectorize(self.f)(data)
labels = np.sort(np.unique(data))
num_labels = len(labels)
if mapping is None:
mapping = pd.Series(labels)
logging.info('Num regions labeled in volume ' + str(num_labels) + ' from ' + str(mapping.size) + ' in atlas')
logging.info('Reassigning ' + str(num_labels) + ' scalar values...')
for iter_id in range(num_labels):
label = labels[iter_id]
ids = mapping.index[mapping == label].to_list()
if len(ids) < 1:
continue
# On a large volume, this takes a long time
data[data == label] = ids[0]
if num_labels > 10000 and iter_id % 10 == 0:
logging.info(' Progress: ' + str(int(iter_id/num_labels)*100) + '%')
if write_path is not None:
logging.info('Saving volume data under ' + write_path)
nrrd.write(write_path, data, index_order='C')
return data, mapping
def build_lut(self, scalar_map=None, scalar_range=None, color_map=None,
alpha_map=None, zero_is_transparent=True,
noise_amount=0.0, nan_rgba=None, make_active=True):
"""
Build a look-up table (LUT, sometimes known as transfer function) for the volume
:param scalar_map: A 2D list with values in first column from the volume itself and values from
the second column being your scalar values that correspond to such region
:param scalar_range: Min and max values in a list
:param color_map: Color map name to apply
:param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that
says how transparent a scalar value should be
:param zero_is_transparent: Whether zero values are made transparent, True by default
:param noise_amount: Whether a noise value is applied on the colors
:param nan_rgba: Color and transparency (RGBA) to assign to invalid (out of range or None) scalar values
:param make_active: Whether this one is made active (you still have to update the views after that)
:return: LUTModel
"""
lut_model = LUTModel()
lut_model.build(scalar_map, scalar_range, color_map, alpha_map,
zero_is_transparent, noise_amount, nan_rgba)
self.luts.store(lut_model, set_current=make_active)
return lut_model
def blend_maps(map1, map2, time, total_time):
"""
Blend color maps
"""
weight1 = max(0.0, total_time - time)
weight2 = max(0.0, time)
return map1 * weight1 + map2 * weight2
class Volume(vedo.Volume):
"""
Overwriting of vedo.Volume constructor that is ill-designed as
it transposes the given numpy array without us knowing about it,
not giving us the option to choose about that.
"""
def __init__(self,
inputobj=None,
c='RdBu_r',
alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
alphaGradient=None,
alphaUnit=1,
mode=0,
shade=False,
spacing=None,
dims=None,
origin=None,
mapper='smart'):
vtk.vtkVolume.__init__(self)
vedo.BaseGrid.__init__(self)
self.axes = [1, 1, 1]
###################
if isinstance(inputobj, str):
if "https://" in inputobj:
from vedo.io import download
inputobj = download(inputobj, verbose=False) # fpath
elif os.path.isfile(inputobj):
pass
else:
inputobj = sorted(glob.glob(inputobj))
###################
if 'gpu' in mapper:
self._mapper = vtk.vtkGPUVolumeRayCastMapper()
elif 'opengl_gpu' in mapper:
self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
elif 'smart' in mapper:
self._mapper = vtk.vtkSmartVolumeMapper()
elif 'fixed' in mapper:
self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
elif isinstance(mapper, vtk.vtkMapper):
self._mapper = mapper
else:
print("Error unknown mapper type", [mapper])
raise RuntimeError()
self.SetMapper(self._mapper)
###################
inputtype = str(type(inputobj))
#colors.printc('Volume inputtype', inputtype)
if inputobj is None:
img = vtk.vtkImageData()
elif vedo.utils.isSequence(inputobj):
if isinstance(inputobj[0], str): # scan sequence of BMP files
ima = vtk.vtkImageAppend()
ima.SetAppendAxis(2)
pb = vedo.utils.ProgressBar(0, len(inputobj))
for i in pb.range():
f = inputobj[i]
picr = vtk.vtkBMPReader()
picr.SetFileName(f)
picr.Update()
mgf = vtk.vtkImageMagnitude()
mgf.SetInputData(picr.GetOutput())
mgf.Update()
ima.AddInputData(mgf.GetOutput())
pb.print('loading...')
ima.Update()
img = ima.GetOutput()
else:
if "ndarray" not in inputtype:
inputobj = np.array(inputobj)
if len(inputobj.shape)==1:
varr = vedo.numpy2vtk(inputobj, dtype=np.float)
else:
# ------------------------------ Nasty lines commented here
#if len(inputobj.shape)>2:
#inputobj = np.transpose(inputobj, axes=[2, 1, 0])
varr = vedo.numpy2vtk(inputobj.ravel(order='F'), dtype=np.float)
varr.SetName('input_scalars')
img = vtk.vtkImageData()
if dims is not None:
img.SetDimensions(dims)
else:
if len(inputobj.shape)==1:
vedo.colors.printc("Error: must set dimensions (dims keyword) in Volume.", c='r')
raise RuntimeError()
img.SetDimensions(inputobj.shape)
img.GetPointData().SetScalars(varr)
#to convert rgb to numpy
# img_scalar = data.GetPointData().GetScalars()
# dims = data.GetDimensions()
# n_comp = img_scalar.GetNumberOfComponents()
# temp = utils.vtk2numpy(img_scalar)
# numpy_data = temp.reshape(dims[1],dims[0],n_comp)
# numpy_data = numpy_data.transpose(0,1,2)
# numpy_data = np.flipud(numpy_data)
elif "ImageData" in inputtype:
img = inputobj
elif isinstance(inputobj, vedo.Volume):
img = inputobj.GetMapper().GetInput()
elif "UniformGrid" in inputtype:
img = inputobj
elif hasattr(inputobj, "GetOutput"): # passing vtk object, try extract imagdedata
if hasattr(inputobj, "Update"):
inputobj.Update()
img = inputobj.GetOutput()
elif isinstance(inputobj, str):
from vedo.io import loadImageData, download
if "https://" in inputobj:
inputobj = download(inputobj, verbose=False)
img = loadImageData(inputobj)
else:
vedo.colors.printc("Volume(): cannot understand input type:\n", inputtype, c='r')
return
if dims is not None:
img.SetDimensions(dims)
if origin is not None:
img.SetOrigin(origin) ### DIFFERENT from volume.origin()!
if spacing is not None:
img.SetSpacing(spacing)
self._data = img
self._mapper.SetInputData(img)
self.mode(mode).color(c).alpha(alpha).alphaGradient(alphaGradient)
self.GetProperty().SetShade(True)
self.GetProperty().SetInterpolationType(1)
self.GetProperty().SetScalarOpacityUnitDistance(alphaUnit)
# remember stuff:
self._mode = mode
self._color = c
self._alpha = alpha
self._alphaGrad = alphaGradient
self._alphaUnit = alphaUnit
@dataclass
class LUTModel:
"""
This class might look slightly convoluted but it's actually simple.
We use double mapping here in order to enable live/interactive visualization
of volumetric data. Instead of replacing values in a 3D volume, we only replace
the colors in the 1D LUT list.
The point is that it's too slow to update a given data, like a segmented
volume with custom values. Instead, we map such custom values to a 1D
array (our LUT) that maps colors to raw volume values.
This is much faster in terms of rendering and it enables interactive visualization.
The scalar_lut is the original LUT for the given scalars (custom values)
and the mapped_lut is the LUT assigned to the surfaces (like slices)
that have copied data from the volume. The volume is given color_map
and alpha_map through vedo methods.
You might say "ok for double mapping, it's the only way for interactive
rendering of a volume, but what about color_map and mapped_lut? Aren't
they the same?". The answer is: they're the same but VTK does not accept
a vtkLookupTable for a volume. Instead, it wants a vtkColorTransferFunction
and a vtkPiecewiseFunction for alpha. There's no way around it.
The color_map will be computed as a vtkColorTransferFunction and
the alpha_map as the vtkPiecewiseFunction.
"""
name: str = NotImplementedError
color_map_function: Any = None
scalar_map: np.ndarray = None
scalar_min: float = 0.0
scalar_max: float = 1.0
scalar_lut: vtk.vtkLookupTable = None
mapped_lut: vtk.vtkLookupTable = None
color_map: np.ndarray = None
alpha_map: np.ndarray = None
base_color_map: np.ndarray = None
def build(self, scalar_map=None, scalar_range=None, color_map=None,
alpha_map=None, zero_is_transparent=True,
noise_amount=0.0, nan_rgba=None):
"""
Build several look-up tables (LUT, sometimes known as transfer function) for the volume.
This is where double-mapping occurs for segmented volumes that have values from 0 to n where
each value defines a sub-volume or region. If we want to assign values (say from another model)
to these regions, we'd have to change the volume values and it would be too slow iterating over
each voxel in 3D. Instead we define colors that represent these values and assign them to
segmented regions in a 1D list.
:param scalar_map: A 2D list with values in first column from the volume itself and values from
the second column being your scalar values that correspond to such region
:param scalar_range: Min and max values in a list
:param color_map: Color map name to apply
:param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that
says how transparent a scalar value should be
:param zero_is_transparent: Whether zero values are made transparent, True by default
:param noise_amount: Whether a noise value is applied on the colors
:param nan_rgba: Color and alpha values to assign to invalid (out of range or None) scalar values
:return: LUTModel
"""
if color_map is None:
return
if nan_rgba is None:
nan_rgba = [0.0, 0.0, 0.0, 0.0]
if self.base_color_map is None:
self.base_color_map = color_map
colors = []
alphas = []
lut = vtk.vtkLookupTable()
scalar_lut = vtk.vtkLookupTable()
# Use the number of values in the volume
num_steps = len(self.base_color_map) if self.base_color_map is not None else len(color_map)
num_steps = 2655
s_min = 0
s_max = num_steps
if scalar_map is None:
if color_map is None and self.base_color_map is not None:
color_map = self.base_color_map
loop = range(num_steps)
noise = None
if isinstance(noise_amount, float) and noise_amount > 0:
noise = np.random.rand(num_steps) * noise_amount - noise_amount / 2
# Vedo works with nested lists:
# [region_id, [r, g, b]] for color, and [region_id, a] for alpha
if scalar_map is None:
# Standard volume that is not segmented
lut.SetRange(s_min, s_max)
lut.SetNumberOfTableValues(num_steps)
scalar_lut.SetRange(s_min, s_max)
scalar_lut.SetNumberOfTableValues(num_steps)
for r_id in loop:
color = vedo.colors.getColor(color_map[r_id])
color = np.array(color)
if noise is not None:
color = color + noise[r_id]
color = np.maximum(color, 0.0)
color = np.minimum(color, 1.0)
colors.append([r_id, color])
alpha = 1.0 if alpha_map is None else alpha_map[r_id]
if r_id == 0 and zero_is_transparent:
alpha = 0.0
alphas.append([r_id, alpha])
lut.SetTableValue(r_id, *color, alpha)
scalar_lut.SetTableValue(r_id, *color, alpha)
#scalar_map[r_id] = color_map[r_id]
else:
# Segmented volume
s_min, s_max = scalar_range
lut.SetRange(0, num_steps)
lut.SetNumberOfTableValues(num_steps)
color = None
for r_id in range(num_steps):
try:
value = scalar_map[r_id]
except Exception:
value = None
if value is None:# or s_min > value or s_max < value:
color = nan_rgba[:3]
alpha = nan_rgba[3]
else:
color = vedo.colorMap(value, color_map, s_min, s_max)
alpha = 1.0 if alpha_map is None else alpha_map[r_id]
if value == 0 and zero_is_transparent:
alpha = 0.0
colors.append([r_id, color])
alphas.append([r_id, alpha])
lut.SetTableValue(r_id, *color, alpha)
# Real scalar LUT, mainly as a reference for the user
# Here the colors resulting from the given scalar min to max
# are assigned to segmented values in the volume
mock_values = np.linspace(s_min, s_max, num_steps)
scalar_lut.SetRange(s_min, s_max)
scalar_lut.SetNumberOfTableValues(len(mock_values))
for r_id in range(len(mock_values)):
color = list(vedo.colorMap(mock_values[r_id], color_map, s_min, s_max))
alpha = 0.0 if mock_values[r_id] == 0 and zero_is_transparent else 1.0
scalar_lut.SetTableValue(r_id, *color, 1.0)
lut.Build()
scalar_lut.Build()
# Just to avoid confusion: the user can give a string as a color map, like 'viridis'
# but the real color map object is stored in self.color_map. The name 'viridis'
# is stored under self.color_map_function (if needed later on)
self.color_map_function = color_map
self.color_map = colors
self.alpha_map = alphas
self.scalar_map = scalar_map
self.mapped_lut = lut
self.scalar_lut = scalar_lut
def get_sorted_scalars(self):
"""
Get a numpy 2D array of key-value pairs sorted by value
:return: 2D array
"""
sorted_scalars = np.zeros((len(self.scalar_map), 2))
values = list(self.scalar_map.values())
keys = list(self.scalar_map.keys())
sorted_scalars[:, 0] = keys
sorted_scalars[:, 1] = values
sorted_mask = sorted_scalars[:, 1].argsort()
sorted_scalars = sorted_scalars[sorted_mask]
return sorted_scalars
class VolumeController():
"""
Wrapper class that handles both the volume and its slices
"""
def __init__(self, plot, model, initialize=True, clipping=True, slicer_box=True,
center_on_edges=False, alpha_unit_upper_offset=0.0, add_to_scene=True):
"""
Constructor
:param plot: Plot instance
:param model: VolumeModel instance
:param initialize: Whether the initalization
:param clipping: Whether clipping is enabled at init time
:param slicer_box: Whether the slicer box is enabled at init
:param center_on_edges: Whether the volume is offest by half a voxel or not
:param alpha_unit_upper_offset: The offset to apply to alpha unit computation.
If greater than 0, the volume will be less opaque
:param add_to_scene: Whether the volume is added to scene after init
"""
self.plot = plot
self.model = model
self.actor = None
self.picker = None
self.scalars = None
self.mask = None
self.bounding_mesh = None
self.alpha_unit_upper_offset = alpha_unit_upper_offset
self.alpha_factor = 0.001 # * self.model.resolution
self.clipping_planes = None
self.enable_volume_clipping = True
self.clipping_axes = []
self.slicers = OrderedDict()
self.slicers_selectable = False
self.scalar_bar = None
if initialize:
self.initialize(clipping, slicer_box, center_on_edges, add_to_scene)
#msg = 'Volume abs center', self.volume_center, 'position', np.array(self.volume_actor.pos())
#logging.info(msg)
def get_related_actors(self):
"""
Get all 3D actors related to this view (for registering it in the application)
:return: List of VTK objects
"""
actors = []
for slicer_id in self.slicers:
actor = self.slicers[slicer_id].actor
if actor is not None:
actors.append(actor)
for iso_id in self.model.isosurfaces:
actors.append(self.model.isosurfaces[iso_id])
actors.append(self.actor)
return actors
def initialize(self, clipping=True, slicer_box=True, center_on_edges=False, add_to_scene=True):
"""
Set the volume actor for visualization in VTK
:param clipping: Whether clipping is enabled
:param slicer_box: Whether the slicer box mode is enabled (6 clipping planes)
:param center_on_edges: Whether the volume's center is aligned to its edges
rather than the voxel center
:param add_to_scene: Whether the object is added to the scene
"""
self.build_actor(center_on_edges, add_to_scene)
self.initialize_picker()
if slicer_box:
self.initialize_slicer_box()
self.initialize_clipping_planes()
self.set_volume_clipping(clipping)
self.set_color_map()
'''
if use_mask:
self.mask = self.actor.clone()
self.mask.threshold(1, replace=1, replaceOut=0)
self.actor.mapper().SetMaskTypeToBinary()
self.actor.mapper().SetMaskInput(self.mask)
'''
def set_volume_visibility(self, on=True):
"""
Set volume visibility
:param on: Visibility boolean
"""
if self.actor is not None:
self.actor.SetVisibility(on)
def set_slices_visibility(self, on=True):
"""
Set the visibility of slices
:param on: Visibility boolean
"""
for slicer_id in self.slicers:
slicer_view = self.slicers.get(slicer_id)
slicer_view.actor.SetVisibility(on)
def get_slices_opacity(self):
"""
Get the opacity of slices (expected to be the same value for all slices).
A mean is computed over all slice alpha values, just in case they differ
:return: Alpha value
"""
value = 0
num_values = 0
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.actor is not None:
slice_alpha = slicer.actor.GetProperty().GetOpacity()
if slice_alpha is None:
continue
value += slice_alpha
num_values += 1
if num_values == 0 or value == 0:
return None
return value / num_values
def set_slices_opacity(self, value):
"""
Set the opacity of slices
:param value: Alpha value
"""
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.actor is not None:
slicer.actor.alpha(value)
def get_opacity(self):
"""
Get the relative opacity unit
:return: Float
"""
return self.get_relative_opacity_unit()
def get_relative_opacity_unit(self):
"""
Get the alpha unit relative value
:return: Float
"""
alpha_unit = self.actor.alphaUnit()
r = self.model.resolution
# Inverse function of set_opacity_unit()
value = 1.1 - (alpha_unit / r)**0.5
return value
def set_opacity(self, value):
"""
Set the opacity of the volume like in set_opacity_unit()
:param value: Opacity value between 0.0 and 1.0
:return: Resulting alpha unit
"""
self.set_opacity_unit(value)
def set_opacity_unit(self, value):
"""
Set the opacity of the volume by modifying its alpha unit (a VTK thing).
The alpha unit defines how transparent a voxel is to an incoming ray.
This method normalizes the range between 0.0 and 1.0 as it depends
on the resolution of the volume
:param value: Opacity value between 0.0 and 1.0
:return: Resulting alpha unit
"""
r = self.model.resolution
# 1.1 is chosen rather than 1.0 because when value == 1.0, that would
# mean that the volume is fully opaque and this yields artifacts with VTK
alpha_unit = (1.1 + self.alpha_unit_upper_offset - value)**2 * r
# vedo calls it "alpha" unit, vtk "opacity" unit. same-same!
self.actor.alphaUnit(alpha_unit)
return alpha_unit
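# Worked example of the mapping above (illustrative only; assumes the default
# alpha_unit_upper_offset of 0.0 and a hypothetical resolution r of 25):
#   value = 1.0  ->  alpha_unit = (1.1 - 1.0)**2 * 25 = 0.25   (nearly opaque)
#   value = 0.5  ->  alpha_unit = (1.1 - 0.5)**2 * 25 = 9.0
#   value = 0.0  ->  alpha_unit = (1.1 - 0.0)**2 * 25 = 30.25  (very transparent)
# get_relative_opacity_unit() applies the inverse, 1.1 - (alpha_unit / r)**0.5.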
def get_spacing(self):
"""
Get the spacing/resolution of the volume
"""
res = self.model.resolution
spacing = None
if isinstance(res, int) or isinstance(res, float):
spacing = np.array([res]*3)
elif len(res) == 3:
spacing = res
else:
raise ValueError(f'Given volume resolution {self.model.resolution} is invalid')
return spacing
def build_actor(self, center_on_edges=False, add_to_scene=True): #[1, 2]
"""
Set the volume actor for visualization in VTK
:param center_on_edges: Whether alignment by one voxel is applied
:param add_to_scene: Whether the object is added to the scene
"""
spacing = self.get_spacing()
self.actor = Volume(self.model.data, spacing=spacing, mapper='smart')
self.scalars = self.actor._data.GetPointData().GetScalars()
self.actor.name = self.model.name
self.actor.shade(False)
self.actor.mode(0)
self.actor.pickable(True)
self.set_interactive_subsampling(False)
if center_on_edges:
# Moving the volume by one voxel. This is possibly due to the use of custom spacing.
self.actor.pos(self.actor.pos() + spacing)
center = np.array(self.actor.pos()) + self.actor.center()
if np.linalg.norm(center - self.model.center) > 0:
#print('Adjusting volume center from', self.model.center, 'to', center)
self.model.center = center
self.set_opacity_unit(0.9)
self.actor.jittering(True)
#self.actor._mapper.AutoAdjustSampleDistancesOn()
#self.actor._mapper.SetBlendModeToAverageIntensity()
#self.actor._mapper.SetSampleDistance(100)
if add_to_scene:
self.plot.add(self.actor, render=False)
def set_position(self, position):
"""
Set the position of the volume
"""
self.actor.pos(position)
# TODO: we're entering unstable territory when we move the volume
# because there is not yet guaranteed support for updating the slices
# with the correct position
self.reset_clipping_planes()
def mirror_volume(self, axes):
"""
Mirror the volume on given axes
:param axes: A list of axes (either 0, 1, 2 or 'x', 'y', 'z') on which
the volume will be mirrored. Optional
"""
if axes is None or self.actor is None:
return
axes_str = ['x', 'y', 'z']
for axis in axes:
if isinstance(axis, int) and 0 <= axis <= 2:
axis = axes_str[axis]
if isinstance(axis, str) and len(axis) == 1:
self.actor.mirror(axis=axis.lower())
def initialize_picker(self, opacity_iso_value=0.0001):
"""
Initialize the volume picker
:param opacity_iso_value: Threshold that defines at what accumulated
opacity the picker hits the volume. In the case of a segmented volume,
you want to keep this value very low, like the default one.
"""
# As per C++ doc https://vtk.org/Wiki/VTK/Examples/Cxx/VTKConcepts/Scalars
# https://stackoverflow.com/questions/35378796/vtk-value-at-x-y-z-point
picker = vtk.vtkVolumePicker()
picker.PickCroppingPlanesOn()
picker.UseVolumeGradientOpacityOff()
picker.SetTolerance(opacity_iso_value)
# A low OpacityIsoValue is necessary in the case of segmented volumes
picker.SetVolumeOpacityIsovalue(opacity_iso_value)
picker.AddPickList(self.actor)
picker.PickFromListOn()
self.picker = picker
def initialize_slicer_box(self):
"""
Initialize 6 slicing planes as a box.
"""
for axis_id in range(6):
slicer_model = SlicerModel(axis=axis_id)
slicer_model.align_to_axis(axis_id, self.model.dimensions)
self.model.slicers.store(slicer_model)
# It's important in this case to have standalone=False
self.slicers[axis_id] = SlicerView(self.plot, self, slicer_model, standalone=False)
def update_slicer(self, slicer_id, value=None, normal=None):
"""
Update a given slicer with the given value
:param slicer_id: SlicerView id
:param value: Value or 3D point
:param normal: Normal
"""
slicer_view = self.slicers.get(slicer_id)
if slicer_view is None:
return
# This is an important part where the slicing plane is itself sliced by other planes
slicer_model = slicer_view.model
slicer_model.clipping_planes = self.get_clipping_planes(slicer_model.axis)
# Use given value (or point) and normal to guide the below code
result = slicer_model.update(value, normal)
if not result:
return
# Update slicing image
slicer_view.update()
def initialize_clipping_planes(self):
"""
Initialize X, Y and Z clipping planes with two planes per axis
for positive and negative slicing
"""
self.clipping_planes = vtk.vtkPlaneCollection()
slicer_models = self.model.slicers
for slicer_id in slicer_models:
self.clipping_planes.AddItem(vtk.vtkPlane())
self.reset_clipping_planes()
return
def get_clipping_planes(self, except_axis=None):
"""
Get the current clipping planes except the ones on the given axis
:param except_axis: Axis id to ignore. If None, all clipping planes will be returned
:return: vtkPlaneCollection
"""
if not isinstance(except_axis, int):
return self.clipping_planes
exceptions = [except_axis * 2, except_axis * 2 + 1]
planes = vtk.vtkPlaneCollection()
for plane_id in range(self.clipping_planes.GetNumberOfItems()):
if plane_id in exceptions:
continue
plane = self.clipping_planes.GetItem(plane_id)
planes.AddItem(plane)
return planes
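# Plane storage convention used throughout this class (see initialize_clipping_planes
# and SlicerModel.get_box_plane_id): two planes per axis, stored as
# [-X, +X, -Y, +Y, -Z, +Z], so axis k owns items 2*k and 2*k + 1.
# Illustrative example: get_clipping_planes(except_axis=1) returns a new collection
# holding items 0, 1, 4 and 5, i.e. everything except the two Y planes.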
def reset_clipping_planes(self):
"""
Reset clipping planes
"""
slicer_models = self.model.slicers
for slicer_id in slicer_models:
slicer_model = slicer_models[slicer_id]
plane_id = slicer_model.get_box_plane_id()
plane = self.clipping_planes.GetItem(plane_id)
plane.SetOrigin(slicer_model.origin + self.actor.pos())
plane.SetNormal(slicer_model.normal)
def clip_on_axis(self, position=None, axis=None, normal=None):
"""
Apply clipping on a single axis
:param position: Position
:param axis: Clipping axis (0 for X, 1 for Y, 2 for Z)
:param normal: Clipping plane normal; its sign on the given axis selects which of
the two clipping planes of that axis is updated
"""
axis_offset = 0
# This should already be sorted in the model but in case it isn't, we double check here
if normal is not None and normal[axis] < 0:
# This means that the given axis has two
# clipping planes and we take the negative one
axis_offset += 1
#position = self.model.dimensions - position
axis_storage_id = axis * 2 + axis_offset
plane = self.clipping_planes.GetItem(axis_storage_id)
plane.SetOrigin(position)
plane.SetNormal(normal)
def set_volume_clipping(self, on=None):
"""
Set volume clipping on or off.
:param on: Whether clipping is enabled or disabled. If None, then
the state is toggled.
"""
if on is None:
self.enable_volume_clipping = not self.enable_volume_clipping
else:
self.enable_volume_clipping = on
if self.enable_volume_clipping:
self.actor.mapper().SetClippingPlanes(self.clipping_planes)
else:
self.actor.mapper().SetClippingPlanes(None)
def clip_to_bounds(self, bounds):
"""
Clip the volume and move the slicing planes according to 6 boundary points
:param bounds: Six values in a list (xmin, xmax, ymin, ymax, zmin, zmax)
"""
planes = vtk.vtkPlanes()
planes.SetBounds(bounds)
# Normals are reversed with the above code
# so we fix that here with flip_normals=True
self.set_clipping_planes(planes, flip_normals=True)
def box_widget_update(self, widget=None, event=None):
"""
Clip the volume with the current box widget
:param widget: vtkBoxCutter
:param event: vtkEvent
"""
if widget is None:
return
planes = vtk.vtkPlanes()
widget.GetPlanes(planes)
self.set_clipping_planes(planes)
def set_clipping_planes(self, planes, flip_normals=False):
"""
Clip the volume and move the slicing planes according to the given planes
:param planes: vtkPlanes
:param flip_normals: Whether the plane normals are flipped before being applied
"""
vtk_n = planes.GetNormals()
vtk_pts = planes.GetPoints()
num_pts = vtk_pts.GetNumberOfPoints()
for plane_id in range(num_pts):
normal = vtk_n.GetTuple(plane_id)
origin = vtk_pts.GetPoint(plane_id)
plane = self.clipping_planes.GetItem(plane_id)
current_origin = np.array(plane.GetOrigin())
# We don't need to check the normal because
# we prevent box cutter rotation in our case
if np.linalg.norm(current_origin - origin) < 0.1:
continue
plane.SetOrigin(origin)
if flip_normals:
normal = np.array(normal)*-1
plane.SetNormal(normal)
self.update_slicer(plane_id, origin, normal)
self.clipping_planes.Modified()
self.actor.GetMapper().Update()
def set_alpha_map(self, alpha_map, alpha_factor=None):
"""
Set alpha map to the volume view
:param alpha_map: 2D list of scalar values and alpha values
:param alpha_factor: Alpha factor
"""
if alpha_map is None:
if self.model.luts.current is None:
return
alpha_map = self.model.luts.current.alpha_map
if alpha_factor is None:
alpha_factor = self.alpha_factor
if len(np.array(alpha_map).shape) > 1:
volume_alpha_map = np.ones_like(alpha_map).astype(float)
volume_alpha_map[:] = alpha_map[:]
volume_alpha_map[:, 1] *= alpha_factor
self.actor.alpha(volume_alpha_map)
else:
self.actor.alpha(np.array(alpha_map) * alpha_factor)
def set_color_map(self, color_map=None, alpha_map=None):
"""
Set the color and alpha map to the view objects
:param color_map: Nested list of scalar values and rgb colors
like [[0, [0.0, 0.0, 0.0]], [8, [0.5, 0.8, 0.3]], ...]
:param alpha_map: 2D list of scalar values and alpha values
"""
lut = self.model.luts.current
if color_map is None and lut is not None:
color_map = lut.color_map
if alpha_map is None and lut is not None:
alpha_map = lut.alpha_map
if color_map is None:
return
self.actor.cmap(color_map)
self.set_alpha_map(alpha_map)
if lut is not None:
for surface in self.model.isosurfaces:
surface._mapper.SetLookupTable(lut.opaque_lut)
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
slicer.apply_lut(lut.mapped_lut)
else:
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
slicer.set_color_map(color_map, alpha_map)
def disable_shading(self):
"""
Disable volume shading
"""
volumeProperty = self.actor.GetProperty()
volumeProperty.ShadeOff()
self.actor.SetProperty(volumeProperty)
def enable_shading(self, ambient=0.6, diffuse=0.8, specular=0.9):
"""
Enable volume shading
TODO: See if this method is useful
"""
volumeProperty = self.actor.GetProperty()
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
volumeProperty.SetAmbient(ambient)
volumeProperty.SetDiffuse(diffuse)
volumeProperty.SetSpecular(specular)
volumeProperty.SetScalarOpacityUnitDistance(1)
self.actor.SetProperty(volumeProperty)
def toggle_slices_visibility(self):
"""
Toggle slices visibility
"""
self.model.slices_visible = not self.model.slices_visible
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
self.update_slicer(slicer)
if slicer.actor is not None:
slicer.actor.SetVisibility(self.model.slices_visible)
def toggle_hollow(self):
"""
Toggle hollow mode for volume rendering. This is intended
to work only on segmented (annotated) volumes.
"""
volume_property = self.actor.GetProperty()
# Shout at VTK devs: it's twisted to name properties Disable and then have DisableOff...
disabled = bool(volume_property.GetDisableGradientOpacity())
if disabled:
volume_property.DisableGradientOpacityOff()
alpha_gradient = vtk.vtkPiecewiseFunction()
alpha_gradient.AddPoint(0, 0.0)
alpha_gradient.AddPoint(1, 0.75)
alpha_gradient.AddPoint(2, 1.0)
volume_property.SetGradientOpacity(alpha_gradient)
else:
volume_property.DisableGradientOpacityOn()
return not disabled
def get_value_from_xyz(self, position, normal_step=None, avoid_values=0, cast_to_int=True, none_as_zero=False):
"""
Get a scalar value from the volume with respect to XYZ coordinates and, optionally, a normal step,
that is the normal on which to probe multiplied by the distance you want to travel further into
the volume to pick a correct value. Often the "surface point" on a volume with non uniform transparency
is at the boundary between transparent (let's say a 0 value is transparent) and more opaque parts.
So you need to go further into the "cloud" so to speak, in order to find the values you want.
:param position: 3D array
:param normal_step: A vector normal multiplied by the lookup distance, in case the raw position yields
bad or unwanted results
:param avoid_values: Try and find other values than this
:param cast_to_int: Whether the value should be cast to integer
:param none_as_zero: Whether a missing value (None) is returned as 0 instead
:return: Scalar value
"""
if isinstance(avoid_values, int) or isinstance(avoid_values, float):
avoid_values = [avoid_values]
# TODO: see if this is faster? To be tested
# ijk_result = [0.0, 0.0, 0.0]
# volume_actor._data.TransformPhysicalPointToContinuousIndex(xyz, ijk_result)
# volume_actor._data.GetPoint(ijk_result)
pt_id = self.actor._data.FindPoint(*position)
valid_id = 0 < pt_id < self.scalars.GetNumberOfValues()
value = self.scalars.GetValue(pt_id) if valid_id else None
if not valid_id or (value in avoid_values):
if normal_step is not None:
position += normal_step
pt_id = self.actor._data.FindPoint(*position)
valid_id = 0 < pt_id < self.scalars.GetNumberOfValues()
value = self.scalars.GetValue(pt_id) if valid_id else None
if cast_to_int and value is not None:
value = int(value)
if value is None and none_as_zero:
value = 0
return value
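# Minimal usage sketch (hypothetical coordinates; `controller` is a VolumeController
# and `normal` a unit vector, both assumed to exist in the calling code):
#   step = normal * controller.model.resolution * 4
#   value = controller.get_value_from_xyz([5000.0, 3200.0, 4100.0], normal_step=step)
# Stepping along the normal avoids landing on a transparent 0-valued voxel right at
# the surface of the "cloud" described above.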
def raycast(self, origin, screen_position):
"""
Shorthand for pick() method
"""
return self.pick(origin, screen_position)
def pick(self, origin, screen_position):
"""
Find the nearest intersection – even on sliced volume – with the ray formed
by an origin and a screen-space position (given by VTK when you click on an actor)
:param origin: Origin of the vector
:param screen_position: 2D position on screen. This is given by vtk events like MouseRelease
:return: The nearest position and its related value queried in the volume image
"""
self.picker.Pick(*screen_position[:2], 0, self.plot.renderer)
position = np.array(self.picker.GetPickPosition())
ray = position - origin
distance = np.linalg.norm(ray)
normal = ray / distance
# Go half a voxel further to make sure we don't hit "void"
vol_position = position # + normal * self.model.resolution / 2
probe_position = position + normal * self.model.resolution * 10
closest_dist = distance
slice_position = None
# See if the line hits any of the slicers (that are image planes)
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.got_slice:
hits = slicer.actor.intersectWithLine(origin, probe_position)
if len(hits) != 1:
continue
new_dist = np.linalg.norm(position - hits[0])
if new_dist < closest_dist and new_dist < self.model.resolution * 2:
closest_dist = new_dist
slice_position = hits[0]
if slice_position is None:
position = vol_position
else:
position = slice_position
value = self.get_value_from_xyz(position, normal * self.model.resolution * 4)
return position, value
def add_probe(self, origin, destination, resolution=40, radius=10, color_map=None,
screen_space=True, min_v=None, max_v=None, add_to_scene=True):
"""
Add a series of points along a line probe
:param origin: Probe origin
:param destination: Probe destination point
:param resolution: Number of (equidistant) points that will be probed along that line
:param radius: Radius of the points
:param color_map: Scalars color map
:param screen_space: Whether the points are screen space or spheres
:param min_v: Min scalar value
:param max_v: Max scalar value
:param add_to_scene: Whether the new probe is added to scene
:return: Points
"""
if color_map is None:
color_map = self.model.luts.current.color_map
positions, values = self.probe(origin, destination, resolution)
points_obj = obj.Points(positions, values=values, radius=radius, screen_space=screen_space,
color_map=color_map, min_v=min_v, max_v=max_v)
points_obj.origin = origin
points_obj.destination = destination
# Dynamic properties assignment
points_obj.target = self.actor
points_obj.target_controller = self
if add_to_scene:
self.plot.add(points_obj)
return points_obj
def update_probe(self, origin, destination, points_obj):
"""
Update a probe with given start and end points
:param origin: Start point
:param destination: End point
:param points_obj: Points object
"""
resolution = points_obj._polydata.GetPoints().GetNumberOfPoints()
positions, values = self.probe(origin, destination, resolution)
points_obj.update_data(positions, values)
def probe(self, origin, destination, resolution=40):
"""
Probe a volume with a line
:param origin: Origin of the line probe
:param destination: Destination of the line probe
:param resolution: Number of point samples along the probe
:return: Positions and values
"""
origin = np.array(origin)
destination = np.array(destination)
distance = np.linalg.norm(destination - origin)
ray = destination - origin
ray_norm = ray / distance
step = distance / resolution
positions = [origin + ray_norm * p_id * step for p_id in range(resolution)]
values = np.array([self.get_value_from_xyz(point, none_as_zero=True) for point in positions])
return positions, values
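# Illustrative example of the sampling above: with resolution=4 and a probe going
# from [0, 0, 0] to [100, 0, 0], the sampled positions are the equidistant points
# [0, 0, 0], [25, 0, 0], [50, 0, 0] and [75, 0, 0] (the destination itself is not
# sampled), and `values` holds the scalar found at each of them (0 where nothing is hit).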
def set_interactive_subsampling(self, on=False):
"""
Set volume subsampling on or off.
This is enabled by default in VTK and we disable it by default in IBLViewer
:param on: Whether volume subsampling in interactive mode is on or off
"""
#self.plot.window.SetDesiredUpdateRate(0)
#self.actor._mapper.SetInteractiveUpdateRate(0)
self.model.interactive_subsampling = on
self.actor._mapper.SetAutoAdjustSampleDistances(on)
if on:
self.actor._mapper.InteractiveAdjustSampleDistancesOn()
else:
self.actor._mapper.InteractiveAdjustSampleDistancesOff()
def isosurface(self, label, exceptions=[0], force_rebuild=False, set_current=True, to_int=True, split_meshes=True):
"""
Creates a surface mesh (isosurface) of a segmented/labelled volume for the given value.
Unlike general isosurfacing, this method extracts only the surface mesh of the
desired region/label/segmentation, not of all values from 0 to label.
:param label: Label (scalar) value found in the volume
:param exceptions: If the label is found in the exceptions list, isosurfacing will not occur
:param force_rebuild: Whether rebuilding is forced in case we find an existing mesh for the given label
:param set_current: Whether the label is set as the current one in the model
:param to_int: Whether the label is cast to integer
:param split_meshes: Whether we split meshes when multiple ones are found
:return: A list of all manifold meshes for the given label
"""
if label is None or label in exceptions:
return
if to_int:
label = int(label)
existing_meshes = self.model.isosurfaces.get(label)
if existing_meshes is not None and not force_rebuild:
return existing_meshes
lut = self.model.luts.current
simple_lut = vtk.vtkLookupTable()
simple_lut.SetNumberOfColors(1)
simple_lut.SetTableRange(0, 1)
simple_lut.SetScaleToLinear()
simple_lut.SetTableValue(0, 0, 0, 0, 0)
simple_lut.SetTableValue(1, *lut.mapped_lut.GetTableValue(label))
simple_lut.Build()
# Generate object boundaries from labelled volume
discrete = vtk.vtkDiscreteMarchingCubes()
discrete.SetInputData(self.actor.imagedata())
discrete.GenerateValues(1, label, label)
smoothing_iterations = 15
pass_band = 0.001
feature_angle = 120.0
smoother = vtk.vtkWindowedSincPolyDataFilter()
smoother.SetInputConnection(discrete.GetOutputPort())
smoother.SetNumberOfIterations(smoothing_iterations)
smoother.BoundarySmoothingOff()
smoother.FeatureEdgeSmoothingOff()
smoother.SetFeatureAngle(feature_angle)
smoother.SetPassBand(pass_band)
smoother.NonManifoldSmoothingOn()
smoother.NormalizeCoordinatesOn()
smoother.Update()
self.model.isosurfaces[label] = []
#splitter = vtk.vtkExtractPolyDataGeometry()
if split_meshes:
splitter = vtk.vtkPolyDataConnectivityFilter()
splitter.SetInputConnection(smoother.GetOutputPort())
splitter.SetExtractionModeToAllRegions()
splitter.ColorRegionsOn()
splitter.Update()
for region_id in range(splitter.GetNumberOfExtractedRegions()):
#splitter.AddSpecifiedRegion(region_id)
#splitter.Update()
#poly = vtk.vtkPolyData()
#poly.ShallowCopy(splitter.GetOutput())
threshold = vtk.vtkThreshold()
threshold.SetInputConnection(splitter.GetOutputPort())
threshold.ThresholdBetween(region_id, region_id)
threshold.Update()
actor = vedo.Mesh(threshold.GetOutput())
#actor._mapper.SetScalarRange(min_value, lut.scalar_max)
#actor._mapper.SetUseLookupTableScalarRange(True)
actor._mapper.SetLookupTable(simple_lut)
actor._mapper.ScalarVisibilityOn()
actor.name = 'Isosurface_' + str(label)
self.model.isosurfaces[label].append(actor)
#actor.cmap(lut.scalar_lut, np.ones(poly.GetNumberOfVerts())*label)
else:
poly = smoother.GetOutput()
actor = vedo.Mesh(poly)
actor._mapper.SetLookupTable(simple_lut)
actor._mapper.ScalarVisibilityOn()
actor.name = 'Isosurface_' + str(label)
self.model.isosurfaces[label].append(actor)
'''
pdnorm = vtk.vtkPolyDataNormals()
pdnorm.SetInputData(smoother.GetOutput())
pdnorm.ComputePointNormalsOn()
pdnorm.ComputeCellNormalsOn()
pdnorm.FlipNormalsOff()
pdnorm.ConsistencyOn()
pdnorm.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(smoother.GetOutputPort())
mapper.SetLookupTable(lut.scalar_lut)
mapper.SetScalarRange(min_value, lut.scalar_max)
'''
if set_current:
self.model.isosurfaces.set_current(label)
return self.model.isosurfaces[label]
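# Typical usage sketch (assumes `controller` is an initialized VolumeController on a
# segmented volume with a current LUTModel; the label 315 is purely hypothetical):
#   meshes = controller.isosurface(315, split_meshes=True)
#   for mesh in meshes:
#       controller.plot.add(mesh)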
@dataclass
class SlicerModel:
PREFIX = '[Slicer]_'
MIN_SLAB_THICKNESS = 1.0 #um
__count = 0
def unique_name():
SlicerModel.__count += 1
return f'{SlicerModel.PREFIX}_{SlicerModel.__count}'
name: str = field(default_factory=unique_name)
# 0, 1 or 2. See the normal for axis orientation
axis: int = None
value: float = 0.0
bounds: np.ndarray = None
#thickness: float = 0.0
origin: np.ndarray = np.array([0.0, 0.0, 0.0])
normal: np.ndarray = np.array([1.0, 0.0, 0.0])
clipping_planes: vtk.vtkPlaneCollection = None
def get_box_plane_id(self):
"""
Get the plane id
:return: Int
"""
if self.axis is None:
return
offset = 0 if self.normal[self.axis] < 0 else 1
return self.axis * 2 + offset
def get_axis_aligned_info(self, vtk_axis):
"""
VTK stores box clipping planes in the order:
-X to +X: 0, 1
-Y to +Y: 2, 3
-Z to +Z: 4, 5
This method retrieves what is the XYZ axis (0, 1 or 2)
and its orientation sign
:return: Int axis and float orientation
"""
orientation = -1.0 if vtk_axis % 2 == 0 else 1.0
axis = (vtk_axis - vtk_axis % 2) // 2
return axis, orientation
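# Mapping produced by the two lines above for each VTK box plane id:
#   vtk_axis 0 -> (axis 0, -1.0)    vtk_axis 1 -> (axis 0, +1.0)
#   vtk_axis 2 -> (axis 1, -1.0)    vtk_axis 3 -> (axis 1, +1.0)
#   vtk_axis 4 -> (axis 2, -1.0)    vtk_axis 5 -> (axis 2, +1.0)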
def align_to_axis(self, axis, dimensions=None):
"""
Set the axis of the slicer
:param axis: See parameter vtk_axis in SlicerModel.get_axis_aligned_info()
:param dimensions: Dimensions of the volume
"""
if not isinstance(axis, int):
return
normal = np.zeros(3).astype(float)
xyz_axis, orientation = self.get_axis_aligned_info(axis)
normal[xyz_axis] = orientation
self.axis = xyz_axis
if dimensions is not None and orientation < 0:
self.origin = np.zeros(3)
self.origin[xyz_axis] = dimensions[xyz_axis]
self.normal = normal
def flip_normal(self):
"""
Flip the normal of the slicer
"""
self.normal *= -1.0
self.check_normal()
if isinstance(self.axis, int):
self.axis *= -1
def check_normal(self):
"""
Check if the normal is axis-aligned.
If not, the axis is set to None.
"""
zeros = self.normal == 0
if len(self.normal[zeros]) >= 2:
self.axis = 0
def update(self, value=None, normal=None, axis=None):
"""
Update slicer
:param value: Origin of the slicing plane
:param normal: Normal of the slicing plane
:param axis: Axis, if the plane is axis-aligned
:return: True if model changed, False if it didn't
"""
if not(isinstance(value, int) or isinstance(value, float)):
if normal is None:
normal = self.normal
if normal is None:
return False
if normal[1] == 0 and normal[2] == 0:
axis = 0 #if normal[0] > 0 else 1
elif normal[0] == 0 and normal[2] == 0:
axis = 1 #if normal[1] > 0 else 1
elif normal[0] == 0 and normal[1] == 0:
axis = 2 #if normal[2] > 0 else 1
if axis is not None:
value = value[axis]
if axis is None:
axis = self.axis
if self.value == value:
return False
if axis is not None:
self.value = value
self.origin = np.array(normal) * value
else:
self.value = None
self.origin = value
self.normal = normal
self.axis = axis
return True
class SlicerView():
slices = {}
def __init__(self, plot, volume_view, slicer_model, standalone=True):
"""
Constructor
:param plot: Plot instance
:param volume_view: VolumeView instance
:param slicer_model: SlicerModel instance
:param standalone: Whether the slice is a standalone actor that
can be clicked. Set this to False if you want to use transparency,
at the expense that because of a VTK bug, you won't be able to
click on it anymore, requiring you to code another way of detecting
where the user clicked. See more in initialize_mapper()
"""
self.plot = plot
self.volume_view = volume_view
self.model = slicer_model
self.actor = None
self.filter = None
self.reslice = None
self.slice_type = -1
self.depth_peeling_enabled = None
self.standalone = standalone
self.got_slice = False
self.color_map = None
self.alpha_map = None
self.initialize()
def initialize(self, render=False):
"""
Initialize the slicer object
"""
if self.filter is None:
self.filter = vtk.vtkImageDataGeometryFilter()
if self.actor is None:
self.actor = vedo.Mesh(self.filter.GetOutput())
# Adding empty actor so that it's updated later on
self.plot.add(self.actor, render=render)
self.actor.lighting('off')
self.actor.name = self.model.name
self.initialize_mapper()
def initialize_mapper(self):
"""
Initialize the object mapper
"""
mapper = self.actor._mapper
mapper.SetScalarModeToUsePointData() #SetScalarModeToUsePointFieldData
mapper.SetColorModeToMapScalars()
mapper.ScalarVisibilityOn()
# We operate on static volumes thanks to the double LUT mapping implemented here
mapper.SetStatic(True)
# Without using scalar range, the mapping will be off
mapper.SetUseLookupTableScalarRange(True)
# We prevent this actor from being pickable as a result of the bug described below
# when we want to use transparency on the slice.
self.actor.pickable(self.standalone)
if self.standalone:
# There is a bug in VTK 9 that prevents clicking on transparent objects
# as reported on vedo's tracker https://github.com/marcomusy/vedo/issues/291
# The "Force opaque fix" below should be gone with the next VTK update hopefully.
# In the meantime, we use this.
# TODO: remove this when this bug is fixed in VTK
self.actor.ForceOpaqueOn()
else:
# We bypass the transparent selection bug when a VolumeView has multiple slicers
# like in box mode because the click detection occurs on the volume and we perform
# an additional test to see if a slicer yields a nearby result. If it does,
# the result is like clicking on the slice and we get transparency for free.
pass
# Make sure we have depth peeling activated, otherwise transparency with volumes
# will look weird and in the wrong order
self.plot.renderer.UseDepthPeelingOn()
self.plot.renderer.UseDepthPeelingForVolumesOn()
segmented = self.volume_view.model.is_segmented()
if segmented:
# This very line below will mess up the entire slice coloring if:
# - you have a segmented volume and this is set to True
# - you have a non-segmented (like raw MRI, CT) volume and this is set to False
mapper.SetInterpolateScalarsBeforeMapping(not segmented)
mapper.Update()
def set_color_map(self, color_map, alpha_map=None):
"""
Set a color map to the slice
:param color_map: Color map, can be a string, a list of colors or more.
See vedo documentation.
"""
self.color_map = color_map
if alpha_map is not None:
self.alpha_map = alpha_map
if self.got_slice and color_map is not None:
self.actor.cmap(self.color_map, alpha=self.alpha_map)
def set_slice_type(self, slice_type):
"""
Set the slice type. 0 for axial, 1 for free slicing
:param slice_type: Int value
"""
if slice_type == 0 and self.slice_type != slice_type:
self.slice_type = slice_type
self.filter.SetInputData(self.volume_view.actor.imagedata())
elif slice_type == 1 and self.slice_type != slice_type:
self.slice_type = slice_type
self.filter.SetInputData(self.reslice.GetOutput())
def slice_on_normal(self, origin, normal):
"""
Slice a volume with a plane oriented by the given normal.
This allows slicing in all directions.
:param origin: Origin of the slicing plane
:param normal: Normal of the slicing plane
:return: Mesh object with the slice as an image texture
"""
'''
mapper = vtk.vtkImageResliceMapper()
mapper.SetInputData(self.volume_view.actor._data)
mapper.SliceFacesCameraOff()
mapper.SliceAtFocalPointOff()
mapper.JumpToNearestSliceOn()
mapper.SetImageSampleFactor(2)
mapper.BorderOn()
mapper.BackgroundOff()
mapper.UpdateInformation()
mapper.GetSlicePlane().SetOrigin(*origin)
mapper.GetSlicePlane().SetNormal(*normal)
mapper.GetSlicePlane().Modified()
mapper.Modified()
mapper.Update()
self.actor = vtk.vtkImageSlice()
self.actor.SetMapper(mapper)
prop = vtk.vtkImageProperty()
if True:
prop.SetInterpolationTypeToLinear()
else:
prop.SetInterpolationTypeToNearest()
self.actor.SetProperty(prop)
return
'''
if self.reslice is None:
reslice = vtk.vtkImageReslice()
reslice.SetInputData(self.volume_view.actor._data)
#reslice.SetInputData(image)
reslice.SetOutputDimensionality(2)
reslice.SetAutoCropOutput(False)
#reslice.SetInterpolationModeToLinear()
reslice.SetInterpolationModeToNearestNeighbor()
reslice.SetSlabNumberOfSlices(1)
reslice.SetOutputSpacing(self.volume_view.get_spacing())
reslice.ReleaseDataFlagOn()
self.reslice = reslice
self.set_slice_type(1)
M, T = utils.get_transformation_matrix(origin, normal)
self.reslice.SetResliceAxes(M)
self.reslice.Update()
self.filter.Update()
if self.actor is None:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
else:
self.actor._update(self.filter.GetOutput())
self.initialize_mapper()
self.actor.SetOrientation(T.GetOrientation())
self.actor.SetPosition(origin)
self.got_slice = True
return self.actor
def x_slice(self, i):
"""
Extract the slice at index `i` of volume along x-axis.
:param i: I index
"""
self.set_slice_type(0)
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if i <= 1 or i > nx - 1:
return False
self.filter.SetExtent(i, i, 0, ny, 0, nz)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def y_slice(self, j):
"""
Extract the slice at index `j` of volume along y-axis.
:param j: J index
"""
self.set_slice_type(0)
#nx, ny, nz = self.volume_view.model.dimensions / resolution
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if j <= 1 or j > ny - 1:
return False
self.filter.SetExtent(0, nx, j, j, 0, nz)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def z_slice(self, k):
"""
Extract the slice at index `k` of volume along z-axis.
:param k: K index
"""
self.set_slice_type(0)
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if k <= 1 or k > nz - 1:
return False
self.filter.SetExtent(0, nx, 0, ny, k, k)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def slice_on_axis(self, value=None, normal=None, axis=None, use_reslice=False):
"""
Slice on standard X, Y or Z axis
:param value: Value on the given axis
:param normal: Axis normal, can be either +1.0 or -1.0 along that axis
:param axis: Axis integer, 0 for X, 1 for Y, 2 for Z
:param use_reslice: if True, this enables vtkImageReslice which is useful when
the normal is not aligned to either X, Y or Z. If you use it on an axis-aligned
normal, some color inaccuracies will appear if you don't tweak the vtkImageResliceMapper.
This is why the default is False.
:return: Result boolean, whether the slice occurred or not
"""
resolution = self.volume_view.model.resolution
volume_dimensions = self.volume_view.model.dimensions
'''
if normal[axis] < 0:
if value > 0:
# Make value consistent with given normal.
value *= normal[axis]
value = volume_dimensions[axis] + value
'''
in_volume_slice = int(value) // resolution
if use_reslice:
self.slice_on_normal(normal * value, normal)
return
if axis == 0:
result = self.x_slice(in_volume_slice)
elif axis == 1:
result = self.y_slice(in_volume_slice)
elif axis == 2:
result = self.z_slice(in_volume_slice)
return result
def update(self):
"""
Update slice object according to data in the model
"""
had_slice = self.got_slice
result = True
if isinstance(self.model.axis, int) and 0 <= self.model.axis <= 2:
result = self.slice_on_axis(self.model.value, self.model.normal, self.model.axis)
else:
self.slice_on_normal(self.model.origin, self.model.normal)
if not result:
self.plot.remove(self.actor)
self.got_slice = False
return
#self.actor.pos(*(self.volume_view.actor.pos()-self.actor.pos()))
lut = self.volume_view.model.luts.current
if lut is not None:
'''
This is VTK for you...a mesh can use a vtkLookupTable for RGBA mapping
BUT volumes require vtkColorTransferFunction (RGB) and vtkPiecewiseFunction (alpha)
So we have to put a color map, alpha map and a vtkLookupTable
built from both maps in a LUTModel.
Alternatively, we could update the LUT with alpha values but it's a pain.
ctf = self.volume_view.actor.GetProperty().GetRGBTransferFunction()
lut = vedo.utils.ctf2lut(self.volume_view.actor)
otf = self.volume_view.actor.GetProperty().GetScalarOpacity
# using "ctf" would work only for colors, not for transparency!
self.apply_lut(ctf)
'''
self.apply_lut(lut.mapped_lut)
else:
if self.alpha_map is None:
self.actor.cmap(self.color_map)
else:
self.actor.cmap(self.color_map, alpha=self.alpha_map)
if self.model.clipping_planes is not None:
self.actor.mapper().SetClippingPlanes(self.model.clipping_planes)
if not had_slice:
self.plot.add(self.actor, render=True)
def apply_lut(self, lut=None):
"""
Apply a LUT to the volume
:param lut: vtkLookupTable
:param actor: The actor to receive this
"""
if self.actor is None or lut is None:
return
mapper = self.actor._mapper
mapper.SetLookupTable(lut)
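# End-to-end usage sketch for the classes above (hypothetical names: `plot` is the
# application's vedo plotter, `volume_model` a VolumeModel that already holds the data,
# resolution and LUTs, and `camera_origin` / `click_xy` are whatever the event handler
# provides; none of them is defined in this excerpt):
#   controller = VolumeController(plot, volume_model, initialize=True)
#   controller.set_opacity(0.8)
#   controller.update_slicer(0, value=2000)            # move the -X slicing plane
#   position, value = controller.pick(camera_origin, click_xy)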
hexsha: 4a76ff4e7600c0692264f843891e33f896e8b3a4 | size: 12,670 | ext: py | lang: Python
path: modeling/dataset.py | repo: LaudateCorpus1/ml-cread | repo head: b5d5aa87faa0ddad0b41b6b0672395a8bf6147ae | licenses: ["AML"]
stars: 18 (2021-05-25T17:06:46.000Z to 2021-11-08T09:47:48.000Z) | issues: null | forks: 6 (2021-06-03T21:29:34.000Z to 2022-03-26T11:38:37.000Z)
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2021 Apple Inc. All Rights Reserved.
#
'''
Dataset file
'''
import sys
import time
import json
import copy
from itertools import chain
from tqdm import tqdm, trange
import torch
from torch.utils.data import DataLoader, RandomSampler
SPECIAL_TOKENS = {
"bos_token": "<BOS>",
"eos_token": "<EOS>",
"pad_token": "<PAD>",
"sep_token": "<SEP>",
"additional_special_tokens": ["<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"]
}
SPECIAL_TOKENS_VALUES = ["<BOS>", "<EOS>", "<PAD>", "<SEP>", "<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"]
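# Sketch of how these special tokens are expected to be registered with the GPT-2
# tokenizer before this Dataset is built (assumed to live in the training script,
# which is not part of this file):
#   tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
#   tokenizer.add_special_tokens(SPECIAL_TOKENS)
#   model.resize_token_embeddings(len(tokenizer))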
class Dataset(torch.utils.data.Dataset):
def __init__(self, args, tokenizer, data_type, generation, data_size):
assert data_type in ['train', 'dev', 'test']
self.args = args
self.data_size = data_size
self.tokenizer = tokenizer
self.data_type = data_type
self.generation = generation
self._get_special_token_ids()
self._create_examples()
def _get_special_token_ids(self):
self.SPECIAL_TOKENS = SPECIAL_TOKENS
self.SPECIAL_TOKENS_VALUES = SPECIAL_TOKENS_VALUES
self.bos_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["bos_token"])
self.eos_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["eos_token"])
self.pad_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["pad_token"])
self.sep_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["sep_token"])
# mention detection vocab
self.mc_cl2idx = {'<N>': 0, '<M>': 1, '</M>': 2} # <N>: none, <M>: start of mention, "</M>": end of mention
self.mc_idx2cl = {v: k for k, v in self.mc_cl2idx.items()}
def prepare_reference_label(self, word_label_index, wordId2tokenId, input_ids):
'''
record the index of start/end of mention and reference in the input utterance;
this info will be used as the attention signal in the reference resolution step
'''
reconstruct_sentence = self.tokenizer.convert_ids_to_tokens(input_ids)
reconstruct_sentence = [token.replace('Ġ', '') for token in reconstruct_sentence]
token_label_index = []
for start_end_link in word_label_index:
for link_meta in start_end_link:
attention_word_idx, mention_word_idx = link_meta['attention_idx'], link_meta['mention_idx']
if link_meta['mention_type'] == 'start':
attention_token_idx = wordId2tokenId[attention_word_idx][0]
else: # end
attention_token_idx = wordId2tokenId[attention_word_idx][-1]
for mention_token_idx in wordId2tokenId[mention_word_idx]:
link = {}
link['mention_token_idx'] = mention_token_idx
link['attention_token_idx'] = attention_token_idx
assert reconstruct_sentence[mention_token_idx] in link_meta['mention_word']
assert reconstruct_sentence[attention_token_idx] in link_meta['attention_word']
token_label_index.append(link)
return token_label_index
def prepare_binary_label(self, input_ids, wordId2tokenId, binary_rewrite, curr_end_token_idx):
''' only the token at the start of rewriting receives the binary signal '''
binary_label = [-100] * len(input_ids)
assert isinstance(binary_rewrite, bool)
if binary_rewrite == True:
binary_label[curr_end_token_idx] = 1 # rewrite
else:
binary_label[curr_end_token_idx] = 0 # not rewrite
return binary_label
def prepare_mention_label(self, input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx):
'''
get label index for mention detection
only the tokens of the current utterance receive a signal, everywhere else gets -100
'''
mention_label = [-100] * len(input_ids)
curr_start_idx = wordId2tokenId[curr_start_idx][0]
curr_end_idx = wordId2tokenId[curr_end_idx-1][-1] + 1
# align class <N> (none) to everywhere in the current utterance first
mention_label[curr_start_idx: curr_end_idx] = [ self.mc_cl2idx['<N>'] ] * (curr_end_idx-curr_start_idx)
for start_end_link in word_label_index: # iterate over links in one example
for link_meta in start_end_link: # iterate over start and end of a link
idx = link_meta['mention_idx']
if link_meta['mention_type'] == 'start': # align class <M> (start of mention)
for idx in wordId2tokenId[idx]:
mention_label[idx] = self.mc_cl2idx['<M>']
else: # align class </M> (end of mention)
idx = wordId2tokenId[idx][-1]
mention_label[idx] = self.mc_cl2idx['</M>']
return mention_label, curr_start_idx, curr_end_idx
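# Illustrative example (hypothetical positions): for a current utterance spanning
# token positions 10..14 with a mention covering tokens 12..13, mention_label looks like
#   [-100, ..., -100, 0, 0, 1, 2, 0, -100, ...]
# i.e. <N> (0) everywhere inside the current utterance, <M> (1) at the mention start,
# </M> (2) at its end, and -100 (ignored by the loss) outside the current utterance.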
def _check_label_index(self, whole_input, links):
''' sanity check for index correctness '''
seq = whole_input.split()
for link in links:
for start_or_end in link:
for word_type in ['mention', 'attention']:
assert seq[start_or_end['{}_idx'.format(word_type)]] == start_or_end['{}_word'.format(word_type)]
def _create_examples(self):
if self.data_type == 'train':
data_file = self.args.train_file
elif self.data_type == 'dev':
data_file = self.args.dev_file
else:
data_file = self.args.test_file
with open(data_file) as f:
data = json.load(f)
self.examples = []
for example_num, example in enumerate(tqdm(data, disable=self.args.disable_display)):
if self.data_size != -1 and example_num == self.data_size:
break
# get data
context = example['dialogue context'] # context, list of str
curr_utt = example['current utterance'] # current utterance, str
rewt_utt = example['rewrite utterance'] # rewrite utterance, str
word_label_index = example['link index'] # index of mention/reference span
binary_rewrite = example['rewrite happen'] # binary label for rewrite or not, bool
# prepare input sequence to model
whole_input = copy.deepcopy(context)
whole_input.append(curr_utt)
curr_start_idx = sum([len(s.split()) for s in context]) # the (word) start idx of current utt
curr_end_idx = curr_start_idx + len(curr_utt.split())
whole_input = " ".join(whole_input)
self._check_label_index(whole_input, word_label_index)
input_ids, wordId2tokenId, tokenId2wordId = self.tokenize_with_map(whole_input)
if rewt_utt == "":
rewt_utt_ids = []
else:
rewt_utt_ids = self.tokenizer(rewt_utt)['input_ids'] # list
target_utt_ids = rewt_utt_ids
target_utt_len = len(target_utt_ids)
if not self.generation:
# input seq: CTX <CUR> current utterance <SEP> rewritten utterance <EOS>
input_ids = input_ids + [self.sep_id] + target_utt_ids + [self.eos_id]
# mention detection signal
mention_label, curr_start_token_idx, curr_end_token_idx = \
self.prepare_mention_label(input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx)
# reference resolution signal
reference_label_index = self.prepare_reference_label(word_label_index, wordId2tokenId, input_ids)
# binary classification of rewriting signal
binary_label = self.prepare_binary_label(input_ids, wordId2tokenId, binary_rewrite, curr_end_token_idx)
# rewriting signal
ignore_len = len(input_ids) - target_utt_len - 1 # eos_id
label_ids = [-100] * ignore_len + target_utt_ids + [self.eos_id]
assert len(input_ids) == len(label_ids)
else: # generation
# <sep> is given at first step during decoding
input_ids = input_ids
label_ids = None
mention_label, curr_start_token_idx, curr_end_token_idx = \
self.prepare_mention_label(input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx)
reference_label_index = self.prepare_reference_label(word_label_index, wordId2tokenId, input_ids)
binary_label = None
self.examples.append({
'input_ids': input_ids, # list of ids
'label_ids': label_ids, # list of ids
'mention_label_ids': mention_label,
'curr_start_token_idx': curr_start_token_idx,
'curr_end_token_idx': curr_end_token_idx,
'reference_label': reference_label_index,
'wordId2tokenId': wordId2tokenId,
'tokenId2wordId': tokenId2wordId,
'context': context,
'curr_utt': curr_utt,
'whole_input': whole_input,
'rewt_utt': rewt_utt,
'example_id': example['example index'],
'spk': example['speaker'],
'coref_label': word_label_index,
'binary_label_ids': binary_label,
'binary_rewrite': binary_rewrite
})
print('Data Statistics: {} -> {} examples'.format(self.data_type, len(self.examples)))
def _pad(self, sentences, pad_id):
'''
sentences: a list of list with ids
'''
max_len = max((map(len, sentences)))
attention_mask = []
sentences_pad = []
for sent in sentences:
pad_len = max_len - len(sent)
sentences_pad.append( sent + [pad_id]*pad_len )
attention_mask.append( [1]*len(sent) + [0]*pad_len)
return sentences_pad, attention_mask
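# Example of what _pad produces (toy values): given sentences [[5, 6, 7], [8, 9]]
# and pad_id 0, it returns
#   sentences_pad  = [[5, 6, 7], [8, 9, 0]]
#   attention_mask = [[1, 1, 1], [1, 1, 0]]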
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return self.examples[index]
def collate_fn(self, batch):
input_ids = [example['input_ids'] for example in batch]
input_ids, attention_mask = self._pad(input_ids, self.pad_id)
input_ids, attention_mask = torch.tensor(input_ids).long().to(self.args.device), torch.tensor(attention_mask).long().to(self.args.device)
if not self.generation:
label_ids = [example['label_ids'] for example in batch]
label_ids, _ = self._pad(label_ids, -100)
label_ids = torch.tensor(label_ids).long().to(self.args.device)
mention_label_ids = [example['mention_label_ids'] for example in batch]
mention_label_ids, _ = self._pad(mention_label_ids, -100)
mention_label_ids = torch.tensor(mention_label_ids).long().to(self.args.device)
binary_label_ids = [example['binary_label_ids'] for example in batch]
binary_label_ids, _ = self._pad(binary_label_ids, -100)
binary_label_ids = torch.tensor(binary_label_ids).long().to(self.args.device)
else:
label_ids = None
mention_label_ids = [example['mention_label_ids'] for example in batch]
mention_label_ids, _ = self._pad(mention_label_ids, -100)
mention_label_ids = torch.tensor(mention_label_ids).long().to(self.args.device)
binary_label_ids = None
token_type_ids = None # TODO: not sure if this has any effect on gpt2
# record info
context = [example['context'] for example in batch]
curr_utt = [example['curr_utt'] for example in batch]
rewt_utt = [example['rewt_utt'] for example in batch]
example_ids = [example['example_id'] for example in batch] # record the example idx in batch
curr_start_token_idx = [example['curr_start_token_idx'] for example in batch]
curr_end_token_idx = [example['curr_end_token_idx'] for example in batch]
reference_label = [example['reference_label'] for example in batch]
wordId2tokenId = [example['wordId2tokenId'] for example in batch]
tokenId2wordId = [example['tokenId2wordId'] for example in batch]
whole_input = [example['whole_input'] for example in batch]
spk = [example['spk'] for example in batch]
coref_label = [example['coref_label'] for example in batch]
binary_rewrite = [example['binary_rewrite'] for example in batch]
return {'input_ids': input_ids, 'attention_mask': attention_mask, \
'token_type_ids': token_type_ids, 'label_ids': label_ids, \
'context': context, 'curr_utt': curr_utt, 'rewt_utt': rewt_utt, \
'example_ids': example_ids, 'spk': spk, 'mention_label_ids': mention_label_ids, \
'curr_start_token_idx': curr_start_token_idx, 'curr_end_token_idx': curr_end_token_idx, \
'reference_label': reference_label, 'wordId2tokenId': wordId2tokenId, \
'tokenId2wordId': tokenId2wordId, 'whole_input': whole_input, \
'coref_label': coref_label, 'binary_label_ids': binary_label_ids, \
'binary_rewrite': binary_rewrite}
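# Sketch of the intended hookup with a torch DataLoader (assumed to live in the
# training script; `args`, `tokenizer` and the batch-size attribute name are
# placeholders, not defined in this file):
#   dataset = Dataset(args, tokenizer, 'train', generation=False, data_size=-1)
#   loader = DataLoader(dataset, sampler=RandomSampler(dataset),
#                       batch_size=args.batch_size, collate_fn=dataset.collate_fn)
#   for batch in loader:
#       ...  # batch['input_ids'], batch['attention_mask'], batch['label_ids'], etc.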
def tokenize_with_map(self, sentence):
'''
Build the mapping of indexes before/after the tokenizer to handle BPE
Input:
sentence: a natural sentence, str
Returns:
wordId2tokenId, a 1-to-many map
tokenId2wordId, a many-to-1 map
'''
assert isinstance(sentence, str)
token_ids = self.tokenizer(sentence)['input_ids']
reconstruct_sentence = self.tokenizer.convert_ids_to_tokens(token_ids)
reconstruct_sentence = [token.replace('Ġ', '') for token in reconstruct_sentence]
sentence = sentence.split()
wordId2tokenId = {}
tokenId = 0
for wordId, word in enumerate(sentence):
wordId2tokenId[wordId] = []
token = ""
while word != token:
wordId2tokenId[wordId].append(tokenId)
token += reconstruct_sentence[tokenId]
tokenId += 1
tokenId2wordId = {}
for wordId, tokenIds in wordId2tokenId.items():
for tokenId in tokenIds:
assert tokenId not in tokenId2wordId
tokenId2wordId[tokenId] = wordId
assert len(wordId2tokenId) == len(sentence)
assert len(tokenId2wordId) == len(reconstruct_sentence)
return token_ids, wordId2tokenId, tokenId2wordId
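# Worked example of the two maps (toy BPE split, purely illustrative): if the word
# "rewrite" at word index 3 is split into the tokens "rew" + "rite" occupying token
# positions 5 and 6, then
#   wordId2tokenId[3] == [5, 6]                     # one word -> many tokens
#   tokenId2wordId[5] == tokenId2wordId[6] == 3     # many tokens -> one word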
if __name__ == '__main__':
pass
hexsha: 4a770f589bb75a8f2ce9da24f74f5b68103d69bf | size: 2,431 | ext: py | lang: Python
path: hy/lex/lexer.py | repo: schuster-rainer/hy | repo head: d969ed63d67c4a9070fd41a8fbff35da845e0619 | licenses: ["MIT"]
stars: 12 (2015-01-01T21:21:31.000Z to 2021-06-14T19:51:59.000Z) | issues: null | forks: 2 (2016-01-17T21:59:29.000Z to 2016-09-06T20:56:41.000Z)
# Copyright (c) 2013 Nicolas Dandrimont <nicolas.dandrimont@crans.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from rply import LexerGenerator
lg = LexerGenerator()
# A regexp for something that should end a quoting/unquoting operator
# i.e. a space or a closing brace/paren/curly
end_quote = r'(?![\s\)\]\}])'
lg.add('LPAREN', r'\(')
lg.add('RPAREN', r'\)')
lg.add('LBRACKET', r'\[')
lg.add('RBRACKET', r'\]')
lg.add('LCURLY', r'\{')
lg.add('RCURLY', r'\}')
lg.add('HLCURLY', r'#\{')
lg.add('QUOTE', r'\'%s' % end_quote)
lg.add('QUASIQUOTE', r'`%s' % end_quote)
lg.add('UNQUOTESPLICE', r'~@%s' % end_quote)
lg.add('UNQUOTE', r'~%s' % end_quote)
lg.add('HASHBANG', r'#!.*[^\r\n]')
lg.add('HASHREADER', r'#[^{]')
# A regexp which matches incomplete strings, used to support
# multi-line strings in the interpreter
partial_string = r'''(?x)
(?:u|r|ur|ru)? # prefix
" # start string
(?:
| [^"\\] # non-quote or backslash
| \\(.|\n) # or escaped single character or newline
| \\x[0-9a-fA-F]{2} # or escaped raw character
| \\u[0-9a-fA-F]{4} # or unicode escape
| \\U[0-9a-fA-F]{8} # or long unicode escape
)* # one or more times
'''
lg.add('STRING', r'%s"' % partial_string)
lg.add('PARTIAL_STRING', partial_string)
lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+')
lg.ignore(r';.*(?=\r|\n|$)')
lg.ignore(r'\s+')
lexer = lg.build()
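# Minimal usage sketch (illustrative; the real tokenization entry point lives
# elsewhere in hy.lex):
#   for token in lexer.lex('(print "hello")'):
#       print(token.gettokentype(), token.getstr())
# which yields LPAREN, IDENTIFIER, STRING and RPAREN tokens in that order.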
hexsha: 4a77208eebfdf92ef53ffabde97b664e8625e12d | size: 1,319 | ext: py | lang: Python
path: week6/shuffle.py | repo: solideveloper/afs-210 | repo head: 2ba0bb7c7617cd3169907458f657696a6987689d | licenses: ["Apache-2.0"]
stars: 1 (2022-01-06T01:22:17.000Z) | issues: null | forks: null
# Python provides a built-in method called random.shuffle that will shuffle the list data type. Do not use this.
# For this assignment, you are to create your own shuffle algorithm that will take as input a sorted list and randomly shuffle the items before returning the list. Try to make your algorithm as efficient as possible.
# Add a comment to your code stating what the time complexity of your algorithm is and why.
# Display list before and after shuffle. Call your shuffle function multiple times, each time on the original sorted list to show the random order of the list items.
data = [7, 20, 26, 31, 40, 51, 55, 63, 74, 81]
ndata = len(data)
import random
def shuffleAlgorithm(data, ndata):
for i in range(ndata-1, 0, -1):
r = random.randint(0, i)
data[i], data[r] = data[r], data[i]
return data
print(data)
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
# fisher-yates algorithm - O(n) time complexity because each item is visited and swapped exactly once,
# and i'm not creating a copy of the list to shuffle through it.
# instead i'm modifying the list in place, using only constant O(1) extra space.
# swapping each item (from last to first) with a random -not previously selected- item and repeating until all items in the list have been selected
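# a quick optional sanity check of the shuffle's uniformity (not part of the assignment):
# tally how often each value lands in position 0 over many runs; for a fair shuffle each
# of the 10 values should show up there roughly 10% of the time.
#   from collections import Counter
#   print(Counter(shuffleAlgorithm(sorted(data), ndata)[0] for _ in range(10000)))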
hexsha: 4a79466df9295fa5ad7c3a62c359310229ec684a | size: 5,647 | ext: py | lang: Python
path: tadataka/dataset/new_tsukuba.py | repo: IshitaTakeshi/Tadataka | repo head: 852c7afb904503005e51884408e1492ef0be836f | licenses: ["Apache-2.0"]
stars: 54 (2019-11-15T16:30:34.000Z to 2022-01-13T15:18:54.000Z) | issues: 11 (2019-02-28T08:28:24.000Z to 2020-04-07T04:47:12.000Z) | forks: 1 (2020-02-26T13:59:40.000Z)
import csv
import os
from pathlib import Path
from xml.etree import ElementTree as ET
from tqdm import tqdm
from scipy.spatial.transform import Rotation
from skimage.io import imread
import numpy as np
from tadataka.camera import CameraModel, CameraParameters, FOV
from tadataka.dataset.frame import Frame
from tadataka.dataset.base import BaseDataset
from tadataka.pose import Pose
def load_depth(path):
tree = ET.parse(path)
root = tree.getroot()
rows_node, cols_node, dt_node, data_node = root[0]
height, width = int(rows_node.text), int(cols_node.text)
depth_text = data_node.text
depth_text = depth_text.replace('\n', '').strip()
depth_map = np.fromstring(depth_text, sep=' ')
return depth_map.reshape(height, width)
def generate_cache(src_dir, cache_dir, src_extension, loader):
def generate_(subdir):
os.makedirs(str(Path(cache_dir, subdir)))
print(f"Generating cache from {subdir}")
paths = Path(src_dir, subdir).glob("*" + src_extension)
for path in tqdm(list(paths)):
filename = path.name.replace(src_extension, ".npy")
cache_path = Path(cache_dir, subdir, filename)
array = loader(path)
np.save(str(cache_path), array)
generate_("left")
generate_("right")
def generate_image_cache(image_dir, cache_dir):
print("Generating image cache")
generate_cache(image_dir, cache_dir, ".png", imread)
def generate_depth_cache(depth_dir, cache_dir):
print("Generating depth cache")
generate_cache(depth_dir, cache_dir, ".xml", load_depth)
def align_coordinate_system(positions, euler_angles):
# Camera coordinate system and world coordinate system are not aligned
#
# Usually camera coordinate system is represented in the format that
# x: right y: down z: forward
# however, in 'camera_track.txt', they are written in
# x: right y: up z: backward
#
# This means the camera coordinate system is
# rotated 180 degrees around the x-axis from the world coordinate system
# rotate 180 degrees around the x-axis
R = Rotation.from_rotvec([np.pi, 0, 0]).as_matrix()
positions = np.dot(R, positions.T).T
# Reverse rotations around y and z because axes are flipped
# (rot_x, rot_y, rot_z) <- (rot_x, -rot_y, -rot_z)
euler_angles[:, 1:3] = -euler_angles[:, 1:3]
return positions, euler_angles
def load_poses(pose_path):
poses = np.loadtxt(pose_path, delimiter=',')
positions, euler_angles = poses[:, 0:3], poses[:, 3:6]
positions, euler_angles = align_coordinate_system(positions, euler_angles)
rotations = Rotation.from_euler('xyz', euler_angles, degrees=True)
return rotations, positions
def discard_alpha(image):
return image[:, :, 0:3]
def calc_baseline_offset(rotation, baseline_length):
local_offset = np.array([baseline_length, 0, 0])
R = rotation.as_matrix()
return np.dot(R, local_offset)
# TODO download and set dataset_root automatically
class NewTsukubaDataset(BaseDataset):
def __init__(self, dataset_root, condition="daylight"):
self.camera_model = CameraModel(
CameraParameters(focal_length=[615, 615], offset=[320, 240]),
distortion_model=None
)
groundtruth_dir = Path(dataset_root, "groundtruth")
illumination_dir = Path(dataset_root, "illumination")
pose_path = Path(groundtruth_dir, "camera_track.txt")
self.baseline_length = 10.0
self.rotations, self.positions = load_poses(pose_path)
depth_dir = Path(groundtruth_dir, "depth_maps")
depth_cache_dir = Path(groundtruth_dir, "depth_cache")
if not depth_cache_dir.exists():
generate_depth_cache(depth_dir, depth_cache_dir)
self.depth_L_paths = sorted(Path(depth_cache_dir, "left").glob("*.npy"))
self.depth_R_paths = sorted(Path(depth_cache_dir, "right").glob("*.npy"))
image_dir = Path(illumination_dir, condition)
image_cache_dir = Path(illumination_dir, condition + "_cache")
if not image_cache_dir.exists():
generate_image_cache(image_dir, image_cache_dir)
self.image_L_paths = sorted(Path(image_cache_dir, "left").glob("*.npy"))
self.image_R_paths = sorted(Path(image_cache_dir, "right").glob("*.npy"))
assert((len(self.depth_L_paths) == len(self.depth_R_paths) ==
len(self.image_L_paths) == len(self.image_R_paths) ==
len(self.rotations) == len(self.positions)))
for i in range(len(self.positions)):
DL = self.depth_L_paths[i].name
DR = self.depth_R_paths[i].name
IL = self.image_L_paths[i].name
IR = self.image_R_paths[i].name
assert(DL[-8:] == DR[-8:] == IL[-8:] == IR[-8:])
def __len__(self):
return len(self.positions)
def load(self, index):
image_l = np.load(self.image_L_paths[index])
image_r = np.load(self.image_R_paths[index])
image_l = discard_alpha(image_l)
image_r = discard_alpha(image_r)
depth_l = np.load(self.depth_L_paths[index])
depth_r = np.load(self.depth_R_paths[index])
position_center = self.positions[index]
rotation = self.rotations[index]
offset = calc_baseline_offset(rotation, self.baseline_length)
pose_wl = Pose(rotation, position_center - offset / 2.0)
pose_wr = Pose(rotation, position_center + offset / 2.0)
return (
Frame(self.camera_model, pose_wl, image_l, depth_l),
Frame(self.camera_model, pose_wr, image_r, depth_r)
)
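# A usage sketch (not part of the module): the dataset root below is a placeholder and must
# point at a local copy of the New Tsukuba stereo dataset for this to actually run.
if __name__ == "__main__":
    dataset = NewTsukubaDataset("/path/to/NewTsukubaStereoDataset", condition="daylight")
    print("number of frames:", len(dataset))
    frame_l, frame_r = dataset.load(0)  # left and right Frame objects for the first pose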
| 34.644172
| 81
| 0.673278
| 777
| 5,647
| 4.646075
| 0.239382
| 0.037673
| 0.015235
| 0.016621
| 0.20554
| 0.100831
| 0.018837
| 0
| 0
| 0
| 0
| 0.009914
| 0.214096
| 5,647
| 162
| 82
| 34.858025
| 0.803515
| 0.098282
| 0
| 0
| 0
| 0
| 0.04234
| 0
| 0
| 0
| 0
| 0.006173
| 0.018868
| 1
| 0.113208
| false
| 0
| 0.113208
| 0.018868
| 0.301887
| 0.028302
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a798e4f49354ed1b300d7ffad5bbb4e1e929e1a
| 2,015
|
py
|
Python
|
krogon/maybe.py
|
enamrik/krogon
|
a41a10ed346b7198509929ed9ba1e9fcf778dc78
|
[
"MIT"
] | 1
|
2020-03-02T14:17:02.000Z
|
2020-03-02T14:17:02.000Z
|
krogon/maybe.py
|
enamrik/krogon
|
a41a10ed346b7198509929ed9ba1e9fcf778dc78
|
[
"MIT"
] | null | null | null |
krogon/maybe.py
|
enamrik/krogon
|
a41a10ed346b7198509929ed9ba1e9fcf778dc78
|
[
"MIT"
] | null | null | null |
from typing import Callable, TypeVar, Union, Tuple
from krogon.infix import Infix

A = TypeVar('A')
B = TypeVar('B')
E = TypeVar('E')

Maybe = Union[Tuple['just', A], Tuple['nothing']]

def just(value=None):
    return "just", value

def nothing():
    return "nothing", None

def from_value(value) -> Maybe[B]:
    return _cast_to_maybe(value)

def from_value_or_default(value, default) -> Maybe[B]:
    return from_maybe(
        _cast_to_maybe(value),
        dict(if_just=lambda x: just(x),
             if_nothing=lambda: _cast_to_maybe(default)))

@Infix
def then(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
    if maybe[0] == "just":
        return _cast_to_maybe(func(maybe[1]))
    elif maybe[0] == "nothing":
        return maybe

@Infix
def catch_nothing(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
    if maybe[0] == "nothing":
        return _cast_to_maybe(func())
    elif maybe[0] == "just":
        return maybe

@Infix
def map(maybe: Maybe[A], mapper: Callable[[A], B]) -> Maybe[B]:
    if maybe[0] == "just":
        return just(mapper(maybe[1]))
    elif maybe[0] == "nothing":
        return maybe

@Infix
def value_or_default(maybe: Maybe[A], default_value: B):
    return maybe | from_maybe | (dict(if_just=lambda x: x, if_nothing=lambda: default_value))

@Infix
def from_maybe(maybe: Maybe[A], dict_args: dict) -> B:
    if_just: Callable = dict_args['if_just']
    if_nothing: Callable = dict_args['if_nothing']
    if maybe[0] == "just" and if_just is not None:
        return if_just(maybe[1])
    elif maybe[0] == "nothing" and if_nothing is not None:
        return if_nothing()
    else:
        raise Exception('Invalid Maybe: {}, {}'.format(maybe, dict_args))

def _cast_to_maybe(result):
    if result is None:
        return nothing()
    if isinstance(result, tuple) and len(result) == 2:
        maybe_type, value = result
        if maybe_type == "just" or maybe_type == "nothing":
            return result
    return just(result)
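# A usage sketch (not part of the original module), assuming krogon.infix.Infix supports the
# `value | fn | arg` pattern already used by value_or_default above:
if __name__ == "__main__":
    result = (
        from_value(3)
        | then | (lambda x: just(x + 1))
        | map | (lambda x: x * 10)
        | value_or_default | 0
    )
    print(result)                            # expected: 40
    print(nothing() | value_or_default | 0)  # expected: 0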
| 24.573171
| 93
| 0.629777
| 291
| 2,015
| 4.19244
| 0.168385
| 0.039344
| 0.054098
| 0.041803
| 0.27377
| 0.188525
| 0.169672
| 0.169672
| 0.140984
| 0.140984
| 0
| 0.007653
| 0.221836
| 2,015
| 81
| 94
| 24.876543
| 0.770408
| 0
| 0
| 0.214286
| 0
| 0
| 0.058561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0
| 0.035714
| 0.089286
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a7c6e1277408f69b722e24dda7d218cc70dda0f
| 1,192
|
py
|
Python
|
migrations/versions/576712576c48_added_model_for_photo_comments.py
|
Torniojaws/vortech-backend
|
f775a97eeae089fa720088d86fe92d40bc5d65bc
|
[
"MIT"
] | null | null | null |
migrations/versions/576712576c48_added_model_for_photo_comments.py
|
Torniojaws/vortech-backend
|
f775a97eeae089fa720088d86fe92d40bc5d65bc
|
[
"MIT"
] | 93
|
2017-09-01T22:24:10.000Z
|
2021-12-22T14:07:06.000Z
|
migrations/versions/576712576c48_added_model_for_photo_comments.py
|
Torniojaws/vortech-backend
|
f775a97eeae089fa720088d86fe92d40bc5d65bc
|
[
"MIT"
] | null | null | null |
"""Added model for photo comments
Revision ID: 576712576c48
Revises: 75bb906df167
Create Date: 2018-03-30 02:06:22.877079
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '576712576c48'
down_revision = '75bb906df167'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('CommentsPhotos',
        sa.Column('CommentID', sa.Integer(), nullable=False),
        sa.Column('PhotoID', sa.Integer(), nullable=False),
        sa.Column('Comment', sa.Text(), nullable=False),
        sa.Column('UserID', sa.Integer(), nullable=False),
        sa.Column('Created', sa.DateTime(), nullable=True),
        sa.Column('Updated', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['PhotoID'], ['Photos.PhotoID'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['UserID'], ['Users.UserID'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('CommentID')
    )
    # ### end Alembic commands ###

def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('CommentsPhotos')
    # ### end Alembic commands ###
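# Typical invocation (a sketch; assumes an alembic.ini configured for this project):
#   alembic upgrade 576712576c48     # apply this migration
#   alembic downgrade 75bb906df167   # revert to the previous revision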
| 30.564103
| 81
| 0.672819
| 134
| 1,192
| 5.947761
| 0.492537
| 0.060226
| 0.075282
| 0.105395
| 0.283563
| 0.223338
| 0.110414
| 0.110414
| 0
| 0
| 0
| 0.058645
| 0.170302
| 1,192
| 38
| 82
| 31.368421
| 0.747219
| 0.261745
| 0
| 0
| 0
| 0
| 0.191697
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a7f99985562db134bffd977ed750d635522a7a2
| 12,364
|
py
|
Python
|
usaspending_api/etl/helpers.py
|
truthiswill/usaspending-api
|
bd7d915442e2ec94cc830c480ceeffd4479be6c0
|
[
"CC0-1.0"
] | null | null | null |
usaspending_api/etl/helpers.py
|
truthiswill/usaspending-api
|
bd7d915442e2ec94cc830c480ceeffd4479be6c0
|
[
"CC0-1.0"
] | 3
|
2020-02-12T01:16:46.000Z
|
2021-06-10T20:36:57.000Z
|
usaspending_api/etl/helpers.py
|
truthiswill/usaspending-api
|
bd7d915442e2ec94cc830c480ceeffd4479be6c0
|
[
"CC0-1.0"
] | null | null | null |
from datetime import datetime
import warnings
import logging
from django.db.models import Q, Case, Value, When
from django.core.cache import caches, CacheKeyWarning
import django.apps
from usaspending_api.references.models import Agency, Location, RefCountryCode
from usaspending_api.references.helpers import canonicalize_location_dict
from usaspending_api.submissions.models import SubmissionAttributes
from usaspending_api.data.daims_maps import daims_maps
warnings.simplefilter("ignore", CacheKeyWarning)
def clear_caches():
for cache_name in ('default', 'locations', 'awards'):
caches[cache_name].clear()
def cleanse_values(row):
"""
Remove textual quirks from CSV values.
"""
row = {k: v.strip() for (k, v) in row.items()}
row = {k: (None if v.lower() == 'null' else v) for (k, v) in row.items()}
return row
def convert_date(date):
if date == "":
return None
return datetime.strptime(date, '%m/%d/%Y').strftime('%Y-%m-%d')
def get_subtier_agency_dict():
"""Returns a dictionary with key = subtier agency code and value = agency id."""
# there's no unique constraint on subtier_code, so the order by below ensures that in the case of duplicate subtier
# codes, the dictionary we return will reflect the most recently updated one
agencies = Agency.objects.all().values(
'id',
'subtier_agency__subtier_code').order_by('subtier_agency__update_date')
subtier_agency_dict = {
a['subtier_agency__subtier_code']: a['id'] for a in agencies
}
return subtier_agency_dict
def fetch_country_code(vendor_country_code):
code_str = up2colon(vendor_country_code)
if code_str == "":
return None
country_code = RefCountryCode.objects.filter(Q(country_code=code_str) | Q(country_name__iexact=code_str)).first()
if not country_code:
# We don't have an exact match on the name or the code, so we need to
# chain filter on the name
query_set = RefCountryCode.objects
for word in code_str.split():
query_set = query_set.filter(country_name__icontains=word)
country_code = query_set.first()
return country_code
location_cache = caches['locations']
def get_or_create_location(row, mapper):
location_dict = mapper(row)
# Country-specific adjustments
if location_dict["location_country_code"] == "USA":
# Apparently zip codes are optional...
if location_dict["location_zip"]:
location_dict.update(
zip5=location_dict["location_zip"][:5],
zip_last4=location_dict["location_zip"][5:])
location_dict.pop("location_zip")
else:
location_dict.update(
foreign_postal_code=location_dict.pop("location_zip", None),
foreign_province=location_dict.pop("state_code", None))
if "city_name" in location_dict:
location_dict['foreign_city_name'] = location_dict.pop("city_name")
location_dict = canonicalize_location_dict(location_dict)
location_tup = tuple(location_dict.items())
location = location_cache.get(location_tup)
if location:
return location
location = Location.objects.filter(**location_dict).first()
if not location:
location = Location.objects.create(**location_dict)
location_cache.set(location_tup, location)
return location
def up2colon(input_string):
'Takes the part of a string before `:`, if any.'
if input_string:
return input_string.split(':')[0].strip()
return ''
def parse_numeric_value(string):
try:
return float(string)
except Exception:
return None
def get_fiscal_quarter(fiscal_reporting_period):
"""
Return the fiscal quarter.
Note: the reporting period being passed should already be in "federal fiscal format",
where period 1 = Oct. and period 12 = Sept.
"""
if fiscal_reporting_period in [1, 2, 3]:
return 1
elif fiscal_reporting_period in [4, 5, 6]:
return 2
elif fiscal_reporting_period in [7, 8, 9]:
return 3
elif fiscal_reporting_period in [10, 11, 12]:
return 4
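# Example (sketch): period 1 is October, so get_fiscal_quarter(2) returns 1 (the Oct-Dec
# quarter) and get_fiscal_quarter(11) returns 4 (the July-September quarter).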
def get_previous_submission(cgac_code, fiscal_year, fiscal_period):
"""
For the specified CGAC (e.g., department/top-tier agency) and specified fiscal year and quarter, return the
previous submission within the same fiscal year.
"""
previous_submission = SubmissionAttributes.objects \
.filter(
cgac_code=cgac_code,
reporting_fiscal_year=fiscal_year,
reporting_fiscal_period__lt=fiscal_period,
quarter_format_flag=True) \
.order_by('-reporting_fiscal_period') \
.first()
return previous_submission
def update_model_description_fields():
"""
This method searches through every model Django has registered, checks if it
belongs to a list of apps we should update, and updates all fields with
'_description' at the end with their relevant information.
Dictionaries for DAIMS definitions should be stored in:
usaspending_api/data/daims_maps.py
Each map should be <field_name>_map for discoverability.
If there are conflicting maps (i.e., two models use type_description, but
different enumerations) prepend the map name with the model name and a dot.
For examples of these situations, see the documentation in daims_maps.py
"""
logger = logging.getLogger('console')
# This is a list of apps whose models will be checked for description fields
updatable_apps = [
"accounts",
"awards",
"common",
"financial_activities",
"references",
"submissions"
]
# This iterates over every model that Django has registered
for model in django.apps.apps.get_models():
# This checks the app_label of the model, and thus we can skip it if it is not in one of our updatable_apps.
# Thus, we'll skip any django admin apps, like auth, corsheaders, etc.
if model._meta.app_label not in updatable_apps:
continue
if model.__name__[:10] == "Historical":
continue
model_fields = [f.name for f in model._meta.get_fields()]
# This supports multi-case DAIMS
# We must filter on the model level rather than add them to the when clauses, because if there is a FK in the
# when clause Django is not guaranteed to join on that table properly.
#
# This is an array of tuples of the following format
# (Q object of filter, field_names -> case objects map for this filter)
#
# It is initialized with a blank filter and empty list, which is where default updates are stored
model_filtered_update_case_map = [(Q(), {})]
desc_fields = [field for field in model_fields if field.split('_')[-1] ==
"description"[:len(field.split('_')[-1])]]
non_desc_fields = [field for field in model_fields if field not in desc_fields]
desc_fields_mapping = {}
for desc_field in desc_fields:
actual_field_short = "_".join(desc_field.split('_')[:-1])
actual_field = None
for field in non_desc_fields:
if actual_field_short == field:
actual_field = field
elif actual_field_short == field[:len(actual_field_short)]:
actual_field = field
desc_fields_mapping[desc_field] = actual_field
# Loop through each of the models fields to construct a case for each applicable field
for field in model_fields:
# We're looking for field names ending in _description
split_name = field.split("_")
# If the last element in our split name isn't description, skip it
if len(split_name) == 1 or split_name[-1] != "description"[:len(split_name[-1])]:
continue
source_field = "_".join(split_name[:-1])
destination_field = field
# This is the map name, prefixed by model name for when there are non-unique description fields
source_field = desc_fields_mapping[field] if field in desc_fields_mapping else source_field
model_map_name = "{}.{}_map".format(model.__name__, source_field)
map_name = "{}_map".format(source_field)
# This stores a direct reference to the enumeration mapping
code_map = None
# Validate we have the source field
if source_field not in model_fields:
logger.debug("Tried to update '{}' on model '{}', but source field '{}' does not exist.".
format(destination_field, model.__name__, source_field))
continue
# Validate we have a map
# Prefer model_map_name over map_name
if model_map_name in daims_maps.keys():
code_map = daims_maps[model_map_name]
elif map_name in daims_maps.keys():
code_map = daims_maps[map_name]
else:
logger.warn("Tried to update '{}' on model '{}', but neither map '{}' nor '{}' exists.".
format(destination_field, model.__name__, model_map_name, map_name))
continue
# Cases start from 1
case_number = 1
case_name = "case_1"
case_map = "case_1_map"
while case_name in code_map.keys():
case_object = create_case(code_map[case_map], source_field)
# Construct a Q filter for this case
case_filter = Q(**code_map[case_name])
# See if we already have a tuple for this filter
case_tuple = [x for x in model_filtered_update_case_map if x[0] == case_filter]
if len(case_tuple) == 0:
# We don't, so create the tuple
temp_case_dict = {}
temp_case_dict[field] = case_object
model_filtered_update_case_map.append((case_filter, temp_case_dict))
else:
# We do, so just add our case object to that dictionary
case_tuple[0][1][field] = case_object
# Check for the next case
case_number += 1
case_name = "case_{}".format(case_number)
case_map = "case_{}_map".format(case_number)
# If our case number is still 1, then we didn't have any cases. Therefore, we perform the default
if case_number == 1:
case_object = create_case(code_map, source_field)
# Grab the first tuple, which has no filters
case_tuple = model_filtered_update_case_map[0]
# Add it to our dictionary
case_tuple[1][field] = case_object
for filter_tuple in model_filtered_update_case_map:
# For each filter tuple, check if the dictionary has any entries
if len(filter_tuple[1].keys()) > 0:
print("Updating model {}\n FILTERS:\n {}\n FIELDS:\n {}".
format(model.__name__, str(filter_tuple[0]), "\n ".join(filter_tuple[1].keys())))
try:
model.objects.filter(filter_tuple[0]).update(**filter_tuple[1])
except django.db.utils.ProgrammingError as e:
logger.warn(str(e))
logger.warn("(OK if invoked from a migration, when the table may not yet have been created)")
# Utility method for update_model_description_fields, creates the Case object
def create_case(code_map, source_field):
when_list = []
default = None
for code in code_map.keys():
when_args = {}
when_args[source_field] = code
when_args["then"] = Value(code_map[code])
# If our code is blank, change the comparison to ""
if code == "_BLANK":
when_args[source_field] = Value("")
# We handle the default case later
if code == "_DEFAULT":
default = Value(code_map[code])
continue
# Append a new when to our when-list
when_list.append(When(**when_args))
return Case(*when_list, default=default)
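# A usage sketch for create_case (the map below is hypothetical, not a real DAIMS map):
#   create_case({"A": "Assistance", "B": "Contract", "_DEFAULT": "Unknown"}, "type")
# builds roughly
#   Case(When(type="A", then=Value("Assistance")),
#        When(type="B", then=Value("Contract")),
#        default=Value("Unknown"))
# which update_model_description_fields then passes to queryset.update() for the
# corresponding *_description field.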
| 38.397516
| 119
| 0.63604
| 1,612
| 12,364
| 4.656328
| 0.230769
| 0.031974
| 0.021316
| 0.015321
| 0.111111
| 0.061151
| 0.021583
| 0.021583
| 0.021583
| 0.021583
| 0
| 0.006403
| 0.280006
| 12,364
| 321
| 120
| 38.517134
| 0.836778
| 0.260514
| 0
| 0.105263
| 0
| 0
| 0.088818
| 0.014158
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063158
| false
| 0
| 0.052632
| 0
| 0.210526
| 0.005263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a80119456047b966a3757d7fd0f105dc0f5c4f6
| 9,193
|
py
|
Python
|
code/mapplot.py
|
young-astronomer/vlpy
|
7fd434d307a7cc3593f84a7c6c2f4a4a86865afe
|
[
"Apache-2.0"
] | null | null | null |
code/mapplot.py
|
young-astronomer/vlpy
|
7fd434d307a7cc3593f84a7c6c2f4a4a86865afe
|
[
"Apache-2.0"
] | null | null | null |
code/mapplot.py
|
young-astronomer/vlpy
|
7fd434d307a7cc3593f84a7c6c2f4a4a86865afe
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 11:11:56 2020
This program is used to plot polarization maps from VLBI FITS images.
You should specify the input fits images by -i or --infile,
output file by -o or --output,
contour levs by -l or --levs
contour base by -c or --cmul
polarization parameters by -p or --pol: "icut pcut inc scale"
plot window by -w or --win
restore beam position by -b or --bpos
figsize by -f or --figsize
Installation:
1. copy file
chmod a+x mapplot.py
cp mapplot.py ~/myapp
2. set environment parameters
Add the following line to ~/.bashrc
export PATH=$PATH:/home/usename/myapp
source ~/.bashrc
Running like this:
mapplot.py -w <win> -f <figsize> -n <normalize> <infile> <cmul>
mapplot.py -i <input file list> -o <out.pdf> -c <cmul> -w <win> -p <pol>
Examples:
1. mapplot.py -i cta102.fits -o cta102-color.pdf -c 1.8e-3 -w '18 -8 -20 6' -f '7 6' -n 'power 0.5'
2. mapplot.py -w '18 -8 -20 6' -f '4.0 6' -n 'power 0.5' cta102.fits 1.8e-3
https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html
https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.colors.Normalize.html#matplotlib.colors.Normalize
@author: Li, Xiaofeng
Shanghai Astronomical Observatory, Chinese Academy of Sciences
E-mail: lixf@shao.ac.cn; 1650152531@qq.com
"""
import sys
import getopt
from astropy.io import fits
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.colors as mcolors
def add_beam(ax, win, h, bpos=None, pad=2.0):
if bpos==None :
x = win[0] - pad * h['bmaj']*3.6E6
y = win[2] + pad * h['bmaj']*3.6E6
bpos = (x, y)
bmaj = h['bmaj'] * 3.6E6
bmin = h['bmin'] * 3.6E6
bpa = 90 - h['bpa']
e = Ellipse(bpos, bmaj, bmin, angle=bpa, ec='k', facecolor='gray')
ax.add_artist(e)
def annotate(ax, notefile=''):
if notefile != '':
tab = Table.read(notefile, format='csv')
for t in tab:
ax.text(t['x'], t['y'], t['text'])
# ax.annotate('%s' % h['object'], xy=(0.125,0.91), xycoords='figure fraction')
# ax.annotate('%.1f GHz' % (h['crval3']/1.0E9), xy=(0.83, 0.91), xycoords='figure fraction')
def cut_cmap(cmap, N_cut=0):
# cmap = mcolors.Colormap(cmap)
cmap = plt.get_cmap(cmap)
x = np.arange(N_cut, 256) / 256.0
color_index = cmap(x)
cmap = mcolors.ListedColormap(color_index)
return cmap
def get_normalize(args, vmin=0.0, vmax=1.0):
if args == '':
norm = mcolors.Normalize(vmin, vmax)
args = args.split(' ')
name = args[0]
if name == 'linear':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.Normalize(vmin, vmax, True)
elif name == 'power':
if len(args)==1:
gamma = 0.5
if len(args)==2:
gamma = float(args[1])
elif len(args)==4:
gamma, vmin, vmax = np.array(args[1:], dtype='f4')
if gamma < 1.0 and vmin < 0.0:
vmin = 0.0
norm = mcolors.PowerNorm(gamma, vmin, vmax, True)
elif name == 'log':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.LogNorm(vmin, vmax)
elif name == 'symlog':
if len(args)==2:
linthresh = float(args[1])
linscale = 1.0
elif len(args)==3:
linthresh, linscale = np.array(args[1:], dtype='f4')
elif len(args)==5:
linthresh, linscale, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.SymLogNorm(linthresh, linscale, vmin, vmax)
elif name == 'twoslope':
if len(args)==2:
vcenter = float(args[1])
elif len(args)==4:
vcenter, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.TwoSlopeNorm(vcenter, vmin, vmax)
return norm
def add_annotation(ax, infile=''):
if infile == '':
return
with open(infile, 'r') as f:
for line in f.readlines():
row = line.split(',')
row = [col.strip() for col in row]
typ = row[0]
args = row[1:]
if typ == 'text':
x, y, text = args
x, y = float(x), float(y)
ax.text(x, y, text)
elif typ == 'arrow':
x1, y1, x2, y2 = np.array(args, dtype='f4')
ax.annotate("", xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'annotation':
x1, y1, x2, y2 = np.array(args[:-1], dtype='f4')
text = args[-1]
ax.annotate(text, xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'ellipse':
x, y, majax, minax, pa = np.array(args, dtype='f4')
e = Ellipse((x,y), majax, minax, angle=pa, lw=0.5, fc='none', ec='k', ls='-')
ax.add_artist(e)
def set_axis(ax, w):
ax.set_aspect('equal')
ax.set_xlabel('Relative R.A. (mas)')
ax.set_ylabel('Relative Dec. (mas)')
ax.set_xlim(w[0],w[1])
ax.set_ylim(w[2],w[3])
ax.tick_params(which='both', direction='in', length=6, right=True, top=True)
ax.tick_params(which='minor',length=4)
ax.minorticks_on()
def word2pix(w, h):
if w == None:
W = [0, h['naxis1'], 0, h['naxis2']]
else:
x0, x1, y0, y1 = w
X0 = h['crpix1'] + x0/(h['cdelt1']*3.6E6)
Y0 = h['crpix2'] + y0/(h['cdelt2']*3.6E6)
X1 = h['crpix1'] + x1/(h['cdelt1']*3.6E6)
Y1 = h['crpix2'] + y1/(h['cdelt2']*3.6E6)
W = [int(X0), int(X1), int(Y0), int(Y1)]
return W
def pix2word(W, h):
if W == None:
W = [0, h['naxis1'], 0, h['naxis2']]
X0, X1, Y0, Y1 = W
x0 = h['cdelt1']*3.6E6 * (X0-h['crpix1'])
y0 = h['cdelt2']*3.6E6 * (Y0-h['crpix2'])
x1 = h['cdelt1']*3.6E6 * (X1-h['crpix1'])
y1 = h['cdelt2']*3.6E6 * (Y1-h['crpix2'])
w = [x0, x1, y0, y1]
return w
def savefig(outfile, dpi=100):
if outfile.lower().endswith('.pdf') :
plt.savefig(outfile)
elif outfile.lower().endswith('.jpg') or outfile.lower().endswith('.jpeg'):
plt.savefig(outfile, dpi=dpi)
elif outfile.lower().endswith('.png'):
plt.savefig(outfile, dpi=dpi)
def mapplot(infile, cmul, outfile='', win=None, levs=None, bpos=None,
figsize=None, dpi=100, annotationfile='', cmap='', N_cut=0,
norm='', fraction=0.05):
hdul = fits.open(infile)
h = hdul[0].header
# img = hdul[0].data[0, 0, :, :]
if levs==None:
levs = cmul*np.array([-1,1,2,4,8,16,32,64,128,256,512,1024,2048,4096])
# print(win)
if figsize == None :
figsize = (6, 6)
if win == None:
win = pix2word(None, h)
W = word2pix(None, h)
else:
W = word2pix(win, h)
img = hdul[0].data[0, 0, W[2]:W[3], W[0]:W[1]]
if cmap == '':
cmap = 'rainbow'
cmap = cut_cmap(cmap, N_cut)
vmin, vmax = np.min(img), np.max(img)
if norm == '':
norm = 'linear %.3f %.3f' % (vmin, vmax)
norm = get_normalize(norm, vmin, vmax)
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
set_axis(ax, win)
add_beam(ax, win, h, bpos=bpos)
add_annotation(ax, annotationfile)
ax.contour(img, levs, extent=win,
linewidths=0.5, colors='k')
pcm = ax.imshow(img, extent=win, origin='lower',
interpolation='none', cmap=cmap, norm=norm)
cbar = fig.colorbar(pcm, ax=ax, fraction=fraction)
# cbar.ax.minorticks_off()
cbar.ax.tick_params('both',direction='in',right=True,top=True,which='both')
cbar.ax.tick_params(axis='y', labelrotation=90)
fig.tight_layout(pad=0.5)
if outfile != '':
savefig(outfile, dpi)
hdul.close()
def myhelp():
print('Help: mapplot.py -w "18 -8 -20 6" -f "7 6" -n "power 0.5" <cta102.fits> <1.8e-3>')
print(' or: mapplot.py -i cta102.fits -o cta102.png -w "18 -8 -20 6" -f "7 6" -n "power 0.5"')
def main(argv):
# infile = r'3c66a-calib/circe-beam.fits'
infile = ''
outfile = ''
annotationfile = ''
cmul = ''
win = None
levs = None
bpos = None
figsize = None
dpi = 100
colormap = ''
N_cut = 0
norm = ''
fraction = 0.05
try:
opts, args = getopt.getopt(argv, "hi:c:o:w:l:b:f:d:a:n:N:",
['help', 'infile=', 'cmul=', 'outfile=', 'win=',
'bpos=', 'figsize=', 'dpi=', 'annotatefile=', 'levs=', 'colormap=',
'N_cut=', 'norm=', 'fraction='])
except getopt.GetoptError:
myhelp()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
myhelp()
elif opt in ('-i', '--infile'):
infile = arg
elif opt in ('-c', '--cmul'):
cmul = arg
elif opt in ('-o', '--outfile'):
outfile = arg
elif opt in ('-w', '--win'):
win = arg
elif opt in ('-l', '--levs'):
levs = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-b', '--bpos'):
bpos = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-f', '--figsize'):
figsize = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-d', '--dpi'):
dpi = int(arg)
elif opt in ('-a', '--annotatefile'):
annotationfile = arg
elif opt in ('--colormap', ):
colormap = arg
elif opt in ('-N', '--N_cut'):
N_cut = int(arg)
elif opt in ('-n', '--norm'):
norm = arg
elif opt in ('--fraction',):
fraction = float(arg)
if infile=='' and len(args)==2:
infile, cmul = args
if infile=='' and len(args)==3:
infile, outfile, cmul = args
if infile=='' and len(args)==4:
infile, outfile, cmul, win = args
if outfile == '':
outfile = infile.split('.')[0] + '.pdf'
cmul = float(cmul)
if type(win) == str:
win = np.array(win.split(), dtype=np.float64).tolist()
mapplot(infile, cmul, outfile=outfile, win=win, levs=levs, bpos=bpos,
figsize=figsize, dpi=dpi, annotationfile=annotationfile,
cmap=colormap, N_cut=N_cut, norm=norm, fraction=fraction)
if __name__ == '__main__' :
main(sys.argv[1:])
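# A direct-call sketch mirroring the CLI examples in the docstring (the FITS file name,
# contour base, window, and normalization are placeholders):
# mapplot('cta102.fits', 1.8e-3, outfile='cta102.pdf',
#         win=[18, -8, -20, 6], figsize=(7, 6), norm='power 0.5')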
| 30.042484
| 101
| 0.617753
| 1,524
| 9,193
| 3.694226
| 0.22769
| 0.019893
| 0.020782
| 0.019183
| 0.286856
| 0.187034
| 0.171226
| 0.126465
| 0.117052
| 0.117052
| 0
| 0.051597
| 0.175677
| 9,193
| 306
| 102
| 30.042484
| 0.691343
| 0.186011
| 0
| 0.087866
| 0
| 0.012552
| 0.103694
| 0.003034
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050209
| false
| 0
| 0.033473
| 0
| 0.104603
| 0.008368
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a81890c9e9eec4855a38a91238cf619244d9278
| 2,174
|
py
|
Python
|
umbrella/api/v1/router.py
|
pizhi/umbrella
|
95027e6e11a6c8df2ab5f7c202b0c1d2183f839a
|
[
"Apache-2.0"
] | 1
|
2018-01-13T11:45:24.000Z
|
2018-01-13T11:45:24.000Z
|
umbrella/api/v1/router.py
|
pizhi/umbrella
|
95027e6e11a6c8df2ab5f7c202b0c1d2183f839a
|
[
"Apache-2.0"
] | null | null | null |
umbrella/api/v1/router.py
|
pizhi/umbrella
|
95027e6e11a6c8df2ab5f7c202b0c1d2183f839a
|
[
"Apache-2.0"
] | 2
|
2018-01-01T11:39:49.000Z
|
2018-08-07T07:16:45.000Z
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from umbrella.api.v1 import api
from umbrella.common import wsgi
class API(wsgi.Router):
    """WSGI router for Glance v1 API requests."""

    def __init__(self, mapper):
        api_resource = api.create_resource()
        mapper.connect("/",
                       controller=api_resource,
                       action="index")
        mapper.connect("/images",
                       controller=api_resource,
                       action='index',
                       conditions={'method': ['GET']})
        mapper.connect("/images/{id}",
                       controller=api_resource,
                       action="show",
                       conditions=dict(method=["GET"]))
        mapper.connect("/net/{instance_uuid}",
                       controller=api_resource,
                       action="get_net_sample",
                       conditions=dict(method=["GET"]))
        mapper.connect("/cpu/{instance_uuid}",
                       controller=api_resource,
                       action="get_cpu_sample",
                       conditions=dict(method=["GET"]))
        mapper.connect("/disk/{instance_uuid}",
                       controller=api_resource,
                       action="get_disk_sample",
                       conditions=dict(method=["GET"]))
        mapper.connect("/mem/{instance_uuid}",
                       controller=api_resource,
                       action="get_mem_sample",
                       conditions=dict(method=["GET"]))
        super(API, self).__init__(mapper)
| 36.233333
| 78
| 0.559798
| 224
| 2,174
| 5.303571
| 0.450893
| 0.074074
| 0.123737
| 0.159091
| 0.356061
| 0.277778
| 0.247475
| 0
| 0
| 0
| 0
| 0.006949
| 0.338086
| 2,174
| 59
| 79
| 36.847458
| 0.818624
| 0.297608
| 0
| 0.363636
| 0
| 0
| 0.129973
| 0.013926
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.060606
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a8279873b5f73ab9eb14c009ec624c039c590a5
| 943
|
py
|
Python
|
exemples/test_thomson_simu.py
|
butala/TomograPy
|
a1da41f1e0b7406a1b770e56428789c54175de20
|
[
"CECILL-B"
] | 7
|
2016-07-05T08:31:42.000Z
|
2022-03-31T20:24:13.000Z
|
exemples/test_thomson_simu.py
|
esoubrie/TomograPy
|
a1da41f1e0b7406a1b770e56428789c54175de20
|
[
"CECILL-B"
] | null | null | null |
exemples/test_thomson_simu.py
|
esoubrie/TomograPy
|
a1da41f1e0b7406a1b770e56428789c54175de20
|
[
"CECILL-B"
] | 4
|
2018-08-14T01:54:21.000Z
|
2022-03-10T19:44:43.000Z
|
#!/usr/bin/env python
import time
import numpy as np
import tomograpy
import lo
# object
obj = tomograpy.centered_cubic_map(10, 64)
obj[:] = tomograpy.phantom.shepp_logan(obj.shape)
# data
radius = 200
a = tomograpy.fov(obj, radius)
data = tomograpy.centered_stack(a, 128, n_images=60, radius=radius, max_lon=np.pi)
# model
kwargs = {"pb":"pb", "obj_rmin":1.5, "data_rmin":1.5}
P, D, obj_mask, data_mask = tomograpy.models.thomson(data, obj, u=.5, **kwargs)
# projection
t = time.time()
data[:] = (P * obj.ravel()).reshape(data.shape)
print("projection time : " + str(time.time() - t))
# data
# backprojection
t = time.time()
x0 = P.T * data.ravel()
bpj = x0.reshape(obj.shape)
print("backprojection time : " + str(time.time() - t))
# inversion using scipy.sparse.linalg
t = time.time()
sol = lo.acg(P, data.ravel(), D, 1e-3 * np.ones(3), maxiter=100, tol=1e-8)
sol = sol.reshape(obj.shape)
print("inversion time : " + str(time.time() - t))
| 30.419355
| 82
| 0.680806
| 155
| 943
| 4.077419
| 0.451613
| 0.075949
| 0.042722
| 0.071203
| 0.075949
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032967
| 0.131495
| 943
| 30
| 83
| 31.433333
| 0.738706
| 0.112407
| 0
| 0.136364
| 0
| 0
| 0.094089
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a845cfff802e634071ade849b849c82adc47ef1
| 395
|
py
|
Python
|
interactive_grabcut/repo/drag2draw.py
|
hiankun/py_sandbox
|
6623edd0c8ab17641e1ce09fba7da34c4865fc4f
|
[
"MIT"
] | null | null | null |
interactive_grabcut/repo/drag2draw.py
|
hiankun/py_sandbox
|
6623edd0c8ab17641e1ce09fba7da34c4865fc4f
|
[
"MIT"
] | null | null | null |
interactive_grabcut/repo/drag2draw.py
|
hiankun/py_sandbox
|
6623edd0c8ab17641e1ce09fba7da34c4865fc4f
|
[
"MIT"
] | null | null | null |
# source: https://www.youtube.com/watch?v=U0sVp1xLiyo
from tkinter import *
def paint(event):
    color = 'red'
    x1, y1 = (event.x-1), (event.y-1)
    x2, y2 = (event.x+1), (event.y+1)
    c.create_oval(x1, y1, x2, y2, fill=color, outline=color)
master = Tk()
c = Canvas(master, width=600, height=400, bg='white')
c.pack(expand=True, fill=BOTH)
c.bind('<B1-Motion>', paint)
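# Optional extension (a sketch, not in the original): right-click clears the canvas.
# '<Button-3>' is the right mouse button on most platforms.
c.bind('<Button-3>', lambda event: c.delete('all'))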
master.mainloop()
| 21.944444
| 55
| 0.648101
| 67
| 395
| 3.80597
| 0.671642
| 0.031373
| 0.054902
| 0.094118
| 0.109804
| 0.109804
| 0
| 0
| 0
| 0
| 0
| 0.062315
| 0.146835
| 395
| 17
| 56
| 23.235294
| 0.694362
| 0.129114
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a861f0810192c03917c1a4cb2de99fa5681f49e
| 14,913
|
py
|
Python
|
neutronclient/osc/v2/vpnaas/ipsec_site_connection.py
|
slawqo/python-neutronclient
|
ee08644c5f2424a40c70010dcf0fa2ad84809bfc
|
[
"Apache-2.0"
] | 120
|
2015-01-07T00:38:58.000Z
|
2021-12-26T13:05:53.000Z
|
neutronclient/osc/v2/vpnaas/ipsec_site_connection.py
|
slawqo/python-neutronclient
|
ee08644c5f2424a40c70010dcf0fa2ad84809bfc
|
[
"Apache-2.0"
] | 1
|
2021-08-11T18:42:30.000Z
|
2021-08-11T22:25:21.000Z
|
neutronclient/osc/v2/vpnaas/ipsec_site_connection.py
|
slawqo/python-neutronclient
|
ee08644c5f2424a40c70010dcf0fa2ad84809bfc
|
[
"Apache-2.0"
] | 153
|
2015-01-05T16:50:50.000Z
|
2021-09-13T12:01:23.000Z
|
# Copyright 2017 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from osc_lib.cli import format_columns
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.utils import columns as column_util
from oslo_log import log as logging
from neutronclient._i18n import _
from neutronclient.common import utils as nc_utils
from neutronclient.osc import utils as osc_utils
from neutronclient.osc.v2.vpnaas import utils as vpn_utils
LOG = logging.getLogger(__name__)
_formatters = {
'peer_cidrs': format_columns.ListColumn
}
_attr_map = (
('id', 'ID', column_util.LIST_BOTH),
('name', 'Name', column_util.LIST_BOTH),
('peer_address', 'Peer Address', column_util.LIST_BOTH),
('auth_mode', 'Authentication Algorithm', column_util.LIST_BOTH),
('status', 'Status', column_util.LIST_BOTH),
('tenant_id', 'Project', column_util.LIST_LONG_ONLY),
('peer_cidrs', 'Peer CIDRs', column_util.LIST_LONG_ONLY),
('vpnservice_id', 'VPN Service', column_util.LIST_LONG_ONLY),
('ipsecpolicy_id', 'IPSec Policy', column_util.LIST_LONG_ONLY),
('ikepolicy_id', 'IKE Policy', column_util.LIST_LONG_ONLY),
('mtu', 'MTU', column_util.LIST_LONG_ONLY),
('initiator', 'Initiator', column_util.LIST_LONG_ONLY),
('admin_state_up', 'State', column_util.LIST_LONG_ONLY),
('description', 'Description', column_util.LIST_LONG_ONLY),
('psk', 'Pre-shared Key', column_util.LIST_LONG_ONLY),
('route_mode', 'Route Mode', column_util.LIST_LONG_ONLY),
('local_id', 'Local ID', column_util.LIST_LONG_ONLY),
('peer_id', 'Peer ID', column_util.LIST_LONG_ONLY),
('local_ep_group_id', 'Local Endpoint Group ID',
column_util.LIST_LONG_ONLY),
('peer_ep_group_id', 'Peer Endpoint Group ID', column_util.LIST_LONG_ONLY),
)
def _convert_to_lowercase(string):
return string.lower()
def _get_common_parser(parser, is_create=True):
parser.add_argument(
'--description',
metavar='<description>',
help=_('Description for the connection'))
parser.add_argument(
'--dpd',
metavar="action=ACTION,interval=INTERVAL,timeout=TIMEOUT",
type=nc_utils.str2dict_type(
optional_keys=['action', 'interval', 'timeout']),
help=vpn_utils.dpd_help("IPsec connection"))
parser.add_argument(
'--mtu',
help=_('MTU size for the connection'))
parser.add_argument(
'--initiator',
choices=['bi-directional', 'response-only'],
type=_convert_to_lowercase,
help=_('Initiator state'))
peer_group = parser.add_mutually_exclusive_group()
peer_group.add_argument(
'--peer-cidr',
dest='peer_cidrs',
help=_('Remote subnet(s) in CIDR format. '
'Cannot be specified when using endpoint groups. Only '
'applicable, if subnet provided for VPN service.')
)
peer_group.add_argument(
'--local-endpoint-group',
help=_('Local endpoint group (name or ID) with subnet(s) '
'for IPsec connection')
)
parser.add_argument(
'--peer-endpoint-group',
help=_('Peer endpoint group (name or ID) with CIDR(s) for '
'IPSec connection'))
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
help=_("Enable IPSec site connection")
)
admin_group.add_argument(
'--disable',
action='store_true',
help=_("Disable IPSec site connection")
)
parser.add_argument(
'--local-id',
help=_('An ID to be used instead of the external IP '
'address for a virtual router'))
return parser
def _get_common_attrs(client_manager, parsed_args, is_create=True):
attrs = {}
if is_create:
if 'project' in parsed_args and parsed_args.project is not None:
attrs['tenant_id'] = osc_utils.find_project(
client_manager.identity,
parsed_args.project,
parsed_args.project_domain,
).id
if parsed_args.description:
attrs['description'] = str(parsed_args.description)
if parsed_args.mtu:
attrs['mtu'] = parsed_args.mtu
if parsed_args.enable:
attrs['admin_state_up'] = True
if parsed_args.disable:
attrs['admin_state_up'] = False
if parsed_args.initiator:
attrs['initiator'] = parsed_args.initiator
if parsed_args.dpd:
vpn_utils.validate_dpd_dict(parsed_args.dpd)
attrs['dpd'] = parsed_args.dpd
if parsed_args.local_endpoint_group:
_local_epg = client_manager.neutronclient.find_resource(
'endpoint_group',
parsed_args.local_endpoint_group,
cmd_resource='endpoint_group')['id']
attrs['local_ep_group_id'] = _local_epg
if parsed_args.peer_endpoint_group:
_peer_epg = client_manager.neutronclient.find_resource(
'endpoint_group',
parsed_args.peer_endpoint_group,
cmd_resource='endpoint_group')['id']
attrs['peer_ep_group_id'] = _peer_epg
if parsed_args.peer_cidrs:
attrs['peer_cidrs'] = parsed_args.peer_cidrs
if parsed_args.local_id:
attrs['local_id'] = parsed_args.local_id
return attrs
class CreateIPsecSiteConnection(command.ShowOne):
_description = _("Create an IPsec site connection")
def get_parser(self, prog_name):
parser = super(CreateIPsecSiteConnection, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--peer-id',
required=True,
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN'))
parser.add_argument(
'--peer-address',
required=True,
help=_('Peer gateway public IPv4/IPv6 address or FQDN'))
parser.add_argument(
'--psk',
required=True,
help=_('Pre-shared key string.'))
parser.add_argument(
'--vpnservice',
metavar='VPNSERVICE',
required=True,
help=_('VPN service instance associated with this '
'connection (name or ID)'))
parser.add_argument(
'--ikepolicy',
metavar='IKEPOLICY',
required=True,
help=_('IKE policy associated with this connection (name or ID)'))
parser.add_argument(
'--ipsecpolicy',
metavar='IPSECPOLICY',
required=True,
help=_('IPsec policy associated with this connection '
'(name or ID)'))
parser.add_argument(
'name',
metavar='<name>',
help=_('Set friendly name for the connection'))
osc_utils.add_project_owner_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager, parsed_args)
if parsed_args.vpnservice:
_vpnservice_id = client.find_resource(
'vpnservice',
parsed_args.vpnservice,
cmd_resource='vpnservice')['id']
attrs['vpnservice_id'] = _vpnservice_id
if parsed_args.ikepolicy:
_ikepolicy_id = client.find_resource(
'ikepolicy',
parsed_args.ikepolicy,
cmd_resource='ikepolicy')['id']
attrs['ikepolicy_id'] = _ikepolicy_id
if parsed_args.ipsecpolicy:
_ipsecpolicy_id = client.find_resource(
'ipsecpolicy',
parsed_args.ipsecpolicy,
cmd_resource='ipsecpolicy')['id']
attrs['ipsecpolicy_id'] = _ipsecpolicy_id
if parsed_args.peer_id:
attrs['peer_id'] = parsed_args.peer_id
if parsed_args.peer_address:
attrs['peer_address'] = parsed_args.peer_address
if parsed_args.psk:
attrs['psk'] = parsed_args.psk
if parsed_args.name:
attrs['name'] = parsed_args.name
if (bool(parsed_args.local_endpoint_group) !=
bool(parsed_args.peer_endpoint_group)):
message = _("You must specify both local and peer endpoint "
"groups")
raise exceptions.CommandError(message)
if not parsed_args.peer_cidrs and not parsed_args.local_endpoint_group:
message = _("You must specify endpoint groups or peer CIDR(s)")
raise exceptions.CommandError(message)
obj = client.create_ipsec_site_connection(
{'ipsec_site_connection': attrs})['ipsec_site_connection']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns, formatters=_formatters)
return display_columns, data
class DeleteIPsecSiteConnection(command.Command):
_description = _("Delete IPsec site connection(s)")
def get_parser(self, prog_name):
parser = super(DeleteIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
nargs='+',
help=_('IPsec site connection to delete (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
result = 0
for ipsec_conn in parsed_args.ipsec_site_connection:
try:
ipsec_con_id = client.find_resource(
'ipsec_site_connection',
ipsec_conn,
cmd_resource='ipsec_site_connection')['id']
client.delete_ipsec_site_connection(ipsec_con_id)
except Exception as e:
result += 1
LOG.error(_("Failed to delete IPsec site connection with "
"name or ID '%(ipsec_site_conn)s': %(e)s"),
{'ipsec_site_conn': ipsec_conn, 'e': e})
if result > 0:
total = len(parsed_args.ipsec_site_connection)
msg = (_("%(result)s of %(total)s IPsec site connection failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListIPsecSiteConnection(command.Lister):
_description = _("List IPsec site connections "
"that belong to a given project")
def get_parser(self, prog_name):
parser = super(ListIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_("List additional fields in output")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
obj = client.list_ipsec_site_connections()['ipsec_site_connections']
headers, columns = column_util.get_column_definitions(
_attr_map, long_listing=parsed_args.long)
return (headers, (utils.get_dict_properties(
s, columns, formatters=_formatters) for s in obj))
class SetIPsecSiteConnection(command.Command):
_description = _("Set IPsec site connection properties")
def get_parser(self, prog_name):
parser = super(SetIPsecSiteConnection, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--peer-id',
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN'))
parser.add_argument(
'--peer-address',
help=_('Peer gateway public IPv4/IPv6 address or FQDN'))
parser.add_argument(
'--name',
metavar='<name>',
help=_('Set friendly name for the connection'))
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
help=_('IPsec site connection to set (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager,
parsed_args, is_create=False)
if parsed_args.peer_id:
attrs['peer_id'] = parsed_args.peer_id
if parsed_args.peer_address:
attrs['peer_address'] = parsed_args.peer_address
if parsed_args.name:
attrs['name'] = parsed_args.name
ipsec_conn_id = client.find_resource(
'ipsec_site_connection', parsed_args.ipsec_site_connection,
cmd_resource='ipsec_site_connection')['id']
try:
client.update_ipsec_site_connection(
ipsec_conn_id,
{'ipsec_site_connection': attrs})
except Exception as e:
msg = (_("Failed to set IPsec site "
"connection '%(ipsec_conn)s': %(e)s")
% {'ipsec_conn': parsed_args.ipsec_site_connection, 'e': e})
raise exceptions.CommandError(msg)
class ShowIPsecSiteConnection(command.ShowOne):
_description = _("Show information of a given IPsec site connection")
def get_parser(self, prog_name):
parser = super(ShowIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
help=_('IPsec site connection to display (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
ipsec_site_id = client.find_resource(
'ipsec_site_connection', parsed_args.ipsec_site_connection,
cmd_resource='ipsec_site_connection')['id']
obj = client.show_ipsec_site_connection(
ipsec_site_id)['ipsec_site_connection']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns, formatters=_formatters)
return (display_columns, data)
| 39.768
| 79
| 0.632468
| 1,720
| 14,913
| 5.191279
| 0.153488
| 0.068317
| 0.078732
| 0.030239
| 0.465562
| 0.381454
| 0.329376
| 0.321425
| 0.287266
| 0.279875
| 0
| 0.002105
| 0.267284
| 14,913
| 374
| 80
| 39.874332
| 0.815045
| 0.040502
| 0
| 0.339506
| 0
| 0
| 0.229181
| 0.03331
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040123
| false
| 0
| 0.030864
| 0.003086
| 0.135802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a893fcf944a3942d0a9e7e6cc93c141d9894e31
| 13,620
|
py
|
Python
|
sushichef.py
|
RechercheTech/sushi-chef-arvind-gupta-toys
|
2b381d8942c16ed16b4a44d8fc020fe0a81a18c0
|
[
"MIT"
] | 1
|
2020-05-10T06:16:48.000Z
|
2020-05-10T06:16:48.000Z
|
sushichef.py
|
RechercheTech/sushi-chef-arvind-gupta-toys
|
2b381d8942c16ed16b4a44d8fc020fe0a81a18c0
|
[
"MIT"
] | 5
|
2019-10-04T11:35:21.000Z
|
2020-05-25T14:19:41.000Z
|
sushichef.py
|
RechercheTech/sushi-chef-arvind-gupta-toys
|
2b381d8942c16ed16b4a44d8fc020fe0a81a18c0
|
[
"MIT"
] | 3
|
2019-09-24T00:15:00.000Z
|
2020-02-06T16:25:36.000Z
|
#!/usr/bin/env python
import os
import requests
import re
import shutil
from arvind import ArvindVideo, ArvindLanguage, YOUTUBE_CACHE_DIR
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from ricecooker.chefs import SushiChef
from ricecooker.classes.files import YouTubeVideoFile
from ricecooker.classes.licenses import get_license
from ricecooker.classes.nodes import VideoNode, TopicNode
ARVIND = "Arvind Gupta Toys"
ARVIND_URL = "http://www.arvindguptatoys.com/films.html"
ROOT_DIR_PATH = os.getcwd()
DOWNLOADS_PATH = os.path.join(ROOT_DIR_PATH, "downloads")
DOWNLOADS_VIDEOS_PATH = os.path.join(DOWNLOADS_PATH, "videos/")
SKIP_VIDEOS_PATH = os.path.join(ROOT_DIR_PATH, "skip_videos.txt")
# These are the languages that have no sub-topics for their videos.
SINGLE_TOPIC_LANGUAGES = [
"bhojpuri; bajpuri; bhojapuri", # actual lang_obj.name in le-utils
"bhojpuri", # future-proofing for upcoming lang_obj.name changes
"nepali",
"malayalam",
"telugu",
"bengali",
"odiya",
"punjabi",
"marwari; marwadi", # actual lang_obj.name in le-utils
"marwari", # future-proofing for upcoming lang_obj.name changes
"assamese",
"urdu",
"spanish",
"chinese",
"indonesian",
"sci_edu",
"science/educational",
]
# List of multiple languages on its topics
MULTI_LANGUAGE_TOPIC = ["russian", "french",]
# This is the estimated total count of Arvind Gupta Toys language contents
TOTAL_ARVIND_LANG = 23
SINGLE_TOPIC = "single"
STANDARD_TOPIC = "standard"
MULTI_LANGUAGE = "multi"
YOUTUBE_DOMAINS = ["youtu.be", "youtube.com"]
DEBUG_MODE = True # Print extra debug info during the chef run (disable in prod)
def clean_video_title(title, lang_obj):
# Remove redundant and misleading words in the video title
clean_title = title
try:
if title != None:
clean_str = title.replace("-", " ").replace("MB", "").replace("|", "")
clean_uplang = clean_str.replace(lang_obj.name.upper(), "")
clean_lowlang = clean_uplang.replace(lang_obj.name.lower(), "")
clean_caplang = clean_lowlang.replace(lang_obj.name.capitalize() , "")
clean_format = clean_caplang.replace(".avi", "").replace(".wmv", "").strip()
clean_extra_spaces = re.sub(" +", " ",clean_format)
is_int = clean_extra_spaces[-2:]
if is_int.isdigit():
clean_extra_spaces = clean_extra_spaces.replace(is_int, "")
clean_title = clean_extra_spaces
print("Cleaned video title ====> ", clean_title)
except Exception as e:
print('Error cleaning this video title: ', clean_title)
return clean_title
def include_video_topic(topic_node, video_data, lang_obj):
# Include video details to the parent topic node
video_id = video_data.uid
video_source_id = 'arvind-video-{0}'.format(video_id)
video_node = VideoNode(
source_id=video_source_id,
title=clean_video_title(video_data.title, lang_obj),
description=video_data.description,
author=ARVIND,
thumbnail=video_data.thumbnail,
license=get_license("CC BY-NC", copyright_holder=ARVIND),
files=[
YouTubeVideoFile(
youtube_id=video_id,
language=video_data.language,
high_resolution=False,
)
])
topic_node.add_child(video_node)
def save_skip_videos(video, topic, lang_obj):
# Compile skip videos into text file
if not os.path.exists(SKIP_VIDEOS_PATH):
open(SKIP_VIDEOS_PATH,"w+")
text_file = open(SKIP_VIDEOS_PATH, "a")
video_info = video.language + " - " + topic + " - " + video.url + " - " + video.license + "\n"
text_file.write(video_info)
text_file.close()
def download_video_topics(data, topic, topic_node, lang_obj):
"""
Scrape, collect, and download the videos and their thumbnails.
"""
video_source_ids = []
for vinfo in data[topic]:
try:
video = ArvindVideo(
url=vinfo['video_url'],
title=vinfo['video_title'],
language=lang_obj.code)
if video.download_info():
if video.license_common:
video_source_id = 'arvind-video-{0}'.format(video.uid)
if video_source_id not in video_source_ids:
include_video_topic(topic_node, video, lang_obj)
video_source_ids.append(video_source_id)
else:
print('Skipping duplicate video: ' + str(vinfo['video_url']))
else:
save_skip_videos(video, topic, lang_obj)
else:
save_skip_videos(video, topic, lang_obj)
except Exception as e:
print('Error downloading this video:', e)
def generate_child_topics(arvind_contents, main_topic, lang_obj, topic_type):
# Create a topic for each languages
data = arvind_contents[lang_obj.name]
for topic_index in data:
topic_name = topic_index
if topic_type == STANDARD_TOPIC:
source_id = 'arvind-child-topic-{0}'.format(topic_name)
topic_node = TopicNode(title=topic_name, source_id=source_id)
download_video_topics(data, topic_name, topic_node, lang_obj)
main_topic.add_child(topic_node)
if topic_type == SINGLE_TOPIC:
download_video_topics(data, topic_name, main_topic, lang_obj)
return main_topic
def create_language_data(lang_data, lang_obj):
"""
Process the list of elements in `lang_data` to extract video links.
"""
topic_contents = {}
initial_topics = []
prev_topic = ""
first_count = 1
total_loop = len(lang_data)
lang_name = lang_obj.name.lower()
for item in lang_data:
total_loop -= 1
if isinstance(item, NavigableString) or item.name == 'br':
continue # skip whitespace and <br/> tags
try:
title = item.text.rstrip().strip()
video_link = ""
try:
video_a_tag = item.find('a')
if video_a_tag:
video_link = video_a_tag.get("href") # for videos
else:
video_link = "" # for headings
topic_details = {}
if any(ytd in video_link for ytd in YOUTUBE_DOMAINS):
if lang_name in MULTI_LANGUAGE_TOPIC:
current_lang = title.split()[0].lower()
if first_count == 1:
first_count = 0
prev_topic = current_lang
topic_details['video_url'] = video_link
topic_details['video_title'] = title
if lang_name in MULTI_LANGUAGE_TOPIC:
if prev_topic != current_lang:
topic_contents[prev_topic] = initial_topics
initial_topics = []
prev_topic = current_lang
initial_topics.append(topic_details)
except Exception as e:
print('>> passing on', e)
pass
if first_count == 1:
if ":" in title:
first_count = 0
prev_topic = title.replace(":", "").strip()
if video_link == "":
if ":" in title:
topic_contents[prev_topic] = initial_topics
prev_topic = title.replace(":", "").strip()
initial_topics = []
except Exception as e:
print('>>> passing on', e)
pass
# This wasn't working (last topic in each standard language was missing) ...
# if total_loop == 0:
# topic_contents[prev_topic] = initial_topics
# ... so changed to this:
topic_contents[prev_topic] = initial_topics
return topic_contents
def scrape_arvind_page():
url = ARVIND_URL
response = requests.get(url)
page = BeautifulSoup(response.text, 'html5lib')
content_divs = page.body.div
list_divs = list(content_divs.children)
languages_div_start = 5
languages_list = list(list_divs[languages_div_start].children)
return languages_list
def get_language_details(lang_name):
video_lang = ArvindLanguage(name=lang_name)
if video_lang.get_lang_obj():
return video_lang
return None
def create_language_topic():
arvind_languages = scrape_arvind_page()
main_topic_list = []
if os.path.exists(SKIP_VIDEOS_PATH):
os.remove(SKIP_VIDEOS_PATH)
loop_max = TOTAL_ARVIND_LANG
language_next_int = 7
loop_couter = 0
while (loop_couter != loop_max):
try:
lang_name = arvind_languages[language_next_int].get('id')
lang_obj = get_language_details(lang_name.lower())
if lang_obj is not None:
lang_name = lang_obj.name
lang_name_lower = lang_name.lower()
print('== Processing ', lang_name, '='*60)
language_source_id = 'arvind-parent-topic-{0}'.format(lang_name_lower)
# print('language_source_id =', language_source_id)
get_language_data = list(arvind_languages[language_next_int])
# print('len(get_language_data) = ', len(get_language_data))
data_contents = { lang_name: create_language_data(get_language_data, lang_obj) }
# print('len(data_contents[lang_name])', len(data_contents[lang_name]))
language_topic = TopicNode(title=lang_name.capitalize(), source_id=language_source_id)
if lang_name_lower not in SINGLE_TOPIC_LANGUAGES and lang_name_lower not in MULTI_LANGUAGE_TOPIC:
print("=======> This Language is in standard format", lang_name)
topic_type = STANDARD_TOPIC
generate_child_topics(data_contents, language_topic, lang_obj, topic_type)
main_topic_list.append(language_topic)
print("=====>finished", lang_name)
if lang_name_lower in SINGLE_TOPIC_LANGUAGES:
print("=====> This Language is in single topic format ", lang_name)
topic_type = SINGLE_TOPIC
generate_child_topics(data_contents, language_topic, lang_obj, topic_type)
main_topic_list.append(language_topic)
print("=====>finished", lang_name)
if lang_name_lower in MULTI_LANGUAGE_TOPIC:
print("=====> This Language is in multiple langauage topic format ", lang_name)
lang_data = create_language_data(get_language_data, lang_obj)
for lang in lang_data:
current_lang = get_language_details(lang.lower())
if current_lang is not None:
parent_source_id = 'arvind-parent-topic-{0}'.format(current_lang.name)
parent_topic = TopicNode(title=lang.capitalize(), source_id=parent_source_id)
data_dic = {current_lang.name: {"": lang_data[lang]}}
topic_type = SINGLE_TOPIC
generate_child_topics(data_dic, parent_topic, current_lang, topic_type)
main_topic_list.append(parent_topic)
print("=====>finished ", lang)
except Exception as e:
print("===> error getting language topics: ", e)
# raise(e)
language_next_int += 4
loop_counter += 1
return main_topic_list
class ArvindChef(SushiChef):
channel_info = {
"CHANNEL_TITLE": "Arvind Gupta Toys",
"CHANNEL_SOURCE_DOMAIN": "arvindguptatoys.com",
"CHANNEL_SOURCE_ID": "toys-from-trash",
"CHANNEL_LANGUAGE": "mul",
"CHANNEL_THUMBNAIL": 'chefdata/arvind_gupta_thumbnail.png',
"CHANNEL_DESCRIPTION": "Math and science activities through low-cost " \
"materials all in the form of videos to provide various pathways for children to explore" \
" and deepen their understanding of concepts in low-resource contexts around the world." \
" Valuable resource library for teachers to incorporate in their lessons, for parents to" \
" work with children at home using readily available, simple, and low-cost materials.",
}
def pre_run(self, args, options):
"""This function will get called by ricecooker before the chef runs."""
if args['update']:
# delete video info .json files cached in chefdata/youtubecache/
print('Deleting vinfo .json files in {}'.format(YOUTUBE_CACHE_DIR))
if os.path.exists(YOUTUBE_CACHE_DIR):
shutil.rmtree(YOUTUBE_CACHE_DIR)
os.makedirs(YOUTUBE_CACHE_DIR)
def construct_channel(self, **kwargs):
channel = self.get_channel(**kwargs)
languages_topic = create_language_topic()
for lang_topic in languages_topic:
channel.add_child(lang_topic)
return channel
if __name__ == "__main__":
"""
Run this script on the command line using:
python sushichef.py -v --reset --token=YOURTOKENHERE9139139f3a23232
"""
chef = ArvindChef()
chef.main()
| 37.01087
| 113
| 0.606608
| 1,583
| 13,620
| 4.933039
| 0.217309
| 0.026892
| 0.014086
| 0.011525
| 0.257011
| 0.189141
| 0.134076
| 0.101165
| 0.054296
| 0.034575
| 0
| 0.004099
| 0.301395
| 13,620
| 367
| 114
| 37.111717
| 0.816605
| 0.093906
| 0
| 0.174242
| 0
| 0
| 0.125216
| 0.010208
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0.015152
| 0.041667
| 0
| 0.121212
| 0.056818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a89890f028ab800ae7dcb96dcff01c0b7e8d98a
| 1,184
|
py
|
Python
|
90-subsets-ii.py
|
yuenliou/leetcode
|
e8a1c6cae6547cbcb6e8494be6df685f3e7c837c
|
[
"MIT"
] | null | null | null |
90-subsets-ii.py
|
yuenliou/leetcode
|
e8a1c6cae6547cbcb6e8494be6df685f3e7c837c
|
[
"MIT"
] | null | null | null |
90-subsets-ii.py
|
yuenliou/leetcode
|
e8a1c6cae6547cbcb6e8494be6df685f3e7c837c
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
from typing import List
class Solution:
def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
"""
Solution write-up (in Chinese): https://leetcode-cn.com/problems/subsets/solution/c-zong-jie-liao-hui-su-wen-ti-lei-xing-dai-ni-gao-/
"""
def backtrack(start, path):
# Termination condition: none, every path is recorded
res.append(path[:])
for i in range(start, len(nums)):
# Skip if equal to the previous number (avoids duplicate subsets)
if i > start and nums[i] == nums[i - 1]: continue
# Make a choice
path.append(nums[i])
# Recurse into the next level of decisions
backtrack(i + 1, path)
# Undo the choice (backtrack)
path.pop()
res = []
nums.sort()
backtrack(0, [])
return res
def main():
param = [1,2,2]
solution = Solution()
ret = solution.subsetsWithDup(param)
print(ret)
'''90. Subsets II
Given an integer array nums that may contain duplicate elements, return all possible subsets (the power set).
Note: the solution set must not contain duplicate subsets.
Example:
Input: [1,2,2]
Output:
[
[2],
[1],
[1,2,2],
[2,2],
[1,2],
[]
]
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/subsets-ii
The copyright belongs to LeetCode (领扣网络). Please contact them for authorization before commercial reprints; cite the source for non-commercial reprints.
'''
if __name__ == '__main__':
main()
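# Illustrative cross-check (not part of the original solution): a brute-force
# enumeration with itertools should, for small inputs, produce the same
# subsets as the backtracking approach once both results are sorted.
from itertools import combinations

def subsets_with_dup_bruteforce(nums: List[int]) -> List[List[int]]:
    nums = sorted(nums)
    seen = set()
    for r in range(len(nums) + 1):
        # tuples drawn from a sorted list are canonical, so the set dedupes them
        seen.update(combinations(nums, r))
    return [list(t) for t in seen]

assert sorted(Solution().subsetsWithDup([1, 2, 2])) == sorted(subsets_with_dup_bruteforce([1, 2, 2]))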
| 19.733333
| 112
| 0.516047
| 147
| 1,184
| 4.102041
| 0.585034
| 0.016584
| 0.014925
| 0.059701
| 0.109453
| 0.109453
| 0
| 0
| 0
| 0
| 0
| 0.028501
| 0.318412
| 1,184
| 59
| 113
| 20.067797
| 0.718711
| 0.15625
| 0
| 0
| 0
| 0
| 0.011111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.047619
| 0
| 0.285714
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a8a19db97a47f9f1fc1395728868b9d716366fe
| 450
|
py
|
Python
|
tools/output_tool.py
|
climberwb/bert-pli
|
0e6eda7a23b7502c86eab4c0d889fad1bbb57155
|
[
"MIT"
] | 5
|
2020-12-24T01:46:40.000Z
|
2022-03-18T19:15:10.000Z
|
tools/output_tool.py
|
climberwb/bert-pli
|
0e6eda7a23b7502c86eab4c0d889fad1bbb57155
|
[
"MIT"
] | 1
|
2021-04-05T14:27:24.000Z
|
2021-04-05T14:27:24.000Z
|
tools/output_tool.py
|
climberwb/bert-pli
|
0e6eda7a23b7502c86eab4c0d889fad1bbb57155
|
[
"MIT"
] | 4
|
2020-12-28T09:20:13.000Z
|
2021-12-10T13:33:21.000Z
|
import json
from .accuracy_tool import gen_micro_macro_result
def null_output_function(data, config, *args, **params):
return ""
def basic_output_function(data, config, *args, **params):
which = config.get("output", "output_value").replace(" ", "").split(",")
temp = gen_micro_macro_result(data)
result = {}
for name in which:
result[name] = temp[name]
return json.dumps(result, sort_keys=True)
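# Illustrative usage (not part of the original module), assuming `config` is a
# ConfigParser-style object and that gen_micro_macro_result returns a dict
# keyed by metric names; "micro_f1"/"macro_f1" below are placeholder names,
# not necessarily the library's actual keys:
#
#   import configparser
#   config = configparser.ConfigParser()
#   config["output"] = {"output_value": "micro_f1, macro_f1"}
#   print(basic_output_function(data, config))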
| 25
| 77
| 0.653333
| 57
| 450
| 4.929825
| 0.561404
| 0.05694
| 0.092527
| 0.135231
| 0.241993
| 0.241993
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211111
| 450
| 17
| 78
| 26.470588
| 0.791549
| 0
| 0
| 0
| 0
| 0
| 0.046189
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.181818
| 0.090909
| 0.545455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a8ae0336fc8e8f4551cb0d621a28672bac709c0
| 27,100
|
py
|
Python
|
python/drydock_provisioner/ingester/plugins/deckhand.py
|
Vjrx/airship-drydock
|
315fb9864e6d55a66d5266f76c160be55d22c98b
|
[
"Apache-2.0"
] | 14
|
2018-05-19T11:58:22.000Z
|
2019-05-10T12:31:36.000Z
|
python/drydock_provisioner/ingester/plugins/deckhand.py
|
Vjrx/airship-drydock
|
315fb9864e6d55a66d5266f76c160be55d22c98b
|
[
"Apache-2.0"
] | 10
|
2019-11-12T17:21:16.000Z
|
2021-11-10T18:16:06.000Z
|
python/drydock_provisioner/ingester/plugins/deckhand.py
|
Vjrx/airship-drydock
|
315fb9864e6d55a66d5266f76c160be55d22c98b
|
[
"Apache-2.0"
] | 11
|
2018-06-05T16:21:18.000Z
|
2019-04-03T11:44:34.000Z
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This data ingester will consume YAML site topology documents."""
import yaml
import logging
import jsonschema
import os
import pkg_resources
import copy
import hashlib
import drydock_provisioner.objects.fields as hd_fields
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from drydock_provisioner import error as errors
from drydock_provisioner import objects
from drydock_provisioner.ingester.plugins import IngesterPlugin
cache_opts = {
'cache.type': 'memory',
'expire': 1800,
}
cache = CacheManager(**parse_cache_config_options(cache_opts))
class DeckhandIngester(IngesterPlugin):
def __init__(self):
super().__init__()
self.logger = logging.getLogger('drydock.ingester.deckhand')
self.load_schemas()
def get_name(self):
return "deckhand"
def ingest_data(self, **kwargs):
"""Parse and save design data.
:param content: String of valid Deckhand YAML
:returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects
"""
def local_parse():
return self.parse_docs(kwargs.get('content'))
if 'content' in kwargs:
try:
# Hash the input to use as the cache key. This is not a security
# related hash, so use cheap and fast MD5
hv = hashlib.md5(kwargs.get('content', b'')).hexdigest()
local_cache = cache.get_cache('parsed_docs')
results = local_cache.get(key=hv, createfunc=local_parse)
parse_status, models = results
except Exception as ex:
self.logger.debug("Error parsing design - hash %s", hv, exc_info=ex)
raise ex
else:
raise ValueError('Missing parameter "content"')
return parse_status, models
def parse_docs(self, doc_blob):
"""Translate a YAML string into the internal Drydock model.
Returns a tuple of an objects.TaskStatus instance to summarize all
document processing and a list of models yielded by successful processing
:param doc_blob: bytes representing a utf-8 encoded YAML string
"""
models = []
yaml_string = doc_blob.decode()
self.logger.debug("yamlingester:parse_docs - Parsing YAML string.")
try:
parsed_data = yaml.safe_load_all(yaml_string)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
raise errors.IngesterError(
"Error parsing YAML at (l:%s, c:%s): %s" %
(mark.line + 1, mark.column + 1, err))
else:
raise errors.IngesterError("Error parsing YAML: %s" % (err))
# tracking processing status to provide a complete summary of issues
ps = objects.Validation()
ps.set_status(hd_fields.ValidationResult.Success)
for d in parsed_data:
try:
(schema_ns, doc_kind, doc_version) = d.get('schema',
'').split('/')
except ValueError as ex:
self.logger.error(
"Error with document structure.", exc_info=ex)
self.logger.debug("Error document\n%s" % yaml.dump(d))
continue
if schema_ns == 'drydock':
try:
doc_ref = objects.DocumentReference(
doc_type=hd_fields.DocumentType.Deckhand,
doc_schema=d.get('schema'),
doc_name=d.get('metadata', {}).get('name', 'Unknown'))
doc_errors = self.validate_drydock_document(d)
if len(doc_errors) > 0:
for e in doc_errors:
ps.add_detail_msg(
objects.ValidationMessage(
msg="%s:%s schema validation error: %s" %
(doc_kind, doc_version, e),
name="DD001",
docs=[doc_ref],
error=True,
level=hd_fields.MessageLevels.ERROR,
diagnostic=
"Invalid input file - see Drydock Troubleshooting Guide for DD001"
))
ps.set_status(hd_fields.ActionResult.Failure)
continue
model = self.process_drydock_document(d)
model.doc_ref = doc_ref
models.append(model)
except errors.IngesterError as ie:
msg = "Error processing document: %s" % str(ie)
self.logger.warning(msg)
ps.add_detail_msg(
objects.ValidationMessage(
msg=msg,
name="DD000",
error=True,
level=hd_fields.MessageLevels.ERROR,
docs=[doc_ref],
diagnostic="Exception during document processing "
"- see Drydock Troubleshooting Guide "
"for DD000"))
ps.set_status(hd_fields.ActionResult.Failure)
except Exception as ex:
msg = "Unexpected error processing document: %s" % str(ex)
self.logger.error(msg, exc_info=True)
ps.add_detail_msg(
objects.ValidationMessage(
msg=msg,
name="DD000",
error=True,
level=hd_fields.MessageLevels.ERROR,
docs=[doc_ref],
diagnostic="Unexpected exception during document "
"processing - see Drydock Troubleshooting "
"Guide for DD000"))
ps.set_status(hd_fields.ActionResult.Failure)
return (ps, models)
def process_drydock_document(self, doc):
"""Process a parsed YAML document.
:param doc: The dictionary from parsing the YAML document
"""
(schema_ns, kind, version) = doc.get('schema', '').split('/')
if version == 'v1':
doc_processor = DeckhandIngester.v1_doc_handlers.get(kind, None)
else:
doc_processor = None
if doc_processor is None:
raise errors.IngesterError(
"Invalid document - Kind %s and Version %s" % (kind, version))
metadata = doc.get('metadata', {})
doc_name = metadata.get('name')
return doc_processor(self, doc_name, doc.get('data', {}))
def validate_drydock_document(self, doc):
"""Validate a parsed document via jsonschema.
If a schema for a document Kind is not available, the document is
considered valid. Schema is chosen by the doc['kind'] field.
Returns an empty list for valid documents, otherwise returns a list
of all found errors
:param doc: dictionary of the parsed document.
"""
schemaname = doc.get('schema', '')
(schema_ns, doc_kind, doc_version) = schemaname.split('/')
errors_found = []
if doc_version == 'v1':
if schemaname in self.v1_doc_schemas:
validator = jsonschema.Draft4Validator(
self.v1_doc_schemas.get(schemaname))
for error in validator.iter_errors(doc.get('data', [])):
errors_found.append(error.message)
return errors_found
def process_drydock_region(self, name, data):
"""Process the data/spec section of a Region document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Site()
# Need to add validation logic, we'll assume the input is
# valid for now
model.name = name
model.status = hd_fields.SiteStatus.Unknown
model.source = hd_fields.ModelSource.Designed
model.tag_definitions = objects.NodeTagDefinitionList()
tag_defs = data.get('tag_definitions', [])
for t in tag_defs:
tag_model = objects.NodeTagDefinition()
tag_model.tag = t.get('tag', '')
tag_model.type = t.get('definition_type', '')
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError(
'Unknown definition_type in '
'tag_definition instance: %s' % (t.definition_type))
model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', [])
model.authorized_keys = [k for k in auth_keys]
repos = data.get('repositories', None)
if repos:
model.repositories = self.process_drydock_region_repo_list(repos)
return model
def process_drydock_region_repo_list(self, data):
"""Process a package repository list.
:param data: The data from the ``repositories`` key in a Region document
"""
model = objects.RepositoryList()
for k, v in data.items():
if k == 'remove_unlisted':
model.remove_unlisted = v
else:
model.append(objects.Repository(name=k, **v))
return model
def process_drydock_rack(self, name, data):
"""Process the data/spec section of a Rack document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Rack()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.tor_switches = objects.TorSwitchList()
tors = data.get('tor_switches', {})
for k, v in tors.items():
tor = objects.TorSwitch()
tor.switch_name = k
tor.mgmt_ip = v.get('mgmt_ip', None)
tor.sdn_api_uri = v.get('sdn_api_url', None)
model.tor_switches.append(tor)
model.location = copy.deepcopy(data.get('location', {}))
model.local_networks = [n for n in data.get('local_networks', [])]
return model
def process_drydock_networklink(self, name, data):
"""Process the data/spec section of a NetworkLink document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.NetworkLink()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
bonding = data.get('bonding', {})
model.bonding_mode = bonding.get(
'mode', hd_fields.NetworkLinkBondingMode.Disabled)
if model.bonding_mode in \
(hd_fields.NetworkLinkBondingMode.LACP,
hd_fields.NetworkLinkBondingMode.RoundRobin,
hd_fields.NetworkLinkBondingMode.Standby):
model.bonding_mon_rate = bonding.get('mon_rate', '100')
model.bonding_up_delay = bonding.get('up_delay', '200')
model.bonding_down_delay = bonding.get('down_delay', '200')
if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP:
model.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
model.bonding_peer_rate = bonding.get('peer_rate', 'fast')
model.mtu = data.get('mtu', None)
model.linkspeed = data.get('linkspeed', None)
trunking = data.get('trunking', {})
model.trunk_mode = trunking.get(
'mode', hd_fields.NetworkLinkTrunkingMode.Disabled)
model.native_network = trunking.get('default_network', None)
model.allowed_networks = data.get('allowed_networks', None)
return model
def process_drydock_network(self, name, data):
"""Process the data/spec section of a Network document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Network()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
model.cidr = data.get('cidr', None)
model.vlan_id = data.get('vlan', None)
model.mtu = data.get('mtu', None)
model.routedomain = data.get('routedomain', None)
dns = data.get('dns', {})
model.dns_domain = dns.get('domain', 'local')
model.dns_servers = dns.get('servers', None)
ranges = data.get('ranges', [])
model.ranges = []
for r in ranges:
model.ranges.append({
'type': r.get('type', None),
'start': r.get('start', None),
'end': r.get('end', None),
})
routes = data.get('routes', [])
model.routes = []
for r in routes:
model.routes.append({
'subnet': r.get('subnet', None),
'gateway': r.get('gateway', None),
'metric': r.get('metric', None),
'routedomain': r.get('routedomain', None),
})
dhcp_relay = data.get('dhcp_relay', None)
if dhcp_relay is not None:
model.dhcp_relay_self_ip = dhcp_relay.get('self_ip', None)
model.dhcp_relay_upstream_target = dhcp_relay.get(
'upstream_target', None)
return model
def process_drydock_hwprofile(self, name, data):
"""Process the data/spec section of a HardwareProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HardwareProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
model.vendor = data.get('vendor', None)
model.generation = data.get('generation', None)
model.hw_version = data.get('hw_version', None)
model.bios_version = data.get('bios_version', None)
model.boot_mode = data.get('boot_mode', None)
model.bootstrap_protocol = data.get('bootstrap_protocol', None)
model.pxe_interface = data.get('pxe_interface', None)
model.devices = objects.HardwareDeviceAliasList()
device_aliases = data.get('device_aliases', {})
for d, v in device_aliases.items():
dev_model = objects.HardwareDeviceAlias()
dev_model.source = hd_fields.ModelSource.Designed
dev_model.alias = d
dev_model.bus_type = v.get('bus_type', None)
dev_model.dev_type = v.get('dev_type', None)
dev_model.address = v.get('address', None)
model.devices.append(dev_model)
model.cpu_sets = data.get('cpu_sets', None) or dict()
model.hugepages_confs = objects.HugepagesConfList()
for c, d in data.get('hugepages', {}).items():
conf = objects.HugepagesConf(
name=c, size=d.get('size'), count=d.get('count'))
model.hugepages_confs.append(conf)
return model
def process_drydock_hostprofile(self, name, data):
"""Process the data/spec section of a HostProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HostProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
return model
def process_drydock_bootaction(self, name, data):
"""Process the data/spec section of a BootAction document.
:param name: the document name attribute
:param data: the dictionary of the parsed data/spec section
"""
model = objects.BootAction()
model.name = name
model.source = hd_fields.ModelSource.Designed
assets = data.get('assets')
model.asset_list = objects.BootActionAssetList()
for a in assets:
ba = self.process_bootaction_asset(a)
model.asset_list.append(ba)
node_filter = data.get('node_filter', None)
if node_filter is not None:
nfs = self.process_bootaction_nodefilter(node_filter)
model.node_filter = nfs
model.signaling = data.get('signaling', None)
return model
def process_bootaction_asset(self, asset_dict):
"""Process a dictionary representing a BootAction Data Asset.
:param asset_dict: dictionary representing the bootaction asset
"""
model = objects.BootActionAsset(**asset_dict)
return model
def process_bootaction_nodefilter(self, nf):
"""Process a dictionary representing a BootAction NodeFilter Set.
:param nf: dictionary representing the bootaction nodefilter set.
"""
model = objects.NodeFilterSet()
model.filter_set_type = nf.get('filter_set_type', None)
model.filter_set = []
for nf_item in nf.get('filter_set', []):
nf_model = objects.NodeFilter(**nf_item)
model.filter_set.append(nf_model)
return model
def process_drydock_node(self, name, data):
"""Process the data/spec section of a BaremetalNode document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.BaremetalNode()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
node_metadata = data.get('metadata', {})
model.boot_mac = node_metadata.get('boot_mac', None)
addresses = data.get('addressing', [])
if len(addresses) == 0:
raise errors.IngesterError('BaremetalNode needs at least'
' 1 assigned address')
model.addressing = objects.IpAddressAssignmentList()
for a in addresses:
assignment = objects.IpAddressAssignment()
address = a.get('address', '')
if address == 'dhcp':
assignment.type = 'dhcp'
assignment.address = None
assignment.network = a.get('network')
model.addressing.append(assignment)
elif address != '':
assignment.type = 'static'
assignment.address = a.get('address')
assignment.network = a.get('network')
model.addressing.append(assignment)
else:
self.logger.error("Invalid address assignment %s on Node %s" %
(address, name))
return model
def process_host_common_fields(self, data, model):
"""Process fields common to the host-based documents.
Update the provided model with the values of fields common
to BaremetalNode and HostProfile documents.
:param data: dictionary from YAML parsing of the document data/spec section
:param model: instance of objects.HostProfile or objects.BaremetalNode to update
"""
model.parent_profile = data.get('host_profile', None)
model.hardware_profile = data.get('hardware_profile', None)
oob = data.get('oob', {})
model.oob_parameters = {}
for k, v in oob.items():
if k == 'type':
model.oob_type = oob.get('type', None)
else:
model.oob_parameters[k] = v
(model.storage_devices,
model.volume_groups) = self.process_node_storage(
data.get('storage', {}))
interfaces = data.get('interfaces', {})
model.interfaces = objects.HostInterfaceList()
for k, v in interfaces.items():
int_model = objects.HostInterface()
# A null value indicates this interface should be removed
# from any parent profiles
if v is None:
int_model.device_name = '!' + k
continue
int_model.device_name = k
int_model.network_link = v.get('device_link', None)
int_model.hardware_slaves = []
slaves = v.get('slaves', [])
for s in slaves:
int_model.hardware_slaves.append(s)
int_model.networks = []
networks = v.get('networks', [])
for n in networks:
int_model.networks.append(n)
if 'sriov' in v:
int_model.sriov = True
int_model.vf_count = v.get('sriov', {}).get('vf_count', 0)
int_model.trustedmode = v.get('sriov', {}).get(
'trustedmode', False)
model.interfaces.append(int_model)
platform = data.get('platform', {})
model.image = platform.get('image', None)
model.kernel = platform.get('kernel', None)
model.kernel_params = {}
for k, v in platform.get('kernel_params', {}).items():
model.kernel_params[k] = v
model.primary_network = data.get('primary_network', None)
node_metadata = data.get('metadata', {})
metadata_tags = node_metadata.get('tags', [])
model.tags = metadata_tags
owner_data = node_metadata.get('owner_data', {})
model.owner_data = {}
for k, v in owner_data.items():
model.owner_data[k] = v
model.rack = node_metadata.get('rack', None)
return model
def process_node_storage(self, storage):
"""Process the storage data for a node-based document.
Return a tuple of two lists: the first is a StorageDeviceList, the
second is a VolumeGroupList.
:param storage: dictionary of the storage section of a document
"""
phys_devs = storage.get('physical_devices', {})
storage_devices = objects.HostStorageDeviceList()
for k, v in phys_devs.items():
sd = objects.HostStorageDevice(name=k)
sd.source = hd_fields.ModelSource.Designed
if 'labels' in v:
sd.labels = v.get('labels').copy()
if 'volume_group' in v:
vg = v.get('volume_group')
sd.volume_group = vg
elif 'partitions' in v:
sd.partitions = objects.HostPartitionList()
for vv in v.get('partitions', []):
part_model = objects.HostPartition()
part_model.name = vv.get('name')
part_model.source = hd_fields.ModelSource.Designed
part_model.part_uuid = vv.get('part_uuid', None)
part_model.size = vv.get('size', None)
if 'labels' in vv:
part_model.labels = vv.get('labels').copy()
if 'volume_group' in vv:
part_model.volume_group = vv.get('vg')
elif 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
part_model.mountpoint = fs_info.get('mountpoint', None)
part_model.fstype = fs_info.get('fstype', 'ext4')
part_model.mount_options = fs_info.get(
'mount_options', 'defaults')
part_model.fs_uuid = fs_info.get('fs_uuid', None)
part_model.fs_label = fs_info.get('fs_label', None)
sd.partitions.append(part_model)
storage_devices.append(sd)
volume_groups = objects.HostVolumeGroupList()
vol_groups = storage.get('volume_groups', {})
for k, v in vol_groups.items():
vg = objects.HostVolumeGroup(name=k)
vg.vg_uuid = v.get('vg_uuid', None)
vg.logical_volumes = objects.HostVolumeList()
volume_groups.append(vg)
for vv in v.get('logical_volumes', []):
lv = objects.HostVolume(name=vv.get('name'))
lv.size = vv.get('size', None)
lv.lv_uuid = vv.get('lv_uuid', None)
if 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
lv.mountpoint = fs_info.get('mountpoint', None)
lv.fstype = fs_info.get('fstype', 'ext4')
lv.mount_options = fs_info.get('mount_options', 'defaults')
lv.fs_uuid = fs_info.get('fs_uuid', None)
lv.fs_label = fs_info.get('fs_label', None)
vg.logical_volumes.append(lv)
return (storage_devices, volume_groups)
def load_schemas(self):
self.v1_doc_schemas = dict()
schema_dir = self._get_schema_dir()
for schema_file in os.listdir(schema_dir):
f = open(os.path.join(schema_dir, schema_file), 'r')
for schema in yaml.safe_load_all(f):
schema_for = schema['metadata']['name']
if schema_for in self.v1_doc_schemas:
self.logger.warning(
"Duplicate document schemas found for document kind %s."
% schema_for)
self.logger.debug(
"Loaded schema for document kind %s." % schema_for)
self.v1_doc_schemas[schema_for] = schema.get('data')
f.close()
def _get_schema_dir(self):
return pkg_resources.resource_filename('drydock_provisioner',
'schemas')
# Mapping of handlers for different document kinds
v1_doc_handlers = {
'Region': process_drydock_region,
'Rack': process_drydock_rack,
'NetworkLink': process_drydock_networklink,
'Network': process_drydock_network,
'HardwareProfile': process_drydock_hwprofile,
'HostProfile': process_drydock_hostprofile,
'BaremetalNode': process_drydock_node,
'BootAction': process_drydock_bootaction,
}
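# Illustrative note (not part of the original plugin): handler dispatch keys
# off the Deckhand schema string, which has the form
# "<namespace>/<Kind>/<version>". A standalone sketch of that split, using a
# hypothetical Region document:
_example_doc = yaml.safe_load(
    'schema: drydock/Region/v1\n'
    'metadata: {name: example-site}\n'
    'data: {}\n')
_ns, _kind, _version = _example_doc.get('schema', '').split('/')
# _ns == 'drydock', _kind == 'Region', _version == 'v1'
# -> DeckhandIngester.v1_doc_handlers['Region'], i.e. process_drydock_region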
| 37.225275
| 108
| 0.573579
| 3,009
| 27,100
| 5.01429
| 0.168827
| 0.020414
| 0.016901
| 0.017895
| 0.258218
| 0.213746
| 0.178685
| 0.156482
| 0.139515
| 0.124934
| 0
| 0.003407
| 0.328561
| 27,100
| 727
| 109
| 37.276479
| 0.825786
| 0.146384
| 0
| 0.178344
| 0
| 0
| 0.104372
| 0.002126
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046709
| false
| 0
| 0.027601
| 0.006369
| 0.121019
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a8ca4ac28e4f99e7596ac67b54b694b5e38191d
| 5,517
|
py
|
Python
|
porting_tools/package_xml_porter.py
|
nreplogle/ros2-migration-tools
|
8e422731dea52df19da6de780319a17516f60f7c
|
[
"Apache-2.0"
] | 92
|
2018-10-17T22:18:01.000Z
|
2022-03-19T22:03:16.000Z
|
porting_tools/package_xml_porter.py
|
nreplogle/ros2-migration-tools
|
8e422731dea52df19da6de780319a17516f60f7c
|
[
"Apache-2.0"
] | 12
|
2019-02-21T22:29:15.000Z
|
2021-06-28T22:33:31.000Z
|
porting_tools/package_xml_porter.py
|
nreplogle/ros2-migration-tools
|
8e422731dea52df19da6de780319a17516f60f7c
|
[
"Apache-2.0"
] | 19
|
2018-10-18T11:47:07.000Z
|
2022-02-04T18:41:03.000Z
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
""" Contains a class and method for porting a package.xml file from catkin to ament"""
import xml.etree.ElementTree as etree
from .constants import CatkinToAmentMigration, PACKAGE_XML_ELEMENT_ORDER
from .utils import get_functions_with
def new_element(tag, text="", tail="\n", attrib=None):
""" Helper function to make creating an element with a text and tail easier """
if not attrib:
attrib = {}
element = etree.Element(tag, attrib=attrib)
element.text = text
element.tail = tail
return element
def tag_order(tag):
""" Returns integer to order tags """
if tag in PACKAGE_XML_ELEMENT_ORDER:
return PACKAGE_XML_ELEMENT_ORDER.index(tag)
return float("inf")
class PackageXMLPorter:
"""A class for porting a package.xml file from catkin to ament"""
@staticmethod
def port(tree, extra_rules=[]):
"""
Ports package.xml from catkin to ament
Arguments:
tree - the xml tree representing the package.xml file (output of etree.parse("package.xml"))
extra_rules - a list of functions to apply to the xml tree
Returns:
The new xml tree
"""
# Pulls out all methods in this class with name starting with "rule"
rules = get_functions_with(criteria=lambda name: name.startswith("rule"),
from_class=PackageXMLPorter)
package_root = tree.getroot()
for rule in rules + extra_rules:
rule(package_root)
# Make sure there's a final newline
package_root.tail = "\n"
# Reorder the elements
package_root[:] = sorted(list(package_root), key=lambda elem: tag_order(elem.tag))
# Correct indentation
PackageXMLPorter.indent_tree(elem=package_root, level=0)
#########################
# RULES #
#########################
@staticmethod
def rule_set_format(package_root):
# ROS 2 supports formats 2,3
package_root.set("format", "3")
@staticmethod
def rule_set_build_tool(package_root):
for elem in package_root.findall("buildtool_depend"):
if elem.text and elem.text.strip() == "catkin":
package_root.remove(elem)
package_root.append(new_element(tag="buildtool_depend", text="ament_cmake"))
@staticmethod
def rule_set_client_library(package_root):
for elem in list(package_root):
if elem.text and elem.text.strip() in CatkinToAmentMigration.CLIENT_CONVERSION:
elem.text = CatkinToAmentMigration.CLIENT_CONVERSION[elem.text.strip()]
@staticmethod
def rule_add_export_build_type(package_root):
build_elem = new_element(tag="build_type", text="ament_cmake", tail="\n ")
export_elem = new_element(tag="export", text="\n ")
export_elem.append(build_elem)
package_root.append(export_elem)
@staticmethod
def rule_set_run_to_exec_depend(package_root):
for elem in package_root.findall("run_depend"):
elem.tag = "exec_depend"
@staticmethod
def rule_set_depend_to_run_exec(package_root):
for elem in package_root.findall("depend"):
elem.tag = "build_depend"
package_root.append(new_element(tag="exec_depend", text=elem.text, attrib=elem.attrib))
@staticmethod
def rule_update_message_gen_dependency(package_root):
message_generation_used = False
for elem in list(package_root):
if elem.text in ("message_generation", "message_runtime"):
package_root.remove(elem)
message_generation_used = True
if message_generation_used:
package_root.append(new_element(tag="buildtool_depend", text="rosidl_default_generators"))
package_root.append(new_element(tag="build_depend", text="builtin_interfaces"))
package_root.append(new_element(tag="exec_depend", text="builtin_interfaces"))
package_root.append(new_element(tag="exec_depend", text="rosidl_default_runtime"))
package_root.append(new_element(tag="member_of_group", text="rosidl_interface_packages"))
#########################
# HELPERS #
#########################
@staticmethod
def indent_tree(elem, level):
if len(elem) > 0: # element has children
if elem.text is None or len(elem.text) == 0:
elem.text = "\n" + (" "*(level+1)) # sets the indent for the children
list(elem)[-1].tail = "\n" + " "*level
for child in list(elem)[:-1]:
child.tail = "\n" + (" "*(level+1))
PackageXMLPorter.indent_tree(elem=child, level=level+1)
if __name__ == '__main__':
tree = etree.parse("package.xml")
PackageXMLPorter.port(tree=tree)
tree.write("updated_package.xml", encoding="utf-8", xml_declaration=True)
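# Illustrative extension (not part of the original module): port() accepts
# extra_rules, so a caller can add project-specific rewrites without touching
# the class. The rule below is hypothetical and the tree is built in memory.
import io

def rule_add_lint_dependency(package_root):
    # Hypothetical extra rule: append a lint test dependency to the manifest.
    package_root.append(new_element(tag="test_depend", text="ament_lint_auto"))

_demo_tree = etree.parse(io.StringIO("<package><name>demo_pkg</name></package>"))
PackageXMLPorter.port(tree=_demo_tree, extra_rules=[rule_add_lint_dependency])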
| 39.12766
| 104
| 0.649085
| 700
| 5,517
| 4.922857
| 0.278571
| 0.092571
| 0.037725
| 0.040627
| 0.219385
| 0.189495
| 0.172084
| 0.163088
| 0.130006
| 0.088799
| 0
| 0.00498
| 0.235635
| 5,517
| 140
| 105
| 39.407143
| 0.812189
| 0.236542
| 0
| 0.1625
| 0
| 0
| 0.103672
| 0.017987
| 0.0125
| 0
| 0
| 0
| 0
| 1
| 0.1375
| false
| 0
| 0.0375
| 0
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a8eaddf7ae51bc116bee8d180b8c5c1f2cfecaf
| 4,739
|
py
|
Python
|
endpoints/api/permission_models_interface.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 2,027
|
2019-11-12T18:05:48.000Z
|
2022-03-31T22:25:04.000Z
|
endpoints/api/permission_models_interface.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 496
|
2019-11-12T18:13:37.000Z
|
2022-03-31T10:43:45.000Z
|
endpoints/api/permission_models_interface.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 249
|
2019-11-12T18:02:27.000Z
|
2022-03-22T12:19:19.000Z
|
import sys
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from six import add_metaclass
class SaveException(Exception):
def __init__(self, other):
self.traceback = sys.exc_info()
super(SaveException, self).__init__(str(other))
class DeleteException(Exception):
def __init__(self, other):
self.traceback = sys.exc_info()
super(DeleteException, self).__init__(str(other))
class Role(namedtuple("Role", ["role_name"])):
def to_dict(self):
return {
"role": self.role_name,
}
class UserPermission(
namedtuple(
"UserPermission",
[
"role_name",
"username",
"is_robot",
"avatar",
"is_org_member",
"has_org",
],
)
):
def to_dict(self):
perm_dict = {
"role": self.role_name,
"name": self.username,
"is_robot": self.is_robot,
"avatar": self.avatar,
}
if self.has_org:
perm_dict["is_org_member"] = self.is_org_member
return perm_dict
class RobotPermission(
namedtuple(
"RobotPermission",
[
"role_name",
"username",
"is_robot",
"is_org_member",
],
)
):
def to_dict(self, user=None, team=None, org_members=None):
return {
"role": self.role_name,
"name": self.username,
"is_robot": True,
"is_org_member": self.is_org_member,
}
class TeamPermission(
namedtuple(
"TeamPermission",
[
"role_name",
"team_name",
"avatar",
],
)
):
def to_dict(self):
return {
"role": self.role_name,
"name": self.team_name,
"avatar": self.avatar,
}
@add_metaclass(ABCMeta)
class PermissionDataInterface(object):
"""
Data interface used by permissions API.
"""
@abstractmethod
def get_repo_permissions_by_user(self, namespace_name, repository_name):
"""
Args:
namespace_name: string
repository_name: string
Returns:
list(UserPermission)
"""
@abstractmethod
def get_repo_roles(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
list(Role) or None
"""
@abstractmethod
def get_repo_permission_for_user(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
UserPermission
"""
@abstractmethod
def set_repo_permission_for_user(self, username, namespace_name, repository_name, role_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
role_name: string
Returns:
UserPermission
Raises:
SaveException
"""
@abstractmethod
def delete_repo_permission_for_user(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
void
Raises:
DeleteException
"""
@abstractmethod
def get_repo_permissions_by_team(self, namespace_name, repository_name):
"""
Args:
namespace_name: string
repository_name: string
Returns:
list(TeamPermission)
"""
@abstractmethod
def get_repo_role_for_team(self, team_name, namespace_name, repository_name):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
Returns:
Role
"""
@abstractmethod
def set_repo_permission_for_team(self, team_name, namespace_name, repository_name, permission):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
permission: string
Returns:
TeamPermission
Raises:
SaveException
"""
@abstractmethod
def delete_repo_permission_for_team(self, team_name, namespace_name, repository_name):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
Returns:
TeamPermission
Raises:
DeleteException
"""
| 21.15625
| 99
| 0.556447
| 432
| 4,739
| 5.791667
| 0.162037
| 0.08793
| 0.082734
| 0.097122
| 0.640687
| 0.605516
| 0.559952
| 0.533573
| 0.498401
| 0.439249
| 0
| 0
| 0.358303
| 4,739
| 223
| 100
| 21.251121
| 0.822756
| 0.226208
| 0
| 0.5
| 0
| 0
| 0.083361
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159574
| false
| 0
| 0.042553
| 0.031915
| 0.319149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a8f1c2b21e9f7321bc8056b973b7bad4e6c12de
| 754
|
py
|
Python
|
configs/mot/tracktor/tracktor_faster-rcnn_r50_fpn_4e_mot17-public.py
|
sht47/mmtracking
|
5a25e418e9c598d1b576bce8702f5e156cbbefe7
|
[
"Apache-2.0"
] | 12
|
2021-09-05T20:47:16.000Z
|
2022-03-23T07:00:35.000Z
|
configs/mot/tracktor/tracktor_faster-rcnn_r50_fpn_4e_mot17-public.py
|
hellock/mmtracking
|
a22a36b2055d80cf4a7a5ef3913849abb56defcb
|
[
"Apache-2.0"
] | 2
|
2021-09-06T13:20:09.000Z
|
2022-01-13T05:36:14.000Z
|
configs/mot/tracktor/tracktor_faster-rcnn_r50_fpn_4e_mot17-public.py
|
hellock/mmtracking
|
a22a36b2055d80cf4a7a5ef3913849abb56defcb
|
[
"Apache-2.0"
] | 1
|
2021-07-15T00:26:35.000Z
|
2021-07-15T00:26:35.000Z
|
_base_ = ['./tracktor_faster-rcnn_r50_fpn_4e_mot17-public-half.py']
model = dict(
pretrains=dict(
detector= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-ffa52ae7.pth' # noqa: E501
))
data_root = 'data/MOT17/'
test_set = 'test'
data = dict(
train=dict(ann_file=data_root + 'annotations/train_cocoformat.json'),
val=dict(
ann_file=data_root + 'annotations/train_cocoformat.json',
detection_file=data_root + 'annotations/train_detections.pkl'),
test=dict(
ann_file=data_root + f'annotations/{test_set}_cocoformat.json',
img_prefix=data_root + test_set,
detection_file=data_root + f'annotations/{test_set}_detections.pkl'))
| 41.888889
| 123
| 0.708223
| 101
| 754
| 4.950495
| 0.425743
| 0.112
| 0.12
| 0.09
| 0.482
| 0.412
| 0.32
| 0.196
| 0.196
| 0
| 0
| 0.033281
| 0.16313
| 754
| 17
| 124
| 44.352941
| 0.759113
| 0.027851
| 0
| 0
| 0
| 0.058824
| 0.467123
| 0.310959
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a8f5a90f2c6e24db504d3e023a88b1bddaccca9
| 2,277
|
py
|
Python
|
browserstack/first_sample_build.py
|
Shaimyst/scrive_test
|
38e3ea0192885d1776d24afdbea110d73adc4e8b
|
[
"MIT"
] | null | null | null |
browserstack/first_sample_build.py
|
Shaimyst/scrive_test
|
38e3ea0192885d1776d24afdbea110d73adc4e8b
|
[
"MIT"
] | null | null | null |
browserstack/first_sample_build.py
|
Shaimyst/scrive_test
|
38e3ea0192885d1776d24afdbea110d73adc4e8b
|
[
"MIT"
] | null | null | null |
from threading import Thread
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# This array 'caps' defines the browser, device and OS combinations (capabilities) where the tests will run
caps=[{
'os_version': '10',
'os': 'Windows',
'browser': 'ie',
'browser_version': '11.0',
'name': 'Parallel Test1', # test name
'build': 'browserstack-build-1' # Your tests will be organized within this build
},
{
'os_version': 'Big Sur',
'os': 'OS X',
'browser': 'chrome',
'browser_version': '95.0',
'name': 'Parallel Test2',
'build': 'browserstack-build-1'
},
{
'os_version': 'Big Sur',
'os': 'OS X',
'browser': 'firefox',
'browser_version': '93.0',
'name': 'Parallel Test3',
'build': 'browserstack-build-1'
}]
# The run_session function searches for 'BrowserStack' on google.com
def run_session(desired_cap):
driver = webdriver.Remote(
command_executor='https://jessicasadler_RbBTVv:xE8t7EaT7QqcLDMfzfvz@hub-cloud.browserstack.com/wd/hub',
desired_capabilities=desired_cap)
driver.get("https://www.google.com")
if not "Google" in driver.title:
raise Exception("Unable to load google page!")
elem = driver.find_element_by_name("q")
elem.send_keys("BrowserStack")
elem.submit()
try:
WebDriverWait(driver, 5).until(EC.title_contains("BrowserStack"))
driver.execute_script('browserstack_executor: {"action": "setSessionStatus", "arguments": {"status":"passed", "reason": "Title matched!"}}')
except TimeoutException:
driver.execute_script('browserstack_executor: {"action": "setSessionStatus", "arguments": {"status":"failed", "reason": "Title not matched"}}')
print(driver.title)
driver.quit()
# Each Thread runs run_session with one capability set from the caps array, so the sessions run in parallel
for cap in caps:
Thread(target=run_session, args=(cap,)).start()
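# Illustrative variant (not part of the original sample): if the caller needs
# to block until every parallel session has finished (e.g. in CI), collect the
# threads and join them instead of only starting them:
#
#   threads = [Thread(target=run_session, args=(cap,)) for cap in caps]
#   for t in threads:
#       t.start()
#   for t in threads:
#       t.join()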
| 42.166667
| 149
| 0.700044
| 277
| 2,277
| 5.66065
| 0.465704
| 0.045918
| 0.053571
| 0.044005
| 0.131378
| 0.131378
| 0.131378
| 0.131378
| 0.096939
| 0
| 0
| 0.011111
| 0.16996
| 2,277
| 54
| 150
| 42.166667
| 0.818519
| 0.158103
| 0
| 0.137255
| 0
| 0.039216
| 0.358264
| 0.023013
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0.019608
| 0.156863
| 0
| 0.176471
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a914cd003bec02fcf0ace8e2f7e5de8208c8146
| 11,024
|
py
|
Python
|
ISM_catalog_profile/scripts/ISM/ISM.py
|
rhmdnd/compliance-trestle-demos
|
1d92c91cca1d23cf707f82f035b2d58ec67c953a
|
[
"Apache-2.0"
] | 10
|
2021-09-03T05:07:19.000Z
|
2022-03-26T13:24:51.000Z
|
ISM_catalog_profile/scripts/ISM/ISM.py
|
rhmdnd/compliance-trestle-demos
|
1d92c91cca1d23cf707f82f035b2d58ec67c953a
|
[
"Apache-2.0"
] | null | null | null |
ISM_catalog_profile/scripts/ISM/ISM.py
|
rhmdnd/compliance-trestle-demos
|
1d92c91cca1d23cf707f82f035b2d58ec67c953a
|
[
"Apache-2.0"
] | 4
|
2021-12-14T22:15:06.000Z
|
2022-03-29T16:16:19.000Z
|
#!/usr/bin/env python3
# # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# limitations under the License.
"""Create ISM catalogs.
This script is used to convert Australian Government Information Security Manual (ISM) into OSCAL formats.
The ISM is the equivalent of NIST 800-53 / FedRAMP / IL6 and similar documents in the USA. The goal is to produce a
similar set of OSCAL documents to what NIST and FedRAMP are currently publishing.
It does this by pulling the ISM xml document and creating:
1 catalog for all the controls
4 profiles (Official, Protected, Secret, Top Secret)
Ideally this would be a cron-job-based script; however, as ACSC publishes revisions
with specific names, these would need to be discovered by crawling. This is a potential future enhancement.
This script pulls the controls down from the xml in a 'dumb' way. A full-featured catalog
would need to parse the appropriate word / xml documents to provide groups / guidance.
"""
import io
import json
import logging
import pathlib
import sys
import urllib.request
import zipfile
from datetime import datetime
from uuid import uuid4
from ilcli import Command
import trestle.oscal.catalog as catalog
import trestle.oscal.common as common
import trestle.oscal.profile as profile
import xmltodict
# Globally define logging behaviour.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
remarks_tuple = '\n'.join(
[
'This is not an official version of the Australian Government Information Security Manual.',
'',
'Find the official versions here: https://www.cyber.gov.au/acsc/view-all-content/ism',
'This content was generated using scripts/ISM/ISM.py'
]
)
class ISMManager():
"""ISMManager a class to manage conversion of ISM artifacts into OSCAL."""
def __init__(self):
"""Initialize ISM manager. No required parameters."""
self._profile_controls = {'OFFICIAL': [], 'PROTECTED': [], 'SECRET': [], 'TOP_SECRET': []}
self._profiles = {}
def fetch_ism(self, url):
"""Fetch an Australian government ISM and covert to a dict."""
logger.debug('Fetching ISM from: ' + url)
request_url = urllib.request.urlopen(url)
document = request_url.read()
zipfile_content = zipfile.ZipFile(io.BytesIO(document))
content_list = zipfile_content.namelist()
xml_files = [x for x in content_list if '.xml' in x]
assert len(xml_files) == 1
self.ism_xml = xmltodict.parse(zipfile_content.open(xml_files[0]).read())
def _populate_control_list(self, control, raw_id):
"""Populate control lists based on a dict from the xml version of the ISM."""
# TODO: Really not pythonic but anyway.
control_id = 'control-' + raw_id
for security_level in self._profile_controls.keys():
# Dealing with schema changes: both 'Yes' and 'true' appear to be valid options.
if control[security_level].lower() == 'yes' or control[security_level].lower() == 'true':
self._profile_controls[security_level].append(control_id)
def _probe_for_keys(self, ism_control):
"""Probe for the appropriate keys for l2 groups based on whether or not section exists."""
l2_group_key = 'Section'
if l2_group_key not in ism_control.keys():
l2_group_key = 'Topic'
return l2_group_key
def _name_clean(self, name: str) -> str:
"""Normalize string to ncname format."""
return name.strip().lower().replace(' ', '_').replace('/', '-')
def create_ism_catalog(self, version: str) -> None:
"""Parse ISM object and create a catalog."""
m = common.Metadata(
**{
'title': 'Australian Government Information Security manual',
'last-modified': datetime.now().astimezone(),
'version': version,
'oscal-version': '1.0.0',
'remarks': remarks_tuple
}
)
ism_catalog = catalog.Catalog(metadata=m, uuid=str(uuid4()))
# Create basic metadata:
ism_controls = self.ism_xml['ISM']['Control']
l2_group_key = self._probe_for_keys(ism_controls[0])
"""
Approach:
- Two levels of groups - no sub controls.
- below this will be parts
"""
# Get list of top level controls
tl_group_titles = set(map(lambda x: x['Guideline'], ism_controls))
groups = []
for tl_group_name in tl_group_titles:
group = catalog.Group(id=self._name_clean(tl_group_name), title=tl_group_name)
# now add l2 groups
control_subset = list(filter(lambda x: x['Guideline'] == tl_group_name, ism_controls))
# get set l2 group names.
l2_group_titles = set(map(lambda x: x[l2_group_key], control_subset))
l2_groups = []
for l2_group_name in l2_group_titles:
clean_id = self._name_clean(l2_group_name)
l2_group = catalog.Group(id=clean_id, title=l2_group_name)
# Now identify and add the controls
oscal_controls = []
l2_control_subset = list(filter(lambda x: x[l2_group_key] == l2_group_name, control_subset))
# now we can create and add controls.
# TODO: Make more pythonic
for ism_control in l2_control_subset:
raw_id = ism_control['Identifier']
description = ism_control['Description']
topic = ism_control['Topic']
# make description the part statement
statement_part = common.Part(id='control-' + raw_id + '-stmt', name='statement', prose=description)
# this is very minimal
oscal_control = catalog.Control(id='control-' + raw_id, title=topic, parts=[statement_part])
self._populate_control_list(ism_control, raw_id)
oscal_controls.append(oscal_control)
l2_group.controls = oscal_controls
l2_groups.append(l2_group)
group.groups = l2_groups
groups.append(group)
ism_catalog.groups = groups
self._ism_catalog = ism_catalog
def create_ism_profiles(self, revision_date, uri='./ISM_catalog.yaml'):
"""Create profile for each ISM environment."""
for security_level in self._profile_controls.keys():
ism_profile = profile.Profile(
uuid=str(uuid4()),
metadata=common.Metadata(
**{
'title': 'Australian Government Information Security Manual profile for ' + security_level,
'version': revision_date,
'oscal-version': '1.0.0',
'last-modified': datetime.now().astimezone(),
'remarks': remarks_tuple
}
),
imports=[profile.Import(href=uri)]
)
controls_list = self._profile_controls[security_level]
ism_profile.imports[0].include_controls = self._populate_import_include(controls_list)
self._profiles[security_level] = ism_profile
def _populate_import_include(self, control_list):
include_controls = []
selector = profile.SelectControlById()
selector.with_ids = control_list
include_controls.append(selector)
return include_controls
def write_catalog(self, catalogs_path, ism_name):
"""Wrap and write oscal catalog object."""
ism_dir_path = catalogs_path / ism_name
ism_dir_path.mkdir(exist_ok=True)
ism_file_path = ism_dir_path / 'catalog.json'
self._ism_catalog.oscal_write(ism_file_path)
def write_profiles(self, profiles_dir, ism_name):
"""Write out all profiles."""
for security_level in self._profiles.keys():
profile_dir = profiles_dir / (ism_name + '_' + security_level)
profile_dir.mkdir(exist_ok=True)
profile_path = profile_dir / 'profile.json'
self._profiles[security_level].oscal_write(profile_path)
class ISM(Command):
"""
Convert the Australian government information security manual (in various versions) into catalogs and profiles.
This CLI makes assumptions about the resource structures that are returned.
Please note that this project currently presumes a particular trestle project structure.
"""
def _init_arguments(self):
self.add_argument('-r', '--root-dir', help='Trestle project root.', default='./')
def _run(self, args):
# little test
root_dir = pathlib.Path(args.root_dir).resolve()
catalogs_dir = root_dir.joinpath('catalogs').resolve()
profiles_dir = root_dir.joinpath('profiles').resolve()
ism_json_file = root_dir.joinpath('scripts/ISM/ism_editions.json').resolve()
if not root_dir.exists():
logger.error('Root trestle project does not exist')
return 1
if not catalogs_dir.exists():
logger.error('Catalogs directory does not exist.')
return 1
if not profiles_dir.exists():
logger.error('Profiles directory does not exist.')
return 1
ism_versions = json.load(ism_json_file.open())
for ism_file in ism_versions['isms']:
# ISM file format: 'ISM - List of Security Controls (August 2019).xml'
logger.info(ism_file)
url = ism_file['version_url']
ism_manager = ISMManager()
ism_manager.fetch_ism(url)
revision_date = ism_file['version_name'].split()
revision_string = revision_date[0] + '_' + revision_date[1]
logger.info(f'Revision date: {revision_date}')
logger.info(f'Revision string: {revision_string}')
logger.info(revision_string)
ism_name = 'ISM_' + revision_string
ism_manager.create_ism_catalog(revision_string)
# For now this path is presumed to be relative to the catalog repo
ism_manager.write_catalog(catalogs_dir, ism_name)
ism_manager.create_ism_profiles(revision_string, 'trestle://' + ism_name + '/catalog.json')
ism_manager.write_profiles(profiles_dir, ism_name)
if __name__ == '__main__':
sys.exit(ISM().run())
| 42.4
| 119
| 0.642144
| 1,375
| 11,024
| 4.947636
| 0.267636
| 0.017492
| 0.01029
| 0.022931
| 0.108923
| 0.062766
| 0.054388
| 0.030869
| 0
| 0
| 0
| 0.007886
| 0.263788
| 11,024
| 259
| 120
| 42.563707
| 0.830335
| 0.24873
| 0
| 0.081761
| 0
| 0.006289
| 0.119175
| 0.003604
| 0
| 0
| 0
| 0.007722
| 0.006289
| 1
| 0.075472
| false
| 0
| 0.106918
| 0
| 0.232704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a9405edbd8cfdcda2cba6e2d4bef4fc6c17c93b
| 806
|
py
|
Python
|
setup.py
|
cyberjunky/python-garminconnect-aio
|
fb913a15107edee5c5530f3bded7c553ec57923b
|
[
"MIT"
] | 11
|
2021-06-08T14:55:33.000Z
|
2022-02-03T03:12:14.000Z
|
setup.py
|
cyberjunky/python-garminconnect-aio
|
fb913a15107edee5c5530f3bded7c553ec57923b
|
[
"MIT"
] | 1
|
2021-08-07T09:24:35.000Z
|
2021-08-07T17:30:40.000Z
|
setup.py
|
cyberjunky/python-garminconnect-aio
|
fb913a15107edee5c5530f3bded7c553ec57923b
|
[
"MIT"
] | 2
|
2021-06-04T15:34:22.000Z
|
2021-10-02T19:48:13.000Z
|
#!/usr/bin/env python
from setuptools import setup
with open("README.md") as readme_file:
readme = readme_file.read()
setup(
author="Ron Klinkien",
author_email="ron@cyberjunky.nl",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
description="Asynchronous Garmin Connect Python 3 API wrapper",
name="garminconnect_aio",
keywords=["garmin connect", "api", "client"],
license="MIT license",
install_requires=["aiohttp >= 3.6", "yarl", "brotlipy"],
long_description_content_type="text/markdown",
long_description=readme,
url="https://github.com/cyberjunky/python-garminconnect-aio",
packages=["garminconnect_aio"],
version="0.1.4",
)
| 29.851852
| 67
| 0.666253
| 92
| 806
| 5.728261
| 0.695652
| 0.091082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010654
| 0.184864
| 806
| 26
| 68
| 31
| 0.791476
| 0.024814
| 0
| 0
| 0
| 0
| 0.457325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a940fa45e0ab9b5f708abce624a09bc0ed42b1a
| 9,513
|
py
|
Python
|
nova/tests/unit/virt/libvirt/fake_imagebackend.py
|
ChameleonCloud/nova
|
4bb9421b02b71f2b218278aa6f97abace871b111
|
[
"Apache-2.0"
] | 1
|
2016-07-18T22:05:01.000Z
|
2016-07-18T22:05:01.000Z
|
nova/tests/unit/virt/libvirt/fake_imagebackend.py
|
ChameleonCloud/nova
|
4bb9421b02b71f2b218278aa6f97abace871b111
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/libvirt/fake_imagebackend.py
|
ChameleonCloud/nova
|
4bb9421b02b71f2b218278aa6f97abace871b111
|
[
"Apache-2.0"
] | 1
|
2021-11-12T03:55:41.000Z
|
2021-11-12T03:55:41.000Z
|
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import os
import fixtures
import mock
from nova.virt.libvirt import config
from nova.virt.libvirt import driver
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
class ImageBackendFixture(fixtures.Fixture):
def __init__(self, got_files=None, imported_files=None, exists=None):
"""This fixture mocks imagebackend.Backend.backend, which is the
only entry point to libvirt.imagebackend from libvirt.driver.
:param got_files: A list of {'filename': path, 'size': size} for every
file which was created.
:param imported_files: A list of (local_filename, remote_filename) for
every invocation of import_file().
:param exists: An optional lambda which takes the disk name as an
argument, and returns True if the disk exists,
False otherwise.
"""
self.got_files = got_files
self.imported_files = imported_files
self.disks = collections.defaultdict(self._mock_disk)
"""A dict of name -> Mock image object. This is a defaultdict,
so tests may access it directly before a disk has been created."""
self._exists = exists
def setUp(self):
super(ImageBackendFixture, self).setUp()
# Mock template functions passed to cache
self.mock_fetch_image = mock.create_autospec(libvirt_utils.fetch_image)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.utils.fetch_image', self.mock_fetch_image))
self.mock_fetch_raw_image = \
mock.create_autospec(libvirt_utils.fetch_raw_image)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.utils.fetch_raw_image',
self.mock_fetch_raw_image))
self.mock_create_ephemeral = \
mock.create_autospec(driver.LibvirtDriver._create_ephemeral)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._create_ephemeral',
self.mock_create_ephemeral))
self.mock_create_swap = \
mock.create_autospec(driver.LibvirtDriver._create_swap)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._create_swap',
self.mock_create_swap))
# Backend.backend creates all Image objects
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.Backend.backend',
self._mock_backend))
@property
def created_disks(self):
"""disks, filtered to contain only disks which were actually created
by calling a relevant method.
"""
# A disk was created iff either cache() or import_file() was called.
return {name: disk for name, disk in self.disks.items()
if any([disk.cache.called, disk.import_file.called])}
def _mock_disk(self):
# This is the generator passed to the disks defaultdict. It returns
# a mocked Image object, but note that the returned object has not
# yet been 'constructed'. We don't know at this stage what arguments
# will be passed to the constructor, so we don't know, eg, its type
# or path.
#
# The reason for this 2 phase construction is to allow tests to
# manipulate mocks for disks before they have been created. eg a
# test can do the following before executing the method under test:
#
# disks['disk'].cache.side_effect = ImageNotFound...
#
# When the 'constructor' (image_init in _mock_backend) later runs,
# it will return the same object we created here, and when the
# caller calls cache() it will raise the requested exception.
disk = mock.create_autospec(imagebackend.Image)
# NOTE(mdbooth): fake_cache and fake_import_file are for compatibility
# with existing tests which test got_files and imported_files. They
# should be removed when they have no remaining users.
disk.cache.side_effect = self._fake_cache
disk.import_file.side_effect = self._fake_import_file
# NOTE(mdbooth): test_virt_drivers assumes libvirt_info has functional
# output
disk.libvirt_info.side_effect = \
functools.partial(self._fake_libvirt_info, disk)
return disk
def _mock_backend(self, backend_self, image_type=None):
# This method mocks Backend.backend, which returns a subclass of Image
# (it returns a class, not an instance). This mocked method doesn't
# return a class; it returns a function which returns a Mock. IOW,
# instead of getting a QCow2, the caller gets image_init,
# so instead of:
#
# QCow2(instance, disk_name='disk')
#
# the caller effectively does:
#
# image_init(instance, disk_name='disk')
#
# Therefore image_init() must have the same signature as an Image
# subclass constructor, and return a mocked Image object.
#
# The returned mocked Image object has the following additional
# properties which are useful for testing:
#
# * Calls with the same disk_name return the same object from
# self.disks. This means tests can assert on multiple calls for
# the same disk without worrying about whether they were also on
# the same object.
#
# * Mocked objects have an additional image_type attribute set to
# the image_type originally passed to Backend.backend() during
# their construction. Tests can use this to assert that disks were
# created of the expected type.
def image_init(instance=None, disk_name=None, path=None):
# There's nothing special about this path except that it's
# predictable and unique for (instance, disk).
if path is None:
path = os.path.join(
libvirt_utils.get_instance_path(instance), disk_name)
else:
disk_name = os.path.basename(path)
disk = self.disks[disk_name]
# Used directly by callers. These would have been set if we had called
# the real constructor.
setattr(disk, 'path', path)
setattr(disk, 'is_block_dev', mock.sentinel.is_block_dev)
# Used by tests. Note that image_init is a closure over image_type.
setattr(disk, 'image_type', image_type)
# Used by tests to manipulate which disks exist.
if self._exists is not None:
# We don't just cache the return value here because the
# caller may want, eg, a test where the disk initially does not
# exist and later exists.
disk.exists.side_effect = lambda: self._exists(disk_name)
else:
disk.exists.return_value = True
return disk
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
# Ditto for the 'is_shared_block_storage' function and
# 'is_file_in_instance_path'
def is_shared_block_storage():
return False
def is_file_in_instance_path():
return False
setattr(image_init, 'is_shared_block_storage', is_shared_block_storage)
setattr(image_init, 'is_file_in_instance_path',
is_file_in_instance_path)
return image_init
def _fake_cache(self, fetch_func, filename, size=None, *args, **kwargs):
# Execute the template function so we can test the arguments it was
# called with.
fetch_func(target=filename, *args, **kwargs)
# For legacy tests which use got_files
if self.got_files is not None:
self.got_files.append({'filename': filename, 'size': size})
def _fake_import_file(self, instance, local_filename, remote_filename):
# For legacy tests which use imported_files
if self.imported_files is not None:
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(self, mock_disk, disk_info, cache_mode,
extra_specs, hypervisor_version, disk_unit=None):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
info.source_device = disk_info['type']
info.target_bus = disk_info['bus']
info.target_dev = disk_info['dev']
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
return info
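To make the intended workflow concrete, here is a minimal, hypothetical usage sketch of the fixture above; it is not taken from the nova test suite, and the test body is only indicative. It relies on the two-phase construction described in _mock_disk (mocks can be primed via self.disks before the code under test runs) and on created_disks for assertions afterwards.
import testtools

class ExampleImageBackendTest(testtools.TestCase):
    def test_disk_creation_is_observable(self):
        # Install the fixture; all imagebackend entry points are now mocked.
        backend = self.useFixture(ImageBackendFixture(got_files=[]))
        # Prime a mock before the disk is 'constructed' (two-phase construction):
        # when the driver later calls cache() on this disk, it will fail.
        backend.disks['disk'].cache.side_effect = RuntimeError('simulated cache failure')
        # ... exercise the libvirt driver code under test here ...
        # Afterwards, only disks whose cache()/import_file() was called show up
        # in created_disks, so assertions stay precise, e.g.:
        # self.assertIn('disk', backend.created_disks)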
| 41.723684
| 79
| 0.654157
| 1,222
| 9,513
| 4.930442
| 0.252864
| 0.014606
| 0.022407
| 0.027386
| 0.159502
| 0.098091
| 0.06639
| 0.045145
| 0.045145
| 0.045145
| 0
| 0.001602
| 0.278146
| 9,513
| 227
| 80
| 41.907489
| 0.875783
| 0.44087
| 0
| 0.119565
| 0
| 0
| 0.065813
| 0.054811
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119565
| false
| 0
| 0.173913
| 0.021739
| 0.380435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a95eafd7882de8499fc568c3c76a78f53505995
| 6,671
|
py
|
Python
|
ershoufang/crawler_v2.py
|
zlikun/python-crawler-lianjia
|
7e7bf0cbd333486ee62ac015e72b96d6003c8713
|
[
"Apache-2.0"
] | 2
|
2018-10-25T05:52:33.000Z
|
2021-12-22T06:39:30.000Z
|
ershoufang/crawler_v2.py
|
zlikun/python-crawler-lianjia
|
7e7bf0cbd333486ee62ac015e72b96d6003c8713
|
[
"Apache-2.0"
] | null | null | null |
ershoufang/crawler_v2.py
|
zlikun/python-crawler-lianjia
|
7e7bf0cbd333486ee62ac015e72b96d6003c8713
|
[
"Apache-2.0"
] | 2
|
2019-02-02T14:38:26.000Z
|
2020-07-21T01:57:17.000Z
|
"""
第二版:多进程二手房信息爬虫
1. 将爬虫分解为下载任务和解析任务(可以继续分解,但在本案中意义不大)两部分,两部分各使用一个子进程,相互通过数据管道通信
2. 下载任务内部不使用队列,使用任务管道实现(在多进程:主进程、子进程、子进程内部进程池等场景下,队列并不好用)任务管理和通信
3. 解析任务从与下载任务间的管道中获取数据,解析并保存
问题:当目标被爬完后,怎样让爬虫停止?
"""
import csv
import datetime
import logging
import multiprocessing as mp
import re
import time
from collections import OrderedDict
import requests
from pyquery import PyQuery
from requests import RequestException
base_url = r'https://sh.lianjia.com/ershoufang'
# There is no elegant shared structure for the set of already-processed URLs, so a plain set plus a lock is used to keep it safe across processes
seen_urls = set()
lock = mp.Lock()
# Number of retries after a failed download
retries = 3
# Current date
today = datetime.date.today()
# Regular expressions for list-page and detail-page URLs
list_page_pattern = r'^{}/(pg\d+/)?$'.format(base_url)
item_page_pattern = r'^{}/\d+.html$'.format(base_url)
# Data storage path
csv_file_path = r'../.data/ershoufang-{}.csv'.format(today)
# Logging configuration
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(process)05d - %(levelname)s - %(message)s')
def start_download_job(data_writer, init_tasks):
"""
Download job.
:param data_writer: data pipe (write end)
:param init_tasks: initial set of tasks
:return:
"""
# Build the process pool; size it by the CPU count, with a minimum of 4 workers
pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
pool = mp.Pool(pool_size)
# Tasks are managed through a pipe rather than a queue (queues run into all sorts of problems when child processes and process pools are combined like this)
(task_reader, task_writer) = mp.Pipe(duplex=False)
# To keep the code simple, the initial tasks are sent through the task pipe and then received again below
# Alternatively this could be done in the loop itself: use the task pipe only once the initial task set is exhausted
task_writer.send(init_tasks)
# Keep reading task data from the task pipe and processing it
while True:
# A task is a set of URLs
urls = task_reader.recv()
# Download each URL via the process pool; the downloaded content and its url are sent out through the data pipe as a tuple
for url in urls:
# Skip URLs that have already been processed
with lock:
if url in seen_urls:
continue
else:
seen_urls.add(url)
# Submit the download task
pool.apply_async(download, (url, task_writer, data_writer))
pool.close()
pool.join()
def download(url, task_writer, data_writer):
"""
Download a page, retrying up to 3 times.
:param url: URL to download
:param task_writer: task pipe (write end)
:param data_writer: data pipe (write end)
:return:
"""
for _ in range(retries + 1):
try:
logging.info('download page {}'.format(url))
content = requests.get(url).text
if content is None:
continue
# Extract the links contained in a list page
if is_list_page(url):
links = parse_list_page(content, url)
# Send the list of detail-page links out through the task pipe
if links and len(links) > 0:
task_writer.send(links)
else:
data_writer.send((content, url))
return
except RequestException:
# Sleep for 2 seconds after an exception
time.sleep(2)
# After exhausting the retries, log an error message
logging.error('Download still failed after {} retries: {}'.format(retries, url))
# Put the failed url back into the task pipe
task_writer.send(set([url]))
def is_list_page(url):
"""
Return whether the URL is a list page.
:param url:
:return:
"""
return re.match(list_page_pattern, url)
def parse_list_page(content, url):
"""
Parser for list pages.
:param content:
:param url:
:return: set of detail-page links
"""
pq = PyQuery(content, url=url)
return set([li.attr('href') for li in pq('ul.sellListContent div.title > a').items()])
def parse_item_page(content, url):
"""
Parser for detail pages.
:param content:
:param url:
:return: the parsed detail data
"""
pq = PyQuery(content, url=url)
return OrderedDict({'title': pq('div.content > div.title > h1').text().strip(),
'sub_title': pq('div.content > div.title > div.sub').text().strip(),
'price': pq('div.price > span.total').text().strip(),
'unit_price': pq('div.unitPrice > span.unitPriceValue').text().replace('元/平米', '').strip(),
'down_payment_info': pq('div.tax > span.taxtext').text().strip(),
'area': re.search('(\d+\.?\d*)', pq('div.area > div.mainInfo').text()).group(1),
'year_info': pq('div.area > div.subInfo').text().strip(),
'house_type': pq('div.room > div.mainInfo').text().strip(),
'floor': pq('div.room > div.subInfo').text().strip(),
'towards': pq('div.type > div.mainInfo').text().strip(),
'housing_estate': pq('div.communityName > a:first').text().strip(),
'housing_estate_link': pq('div.communityName > a:first').attr('href'),
'location': tuple([i.text().strip() for i in pq('div.areaName > span > a').items()]),
'broker': pq('div.brokerName > a').text().strip(),
'broker_homepage': pq('div.brokerName > a').attr('href'),
'number': pq('div.houseRecord > span.info').text().replace('举报', '').strip()})
def start_parse_job(data_reader):
"""
Parse job.
:param data_reader: data pipe (read end)
:return:
"""
# Build the process pool; size it by the CPU count, with a minimum of 4 workers
pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
# The parse job only uses half the size of the download pool (adjust as needed; parsing is currently much faster than downloading, and this also avoids spawning too many processes)
pool = mp.Pool(pool_size // 2)
while True:
args = data_reader.recv()
if args is not None:
pool.apply_async(parse, args, callback=process)
pool.close()
pool.join()
def parse(content, url):
"""
Parse a page.
:param content:
:param url:
:return:
"""
if content is None or url is None:
return
try:
# Parse the detail page and return its data
return parse_item_page(content, url)
except Exception as e:
logging.error(e)
def process(data):
"""
Process one record of parsed data.
:param data:
:return:
"""
if data is None:
return
# Basic data cleanup
# Fix incomplete housing-estate links
if 'housing_estate_link' in data and not data['housing_estate_link'].startswith('https://'):
data['housing_estate_link'] = 'https://sh.lianjia.com' + data['housing_estate_link']
# Data transformation
# Extract the number of rooms from the house type
if 'house_type' in data:
data['house_type'] = (data['house_type'].split('室')[0], data['house_type'])
# Store the data (append to a CSV file; one file per date)
with open(csv_file_path,
'a', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerow(data.values())
if __name__ == '__main__':
# Initial task set
init_tasks = set([base_url + '/'] + ['{}/pg{}/'.format(base_url, i) for i in range(2, 101)])
# Create the pipe used for communication between the two jobs (processes)
(data_reader, data_writer) = mp.Pipe(duplex=False)
# Start the download job (write end)
mp.Process(target=start_download_job, args=(data_writer, init_tasks)).start()
# Start the parse job (read end)
mp.Process(target=start_parse_job, args=(data_reader,)).start()
logging.info('--running--')
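The module docstring above leaves open how the crawler should stop once everything has been fetched. A common answer for this pipe-based producer/consumer layout is a sentinel value; the following standalone sketch (independent of the crawler, illustrative only) shows the pattern with multiprocessing.Pipe.
import multiprocessing as mp

def _producer(writer):
    # Send a few work items, then a sentinel meaning "no more work".
    for i in range(3):
        writer.send({'item': i})
    writer.send(None)

def _consumer(reader):
    # Stop cleanly when the sentinel arrives.
    while True:
        msg = reader.recv()
        if msg is None:
            break
        print('consumed', msg)

if __name__ == '__main__':
    reader, writer = mp.Pipe(duplex=False)
    p = mp.Process(target=_producer, args=(writer,))
    p.start()
    _consumer(reader)
    p.join()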
| 28.75431
| 115
| 0.582072
| 787
| 6,671
| 4.799238
| 0.349428
| 0.021181
| 0.022505
| 0.015886
| 0.186391
| 0.085782
| 0.041303
| 0.041303
| 0.041303
| 0.041303
| 0
| 0.005758
| 0.271024
| 6,671
| 231
| 116
| 28.878788
| 0.770923
| 0.180033
| 0
| 0.172727
| 0
| 0
| 0.183037
| 0.004978
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072727
| false
| 0
| 0.090909
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a960357ff5666b9fe043faf558321c7ac02d8e5
| 8,415
|
py
|
Python
|
desktop/core/ext-py/pyu2f-0.1.4/pyu2f/convenience/customauthenticator.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/pyu2f-0.1.4/pyu2f/convenience/customauthenticator.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 4,640
|
2015-07-08T16:19:08.000Z
|
2019-12-02T15:01:27.000Z
|
desktop/core/ext-py/pyu2f-0.1.4/pyu2f/convenience/customauthenticator.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to offload the end to end flow of U2F signing."""
import base64
import hashlib
import json
import os
import struct
import subprocess
import sys
from pyu2f import errors
from pyu2f import model
from pyu2f.convenience import baseauthenticator
SK_SIGNING_PLUGIN_ENV_VAR = 'SK_SIGNING_PLUGIN'
U2F_SIGNATURE_TIMEOUT_SECONDS = 5
SK_SIGNING_PLUGIN_NO_ERROR = 0
SK_SIGNING_PLUGIN_TOUCH_REQUIRED = 0x6985
SK_SIGNING_PLUGIN_WRONG_DATA = 0x6A80
class CustomAuthenticator(baseauthenticator.BaseAuthenticator):
"""Offloads U2F signing to a pluggable command-line tool.
Offloads U2F signing to a signing plugin which takes the form of a
command-line tool. The command-line tool is configurable via the
SK_SIGNING_PLUGIN environment variable.
The signing plugin should implement the following interface:
Communication occurs over stdin/stdout, and messages are both sent and
received in the form:
[4 bytes - payload size (little-endian)][variable bytes - json payload]
Signing Request JSON
{
"type": "sign_helper_request",
"signData": [{
"keyHandle": <url-safe base64-encoded key handle>,
"appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
"challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
"version": U2F protocol version (usually "U2F_V2")
},...],
"timeoutSeconds": <security key touch timeout>
}
Signing Response JSON
{
"type": "sign_helper_reply",
"code": <result code>.
"errorDetail": <text description of error>,
"responseData": {
"appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
"challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
"keyHandle": <url-safe base64-encoded key handle>,
"version": <U2F protocol version>,
"signatureData": <url-safe base64-encoded signature>
}
}
Possible response error codes are:
NoError = 0
UnknownError = -127
TouchRequired = 0x6985
WrongData = 0x6a80
"""
def __init__(self, origin):
self.origin = origin
def Authenticate(self, app_id, challenge_data,
print_callback=sys.stderr.write):
"""See base class."""
# Ensure environment variable is present
plugin_cmd = os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR)
if plugin_cmd is None:
raise errors.PluginError('{} env var is not set'
.format(SK_SIGNING_PLUGIN_ENV_VAR))
# Prepare input to signer
client_data_map, signing_input = self._BuildPluginRequest(
app_id, challenge_data, self.origin)
# Call plugin
print_callback('Please insert and touch your security key\n')
response = self._CallPlugin([plugin_cmd], signing_input)
# Handle response
key_challenge_pair = (response['keyHandle'], response['challengeHash'])
client_data_json = client_data_map[key_challenge_pair]
client_data = client_data_json.encode()
return self._BuildAuthenticatorResponse(app_id, client_data, response)
def IsAvailable(self):
"""See base class."""
return os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR) is not None
def _BuildPluginRequest(self, app_id, challenge_data, origin):
"""Builds a JSON request in the form that the plugin expects."""
client_data_map = {}
encoded_challenges = []
app_id_hash_encoded = self._Base64Encode(self._SHA256(app_id))
for challenge_item in challenge_data:
key = challenge_item['key']
key_handle_encoded = self._Base64Encode(key.key_handle)
raw_challenge = challenge_item['challenge']
client_data_json = model.ClientData(
model.ClientData.TYP_AUTHENTICATION,
raw_challenge,
origin).GetJson()
challenge_hash_encoded = self._Base64Encode(
self._SHA256(client_data_json))
# Populate challenges list
encoded_challenges.append({
'appIdHash': app_id_hash_encoded,
'challengeHash': challenge_hash_encoded,
'keyHandle': key_handle_encoded,
'version': key.version,
})
# Populate ClientData map
key_challenge_pair = (key_handle_encoded, challenge_hash_encoded)
client_data_map[key_challenge_pair] = client_data_json
signing_request = {
'type': 'sign_helper_request',
'signData': encoded_challenges,
'timeoutSeconds': U2F_SIGNATURE_TIMEOUT_SECONDS,
'localAlways': True
}
return client_data_map, json.dumps(signing_request)
def _BuildAuthenticatorResponse(self, app_id, client_data, plugin_response):
"""Builds the response to return to the caller."""
encoded_client_data = self._Base64Encode(client_data)
signature_data = str(plugin_response['signatureData'])
key_handle = str(plugin_response['keyHandle'])
response = {
'clientData': encoded_client_data,
'signatureData': signature_data,
'applicationId': app_id,
'keyHandle': key_handle,
}
return response
def _CallPlugin(self, cmd, input_json):
"""Calls the plugin and validates the response."""
# Calculate length of input
input_length = len(input_json)
length_bytes_le = struct.pack('<I', input_length)
request = length_bytes_le + input_json.encode()
# Call plugin
sign_process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout = sign_process.communicate(request)[0]
exit_status = sign_process.wait()
# Parse and validate response size
response_len_le = stdout[:4]
response_len = struct.unpack('<I', response_len_le)[0]
response = stdout[4:]
if response_len != len(response):
raise errors.PluginError(
'Plugin response length {} does not match data {} (exit_status={})'
.format(response_len, len(response), exit_status))
# Ensure valid json
try:
json_response = json.loads(response.decode())
except ValueError:
raise errors.PluginError('Plugin returned invalid output (exit_status={})'
.format(exit_status))
# Ensure response type
if json_response.get('type') != 'sign_helper_reply':
raise errors.PluginError('Plugin returned invalid response type '
'(exit_status={})'
.format(exit_status))
# Parse response codes
result_code = json_response.get('code')
if result_code is None:
raise errors.PluginError('Plugin missing result code (exit_status={})'
.format(exit_status))
# Handle errors
if result_code == SK_SIGNING_PLUGIN_TOUCH_REQUIRED:
raise errors.U2FError(errors.U2FError.TIMEOUT)
elif result_code == SK_SIGNING_PLUGIN_WRONG_DATA:
raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
elif result_code != SK_SIGNING_PLUGIN_NO_ERROR:
raise errors.PluginError(
'Plugin failed with error {} - {} (exit_status={})'
.format(result_code,
json_response.get('errorDetail'),
exit_status))
# Ensure response data is present
response_data = json_response.get('responseData')
if response_data is None:
raise errors.PluginError(
'Plugin returned output with missing responseData (exit_status={})'
.format(exit_status))
return response_data
def _SHA256(self, string):
"""Helper method to perform SHA256."""
md = hashlib.sha256()
md.update(string.encode())
return md.digest()
def _Base64Encode(self, bytes_data):
"""Helper method to base64 encode, strip padding, and return str
result."""
return base64.urlsafe_b64encode(bytes_data).decode().rstrip('=')
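The class docstring above specifies the plugin wire format: a 4-byte little-endian length prefix followed by a JSON payload, in both directions. The sketch below is a hypothetical, minimal plugin skeleton for experimenting with that framing; it always answers with TouchRequired (0x6985) and is not part of pyu2f.
#!/usr/bin/env python
import json
import struct
import sys

def read_message(stream):
    # 4-byte little-endian length prefix, then that many bytes of JSON.
    length = struct.unpack('<I', stream.read(4))[0]
    return json.loads(stream.read(length).decode())

def write_message(stream, obj):
    payload = json.dumps(obj).encode()
    stream.write(struct.pack('<I', len(payload)) + payload)
    stream.flush()

if __name__ == '__main__':
    request = read_message(sys.stdin.buffer)  # expects a sign_helper_request
    write_message(sys.stdout.buffer, {
        'type': 'sign_helper_reply',
        'code': 0x6985,  # TouchRequired, per the protocol description above
        'errorDetail': 'touch your security key',
        'responseData': None,
    })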
| 34.487705
| 80
| 0.684848
| 1,007
| 8,415
| 5.506455
| 0.278054
| 0.028855
| 0.032462
| 0.025248
| 0.224707
| 0.118305
| 0.07899
| 0.065284
| 0.039315
| 0.039315
| 0
| 0.017181
| 0.225312
| 8,415
| 243
| 81
| 34.62963
| 0.83341
| 0.319786
| 0
| 0.048
| 0
| 0
| 0.117035
| 0
| 0
| 0
| 0.002154
| 0
| 0
| 1
| 0.064
| false
| 0
| 0.08
| 0
| 0.208
| 0.016
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a9667d37782748097516470365e83980101a92e
| 1,681
|
py
|
Python
|
kive/portal/management/commands/graph_kive.py
|
dmacmillan/Kive
|
76bc8f289f66fb133f78cb6d5689568b7d015915
|
[
"BSD-3-Clause"
] | 1
|
2021-12-22T06:10:01.000Z
|
2021-12-22T06:10:01.000Z
|
kive/portal/management/commands/graph_kive.py
|
dmacmillan/Kive
|
76bc8f289f66fb133f78cb6d5689568b7d015915
|
[
"BSD-3-Clause"
] | null | null | null |
kive/portal/management/commands/graph_kive.py
|
dmacmillan/Kive
|
76bc8f289f66fb133f78cb6d5689568b7d015915
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools
import os
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Generates class diagrams.'
def handle(self, *args, **options):
if 'django_extensions' not in settings.INSTALLED_APPS:
exit('django_extensions not found, try using --setting kive.UML_settings')
docs_path = os.path.join(os.path.pardir, 'docs', 'models')
apps = [app for app in settings.INSTALLED_APPS
if not (app.startswith('django') or app == 'rest_framework')]
apps.sort()
for app in apps:
print(app)
exclude_models = ['User', 'Group']
if app != 'metadata':
exclude_models.append('AccessControl')
call_command("graph_models",
app,
pygraphviz=True,
group_models=True,
outputfile=os.path.join(docs_path, app+'.png'),
exclude_models=','.join(exclude_models))
readme_path = os.path.join(docs_path, 'README.md')
with open(readme_path, 'r+') as f:
models_section = '### Models ###\n'
header = itertools.takewhile(lambda line: line != models_section,
f.readlines())
f.seek(0)
for line in header:
f.write(line)
f.write(models_section)
for app in apps:
f.write('#### {} ####\n'.format(app))
f.write('\n\n'.format(app, app))
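The README rewrite above hinges on itertools.takewhile: everything before the '### Models ###' marker is treated as a hand-written header and preserved, while everything after it is regenerated. A tiny standalone illustration of that pattern (made-up content, not Kive code):
import io
import itertools

marker = '### Models ###\n'
readme = io.StringIO('# Title\nIntro text.\n' + marker + 'old generated list\n')
# Keep only the lines before the marker, then append a freshly generated section.
header = itertools.takewhile(lambda line: line != marker, readme.readlines())
rebuilt = ''.join(header) + marker + '#### some_app ####\n'
print(rebuilt)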
| 38.204545
| 86
| 0.543129
| 185
| 1,681
| 4.816216
| 0.421622
| 0.026936
| 0.03367
| 0.053872
| 0.040404
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000898
| 0.337299
| 1,681
| 43
| 87
| 39.093023
| 0.798923
| 0
| 0
| 0.054054
| 0
| 0
| 0.149911
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.135135
| 0
| 0.216216
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a971f56d894bd93c1e6642fd2fd7e799cec7a1d
| 8,543
|
py
|
Python
|
summary.py
|
rpls/openlane_summary
|
5057fab80a4acaf08e6503ced7abb932684145a5
|
[
"Apache-2.0"
] | null | null | null |
summary.py
|
rpls/openlane_summary
|
5057fab80a4acaf08e6503ced7abb932684145a5
|
[
"Apache-2.0"
] | null | null | null |
summary.py
|
rpls/openlane_summary
|
5057fab80a4acaf08e6503ced7abb932684145a5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
import glob
import csv
import sys
import re
from shutil import which
import datetime
def is_tool(name):
return which(name) is not None
def check_path(path):
paths = glob.glob(path)
if len(paths) == 0:
exit("file not found: %s" % path)
if len(paths) > 1:
print("warning: glob pattern found too many files, using first one: %s" % paths[0])
return paths[0]
def openlane_date_sort(e):
datestamp = os.path.basename(e)
if re.match(r'^\d+\-\d+\_\d+\-\d+$',datestamp):
timestamp = datetime.datetime.strptime(datestamp, '%d-%m_%H-%M')
return timestamp.timestamp()
return datestamp
def summary_report(summary_file):
# print short summary of the csv file
status = None
with open(summary_file) as fh:
summary = csv.DictReader(fh)
for row in summary:
for key, value in row.items():
if "violation" in key or "error" in key:
print("%30s : %20s" % (key, value))
if "AREA" in key:
area = float(value)
if "flow_status" in key:
status = value
print("area %d um^2" % (1e6 * area))
if status is not None: # newer OpenLANE has status, older ones don't
print("flow status: %s" % status)
def full_summary_report(summary_file):
# print the full contents of the csv summary file
with open(summary_file) as fh:
summary = csv.DictReader(fh)
for row in summary:
for key, value in row.items():
print("%30s : %20s" % (key, value))
def drc_report(drc_file):
last_drc = None
drc_count = 0
with open(drc_file) as drc:
for line in drc.readlines():
drc_count += 1
if '(' in line:
if last_drc is not None:
print("* %s (%d)" % (last_drc, drc_count/4))
last_drc = line.strip()
drc_count = 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="OpenLANE summary tool")
group = parser.add_mutually_exclusive_group(required=True)
# either choose the design and iteration
group.add_argument('--design', help="only run checks on specific design", action='store')
# or show standard cells
group.add_argument('--show-sky130', help='show all standard cells', action='store_const', const=True)
# optionally choose different name for top module and which run to use (default latest)
parser.add_argument('--top', help="name of top module if not same as design", action='store')
parser.add_argument('--run', help="choose a specific run. If not given use latest. If not arg, show a menu", action='store', default=-1, nargs='?', type=int)
# what to show
parser.add_argument('--drc', help='show DRC report', action='store_const', const=True)
parser.add_argument('--summary', help='show violations, area & status from summary report', action='store_const', const=True)
parser.add_argument('--full-summary', help='show the full summary report csv file', action='store_const', const=True)
parser.add_argument('--synth', help='show post techmap synth', action='store_const', const=True)
parser.add_argument('--yosys-report', help='show cell usage after yosys synth', action='store_const', const=True)
# klayout for intermediate files
parser.add_argument('--floorplan', help='show floorplan', action='store_const', const=True)
parser.add_argument('--pdn', help='show PDN', action='store_const', const=True)
parser.add_argument('--global-placement', help='show global placement PDN', action='store_const', const=True)
parser.add_argument('--detailed-placement', help='show detailed placement', action='store_const', const=True)
parser.add_argument('--gds', help='show final GDS', action='store_const', const=True)
# GDS3D for 3d view
parser.add_argument('--gds-3d', help='show final GDS in 3D', action='store_const', const=True)
parser.add_argument('--caravel', help='use caravel directory structure instead of standard openlane', action='store_const', const=True)
args = parser.parse_args()
if not args.top:
args.top = args.design
if 'OPENLANE_ROOT' not in os.environ:
exit("pls set OPENLANE_ROOT to where your OpenLANE is installed")
klayout_def = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_def.xml')
klayout_gds = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_gds.xml')
gds3d_tech = os.path.join(os.path.dirname(sys.argv[0]), 'sky130.txt')
# if showing off the sky130 cells
if args.show_sky130:
if not os.environ.get('PDK_ROOT'):
exit("pls set PDK_ROOT to where your PDK is installed")
path = check_path(os.path.join(os.environ['PDK_ROOT'], "sky130A", "libs.ref", "sky130_fd_sc_hd", "gds", "sky130_fd_sc_hd.gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
exit()
# otherwise need to know where openlane and the designs are
openlane_designs = ''
if args.caravel:
if os.path.exists('openlane'):
openlane_designs = 'openlane'
else:
openlane_designs = '.'
run_dir = os.path.join(openlane_designs, args.design, 'runs/*')
else:
openlane_designs = os.path.join(os.environ['OPENLANE_ROOT'], 'designs')
run_dir = os.path.join(openlane_designs, args.design, 'runs/*-*')
list_of_files = glob.glob(run_dir)
if len(list_of_files) == 0:
exit("couldn't find that design")
list_of_files.sort(key=openlane_date_sort)
# what run to show?
if args.run == -1:
# default is to use the latest
print("using latest run:")
run_path = max(list_of_files, key=os.path.getctime)
elif args.run is None:
# UI for asking for which run to use
for run_index, run in enumerate(list_of_files):
print("\n%2d: %s" % (run_index, os.path.basename(run)), end='')
print(" <default>\n")
n = input("which run? <enter for default>: ") or run_index
run_path = list_of_files[int(n)]
else:
# use the given run
print("using run %d:" % args.run)
run_path = list_of_files[args.run]
print(run_path)
if args.summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
summary_report(path)
if args.full_summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
full_summary_report(path)
if args.drc:
path = os.path.join(run_path, 'logs', 'magic', 'magic.drc') # don't check path because if DRC is clean, don't get the file
if os.path.exists(path):
drc_report(path)
else:
print("no DRC file, DRC clean?")
if args.synth:
path = check_path(os.path.join(run_path, "tmp", "synthesis", "post_techmap.dot")) # post_techmap is created by https://github.com/efabless/openlane/pull/282
os.system("xdot %s" % path)
if args.yosys_report:
filename = "*yosys_*.stat.rpt"
path = check_path(os.path.join(run_path, "reports", "synthesis", filename))
os.system("cat %s" % path)
if args.floorplan:
path = check_path(os.path.join(run_path, "results", "floorplan", args.top + ".floorplan.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.pdn:
filename = "*pdn.def"
path = check_path(os.path.join(run_path, "tmp", "floorplan", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.global_placement:
filename = "*replace.def"
path = check_path(os.path.join(run_path, "tmp", "placement", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.detailed_placement:
path = check_path(os.path.join(run_path, "results", "placement", args.top + ".placement.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.gds:
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
if args.gds_3d:
if not is_tool('GDS3D'):
exit("pls install GDS3D from https://github.com/trilomix/GDS3D")
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("GDS3D -p %s -i %s" % (gds3d_tech, path))
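openlane_date_sort above lets run directories sort chronologically by their dd-mm_HH-MM stamp rather than lexically. A small illustrative check follows; the directory names are made up, it assumes the function defined above is in scope, and it assumes a platform where pre-1970 timestamps are accepted (the stamp carries no year, so strptime defaults to 1900).
runs = ['runs/25-12_14-30', 'runs/03-01_09-05', 'runs/14-06_08-00']
# Keyed order is by parsed date, not by string order.
print(sorted(runs, key=openlane_date_sort))
# expected: ['runs/03-01_09-05', 'runs/14-06_08-00', 'runs/25-12_14-30']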
| 40.29717
| 164
| 0.628
| 1,209
| 8,543
| 4.303557
| 0.205128
| 0.029983
| 0.034595
| 0.05247
| 0.365751
| 0.317701
| 0.302518
| 0.302518
| 0.262156
| 0.192773
| 0
| 0.009484
| 0.234812
| 8,543
| 211
| 165
| 40.488152
| 0.786446
| 0.078661
| 0
| 0.166667
| 0
| 0
| 0.242582
| 0.006112
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.051282
| 0.00641
| 0.115385
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a9a27b8be786f9438239fbfe717a4e94dce8571
| 992
|
py
|
Python
|
var/spack/repos/builtin/packages/py-cupy/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/py-cupy/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/py-cupy/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCupy(PythonPackage):
"""CuPy is an open-source array library accelerated with
NVIDIA CUDA. CuPy provides GPU accelerated computing with
Python. CuPy uses CUDA-related libraries including cuBLAS,
cuDNN, cuRand, cuSolver, cuSPARSE, cuFFT and NCCL to make
full use of the GPU architecture."""
homepage = "https://cupy.dev/"
pypi = "cupy/cupy-8.0.0.tar.gz"
version('8.0.0', sha256='d1dcba5070dfa754445d010cdc952ff6b646d5f9bdcd7a63e8246e2472c3ddb8')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-fastrlock@0.3:', type=('build', 'run'))
depends_on('py-numpy@1.15:', type=('build', 'run'))
depends_on('cuda')
depends_on('nccl')
depends_on('cudnn')
| 35.428571
| 95
| 0.704637
| 134
| 992
| 5.164179
| 0.656716
| 0.09104
| 0.052023
| 0.08237
| 0.096821
| 0.066474
| 0
| 0
| 0
| 0
| 0
| 0.07443
| 0.160282
| 992
| 27
| 96
| 36.740741
| 0.756303
| 0.455645
| 0
| 0
| 0
| 0
| 0.398058
| 0.16699
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a9acf16b780b19cf351bb2d89e76f1956c1db38
| 1,742
|
py
|
Python
|
simple_rest_client/decorators.py
|
cfytrok/python-simple-rest-client
|
4896e8226ffe194625c63773ea6f49531293b308
|
[
"MIT"
] | null | null | null |
simple_rest_client/decorators.py
|
cfytrok/python-simple-rest-client
|
4896e8226ffe194625c63773ea6f49531293b308
|
[
"MIT"
] | null | null | null |
simple_rest_client/decorators.py
|
cfytrok/python-simple-rest-client
|
4896e8226ffe194625c63773ea6f49531293b308
|
[
"MIT"
] | null | null | null |
import logging
from functools import wraps
import status
from httpx import exceptions
from .exceptions import AuthError, ClientConnectionError, ClientError, NotFoundError, ServerError
logger = logging.getLogger(__name__)
def validate_response(response):
error_suffix = " response={!r}".format(response)
if response.status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN):
raise AuthError("operation=auth_error," + error_suffix, response)
if response.status_code == status.HTTP_404_NOT_FOUND:
raise NotFoundError("operation=not_found_error," + error_suffix, response)
if status.is_client_error(code=response.status_code):
raise ClientError("operation=client_error," + error_suffix, response)
if status.is_server_error(code=response.status_code):
raise ServerError("operation=server_error," + error_suffix, response)
def handle_request_error(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
response = f(*args, **kwargs)
except (
exceptions.Timeout,
) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
def handle_async_request_error(f):
async def wrapper(*args, **kwargs):
try:
response = await f(*args, **kwargs)
except (
exceptions.ReadTimeout,
exceptions.WriteTimeout,
exceptions.PoolTimeout,
) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
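A hypothetical usage sketch of the decorator above: the request function and URL are made up, and the only assumption is a callable returning an object with a status_code attribute, which httpx responses provide. Wrapping a plain request this way turns HTTP error statuses into the typed exceptions imported from .exceptions.
import httpx

@handle_request_error
def fetch(url):
    # Any callable returning a response with .status_code works here.
    return httpx.get(url)

# fetch("https://example.com/missing") would raise NotFoundError on a 404,
# ClientError on other 4xx codes, and ServerError on 5xx codes.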
| 29.033333
| 97
| 0.675086
| 184
| 1,742
| 6.190217
| 0.315217
| 0.048288
| 0.083407
| 0.084284
| 0.468832
| 0.358209
| 0.247586
| 0.187884
| 0.187884
| 0.187884
| 0
| 0.006803
| 0.240528
| 1,742
| 59
| 98
| 29.525424
| 0.854119
| 0
| 0
| 0.409091
| 0
| 0
| 0.061424
| 0.053387
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.113636
| 0
| 0.295455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a9cba0b5388d429f06edbee8329e6af7d50f140
| 674
|
py
|
Python
|
tests/test_vendcrawler.py
|
josetaas/vendcrawler
|
5cb497d0741f6dbd29a6e41fa9f1cb3374e8f062
|
[
"MIT"
] | null | null | null |
tests/test_vendcrawler.py
|
josetaas/vendcrawler
|
5cb497d0741f6dbd29a6e41fa9f1cb3374e8f062
|
[
"MIT"
] | null | null | null |
tests/test_vendcrawler.py
|
josetaas/vendcrawler
|
5cb497d0741f6dbd29a6e41fa9f1cb3374e8f062
|
[
"MIT"
] | null | null | null |
import unittest
from vendcrawler.scripts.vendcrawler import VendCrawler
class TestVendCrawlerMethods(unittest.TestCase):
def test_get_links(self):
links = VendCrawler('a', 'b', 'c').get_links(2)
self.assertEqual(links,
['https://sarahserver.net/?module=vendor&p=1',
'https://sarahserver.net/?module=vendor&p=2'])
def test_get_page_count(self):
with open('test_vendcrawler.html', 'r') as f:
data = f.read()
page_count = VendCrawler('a', 'b', 'c').get_page_count(str(data))
self.assertEqual(int(page_count), 84)
if __name__ == '__main__':
unittest.main()
| 32.095238
| 73
| 0.615727
| 81
| 674
| 4.888889
| 0.506173
| 0.090909
| 0.050505
| 0.070707
| 0.247475
| 0.161616
| 0
| 0
| 0
| 0
| 0
| 0.009747
| 0.238872
| 674
| 20
| 74
| 33.7
| 0.762183
| 0
| 0
| 0
| 0
| 0
| 0.178042
| 0.031157
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.133333
| false
| 0
| 0.133333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aa366c3a95eb19c5533d5c2db8cc7a7e0760866
| 1,331
|
py
|
Python
|
tests/Python/test_all_configs_output.py
|
lopippo/IsoSpec
|
dfc6d7dac213f174fb9c61a5ee018d3f6174febc
|
[
"BSD-2-Clause"
] | 27
|
2016-05-10T21:27:35.000Z
|
2022-03-30T08:11:36.000Z
|
tests/Python/test_all_configs_output.py
|
lopippo/IsoSpec
|
dfc6d7dac213f174fb9c61a5ee018d3f6174febc
|
[
"BSD-2-Clause"
] | 30
|
2017-08-08T14:24:56.000Z
|
2022-03-30T12:44:11.000Z
|
tests/Python/test_all_configs_output.py
|
lopippo/IsoSpec
|
dfc6d7dac213f174fb9c61a5ee018d3f6174febc
|
[
"BSD-2-Clause"
] | 10
|
2017-06-26T12:14:00.000Z
|
2020-11-01T13:45:14.000Z
|
def binom(n, k):
"""Quickly adapted from https://stackoverflow.com/questions/26560726/python-binomial-coefficient"""
if k < 0 or k > n:
return 0
if k == 0 or k == n:
return 1
total_ways = 1
for i in range(min(k, n - k)):
total_ways = total_ways * (n - i) // (i + 1)
return total_ways
def max_confs_cnt(formula=""):
"""Get the maximal number of configurations for a given chemical formula."""
from IsoSpecPy import IsoParamsFromFormula
f = IsoParamsFromFormula(formula)
if f.atomCount:
N = 1
for n, p in zip(f.atomCount, f.prob):
N *= binom(n+len(p)-1, n)
return N
else:
return 0
def test_max_confs_cnt():
assert max_confs_cnt("O100") == 5151
assert max_confs_cnt("O100N10S6") == 4759524
test_formulas = [ 'O100',
'O100N10S6',
'C100H202',
'S10H20' ]
def test_all_configs_output_cnt():
"""Test if IsoSpecPy output correctly all configurations."""
from IsoSpecPy import IsoThreshold
global test_formulas
for f in test_formulas:
I = IsoThreshold(formula=f, threshold=0.0, absolute=True)
assert len(I) == max_confs_cnt(f)
print("Seems OK!")
if __name__ == "__main__":
test_all_configs_output_cnt()
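For a quick sanity check of the counting rule in max_confs_cnt: an element with k isotopes and n atoms contributes the multiset coefficient C(n + k - 1, n). Oxygen has three stable isotopes, so 'O100' gives C(102, 100) = (102 * 101) / 2 = 5151, exactly the value asserted in test_max_confs_cnt above.
# Worked check of the stars-and-bars count used above (uses binom from this file).
assert binom(100 + 3 - 1, 100) == 102 * 101 // 2 == 5151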
| 28.319149
| 103
| 0.602554
| 177
| 1,331
| 4.338983
| 0.412429
| 0.052083
| 0.071615
| 0.015625
| 0.096354
| 0.036458
| 0.036458
| 0
| 0
| 0
| 0
| 0.061053
| 0.286251
| 1,331
| 46
| 104
| 28.934783
| 0.747368
| 0.164538
| 0
| 0.057143
| 0
| 0
| 0.052007
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 1
| 0.114286
| false
| 0
| 0.057143
| 0
| 0.314286
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aa38327240010c87a37f52f085b58c65fe79f76
| 5,090
|
py
|
Python
|
tractseg/models/UNet_Pytorch_Regression.py
|
soichih/TractSeg
|
f78d0c6dc998905e593cbf4346745467e30d1979
|
[
"Apache-2.0"
] | null | null | null |
tractseg/models/UNet_Pytorch_Regression.py
|
soichih/TractSeg
|
f78d0c6dc998905e593cbf4346745467e30d1979
|
[
"Apache-2.0"
] | null | null | null |
tractseg/models/UNet_Pytorch_Regression.py
|
soichih/TractSeg
|
f78d0c6dc998905e593cbf4346745467e30d1979
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from os.path import join
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adamax
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable
from tractseg.libs.PytorchUtils import PytorchUtils
from tractseg.libs.ExpUtils import ExpUtils
from tractseg.models.BaseModel import BaseModel
from tractseg.libs.MetricUtils import MetricUtils
from tractseg.libs.PytorchUtils import conv2d
from tractseg.libs.PytorchUtils import deconv2d
class UNet_Pytorch_Regression(torch.nn.Module):
def __init__(self, n_input_channels=3, n_classes=7, n_filt=64, batchnorm=False, dropout=False):
super(UNet_Pytorch_Regression, self).__init__()
self.in_channel = n_input_channels
self.n_classes = n_classes
self.contr_1_1 = conv2d(n_input_channels, n_filt)
self.contr_1_2 = conv2d(n_filt, n_filt)
self.pool_1 = nn.MaxPool2d((2, 2))
self.contr_2_1 = conv2d(n_filt, n_filt * 2)
self.contr_2_2 = conv2d(n_filt * 2, n_filt * 2)
self.pool_2 = nn.MaxPool2d((2, 2))
self.contr_3_1 = conv2d(n_filt * 2, n_filt * 4)
self.contr_3_2 = conv2d(n_filt * 4, n_filt * 4)
self.pool_3 = nn.MaxPool2d((2, 2))
self.contr_4_1 = conv2d(n_filt * 4, n_filt * 8)
self.contr_4_2 = conv2d(n_filt * 8, n_filt * 8)
self.pool_4 = nn.MaxPool2d((2, 2))
self.dropout = nn.Dropout(p=0.4)
self.encode_1 = conv2d(n_filt * 8, n_filt * 16)
self.encode_2 = conv2d(n_filt * 16, n_filt * 16)
self.deconv_1 = deconv2d(n_filt * 16, n_filt * 16, kernel_size=2, stride=2)
# self.deconv_1 = nn.Upsample(scale_factor=2) #does only upscale width and height #Similar results to deconv2d
self.expand_1_1 = conv2d(n_filt * 8 + n_filt * 16, n_filt * 8)
self.expand_1_2 = conv2d(n_filt * 8, n_filt * 8)
self.deconv_2 = deconv2d(n_filt * 8, n_filt * 8, kernel_size=2, stride=2)
# self.deconv_2 = nn.Upsample(scale_factor=2)
self.expand_2_1 = conv2d(n_filt * 4 + n_filt * 8, n_filt * 4, stride=1)
self.expand_2_2 = conv2d(n_filt * 4, n_filt * 4, stride=1)
self.deconv_3 = deconv2d(n_filt * 4, n_filt * 4, kernel_size=2, stride=2)
# self.deconv_3 = nn.Upsample(scale_factor=2)
self.expand_3_1 = conv2d(n_filt * 2 + n_filt * 4, n_filt * 2, stride=1)
self.expand_3_2 = conv2d(n_filt * 2, n_filt * 2, stride=1)
self.deconv_4 = deconv2d(n_filt * 2, n_filt * 2, kernel_size=2, stride=2)
# self.deconv_4 = nn.Upsample(scale_factor=2)
self.expand_4_1 = conv2d(n_filt + n_filt * 2, n_filt, stride=1)
self.expand_4_2 = conv2d(n_filt, n_filt, stride=1)
self.conv_5 = nn.Conv2d(n_filt, n_classes, kernel_size=1, stride=1, padding=0, bias=True) # no activation function, because is in LossFunction (...WithLogits)
def forward(self, inpt):
contr_1_1 = self.contr_1_1(inpt)
contr_1_2 = self.contr_1_2(contr_1_1)
pool_1 = self.pool_1(contr_1_2)
contr_2_1 = self.contr_2_1(pool_1)
contr_2_2 = self.contr_2_2(contr_2_1)
pool_2 = self.pool_2(contr_2_2)
contr_3_1 = self.contr_3_1(pool_2)
contr_3_2 = self.contr_3_2(contr_3_1)
pool_3 = self.pool_3(contr_3_2)
contr_4_1 = self.contr_4_1(pool_3)
contr_4_2 = self.contr_4_2(contr_4_1)
pool_4 = self.pool_4(contr_4_2)
pool_4 = self.dropout(pool_4)
encode_1 = self.encode_1(pool_4)
encode_2 = self.encode_2(encode_1)
deconv_1 = self.deconv_1(encode_2)
concat1 = torch.cat([deconv_1, contr_4_2], 1)
expand_1_1 = self.expand_1_1(concat1)
expand_1_2 = self.expand_1_2(expand_1_1)
deconv_2 = self.deconv_2(expand_1_2)
concat2 = torch.cat([deconv_2, contr_3_2], 1)
expand_2_1 = self.expand_2_1(concat2)
expand_2_2 = self.expand_2_2(expand_2_1)
deconv_3 = self.deconv_3(expand_2_2)
concat3 = torch.cat([deconv_3, contr_2_2], 1)
expand_3_1 = self.expand_3_1(concat3)
expand_3_2 = self.expand_3_2(expand_3_1)
deconv_4 = self.deconv_4(expand_3_2)
concat4 = torch.cat([deconv_4, contr_1_2], 1)
expand_4_1 = self.expand_4_1(concat4)
expand_4_2 = self.expand_4_2(expand_4_1)
conv_5 = self.conv_5(expand_4_2)
return conv_5, None
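A minimal smoke-test sketch for the network above (not part of TractSeg): it assumes the conv2d/deconv2d helpers from tractseg.libs.PytorchUtils preserve spatial size, so any input whose height and width are divisible by 16 (four pooling levels) should pass straight through.
if __name__ == "__main__":
    net = UNet_Pytorch_Regression(n_input_channels=3, n_classes=7, n_filt=64)
    dummy = torch.zeros(1, 3, 144, 144)  # batch of one 144x144 image, 3 channels
    logits, _ = net(dummy)               # no final activation, as noted above
    print(logits.shape)                  # expected: torch.Size([1, 7, 144, 144])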
| 40.07874
| 167
| 0.678978
| 876
| 5,090
| 3.638128
| 0.184932
| 0.076875
| 0.062127
| 0.033888
| 0.285849
| 0.220897
| 0.151867
| 0.075306
| 0.026985
| 0
| 0
| 0.079717
| 0.221218
| 5,090
| 126
| 168
| 40.396825
| 0.724268
| 0.180943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024096
| false
| 0
| 0.192771
| 0
| 0.240964
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aa4605e775071451ff4f02953c5854fc600fb27
| 1,619
|
py
|
Python
|
platform/core/polyaxon/sidecar/sidecar/__main__.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/sidecar/sidecar/__main__.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/sidecar/sidecar/__main__.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import time
from kubernetes.client.rest import ApiException
from polyaxon_client.client import PolyaxonClient
from polyaxon_k8s.manager import K8SManager
from sidecar import settings
from sidecar.monitor import is_pod_running
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--app_label',
type=str
)
parser.add_argument(
'--container_id',
type=str
)
parser.add_argument(
'--sleep_interval',
default=2,
type=int
)
parser.add_argument(
'--max_restarts',
default=0,
type=int
)
args = parser.parse_args()
arguments = args.__dict__
container_id = arguments.pop('container_id')
app_label = arguments.pop('app_label')
sleep_interval = arguments.pop('sleep_interval')
max_restarts = arguments.pop('max_restarts')
k8s_manager = K8SManager(namespace=settings.K8S_NAMESPACE, in_cluster=True)
client = PolyaxonClient()
client.set_internal_health_check()
retry = 0
is_running = True
status = None
while is_running and retry < 3:
time.sleep(sleep_interval)
try:
is_running, status = is_pod_running(k8s_manager,
settings.POD_ID,
container_id,
max_restarts)
except ApiException:
retry += 1
time.sleep(sleep_interval)  # We wait a bit more before trying again
if status:
client.reconcile(status=status)
| 27.440678
| 79
| 0.6084
| 173
| 1,619
| 5.404624
| 0.398844
| 0.069519
| 0.072727
| 0.034225
| 0.051337
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 0.313774
| 1,619
| 58
| 80
| 27.913793
| 0.831683
| 0.017912
| 0
| 0.196078
| 0
| 0
| 0.06927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.137255
| 0
| 0.137255
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aa4e20dc8b2673c6655b3fbcb68df91576905a0
| 615
|
py
|
Python
|
simple_robot_tests/src/test_odometry.py
|
plusangel/simple_robot
|
d9ad5ed8cd592f4aee14df13465435279b4d60d7
|
[
"MIT"
] | 1
|
2022-03-02T14:55:27.000Z
|
2022-03-02T14:55:27.000Z
|
simple_robot_tests/src/test_odometry.py
|
plusangel/simple_robot
|
d9ad5ed8cd592f4aee14df13465435279b4d60d7
|
[
"MIT"
] | null | null | null |
simple_robot_tests/src/test_odometry.py
|
plusangel/simple_robot
|
d9ad5ed8cd592f4aee14df13465435279b4d60d7
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
class OdomTopicReader(object):
def __init__(self, topic_name = '/odom'):
self._topic_name = topic_name
self._sub = rospy.Subscriber(self._topic_name, Odometry, self.topic_callback)
self._odomdata = Odometry()
def topic_callback(self, msg):
self._odomdata = msg
rospy.loginfo(self._odomdata)
if __name__ == "__main__":
rospy.init_node('odom_topic_subscriber')
odom_reader_object = OdomTopicReader()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
rate.sleep()
| 26.73913
| 85
| 0.681301
| 76
| 615
| 5.105263
| 0.473684
| 0.092784
| 0.100515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004124
| 0.211382
| 615
| 22
| 86
| 27.954545
| 0.795876
| 0.034146
| 0
| 0
| 0
| 0
| 0.057336
| 0.035413
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aa9aadd40d912fb75115061e304f8eab10a0530
| 15,044
|
py
|
Python
|
docs/generate_example_images.py
|
KhaledSharif/kornia
|
9bae28e032b092b065658117723a82816d09dbac
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
docs/generate_example_images.py
|
KhaledSharif/kornia
|
9bae28e032b092b065658117723a82816d09dbac
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
docs/generate_example_images.py
|
KhaledSharif/kornia
|
9bae28e032b092b065658117723a82816d09dbac
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import importlib
import math
import os
from pathlib import Path
from typing import Optional, Tuple
import cv2
import numpy as np
import requests
import torch
import kornia as K
def read_img_from_url(url: str, resize_to: Optional[Tuple[int, int]] = None) -> torch.Tensor:
# perform request
response = requests.get(url).content
# convert to array of ints
nparr = np.frombuffer(response, np.uint8)
# convert to image array and resize
img: np.ndarray = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)[..., :3]
# convert the image to a tensor
img_t: torch.Tensor = K.utils.image_to_tensor(img, keepdim=False) # 1xCxHXW
img_t = img_t.float() / 255.0
if resize_to is None:
img_t = K.geometry.resize(img_t, 184)
else:
img_t = K.geometry.resize(img_t, resize_to)
return img_t
def main():
# load the images
BASE_IMAGE_URL1: str = "https://raw.githubusercontent.com/kornia/data/main/panda.jpg" # augmentation
BASE_IMAGE_URL2: str = "https://raw.githubusercontent.com/kornia/data/main/simba.png" # color
BASE_IMAGE_URL3: str = "https://raw.githubusercontent.com/kornia/data/main/girona.png" # enhance
BASE_IMAGE_URL4: str = "https://raw.githubusercontent.com/kornia/data/main/baby_giraffe.png" # morphology
BASE_IMAGE_URL5: str = "https://raw.githubusercontent.com/kornia/data/main/persistencia_memoria.jpg" # filters
BASE_IMAGE_URL6: str = "https://raw.githubusercontent.com/kornia/data/main/delorean.png" # geometry
OUTPUT_PATH = Path(__file__).absolute().parent / "source/_static/img"
os.makedirs(OUTPUT_PATH, exist_ok=True)
print(f"Pointing images to path {OUTPUT_PATH}.")
img1 = read_img_from_url(BASE_IMAGE_URL1)
img2 = read_img_from_url(BASE_IMAGE_URL2, img1.shape[-2:])
img3 = read_img_from_url(BASE_IMAGE_URL3, img1.shape[-2:])
img4 = read_img_from_url(BASE_IMAGE_URL4)
img5 = read_img_from_url(BASE_IMAGE_URL5, (234, 320))
img6 = read_img_from_url(BASE_IMAGE_URL6)
# TODO: make this more generic for modules out of kornia.augmentation
# Dictionary containing the transforms to generate the sample images:
# Key: Name of the transform class.
# Value: (parameters, num_samples, seed)
mod = importlib.import_module("kornia.augmentation")
augmentations_list: dict = {
"CenterCrop": ((184, 184), 1, 2018),
"ColorJitter": ((0.3, 0.3, 0.3, 0.3), 2, 2018),
"RandomAffine": (((-15.0, 20.0), (0.1, 0.1), (0.7, 1.3), 20), 2, 2019),
"RandomBoxBlur": (((7, 7),), 1, 2020),
"RandomCrop": ((img1.shape[-2:], (50, 50)), 2, 2020),
"RandomChannelShuffle": ((), 1, 2020),
"RandomElasticTransform": (((63, 63), (32, 32), (2.0, 2.0)), 2, 2018),
"RandomEqualize": ((), 1, 2020),
"RandomErasing": (((0.2, 0.4), (0.3, 1 / 0.3)), 2, 2017),
"RandomFisheye": ((torch.tensor([-0.3, 0.3]), torch.tensor([-0.3, 0.3]), torch.tensor([0.9, 1.0])), 2, 2020),
"RandomGaussianBlur": (((3, 3), (0.1, 2.0)), 1, 2020),
"RandomGaussianNoise": ((0.0, 0.05), 1, 2020),
"RandomGrayscale": ((), 1, 2020),
"RandomHorizontalFlip": ((), 1, 2020),
"RandomInvert": ((), 1, 2020),
"RandomMotionBlur": ((7, 35.0, 0.5), 2, 2020),
"RandomPerspective": ((0.2,), 2, 2020),
"RandomPlanckianJitter": ((), 2, 2022),
"RandomPosterize": (((1, 4),), 2, 2016),
"RandomResizedCrop": ((img1.shape[-2:], (1.0, 2.0), (1.0, 2.0)), 2, 2020),
"RandomRotation": ((45.0,), 2, 2019),
"RandomSharpness": ((16.0,), 1, 2019),
"RandomSolarize": ((0.2, 0.2), 2, 2019),
"RandomVerticalFlip": ((), 1, 2020),
"RandomThinPlateSpline": ((), 1, 2020),
}
# ITERATE OVER THE TRANSFORMS
for aug_name, (args, num_samples, seed) in augmentations_list.items():
img_in = img1.repeat(num_samples, 1, 1, 1)
# dynamically create the class instance
cls = getattr(mod, aug_name)
aug = cls(*args, p=1.0)
# set seed
torch.manual_seed(seed)
# apply the augmentation to the image and concat
out = aug(img_in)
if aug_name == "CenterCrop":
h, w = img1.shape[-2:]
h_new, w_new = out.shape[-2:]
h_dif, w_dif = int(h - h_new), int(w - w_new)
out = torch.nn.functional.pad(out, (w_dif // 2, w_dif // 2, 0, h_dif))
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
# save the output image
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{aug_name}.png"), out_np)
sig = f"{aug_name}({', '.join([str(a) for a in args])}, p=1.0)"
print(f"Generated image example for {aug_name}. {sig}")
mod = importlib.import_module("kornia.augmentation")
mix_augmentations_list: dict = {
"RandomMixUp": (((0.3, 0.4),), 2, 20),
"RandomCutMix": ((img1.shape[-2], img1.shape[-1]), 2, 2019),
}
# ITERATE OVER THE TRANSFORMS
for aug_name, (args, num_samples, seed) in mix_augmentations_list.items():
img_in = torch.cat([img1, img2])
# dynamically create the class instance
cls = getattr(mod, aug_name)
aug = cls(*args, p=1.0)
# set seed
torch.manual_seed(seed)
# apply the augmentation to the image and concat
out, _ = aug(img_in, torch.tensor([0, 1]))
out = torch.cat([img_in[0], img_in[1], *(out[i] for i in range(out.size(0)))], dim=-1)
# save the output image
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{aug_name}.png"), out_np)
sig = f"{aug_name}({', '.join([str(a) for a in args])}, p=1.0)"
print(f"Generated image example for {aug_name}. {sig}")
mod = importlib.import_module("kornia.color")
color_transforms_list: dict = {
"grayscale_to_rgb": ((), 3),
"rgb_to_bgr": ((), 1),
"rgb_to_grayscale": ((), 1),
"rgb_to_hsv": ((), 1),
"rgb_to_hls": ((), 1),
"rgb_to_luv": ((), 1),
"rgb_to_lab": ((), 1),
# "rgb_to_rgba": ((1.,), 1),
"rgb_to_xyz": ((), 1),
"rgb_to_ycbcr": ((), 1),
"rgb_to_yuv": ((), 1),
"rgb_to_linear_rgb": ((), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in color_transforms_list.items():
# import function and apply
fn = getattr(mod, fn_name)
if fn_name == "grayscale_to_rgb":
out = fn(K.color.rgb_to_grayscale(img2), *args)
else:
out = fn(img2, *args)
# perform normalization to visualize
if fn_name == "rgb_to_lab":
out = out[:, :1] / 100.0
elif fn_name == "rgb_to_hsv":
out[:, :1] = out[:, :1] / (2 * math.pi)  # hue is in [0, 2*pi]; scale to [0, 1] for visualization
elif fn_name == "rgb_to_luv":
out = out[:, :1] / 116.0
# repeat channels for grayscale
if out.shape[1] != 3:
out = out.repeat(1, 3, 1, 1)
# save the output image
if fn_name == "grayscale_to_rgb":
out = torch.cat(
[K.color.rgb_to_grayscale(img2[0]).repeat(3, 1, 1), *(out[i] for i in range(out.size(0)))], dim=-1
)
else:
out = torch.cat([img2[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
# kornia.enhance module
mod = importlib.import_module("kornia.enhance")
transforms: dict = {
"adjust_brightness": ((torch.tensor([0.25, 0.5]),), 2),
"adjust_contrast": ((torch.tensor([0.65, 0.5]),), 2),
"adjust_gamma": ((torch.tensor([0.85, 0.75]), 2.0), 2),
"adjust_hue": ((torch.tensor([-math.pi / 4, math.pi / 4]),), 2),
"adjust_saturation": ((torch.tensor([1.0, 2.0]),), 2),
"solarize": ((torch.tensor([0.8, 0.5]), torch.tensor([-0.25, 0.25])), 2),
"posterize": ((torch.tensor([4, 2]),), 2),
"sharpness": ((torch.tensor([1.0, 2.5]),), 2),
"equalize": ((), 1),
"invert": ((), 1),
"equalize_clahe": ((), 1),
"add_weighted": ((0.75, 0.25, 2.0), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img3.repeat(num_samples, 1, 1, 1)
if fn_name == "add_weighted":
args_in = (img_in, args[0], img2, args[1], args[2])
else:
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
# kornia.morphology module
mod = importlib.import_module("kornia.morphology")
kernel = torch.tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
transforms: dict = {
"dilation": ((kernel,), 1),
"erosion": ((kernel,), 1),
"opening": ((kernel,), 1),
"closing": ((kernel,), 1),
"gradient": ((kernel,), 1),
"top_hat": ((kernel,), 1),
"bottom_hat": ((kernel,), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img4.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
# kornia.filters module
mod = importlib.import_module("kornia.filters")
kernel = torch.tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
transforms: dict = {
"box_blur": (((5, 5),), 1),
"median_blur": (((5, 5),), 1),
"gaussian_blur2d": (((5, 5), (1.5, 1.5)), 1),
"motion_blur": ((5, 90.0, 1.0), 1),
"max_blur_pool2d": ((5,), 1),
"blur_pool2d": ((5,), 1),
"unsharp_mask": (((5, 5), (1.5, 1.5)), 1),
"laplacian": ((5,), 1),
"sobel": ((), 1),
"spatial_gradient": ((), 1),
"canny": ((), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img5.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
if fn_name in ("max_blur_pool2d", "blur_pool2d"):
out = K.geometry.resize(out, img_in.shape[-2:])
if fn_name == "canny":
out = out[1].repeat(1, 3, 1, 1)
if isinstance(out, torch.Tensor):
out = out.clamp(min=0.0, max=1.0)
if fn_name in ("laplacian", "sobel", "spatial_gradient", "canny"):
out = K.enhance.normalize_min_max(out)
if fn_name == "spatial_gradient":
out = out.permute(2, 1, 0, 3, 4).squeeze()
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
# kornia.geometry.transform module
mod = importlib.import_module("kornia.geometry.transform")
h, w = img6.shape[-2:]
def _get_tps_args():
src = torch.tensor([[[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0], [0.0, 0.0]]]).repeat(2, 1, 1)  # Bx5x2: four corners plus the centre
dst = src + torch.distributions.Uniform(-0.2, 0.2).rsample((2, 5, 2))
kernel, affine = K.geometry.transform.get_tps_transform(dst, src)
return src, kernel, affine
transforms: dict = {
"warp_affine": (
(
K.geometry.transform.get_affine_matrix2d(
translations=torch.zeros(2, 2),
center=(torch.tensor([w, h]) / 2).repeat(2, 1),
scale=torch.distributions.Uniform(0.5, 1.5).rsample((2, 2)),
angle=torch.tensor([-25.0, 25.0]),
)[:, :2, :3],
(h, w),
),
2,
),
"remap": (
(
*(K.utils.create_meshgrid(h, w, normalized_coordinates=True) - 0.25).unbind(-1),
'bilinear',
'zeros',
True,
True,
),
1,
),
"warp_image_tps": ((_get_tps_args()), 2),
"rotate": ((torch.tensor([-15.0, 25.0]),), 2),
"translate": ((torch.tensor([[10.0, -15], [50.0, -25.0]]),), 2),
"scale": ((torch.tensor([[0.5, 1.25], [1.0, 1.5]]),), 2),
"shear": ((torch.tensor([[0.1, -0.2], [-0.2, 0.1]]),), 2),
"rot180": ((), 1),
"hflip": ((), 1),
"vflip": ((), 1),
"resize": (((120, 220),), 1),
"rescale": ((0.5,), 1),
"elastic_transform2d": ((torch.rand(1, 2, h, w) * 2 - 1, (63, 63), (32, 32), (4.0, 4.0)), 1),
"pyrdown": ((), 1),
"pyrup": ((), 1),
"build_pyramid": ((3,), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img6.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
if fn_name in ("resize", "rescale", "pyrdown", "pyrup"):
h_new, w_new = out.shape[-2:]
out = torch.nn.functional.pad(out, (0, (w - w_new), 0, (h - h_new)))
if fn_name == "build_pyramid":
_out = []
for pyr in out[1:]:
h_new, w_new = pyr.shape[-2:]
out_tmp = torch.nn.functional.pad(pyr, (0, (w - w_new), 0, (h - h_new)))
_out.append(out_tmp)
out = torch.cat(_out)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
if __name__ == "__main__":
main()
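# --- hedged usage sketch (not part of the original script) ---------------------
# A minimal, self-contained illustration of the per-transform pattern used above,
# relying on the imports already present in this script (cv2, torch, kornia as K,
# pathlib.Path). The output file name is an assumption for illustration only.
def _demo_single_augmentation(img: torch.Tensor, out_dir: Path) -> None:
    torch.manual_seed(2020)  # fixed seed so the generated example is reproducible
    aug = K.augmentation.RandomHorizontalFlip(p=1.0)
    out = aug(img)  # img is a Bx3xHxW float tensor in [0, 1]
    side_by_side = torch.cat([img[0], out[0]], dim=-1)  # input and output side by side
    out_np = K.utils.tensor_to_image((side_by_side * 255.0).byte())
    cv2.imwrite(str(out_dir / "demo_RandomHorizontalFlip.png"), out_np)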
| 42.982857
| 120
| 0.545865
| 2,137
| 15,044
| 3.688348
| 0.160037
| 0.028165
| 0.00609
| 0.005075
| 0.5
| 0.466125
| 0.401294
| 0.383025
| 0.345724
| 0.335321
| 0
| 0.065241
| 0.266419
| 15,044
| 349
| 121
| 43.106017
| 0.648967
| 0.085283
| 0
| 0.272727
| 0
| 0.006993
| 0.183005
| 0.006492
| 0
| 0
| 0
| 0.002865
| 0
| 1
| 0.01049
| false
| 0
| 0.059441
| 0
| 0.076923
| 0.027972
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aa9bb3cf3909a79588350f79db082251d5ab096
| 3,318
|
py
|
Python
|
forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py
|
Pushkar-Bhuse/forte
|
b7402330cf0b2b26fe56234f0ae43c89b31c0082
|
[
"Apache-2.0"
] | null | null | null |
forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py
|
Pushkar-Bhuse/forte
|
b7402330cf0b2b26fe56234f0ae43c89b31c0082
|
[
"Apache-2.0"
] | null | null | null |
forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py
|
Pushkar-Bhuse/forte
|
b7402330cf0b2b26fe56234f0ae43c89b31c0082
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Tuple
import numpy as np
from texar.torch.data import Vocab, Embedding
from ft.onto.base_ontology import Annotation
from forte.common.configuration import Config
from forte.processors.data_augment.algorithms.text_replacement_op import (
TextReplacementOp,
)
__all__ = [
"EmbeddingSimilarityReplacementOp",
]
class EmbeddingSimilarityReplacementOp(TextReplacementOp):
r"""
This class is a replacement op leveraging pre-trained word
embeddings, such as `word2vec` and `glove`, to replace the input
word with another word with similar word embedding.
By default, the replacement word is randomly chosen from the
top k words with the most similar embeddings.
Args:
configs:
The config should contain the following key-value pairs:
- vocab_path (str): The absolute path to the vocabulary file for
the pretrained embeddings
- embed_hparams (dict): The hparams to initialize the
texar.torch.data.Embedding object.
- top_k (int): the number of k most similar words to choose from
"""
def __init__(self, configs: Config):
super().__init__(configs)
self.vocab = Vocab(self.configs["vocab_path"])
embed_hparams = self.configs["embed_hparams"]
embedding = Embedding(self.vocab.token_to_id_map_py, embed_hparams)
self.normalized_vectors = (
embedding.word_vecs
/ np.sqrt((embedding.word_vecs**2).sum(axis=1))[:, np.newaxis]
)
def replace(self, input_anno: Annotation) -> Tuple[bool, str]:
r"""
Replace the input word with another word whose
pretrained embedding is similar.
Args:
input_anno (Annotation): The input annotation.
Returns:
A tuple of two values, where the first element is a boolean value
indicating whether the replacement happens, and the second
element is the replaced word.
"""
word = input_anno.text
if word not in self.vocab.token_to_id_map_py:
return False, word
source_id = self.vocab.token_to_id_map_py[word]
source_vector = self.normalized_vectors[source_id]
scores = np.dot(self.normalized_vectors, source_vector)
target_ids = np.argpartition(-scores, self.configs["top_k"] + 1)[
: self.configs["top_k"] + 1
]
target_words = [
self.vocab.id_to_token_map_py[idx]
for idx in target_ids
if idx != source_id
and self.vocab.id_to_token_map_py[idx].lower() != word.lower()
]
return True, random.choice(target_words)
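# Hedged illustration (not part of the original module; the toy vocabulary and
# vectors below are made up): the replacement above amounts to a cosine-similarity
# top-k lookup over row-normalized embedding vectors, as sketched here with numpy.
if __name__ == "__main__":
    toy_vocab = ["good", "great", "bad", "fine"]
    toy_vecs = np.array([[1.0, 0.1], [0.9, 0.2], [-1.0, 0.0], [0.8, 0.3]])
    # normalize rows so a dot product equals cosine similarity
    normalized = toy_vecs / np.sqrt((toy_vecs**2).sum(axis=1))[:, np.newaxis]
    scores = np.dot(normalized, normalized[toy_vocab.index("good")])
    # the query word plus its two most similar neighbours
    top_ids = np.argpartition(-scores, 2)[:3]
    print([toy_vocab[i] for i in top_ids if toy_vocab[i] != "good"])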
| 36.065217
| 77
| 0.676311
| 436
| 3,318
| 5.004587
| 0.417431
| 0.027498
| 0.019248
| 0.021998
| 0.070119
| 0.055454
| 0.055454
| 0.023831
| 0
| 0
| 0
| 0.005223
| 0.249849
| 3,318
| 91
| 78
| 36.461538
| 0.871434
| 0.465943
| 0
| 0
| 0
| 0
| 0.040198
| 0.01979
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.170732
| 0
| 0.292683
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aaa0768bd968c91cbf077505e1dc0e7ee6365c8
| 34,840
|
py
|
Python
|
ion_functions/qc/qc_functions.py
|
steinermg/ion-functions
|
cea532ad9af51e86768572c8deb48547d99567c5
|
[
"Apache-2.0"
] | 10
|
2015-04-03T15:32:21.000Z
|
2018-11-21T11:57:26.000Z
|
ion_functions/qc/qc_functions.py
|
steinermg/ion-functions
|
cea532ad9af51e86768572c8deb48547d99567c5
|
[
"Apache-2.0"
] | 8
|
2015-01-07T15:19:22.000Z
|
2015-12-08T18:14:04.000Z
|
ion_functions/qc/qc_functions.py
|
steinermg/ion-functions
|
cea532ad9af51e86768572c8deb48547d99567c5
|
[
"Apache-2.0"
] | 17
|
2015-01-14T16:23:00.000Z
|
2021-07-19T08:26:52.000Z
|
#!/usr/bin/env python
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author Christopher Mueller
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import numpy as np
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def is_fill(arr):
return np.atleast_1d(arr)[-1] == -9999. # Not the normal fill value, it's hardcoded to the QC params
def is_none(arr):
return arr is None or (np.atleast_1d(arr)[-1] == None)
def dataqc_globalrangetest_minmax(dat, dat_min, dat_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the min/max arguments into a list for dataqc_globalrangetest
'''
if is_none(dat_min) or is_none(dat_max) or is_fill(dat_min) or is_fill(dat_max):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [np.atleast_1d(dat_min)[-1], np.atleast_1d(dat_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. Returns 1 for presumably good data and 0 for
data presumed bad.
Implemented by:
2010-07-28: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
2013-05-30: Christopher Mueller. Performance improvements by adding
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Input dataset, any scalar or vector. Must be numeric and real.
datlim = Two-element vector with the minimum and maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of input
types (e.g. isreal, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = np.atleast_1d(dat)
datlim = np.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isnumeric(datlim).all():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreal(datlim).all():
raise ValueError('\'datlim\' must be real')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
return (datlim.min() <= dat) & (dat <= datlim.max()).astype('int8')
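# Hedged usage illustration (values made up, not from the DPS document):
# >>> dataqc_globalrangetest(np.array([-1.0, 3.0, 5.0, 12.0]), [0.0, 10.0])
# array([0, 1, 1, 0], dtype=int8)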
def dataqc_localrangetest_wrapper(dat, datlim, datlimz, dims, pval_callback):
if is_none(datlim) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(datlimz) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(dims):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(pval_callback):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
z = []
for dim in dims:
if dim == 'month':
# Convert time vector to vector of months
v = pval_callback('time')
v = np.asanyarray(v, dtype=np.float)
v = ntp_to_month(v)
z.append(v)
else:
# Fetch the dimension from the callback method
v = pval_callback(dim)
z.append(v)
if len(dims)>1:
z = np.column_stack(z)
else:
z = z[0]
datlimz = datlimz[:,0]
return dataqc_localrangetest(dat, z, datlim, datlimz)
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for presumably good data and 0 for data
presumed bad.
Implemented by:
2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric real scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column array with the minimum (column 1) and maximum
(column 2) values considered valid.
datlimz = array with the locations where datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if all inputs are numeric and real
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.reshape(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.reshape(datlimz.shape[1:])
# test size and shape of the input arrays datlimz and datlim, setting test
# variables.
array_size = datlimz.shape
if len(array_size) == 1:
numlim = array_size[0]
ndim = 1
else:
numlim = array_size[0]
ndim = array_size[1]
array_size = datlim.shape
tmp1 = array_size[0]
tmp2 = array_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D array '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z input array
array_size = z.shape
if len(array_size) == 1:
num = array_size[0]
tmp2 = 1
else:
num = array_size[0]
tmp2 = array_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
raise ValueError('Length of \'dat\' must match number of '
'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not all(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# determine the lower limits using linear interpolation
lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
# determine the upper limits using linear interpolation
lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
lim1 = F(z).reshape(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
lim2 = F(z).reshape(dat.size)
# replace NaNs from above interpolations
ff = (np.isnan(lim1)) | (np.isnan(lim2))
lim1[ff] = np.max(datlim[:, 1])
lim2[ff] = np.min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.astype('int8')
def dataqc_spiketest_wrapper(dat, acc, N, L, strict_validation=False):
if is_none(acc) or is_fill(acc) or is_none(N) or is_fill(N) or is_none(L) or is_fill(L):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_spiketest(dat, np.atleast_1d(acc)[-1], np.atleast_1d(N)[-1], np.atleast_1d(L)[-1], strict_validation=strict_validation)
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for presumably good data and 0 for data presumed bad.
The time series is divided into windows of length L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (max.
minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is presumed to be good, i.e. no spike, if it deviates from the
mean of the (L-1) peers by less than a multiple of the range,
N*max(R,ACC).
Further than (L-1)/2 values from the start or end points, the peer
values are symmetrically before and after the test value. Within that
range of the start and end, the peers are the first/last L values
(without the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
2013-05-30: Christopher Mueller. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric, real vector.
acc = Accuracy of any input measurement.
N = (optional, defaults to 5) Range multiplier, cf. above
L = (optional, defaults to 5) Window length, cf. above
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
dat = np.asanyarray(dat, dtype=np.float)
out = spikevalues(dat, L, N, acc)
return out
def dataqc_polytrendtest_wrapper(dat, t, ord_n, nstd, strict_validation=False):
if is_none(ord_n) or is_fill(ord_n) or is_none(nstd) or is_fill(nstd):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_polytrendtest(dat, t, np.atleast_1d(ord_n)[-1], np.atleast_1d(nstd)[-1], strict_validation=strict_validation)
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is assumed to be a polynomial
of order ORD. Use ORD=1 to consider a linear drift
The time series dat is passed to MatLab's POLYFIT routine to obtain a
polynomial fit PP to dat, and the difference dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is assumed to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
2013-05-30: Christopher Mueller. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)
where
qcflag = Boolean, 0 a trend is detected, 1 elsewhere.
dat = Input dataset, a numeric real vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstd (optional, defaults to 3) = Factor by how much the standard
deviation must be reduced before qcflag switches from 1 to 0
strict_validation (optional, defaults to False) = Flag asserting
testing of inputs.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
t = np.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(abs(ord_n)))
nstd = int(abs(nstd))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = np.polyfit(t, dat, ord_n)
datpp = np.polyval(pp, t)
# test for a trend
if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
trndtst = 0
else:
trndtst = 1
# ensure output size equals input, even though test yields a single value.
qcflag = np.ones(dat.shape).astype('int8') * trndtst
return qcflag
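# Hedged usage illustration (values made up, not from the DPS document): a purely
# linear record is fully explained by an order-1 fit, so every sample is flagged 0.
# >>> t = np.arange(10.0)
# >>> dataqc_polytrendtest(2.0 * t + 1.0, t, ord_n=1, nstd=3)
# array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int8)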
def dataqc_stuckvaluetest_wrapper(x, reso, num, strict_validation=False):
if is_none(reso) or is_fill(reso) or is_none(num) or is_fill(num):
out = np.empty(x.shape, np.int8)
out.fill(-99)
return out
return dataqc_stuckvaluetest(x, np.atleast_1d(reso)[-1], np.atleast_1d(num)[-1], strict_validation=strict_validation)
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
values", i.e. repeated occurences of one value. Returns 1 for
presumably good data and 0 for data presumed bad.
Implemented by:
2012-10-29: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
qcflag = dataqc_stuckvaluetest(x, reso, num)
where
qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
x = Input time series (vector, numeric).
reso = Resolution; repeat values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = np.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreal(dat).all():
raise ValueError('\'x\' must be real')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
num = np.abs(num)
dat = np.asanyarray(dat, dtype=np.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = np.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
def dataqc_gradienttest_wrapper(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
if is_none(ddatdx) or is_fill(ddatdx) or is_none(mindx) or is_fill(mindx) or is_none(startdat) or is_fill(startdat) or is_none(toldat) or is_fill(toldat):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
outqc = dataqc_gradienttest(dat, x, [-np.atleast_1d(ddatdx)[-1], np.atleast_1d(ddatdx)[-1]], np.atleast_1d(mindx)[-1], np.atleast_1d(startdat)[-1], np.atleast_1d(toldat)[-1], strict_validation=strict_validation)
return outqc
def dataqc_gradienttest(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
"""
Description
Data quality control algorithm testing if changes between successive
data points fall within a certain range.
Input data dat are given as a function of coordinate x. The algorithm
will flag dat values as bad if the change deltaDAT/deltaX between
successive dat values exceeds thresholds given in ddatdx. Once the
threshold is exceeded, following dat are considered bad until a dat
value returns to within toldat of the last known good value.
It is possible to remove data points that are too close together in x
coordinates (use mindx).
By default, the first value of dat is considered good. To change this,
use startdat and toldat to set as the first good data point the first
one that comes within toldat of startdat.
Implemented by:
2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx,
startdat, toldat);
where
outdat = same as dat except that NaNs and values not meeting mindx are
removed.
outx = same as x except that NaNs and values not meeting mindx are
removed.
outqc = output quality control flags for outdat. 0 means bad data, 1
means good data.
dat = input dataset, a numeric real vector.
x = coordinate (e.g. time, distance) along which dat is given. Must be
of the same size as dat and strictly increasing.
ddatdx = two-element vector defining the valid range of ddat/dx
from one point to the next.
mindx = scalar. minimum dx for which this test will be applied (data
that are less than mindx apart will be deleted). defaults to zero
if NaN/empty.
startdat = start value (scalar) of dat that is presumed good. defaults
to first non-NaN value of dat if NaN/empty.
toldat = tolerance value (scalar) for dat; threshold to within which
dat must return to be counted as good, after exceeding a ddatdx
threshold detected bad data.
References:
OOI (2012). Data Product Specification for Gradient Test. Document
Control Number 1341-100010.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
"""
if strict_validation:
if not utils.isvector(dat) or not utils.isvector(x):
raise ValueError('\'dat\' and \'x\' must be vectors')
if len(dat) != len(x):
raise ValueError('\'dat\' and \'x\' must be of equal len')
if not all(np.diff(x) > 0):
raise ValueError('\'x\' must be monotonically increasing')
dat = np.asanyarray(dat, dtype=np.float).flatten()
x = np.asanyarray(x, dtype=np.float).flatten()
if np.isnan(mindx):
mindx = 0
mindx = mindx or 0
if np.isnan(startdat):
startdat = 0
startdat = startdat or 0
# No strict_validation flag here; these are scalars and must be validated
# before going into the C layer
if not utils.isscalar(mindx):
raise ValueError("'mindx' must be scalar, NaN, or empty.")
if not utils.isscalar(startdat):
raise ValueError("'startdat' must be scalar, NaN, or empty.")
# Confirm that there are still data points left, else abort:
if np.abs(x[0] - x[-1]) < mindx:
out = np.zeros(x.shape)
out.fill(1)
log.warn('Too few values to inspect')
return out
grad_min = ddatdx[0]
grad_max = ddatdx[1]
out = gradientvalues(dat, x, grad_min, grad_max, mindx, startdat, toldat)
return out
def dataqc_solarelevation(lon, lat, dt):
"""
Description
Computes instantaneous no-sky solar radiation and altitude from date
and time stamp and position data. It is put together from expressions
taken from Appendix E in the 1978 edition of Almanac for Computers,
Nautical Almanac Office, U.S. Naval Observatory. They are reduced
accuracy expressions valid for the years 1800-2100. Solar declination
computed from these expressions is accurate to at least 1'. The solar
constant (1368.0 W/m^2) represents a mean of satellite measurements
made over the last sunspot cycle (1979-1995) taken from Coffey et al
(1995), Earth System Monitor, 6, 6-10.
This code is a python implementation of soradna1.m available in Air-Sea
Toolbox.
Implemented by:
1997-03-08: Version 1.0 (author unknown) of soradna1.m.
1998-08-28: Version 1.1 (author unknown) of soradna1.m.
1999-08-05: Version 2.0 (author unknown) of soradna1.m.
2013-04-07: Christopher Wingard. Initial python implementation. Note,
this function is derived from old, unmaintained code. More robust
implementations exist (e.g. PyEphem and PySolar) that will probably
calculate these values more accurately.
Usage:
z, sorad = dataqc_solarelevation(lon, lat, dt)
where
z = solar altitude [degrees]
sorad = no atmosphere solar radiation [W m^-2]
lon = longitude (east is positive) [decimal degress]
lat = latitude [decimal degrees]
dt = date and time stamp in UTC [seconds since 1970-01-01]
Examples
dt = 1329177600 # 2012-02-14 00:00:00
z, sorad = dataqc_solarelevation(120, 30, dt)
z = 15.1566, sorad = 366.8129
OOI (2012). Data Product Specification for Solar Elevation. Document
Control Number 1341-100011.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10011_Data_Product_SPEC_SOLRELV_OOI.pdf)
"""
# Test lengths and types of inputs. Latitude and longitude must be the same
# size and can either be a scalar or a vector. The date and time stamp
# can also be either a scalar or a vector. If all three inputs are vectors,
# they must be of the same length.
if len(lon) != len(lat):
raise ValueError('\'lon\' and \'lat\' must be the same size')
if utils.isvector(lon) and utils.isvector(lat) and utils.isvector(dt):
# test their lengths
if not len(lon) == len(lat) == len(dt):
raise ValueError('If all inputs are vectors, these must all '
'be of the same length')
# set constants (using values from as_consts.m)
# ------ short-wave flux calculations
# the solar constant [W m^-2] represents a mean of satellite measurements
# made over the last sunspot cycle (1979-1995), taken from Coffey et al.
# (1995), Earth System Monitor, 6, 6-10.
solar_const = 1368.0
# Create a time tuple in UTC from the Epoch time input, and then create
# scalars or numpy arrays of time elements for subsequent calculations.
ldt = len(dt)
yy = np.zeros(ldt, dtype=np.int)
mn = np.zeros(ldt, dtype=np.int)
dd = np.zeros(ldt, dtype=np.int)
hh = np.zeros(ldt, dtype=np.int)
mm = np.zeros(ldt, dtype=np.int)
ss = np.zeros(ldt, dtype=np.int)
for i in range(ldt):
# create time tuple in UTC
gtime = time.gmtime(dt[i])
# create scalar elements
yy[i] = gtime[0]
mn[i] = gtime[1]
dd[i] = gtime[2]
hh[i] = gtime[3]
mm[i] = gtime[4]
ss[i] = gtime[5]
#constants used in function
deg2rad = np.pi / 180.0
rad2deg = 1 / deg2rad
# compute Universal Time in hours
utime = hh + (mm + ss / 60.0) / 60.0
# compute Julian ephemeris date in days (Day 1 is 1 Jan 4713 B.C. which
# equals -4712 Jan 1)
jed = (367.0 * yy - np.fix(7.0*(yy+np.fix((mn+9)/12.0))/4.0)
+ np.fix(275.0*mn/9.0) + dd + 1721013 + utime / 24.0)
# compute interval in Julian centuries since 1900
jc_int = (jed - 2415020.0) / 36525.0
# compute mean anomaly of the sun
ma_sun = 358.475833 + 35999.049750 * jc_int - 0.000150 * jc_int**2
ma_sun = (ma_sun - np.fix(ma_sun/360.0) * 360.0) * deg2rad
# compute mean longitude of sun
ml_sun = 279.696678 + 36000.768920 * jc_int + 0.000303 * jc_int**2
ml_sun = (ml_sun - np.fix(ml_sun/360.0) * 360.0) * deg2rad
# compute mean anomaly of Jupiter
ma_jup = 225.444651 + 2880.0 * jc_int + 154.906654 * jc_int
ma_jup = (ma_jup - np.fix(ma_jup/360.0) * 360.0) * deg2rad
# compute longitude of the ascending node of the moon's orbit
an_moon = (259.183275 - 1800 * jc_int - 134.142008 * jc_int
+ 0.002078 * jc_int**2)
an_moon = (an_moon - np.fix(an_moon/360.0) * 360.0 + 360.0) * deg2rad
# compute mean anomaly of Venus
ma_ven = (212.603219 + 58320 * jc_int + 197.803875 * jc_int
+ 0.001286 * jc_int**2)
ma_ven = (ma_ven - np.fix(ma_ven/360.0) * 360.0) * deg2rad
# compute sun theta
theta = (0.397930 * np.sin(ml_sun) + 0.009999 * np.sin(ma_sun-ml_sun)
+ 0.003334 * np.sin(ma_sun+ml_sun) - 0.000208 * jc_int
* np.sin(ml_sun) + 0.000042 * np.sin(2*ma_sun+ml_sun) - 0.000040
* np.cos(ml_sun) - 0.000039 * np.sin(an_moon-ml_sun) - 0.000030
* jc_int * np.sin(ma_sun-ml_sun) - 0.000014
* np.sin(2*ma_sun-ml_sun) - 0.000010
* np.cos(ma_sun-ml_sun-ma_jup) - 0.000010 * jc_int
* np.sin(ma_sun+ml_sun))
# compute sun rho
rho = (1.000421 - 0.033503 * np.cos(ma_sun) - 0.000140 * np.cos(2*ma_sun)
+ 0.000084 * jc_int * np.cos(ma_sun) - 0.000033
* np.sin(ma_sun-ma_jup) + 0.000027 * np.sin(2.*ma_sun-2.*ma_ven))
# compute declination
decln = np.arcsin(theta/np.sqrt(rho))
# compute equation of time (in seconds of time)
l = 276.697 + 0.98564734 * (jed-2415020.0)
l = (l - 360.0 * np.fix(l/360.0)) * deg2rad
eqt = (-97.8 * np.sin(l) - 431.3 * np.cos(l) + 596.6 * np.sin(2*l)
- 1.9 * np.cos(2*l) + 4.0 * np.sin(3*l) + 19.3 * np.cos(3*l)
- 12.7 * np.sin(4*l))
eqt = eqt / 60.0
# compute local hour angle from global hour angle
gha = 15.0 * (utime-12) + 15.0 * eqt / 60.0
lha = gha - lon
# compute radius vector
rv = np.sqrt(rho)
# compute solar altitude
sz = (np.sin(deg2rad*lat) * np.sin(decln) + np.cos(deg2rad*lat)
* np.cos(decln) * np.cos(deg2rad*lha))
z = rad2deg * np.arcsin(sz)
# compute solar radiation outside atmosphere (defaults to 0 when solar
# altitude is below the horizon)
sorad = (solar_const / rv**2) * np.sin(deg2rad * z)
sorad[z < 0] = 0
return (z, sorad)
def dataqc_propagateflags_wrapper(strict_validation=False, *args):
'''
Wrapper around dataqc_propagateflags for use in ION.
It accepts a variable number of vector arguments (all of the same shape) and forwards them to dataqc_propagateflags
'''
if not strict_validation:
shapes = np.array([i.shape[0] for i in args])
if not (shapes == shapes[0]).all():
raise ValueError('Input vectors are not the same shape')
return dataqc_propagateflags(np.array(args), strict_validation=strict_validation)
def dataqc_propagateflags(inflags, strict_validation=False):
"""
Description:
Propagate "bad" qc flags (from an arbitrary number of source datasets)
to another (derived) dataset.
Consider data from an oceanographic CTD (conductivity, temperature, and
pressure) instrument. From these three time series, you want to compute
salinity. If any of the three source data (conductivity, temperature,
pressure) is of bad quality, the salinity will be bad as well. You can
feed your QC assessment of the former three into this routine, which
will then give you the combined assessment for the derived (here:
salinity) property.
Implemented by:
2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
outflag = dataqc_propagateflags(inflags)
where
outflag = a 1-by-N boolean vector that contains 1 where all of the
inflags are 1, and 0 otherwise.
inflags = an M-by-N boolean matrix, where each of the M rows contains
flags of an independent data set such that "0" means bad data and
"1" means good data.
References:
OOI (2012). Data Product Specification for Combined QC Flags. Document
Control Number 1341-100012.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10012_Data_Product_SPEC_CMBNFLG_OOI.pdf)
"""
if strict_validation:
if not utils.islogical(inflags):
raise ValueError('\'inflags\' must be \'0\' or \'1\' '
'integer flag array')
array_size = inflags.shape
nrows = array_size[0]
if nrows < 2:
raise ValueError('\'inflags\' must be at least a two-dimensional array')
outflag = np.all(inflags, 0)
return outflag.astype('int8')
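# Hedged usage illustration (values made up, not from the DPS document): a sample
# is good only where every input flag vector marks it good.
# >>> dataqc_propagateflags(np.array([[1, 1, 0, 1], [1, 0, 0, 1]]))
# array([1, 0, 0, 1], dtype=int8)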
def dataqc_condcompress(p_orig, p_new, c_orig, cpcor=-9.57e-8):
"""
Description:
Implementation of the Sea-Bird conductivity compressibility correction,
scaling the input conductivity based on ratio of the original pressure
and the updated pressure.
Implemented by:
2013-04-07: Christopher Wingard. Initial python implementation.
Usage:
c_new = dataqc_condcompress(p_orig, p_new, c_orig, cpcor)
where
c_new = updated conductivity record [S/m]
p_orig = original pressure used to calculate original conductivity,
this typically the L1a PRESWAT [dbar]
p_new = updated pressure, typically L1b PRESWAT [dbar]
c_orig = original conductivty record, typically L1a CONDWAT [S/m]
cpcor = pressure correction coefficient used to calculate original
conductivity, default is -9.57e-8
References:
OOI (2012). Data Product Specification for Conductivity Compressibility
Correction. Document Control Number 1341-10030.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10030_Data_Product_SPEC_CNDCMPR_OOI.pdf)
"""
c_new = c_orig * (1 + cpcor * p_orig) / (1 + cpcor * p_new)
return c_new
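# Hedged usage illustration (values made up, not from the DPS document): with the
# default cpcor (< 0), reporting a slightly larger pressure scales the conductivity
# up by a correspondingly small factor.
# >>> dataqc_condcompress(p_orig=1000.0, p_new=1010.0, c_orig=3.5)
# 3.5000033...  (roughly a 1e-6 relative increase)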
| 37.746479
| 215
| 0.632319
| 4,920
| 34,840
| 4.411179
| 0.16687
| 0.028752
| 0.012441
| 0.010137
| 0.403032
| 0.352624
| 0.307331
| 0.265079
| 0.234253
| 0.22232
| 0
| 0.048972
| 0.27147
| 34,840
| 922
| 216
| 37.787419
| 0.806083
| 0.503502
| 0
| 0.270588
| 0
| 0
| 0.07296
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052941
| false
| 0
| 0.029412
| 0.005882
| 0.164706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aaa32daecbc845e7f79a56464fde4fa9e4bd81d
| 10,702
|
py
|
Python
|
datatest/__past__/api08.py
|
avshalomt2/datatest
|
f622b0e990b53c73f56730a9009b39af7653df20
|
[
"Apache-2.0"
] | null | null | null |
datatest/__past__/api08.py
|
avshalomt2/datatest
|
f622b0e990b53c73f56730a9009b39af7653df20
|
[
"Apache-2.0"
] | null | null | null |
datatest/__past__/api08.py
|
avshalomt2/datatest
|
f622b0e990b53c73f56730a9009b39af7653df20
|
[
"Apache-2.0"
] | null | null | null |
"""Backward compatibility for version 0.8 API."""
from __future__ import absolute_import
import inspect
import datatest
from datatest._compatibility import itertools
from datatest._compatibility.collections.abc import Sequence
from datatest._load.get_reader import get_reader
from datatest._load.load_csv import load_csv
from datatest._load.temptable import load_data
from datatest._load.temptable import new_table_name
from datatest._load.temptable import savepoint
from datatest._load.temptable import table_exists
from datatest._query.query import DEFAULT_CONNECTION
from datatest._query.query import BaseElement
from datatest._utils import file_types
from datatest._utils import string_types
from datatest._utils import iterpeek
from datatest.allowance import BaseAllowance
from datatest import Invalid
from datatest.difference import BaseDifference
from datatest.difference import NOTFOUND
datatest.DataResult = datatest.Result
class DataQuery(datatest.Query):
def __call__(self, *args, **kwds):
self.execute(*args, **kwds)
datatest.DataQuery = DataQuery
class DataSource(datatest.Selector):
def __init__(self, data, fieldnames=None):
first_value, iterator = iterpeek(data)
if isinstance(first_value, dict):
if not fieldnames:
fieldnames = list(first_value.keys())
super(DataSource, self).__init__(iterator, fieldnames)
else:
if fieldnames:
iterator = itertools.chain([fieldnames], iterator)
super(DataSource, self).__init__(iterator)
@classmethod
def from_csv(cls, file, encoding=None, **fmtparams):
if isinstance(file, string_types) or isinstance(file, file_types):
data_list = [file]
else:
data_list = file
new_cls = cls.__new__(cls)
new_cls._connection = DEFAULT_CONNECTION
cursor = new_cls._connection.cursor()
with savepoint(cursor):
table = new_table_name(cursor)
for obj in data_list:
load_csv(cursor, table, obj, encoding=encoding, **fmtparams)
new_cls._table = table if table_exists(cursor, table) else None
new_cls._data = file
new_cls._args = (encoding,)
new_cls._kwds = fmtparams
new_cls._update_list = []
return new_cls
@classmethod
def from_excel(cls, path, worksheet=0):
new_cls = cls.__new__(cls)
new_cls._connection = DEFAULT_CONNECTION
cursor = new_cls._connection.cursor()
with savepoint(cursor):
table = new_table_name(cursor)
reader = get_reader.from_excel(path, worksheet=0)
load_data(cursor, table, reader)
new_cls._table = table if table_exists(cursor, table) else None
new_cls._data = path
new_cls._args = tuple()
new_cls._kwds = dict()
if worksheet != 0:
new_cls._kwds['worksheet'] = worksheet
new_cls._update_list = []
return new_cls
def columns(self, type=list): # Removed in datatest 0.8.2
return type(self.fieldnames)
datatest.DataSource = DataSource
class allowed_key(BaseAllowance):
"""The given *function* should accept a number of arguments
equal the given key elements. If key is a single value (string
or otherwise), *function* should accept one argument. If key
is a three-tuple, *function* should accept three arguments.
"""
def __init__(self, function, msg=None):
super(allowed_key, self).__init__(msg)
self.function = function
def __repr__(self):
cls_name = self.__class__.__name__
msg_part = ', msg={0!r}'.format(self.msg) if self.msg else ''
return '{0}({1!r}{2})'.format(cls_name, self.function, msg_part)
def call_predicate(self, item):
key = item[0]
if not isinstance(key, tuple) and isinstance(key, BaseElement):
return self.function(key)
return self.function(*key)
datatest.allowed_key = allowed_key
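# Hedged illustration (the predicate and key shape below are assumptions, not part
# of the original module): for differences keyed by a two-tuple such as
# ("store", "date"), the allowance unpacks both parts into the predicate:
#
#     def key_is_recent(store, date):
#         return date >= "2018-01-01"
#
#     with allowed_key(key_is_recent):
#         ...  # validation whose differences are keyed by (store, date)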
class allowed_args(BaseAllowance):
"""The given *function* should accept a number of arguments equal
the given elements in the 'args' attribute. If args is a single
value (string or otherwise), *function* should accept one argument.
If args is a three-tuple, *function* should accept three arguments.
"""
def __init__(self, function, msg=None):
super(allowed_args, self).__init__(msg)
self.function = function
def __repr__(self):
cls_name = self.__class__.__name__
msg_part = ', msg={0!r}'.format(self.msg) if self.msg else ''
return '{0}({1!r}{2})'.format(cls_name, self.function, msg_part)
def call_predicate(self, item):
args = item[1].args
if not isinstance(args, tuple) and isinstance(args, BaseElement):
return self.function(args)
return self.function(*args)
datatest.allowed_args = allowed_args
def get_subject(self):
if hasattr(self, '_subject_data'):
return self._subject_data
return self._find_data_source('subject')
def set_subject(self, value):
self._subject_data = value
datatest.DataTestCase.subject = property(get_subject, set_subject)
def get_reference(self):
if hasattr(self, '_reference_data'):
return self._reference_data
return self._find_data_source('reference')
def set_reference(self, value):
self._reference_data = value
datatest.DataTestCase.reference = property(get_reference, set_reference)
def _find_data_source(name):
stack = inspect.stack()
stack.pop() # Skip record of current frame.
for record in stack: # Bubble-up stack looking for name.
frame = record[0]
if name in frame.f_globals:
return frame.f_globals[name] # <- EXIT!
raise NameError('cannot find {0!r}'.format(name))
datatest.DataTestCase._find_data_source = staticmethod(_find_data_source)
def allowedKey(self, function, msg=None):
"""Allows differences in a mapping where *function* returns True.
For each difference, function will receive the associated mapping
**key** unpacked into one or more arguments.
"""
return allowed_key(function, msg)
datatest.DataTestCase.allowedKey = allowedKey
def allowedArgs(self, function, msg=None):
"""Allows differences where *function* returns True. For the
'args' attribute of each difference (a tuple), *function* must
accept the number of arguments unpacked from 'args'.
"""
return allowed_args(function, msg)
datatest.DataTestCase.allowedArgs = allowedArgs
def _require_sequence(data, sequence): # New behavior in datatest 0.8.3
"""Compare *data* against a *sequence* of values. Stops at the
first difference found and returns an AssertionError. If no
differences are found, returns None.
"""
if isinstance(data, str):
raise ValueError("uncomparable types: 'str' and sequence type")
data_type = getattr(data, 'evaluation_type', data.__class__)
if not issubclass(data_type, Sequence):
type_name = data_type.__name__
msg = "expected sequence type, but got " + repr(type_name)
raise ValueError(msg)
message_prefix = None
previous_element = NOTFOUND
zipped = itertools.zip_longest(data, sequence, fillvalue=NOTFOUND)
for index, (actual, expected) in enumerate(zipped):
if actual == expected:
previous_element = actual
continue
if actual == NOTFOUND:
message_prefix = ('Data sequence is missing '
'elements starting with index {0}').format(index)
message_suffix = 'Expected {0!r}'.format(expected)
elif expected == NOTFOUND:
message_prefix = ('Data sequence contains extra '
'elements starting with index {0}').format(index)
message_suffix = 'Found {0!r}'.format(actual)
else:
message_prefix = \
'Data sequence differs starting at index {0}'.format(index)
message_suffix = \
'Found {0!r}, expected {1!r}'.format(actual, expected)
break
else: # <- NOBREAK!
return None # <- EXIT!
leading_elements = []
if index > 1:
leading_elements.append('...')
if previous_element != NOTFOUND:
leading_elements.append(repr(previous_element))
actual_repr = repr(actual) if actual != NOTFOUND else '?????'
caret_underline = '^' * len(actual_repr)
trailing_elements = []
next_tuple = next(zipped, NOTFOUND)
if next_tuple != NOTFOUND:
trailing_elements.append(repr(next_tuple[0]))
if next(zipped, NOTFOUND) != NOTFOUND:
trailing_elements.append('...')
if leading_elements:
leading_string = ', '.join(leading_elements) + ', '
else:
leading_string = ''
leading_whitespace = ' ' * len(leading_string)
if trailing_elements:
trailing_string = ', ' + ', '.join(trailing_elements)
else:
trailing_string = ''
sequence_string = leading_string + actual_repr + trailing_string
message = '{0}:\n\n {1}\n {2}{3}\n{4}'.format(message_prefix,
sequence_string,
leading_whitespace,
caret_underline,
message_suffix)
return AssertionError(message)
datatest.validation._require_sequence = _require_sequence
def _require_callable(data, function):
if data is NOTFOUND:
return Invalid(None) # <- EXIT!
def wrapped(element):
try:
if isinstance(element, BaseElement):
returned_value = function(element)
else:
returned_value = function(*element)
except Exception:
returned_value = False # Raised errors count as False.
if returned_value == True:
return None # <- EXIT!
if returned_value == False:
return Invalid(element) # <- EXIT!
if isinstance(returned_value, BaseDifference):
return returned_value # <- EXIT!
callable_name = function.__name__
message = \
'{0!r} returned {1!r}, should return True, False or a difference instance'
raise TypeError(message.format(callable_name, returned_value))
if isinstance(data, BaseElement):
return wrapped(data) # <- EXIT!
results = (wrapped(elem) for elem in data)
diffs = (diff for diff in results if diff)
first_element, diffs = iterpeek(diffs)
if first_element: # If not empty, return diffs.
return diffs
return None
| 36.03367
| 86
| 0.652775
| 1,262
| 10,702
| 5.306656
| 0.183043
| 0.018814
| 0.014335
| 0.014932
| 0.289682
| 0.217112
| 0.197999
| 0.189637
| 0.189637
| 0.168135
| 0
| 0.004884
| 0.253784
| 10,702
| 296
| 87
| 36.155405
| 0.833709
| 0.122127
| 0
| 0.199095
| 0
| 0
| 0.058418
| 0
| 0.004525
| 0
| 0
| 0
| 0.004525
| 1
| 0.095023
| false
| 0
| 0.085973
| 0.004525
| 0.312217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aabc93bad87b0dbf891bc8cb36cc3c5cdca1038
| 5,975
|
py
|
Python
|
python/scripts/compare_events.py
|
tvogels01/arthur-redshift-etl
|
477f822d16cd3a86b3bf95cfa28915cb7470a6e4
|
[
"MIT"
] | null | null | null |
python/scripts/compare_events.py
|
tvogels01/arthur-redshift-etl
|
477f822d16cd3a86b3bf95cfa28915cb7470a6e4
|
[
"MIT"
] | 44
|
2021-11-22T02:18:41.000Z
|
2022-03-28T02:13:32.000Z
|
python/scripts/compare_events.py
|
tvogels01/arthur-redshift-etl
|
477f822d16cd3a86b3bf95cfa28915cb7470a6e4
|
[
"MIT"
] | null | null | null |
"""
This script compares events from two ETLs to highlight differences in elapsed times or row counts.
* Pre-requisites
You need to have a list of events for each ETL. Arthur can provide this using the
"query_events" command.
For example:
```
arthur.py query_events -p development 37ACEC7440AB4620 -q > 37ACEC7440AB4620.events
arthur.py query_events -p development 96BE11B234F84F39 -q > 96BE11B234F84F39.events
```
* Usage
Once you have the files, you use this script:
```
compare_events.py 37ACEC7440AB4620.events 96BE11B234F84F39.events
```
The order of those two files is: "older ETL" => "newer ETL".
"""
import csv
import re
import sys
from collections import defaultdict, namedtuple
from math import isclose
from tabulate import tabulate
def read_file(filename):
"""
Read output from query_events command.
The file is expected to be formatted such that there's a header line, a separator, then the
data. The column set must contain "elapsed" and "rowcount" for later processing.
Also, Arthur prints a summary after the table, like "(100 rows)", which is skipped if present.
"""
column_spacing_re = re.compile(r"\s+\|\s+")
row_count_re = re.compile(r"\(\d+\s*rows\)")
print(f"Reading events from {filename}...")
with open(filename) as f:
for i, line in enumerate(f.readlines()):
if i == 1 or row_count_re.match(line):
# Found the separator line or the final row tally.
continue
yield column_spacing_re.sub("|", line).strip()
def parse_file(filename):
"""Parse the input as '|'-delimited columns."""
lines = read_file(filename)
reader = csv.reader(lines, delimiter="|")
row_class = namedtuple("CsvRow", next(reader), rename=True)
for row in reader:
yield row_class._make(row)
def extract_values(filename):
"""Find elapsed time and rowcount for each target relation."""
# The "lambda: None" trick allows us to use 'd[]' instead of 'd.get()' later.
elapsed = defaultdict(lambda: None)
rowcount = defaultdict(lambda: None)
for row in parse_file(filename):
elapsed[row.step, row.target] = float(row.elapsed) if row.elapsed != "---" else None
rowcount[row.step, row.target] = int(row.rowcount) if row.rowcount != "---" else None
return elapsed, rowcount
def delta(a, b):
"""
Return change in percent (or None if undefined).
The delta in percent is rounded to one decimal.
"""
if a is None or b is None:
return None
if a == 0.0 and b == 0.0:
return 0.0
assert a != 0.0 and b != 0.0
return round((b - a) * 1000.0 / a) / 10.0
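# Hedged usage illustration (values made up):
# delta(200.0, 230.0) -> 15.0    (a 15% increase)
# delta(200.0, 150.0) -> -25.0   (a 25% decrease)
# delta(None, 42.0)   -> None    (undefined when either value is missing)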
def show_delta(previous_value, current_value, column):
"""
Return whether the change from previous event to current event is "significant".
If the values appear to be equal or almost equal, there's no need to report a delta.
Also, if the values are really small and any change is inflated, skip reporting the delta.
Note that for row count, a decrease in rows is always shown.
"""
if previous_value is None or current_value is None:
return False
if previous_value == current_value:
return False
if column == "elapsed":
# Decrease trigger-happiness for quick loads:
if previous_value < 10.0 and current_value < 10.0:
return False
if previous_value < 30.0 or current_value < 30.0:
return not isclose(previous_value, current_value, abs_tol=20.0)
if previous_value < 60.0 or current_value < 60.0:
return not isclose(previous_value, current_value, rel_tol=0.5)
if previous_value < 300.0 or current_value < 300.0:
return not isclose(previous_value, current_value, rel_tol=0.2)
if column == "rowcount":
# We expect to move forward with growing tables so smaller row counts are suspect.
if previous_value > current_value:
return True
# Increase trigger-happiness for small (dimensional) tables:
if previous_value < 1000 or current_value < 1000:
return not isclose(previous_value, current_value, abs_tol=10)
return not isclose(previous_value, current_value, rel_tol=0.1)
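# A rough feel for the thresholds above (numbers are made up for illustration):
#   show_delta(25.0, 40.0, "elapsed") -> False  (short loads, within the 20s absolute tolerance)
#   show_delta(25.0, 50.0, "elapsed") -> True   (difference exceeds the 20s absolute tolerance)
#   show_delta(1000, 900, "rowcount") -> True   (row counts are never expected to shrink)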
def print_comparison_table(previous_values, current_values, column):
"""Print differences between runs, sorted by relation."""
all_events = frozenset(previous_values).union(current_values)
has_large_diff = frozenset(
event
for event in all_events
if show_delta(previous_values[event], current_values[event], column)
)
table = sorted(
(
(
event[1], # target
event[0], # step
previous_values[event],
current_values[event],
delta(previous_values[event], current_values[event]),
)
for event in has_large_diff
),
key=lambda row: row[:2], # Avoid comparison with None values in the columns
)
print("Differences for '{}':\n".format(column))
print(
tabulate(
table,
headers=("target", "step", "prev. " + column, "cur. " + column, "delta %"),
tablefmt="presto",
)
)
def main():
if len(sys.argv) >= 2 and sys.argv[1] in ("-h", "--help"):
print(__doc__)
sys.exit(0)
if len(sys.argv) != 3:
print(
"Usage: {prog} previous_events current_events".format(prog=sys.argv[0]),
file=sys.stderr,
)
sys.exit(1)
previous_events_file, current_events_file = sys.argv[1:3]
previous_elapsed, previous_rowcount = extract_values(previous_events_file)
current_elapsed, current_rowcount = extract_values(current_events_file)
print_comparison_table(previous_elapsed, current_elapsed, "elapsed")
print()
print_comparison_table(previous_rowcount, current_rowcount, "rowcount")
if __name__ == "__main__":
main()
| 33.948864
| 101
| 0.654226
| 813
| 5,975
| 4.672817
| 0.302583
| 0.047907
| 0.042116
| 0.052645
| 0.146618
| 0.136878
| 0.093446
| 0.071335
| 0.063175
| 0.038431
| 0
| 0.031347
| 0.247197
| 5,975
| 175
| 102
| 34.142857
| 0.81325
| 0.31046
| 0
| 0.049505
| 0
| 0
| 0.053771
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 1
| 0.069307
| false
| 0
| 0.059406
| 0
| 0.257426
| 0.089109
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aacfc97b162e67687e0053e093dc275ef1915a8
| 4,163
|
py
|
Python
|
harness/drifter.py
|
cmu-sei/augur-code
|
d8c1e29ce3276037b26b65ea316d251752529449
|
[
"BSD-3-Clause"
] | null | null | null |
harness/drifter.py
|
cmu-sei/augur-code
|
d8c1e29ce3276037b26b65ea316d251752529449
|
[
"BSD-3-Clause"
] | null | null | null |
harness/drifter.py
|
cmu-sei/augur-code
|
d8c1e29ce3276037b26b65ea316d251752529449
|
[
"BSD-3-Clause"
] | null | null | null |
# Augur: A Step Towards Realistic Drift Detection in Production MLSystems - Code
# Copyright 2022 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact permission@sei.cmu.edu for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
#
# Carnegie Mellon® is registered in the U.S. Patent and Trademark Office by Carnegie Mellon University.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University of California.
# 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team, and open source contributors.
# 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers.
# 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers.
# 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers.
# 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 Jonathan E. Taylor, Scipy developers, statsmodels Developers.
# 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team.
#
# DM22-0044
import shutil
from drift import drift_generator
from utils import arguments
from utils.config import Config
from utils import logging
from datasets import dataset
LOG_FILE_NAME = "drifter.log"
DEFAULT_CONFIG_FILENAME = "./drifter_config.json"
DRIFT_EXP_CONFIG_FOLDER = "../experiments/drifter"
def load_dataset(dataset_filename, dataset_class_name):
"""Load dataset to drift."""
dataset_class = dataset.load_dataset_class(dataset_class_name)
base_dataset = dataset_class()
base_dataset.load_from_file(dataset_filename)
return base_dataset
def main():
logging.setup_logging(LOG_FILE_NAME)
    # Allow selecting a config for the experiment, and load it.
args = arguments.get_parsed_arguments()
config_file = Config.get_config_file(args, DRIFT_EXP_CONFIG_FOLDER, DEFAULT_CONFIG_FILENAME)
config = Config()
config.load(config_file)
# Load scenario data.
drift_module, params = drift_generator.load_drift_config(config.get("drift_scenario"))
if args.test:
drift_generator.test_drift(config, drift_module, params, config.get("bins"))
else:
# Sort dataset into bins.
base_dataset = load_dataset(config.get("dataset"), config.get("dataset_class"))
bin_value = config.get("bin_value") if config.contains("bin_value") else "results"
bin_shuffle = config.get("bin_shuffle") if config.contains("bin_shuffle") else True
bins = drift_generator.load_bins(base_dataset, config.get("bins"), bin_value, bin_shuffle)
# Apply drift.
drifted_dataset = drift_generator.apply_drift(bins, drift_module, params)
drift_generator.add_timestamps(drifted_dataset, config.get("timestamps"))
# Save it to regular file, and timestamped file.
drifted_dataset.save_to_file(config.get("output"))
print("Copying output file to timestamped backup.")
shutil.copyfile(config.get("output"), drift_generator.get_drift_stamped_name(config.get("output")))
if __name__ == '__main__':
main()
| 54.064935
| 513
| 0.764833
| 571
| 4,163
| 5.432574
| 0.367776
| 0.034816
| 0.031593
| 0.017408
| 0.048678
| 0.019987
| 0
| 0
| 0
| 0
| 0
| 0.012669
| 0.146769
| 4,163
| 76
| 514
| 54.776316
| 0.86036
| 0.544319
| 0
| 0
| 0
| 0
| 0.119395
| 0.023231
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.171429
| 0
| 0.257143
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4aae62b164701dc61724cb01ba008cf15083826f
| 8,528
|
py
|
Python
|
network/baselines_archive/resnet_3d101.py
|
xuyu0010/ARID_v1
|
b03d0975f41547e8aa78929b8e26a62248f8e18f
|
[
"CC-BY-4.0"
] | 5
|
2020-06-24T07:33:36.000Z
|
2021-11-30T17:52:08.000Z
|
network/baselines_archive/resnet_3d101.py
|
xuyu0010/ARID_v1
|
b03d0975f41547e8aa78929b8e26a62248f8e18f
|
[
"CC-BY-4.0"
] | 1
|
2022-03-29T05:23:24.000Z
|
2022-03-29T06:19:57.000Z
|
network/baselines_archive/resnet_3d101.py
|
xuyu0010/ARID_v1
|
b03d0975f41547e8aa78929b8e26a62248f8e18f
|
[
"CC-BY-4.0"
] | 3
|
2021-02-06T10:56:30.000Z
|
2022-01-18T18:50:12.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
import logging
import os
try:
from . import initializer
from .utils import load_state
except ImportError:
import initializer
from utils import load_state
__all__ = ['ResNet', 'RESNET101']
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def conv1x1x1(in_planes, out_planes, stride=1):
return nn.Conv3d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv3x3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv1x1x1(in_planes, planes)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = conv3x3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = conv1x1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm3d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
block_inplanes=[64, 128, 256, 512],
n_input_channels=3,
conv1_t_size=7,
conv1_t_stride=1,
no_max_pool=False,
shortcut_type='B',
widen_factor=1.0,
num_classes=400,
pretrained=True):
super().__init__()
block_inplanes = [int(x * widen_factor) for x in block_inplanes]
self.in_planes = block_inplanes[0]
self.no_max_pool = no_max_pool
self.conv1 = nn.Conv3d(n_input_channels,
self.in_planes,
kernel_size=(conv1_t_size, 7, 7),
stride=(conv1_t_stride, 2, 2),
padding=(conv1_t_size // 2, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(self.in_planes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, block_inplanes[0], layers[0],
shortcut_type)
self.layer2 = self._make_layer(block,
block_inplanes[1],
layers[1],
shortcut_type,
stride=2)
self.layer3 = self._make_layer(block,
block_inplanes[2],
layers[2],
shortcut_type,
stride=2)
self.layer4 = self._make_layer(block,
block_inplanes[3],
layers[3],
shortcut_type,
stride=2)
self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight,
mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Initialization
initializer.xavier(net=self)
if pretrained:
pretrained_model=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pretrained/resnet-101-kinetics.pth')
logging.info("Network:: graph initialized, loading pretrained model: `{}'".format(pretrained_model))
assert os.path.exists(pretrained_model), "cannot locate: `{}'".format(pretrained_model)
pretrained = torch.load(pretrained_model)
load_state(self, pretrained['state_dict'])
else:
logging.info("Network:: graph initialized, use random inilization!")
def _downsample_basic_block(self, x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),
out.size(3), out.size(4))
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = torch.cat([out.data, zero_pads], dim=1)
return out
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.in_planes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(self._downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
conv1x1x1(self.in_planes, planes * block.expansion, stride),
nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(
block(in_planes=self.in_planes,
planes=planes,
stride=stride,
downsample=downsample))
self.in_planes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.in_planes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if not self.no_max_pool:
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
def RESNET101(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
if __name__ == "__main__":
import torch
logging.getLogger().setLevel(logging.DEBUG)
# ---------
net1 = RESNET101(num_classes=11, pretrained=True)
data = torch.randn(1,3,16,224,224)
output1 = net1(data)
    print(output1.shape)
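# Note on the smoke test above: with pretrained=True the constructor asserts that
# 'pretrained/resnet-101-kinetics.pth' exists next to this file, so run it with
# pretrained=False if the checkpoint is not available. For the (1, 3, 16, 224, 224)
# input the printed shape should be torch.Size([1, 11]) since num_classes=11.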
| 31.238095
| 124
| 0.533654
| 960
| 8,528
| 4.569792
| 0.208333
| 0.031001
| 0.027354
| 0.028721
| 0.338728
| 0.278778
| 0.193526
| 0.164349
| 0.148393
| 0.129701
| 0
| 0.034629
| 0.363391
| 8,528
| 272
| 125
| 31.352941
| 0.773439
| 0.010671
| 0
| 0.293839
| 0
| 0
| 0.02871
| 0.004034
| 0
| 0
| 0
| 0
| 0.004739
| 1
| 0.056872
| false
| 0
| 0.061611
| 0.009479
| 0.189573
| 0.004739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ab01eaed0874fd5f366410cee4ae62597dd8de5
| 4,167
|
py
|
Python
|
tests/ninety_nine_problems/test_miscellaneous_problems.py
|
gecBurton/inference_logic
|
2531d8f8fb0154b3bd42ac86eccc44d7038f6ef6
|
[
"MIT"
] | 3
|
2020-10-19T20:35:24.000Z
|
2020-10-21T07:13:02.000Z
|
tests/ninety_nine_problems/test_miscellaneous_problems.py
|
gecBurton/inference_logic
|
2531d8f8fb0154b3bd42ac86eccc44d7038f6ef6
|
[
"MIT"
] | 2
|
2020-11-10T16:54:13.000Z
|
2020-11-10T18:51:31.000Z
|
tests/ninety_nine_problems/test_miscellaneous_problems.py
|
gecBurton/inference_logic
|
2531d8f8fb0154b3bd42ac86eccc44d7038f6ef6
|
[
"MIT"
] | 1
|
2020-10-21T07:13:14.000Z
|
2020-10-21T07:13:14.000Z
|
import pytest
from inference_logic import Rule, Variable, search
from inference_logic.data_structures import Assert, Assign
@pytest.mark.xfail
def test_90():
r"""
P90 (**) Eight queens problem
This is a classical problem in computer science. The objective is to
place eight queens on a chessboard so that no two queens are attacking
each other; i.e., no two queens are in the same row, the same column,
or on the same diagonal. We generalize this original problem by
allowing for an arbitrary dimension N of the chessboard.
We represent the positions of the queens as a list of numbers 1..N.
Example: [4,2,7,3,6,8,5,1] means that the queen in the first column
is in row 4, the queen in the second column is in row 2, etc.
By using the permutations of the numbers 1..N we guarantee that
no two queens are in the same row. The only test that remains
to be made is the diagonal test. A queen placed at column X and
row Y occupies two diagonals: one of them, with number C = X-Y, goes
from bottom-left to top-right, the other one, numbered D = X+Y, goes
from top-left to bottom-right. In the test predicate we keep track
of the already occupied diagonals in Cs and Ds.
% The first version is a simple generate-and-test solution.
% queens_1(N,Qs) :- Qs is a solution of the N-queens problem
queens_1(N,Qs) :- range(1,N,Rs), permu(Rs,Qs), test(Qs).
% range(A,B,L) :- L is the list of numbers A..B
range(A,A,[A]).
range(A,B,[A|L]) :- A < B, A1 is A+1, range(A1,B,L).
% permu(Xs,Zs) :- the list Zs is a permutation of the list Xs
permu([],[]).
permu(Qs,[Y|Ys]) :- del(Y,Qs,Rs), permu(Rs,Ys).
del(X,[X|Xs],Xs).
del(X,[Y|Ys],[Y|Zs]) :- del(X,Ys,Zs).
% test(Qs) :- the list Qs represents a non-attacking queens solution
test(Qs) :- test(Qs,1,[],[]).
% test(Qs,X,Cs,Ds) :- the queens in Qs, representing columns X to N,
% are not in conflict with the diagonals Cs and Ds
test([],_,_,_).
test([Y|Ys],X,Cs,Ds) :-
C is X-Y, \+ memberchk(C,Cs),
D is X+Y, \+ memberchk(D,Ds),
X1 is X + 1,
test(Ys,X1,[C|Cs],[D|Ds]).
%--------------------------------------------------------------
% Now, in version 2, the tester is pushed completely inside the
% generator permu.
queens_2(N,Qs) :- range(1,N,Rs), permu_test(Rs,Qs,1,[],[]).
permu_test([],[],_,_,_).
permu_test(Qs,[Y|Ys],X,Cs,Ds) :-
del(Y,Qs,Rs),
C is X-Y, \+ memberchk(C,Cs),
D is X+Y, \+ memberchk(D,Ds),
X1 is X+1,
permu_test(Rs,Ys,X1,[C|Cs],[D|Ds]).
"""
N, Qs, N, Rs, Qs, A, B, L, A1, Y, Ys, X, Xs, Zs = Variable.factory(
"N", "Qs", "N", "Rs", "Qs", "A", "B", "L", "A1", "Y", "Ys", "X", "Xs", "Zs"
)
_W1, _W2, _W3 = Variable.factory("_W1", "_W2", "_W3")
Cs, Ds, D, X1, C, Cs = Variable.factory("Cs", "Ds", "D", "X1", "C", "Cs")
db = [
Rule(
dict(queens_1=N, a=Qs),
dict(range=1, a=N, b=Rs),
dict(permu=Rs, a=Qs),
dict(test=Qs),
),
dict(range=A, a=A, b=[A]),
Rule(
dict(range=A, a=B, b=[A, *L]),
Assert(lambda A, B: A < B),
Assign(A1, lambda A: A + 1),
dict(range=A1, a=B, b=L),
),
dict(permu=[], a=[]),
Rule(
dict(permu=Qs, a=[Y, *Ys]), dict(delete=Y, a=Qs, b=Rs), dict(permu=Rs, a=Ys)
),
dict(delete=X, a=[X, *Xs], b=Xs),
Rule(dict(delete=X, a=[Y, *Ys], b=[Y, *Zs]), dict(delete=X, a=Ys, b=Zs)),
Rule(dict(test=Qs), dict(test=Qs, a=1, b=[], c=[])),
dict(test=[], a=_W1, b=_W2, c=_W3),
Rule(
dict(test=[Y, *Ys], a=X, b=Cs, c=Ds),
Assign(C, lambda X, Y: X - Y),
Assert(lambda C, Cs: C not in Cs),
Assign(D, lambda X, Y: X + Y),
Assert(lambda D, Ds: D not in Ds),
Assign(X1, lambda X: X + 1),
dict(test=Ys, a=X1, b=[C, *Cs], c=[D, *Ds]),
),
]
Q = Variable("Q")
query = dict(queens_1=8, a=Q)
assert list(search(db, query)) == []
| 35.313559
| 88
| 0.538037
| 718
| 4,167
| 3.083565
| 0.213092
| 0.009937
| 0.007227
| 0.023487
| 0.165312
| 0.148148
| 0.116531
| 0.081301
| 0.081301
| 0.055104
| 0
| 0.018905
| 0.276458
| 4,167
| 117
| 89
| 35.615385
| 0.715423
| 0.545956
| 0
| 0.173913
| 0
| 0
| 0.024289
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 1
| 0.021739
| false
| 0
| 0.065217
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ab03ce1ed84ecb90d03ef035bc80050cf57b143
| 4,856
|
py
|
Python
|
airbus_cobot_gui/src/airbus_cobot_gui/diagnostics/diagnostics.py
|
ipa320/airbus_coop
|
974564807ba5d24096e237a9991311608a390da1
|
[
"Apache-2.0"
] | 4
|
2017-10-15T23:32:24.000Z
|
2019-12-26T12:31:53.000Z
|
airbus_cobot_gui/src/airbus_cobot_gui/diagnostics/diagnostics.py
|
ipa320/airbus_coop
|
974564807ba5d24096e237a9991311608a390da1
|
[
"Apache-2.0"
] | 6
|
2017-09-05T13:52:00.000Z
|
2017-12-01T14:18:27.000Z
|
airbus_cobot_gui/src/airbus_cobot_gui/diagnostics/diagnostics.py
|
ipa320/airbus_coop
|
974564807ba5d24096e237a9991311608a390da1
|
[
"Apache-2.0"
] | 4
|
2017-09-04T08:14:36.000Z
|
2017-09-18T07:22:21.000Z
|
#!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import os
import sys
import threading
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from python_qt_binding import loadUi
from airbus_cobot_gui.res import R
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from airbus_pyqt_extend.QtAgiGui import QAgiPopup
from rqt_robot_monitor.status_item import StatusItem
import rqt_robot_monitor.util_robot_monitor as util
## @class DiagnosticsStatus
## @brief Class defining the different control status values.
#OK = 0
#WARN = 1
#ERROR = 2
#STALE = 3
class DiagnosticsWidget(QPushButton):
DIAGNOSTICS_TOPLEVEL_TOPIC_NAME = rospy.get_param('diagnostics_toplevel_topic_name','/diagnostics_toplevel_state')
state = "status_stale"
msg = "No diagnostic messages received"
def __init__(self, context):
"""! The constructor."""
QPushButton.__init__(self)
self._context = context
# Diagnostics top level: update the color of the button depending on the current diagnostics toplevel message
self.connect(self, SIGNAL("stateChanged"), self.update_state)
self.emit(SIGNAL('stateChanged'), self.state, self.msg)
self._diagnostics_toplevel_state_sub = rospy.Subscriber(self.DIAGNOSTICS_TOPLEVEL_TOPIC_NAME , DiagnosticStatus, self.toplevel_state_callback)
# Diagnostics: when button pressed open a new window with a detailed list of components and diagnostic messages
self.connect(self,SIGNAL('clicked(bool)'),self._trigger_button)
def update_state(self, state, msg):
self.setIcon(R.getIconById(state))
self.setIconSize(QSize(40,40))
self.setToolTip(msg)
def toplevel_state_callback(self, msg):
self.state = msg.level
if msg.level == 0:
self.state= "status_ok"
self.msg = "OK"
if msg.level == 1 :
self.state= "status_warning"
self.msg = "WARNING"
if msg.level == 2 :
self.state= "status_error"
self.msg = "ERROR"
if msg.level == 3 :
self.state= "status_stale"
self.msg = "STALE"
self.emit(SIGNAL('stateChanged'), self.state, self.msg)
def _trigger_button(self, checked):
popup = DiagnosticsPopup(self, self._context)
popup.show_()
class DiagnosticsPopup(QAgiPopup):
def __init__(self, parent, context):
"""! The constructor."""
QAgiPopup.__init__(self, parent)
self._context = context
self._parent = parent
self.setRelativePosition(QAgiPopup.TopRight, QAgiPopup.BottomRight)
loadUi(R.layouts.diagnostics_popup, self)
self._inspectors = {}
self._current_msg = None
palette = self.tree_all_devices.palette()
self._original_base_color = palette.base().color()
self._original_alt_base_color = palette.alternateBase().color()
self._tree = StatusItem(self.tree_all_devices.invisibleRootItem())
self.adjustSize()
# Diagnostics subscriber
DIAGNOSTICS_TOPIC_NAME = rospy.get_param('diagnostics_topic_name','/diagnostics_agg')
self.connect(self,SIGNAL("UpdateDiagnostics"), self.update_diag)
self._diagnostics_agg_sub = rospy.Subscriber(DIAGNOSTICS_TOPIC_NAME, DiagnosticArray, self.message_cb)
def update_diag(self):
#update the tree
self._tree.prune()
self.tree_all_devices.resizeColumnToContents(0)
self.adjustSize()
def message_cb(self,msg):
""" DiagnosticArray message callback """
for status in msg.status:
path = status.name.split('/')
if path[0] == '':
path = path[1:]
tmp_tree = self._tree
for p in path:
tmp_tree = tmp_tree[p]
tmp_tree.update(status, util.get_resource_name(status.name))
self.emit(SIGNAL('UpdateDiagnostics'))
if __name__ == "__main__":
from airbus_cobot_gui.context import Context
app = QApplication(sys.argv)
main = QMainWindow()
main.setCentralWidget(TranslatorUi(Context(main)))
main.show()
app.exec_()
#End of file
| 35.97037
| 150
| 0.691928
| 595
| 4,856
| 5.438655
| 0.356303
| 0.02225
| 0.012361
| 0.017614
| 0.061805
| 0.046354
| 0.025958
| 0.025958
| 0
| 0
| 0
| 0.00709
| 0.215815
| 4,856
| 134
| 151
| 36.238806
| 0.8427
| 0.225494
| 0
| 0.071429
| 0
| 0
| 0.079925
| 0.021529
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.309524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ab14530560ea0c6ff68422c45af6c1228280da2
| 758
|
py
|
Python
|
graphgallery/functional/dense/onehot.py
|
dongzizhu/GraphGallery
|
c65eab42daeb52de5019609fe7b368e30863b4ae
|
[
"MIT"
] | 1
|
2020-07-29T08:00:32.000Z
|
2020-07-29T08:00:32.000Z
|
graphgallery/functional/dense/onehot.py
|
dongzizhu/GraphGallery
|
c65eab42daeb52de5019609fe7b368e30863b4ae
|
[
"MIT"
] | null | null | null |
graphgallery/functional/dense/onehot.py
|
dongzizhu/GraphGallery
|
c65eab42daeb52de5019609fe7b368e30863b4ae
|
[
"MIT"
] | null | null | null |
import numpy as np
from ..transform import DenseTransform
from ..decorators import multiple
from ..transform import Transform
__all__ = ['onehot', 'Onehot']
@Transform.register()
class Onehot(DenseTransform):
def __init__(self, depth=None):
super().__init__()
self.collect(locals())
def __call__(self, *x):
return onehot(*x, depth=self.depth)
@multiple()
def onehot(label, depth=None):
"""Get the one-hot like label of nodes."""
label = np.asarray(label, dtype=np.int32)
depth = depth or label.max() + 1
if label.ndim == 1:
return np.eye(depth, dtype=label.dtype)[label]
else:
raise ValueError(f"label must be a 1D array, but got {label.ndim}D array.")
| 25.266667
| 84
| 0.634565
| 99
| 758
| 4.69697
| 0.535354
| 0.055914
| 0.08172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008651
| 0.237467
| 758
| 29
| 85
| 26.137931
| 0.795848
| 0.047493
| 0
| 0
| 0
| 0
| 0.09607
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.2
| 0.05
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ab3a002c74475748d23b9510c6318a19949f281
| 752
|
py
|
Python
|
lesson06/liqi/test.py
|
herrywen-nanj/51reboot
|
1130c79a360e1b548a6eaad176eb60f8bed22f40
|
[
"Apache-2.0"
] | null | null | null |
lesson06/liqi/test.py
|
herrywen-nanj/51reboot
|
1130c79a360e1b548a6eaad176eb60f8bed22f40
|
[
"Apache-2.0"
] | null | null | null |
lesson06/liqi/test.py
|
herrywen-nanj/51reboot
|
1130c79a360e1b548a6eaad176eb60f8bed22f40
|
[
"Apache-2.0"
] | null | null | null |
import configparser
'''
config = configparser.ConfigParser()
config.read('db.ini')
print(config.sections())
print(dict(config['mysqld'])['symbolic-links'])
'''
def ReadConfig(filename, section, key=None):
print(filename)
config = configparser.ConfigParser()
config.read(filename)
print(config.sections())
if not config.sections():
return "config init is empty", False
if key:
if section in config.sections():
return dict(config[section])[key], True
else:
return '', False
else:
return dict(config[section]), True
result, ok = ReadConfig('db.ini', 'mysqld', 'socket')
print(ok)
print(result)
if __name__ == '__main__':
ReadConfig('db.ini','mysqld','socket')
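# An illustrative db.ini (contents are hypothetical) that would make the calls above succeed:
#   [mysqld]
#   socket = /var/lib/mysql/mysql.sock
#   symbolic-links = 0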
| 22.117647
| 53
| 0.640957
| 86
| 752
| 5.511628
| 0.383721
| 0.118143
| 0.126582
| 0.151899
| 0.2827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204787
| 752
| 34
| 54
| 22.117647
| 0.792642
| 0
| 0
| 0.1
| 0
| 0
| 0.104746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.3
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ab3ac9ae685aecfb387f1a734cc96132d725108
| 1,947
|
py
|
Python
|
core/forms.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
core/forms.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
core/forms.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Forms wrapper
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
import six
from django import forms
from django.utils.encoding import force_unicode
from django.utils.html import escape
class NOCBoundField(forms.forms.BoundField):
"""
Bound field with django-admin like label-tag
"""
def __init__(self, *args, **kwargs):
super(NOCBoundField, self).__init__(*args, **kwargs)
self.is_checkbox = isinstance(self.field.widget, forms.CheckboxInput)
def label_tag(self, contents=None, attrs=None):
if not contents:
contents = force_unicode(
escape(self.field.label if self.field.label else self.name)
) + (":" if not self.is_checkbox else "")
classes = []
if self.is_checkbox:
classes += ["vCheckboxLabel"]
if self.field.required:
classes += ["required"]
if classes:
attrs = attrs.copy() if attrs else {}
attrs["class"] = " ".join(classes)
return super(NOCBoundField, self).label_tag(contents=contents, attrs=attrs)
class NOCForm(forms.Form):
"""
Form wrapper returning NOCBoundField items
"""
class Media(object):
css = {"all": ["/ui/pkg/django-media/admin/css/forms.css"]}
def __init__(self, *args, **kwargs):
super(NOCForm, self).__init__(*args, **kwargs)
self.disabled_fields = set()
def disable_field(self, name):
self.disabled_fields.add(name)
def __iter__(self):
for name, field in six.iteritems(self.fields):
if name not in self.disabled_fields:
yield NOCBoundField(self, field, name)
| 32.45
| 83
| 0.558295
| 207
| 1,947
| 5.101449
| 0.396135
| 0.042614
| 0.039773
| 0.028409
| 0.090909
| 0.049242
| 0
| 0
| 0
| 0
| 0
| 0.005976
| 0.226502
| 1,947
| 59
| 84
| 33
| 0.695219
| 0.214689
| 0
| 0.058824
| 0
| 0
| 0.04829
| 0.026828
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0
| 0.117647
| 0
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ab43c897df779b46c4155028b30eff4d2ad17d1
| 1,990
|
py
|
Python
|
ersteops/unit/views.py
|
Prescrypto/ErsteOps
|
0b744173fb4f500003c96c4dcb26fb67d6eaa5ec
|
[
"MIT"
] | null | null | null |
ersteops/unit/views.py
|
Prescrypto/ErsteOps
|
0b744173fb4f500003c96c4dcb26fb67d6eaa5ec
|
[
"MIT"
] | 33
|
2017-11-24T19:44:57.000Z
|
2022-02-12T07:02:53.000Z
|
ersteops/unit/views.py
|
Prescrypto/ErsteOps
|
0b744173fb4f500003c96c4dcb26fb67d6eaa5ec
|
[
"MIT"
] | 1
|
2017-12-11T09:15:04.000Z
|
2017-12-11T09:15:04.000Z
|
import json
from django.shortcuts import get_object_or_404
from django.core import serializers
from django.http import HttpResponse
from .models import Unit
from .utils import UNIT_LIST_FIELD
BAD_REQUEST = HttpResponse(json.dumps({'error': 'Bad Request'}), status=400, content_type='application/json')
def unit_json_list(request):
''' List Json View for local available units '''
if request.is_ajax():
units = Unit.objects.available_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
_raw_data = json.loads(data)
for unit in _raw_data:
if unit['fields']['is_alliance']:
unit['fields'].update({'identifier': '{}{}'.format(unit['fields']['identifier'],' (Alianza)')})
else:
continue
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def detail_unit_json(request, id_unit):
''' Detail view of unit '''
if request.is_ajax():
unit = Unit.objects.filter(pk=id_unit)
if len(unit) == 0:
return HttpResponse(json.dumps({'error': 'Unidad no encontrada'}), status=404, content_type='application/json')
data = serializers.serialize('json', unit, fields=UNIT_LIST_FIELD)
# Add crew list
_raw_data = json.loads(data)
_raw_data[0]['fields'].update({
'crew_list' : unit.first().get_crew_list
})
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def alliance_unit_json_list(request):
''' List Json View for alliance available units '''
if request.is_ajax():
units = Unit.objects.available_alliance_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
return HttpResponse(data, content_type='application/json', status=200)
else:
return BAD_REQUEST
| 39.019608
| 123
| 0.665327
| 248
| 1,990
| 5.133065
| 0.262097
| 0.032993
| 0.08641
| 0.102121
| 0.456402
| 0.42498
| 0.42498
| 0.42498
| 0.371563
| 0.371563
| 0
| 0.012755
| 0.21206
| 1,990
| 50
| 124
| 39.8
| 0.799107
| 0.061307
| 0
| 0.4
| 0
| 0
| 0.114116
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.15
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ab74d6454f4022c0cd33cf7aa9d2924c227290a
| 2,394
|
py
|
Python
|
src/action/tests/test_logic.py
|
uts-cic/ontask_b
|
b313e2352c77b40655f41dd5acba3a7635e6f3b3
|
[
"MIT"
] | 3
|
2018-08-24T10:48:40.000Z
|
2020-05-29T06:33:23.000Z
|
src/action/tests/test_logic.py
|
Lukahm/ontask
|
f16bdaa06ea450ee56d4581340e611b1076bed16
|
[
"MIT"
] | null | null | null |
src/action/tests/test_logic.py
|
Lukahm/ontask
|
f16bdaa06ea450ee56d4581340e611b1076bed16
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
from django.conf import settings
from django.shortcuts import reverse
from django.core.management import call_command
import test
from dataops import pandas_db
from workflow.models import Workflow
class EmailActionTracking(test.OntaskTestCase):
fixtures = ['simple_email_action']
filename = os.path.join(
settings.BASE_DIR(),
'action',
'fixtures',
'simple_email_action_df.sql'
)
trck_tokens = [
"eyJhY3Rpb24iOjIsInRvIjoic3R1ZGVudDFAYm9ndXMuY29tIiwiY29sdW1uX2RzdCI6IkVtYWlsUmVhZF8xIiwic2VuZGVyIjoiaWRlc2lnbmVyMUBib2d1cy5jb20iLCJjb2x1bW5fdG8iOiJlbWFpbCJ9:1eBtw5:MwH1axNDQq9HpgcP6jRvp7cAFmI",
"eyJhY3Rpb24iOjIsInRvIjoic3R1ZGVudDJAYm9ndXMuY29tIiwiY29sdW1uX2RzdCI6IkVtYWlsUmVhZF8xIiwic2VuZGVyIjoiaWRlc2lnbmVyMUBib2d1cy5jb20iLCJjb2x1bW5fdG8iOiJlbWFpbCJ9:1eBtw5:FFS1EXjdgJjc37ZVOcW22aIegR4",
"eyJhY3Rpb24iOjIsInRvIjoic3R1ZGVudDNAYm9ndXMuY29tIiwiY29sdW1uX2RzdCI6IkVtYWlsUmVhZF8xIiwic2VuZGVyIjoiaWRlc2lnbmVyMUBib2d1cy5jb20iLCJjb2x1bW5fdG8iOiJlbWFpbCJ9:1eBtw5:V0KhNWbcY3YPTfJXRagPaeJae4M"
]
wflow_name = 'wflow1'
wflow_desc = 'description text for workflow 1'
wflow_empty = 'The workflow does not have data'
@classmethod
def setUpClass(cls):
super(EmailActionTracking, cls).setUpClass()
pandas_db.pg_restore_table(cls.filename)
def tearDown(self):
pandas_db.delete_all_tables()
super(EmailActionTracking, self).tearDown()
# Test that tracking hits are properly stored.
def test_tracking(self):
# Repeat the checks two times to test if they are accumulating
for idx in range(1, 3):
# Iterate over the tracking items
for trck in self.trck_tokens:
self.client.get(reverse('trck') + '?v=' + trck)
# Get the workflow and the data frame
workflow = Workflow.objects.get(name=self.wflow_name)
df = pandas_db.load_from_db(workflow.id)
# Check that the results have been updated in the DB (to 1)
for uemail in [x[1] for x in test.user_info
if x[1].startswith('student')]:
self.assertEqual(
int(df.loc[df['email'] == uemail, 'EmailRead_1'].values[0]),
idx
)
| 38.612903
| 202
| 0.704678
| 229
| 2,394
| 7.227074
| 0.528384
| 0.019335
| 0.022961
| 0.030211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057589
| 0.223893
| 2,394
| 61
| 203
| 39.245902
| 0.833154
| 0.105681
| 0
| 0
| 0
| 0
| 0.342081
| 0.280694
| 0
| 0
| 0
| 0
| 0.023256
| 1
| 0.069767
| false
| 0
| 0.186047
| 0
| 0.418605
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ab90259acfbeda3412addc434ad2001de65b77a
| 5,371
|
py
|
Python
|
obniz/parts/Moving/StepperMotor/__init__.py
|
izm51/obniz-python-sdk
|
40a738b5fe2c0a415cdc09f46d28c143982bfb07
|
[
"MIT"
] | 11
|
2019-03-22T12:02:11.000Z
|
2021-01-21T04:57:18.000Z
|
obniz/parts/Moving/StepperMotor/__init__.py
|
izm51/obniz-python-sdk
|
40a738b5fe2c0a415cdc09f46d28c143982bfb07
|
[
"MIT"
] | 5
|
2019-03-02T08:28:25.000Z
|
2021-02-02T22:06:37.000Z
|
obniz/parts/Moving/StepperMotor/__init__.py
|
izm51/obniz-python-sdk
|
40a738b5fe2c0a415cdc09f46d28c143982bfb07
|
[
"MIT"
] | 3
|
2019-07-20T06:55:09.000Z
|
2019-12-04T05:05:00.000Z
|
from attrdict import AttrDefault
import asyncio
class StepperMotor:
def __init__(self):
self.keys = ['a', 'b', 'aa', 'bb', 'common']
self.required_keys = ['a', 'b', 'aa', 'bb']
self._step_instructions = AttrDefault(bool,
{
'1': [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]],
'2': [[0, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0]],
'1-2': [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0], [0, 1, 1, 0]]
}
)
self.type = None
self.current_step = 0
self._step_type = '2'
self.frequency = 100
self.rotation_step_count = 100
self.milli_meter_step_count = 1
@staticmethod
def info():
return AttrDefault(bool, {'name': 'StepperMotor'})
def wired(self, obniz):
self.obniz = obniz
if obniz.is_valid_io(*[self.params.common]):
self.common = obniz.get_io(*[self.params.common])
self.common.output(*[True])
self.type = 'unipolar'
else:
self.type = 'bipolar'
self.ios = []
self.ios.append(*[obniz.get_io(*[self.params.a])])
self.ios.append(*[obniz.get_io(*[self.params.b])])
self.ios.append(*[obniz.get_io(*[self.params.aa])])
self.ios.append(*[obniz.get_io(*[self.params.bb])])
async def step_wait(self, step_count):
        if not isinstance(step_count, (int, float)):
            raise Exception('must provide number')
step_count = round(*[step_count])
if step_count == 0:
return
step_count_abs = abs(*[step_count])
instructions = self._get_step_instructions(*[])
instruction_length = len(instructions)
array = []
current_phase = self.current_step % instruction_length
if current_phase < 0:
current_phase = (instruction_length - current_phase * -1)
if step_count > 0:
for i in range(0, len(instructions), 1):
current_phase += 1
if current_phase >= instruction_length:
current_phase = 0
array.append(*[instructions[current_phase]])
else:
for i in range(0, len(instructions), 1):
current_phase -= 1
if current_phase < 0:
current_phase = (instruction_length - 1)
array.append(*[instructions[current_phase]])
msec = 1000 / self.frequency
msec = int(*[msec])
if msec < 1:
msec = 1
def anonymous0(index):
instruction = array[index]
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[instruction[i]])
state = anonymous0
states = []
for i in range(0, instruction_length, 1):
states.append(*[AttrDefault(bool, {'duration': msec, 'state': state})])
await self.obniz.io.repeat_wait(*[states, step_count_abs])
self.current_step += step_count
async def step_to_wait(self, destination):
mustmove = (destination - self.current_step)
await self.step_wait(*[mustmove])
async def hold_wait(self):
instructions = self._get_step_instructions(*[])
instruction_length = len(instructions)
current_phase = self.current_step % instruction_length
if current_phase < 0:
current_phase = (instruction_length - current_phase * -1)
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[instructions[current_phase][i]])
await self.obniz.ping_wait(*[])
async def free_wait(self):
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[True])
await self.obniz.ping_wait(*[])
def step_type(self, step_type):
new_type = self._step_instructions[step_type]
if not new_type:
raise Exception('unknown step type ' + str(step_type))
self._step_type = step_type
def speed(self, step_per_sec):
self.frequency = step_per_sec
def current_rotation(self):
return self.current_step / self.rotation_step_count * 360
def current_angle(self):
angle = int(*[self.current_rotation(*[]) * 1000]) % 360000 / 1000
if angle < 0:
angle = (360 - angle)
return angle
async def rotate_wait(self, rotation):
rotation /= 360
needed = rotation * self.rotation_step_count
await self.step_wait(*[needed])
async def rotate_to_wait(self, angle):
needed = (angle - self.current_angle(*[]))
if abs(*[needed]) > 180:
needed = (needed - 360) if needed > 0 else (360 + needed)
needed = needed / 360 * self.rotation_step_count
await self.step_wait(*[needed])
def current_distance(self):
return self.current_step / self.milli_meter_step_count
async def move_wait(self, distance):
needed = distance * self.milli_meter_step_count
await self.step_wait(*[needed])
async def move_to_wait(self, destination):
needed = (destination - self.current_distance(*[])) * self.milli_meter_step_count
await self.step_wait(*[needed])
def _get_step_instructions(self):
return self._step_instructions[self._step_type]
| 37.559441
| 135
| 0.571216
| 677
| 5,371
| 4.333826
| 0.149188
| 0.018405
| 0.014315
| 0.014997
| 0.470688
| 0.388548
| 0.339809
| 0.339809
| 0.282549
| 0.200409
| 0
| 0.038289
| 0.294917
| 5,371
| 143
| 136
| 37.559441
| 0.736467
| 0
| 0
| 0.209677
| 0
| 0
| 0.021035
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080645
| false
| 0
| 0.016129
| 0.032258
| 0.153226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4abb8389f46537b21c77c0aa5024c68649d338e4
| 2,241
|
py
|
Python
|
opennem/utils/scrapyd.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 22
|
2020-06-30T05:27:21.000Z
|
2022-02-21T12:13:51.000Z
|
opennem/utils/scrapyd.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 71
|
2020-08-07T13:06:30.000Z
|
2022-03-15T06:44:49.000Z
|
opennem/utils/scrapyd.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 13
|
2020-06-30T03:28:32.000Z
|
2021-12-30T08:17:16.000Z
|
#!/usr/bin/env python
"""
Scrapyd control methods
"""
import logging
from typing import Any, Dict, List
from urllib.parse import urljoin
from opennem.settings import settings
from opennem.utils.http import http
from opennem.utils.scrapy import get_spiders
logger = logging.getLogger("scrapyd.client")
def get_jobs() -> Dict[str, Any]:
job_url = urljoin(
settings.scrapyd_url,
"listjobs.json?project={}".format(settings.scrapyd_project_name),
)
jobs = http.get(job_url).json()
return jobs
def job_cancel(id: str) -> bool:
cancel_job_url = urljoin(settings.scrapyd_url, "cancel.json")
r = http.post(cancel_job_url, data={"project": "opennem", "job": id})
resp = r.json()
if resp["status"] == "error":
logger.error("Error: {}".format(resp["message"]))
return False
logger.info("Cancelled job: {}".format(resp["jobid"]))
return True
def job_schedule(spider_name: str) -> bool:
schedule_url = urljoin(settings.scrapyd_url, "schedule.json")
try:
r = http.post(schedule_url, data={"project": "opennem", "spider": spider_name})
except Exception as e:
logger.error("Error getting {}: {}".format(schedule_url, e))
return False
if not r.ok:
logger.error("Error: {}".format(r.status_code))
return False
resp = r.json()
if resp["status"] == "error":
logger.error("Error: {}".format(resp["message"]))
return False
logger.info("Queued spider {} with task: {}".format(spider_name, resp["jobid"]))
return True
def job_cancel_state(state: str = "pending") -> bool:
jobs = get_jobs()
if state not in jobs:
logger.info("Invalid state or no jobs in state {}".format(state))
return False
pending_jobs = jobs[state]
for job in pending_jobs:
job_id = job["id"]
logger.info("Cancelling {}".format(job_id))
job_cancel(job_id)
return True
def job_schedule_all(matches: str = None) -> List[str]:
spiders = get_spiders()
spider_scheduled = []
for s in spiders:
if matches and matches != s:
continue
job_schedule(s)
spider_scheduled.append(s)
return spider_scheduled
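# Illustrative usage from an interactive session (the spider name is hypothetical):
#   job_schedule("nem_dispatch")     # queue one spider on the scrapyd server
#   job_cancel_state("pending")      # cancel every job still waiting to run
#   job_schedule_all()               # queue every spider returned by get_spiders()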
| 23.103093
| 87
| 0.635431
| 291
| 2,241
| 4.762887
| 0.292096
| 0.018038
| 0.046176
| 0.054113
| 0.239538
| 0.196248
| 0.11544
| 0.11544
| 0.11544
| 0.11544
| 0
| 0
| 0.227577
| 2,241
| 96
| 88
| 23.34375
| 0.800693
| 0.019188
| 0
| 0.237288
| 0
| 0
| 0.135678
| 0.010964
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084746
| false
| 0
| 0.101695
| 0
| 0.355932
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4abf6af83131868287dda032df11a21439ed9d49
| 1,164
|
py
|
Python
|
tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial1_Solution_437c0b24.py
|
liuxiaomiao123/NeuroMathAcademy
|
16a7969604a300bf9fbb86f8a5b26050ebd14c65
|
[
"CC-BY-4.0"
] | 2
|
2020-07-03T04:39:09.000Z
|
2020-07-12T02:08:31.000Z
|
tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial1_Solution_437c0b24.py
|
NinaHKivanani/course-content
|
3c91dd1a669cebce892486ba4f8086b1ef2e1e49
|
[
"CC-BY-4.0"
] | 1
|
2020-06-22T22:57:03.000Z
|
2020-06-22T22:57:03.000Z
|
tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial1_Solution_437c0b24.py
|
NinaHKivanani/course-content
|
3c91dd1a669cebce892486ba4f8086b1ef2e1e49
|
[
"CC-BY-4.0"
] | 1
|
2021-03-29T21:08:26.000Z
|
2021-03-29T21:08:26.000Z
|
import numpy as np                 # the solution assumes numpy is available in the notebook
import matplotlib.pyplot as plt    # likewise for matplotlib's pyplot
def integrate_exponential(a, x0, dt, T):
"""Compute solution of the differential equation xdot=a*x with
initial condition x0 for a duration T. Use time step dt for numerical
solution.
Args:
a (scalar): parameter of xdot (xdot=a*x)
x0 (scalar): initial condition (x at time 0)
dt (scalar): timestep of the simulation
T (scalar): total duration of the simulation
Returns:
ndarray, ndarray: `x` for all simulation steps and the time `t` at each step
"""
# Initialize variables
t = np.arange(0, T, dt)
x = np.zeros_like(t, dtype=complex)
x[0] = x0
# Step through system and integrate in time
for k in range(1, len(t)):
# for each point in time, compute xdot = a*x
xdot = (a*x[k-1])
# update x by adding xdot scaled by dt
x[k] = x[k-1] + xdot * dt
return x, t
# choose parameters
a = -0.5 # parameter in f(x)
T = 10 # total Time duration
dt = 0.001 # timestep of our simulation
x0 = 1. # initial condition of x at time 0
x, t = integrate_exponential(a, x0, dt, T)
with plt.xkcd():
fig = plt.figure(figsize=(8, 6))
plt.plot(t, x.real)
plt.xlabel('Time (s)')
plt.ylabel('x')
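# With a = -0.5 the analytic solution is x(t) = x0 * exp(a * t), so the plotted
# trajectory should decay smoothly from 1 toward 0 over the 10 s simulation window.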
| 27.714286
| 80
| 0.640893
| 199
| 1,164
| 3.733668
| 0.39196
| 0.026918
| 0.032301
| 0.061911
| 0.069987
| 0.069987
| 0
| 0
| 0
| 0
| 0
| 0.02718
| 0.241409
| 1,164
| 42
| 81
| 27.714286
| 0.81427
| 0.579897
| 0
| 0
| 0
| 0
| 0.019868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4abf7b9f84deaebd77faef58a9ebbc8bcdd69360
| 1,199
|
py
|
Python
|
PyTemp/gis/shapefile_to_geojson.py
|
SwaggerKhan/PatrolGis
|
89b1a398ffd6171ac35ea9d023bce98a0fc7e930
|
[
"MIT"
] | null | null | null |
PyTemp/gis/shapefile_to_geojson.py
|
SwaggerKhan/PatrolGis
|
89b1a398ffd6171ac35ea9d023bce98a0fc7e930
|
[
"MIT"
] | null | null | null |
PyTemp/gis/shapefile_to_geojson.py
|
SwaggerKhan/PatrolGis
|
89b1a398ffd6171ac35ea9d023bce98a0fc7e930
|
[
"MIT"
] | null | null | null |
import json
import geojson
import geopandas as gpd
class SaveToGeoJSON:
__name_counter = 0
def file_name(self):
if self.__name_counter == 0:
self.__name_counter = 1
return "./out"+str(self.__name_counter)+".json"
elif self.__name_counter == 1:
self.__name_counter = 2
return "./out"+str(self.__name_counter)+".json"
else:
self.__name_counter = 0
print("Contact developer")
def save(self, name, file_save_name):
self.shape_file = gpd.read_file(name)
self.shape_file.to_file(file_save_name, driver="GeoJSON")
class MergeGeoJSON:
__files_merge_list = ['./out1.json', './out2.json']
__poly_geojson = list()
def save(self):
for i in self.__files_merge_list:
with open(i) as geojson_data:
self.__poly_geojson.append(json.load(geojson_data))
merged = { 'firstObj ' : self.__poly_geojson[1], 'secondObj' : self.__poly_geojson[0] }
json.dumps(merged)
with open('Merged_out.json', 'w') as outfile:
json.dump(merged, outfile, indent=3)
outfile.close()
return True
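# Illustrative usage (shapefile names are hypothetical):
#   converter = SaveToGeoJSON()
#   converter.save("parcels.shp", converter.file_name())  # writes ./out1.json
#   converter.save("zones.shp", converter.file_name())    # writes ./out2.json
#   MergeGeoJSON().save()                                  # writes Merged_out.json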
| 31.552632
| 95
| 0.605505
| 149
| 1,199
| 4.496644
| 0.375839
| 0.131343
| 0.156716
| 0.047761
| 0.092537
| 0.092537
| 0.092537
| 0
| 0
| 0
| 0
| 0.012761
| 0.281068
| 1,199
| 38
| 96
| 31.552632
| 0.764501
| 0
| 0
| 0.064516
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.096774
| 0
| 0.451613
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ac2c549f6e7bc96012e6af6cdb10885c9451aa4
| 543
|
py
|
Python
|
torch_geometric/read/ply.py
|
DL-85/pytorch_geometric
|
eb12a94a667e881c4a6bff26b0453428bcb72393
|
[
"MIT"
] | 2
|
2019-10-10T07:01:07.000Z
|
2020-11-04T06:26:42.000Z
|
torch_geometric/read/ply.py
|
cloudyyyyy/pytorch_geometric
|
61d389b5f8ee700dda4d18cadca72f24c978fce1
|
[
"MIT"
] | null | null | null |
torch_geometric/read/ply.py
|
cloudyyyyy/pytorch_geometric
|
61d389b5f8ee700dda4d18cadca72f24c978fce1
|
[
"MIT"
] | 1
|
2019-10-31T01:15:03.000Z
|
2019-10-31T01:15:03.000Z
|
import torch
from plyfile import PlyData
from torch_geometric.data import Data
def read_ply(path):
with open(path, 'rb') as f:
data = PlyData.read(f)
pos = ([torch.tensor(data['vertex'][axis]) for axis in ['x', 'y', 'z']])
pos = torch.stack(pos, dim=-1)
face = None
if 'face' in data:
faces = data['face']['vertex_indices']
faces = [torch.tensor(face, dtype=torch.long) for face in faces]
face = torch.stack(faces, dim=-1)
data = Data(pos=pos)
data.face = face
return data
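# Illustrative usage (the path is hypothetical):
#   data = read_ply("mesh.ply")
#   data.pos   # float tensor of shape [num_vertices, 3]
#   data.face  # long tensor of shape [3, num_faces], or None if the file has no faces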
| 23.608696
| 76
| 0.607735
| 81
| 543
| 4.037037
| 0.444444
| 0.04893
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004878
| 0.244936
| 543
| 22
| 77
| 24.681818
| 0.792683
| 0
| 0
| 0
| 0
| 0
| 0.060773
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ac4732c076aba6b6bc386af069168643221a2c1
| 2,679
|
py
|
Python
|
ml-agents/mlagents/trainers/brain_conversion_utils.py
|
ranguera/ml-agents
|
68779b407b32fce2ea14b16ef1bc26dea7d5e5a8
|
[
"Apache-2.0"
] | 2
|
2019-12-13T22:00:11.000Z
|
2019-12-14T00:47:32.000Z
|
ml-agents/mlagents/trainers/brain_conversion_utils.py
|
almartson/ml-agents
|
ee748705b777ddd365c55065366e83596c615811
|
[
"Apache-2.0"
] | null | null | null |
ml-agents/mlagents/trainers/brain_conversion_utils.py
|
almartson/ml-agents
|
ee748705b777ddd365c55065366e83596c615811
|
[
"Apache-2.0"
] | null | null | null |
from mlagents.trainers.brain import BrainInfo, BrainParameters, CameraResolution
from mlagents.envs.base_env import BatchedStepResult, AgentGroupSpec
from mlagents.envs.exception import UnityEnvironmentException
import numpy as np
from typing import List
def step_result_to_brain_info(
step_result: BatchedStepResult,
group_spec: AgentGroupSpec,
agent_id_prefix: int = None,
) -> BrainInfo:
n_agents = step_result.n_agents()
vis_obs_indices = []
vec_obs_indices = []
for index, observation in enumerate(step_result.obs):
if len(observation.shape) == 2:
vec_obs_indices.append(index)
elif len(observation.shape) == 4:
vis_obs_indices.append(index)
else:
raise UnityEnvironmentException(
"Invalid input received from the environment, the observation should "
"either be a vector of float or a PNG image"
)
if len(vec_obs_indices) == 0:
vec_obs = np.zeros((n_agents, 0), dtype=np.float32)
else:
vec_obs = np.concatenate([step_result.obs[i] for i in vec_obs_indices], axis=1)
vis_obs = [step_result.obs[i] for i in vis_obs_indices]
mask = np.ones((n_agents, np.sum(group_spec.action_size)), dtype=np.float32)
if group_spec.is_action_discrete():
mask = np.ones(
(n_agents, np.sum(group_spec.discrete_action_branches)), dtype=np.float32
)
if step_result.action_mask is not None:
mask = 1 - np.concatenate(step_result.action_mask, axis=1)
if agent_id_prefix is None:
agent_ids = [str(ag_id) for ag_id in list(step_result.agent_id)]
else:
agent_ids = [f"${agent_id_prefix}-{ag_id}" for ag_id in step_result.agent_id]
return BrainInfo(
vis_obs,
vec_obs,
list(step_result.reward),
agent_ids,
list(step_result.done),
list(step_result.max_step),
mask,
)
def group_spec_to_brain_parameters(
name: str, group_spec: AgentGroupSpec
) -> BrainParameters:
vec_size = np.sum(
[shape[0] for shape in group_spec.observation_shapes if len(shape) == 1]
)
vis_sizes = [shape for shape in group_spec.observation_shapes if len(shape) == 3]
cam_res = [CameraResolution(s[0], s[1], s[2]) for s in vis_sizes]
a_size: List[int] = []
if group_spec.is_action_discrete():
a_size += list(group_spec.discrete_action_branches)
vector_action_space_type = 0
else:
a_size += [group_spec.action_size]
vector_action_space_type = 1
return BrainParameters(
name, int(vec_size), cam_res, a_size, [], vector_action_space_type
)
| 37.732394
| 87
| 0.670773
| 373
| 2,679
| 4.538874
| 0.273458
| 0.076787
| 0.030715
| 0.037212
| 0.222682
| 0.161843
| 0.114589
| 0.090963
| 0.090963
| 0.054341
| 0
| 0.010244
| 0.234789
| 2,679
| 70
| 88
| 38.271429
| 0.81561
| 0
| 0
| 0.090909
| 0
| 0
| 0.050765
| 0.009705
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.075758
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4ac65a293f32905c196e86dcfb72e76e3b1b85d2
| 853
|
py
|
Python
|
mrdc_ws/src/mrdc_serial/setup.py
|
SoonerRobotics/MRDC22
|
00c1360138e468bf313eefc93fbde11f289ece82
|
[
"MIT"
] | null | null | null |
mrdc_ws/src/mrdc_serial/setup.py
|
SoonerRobotics/MRDC22
|
00c1360138e468bf313eefc93fbde11f289ece82
|
[
"MIT"
] | 1
|
2021-12-01T01:21:22.000Z
|
2021-12-01T01:21:22.000Z
|
mrdc_ws/src/mrdc_serial/setup.py
|
SoonerRobotics/MRDC22
|
00c1360138e468bf313eefc93fbde11f289ece82
|
[
"MIT"
] | 1
|
2021-09-28T23:43:07.000Z
|
2021-09-28T23:43:07.000Z
|
from setuptools import find_packages, setup
from glob import glob
import os
package_name = 'mrdc_serial'
setup(
name=package_name,
version='1.0.0',
packages=find_packages(),
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name, 'launch'),
glob(os.path.join('launch', '*.xml')))
],
install_requires=['setuptools'],
maintainer='Dylan Zemlin',
maintainer_email='dylan.zemlin@gmail.com',
description='The MRDC Serial package that controls communication to the arduino',
license='MIT License',
entry_points={
'console_scripts': [
'remote = mrdc_serial.remote:main',
'serial = mrdc_serial.serial:main'
],
},
)
| 28.433333
| 85
| 0.630715
| 96
| 853
| 5.427083
| 0.5
| 0.105566
| 0.06142
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004545
| 0.22626
| 853
| 29
| 86
| 29.413793
| 0.784848
| 0
| 0
| 0.074074
| 0
| 0
| 0.357562
| 0.127784
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43493e4caf41318515d94514d68ea22bde6fccc6
| 5,219
|
py
|
Python
|
PytorchRouting/Examples/run_experiments.py
|
oleksost/RoutingNetworks
|
7e3e9219b7389d5af2a832a4882bc9fda0e7fd21
|
[
"Apache-2.0"
] | 63
|
2018-07-19T20:12:55.000Z
|
2022-03-31T14:59:37.000Z
|
PytorchRouting/Examples/run_experiments.py
|
oleksost/RoutingNetworks
|
7e3e9219b7389d5af2a832a4882bc9fda0e7fd21
|
[
"Apache-2.0"
] | 2
|
2019-08-08T18:28:13.000Z
|
2019-09-24T16:46:22.000Z
|
PytorchRouting/Examples/run_experiments.py
|
oleksost/RoutingNetworks
|
7e3e9219b7389d5af2a832a4882bc9fda0e7fd21
|
[
"Apache-2.0"
] | 16
|
2018-07-25T05:56:51.000Z
|
2021-01-09T02:47:05.000Z
|
"""
This file defines some simple experiments to illustrate how Pytorch-Routing functions.
"""
import numpy as np
import tqdm
import torch
from PytorchRouting.DecisionLayers import REINFORCE, QLearning, SARSA, ActorCritic, GumbelSoftmax, PerTaskAssignment, \
WPL, AAC, AdvantageLearning, RELAX, EGreedyREINFORCE, EGreedyAAC
from PytorchRouting.Examples.Models import PerTask_all_fc, RoutedAllFC, PerTask_1_fc, PerDecisionSingleAgent, \
Dispatched
from PytorchRouting.Examples.Datasets import CIFAR100MTL
def compute_batch(model, batch):
samples, labels, tasks = batch
out, meta = model(samples, tasks=tasks)
correct_predictions = (out.max(dim=1)[1].squeeze() == labels.squeeze()).cpu().numpy()
accuracy = correct_predictions.sum()
oh_labels = one_hot(labels, out.size()[-1])
module_loss, decision_loss = model.loss(out, meta, oh_labels)
return module_loss, decision_loss, accuracy
def one_hot(indices, width):
indices = indices.squeeze().unsqueeze(1)
oh = torch.zeros(indices.size()[0], width).to(indices.device)
oh.scatter_(1, indices, 1)
return oh
def run_experiment(model, dataset, learning_rates, routing_module_learning_rate_ratio):
print('Loaded dataset and constructed model. Starting Training ...')
for epoch in range(50):
optimizers = []
parameters = []
if epoch in learning_rates:
try:
optimizers.append(torch.optim.SGD(model.routing_parameters(),
lr=routing_module_learning_rate_ratio*learning_rates[epoch]))
optimizers.append(torch.optim.SGD(model.module_parameters(),
lr=learning_rates[epoch]))
parameters = model.routing_parameters() + model.module_parameters()  # clip over both routing and module parameters
except AttributeError:
optimizers.append(torch.optim.SGD(model.parameters(), lr=learning_rates[epoch]))
parameters = model.parameters()
train_log, test_log = np.zeros((3,)), np.zeros((3,))
train_samples_seen, test_samples_seen = 0, 0
dataset.enter_train_mode()
model.train()
# while True:
pbar = tqdm.tqdm(unit=' samples')
while True:
try:
batch = dataset.get_batch()
except StopIteration:
break
train_samples_seen += len(batch[0])
pbar.update(len(batch[0]))
module_loss, decision_loss, accuracy = compute_batch(model, batch)
(module_loss + decision_loss).backward()
torch.nn.utils.clip_grad_norm_(parameters, 40., norm_type=2)
for opt in optimizers:
opt.step()
model.zero_grad()
train_log += np.array([module_loss.tolist(), decision_loss.tolist(), accuracy])
pbar.close()
dataset.enter_test_mode()
model.eval()
model.start_logging_selections()
while True:
try:
batch = dataset.get_batch()
except StopIteration:
break
test_samples_seen += len(batch[0])
module_loss, decision_loss, accuracy = compute_batch(model, batch)
test_log += np.array([module_loss.tolist(), decision_loss.tolist(), accuracy])
print('Epoch {} finished after {} train and {} test samples..\n'
' Training averages: Model loss: {}, Routing loss: {}, Accuracy: {}\n'
' Testing averages: Model loss: {}, Routing loss: {}, Accuracy: {}'.format(
epoch + 1, train_samples_seen, test_samples_seen,
*(train_log/train_samples_seen).round(3), *(test_log/test_samples_seen).round(3)))
model.stop_logging_selections_and_report()
if __name__ == '__main__':
# MNIST
# dataset = MNIST_MTL(64, data_files=['./Datasets/mnist.pkl.gz'])
# model = PerTask_all_fc(1, 288, 2, dataset.num_tasks, dataset.num_tasks)
# model = WPL_routed_all_fc(1, 288, 2, dataset.num_tasks, dataset.num_tasks)
cuda = False
# cuda = True
# CIFAR
dataset = CIFAR100MTL(10, data_files=['./Datasets/cifar-100-py/train', './Datasets/cifar-100-py/test'], cuda=cuda)
model = RoutedAllFC(WPL, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(RELAX, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(EGreedyREINFORCE, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = PerDecisionSingleAgent(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = Dispatched(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
learning_rates = {0: 3e-3, 5: 1e-3, 10: 3e-4}
routing_module_learning_rate_ratio = 0.3
if cuda:
model.cuda()
run_experiment(model, dataset, learning_rates, routing_module_learning_rate_ratio)
'''
WPL_routed_all_fc(3, 512, 5, dataset.num_tasks, dataset.num_tasks)
Training averages: Model loss: 0.427, Routing loss: 8.864, Accuracy: 0.711
Testing averages: Model loss: 0.459, Routing loss: 9.446, Accuracy: 0.674
'''
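# --- Hedged sketch (not part of the original file): behaviour of the one_hot() helper above ---
# For a 1-D LongTensor of class indices, one_hot() scatters 1.0 into a zero matrix, e.g.:
#   one_hot(torch.tensor([1, 0, 2]), 3)
#   -> tensor([[0., 1., 0.],
#              [1., 0., 0.],
#              [0., 0., 1.]])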
| 46.185841
| 119
| 0.650699
| 628
| 5,219
| 5.19586
| 0.286624
| 0.055164
| 0.082746
| 0.06068
| 0.415569
| 0.378486
| 0.30616
| 0.267545
| 0.267545
| 0.267545
| 0
| 0.030038
| 0.234528
| 5,219
| 112
| 120
| 46.598214
| 0.786733
| 0.147729
| 0
| 0.158537
| 0
| 0
| 0.078151
| 0.013581
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036585
| false
| 0
| 0.073171
| 0
| 0.134146
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
434b79d3907786ef5e26df8cc123133e8b35acdc
| 6,670
|
py
|
Python
|
code/image-manipulation.py
|
rgeirhos/object-recognition
|
4679f7c60665bd9fb274c6c4372fc0fa34b51485
|
[
"CC-BY-4.0"
] | 33
|
2017-06-22T21:51:25.000Z
|
2021-09-03T01:59:58.000Z
|
code/image-manipulation.py
|
rgeirhos/object-recognition
|
4679f7c60665bd9fb274c6c4372fc0fa34b51485
|
[
"CC-BY-4.0"
] | null | null | null |
code/image-manipulation.py
|
rgeirhos/object-recognition
|
4679f7c60665bd9fb274c6c4372fc0fa34b51485
|
[
"CC-BY-4.0"
] | 20
|
2017-06-24T01:48:19.000Z
|
2021-05-12T08:41:23.000Z
|
#!/usr/bin/env python
from skimage.color import rgb2gray
from skimage.io import imread, imsave
from scipy.misc import toimage
import numpy as np
import wrapper as wr
###########################################################
# IMAGE IO
###########################################################
def imload_rgb(path):
"""Load and return an RGB image in the range [0, 1]."""
return imread(path) / 255.0
def save_img(image, imgname, use_JPEG=False):
"""Save image as either .jpeg or .png"""
if use_JPEG:
imsave(imgname+".JPEG", image)
else:
toimage(image,
cmin=0.0, cmax=1.0).save(imgname+".png")
###########################################################
# IMAGE MANIPULATION
###########################################################
def adjust_contrast(image, contrast_level):
"""Return the image scaled to a certain contrast level in [0, 1].
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
assert(contrast_level >= 0.0), "contrast_level too low."
assert(contrast_level <= 1.0), "contrast_level too high."
return (1-contrast_level)/2.0 + image.dot(contrast_level)
def grayscale_contrast(image, contrast_level):
"""Convert to grayscale. Adjust contrast.
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
return adjust_contrast(rgb2gray(image), contrast_level)
def uniform_noise(image, width, contrast_level, rng):
"""Convert to grayscale. Adjust contrast. Apply uniform noise.
parameters:
- image: a numpy.ndarray
- width: a scalar indicating width of additive uniform noise
-> then noise will be in range [-width, width]
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
image = grayscale_contrast(image, contrast_level)
return apply_uniform_noise(image, -width, width, rng)
###########################################################
# HELPER FUNCTIONS
###########################################################
def apply_uniform_noise(image, low, high, rng=None):
"""Apply uniform noise to an image, clip outside values to 0 and 1.
parameters:
- image: a numpy.ndarray
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
nrow = image.shape[0]
ncol = image.shape[1]
image = image + get_uniform_noise(low, high, nrow, ncol, rng)
#clip values
image = np.where(image < 0, 0, image)
image = np.where(image > 1, 1, image)
assert is_in_bounds(image, 0, 1), "values <0 or >1 occurred"
return image
def get_uniform_noise(low, high, nrow, ncol, rng=None):
"""Return uniform noise within [low, high) of size (nrow, ncol).
parameters:
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- nrow: number of rows of desired noise
- ncol: number of columns of desired noise
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
if rng is None:
return np.random.uniform(low=low, high=high,
size=(nrow, ncol))
else:
return rng.uniform(low=low, high=high,
size=(nrow, ncol))
def is_in_bounds(mat, low, high):
"""Return wether all values in 'mat' fall between low and high.
parameters:
- mat: a numpy.ndarray
- low: lower bound (inclusive)
- high: upper bound (inclusive)
"""
return np.all(np.logical_and(mat >= low, mat <= high))  # use the given bounds rather than hard-coded [0, 1]
def eidolon_partially_coherent_disarray(image, reach, coherence, grain):
"""Return parametrically distorted images (produced by Eidolon factory.
For more information on the effect of different distortions, please
have a look at the paper: Koenderink et al., JoV 2017,
Eidolons: Novel stimuli for vision research).
- image: a numpy.ndarray
- reach: float, controlling the strength of the manipulation
- coherence: a float within [0, 1] with 1 = full coherence
- grain: float, controlling how fine-grained the distortion is
"""
return wr.partially_coherent_disarray(wr.data_to_pic(image),
reach, coherence, grain)
###########################################################
# MAIN METHOD FOR TESTING & DEMONSTRATION PURPOSES
###########################################################
if __name__ == "__main__":
print("""This main method should generate manipulated
images in the directory where it was executed.""")
use_JPEG = False # either JPEG or PNG
img = imload_rgb("test_image.JPEG")
###################################################
# A) Example for color-experiment:
# - convert to grayscale
###################################################
img_grayscale = rgb2gray(img)
save_img(img_grayscale, "test_image_grayscale", use_JPEG)
###################################################
# B) Example for contrast-experiment:
# - convert to grayscale and
# - reduce contrast to nominal contrast of 10%
###################################################
contrast_level_1 = 0.1
img_low_contrast = grayscale_contrast(image=img,
contrast_level=contrast_level_1)
save_img(img_low_contrast, "test_image_low_contrast", use_JPEG)
###################################################
# C) Example for noise-experiment:
# - convert to grayscale and
# - reduce contrast to 30% and
# - apply uniform noise with width 0.1
###################################################
noise_width = 0.1
contrast_level_2 = 0.3
rng = np.random.RandomState(seed=42)
img_noisy = uniform_noise(image=img, width=noise_width,
contrast_level=contrast_level_2,
rng=rng)
save_img(img_noisy, "test_image_noisy", use_JPEG)
###################################################
# D) Example for eidolon-experiment:
# - use partially_coherent_disarray
###################################################
grain = 10.0
coherence = 1.0
reach = 8.0
img_eidolon = eidolon_partially_coherent_disarray(img, reach,
coherence, grain)
save_img(img_eidolon, "test_image_eidolon", use_JPEG)
| 31.913876
| 75
| 0.556822
| 777
| 6,670
| 4.651223
| 0.24453
| 0.07554
| 0.021583
| 0.024903
| 0.247371
| 0.180133
| 0.164638
| 0.164638
| 0.128113
| 0.128113
| 0
| 0.01496
| 0.228336
| 6,670
| 208
| 76
| 32.067308
| 0.687196
| 0.367916
| 0
| 0.060606
| 0
| 0
| 0.090938
| 0.007417
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.136364
| false
| 0
| 0.075758
| 0
| 0.348485
| 0.015152
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
434cb653784b20b7295c5b100050122451d7d139
| 4,855
|
py
|
Python
|
emmet-core/emmet/core/vasp/calc_types.py
|
espottesmith/emmet
|
bd28b91d240da9f0c996a2b2efb7e67da9176a09
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
emmet-core/emmet/core/vasp/calc_types.py
|
espottesmith/emmet
|
bd28b91d240da9f0c996a2b2efb7e67da9176a09
|
[
"BSD-3-Clause-LBNL"
] | 78
|
2020-11-16T06:46:43.000Z
|
2022-03-28T03:02:51.000Z
|
emmet-core/emmet/core/vasp/calc_types.py
|
utf/emmet
|
27a51a7ad4c300e280de5ba9b59a311dd77cffdd
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
""" Module to define various calculation types as Enums for VASP """
import datetime
from itertools import groupby, product
from pathlib import Path
from typing import Dict, Iterator, List
import bson
import numpy as np
from monty.json import MSONable
from monty.serialization import loadfn
from pydantic import BaseModel
from pymatgen.analysis.structure_matcher import ElementComparator, StructureMatcher
from pymatgen.core.structure import Structure
from typing_extensions import Literal
from emmet.core import SETTINGS
from emmet.core.utils import ValueEnum
_RUN_TYPE_DATA = loadfn(str(Path(__file__).parent.joinpath("run_types.yaml").resolve()))
_TASK_TYPES = [
"NSCF Line",
"NSCF Uniform",
"Dielectric",
"DFPT",
"DFPT Dielectric",
"NMR Nuclear Shielding",
"NMR Electric Field Gradient",
"Static",
"Structure Optimization",
"Deformation",
]
_RUN_TYPES = (
[
rt
for functional_class in _RUN_TYPE_DATA
for rt in _RUN_TYPE_DATA[functional_class]
]
+ [
f"{rt}+U"
for functional_class in _RUN_TYPE_DATA
for rt in _RUN_TYPE_DATA[functional_class]
]
+ ["LDA", "LDA+U"]
)
RunType = ValueEnum( # type: ignore
"RunType", dict({"_".join(rt.split()).replace("+", "_"): rt for rt in _RUN_TYPES})
)
RunType.__doc__ = "VASP calculation run types"
TaskType = ValueEnum("TaskType", {"_".join(tt.split()): tt for tt in _TASK_TYPES}) # type: ignore
TaskType.__doc__ = "VASP calculation task types"
CalcType = ValueEnum( # type: ignore
"CalcType",
{
f"{'_'.join(rt.split()).replace('+','_')}_{'_'.join(tt.split())}": f"{rt} {tt}"
for rt, tt in product(_RUN_TYPES, _TASK_TYPES)
},
)
CalcType.__doc__ = "VASP calculation types"
def run_type(parameters: Dict) -> RunType:
"""
Determines the run_type from the VASP parameters dict
This is adapted from pymatgen to be far less unstable
Args:
parameters: Dictionary of VASP parameters from Vasprun.xml
"""
if parameters.get("LDAU", False):
is_hubbard = "+U"
else:
is_hubbard = ""
def _variant_equal(v1, v2) -> bool:
"""
helper function to deal with strings
"""
if isinstance(v1, str) and isinstance(v2, str):
return v1.strip().upper() == v2.strip().upper()
else:
return v1 == v2
# This is to force an order of evaluation
for functional_class in ["HF", "VDW", "METAGGA", "GGA"]:
for special_type, params in _RUN_TYPE_DATA[functional_class].items():
if all(
[
_variant_equal(parameters.get(param, None), value)
for param, value in params.items()
]
):
return RunType(f"{special_type}{is_hubbard}")
return RunType(f"LDA{is_hubbard}")
def task_type(
inputs: Dict[Literal["incar", "poscar", "kpoints", "potcar"], Dict]
) -> TaskType:
"""
Determines the task type
Args:
inputs: inputs dict with an incar, kpoints, potcar, and poscar dictionaries
"""
calc_type = []
incar = inputs.get("incar", {})
if incar.get("ICHARG", 0) > 10:
try:
kpts = inputs.get("kpoints") or {}
kpt_labels = kpts.get("labels") or []
num_kpt_labels = len(list(filter(None.__ne__, kpt_labels)))
except Exception as e:
raise Exception(
"Couldn't identify total number of kpt labels: {}".format(e)
)
if num_kpt_labels > 0:
calc_type.append("NSCF Line")
else:
calc_type.append("NSCF Uniform")
elif incar.get("LEPSILON", False):
if incar.get("IBRION", 0) > 6:
calc_type.append("DFPT")
calc_type.append("Dielectric")
elif incar.get("IBRION", 0) > 6:
calc_type.append("DFPT")
elif incar.get("LCHIMAG", False):
calc_type.append("NMR Nuclear Shielding")
elif incar.get("LEFG", False):
calc_type.append("NMR Electric Field Gradient")
elif incar.get("NSW", 1) == 0:
calc_type.append("Static")
elif incar.get("ISIF", 2) == 3 and incar.get("IBRION", 0) > 0:
calc_type.append("Structure Optimization")
elif incar.get("ISIF", 3) == 2 and incar.get("IBRION", 0) > 0:
calc_type.append("Deformation")
return TaskType(" ".join(calc_type))
def calc_type(
inputs: Dict[Literal["incar", "poscar", "kpoints", "potcar"], Dict],
parameters: Dict,
) -> CalcType:
"""
Determines the calc type
Args:
inputs: inputs dict with an incar, kpoints, potcar, and poscar dictionaries
parameters: Dictionary of VASP parameters from Vasprun.xml
"""
rt = run_type(parameters).value
tt = task_type(inputs).value
return CalcType(f"{rt} {tt}")
| 28.391813
| 98
| 0.618332
| 600
| 4,855
| 4.836667
| 0.296667
| 0.038594
| 0.048243
| 0.022398
| 0.230875
| 0.215713
| 0.206065
| 0.206065
| 0.171606
| 0.091661
| 0
| 0.007198
| 0.256025
| 4,855
| 170
| 99
| 28.558824
| 0.796235
| 0.132441
| 0
| 0.077586
| 0
| 0.008621
| 0.173086
| 0.021453
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.12069
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
434df89c1b80cf68699882387250cf1a06bd4617
| 4,165
|
py
|
Python
|
models/train_classifier.py
|
YiWang-Evonne/disaster_response
|
824f646920ac85a01419101e17e92f592a505782
|
[
"MIT"
] | null | null | null |
models/train_classifier.py
|
YiWang-Evonne/disaster_response
|
824f646920ac85a01419101e17e92f592a505782
|
[
"MIT"
] | null | null | null |
models/train_classifier.py
|
YiWang-Evonne/disaster_response
|
824f646920ac85a01419101e17e92f592a505782
|
[
"MIT"
] | null | null | null |
import sys
import pandas as pd
from sqlalchemy import create_engine
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
import re
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import pickle
from sklearn.model_selection import GridSearchCV
def load_data(database_filepath):
"""
load data from sql db
:param database_filepath: sql db path
:return: pandas dataframe
"""
engine = create_engine("sqlite:///"+database_filepath)
df = pd.read_sql_table('modeling_data', engine)
yvar = [item for item in list(df) if item not in ['message', 'original', 'genre', 'id']]
X = df['message']
Y = df[yvar]
return X.values, Y.values, list(Y)
def tokenize(text):
"""
processing the text input
:param text: text inputs
:return:
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
def build_model():
"""
build model pipeline
:return: model pipeline
"""
model_pipeline = Pipeline([
('features', Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer())
])),
('clf', RandomForestClassifier())
])
return model_pipeline
def model_gridsearch(model, parameters):
cv = GridSearchCV(model, param_grid=parameters, verbose=3)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
"""
evaluate model performances
:param model: model obj
:param X_test: test x
:param Y_test: test y
:param category_names: y names
:return:
"""
y_pred = model.predict(X_test)
print(classification_report(Y_test, y_pred, target_names=category_names))
def save_model(model, model_filepath):
"""
save model to local path
:param model: model obj
:param model_filepath: saving path
:return:
"""
with open(model_filepath, 'wb') as f:
pickle.dump(model, f)
def main():
"""
CLI to fit the model
:return:
"""
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
# model.fit(X_train, Y_train)
parameters = {
'clf__n_estimators': [100, 400, 800],
# 'clf__criterion':["gini", "entropy"]
}
cv = model_gridsearch(model, parameters)
cv.fit(X_train, Y_train)  # fit the grid search; best_estimator_ is only available after fitting
best_model_pipeline = cv.best_estimator_
print('Evaluating model...')
evaluate_model(best_model_pipeline, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(best_model_pipeline, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
| 30.181159
| 96
| 0.657143
| 511
| 4,165
| 5.162427
| 0.34638
| 0.033359
| 0.006823
| 0.018196
| 0.058378
| 0.017437
| 0
| 0
| 0
| 0
| 0
| 0.006213
| 0.227131
| 4,165
| 138
| 97
| 30.181159
| 0.813296
| 0.12605
| 0
| 0
| 0
| 0.0125
| 0.175208
| 0.037504
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0875
| false
| 0
| 0.2
| 0
| 0.3375
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
434e8c387b837394ff0f03da5e59c67d77ad7f7c
| 7,456
|
py
|
Python
|
experimental/attentive_uncertainty/toy_regression/datasets.py
|
miksu/edward2
|
973acdb23701f320ebaee8a56fc44d4414acfa4e
|
[
"Apache-2.0"
] | null | null | null |
experimental/attentive_uncertainty/toy_regression/datasets.py
|
miksu/edward2
|
973acdb23701f320ebaee8a56fc44d4414acfa4e
|
[
"Apache-2.0"
] | null | null | null |
experimental/attentive_uncertainty/toy_regression/datasets.py
|
miksu/edward2
|
973acdb23701f320ebaee8a56fc44d4414acfa4e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses real and synthetic datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import collections
import tensorflow as tf
NPRegressionDescription = collections.namedtuple(
"NPRegressionDescription",
("context_x", "context_y", "target_x", "target_y"))
class GPCurvesReader(object):
"""Generates curves using a Gaussian Process (GP).
Supports vector inputs (x) and vector outputs (y). Kernel is
mean-squared exponential, using the x-value l2 coordinate distance scaled by
some factor chosen randomly in a range. Outputs are independent gaussian
processes.
"""
def __init__(self,
batch_size,
max_num_context,
x_size=1,
y_size=1,
l1_scale=0.6,
sigma_scale=1.0,
random_kernel_parameters=False,
testing=False):
"""Creates a regression dataset of functions sampled from a GP.
Args:
batch_size: An integer.
max_num_context: The max number of observations in the context.
x_size: Integer >= 1 for length of "x values" vector.
y_size: Integer >= 1 for length of "y values" vector.
l1_scale: Float; typical scale for kernel distance function.
sigma_scale: Float; typical scale for variance.
random_kernel_parameters: If `True`, the kernel parameters (l1 and sigma)
are sampled uniformly within [0.1, l1_scale] and [0.1, sigma_scale].
testing: Boolean that indicates whether we are testing. If so there are
more targets for visualization.
"""
self._batch_size = batch_size
self._max_num_context = max_num_context
self._x_size = x_size
self._y_size = y_size
self._l1_scale = l1_scale
self._sigma_scale = sigma_scale
self._random_kernel_parameters = random_kernel_parameters
self._testing = testing
def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):
"""Applies the Gaussian kernel to generate curve data.
Args:
xdata: Tensor of shape [B, num_total_points, x_size] with
the values of the x-axis data.
l1: Tensor of shape [B, y_size, x_size], the scale
parameter of the Gaussian kernel.
sigma_f: Tensor of shape [B, y_size], the magnitude
of the std.
sigma_noise: Float, std of the noise that we add for stability.
Returns:
The kernel, a float tensor of shape
[B, y_size, num_total_points, num_total_points].
"""
num_total_points = tf.shape(xdata)[1]
# Expand and take the difference
xdata1 = tf.expand_dims(xdata, axis=1) # [B, 1, num_total_points, x_size]
xdata2 = tf.expand_dims(xdata, axis=2) # [B, num_total_points, 1, x_size]
diff = xdata1 - xdata2 # [B, num_total_points, num_total_points, x_size]
# [B, y_size, num_total_points, num_total_points, x_size]
norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])
norm = tf.reduce_sum(
norm, -1) # [B, data_size, num_total_points, num_total_points]
# [B, y_size, num_total_points, num_total_points]
kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5 * norm)
# Add some noise to the diagonal to make the cholesky work.
kernel += (sigma_noise**2) * tf.eye(num_total_points)
return kernel
def generate_curves(self, num_context=None):
"""Builds the op delivering the data.
Generated functions are `float32` with x values between -2 and 2.
Args:
num_context: Number of context points. If None, chosen randomly.
Returns:
A `NPRegressionDescription` namedtuple.
"""
if num_context is None:
num_context = tf.random_uniform(
shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32)
# If we are testing we want to have more targets and have them evenly
# distributed in order to plot the function.
if self._testing:
num_target = 400
num_total_points = num_target
x_values = tf.tile(
tf.expand_dims(tf.range(-2., 2., 1. / 100, dtype=tf.float32), axis=0),
[self._batch_size, 1])
x_values = tf.expand_dims(x_values, axis=-1)
# During training the number of target points and their x-positions are
# selected at random
else:
num_target = tf.random_uniform(shape=(), minval=0,
maxval=self._max_num_context - num_context,
dtype=tf.int32)
num_total_points = num_context + num_target
x_values = tf.random_uniform(
[self._batch_size, num_total_points, self._x_size], -2, 2)
# Set kernel parameters
# Either choose a set of random parameters for the mini-batch
if self._random_kernel_parameters:
l1 = tf.random_uniform([self._batch_size, self._y_size,
self._x_size], 0.1, self._l1_scale)
sigma_f = tf.random_uniform([self._batch_size, self._y_size],
0.1, self._sigma_scale)
# Or use the same fixed parameters for all mini-batches
else:
l1 = tf.ones(shape=[self._batch_size, self._y_size,
self._x_size]) * self._l1_scale
sigma_f = tf.ones(shape=[self._batch_size,
self._y_size]) * self._sigma_scale
# Pass the x_values through the Gaussian kernel
# [batch_size, y_size, num_total_points, num_total_points]
kernel = self._gaussian_kernel(x_values, l1, sigma_f)
# Calculate Cholesky, using double precision for better stability:
cholesky = tf.cast(tf.cholesky(tf.cast(kernel, tf.float64)), tf.float32)
# Sample a curve
# [batch_size, y_size, num_total_points, 1]
y_values = tf.matmul(
cholesky,
tf.random_normal([self._batch_size, self._y_size, num_total_points, 1]))
# [batch_size, num_total_points, y_size]
y_values = tf.transpose(tf.squeeze(y_values, 3), [0, 2, 1])
if self._testing:
# Select the targets
target_x = x_values
target_y = y_values
# Select the observations
idx = tf.random_shuffle(tf.range(num_target))
context_x = tf.gather(x_values, idx[:num_context], axis=1)
context_y = tf.gather(y_values, idx[:num_context], axis=1)
else:
# Select the targets which will consist of the context points as well as
# some new target points
target_x = x_values[:, :num_target + num_context, :]
target_y = y_values[:, :num_target + num_context, :]
# Select the observations
context_x = x_values[:, :num_context, :]
context_y = y_values[:, :num_context, :]
return NPRegressionDescription(
context_x=context_x,
context_y=context_y,
target_x=target_x,
target_y=target_y)
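# --- Hedged usage sketch (not part of the original file; assumes a TF1-style graph/session
# environment, consistent with the tf.random_uniform / tf.cholesky calls above) ---
#   reader = GPCurvesReader(batch_size=16, max_num_context=50)
#   curves = reader.generate_curves()            # NPRegressionDescription of tensors
#   with tf.Session() as sess:
#       ctx_x, ctx_y = sess.run([curves.context_x, curves.context_y])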
| 37.467337
| 80
| 0.666309
| 1,062
| 7,456
| 4.428437
| 0.245763
| 0.039124
| 0.068467
| 0.034446
| 0.233893
| 0.143525
| 0.088454
| 0.073783
| 0.065703
| 0.015735
| 0
| 0.016679
| 0.244099
| 7,456
| 198
| 81
| 37.656566
| 0.817779
| 0.450644
| 0
| 0.056818
| 0
| 0
| 0.01463
| 0.005903
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034091
| false
| 0
| 0.068182
| 0
| 0.136364
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
434ee97c218201d658ac3ee9f3df8bd8d8383c79
| 1,287
|
py
|
Python
|
critiquebrainz/frontend/views/index.py
|
shagun6/critiquebrainz
|
b7ae41fb09ff4dd4e34847b294fbee4ccc76bad5
|
[
"Apache-2.0"
] | null | null | null |
critiquebrainz/frontend/views/index.py
|
shagun6/critiquebrainz
|
b7ae41fb09ff4dd4e34847b294fbee4ccc76bad5
|
[
"Apache-2.0"
] | null | null | null |
critiquebrainz/frontend/views/index.py
|
shagun6/critiquebrainz
|
b7ae41fb09ff4dd4e34847b294fbee4ccc76bad5
|
[
"Apache-2.0"
] | 1
|
2019-10-20T05:48:53.000Z
|
2019-10-20T05:48:53.000Z
|
from flask import Blueprint, render_template
from flask_babel import format_number
import critiquebrainz.db.users as db_users
import critiquebrainz.db.review as db_review
from bs4 import BeautifulSoup
from markdown import markdown
DEFAULT_CACHE_EXPIRATION = 10 * 60 # seconds
frontend_bp = Blueprint('frontend', __name__)
@frontend_bp.route('/')
def index():
# Popular reviews
popular_reviews = db_review.get_popular(6)
for review in popular_reviews:
# Preparing text for preview
preview = markdown(review['text'], safe_mode="escape")
review['preview'] = ''.join(BeautifulSoup(preview, "html.parser").findAll(text=True))
# Recent reviews
recent_reviews, _ = db_review.list_reviews(sort='created', limit=9)
# Statistics
review_count = format_number(db_review.get_count(is_draft=False))
user_count = format_number(db_users.total_count())
return render_template('index/index.html', popular_reviews=popular_reviews, recent_reviews=recent_reviews,
reviews_total=review_count, users_total=user_count)
@frontend_bp.route('/about')
def about():
return render_template('index/about.html')
@frontend_bp.route('/guidelines')
def guidelines():
return render_template('index/guidelines.html')
| 31.390244
| 110
| 0.740482
| 163
| 1,287
| 5.576687
| 0.380368
| 0.044004
| 0.049505
| 0.082508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006446
| 0.156177
| 1,287
| 40
| 111
| 32.175
| 0.830571
| 0.059052
| 0
| 0
| 0
| 0
| 0.094606
| 0.017427
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.24
| 0.08
| 0.48
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
434fad48264cdf3b340402e86c40cd6b6db05bc8
| 2,406
|
py
|
Python
|
Enigma/Enigma-master/GBS/gbsHelper.py
|
Q-Alpha/Hackathon2020
|
c0ed45b4c1cc4f475f83786e641b859dad94f863
|
[
"MIT"
] | 12
|
2020-07-23T17:11:22.000Z
|
2022-02-03T12:44:56.000Z
|
Enigma/Enigma-master/GBS/gbsHelper.py
|
Q-Alpha/Hackathon2020
|
c0ed45b4c1cc4f475f83786e641b859dad94f863
|
[
"MIT"
] | 1
|
2020-07-28T13:35:51.000Z
|
2020-07-28T13:35:51.000Z
|
Enigma/Enigma-master/GBS/gbsHelper.py
|
Q-Alpha/Hackathon2020
|
c0ed45b4c1cc4f475f83786e641b859dad94f863
|
[
"MIT"
] | 25
|
2020-07-22T14:32:17.000Z
|
2021-09-08T11:43:55.000Z
|
import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.utils import random_interferometer
from strawberryfields.apps import data, sample, subgraph, plot
import plotly
import networkx as nx
import numpy as np
class GBS:
def __init__(self, samples =[], min_pho = 16, max_pho = 30, subgraph_size = 8, max_count = 2000):
self.samples = samples
self.min_pho = min_pho
self.max_pho = max_pho
self.subgraph_size = subgraph_size
self.max_count = max_count
def graphDensity(self, samples, min_pho, max_pho, subgraph_size, max_count):
# NOTE: `pl_graph` (the networkx graph the GBS samples were drawn from) is not defined
# in this file; it must be provided by the caller or module scope before this runs.
dense = subgraph.search(samples, pl_graph, subgraph_size, min_pho, max_count=max_count)
dense_freq = []
for k in range(subgraph_size, min_pho+1):
dense_freq.append([k,len(dense[k])])
return dense, dense_freq
def graphFreqScore(self, d_freqs, max_freq):
x,y = [], []
for i in range(len(d_freqs)):
for j in range(len(d_freqs[i])):
n,f = d_freqs[i][j][0],d_freqs[i][j][1]
x.append(n*f)
N = len(d_freqs[i])
y.append((1/max_freq)*(np.sum(x)/N))
x = []
min_y = np.min(y)
y = [min_y/x for x in y]
return y, y.index(max(y))
def runJob(self, eng):
num_subsystem = 8
prog = sf.Program(num_subsystem, name="remote_job")
U = random_interferometer(4)
with prog.context as q:
# Initial squeezed states
# Allowed values are r=1.0 or r=0.0
ops.S2gate(1.0) | (q[0], q[4])
ops.S2gate(1.0) | (q[1], q[5])
ops.S2gate(1.0) | (q[3], q[7])
# Interferometer on the signal modes (0-3)
ops.Interferometer(U) | (q[0], q[1], q[2], q[3])
ops.BSgate(0.543, 0.123) | (q[2], q[0])
ops.Rgate(0.453) | q[1]
ops.MZgate(0.65, -0.54) | (q[2], q[3])
# *Same* interferometer on the idler modes (4-7)
ops.Interferometer(U) | (q[4], q[5], q[6], q[7])
ops.BSgate(0.543, 0.123) | (q[6], q[4])
ops.Rgate(0.453) | q[5]
ops.MZgate(0.65, -0.54) | (q[6], q[7])
ops.MeasureFock() | q
eng = eng
results =eng.run(prog, shots=10)
# state = results.state
# measurements = results.samples
return results.samples
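# --- Hedged usage note (not part of the original file; engine/device names are assumptions) ---
#   gbs = GBS()
#   eng = sf.RemoteEngine("X8")          # requires Xanadu cloud credentials
#   photon_samples = gbs.runJob(eng)
# graphDensity() additionally needs the graph the samples were generated from
# (see the `pl_graph` note above) before it can be used.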
| 34.869565
| 101
| 0.554032
| 365
| 2,406
| 3.528767
| 0.29863
| 0.02795
| 0.016304
| 0.025621
| 0.13354
| 0.052795
| 0.052795
| 0
| 0
| 0
| 0
| 0.05622
| 0.305071
| 2,406
| 68
| 102
| 35.382353
| 0.714115
| 0.082294
| 0
| 0
| 0
| 0
| 0.004543
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.134615
| 0
| 0.288462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
434fcaaddceb714a13ca57fae4621f94efbd1d3d
| 10,781
|
py
|
Python
|
happy/HappyNodeJoin.py
|
jenniexie/happy
|
6ba01586e20bb3e4f92e180fd8dce3752519f7c9
|
[
"Apache-2.0"
] | null | null | null |
happy/HappyNodeJoin.py
|
jenniexie/happy
|
6ba01586e20bb3e4f92e180fd8dce3752519f7c9
|
[
"Apache-2.0"
] | null | null | null |
happy/HappyNodeJoin.py
|
jenniexie/happy
|
6ba01586e20bb3e4f92e180fd8dce3752519f7c9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements the HappyNodeJoin class, through which a virtual node joins a network.
#
# When a node joins a network, a TAP interface is created in the node and in
# the network. Then TUN is set up on the node.
#
import os
import sys
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.utils.IP import IP
from happy.HappyLink import HappyLink
from happy.HappyNetwork import HappyNetwork
from happy.HappyNode import HappyNode
import happy.HappyLinkAdd
import happy.HappyNodeAddress
import happy.HappyNodeRoute
options = {}
options["quiet"] = False
options["node_id"] = None
options["tap"] = False
options["network_id"] = None
options["fix_hw_addr"] = None
options["customized_eui64"] = None
def option():
return options.copy()
class HappyNodeJoin(HappyLink, HappyNode, HappyNetwork):
"""
Assigns a virtual node to a specific network.
happy-node-join [-h --help] [-q --quiet] [-i --id <NODE_NAME>]
[-n --network <NETWORK_NAME>] [-m --mac <HW_ADDR>]
[-c --customizedeui64 <CUST_EUI64>] [-p --tap]
-i --id Required. Node to be added to a network. Find using
happy-node-list or happy-state.
-n --network Required. Network to add the node to. Find using
happy-network-list or happy-state.
-m --mac The MAC hardware address for the node.
-c --customizedeui64 The EUI64 address for the node.
-p --tap Configure the link between the node and the network as an
L2 TAP device with a virtual bridge. Omit this parameter to
default to an L3 TUN configuration for normal IP routing.
Example:
$ happy-node-join ThreadNode HomeThread
Adds the ThreadNode node to the HomeThread network.
$ happy-node-join -i onhub -n HomeWiFi -m 5
Adds the onhub node to the HomeWiFi network with a MAC hardware address of
00:00:00:00:00:05.
$ happy-node-join -i onhub -n HomeWiFi -c 00:00:00:00:00:00:00:05
Adds the onhub node to the HomeWiFi network with an EUI64 address of
00:00:00:00:00:00:00:05.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
HappyNetwork.__init__(self)
HappyNode.__init__(self)
HappyLink.__init__(self)
self.quiet = opts["quiet"]
self.node_id = opts["node_id"]
self.tap = opts["tap"]
self.network_id = opts["network_id"]
self.fix_hw_addr = opts["fix_hw_addr"]
self.customized_eui64 = opts["customized_eui64"]
if not self.fix_hw_addr and opts["customized_eui64"]:
self.fix_hw_addr = self.customized_eui64[6:]
self.customized_eui64 = self.customized_eui64.replace(':', '-')
def __pre_check(self):
# Check if the name of the node is given
if not self.node_id:
emsg = "Missing name of the virtual node that should join a network."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if the name of the network is given
if not self.network_id:
emsg = "Missing name of the virtual network that be joined by a virtual node."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if node exists
if not self._nodeExists():
emsg = "virtual node %s does not exist." % (self.node_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
# Check if network exists
if not self._networkExists():
emsg = "virtual network %s does not exist." % (self.network_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
# Check if node already joined that network
if self.network_id in self.getNodeNetworkIds():
emsg = "virtual node %s is already part of %s network." % (self.node_id, self.network_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
self.fix_hw_addr = self.fixHwAddr(self.fix_hw_addr)
# Check if HW MAC address is valid
if self.fix_hw_addr is not None and self.fix_hw_addr.count(":") != 5:
emsg = "virtual node %s get invalid MAC HW address %s." % (self.node_id, self.fix_hw_addr)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def __create_link(self):
options = happy.HappyLinkAdd.option()
options["quiet"] = self.quiet
options["type"] = self.getNetworkType()
options["tap"] = self.tap
link = happy.HappyLinkAdd.HappyLinkAdd(options)
ret = link.run()
self.link_id = ret.Data()
self.readState()
def __post_check_1(self):
# Ensure that the link is saved in the state
if self.link_id not in self.getLinkIds():
emsg = "Link %s does not exist." % (self.link_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def __get_node_interface_info(self):
self.link_type = self.getLinkType(self.link_id)
self.link_network_end = self.getLinkNetworkEnd(self.link_id)
self.link_node_end = self.getLinkNodeEnd(self.link_id)
self.node_interface_name = self.getNodeInterfaceName(self.node_id, self.link_type)
def __connect_to_network(self):
self.moveInterfaceToNamespace(self.link_network_end, self.network_id)
# Attach to bridge
cmd = "brctl addif " + self.uniquePrefix(self.network_id) + " " + self.link_network_end
cmd = self.runAsRoot(cmd)
ret = self.CallAtNetwork(self.network_id, cmd)
def __connect_to_node(self):
if not self.isNodeLocal(self.node_id):
if self.getLinkTap(self.link_id):
self.moveLwipInterfaceToNamespace(self.link_id, self.node_id)
else:
self.moveInterfaceToNamespace(self.link_node_end, self.node_id)
cmd = "ip link set " + self.link_node_end
cmd += " name " + self.node_interface_name
if self.fix_hw_addr is not None:
cmd += " address " + self.fix_hw_addr
cmd = self.runAsRoot(cmd)
ret = self.CallAtNode(self.node_id, cmd)
def __nmconf(self):
if not self.isNodeLocal(self.node_id):
return
if not self.tap:
cmd = "nmcli dev disconnect iface " + self.node_interface_name
cmd = self.runAsRoot(cmd)
ret = self.CallAtHost(cmd)
def __check_node_hw_addr(self):
hw_addr = self.getHwAddress(self.node_interface_name, self.node_id)
hw_addr_int = IP.mac48_string_to_int(hw_addr)
if (hw_addr_int & (1 << 41)):
hw_addr_int = hw_addr_int & ~(1 << 41)
new_hw_addr = IP.mac48_string_to_int(hw_addr_int)  # NOTE: hw_addr_int is an int here; this likely needs an int-to-MAC-string conversion instead
cmd = "ip link set " + self.node_interface_name + " address " + str(new_hw_addr)
cmd = self.runAsRoot(cmd)
r = self.CallAtNode(self.node_id, cmd)
def __post_check_2(self):
return
def __bring_up_interface(self):
self.bringLinkUp(self.link_id, self.node_interface_name, self.node_id, self.network_id)
def __add_new_interface_state(self):
self.setLinkNetworkNodeHw(self.link_id, self.network_id, self.node_id, self.fix_hw_addr)
new_network_interface = {}
self.setNetworkLink(self.network_id, self.link_id, new_network_interface)
new_node_interface = {}
new_node_interface["link"] = self.link_id
new_node_interface["type"] = self.link_type
new_node_interface["ip"] = {}
if self.customized_eui64:
new_node_interface["customized_eui64"] = self.customized_eui64
self.setNodeInterface(self.node_id, self.node_interface_name, new_node_interface)
def __assign_network_addresses(self):
network_prefixes = self.getNetworkPrefixes(self.network_id)
for prefix in network_prefixes:
options = happy.HappyNodeAddress.option()
options["quiet"] = self.quiet
options["node_id"] = self.node_id
options["interface"] = self.node_interface_name
if IP.isIpv6(prefix):
nid = self.getInterfaceId(self.node_interface_name, self.node_id)
else:
nid = self.getNextNetworkIPv4Id(prefix, self.network_id)
options["address"] = self.getNodeAddressOnPrefix(prefix, nid)
options["add"] = True
addrctrl = happy.HappyNodeAddress.HappyNodeAddress(options)
ret = addrctrl.run()
def __load_network_routes(self):
routes = self.getNetworkRoutes(self.network_id)
for route_to in routes.keys():
route_record = self.getNetworkRoute(route_to, self.network_id)
options = happy.HappyNodeRoute.option()
options["quiet"] = self.quiet
options["add"] = True
options["node_id"] = self.node_id
options["to"] = route_to
options["via"] = route_record["via"]
options["prefix"] = route_record["prefix"]
noder = happy.HappyNodeRoute.HappyNodeRoute(options)
ret = noder.run()
def run(self):
with self.getStateLockManager():
self.__pre_check()
self.__create_link()
self.__post_check_1()
self.__get_node_interface_info()
self.__connect_to_network()
self.__connect_to_node()
self.__nmconf()
self.__check_node_hw_addr()
self.__bring_up_interface()
self.__post_check_2()
self.__add_new_interface_state()
self.writeState()
self.__assign_network_addresses()
self.__load_network_routes()
return ReturnMsg(0)
| 35.463816
| 102
| 0.626472
| 1,384
| 10,781
| 4.666185
| 0.195087
| 0.04088
| 0.037163
| 0.022143
| 0.324094
| 0.227005
| 0.177454
| 0.111799
| 0.078043
| 0.065655
| 0
| 0.014068
| 0.274743
| 10,781
| 303
| 103
| 35.580858
| 0.811869
| 0.228457
| 0
| 0.180233
| 0
| 0
| 0.096893
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.063953
| 0.011628
| 0.186047
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
434feac939e1b8979a11ce2e5fb237601f1fd855
| 46,866
|
py
|
Python
|
__init__.py
|
SDRAST/Data_Reduction
|
f007d716b5c28c086910a81206cffaf37ff6368c
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
SDRAST/Data_Reduction
|
f007d716b5c28c086910a81206cffaf37ff6368c
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
SDRAST/Data_Reduction
|
f007d716b5c28c086910a81206cffaf37ff6368c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Modules to support data reduction in Python.
The main purpose of the base module ``Data_Reduction`` is to provide a
superclass with a good set of attributes and methods to cover all common needs.
The base module is also able to read data from a text file as a ``numpy``
structured array. This is done with a class called ``DataGetterMixin`` which
must be invoked after the base class has been initiated.
The module function ``examine_text_data_file()`` reveals the structure of the
file(s) that provide the data.
Examples
========
Here we initiate a base class after mixing in the data getter. The first line of
the file has column names but the first three columns are all under one
name ``UTC`` so we specify column widths to consider the first three columns
to be one column. We use the names from the first line of the file, which
could have been done with an ``open()``, ``readline()``, and ``close()``::
mixIn(Observation, DataGetterMixin)
obs = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs.open_datafile('t12127.10',
delimiter=[17,16,3,11,7,9,8,2,6],
skip_header=1,
names="UTC Epoch Chan Tsys Int Az El Diode Level".split())
Now the data getter is already mixed in to Observation so we don't need to do
it again. In this case we specify the names of the columns, changing ``Int`` to
``Integr``::
obs2 = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs2.open_datafile('t12127.10', skip_header=1,
names="Year DOY UTC Epoch Chan Tsys Integr Az El Diode Level".split())
The class Map inherits from DataGetterMixin, so no explicit mixin required::
obsmap = Map(dss=84, date="2020/163", project="SolarPatrol")
obsmap.initialize('sim-venus.dat', source="Venus")
Let's examine ``obsmap``. We have only one signal column::
In [3]: obsmap.channel.keys()
Out[3]: dict_keys(['xl'])
In [4]: obsmap.channel['xl'].keys()
Out[4]: dict_keys(['freq', 'bw', 'pol', 'ifmode', 'atten', 'power'])
"""
# standard Python modules
import datetime
import glob
import h5py
import logging
import math
import matplotlib.dates as MPLd
import numpy as NP
import os
import re
import readline
import scipy.interpolate
import scipy.fftpack
import Astronomy as A
import Astronomy.DSN_coordinates as coords
import Astronomy.Ephem as AE
import DatesTimes as DT
import local_dirs
import Math.clusters as VQ # vector quantization
import support
# enable raw_input Tab completion
readline.parse_and_bind("tab: complete")
logger = logging.getLogger(__name__) # module logger
class Observation(object):
"""
superclass for a data structure and methods
Attributes
==========
aliases - (dict) data keys to replace those in original data
channel - (dict) signal paths, e.g., different freqs and pols
data - (dict) original data, e.g., read from file or database
DOY - (int) day of year of observation
end - (float) UNIX time at the end
latitude - (float) from obs
logger - (logging.Logger)
longitude - (float) from obs
name - (str) user assigned, defaults to YEAR/DOY
numdata - (int) number of data samples
obs - (AE.DSS) observatory
session - (Session) set of observations, parent to Observation
session_path - (str) directory for session files
start - (float) UNIX time at the beginning
year - (int) year of observation
**Reserved Column Names**
These column names are recognized. They are also the keys for attribute
``data``.
These quantities must be present in some form::
unixtime (float) UNIX time in sec
chan_name (str) channel name
integr (float) integration (exposure) in sec
azel (float,float) azimuth and elevation in decimal deg
power (float) power level if only a single channel
Optional::
diode (float) 0 or power in K (integers OK)
level (float) (unidentified -- in ``tlog`` table)
cryotemp (float) cryostat temp in K
windspeed (float) km/hr
winddir (float) deg
ambtemp (float) deg C
pressure (float) mbar
Columns to be computed::
mpldatenum (float) matplotlib ``datenum``
Alternative for ``power``::
tsys (float) system temperature (calibrated power)
top (float) alternative for ``tsys`` (used in DSN)
vfc_counts (int) VFC counts (rate times ``integr``)
Any column with a name which is not a reserved name is assumed to be
power-like data from the channel with that name, unless that name is in a
list provided to the argument ``ignore`` in the method ``get_data_channels``
of the class ``DataGetterMixin``.
Alternative for ``unixtime``::
year (int) year of observation
doy (int) day of year
utc (str) HH:MM:SS
timestr (str) something like 2020/06/14/14:22:21.00
Alternative for ``chan_name``::
chan (int) index in receiver channel names
Alternative for ``azel``::
radec (float,float) precessed right ascension in decimal hours and
precessed declination in decimal deg
radec1950 (float,float) mean right ascension in decimal hours and
mean declination in decimal deg at epoch
radec2000 (float,float) mean right ascension in decimal hours and
mean declination at epoch in decimal deg
az (float) azimuth in decimal deg
el (float) elevation in decimal deg
ra (float) precessed right ascension in decimal hours
dec (float) precessed declination in decimal deg
ra1950 (float) mean right ascension in decimal hours at epoch
dec1950 (float) mean declination in decimal deg at epoch
ra2000 (float) mean right ascension in decimal hours at epoch
dec2000 (float) mean declination in decimal deg at epoch
Notes
=====
* The ``data`` structure is a dict.
* The value of a ``data`` item is either a numpy array or a object
like ``float``, ``int``, or ``str``.
* The keys have reserved words defined above and will be lowercase.
* Items with other keys may be added, typically by a child class.
* Coordinates shall be in pairs, e.g. ``azel``, ``radec``. (This way you
never get one without the other.)
"""
reserved = ['unixtime','chan_name','integr','az','el','year','doy','utc',
'timestr','chan','tsys','top','diode','level','cryotemp',
'windspeed','winddir','ambtemp','pressure',
'ra','dec','ra1950','dec1950','ra2000','dec2000']
power_keys = ['tsys', 'top', 'vfc_counts', 'power']
def __init__(self, parent=None, name=None, dss=None,
date=None, project=None):
"""
Create a base Observation object.
This is not meant to be initialized by itself. A subclass generally
determines how data are read in. However, method ``initialize()``
provides a basic data read capability using ``numpy.genfromtxt()``
and creates the object's data structure.
Args:
parent (Session): session to which this observation belongs
name (str): an identifier; default is station ID + "obs"
dss (int): station number
date (str): "YEAR/DOY"
project (str): directory under /usr/local/projects
"""
self.logger = logging.getLogger(logger.name+".Observation")
self.session = parent
# observatory must be specified
if dss:
self.obs = coords.DSS(dss)
self.longitude = self.obs.long*180/math.pi # deg
self.latitude = self.obs.lat*180/math.pi # deg
else:
self.logger.error("__init__: requires observatory location")
raise Exception("Where were the data taken?")
# give the object a name
if name:
self.name = name
else:
self.name = "DSS"+str(dss)+"obs"
self.logger = logging.getLogger(logger.name+".Observation")
# the observation was part of some project
if project:
self.project = project
else:
self.logger.error("__init__: requires a project")
raise Exception("Where are the session's working files?")
# the observation was done on some date
if date:
y,d = date.split('/')
self.year = int(y);
self.DOY = int(d)
projdatapath, self.sessionpath, rawdatapath = \
get_obs_dirs(project, dss, self.year, self.DOY,
datafmt=None)
self.logger.debug("__init__: session path: %s", self.sessionpath)
else:
self.logger.error("__init__: requires a date")
raise Exception("When were the date taken?")
# accommodate subclass arguments
self.aliases = {}
# what I really want to do here is see if this was called by a subclass,
# in which case I do not try to get the channel info until this
# initialization has finished.
#
#if hasattr(self, "get_data_channels"):
# channels = self, get_data_channels()
# self.make_channels(channels)
#else:
# self.logger.info("__init__: initialize() may now be called")
def splitkey(self, longlat):
"""
Checks for presence of coordinates in pairs or singles
@param longlat : "azel", or "radec", or "radecEPOC"
@type longlat : str
"""
longitude = longlat[:2] # 'az' or 'ra'
if len(longlat) > 5: # has epoch
epoch = longlat[-4:]
longitude += epoch
latitude = longlat[2:-4]+epoch
else: # date of observation
latitude = longlat[2:]
epoch = None
return longitude, latitude, epoch
def check_for(self, data, longlat):
"""
Checks for separate coordinates and splits if coord pairs
Args:
data (dict): attribute ``data``
longlat (str): "azel", or "radec", or "radecEPOC"
"""
longitude, latitude, epoch = self.splitkey(longlat)
if longitude in data.dtype.names and \
latitude in data.dtype.names:
self.logger.debug("check_for: data has %s and %s", longitude, latitude)
self.data[longitude] = data[longitude]
self.data[latitude] = data[latitude]
return True
elif longlat in data.dtype.names:
self.logger.debug("check_for: data has %s", longlat)
self.data[longitude], self.data[latitude] = zip(*data[longlat])  # transpose coordinate pairs (the old map(None, ...) idiom is Python 2 only)
self.logger.debug("check_for: added %s and %s to data",
longitude, latitude)
return True
else:
# coords need to be computed from other coords
return False
def unpack_to_complex(self, rawdata):
"""
Converts a sequence of alternating real/imag samples to complex
@param rawdata : alternating real and imaginary bytes
@type rawdata : numpy array of signed int8
@return: numpy array of complex
"""
datalen = len(rawdata)
real = rawdata[0:datalen:2]
imag = rawdata[1:datalen:2]
data = real + 1j*imag
return data
def sideband_separate(self, data):
"""
Converts a complex spectrum array and returns two reals with USB and LSB
This applies a Hilbert transform to the complex data.
"""
usb = (data.real + scipy.fftpack.hilbert(data).imag)
lsb = (scipy.fftpack.hilbert(data).real + data.imag)
return lsb,usb
class Channel(support.PropertiedClass):
"""
Class for a signal path
"""
def __init__(self, parent, name, freq=None, bw=None, pol=None, IFtype=None,
atten=None):
"""
Notes
=====
The properties can be accessed as if the class were a dict.
Arguments
=========
freq:float or int: center frequency in MHz
bw:float or int: bandwidth in MHz
pol:str: polarization code
"""
support.PropertiedClass.__init__(self)
self.parent = parent
self.logger = logging.getLogger(self.parent.name+".Channel")
self.logger.debug("__init__: created %s", self.logger.name)
self.logger.debug("__init__: parent is %s", self.parent)
self.name = name
self.data['freq'] = freq
self.data['bw'] = bw
self.data['pol'] = pol
self.data['ifmode'] = IFtype
self.data['atten'] = atten
class DataGetterMixin(object):
"""
Class for getting data from a CSV file.
"""
def initialize(self, filename, delimiter=" ", names=True, skip_header=0,
source=None):
"""
Get the data and make a data structure for the observations.
This is not included by default in ``__init__()`` to keep it simple for
subclasses.
Args:
filename (str): name only, required; the path is provided
delimiter (str): what separates the columns
names (bool): the first line has column names
skip_header (int) : number of rows to skip
"""
# get the data
data = self.open_datafile(filename, delimiter=delimiter, names=names,
skip_header=skip_header)
# get the signal columns and names
metadata, signals = self.get_data_channels(data)
# create Channel objects for the signal properties
self.make_channels(signals)
# create the data structure
self.make_data_struct(data, metadata, signals)
# compute the offsets from the source center for each data point
if source:
self.get_offsets(source=source)
else:
self.logger.warning("initialize: no source specified; no offsets")
def open_datafile(self, filename, delimiter=" ", names=True, skip_header=0):
"""
Opens and reads a data file
This is used by ``Malargue`` (one data file) and ``GAVRT`` (one data file
for each signal).
Args:
filename (str): text data file name
delimiter (str): separator between columns (default: whitespace)
names (bool): file row has column names (default: True)
skip_header (int): number of rows to skip at beginning of file
Returns:
ndarray:
"""
data = NP.genfromtxt(self.sessionpath+filename,
delimiter=delimiter,
dtype=None,
names=names,
case_sensitive='lower',
skip_header=skip_header,
encoding=None)
return data
def get_data_channels(self, data, ignore=None):
"""
Gets or sets the names of the signal columns
Column names are separated into metadata and signals. Names in
``ignore`` are ignored. Names in ``aliases`` are replaced.
Args:
data (ndarray): data read from text file
ignore (list of str): columns to ignore; default None
Returns:
(list of str, list of str): metadata, signals
"""
names = data.dtype.names
metadata = []
signals = []
for name in names:
if ignore:
if name in ignore:
continue  # skip columns listed in ignore
if name.casefold() in map(str.casefold, self.aliases):
key = self.aliases[name].lower() # we use only lower case names
else:
key = name.lower()
self.logger.debug("get_data_channels: doing %s for %s", key, name)
if key in map(str.casefold, Observation.reserved):
if key.casefold() in ['top', 'tsys']:
signals.append(key)
else:
metadata.append(key)
else:
signals.append(key)
self.logger.debug("get_data_channels: signals: %s", signals)
self.logger.debug("get_data_channels: metadata: %s", metadata)
return metadata, signals
def make_data_struct(self, data, metadata, signals):
"""
Takes a text table with headers and converts it into a numpy ``ndarray``.
That means that a column can be extracted using `data[label]`.
Args
====
data: (ndarray) the data from the text file
metadata: (list of str) the column names for metadata
signals: (list of str) the column names for power-like data
"""
# get the known columns:
self.data = {}
self.numdata = len(data)
#self.logger.debug("make_data_struct: using aliases: %s", self.aliases)
# get columns that are not metadata; each has power for a channel
for signal in signals:
#self.logger.debug("make_data_struct: for signal: %s", signal)
#if signal in self.aliases.items():
# get the key in 'data' which matches 'value' in 'aliases'
# power = data[next(key for key, value in self.aliases.items()
# if value == signal)][idx]
#else:
# power = data[signal]
#self.channel[signal]['power'] = power
self.channel[signal]['power'] = data[signal]
# get UNIX time
if 'unixtime' in metadata:
if 'unixtime' in data.dtype.names:
self.data['unixtime'] = data['unixtime']
else:
# look up the equivalent of UNIX time in the data table
self.data['unixtime'] = data[next(key
for key, value in self.aliases.items()
if value == 'unixtime')]
# compute other convenient forms of time
self.data['datetime'] = [] # Python datetime.date
self.data['date_num'] = [] # matplotlib.dates date number
for idx in list(range(self.numdata)):
if 'unixtime' in data.dtype.names:
tm = data['unixtime'][idx]
else:
tm = data[next(key for key, value in self.aliases.items()
if value == 'unixtime')][idx]
dt = datetime.datetime.utcfromtimestamp(tm)
self.data['datetime'].append(dt)
self.data['date_num'].append(MPLd.date2num(dt))
self.start = self.data['unixtime'][0]
self.end = self.data['unixtime'][-1]
else:
# figure out how to process the time data columns
pass
# compute alternate coordinates
if self.check_for(data, 'azel'):
# azel exists; compute radec if needed; then radec2000 if needed
if self.check_for(data, 'radec'):
pass
else:
self.radec_from_azel()
if self.check_for(data, 'radec2000'):
# ra2000 and dec2000 already exist
pass
else:
self.radec2000_from_radec()
elif self.check_for(data, 'radec2000'):
# coordinates exist; compute back to azimuth and elevation
if self.check_for(data, 'radec'):
pass
else:
# compute observed RA and dec
self.radec_from_radec2000()
if self.check_for(data, 'azel'):
pass
else:
self.azel_from_radec()
# in here check for 'radec'
else:
self.logger.error("no coordinates found in data")
raise Exception("check INFO logging for columns found")
self.start = self.data['unixtime'].min()
self.end = self.data['unixtime'].max()
def make_channels(self, signals, props=None):
"""
Assign properties to the channels.
    The prop keys are "freq", "bw", "pol", "IFtype", and "atten".
Args:
props (dict of dicts): signal channel properties.
"""
self.channel = {}
for ch in signals:
chindex = signals.index(ch)
if props:
self.channel[ch] = self.Channel(self, ch,
freq =props[ch]['freq'],
bw =props[ch]['bw'],
pol =props[ch]['pol'],
IFtype=props[ch]['IFtype'],
atten =props[ch]['atten'])
else:
self.channel[ch] = self.Channel(self, ch)
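# Illustrative sketch of the data structure ``open_datafile`` returns:
# ``numpy.genfromtxt`` with ``names=True`` yields a structured array whose
# columns are addressed by name, which is what ``get_data_channels`` and
# ``make_data_struct`` rely on. This example reads from an in-memory string
# instead of a session file.
def _demo_structured_table():
  import io
  import numpy
  text = io.StringIO("unixtime az el tsys\n"
                     "1577836800.0 180.0 45.0 25.1\n"
                     "1577836801.0 180.1 45.0 25.3\n")
  table = numpy.genfromtxt(text, dtype=None, names=True, encoding=None)
  return table['unixtime'], table['tsys']   # columns selected by name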
class GriddingMixin(object):
"""
Class for all the data and methods associated with a raster scan map
    It is expected that the class into which these methods are mixed is already a
    subclass of ``Observation``, since they rely on ``Observation`` attributes.
Attrs:
cfg (dict):
data (numpy array): from ``Observation``
logger (logging.Logger): replaces ``Observation`` logger
name (str): replaces ``Observation`` name
session (Session):
source (str):
step (float): map step size
"""
def get_grid_stepsize(self, xy=None):
"""
Determine the stepsize of gridded data
This assumes xdec and dec data increase incrementally by 'stepsize'.
The sequences may repeat in a sawtooth-like series. The number of
    'xdec' and 'dec' points is several times the grid size.
Arguments:
xy (tuple or list) - X-array and Y-array (default Map.data)
"""
# get the absolute value of coordinate intervals
if xy:
dxdecs = abs(xy[0][1:] - xy[0][:-1])
ddecs = abs(xy[1][1:] - xy[1][:-1])
else:
dxdecs = abs(self.data['xdec_offset'][1:]-self.data['xdec_offset'][:-1])
ddecs = abs(self.data['dec_offset'][1:] -self.data['dec_offset'][:-1])
# form array of X,Y pairs
coords = NP.array(list(zip(dxdecs,ddecs)))
# expect two clusters (default)
cluster_pos = VQ.find_clusters(coords).round(4) # tenths of mdeg
# return the non-zero intervals
return cluster_pos[0].max(), cluster_pos[1].max()
def regrid(self, width=1.0, height=1.0, step=None, power_key=None):
"""
converts a map from observed coordinates to map coordinates
If ``step`` is not given then the step size will be the average step size
in X and the average step in Y. In this case, the effect is to make a
    regular grid if the original positions were not exact, e.g., because of pointing errors.
@param width : map width in deg
@type width : float
@param height : map height in deg
@type height : float
@param step : map step size in X and Y in deg
@type step : (float, float)
@param power_key : dict key of Z-value
@type power_key : str
"""
# what is the power-like quantity?
if power_key:
pass
else:
# take the first that matches
for key in Observation.power_keys:
if key in self.data:
power_key = key
self.logger.info("regrid: using '%s'", power_key)
break
else:
continue
if power_key:
pass
else:
self.logger.error("regrid: no power data key found")
return None
if step == None:
# use the original stepsize
self.xstep, self.ystep = self.get_grid_stepsize()
else:
self.xstep, self.ystep = step
self.data['grid_x'] = NP.arange(
-width/2, width/2+self.xstep/2, self.xstep/2)
self.data['grid_y'] = NP.arange(
-height/2,height/2+self.ystep/2, self.ystep/2)
self.logger.debug("regrid: grid shape is %dx%d", len(self.data['grid_x']),
len(self.data['grid_y']))
self.data['grid_z'] = {}
for chnl in self.channel:
self.logger.debug("regrid: processing %s", chnl)
points = list(zip(self.data['xdec_offset'],self.data['dec_offset']))
self.logger.debug("regrid: %d positions", len(points))
values = self.data[power_key][chnl]
self.logger.debug("regrid: %d values", len(values))
xi, yi = NP.meshgrid(self.data['grid_x'], self.data['grid_y'])
try:
self.data['grid_z'][chnl] = scipy.interpolate.griddata(points, values,
(xi, yi), method='nearest')
except ValueError as details:
self.logger.error("regrid: gridding failed: %s", str(details))
self.logger.debug("regrid: channel %s length of points is %d",
chnl, len(points))
self.logger.debug("regrid: channel %s length of values is %d", chnl,
len(values))
continue
def radec_from_azel(self):
"""
compute RA and dec from az and el
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
dt = self.data['datetime'][idx]
# format time as (YEAR, DOY.fff)
time_tuple = (dt.year,
DT.day_of_year(dt.year,dt.month,dt.day)
+ ( dt.hour
+ dt.minute/60.
+ dt.second/3600.
+ dt.microsecond/3600./1e6)/24.)
azimuth = self.data['az'][idx]
elevation = self.data['el'][idx]
# compute
ra,dec = A.AzEl_to_RaDec(azimuth, elevation,
self.latitude,
-self.longitude,
time_tuple)
RA.append(ra)
decs.append(dec)
      RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def radec2000_from_radec(self):
"""
compute RA2000 and dec2000 from observed RA and dec
"""
RA2000 = []; decs2000 = []; RAdec2000 = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
      ra = self.data['ra'][idx]
      dec = self.data['dec'][idx]
# compute
ra2000,dec2000 = A.apparent_to_J2000(MJD,UT,
ra, dec,
self.longitude, self.latitude)
RA2000.append(ra2000)
decs2000.append(dec2000)
RAdec2000.append((ra2000,dec2000))
self.data['ra2000'] = RA2000
    self.data['dec2000'] = decs2000
self.data['radec2000'] = RAdec2000
def radec_from_radec2000(self):
"""
compute apparent RA and dec. from J2000 RA and dec
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
ra2000 = self.data['ra2000'][idx]
dec2000 = self.data['dec2000'][idx]
# compute
ra, dec = A.J2000_to_apparent(MJD, UT,
ra2000*math.pi/12, dec2000*math.pi/180)
RA.append(ra)
decs.append(dec)
RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def azel_from_radec(self):
"""
compute azimuth and elevation from apparent right ascension and declination
"""
azs = []; els = []; azels = []
for idx in list(range(self.numdata)):
# setup
ra = self.data['ra'][idx]
dec = self.data['dec'][idx]
timetuple = self.data['datetime'][idx].timetuple()
year = timetuple.tm_year
doy = timetuple.tm_yday + (timetuple.tm_hour
+(timetuple.tm_min+timetuple.tm_sec/60)/60)/24
# compute
az, el = A.RaDec_to_AzEl(ra, dec,
self.latitude, self.longitude, (year,doy))
azs.append(az)
els.append(el)
azels.append((az,el))
self.data['az'] = azs
self.data['el'] = els
self.data['azel'] = azels
def get_offsets(self, source="Sun", xdec_ofst=0., dec_ofst=0.):
"""
Generates a map in coordinates relative to a source
If the source is the default, the position of the Sun will be computed for
    the time of each sample. (It would probably be worthwhile to do the same for
    planets as well.)
This adds elements with keys ``xdec_offset`` and ``dec_offset`` to the
attribute ``data``.
@param source : source at map center
@type source : ephem source instance
@param xdec_ofst : relative X-dec position of sample
@type xdec_ofst : float
@param dec_ofst : relative dec position of sample
@type dec_ofst : float
@return: (dxdecs,ddecs) in degrees
"""
if source.lower() == "sun":
src = AE.ephem.Sun()
else:
src = AE.calibrator(source)
self.data['dec_offset'] = []
self.data['xdec_offset'] = []
for count in range(len(self.data['unixtime'])):
dt = datetime.datetime.utcfromtimestamp(
self.data['unixtime'][count])
if type(src) == AE.Quasar:
pass
else:
src.compute(dt)
ra_center = src.ra*12/math.pi # hours
dec_center = src.dec*180/math.pi # degrees
decrad = src.dec
# right ascension increases to the left, cross-dec to the right
self.data['xdec_offset'].append(xdec_ofst -
(self.data['ra'][count] - ra_center)*15*math.cos(decrad) )
self.data['dec_offset'].append( dec_ofst +
self.data['dec'][count] - dec_center)
# change list to NP.array
self.data['xdec_offset'] = NP.array(self.data['xdec_offset'])
self.data['dec_offset'] = NP.array(self.data['dec_offset'])
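# Illustrative sketch of the gridding step in ``regrid`` above: scattered
# (xdec, dec) offsets and a power-like quantity are interpolated onto a regular
# grid with ``scipy.interpolate.griddata``. All values here are synthetic.
def _demo_regrid():
  import numpy
  import scipy.interpolate
  rng = numpy.random.default_rng(0)
  xdec = rng.uniform(-0.5, 0.5, 400)            # cross-dec offsets in degrees
  dec = rng.uniform(-0.5, 0.5, 400)             # dec offsets in degrees
  power = numpy.exp(-(xdec**2 + dec**2)/0.05)   # synthetic Gaussian source
  grid_x = numpy.arange(-0.5, 0.5, 0.02)
  grid_y = numpy.arange(-0.5, 0.5, 0.02)
  xi, yi = numpy.meshgrid(grid_x, grid_y)
  grid_z = scipy.interpolate.griddata(list(zip(xdec, dec)), power, (xi, yi),
                                      method='nearest')
  return grid_z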
class Map(Observation, GriddingMixin):
"""
Map class without special features for GAVRT and Malargue
Most of the methods are mixed in to avoid conflicting with subclasses
"""
def __init__(self, parent=None, name=None, dss=None, date=None, project=None):
"""
Create a Map object
Args:
parent (Session): an observing session to which this belongs
name (str): an identifier, like a scan number
dss (int): station where the data were taken
date (str): date of observation as "YEAR/DOY"
project (str): project for which this observation was made
"""
Observation.__init__(self, parent=parent, name=name, dss=dss, date=date,
project=project)
class Recording(h5py.File):
"""
Class for raw data
This is typically the contents of a data file transcribed into a standard
format. It may be the data of one Observation object, or data for multiple
Observation objects, or contain part of the data for an Observation object.
  If the data being curated are not part of a standard project and are not in a
  standard place, then a path and file name must be given explicitly.
"""
def __init__(self, session=None, path=None, date=None, dss=None, name=None):
"""
Initialize a metadata container and data directory
Args
====
    session (Session): required, unless ``path`` and ``name`` are given
    path (str)       : location of raw data files
    date (str)       : observation date as "YEAR/DOY"
    dss (int)        : DSN station number
    name (str)       : data file name; generated from the session if not given
"""
self.logger = logging.getLogger(logger.name+".Recording")
if session:
self.session = session
if not name:
name = session.project + "-" + str(session.year) + "-" + \
('%03d' % session.doy) + "-dss" + str(session.dss)+".info"
self.year = session.year
self.doy = session.doy
self.dss = session.dss
self.project = session.project
self.session_dir = session.session_dir
elif path and name:
self.session = Session() # for its methods and attributes
self.session_dir = path
self.name = name
else:
raise RuntimeError("either a session or a path and filename required")
h5py.File.__init__(self, name, 'w')
self.attrs['project'] = self.project
self.attrs['dss'] = self.dss
self.attrs['year'] = self.year
self.attrs['doy'] = self.doy
class Session(object):
"""
Base class for an observing session on a given year and DOY
Public Attributes::
    doy (int) - day of year for session
    logger (logging.Logger) - logging.Logger object
    parent (object) - a data reduction session (mult. observ. sessions)
    year (int) - year of the session
    project (str) - project for which the observations were made
    session_dir (str) - path to results from this session
A session usually refers to a telescope, date and project. This will
normally define a path to the session directory.
"""
def __init__(self, parent=None, date=None, project=None, dss=None,
path=None):
"""
initialize data reduction for one observing session
Args
====
parent: (object) optional class for a data reduction tool
date: (str) required, format YEAR/DOY
project: (str) required
      dss: (int) required
      path: (str) optional
    If `path` is given for a non-standard location of the observing files, and it
    does not exist, it will be created. Then the Recording and Observation
    instances must be directed to where the files are.
"""
self.logger = logging.getLogger(logger.name+".Session")
if parent:
self.session = parent
if date and project and dss:
y,d = date.split('/')
self.year = int(y);
self.doy = int(d)
self.project = project
self.dss = dss
self.name = "'%s %4d/%03d'" % (self.project, self.year, self.doy)
else:
self.logger.error("__init__: missing DSS or year or DOY or project")
raise Exception("Where and when and for what project were the data taken?")
self.find_session_dir(path=path)
def find_session_dir(self, path=None):
"""
find or make the sessions directory
Args:
path (str) - explicit path to files
"""
self.logger.debug("find_session_dir: entered for path=%s", path)
if path:
self.session_dir = path
else:
obs_dir = local_dirs.projects_dir + self.project \
+"/Observations/dss"+str(self.dss)+"/"
self.session_dir = obs_dir+ "%4d" % self.year +"/"+ "%03d" % self.doy +"/"
if not os.path.exists(self.session_dir):
os.makedirs(self.session_dir, mode=0o775)
def select_data_files(self, datapath=None, name_pattern="", auto=True,
load_hdf=False):
"""
    Provide the user with a menu to select data files.
Finding the right data store is complicated as there are many kinds of data
files
* If datapath is ...RA_data/HDF5/... then the files could be .h5 (Ashish)
or .hdf5 (Dean).
* If datapath is ...RA_data/FITS/... then the extent is .fits.
* If datapath is ...project_data/... then the extent is .pkl
* If datapath is ...projects/... (default) then the extent is probably
.csv or .dat or .prd.
@param datapath : path to top of the tree where the DSS subdirectories are
@type datapath : str
@param name_pattern : pattern for selecting file names, e.g. source
@type name_pattern : str
@param load_hdf : use RA_data/HDF5 directory if True
@type load_hdf : bool
    @param auto : take all files found
@type auto : bool
@return: list of str
"""
# Get the data files to be processed
self.logger.debug("select_data_files: looking in %s", datapath)
if name_pattern:
name,extent = os.path.splitext(name_pattern)
if extent.isalpha(): # a proper extent with no wildcards
# take name pattern as is
pass
else:
# only one * at front and back of pattern
name_pattern = "*"+name_pattern.rstrip('*')+"*"
else:
# no pattern specified. All files.
name_pattern = "*"
self.logger.debug("select_data_files: for pattern %s", name_pattern)
if datapath:
if re.search('HDF5', datapath):
load_hdf = True
elif re.search('project_data', datapath):
load_hdf = False
datafiles = support.text.select_files(datapath+name_pattern+"[0-9].pkl")
elif re.search('FITS', datapath):
datafiles = support.text.select_files(datapath+name_pattern+".fits")
if load_hdf:
full = datapath+name_pattern+".h*5"
else:
full = datapath+name_pattern
else:
full = self.session_dir + name_pattern
self.logger.debug("select_data_files: from: %s", full)
if auto:
datafiles = glob.glob(full)
else:
datafiles = support.text.select_files(full)
self.logger.debug("select_data_files: found %s", datafiles)
if datafiles == []:
self.logger.error(
"select_data_files: None found. Is the data directory mounted?")
raise RuntimeError('No data files found.')
if type(datafiles) == str:
datafiles = [datafiles]
self.logger.info("select_data_files: to be processed: %s", datafiles)
return datafiles
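# Illustrative sketch of the name-pattern handling in ``select_data_files``:
# a bare source name with no extension is wrapped in wildcards before being
# passed to ``glob``. The directory and project name used here are purely
# hypothetical.
def _demo_name_pattern(session_dir="/usr/local/projects/EXAMPLE/Observations/dss28/2020/163/",
                       name_pattern="venus"):
  import glob
  import os.path
  name, extent = os.path.splitext(name_pattern)
  if not extent.isalpha():                     # no proper extension given
    name_pattern = "*"+name_pattern.rstrip('*')+"*"
  return glob.glob(session_dir + name_pattern)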
class Spectrum(Observation):
"""
Class for spectra
"""
def __init__(self):
"""
needs a spectrum attribute
"""
self.logger = logging.getLogger(logger.name+".Spectrum")
def get_num_chans(self, linefreq, bandwidth, max_vel_width):
"""
compute the base 2 number of output channels for the specified resolution
"""
kmpspMHz = 300000./linefreq
BW_kmps = bandwidth*kmpspMHz
est_num_chan_out = BW_kmps/max_vel_width
self.logger.debug("get_num_chans: estimated num chans out = %d",
est_num_chan_out)
return 2**int(math.ceil(math.log(est_num_chan_out,2)))
def reduce_spectrum_channels(self, refval, refpix, delta,
num_chan=1024, axis=0):
"""
Reduce the number of channels in the spectrum.
The default option is to reduce the spectrum to a specified number of
channels with a default of 1024. The input spectrum is presumed to have
    2**N channels so that num_chan_in/num_chan is an integer.
If 'spectrum' is an N-D array, then the spectrum axis is given by 'axis'
which defaults to 0.
'delta' is negative for lower sideband or reversed double sideband spectra.
@param spectrum : spectrum values
@type spectrum : list or nparray
@param refval : X-axis value at the reference pixel of 'spectrum'
@type refval : float
@param refpix : reference pixel for 'spectrum'
@type refpix : int
@param delta : interval between pixels on the X-axis
@type delta : float
@param num_chan : optional number of channels to be returned (default: 2^10)
@type num_chan : int
@return: numpy.array
"""
if math.log(num_chan,2) % 1:
raise RuntimeError("num_chan = %d is not a power of 2", num_chan)
if type(self.spectrum) == NP.ndarray:
num_chans_in = self.spectrum.shape[axis]
else:
num_chans_in = len(self.spectrum)
if math.log(num_chans_in,2) % 1:
raise RuntimeError("input spectrum length = %d is not a power of 2",
num_chans_in)
self.logger.debug("reduce_spectrum_channels: %d channels in", num_chans_in)
    num_chan_avg = num_chans_in//num_chan
newrefpix = refpix/num_chan_avg
self.logger.debug("reduce_spectrum_channels: refpix from %d to %d",
refpix, newrefpix)
newdelta = delta*num_chan_avg
self.logger.debug("reduce_spectrum_channels: delta from %.3f to %.3f",
delta, newdelta)
newrefval = refval + delta*(num_chan_avg/2 - 1)
self.logger.debug("reduce_spectrum_channels: refval from %.3f to %.3f",
refval, newrefval)
self.logger.debug("reduce_spectrum_channels: averaging %d channels", num_chan_avg)
    specout = NP.array([self.spectrum[index*num_chan_avg:(index+1)*num_chan_avg].mean()
for index in range(num_chan)])
self.logger.debug("reduce_spectrum_channels: %d channels out", num_chan)
return specout, newrefval, newrefpix, newdelta
def get_freq_array(self, bandwidth, n_chans):
"""
Create an array of frequencies for the channels of a backend
@param bandwidth : bandwidth
@type bandwidth : float
@param n_chans : number of channels
@type n_chans : int
@return: frequency of each channel in same units as bandwidth
"""
return NP.arange(n_chans)*float(bandwidth)/n_chans
def freq_to_chan(frequency,bandwidth,n_chans):
"""
Returns the channel number where a given frequency is to be found.
    @param frequency : frequency of channel in same units as bandwidth.
@type frequency : float
@param bandwidth : upper limit of spectrometer passband
@type bandwidth : float
@param n_chans : number of channels in the spectrometer
@type n_chans : int
@return: channel number (int)
"""
if frequency < 0:
frequency = bandwidth + frequency
if frequency > bandwidth:
raise RuntimeError("that frequency is too high.")
return round(float(frequency)/bandwidth*n_chans) % n_chans
def get_smoothed_bandshape(self, degree = None, poly_order=15):
"""
Do a Gaussian smoothing of the spectrum and then fit a polynomial.
Optionally, the raw and smoothed data and the fitted polynomial can be
plotted.
Note
====
``numpy.polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False)``
Least squares polynomial fit.
Fit a polynomial::
p(x) = p[0] * x**deg + ... + p[deg]
of degree deg to points (x, y).
Returns a vector of coefficients p that minimises the squared error.
@param spectrum : input data
@type spectrum : list of float
    @param degree : number of samples to smooth over (Gaussian FWHM)
@type degree : int
@param poly_order : order of the polynomial
@type poly_order : int
@param plot : plotting option
@type plot : boolean
@return: (polynomial_coefficient, smoothed_spectrum)
"""
    if degree is None:
      degree = len(self.spectrum)//100
# normalize the spectrum so max is 1 and convert to dB.
max_lev = NP.max(self.spectrum)
norm_spec = NP.array(self.spectrum)/float(max_lev)
norm_spec_db = 10*NP.log10(norm_spec)
# do a Gaussian smoothing
norm_spec_db_smoothed = smoothListGaussian(norm_spec_db, degree=degree)
# deal with the edges by making them equal to the smoothed end points
norm_spec_db_smoothed_resized = NP.ones(len(self.spectrum))
# left end
norm_spec_db_smoothed_resized[0:degree] = norm_spec_db_smoothed[0]
# middle
norm_spec_db_smoothed_resized[degree:degree+len(norm_spec_db_smoothed)] = \
norm_spec_db_smoothed
# right end
norm_spec_db_smoothed_resized[degree+len(norm_spec_db_smoothed):] = \
norm_spec_db_smoothed[-1]
    # fit a polynomial of order 'poly_order' to the smoothed spectrum
    poly = NP.polyfit(NP.arange(len(norm_spec_db_smoothed_resized)),
                      norm_spec_db_smoothed_resized, poly_order)
    return poly, norm_spec_db_smoothed_resized
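# Self-contained approximation of the recipe in ``get_smoothed_bandshape``:
# normalize to the peak, convert to dB, smooth with a Gaussian kernel, then fit
# a polynomial with ``numpy.polyfit``. The convolution below stands in for the
# module's ``smoothListGaussian`` helper, so treat it only as an illustration.
def _demo_smoothed_bandshape(spectrum, degree=10, poly_order=15):
  import numpy
  spectrum = numpy.asarray(spectrum, dtype=float)
  spectrum = numpy.clip(spectrum, spectrum.max()*1e-6, None)  # avoid log10(0)
  norm_db = 10*numpy.log10(spectrum/spectrum.max())
  x = numpy.arange(-3*degree, 3*degree+1)
  kernel = numpy.exp(-0.5*(x/float(degree))**2)
  kernel /= kernel.sum()
  smoothed = numpy.convolve(norm_db, kernel, mode='same')
  poly = numpy.polyfit(numpy.arange(len(smoothed)), smoothed, poly_order)
  return poly, smoothed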
# ------------------------ module functions -------------------------------
def examine_text_data_file(filename):
"""
Examine a file to guide ``genfromtxt()``
Things to look for::
* Is there a header line with column names? If not, use argument ``names``.
* Is the number of names equal to the number of columns? If not::
- use argument ``names`` and ``skip_header=1``, or
- use argument ``delimiter`` with a list of column widths
and ``skip_header=1``.
"""
print(examine_text_data_file.__doc__)
fd = open(filename, "r")
lines = fd.readlines()
fd.close()
topline = lines[0].strip().split()
print(" 1 2 3 4 5 6 7")
print("01234567890123456789012345678901234567890123456789012345678901234567890123456789")
print(lines[0].strip())
print(lines[1].strip())
print(" ...")
print(lines[-1].strip())
data = NP.genfromtxt(filename, dtype=None, names=None, skip_header=1, encoding=None)
print("%d datatypes:" % len(data.dtype.fields))
for item in data.dtype.fields:
print(item, data.dtype.fields[item])
def get_obs_dirs(project, station, year, DOY, datafmt=None):
"""
Returns the directories where data and working files are kept
@param project : project code string, e.g., RRL
@type project : str
@param station : DSN station number
@type station : int
@param year : year of observation
@type year : int
@param DOY : day of year of observations
@type DOY : int
@param datafmt : raw data format
@type datafmt : str
"""
#logger.debug("get_obs_dirs: type %s for %s, DSS%d, %4d/%03d",
# datafmt, project, station, year, DOY)
obspath = "dss%2d/%4d/%03d/" % (station,year,DOY)
if project:
projdatapath = "/usr/local/project_data/"+project+"/"+obspath
projworkpath = "/usr/local/projects/"+project+"/Observations/"+obspath
else:
projdatapath = ""
projworkpath = ""
if datafmt:
rawdatapath = "/usr/local/RA_data/"+datafmt+"/"+obspath
else:
rawdatapath = ""
return projdatapath, projworkpath, rawdatapath
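# Example of the paths constructed above (the project code and station here are
# only illustrative values):
#   get_obs_dirs("RRL", 43, 2020, 163, datafmt="HDF5")
# would return
#   ('/usr/local/project_data/RRL/dss43/2020/163/',
#    '/usr/local/projects/RRL/Observations/dss43/2020/163/',
#    '/usr/local/RA_data/HDF5/dss43/2020/163/')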
# --------- old stuff to be discarded still needed for now ---------------
def old_get_obs_session(project=None, dss=None, date=None, path='proj'):
"""
Provides project, station, year and DOY, asking as needed.
It follows one of several possible paths to get to the session::
proj - path through /usr/local/projects/<project>
hdf5 - path through /usr/local/RA_data/HDF5
fits - path through /usr/local/RA_data/FITS
wvsr - path through /data
@param project : optional name as defined in /usr/local/projects
@type project : str
@param dss : optional station number
@type dss : int
@param date : optional YYYY/DDD
@type date : str
@return: project, DSS, year, DOY.
"""
def get_directory(path):
"""
"""
# only one trailing /
path = path.rstrip('/')+"/*"
logger.debug("get_obs_session:get_directory: from %s", path)
names = glob.glob(path)
if names:
dirs = []
for name in names:
if os.path.isdir(name):
dirs.append(os.path.basename(name))
dirs.sort()
for name in dirs:
print((name), end=' ')
return input('\n>')
else:
return []
def from_wvsr_dir():
"""
this needs to be completed and tested on crab14 or an auto host
"""
session = get_directory(local_dirs.wvsr_dir)
return session
cwd = os.getcwd()
# get the project
if project:
pass
else:
os.chdir(local_dirs.projects_dir)
project = get_directory(local_dirs.projects_dir)
logger.debug("from_wvsr_dir: project is %s", project)
projectpath = local_dirs.projects_dir+project
# get the station
if path[:4].lower() == 'wvsr':
# special call
print("from_wvsr_dir()")
if path[:4].lower() == 'proj':
os.chdir(projectpath+"/Observations/")
elif path[:4].lower() == 'hdf5':
os.chdir(local_dirs.hdf5_dir)
elif path[:4].lower() == 'fits':
os.chdir(local_dirs.fits_dir)
# get the station
if dss:
pass
else:
# This seems odd but get_directory() needs '/' and int does not
station = get_directory(os.getcwd()+"/").rstrip('/')
dss = int(station[-2:])
stationpath = os.getcwd()+"/dss"+str(dss)
# get the date
if date:
items = date.split('/')
year = int(items[0])
DOY = int(items[1])
else:
year = int(get_directory(stationpath))
yearpath = stationpath+"/"+str(year)
DOY = int(get_directory(yearpath))
os.chdir(cwd)
return project, dss, year, DOY
| 35.370566
| 91
| 0.615307
| 6,185
| 46,866
| 4.582215
| 0.147615
| 0.021453
| 0.015349
| 0.006986
| 0.167672
| 0.118239
| 0.09181
| 0.077661
| 0.047281
| 0.035073
| 0
| 0.016119
| 0.275914
| 46,866
| 1,324
| 92
| 35.397281
| 0.819036
| 0.431912
| 0
| 0.195688
| 0
| 0
| 0.118994
| 0.011487
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056385
| false
| 0.019901
| 0.031509
| 0
| 0.139303
| 0.018242
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
435242d1a3384ab078fa9b2a0a84286b9581b8f8
| 8,483
|
py
|
Python
|
Context_Guided_RelRep/train.py
|
Huda-Hakami/Context-Guided-Relation-Embeddings
|
520ce89fe7bad3aba2f3eb112329300625bb55f7
|
[
"Apache-2.0"
] | 1
|
2019-10-06T03:54:53.000Z
|
2019-10-06T03:54:53.000Z
|
Context_Guided_RelRep/train.py
|
Huda-Hakami/Context-Guided-Relation-Embeddings
|
520ce89fe7bad3aba2f3eb112329300625bb55f7
|
[
"Apache-2.0"
] | null | null | null |
Context_Guided_RelRep/train.py
|
Huda-Hakami/Context-Guided-Relation-Embeddings
|
520ce89fe7bad3aba2f3eb112329300625bb55f7
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from wordreps import WordReps
from algebra import cosine, normalize
import tensorflow as tf
import random
from dataset import DataSet
import CGRE_Model
from Eval import eval_SemEval
import sklearn.preprocessing
# ============ End Imports ============
class Training():
def __init__(self):
# Compositional relation embeddings (G1) Hyperparameters
self.batchSize=100
G1_HL=3
G1_Hdim=WR.dim
G1_BN=True #boolean variable T/F for batch normalization on G1 MLP
G1_l2_reg=0.001 # L2 regularization coefficient
self.G1_pkeep=1.0 # 1.0 means no Dropout applied during training on G1
# LSTM pattern encoding (G2) Hyperparameters
G2_HL=1
G2_Hdim=WR.dim
self.G2_pkeep=1.0 # 1.0 means no Dropout applied during training on G2
activ='tanh'
# Create relational model instance
self.RelModel=CGRE_Model.CGRE(activ,self.batchSize)
self.RelModel.G1_model(Ea,G1_BN,G1_HL,G1_Hdim,G1_l2_reg)
self.RelModel.G2_rnn_model(DS.max_length,G2_HL,G2_Hdim)
# --------------------------------------------------
def Train_Model(self):
# Hyperparameters
epochs=500
hist_loss=[]
hist_acc=[]
winn_loss=1e7
win_acc=-1
# Discriminator Hyperparameters (for Rel-Rep-alignment model)
D_HL=0
D_Hdim=WR.dim
D_BN=False # boolean variable T/F for batch normalization on D
self.D_pkeep=1.0 # 1.0 means no Dropout applied during training on the Discriminator D
D_l2_reg=0.001 # L2 regularization coefficient (to perform l2 regularized cross-entropy)
Train = DS.Training_triplesIDs
Train_Relations=set([rel for (a,b,p,w,rel) in Train])
Num_of_Classes=len(Train_Relations)
print ("Number of relation labels for cross-entropy objective=",Num_of_Classes)
# Assign ids to relations
Rel2id={}
i=0
for rel in Train_Relations:
Rel2id[rel]=i
i+=1
Train_dic={}
for (a,b,p,w,rel) in Train:
Train_dic.setdefault((a,b,rel),[])
Train_dic[(a,b,rel)].append((p,w))
Training_patterns=set([p for (_,_,p,_,_) in Train])
print ('Number of training patterns after removing test instances=',len(Training_patterns))
Train_list=list(Train_dic.keys())
print ("Number of training word-pairs (a,b,[(p,w)])",len(Train_list))
self.RelModel.define_loss(D_HL,D_Hdim,D_BN,D_l2_reg,Num_of_Classes)
self.RelModel.optimize()
self.sess=tf.Session()
self.sess.run(tf.global_variables_initializer())
print ("==========================================================================")
for epoch in range(epochs):
# Randomly shuffle training instances for each epoch
random.shuffle(Train_list)
# performance every 20 steps
if epoch%1==0:
Pair_Embeddings=self.Gen_Pair_Embeddings()
acc_1,corr_1=eval_SemEval(Pair_Embeddings,'Test')
acc_2,corr_2=eval_SemEval(Pair_Embeddings,'Valid')
acc_3,corr_3=eval_SemEval(Pair_Embeddings,'All')
print ("Epoch:%d, Acc_Test:%f, Acc_Valid:%f, Acc_All:%f, Corr_Test:%f, Corr_Valid:%f, Corr_All:%f"%(epoch,acc_1,acc_2,acc_3,corr_1,corr_2,corr_3))
hist_acc.append(acc_2)
# For early stopping
if acc_2>win_acc:
win_acc=acc_2
self.Save_Trained_Model()
print ("Parameters and Pair-Embeddings are changed...")
best_epoch=epoch
patient_cnt=0
else:
patient_cnt+=1
if patient_cnt>10:
print ("early stopping ... epoch number %d"%epoch)
print ("Winner acc:%f at epoch:%d"%(win_acc,best_epoch))
# break
# Training
for minibatch in next_batch(self.batchSize,Train_list):
a_ids,b_ids,labels=shred_tuples(minibatch)
Train_Y=np.zeros((len(minibatch),Num_of_Classes))
for i,rel in enumerate(labels):
rel_id=Rel2id[rel]
Train_Y[i,rel_id]=1.0
train_data={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:self.G1_pkeep,\
self.RelModel.is_training:True,self.RelModel.D_pkeep:self.D_pkeep}
minibatch_patterns=[Train_dic[(a,b,rel)] for (a,b,rel) in minibatch]
max_num_of_patterns,pattern_seq,early_stop,weights=Pattern_Sequences(a_ids,b_ids,minibatch_patterns)
train_data[self.RelModel.max_num_of_patterns]=max_num_of_patterns
train_data[self.RelModel.patterns_ids]=pattern_seq
train_data[self.RelModel.early_stop]=early_stop
train_data[self.RelModel.weights]=weights
train_data[self.RelModel.G2_pkeep]=self.G2_pkeep
# Loss options
train_data[self.RelModel.Y_]=Train_Y
self.sess.run(self.RelModel.train_step,feed_dict=train_data)
# --------------------------------------------------
def Save_Trained_Model(self):
Pair_Embeddings_dic=self.Gen_Pair_Embeddings()
np.save("res/Pair_Embeddings.npy",Pair_Embeddings_dic)
# --------------------------------------------------
def Gen_Pair_Embeddings(self):
word_pairs_ids=[(DS.word2id[a],DS.word2id[b]) for (a,b) in DS.Test_Pairs]
a_ids=[t[0] for t in word_pairs_ids]
b_ids=[t[1] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings1=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings1=sklearn.preprocessing.normalize(Pair_Embeddings1,axis=1,norm='l2') #L2 norm of r(a,b)
a_ids=[t[1] for t in word_pairs_ids]
b_ids=[t[0] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings2=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings2=sklearn.preprocessing.normalize(Pair_Embeddings2,axis=1,norm='l2') #L2 norm of r(b,a)
Pair_Embeddings=np.hstack((Pair_Embeddings1,Pair_Embeddings2))
Pair_Embeddings_dic={}
for i,(a,b) in enumerate(DS.Test_Pairs):
Pair_Embeddings_dic[(a,b)]=Pair_Embeddings[i]
return Pair_Embeddings_dic
# ============ End of the Training class ============
def next_batch(batchSize,data):
# loop over our dataset in mini-batches of size `batchSize`
for i in np.arange(0, len(data), batchSize):
# yield the current batched data
yield data[i:i + batchSize]
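# Minimal usage sketch for ``next_batch``: split a list into successive
# mini-batches (here of size 3) and report their sizes.
def _demo_next_batch():
    items = list(range(10))
    return [len(batch) for batch in next_batch(3, items)]   # -> [3, 3, 3, 1]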
# -------------------------------------------------------
def shred_tuples(tuples):
a_ids=[t[0] for t in tuples]
b_ids=[t[1] for t in tuples]
labels=[t[2] for t in tuples]
return a_ids,b_ids,labels
# -------------------------------------------------------
def Pattern_Sequences(a_ids,b_ids,minibatch_patterns):
max_num_of_patterns=np.max([len(L) for L in minibatch_patterns])
min_num_of_patterns=np.min([len(L) for L in minibatch_patterns])
# print ("Max num of patterns:",max_num_of_patterns)
# print ("Min num of patterns:",min_num_of_patterns)
pattern_seq=np.zeros((len(a_ids)*max_num_of_patterns,DS.max_length+2),dtype=int) #+2 is for the targeted two entities a and b
early_stop=[0 for i in range(len(a_ids)*max_num_of_patterns)]
weights=[0.0 for i in range(len(a_ids)*max_num_of_patterns)]
for i in range(len(a_ids)):
set_of_patterns=minibatch_patterns[i]
for j in range(max_num_of_patterns):
if j<len(set_of_patterns):
pattern_id,w=set_of_patterns[j][0],set_of_patterns[j][1]
pattern=DS.id2Patterns[pattern_id]
words=pattern.strip().split(' ')
words.insert(0,DS.id2word[a_ids[i]])
words.append(DS.id2word[b_ids[i]])
early_stop[(i*max_num_of_patterns)+j]=len(words)
weights[(i*max_num_of_patterns)+j]=w
for k,word in enumerate(words):
pattern_seq[(i*max_num_of_patterns)+j,k]=DS.word2id[word]
return max_num_of_patterns,pattern_seq,early_stop,weights
# -----------------------------------------------------------
if __name__=="__main__":
'''
Word Embeddings
'''
pretrained_glove_300=("../glove.6B.300d.zip","glove",300)
WR=WordReps()
norm=1
standardise=0
WR.Read_Embeddings_zip_file(pretrained_glove_300,norm,standardise)
WR.vects['<PAD>']=np.zeros(WR.dim)
# WR.vects['X']=np.random.rand(WR.dim)
# WR.vects['Y']=np.random.rand(WR.dim)
WR.vects['X']=np.random.normal(size=(WR.dim)).astype('float32')
WR.vects['Y']=np.random.normal(size=(WR.dim)).astype('float32')
'''
Dataset
'''
corpus='Wikipedia_English'
Train_dataset=('DiffVec',"DiffVec_Pairs")
Test_dataset=('SemEval',"SemEval_Pairs.txt")
labels_type='proxy'
Reverse_pairs=True
DS=DataSet(corpus,Train_dataset,Test_dataset,labels_type,Reverse_pairs)
id2Patterns="../Relational_Patterns/Patterns_Xmid5Y"
Patterns_per_pair="../Relational_Patterns/Patterns_Xmid5Y_PerPair"
DS.Retrieve_Patterns(id2Patterns,Patterns_per_pair)
Ea=DS.Generate_Embedding_Matrix(WR)
'''
Training & Evaluation
'''
Eval=Training()
Eval.Train_Model()
| 37.870536
| 150
| 0.707651
| 1,352
| 8,483
| 4.190828
| 0.191568
| 0.057183
| 0.039005
| 0.039534
| 0.283975
| 0.249735
| 0.240205
| 0.195376
| 0.129721
| 0.104659
| 0
| 0.021547
| 0.11918
| 8,483
| 223
| 151
| 38.040359
| 0.736751
| 0.196393
| 0
| 0.012422
| 0
| 0.006211
| 0.099388
| 0.027011
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.055901
| 0
| 0.124224
| 0.049689
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43525bbf3ff2f6151c746e2a0599b8ee3f2bbfcc
| 1,071
|
py
|
Python
|
synch_integrate.py
|
HerculesJack/grtrans
|
bc005307d81dac1bdb9520e776e7627126dd690a
|
[
"MIT"
] | 25
|
2016-02-11T01:52:14.000Z
|
2021-06-16T02:15:42.000Z
|
synch_integrate.py
|
RAnantua/grtrans
|
a0353a8516335412b27fe4866eabafcfc0fe498f
|
[
"MIT"
] | 6
|
2016-11-10T15:25:20.000Z
|
2018-01-18T15:15:57.000Z
|
synch_integrate.py
|
RAnantua/grtrans
|
a0353a8516335412b27fe4866eabafcfc0fe498f
|
[
"MIT"
] | 6
|
2016-02-11T14:13:01.000Z
|
2022-03-10T01:56:02.000Z
|
from radtrans_integrate import radtrans_integrate
from polsynchemis import polsynchemis
import numpy as np
import scipy.integrate
# calculate synchrotron emissivity for given coefficients
def synch_jarho(nu,n,B,T,theta):
if ((np.isscalar(nu)==False) & (np.isscalar(n)==True)):
n = n + np.zeros(len(nu))
B = B + np.zeros(len(nu))
T = T + np.zeros(len(nu))
theta = theta + np.zeros(len(nu))
e = polsynchemis.polsynchth(nu,n,B,T,theta)
j = e[:,:4]; a = e[:,4:8]; rho = e[:,8:]
return j,a,rho
def run(x,jarr,aarr,rhoarr,sphstokes=-1,atol=1e-8,rtol=1e-6,max_tau=10):
if sphstokes==-1:
method=0
else:
method=3
radtrans_integrate.init_radtrans_integrate_data(method,4,len(x),len(x),max_tau,0.1,atol,rtol,1e-2,100000)
Karr = (np.append(aarr,rhoarr,axis=1))
tau = np.append(0.,scipy.integrate.cumtrapz(Karr[:,0],x))
radtrans_integrate.integrate(x[::-1],jarr[:,:],Karr[:,:],tau,4)
i = radtrans_integrate.intensity.copy()
radtrans_integrate.del_radtrans_integrate_data()
return i
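# Hypothetical usage sketch: it assumes the compiled grtrans extensions
# (``polsynchemis`` and ``radtrans_integrate``) imported above are available.
# The plasma parameters and path length are arbitrary illustrative values.
def _example_run():
    nu = 230e9*np.ones(100)            # observing frequency [Hz]
    x = np.linspace(0.0, 1e14, 100)    # distance along the ray [cm]
    n, B, T, theta = 1e6, 30.0, 6e10, np.pi/3.0
    j, a, rho = synch_jarho(nu, n, B, T, theta)
    return run(x, j, a, rho)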
| 36.931034
| 109
| 0.659197
| 171
| 1,071
| 4.040936
| 0.391813
| 0.196816
| 0.057887
| 0.069465
| 0.028944
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033822
| 0.171802
| 1,071
| 28
| 110
| 38.25
| 0.745209
| 0.051354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.16
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
435728a0cb21ad40d2d8c25c033f2746e09d0952
| 4,239
|
py
|
Python
|
apps/dash-port-analytics/app/ui/tab_map_controls.py
|
JeroenvdSande/dash-sample-apps
|
106fa24693cfdaf47c06466a0aed78e642344f91
|
[
"MIT"
] | 2,332
|
2019-05-10T18:24:20.000Z
|
2022-03-30T21:46:29.000Z
|
apps/dash-port-analytics/app/ui/tab_map_controls.py
|
JeroenvdSande/dash-sample-apps
|
106fa24693cfdaf47c06466a0aed78e642344f91
|
[
"MIT"
] | 384
|
2019-05-09T19:19:56.000Z
|
2022-03-12T00:58:24.000Z
|
apps/dash-port-analytics/app/ui/tab_map_controls.py
|
JeroenvdSande/dash-sample-apps
|
106fa24693cfdaf47c06466a0aed78e642344f91
|
[
"MIT"
] | 3,127
|
2019-05-16T17:20:45.000Z
|
2022-03-31T17:59:07.000Z
|
import dash_core_components as dcc
import dash_html_components as html
from config import strings
def make_tab_port_map_controls(
port_arr: list,
port_val: str,
vessel_types_arr: list,
vessel_type_val: str,
year_arr: list,
year_val: int,
month_arr: list,
month_val: int,
) -> html.Div:
"""
    Returns an HTML div of user controls found at the top of the map tab.
:param port_arr: list, all possible ports
:param port_val: str, current port value
:param vessel_types_arr: list, all possible vessel types
:param vessel_type_val: str, current vessel type value
:param year_arr: list, all possible years
    :param year_val: int, current year value
:param month_arr: list, all possible months
    :param month_val: int, current month value
:return: HTML div
"""
return html.Div(
className="tab-port-map-controls",
children=[
html.Div(
className="tab-port-map-single-control-container area-a",
children=[
html.Label(
className="control-label", children=[strings.LABEL_PORT]
),
dcc.Dropdown(
id="port-map-dropdown-port",
clearable=False,
options=[{"label": port, "value": port} for port in port_arr],
value=port_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-b"),
html.Div(
className="tab-port-map-single-control-container area-c",
children=[
html.Label(
className="control-label", children=[strings.LABEL_VESSEL]
),
dcc.Dropdown(
id="port-map-dropdown-vessel-type",
clearable=False,
options=[
{"label": vessel_type, "value": vessel_type}
for vessel_type in vessel_types_arr
],
value=vessel_type_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-d"),
html.Div(
className="tab-port-map-single-control-container date-grid area-e",
children=[
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label", children=[strings.LABEL_YEAR]
),
dcc.Dropdown(
id="port-map-dropdown-year",
clearable=False,
options=[
{"label": year, "value": year} for year in year_arr
],
value=year_val,
),
],
),
html.Div(
className="tab-port-map-single-control-separator smaller-line"
),
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label",
children=[strings.LABEL_MONTH],
),
dcc.Dropdown(
id="port-map-dropdown-month",
clearable=False,
options=[
{"label": month, "value": month}
for month in month_arr
],
value=month_val,
),
],
),
],
),
],
)
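# Hypothetical usage sketch: build the control block with made-up data. It
# assumes the surrounding Dash app supplies ``config.strings`` with the label
# constants referenced above; every value below is an arbitrary example.
def _example_controls() -> html.Div:
    return make_tab_port_map_controls(
        port_arr=["Antwerp", "Rotterdam"],
        port_val="Antwerp",
        vessel_types_arr=["Cargo", "Tanker"],
        vessel_type_val="Cargo",
        year_arr=[2019, 2020],
        year_val=2020,
        month_arr=list(range(1, 13)),
        month_val=6,
    )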
| 38.889908
| 88
| 0.420854
| 359
| 4,239
| 4.844011
| 0.18663
| 0.056354
| 0.057504
| 0.098332
| 0.46406
| 0.46406
| 0.384704
| 0.384704
| 0.384704
| 0.317999
| 0
| 0
| 0.493041
| 4,239
| 108
| 89
| 39.25
| 0.809214
| 0.105685
| 0
| 0.55914
| 0
| 0
| 0.15325
| 0.113132
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010753
| false
| 0
| 0.032258
| 0
| 0.053763
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
435a70dd7b6f4dda69b0f2a7703c3f754714213d
| 22,429
|
py
|
Python
|
graphql_compiler/compiler/workarounds/orientdb_query_execution.py
|
0xflotus/graphql-compiler
|
0c892f5254d0cf3d03a68012080d0b736bc49913
|
[
"Apache-2.0"
] | null | null | null |
graphql_compiler/compiler/workarounds/orientdb_query_execution.py
|
0xflotus/graphql-compiler
|
0c892f5254d0cf3d03a68012080d0b736bc49913
|
[
"Apache-2.0"
] | 1
|
2019-04-18T18:23:16.000Z
|
2019-04-18T18:23:16.000Z
|
graphql_compiler/compiler/workarounds/orientdb_query_execution.py
|
0xflotus/graphql-compiler
|
0c892f5254d0cf3d03a68012080d0b736bc49913
|
[
"Apache-2.0"
] | 1
|
2019-11-21T02:38:27.000Z
|
2019-11-21T02:38:27.000Z
|
# Copyright 2018-present Kensho Technologies, LLC.
"""Workarounds for OrientDB scheduler issue that causes poor query planning for certain queries.
For purposes of query planning, the OrientDB query planner ignores "where:" clauses
that hit indexes but do not use the "=" operator. For example, "CONTAINS" can be used to check
that a field covered by an index is in a specified list of values, and can therefore be covered
by an index, but OrientDB will ignore this. When no equality ("=") checks on indexed columns
are present, OrientDB will generate a query plan that starts execution at the class with
lowest cardinality, which can lead to excessive numbers of scanned and discarded records.
Assuming the query planner creates a query plan where a location with CONTAINS is
the first in the execution order, the execution system will apply indexes
to speed up this operation. Therefore, it's sufficient to trick the query planner into
always creating such a query plan, even though it thinks indexes cannot be used in the query.
Valid query execution start points for the OrientDB query planner must satisfy the following:
- Must not be "optional: true".
- Must not have a "while:" clause nor follow a location that has one.
- Must have a "class:" defined. This class is used for cardinality estimation, and to
look for available indexes that may cover any "where:" clause that may be present.
The optimizations in this file improve performance by enabling execution start points according
to the following assumptions:
1. Start points with "where:" clauses that reference only local fields (i.e. not tagged values
from other query locations) are always better than start points without a "where:".
This is because the filter will have to be applied one way or the other, so we might as well
apply it early.
2. If no such start points are available, we'd like to make available as many start points
as possible, since we'd like OrientDB to start at the start point whose class has
the lowest possible cardinality.
The process of applying the optimizations is as follows:
- Exclude and ignore all query steps that are inside a fold, optional, or recursion scope,
or have a "where:" clause that references a non-local (i.e. tagged) field.
- Find all remaining query steps with "where:" clauses that reference only local fields.
- If any are found, we guide our actions from assumption 1 above:
- Ensure they have a defined "class:" -- i.e. the OrientDB scheduler will consider them
valid start points.
- Then, prune all other query steps (ones without such "where:" clauses) by removing their
"class:" clause, making them invalid as query start points for OrientDB's scheduler.
- If none are found, we guide our actions from assumption 2 above:
- Ensure that all query points not inside fold, optional, or recursion scope contain
a "class:" clause. That increases the number of available query start points,
so OrientDB can choose the start point of lowest cardinality.
"""
from ..blocks import CoerceType, QueryRoot, Recurse, Traverse
from ..expressions import ContextField, ContextFieldExistence
from ..helpers import get_only_element_from_collection
from ..ir_lowering_match.utils import convert_coerce_type_and_add_to_where_block
def _is_local_filter(filter_block):
"""Return True if the Filter block references no non-local fields, and False otherwise."""
# We need the "result" value of this function to be mutated within the "visitor_fn".
# Since we support both Python 2 and Python 3, we can't use the "nonlocal" keyword here:
# https://www.python.org/dev/peps/pep-3104/
# Instead, we use a dict to store the value we need mutated, since the "visitor_fn"
# can mutate state in the parent scope, but not rebind variables in it without "nonlocal".
# TODO(predrag): Revisit this if we drop support for Python 2.
result = {
'is_local_filter': True
}
filter_predicate = filter_block.predicate
def visitor_fn(expression):
"""Expression visitor function that looks for uses of non-local fields."""
non_local_expression_types = (ContextField, ContextFieldExistence)
if isinstance(expression, non_local_expression_types):
result['is_local_filter'] = False
# Don't change the expression.
return expression
filter_predicate.visit_and_update(visitor_fn)
return result['is_local_filter']
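# Standalone illustration of the workaround described in the comments above:
# a nested visitor function cannot rebind a name in the enclosing scope without
# Python 3's ``nonlocal``, but it can freely mutate a dict that lives there.
# The example below is generic and independent of the compiler IR.
def _demo_mutable_state_via_dict(values):
    """Return True if any value is negative, using a visitor-style callback."""
    result = {'found_negative': False}

    def visitor(value):
        if value < 0:
            result['found_negative'] = True   # mutate, do not rebind
        return value

    for value in values:
        visitor(value)
    return result['found_negative']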
def _classify_query_locations(match_query):
"""Classify query locations into three groups: preferred, eligible, ineligible.
- Ineligible locations are ones that cannot be the starting point of query execution.
These include locations within recursions, locations that are the target of
an optional traversal, and locations with an associated "where:" clause with non-local filter.
- Preferred locations are ones that are eligible to be the starting point, and also have
an associated "where:" clause that references no non-local fields -- only local fields,
literals, and variables.
- Eligible locations are all locations that do not fall into either of these two categories.
Args:
match_query: MatchQuery object describing the query being analyzed for optimization
Returns:
tuple (preferred, eligible, ineligible) where each element is a set of Location objects.
The three sets are disjoint.
"""
preferred_locations = set()
eligible_locations = set()
ineligible_locations = set()
# Any query must have at least one traversal with at least one step.
# The first step in this traversal must be a QueryRoot.
first_match_step = match_query.match_traversals[0][0]
if not isinstance(first_match_step.root_block, QueryRoot):
raise AssertionError(u'First step of first traversal unexpectedly was not QueryRoot: '
u'{} {}'.format(first_match_step, match_query))
# The first step in the first traversal cannot possibly be inside an optional, recursion,
# or fold. Its location is always an eligible start location for a query.
# We need to determine whether it is merely eligible, or actually a preferred location.
if first_match_step.where_block is not None:
if _is_local_filter(first_match_step.where_block):
preferred_locations.add(first_match_step.as_block.location)
else:
# TODO(predrag): Fix once we have a proper fix for tag-and-filter in the same scope.
# Either the locally-scoped tag will have to generate a LocalField
# instead of a ContextField, or we'll have to rework the local filter
# detection code in this module.
raise AssertionError(u'The first step of the first traversal somehow had a non-local '
u'filter. This should not be possible, since there is nowhere '
u'for the tagged value to have come from. Values: {} {}'
.format(first_match_step, match_query))
else:
eligible_locations.add(first_match_step.as_block.location)
# This loop will repeat the analysis of the first step of the first traversal.
# QueryRoots other than the first are required to always be at a location whose status
# (preferred / eligible / ineligible) is already known. Since we already processed
# the first QueryRoot above, the rest of the loop can assume all QueryRoots are like that.
for current_traversal in match_query.match_traversals:
for match_step in current_traversal:
current_step_location = match_step.as_block.location
if isinstance(match_step.root_block, QueryRoot):
already_encountered_location = any((
current_step_location in preferred_locations,
current_step_location in eligible_locations,
current_step_location in ineligible_locations,
))
if not already_encountered_location:
raise AssertionError(u'Unexpectedly encountered a location in QueryRoot whose '
u'status has not been determined: {} {} {}'
.format(current_step_location, match_step, match_query))
at_eligible_or_preferred_location = (
current_step_location in preferred_locations or
current_step_location in eligible_locations)
# This location has already been encountered and processed.
# Other than setting the "at_eligible_or_preferred_location" state for the sake of
# the following MATCH steps, there is nothing further to be done.
continue
elif isinstance(match_step.root_block, Recurse):
# All Recurse blocks cause locations within to be ineligible.
at_eligible_or_preferred_location = False
elif isinstance(match_step.root_block, Traverse):
# Optional Traverse blocks cause locations within to be ineligible.
# Non-optional Traverse blocks do not change the eligibility of locations within:
# if the pre-Traverse location was eligible, so will the location within,
# and if it was not eligible, neither will the location within.
if match_step.root_block.optional:
at_eligible_or_preferred_location = False
else:
raise AssertionError(u'Unreachable condition reached: {} {} {}'
.format(match_step.root_block, match_step, match_query))
if not at_eligible_or_preferred_location:
ineligible_locations.add(current_step_location)
elif match_step.where_block is not None:
if _is_local_filter(match_step.where_block):
# This location has a local filter, and is not otherwise ineligible (it's not
# in a recursion etc.). Therefore, it's a preferred query start location.
preferred_locations.add(current_step_location)
else:
# Locations with non-local filters are never eligible locations, since they
# depend on another location being executed before them.
ineligible_locations.add(current_step_location)
else:
# No local filtering (i.e. not preferred), but also not ineligible. Eligible it is.
eligible_locations.add(current_step_location)
return preferred_locations, eligible_locations, ineligible_locations
def _calculate_type_bound_at_step(match_step):
"""Return the GraphQL type bound at the given step, or None if no bound is given."""
current_type_bounds = []
if isinstance(match_step.root_block, QueryRoot):
# The QueryRoot start class is a type bound.
current_type_bounds.extend(match_step.root_block.start_class)
if match_step.coerce_type_block is not None:
# The CoerceType target class is also a type bound.
current_type_bounds.extend(match_step.coerce_type_block.target_class)
if current_type_bounds:
# A type bound exists. Assert that there is exactly one bound, defined in precisely one way.
return get_only_element_from_collection(current_type_bounds)
else:
# No type bound exists at this MATCH step.
return None
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
location, match_query):
"""Ensure that the two bounds either are an exact match, or one of them is None."""
if all((current_type_bound is not None,
previous_type_bound is not None,
current_type_bound != previous_type_bound)):
raise AssertionError(
u'Conflicting type bounds calculated at location {}: {} vs {} '
u'for query {}'.format(location, previous_type_bound, current_type_bound, match_query))
def _expose_only_preferred_locations(match_query, location_types, coerced_locations,
preferred_locations, eligible_locations):
"""Return a MATCH query where only preferred locations are valid as query start locations."""
preferred_location_types = dict()
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in preferred_locations:
# This location is preferred. We have to make sure that at least one occurrence
# of this location in the MATCH query has an associated "class:" clause,
# which would be generated by a type bound at the corresponding MATCH step.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = preferred_location_types.get(current_step_location, None)
if previous_type_bound is not None:
# The location is already valid. If so, make sure that this step either does
# not have any type bounds (e.g. via QueryRoot or CoerceType blocks),
# or has type bounds that match the previously-decided type bound.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
else:
# The location is not yet known to be valid. If it does not have
# a type bound in this MATCH step, add a type coercion to the type
# registered in "location_types".
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_step = match_step._replace(
coerce_type_block=CoerceType({current_type_bound}))
preferred_location_types[current_step_location] = current_type_bound
elif current_step_location in eligible_locations:
                # This location is eligible, but not preferred. We have to make sure
# none of the MATCH steps with this location have type bounds, and therefore
# will not produce a corresponding "class:" clause in the resulting MATCH query.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is not None:
# There is a type bound here that we need to neutralize.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
if (current_step_location not in coerced_locations or
previous_type_bound is not None):
# The type bound here is already implied by the GraphQL query structure,
# or has already been applied at a previous occurrence of this location.
# We can simply delete the QueryRoot / CoerceType blocks that impart it.
if isinstance(match_step.root_block, QueryRoot):
new_root_block = None
else:
new_root_block = match_step.root_block
new_step = match_step._replace(
root_block=new_root_block, coerce_type_block=None)
else:
# The type bound here is not already implied by the GraphQL query structure.
# This should only be possible via a CoerceType block. Lower this CoerceType
# block into a Filter with INSTANCEOF to ensure the resulting query has the
# same semantics, while making the location invalid as a query start point.
if (isinstance(match_step.root_block, QueryRoot) or
match_step.coerce_type_block is None):
raise AssertionError(u'Unexpected MATCH step applying a type bound not '
u'already implied by the GraphQL query structure: '
u'{} {}'.format(match_step, match_query))
new_where_block = convert_coerce_type_and_add_to_where_block(
match_step.coerce_type_block, match_step.where_block)
new_step = match_step._replace(
coerce_type_block=None, where_block=new_where_block)
else:
# There is no type bound that OrientDB can find defined at this location.
# No action is necessary.
pass
else:
# This location is neither preferred nor eligible.
# No action is necessary at this location.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def _expose_all_eligible_locations(match_query, location_types, eligible_locations):
"""Return a MATCH query where all eligible locations are valid as query start locations."""
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in eligible_locations:
# This location is eligible. We need to make sure it has an associated type bound,
# so that it produces a "class:" clause that will make it a valid query start
# location. It either already has such a type bound, or we can use the type
# implied by the GraphQL query structure to add one.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_coerce_type_block = CoerceType({current_type_bound})
new_step = match_step._replace(coerce_type_block=new_coerce_type_block)
else:
# There is a type bound here. We simply ensure that the bound is not conflicting
# with any other type bound at a different MATCH step with the same location.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
else:
# This function may only be called if there are no preferred locations. Since this
# location cannot be preferred, and is not eligible, it must be ineligible.
# No action is necessary in this case.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def expose_ideal_query_execution_start_points(compound_match_query, location_types,
coerced_locations):
"""Ensure that OrientDB only considers desirable query start points in query planning."""
new_queries = []
for match_query in compound_match_query.match_queries:
location_classification = _classify_query_locations(match_query)
preferred_locations, eligible_locations, _ = location_classification
if preferred_locations:
# Convert all eligible locations into non-eligible ones, by removing
# their "class:" clause. The "class:" clause is provided either by having
# a QueryRoot block or a CoerceType block in the MatchStep corresponding
# to the location. We remove it by converting the class check into
# an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away.
new_query = _expose_only_preferred_locations(
match_query, location_types, coerced_locations,
preferred_locations, eligible_locations)
elif eligible_locations:
# Make sure that all eligible locations have a "class:" clause by adding
# a CoerceType block that is a no-op as guaranteed by the schema. This merely
# ensures that OrientDB is able to use each of these locations as a query start point,
# and will choose the one whose class is of lowest cardinality.
new_query = _expose_all_eligible_locations(
match_query, location_types, eligible_locations)
else:
raise AssertionError(u'This query has no preferred or eligible query start locations. '
u'This is almost certainly a bug: {}'.format(match_query))
new_queries.append(new_query)
return compound_match_query._replace(match_queries=new_queries)
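# A minimal sketch of how this pass would be invoked from the MATCH lowering pipeline;
# the surrounding variable names here are illustrative assumptions, not part of this module:
#
#     lowered_query = expose_ideal_query_execution_start_points(
#         compound_match_query, location_types, coerced_locations)
#
# where "compound_match_query" is the compound MATCH query object produced by the earlier
# lowering steps, "location_types" maps each query Location to its GraphQL type (used when
# adding the no-op CoerceType blocks), and "coerced_locations" is the set of locations that
# already carry an explicit type coercion in the GraphQL query.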
| 58.257143
| 100
| 0.664452
| 2,854
| 22,429
| 5.033987
| 0.160827
| 0.035707
| 0.037029
| 0.013782
| 0.361453
| 0.316002
| 0.262616
| 0.214311
| 0.171574
| 0.162595
| 0
| 0.001066
| 0.289045
| 22,429
| 384
| 101
| 58.408854
| 0.899912
| 0.456418
| 0
| 0.36612
| 0
| 0
| 0.05754
| 0
| 0
| 0
| 0
| 0.005208
| 0.060109
| 1
| 0.043716
| false
| 0.016393
| 0.021858
| 0
| 0.10929
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
435b03494c0e0f08adce48e2055f1eb32e5446ba
| 3,763
|
py
|
Python
|
traffic_light/core.py
|
ofalk/cleware-traffic-light
|
be319fec8e190811463ade8aabc37ca2b4f17e57
|
[
"MIT"
] | null | null | null |
traffic_light/core.py
|
ofalk/cleware-traffic-light
|
be319fec8e190811463ade8aabc37ca2b4f17e57
|
[
"MIT"
] | null | null | null |
traffic_light/core.py
|
ofalk/cleware-traffic-light
|
be319fec8e190811463ade8aabc37ca2b4f17e57
|
[
"MIT"
] | null | null | null |
from enum import IntEnum
import functools
import usb.core
import usb.util
from traffic_light.error import TrafficLightError, MultipleTrafficLightsError
BM_REQUEST_TYPE = 0x21
B_REQUEST = 0x09
W_VALUE = 0x200
W_INDEX = 0x00
ID_VENDOR = 0x0d50
ID_PRODUCT = 0x0008
INTERFACE = 0
class Color(IntEnum):
RED = 0x10
YELLOW = 0x11
GREEN = 0x12
class State(IntEnum):
OFF = 0x0
ON = 0x1
class ClewareTrafficLight:
def __init__(self, address=None):
if address:
self.address = address
self.device = usb.core.find(
address=address,
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
elif len(list(ClewareTrafficLight.find_devices())) > 1:
raise MultipleTrafficLightsError(
"No address is given and there are multiple devices conected! "
"Use 'print_devices' to see a list of connected devices."
)
else:
self.device = usb.core.find(
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
if self.device is None:
raise TrafficLightError('Cleware traffic light not found!')
self.reattach = False
def attach(self):
"""Attaches the device back to the kernel"""
usb.util.dispose_resources(self.device)
if self.reattach:
self.device.attach_kernel_driver(INTERFACE)
def detach(self):
"""Detaches the device from to kernel so it can be used"""
if self.device.is_kernel_driver_active(INTERFACE):
self.device.detach_kernel_driver(INTERFACE)
self.reattach = True
@staticmethod
def find_devices():
"""Returns the raw iterator of all found traffic lights"""
devices = usb.core.find(find_all=True, idVendor=ID_VENDOR, idProduct=ID_PRODUCT)
if devices:
return devices
return []
@staticmethod
def print_devices():
"""Prints a list of all connected traffic lights"""
devices = ClewareTrafficLight.get_devices()
for device in devices:
print(device)
@staticmethod
def get_devices():
"""Returns a list of ClewareTrafficLight instances"""
usb_devices = ClewareTrafficLight.find_devices()
return [ClewareTrafficLight(d.address) for d in usb_devices]
def set_led(self, color, value, timeout=1000):
"""Sets the given state and color of the attached traffic light
Attribute:
color -- the to set color as the enum. E.g. Color.RED
state -- the state to which it should be set. E.g. State.ON
address -- the usb address of a specific traffic light
"""
try:
self.detach()
self.device.ctrl_transfer(BM_REQUEST_TYPE, B_REQUEST, W_VALUE, W_INDEX, [0x00, color, value], timeout=timeout)
except Exception as exc:
raise TrafficLightError(str(exc)) from exc
finally:
self.attach()
def __getattr__(self, name):
"""Parses attribut calls in function"""
args = name.split('_')
try:
color = Color[args[0].upper()]
state = State[args[1].upper()]
except Exception as exc:
raise TrafficLightError("Either the given color or state could not be parsed! Exc: {}"
.format(exc))
return functools.partial(self.set_led, color, state)
def __str__(self):
"""Converts instance into string with important imformations"""
return ("== Cleware Traffic Light ==\n"
"Address: {} \n"
"IdVendor: {} \n"
"IdProduct: {}".format(self.address, ID_VENDOR, ID_PRODUCT))
| 32.439655
| 122
| 0.60962
| 435
| 3,763
| 5.147126
| 0.358621
| 0.03573
| 0.014739
| 0.033497
| 0.103618
| 0.084859
| 0.032157
| 0
| 0
| 0
| 0
| 0.017557
| 0.303747
| 3,763
| 115
| 123
| 32.721739
| 0.837023
| 0.155195
| 0
| 0.154762
| 0
| 0
| 0.09041
| 0
| 0
| 0
| 0.016468
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.059524
| 0
| 0.321429
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
435b7f5d139890173dc2cf9019b51215cc554d6e
| 3,646
|
py
|
Python
|
sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_analyze_orchestration_app_luis_response_async.py
DESCRIPTION:
This sample demonstrates how to analyze a user query using an orchestration project.
In this sample, the orchestration project's top intent will map to a LUIS project.
For more info about how to setup a CLU orchestration project, see the README.
USAGE:
python sample_analyze_orchestration_app_luis_response_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_CONVERSATIONS_ENDPOINT - endpoint for your CLU resource.
2) AZURE_CONVERSATIONS_KEY - API key for your CLU resource.
3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME - project name for your CLU orchestration project.
4) AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME - deployment name for your CLU orchestration project.
"""
import asyncio
async def sample_analyze_orchestration_app_luis_response_async():
# [START analyze_orchestration_app_luis_response]
# import libraries
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.conversations.aio import ConversationAnalysisClient
# get secrets
clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
clu_key = os.environ["AZURE_CONVERSATIONS_KEY"]
project_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME"]
deployment_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME"]
# analyze query
client = ConversationAnalysisClient(clu_endpoint, AzureKeyCredential(clu_key))
async with client:
query = "Reserve a table for 2 at the Italian restaurant"
result = await client.analyze_conversation(
task={
"kind": "Conversation",
"analysisInput": {
"conversationItem": {
"participantId": "1",
"id": "1",
"modality": "text",
"language": "en",
"text": query
},
"isLoggingEnabled": False
},
"parameters": {
"projectName": project_name,
"deploymentName": deployment_name,
"verbose": True
}
}
)
# view result
print("query: {}".format(result["result"]["query"]))
print("project kind: {}\n".format(result["result"]["prediction"]["projectKind"]))
# top intent
top_intent = result["result"]["prediction"]["topIntent"]
print("top intent: {}".format(top_intent))
top_intent_object = result["result"]["prediction"]["intents"][top_intent]
print("confidence score: {}".format(top_intent_object["confidenceScore"]))
print("project kind: {}".format(top_intent_object["targetProjectKind"]))
if top_intent_object["targetProjectKind"] == "Luis":
print("\nluis response:")
luis_response = top_intent_object["result"]["prediction"]
print("top intent: {}".format(luis_response["topIntent"]))
print("\nentities:")
for entity in luis_response["entities"]:
print("\n{}".format(entity))
# [END analyze_orchestration_app_luis_response]
async def main():
await sample_analyze_orchestration_app_luis_response_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 39.630435
| 106
| 0.637685
| 372
| 3,646
| 6
| 0.36828
| 0.048387
| 0.061828
| 0.072581
| 0.228943
| 0.167563
| 0.084229
| 0.043011
| 0
| 0
| 0
| 0.002911
| 0.246297
| 3,646
| 92
| 107
| 39.630435
| 0.809316
| 0.328305
| 0
| 0
| 0
| 0
| 0.259564
| 0.055944
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078431
| 0
| 0.078431
| 0.176471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
435f04515eafc16cb9b3781591916aadd65a8bd3
| 2,499
|
py
|
Python
|
intro/deploy.py
|
terziev-viktor/SolidityCourse
|
6f10852e94eec69438c5e577795d317694227337
|
[
"MIT"
] | null | null | null |
intro/deploy.py
|
terziev-viktor/SolidityCourse
|
6f10852e94eec69438c5e577795d317694227337
|
[
"MIT"
] | null | null | null |
intro/deploy.py
|
terziev-viktor/SolidityCourse
|
6f10852e94eec69438c5e577795d317694227337
|
[
"MIT"
] | null | null | null |
import json
from web3 import Web3
from solcx import compile_standard, install_solc
with open("./SimpleStorage.sol", "r") as file:
simple_storage_src = file.read()
# install the solc compiler (version 0.8.0)
install_solc("0.8.0")
# compile the source
compiled_sol = compile_standard(
{
"language": "Solidity",
"sources": {"SimpleStorage.sol": {"content": simple_storage_src}},
"settings":
{
"outputSelection":
{
"*":
{
"*": ["abi", "metadata", "evm.bytecode", "evm.sourceMap"]
}
}
},
},
solc_version = "0.8.0"
)
with open("./out.json", "w") as file:
json.dump(compiled_sol, file)
# getting the bytecode
bytecode = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["evm"]["bytecode"]["object"]
# getting the abi
abi = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["abi"]
# connecting to ganache
w3 = Web3(Web3.HTTPProvider("HTTP://127.0.0.1:7545"))
chain_id = 1337
my_address = "0x02ECDdb09504C4d4B2ba2c7Ec80d77d44f6e631c"
private_key = "0xa9ddbecce894fdad11cd9864d9c58f794d23bd5f0d78d1c2eea204b284edfefc"
# Create the contract in python
SimpleStorage = w3.eth.contract(abi=abi, bytecode=bytecode)
# Get the latest test transaction
nonce = w3.eth.getTransactionCount(my_address)
# 1. Build a transaction
# 2. Sign the transaction
# 3. Send the transaction
transaction = SimpleStorage.constructor().buildTransaction({"gasPrice": w3.eth.gas_price, "chainId": chain_id, "from": my_address, "nonce": nonce})
signed_txn = w3.eth.account.sign_transaction(transaction, private_key)
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
# confirm transaction is received
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
print("tx_hash=", tx_hash)
print("receipt=", tx_receipt)
# working on-chain
simple_storage = w3.eth.contract(address=tx_receipt.contractAddress, abi=abi)
print(simple_storage.functions.retrieve().call())
store_transaction = simple_storage.functions.store(15).buildTransaction({
"gasPrice": w3.eth.gas_price,
"chainId": chain_id,
"from": my_address,
"nonce": nonce + 1
}
)
singed_store_transaction = w3.eth.account.sign_transaction(store_transaction, private_key)
store_transaction_hash = w3.eth.send_raw_transaction(singed_store_transaction.rawTransaction)
store_transaction_receipt = w3.eth.wait_for_transaction_receipt(store_transaction_hash)
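# A minimal read-back sketch: once the store(15) transaction above has been mined,
# the new value can be fetched with a plain (gas-free) call.
print("stored value =", simple_storage.functions.retrieve().call())  # expected: 15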
| 31.2375
| 147
| 0.708283
| 293
| 2,499
| 5.832765
| 0.368601
| 0.032183
| 0.018724
| 0.038619
| 0.250439
| 0.218841
| 0.129901
| 0.0866
| 0.0866
| 0.0866
| 0
| 0.045933
| 0.163665
| 2,499
| 79
| 148
| 31.632911
| 0.77177
| 0.109244
| 0
| 0
| 0
| 0
| 0.198825
| 0.058292
| 0
| 0
| 0.048803
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4361a9278aa18283e07b14ec0d517fca7051b980
| 9,550
|
py
|
Python
|
info_popup.py
|
cartazio/SublimeHaskell
|
e6f12ea69de939d12212a6ec594bf0aae0603f6d
|
[
"MIT"
] | 2
|
2021-07-07T16:41:48.000Z
|
2021-11-17T11:08:50.000Z
|
info_popup.py
|
cartazio/SublimeHaskell
|
e6f12ea69de939d12212a6ec594bf0aae0603f6d
|
[
"MIT"
] | null | null | null |
info_popup.py
|
cartazio/SublimeHaskell
|
e6f12ea69de939d12212a6ec594bf0aae0603f6d
|
[
"MIT"
] | null | null | null |
import urllib.parse
import webbrowser
import json
from xml.etree import ElementTree
import sublime
import SublimeHaskell.sublime_haskell_common as Common
import SublimeHaskell.internals.utils as Utils
import SublimeHaskell.internals.unicode_opers as UnicodeOpers
import SublimeHaskell.symbols as symbols
import SublimeHaskell.internals.backend_mgr as BackendManager
import SublimeHaskell.parseoutput as ParseOutput
import SublimeHaskell.types as types
# Unused module variable:
# style_header = "<style>" \
# "a { text-decoration: underline; }" \
# ".type { color: red; }" \
# ".tyvar { color: blue; }" \
# ".operator { color: green; }" \
# ".comment { color: gray; font-style: italic; }" \
# ".docs { color: gray; }" \
# "</style>"
class Styles(object):
"""
Loads and holds cache of scheme styles
Also generates style header
"""
def __init__(self):
self.schemes = {}
CSS_CLASSES = {
'comment': 'comment',
'function': 'entity.name.function',
'type': 'entity.name.type',
'operator': 'keyword.operator',
'keyword': 'keyword.declaration',
'tyvar': 'variable.generic',
'error': 'sublimehaskell.mark.error',
'warning': 'sublimehaskell.mark.warning',
'hint': 'sublimehaskell.mark.hint'
}
def load_scheme(self, scheme_path):
if scheme_path not in self.schemes:
scheme_res = sublime.load_resource(scheme_path)
if scheme_res:
# Go through all styles and collect scope/foreground/fontStyle etc.
# Prefer ST3 'sublime-color-scheme' JSON over older TextMate XML.
self.schemes[scheme_path] = self.collect_sublime_scheme(json.loads(scheme_res)) \
if scheme_path.endswith('.sublime-color-scheme') \
else self.collect_textmate_scheme(ElementTree.fromstring(scheme_res))
return self.schemes.get(scheme_path, {})
def collect_textmate_scheme(self, scheme_tree):
scheme = {}
for style in scheme_tree.findall(".//dict[key='scope']"):
try:
cur_style = {}
cur_tag = None
for elem in style.iter():
if elem.tag == 'key':
cur_tag = elem.text # We are going to fill it next time
elif elem.tag == 'string' and cur_tag is not None:
cur_style[cur_tag] = elem.text
cur_tag = None
if 'scope' in cur_style:
scheme[cur_style['scope']] = cur_style
except ValueError:
pass
return scheme
def collect_sublime_scheme(self, scheme_dict):
scheme = {}
for rule in scheme_dict.get('rules', []):
scope = rule.get('scope', '')
if scope:
scheme[scope] = rule
return scheme
def gen_style(self, scheme_path):
scheme = self.load_scheme(scheme_path)
parts = []
parts.append("<style>")
parts.append("a { text-decoration: underline; }")
# generate CSS style for each class
for cls, scope in self.CSS_CLASSES.items():
# find scope or its parent in scheme
scope_parts = scope.split('.')
for css_scope in reversed(['.'.join(scope_parts[0:i+1]) for i in range(0, len(scope_parts))]):
if css_scope in scheme: # Found some scope, fill style class
style_parts = []
if 'foreground' in scheme[css_scope]:
style_parts.append("color: {0}".format(scheme[css_scope]['foreground']))
# Prefer ST3 'sublime-color-scheme' JSON attribute over the older TextMate-ish name
font_style = scheme[css_scope].get('font_style', scheme[css_scope].get('fontStyle', ''))
if font_style:
style_parts.append("font-style: {0}".format(font_style))
parts.append(".{0} {{ {1} }}".format(cls, "; ".join(style_parts)))
break
parts.append("</style>")
return "".join(parts)
class SublimeHaskellHoverPopup(object):
# HTML style formatting
STYLES = Styles()
def __init__(self, view, filename, point, hover_zone):
super().__init__()
self.view = view
self.filename = filename
self.point = point
self.hover_zone = hover_zone
self.line = view.rowcol(point)[0]
self.shown = False
def do_hover(self):
if self.hover_zone == sublime.HOVER_TEXT:
qsymbol = Common.get_qualified_symbol_at_point(self.view, self.point)
## print('hover: qualified symbol {0}'.format(qsymbol))
module_word = qsymbol.module
ident = qsymbol.name
if module_word is not None and ident is None:
# TODO: Any ideas for popup about module?
pass
elif ident is not None:
whois_name = qsymbol.qualified_name()
full_name = qsymbol.full_name()
# Try get type of hovered symbol
typed_expr = None
if types.SourceHaskellTypeCache().has(self.filename):
typed_expr = self.get_type(types.SourceHaskellTypeCache().get(self.filename), whois_name)
else:
project_name = Common.locate_cabal_project_from_view(self.view)[1]
point_rgn = sublime.Region(self.point, self.point)
typed_expr = self.get_type(types.get_type_view(self.view, project_name, point_rgn), whois_name)
# Try whois
suggest_import = False
decl = Utils.head_of(BackendManager.active_backend().whois(whois_name, self.filename))
if not decl:
suggest_import = True
decl = Utils.head_of(BackendManager.active_backend().lookup(full_name, self.filename))
self.create_symbol_popup(typed_expr, decl, suggest_import)
elif self.hover_zone == sublime.HOVER_GUTTER:
errs = [err for err in ParseOutput.MARKER_MANAGER.marks_for_view(self.view) if err.region.start.line == self.line]
if errs:
popup_parts = [self.STYLES.gen_style(self.view.settings().get('color_scheme'))]
for err in errs:
msg = UnicodeOpers.use_unicode_operators(symbols.escape_text(err.message))
# Decorate first word with style
decors = {
'Error': 'error',
'Warning': 'warning',
'Hint': 'hint'
}
for dec, dec_style in decors.items():
msg = msg.replace(dec, u'<span class="{0}">{1}</span>'.format(dec_style, dec))
popup_parts.append(u'<p>{0}</p>'.format(msg))
if err.correction is not None:
popup_parts.append(err.correction.popup())
popup_text = u''.join(popup_parts)
self.shown = True
self.view.show_popup(popup_text, sublime.HIDE_ON_MOUSE_MOVE_AWAY, self.point, 600, 600,
self.on_navigate, self.on_hide)
def create_symbol_popup(self, typed_expr, decl, suggest_import):
if typed_expr or decl:
popup_parts = [self.STYLES.gen_style(self.view.settings().get('color_scheme'))]
if typed_expr:
popup_parts.append(u'<p><span class="function">{0}</span>{1}</p>'.format(
typed_expr.substr(self.view),
symbols.format_type(UnicodeOpers.use_unicode_operators(' :: {0}'.format(typed_expr.typename)))))
if decl:
popup_msg = [u'<a href="import:{0}">Add import</a>'.format(urllib.parse.quote_plus(decl.name))] \
if suggest_import else []
popup_parts.append(decl.popup(popup_msg))
popup_text = u''.join(popup_parts)
if not self.shown:
self.shown = True
self.view.show_popup(popup_text, sublime.HIDE_ON_MOUSE_MOVE_AWAY, self.point, 600, 600,
self.on_navigate, self.on_hide)
else:
self.view.update_popup(popup_text)
def get_type(self, type_list, qual_name):
filt_types = [t for t in type_list
if t.substr(self.view) == qual_name and t.region(self.view).contains(self.point)]
return Utils.head_of(filt_types)
def on_navigate(self, url):
if self.view.is_popup_visible():
self.view.hide_popup()
if url[0:4] == 'http':
webbrowser.open(url)
elif url[0:8] == 'autofix:':
rgn = symbols.Region.from_str(url[8:])
ParseOutput.MARKER_MANAGER.apply_autocorrect(self.view, rgn)
elif url[0:7] == "import:":
decl_name = urllib.parse.unquote(url[7:])
self.view.run_command('sublime_haskell_insert_import_for_symbol',
{'filename': self.view.file_name(),
'decl': decl_name})
else:
self.view.window().open_file(url, sublime.ENCODED_POSITION | sublime.TRANSIENT)
def on_hide(self):
self.shown = False
| 42.070485
| 126
| 0.566387
| 1,087
| 9,550
| 4.789328
| 0.23827
| 0.030734
| 0.006915
| 0.00922
| 0.145601
| 0.119093
| 0.078371
| 0.062236
| 0.062236
| 0.062236
| 0
| 0.006048
| 0.324817
| 9,550
| 226
| 127
| 42.256637
| 0.801334
| 0.093717
| 0
| 0.136095
| 0
| 0
| 0.07837
| 0.024846
| 0
| 0
| 0
| 0.004425
| 0
| 1
| 0.065089
| false
| 0.011834
| 0.118343
| 0
| 0.236686
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4361a9d08c25b0f208bbec15d3be738264785d14
| 4,126
|
py
|
Python
|
modules/google_home_lights.py
|
artizanatweb/ghome-assistant
|
dba2bc58979ebae48afc71c356ae2d40b8830eee
|
[
"Apache-2.0"
] | null | null | null |
modules/google_home_lights.py
|
artizanatweb/ghome-assistant
|
dba2bc58979ebae48afc71c356ae2d40b8830eee
|
[
"Apache-2.0"
] | null | null | null |
modules/google_home_lights.py
|
artizanatweb/ghome-assistant
|
dba2bc58979ebae48afc71c356ae2d40b8830eee
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2017 Seeed Technology Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modules.pixel_ring import pixel_ring
import numpy
import time
import threading
try:
import queue as Queue
except ImportError:
import Queue as Queue
class GoogleHomeLights:
def __init__(self):
self.basis = numpy.array([0] * 4 * 12)
self.basis[0 * 4 + 0] = 2
self.basis[3 * 4 + 2] = 2
self.basis[6 * 4 + 1] = 1
self.basis[6 * 4 + 2] = 1
self.basis[9 * 4 + 1] = 2
self.pixels = self.basis * 0
self.write(self.pixels)
pixel_ring.write(0, [6, 0, 0, 0])
self.next = threading.Event()
self.queue = Queue.Queue()
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
def wakeup(self, direction=0):
def f():
self._wakeup(direction)
self.queue.put(f)
def listen(self):
self.next.set()
self.queue.put(self._listen)
def think(self):
self.next.set()
self.queue.put(self._think)
def speak(self):
self.next.set()
self.queue.put(self._speak)
def off(self):
self.next.set()
self.queue.put(self._off)
def _run(self):
while True:
func = self.queue.get()
func()
def _wakeup(self, direction=0):
position = int((direction + 15) / 30) % 12
basis = numpy.roll(self.basis, position * 4)
for i in range(1, 25):
pixels = basis * i
self.write(pixels)
time.sleep(0.005)
pixels = numpy.roll(pixels, 4)
self.write(pixels)
time.sleep(0.1)
for i in range(2):
new_pixels = numpy.roll(pixels, 4)
self.write(new_pixels * 0.5 + pixels)
pixels = new_pixels
time.sleep(0.1)
self.write(pixels)
self.pixels = pixels
def _listen(self):
pixels = self.pixels
for i in range(1, 25):
self.write(pixels * i / 24)
time.sleep(0.01)
def _think(self):
pixels = self.pixels
self.next.clear()
while not self.next.is_set():
pixels = numpy.roll(pixels, 4)
self.write(pixels)
time.sleep(0.2)
t = 0.1
for i in range(0, 5):
pixels = numpy.roll(pixels, 4)
self.write(pixels * (4 - i) / 4)
time.sleep(t)
t /= 2
# time.sleep(0.5)
self.pixels = pixels
def _speak(self):
pixels = self.pixels
self.next.clear()
while not self.next.is_set():
for i in range(5, 25):
self.write(pixels * i / 24)
time.sleep(0.01)
time.sleep(0.3)
for i in range(24, 4, -1):
self.write(pixels * i / 24)
time.sleep(0.01)
time.sleep(0.3)
self._off()
def _off(self):
self.write([0] * 4 * 12)
def write(self, data):
if type(data) is list:
pixel_ring.write(3, data)
else:
pixel_ring.write(3, data.astype('uint8').tostring())
lights = GoogleHomeLights()
if __name__ == '__main__':
while True:
try:
lights.wakeup()
time.sleep(3)
lights.think()
time.sleep(3)
lights.speak()
time.sleep(3)
lights.off()
time.sleep(3)
except KeyboardInterrupt:
break
pixel_ring.off()
| 24.128655
| 74
| 0.542414
| 547
| 4,126
| 4.025594
| 0.261426
| 0.061308
| 0.045413
| 0.029973
| 0.311989
| 0.266122
| 0.231608
| 0.21753
| 0.144414
| 0.144414
| 0
| 0.042773
| 0.342705
| 4,126
| 171
| 75
| 24.128655
| 0.769174
| 0.145904
| 0
| 0.347826
| 0
| 0
| 0.003704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121739
| false
| 0
| 0.06087
| 0
| 0.191304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43626cff0461fc1edbacac7b7a76a2f308ada971
| 5,016
|
py
|
Python
|
tensortools/optimize/mncp_hals.py
|
klmcguir/tensortools
|
38262f5bad9d3171286e34e5f15d196752dda939
|
[
"MIT"
] | null | null | null |
tensortools/optimize/mncp_hals.py
|
klmcguir/tensortools
|
38262f5bad9d3171286e34e5f15d196752dda939
|
[
"MIT"
] | null | null | null |
tensortools/optimize/mncp_hals.py
|
klmcguir/tensortools
|
38262f5bad9d3171286e34e5f15d196752dda939
|
[
"MIT"
] | null | null | null |
"""
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
With support for missing data.
"""
import numpy as np
import scipy as sci
from scipy import linalg
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
from .._hals_update import _hals_update
def mncp_hals(X, rank, mask, random_state=None, init='rand', **options):
"""
Fits a nonnegative CP decomposition using the Hierarchical Alternating Least
Squares (HALS) method. Supports missing data.
Parameters
----------
X : (I_1, ..., I_N) array_like
A real array with nonnegative entries and ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
mask : (I_1, ..., I_N) array_like
A binary tensor with the same shape as ``X``. All entries equal to zero
correspond to held out or missing data in ``X``. All entries equal to
one correspond to observed entries in ``X`` and the decomposition is
fit to these datapoints.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
init : str, or KTensor, optional (default ``'rand'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
This implementation uses the Hierarchical Alternating Least Squares method.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Examples
--------
"""
# Mask missing elements.
X = np.copy(X)
X[~mask] = np.linalg.norm(X[mask])
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'NCP_HALS', **options)
# Store problem dimensions.
normX = linalg.norm(X[mask].ravel())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Iterate the HALS algorithm until convergence or maxiter is reached
# i) compute the N gram matrices and multiply
# ii) Compute Khatri-Rao product
# iii) Update component U_1, U_2, ... U_N
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
while result.still_optimizing:
# First, HALS update.
for n in range(X.ndim):
# Select all components, but U_n
components = [U[j] for j in range(X.ndim) if j != n]
# i) compute the N-1 gram matrices
grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])
# ii) Compute Khatri-Rao product
kr = khatri_rao(components)
p = unfold(X, n).dot(kr)
# iii) Update component U_n
_hals_update(U[n], grams, p)
# Then, update masked elements.
pred = U.full()
X[~mask] = pred[~mask]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
# obj = np.sqrt( (sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX**2)) / normX
resid = X - pred
result.update(linalg.norm(resid.ravel()) / normX)
# end optimization loop, return result.
return result.finalize()
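# A minimal usage sketch; the tensor shape, rank and ~10% missing-data mask are
# illustrative assumptions, and the import path mirrors this module's location:
#
#     import numpy as np
#     from tensortools.optimize.mncp_hals import mncp_hals
#
#     X = np.random.rand(30, 25, 20)            # nonnegative data tensor
#     mask = np.random.rand(*X.shape) > 0.1     # True where an entry is observed
#     result = mncp_hals(X, rank=3, mask=mask, verbose=False)
#     factors = result.factors                  # fitted KTensor, as described above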
| 40.780488
| 94
| 0.60626
| 617
| 5,016
| 4.860616
| 0.388979
| 0.035012
| 0.023008
| 0.024008
| 0.132044
| 0.086029
| 0.058686
| 0
| 0
| 0
| 0
| 0.007922
| 0.245016
| 5,016
| 123
| 95
| 40.780488
| 0.783998
| 0.692185
| 0
| 0
| 0
| 0
| 0.009174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.269231
| 0
| 0.346154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4363164b554bb6ade5f87250305647778400993f
| 18,079
|
py
|
Python
|
raredecay/tools/data_tools.py
|
jonas-eschle/raredecay
|
6285f91e0819d01c80125f50b24e60ee5353ae2e
|
[
"Apache-2.0"
] | 7
|
2016-11-19T17:28:07.000Z
|
2020-12-29T19:49:37.000Z
|
raredecay/tools/data_tools.py
|
mayou36/raredecay
|
5b319ada66ebe54f81e216efad81fc9f06237a30
|
[
"Apache-2.0"
] | 23
|
2017-03-13T19:13:58.000Z
|
2021-05-30T21:48:50.000Z
|
raredecay/tools/data_tools.py
|
jonas-eschle/raredecay
|
6285f91e0819d01c80125f50b24e60ee5353ae2e
|
[
"Apache-2.0"
] | 5
|
2016-12-17T19:24:13.000Z
|
2021-05-31T14:32:34.000Z
|
"""
@author: Jonas Eschle "Mayou36"
DEPRECATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat
DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED!
Contains several tools to convert, load, save and plot data
"""
import warnings
import os
import copy
import pandas as pd
import numpy as np
import uproot
import pickle
from . import dev_tool
# both produce error (27.07.2016) when importing them if run from main.py.
# No problem when run as main...
# from raredecay.tools import dev_tool
from .. import meta_config as meta_cfg
def apply_cuts(signal_data, bkg_data, percent_sig_to_keep=100, bkg_length=None):
"""Search for best cut on value to still keep percent_sig_to_keep of signal
Parameters
----------
signal_data : 1-D numpy array
The signal
bkg_data : 1-D numpy array
The background data
percent_sig_to_keep : 0 < float <= 100
What percentage of the data to keep in order to apply the cuts.
"""
# if percent_sig_to_keep < 100:
# raise NotImplementedError("percentage of < 100 not yet imlemented")
percentile = [0, percent_sig_to_keep] # TODO: modify for percent_sig_to_keep
bkg_length_before = len(bkg_data)
bkg_length = len(bkg_data) if bkg_length in (None, 0) else bkg_length
lower_cut, upper_cut = np.percentile(signal_data, percentile)
cut_bkg = np.count_nonzero(
np.logical_or(bkg_data < lower_cut, bkg_data > upper_cut)
)
rejected_bkg = (bkg_length_before - cut_bkg) / bkg_length
return [lower_cut, upper_cut], rejected_bkg
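# A small worked example: with the default percent_sig_to_keep=100 the cut window is
# simply the min/max of the signal, and the second return value equals
# (len(bkg_data) - n_outside_window) / len(bkg_data):
#
#     >>> import numpy as np
#     >>> cuts, frac = apply_cuts(np.array([1.0, 2.0, 3.0, 4.0]), np.array([0.5, 2.5, 5.0]))
#     >>> [float(c) for c in cuts], round(frac, 3)
#     ([1.0, 4.0], 0.333)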
def make_root_dict(path_to_rootfile, tree_name, branches):
"""Returns a root_numpy compatible "root-dict" of a root-tree.
Parameters
----------
path_to_rootfile : str
The exact path to the root-tree including the filename. Example:
/home/user1/data/myRootTree1.root
tree_name : str
The name of the tree
branches : str or list[str, str, str,... ]
The branches of the tree to use
"""
output = dict(filenames=path_to_rootfile, treename=tree_name, branches=branches)
output = dev_tool.entries_to_str(output)
return output
def add_to_rootfile(rootfile, new_branch, branch_name=None, overwrite=True):
"""Adds a new branch to a given root file.
.. warning:: Overwrite not working currently!
Parameters
----------
rootfile : root-dict
The ROOT-file where the data should be added
new_branch : numpy.array 1-D, list, root-dict
A one-dimensional numpy array that contains the data.
branch_name : str
The name of the branch, i.e. the name in the dtype of the array.
"""
from root_numpy import array2root
from rootpy.io import root_open
rootfile = dev_tool.entries_to_str(rootfile)
new_branch = dev_tool.entries_to_str(new_branch)
branch_name = dev_tool.entries_to_str(branch_name)
# get the right parameters
# TODO: what does that if there? an assertion maybe?
write_mode = "update"
branch_name = "new_branch1" if branch_name is None else branch_name
if isinstance(rootfile, dict):
filename = rootfile.get("filenames")
treename = rootfile.get("treename")
new_branch = to_ndarray(new_branch)
# new_branch.dtype = [(branch_name, 'f8')]
# write to ROOT-file
write_to_root = False
if os.path.isfile(filename):
with root_open(filename, mode="a") as root_file:
tree = getattr(root_file, treename) # test
if not tree.has_branch(branch_name):
write_to_root = True
# array2tree(new_branch, tree=tree)
# f.write("", TObject.kOverwrite) # overwrite, does not create friends
else:
write_mode = "recreate"
write_to_root = True
if write_to_root:
arr = np.core.records.fromarrays([new_branch], names=branch_name)
array2root(arr=arr, filename=filename, treename=treename, mode=write_mode)
return 0
else:
return 1
# TODO: remove? outdated
def format_data_weights(data_to_shape, weights):
"""Format the data and the weights perfectly. Same length and more.
Change the data to pandas.DataFrame and fill the weights with ones where
nothing or None is specified. Returns both in lists.
Very useful to loop over several data and weights.
Parameters
----------
data_to_shape : (root_dict, numpy.array, pandas.DataFrame)
The data for which we apply the weights. Usual 2-D shape.
weights : (list, numpy.array, pandas.DataFrame, None)
The weights to be reshaped
*Best format* :
[array(weights),array(weights), None, array(weights),...]
*None* can be used if no special weights are specified.
If weights contains fewer "weight-containing array-like objects" than
data_to_shape does, the difference will be filled with *1*
Return
------
out : list(pandas.DataFrame(data), pandas.DataFrame(data),...)
Return a list containing data
out : list(numpy.array(weight), numpy.array(weight),...)
Return a list with the weights, converted and filled.
"""
# convert the data
if not isinstance(data_to_shape, list):
data_to_shape = [data_to_shape]
data_to_shape = list(map(to_pandas, data_to_shape))
# convert the weights
if not isinstance(weights, list):
weights = [weights]
if weights[0] is not None:
if len(weights[0]) == 1:
weights = [weights]
# convert to pandas
assert isinstance(weights, list), "weights could not be converted to list"
for data_id, data in enumerate(data_to_shape):
if data_id >= len(weights):
weights.append(None)
if weights[data_id] is None:
weights[data_id] = np.array([1] * len(data))
weights[data_id] = to_pandas(weights[data_id]).squeeze().values
return data_to_shape, weights
def obj_to_string(objects, separator=None):
"""Return a string containing all objects as strings, separated by the separator.
Useful for automatic conversion for different types. The following objects
will automatically be converted:
- None will be omitted
Parameters
----------
objects : any object or list(obj, obj, ...) with a string representation
The objects will be converted to a string and concatenated, separated
by the separator.
separator : str
The separator between the objects. Default is " - ".
"""
objects = dev_tool.entries_to_str(objects)
if isinstance(objects, str): # no need to change things
return objects
separator = " - " if separator is None else separator
assert isinstance(separator, str), "Separator not a str"
objects = to_list(objects)
objects = [str(obj) for obj in objects if obj not in (None, "")] # remove Nones
string_out = ""
for word in objects:
string_out += word + separator if word != objects[-1] else word
return string_out
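# A small example, assuming dev_tool.entries_to_str passes plain Python objects through
# unchanged: None entries are dropped and the rest are joined with the default separator.
#
#     >>> obj_to_string(['a', None, 1])
#     'a - 1'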
def is_root(data_to_check):
"""Check whether a given data is a root file. Needs dicts to be True."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, dict):
path_name = data_to_check.get("filenames")
# assert isinstance(path_name, str), ("'filenames' of the dictionary " +
# str(data_to_check) + "is not a string")
if path_name.endswith(meta_cfg.ROOT_DATATYPE):
flag = True
return flag
def is_list(data_to_check):
"""Check whether the given data is a list."""
flag = False
if isinstance(data_to_check, list):
flag = True
return flag
def is_ndarray(data_to_check):
"""Check whether a given data is an ndarray."""
flag = False
if isinstance(data_to_check, np.ndarray):
flag = True
return flag
def is_pickle(data_to_check):
"""Check if the file is a pickled file (checks the ending)."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, str):
if data_to_check.endswith(meta_cfg.PICKLE_DATATYPE):
flag = True
return flag
def to_list(data_in):
"""Convert the data into a list. Does not pack lists into a new one.
If your input is, for example, a string or a list of strings, or a
tuple filled with strings, you have, in general, a problem:
- just iterating through the object will fail because it iterates through the
characters of the string.
- using list(obj) converts the tuple, leaves the list, but splits a string's
characters into single elements of a new list.
- using [obj] creates a list containing a string, but also a list containing
a list or a tuple, which you did not want.
Solution: use to_list(obj), which creates a new list in case the object is
a single object (a string is a single object in this sense) or converts
to a list if the object is already a container for several objects.
Parameters
----------
data_in : any obj
So far, any object can be entered.
Returns
-------
out : list
Return a list containing the object or the object converted to a list.
"""
if isinstance(data_in, (str, int, float)):
data_in = [data_in]
data_in = list(data_in)
return data_in
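# Small examples of the behaviour described above:
#
#     >>> to_list('abc')        # a string stays a single element
#     ['abc']
#     >>> to_list(('a', 'b'))   # a tuple is converted to a list
#     ['a', 'b']
#     >>> to_list([1, 2])       # a list is returned unchanged
#     [1, 2]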
def to_ndarray(data_in, float_array=False):
"""Convert data to numpy array (containing only floats).
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
import uproot
if is_root(data_in):
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
branches = to_list(data_in["branches"])
loaded = tree.arrays(branches, library="np")
loaded = np.stack([loaded[branch] for branch in branches])
if len(branches) == 1:
loaded = loaded[0]
data_in = loaded
# change numpy.void to normal floats
if isinstance(data_in, (pd.Series, pd.DataFrame)):
test_sample = data_in.iloc[0]
else:
test_sample = data_in[0]
if isinstance(test_sample, np.void):
data_in = np.array([val[0] for val in data_in])
if isinstance(data_in, (np.recarray, np.ndarray)):
data_in = data_in.tolist()
if is_list(data_in) or isinstance(data_in, pd.Series):
data_in = np.array(data_in)
if not isinstance(data_in[0], (int, float, str, bool)):
if float_array:
iter_data = copy.deepcopy(data_in)
# HACK
data_in = np.ndarray(shape=len(data_in), dtype=data_in.dtype)
# HACK END
for i, element in enumerate(iter_data):
if not isinstance(element, (int, float, str, bool)):
# does that work or should we iterate over copy?
try:
element_len = len(element)
except TypeError:
element_len = 1
if element_len > 1:
data_in[i] = to_ndarray(element)
float_array = False
elif element_len == 1:
data_in[i] = float(element)
warnings.warn("Could not force float array")
if float_array:
data_in = np.asfarray(data_in)
assert is_ndarray(data_in), "Error, could not convert data to numpy array"
return data_in
def to_pandas_old(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
# TODO: generalize
root_index_name = "__index__"
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
root_index = None
import root_numpy
if root_index_name in root_numpy.list_branches(
filename=data_in["filenames"], treename=data_in.get("treename")
):
root_index = root_numpy.root2array(
filenames=data_in["filenames"],
treename=data_in.get("treename"),
selection=data_in.get("selection"),
branches=root_index_name,
)
data_in = root_numpy.root2array(**data_in) # why **? it's a root dict
if is_list(data_in):
data_in = np.array(data_in)
if is_ndarray(data_in):
if (isinstance(columns, (list, tuple)) and len(columns) == 1) or isinstance(
columns, str
):
data_in = to_ndarray(data_in)
data_in = pd.DataFrame(data_in, columns=columns, index=root_index)
if index is not None:
data_in = data_in.loc[index]
elif isinstance(data_in, pd.DataFrame):
pass
else:
raise TypeError("Could not convert data to pandas. Data: " + data_in)
return data_in
def to_pandas(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
if columns is None:
columns = data_in["branches"]
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
if "__index__" in tree.keys(): # legacy, we can also convert this
return to_pandas_old(data_in=data_in, index=index, columns=columns)
branches = to_list(columns)
loaded = tree.arrays(branches, library="pd")
if index is not None:
loaded = loaded.loc[index]
return loaded
else:
# HACK START
return to_pandas_old(data_in=data_in, index=index, columns=columns)
# HACK END
# from root_pandas import read_root
#
# root_pandas_numpy_map = dict(filenames='paths', treename='key', branches='columns',
# selection='where')
#
# if is_root(data_in):
# is_root2array = False
# for key, val in copy.deepcopy(list(data_in.items())):
# if key in root_pandas_numpy_map:
# is_root2array = True
# del data_in[key]
# data_in[root_pandas_numpy_map[key]] = val
# data_in['columns'] = to_list(data_in['columns'])
# if is_root2array:
# data_in['columns'] = ['noexpand:'+col for col in data_in['columns'] if not col.startswith('noexpand:')]
# remove the noexpand:
# data_in = read_root(**data_in) # why **? it's a root dict
# if is_list(data_in):
# data_in = np.array(data_in)
# if is_ndarray(data_in):
# if ((isinstance(columns, (list, tuple)) and len(columns) == 1) or
# isinstance(columns, string)):
#
# data_in = to_ndarray(data_in)
# data_in = pd.DataFrame(data_in, columns=columns)
# if index is not None:
# data_in = data_in.loc[index]
# elif isinstance(data_in, pd.DataFrame):
# pass
# else:
# raise TypeError("Could not convert data to pandas. Data: " + data_in)
# return data_in
def adv_return(return_value, save_name=None):
"""Save the value if save_name specified, otherwise just return input.
Can be wrapped around the return value. Without any arguments, the return
of your function will be exactly the same. With arguments, the value can
be saved (**pickled**) before it is returned.
Parameters
----------
return_value : any python object
The python object which should be pickled.
save_name : str, None
| The (file-)name for the pickled file. File-extension will be added \
automatically if specified in *raredecay.meta_config*.
| If *None* is passed, the object won't be pickled.
Return
------
out : python object
Return return_value without changes.
**Usage**:
Instead of a simple return statement
>>> return my_variable/my_object
one can use the **completely equivalent** statement
>>> return adv_return(my_variable/my_object)
If the return value should be saved in addition to be returned, use
>>> return adv_return(my_variable/my_object, save_name='my_object.pickle')
(*the .pickle ending is not required but added automatically if omitted*)
which returns the value and saves it.
"""
save_name = dev_tool.entries_to_str(save_name)
if save_name not in (None, False):
if isinstance(save_name, str):
save_name = meta_cfg.PICKLE_PATH + save_name
if not is_pickle(save_name):
save_name += "." + meta_cfg.PICKLE_DATATYPE
with open(str(save_name), "wb") as f:
pickle.dump(return_value, f, meta_cfg.PICKLE_PROTOCOL)
print(str(return_value) + " pickled to " + save_name)
else:
pass
# HACK how to solve logger problem?
# logger.error("Could not pickle data, name for file (" +
# str(save_name) + ") is not a string!" +
# "\n Therefore, the following data was only returned" +
# " but not saved! \n Data:" + str(return_value))
return return_value
def try_unpickle(file_to_unpickle, use_metapath_bkwcomp=False):
"""Try to unpickle a file and return, otherwise just return input."""
file_to_unpickle = dev_tool.entries_to_str(file_to_unpickle)
if is_pickle(file_to_unpickle):
extra_path = meta_cfg.PICKLE_PATH if use_metapath_bkwcomp else ""
with open(extra_path + file_to_unpickle, "rb") as f:
file_to_unpickle = pickle.load(f)
return file_to_unpickle
| 34.969052
| 117
| 0.636872
| 2,497
| 18,079
| 4.429315
| 0.161394
| 0.053707
| 0.014919
| 0.015913
| 0.242767
| 0.194846
| 0.165371
| 0.149458
| 0.142224
| 0.135895
| 0
| 0.004557
| 0.271752
| 18,079
| 516
| 118
| 35.036822
| 0.835485
| 0.44571
| 0
| 0.230415
| 0
| 0
| 0.037895
| 0
| 0
| 0
| 0
| 0.005814
| 0.013825
| 1
| 0.069124
| false
| 0.009217
| 0.059908
| 0
| 0.21659
| 0.004608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4363297eb771b020c864cdfbc69be70aff1727b6
| 2,052
|
py
|
Python
|
toontown/coghq/boardbothq/BoardOfficeManagerAI.py
|
LittleNed/toontown-stride
|
1252a8f9a8816c1810106006d09c8bdfe6ad1e57
|
[
"Apache-2.0"
] | 1
|
2018-06-16T23:06:38.000Z
|
2018-06-16T23:06:38.000Z
|
toontown/coghq/boardbothq/BoardOfficeManagerAI.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | null | null | null |
toontown/coghq/boardbothq/BoardOfficeManagerAI.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | 4
|
2019-06-20T23:45:23.000Z
|
2020-10-14T20:30:15.000Z
|
from direct.directnotify import DirectNotifyGlobal
import DistributedBoardOfficeAI
from toontown.toonbase import ToontownGlobals
from toontown.coghq.boardbothq import BoardOfficeLayout
from direct.showbase import DirectObject
import random
class BoardOfficeManagerAI(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('BoardOfficeManagerAI')
boardofficeId = None
def __init__(self, air):
DirectObject.DirectObject.__init__(self)
self.air = air
def getDoId(self):
return 0
def createBoardOffice(self, boardofficeId, players):
for avId in players:
if bboard.has('boardofficeId-%s' % avId):
boardofficeId = bboard.get('boardofficeId-%s' % avId)
break
numFloors = ToontownGlobals.BoardOfficeNumFloors[boardofficeId]
floor = random.randrange(numFloors)
for avId in players:
if bboard.has('mintFloor-%s' % avId):
floor = bboard.get('mintFloor-%s' % avId)
floor = max(0, floor)
floor = min(floor, numFloors - 1)
break
for avId in players:
if bboard.has('mintRoom-%s' % avId):
roomId = bboard.get('mintRoom-%s' % avId)
for i in xrange(numFloors):
layout = BoardOfficeLayout.BoardOfficeLayout(boardofficeId, i)
if roomId in layout.getRoomIds():
floor = i
else:
from toontown.coghq.boardbothq import BoardOfficeRoomSpecs
roomName = BoardOfficeRoomSpecs.BoardOfficeRoomId2RoomName[roomId]
BoardOfficeManagerAI.notify.warning('room %s (%s) not found in any floor of mint %s' % (roomId, roomName, boardofficeId))
mintZone = self.air.allocateZone()
mint = DistributedBoardOfficeAI.DistributedBoardOfficeAI(self.air, boardofficeId, mintZone, floor, players)
mint.generateWithRequired(mintZone)
return mintZone
| 41.04
| 141
| 0.639376
| 187
| 2,052
| 6.973262
| 0.358289
| 0.023006
| 0.020706
| 0.03681
| 0.11273
| 0.062117
| 0.062117
| 0
| 0
| 0
| 0
| 0.002714
| 0.281676
| 2,052
| 49
| 142
| 41.877551
| 0.881954
| 0
| 0
| 0.119048
| 0
| 0
| 0.070175
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.166667
| 0.02381
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
436393af32e8421a7a3401c8eb82314850e79873
| 2,144
|
py
|
Python
|
ansiblemetrics/utils.py
|
radon-h2020/AnsibleMetrics
|
8a8e27d9b54fc1578d00526c8663184a2e686cb2
|
[
"Apache-2.0"
] | 1
|
2020-04-24T16:09:14.000Z
|
2020-04-24T16:09:14.000Z
|
ansiblemetrics/utils.py
|
radon-h2020/AnsibleMetrics
|
8a8e27d9b54fc1578d00526c8663184a2e686cb2
|
[
"Apache-2.0"
] | null | null | null |
ansiblemetrics/utils.py
|
radon-h2020/AnsibleMetrics
|
8a8e27d9b54fc1578d00526c8663184a2e686cb2
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union
def key_value_list(d: Union[dict, list], key=None) -> list:
"""
This function iterates over all the key-value pairs of a dictionary and returns a list of (key, value) tuples in which each value is a primitive (i.e., not a list or dict), e.g., a string or a number.
d -- a dictionary to iterate through
"""
if not d:
return []
if not isinstance(d, dict) and not isinstance(d, list):
return []
key_values = []
if isinstance(d, list):
for entry in d:
if isinstance(entry, dict):
key_values.extend(key_value_list(entry))
else:
key_values.append((key, entry))
else:
for k, v in d.items():
if k is None or v is None:
continue
if not isinstance(v, dict) and type(v) != list:
key_values.append((k, v))
elif isinstance(v, list):
key_values.extend(key_value_list(v, k))
else:
key_values.extend(key_value_list(v))
return key_values
def all_keys(d: Union[dict, list]) -> list:
"""
Returns a list of all the keys of a dictionary (duplicates included)
d -- a dictionary to iterate through
"""
if not d:
return []
if d is None or not isinstance(d, dict) and not isinstance(d, list):
return []
keys = []
if isinstance(d, list):
for entry in d:
keys.extend(all_keys(entry))
else:
for k, v in d.items():
keys.append(k)
keys.extend(all_keys(v))
return keys
def all_values(d: Union[dict, list]) -> list:
"""
Returns a list of all the primitive values of a dictionary (duplicates included)
d -- a dictionary to iterate through
"""
if not d:
return []
if not isinstance(d, dict) and not isinstance(d, list):
return [d]
values = []
if isinstance(d, list):
for entry in d:
values.extend(all_values(entry))
else:
for k, v in d.items():
values.extend(all_values(v))
return values
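# Small worked examples for the three helpers above, on one nested structure:
#
#     >>> d = {'name': 'install', 'apt': {'pkg': ['curl', 'git'], 'state': 'present'}}
#     >>> key_value_list(d)
#     [('name', 'install'), ('pkg', 'curl'), ('pkg', 'git'), ('state', 'present')]
#     >>> all_keys(d)
#     ['name', 'apt', 'pkg', 'state']
#     >>> all_values(d)
#     ['install', 'curl', 'git', 'present']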
| 25.831325
| 206
| 0.564366
| 301
| 2,144
| 3.950166
| 0.199336
| 0.083263
| 0.070648
| 0.035324
| 0.541632
| 0.541632
| 0.518923
| 0.471825
| 0.392767
| 0.335576
| 0
| 0
| 0.333022
| 2,144
| 82
| 207
| 26.146341
| 0.831469
| 0.215951
| 0
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.019608
| 0
| 0.254902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4364ccde24cc2af35ff42479b35b005f175a3209
| 24,502
|
py
|
Python
|
phy/gui/actions.py
|
ycanerol/phy
|
7a247f926dd5bf5d8ab95fe138e8f4a0db11b068
|
[
"BSD-3-Clause"
] | 118
|
2019-06-03T06:19:43.000Z
|
2022-03-25T00:05:26.000Z
|
phy/gui/actions.py
|
ycanerol/phy
|
7a247f926dd5bf5d8ab95fe138e8f4a0db11b068
|
[
"BSD-3-Clause"
] | 761
|
2015-01-08T11:17:41.000Z
|
2019-05-27T16:12:08.000Z
|
phy/gui/actions.py
|
ycanerol/phy
|
7a247f926dd5bf5d8ab95fe138e8f4a0db11b068
|
[
"BSD-3-Clause"
] | 70
|
2019-05-30T11:05:26.000Z
|
2022-03-30T11:51:23.000Z
|
# -*- coding: utf-8 -*-
"""Actions and snippets."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import inspect
from functools import partial, wraps
import logging
import re
import sys
import traceback
from .qt import QKeySequence, QAction, require_qt, input_dialog, busy_cursor, _get_icon
from phylib.utils import Bunch
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Snippet parsing utilities
# -----------------------------------------------------------------------------
def _parse_arg(s):
"""Parse a number or string."""
try:
return int(s)
except ValueError:
pass
try:
return float(s)
except ValueError:
pass
return s
def _parse_list(s):
"""Parse a comma-separated list of values (strings or numbers)."""
# Range: 'x-y'
if '-' in s:
m, M = map(_parse_arg, s.split('-'))
return list(range(m, M + 1))
# List of ids: 'x,y,z'
elif ',' in s:
return list(map(_parse_arg, s.split(',')))
else:
return _parse_arg(s)
def _parse_snippet(s):
"""Parse an entire snippet command."""
return tuple(map(_parse_list, s.split(' ')))
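# Small examples of the snippet parsing helpers above:
#
#     >>> _parse_arg('3'), _parse_arg('2.5'), _parse_arg('abc')
#     (3, 2.5, 'abc')
#     >>> _parse_list('2-4')             # a range 'x-y' becomes an inclusive list of ints
#     [2, 3, 4]
#     >>> _parse_snippet('2-4 go 1,5')
#     ([2, 3, 4], 'go', [1, 5])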
def _prompt_args(title, docstring, default=None):
"""Display a prompt dialog requesting function arguments.
'default' is a function returning the default value for the proposed input dialog.
"""
# There are args, need to display the dialog.
# Extract Example: `...` in the docstring to put a predefined text
# in the input dialog.
logger.debug("Prompting arguments for %s", title)
r = re.search('Example: `([^`]+)`', docstring)
docstring_ = docstring[:r.start()].strip() if r else docstring
try:
text = str(default()) if default else (r.group(1) if r else None)
except Exception as e: # pragma: no cover
logger.error("Error while handling user input: %s", str(e))
return
s, ok = input_dialog(title, docstring_, text)
if not ok or not s:
return
# Parse user-supplied arguments and call the function.
args = _parse_snippet(s)
return args
# -----------------------------------------------------------------------------
# Show shortcut utility functions
# -----------------------------------------------------------------------------
def _get_shortcut_string(shortcut):
"""Return a string representation of a shortcut."""
if not shortcut:
return ''
if isinstance(shortcut, (tuple, list)):
return ', '.join([_get_shortcut_string(s) for s in shortcut])
if isinstance(shortcut, str):
if hasattr(QKeySequence, shortcut):
shortcut = QKeySequence(getattr(QKeySequence, shortcut))
else:
return shortcut.lower()
assert isinstance(shortcut, QKeySequence)
s = shortcut.toString() or ''
return str(s).lower()
def _get_qkeysequence(shortcut):
"""Return a QKeySequence or list of QKeySequence from a shortcut string."""
if shortcut is None:
return []
if isinstance(shortcut, (tuple, list)):
return [_get_qkeysequence(s) for s in shortcut]
assert isinstance(shortcut, str)
if hasattr(QKeySequence, shortcut):
return QKeySequence(getattr(QKeySequence, shortcut))
sequence = QKeySequence.fromString(shortcut)
assert not sequence.isEmpty()
return sequence
def _show_shortcuts(shortcuts):
"""Display shortcuts."""
out = []
for n in sorted(shortcuts):
shortcut = _get_shortcut_string(shortcuts[n])
if not n.startswith('_') and not shortcut.startswith('-'):
out.append('- {0:<40} {1:s}'.format(n, shortcut))
if out:
print('Keyboard shortcuts')
print('\n'.join(out))
print('')
def _show_snippets(snippets):
"""Display snippets."""
out = []
for n in sorted(snippets):
snippet = snippets[n]
if not n.startswith('_'):
out.append('- {0:<40} :{1:s}'.format(n, snippet))
if out:
print('Snippets')
print('\n'.join(out))
print('')
def show_shortcuts_snippets(actions):
"""Show the shortcuts and snippets of an Actions instance."""
print(actions.name)
print('-' * len(actions.name))
print()
_show_shortcuts(actions.shortcuts)
_show_snippets(actions._default_snippets)
# -----------------------------------------------------------------------------
# Actions
# -----------------------------------------------------------------------------
def _alias(name):
# Get the alias from the character after & if it exists.
alias = name[name.index('&') + 1] if '&' in name else name
alias = alias.replace(' ', '_').lower()
return alias
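# Illustrative values (added note): `_alias` picks the character right after
# '&' when the name contains one, otherwise it snake-cases the full name, e.g.
#   _alias('&Save')      -> 's'
#   _alias('select all') -> 'select_all'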
def _expected_args(f):
if isinstance(f, partial):
argspec = inspect.getfullargspec(f.func)
else:
argspec = inspect.getfullargspec(f)
f_args = argspec.args
if 'self' in f_args:
f_args.remove('self')
# Remove arguments with defaults from the list.
if len(argspec.defaults or ()):
f_args = f_args[:-len(argspec.defaults)]
# Remove arguments supplied in a partial.
if isinstance(f, partial):
f_args = f_args[len(f.args):]
f_args = [arg for arg in f_args if arg not in f.keywords]
return tuple(f_args)
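# --- Illustrative usage (added sketch) ---------------------------------------
# `_expected_args` reports the arguments a callback still needs, after removing
# `self`, defaulted parameters, and anything already bound in a `partial`. The
# sample function is invented for demonstration.
if __name__ == "__main__":
    def _demo(a, b, c=0):
        return a, b, c
    print(_expected_args(_demo))              # ('a', 'b')
    print(_expected_args(partial(_demo, 1)))  # ('b',)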
@require_qt
def _create_qaction(gui, **kwargs):
# Create the QAction instance.
name = kwargs.get('name', '')
name = name[0].upper() + name[1:].replace('_', ' ')
action = QAction(name, gui)
# Show an input dialog if there are args.
callback = kwargs.get('callback', None)
title = getattr(callback, '__name__', 'action')
# Number of expected arguments.
n_args = kwargs.get('n_args', None) or len(_expected_args(callback))
@wraps(callback)
def wrapped(is_checked, *args):
if kwargs.get('checkable', None):
args = (is_checked,) + args
if kwargs.get('prompt', None):
args += _prompt_args(
title, docstring, default=kwargs.get('prompt_default', None)) or ()
if not args: # pragma: no cover
logger.debug("User cancelled input prompt, aborting.")
return
if len(args) < n_args:
logger.warning(
"Invalid function arguments: expecting %d but got %d", n_args, len(args))
return
try:
# Set a busy cursor if set_busy is True.
with busy_cursor(kwargs.get('set_busy', None)):
return callback(*args)
except Exception: # pragma: no cover
logger.warning("Error when executing action %s.", name)
logger.debug(''.join(traceback.format_exception(*sys.exc_info())))
action.triggered.connect(wrapped)
sequence = _get_qkeysequence(kwargs.get('shortcut', None))
if not isinstance(sequence, (tuple, list)):
sequence = [sequence]
action.setShortcuts(sequence)
assert kwargs.get('docstring', None)
docstring = re.sub(r'\s+', ' ', kwargs.get('docstring', None))
docstring += ' (alias: {})'.format(kwargs.get('alias', None))
action.setStatusTip(docstring)
action.setWhatsThis(docstring)
action.setCheckable(kwargs.get('checkable', None))
action.setChecked(kwargs.get('checked', None))
if kwargs.get('icon', None):
action.setIcon(_get_icon(kwargs['icon']))
return action
class Actions(object):
"""Group of actions bound to a GUI.
This class attaches to a GUI and implements the following features:
* Add and remove actions
* Keyboard shortcuts for the actions
* Display all shortcuts
Constructor
-----------
gui : GUI instance
name : str
Name of this group of actions.
menu : str
Name of the GUI menu that will contain the actions.
submenu : str
Name of the GUI submenu that will contain the actions.
default_shortcuts : dict
Map action names to keyboard shortcuts (regular strings).
default_snippets : dict
Map action names to snippets (regular strings).
"""
def __init__(
self, gui, name=None, menu=None, submenu=None, view=None,
insert_menu_before=None, default_shortcuts=None, default_snippets=None):
self._actions_dict = {}
self._aliases = {}
self._default_shortcuts = default_shortcuts or {}
self._default_snippets = default_snippets or {}
assert name
self.name = name
self.menu = menu
self.submenu = submenu
self.view = view
self.view_submenu = None
self.insert_menu_before = insert_menu_before
self._view_submenus = {}
self.gui = gui
gui.actions.append(self)
# Create the menu when creating the Actions instance.
if menu:
gui.get_menu(menu, insert_menu_before)
def _get_menu(self, menu=None, submenu=None, view=None, view_submenu=None):
"""Return the QMenu depending on a combination of keyword arguments."""
# Defaults.
menu = menu or self.menu
submenu = submenu or self.submenu
view = view or self.view
view_submenu = view_submenu or self.view_submenu
# If the action is a view action, it should be added to the view's menu in the dock widget.
if view:
if view_submenu and view_submenu not in self._view_submenus:
self._view_submenus[view_submenu] = view.dock._menu.addMenu(view_submenu)
if view_submenu:
return self._view_submenus[view_submenu]
else:
return view.dock._menu
# Create the submenu if there is one.
if submenu:
# Create the submenu.
self.gui.get_submenu(menu, submenu)
# Make sure the action gets added to the submenu.
menu = submenu
if menu:
return self.gui.get_menu(menu)
def add(self, callback=None, name=None, shortcut=None, alias=None, prompt=False, n_args=None,
docstring=None, menu=None, submenu=None, view=None, view_submenu=None, verbose=True,
checkable=False, checked=False, set_busy=False, prompt_default=None,
show_shortcut=True, icon=None, toolbar=False):
"""Add an action with a keyboard shortcut.
Parameters
----------
callback : function
Take no argument if checkable is False, or a boolean (checked) if it is True
name : str
Action name, the callback's name by default.
shortcut : str
The keyboard shortcut for this action.
alias : str
Snippet, the name by default.
prompt : boolean
Whether this action should display a dialog with an input box where the user can
write arguments to the callback function.
n_args : int
If prompt is True, specify the number of expected arguments.
set_busy : boolean
Whether to use a busy cursor while performing the action.
prompt_default : str
The default text in the input text box, if prompt is True.
docstring : str
The action docstring, to be displayed in the status bar when hovering over the action
item in the menu. By default, the function's docstring.
menu : str
The name of the menu where the action should be added. It is automatically created
if it doesn't exist.
submenu : str
The name of the submenu where the action should be added. It is automatically created
if it doesn't exist.
view : QWidget
A view that belongs to the GUI, if the actions are to be added to the view's menu bar.
view_submenu : str
The name of a submenu in the view menu.
checkable : boolean
Whether the action is checkable (toggle on/off).
checked : boolean
Whether the checkable action is initially checked or not.
show_shortcut : boolean
Whether to show the shortcut in the Help action that displays all GUI shortcuts.
icon : str
Hexadecimal code of the font-awesome icon.
toolbar : boolean
Whether to add the action to the toolbar.
"""
param_names = sorted(inspect.signature(Actions.add).parameters)
l = locals()
kwargs = {param_name: l[param_name] for param_name in param_names if param_name != 'self'}
if callback is None:
# Allow to use either add(func) or @add or @add(...).
kwargs.pop('callback', None)
return partial(self.add, **kwargs)
assert callback
# Get the name from the callback function if needed.
name = name or callback.__name__
alias = alias or self._default_snippets.get(name, _alias(name)).split(' ')[0]
name = name.replace('&', '')
shortcut = shortcut or self._default_shortcuts.get(name, None)
# Skip existing action.
if name in self._actions_dict:
return
# Set the status tip from the function's docstring.
docstring = docstring or callback.__doc__ or name
docstring = re.sub(r'[ \t\r\f\v]{2,}', ' ', docstring.strip())
# Create and register the action.
kwargs.update(name=name, alias=alias, shortcut=shortcut, docstring=docstring)
action = _create_qaction(self.gui, **kwargs)
action_obj = Bunch(qaction=action, **kwargs)
if verbose and not name.startswith('_'):
logger.log(5, "Add action `%s` (%s).", name, _get_shortcut_string(action.shortcut()))
self.gui.addAction(action)
# Do not show private actions in the menu.
if not name.startswith('_'):
# Find the menu in which the action should be added.
qmenu = self._get_menu(
menu=menu, submenu=submenu, view=view, view_submenu=view_submenu)
if qmenu:
qmenu.addAction(action)
# Add the action to the toolbar.
if toolbar:
self.gui._toolbar.show()
self.gui._toolbar.addAction(action)
self._actions_dict[name] = action_obj
# Register the alias -> name mapping.
self._aliases[alias] = name
# Set the callback method.
if callback:
setattr(self, name.lower().replace(' ', '_').replace(':', ''), callback)
def separator(self, **kwargs):
"""Add a separator.
Parameters
----------
menu : str
The name of the menu where the separator should be added. It is automatically created
if it doesn't exist.
submenu : str
The name of the submenu where the separator should be added. It is automatically
created if it doesn't exist.
view : QWidget
A view that belongs to the GUI, if the separator is to be added to the view's menu bar.
view_submenu : str
The name of a submenu in the view menu.
"""
self._get_menu(**kwargs).addSeparator()
def disable(self, name=None):
"""Disable all actions, or only one if a name is passed."""
if name is None:
for name in self._actions_dict:
self.disable(name)
return
self._actions_dict[name].qaction.setEnabled(False)
def enable(self, name=None):
"""Enable all actions, or only one if a name is passed.."""
if name is None:
for name in self._actions_dict:
self.enable(name)
return
self._actions_dict[name].qaction.setEnabled(True)
def get(self, name):
"""Get a QAction instance from its name."""
return self._actions_dict[name].qaction if name in self._actions_dict else None
def run(self, name, *args):
"""Run an action as specified by its name."""
assert isinstance(name, str)
# Resolve the alias if it is an alias.
name = self._aliases.get(name, name)
# Get the action.
action = self._actions_dict.get(name, None)
if not action:
raise ValueError("Action `{}` doesn't exist.".format(name))
if not name.startswith('_'):
logger.debug("Execute action `%s`.", name)
try:
return action.callback(*args)
except TypeError as e:
logger.warning("Invalid action arguments: " + str(e))
return
def remove(self, name):
"""Remove an action."""
self.gui.removeAction(self._actions_dict[name].qaction)
del self._actions_dict[name]
delattr(self, name)
def remove_all(self):
"""Remove all actions."""
names = sorted(self._actions_dict.keys())
for name in names:
self.remove(name)
@property
def shortcuts(self):
"""A dictionary mapping action names to keyboard shortcuts."""
out = {}
for name in sorted(self._actions_dict):
action = self._actions_dict[name]
if not action.show_shortcut:
continue
# Discard actions without shortcut and without an alias.
if not action.shortcut and not action.alias:
continue
# Only show alias for actions with no shortcut.
alias_str = ' (:%s)' % action.alias if action.alias != name else ''
shortcut = action.shortcut or '-'
shortcut = shortcut if isinstance(action.shortcut, str) else ', '.join(shortcut)
out[name] = '%s%s' % (shortcut, alias_str)
return out
def show_shortcuts(self):
"""Display all shortcuts in the console."""
show_shortcuts_snippets(self)
def __contains__(self, name):
"""Whether the Actions group contains a specified action."""
return name in self._actions_dict
def __repr__(self):
return '<Actions {}>'.format(sorted(self._actions_dict))
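# --- Illustrative usage (added sketch, hypothetical) --------------------------
# How an `Actions` group is typically attached to a GUI. `gui` is assumed to be
# an existing phy GUI instance, so this is a sketch rather than a standalone
# script; the action names and shortcuts are invented.
def _demo_actions(gui):  # pragma: no cover
    actions = Actions(gui, name='Demo', menu='&Demo')

    # Plain registration: the callback's name, docstring and signature drive
    # the menu entry, the status tip and the expected arguments.
    @actions.add(shortcut='ctrl+d', alias='ds')
    def do_something():
        """Do something."""
        print('done')

    # Checkable action: the wrapped callback receives the checked state.
    @actions.add(shortcut='shift+c', checkable=True)
    def toggle_thing(checked):
        """Toggle a thing."""
        print('checked:', checked)

    actions.run('ds')  # aliases resolve to the registered action name
    return actions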
# -----------------------------------------------------------------------------
# Snippets
# -----------------------------------------------------------------------------
class Snippets(object):
"""Provide keyboard snippets to quickly execute actions from a GUI.
This class attaches to a GUI and an `Actions` instance. Every command is
associated with a snippet of the same name, or with the alias indicated in
the action. The arguments of the action's callback functions can be
provided in the snippet's command with a simple syntax. For example, the
following command:
```
:my_action string 3-6
```
corresponds to:
```python
my_action('string', (3, 4, 5, 6))
```
The snippet mode is activated with the `:` keyboard shortcut. A snippet
command is activated with `Enter`, and one can leave the snippet mode
with `Escape`.
When the snippet mode is enabled (with `:`), this object adds a hidden Qt action
for every keystroke. These actions are removed when the snippet mode is disabled.
Constructor
-----------
gui : GUI instance
"""
# HACK: Unicode characters do not seem to work on Python 2
cursor = '\u200A\u258C'
# Allowed characters in snippet mode.
# A Qt shortcut will be created for every character.
_snippet_chars = r"abcdefghijklmnopqrstuvwxyz0123456789 ,.;?!_-+~=*/\(){}[]<>&|"
def __init__(self, gui):
self.gui = gui
self._status_message = gui.status_message
self.actions = Actions(gui, name='Snippets', menu='&File')
# Register snippet mode shortcut.
@self.actions.add(shortcut=':')
def enable_snippet_mode():
"""Enable the snippet mode (type action alias in the status
bar)."""
self.mode_on()
self._create_snippet_actions()
self.mode_off()
@property
def command(self):
"""This is used to write a snippet message in the status bar. A cursor is appended at
the end."""
msg = self.gui.status_message
n = len(msg)
n_cur = len(self.cursor)
return msg[:n - n_cur]
@command.setter
def command(self, value):
value += self.cursor
self.gui.unlock_status()
self.gui.status_message = value
self.gui.lock_status()
def _backspace(self):
"""Erase the last character in the snippet command."""
if self.command == ':':
return
logger.log(5, "Snippet keystroke `Backspace`.")
self.command = self.command[:-1]
def _enter(self):
"""Disable the snippet mode and execute the command."""
command = self.command
logger.log(5, "Snippet keystroke `Enter`.")
# NOTE: we need to set back the actions (mode_off) before running
# the command.
self.mode_off()
self.run(command)
def _create_snippet_actions(self):
"""Add mock Qt actions for snippet keystrokes.
Used to enable snippet mode.
"""
# One action per allowed character.
for i, char in enumerate(self._snippet_chars):
def _make_func(char):
def callback():
logger.log(5, "Snippet keystroke `%s`.", char)
self.command += char
return callback
# Lowercase letters.
self.actions.add(
name='_snippet_{}'.format(i),
shortcut=char,
callback=_make_func(char))
# Uppercase letters.
if char in self._snippet_chars[:26]:
self.actions.add(
name='_snippet_{}_upper'.format(i),
shortcut='shift+' + char,
callback=_make_func(char.upper()))
self.actions.add(
name='_snippet_backspace', shortcut='backspace', callback=self._backspace)
self.actions.add(
name='_snippet_activate', shortcut=('enter', 'return'), callback=self._enter)
self.actions.add(
name='_snippet_disable', shortcut='escape', callback=self.mode_off)
def run(self, snippet):
"""Execute a snippet command.
May be overridden.
"""
assert snippet[0] == ':'
snippet = snippet[1:]
snippet_args = _parse_snippet(snippet)
name = snippet_args[0]
logger.debug("Processing snippet `%s`.", snippet)
try:
# Try to run the snippet on all attached Actions instances.
for actions in self.gui.actions:
try:
actions.run(name, *snippet_args[1:])
return
except ValueError:
# This Actions instance doesn't contain the requested
# snippet, trying the next attached Actions instance.
pass
logger.warning("Couldn't find action `%s`.", name)
except Exception as e:
logger.warning("Error when executing snippet: \"%s\".", str(e))
logger.debug(''.join(traceback.format_exception(*sys.exc_info())))
def is_mode_on(self):
"""Whether the snippet mode is enabled."""
return self.command.startswith(':')
def mode_on(self):
"""Enable the snippet mode."""
logger.debug("Snippet mode enabled, press `escape` to leave this mode.")
# Save the current status message.
self._status_message = self.gui.status_message
self.gui.lock_status()
# Silence all actions except the Snippets actions.
for actions in self.gui.actions:
if actions != self.actions:
actions.disable()
self.actions.enable()
self.command = ':'
def mode_off(self):
"""Disable the snippet mode."""
self.gui.unlock_status()
# Reset the GUI status message that was set before the mode was
# activated.
self.gui.status_message = self._status_message
# Re-enable all actions except the Snippets actions.
self.actions.disable()
for actions in self.gui.actions:
if actions != self.actions:
actions.enable()
# The `:` shortcut should always be enabled.
self.actions.enable('enable_snippet_mode')
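# --- Dispatch sketch (added, hypothetical) ------------------------------------
# What `Snippets.run(':ds 3-6')` boils down to once the GUI machinery is
# stripped away: drop the leading ':', parse the command, and try each attached
# `Actions` group until one recognises the name or alias.
def _demo_dispatch(actions_groups, command=':ds 3-6'):  # pragma: no cover
    assert command.startswith(':')
    name, *args = _parse_snippet(command[1:])
    for actions in actions_groups:
        try:
            return actions.run(name, *args)
        except ValueError:
            continue  # unknown in this group, try the next one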
| 35.305476
| 99
| 0.587952
| 2,938
| 24,502
| 4.792716
| 0.141253
| 0.022655
| 0.01811
| 0.009445
| 0.212201
| 0.128116
| 0.111356
| 0.088346
| 0.081812
| 0.056956
| 0
| 0.002845
| 0.282793
| 24,502
| 693
| 100
| 35.356421
| 0.798441
| 0.324218
| 0
| 0.198939
| 0
| 0
| 0.064941
| 0.003771
| 0
| 0
| 0
| 0
| 0.02122
| 1
| 0.106101
| false
| 0.007958
| 0.02122
| 0.002653
| 0.249337
| 0.023873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
436637ae94348f41cc38697c102e03126553cd4f
| 807
|
py
|
Python
|
PP4E-Examples-1.4/Examples/PP4E/Tools/cleanpyc.py
|
AngelLiang/PP4E
|
3a7f63b366e1e4700b4d2524884696999a87ba9d
|
[
"MIT"
] | null | null | null |
PP4E-Examples-1.4/Examples/PP4E/Tools/cleanpyc.py
|
AngelLiang/PP4E
|
3a7f63b366e1e4700b4d2524884696999a87ba9d
|
[
"MIT"
] | null | null | null |
PP4E-Examples-1.4/Examples/PP4E/Tools/cleanpyc.py
|
AngelLiang/PP4E
|
3a7f63b366e1e4700b4d2524884696999a87ba9d
|
[
"MIT"
] | null | null | null |
"""
delete all .pyc bytecode files in a directory tree: use the
command line arg as root if given, else current working dir
"""
import os, sys
findonly = False
rootdir = os.getcwd() if len(sys.argv) == 1 else sys.argv[1]
found = removed = 0
for (thisDirLevel, subsHere, filesHere) in os.walk(rootdir):
for filename in filesHere:
if filename.endswith('.pyc'):
fullname = os.path.join(thisDirLevel, filename)
print('=>', fullname)
if not findonly:
try:
os.remove(fullname)
removed += 1
except OSError:
exc_type, inst = sys.exc_info()[:2]
print('*' * 4, 'Failed:', filename, exc_type, inst)
found += 1
print('Found', found, 'files, removed', removed)
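# --- Alternative sketch (added): the same cleanup with pathlib -----------------
# A dry-run variant of the walk above; it only deletes when dry_run=False. This
# is an illustration, not part of the original example.
import pathlib

def clean_pyc(root='.', dry_run=True):
    removed = 0
    for pyc in pathlib.Path(root).rglob('*.pyc'):
        print('=>', pyc)
        if not dry_run:
            try:
                pyc.unlink()
                removed += 1
            except OSError as exc:
                print('****', 'Failed:', pyc, exc)
    return removed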
| 31.038462
| 65
| 0.553903
| 96
| 807
| 4.645833
| 0.59375
| 0.03139
| 0.035874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012915
| 0.328377
| 807
| 25
| 66
| 32.28
| 0.809963
| 0.14746
| 0
| 0
| 0
| 0
| 0.048529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43670f7c99a2ebd5fc17181669e6be4597ca4939
| 25,401
|
py
|
Python
|
apps/controllerx/cx_core/type/light_controller.py
|
clach04/controllerx
|
b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe
|
[
"MIT"
] | null | null | null |
apps/controllerx/cx_core/type/light_controller.py
|
clach04/controllerx
|
b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe
|
[
"MIT"
] | null | null | null |
apps/controllerx/cx_core/type/light_controller.py
|
clach04/controllerx
|
b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, Optional, Type, Union
from cx_const import Light, PredefinedActionsMapping
from cx_core.color_helper import get_color_wheel
from cx_core.controller import action
from cx_core.feature_support.light import LightSupport
from cx_core.integration import EventData
from cx_core.integration.deconz import DeCONZIntegration
from cx_core.integration.z2m import Z2MIntegration
from cx_core.release_hold_controller import ReleaseHoldController
from cx_core.stepper import Stepper
from cx_core.stepper.circular_stepper import CircularStepper
from cx_core.stepper.minmax_stepper import MinMaxStepper
from cx_core.type_controller import Entity, TypeController
DEFAULT_MANUAL_STEPS = 10
DEFAULT_AUTOMATIC_STEPS = 10
DEFAULT_MIN_BRIGHTNESS = 1
DEFAULT_MAX_BRIGHTNESS = 255
DEFAULT_MIN_WHITE_VALUE = 1
DEFAULT_MAX_WHITE_VALUE = 255
DEFAULT_MIN_COLOR_TEMP = 153
DEFAULT_MAX_COLOR_TEMP = 500
DEFAULT_TRANSITION = 300
DEFAULT_ADD_TRANSITION = True
DEFAULT_TRANSITION_TURN_TOGGLE = False
ColorMode = str
# Once the minimum supported version of Python is 3.8,
# we can declare the ColorMode as a Literal
# ColorMode = Literal["auto", "xy_color", "color_temp"]
class LightEntity(Entity):
color_mode: ColorMode
def __init__(self, name: str, color_mode: ColorMode = "auto") -> None:
super().__init__(name)
self.color_mode = color_mode
class LightController(TypeController[LightEntity], ReleaseHoldController):
"""
This is the main class that controls the lights for different devices.
Type of actions:
- On/Off/Toggle
- Brightness click and hold
- Color temperature click and hold
- xy color click and hold
If a light supports xy_color and color_temperature, then xy_color will be the
default functionality. Parameters taken:
- controller (required): Inherited from Controller
- light (required): This is either the light entity name or a dictionary as
{name: string, color_mode: auto | xy_color | color_temp}
- delay (optional): Inherited from ReleaseHoldController
- manual_steps (optional): Number of steps to go from min to max when clicking.
- automatic_steps (optional): Number of steps to go from min to max when smoothing.
"""
ATTRIBUTE_BRIGHTNESS = "brightness"
ATTRIBUTE_WHITE_VALUE = "white_value"
# With the following attribute, it will select color_temp or xy_color, depending on the light.
ATTRIBUTE_COLOR = "color"
ATTRIBUTE_COLOR_TEMP = "color_temp"
ATTRIBUTE_XY_COLOR = "xy_color"
index_color = 0
value_attribute = None
# These are intermediate variables to store the checked value
smooth_power_on_check: bool
remove_transition_check: bool
domains = ["light"]
entity_arg = "light"
async def init(self) -> None:
manual_steps = self.args.get("manual_steps", DEFAULT_MANUAL_STEPS)
automatic_steps = self.args.get("automatic_steps", DEFAULT_AUTOMATIC_STEPS)
self.min_brightness = self.args.get("min_brightness", DEFAULT_MIN_BRIGHTNESS)
self.max_brightness = self.args.get("max_brightness", DEFAULT_MAX_BRIGHTNESS)
self.min_white_value = self.args.get("min_white_value", DEFAULT_MIN_WHITE_VALUE)
self.max_white_value = self.args.get("max_white_value", DEFAULT_MAX_WHITE_VALUE)
self.min_color_temp = self.args.get("min_color_temp", DEFAULT_MIN_COLOR_TEMP)
self.max_color_temp = self.args.get("max_color_temp", DEFAULT_MAX_COLOR_TEMP)
self.transition = self.args.get("transition", DEFAULT_TRANSITION)
self.color_wheel = get_color_wheel(
self.args.get("color_wheel", "default_color_wheel")
)
color_stepper = CircularStepper(
0, len(self.color_wheel) - 1, len(self.color_wheel)
)
self.manual_steppers: Dict[str, Stepper] = {
LightController.ATTRIBUTE_BRIGHTNESS: MinMaxStepper(
self.min_brightness, self.max_brightness, manual_steps
),
LightController.ATTRIBUTE_WHITE_VALUE: MinMaxStepper(
self.min_white_value, self.max_white_value, manual_steps
),
LightController.ATTRIBUTE_COLOR_TEMP: MinMaxStepper(
self.min_color_temp, self.max_color_temp, manual_steps
),
LightController.ATTRIBUTE_XY_COLOR: color_stepper,
}
self.automatic_steppers: Dict[str, Stepper] = {
LightController.ATTRIBUTE_BRIGHTNESS: MinMaxStepper(
self.min_brightness, self.max_brightness, automatic_steps
),
LightController.ATTRIBUTE_WHITE_VALUE: MinMaxStepper(
self.min_white_value, self.max_white_value, automatic_steps
),
LightController.ATTRIBUTE_COLOR_TEMP: MinMaxStepper(
self.min_color_temp, self.max_color_temp, automatic_steps
),
LightController.ATTRIBUTE_XY_COLOR: color_stepper,
}
self.smooth_power_on = self.args.get(
"smooth_power_on", self.supports_smooth_power_on()
)
self.add_transition = self.args.get("add_transition", DEFAULT_ADD_TRANSITION)
self.add_transition_turn_toggle = self.args.get(
"add_transition_turn_toggle", DEFAULT_TRANSITION_TURN_TOGGLE
)
await super().init()
def _get_entity_type(self) -> Type[LightEntity]:
return LightEntity
def get_predefined_actions_mapping(self) -> PredefinedActionsMapping:
return {
Light.ON: self.on,
Light.OFF: self.off,
Light.TOGGLE: self.toggle,
Light.TOGGLE_FULL_BRIGHTNESS: (
self.toggle_full,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.TOGGLE_FULL_WHITE_VALUE: (
self.toggle_full,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.TOGGLE_FULL_COLOR_TEMP: (
self.toggle_full,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.TOGGLE_MIN_BRIGHTNESS: (
self.toggle_min,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.TOGGLE_MIN_WHITE_VALUE: (
self.toggle_min,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.TOGGLE_MIN_COLOR_TEMP: (
self.toggle_min,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.RELEASE: self.release,
Light.ON_FULL_BRIGHTNESS: (
self.on_full,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.ON_FULL_WHITE_VALUE: (
self.on_full,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.ON_FULL_COLOR_TEMP: (
self.on_full,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.ON_MIN_BRIGHTNESS: (
self.on_min,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.ON_MIN_WHITE_VALUE: (
self.on_min,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.ON_MIN_COLOR_TEMP: (
self.on_min,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.SET_HALF_BRIGHTNESS: (
self.set_value,
(
LightController.ATTRIBUTE_BRIGHTNESS,
0.5,
),
),
Light.SET_HALF_WHITE_VALUE: (
self.set_value,
(
LightController.ATTRIBUTE_WHITE_VALUE,
0.5,
),
),
Light.SET_HALF_COLOR_TEMP: (
self.set_value,
(
LightController.ATTRIBUTE_COLOR_TEMP,
0.5,
),
),
Light.SYNC: self.sync,
Light.CLICK_BRIGHTNESS_UP: (
self.click,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.UP,
),
),
Light.CLICK_BRIGHTNESS_DOWN: (
self.click,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.DOWN,
),
),
Light.CLICK_WHITE_VALUE_UP: (
self.click,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.UP,
),
),
Light.CLICK_WHITE_VALUE_DOWN: (
self.click,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.DOWN,
),
),
Light.CLICK_COLOR_UP: (
self.click,
(
LightController.ATTRIBUTE_COLOR,
Stepper.UP,
),
),
Light.CLICK_COLOR_DOWN: (
self.click,
(
LightController.ATTRIBUTE_COLOR,
Stepper.DOWN,
),
),
Light.CLICK_COLOR_TEMP_UP: (
self.click,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.UP,
),
),
Light.CLICK_COLOR_TEMP_DOWN: (
self.click,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.DOWN,
),
),
Light.CLICK_XY_COLOR_UP: (
self.click,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.UP,
),
),
Light.CLICK_XY_COLOR_DOWN: (
self.click,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_BRIGHTNESS_UP: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.UP,
),
),
Light.HOLD_BRIGHTNESS_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.DOWN,
),
),
Light.HOLD_BRIGHTNESS_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.TOGGLE,
),
),
Light.HOLD_WHITE_VALUE_UP: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.UP,
),
),
Light.HOLD_WHITE_VALUE_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.DOWN,
),
),
Light.HOLD_WHITE_VALUE_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.TOGGLE,
),
),
Light.HOLD_COLOR_UP: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.UP,
),
),
Light.HOLD_COLOR_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_COLOR_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.TOGGLE,
),
),
Light.HOLD_COLOR_TEMP_UP: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.UP,
),
),
Light.HOLD_COLOR_TEMP_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.DOWN,
),
),
Light.HOLD_COLOR_TEMP_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.TOGGLE,
),
),
Light.HOLD_XY_COLOR_UP: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.UP,
),
),
Light.HOLD_XY_COLOR_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_XY_COLOR_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.TOGGLE,
),
),
Light.XYCOLOR_FROM_CONTROLLER: self.xycolor_from_controller,
Light.COLORTEMP_FROM_CONTROLLER: self.colortemp_from_controller,
}
async def check_remove_transition(self, on_from_user: bool) -> bool:
return (
not self.add_transition
or (on_from_user and not self.add_transition_turn_toggle)
or await self.feature_support.not_supported(LightSupport.TRANSITION)
)
async def call_light_service(self, service: str, **attributes) -> None:
if "transition" not in attributes:
attributes["transition"] = self.transition / 1000
if self.remove_transition_check:
del attributes["transition"]
await self.call_service(service, entity_id=self.entity.name, **attributes)
async def _on(self, **attributes) -> None:
await self.call_light_service("light/turn_on", **attributes)
@action
async def on(self, **attributes) -> None:
await self._on(**attributes)
async def _off(self, **attributes) -> None:
await self.call_light_service("light/turn_off", **attributes)
@action
async def off(self, **attributes) -> None:
await self._off(**attributes)
async def _toggle(self, **attributes) -> None:
await self.call_light_service("light/toggle", **attributes)
@action
async def toggle(self, **attributes) -> None:
await self._toggle(**attributes)
async def _set_value(self, attribute: str, fraction: float) -> None:
fraction = max(0, min(fraction, 1))
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
min_ = stepper.minmax.min
max_ = stepper.minmax.max
value = (max_ - min_) * fraction + min_
await self._on(**{attribute: value})
@action
async def set_value(self, attribute: str, fraction: float) -> None:
await self._set_value(attribute, fraction)
@action
async def toggle_full(self, attribute: str) -> None:
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
await self._toggle(**{attribute: stepper.minmax.max})
@action
async def toggle_min(self, attribute: str) -> None:
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
await self._toggle(**{attribute: stepper.minmax.min})
async def _on_full(self, attribute: str) -> None:
await self._set_value(attribute, 1)
@action
async def on_full(self, attribute: str) -> None:
await self._on_full(attribute)
async def _on_min(self, attribute: str) -> None:
await self._set_value(attribute, 0)
@action
async def on_min(self, attribute: str) -> None:
await self._on_min(attribute)
@action
async def sync(self) -> None:
attributes: Dict[Any, Any] = {}
try:
color_attribute = await self.get_attribute(LightController.ATTRIBUTE_COLOR)
if color_attribute == LightController.ATTRIBUTE_COLOR_TEMP:
attributes[color_attribute] = 370 # 2700K light
else:
attributes[color_attribute] = (0.323, 0.329) # white colour
except ValueError:
self.log(
"⚠️ `sync` action will only change brightness",
level="WARNING",
ascii_encode=False,
)
await self._on(**attributes, brightness=self.max_brightness)
@action
async def xycolor_from_controller(self, extra: Optional[EventData]) -> None:
if extra is None:
self.log("No event data present", level="WARNING")
return
if isinstance(self.integration, Z2MIntegration):
if "action_color" not in extra:
self.log(
"`action_color` is not present in the MQTT payload", level="WARNING"
)
return
xy_color = extra["action_color"]
await self._on(xy_color=(xy_color["x"], xy_color["y"]))
elif isinstance(self.integration, DeCONZIntegration):
if "xy" not in extra:
self.log("`xy` is not present in the deCONZ event", level="WARNING")
return
await self._on(xy_color=extra["xy"])
@action
async def colortemp_from_controller(self, extra: Optional[EventData]) -> None:
if extra is None:
self.log("No event data present", level="WARNING")
return
if isinstance(self.integration, Z2MIntegration):
if "action_color_temperature" not in extra:
self.log(
"`action_color_temperature` is not present in the MQTT payload",
level="WARNING",
)
return
await self._on(color_temp=extra["action_color_temperature"])
async def get_attribute(self, attribute: str) -> str:
if attribute == LightController.ATTRIBUTE_COLOR:
if self.entity.color_mode == "auto":
if await self.feature_support.is_supported(LightSupport.COLOR):
return LightController.ATTRIBUTE_XY_COLOR
elif await self.feature_support.is_supported(LightSupport.COLOR_TEMP):
return LightController.ATTRIBUTE_COLOR_TEMP
else:
raise ValueError(
"This light does not support xy_color or color_temp"
)
else:
return self.entity.color_mode
else:
return attribute
async def get_value_attribute(self, attribute: str) -> Union[float, int]:
if self.smooth_power_on_check:
return 0
if attribute == LightController.ATTRIBUTE_XY_COLOR:
return 0
elif (
attribute == LightController.ATTRIBUTE_BRIGHTNESS
or attribute == LightController.ATTRIBUTE_WHITE_VALUE
or attribute == LightController.ATTRIBUTE_COLOR_TEMP
):
value = await self.get_entity_state(self.entity.name, attribute)
if value is None:
raise ValueError(
f"Value for `{attribute}` attribute could not be retrieved "
f"from `{self.entity.name}`. "
"Check the FAQ to know more about this error: "
"https://xaviml.github.io/controllerx/faq"
)
else:
try:
return float(value)
except ValueError:
raise ValueError(
f"Attribute `{attribute}` with `{value}` as a value "
"could not be converted to float"
)
else:
raise ValueError(f"Attribute `{attribute}` not expected")
def check_smooth_power_on(
self, attribute: str, direction: str, light_state: str
) -> bool:
return (
direction != Stepper.DOWN
and attribute == self.ATTRIBUTE_BRIGHTNESS
and self.smooth_power_on
and light_state == "off"
)
async def before_action(self, action: str, *args, **kwargs) -> bool:
to_return = True
if action in ("click", "hold"):
attribute, direction = args
light_state: str = await self.get_entity_state(self.entity.name)
self.smooth_power_on_check = self.check_smooth_power_on(
attribute, direction, light_state
)
self.remove_transition_check = await self.check_remove_transition(
on_from_user=False
)
to_return = (light_state == "on") or self.smooth_power_on_check
else:
self.remove_transition_check = await self.check_remove_transition(
on_from_user=True
)
self.smooth_power_on_check = False
return await super().before_action(action, *args, **kwargs) and to_return
@action
async def click(self, attribute: str, direction: str) -> None:
attribute = await self.get_attribute(attribute)
self.value_attribute = await self.get_value_attribute(attribute)
await self.change_light_state(
self.value_attribute,
attribute,
direction,
self.manual_steppers[attribute],
"click",
)
@action
async def hold(self, attribute: str, direction: str) -> None: # type: ignore
attribute = await self.get_attribute(attribute)
self.value_attribute = await self.get_value_attribute(attribute)
self.log(
f"Attribute value before running the hold action: {self.value_attribute}",
level="DEBUG",
)
if direction == Stepper.TOGGLE:
self.log(
f"Previous direction: {self.automatic_steppers[attribute].previous_direction}",
level="DEBUG",
)
direction = self.automatic_steppers[attribute].get_direction(
self.value_attribute, direction
)
self.log(f"Going direction: {direction}", level="DEBUG")
await super().hold(attribute, direction)
async def hold_loop(self, attribute: str, direction: str) -> bool: # type: ignore
if self.value_attribute is None:
return True
return await self.change_light_state(
self.value_attribute,
attribute,
direction,
self.automatic_steppers[attribute],
"hold",
)
async def change_light_state(
self,
old: float,
attribute: str,
direction: str,
stepper: Stepper,
action_type: str,
) -> bool:
"""
This function changes the state of the light depending on the previous
value and attribute. It returns True when no further changes are needed,
and False otherwise.
"""
attributes: Dict[str, Any]
if attribute == LightController.ATTRIBUTE_XY_COLOR:
index_color, _ = stepper.step(self.index_color, direction)
self.index_color = int(index_color)
xy_color = self.color_wheel[self.index_color]
attributes = {attribute: xy_color}
if action_type == "hold":
attributes["transition"] = self.delay / 1000
await self._on(**attributes)
# In xy_color mode the loop never finishes on its own; the hold loop
# will only stop if the hold action is called when releasing the button.
# I haven't experienced any problems with it, but a future implementation
# could force the loop to stop after 4 or 5 iterations as a safety measure.
return False
if self.smooth_power_on_check:
await self._on_min(attribute)
# After smooth power on, the light should not brighten up.
return True
new_state_attribute, exceeded = stepper.step(old, direction)
new_state_attribute = round(new_state_attribute, 3)
attributes = {attribute: new_state_attribute}
if action_type == "hold":
attributes["transition"] = self.delay / 1000
await self._on(**attributes)
self.value_attribute = new_state_attribute
return exceeded
def supports_smooth_power_on(self) -> bool:
"""
This function can be overridden for each device to indicate the default behaviour of the controller
when the associated light is off and an event for incrementing brightness is received.
It returns True if the associated light should be turned on with minimum brightness when such an
event arrives while the lamp is off.
The behaviour can be overridden by the user with the 'smooth_power_on' option in app configuration.
"""
return False
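# --- Illustrative numbers (added sketch) ---------------------------------------
# The fraction-to-value mapping used by `_set_value` above, restated outside the
# controller class with the default brightness limits.
def _fraction_to_value(fraction, min_=DEFAULT_MIN_BRIGHTNESS, max_=DEFAULT_MAX_BRIGHTNESS):
    fraction = max(0, min(fraction, 1))
    return (max_ - min_) * fraction + min_

if __name__ == "__main__":
    print(_fraction_to_value(0.5))  # 128.0 (Light.SET_HALF_BRIGHTNESS)
    print(_fraction_to_value(1))    # 255   (Light.ON_FULL_BRIGHTNESS)
    print(_fraction_to_value(0))    # 1     (Light.ON_MIN_BRIGHTNESS)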
| 37.686944
| 117
| 0.565332
| 2,526
| 25,401
| 5.4327
| 0.112035
| 0.101436
| 0.046491
| 0.036071
| 0.478102
| 0.37193
| 0.28077
| 0.213364
| 0.174525
| 0.163521
| 0
| 0.004411
| 0.357427
| 25,401
| 673
| 118
| 37.742942
| 0.836233
| 0.074761
| 0
| 0.470389
| 0
| 0
| 0.054828
| 0.008555
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00846
| false
| 0
| 0.021997
| 0.005076
| 0.093063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
436a1ebb3d99a1475a443393df66a840b227b6bf
| 4,916
|
py
|
Python
|
src/command_modules/azure-cli-security/azure/cli/command_modules/security/_params.py
|
jfcoz/azure-cli
|
8459ef3fd3c76d9f99defd95d4c980923891fa6d
|
[
"MIT"
] | 1
|
2019-10-01T10:29:15.000Z
|
2019-10-01T10:29:15.000Z
|
src/command_modules/azure-cli-security/azure/cli/command_modules/security/_params.py
|
jfcoz/azure-cli
|
8459ef3fd3c76d9f99defd95d4c980923891fa6d
|
[
"MIT"
] | 3
|
2019-07-12T22:10:38.000Z
|
2019-07-12T22:10:49.000Z
|
src/command_modules/azure-cli-security/azure/cli/command_modules/security/_params.py
|
jfcoz/azure-cli
|
8459ef3fd3c76d9f99defd95d4c980923891fa6d
|
[
"MIT"
] | 1
|
2019-06-21T05:08:09.000Z
|
2019-06-21T05:08:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands.parameters import resource_group_name_type
from knack.arguments import CLIArgumentType
from ._validators import (validate_alert_status,
validate_auto_provisioning_toggle,
validate_pricing_tier)
name_arg_type = CLIArgumentType(options_list=('--name', '-n'), metavar='NAME', help='name of the resource to be fetched')
home_region_arg_type = CLIArgumentType(options_list=('--home-region', '-hr'), metavar='HOMEREGION', help='home region that was selected for the subscription')
location_arg_type = CLIArgumentType(options_list=('--location', '-l'), metavar='LOCATION', help='location of the resource')
# Alerts
alert_status_arg_type = CLIArgumentType(options_list=('--status'), metavar='STATUS', help='target status of the alert. possible values are "dismiss" and "activate"')
# Auto Provisioning
auto_provisioning_auto_provision_arg_type = CLIArgumentType(options_list=('--auto-provision'), metavar='AUTOPROVISION', help='Automatic provisioning toggle. possible values are "on" or "off"')
# Contacts
contact_email_arg_type = CLIArgumentType(options_list=('--email'), metavar='EMAIL', help='E-mail of the security contact')
contact_phone_arg_type = CLIArgumentType(options_list=('--phone'), metavar='PHONE', help='Phone of the security contact')
contact_alert_notifications_arg_type = CLIArgumentType(options_list=('--alert-notifications'), metavar='ALERTNOTIFICATIONS', help='Whether to send mail notifications to the security contacts')
contact_alerts_admins_arg_type = CLIArgumentType(options_list=('--alerts-admins'), metavar='ALERTADMINS', help='Whether to send mail notifications to the subscription administrators')
# Pricing
pricing_tier_arg_type = CLIArgumentType(options_list=('--tier'), metavar='TIER', help='pricing tier type')
# Workspace settings
workspace_setting_target_workspace_arg_type = CLIArgumentType(options_list=('--target-workspace'), metavar='TARGETWORKSPACE', help='An ID of the workspace resource that will hold the security data')
def load_arguments(self, _):
for scope in ['alert',
'task',
'setting',
'contact',
'auto-provisioning-setting',
'discovered-security-solution',
'external-security-solution',
'jit-policy',
'location',
'pricing',
'topology',
'workspace-setting']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'resource_group_name',
options_list=['--resource-group', '-g'],
arg_type=resource_group_name_type)
c.argument(
'resource_name',
arg_type=name_arg_type)
c.argument(
'location',
arg_type=location_arg_type)
for scope in ['alert update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'status',
validator=validate_alert_status,
arg_type=alert_status_arg_type)
for scope in ['auto-provisioning-setting update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'auto_provision',
validator=validate_auto_provisioning_toggle,
arg_type=auto_provisioning_auto_provision_arg_type)
for scope in ['contact create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'email',
arg_type=contact_email_arg_type)
c.argument(
'phone',
arg_type=contact_phone_arg_type)
c.argument(
'alert_notifications',
arg_type=contact_alert_notifications_arg_type)
c.argument(
'alerts_admins',
arg_type=contact_alerts_admins_arg_type)
for scope in ['pricing create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'tier',
validator=validate_pricing_tier,
arg_type=pricing_tier_arg_type)
for scope in ['workspace-setting create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'target_workspace',
arg_type=workspace_setting_target_workspace_arg_type)
| 47.728155
| 198
| 0.621237
| 516
| 4,916
| 5.666667
| 0.24031
| 0.076607
| 0.082763
| 0.109097
| 0.392955
| 0.198358
| 0.147743
| 0.147743
| 0.121067
| 0.121067
| 0
| 0
| 0.240236
| 4,916
| 102
| 199
| 48.196078
| 0.782865
| 0.086859
| 0
| 0.246753
| 0
| 0
| 0.267753
| 0.027914
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012987
| false
| 0
| 0.038961
| 0
| 0.051948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
436a53c20b8a7b3181b33290aeb94d9c5458f945
| 1,558
|
py
|
Python
|
tests/models/test_transformers.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 8
|
2021-10-12T05:39:20.000Z
|
2022-03-31T10:55:01.000Z
|
tests/models/test_transformers.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 1
|
2022-03-30T19:23:42.000Z
|
2022-03-30T19:23:42.000Z
|
tests/models/test_transformers.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 5
|
2021-11-17T07:38:28.000Z
|
2022-01-31T10:46:36.000Z
|
import unittest
import torch
from parameterized import parameterized
from src.constructor import create_backbone
from src.models.backbones.utils import list_models
from .test_segmentation import example_backbones
def inp(bsize, in_ch, w, h):
return torch.ones(bsize, in_ch, w, h)
class TestBackboneCorrectness(unittest.TestCase):
def setUp(self) -> None:
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Shared example input used by the tracing tests below; the 2x3x224x224 shape
# is an assumption (a typical transformer-backbone input), not from the original file.
self.input = torch.rand(2, 3, 224, 224, device=self.device)
@parameterized.expand(list_models(module='vision_transformer', exclude_filters=''))
def test_vit_torchscript_conversion(self, backbone_name):
model = create_backbone(backbone_name, img_size=self.input.shape[2]).to(self.device).eval()
with torch.no_grad():
torch.jit.trace(model, self.input)
torch.cuda.empty_cache()
@parameterized.expand(list_models(module='coat', exclude_filters=''))
def test_coat_torchscript_conversion(self, backbone_name):
model = create_backbone(backbone_name, img_size=self.input.shape[2]).to(self.device).eval()
with torch.no_grad():
torch.jit.trace(model, self.input)
torch.cuda.empty_cache()
@parameterized.expand(list_models(module='swin_transformer', exclude_filters=''))
def test_swin_torchscript_conversion(self, backbone_name):
model = create_backbone(backbone_name).to(self.device).eval()
input = torch.rand(2, 3, *model.img_size, device=self.device)
with torch.no_grad():
torch.jit.trace(model, input)
torch.cuda.empty_cache()
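# --- Minimal tracing sketch (added; assumes only torch) ------------------------
# The conversion pattern the tests above exercise, reduced to a toy module so it
# runs without the torchok backbones.
if __name__ == '__main__':
    toy = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
    example = torch.rand(2, 3, 64, 64)
    with torch.no_grad():
        traced = torch.jit.trace(toy, example)
    print(traced(example).shape)  # torch.Size([2, 8, 62, 62])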
| 38.95
| 99
| 0.717587
| 205
| 1,558
| 5.24878
| 0.331707
| 0.066915
| 0.064126
| 0.080855
| 0.604089
| 0.469331
| 0.469331
| 0.469331
| 0.438662
| 0.438662
| 0
| 0.003837
| 0.163671
| 1,558
| 39
| 100
| 39.948718
| 0.821949
| 0
| 0
| 0.333333
| 0
| 0
| 0.030167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.2
| 0.033333
| 0.433333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
436d01399c03b77d98f4cf23e9025181a7999308
| 3,767
|
py
|
Python
|
app/app.py
|
shaswat01/Disaster_Response_ETL
|
c441514fb5231d193cd4b29afad00fe0f3513562
|
[
"MIT"
] | null | null | null |
app/app.py
|
shaswat01/Disaster_Response_ETL
|
c441514fb5231d193cd4b29afad00fe0f3513562
|
[
"MIT"
] | null | null | null |
app/app.py
|
shaswat01/Disaster_Response_ETL
|
c441514fb5231d193cd4b29afad00fe0f3513562
|
[
"MIT"
] | null | null | null |
import nltk
import json
import plotly
import pandas as pd
import plotly.graph_objects as go
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
nltk.download(['punkt','wordnet'])
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar, Histogram
import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
# load data
engine = create_engine('sqlite:///data/DisasterResponse.db')
df = pd.read_sql_table('messages', engine)
# load model
model = joblib.load("models/model.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
# Viz 1
genre = df.groupby('genre').count()['id'].sort_values()
# Viz 2
df['text length'] = df['message'].apply(lambda x: len(x.split()))
histogram = df[df['text length'] < 100].groupby('text length').count()['id']
# Viz 3
total_category = df.drop(columns=['id','message','original','genre', 'text length']).sum().sort_values(ascending=False).head(5)
# create visuals
graphs = [
{
'data': [
Bar(
x=genre.values,
y=genre.index,
orientation='h'
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Genre"
},
'xaxis': {
'title': "Counts"
}
}
},
{
'data': [
Bar(
x=histogram.index,
y=histogram.values
)
],
'layout': {
'title': 'Distribution of Messages Length',
'yaxis': {
'title': "Total Messages"
},
'xaxis': {
'title': "Total Words"
}
}
},
{
'data': [
Bar(
x=total_category.index,
y=total_category.values
)
],
'layout': {
'title': 'Total Messages per Category (Top 5)',
'yaxis': {
'title': "Total"
},
'xaxis': {
'title': "Category"
}
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html template. Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run()
#app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
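# --- Illustrative request (added sketch) ----------------------------------------
# Exercising the /go route with Flask's test client. This still needs the
# database and models/model.pkl loaded above, so it is a sketch rather than a
# standalone script; the query text is invented.
def _demo_query():  # pragma: no cover
    with app.test_client() as client:
        response = client.get('/go', query_string={'query': 'We need water and food'})
        print(response.status_code)  # 200 when the model and templates are available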
| 25.281879
| 131
| 0.528537
| 386
| 3,767
| 5.056995
| 0.411917
| 0.020492
| 0.012295
| 0.025615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007383
| 0.352801
| 3,767
| 148
| 132
| 25.452703
| 0.793273
| 0.120255
| 0
| 0.174757
| 0
| 0
| 0.137094
| 0.010312
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038835
| false
| 0
| 0.116505
| 0
| 0.184466
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
436d1a37515679503cc50623874a3539d00946be
| 4,659
|
py
|
Python
|
tools/mo/openvino/tools/mo/front/mxnet/mx_reshape_reverse.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 1
|
2019-09-22T01:05:07.000Z
|
2019-09-22T01:05:07.000Z
|
tools/mo/openvino/tools/mo/front/mxnet/mx_reshape_reverse.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 58
|
2020-11-06T12:13:45.000Z
|
2022-03-28T13:20:11.000Z
|
tools/mo/openvino/tools/mo/front/mxnet/mx_reshape_reverse.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 2
|
2019-09-20T01:33:37.000Z
|
2019-09-20T08:42:11.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.mxnet.mx_reshape_to_reshape import MXReshapeToReshape
from openvino.tools.mo.ops.Reverse import Reverse
from openvino.tools.mo.ops.mxreshape import MXReshape
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.reshape import Reshape
from openvino.tools.mo.ops.shape import Shape
from openvino.tools.mo.ops.squeeze import Squeeze
from openvino.tools.mo.ops.unsqueeze import Unsqueeze
class MXReshapeReverse(FrontReplacementOp):
"""
If a reshape layer has reverse=True, its special values are inferred from right to left.
This replacer simulates that behaviour: the replaced subgraph reverses the input shape and the
special dims, performs the reshape, and then reverses the result back.
Resulting subgraph: reshape(reverse=True) -> reverse -> reshape(reverse=False) -> reverse.
"""
op = 'MXReshape'
enabled = True
def run_before(self):
return [MXReshapeToReshape]
def replace_sub_graph(self, graph: Graph, match: dict):
mxreshape = match['op']
if not mxreshape.reverse:
return
shape_node = Shape(graph, dict(name=mxreshape.id + '/Shape')).create_node()
forward_reverse_unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/ForwardUnsqueeze'))
forward_reverse_node = Reverse(graph, dict(name=mxreshape.id + '/ForwardReverse', axis=1)).create_node()
forward_reverse_squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/ForwardSqueeze'))
reshape_node = Reshape(graph, dict(name=mxreshape.id + '/Reshape')).create_node()
shape_node.in_port(0).connect(mxreshape.in_port(0).get_source())
mxreshape.in_port(0).get_connection().set_destination(reshape_node.in_port(0))
forward_reverse_unsqueeze_node.in_port(0).connect(shape_node.out_port(0))
forward_reverse_node.in_port(0).connect(forward_reverse_unsqueeze_node.out_port(0))
forward_reverse_squeeze_node.in_port(0).connect(forward_reverse_node.out_port(0))
reshape_node.in_port(1).connect(forward_reverse_squeeze_node.out_port(0))
reshape_shape_node = create_op_node_with_second_input(graph, Reshape, int64_array(np.flip(mxreshape.dim, 0)),
dict(name=str(mxreshape.id) + '/ReshapeShape'))
if np.sum(np.in1d([-2, -3, -4], mxreshape.dim), axis=0):
reshape_shape_node = MXReshape(graph, dict(name=mxreshape.id + '/Reshape',
dim=int64_array(np.flip(mxreshape.dim, 0)))).create_node()
reshape_shape_node.in_port(0).connect(reshape_node.out_port(0))
backward_shape_node = Shape(graph, dict(name=mxreshape.id + '/BackwardShape')).create_node()
backward_reverse_unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/BackwardUnsqueeze'))
backward_reverse_node = Reverse(graph, dict(name=mxreshape.id + '/BackwardReverse', axis=1)).create_node()
backward_reverse_squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/BackwardSqueeze'))
backward_reshape_node = Reshape(graph, dict(name=mxreshape.id + '/BackwardReshape')).create_node()
backward_shape_node.in_port(0).connect(reshape_shape_node.out_port(0))
backward_reverse_unsqueeze_node.in_port(0).connect(backward_shape_node.out_port(0))
backward_reverse_node.in_port(0).connect(backward_reverse_unsqueeze_node.out_port(0))
backward_reverse_squeeze_node.in_port(0).connect(backward_reverse_node.out_port(0))
backward_reshape_node.in_port(0).connect(reshape_shape_node.out_port(0))
backward_reshape_node.in_port(1).connect(backward_reverse_squeeze_node.out_port(0))
mxreshape.out_port(0).get_connection().set_source(backward_reshape_node.out_port(0))
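# --- NumPy illustration (added sketch) ------------------------------------------
# Why flipping both the input shape and the target dims lets a plain left-to-right
# Reshape emulate MXNet's reverse=True (right-to-left) inference of the special
# values 0 (copy input dim) and -1 (infer). The helper names are invented and the
# sketch ignores the -2/-3/-4 special dims handled by MXReshape above.
def _resolve_dims(in_shape, dim):
    out = [in_shape[i] if d == 0 else d for i, d in enumerate(dim)]
    if -1 in out:
        known = int(np.prod([d for d in out if d != -1]))
        out[out.index(-1)] = int(np.prod(in_shape)) // known
    return tuple(out)

def _reshape_reverse(x, dim):
    flipped = _resolve_dims(x.shape[::-1], tuple(dim)[::-1])
    return x.reshape(flipped[::-1])

if __name__ == '__main__':
    x = np.arange(24).reshape(2, 3, 4)
    # Reverse semantics: the trailing 0 copies the *last* input dim (4), -1 infers 6.
    print(_reshape_reverse(x, (-1, 0)).shape)                 # (6, 4)
    # Plain left-to-right semantics would give (8, 3) instead.
    print(x.reshape(_resolve_dims(x.shape, (-1, 0))).shape)   # (8, 3)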
| 59.730769
| 127
| 0.69457
| 598
| 4,659
| 5.137124
| 0.192308
| 0.042318
| 0.042318
| 0.046875
| 0.593099
| 0.484701
| 0.37207
| 0.257813
| 0.166667
| 0.152344
| 0
| 0.017483
| 0.201975
| 4,659
| 77
| 128
| 60.506494
| 0.808768
| 0.087787
| 0
| 0
| 0
| 0
| 0.040986
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.222222
| 0.018519
| 0.351852
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
436dafbd787a4e7854f10318324bcf64277e6432
| 6,480
|
py
|
Python
|
Python/Simulation/Numerical_Methods/test_cubic_spline_solve.py
|
MattMarti/Lambda-Trajectory-Sim
|
4155f103120bd49221776cc3b825b104f36817f2
|
[
"MIT"
] | null | null | null |
Python/Simulation/Numerical_Methods/test_cubic_spline_solve.py
|
MattMarti/Lambda-Trajectory-Sim
|
4155f103120bd49221776cc3b825b104f36817f2
|
[
"MIT"
] | null | null | null |
Python/Simulation/Numerical_Methods/test_cubic_spline_solve.py
|
MattMarti/Lambda-Trajectory-Sim
|
4155f103120bd49221776cc3b825b104f36817f2
|
[
"MIT"
] | null | null | null |
import unittest;
import numpy as np;
import scipy as sp;
from cubic_spline_solve import cubic_spline_solve;
from cubic_spline_fun import cubic_spline_fun;
class Test_cubic_spline_solve(unittest.TestCase):
'''
Test_cubic_spline_solve
Test case for the cubic spline solver function. This function just solves
for the spline data, so that the spline can be precomputed before code is
run. This improves code performance by removing the need to invert a
matrix every time the spline function is called.
@author: Matt Marti
@date: 2019-06-16
'''
def test_nominal_01(self):
'''Test the spline solve for nominal test case'''
# Function handles for function and derivatives
f = lambda x : np.sin(x);
df = lambda x : np.cos(x);
# x from 0 to 10 in the correct format
xrange = np.linspace(0, 10, 20);
xkvec = np.zeros((1, xrange.shape[0]));
for i in range(0, xrange.shape[0]):
xkvec[0,i] = xrange[i];
#
# Generate function values dataset
fkvec = f(xkvec);
xinter = np.linspace(0, 10, 1000);
# Generate parameters for clamped boundary conditions
fslope = np.ndarray((1,2));
fslope[0,0] = np.cos(xkvec[0,0]);
fslope[0,1] = np.cos(xkvec[0,-1]);
# Compute already tested spline
_, _, akvec, bkvec, ckvec, dkvec \
= cubic_spline_fun(xkvec, fkvec, xinter, fslope);
splineDataTrue = np.zeros((1, xkvec.shape[1], 5));
splineDataTrue[0,:,0] = akvec.squeeze();
splineDataTrue[0,:,1] = bkvec.squeeze();
splineDataTrue[0,:,2] = ckvec.squeeze();
splineDataTrue[0,:,3] = dkvec.squeeze();
splineDataTrue[0,:,4] = xkvec.squeeze();
# Run spline solve
splineDataMat = cubic_spline_solve( xkvec, fkvec, fslope );
# Test Function truth values
error = splineDataMat - splineDataTrue;
maxerr = np.max(np.abs(error));
self.assertLess(maxerr, 1e-12, 'Spline error too high');
#
def test_multiple_01(self):
'''Test the spline works for a two dimensional case'''
# Definition for two dimensional function output
def func(x):
if type(x) is not np.ndarray:
f = np.zeros((2,1));
else:
f = np.zeros((2,x.shape[0]));
#
f[0,:] = np.sin(x);
f[1,:] = -10*x**2 + 50*x + 1000;
return f;
#
# Definition for derivative function
def dfunc(x):
if type(x) is not np.ndarray:
df = np.zeros((2,1));
else:
df = np.zeros((2,x.shape[0]));
#
df[0,:] = np.cos(x);
df[1,:] = -20*x + 50;
return df;
#
# Given
f = lambda x : func(x);
df = lambda x : dfunc(x);
xkvec = np.linspace(0, 10, 20);
fkvec = f(xkvec);
xinter = np.linspace(0, 10, 1000);
fslope = np.ndarray((2,2)); # Clamped B.C.s
fslope[:,0] = df(xkvec[0]).squeeze();
fslope[:,1] = df(xkvec[-1]).squeeze();
# Preallocate truth spline data
m = 2;
n = xkvec.shape[0];
splineDataTrue = np.zeros((m, n, 5));
splineDataTrue[0,:,4] = xkvec;
# Run true spline for first dataset
_, _, akvec, bkvec, ckvec, dkvec \
= cubic_spline_fun(xkvec, fkvec[0,:], xinter, fslope[0,:]);
splineDataTrue[0,:,0] = akvec.squeeze();
splineDataTrue[0,:,1] = bkvec.squeeze();
splineDataTrue[0,:,2] = ckvec.squeeze();
splineDataTrue[0,:,3] = dkvec.squeeze();
# Run true spline for second dataset
_, _, akvec, bkvec, ckvec, dkvec \
= cubic_spline_fun(xkvec, fkvec[1,:], xinter, fslope[1,:]);
splineDataTrue[1,:,0] = akvec.squeeze();
splineDataTrue[1,:,1] = bkvec.squeeze();
splineDataTrue[1,:,2] = ckvec.squeeze();
splineDataTrue[1,:,3] = dkvec.squeeze();
# Run new spline
splineDataMat = cubic_spline_solve( xkvec, fkvec, fslope );
# Test Function truth values
error = splineDataMat - splineDataTrue;
maxerr = np.max(np.abs(error));
self.assertLess(maxerr, 1e-12, 'Spline error too high');
#
def test_types(self):
'''Test that the function raises type errors on bad input'''
# Function handles for function and derivatives
f = lambda x : np.sin(x);
df = lambda x : np.cos(x);
# x from 0 to 10 in the correct format
xrange = np.linspace(0, 10, 20);
xkvec = np.zeros((1, xrange.shape[0]));
for i in range(0, xrange.shape[0]):
xkvec[0,i] = xrange[i];
#
# Generate function values dataset
fkvec = f(xkvec);
xinter = np.linspace(0, 10, 1000);
# Generate parameters for clamped boundary conditions
fslope = np.ndarray((1,2));
fslope[0,0] = np.cos(xkvec[0,0]);
fslope[0,1] = np.cos(xkvec[0,-1]);
# Run function without errors
splineDataMat = cubic_spline_solve( xkvec, fkvec, fslope );
# Test with various inputs for xkvec
self.assertRaises(TypeError, cubic_spline_solve, True, fkvec, fslope);
self.assertRaises(TypeError, cubic_spline_solve, 0.1, fkvec, fslope);
self.assertRaises(TypeError, cubic_spline_solve, "AA", fkvec, fslope);
self.assertRaises(TypeError, cubic_spline_solve, 'A', fkvec, fslope);
# Test with various inputs for fkvec
self.assertRaises(TypeError, cubic_spline_solve, xkvec, True, fslope);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, 0.1, fslope);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, "AA", fslope);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, 'A', fslope);
# Test with various inputs for fslope
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, True);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, 0.1);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, "AA");
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, 'A');
#
#
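# Added for convenience (not in the original file): allows running this test
# module directly with `python test_cubic_spline_solve.py`.
if __name__ == '__main__':
    unittest.main()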
| 37.241379
| 78
| 0.564352
| 795
| 6,480
| 4.525786
| 0.197484
| 0.073374
| 0.080044
| 0.100056
| 0.651473
| 0.629516
| 0.61284
| 0.611451
| 0.444692
| 0.423013
| 0
| 0.036954
| 0.310957
| 6,480
| 174
| 79
| 37.241379
| 0.768869
| 0.194907
| 0
| 0.48
| 0
| 0
| 0.009957
| 0
| 0
| 0
| 0
| 0
| 0.14
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.13
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4370bea6e2a16934ad57aff4637712bbcfdb6bc4
| 331
|
py
|
Python
|
1805_number_of_different_integers_in_a_string.py
|
hotternative/leetcode
|
d0ec225abc2ada1398666641c7872f3eb889e7ed
|
[
"MIT"
] | null | null | null |
1805_number_of_different_integers_in_a_string.py
|
hotternative/leetcode
|
d0ec225abc2ada1398666641c7872f3eb889e7ed
|
[
"MIT"
] | null | null | null |
1805_number_of_different_integers_in_a_string.py
|
hotternative/leetcode
|
d0ec225abc2ada1398666641c7872f3eb889e7ed
|
[
"MIT"
] | null | null | null |
from string import ascii_lowercase
ts = 'a123bc34d8ef34'
cur = []
res = set()
for c in ts:
if c in ascii_lowercase:
if cur:
s = ''.join(cur)
res.add(int(s))
cur = []
else:
cur.append(c)
else:
if cur:
s = ''.join(cur)
res.add(int(s))
print(res)
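# Alternative sketch (added for illustration; not part of the original script).
# The LeetCode answer is the count of distinct values, i.e. len(res) above.
# A regex scan gives the same result, with int() normalising leading zeros:
import re

def num_different_integers(word: str) -> int:
    tokens = re.findall(r'\d+', word)        # contiguous digit runs
    return len({int(t) for t in tokens})     # dedupe by numeric value

assert num_different_integers(ts) == len(res)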
| 13.24
| 34
| 0.480363
| 45
| 331
| 3.488889
| 0.466667
| 0.11465
| 0.076433
| 0.127389
| 0.292994
| 0.292994
| 0.292994
| 0.292994
| 0.292994
| 0
| 0
| 0.039216
| 0.383686
| 331
| 24
| 35
| 13.791667
| 0.730392
| 0
| 0
| 0.588235
| 0
| 0
| 0.042683
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4371e6643a58d749ad832f8647f0481df0293c7c
| 1,087
|
py
|
Python
|
app.py
|
ahmedriaz9908/memeapiiz
|
eef98f837f2ec83edc3dd004f19dcefda9b582a5
|
[
"MIT"
] | null | null | null |
app.py
|
ahmedriaz9908/memeapiiz
|
eef98f837f2ec83edc3dd004f19dcefda9b582a5
|
[
"MIT"
] | null | null | null |
app.py
|
ahmedriaz9908/memeapiiz
|
eef98f837f2ec83edc3dd004f19dcefda9b582a5
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, jsonify
from reddit_handler import *
# random is used directly below via random.choice; import it explicitly rather
# than relying on it being re-exported by the wildcard import above.
import random
app = Flask(__name__)
meme_subreddits = ['izlam']
@app.route('/')
def index():
return render_template('index.html')
@app.route('/meme')
def one_post():
sub = random.choice(meme_subreddits)
re = get_posts(sub, 100)
r = random.choice(re)
while not is_img_link(r[1]):
r = random.choice(re)
return jsonify({
'title': r[0],
'url': r[1],
'postLink': r[2],
'subreddit': sub
})
@app.route('/sample')
def sample():
re = get_posts(random.choice(meme_subreddits), 100)
r = random.choice(re)
while not is_img_link(r[1]):
r = random.choice(re)
return render_template('sample.html', title=r[0], img_url=r[1], shortlink=r[2])
@app.route('/test')
def test():
re = get_posts(random.choice(meme_subreddits), 100)
return render_template('test.html', re=re)
@app.route('/<something>')
def not_found(something):
return render_template('not_found.html')
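# Added for illustration (not in the original file): a conventional local entry
# point; the original app may instead be launched via the `flask run` CLI or a
# WSGI server.
if __name__ == '__main__':
    app.run(debug=True)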
| 20.12963
| 84
| 0.601656
| 146
| 1,087
| 4.308219
| 0.308219
| 0.133545
| 0.127186
| 0.09539
| 0.303657
| 0.303657
| 0.303657
| 0.303657
| 0.18442
| 0.18442
| 0
| 0.020631
| 0.24195
| 1,087
| 53
| 85
| 20.509434
| 0.742718
| 0
| 0
| 0.235294
| 0
| 0
| 0.10058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0
| 0.058824
| 0.058824
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4372f137c065f7fda02b994b61b1b4bd3b7965e5
| 1,775
|
py
|
Python
|
pyrite/llvm.py
|
iahuang/pyrite
|
0db83aad6aa8f245edf13d393f65d408eb956c4d
|
[
"MIT"
] | null | null | null |
pyrite/llvm.py
|
iahuang/pyrite
|
0db83aad6aa8f245edf13d393f65d408eb956c4d
|
[
"MIT"
] | 1
|
2022-03-28T00:35:11.000Z
|
2022-03-29T21:17:06.000Z
|
pyrite/llvm.py
|
iahuang/pyrite
|
0db83aad6aa8f245edf13d393f65d408eb956c4d
|
[
"MIT"
] | null | null | null |
import shutil
from pyrite import fs
from pyrite.command_line import run_command
from pyrite.errors import UserError
from pyrite.globals import Globals
from os.path import join
class LLVMInterface:
_clang_path: str
def __init__(self):
self._clang_path = self._get_clang_path()
def _get_clang_path(self) -> str:
clang_path = shutil.which(Globals.get_compiler_options().clang_command)
if not clang_path:
raise UserError(
"Pyrite requires clang to be installed, but no such installation was found."
)
return clang_path
def compile_ll(self, source: str, output_path: str) -> None:
"""
Compile the contents of [source] as LLVM IR code, outputting a binary
specified by [output_path]. If any errors arise in compilation,
raise an error.
"""
ir_path = join(self.get_build_directory(), "build.ll")
fs.write_file(
path=ir_path,
data=source
)
result = run_command([self._clang_path, ir_path, "-o", output_path])
if result.stderr:
fs.write_file(
path=join(self.get_build_directory(), "llvm_error.txt"),
data=result.stderr
)
raise UserError(
"An unexpected error occurred during the compilation process. A detailed report has been written to {}".format(
self.get_build_directory()
)
)
def get_build_directory(self) -> str:
"""
Pyrite uses a temporary working "build" directory to store files needed for LLVM/Clang
"""
cwd = Globals.get_compiler_options().cwd
return join(cwd, "_build")
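# Usage sketch (added for illustration; the IR string and output path are
# made-up examples, not taken from the Pyrite codebase):
#
#   llvm = LLVMInterface()
#   llvm.compile_ll("define i32 @main() {\n  ret i32 0\n}\n", "a.out")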
| 29.583333
| 127
| 0.60507
| 213
| 1,775
| 4.826291
| 0.41784
| 0.070039
| 0.066148
| 0.061284
| 0.05642
| 0.05642
| 0
| 0
| 0
| 0
| 0
| 0
| 0.316056
| 1,775
| 59
| 128
| 30.084746
| 0.846787
| 0.132958
| 0
| 0.108108
| 0
| 0
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.162162
| 0
| 0.378378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43741937702bf1405a4a4845184d5f67e95b3dd1
| 526
|
py
|
Python
|
bag_recursive.py
|
eduardogerentklein/Algoritmos-Geneticos
|
499836ac4867240ee3777dcdd554081a480cb8c9
|
[
"MIT"
] | null | null | null |
bag_recursive.py
|
eduardogerentklein/Algoritmos-Geneticos
|
499836ac4867240ee3777dcdd554081a480cb8c9
|
[
"MIT"
] | null | null | null |
bag_recursive.py
|
eduardogerentklein/Algoritmos-Geneticos
|
499836ac4867240ee3777dcdd554081a480cb8c9
|
[
"MIT"
] | null | null | null |
maxWeight = 30
value = [15, 7, 10, 5, 8, 17]
weight = [15, 3, 2, 5, 9, 20]
def bag(pos, selected):
# compute the running value and weight totals
totalValue = 0
pesoTotal = 0
for i in selected:
totalValue += value[i]
pesoTotal += weight[i]
if pesoTotal > maxWeight:
return (0,0)
if pos >= len(weight):
return (totalValue, pesoTotal)
answer1 = bag(pos + 1, selected + [pos])
answer2 = bag(pos + 1, list(selected))
if answer1[0] > answer2[0]:
return answer1
else:
return answer2
bestAnswer = bag(0, [])
print(bestAnswer)
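# --- Added for illustration; not part of the original script ---
# The exhaustive recursion above is exponential in the number of items. A
# standard 0/1-knapsack dynamic program over the capacity reaches the same
# best value in O(len(value) * maxWeight):
def bag_dp(values, weights, capacity):
    best = [0] * (capacity + 1)                  # best[c] = best value using capacity c
    for v, w in zip(values, weights):
        for c in range(capacity, w - 1, -1):     # backwards so each item is used at most once
            best[c] = max(best[c], best[c - w] + v)
    return best[capacity]

print(bag_dp(value, weight, maxWeight))          # matches bestAnswer[0]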
| 18.137931
| 41
| 0.629278
| 77
| 526
| 4.298701
| 0.480519
| 0.054381
| 0.042296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083538
| 0.226236
| 526
| 29
| 42
| 18.137931
| 0.72973
| 0.028517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0
| 0
| 0.238095
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43785386d2679f8fabe7de8f8acd7359d1da2540
| 5,112
|
py
|
Python
|
task3/task3_xgb_cv.py
|
meck93/intro_ml
|
903710b13e9eed8b45fdbd9957c2fb49b2981f62
|
[
"MIT"
] | null | null | null |
task3/task3_xgb_cv.py
|
meck93/intro_ml
|
903710b13e9eed8b45fdbd9957c2fb49b2981f62
|
[
"MIT"
] | null | null | null |
task3/task3_xgb_cv.py
|
meck93/intro_ml
|
903710b13e9eed8b45fdbd9957c2fb49b2981f62
|
[
"MIT"
] | null | null | null |
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import f_classif, SelectKBest
import numpy as np
import pandas as pd
import os
mingw_path = 'C:\\Program Files\\mingw-w64\\x86_64-7.2.0-posix-sjlj-rt_v5-rev1\\mingw64\\bin'
os.environ['PATH'] = mingw_path + ';' + os.environ['PATH']
import xgboost as xgb
# Constants
FILE_PATH_TRAIN = "./input/train.h5"
FILE_PATH_TEST = "./input/test.h5"
TEST_SIZE = 0.25
# read training file
# test_data = pd.read_hdf(FILE_PATH_TRAIN, "test")
training_data = pd.read_hdf(FILE_PATH_TRAIN, "train")
# training data
# extracting the x-values
x_values_training = training_data.copy()
x_values_training = x_values_training.drop(labels=['y'], axis=1)
x_component_training = x_values_training.values
# extracting the y-values
y_component_training = training_data['y'].values
# training the scaler
scaler = StandardScaler(with_mean=True, with_std=True)
scaler = scaler.fit(x_component_training)
# scaling the training and test data
x_train_scaled = scaler.transform(x_component_training)
# feature selection
selector = SelectKBest(f_classif, k=25)
selector = selector.fit(x_train_scaled, y_component_training)
x_train_scaled_new = selector.transform(x_train_scaled)
# splitting the training set into a training & validation set
x_train, x_val, y_train, y_val = train_test_split(x_train_scaled_new, y_component_training, test_size=TEST_SIZE, random_state=42)
# training, evaluation and test data in xgboost DMatrix
xg_train = xgb.DMatrix(x_train, label=y_train)
xg_val = xgb.DMatrix(x_val, label=y_val)
# setup parameters for xgboost
params = {}
# use softmax multi-class classification
params['objective'] = 'multi:softmax'
# scale weight of positive examples
params['silent'] = 0
params['num_class'] = 5
params['tree_method'] = 'auto'
params['seed'] = 42
# number of boosting rounds
rounds = 300
# gridsearch_params = [
# (max_depth, min_child_weight)
# for max_depth in range(6,13,2)
# for min_child_weight in range(4,9,2)
# ]
# print(gridsearch_params)
# best_params = None
# min_error = float("Inf")
# for max_depth, min_child_weight in gridsearch_params:
# print("CV with max_depth={}, min_child_weight={}".format(max_depth, min_child_weight))
# # Update our parameters
# params['max_depth'] = max_depth
# params['min_child_weight'] = min_child_weight
# # Run CV
# cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True)
# # Update best error
# mean_error = cv_results['test-merror-mean'].min()
# boost_rounds = cv_results['test-merror-mean'].argmin()
# print("\t Multiclass Error {} for {} rounds".format(mean_error, boost_rounds))
# print()
# if mean_error < min_error:
# min_error = mean_error
# best_params = (max_depth, min_child_weight)
# print("Best params: {}, {}, MAE: {}".format(best_params[0], best_params[1], min_error))
# # grid search parameters
# gridsearch_params = []
# # tree depth, gamma, learning rate, regularization lambda
# for max_tree_depth in range(6, 11, 1):
# for gamma in range(0, 13, 2):
# for learn_rate in [0.3, 0.1, 0.05]:
# for reg_lambda in [10.0, 1.0, 0.0, 0.1, 0.01]:
# gridsearch_params.append((max_tree_depth, gamma, learn_rate, reg_lambda))
# print(gridsearch_params)
gridsearch_params = [
(max_depth, gamma)
for max_depth in range(6,13,2)
for gamma in range(0,13,2)
]
print(gridsearch_params)
best_params = None
min_test_error = float("Inf")
min_train_error = float("Inf")
file = open("output.txt", mode="w+", encoding='utf-8', newline='\n')
for max_depth, gamma in gridsearch_params:
print("CV with max_depth={}, gamma={}".format(max_depth, gamma))
file.write("CV with max_depth={}, gamma={}\n".format(max_depth, gamma))
# Update our parameters
params['max_depth'] = max_depth
params['gamma'] = gamma
# Run CV
cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True)
# Update best error
test_error = cv_results['test-merror-mean'].min()
train_error = cv_results['train-merror-mean'].min()
boost_rounds = cv_results['test-merror-mean'].argmin()
print("Multiclass Error {} for {} rounds".format(test_error, boost_rounds))
print()
file.write("Multiclass Error - Test: {} - Train: {} for {} rounds\n".format(test_error, train_error, boost_rounds))
file.write("\n")
if test_error < min_test_error:
min_test_error = test_error
min_train_error = train_error
best_params = (max_depth, gamma)
print("Best params: {}, {}, Test Error: {}, Train Error: {}".format(best_params[0], best_params[1], min_test_error, min_train_error))
file.write("Best params: {}, {}, Test Error: {}, Train Error: {}\n".format(best_params[0], best_params[1], min_test_error, min_train_error))
file.close()
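# --- Added for illustration; not part of the original script ---
# A typical follow-up would be to retrain with the best settings found above
# and evaluate on the held-out validation split, e.g.:
#
#   params['max_depth'], params['gamma'] = best_params
#   booster = xgb.train(params, xg_train, num_boost_round=rounds,
#                       evals=[(xg_val, 'val')], early_stopping_rounds=10)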
| 32.35443
| 150
| 0.714593
| 762
| 5,112
| 4.536745
| 0.23622
| 0.041655
| 0.032398
| 0.023141
| 0.372867
| 0.306624
| 0.273648
| 0.234596
| 0.18166
| 0.140006
| 0
| 0.020559
| 0.153169
| 5,112
| 157
| 151
| 32.56051
| 0.778009
| 0.382042
| 0
| 0
| 0
| 0.015625
| 0.17188
| 0.021283
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.078125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4378f461808522c0661a502153858f383b5e6b02
| 1,369
|
py
|
Python
|
discovery-provider/src/queries/get_plays_metrics.py
|
atticwip/audius-protocol
|
9758e849fae01508fa1d27675741228b11533e6e
|
[
"Apache-2.0"
] | 429
|
2019-08-14T01:34:07.000Z
|
2022-03-30T06:31:38.000Z
|
discovery-provider/src/queries/get_plays_metrics.py
|
SNOmad1/audius-protocol
|
3d5fc2bf688265eb529060f1f3234ef2b95ed231
|
[
"Apache-2.0"
] | 998
|
2019-08-14T01:52:37.000Z
|
2022-03-31T23:17:22.000Z
|
discovery-provider/src/queries/get_plays_metrics.py
|
SNOmad1/audius-protocol
|
3d5fc2bf688265eb529060f1f3234ef2b95ed231
|
[
"Apache-2.0"
] | 73
|
2019-10-04T04:24:16.000Z
|
2022-03-24T16:27:30.000Z
|
import logging
import time
from sqlalchemy import func, desc
from src.models import Play
from src.utils import db_session
logger = logging.getLogger(__name__)
def get_plays_metrics(args):
"""
Returns metrics for play counts
Args:
args: dict The parsed args from the request
args.start_time: date The start of the query
args.limit: number The max number of responses to return
args.bucket_size: string A date_trunc operation to aggregate timestamps by
Returns:
Array of dictionaries with the play counts and timestamp
"""
db = db_session.get_db_read_replica()
with db.scoped_session() as session:
return _get_plays_metrics(session, args)
def _get_plays_metrics(session, args):
metrics_query = (
session.query(
func.date_trunc(args.get("bucket_size"), Play.created_at).label(
"timestamp"
),
func.count(Play.id).label("count"),
)
.filter(Play.created_at > args.get("start_time"))
.group_by(func.date_trunc(args.get("bucket_size"), Play.created_at))
.order_by(desc("timestamp"))
.limit(args.get("limit"))
)
metrics = metrics_query.all()
metrics = [
{"timestamp": int(time.mktime(m[0].timetuple())), "count": m[1]}
for m in metrics
]
return metrics
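# Usage sketch (added for illustration; the argument values are made up, not
# taken from the Audius codebase):
#
#   args = {"start_time": start_date, "limit": 10, "bucket_size": "hour"}
#   metrics = get_plays_metrics(args)
#   # -> [{"timestamp": 1648771200, "count": 42}, ...]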
| 27.938776
| 82
| 0.646457
| 180
| 1,369
| 4.733333
| 0.4
| 0.032864
| 0.052817
| 0.042254
| 0.161972
| 0.100939
| 0.100939
| 0.100939
| 0.100939
| 0.100939
| 0
| 0.001955
| 0.252739
| 1,369
| 48
| 83
| 28.520833
| 0.83089
| 0.252739
| 0
| 0
| 0
| 0
| 0.075587
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.172414
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|