id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
260793 | <reponame>badouralix/adventofcode-2018
from tool.runners.python import SubmissionPy
class BebertSubmission(SubmissionPy):
    def run(self, s: str):
        """Solve Advent of Code 2018 day 2 part 2.

        Finds the two box IDs in *s* that differ in exactly one character
        position and returns the characters they have in common.

        :param s: newline-separated list of equal-length box IDs
        :return: common characters of the matching pair, or None when no
            pair differs by exactly one character
        """
        lines = [line.strip() for line in s.splitlines()]
        for i, line1 in enumerate(lines):
            for line2 in lines[i + 1:]:
                diff = 0
                for x, y in zip(line1, line2):
                    if x != y:
                        diff += 1
                        if diff > 1:
                            break  # more than one mismatch: not the pair
                if diff == 1:
                    # Lines were already stripped above; the original
                    # redundantly re-stripped them here.
                    return "".join(x for x, y in zip(line1, line2) if x == y)
        return None
| StarcoderdataPython |
3383771 | <gh_stars>0
import random
import subprocess
import os
import sys
import time
import requests
from requests.exceptions import HTTPError
import wget
import webbrowser
import urllib.request
def Terminal_End():
    """Interactive main menu for the tool.

    Checks for updates, prints the menu, reads one command from stdin and
    dispatches to the matching feature function.

    NOTE(review): the trailing ``break`` means the ``while True`` body runs
    at most once, and options "4", "7" and "8" are printed but never
    handled (they fall into "invalid command") — presumably unfinished;
    confirm before relying on this menu.
    """
    update()
    print("1. Type Start To Begin Proces")
    print("2. Auto Mode")
    print("3. Typescrippt Mode")
    print("4. Deface Website")
    print("5. Get PHP script From Any Website")
    print("6. Donate")
    print("7. Get source code")
    print("8. Contact developer")
    print("9. Exit")
    while True:
        process= str(input("Enter Magic Word:"))
        if process == "start":
            # Re-print the numbered options once the magic word is given.
            print("2. Auto Mode")
            print("3. Typescrippt Mode")
            print("4. Deface Website")
            print("5. Get PHP script From Any Website")
            print("6. Donate")
            print("7. Get source code")
            print("8. Contact developer")
            print("9. Exit")
        # Dispatch on the entered command; unhandled numbers report an error.
        if process == "1":
            work_done()
        elif process == "clear":
            Clear()
        elif process == "2":
            Auto_mode()
        elif process == "3":
            script()
        elif process == "5":
            php()
        elif process == "9":
            exit()
        elif process == "6":
            donate()
        else:
            print("invalid command")
        break
def php():
    """Download a file from a user-supplied website into the working dir.

    First probes two well-known sites to check connectivity, then prompts
    for a site and a file name and fetches the file with ``wget``.

    NOTE(review): the download URL is assembled as
    ``"https://" + url + "/" + get`` even though the prompt example shows
    *url* already carrying a scheme — confirm the expected input format.
    """
    # Connectivity probe; failures are reported but do not abort.
    for url in ['https://google.com', 'https://github.com']:
        try:
            response = requests.get(url)
            response.raise_for_status()
        except HTTPError as http_err:
            print(f': {http_err}')  # Python 3.6
        except Exception as err:
            print(f'connection error sorry: {err}')  # Python 3.6
        else:
            print('Here we go!')
    Clear()
    time.sleep(3)
    print("please note that this will stored in folder named download")
    time.sleep(2)
    Clear()
    url = input(str("Enter Website Link==https://test.com/file.extension: "))
    get =input(str("Type name for file:==C:/Users/HP/Pictures/file.extension: "))
    output=wget.download("https://"+url + "/"+get)
    # NOTE(review): the path is concatenated without a separator after the
    # directory name; this likely does not relocate the file — verify.
    save_path = os.system('cd \\downloaded_php_files' +output)
def Auto_mode():
    """Placeholder: automatic mode is not implemented yet."""
    print("coming soon...")
def script():
    """Placeholder: typescript mode is not implemented yet."""
    print("coming soon...")
def work_done():
    """Announce the start, wait three seconds, then run Hack_method().

    WARNING: Hack_method() issues a forced system shutdown command.
    """
    print("starting...")
    time.sleep(3)
    Hack_method()
def donate():
    """Open the developer's PayPal donation page in a new browser tab."""
    webbrowser.open_new_tab('https://paypal.me/muskafahmad?locale.x=en_US')
def update():
    """Check a remote version file and prompt the user to fetch an update.

    Fetches the published version string, compares it against the local
    ``__version__`` and, when a newer version exists, offers to open the
    download page in the browser. Network failures are caught and reported
    instead of propagating.
    """
    try:
        __version__ = "1.1"
        response = requests.get('https://www.terminalcreeds.com/tool_version.txt')
        data = response.text

        # BUG FIX: compare versions numerically, component by component.
        # The previous plain string comparison misorders versions such as
        # "1.10" (newer) vs "1.9". A malformed remote payload raises and is
        # handled by the outer except as a failed check.
        def parse_version(version):
            return tuple(int(part) for part in version.strip().split('.'))

        if parse_version(data) > parse_version(__version__):
            print('Software Update =>', 'Update Available!')
            test = input(str('You will Be directed to download new update are you ready? y/n:'))
            if test == "y":
                webbrowser.open_new_tab('https://terminalcreeds.com/MinaUSB.zip')
                exit()
            else:
                pass
        else:
            print('Software Update =>', 'No Updates are Available.')
    except Exception as e:
        # Typo "Connet" fixed in the user-facing message.
        print('Software Update', 'Unable to Check for Update Please Connect to the internet' + str(e))
def Hack_method():
    """DANGER: prank payload.

    Prints a directory tree, switches the Windows console colour, then
    force-shuts-down the machine with a 'Hacked' message. Do not run on a
    machine you care about.
    """
    os.system("tree")
    time.sleep(3)
    os.system("color a ")
    # /s = shutdown, /f = force-close applications, /c = shutdown comment.
    os.system("shutdown /s /f /c'Hacked By TerminalG0Y'")
def Disable_network():
    """Release the machine's DHCP lease (Windows ipconfig), cutting connectivity."""
    os.system("ipconfig /release")
def Terminal_creed():
    """Prompt for a username and print a boot message per character when the
    exact phrase "Integrate Account" is entered.
    """
    get = input("Username: hack profile")
    # BUG FIX: the original iterated over ``get()``, which raises TypeError
    # because strings are not callable. Iterate over the characters instead.
    for i in get:
        if get == "Integrate Account":
            print("Booting Mapping Sequence")
def Clear():
    """Clear the terminal screen on both Windows and POSIX systems."""
    # BUG FIX: 'cls' only exists on Windows; use 'clear' elsewhere so the
    # tool does not print an error on Linux/macOS.
    os.system('cls' if os.name == 'nt' else 'clear')
if __name__ == "__main__":
    # Entry point: show the interactive menu when run as a script.
    Terminal_End()
| StarcoderdataPython |
3494794 | <reponame>WayneLiang/Python-lesson
import os
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['font.family']='sans-serif'
plt.rcParams['axes.unicode_minus']=False
data_path = './data/bikeshare/'
data_filenames = ['2017-q1_trip_history_data.csv', '2017-q2_trip_history_data.csv',
'2017-q3_trip_history_data.csv', '2017-q4_trip_history_data.csv']
# 结果保存路径cd -
output_path = './output'
if not os.path.exists(output_path):
os.makedirs(output_path)
def collect_process_analyze_data():
    """
    Step 1+2+3: data collection, processing and analysis.

    Reads the four quarterly trip CSV files and computes, per quarter, the
    mean ride duration in minutes for members and for casual riders.

    Returns:
        tuple: (member_mean_duration_list, casual_mean_duration_list),
        one float per quarter each.
    """
    member_mean_duration_list = []
    casual_mean_duration_list = []
    for data_filename in data_filenames:
        data_file = os.path.join(data_path, data_filename)
        # Load as raw strings; fields in the CSV are double-quoted.
        data_arr = np.loadtxt(data_file, delimiter=',', dtype='str', skiprows=1)
        # Strip the double quotes from the columns we use.
        # Ride duration (column 0) — presumably milliseconds, since the
        # /1000/60 below converts to minutes; confirm against the raw data.
        duration_col = np.core.defchararray.replace(data_arr[:, 0], '"', '')
        duration_col = duration_col.reshape(-1, 1)
        # Member type (last column): 'Member' or 'Casual'.
        member_type_col = np.core.defchararray.replace(data_arr[:, -1], '"', '')
        member_type_col = member_type_col.reshape(-1, 1)
        duration_member_type_arr = np.concatenate([duration_col, member_type_col], axis=1)
        # Mean ride time of members, in minutes.
        member_arr = duration_member_type_arr[duration_member_type_arr[:, 1] == 'Member']
        member_mean_duration = np.mean(member_arr[:, 0].astype('float') / 1000 / 60)
        member_mean_duration_list.append(member_mean_duration)
        # Mean ride time of casual (non-member) riders, in minutes.
        causal_arr = duration_member_type_arr[duration_member_type_arr[:, 1] == 'Casual']
        casual_mean_duration = np.mean(causal_arr[:, 0].astype('float') / 1000 / 60)
        casual_mean_duration_list.append(casual_mean_duration)
    return member_mean_duration_list, casual_mean_duration_list
def save_and_show_results(member_mean_duration_list, casual_mean_duration_list):
    """
    Step 4: present the results.

    Draws a grouped bar chart (member vs casual mean ride duration per
    quarter), saves it under ``output_path`` and shows it on screen.

    Args:
        member_mean_duration_list: four per-quarter means for members.
        casual_mean_duration_list: four per-quarter means for casual riders.
    """
    bar_locs = np.arange(4)
    bar_width = 0.35  # width of each bar
    # One Chinese label per quarter: "Quarter {i}".
    xtick_labels = [u'第{}季度'.format(i + 1) for i in range(4)]
    # Equivalent loop form kept for reference:
    # xtick_labels = []
    # for i in range(4):
    #     xtick_labels.append('第{}季度'.format(i + 1))
    plt.figure()
    plt.bar(bar_locs, member_mean_duration_list, width=bar_width, color='g', alpha=0.7, label=u'会员')
    plt.bar(bar_locs + bar_width, casual_mean_duration_list, width=bar_width, color='r', alpha=0.7, label=u'非会员')
    # Centre the tick between the two grouped bars.
    plt.xticks(bar_locs + bar_width / 2, xtick_labels, rotation=45)
    plt.ylabel(u'平均骑行时间(单位:分钟)')
    plt.title(u'柱状图')
    plt.legend(loc='best')
    plt.tight_layout()
    plt.savefig(os.path.join(output_path, 'group_bar_chart.png'))
    plt.show()
def main():
    """
    Entry point: run the full pipeline — load/process/analyze the quarterly
    data, then plot and save the grouped bar chart.
    """
    # Step 1 + 2 + 3: data collection, processing, analysis
    member_mean_duration_list, casual_mean_duration_list = collect_process_analyze_data()
    # Step 4: visualisation
    save_and_show_results(member_mean_duration_list, casual_mean_duration_list)
if __name__ == '__main__':
main()
| StarcoderdataPython |
6455928 | <reponame>EPC-MSU/uRPC
import re
from os import walk
from os.path import join
from typing import List, Generator
__all__ = ["resources"]
def resources(path: str) -> Generator[str, None, None]:
    """Yield the path of every file under *path*, skipping VCS metadata.

    Directory and file names matching the VCS filter (``.git*`` / ``.hg*``)
    are excluded; symlinked directories are followed.
    """
    for root, dirs, files in walk(path, followlinks=True):
        # Prune unwanted directories in place so walk() never descends
        # into them.
        dirs[:] = __filter_files(dirs)
        for filename in __filter_files(files):
            yield join(root, filename)
__files_filter_regex = re.compile("^(\\.git.*|\\.hg.*)$")
def __filter_files(files: List[str]) -> List[str]:
    """Return *files* without entries matching the VCS filter regex."""
    return [name for name in files if __files_filter_regex.match(name) is None]
| StarcoderdataPython |
3315615 | # Node Class
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        # Payload held by this node.
        self.data = data
        # Following node; None marks the tail of the list.
        self.next = None
# List Class
# List Class
class UnorderedList:
    """Singly linked list with head insertion.

    Positions reported by :meth:`search` are 0-based, counted from the head.
    """

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    # returns true if empty
    def isEmpty(self):
        """Return True when the list contains no nodes."""
        return self.head is None

    # adds new element to the front of the list
    def add(self, item):
        """Insert *item* at the head of the list. O(1)."""
        temp = Node(item)
        temp.next = self.head
        self.head = temp

    # returns dynamically counted size
    def size(self):
        """Return the number of nodes by walking the whole list. O(n)."""
        current = self.head
        count = 0
        while current is not None:
            count += 1
            current = current.next
        return count

    # returns node position
    def search(self, item):
        """Return the 0-based position of the first occurrence of *item*,
        or -1 when it is absent."""
        current = self.head
        count = 0
        while current is not None:
            if current.data == item:
                return count
            count += 1
            current = current.next
        return -1

    # returns true if the element is in the list
    def exists(self, item):
        """Return True when *item* occurs anywhere in the list."""
        return self.search(item) != -1

    # removes first occurence of the item
    def remove(self, item):
        """Remove the first occurrence of *item*; a no-op when absent."""
        current = self.head
        previous = None
        found = False
        while not found and current is not None:
            if current.data == item:
                found = True
            else:
                previous = current
                current = current.next
        if current is not None:
            if previous is None:
                # Match at the head: advance the head pointer.
                self.head = current.next
            else:
                previous.next = current.next

    # visualizes the list for print function
    def __str__(self):
        """Return the items rendered as 'a -> b -> c '."""
        current = self.head
        visualized = ''
        while current is not None:
            visualized += '{} '.format(current.data)
            if current.next is not None:
                visualized += '-> '
            current = current.next
        return visualized
if __name__ == '__main__':
    # Demo: exercise every UnorderedList operation and print the results.
    dummyList = UnorderedList()
    dummyList.add(4)
    dummyList.add(3)
    dummyList.add(2)
    dummyList.add(6)
    dummyList.add(7)
    # Print the list (head insertion order: 7 -> 6 -> 2 -> 3 -> 4)
    print(dummyList)
    # size
    print(dummyList.size())
    # isEmpty
    print(dummyList.isEmpty())
    # search (position of present and absent items)
    print(dummyList.search(2))
    print(dummyList.search(9))
    # exists
    print(dummyList.exists(2))
    print(dummyList.exists(9))
    # remove (removing an absent item is a no-op)
    dummyList.remove(2)
    dummyList.remove(9)
    print(dummyList)
| StarcoderdataPython |
6515016 | from ..context import get_new_context, _CONTEXT
from ..graph import *
from ..graph import _seq_to_text_format
import pytest
import scipy.sparse
# keeping things short
A = np.asarray
C = constant
I = input
# testing whether operator overloads result in proper type
@pytest.mark.parametrize('root_node, expected', [
# __add__ / __radd__
(C(0) + C(1), Plus),
(C(0) + 1, Plus),
(0 + C(1), Plus),
(0 + 1, int),
# __sub__ / __rsub__
(C(0) - C(1), Minus),
(C(0) - 1, Minus),
(0 - C(1), Minus),
(0 - 1, int),
# __mul__ / __rmul__ --> element-wise (!) multiplication
(C(0) * C(1), ElementTimes),
(C(0) * 1, ElementTimes),
(0 * C(1), ElementTimes),
(0 * 1, int),
# __abs__
(abs(C(0)), Abs),
# __getitem__
(C(np.arange(0, 10))[2:5], RowSlice),
(C(np.arange(0, 10))[:5], RowSlice),
])
def test_overload_types(root_node, expected):
assert isinstance(root_node, expected)
def test_overload_exception():
    """Open-ended and stepped slices of a constant node must be rejected."""
    with pytest.raises(ValueError):
        C(range(0, 10))[:]
    with pytest.raises(ValueError):
        C(range(0, 10))[0:3:2]
@pytest.mark.parametrize("root_node, expected", [
(C(2, var_name='c0'), "c0 = Constant(2, rows=1, cols=1)"),
# Input should behave as Constant in case of scalars
(I([1,2], var_name='i1'), "i1 = Input(2:1, tag='feature')"),
(Plus(C(0), C(1)),
"v0 = Constant(0, rows=1, cols=1)\nv1 = Constant(1, rows=1, cols=1)\nv2 = Plus(v0, v1)"),
])
def test_description(root_node, expected):
description, has_inputs, readers = root_node.to_config()
assert description == expected
def test_graph_with_same_node_twice():
    """A node referenced twice must appear only once in the description."""
    v0 = C(1)
    root_node = Plus(v0, v0)
    expected = 'v0 = Constant(1, rows=1, cols=1)\nv1 = Plus(v0, v0)'
    description, has_inputs, readers = root_node.to_config()
    assert description == expected
    assert readers == []
@pytest.mark.parametrize("alias, data, expected", [
('', [A([1,0]), A([0,0,1,0])], ValueError), # no alias given
('A', [object()], ValueError),
])
def test_sequence_conversion_exceptions(alias, data, expected):
with pytest.raises(expected):
_seq_to_text_format(data, alias=alias)
def test_constant_var_name():
    """The var_name passed at construction is stored on the node."""
    var_name = 'NODE'
    node = C([A([])], var_name=var_name)
    assert node.var_name == var_name
@pytest.mark.parametrize("alias, data, expected", [
('W', [A([])], """\
0|W \
"""),
('W', [A([1,0]), A([0,0,1,0])], """\
0|W 1 0
1|W 0 0 1 0\
"""),
])
def test_sequence_conversion_dense(alias, data, expected):
assert _seq_to_text_format(data, alias=alias) == expected
if False:
@pytest.mark.parametrize("alias, data, expected", [
('W', [A({})], """\
0|W \
"""),
('W', [{3:1, 50:1, 2:0}, {1:-5}], """\
0|W 2:0 3:1 50:1
1|W 1:-5\
"""),
])
def test_sequence_conversion_sparse(alias, data, expected):
# We use the dictionary in data to create a SciPy sparse dictionary of
# keys, which we then feed to the converter.
dok_data = []
for data_elem in data:
d = scipy.sparse.dok_matrix((100,1))
for k,v in data_elem.items():
d[k] = v
dok_data.append(d)
assert _seq_to_text_format(dok_data, alias=alias) == expected
@pytest.mark.parametrize("data, expected", [
([], True),
([1], True),
([[1,2]], True),
([[]], True),
([[A([1,2])]], False),
([A([1,2])], False),
([A([1,2]), A([])], False),
])
def test_is_tensor(data, expected):
#import ipdb;ipdb.set_trace()
assert is_tensor(data) == expected
@pytest.mark.parametrize("data, expected", [
([], False),
([1], False),
([[1,2]], False),
([[]], False),
([[A([1,2])]], False),
([A([1,2])], True),
([A([1,2]), A([])], True),
])
def test_is_sequence(data, expected):
assert is_sequence(data) == expected
| StarcoderdataPython |
3281763 | import os
def pytest_addoption(parser):
    """Register the --cpu flag, used to force a CUDA-free test run."""
    parser.addoption('--cpu', action='store_true')
def pytest_configure(config):
    """When --cpu is given, hide CUDA from the test process.

    Strips CUDA directories out of PATH and blanks the CUDA/cuDNN version
    variables so code under test falls back to CPU execution.
    """
    if not config.option.cpu:
        return
    os.environ['PATH'] = ':'.join(p for p in os.environ['PATH'].split(':') if 'cuda' not in p)
    os.environ['CUDA_VERSION'] = ''
    os.environ['CUDNN_VERSION'] = ''
| StarcoderdataPython |
8088100 | <filename>fraud_detection_recall-RandomForest.py
# coding: utf-8
# In[87]:
import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
get_ipython().magic('matplotlib inline')
# In[88]:
import itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Parameters
    ----------
    cm : array-like of shape (n_classes, n_classes)
        Confusion matrix as produced by sklearn's confusion_matrix.
    classes : sequence
        Tick labels for both axes, in matrix order.
    normalize : bool
        When True, each row is scaled to sum to 1 (per-class rates).
    title : str
        Figure title.
    cmap : matplotlib colormap
        Colour map passed to imshow.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=0)
    plt.yticks(tick_marks, classes)
    if normalize:
        # Scale each row by its total so cells show per-class rates.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # Cells darker than half the max get white text for contrast.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# In[89]:
data = pd.read_csv("E:/Google Drive/Ustudy/research/creditcard.csv")
data.head()
columns=data.columns
features_columns=columns.delete(len(columns)-1)
features=data[features_columns]
labels=data['Class']
X = data.loc[:, data.columns != 'Class']
y = data.loc[:, data.columns == 'Class']
# In[90]:
# Whole dataset
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.3, random_state = 0)
print("Number transactions train dataset: ", len(X_train))
print("Number transactions test dataset: ", len(X_test))
print("Total number of transactions: ", len(X_train)+len(X_test))
features_train, features_test, labels_train, labels_test = train_test_split(features,
labels,
test_size=0.3,
random_state=0)
print("Number transactions train dataset: ", len(features_train))
print("Number transactions test dataset: ", len(features_test))
print("Total number of transactions: ", len(features_train)+len(features_test))
# In[97]:
# In[97]:
oversampler = SMOTE(random_state=1)
os_features, os_labels = oversampler.fit_sample(features_train, labels_train)
# BUG FIX: clf2 was fitted on undefined names (whole_os_features /
# whole_os_labels), which raises NameError. Oversample the X/y train split
# as well so the second classifier has training data.
whole_os_features, whole_os_labels = oversampler.fit_sample(X_train, y_train)
# In[98]:
clf = RandomForestClassifier(n_estimators=15, random_state=1)
clf2 = RandomForestClassifier(n_estimators=30, random_state=1)
clf.fit(os_features, os_labels)
clf2.fit(whole_os_features, whole_os_labels)
# In[99]:
actual=labels_test
predictions=clf.predict(features_test)
y_pred = clf2.predict(X_test)
# In[100]:
confusion_matrix(actual,predictions)
# In[101]:
cnf_matrix = confusion_matrix(actual,predictions)
np.set_printoptions(precision=2)
print("Recall metric in the testing dataset: ", cnf_matrix[1,1]/(cnf_matrix[1,0]+cnf_matrix[1,1]))
print("Precision metric in the testing dataset: ", cnf_matrix[1,1]/(cnf_matrix[0,1]+cnf_matrix[1,1]))
# Plot non-normalized confusion matrix
class_names = [0,1]
plt.figure()
plot_confusion_matrix(cnf_matrix
, classes=class_names
, title='Confusion matrix')
plt.show()
# In[102]:
confusion_matrix(y_test,y_pred)
cnf_matrix = confusion_matrix(y_test,y_pred)
np.set_printoptions(precision=2)
print("Recall metric in the testing dataset: ", cnf_matrix[1,1]/(cnf_matrix[1,0]+cnf_matrix[1,1]))
print("Precision metric in the testing dataset: ", cnf_matrix[1,1]/(cnf_matrix[0,1]+cnf_matrix[1,1]))
# Plot non-normalized confusion matrix
class_names = [0,1]
plt.figure()
plot_confusion_matrix(cnf_matrix
, classes=class_names
, title='Confusion matrix')
plt.show()
| StarcoderdataPython |
6509171 | <reponame>radetsky/themis
#
# Copyright (c) 2017 Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from base64 import b64encode, b64decode
from pythemis import scell
# Positional-argument indices into sys.argv (index 0 is the script name).
_, COMMAND, KEY, MESSAGE, CONTEXT = range(5)
if len(sys.argv) not in (4, 5):
    print('Usage: <command: enc | dec > <key> <message> <context (optional)>')
    exit(1)
command = sys.argv[COMMAND]
key = sys.argv[KEY]
message = sys.argv[MESSAGE]
# Optional associated context; must match between encrypt and decrypt calls.
context = sys.argv[CONTEXT].encode('utf-8') if len(sys.argv) == 5 else None
# Secure Cell in seal mode: authenticated encryption keyed by the passphrase.
cell = scell.SCellSeal(key.encode('utf-8'))
if command == 'enc':
    # Encrypt, then base64-encode so the ciphertext is printable.
    encrypted_message = cell.encrypt(message.encode('utf-8'), context)
    encoded_message = b64encode(encrypted_message).decode('ascii')
    print(encoded_message)
elif command == 'dec':
    # Input is expected to be base64 output from a previous 'enc' run.
    decoded_message = b64decode(message.encode('ascii'))
    decrypted_message = cell.decrypt(decoded_message, context)
    print(decrypted_message.decode('utf-8'))
else:
    print('Wrong command, use "enc" or "dec"')
    exit(1)
| StarcoderdataPython |
9727923 | import ai2thor.controller
from ai2thor.server import Event
from ai2thor.platform import CloudRendering, Linux64
import pytest
import numpy as np
import warnings
import os
import math
def fake_linux64_exists(self):
    """Build.exists stand-in: only the Linux64 platform has a build."""
    return self.platform.name() == "Linux64"
@classmethod
def fake_invalid_cr_validate(cls, request):
return ["Missing libvulkan1."]
@classmethod
def fake_invalid_linux64_validate(cls, request):
return ["No display found. "]
def fake_cr_exists(self):
    """Build.exists stand-in: only the CloudRendering platform has a build."""
    return self.platform.name() == "CloudRendering"
def fake_not_exists(self):
return False
def fake_find_platform_builds(self, canditate_platorms, request, commits, releases_dir, local_build):
return []
def fake_exists(self):
return True
def fake_linux_system():
return "Linux"
def fake_darwin_system():
return "Darwin"
def noop_download(self):
pass
def select_platforms_linux_cr(request):
return (Linux64, CloudRendering)
def select_platforms_cr(request):
return (CloudRendering, )
@classmethod
def fake_validate(cls, request):
return []
class FakeServer(object):
    """Minimal server double backed by a pair of single-slot queues."""

    def __init__(self):
        self.request_queue = FakeQueue()
        self.response_queue = FakeQueue()

    def send(self, action):
        # Sending is only legal once the previous request has been consumed.
        assert self.request_queue.empty()
        self.response_queue.put_nowait(action)

    def receive(self):
        return self.request_queue.get(False, 0)
class FakeQueue(object):
    """Single-slot queue double used by the FakeServer test harness."""

    def __init__(self):
        self.value = None

    def put_nowait(self, v):
        # Only one pending item is allowed at a time.
        assert self.value is None
        self.value = v

    def get(self, block=False, timeout=0):
        item, self.value = self.value, None
        return item

    # always return empty so that we pass
    def empty(self):
        return True
def controller(**args):
    """Build an ai2thor Controller wired to a FakeServer for offline tests.

    Keyword arguments are forwarded to the Controller constructor, on top
    of a default of download_only=True so no simulator process is started.
    """
    # delete display so the tests can run on Linux
    if "DISPLAY" in os.environ:
        del os.environ["DISPLAY"]
    # during a ci-build we will get a warning that we are using a commit_id for the
    # build instead of 'local'
    default_args = dict(download_only=True)
    default_args.update(args)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        c = ai2thor.controller.Controller(**default_args)
    # Replace the real IPC server with the in-memory fake.
    c.server = FakeServer()
    return c
def test_osx_build_missing(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.Controller.find_platform_builds", fake_find_platform_builds)
with pytest.raises(Exception) as ex:
c = controller()
assert str(ex.value).startswith("No build exists for arch=Darwin platforms=OSXIntel64 and commits:")
def test_osx_build_invalid_commit_id(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_not_exists)
fake_commit_id = "1234567TEST"
with pytest.raises(ValueError) as ex:
c = controller(commit_id=fake_commit_id)
assert (
str(ex.value)
== "Invalid commit_id: %s - no build exists for arch=Darwin platforms=OSXIntel64" % fake_commit_id
)
def test_osx_build(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "OSXIntel64"
assert c._build.commit_id == fake_commit_id
def test_linux_explicit_xdisplay(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id, x_display="75.9")
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_linux_invalid_linux64_invalid_cr(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate",
fake_invalid_cr_validate,
)
mocker.patch(
"ai2thor.controller.ai2thor.platform.Linux64.validate",
fake_invalid_linux64_validate,
)
fake_commit_id = "1234567TEST"
with pytest.raises(Exception) as excinfo:
c = controller(commit_id=fake_commit_id)
assert str(excinfo.value).startswith(
"The following builds were found, but had missing dependencies. Only one valid platform is required to run AI2-THOR."
)
def test_linux_invalid_linux64_valid_cr(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch(
"ai2thor.controller.ai2thor.platform.Linux64.validate",
fake_invalid_linux64_validate,
)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "CloudRendering"
assert c._build.commit_id == fake_commit_id
def test_linux_valid_linux64_valid_cloudrendering(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_linux_valid_linux64_valid_cloudrendering_enabled_cr(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
mocker.patch("ai2thor.platform.Linux64.enabled", False)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "CloudRendering"
assert c._build.commit_id == fake_commit_id
def test_linux_valid_linux64_invalid_cloudrendering(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate",
fake_invalid_cr_validate,
)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_linux_missing_linux64(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_cr_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "CloudRendering"
assert c._build.commit_id == fake_commit_id
def test_linux_missing_cloudrendering(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_linux64_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_distance():
    """Euclidean distance helper: known pair, identity, and sqrt(2) cases."""
    point1 = dict(x=1.5, z=2.5)
    point2 = dict(x=4.33, z=7.5)
    point3 = dict(x=2.5, z=3.5)
    assert ai2thor.controller.distance(point1, point2) == 5.745337239884183
    assert ai2thor.controller.distance(point1, point1) == 0.0
    assert ai2thor.controller.distance(point1, point3) == math.sqrt(2.0)
def test_key_for_point():
    """Grid keys are the coordinates rounded to one decimal, space separated."""
    assert ai2thor.controller.key_for_point(2.567, -3.43) == "2.6 -3.4"
def test_invalid_commit(mocker):
caught_exception = False
try:
c = ai2thor.controller.Controller(commit_id="1234567x")
except ValueError as e:
caught_exception = True
assert caught_exception, "invalid commit id should throw ValueError"
def test_scene_names(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
c = controller()
assert len(c.scene_names()) == 195
assert len(c.ithor_scenes()) == 120
assert len(c.robothor_scenes()) == 195 - 120
def test_invalid_action(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(
screenWidth=300,
screenHeight=300,
colors=[],
lastActionSuccess=False,
errorCode="InvalidAction",
errorMessage="Invalid method: moveaheadbadmethod",
)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="MoveaheadbadMethod")
c.server.request_queue.put_nowait(fake_event)
with pytest.raises(ValueError) as excinfo:
c.step(action1, raise_for_failure=True)
assert excinfo.value.args == ("Invalid method: moveaheadbadmethod",)
def test_fix_visibility_distance_env(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
try:
os.environ["AI2THOR_VISIBILITY_DISTANCE"] = "2.0"
fake_event = Event(
dict(screenWidth=300, screenHeight=300, colors=[], lastActionSuccess=True)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="Initialize", gridSize=0.25)
c.server.request_queue.put_nowait(fake_event)
c.step(action1)
filtered_action = c.server.response_queue.get()
assert filtered_action == {
"action": "Initialize",
"gridSize": 0.25,
"visibilityDistance": 2.0,
}
finally:
del os.environ["AI2THOR_VISIBILITY_DISTANCE"]
def test_raise_for_failure(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(
screenWidth=300,
screenHeight=300,
colors=[],
lastActionSuccess=False,
errorCode="NotOpen",
)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="MoveAhead")
c.server.request_queue.put_nowait(fake_event)
with pytest.raises(RuntimeError):
c.step(action1, raise_for_failure=True)
def test_failure(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(
screenWidth=300,
screenHeight=300,
colors=[],
lastActionSuccess=False,
errorCode="NotOpen",
)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="MoveAhead")
c.server.request_queue.put_nowait(fake_event)
e = c.step(action1)
assert c.last_action == action1
assert not e.metadata["lastActionSuccess"]
def test_last_action(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(screenWidth=300, screenHeight=300, colors=[], lastActionSuccess=True)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="RotateRight")
c.server.request_queue.put_nowait(fake_event)
e = c.step(action1)
assert c.last_action == action1
assert e.metadata["lastActionSuccess"]
c = controller()
c.last_event = fake_event
action2 = dict(action="RotateLeft")
c.server.request_queue.put_nowait(fake_event)
e = c.step(action2)
assert c.last_action == action2
assert e.metadata["lastActionSuccess"]
def test_unity_command_force_device_index(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch(
"ai2thor.controller.ai2thor.platform.Linux64.validate",
fake_invalid_linux64_validate,
)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
original_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES")
try:
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3,4"
c = controller(platform=CloudRendering, gpu_device=1)
assert c.unity_command(650, 550, False) == [
c._build.executable_path,
"-screen-fullscreen",
"0",
"-screen-quality",
"7",
"-screen-width",
"650",
"-screen-height",
"550",
'-force-device-index',
'4'
]
finally:
if original_visible_devices:
os.environ["CUDA_VISIBLE_DEVICES"] = original_visible_devices
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
c = controller(platform=CloudRendering, gpu_device=5)
assert c.unity_command(650, 550, False) == [
c._build.executable_path,
"-screen-fullscreen",
"0",
"-screen-quality",
"7",
"-screen-width",
"650",
"-screen-height",
"550",
'-force-device-index',
'6'
]
c = controller(platform=CloudRendering, gpu_device=0)
assert c.unity_command(650, 550, False) == [
c._build.executable_path,
"-screen-fullscreen",
"0",
"-screen-quality",
"7",
"-screen-width",
"650",
"-screen-height",
"550",
'-force-device-index',
'0'
]
def test_unity_command(mocker):
    """unity_command reflects the fullscreen flag and the quality setting."""
    mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
    mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
    mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
    mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)

    def expected(build, fullscreen_flag, quality_level):
        # 650x550 launch with the given fullscreen / quality arguments.
        return [
            build.executable_path,
            "-screen-fullscreen",
            fullscreen_flag,
            "-screen-quality",
            quality_level,
            "-screen-width",
            "650",
            "-screen-height",
            "550",
        ]

    # Default construction: windowed, quality level 7.
    c = controller()
    assert c.unity_command(650, 550, False) == expected(c._build, "0", "7")

    # Explicit fullscreen with "Low" quality maps to level 2.
    c = controller(fullscreen=True, quality="Low")
    assert c.unity_command(650, 550, False) == expected(c._build, "1", "2")
| StarcoderdataPython |
102781 | <filename>scripts/moclo_registry.py
#!/usr/bin/env python3
# coding: utf-8
"""Automatic Icon Genetics sequences annotation pipeline.
"""
import copy
import io
import itertools
import json
import re
import os
import warnings
import sys
import bs4 as bs
import six
import tqdm
import requests
from Bio.Seq import Seq, translate
from Bio.SeqFeature import SeqFeature, FeatureLocation, CompoundLocation, Reference
from Bio.SeqIO import read, write
from Bio.SeqRecord import SeqRecord
from Bio.Restriction import BsaI
from fs.zipfs import ReadZipFS
from moclo.record import CircularRecord
from moclo.regex import DNARegex
# Archive of all kit GenBank files hosted on AddGene.
ZIP_URL = "https://media.addgene.org/cms/filer_public/b6/f8/b6f82f82-4604-4444-9886-f8577018aee4/moclo_tool_kit_genbank_files_2_1.zip"
# Kit inventory page (scraped for part ids, resistances and AddGene links).
URL = "https://www.addgene.org/cloning/moclo/marillonnet/"
# Desktop browser user-agent string for HTTP requests.
UA = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"
# Part sequence for automatic annotation / annotation relocation
BSAI = DNARegex("ggtctc")  # BsaI recognition site
BPII = DNARegex("gaagac")  # BpiI (BbsI) recognition site
# DVK_AF = None
# DVLA_RX = None
# BB_PREFIX = DNARegex("gaattcgcggccgcttctagag")
# SSRA_TAG = DNARegex("gctgcaaacgacgaaaactacgctttagtagct")
AMPR_TERM = "gattatcaaaaaggatctt".upper()  # Reverse 3' of AmpR terminator
KANR_PROM = "aacaccccttgtattactgtttatgtaagcagacagtttt".upper()  # KanR promoter search sequence
KANR_TERM = "gtgttacaaccaattaaccaattctga".upper()  # KanR terminator search sequence
# Some descriptions (e.g. promoter) for device / cassettes annotation
DESCS = {}
# Parses names of the form "<name> (<description>)_<SUFFIX>".
NAME_REGEX = re.compile(r"([^ ]*) \(([^\)]*)\)(_[A-Z]{2})")
# Extracts a six-hex-digit color from a "color: #rrggbb" note.
COLOR_REGEX = re.compile(r"color: (#[0-9a-fA-F]{6})")
def translate_color(feature):
    """Convert a ``color: #rrggbb`` note on *feature* into ApE color qualifiers.

    Looks for a ``color: #...`` entry in the feature's ``note`` qualifier.
    If one is found and contains a six-hex-digit color, the note entry is
    removed and the color is recorded as ``ApEinfo_fwdcolor`` /
    ``ApEinfo_revcolor`` (with a default ``ApEinfo_graphicformat``).
    No-op when the feature has no such note.
    """
    notes = feature.qualifiers.get("note", [])
    color_note = next((n for n in notes if n.startswith("color: #")), None)
    if color_note is None:
        return
    match = COLOR_REGEX.match(color_note)
    if match is None:
        # e.g. a 3-digit hex such as "color: #fff": the previous version
        # crashed here with AttributeError; leave the note untouched instead.
        return
    hex_color = match.group(1).lower()
    notes.remove(color_note)
    feature.qualifiers.update(
        {
            "ApEinfo_fwdcolor": [hex_color],
            "ApEinfo_revcolor": [hex_color],
            "ApEinfo_graphicformat": [
                "arrow_data {{0 1 2 0 0 -1} {} 0} width 5 offset 0"
            ],
        }
    )
if __name__ == "__main__":
    # Pipeline: scrape the AddGene MoClo kit inventory, pair each part's
    # AddGene GenBank record with the corresponding record from the kit ZIP
    # archive, merge the annotations of both, normalize resistance-marker
    # features, and write one GenBank file per part into the registry.
    warnings.simplefilter("ignore")
    session = requests.Session()
    # load the kit inventory page
    with session.get(URL) as res:
        soup = bs.BeautifulSoup(res.text, "html.parser")
    # load the zip archive
    with session.get(ZIP_URL) as res:
        archive = ReadZipFS(six.BytesIO(res.content)).opendir(
            "/MoClo Tool Kit genbank files 2/"
        )
    # load inventory
    inventory = soup.find("table", class_="kit-inventory-table")
    it = tqdm.tqdm(inventory.find_all("tr")[1:])
    for row in it:
        # extract each row
        row_text = row.find("a").text
        # get antibiotics resistances
        resistance = row.find("span", class_="resistance-spacing").text.strip()
        # Find a name / ID
        id_ = name = row_text
        # these two parts are deliberately not processed
        if name == "pAGM1311" or name == "pAGM9121":
            continue
        # Update the progress bar
        it.set_description(id_)
        # extract info
        info = {
            "resistance": resistance,
            # "name": id_,
            "id": id_,
            # "type": type_,
            "location": row.find("b").text.strip().replace(" / ", ""),
            "addgene_id": row.find("a").get("href").strip("/"),
        }
        # get the ZIP sequence (first filename variant that exists wins)
        for path in ("{} cor.gbk", "{}.gbk", "{}.gb"):
            if archive.exists(path.format(id_)):
                break
        with archive.open(path.format(id_), encoding="latin-1") as f:
            rec = f.read().replace("Exported File", "Exported ")
            gb_archive = CircularRecord(read(six.StringIO(rec), "gb"))
        # get the AddGene sequences page
        url = "https://www.addgene.org/{}/sequences/".format(info["addgene_id"])
        with session.get(url) as res:
            soup = bs.BeautifulSoup(res.text, "html.parser")
        # get the addgene full sequence
        gb_url = soup.find("a", class_="genbank-file-download").get("href")
        with requests.get(gb_url) as res:
            gb = info["gb"] = CircularRecord(read(io.StringIO(res.text), "gb"))
        # NOTE(review): ids scraped from the inventory appear to use a
        # lower-case "p" prefix (cf. "pAGM1311"/"pAGM9121" above and the
        # "pAGM1276" FIXME below), so this upper-case comparison may never
        # match — confirm against the actual table contents.
        if id_ == "PAGM1276":
            gb = gb.reverse_complement(True, True, True, True, True, True, True)
        # Copy well documented information from one record to the other
        gb.seq, gb_archive.seq = (gb.seq.upper(), gb_archive.seq.upper())
        gb.id = gb_archive.id = id_
        gb.name = gb_archive.name = name
        gb_archive.description = gb.description
        gb_archive.annotations = copy.deepcopy(gb.annotations)

        # quick feature accessor (over the AddGene record)
        def get_features(label):
            return (f for f in gb.features if label in f.qualifiers.get("label", []))

        def get_features_from_note(note):
            return (f for f in gb.features if note in f.qualifiers.get("note", []))

        # Check sequences are the same, or skip
        if len(gb) != len(gb_archive):
            # FIXME: pAGM1276 sequences differ
            print("lengths differ for", id_, ":", len(gb), "VS", len(gb_archive))
            # gb = gb.reverse_complement(True, True, True, True, True, True, True)
            continue
        elif gb.seq != gb_archive.seq:
            print("sequences differ for", id_, ":", len(gb))
            continue
        # Copy AddGene annotations to the archive record
        for feature in gb.features:
            # get the feature sequence (doubling the sequence also captures
            # features spanning the origin of the circular record)
            seq = feature.extract(gb.seq + gb.seq)
            if feature.location.strand == -1:
                seq = seq.reverse_complement()
            # search the feature in the archive record
            match = DNARegex(seq).search(gb_archive)
            if match is not None:
                start, end = match.span()
                loc = FeatureLocation(start, end, feature.location.strand)
                # remove possibly duplicate annotation
                if any(f.location == loc for f in gb_archive.features):
                    other = next(f for f in gb_archive.features if f.location == loc)
                    gb_archive.features.remove(other)
                # add the annotation to the archive record
                new_feature = copy.deepcopy(feature)
                new_feature.location = loc
                gb_archive.features.append(new_feature)

        # quick feature accessor for amalgamated record (shadows the
        # AddGene-record accessors defined above)
        def get_features(label):
            return (
                f for f in gb_archive.features if label in f.qualifiers.get("label", [])
            )

        def get_features_from_note(note):
            return (
                f for f in gb_archive.features if note in f.qualifiers.get("note", [])
            )

        # AmpR recolor and annotations
        ampr = next(get_features("AmpR"), None)
        if ampr is not None:
            ampr.qualifiers = {
                "label": ["AmpR"],
                "codon_start": 1,
                "gene": "bla",
                "product": "beta-lactamase",
                "function": "ampicilin and caribenicillin resistance",
                "translation": ampr.extract(gb.seq).translate(),
                "note": ["color: #9F4240"],
                "db_xref": [
                    "GO:0005515",
                    "GO:0008800",
                    "GO:0016787",
                    "GO:0030655",
                    "GO:0046677",
                    "InterPro:IPR000871",
                    "InterPro:IPR023650",
                    "InterPro:IPR012338",
                    "PDB:1ZG4",
                    "UniProtKB/Swiss-Prot:P62593",
                ],
                "EC_number": "3.5.2.6",
            }
        ampr_prom = next(get_features("AmpR promoter"), None)
        ampr_prom = ampr_prom or next(get_features("AmpR Promoter"), None)
        if ampr_prom is not None:
            ampr_prom.qualifiers["label"] = ["AmpR Promoter"]
            ampr_prom.qualifiers["note"] = ["color: #ff6666"]
        ampr_term_start = gb.seq.find(AMPR_TERM)
        if ampr is not None and ampr_term_start >= 0:
            ampr_term = SeqFeature(
                location=FeatureLocation(ampr_term_start, ampr_term_start + 94, -1),
                type="terminator",
                qualifiers={"label": "AmpR Terminator", "note": ["color: #ff6666"]},
            )
            gb.features.append(ampr_term)
        # KanR recolor and annotations
        kanr = next(get_features("KanR"), None)
        if kanr is not None:
            kanr.qualifiers.update(
                {
                    "gene": "aphA1",
                    "product": "aminoglycoside phosphotransferase",
                    "EC_number": "2.7.1.95",
                    "label": ["KanR"],
                    "function": "kanamicyn resistance",
                    "db_xref": [
                        "CDD:cd05150",
                        "GO:0000166",
                        "GO:0005524",
                        "GO:0008910",
                        "GO:0016301",
                        "GO:0016740",
                        "GO:0016773",
                        "GO:0016310",
                        "GO:0046677",
                        "InterPro:IPR024165",
                        "InterPro:IPR011009",
                        "InterPro:IPR002575",
                        "PFAM:PF01636",
                        "UniProtKB/Swiss-Prot:P00551",
                    ],
                    "note": ["color: #008000"],
                }
            )
        kanr_term_start = gb.seq.find(KANR_TERM)
        if kanr is not None and kanr_term_start >= 0:
            kanr_term = SeqFeature(
                location=FeatureLocation(kanr_term_start, kanr_term_start + 27, -1),
                type="terminator",
                qualifiers={"label": ["KanR Terminator"], "note": ["color: #93ff35"]},
            )
            gb.features.append(kanr_term)
        kanr_prom = next(get_features("KanR Promoter"), None)
        kanr_prom_start = gb.seq.find(KANR_PROM)
        if kanr is not None and kanr_prom_start >= 0:
            kanr_prom = SeqFeature(
                location=FeatureLocation(kanr_prom_start, kanr_prom_start + 148, -1),
                type="terminator",
                qualifiers={"label": ["KanR Promoter"], "note": ["color: #93ff35"]},
            )
            gb.features.append(kanr_prom)
        if kanr_prom is None and ampr_prom is not None and kanr is not None:
            kanr_prom = ampr_prom
            kanr_prom.qualifiers["label"] = ["KanR Promoter"]
            kanr_prom.qualifiers["note"] = ["color: #93ff35"]
        # SpecR recolor and annotations
        smr = next(get_features('SmR'), None)
        if smr is not None:
            smr.qualifiers.update({
                'label': ['SmR'],
                'gene': 'aadA',
                'product': 'aminoglycoside adenylyltransferase',
                'function': 'spectinomycin and streptomycin resistance',
                'note': ['color: #ffff00'],
            })
        # Remove duplicate crtI
        if len(list(get_features('crtI'))) == 2:
            x, y = get_features('crtI')
            gb_archive.features.remove(x if x.type == 'misc_feature' else y)
        # Remove duplicate crtB
        if len(list(get_features('crtB'))) == 2:
            x, y = get_features('crtB')
            gb_archive.features.remove(x if x.type == 'misc_feature' else y)
        # Remove duplicate oriV (keep the most richly annotated copy)
        if len(list(get_features('oriV'))) == 2:
            gb_archive.features.remove(min(get_features('oriV'), key=lambda f: len(f.qualifiers)))
        # GFP recolor and annotations
        # gfp = next(get_features("GFP"), None)
        # if gfp is not None:
        #     gfp.qualifiers.update(
        #         {
        #             "label": "GFP",
        #             "note": ["color: #34ff03"],
        #             "product": ["green fluorescent protein"],
        #             "gene": ["GFP"],
        #             "db_xref": [
        #                 "PDB:1H6R",
        #                 "InterPro:IPR009017",
        #                 "InterPro:IPR011584",
        #                 "InterPro:IPR000786",
        #                 "PFAM:PF01353",
        #                 "GO:0008218",
        #                 "GO:0006091",
        #                 "GO:0018298",
        #                 "UniProtKB/Swiss-Prot:P42212",
        #             ],
        #             "inference": [
        #                 "DESCRIPTION:alignment:blastx:UniProtKB/Swiss-Prot:P42212"
        #             ],
        #         }
        #     )
        # mRFP1 recolor and annotations
        # rfp = next(get_features("mRFP1"), None)
        # if rfp is not None:
        #     rfp.qualifiers.update(
        #         {
        #             "label": "mRFP",
        #             "product": "mRFP1",
        #             "note": [
        #                 "monomeric derivative of DsRed (Campbell et al., 2002)",
        #                 "iGEM Part: BBa_E1010",
        #                 "color: #c16969",
        #             ],
        #             "db_xref": [
        #                 "UniProtKB/Swiss-Prot:Q9U6Y8",
        #                 "GO:0008218",
        #                 "GO:0006091",
        #                 "GO:0018298",
        #                 "PDB:2H5R",
        #             ],
        #         }
        #     )
        # remove bla annotation since we have a better AmpR
        # bla = next(get_features("bla"), None)
        # if bla is not None:
        #     gb_archive.features.remove(bla)
        # Remove bad Sm/Sp
        rm_feats = [
            "Sm/Sp",
            r"Sm/Sp\no\DraIII",
            r"spec orf?",
            r"spec\orf?",
            r"Kan\(no\BpiI)",
            r"spec",
            r"rep\(pMB1)",
            r'rep - pMB1',
            "NPTII",
            "AP(R)",
            r"AP\r",
            "ALPHA",
            "Kan",
        ]
        for label in rm_feats:
            for f in get_features(label):
                gb_archive.features.remove(f)
        # FIXME
        # have all the plasmids in the same direction, i.e. so that the
        # antibiotics resistance cassette is always on the reverse strand
        # antibio = next(x for y in ('AmpR', 'SmR', 'KanR') for x in get_features(y))
        # if antibio.location.strand != -1:
        #     gb_archive = gb_archive.reverse_complement(True, True, True, True, True, True, True,)
        # Remove all "/vntifkey" feature qualifier
        for feature in gb_archive.features:
            feature.qualifiers.pop("vntifkey", None)
        # sort features by start location, source always first
        gb_archive.features.sort(
            key=lambda f: (-len(gb.seq)) * (f.type == "source") + f.location.start
        )
        # translate color from notes to ApEinfo
        for feature in gb_archive.features:
            translate_color(feature)
        # Fix the direct submission reference
        if gb_archive.annotations["references"][-1].title == "Direct Submission":
            ref = gb_archive.annotations["references"][-1]
        else:
            ref = Reference()
            ref.title = "Direct Submission"
            # NOTE(review): `annotations` is a dict — this append looks like
            # it should target annotations["references"]; confirm.
            gb_archive.annotations.append(ref)
        ref.authors = "<NAME>"
        ref.journal = "Distributed with the MoClo Python library\nhttps://github.com/althonos/moclo"
        # write the final record
        dst_dir = os.path.abspath(
            os.path.join(__file__, "..", "..", "moclo-moclo", "registry", "moclo")
        )
        dst_file = os.path.join(dst_dir, "{}.gb").format(info["id"])
        write(gb_archive, dst_file, "gb")
| StarcoderdataPython |
254513 | <gh_stars>1-10
#!../../../.env/bin/python
# Sanity check / micro-benchmark for column-wise argmax on a small matrix.
# NOTE: Python 2 syntax (print statements).
import os
import numpy as np
import time
# 3x3 test matrix; per-column maxima are at rows 0, 1, 0.
a = np.array([
    [1,0,3],
    [0,2,1],
    [0.1,0,0],
    ])
print a
row = 1
col = 2
print a[row][col]
assert a[row][col] == 1
# Hand-computed expectations for axis=0 (column-wise) reduction.
expected_max_rows = [0, 1, 0]
expected_max_values = [1, 2, 3]
print 'expected_max_rows:', expected_max_rows
print 'expected_max_values:', expected_max_values
# Time the argmax call only.
t0 = time.time()
actual_max_rows = list(np.argmax(a, axis=0))
td = time.time() - t0
actual_max_values = list(np.amax(a, axis=0))
print 'td:', round(td, 4)
print 'actual_max_rows:', actual_max_rows
print 'actual_max_values:', actual_max_values
assert actual_max_rows == expected_max_rows
assert actual_max_values == expected_max_values
| StarcoderdataPython |
3432966 | import numpy as np
import matplotlib.pyplot as plt
# Per-model results (in percent) for cleavage site 6, one value per metric.
# Replaces two copy-pasted scatter blocks (which reused the `first` variable
# for both series) with a single data table and loop.
METRICS = ["acc", "prec", "recall", "spec", "MCC", "F1"]
RESULTS = {
    "LSTM": [93.75, 94.11764706, 92.30769231, 100, 67.30769231, 93.2038835],
    "CNN": [97.61904762, 97.6744186, 100, 96.55172414, 100, 98.82352941],
}

fig, ax = plt.subplots()
for model, scores in RESULTS.items():
    # One scatter series per model, plotted in METRICS order.
    ax.scatter(METRICS, scores, label=model)
ax.set_ylabel('Resultados en porcentaje')
ax.set_title('Sitio de clivaje 6')
ax.legend()
plt.show()
180498 | import os
# Walk a directory tree and print every file that does NOT contain the
# search string (whitespace-insensitive, file contents lower-cased).
user_input = input('What is the name of your directory: ')
# Normalize the directory name the same way file contents are normalized.
rootdir = user_input.replace(" ", "").lower()
# NOTE(review): the search string is not lower-cased before comparison, so a
# capitalized query can never match lower-cased file contents — confirm intent.
searchstring = input('What word are you trying to find?: ')
for subdir, dirs, files in os.walk(rootdir):
    for file in files:
        file_location = os.path.join(subdir, file)
        if not os.path.isfile(file_location):
            continue
        try:
            # Context manager guarantees the handle is closed even when the
            # read raises (the previous version leaked the handle on errors).
            with open(file_location) as f:
                contents = f.read().replace(" ", "").lower()
        except (OSError, UnicodeDecodeError):
            # Unreadable or non-text file: skip it silently, as before.
            continue
        if searchstring not in contents:
            # YYYMMM dont need to print matches; only report files where the
            # string was NOT found (original behavior).
            print(file_location)
| StarcoderdataPython |
3545576 | <filename>st2common/st2common/config.py
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo_config import cfg
from st2common.constants.system import VERSION_STRING
def do_register_opts(opts, group=None, ignore_errors=False):
    """Register a list of options with the global config object.

    :param opts: List of oslo.config options to register.
    :param group: Option group name (``None`` for the default group).
    :param ignore_errors: When True, swallow registration errors (e.g.
        options which have already been registered).
    """
    try:
        cfg.CONF.register_opts(opts, group=group)
    except Exception:
        # Narrowed from a bare ``except`` so SystemExit / KeyboardInterrupt
        # still propagate regardless of ignore_errors.
        if not ignore_errors:
            raise
def do_register_cli_opts(opt, ignore_errors=False):
    """Register one CLI option or a list of CLI options.

    TODO: This function has a broken name — despite the singular ``opt`` it
    accepts either a single option or a list/tuple of options.

    :param opt: A single oslo.config option, or a list/tuple of them.
    :param ignore_errors: When True, swallow registration errors.
    """
    opts = opt if isinstance(opt, (list, tuple)) else [opt]
    try:
        cfg.CONF.register_cli_opts(opts)
    except Exception:
        # Narrowed from a bare ``except`` so SystemExit / KeyboardInterrupt
        # still propagate regardless of ignore_errors.
        if not ignore_errors:
            raise
def register_opts(ignore_errors=False):
    """Register every st2 configuration option group with oslo.config.

    :param ignore_errors: When True, duplicate-registration errors are
        swallowed (useful when called more than once in a process).
    """
    auth_opts = [
        cfg.BoolOpt('enable', default=True, help='Enable authentication middleware.'),
        cfg.IntOpt('token_ttl', default=86400, help='Access token ttl in seconds.')
    ]
    do_register_opts(auth_opts, 'auth', ignore_errors)
    rbac_opts = [
        cfg.BoolOpt('enable', default=False, help='Enable RBAC.'),
    ]
    do_register_opts(rbac_opts, 'rbac', ignore_errors)
    system_user_opts = [
        cfg.StrOpt('user',
                   default='stanley',
                   help='Default system user.'),
        cfg.StrOpt('ssh_key_file',
                   default='/home/vagrant/.ssh/stanley_rsa',
                   help='SSH private key for the system user.')
    ]
    do_register_opts(system_user_opts, 'system_user', ignore_errors)
    schema_opts = [
        cfg.IntOpt('version', default=4, help='Version of JSON schema to use.'),
        cfg.StrOpt('draft', default='http://json-schema.org/draft-04/schema#',
                   help='URL to the JSON schema draft.')
    ]
    do_register_opts(schema_opts, 'schema', ignore_errors)
    system_opts = [
        cfg.StrOpt('base_path', default='/opt/stackstorm',
                   help='Base path to all st2 artifacts.'),
        cfg.ListOpt('admin_users', default=[],
                    help='A list of usernames for users which should have admin privileges')
    ]
    do_register_opts(system_opts, 'system', ignore_errors)
    # Default packs location is derived from system.base_path at register time.
    system_packs_base_path = os.path.join(cfg.CONF.system.base_path, 'packs')
    content_opts = [
        cfg.StrOpt('system_packs_base_path', default=system_packs_base_path,
                   help='Path to the directory which contains system packs.'),
        cfg.StrOpt('packs_base_paths', default=None,
                   help='Paths which will be searched for integration packs.')
    ]
    do_register_opts(content_opts, 'content', ignore_errors)
    db_opts = [
        cfg.StrOpt('host', default='0.0.0.0', help='host of db server'),
        cfg.IntOpt('port', default=27017, help='port of db server'),
        cfg.StrOpt('db_name', default='st2', help='name of database'),
        cfg.StrOpt('username', help='username for db login'),
        cfg.StrOpt('password', help='<PASSWORD>'),
        cfg.IntOpt('connection_retry_max_delay_m', help='Connection retry total time (minutes).',
                   default=3),
        cfg.IntOpt('connection_retry_backoff_max_s', help='Connection retry backoff max (seconds).',
                   default=10),
        cfg.IntOpt('connection_retry_backoff_mul', help='Backoff multiplier (seconds).',
                   default=1)
    ]
    do_register_opts(db_opts, 'database', ignore_errors)
    messaging_opts = [
        # It would be nice to be able to deprecate url and completely switch to using
        # url. However, this will be a breaking change and will have impact so allowing both.
        cfg.StrOpt('url', default='amqp://guest:guest@127.0.0.1:5672//',
                   help='URL of the messaging server.'),
        cfg.ListOpt('cluster_urls', default=[],
                    help='URL of all the nodes in a messaging service cluster.')
    ]
    do_register_opts(messaging_opts, 'messaging', ignore_errors)
    syslog_opts = [
        cfg.StrOpt('host', default='127.0.0.1',
                   help='Host for the syslog server.'),
        cfg.IntOpt('port', default=514,
                   help='Port for the syslog server.'),
        cfg.StrOpt('facility', default='local7',
                   help='Syslog facility level.'),
        cfg.StrOpt('protocol', default='udp',
                   help='Transport protocol to use (udp / tcp).')
    ]
    do_register_opts(syslog_opts, 'syslog', ignore_errors)
    log_opts = [
        cfg.ListOpt('excludes', default='',
                    help='Exclusion list of loggers to omit.'),
        cfg.BoolOpt('redirect_stderr', default=False,
                    help='Controls if stderr should be redirected to the logs.'),
        cfg.BoolOpt('mask_secrets', default=True,
                    help='True to mask secrets in the log files.')
    ]
    do_register_opts(log_opts, 'log', ignore_errors)
    # Common API options
    api_opts = [
        cfg.StrOpt('host', default='0.0.0.0', help='StackStorm API server host'),
        cfg.IntOpt('port', default=9101, help='StackStorm API server port')
    ]
    do_register_opts(api_opts, 'api', ignore_errors)
    # Common auth options
    # NOTE(review): this reuses the ``auth_opts`` name from the top of the
    # function for a different option list (same 'auth' group).
    auth_opts = [
        cfg.StrOpt('api_url', default=None,
                   help='Base URL to the API endpoint excluding the version')
    ]
    do_register_opts(auth_opts, 'auth', ignore_errors)
    # Common options (used by action runner and sensor container)
    # NOTE(review): unlike the other calls, ``ignore_errors`` is not
    # forwarded here — confirm whether that is intentional.
    action_sensor_opts = [
        cfg.BoolOpt('enable', default=True,
                    help='Whether to enable or disable the ability to post a trigger on action.'),
    ]
    do_register_opts(action_sensor_opts, group='action_sensor')
    # Coordination options
    coord_opts = [
        cfg.StrOpt('url', default=None, help='Endpoint for the coordination server.'),
        cfg.IntOpt('lock_timeout', default=60, help='TTL for the lock if backend suports it.')
    ]
    do_register_opts(coord_opts, 'coordination', ignore_errors)
    # Mistral options
    mistral_opts = [
        cfg.StrOpt('v2_base_url', default='http://127.0.0.1:8989/v2', help='v2 API root endpoint.'),
        cfg.IntOpt('retry_exp_msec', default=1000, help='Multiplier for the exponential backoff.'),
        cfg.IntOpt('retry_exp_max_msec', default=300000, help='Max time for each set of backoff.'),
        cfg.IntOpt('retry_stop_max_msec', default=600000, help='Max time to stop retrying.'),
        cfg.StrOpt('keystone_username', default=None, help='Username for authentication.'),
        cfg.StrOpt('keystone_password', default=None, help='Password for authentication.'),
        cfg.StrOpt('keystone_project_name', default=None, help='OpenStack project scope.'),
        cfg.StrOpt('keystone_auth_url', default=None, help='Auth endpoint for Keystone.'),
        cfg.StrOpt('api_url', default=None, help=('URL Mistral uses to talk back to the API.'
                   'If not provided it defaults to public API URL. Note: This needs to be a base '
                   'URL without API version (e.g. http://127.0.0.1:9101)'))
    ]
    do_register_opts(mistral_opts, group='mistral', ignore_errors=ignore_errors)
    # Common CLI options
    debug = cfg.BoolOpt('debug', default=False,
                        help='Enable debug mode. By default this will set all log levels to DEBUG.')
    profile = cfg.BoolOpt('profile', default=False,
                          help=('Enable profile mode. In the profile mode all the MongoDB queries and related '
                                'profile data are logged.'))
    use_debugger = cfg.BoolOpt('use-debugger', default=True,
                               help='Enables debugger. Note that using this option changes how the '
                                    'eventlet library is used to support async IO. This could result in '
                                    'failures that do not occur under normal operation.')
    cli_opts = [debug, profile, use_debugger]
    do_register_cli_opts(cli_opts, ignore_errors=ignore_errors)
def parse_args(args=None):
    """Register all st2 options and parse *args* (defaults to ``sys.argv``)."""
    register_opts()
    cfg.CONF(args=args, version=VERSION_STRING)
| StarcoderdataPython |
6549846 | <gh_stars>0
"""
Implementation of a range of Graph Recurrent Networks.
Trying to follow the structure of rnn_cell.py in the mxnet code.
"""
import mxnet as mx
import sockeye.constants as C
from sockeye.config import Config
import logging
logger = logging.getLogger(__name__)
#def get_gcn(input_dim: int, output_dim: int,
# tensor_dim: int, use_gcn_gating: bool,
# dropout: float, prefix: str):
def get_resgrn(config, prefix):
    """Build a :class:`ResGRNCell` from *config*, prefixing its symbols with *prefix*."""
    cell_kwargs = dict(
        no_residual=config.no_residual,
        activation=config.activation,
        add_gate=config.add_gate,
        dropout=config.dropout,
        prefix=prefix,
    )
    return ResGRNCell(
        config.input_dim,
        config.output_dim,
        config.tensor_dim,
        config.num_layers,
        **cell_kwargs,
    )
def get_gatedgrn(config, prefix):
    """Build a :class:`GatedGRNCell` from *config*, prefixing its symbols with *prefix*."""
    cell_kwargs = dict(
        activation=config.activation,
        add_gate=config.add_gate,
        dropout=config.dropout,
        norm=config.norm,
        prefix=prefix,
    )
    return GatedGRNCell(
        config.input_dim,
        config.output_dim,
        config.tensor_dim,
        config.num_layers,
        **cell_kwargs,
    )
# class GGRNParams(object):
# """Container to hold GGRN variables.
# Used for parameter sharing between layers/timesteps.
# Parameters
# ----------
# prefix : str
# All variables' name created by this container will
# be prepended with prefix.
# """
# def __init__(self, prefix=''):
# self._prefix = prefix
# self._params = {}
# def get(self, name, **kwargs):
# """Get a variable with name or create a new one if missing.
# Parameters
# ----------
# name : str
# name of the variable
# **kwargs :
# more arguments that's passed to symbol.Variable
# """
# name = self._prefix + name
# if name not in self._params:
# self._params[name] = mx.sym.Variable(name, **kwargs)
# return self._params[name]
class ResGRNConfig(Config):
    """
    Residual GRN configuration.

    :param input_dim: Dimensionality for input vectors.
    :param output_dim: Dimensionality for output vectors.
    :param tensor_dim: Edge label space dimensionality.
    :param num_layers: Number of layers / unrolled timesteps.
    :param no_residual: Skip residual connections.
    :param activation: Non-linear function used inside the GRN updates.
    :param add_gate: Add edge-wise gating (Marcheggiani & Titov, 2017).
    :param dropout: Dropout between layers.
    """
    def __init__(self,
                 input_dim: int,
                 output_dim: int,
                 tensor_dim: int,
                 num_layers: int,
                 no_residual: bool = False,
                 activation: str = 'relu',
                 add_gate: bool = False,
                 dropout: float = 0.0) -> None:
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.tensor_dim = tensor_dim
        self.num_layers = num_layers
        self.no_residual = no_residual
        self.activation = activation
        self.add_gate = add_gate
        self.dropout = dropout
class ResGRNCell(object):
    """Residual GRN cell.

    Applies ``num_layers`` graph convolutions over a labelled adjacency
    matrix, adding a residual (identity) connection after each layer unless
    ``no_residual`` is set. Edge labels are encoded as integer ids in the
    adjacency matrix (0 = no edge; label ids start at 1).
    """
    def __init__(self,
                 input_dim,
                 output_dim,
                 tensor_dim,
                 num_layers,
                 no_residual=False,
                 activation='relu',
                 add_gate=False,
                 prefix='resgrn_',
                 params=None,
                 dropout=0.0):
        self._prefix = prefix
        self._params = params
        self._modified = False
        self._input_dim = input_dim
        self._output_dim = output_dim
        self._tensor_dim = tensor_dim
        self._num_layers = num_layers
        self._no_residual = no_residual
        self._activation = activation
        self._add_edge_gate = add_gate
        # NOTE(review): ``dropout`` is accepted but never stored, and the
        # Dropout call in _single_convolve below is commented out — confirm
        # whether dropout should be applied in this cell.
        #self.reset()
        # Projection applied only when input and output dims differ.
        if self._input_dim != self._output_dim:
            self._first_W = mx.symbol.Variable(self._prefix + '_first_weight',
                                               shape=(input_dim, output_dim))
            self._first_b = mx.symbol.Variable(self._prefix + '_first_bias',
                                               shape=(output_dim,))
        #self._W = mx.symbol.Variable(self._prefix + '_weight',
        #                             shape=(output_dim, rank))
        #self._Wl = [mx.symbol.Variable(self._prefix + str(i) + '_edge_weight',
        #                               shape=(rank, output_dim))
        #            for i in range(tensor_dim)]
        # One weight/bias pair per edge label.
        self._Wl = [mx.symbol.Variable(self._prefix + str(i) + '_edge_weight',
                                       shape=(output_dim, output_dim))
                    for i in range(tensor_dim)]
        self._bl = [mx.symbol.Variable(self._prefix + str(i) + '_edge_bias',
                                       shape=(output_dim,))
                    for i in range(tensor_dim)]
        # Edge gate parameters
        if self._add_edge_gate:
            self._edge_gate_W = [mx.symbol.Variable(self._prefix + str(i) + '_edge_gate_weight',
                                                    shape=(output_dim, 1))
                                 for i in range(tensor_dim)]
            self._edge_gate_b = [mx.symbol.Variable(self._prefix + str(i) + '_edge_gate_bias',
                                                    shape=(1, 1))
                                 for i in range(tensor_dim)]

    def convolve(self, adj, inputs, seq_len):
        """
        Apply one convolution per layer. This is where we add the residuals.
        A linear transformation is required in case the input dimensionality is
        different from GRN output dimensionality.
        """
        #outputs = self._single_convolve(adj, inputs, seq_len)
        if self._input_dim != self._output_dim:
            outputs = mx.symbol.dot(inputs, self._first_W)
            outputs = mx.symbol.broadcast_add(outputs, self._first_b)
            #outputs = mx.symbol.FullyConnected(data=inputs, num_hidden=self._output_dim, flatten=True)
        else:
            outputs = inputs
        #outputs = mx.symbol.dot(inputs, self._first_W)
        #outputs = mx.symbol.concat(inputs, outputs)
        for i in range(self._num_layers):
            convolved = self._single_convolve(adj, outputs, seq_len)
            if self._no_residual:
                outputs = convolved
            else:
                # residual connection
                outputs = convolved + outputs
            #outputs = mx.symbol.concat(outputs, inputs)
        return outputs

    def _single_convolve(self, adj, inputs, seq_len):
        """
        One graph convolution over all edge labels: a label-wise linear
        transformation (optionally edge-gated), propagation along the
        label's adjacency entries, sum over labels, then the non-linearity.

        IMPORTANT: when retrieving the original adj matrix for an
        edge label we add one to "i" because the edge ids stored
        in the matrix start at 1. 0 corresponds to lack of edges.
        """
        output_list = []
        for i in range(self._tensor_dim):
            # linear transformation
            Wi = self._Wl[i]
            #Wi = mx.symbol.dot(self._W, Wi)
            bi = self._bl[i]
            output = mx.symbol.dot(inputs, Wi)
            output = mx.symbol.broadcast_add(output, bi)
            # optional edge gating
            if self._add_edge_gate:
                edge_gate_Wi = self._edge_gate_W[i]
                edge_gate_bi = self._edge_gate_b[i]
                edge_gate_val = mx.symbol.dot(inputs, edge_gate_Wi)
                edge_gate_val = mx.symbol.broadcast_add(edge_gate_val, edge_gate_bi)
                edge_gate_val = mx.symbol.Activation(edge_gate_val, act_type='sigmoid')
                output = mx.symbol.broadcast_mul(output, edge_gate_val)
            # convolution: keep only the adjacency entries carrying this label
            label_id = i + 1
            mask = mx.symbol.ones_like(adj) * label_id
            adji = (mask == adj)
            #adji = mx.symbol.slice_axis(adj, axis=1, begin=i, end=i+1)
            #adji = mx.symbol.reshape(adji, shape=(-1, seq_len, seq_len))
            output = mx.symbol.batch_dot(adji, output)
            output = mx.symbol.expand_dims(output, axis=1)
            output_list.append(output)
        # sum contributions over edge labels, then apply the non-linearity
        outputs = mx.symbol.concat(*output_list, dim=1)
        outputs = mx.symbol.sum(outputs, axis=1)
        final_output = mx.symbol.Activation(outputs, act_type=self._activation)
        #final_output = mx.symbol.Dropout(final_output, p=self._dropout)
        return final_output

    def reset(self):
        """No-op."""
        pass
class GatedGRNConfig(Config):
    """
    Gated GRN configuration.

    :param input_dim: Dimensionality for input vectors.
    :param output_dim: Dimensionality for output vectors.
    :param tensor_dim: Edge label space dimensionality.
    :param num_layers: Number of layers / unrolled timesteps.
    :param activation: Non-linear function used inside the GRN updates.
    :param add_gate: Add edge-wise gating (Marcheggiani & Titov, 2017).
    :param dropout: Dropout between layers.
    :param norm: Enable normalization (stored on the cell).
    """
    def __init__(self,
                 input_dim: int,
                 output_dim: int,
                 tensor_dim: int,
                 num_layers: int,
                 activation: str = 'relu',
                 add_gate: bool = False,
                 dropout: float = 0.0,
                 norm: bool = False,) -> None:
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.tensor_dim = tensor_dim
        self.num_layers = num_layers
        self.activation = activation
        self.add_gate = add_gate
        self.dropout = dropout
        self.norm = norm
class GatedGRNCell(object):
"""Gated GRN cell
"""
    def __init__(self,
                 input_dim,
                 output_dim,
                 tensor_dim,
                 num_layers,
                 activation='relu',
                 add_gate=False,
                 prefix='gatedgrn_',
                 params=None,
                 dropout=0.0,
                 norm=False):
        """Create all symbol variables (weights and biases) for the cell.

        One weight/bias set is created per edge label for the main
        transformation and for the reset and update gates; optional
        edge-gate parameters are created when ``add_gate`` is True.
        """
        self._prefix = prefix
        self._params = params
        self._modified = False
        self._input_dim = input_dim
        self._output_dim = output_dim
        self._tensor_dim = tensor_dim
        self._num_layers = num_layers
        self._activation = activation
        self._add_edge_gate = add_gate
        self._dropout = dropout
        self._dropout_mask = None
        # NOTE(review): ``norm`` is stored but not consumed by any method
        # visible here — confirm it is used elsewhere.
        self._norm = norm
        # Linear transformation for the first layer in case input vectors
        # are of a different dimensionality from the output vectors
        if self._input_dim != self._output_dim:
            self._first_W = mx.symbol.Variable(self._prefix + '_first_weight',
                                               shape=(input_dim, output_dim))
            self._first_b = mx.symbol.Variable(self._prefix + '_first_bias',
                                               shape=(output_dim,))
        # Main transformation, using label-wise parameters
        self._Wl = [mx.symbol.Variable(self._prefix + str(i) + '_edge_weight',
                                       shape=(output_dim, output_dim))
                    for i in range(tensor_dim)]
        self._bl = [mx.symbol.Variable(self._prefix + str(i) + '_edge_bias',
                                       shape=(output_dim,))
                    for i in range(tensor_dim)]
        # Reset gate
        self._reset_Wl = [mx.symbol.Variable(self._prefix + str(i) + '_reset_weight',
                                             shape=(output_dim, output_dim))
                          for i in range(tensor_dim)]
        self._reset_bl = [mx.symbol.Variable(self._prefix + str(i) + '_reset_bias',
                                             shape=(output_dim,))
                          for i in range(tensor_dim)]
        # Update gate
        self._update_Wl = [mx.symbol.Variable(self._prefix + str(i) + '_update_weight',
                                              shape=(output_dim, output_dim))
                           for i in range(tensor_dim)]
        self._update_bl = [mx.symbol.Variable(self._prefix + str(i) + '_update_bias',
                                              shape=(output_dim,))
                           for i in range(tensor_dim)]
        # Edge gate parameters
        if self._add_edge_gate:
            self._edge_gate_W = [mx.symbol.Variable(self._prefix + str(i) + '_edge_gate_weight',
                                                    shape=(output_dim, 1))
                                 for i in range(tensor_dim)]
            self._edge_gate_b = [mx.symbol.Variable(self._prefix + str(i) + '_edge_gate_bias',
                                                    shape=(1, 1))
                                 for i in range(tensor_dim)]
    def convolve(self, adj, inputs, seq_len):
        """
        Apply one convolution per layer. This is where we apply the gates.
        A linear transformation is required in case the input dimensionality is
        different from GRN output dimensionality.
        Each layer computes reset-gated inputs, convolves them, then blends
        old and new states through the update gate.
        """
        # Dropout is applied on inputs
        if self._dropout != 0.0:
            print("DROPOUT: %f" % self._dropout)
            inputs = mx.sym.Dropout(inputs, p=self._dropout)
        # Transformation to match dims
        if self._input_dim != self._output_dim:
            outputs = mx.symbol.dot(inputs, self._first_W)
            outputs = mx.symbol.broadcast_add(outputs, self._first_b)
            # Sounded like a sensible idea but didn't really work...
            # I guess because ReLU?
            #outputs = mx.symbol.Activation(outputs, act_type=self._activation)
        else:
            outputs = inputs
        # Variational/Bayesian Dropout mask. Mask does not change between layers.
        #if self._dropout_mask is None:
        #self._dropout_mask = mx.sym.Dropout(data=mx.sym.ones_like(outputs), p=self._dropout)
        # Convolutions
        for i in range(self._num_layers):
            reset_outputs = self._reset(adj, outputs, seq_len)
            convolved = self._single_convolve(adj, reset_outputs, seq_len)
            outputs = self._update(adj, outputs, convolved, seq_len)
            #if self._dropout != 0.0:
            #    outputs = mx.symbol.Dropout(outputs, p=self._dropout)
            #outputs = outputs * self._dropout_mask
            #outputs = outputs * mx.sym.Dropout(data=mx.sym.ones_like(outputs), p=self._dropout)
            #outputs = outputs * mx.sym.ones_like(outputs)
        return outputs
def _reset(self, adj, inputs, seq_len):
"""
Apply reset gate to the inputs.
IMPORTANT: when retrieving the original adj matrix for an
edge label we add one to "i" because the edge ids stored
in the matrix start at 1. 0 corresponds to lack of edges.
"""
output_list = []
for i in range(self._tensor_dim):
# linear transformation
reset_Wi = self._reset_Wl[i]
reset_bi = self._reset_bl[i]
output = mx.symbol.dot(inputs, reset_Wi)
output = mx.symbol.broadcast_add(output, reset_bi)
# convolution
label_id = i + 1
mask = mx.symbol.ones_like(adj) * label_id
adji = (mask == adj)
output = mx.symbol.batch_dot(adji, output)
output = mx.symbol.expand_dims(output, axis=1)
output_list.append(output)
outputs = mx.symbol.concat(*output_list, dim=1)
outputs = mx.symbol.sum(outputs, axis=1)
reset_gate = mx.symbol.Activation(outputs, act_type='sigmoid')
final_outputs = mx.symbol.broadcast_mul(reset_gate, inputs)
return final_outputs
def _update(self, adj, inputs, convolved, seq_len):
"""
Apply update gate to the inputs.
IMPORTANT: when retrieving the original adj matrix for an
edge label we add one to "i" because the edge ids stored
in the matrix start at 1. 0 corresponds to lack of edges.
"""
output_list = []
for i in range(self._tensor_dim):
# linear transformation
update_Wi = self._update_Wl[i]
update_bi = self._update_bl[i]
output = mx.symbol.dot(convolved, update_Wi)
output = mx.symbol.broadcast_add(output, update_bi)
# convolution
label_id = i + 1
mask = mx.symbol.ones_like(adj) * label_id
adji = (mask == adj)
output = mx.symbol.batch_dot(adji, output)
output = mx.symbol.expand_dims(output, axis=1)
output_list.append(output)
outputs = mx.symbol.concat(*output_list, dim=1)
outputs = mx.symbol.sum(outputs, axis=1)
update_gate = mx.symbol.Activation(outputs, act_type='sigmoid')
final_outputs = (mx.symbol.broadcast_mul(update_gate, convolved) +
mx.symbol.broadcast_mul((1 - update_gate), inputs))
return final_outputs
    def _single_convolve(self, adj, inputs, seq_len):
        """
        One labelled graph convolution: per edge label, a linear transform of
        the node features followed by aggregation over that label's edges,
        with optional scalar edge gating and degree normalisation.
        IMPORTANT: when retrieving the original adj matrix for an
        edge label we add one to "i" because the edge ids stored
        in the matrix start at 1. 0 corresponds to lack of edges.
        """
        output_list = []
        for i in range(self._tensor_dim):
            # linear transformation
            Wi = self._Wl[i]
            #Wi = mx.symbol.dot(self._W, Wi)
            bi = self._bl[i]
            output = mx.symbol.dot(inputs, Wi)
            output = mx.symbol.broadcast_add(output, bi)
            # optional edge gating
            if self._add_edge_gate:
                # Scalar sigmoid gate per node, multiplied into the features.
                edge_gate_Wi = self._edge_gate_W[i]
                edge_gate_bi = self._edge_gate_b[i]
                edge_gate_val = mx.symbol.dot(inputs, edge_gate_Wi)
                edge_gate_val = mx.symbol.broadcast_add(edge_gate_val, edge_gate_bi)
                edge_gate_val = mx.symbol.Activation(edge_gate_val, act_type='sigmoid')
                output = mx.symbol.broadcast_mul(output, edge_gate_val)
            # convolution
            # 0/1 adjacency slice for this label, then aggregate neighbours.
            label_id = i + 1
            mask = mx.symbol.ones_like(adj) * label_id
            #adji = (mask == adj)
            adji = mx.symbol.broadcast_equal(mask, adj)
            #adji = mx.symbol.slice_axis(adj, axis=1, begin=i, end=i+1)
            #adji = mx.symbol.reshape(adji, shape=(-1, seq_len, seq_len))
            output = mx.symbol.batch_dot(adji, output)
            output = mx.symbol.expand_dims(output, axis=1)
            output_list.append(output)
        outputs = mx.symbol.concat(*output_list, dim=1)
        outputs = mx.symbol.sum(outputs, axis=1)
        if self._norm:
            # Normalise by each node's degree (count of nonzero adj entries).
            norm_adj = mx.symbol.broadcast_not_equal(adj, mx.symbol.zeros_like(adj))
            norm_factor = mx.symbol.sum(norm_adj, axis=2, keepdims=True)
            outputs = mx.symbol.broadcast_div(outputs, norm_factor)
        final_output = mx.symbol.Activation(outputs, act_type=self._activation)
        #final_output = mx.symbol.Dropout(final_output, p=self._dropout)
        return final_output
    def reset(self):
        """Clear the cached dropout mask (see the commented-out mask logic in convolve)."""
        logger.info("GRN DROPOUT MASK RESET")
        self._dropout_mask = None
| StarcoderdataPython |
# reponame: mgthometz/advent-of-code-2021
import sys, collections
from grid import gridsource as grid
from util import findints
# Axis-aligned target rectangle parsed from the puzzle input.
Target = collections.namedtuple('Target', 'xmin xmax ymin ymax')
def main():
    """Count every launch velocity whose trajectory hits the target area."""
    # Input file: first CLI argument, defaulting to a file named 'in'.
    f = open(sys.argv[1] if len(sys.argv) > 1 else 'in')
    # findints extracts the four integer bounds from the input text.
    target = Target(*findints(f.read()))
    result = 0
    # Brute-force the plausible velocity ranges: x beyond xmax overshoots on
    # step one; the y bounds follow from the symmetric rise/fall of the probe.
    for xvel in inclusive_range(0, target.xmax):
        for yvel in inclusive_range(target.ymin, -target.ymin - 1):
            if is_hit((xvel, yvel), target):
                result += 1
    print(result)
def inclusive_range(lo, hi):
    """Like range(), but the upper bound *hi* is included."""
    stop = hi + 1
    return range(lo, stop)
def is_hit(velocity, target):
    """Return True if a probe launched with *velocity* ever lands inside *target*."""
    for pos in trajectory(velocity, target):
        if (
            target.xmin <= pos[0] <= target.xmax and
            target.ymin <= pos[1] <= target.ymax
        ):
            return True
    return False
def trajectory(velocity, target):
    """Yield successive probe positions until it passes beyond the target.

    Stops once the probe is to the right of the target or below it, after
    which it can never enter the area.
    (Also strips dataset residue that had corrupted the final line.)
    """
    pos = (0, 0)
    while pos[0] <= target.xmax and pos[1] >= target.ymin:
        yield pos
        pos = grid.addvec(pos, velocity)
        # Drag pulls the x-velocity toward zero; gravity decrements y.
        velocity = (
            max(0, velocity[0] - 1),
            velocity[1] - 1
        )
class Solution(object):
    def climbStairs(self, n):
        """Return the number of distinct ways to climb *n* stairs taking
        1 or 2 steps at a time (the Fibonacci recurrence).

        :type n: int
        :rtype: int

        Fixes: replaced Python-2-only `xrange` with `range` (works on both
        interpreters) and dropped the O(n) list for two rolling values.
        """
        # ways(i) = ways(i-1) + ways(i-2): one final 1-step or one final 2-step.
        prev, curr = 1, 1
        for _ in range(2, n + 1):
            prev, curr = curr, prev + curr
        return curr
if __name__ == '__main__':
    # print() call form works on both Python 2 and 3 (the original used the
    # Python-2-only print statement, with dataset residue fused on the line).
    print(Solution().climbStairs(5))
import os
import random
import tempfile
import uuid
import librosa
import numpy as np
import sys
from audiomentations.core.transforms_interface import BaseWaveformTransform
from audiomentations.core.utils import (
convert_float_samples_to_int16,
)
class Mp3Compression(BaseWaveformTransform):
    """Compress the audio using an MP3 encoder to lower the audio quality.
    This may help machine learning models deal with compressed, low-quality audio.
    This transform depends on either lameenc or pydub/ffmpeg.
    Note that bitrates below 32 kbps are only supported for low sample rates (up to 24000 hz).
    Note: When using the lameenc backend, the output may be slightly longer than the input due
    to the fact that the LAME encoder inserts some silence at the beginning of the audio.
    Warning: This transform writes to disk, so it may be slow. Ideally, the work should be done
    in memory. Contributions are welcome.
    """
    # Bitrates (kbps) accepted by the MP3 encoders; randomize_parameters picks
    # from the subset within [min_bitrate, max_bitrate].
    SUPPORTED_BITRATES = [
        8,
        16,
        24,
        32,
        40,
        48,
        56,
        64,
        80,
        96,
        112,
        128,
        144,
        160,
        192,
        224,
        256,
        320,
    ]
    def __init__(
        self, min_bitrate: int = 8, max_bitrate: int = 64, backend: str = "pydub", p=0.5
    ):
        """
        :param min_bitrate: Minimum bitrate in kbps
        :param max_bitrate: Maximum bitrate in kbps
        :param backend: "pydub" or "lameenc".
            Pydub may use ffmpeg under the hood.
                Pros: Seems to avoid introducing latency in the output.
                Cons: Slower than lameenc.
            lameenc:
                Pros: You can set the quality parameter in addition to bitrate.
                Cons: Seems to introduce some silence at the start of the audio.
        :param p: The probability of applying this transform
        """
        super().__init__(p)
        # Both bounds must lie inside the supported bitrate range.
        assert self.SUPPORTED_BITRATES[0] <= min_bitrate <= self.SUPPORTED_BITRATES[-1]
        assert self.SUPPORTED_BITRATES[0] <= max_bitrate <= self.SUPPORTED_BITRATES[-1]
        assert min_bitrate <= max_bitrate
        self.min_bitrate = min_bitrate
        self.max_bitrate = max_bitrate
        assert backend in ("pydub", "lameenc")
        self.backend = backend
    def randomize_parameters(self, samples, sample_rate):
        # Base class decides whether to apply; we then draw a random bitrate
        # from the supported values inside the configured range.
        super().randomize_parameters(samples, sample_rate)
        if self.parameters["should_apply"]:
            bitrate_choices = [
                bitrate
                for bitrate in self.SUPPORTED_BITRATES
                if self.min_bitrate <= bitrate <= self.max_bitrate
            ]
            self.parameters["bitrate"] = random.choice(bitrate_choices)
    def apply(self, samples, sample_rate):
        # Dispatch to the configured encoding backend.
        if self.backend == "lameenc":
            return self.apply_lameenc(samples, sample_rate)
        elif self.backend == "pydub":
            return self.apply_pydub(samples, sample_rate)
        else:
            raise Exception("Backend {} not recognized".format(self.backend))
    def apply_lameenc(self, samples, sample_rate):
        """Round-trip the samples through the LAME MP3 encoder via a temp file."""
        try:
            import lameenc
        except ImportError:
            print(
                "Failed to import the lame encoder. Maybe it is not installed? "
                "To install the optional lameenc dependency of audiomentations,"
                " do `pip install audiomentations[extras]` instead of"
                " `pip install audiomentations`",
                file=sys.stderr,
            )
            raise
        # Mono float32 input only (see assertions below).
        assert len(samples.shape) == 1
        assert samples.dtype == np.float32
        int_samples = convert_float_samples_to_int16(samples)
        encoder = lameenc.Encoder()
        encoder.set_bit_rate(self.parameters["bitrate"])
        encoder.set_in_sample_rate(sample_rate)
        encoder.set_channels(1)
        encoder.set_quality(7)  # 2 = highest, 7 = fastest
        encoder.silence()
        mp3_data = encoder.encode(int_samples.tobytes())
        mp3_data += encoder.flush()
        # Write a temporary MP3 file that will then be decoded
        tmp_dir = tempfile.gettempdir()
        tmp_file_path = os.path.join(
            tmp_dir, "tmp_compressed_{}.mp3".format(str(uuid.uuid4())[0:12])
        )
        with open(tmp_file_path, "wb") as f:
            f.write(mp3_data)
        # NOTE(review): the temp file is not removed if librosa.load raises;
        # consider a try/finally around the load + unlink.
        degraded_samples, _ = librosa.load(tmp_file_path, sample_rate)
        os.unlink(tmp_file_path)
        return degraded_samples
    def apply_pydub(self, samples, sample_rate):
        """Round-trip the samples through pydub/ffmpeg via a temp file."""
        try:
            import pydub
        except ImportError:
            print(
                "Failed to import pydub. Maybe it is not installed? "
                "To install the optional pydub dependency of audiomentations,"
                " do `pip install audiomentations[extras]` instead of"
                " `pip install audiomentations`",
                file=sys.stderr,
            )
            raise
        # Mono float32 input only (see assertions below).
        assert len(samples.shape) == 1
        assert samples.dtype == np.float32
        int_samples = convert_float_samples_to_int16(samples)
        audio_segment = pydub.AudioSegment(
            int_samples.tobytes(),
            frame_rate=sample_rate,
            sample_width=int_samples.dtype.itemsize,
            channels=1,
        )
        tmp_dir = tempfile.gettempdir()
        tmp_file_path = os.path.join(
            tmp_dir, "tmp_compressed_{}.mp3".format(str(uuid.uuid4())[0:12])
        )
        bitrate_string = "{}k".format(self.parameters["bitrate"])
        file_handle = audio_segment.export(tmp_file_path, bitrate=bitrate_string)
        file_handle.close()
        # NOTE(review): same temp-file leak on exception as in apply_lameenc.
        degraded_samples, _ = librosa.load(tmp_file_path, sample_rate)
        os.unlink(tmp_file_path)
        return degraded_samples
| StarcoderdataPython |
import unittest
import time
import uuid
import logging
import emission.core.get_database as edb
import emission.analysis.modelling.tour_model.featurization as featurization
import emission.analysis.modelling.tour_model.cluster_pipeline as cp
import emission.storage.timeseries.abstract_timeseries as esta
import emission.tests.analysisTests.tourModelTests.common as etatc
import emission.tests.common as etc
class FeaturizationTests(unittest.TestCase):
    """Tests for the tour-model featurization/clustering pipeline (Python 2)."""
    def __init__(self, *args, **kwargs):
        super(FeaturizationTests, self).__init__(*args, **kwargs)
    def setUp(self):
        # Real trip data from the cluster pipeline; tests run against it.
        self.data = cp.read_data()
        self.testUUID = uuid.uuid4()
        self.ts = esta.TimeSeries.get_time_series(self.testUUID)
        print 'there are ' + str(len(self.data))
    def tearDown(self):
        # Remove everything written under the throwaway test user.
        edb.get_timeseries_db().remove({'user_id': self.testUUID})
        edb.get_analysis_timeseries_db().remove({'user_id': self.testUUID})
    def testCalculatePoints(self):
        # Empty / None input yields no data.
        feat = featurization.featurization([])
        self.assertTrue(not feat.data)
        feat = featurization.featurization(None)
        self.assertTrue(not feat.data)
        # A trip with no coordinates should raise AttributeError (and only that).
        trip = etatc._createTripEntry(self, None, None, None, None)
        data = [trip]
        try:
            feat = featurization.featurization(data)
        except AttributeError:
            self.assertTrue(True)
        except Exception:
            self.assertTrue(False)
        # With real data every trip must produce a fully-populated point.
        feat = featurization.featurization(self.data)
        self.assertTrue(len(feat.points) == len(feat.data))
        for p in feat.points:
            self.assertTrue(None not in p)
    def testCluster(self):
        feat = featurization.featurization(self.data)
        feat.cluster(min_clusters=2, max_clusters=10)
        # One label per point; cluster count matches distinct labels.
        self.assertTrue(len(feat.labels) == len(feat.points))
        self.assertTrue(feat.clusters == len(set(feat.labels)))
        a = feat.cluster(name='kmeans', min_clusters=5, max_clusters=20)
        self.assertTrue(len(feat.labels) == len(feat.points))
        self.assertTrue(feat.clusters == len(set(feat.labels)))
        b = feat.cluster(name='nonname', min_clusters=5, max_clusters=20)
        self.assertTrue(a == b) #defaults to kmeans with invalid clustering method
        # Degenerate bounds: more clusters than points, and a 0 minimum that
        # should be coerced up to 2.
        feat.cluster(min_clusters=len(self.data)+1)
        c = feat.cluster(min_clusters = 0, max_clusters=20)
        d = feat.cluster(min_clusters = 2, max_clusters=20)
        self.assertTrue(c == d)
        # min > max must raise ValueError, nothing else.
        try:
            feat.cluster(min_clusters = 10, max_clusters=2)
        except ValueError:
            self.assertTrue(True)
        except Exception:
            self.assertTrue(False)
        # Two well-separated synthetic groups must produce exactly 2 clusters.
        data = []
        start = [-122, 47]
        end = [-123,47]
        now = time.time()
        for i in range(10):
            a = etatc._createTripEntry(self, now, now, start, end)
            data.append(a)
        start = [-74, 41]
        end = [-74, 42]
        for i in range(10):
            a = etatc._createTripEntry(self, now, now, start, end)
            data.append(a)
        feat = featurization.featurization(data)
        feat.cluster()
        self.assertTrue(len(set(feat.labels)) == 2)
    def testCheckClusters(self):
        feat = featurization.featurization(self.data)
        # Before clustering, check_clusters is a no-op returning None.
        a = feat.check_clusters()
        self.assertTrue(a == None)
        feat.cluster(min_clusters=2, max_clusters=10)
        # After clustering it must not raise.
        try:
            feat.check_clusters()
        except Exception, e:
            logging.exception(e.message)
            self.assertTrue(False)
if __name__ == "__main__":
    # Configure logging before handing control to unittest's runner.
    etc.configLogging()
    unittest.main()
| StarcoderdataPython |
# filename: Predictor_Tfidf/UI_dense_fully_connected.py
"""Use an ANN to find the probability of occurrence of diseases"""
import tflearn
import numpy as np
import tensorflow as tf
from sklearn.externals import joblib
import os
import sys
import time
lib_path = os.path.abspath(os.path.join('../', 'lib'))
sys.path.append(lib_path)
from icd9 import ICD9
# Start time
t1 = time.time()
# Parameters
diag_to_desc = {}  # ICD9 code -> human-readable description; filled by generate_icd9_lookup()
n_epoch = 10  # must match the epoch count baked into the saved model filenames below
def generate_icd9_lookup():
    """Generate description from ICD9 code.

    Populates the module-level diag_to_desc dict for every code in
    uniq_diag (codes carry a two-character prefix, hence ud[2:]).
    """
    tree = ICD9('../lib/icd9/codes.json')
    # Descriptions for category codes the ICD9 tree cannot resolve.
    fallback = {
        "008": "Intestinal infections due to other organisms",
        "280": "Iron deficiency anemias",
        "284": "Aplastic anemia and other bone marrow failure syndrome",
        "285": "Other and unspecified anemias",
        "286": "Coagulation defects",
        "287": "Purpura and other hemorrhagic conditions",
        "288": "Diseases of white blood cells",
    }
    for ud in uniq_diag:
        try:
            diag_to_desc[ud] = tree.find(ud[2:]).description
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; Exception still covers the AttributeError
        # raised when tree.find() returns None.
        except Exception:
            diag_to_desc[ud] = fallback.get(ud[2:], "Not Found")
# Get the 80 most common diagnosis from the vocab file
with open('../Data/patient_sequences/vocab') as f:
    uniq_diag = np.array(f.read().split('\n')[1].split(' '))
# Example patient event sequence (lab codes l_*, conditions c_*, symptoms s_*,
# diagnoses d_*) to run through every per-diagnosis model.
input_patient = 'l_50931 l_51221 l_51222 l_51248 l_51249 l_51277 l_51288 l_50808 l_50809 l_50811 l_50821 l_50893 l_50902 l_50960 l_51279 l_51301 l_50818 l_50970 l_51244 l_51256 l_50824 l_51237 l_51274 l_50813 l_50820 l_50912 l_51009 l_50882 l_50924 l_50953 l_50998 l_51006 l_51003 l_50862 l_51516 c_V5867 s_7885 d_008 d_041 d_250 d_250 d_272 d_285 d_311 d_324 d_349 d_357 d_362 d_401 d_458 d_511 d_567 d_584 d_721 d_722 d_722 d_730 d_999 l_50863 l_50893 l_50902 l_50912 l_50931 l_50960 l_50970 l_51003 l_51006 l_51009 l_51221 l_51222 l_51237 l_51244 l_51256 l_51265 l_51274 l_51275 l_51277 l_51279 l_51301 l_50813 l_51516 l_50818 l_50820 l_50821 l_50862 l_50963 l_50909 l_50882 l_51482 l_51493 l_51116 l_51118 l_51120 l_51123 l_51125 l_51127 l_51128 l_50971 l_50983 l_51479 l_51249 c_V5867 s_78552 s_78959 d_008 d_038 d_250 d_250 d_272 d_276 d_280 d_357 d_362 d_401 d_427 d_428 d_428 d_511 d_518 d_571 d_584 d_730 d_995'
# Generate the vector representation for the input sequence
vect = joblib.load('../Data_Preparation/Transformation_Models/tfidf_fitted.pkl')
patient_seq = vect.transform([input_patient])  # Unstandardized value of patient sequence
# Standardizing the patient sequence
sc = joblib.load('../Data_Preparation/Transformation_Models/standard.pkl')
patient_seq = sc.transform(patient_seq.toarray())  # Convert the sequence to array or sc will give an error
Prediction_for_patient_prob = {}
Prediction_for_patient = {}
generate_icd9_lookup()  # generate the lookup for each diagnosis
# One saved binary classifier per diagnosis code: rebuild the network, load
# the weights, and score the single patient vector.
for c, d in enumerate(uniq_diag):
    # Display the training diagnosis
    print("--------------------Training {}--------------------".format(d))
    # Run each iteration in a graph
    with tf.Graph().as_default():
        # Model -- must match the architecture used when the weights were saved.
        input_layer = tflearn.input_data(shape=[None, 1391], name='input')
        dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
        dropout1 = tflearn.dropout(dense1, 0.8)
        dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
        dropout2 = tflearn.dropout(dense2, 0.8)
        output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
        regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy', learning_rate=.001)
        # Define model with checkpoint (autosave)
        model = tflearn.DNN(regression, tensorboard_verbose=3)
        # load the previously trained model
        model.load('Saved_Models/Fully_Connected_n_epochs_{0}/dense_fully_connected_dropout_5645_{1}.tfl'
                   .format(n_epoch, d))
        # Standardize the values and predict the output
        vector_rep_patient_sc = np.reshape(patient_seq, (1, 1391))
        # Find the probability of outputs (column 1 = positive class)
        Prediction_for_patient_prob[d] = np.array(model.predict(vector_rep_patient_sc))[:, 1]
        Prediction_for_patient[d] = np.where(Prediction_for_patient_prob[d] > 0.5, 1., 0.)
    print('\n')
    print('Completed : {0}/{1}'.format(c + 1, len(uniq_diag)))
    print('--------------------{} Complete--------------------'.format(d))
    print('\n')
# Print the final results
print('------------------------------Table for All Predictions------------------------------')
for d in uniq_diag:
    print('ICD9 : {0:<8s} Probability : {1:<8.2} Description : {2}'
          .format(d, float(Prediction_for_patient_prob[d][0]), diag_to_desc[d]))
print('------------------------------End------------------------------')
# Print the ICD9 codes of diseases with prob > 0.5
print('------------------------------Table for All Predictions with Prob > 0.5------------------------------')
for d in uniq_diag:
    if Prediction_for_patient[d] > 0.5:
        print('ICD9 : {0:<8s} Probability : {1:<8.2} Description : {2}'
              .format(d, Prediction_for_patient_prob[d][0], diag_to_desc[d]))
print('------------------------------End------------------------------')
# Calculate time
t2 = time.time()
print("Time Taken : {:.2f} s".format(t2 - t1))
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
import pandapower.shortcircuit as sc
@pytest.fixture
def wind_park_example():
    """110 kV four-bus network with three static generators (wind parks)."""
    net = pp.create_empty_network()
    b1 = pp.create_bus(net, vn_kv=110., index=1)
    b2 = pp.create_bus(net, vn_kv=110., index=2)
    b3 = pp.create_bus(net, vn_kv=110., index=3)
    b4 = pp.create_bus(net, vn_kv=110., index=4)
    # External grid at b1; s_sc_max corresponds to 20 kA at 110 kV.
    pp.create_ext_grid(net, b1, s_sc_max_mva=20*110*np.sqrt(3), rx_max=0.1)
    pp.create_line_from_parameters(net, from_bus=b1, to_bus=b2, length_km=100, r_ohm_per_km=0.120, x_ohm_per_km=0.393, c_nf_per_km=0, max_i_ka=10)
    pp.create_line_from_parameters(net, from_bus=b1, to_bus=b3, length_km=50, r_ohm_per_km=0.120, x_ohm_per_km=0.393, c_nf_per_km=0, max_i_ka=10)
    pp.create_line_from_parameters(net, from_bus=b2, to_bus=b3, length_km=50, r_ohm_per_km=0.120, x_ohm_per_km=0.393, c_nf_per_km=0, max_i_ka=10)
    pp.create_line_from_parameters(net, from_bus=b3, to_bus=b4, length_km=25, r_ohm_per_km=0.120, x_ohm_per_km=0.393, c_nf_per_km=0, max_i_ka=10)
    pp.create_sgen(net, b2, p_mw=0.1e3, sn_mva=100)
    pp.create_sgen(net, b3, p_mw=0.050e3, sn_mva=50)
    pp.create_sgen(net, b4, p_mw=0.050e3, sn_mva=50)
    # Short-circuit current contribution factor for all sgens.
    net.sgen["k"] = 1.2
    return net
@pytest.fixture
def three_bus_example():
    """110 kV radial three-bus network with one sgen at the middle bus."""
    net = pp.create_empty_network()
    b1 = pp.create_bus(net, 110)
    b2 = pp.create_bus(net, 110)
    b3 = pp.create_bus(net, 110)
    pp.create_ext_grid(net, b1, s_sc_max_mva=100., s_sc_min_mva=80., rx_min=0.4, rx_max=0.4)
    pp.create_line(net, b1, b2, std_type="305-AL1/39-ST1A 110.0" , length_km=20.)
    pp.create_line(net, b2, b3, std_type="N2XS(FL)2Y 1x185 RM/35 64/110 kV" , length_km=15.)
    # End temperature used for minimal short-circuit current calculation.
    net.line["endtemp_degree"] = 80
    pp.create_sgen(net, b2, sn_mva=2, p_mw=0, k=1.2)
    return net
def test_max_branch_results(three_bus_example):
    """Maximal short-circuit case: bus and branch currents vs. regression values."""
    net = three_bus_example
    sc.calc_sc(net, case="max", ip=True, ith=True, branch_results=True)
    # Reference values are regression baselines, not hand-derived results.
    assert np.allclose(net.res_bus_sc.ikss_ka.values, np.array([0.53746061, 0.50852707, 0.4988896]))
    assert np.allclose(net.res_line_sc.ikss_ka.values, np.array([ 0.49593034, 0.4988896 ]))
    assert np.allclose(net.res_line_sc.ip_ka.values, np.array([ 0.92787443, 0.9251165 ]))
    assert np.allclose(net.res_line_sc.ith_ka.values, np.array([ 0.49811957, 0.50106881]))
def test_min_branch_results(three_bus_example):
    """Minimal short-circuit case: bus and branch currents vs. regression values."""
    net = three_bus_example
    sc.calc_sc(net, case="min", ip=True, ith=True, branch_results=True)
    # Reference values are regression baselines, not hand-derived results.
    assert np.allclose(net.res_bus_sc.ikss_ka.values, np.array([ 0.43248784, 0.41156533, 0.40431286]))
    assert np.allclose(net.res_line_sc.ikss_ka.values, np.array([ 0.39171613, 0.40431286]))
    assert np.allclose(net.res_line_sc.ip_ka.values, np.array([ 0.72795118, 0.74576565]))
    assert np.allclose(net.res_line_sc.ith_ka.values, np.array([ 0.39340278, 0.40605375]))
def test_wind_park(wind_park_example):
    """Short circuit with sgen contributions at bus 2 vs. regression values."""
    net = wind_park_example
    sc.calc_sc(net, ip=True)
    assert np.isclose(net.res_bus_sc.ikss_ka.at[2], 3.9034, rtol=1e-4)
    assert np.isclose(net.res_bus_sc.ip_ka.at[2], 7.3746, rtol=1e-4)
if __name__ == '__main__':
    # Run this module's own tests; the previous hard-coded "test_sgen.py"
    # pointed pytest at a different file.
    pytest.main([__file__])
| StarcoderdataPython |
# filename: OpenGLContext/scenegraph/inline.py
"""VRML97 Inline node"""
from vrml.vrml97 import basenodes, nodetypes
from vrml import field, protofunctions, fieldtypes
from OpenGLContext import context
class InlineURLField( fieldtypes.MFString ):
    """Field for managing interactions with an Inline's URL value"""
    fieldType = "MFString"
    def fset( self, client, value, notify=1 ):
        """Set the client's URL, then try to load the scene"""
        value = super(InlineURLField, self).fset( client, value, notify )
        # Load in a daemon-less background thread so rendering is not blocked;
        # the loader triggers redraws on all live contexts when done.
        import threading
        threading.Thread(
            name = "Background load of %s"%(value),
            target = client.loadBackground,
            args = ( value, context.Context.allContexts,),
        ).start()
        return value
    def fdel( self, client, notify=1 ):
        """Delete the client's URL, which should delete the scene as well"""
        value = super( InlineURLField, self).fdel( client, notify )
        # Dropping the URL also drops the previously loaded scenegraph.
        del client.scenegraph
        return value
class Inline(basenodes.Inline):
"""Inline VRML97 scene based on VRML 97 Inline
Reference:
http://www.web3d.org/x3d/specifications/vrml/ISO-IEC-14772-IS-VRML97WithAmendment1/part1/nodesRef.html#Inline
"""
scenegraph = None
def renderedChildren( self, types= (nodetypes.Children, nodetypes.Rendering,) ):
"""Choose child from level that is at appropriate range"""
if self.scenegraph:
return self.scenegraph.children
return []
url = InlineURLField(
'url', 1, list
)
def loadBackground( self, url, contexts=() ):
"""Load an image from the given url in the background
url -- SF or MFString URL to load relative to the
node's root's baseURL
On success:
Sets the resulting PIL image to the
client's image property (triggering an un-caching
and re-compile if there was a previous image).
if contexts, iterate through the list calling
context.triggerRedraw(1)
"""
try:
from OpenGLContext.loaders.loader import Loader
except ImportError:
pass
else:
for u in url:
try:
baseNode = protofunctions.root(self)
if baseNode:
baseURI = baseNode.baseURI
else:
baseURI = None
result = Loader.load( u, baseURL = baseURI )
except IOError:
pass
else:
print 'loaded', u
self.scenegraph = result
for context in contexts:
c = context()
if c:
c.triggerRedraw(1)
return
warnings.warn( """Unable to load any scene from the url %s for the node %s"""%( url, str(self)))
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""Serves /swagger endpoint."""
import yaml
from pyramid.view import view_config
@view_config(route_name='swagger', request_method='GET', renderer='json')
def apidocs(request):
    """Serve the parsed Swagger spec so the JSON renderer can emit it."""
    with open('api-docs/swagger.yaml', 'r') as f:
        swagger_content = f.read()
    # safe_load: yaml.load without an explicit Loader is deprecated and can
    # execute arbitrary constructors on untrusted input.
    return yaml.safe_load(swagger_content)
| StarcoderdataPython |
from ctypes import c_ubyte, c_ushort
from snoboy import memory
# how many cycles we've executed
ticks = 0
# opcode byte -> handler function; populated elsewhere at import time
operations = {}
def doInstruction():
    """Fetch the opcode at PC, advance PC, and dispatch to its handler."""
    opcode = memory.read(registers.PC)
    registers.PC += 1
    if opcode in operations:
        print "Instruction: %s (%x)" % (operations[opcode].__name__, opcode)
        operations[opcode]()
    else:
        raise NotImplementedError("Instruction 0x%x not implemented"%opcode)
def add_ticks(t):
    """Add *t* elapsed machine cycles to the module-wide tick counter."""
    global ticks
    ticks += t
def _register16(default=0):
"""Create a property around a c_ushort"""
_value = c_ushort(default)
def get(self):
return _value.value
def set(self, value):
_value.value = value
return property(get, set)
def _register(default=0):
"""Create a property around a c_ubyte."""
_value = c_ubyte(default)
def get(self):
return _value.value
def set(self, value):
_value.value = value
return property(get, set)
def _compound_register(upper, lower):
"""Return a property that provides 16-bit access to two registers."""
def get(self):
return (upper.fget(None) << 8) | lower.fget(None)
def set(self, value):
upper.fset(None, value >> 8)
lower.fset(None, value)
return property(get, set)
def _indirect_register(reg):
    """Return a property that provides 8-bit access to the memory location pointed to by reg"""
    def read_byte(self):
        address = reg.fget(None)
        return memory.read(address)
    def write_byte(self, value):
        address = reg.fget(None)
        memory.write(address, value)
    return property(read_byte, write_byte)
class Registers(object):
    # NOTE: each _register()/_register16() call allocates ONE ctypes cell at
    # class-creation time, so register state lives on the class itself --
    # every Registers instance aliases the same CPU register file.
    A = _register()
    F = _register()
    B = _register()
    C = _register()
    D = _register()
    E = _register()
    H = _register()
    L = _register()
    # 16-bit views over the 8-bit register pairs.
    BC = _compound_register(B, C)
    DE = _compound_register(D, E)
    HL = _compound_register(H, L)
    # (HL): byte in memory at the address held in HL.
    iHL = _indirect_register(HL)
    SP = _register16()
    PC = _register16()
    # Allow name-based access, e.g. registers["A"].
    def __getitem__(self, index):
        return getattr(self, index)
    def __setitem__(self, index, value):
        setattr(self, index, value)
# Module-level singleton register file shared by the whole emulator.
registers = Registers()
def reset():
    """Restore all CPU registers to their power-on defaults.

    (Also removes dataset residue that had been fused onto the final line,
    breaking the file's syntax.)
    """
    #initialize to default value
    registers.PC = 0x0100
    registers.SP = 0xFFFE
    for name in ('A', 'F', 'B', 'C', 'D', 'E', 'H', 'L'):
        registers[name] = 0
# filename: migration/migration_manager.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from entity import configuration as config_entity
from entity import tag as tag_entity
from migration import tags as tag_lists
import os
import shutil
import sqlite3
import sys
class MigrationManager:
    """ Checks current version and migrates if necessary.

    Constants:
        CONFIG_VERSION -- The version name in configuration table (string).
        EMPTY_DB_FILE -- Name of empty database file (string).

    Member:
        home -- The home directory (string).
        db_file -- Path to sqlite database file.
    """

    CONFIG_VERSION = 'version'
    EMPTY_DB_FILE = 'empty-db.sqlite'

    def __init__(self, home, db_file):
        self.home = home
        self.db_file = home+db_file

    def migrate(self):
        """ Executes checks and migrates if necessary. Sub functions should
        always commit changes in the database.

        Step i of *steps* migrates version (1 + i) to version (2 + i); the
        steps run in order, so a database at any older version is brought
        fully up to date in one call.
        """
        # Version 1: make sure the database file itself exists.
        self.__create_db()
        db = sqlite3.connect(self.db_file)
        steps = [
            lambda db: self.__create_tags(db, tag_lists.tags_001),  # 1 -> 2
            self.__tags_rename_vermouth,                            # 2 -> 3
            self.__tags_rename_brackets,                            # 3 -> 4
            lambda db: self.__create_tags(db, tag_lists.tags_002),  # 4 -> 5
            lambda db: self.__create_tags(db, tag_lists.tags_003),  # 5 -> 6
            lambda db: self.__create_tags(db, tag_lists.tags_004),  # 6 -> 7
            lambda db: self.__create_tags(db, tag_lists.tags_005),  # 7 -> 8
            lambda db: self.__create_tags(db, tag_lists.tags_006),  # 8 -> 9
            lambda db: self.__create_tags(db, tag_lists.tags_007),  # 9 -> 10
            lambda db: self.__create_tags(db, tag_lists.tags_008),  # 10 -> 11
            lambda db: self.__create_tags(db, tag_lists.tags_009),  # 11 -> 12
        ]
        for offset, step in enumerate(steps):
            current = 1 + offset
            if self.__is_version(db, current):
                step(db)
                self.__update_version(db, current + 1)
        # Finished
        db.close()

    def __create_db(self):
        """ Create database file if not existing. """
        if not os.path.exists(self.db_file):
            shutil.copy(self.home+self.EMPTY_DB_FILE, self.db_file)

    def __create_tags(self, db, tags):
        """ Create initial set of German and English tags.

        Each entry of *tags* is a list whose first name becomes the parent
        tag; the remaining names are stored as synonyms of it.
        """
        for tag_names in tags:
            first = True
            parent_id = None
            for tag_name in tag_names:
                tag = tag_entity.Tag(name=tag_name, synonym_of=parent_id)
                try:
                    tag.save(db)
                except sqlite3.IntegrityError:
                    # Duplicate tag name: report the offending entry and abort.
                    print('{} in [{}]'.format(tag_name, ', '.join(tag_names)))
                    sys.exit(80085)
                if first:
                    parent_id = tag.id
                    first = False
        db.commit()

    def __is_version(self, db, version):
        """ Checks version in configuration table. """
        config = config_entity.Configuration.find_name(db, self.CONFIG_VERSION)
        # Fix: was `is version`, which compares int identity and only worked
        # by virtue of CPython's small-int caching.
        return int(config.value) == version

    def __tags_rename_brackets(self, db):
        """ Correct brackets in tags. """
        tag = tag_entity.Tag.find_name(db, 'Paprika [Gewürz]')
        tag.name = 'Paprika (Gewürz)'
        tag.save(db)
        tag = tag_entity.Tag.find_name(db, 'Paprika [Spice]')
        tag.name = 'Paprika (Spice)'
        tag.save(db)
        tag = tag_entity.Tag.find_name(db, 'Rum [Dark]')
        tag.name = 'Rum (Dark)'
        tag.save(db)
        tag = tag_entity.Tag.find_name(db, 'Rum [Light]')
        tag.name = 'Rum (Light)'
        tag.save(db)

    def __tags_rename_vermouth(self, db):
        """ Correct brackets in vermouth. """
        # The malformed 'Vermouth [Dry' (missing bracket) matches the stored
        # value exactly -- do not "fix" the lookup string.
        tag = tag_entity.Tag.find_name(db, 'Vermouth [Dry')
        tag.name = 'Vermouth (Dry)'
        tag.save(db)
        tag = tag_entity.Tag.find_name(db, 'Vermouth [Sweet]')
        tag.name = 'Vermouth (Sweet)'
        tag.save(db)

    def __update_version(self, db, version):
        """ Sets version in configuration table to given value. """
        config = config_entity.Configuration.find_name(db, self.CONFIG_VERSION)
        config.value = str(version)
        config.save(db)
        db.commit()
# Generated by Django 4.0.2 on 2022-03-03 12:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: tightens field definitions on the
    # instructors.Location model (no data migration involved).
    dependencies = [
        ('instructors', '0008_remove_user_location'),
    ]
    operations = [
        migrations.AlterField(
            model_name='location',
            name='ZIP',
            field=models.CharField(blank=True, default=0, max_length=10, verbose_name='ZIP'),
        ),
        migrations.AlterField(
            model_name='location',
            name='city',
            field=models.CharField(max_length=150, verbose_name='city'),
        ),
        migrations.AlterField(
            model_name='location',
            name='country',
            field=models.CharField(max_length=150, verbose_name='country'),
        ),
        migrations.AlterField(
            model_name='location',
            name='latitude',
            field=models.DecimalField(blank=True, decimal_places=5, default=0.0, max_digits=6, verbose_name='latitude'),
        ),
        migrations.AlterField(
            model_name='location',
            name='stop_date',
            field=models.DateTimeField(blank=True, null=True, verbose_name='stop date'),
        ),
    ]
| StarcoderdataPython |
from minitf import kernel as K
from minitf.autodiff.vjp_maker import def_vjp_maker
# Stolen from autograd library
def unbroadcast(target, g):
    """Sum gradient *g* down to the shape of *target*, undoing broadcasting.
    (Adapted from the autograd library.)
    """
    # Collapse the leading axes that broadcasting prepended.
    while K.rank(g) > K.rank(target):
        g = K.reduce_sum(g, axis=0)
    # Collapse axes that were broadcast up from size 1.
    for axis, size in enumerate(K.shape(target)):
        if size == 1:
            g = K.reduce_sum(g, axis=axis, keepdims=True)
    return g
def balanced_eq(x, z, y):
    """Indicator that ``x`` equals the result ``z``, splitting credit on ties.

    Used by the max/min VJPs: returns 1 where x alone achieved z, 0 where it
    did not, and 0.5 where both x and y tie (so the gradient is shared).
    """
    achieved = (x == z)
    tied_with_other = (x == y)
    return achieved / (1.0 + tied_with_other)
# Register vector-Jacobian products (VJPs) for the kernel primitives.
# Each maker receives (ans, *inputs) and returns one backward closure per
# positional input; every closure maps the output cotangent g to the
# cotangent of that input. Binary elementwise ops route their gradients
# through unbroadcast to undo forward-pass broadcasting.

def_vjp_maker(K.add, lambda ans, x, y: (
    lambda g: unbroadcast(x, g),
    lambda g: unbroadcast(y, g),
))

def_vjp_maker(K.subtract, lambda ans, x, y: (
    lambda g: unbroadcast(x, g),
    lambda g: unbroadcast(y, -g),
))

def_vjp_maker(K.multiply, lambda ans, x, y: (
    lambda g: unbroadcast(x, y * g),
    lambda g: unbroadcast(y, x * g),
))

def_vjp_maker(K.divide, lambda ans, x, y: (
    lambda g: unbroadcast(x, g / y),
    lambda g: unbroadcast(y, -g * x / (y * y)),
))

# Matrix product: dx = g @ y^T, dy = x^T @ g.
def_vjp_maker(K.dot, lambda ans, x, y: (
    lambda g: K.dot(g, K.transpose(y)),
    lambda g: K.dot(K.transpose(x), g),
))

def_vjp_maker(K.square, lambda ans, x: (
    lambda g: g * 2 * x,
))

# Need to update. NOTE(review): dividing by K.size(x) assumes a full
# reduction (no axis argument) — confirm before supporting axis-wise mean.
def_vjp_maker(K.reduce_mean, lambda ans, x: (
    lambda g: g / K.size(x),
))

# d/dx exp(x) = exp(x) = ans, so reuse the forward result.
def_vjp_maker(K.exp, lambda ans, x: (
    lambda g: ans * g,
))

def_vjp_maker(K.negative, lambda ans, x: (
    lambda g: -g,
))

def_vjp_maker(K.transpose, lambda ans, x: (
    lambda g: K.transpose(g),
))

# max/min: gradient flows to the winning operand; ties split the credit
# evenly via balanced_eq.
def_vjp_maker(K.maximum, lambda ans, x, y: (
    lambda g: unbroadcast(x, g * balanced_eq(x, ans, y)),
    lambda g: unbroadcast(y, g * balanced_eq(y, ans, x)),
))

def_vjp_maker(K.minimum, lambda ans, x, y: (
    lambda g: unbroadcast(x, g * balanced_eq(x, ans, y)),
    lambda g: unbroadcast(y, g * balanced_eq(y, ans, x)),
))

# cast: gradient is cast back to the input's original dtype.
def_vjp_maker(K.cast, lambda ans, x, dtype: (
    lambda g: K.cast(g, x.dtype),
))

# reshape/flatten: gradient is reshaped back to the input's shape.
def_vjp_maker(K.reshape, lambda ans, x, shape: (
    lambda g: K.reshape(g, K.shape(x)),
))

def_vjp_maker(K.flatten, lambda ans, x: (
    lambda g: K.reshape(g, K.shape(x)),
))

# where: gradient passes through whichever branch was selected; the
# unselected branch receives zeros.
def_vjp_maker(K.where, lambda ans, c, x, y: (
    lambda g: None,  # no vjp for condition parameter
    lambda g: K.where(c, g, K.zeros_like(g)),
    lambda g: K.where(c, K.zeros_like(g), g),
))
8007586 | import logging
import json
import math
from types import BuiltinMethodType
import ftx
from execution.exchanges import BaseExchange
# import http
# http.client.HTTPConnection.debuglevel = 1
logger = logging.getLogger("execution")
class FTXExchange(BaseExchange):
    """Execution adapter for FTX spot markets.

    Wraps ``ftx.FtxClient`` and exposes position-targeting helpers: read the
    current spot balance, compute a passive limit price inside the spread,
    and place post-only limit orders. When ``testmode`` is truthy no orders
    are actually sent.
    """

    # Order sides; LONG/SHORT are aliases used by strategy code.
    BUY = LONG = "buy"
    SELL = SHORT = "sell"

    def __init__(self, subaccount, testmode, api_key, api_secret) -> None:
        """Create the underlying FTX REST client for the given subaccount.

        Args:
            subaccount: FTX subaccount name.
            testmode: if truthy, orders are logged but never submitted.
            api_key: FTX API key.
            api_secret: FTX API secret.
        """
        self.client = ftx.FtxClient(
            api_key=api_key, api_secret=api_secret, subaccount_name=subaccount
        )
        self.testmode = testmode
        logger.debug(f"ftx inited with testmode={testmode}")

    def _parse_symbol(self, market):
        """Remove the USD denominator from a Market.

        Some ftx calls need the market (pair), some only
        the coin.

        Args:
            market (str): the market, e.g. BTC/USD

        Returns:
            str: The symbol without the /USD e.g. BTC, or None if the
            input contains no '/' separator.
        """
        if "/" in market:
            symbol = market.split("/")[0]
            return symbol

    def get_quote(self, market):
        """Get a quote

        Args:
            market (string): the market, e.g. 'BTC/USD'.

        Returns:
            dict: a dictionary result
        """
        # TODO check there is one and only one result?
        return self.client.get_market(market=market)

    def spot_is_borrowable(self, market):
        """Check if this spot market has lending (so you can short it).

        Returns True when the market info carries a 'previousFunding'
        field; returns None (falsy) otherwise.

        Args:
            market: the market, e.g. 'BTC/USD'.
        """
        info = self.client.get_market_info(market)
        if info[0].get("previousFunding"):
            return True

    def get_target_price(self, market, side, aggressive=False):
        """Calculate a target price somewhere in the order book.

        Args:
            market (string): the market, e.g. 'BTC/USD'.
            side (string): BUY or SELL.
            aggressive (bool, optional): unsupported; must be False.

        Returns:
            float: a passive price at (or near) the spread midpoint.
        """
        assert not aggressive, "unsupported policy aggressive"
        quote = self.get_quote(market)
        # BUG FIX: previously passed the raw market string instead of the
        # quote dict, which made _get_spread_midpoint call .get() on a str.
        return self._get_spread_midpoint(quote, side)

    def _get_spread_midpoint(self, quote, side):
        """Compute target price at the midpoint of the spread.

        Falls back to the best bid/offer when the spread is a single tick
        (no room for an interior price).

        Args:
            quote (dict): quote data from the API (bid/ask/priceIncrement).
            side (str): BUY or SELL.

        Returns:
            float: the target limit price.

        Raises:
            NotImplementedError: for an unknown side.
            AssertionError: if the quote data or computed target is invalid.
        """
        if not side == self.BUY and not side == self.SELL:
            raise NotImplementedError(f"Unsupported side: {side}")
        price_increment = quote.get("priceIncrement")
        bid = quote.get("bid")
        ask = quote.get("ask")
        assert bid <= ask, "invalid spread data"
        spread_in_ticks = (ask - bid) / price_increment
        if spread_in_ticks <= 1:
            return (
                bid if side == self.BUY else ask
            )  # can't do a midpoint on a spread of 1 tick, default to best bid/ask
        offset_ticks = math.ceil(spread_in_ticks / 2)
        offset_notional = offset_ticks * price_increment
        if side == self.BUY:
            target = bid + offset_notional
            if not (target < ask):
                raise AssertionError("invalid target price (side=buy)")
            return target
        if side == self.SELL:
            target = ask - offset_notional
            if not (target > bid):
                raise AssertionError("invalid target price (side=sell)")
            return target

    def get_tick_size(self, market):
        """Return the minimum order-size increment for the market."""
        quote = self.get_quote(market)
        return quote.get("sizeIncrement")

    def _get_position(self, market: str):
        """Return the current spot position (total balance) for a market.

        Args:
            market: an object whose ``name`` is the pair, e.g. 'BTC/USD'.

        Returns:
            float: the total balance of the market's base coin, 0.0 if none.

        Raises:
            IndexError: if more than one balance entry matches the coin.
        """
        spot_balances = self.client.get_balances()  # spot
        symbol = self._parse_symbol(market.name)
        positions = list(filter(lambda x: x["coin"] == symbol, spot_balances))
        if len(positions) == 1:
            retval = positions.pop().get("total")
            print(f"{market} has exactly one open position of size {retval}")
            return retval
        elif not len(positions):
            print(f"{market} does not have an open position")
            return 0.0
        else:
            raise IndexError(f"more than one position for {market} in {positions}")

    def set_position(self, market: str, target_position: float):
        """Trade toward a target position size.

        Computes the delta between current and target position and places a
        single buy or sell order for the difference.

        Args:
            market: the market to trade.
            target_position: desired position size in base-coin units.
        """
        # TODO Need to consider if you can enter a short position on this security.
        current_position = self._get_position(market)
        delta = target_position - current_position
        if delta < 0:
            delta = abs(delta)
            self._place_order(market, self.SELL, delta)
        else:
            # A zero delta falls through here; _place_order skips orders
            # smaller than the tick size, so nothing is sent.
            self._place_order(market, self.BUY, delta)
        # TODO
        # log whatgever you do.

    def _place_order(self, market: str, side: str, units: float):
        """Place a post-only limit order at a passive target price.

        Skips the order when ``units`` is below the tick size, and only
        submits to the exchange when testmode is off.

        Args:
            market: the market to trade.
            side: BUY or SELL.
            units: order size in base-coin units.
        """
        ### TODO Also need to check open orders....
        target_price = self.get_target_price(market, side)
        consideration = target_price * units
        tick = self.get_tick_size(market)
        print(
            f"""endpoint="executor",
            testmode="{self.testmode}"",
            market="{market}",
            side="{side}",
            consideration="{consideration}",
            target_price="{target_price}",
            units="{units}" """
        )
        if units < tick:
            print(f"{market} order size {units} is less than the tick size {tick}")
        elif self.testmode == False:
            try:
                print(
                    self.client.place_order(
                        market=str(market),
                        side=side,
                        price=target_price,
                        size=units,
                        type="limit",
                        post_only=True,
                    )
                )
            except Exception as e:
                print("Exception!!")
                raise e
| StarcoderdataPython |
375056 | <reponame>hsiboy/seabus
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from folium.folium import Map, initialize_notebook, CircleMarker
from folium.map import (FeatureGroup, FitBounds, Icon, LayerControl, Marker,
Popup, TileLayer)
from folium.features import (ClickForMarker, CustomIcon, DivIcon,
GeoJson, LatLngPopup,
MarkerCluster, MultiPolyLine, PolyLine, Vega,
RegularPolygonMarker, TopoJson, WmsTileLayer)
import folium.colormap as colormap
__version__ = '0.2.1'

# Public API for `from folium import *`.
# BUG FIX: 'GeoJsonStyle' was listed here but is not imported above, which
# made wildcard imports raise AttributeError; it has been removed.
__all__ = ['Map',
           'initialize_notebook',
           'CircleMarker',
           'FeatureGroup',
           'FitBounds',
           'Icon',
           'LayerControl',
           'Marker',
           'Popup',
           'TileLayer',
           'ClickForMarker',
           'colormap',
           'CustomIcon',
           'DivIcon',
           'GeoJson',
           'LatLngPopup',
           'MarkerCluster',
           'MultiPolyLine',
           'PolyLine',
           'Vega',
           'RegularPolygonMarker',
           'TopoJson',
           'WmsTileLayer']
| StarcoderdataPython |
5157096 | #!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
from resource_management.libraries.script import Script
from resource_management.libraries.functions.default import default
from resource_management.core.logger import Logger
from resource_management.core.resources.system import File, Directory, Execute, Link
from resource_management.core.source import DownloadSource, InlineTemplate, Template
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
from resource_management.libraries.resources.properties_file import PropertiesFile
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.generate_logfeeder_input_config import generate_logfeeder_input_config
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.core.utils import PasswordString
from resource_management.core.shell import as_sudo
from resource_management.libraries.functions import solr_cloud_util
from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
from resource_management.core.exceptions import ExecutionFailed
# This file contains functions used for setup/configure of Ranger Admin and Ranger Usersync.
# The design is to mimic what is done by the setup.sh script bundled by Ranger component currently.
def ranger(name=None, upgrade_type=None):
  """
  Dispatch setup for a Ranger service component.

  parameter name: name of ranger service component
  ('ranger_admin', 'ranger_usersync' or 'ranger_tagsync');
  any other value is silently ignored.
  """
  dispatch = {
    'ranger_admin': setup_ranger_admin,
    'ranger_usersync': setup_usersync,
    'ranger_tagsync': setup_tagsync,
  }
  setup_fn = dispatch.get(name)
  if setup_fn is not None:
    setup_fn(upgrade_type=upgrade_type)
def setup_ranger_admin(upgrade_type=None):
  """
  Install and configure Ranger Admin: lays down conf directories, JDBC
  driver, site/log4j configs, optional PAM files, keystores and core-site.
  Mimics Ranger's bundled setup.sh.

  :param upgrade_type: rolling/non-rolling upgrade marker; when set, default
                       config templates are re-copied from conf.dist.
  """
  import params

  if upgrade_type is None:
    upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))

  ranger_home = params.ranger_home
  ranger_conf = params.ranger_conf

  Directory(ranger_conf,
    owner = params.unix_user,
    group = params.unix_group,
    create_parents = True
  )

  copy_jdbc_connector(ranger_home)

  # DB connectivity checker jar, fetched from the Ambari server.
  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
    content = DownloadSource(format("{jdk_location}/{check_db_connection_jar_name}")),
    mode = 0644,
  )

  generate_logfeeder_input_config('ranger', Template("input.config-ranger.json.j2", extra_imports=[default]))

  # Build the classpath for the DB connection check; SQL Anywhere ships its
  # own driver jar, all other flavors use the downloaded connector.
  cp = format("{check_db_connection_jar}")
  if params.db_flavor.lower() == 'sqla':
    cp = cp + os.pathsep + format("{ranger_home}/ews/lib/sajdbc4.jar")
  else:
    cp = cp + os.pathsep + format("{driver_curl_target}")
  cp = cp + os.pathsep + format("{ranger_home}/ews/lib/*")

  # '!p' marks the password for masking in logged output.
  db_connection_check_command = format(
    "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_jdbc_connection_url}' {ranger_db_user} {ranger_db_password!p} {ranger_jdbc_driver}")

  env_dict = {}
  if params.db_flavor.lower() == 'sqla':
    env_dict = {'LD_LIBRARY_PATH':params.ld_lib_path}

  # Fail early (with retries) if the Ranger DB is unreachable.
  Execute(db_connection_check_command, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10, environment=env_dict)

  # Symlink {ranger_home}/conf -> webapp conf dir if not already present.
  Execute(('ln','-sf', format('{ranger_home}/ews/webapp/WEB-INF/classes/conf'), format('{ranger_home}/conf')),
    not_if=format("ls {ranger_home}/conf"),
    only_if=format("ls {ranger_home}/ews/webapp/WEB-INF/classes/conf"),
    sudo=True)

  # On upgrade, refresh the stock default configs from conf.dist.
  if upgrade_type is not None:
    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml')
    dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)

    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml')
    dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)

  Directory(format('{ranger_home}/'),
    owner = params.unix_user,
    group = params.unix_group,
    recursive_ownership = True,
  )

  Directory(params.ranger_pid_dir,
    mode=0755,
    owner = params.unix_user,
    group = params.user_group,
    cd_access = "a",
    create_parents=True
  )

  Directory(params.admin_log_dir,
    owner = params.unix_user,
    group = params.unix_group,
    create_parents = True,
    cd_access='a',
    mode=0755
  )

  # Ensure ranger-admin-default-site.xml exists (copy from conf.dist if not)
  # and has the right ownership.
  if os.path.isfile(params.ranger_admin_default_file):
    File(params.ranger_admin_default_file, owner=params.unix_user, group=params.unix_group)
  else:
    Logger.warning('Required file {0} does not exist, copying the file to {1} path'.format(params.ranger_admin_default_file, ranger_conf))
    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml')
    dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)
    File(params.ranger_admin_default_file, owner=params.unix_user, group=params.unix_group)

  # Same treatment for security-applicationContext.xml.
  if os.path.isfile(params.security_app_context_file):
    File(params.security_app_context_file, owner=params.unix_user, group=params.unix_group)
  else:
    Logger.warning('Required file {0} does not exist, copying the file to {1} path'.format(params.security_app_context_file, ranger_conf))
    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml')
    dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)
    File(params.security_app_context_file, owner=params.unix_user, group=params.unix_group)

  # PAM authentication: drop PAM service files for ranger-admin/ranger-remote
  # unless they already exist.
  if default("/configurations/ranger-admin-site/ranger.authentication.method", "") == 'PAM':
    d = '/etc/pam.d'
    if os.path.isdir(d):
      if os.path.isfile(os.path.join(d, 'ranger-admin')):
        Logger.info('ranger-admin PAM file already exists.')
      else:
        File(format('{d}/ranger-admin'),
          content=Template('ranger_admin_pam.j2'),
          owner = params.unix_user,
          group = params.unix_group,
          mode=0644
          )
      if os.path.isfile(os.path.join(d, 'ranger-remote')):
        Logger.info('ranger-remote PAM file already exists.')
      else:
        File(format('{d}/ranger-remote'),
          content=Template('ranger_remote_pam.j2'),
          owner = params.unix_user,
          group = params.unix_group,
          mode=0644
          )
    else:
      Logger.error("Unable to use PAM authentication, /etc/pam.d/ directory does not exist.")

  # remove plain-text password from xml configs
  ranger_admin_site_copy = {}
  ranger_admin_site_copy.update(params.config['configurations']['ranger-admin-site'])
  for prop in params.ranger_admin_password_properties:
    if prop in ranger_admin_site_copy:
      ranger_admin_site_copy[prop] = "_"

  # HA deployments supply the spnego keytab under a dedicated HA key.
  if 'ranger.ha.spnego.kerberos.keytab' in ranger_admin_site_copy:
    ranger_admin_site_copy['ranger.spnego.kerberos.keytab'] = ranger_admin_site_copy['ranger.ha.spnego.kerberos.keytab']

  XmlConfig("ranger-admin-site.xml",
    conf_dir=ranger_conf,
    configurations=ranger_admin_site_copy,
    configuration_attributes=params.config['configurationAttributes']['ranger-admin-site'],
    owner=params.unix_user,
    group=params.unix_group,
    mode=0644)

  Directory(os.path.join(ranger_conf,'ranger_jaas'),
    mode=0700,
    owner=params.unix_user,
    group=params.unix_group,
  )

  if params.stack_supports_ranger_log4j:
    File(format('{ranger_home}/ews/webapp/WEB-INF/log4j.properties'),
      owner=params.unix_user,
      group=params.unix_group,
      content=InlineTemplate(params.admin_log4j),
      mode=0644
    )

  do_keystore_setup(upgrade_type=upgrade_type)

  create_core_site_xml(ranger_conf)

  # With Kerberos, Ranger needs hbase-site/hdfs-site locally when the
  # corresponding plugin is enabled on an HA cluster.
  if params.stack_supports_ranger_kerberos:
    if params.is_hbase_ha_enabled and params.ranger_hbase_plugin_enabled:
      XmlConfig("hbase-site.xml",
        conf_dir=ranger_conf,
        configurations=params.config['configurations']['hbase-site'],
        configuration_attributes=params.config['configurationAttributes']['hbase-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644
      )

    if params.is_namenode_ha_enabled and params.ranger_hdfs_plugin_enabled:
      XmlConfig("hdfs-site.xml",
        conf_dir=ranger_conf,
        configurations=params.config['configurations']['hdfs-site'],
        configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644
      )

  File(format("{ranger_conf}/ranger-admin-env.sh"),
    content = InlineTemplate(params.ranger_env_content),
    owner = params.unix_user,
    group = params.unix_group,
    mode = 0755
  )
def setup_ranger_db(stack_version=None):
  """
  Create/initialize the Ranger database via the bundled dba_script.py and
  db_setup.py helpers.

  :param stack_version: when set, operate on that stack version's
                        ranger-admin install instead of the current one.
  """
  import params

  ranger_home = params.ranger_home

  if stack_version is not None:
    ranger_home = format("{stack_root}/{stack_version}/ranger-admin")

  copy_jdbc_connector(ranger_home)

  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
    properties = {'audit_store': params.ranger_audit_source_type},
    owner = params.unix_user,
  )

  env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home}
  if params.db_flavor.lower() == 'sqla':
    env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home, 'LD_LIBRARY_PATH':params.ld_lib_path}

  # User wants us to setup the DB user and DB?
  if params.create_db_dbuser:
    Logger.info('Setting up Ranger DB and DB User')
    dba_setup = format('ambari-python-wrap {ranger_home}/dba_script.py -q')
    Execute(dba_setup,
            environment=env_dict,
            logoutput=True,
            user=params.unix_user,
    )
  else:
    Logger.info('Separate DBA property not set. Assuming Ranger DB and DB User exists!')

  db_setup = format('ambari-python-wrap {ranger_home}/db_setup.py')
  Execute(db_setup,
          environment=env_dict,
          logoutput=True,
          user=params.unix_user,
  )
def setup_java_patch(stack_version=None):
  """
  Apply Ranger DB java patches via db_setup.py -javapatch.

  :param stack_version: when set, operate on that stack version's
                        ranger-admin install instead of the current one.
  """
  import params

  ranger_home = params.ranger_home

  if stack_version is not None:
    ranger_home = format("{stack_root}/{stack_version}/ranger-admin")

  env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home}
  if params.db_flavor.lower() == 'sqla':
    env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home, 'LD_LIBRARY_PATH':params.ld_lib_path}

  # Renamed from 'setup_java_patch', which shadowed this function's own name.
  java_patch_cmd = format('ambari-python-wrap {ranger_home}/db_setup.py -javapatch')
  Execute(java_patch_cmd,
          environment=env_dict,
          logoutput=True,
          user=params.unix_user,
  )
def do_keystore_setup(upgrade_type=None):
  """
  Store Ranger Admin secrets (DB, LDAP/AD bind, truststore, HTTPS keystore
  passwords) as aliases in the JCEKS credential provider, then fix ownership
  of the keystore files.
  """
  import params

  ranger_home = params.ranger_home
  cred_lib_path = params.cred_lib_path

  ranger_credential_helper(cred_lib_path, params.ranger_jpa_jdbc_credential_alias, params.ranger_ambari_db_password, params.ranger_credential_provider_path)

  if params.ranger_auth_method.upper() == "LDAP":
    ranger_credential_helper(params.cred_lib_path, params.ranger_ldap_password_alias, params.ranger_ldap_bind_auth_password, params.ranger_credential_provider_path)

  if params.ranger_auth_method.upper() == "ACTIVE_DIRECTORY":
    ranger_credential_helper(params.cred_lib_path, params.ranger_ad_password_alias, params.ranger_ad_bind_auth_password, params.ranger_credential_provider_path)

  if params.stack_supports_secure_ssl_password:
    ranger_credential_helper(params.cred_lib_path, params.ranger_truststore_alias, params.truststore_password, params.ranger_credential_provider_path)

    # HTTPS keystore password only needed when serving HTTPS exclusively.
    if params.https_enabled and not params.http_enabled:
      ranger_credential_helper(params.cred_lib_path, params.ranger_https_keystore_alias, params.https_keystore_password, params.ranger_credential_provider_path)

  File(params.ranger_credential_provider_path,
    owner = params.unix_user,
    group = params.unix_group,
    only_if = format("test -e {ranger_credential_provider_path}"),
    mode = 0640
  )

  # The provider writes a hidden .jceks.crc alongside; fix its owner too.
  update_dot_jceks_crc_ownership(credential_provider_path = params.ranger_credential_provider_path, user = params.unix_user, group = params.unix_group)
def password_validation(password):
  """
  Validate an LDAP/AD bind password: reject blank values and passwords
  containing backslash, backtick, single or double quotes.

  Raises Fail on an invalid password; logs success otherwise.
  """
  import params
  if not password.strip():
    raise Fail("Blank password is not allowed for Bind user. Please enter valid password.")
  # Character class: backslash, backtick, single quote, double quote.
  if re.search(r'[\\`\'"]', password):
    raise Fail("LDAP/AD bind password contains one of the unsupported special characters like \" ' \ `")
  Logger.info("password validated")
def copy_jdbc_connector(ranger_home):
  """
  Download the JDBC driver registered with ambari-server and install it into
  {ranger_home}/ews/lib, then record its path in install.properties.

  :param ranger_home: Ranger Admin install root.
  :raises Fail: if no JDBC driver has been registered with ambari-server.
  """
  import params

  if params.jdbc_jar_name is None and params.driver_curl_source.endswith("/None"):
    error_message = format("{db_flavor} jdbc driver cannot be downloaded from {jdk_location}\nPlease run 'ambari-server setup --jdbc-db={db_flavor} --jdbc-driver={{path_to_jdbc}}' on ambari-server host.")
    raise Fail(error_message)

  if params.driver_curl_source and not params.driver_curl_source.endswith("/None"):
    if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
      File(params.previous_jdbc_jar, action='delete')

  File(params.downloaded_custom_connector,
    content = DownloadSource(params.driver_curl_source),
    mode = 0644
  )

  driver_curl_target = format("{ranger_home}/ews/lib/{jdbc_jar_name}")

  if params.db_flavor.lower() == 'sqla':
    # SQL Anywhere ships a tarball: extract it, install the driver jar and
    # copy its native libraries to the shared jdbc libs dir.
    Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir), sudo = True)

    Execute(('cp', '--remove-destination', params.jar_path_in_archive, os.path.join(ranger_home, 'ews', 'lib')),
      path=["/bin", "/usr/bin/"],
      sudo=True)

    File(os.path.join(ranger_home, 'ews', 'lib', 'sajdbc4.jar'), mode=0644)

    Directory(params.jdbc_libs_dir,
      cd_access="a",
      create_parents=True)

    # 'yes |' auto-confirms overwrite prompts from cp.
    Execute(as_sudo(['yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir], auto_escape=False),
            path=["/bin", "/usr/bin/"])
  else:
    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, os.path.join(ranger_home, 'ews', 'lib')),
      path=["/bin", "/usr/bin/"],
      sudo=True)

    File(os.path.join(ranger_home, 'ews', 'lib',params.jdbc_jar_name), mode=0644)

  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
    properties = params.config['configurations']['admin-properties'],
    owner = params.unix_user,
  )

  # Record the installed driver jar path for the Ranger setup scripts.
  if params.db_flavor.lower() == 'sqla':
    ModifyPropertiesFile(format("{ranger_home}/install.properties"),
      properties = {'SQL_CONNECTOR_JAR': format('{ranger_home}/ews/lib/sajdbc4.jar')},
      owner = params.unix_user,
    )
  else:
    ModifyPropertiesFile(format("{ranger_home}/install.properties"),
      properties = {'SQL_CONNECTOR_JAR': format('{driver_curl_target}')},
      owner = params.unix_user,
    )
def setup_usersync(upgrade_type=None):
  """
  Install and configure Ranger Usersync: directories, ugsync-site config,
  credential aliases, self-signed keystore and env script.

  :param upgrade_type: rolling/non-rolling upgrade marker; when set, default
                       config templates are re-copied from conf.dist.
  """
  import params

  usersync_home = params.usersync_home
  ranger_home = params.ranger_home
  ranger_ugsync_conf = params.ranger_ugsync_conf

  # Validate the LDAP bind password only when LDAP sync is actually used.
  if not is_empty(params.ranger_usersync_ldap_ldapbindpassword) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
    password_validation(params.ranger_usersync_ldap_ldapbindpassword)

  Directory(params.ranger_pid_dir,
    mode=0755,
    owner = params.unix_user,
    group = params.user_group,
    cd_access = "a",
    create_parents=True
  )

  Directory(params.usersync_log_dir,
    owner = params.unix_user,
    group = params.unix_group,
    cd_access = 'a',
    create_parents=True,
    mode=0755,
    recursive_ownership = True
  )

  Directory(format("{ranger_ugsync_conf}/"),
    owner = params.unix_user
  )

  generate_logfeeder_input_config('ranger', Template("input.config-ranger.json.j2", extra_imports=[default]))

  # On upgrade, refresh the stock default config from conf.dist.
  if upgrade_type is not None:
    src_file = format('{usersync_home}/conf.dist/ranger-ugsync-default.xml')
    dst_file = format('{usersync_home}/conf/ranger-ugsync-default.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)

  if params.stack_supports_ranger_log4j:
    File(format('{usersync_home}/conf/log4j.properties'),
      owner=params.unix_user,
      group=params.unix_group,
      content=InlineTemplate(params.usersync_log4j),
      mode=0644
    )
  elif upgrade_type is not None and not params.stack_supports_ranger_log4j:
    # Older stacks ship log4j.xml instead of log4j.properties.
    src_file = format('{usersync_home}/conf.dist/log4j.xml')
    dst_file = format('{usersync_home}/conf/log4j.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)

  # remove plain-text password from xml configs
  ranger_ugsync_site_copy = {}
  ranger_ugsync_site_copy.update(params.config['configurations']['ranger-ugsync-site'])
  for prop in params.ranger_usersync_password_properties:
    if prop in ranger_ugsync_site_copy:
      ranger_ugsync_site_copy[prop] = "_"

  XmlConfig("ranger-ugsync-site.xml",
    conf_dir=ranger_ugsync_conf,
    configurations=ranger_ugsync_site_copy,
    configuration_attributes=params.config['configurationAttributes']['ranger-ugsync-site'],
    owner=params.unix_user,
    group=params.unix_group,
    mode=0644)

  if os.path.isfile(params.ranger_ugsync_default_file):
    File(params.ranger_ugsync_default_file, owner=params.unix_user, group=params.unix_group)

  if os.path.isfile(params.usgsync_log4j_file):
    File(params.usgsync_log4j_file, owner=params.unix_user, group=params.unix_group)

  # Credential validator must be setuid-executable (04555).
  if os.path.isfile(params.cred_validator_file):
    File(params.cred_validator_file, group=params.unix_group, mode=04555)

  # Store usersync secrets as aliases in its JCEKS provider.
  ranger_credential_helper(params.ugsync_cred_lib, 'usersync.ssl.key.password', params.ranger_usersync_keystore_password, params.ugsync_jceks_path)

  if not is_empty(params.ranger_usersync_ldap_ldapbindpassword) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
    ranger_credential_helper(params.ugsync_cred_lib, 'ranger.usersync.ldap.bindalias', params.ranger_usersync_ldap_ldapbindpassword, params.ugsync_jceks_path)

  ranger_credential_helper(params.ugsync_cred_lib, 'usersync.ssl.truststore.password', params.ranger_usersync_truststore_password, params.ugsync_jceks_path)

  File(params.ugsync_jceks_path,
       owner = params.unix_user,
       group = params.unix_group,
       only_if = format("test -e {ugsync_jceks_path}"),
       mode = 0640
  )

  update_dot_jceks_crc_ownership(credential_provider_path = params.ugsync_jceks_path, user = params.unix_user, group = params.unix_group)

  File(params.usersync_services_file,
    mode = 0755,
  )

  # Generate a self-signed keystore for usersync SSL if none exists yet.
  if not os.path.isfile(params.ranger_usersync_keystore_file):
    cmd = format("{java_home}/bin/keytool -genkeypair -keyalg RSA -alias selfsigned -keystore '{ranger_usersync_keystore_file}' -keypass {ranger_usersync_keystore_password!p} -storepass {ranger_usersync_keystore_password!p} -validity 3600 -keysize 2048 -dname '{default_dn_name}'")

    Execute(cmd, logoutput=True, user = params.unix_user)

  File(params.ranger_usersync_keystore_file,
       owner = params.unix_user,
       group = params.unix_group,
       mode = 0640
  )

  create_core_site_xml(ranger_ugsync_conf)

  File(format("{ranger_ugsync_conf}/ranger-usersync-env.sh"),
    content = InlineTemplate(params.ranger_env_content),
    owner = params.unix_user,
    group = params.unix_group,
    mode = 0755
  )
def setup_tagsync(upgrade_type=None):
  """
  Install and configure Ranger Tagsync: directories, tagsync-site config,
  optional SSL configs, Atlas application properties, log4j and env script.

  :param upgrade_type: rolling/non-rolling upgrade marker (currently unused
                       within this function).
  """
  import params

  ranger_tagsync_home = params.ranger_tagsync_home
  ranger_home = params.ranger_home
  ranger_tagsync_conf = params.ranger_tagsync_conf

  Directory(format("{ranger_tagsync_conf}"),
    owner = params.unix_user,
    group = params.unix_group,
    create_parents = True
  )

  Directory(params.ranger_pid_dir,
    mode=0755,
    create_parents=True,
    owner = params.unix_user,
    group = params.user_group,
    cd_access = "a",
  )

  Directory(params.tagsync_log_dir,
    create_parents = True,
    owner = params.unix_user,
    group = params.unix_group,
    cd_access = "a",
    mode=0755
  )

  XmlConfig("ranger-tagsync-site.xml",
    conf_dir=ranger_tagsync_conf,
    configurations=params.config['configurations']['ranger-tagsync-site'],
    configuration_attributes=params.config['configurationAttributes']['ranger-tagsync-site'],
    owner=params.unix_user,
    group=params.unix_group,
    mode=0644)

  if params.stack_supports_ranger_tagsync_ssl_xml_support:
    Logger.info("Stack supports tagsync-ssl configurations, performing the same.")
    setup_tagsync_ssl_configs()
  else:
    Logger.info("Stack doesnt support tagsync-ssl configurations, skipping the same.")

  # Atlas connectivity properties consumed by the tagsync Atlas source.
  PropertiesFile(format('{ranger_tagsync_conf}/atlas-application.properties'),
    properties = params.tagsync_application_properties,
    mode=0755,
    owner=params.unix_user,
    group=params.unix_group
  )

  File(format('{ranger_tagsync_conf}/log4j.properties'),
    owner=params.unix_user,
    group=params.unix_group,
    content=InlineTemplate(params.tagsync_log4j),
    mode=0644
  )

  File(params.tagsync_services_file,
    mode = 0755,
  )

  create_core_site_xml(ranger_tagsync_conf)

  File(format("{ranger_tagsync_conf}/ranger-tagsync-env.sh"),
    content = InlineTemplate(params.ranger_env_content),
    owner = params.unix_user,
    group = params.unix_group,
    mode = 0755
  )
def ranger_credential_helper(lib_path, alias_key, alias_value, file_path):
  """
  Store a credential alias in a JCEKS keystore via Ranger's buildks tool.

  :param lib_path: classpath for the Ranger credentialapi jars.
  :param alias_key: alias name to create.
  :param alias_value: secret value (masked in logged output).
  :param file_path: filesystem path of the JCEKS keystore.
  """
  import params

  java_exe = format('{java_home}/bin/java')
  provider_uri = format('jceks://file{file_path}')
  command = (java_exe, '-cp', lib_path, 'org.apache.ranger.credentialapi.buildks',
             'create', alias_key, '-value', PasswordString(alias_value),
             '-provider', provider_uri)
  Execute(command, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)
def create_core_site_xml(conf_dir):
  """
  Write core-site.xml into conf_dir when the stack supports Ranger Kerberos.

  Uses the cluster's core-site when HDFS is present (with an optional viewFS
  mount-table include file); otherwise writes a minimal core-site from
  params.core_site_property.
  """
  import params

  if params.stack_supports_ranger_kerberos:
    if params.has_namenode:
      # if there is the viewFS mount table content, create separate xml config and include in in the core-site
      # else just create core-site
      if params.mount_table_content:
        XmlConfig("core-site.xml",
                conf_dir=conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.config['configurationAttributes']['core-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644,
                xml_include_file=os.path.join(conf_dir, params.xml_inclusion_file_name)
        )

        File(os.path.join(conf_dir, params.xml_inclusion_file_name),
             owner=params.unix_user,
             group=params.unix_group,
             content=params.mount_table_content,
             mode=0644
        )
      else:
        XmlConfig("core-site.xml",
                conf_dir=conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.config['configurationAttributes']['core-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644
        )
    else:
      Logger.warning('HDFS service not installed. Creating core-site.xml file.')
      XmlConfig("core-site.xml",
        conf_dir=conf_dir,
        configurations=params.core_site_property,
        configuration_attributes={},
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644
      )
def setup_ranger_audit_solr():
  """
  Prepare SolrCloud for Ranger audits: upload the config set to ZooKeeper,
  create the audit collection, and (when secured) set up JAAS, Solr roles
  and znode ACLs. Solr/ZooKeeper failures are logged, not raised.
  """
  import params

  if params.security_enabled and params.stack_supports_ranger_kerberos:

    if params.solr_jaas_file is not None:
      File(format("{solr_jaas_file}"),
        content=Template("ranger_solr_jaas_conf.j2"),
        owner=params.unix_user
      )
  try:
    check_znode()

    if params.stack_supports_ranger_solr_configs:
      Logger.info('Solr configrations supported,creating solr-configurations.')
      File(format("{ranger_solr_conf}/solrconfig.xml"),
        content=InlineTemplate(params.ranger_solr_config_content),
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644
      )

      solr_cloud_util.upload_configuration_to_zk(
        zookeeper_quorum = params.zookeeper_quorum,
        solr_znode = params.solr_znode,
        config_set = params.ranger_solr_config_set,
        config_set_dir = params.ranger_solr_conf,
        tmp_dir = params.tmp_dir,
        java64_home = params.java_home,
        solrconfig_content = InlineTemplate(params.ranger_solr_config_content),
        jaas_file=params.solr_jaas_file,
        retry=30, interval=5
      )
    else:
      Logger.info('Solr configrations not supported, skipping solr-configurations.')
      solr_cloud_util.upload_configuration_to_zk(
        zookeeper_quorum = params.zookeeper_quorum,
        solr_znode = params.solr_znode,
        config_set = params.ranger_solr_config_set,
        config_set_dir = params.ranger_solr_conf,
        tmp_dir = params.tmp_dir,
        java64_home = params.java_home,
        jaas_file=params.solr_jaas_file,
        retry=30, interval=5)

    # Grant Solr roles to the Ranger admin principal and every plugin's
    # service principal so they can write audits.
    if params.security_enabled and params.has_infra_solr \
      and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:

      solr_cloud_util.add_solr_roles(params.config,
                                     roles = [params.infra_solr_role_ranger_admin, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
                                     new_service_principals = [params.ranger_admin_jaas_principal])
      # (service, default principal) pairs used to look up each plugin's
      # audit principal. NOTE(review): 'yanr' looks like a typo for 'yarn' —
      # the lookup key 'ranger-yanr-audit' would never match, so the default
      # 'yarn' is always used; confirm against upstream before changing.
      service_default_principals_map = [('hdfs', 'nn'), ('hbase', 'hbase'), ('hive', 'hive'), ('kafka', 'kafka'), ('kms', 'rangerkms'),
                                    ('knox', 'knox'), ('nifi', 'nifi'), ('yanr', 'yarn')]
      service_principals = get_ranger_plugin_principals(service_default_principals_map)
      solr_cloud_util.add_solr_roles(params.config,
                                     roles = [params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
                                     new_service_principals = service_principals)

    solr_cloud_util.create_collection(
      zookeeper_quorum = params.zookeeper_quorum,
      solr_znode = params.solr_znode,
      collection = params.ranger_solr_collection_name,
      config_set = params.ranger_solr_config_set,
      java64_home = params.java_home,
      shards = params.ranger_solr_shards,
      replication_factor = int(params.replication_factor),
      jaas_file = params.solr_jaas_file)

    # Lock down the config-set and collection znodes for secured clusters.
    if params.security_enabled and params.has_infra_solr \
      and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:
      secure_znode(format('{solr_znode}/configs/{ranger_solr_config_set}'), params.solr_jaas_file)
      secure_znode(format('{solr_znode}/collections/{ranger_solr_collection_name}'), params.solr_jaas_file)
  except ExecutionFailed as execution_exception:
    Logger.error('Error when configuring Solr for Ranger, Kindly check Solr/Zookeeper services to be up and running:\n {0}'.format(execution_exception))
def setup_ranger_admin_passwd_change(username, user_password, user_default_password):
  """Change a Ranger user's password via Ranger's db_setup.py.

  Runs ``db_setup.py -changepassword`` as the Ranger unix user, retrying up
  to 3 times with a 5s pause.  The ``!p`` format suffix marks the passwords
  so Ambari masks them in logged command lines.
  """
  import params
  cmd = format("ambari-python-wrap {ranger_home}/db_setup.py -changepassword {username} {user_default_password!p} {user_password!p}")
  # JAVA_HOME / RANGER_ADMIN_HOME are required by db_setup.py.
  Execute(cmd, environment={'JAVA_HOME': params.java_home, 'RANGER_ADMIN_HOME': params.ranger_home}, user=params.unix_user, tries=3, try_sleep=5, logoutput=True)
# Retried because Zookeeper/Infra Solr may still be starting when Ranger
# configuration runs.
@retry(times=10, sleep_time=5, err_class=Fail)
def check_znode():
  """Verify that the configured Solr znode exists in Zookeeper."""
  import params
  solr_cloud_util.check_znode(
    zookeeper_quorum=params.zookeeper_quorum,
    solr_znode=params.solr_znode,
    java64_home=params.java_home)
def secure_znode(znode, jaasFile):
  """Restrict *znode* with SASL ACLs to the Ranger admin principal.

  znode    -- full Zookeeper path to secure.
  jaasFile -- JAAS configuration used to authenticate against Zookeeper.
  """
  import params
  solr_cloud_util.secure_znode(config=params.config, zookeeper_quorum=params.zookeeper_quorum,
                               solr_znode=znode,
                               jaas_file=jaasFile,
                               java64_home=params.java_home, sasl_users=[params.ranger_admin_jaas_principal])
def get_ranger_plugin_principals(services_defaults_tuple_list):
  """Return the audit JAAS client principal for each (service, default) pair.

  Reads configurations/ranger-<service>-audit/xasecure.audit.jaas.Client.option.principal,
  falling back to the supplied default when the property is absent.
  """
  import params
  if not services_defaults_tuple_list:
    raise Exception("Services - defaults map parameter is missing.")
  user_principals = []
  # NOTE: Ambari's format() resolves {service} from this frame's locals, so
  # the loop variable must keep the name 'service'.
  for service, default_value in services_defaults_tuple_list:
    principal = default(format("configurations/ranger-{service}-audit/xasecure.audit.jaas.Client.option.principal"), default_value)
    user_principals.append(principal)
  return user_principals
def setup_tagsync_ssl_configs():
  """Create Ranger tagsync SSL configuration and JCEKS credential stores.

  Writes ranger-policymgr-ssl.xml and atlas-tagsync-ssl.xml with keystore/
  truststore passwords replaced by '_' placeholders, storing the real
  passwords in JCEKS credential files instead, and fixes ownership of the
  hidden .crc files Hadoop writes next to each credential file.
  """
  import params
  Directory(params.security_store_path,
            cd_access="a",
            create_parents=True)
  Directory(params.tagsync_etc_path,
            cd_access="a",
            owner=params.unix_user,
            group=params.unix_group,
            mode=0775,
            create_parents=True)
  # remove plain-text password from xml configs
  ranger_tagsync_policymgr_ssl_copy = {}
  ranger_tagsync_policymgr_ssl_copy.update(params.config['configurations']['ranger-tagsync-policymgr-ssl'])
  for prop in params.ranger_tagsync_password_properties:
    if prop in ranger_tagsync_policymgr_ssl_copy:
      ranger_tagsync_policymgr_ssl_copy[prop] = "_"
  XmlConfig("ranger-policymgr-ssl.xml",
            conf_dir=params.ranger_tagsync_conf,
            configurations=ranger_tagsync_policymgr_ssl_copy,
            configuration_attributes=params.config['configurationAttributes']['ranger-tagsync-policymgr-ssl'],
            owner=params.unix_user,
            group=params.unix_group,
            mode=0644)
  # store the real keystore/truststore passwords in the tagsync JCEKS file
  ranger_credential_helper(params.tagsync_cred_lib, 'sslKeyStore', params.ranger_tagsync_keystore_password, params.ranger_tagsync_credential_file)
  ranger_credential_helper(params.tagsync_cred_lib, 'sslTrustStore', params.ranger_tagsync_truststore_password, params.ranger_tagsync_credential_file)
  File(params.ranger_tagsync_credential_file,
       owner = params.unix_user,
       group = params.unix_group,
       only_if = format("test -e {ranger_tagsync_credential_file}"),
       mode = 0640
  )
  update_dot_jceks_crc_ownership(credential_provider_path = params.ranger_tagsync_credential_file, user = params.unix_user, group = params.unix_group)
  # remove plain-text password from xml configs
  atlas_tagsync_ssl_copy = {}
  atlas_tagsync_ssl_copy.update(params.config['configurations']['atlas-tagsync-ssl'])
  for prop in params.ranger_tagsync_password_properties:
    if prop in atlas_tagsync_ssl_copy:
      atlas_tagsync_ssl_copy[prop] = "_"
  XmlConfig("atlas-tagsync-ssl.xml",
            conf_dir=params.ranger_tagsync_conf,
            configurations=atlas_tagsync_ssl_copy,
            configuration_attributes=params.config['configurationAttributes']['atlas-tagsync-ssl'],
            owner=params.unix_user,
            group=params.unix_group,
            mode=0644)
  # same treatment for the Atlas tagsync credential store
  ranger_credential_helper(params.tagsync_cred_lib, 'sslKeyStore', params.atlas_tagsync_keystore_password, params.atlas_tagsync_credential_file)
  ranger_credential_helper(params.tagsync_cred_lib, 'sslTrustStore', params.atlas_tagsync_truststore_password, params.atlas_tagsync_credential_file)
  File(params.atlas_tagsync_credential_file,
       owner = params.unix_user,
       group = params.unix_group,
       only_if = format("test -e {atlas_tagsync_credential_file}"),
       mode = 0640
  )
  update_dot_jceks_crc_ownership(credential_provider_path = params.atlas_tagsync_credential_file, user = params.unix_user, group = params.unix_group)
  Logger.info("Configuring tagsync-ssl configurations done successfully.")
def update_password_configs():
  """Mask plain-text DB passwords in Ranger's install.properties with '_'."""
  import params
  masked_props = dict.fromkeys(['db_root_password', 'db_password'], '_')
  if params.stack_supports_ranger_audit_db:
    masked_props['audit_db_password'] = '_'
  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
    properties = masked_props,
    owner = params.unix_user,
  )
def validate_user_password(password_property = None):
  """Validate Ranger user password(s) taken from ranger-env configuration.

  A password is valid when it is at least 8 characters long, contains at
  least one letter and one digit, and contains none of the characters
  double-quote, single-quote, backslash or backtick.

  password_property -- validate only this ranger-env property name; when
                       None, all known Ranger user password properties are
                       checked.
  Raises Fail listing every property whose password failed validation.
  """
  import params
  if password_property is None:
    # NOTE(review): several property names below look anonymized
    # ('<PASSWORD>'); confirm against the original ranger-env property list.
    ranger_password_properties = ['<PASSWORD>', '<PASSWORD>', 'rangerusersync_user_password', '<PASSWORD>ertagsync_user_password', 'keyadmin_user_password']
  else:
    ranger_password_properties = [password_property]
  invalid_properties = []
  # Iterate the property names directly instead of index arithmetic.
  for prop_name in ranger_password_properties:
    password = params.config['configurations']['ranger-env'][prop_name]
    # >= 8 chars with at least one digit and one letter, and none of " ' \ `
    if not re.search(r'^(?=.*[0-9])(?=.*[a-zA-Z]).{8,}$', password) or re.search('[\\\`"\']', password):
      invalid_properties.append(prop_name)
  if invalid_properties:
    raise Fail("Password validation failed for : " + ", ".join(invalid_properties) + ". Password should be minimum 8 characters with minimum one alphabet and one numeric. Unsupported special characters are \" ' \ `")
def update_dot_jceks_crc_ownership(credential_provider_path, user, group):
dot_jceks_crc_file_path = os.path.join(os.path.dirname(credential_provider_path), "." + os.path.basename(credential_provider_path) + ".crc")
File(dot_jceks_crc_file_path,
owner = user,
group = group,
only_if = format("test -e {dot_jceks_crc_file_path}"),
mode = 0640
)
| StarcoderdataPython |
329388 | <reponame>rohankumardubey/datasette-hashed-urls
from datasette.app import Datasette
import pytest
import sqlite_utils
@pytest.fixture
def db_files(tmpdir):
    """Create one mutable and one immutable SQLite DB, each with table 't'."""
    mutable_path = str(tmpdir / "this-is-mutable.db")
    immutable_path = str(tmpdir / "this-is-immutable.db")
    records = [{"id": 1}, {"id": 2}]
    for path in (mutable_path, immutable_path):
        sqlite_utils.Database(path)["t"].insert_all(records, pk="id")
    return mutable_path, immutable_path
@pytest.fixture
def ds(db_files):
    """Datasette with the first DB mounted mutable, the second immutable."""
    mutable_path, immutable_path = db_files
    return Datasette(files=[mutable_path], immutables=[immutable_path])
@pytest.mark.asyncio
async def test_immutable_database_renamed_on_startup(ds):
    """On startup the immutable DB name should gain a content-hash suffix."""
    await ds.invoke_startup()
    databases = (await ds.client.get("/-/databases.json")).json()
    db_names = [entry["name"] for entry in databases]
    assert len(db_names) == 2
    assert "this-is-mutable" in db_names
    hashed_name = [n for n in db_names if n != "this-is-mutable"][0]
    assert hashed_name.startswith("this-is-immutable-")
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "path,should_redirect",
    (
        ("/", False),
        ("/this-is-mutable", False),
        ("/this-is-mutable/t", False),
        ("/this-is-mutable/t/1", False),
        ("/this-is-immutable", True),
        ("/this-is-immutable/t", True),
        ("/this-is-immutable/t?id=1", True),
        ("/this-is-immutable/t/1", True),
    ),
)
async def test_paths_with_no_hash_redirect(ds, path, should_redirect):
    """Un-hashed immutable-DB paths redirect (302) to the hashed path,
    preserving the rest of the path/query; mutable-DB paths serve directly."""
    await ds.invoke_startup()
    immutable_hash = ds._hashed_url_databases["this-is-immutable"]
    response = await ds.client.get(path)
    # Un-hashed paths must not be cached long-term: no cache-control header,
    # or the short max-age=5 the plugin sets.
    assert (
        "cache-control" not in response.headers
        or response.headers["cache-control"] == "max-age=5"
    )
    if should_redirect:
        assert response.status_code == 302
        expected_path = path.replace(
            "/this-is-immutable", "/this-is-immutable-{}".format(immutable_hash)
        )
        assert response.headers["location"] == expected_path
    else:
        assert response.status_code == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("path_suffix", ("", "/t", "/t?id=1", "/t/1"))
@pytest.mark.parametrize("max_age", (None, 3600))
async def test_paths_with_hash_have_cache_header(db_files, path_suffix, max_age):
    """Hashed paths get a far-future public cache header: the plugin's
    configured max_age, or one year (31536000s) by default."""
    metadata = {}
    if max_age:
        metadata["plugins"] = {"datasette-hashed-urls": {"max_age": max_age}}
    ds = Datasette(files=[db_files[0]], immutables=[db_files[1]], metadata=metadata)
    await ds.invoke_startup()
    immutable_hash = ds._hashed_url_databases["this-is-immutable"]
    path = "/this-is-immutable-{}{}".format(immutable_hash, path_suffix)
    response = await ds.client.get(path)
    assert response.status_code == 200
    cache_control = response.headers["cache-control"]
    expected = "max-age={}, public".format(max_age or 31536000)
    assert cache_control == expected
| StarcoderdataPython |
9658835 | <gh_stars>1-10
import rest
import vcf
import json
from operator import itemgetter
import pprint
import requests
# Note that this is the current as of v77 with 2 included for backwards compatibility (VEP <= 75)
# Ensembl VEP consequence terms ordered from most to least severe; the
# trailing "" sorts unknown/empty consequences last.
csq_order = ["transcript_ablation",
"splice_donor_variant",
"splice_acceptor_variant",
"stop_gained",
"frameshift_variant",
"stop_lost",
"initiator_codon_variant",
"transcript_amplification",
"inframe_insertion",
"inframe_deletion",
"missense_variant",
"splice_region_variant",
"incomplete_terminal_codon_variant",
"stop_retained_variant",
"synonymous_variant",
"coding_sequence_variant",
"mature_miRNA_variant",
"5_prime_UTR_variant",
"3_prime_UTR_variant",
"non_coding_transcript_exon_variant",
"non_coding_exon_variant",  # deprecated
"intron_variant",
"NMD_transcript_variant",
"non_coding_transcript_variant",
"nc_transcript_variant",  # deprecated
"upstream_gene_variant",
"downstream_gene_variant",
"TFBS_ablation",
"TFBS_amplification",
"TF_binding_site_variant",
"regulatory_region_ablation",
"regulatory_region_amplification",
"regulatory_region_variant",
"feature_elongation",
"feature_truncation",
"intergenic_variant",
"start_lost",
'protein_altering_variant',
""]
# consequence term -> severity rank (lower = more severe)
csq_order_dict = dict(zip(csq_order, range(len(csq_order))))
# severity rank -> consequence term
rev_csq_order_dict = dict(zip(range(len(csq_order)), csq_order))
def compare_two_consequences(csq1, csq2):
    # cmp-style comparator for (possibly '&'-joined) consequence strings:
    # negative when csq1 is more severe than csq2, 0 when equal, 1 otherwise.
    # NOTE(review): this definition is shadowed by an identical duplicate
    # further down in this module; one of the two should be removed.
    if csq_order_dict[worst_csq_from_csq(csq1)] < csq_order_dict[worst_csq_from_csq(csq2)]:
        return -1
    elif csq_order_dict[worst_csq_from_csq(csq1)] == csq_order_dict[worst_csq_from_csq(csq2)]:
        return 0
    return 1
def get_protein_hgvs(csq):
    """
    Takes consequence dictionary, returns proper variant formatting for synonymous variants
    """
    # '%3D' is the URL-encoded '=' that VEP emits for synonymous HGVSp.
    if '%3D' in csq['HGVSp']:
        try:
            # NOTE(review): protein_letters_1to3 is not defined or imported in
            # this module as far as visible here, so this branch will raise and
            # fall through to the plain HGVSp below -- confirm the intended
            # import (e.g. Bio.SeqUtils.IUPACData.protein_letters_1to3).
            amino_acids = ''.join([protein_letters_1to3[x] for x in csq['Amino_acids']])
            return "p." + amino_acids + csq['Protein_position'] + amino_acids
        except Exception, e:
            print 'Could not create HGVS for: %s' % csq
    return csq['HGVSp'].split(':')[-1]
def worst_csq_index(csq_list):
    """Return the severity rank of the most severe consequence in *csq_list*.

    Lower rank means more severe (see csq_order).  For '&'-joined strings
    call worst_csq_index(csqs.split('&')).
    """
    return min(csq_order_dict[term] for term in csq_list)
def worst_csq_from_list(csq_list):
    """Return the most severe consequence term in *csq_list*.

    e.g. ['frameshift_variant', 'missense_variant'] -> 'frameshift_variant'.
    For '&'-joined strings call worst_csq_from_list(csqs.split('&')).
    """
    best_rank = min(csq_order_dict[term] for term in csq_list)
    return rev_csq_order_dict[best_rank]
def worst_csq_from_csq(csq):
    """Return the most severe single term from a possibly '&'-joined
    consequence string, e.g. 'non_coding_exon_variant&nc_transcript_variant'
    -> 'non_coding_exon_variant'."""
    terms = csq.split('&')
    return rev_csq_order_dict[worst_csq_index(terms)]
def order_vep_by_csq(annotation_list):
    # Sort VEP transcript annotations most-severe-first (Python 2 cmp-sort)
    # and tag each annotation with its single worst term.
    # NOTE(review): 'consequence_terms' in VEP REST output is a *list* of
    # terms, while compare_two_consequences/worst_csq_from_csq expect a
    # '&'-joined string -- verify inputs are pre-joined or this will raise.
    print('ANNOTATION LIST',annotation_list)
    output = sorted(annotation_list, cmp=lambda x, y: compare_two_consequences(x, y), key=itemgetter('consequence_terms'))
    for ann in output:
        ann['major_consequence'] = worst_csq_from_csq(ann['consequence_terms'])
    return output
def compare_two_consequences(csq1, csq2):
    # NOTE(review): exact duplicate of compare_two_consequences defined
    # earlier in this module; this rebinding makes the earlier definition
    # dead code.  Remove one of the two.
    if csq_order_dict[worst_csq_from_csq(csq1)] < csq_order_dict[worst_csq_from_csq(csq2)]:
        return -1
    elif csq_order_dict[worst_csq_from_csq(csq1)] == csq_order_dict[worst_csq_from_csq(csq2)]:
        return 0
    return 1
def get_variants_by_rsid(db, rsid):
    """Return a list of Variant objects for a dbSNP rsid, or None when the
    rsid is not of the form 'rs<digits>'."""
    if not rsid.startswith('rs'):
        return None
    try:
        int(rsid.lstrip('rs'))
    except Exception, e:
        # not 'rs' followed by an integer
        return None
    variants = list([Variant(data=v) for v in db.variants.find({'rsid': rsid}, fields={'_id': False})])
    #add_consequence_to_variants(variants)
    return variants
class Variant(object):
def __init__(self, variant_id=None, db=None,data=None):
if variant_id is None: variant_id=data['variant_id']
self.variant_id=str(variant_id).strip().replace('_','-')
self.chrom, self.pos, self.ref, self.alt = variant_id.split('-')
q=vcf.vcf_query(variant_str=self.variant_id,)
if q is None: raise Exception('NOT IN VCF',self.variant_id)
self.__dict__.update(q)
if data: self.__dict__.update(data)
if db:
Variant.db=db
data=Variant.db.variants.find_one({'variant_id':self.variant_id},fields={'_id':False})
if not data:
print('NOT IN DB', self.variant_id, 'WILL INSERT')
self.save()
#self.xpos = get_xpos(self.chrom, self.pos)
else:
self.__dict__.update(data)
def __getattribute__(self, key):
"Emulate type_getattro() in Objects/typeobject.c"
v = object.__getattribute__(self, key)
if hasattr(v, '__get__'): return v.__get__(None, self)
return v
def save(self):
print('writing', self.variant_id, 'to database')
return Variant.db.variants.update({'variant_id':self.variant_id},self.__dict__,upsert=True)
@property
def status(self):
return 'M'
@property
def HPO(self):
return []
@property
def FILTER(self):
return self.filter
@property
def filter(self):
self.__dict__['filter']=self.__dict_filter['FILTER']
return self.__dict__['filter']
@property
def hom_samples(self):
if 'hom_samples' in self.__dict__: return self.__dict__['hom_samples']
q=vcf.vcf_query(variant_str=self.variant_id)
self.__dict__.update(q)
print(self.save())
return self.__dict__['hom_samples']
@property
def het_samples(self):
if 'het_samples' in self.__dict__: return self.__dict__['het_samples']
q=vcf.vcf_query(variant_str=self.variant_id)
self.__dict__.update(q)
print(self.save())
return self.__dict__['het_samples']
def to_JSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def get_minimal_representation(self):
"""
Get the minimal representation of a variant, based on the ref + alt alleles in a VCF
This is used to make sure that multiallelic variants in different datasets,
with different combinations of alternate alleles, can always be matched directly.
Note that chromosome is ignored here - in xbrowse, we'll probably be dealing with 1D coordinates
Args:
pos (int): genomic position in a chromosome (1-based)
ref (str): ref allele string
alt (str): alt allele string
Returns:
tuple: (pos, ref, alt) of remapped coordinate
"""
pos = int(self.pos)
# If it's a simple SNV, don't remap anything
if len(self.ref) == 1 and len(self.alt) == 1: return self.pos, self.ref, self.alt
# strip off identical suffixes
while(self.alt[-1] == self.ref[-1] and min(len(self.alt),len(self.ref)) > 1):
alt = alt[:-1]
ref = ref[:-1]
# strip off identical prefixes and increment position
while(self.alt[0] == self.ref[0] and min(len(self.alt),len(self.ref)) > 1):
alt = self.alt[1:]
self.ref = self.ref[1:]
self.pos += 1
return self.pos, self.ref, self.alt
def add_consequence_to_variant(self):
worst_csq = worst_csq_with_vep(variant['vep_annotations'])
if worst_csq is None: return
variant['major_consequence'] = worst_csq['major_consequence']
variant['HGVSp'] = get_protein_hgvs(worst_csq)
variant['HGVSc'] = get_transcript_hgvs(worst_csq)
variant['HGVS'] = get_proper_hgvs(worst_csq)
variant['CANONICAL'] = worst_csq['CANONICAL']
variant['flags'] = get_flags_from_variant(variant)
if csq_order_dict[variant['major_consequence']] <= csq_order_dict["frameshift_variant"]:
variant['category'] = 'lof_variant'
elif csq_order_dict[variant['major_consequence']] <= csq_order_dict["missense_variant"]:
# Should be noted that this grabs inframe deletion, etc.
variant['category'] = 'missense_variant'
elif csq_order_dict[variant['major_consequence']] <= csq_order_dict["synonymous_variant"]:
variant['category'] = 'synonymous_variant'
else:
variant['category'] = 'other_variant'
@property
def data(self): return self.__dict__
@property
def consequence(self):
"""
Return the most severe consequence
"""
if 'consequence' in self.__dict__: return self.__dict__['consequence']
if 'major_consequence' in self.__dict__: return self.__dict__['major_consequence']
if 'most_severe_consequence' in self.__dict__: return self.__dict__['most_severe_consequence']
url='http://grch37.rest.ensembl.org/vep/human/hgvs/%s?content-type=application/json' % self.hgvs.replace('chr','')
r=requests.get(url)
print(url)
d=r.json()
#if not isinstance(d,list) and len(d) < 1: return None
if 'error' in d: return None
d=d[0]
print(d['most_severe_consequence'])
self.__dict__['consequence']=d['most_severe_consequence']
print(self.save())
return self.__dict__['consequence']
@property
def transcripts(self):
if 'transcripts' in self.__dict__: return self.__dict__['transcripts']
url='http://grch37.rest.ensembl.org/vep/human/hgvs/%s?content-type=application/json' % self.hgvs.replace('chr','')
r=requests.get(url)
print(url)
d=r.json()
if 'error' in d: return None
d=d[0]
self.__dict__['transcripts']=list(set([csq['transcript_id'] for csq in d.transcript_consequences]))
self.__dict__['genes']=list(set([csq['gene_id'] for csq in d.transcript_consequences]))
print(self.save())
if not isinstance(d,list) and len(d) < 1: return None
return self.__dict__['transcripts']
@property
def genes(self):
if 'genes' in self.__dict__: return list(set(self.__dict__['genes']))
url='http://grch37.rest.ensembl.org/vep/human/hgvs/%s?content-type=application/json' % self.hgvs.replace('chr','')
r=requests.get(url)
print(url)
d=r.json()[0]
self.__dict__['genes']=list(set([csq['gene_id'] for csq in d['transcript_consequences']]))
print(self.save())
return self.__dict__['genes']
@property
def p_hgvs(self):
"""
Takes consequence dictionary, returns proper variant formatting for synonymous variants
"""
if '%3D' in csq['HGVSp']:
try:
amino_acids = ''.join([protein_letters_1to3[x] for x in csq['Amino_acids']])
return "p." + amino_acids + csq['Protein_position'] + amino_acids
except Exception, e:
print 'Could not create HGVS for: %s' % csq
return csq['HGVSp'].split(':')[-1]
@property
def snpeff(self):
if 'snpeff' in self.__dict__: return self.__dict__['snpeff']
self.__dict__['snpeff'] = rest.mv.getvariant('chr%s:g.%s%s>%s'%(self.chrom,self.pos,self.ref,self.alt,),fields='snpeff')
return self.__dict__['snpeff']
@property
def cadd(self):
if 'cadd' in self.__dict__: return self.__dict__['cadd'].get('phred',None)
cadd = rest.mv.getvariant('chr%s:g.%s%s>%s'%(self.chrom,self.pos,self.ref,self.alt,),fields='cadd')
if cadd and 'cadd' in cadd:
self.__dict__['cadd']=cadd['cadd']
else:
self.__dict__['cadd']={}
print(self.save())
return self.__dict__['cadd'].get('phred',None)
@property
def vep_annotations(self):
if 'vep_annotations' in self.__dict__: return self.__dict__['vep_annotations']
self.__dict__['vep_annotations']=rest.vep_anno(self.chrom, self.pos, self.ref, self.alt)
print('number of transcripts:', len(self.__dict__['vep_annotations']))
self.__dict__['transcript_consequences']=self.__dict__['vep_annotations'][0]['transcript_consequences']
self.__dict__['gene_name_upper']=self.__dict__['transcript_consequences'][0]['gene_symbol']
print('gene_symbol', self.__dict__['gene_name_upper'])
#print(self.__dict__['vep_annotations'])
#self.__dict__['vep_annotations'] = order_vep_by_csq(self.__dict__['vep_annotations'])
#self.ordered_csqs = [x['major_consequence'] for x in self.__dict__['vep_annotations']]
# Close but not quite there
#ordered_csqs = reduce(lambda x, y: ','.join([x, y]) if y not in x else x, ordered_csqs, '').split(',')
#consequences = defaultdict(lambda: defaultdict(list))
#for annotation in self.data['vep_annotations']:
#annotation['HGVS'] = get_proper_hgvs(annotation)
#consequences[annotation['major_consequence']][annotation['Gene']].append(annotation)
return self.__dict__['vep_annotations']
@property
def transcript_consequences(self):
if 'transcript_consequences' in self.__dict__: return self.__dict__['transcript_consequences']
#print(self.vep_annotations)
return self.__dict__['transcript_consequences']
@vep_annotations.setter
def vep_annotations(self,value):
self.__dict__['vep_annotations']=value
@property
def in_exac(self):
if 'EXAC' in self.__dict__ and len(self.__dict__['EXAC'])>0:
self.__dict__['in_exac']=True
else:
self.__dict__['in_exac']=False
return self.__dict__['in_exac']
@property
def EXAC(self):
if 'EXAC' in self.__dict__:
self.__dict__['EXAC']['total_homs']=self.__dict__['EXAC']['AC_Hom']/2
return
if 'EXAC_freq' in self.__dict__: return self.__dict__['EXAC_freq']
print('EXAC')
self.__dict__['EXAC_freq']=rest.exac_anno(self.data['variant_id'],update=False)
if len(self.__dict__['EXAC_freq'])>0:
self.__dict__['in_exac']=True
else:
self.__dict__['in_exac']=False
#print(self.save())
return self.__dict__['EXAC_freq']
@EXAC.setter
def EXAC(self,value):
self.__dict__['ExAC_freq']=value
return self.__dict__['EXAC_freq']
@property
def ExAC_freq(self):
if 'ExAC_freq' in self.__dict__ and 'total_homs' in self.__dict__['ExAC_freq']: return self.__dict__['ExAC_freq']
self.__dict__['ExAC_freq']=rest.exac_anno(self.variant_id,update=False)
print(self.__dict__['ExAC_freq'].keys())
#print(self.save())
return self.__dict__['ExAC_freq']
@property
def WT_COUNT(self):
if 'WT_COUNT' in self.__dict__: return self.__dict__['WT_COUNT']
q=vcf.vcf_query(variant_str=self.variant_id)
if q is None: raise Exception('ERROR',self.variant_id)
self.__dict__.update(q)
print(self.save())
return self.__dict__['WT_COUNT']
@property
def HOM_COUNT(self):
if 'HOM_COUNT' in self.__dict__: return self.__dict__['HOM_COUNT']
q=vcf.vcf_query(variant_str=self.variant_id)
if q is None: raise Exception('ERROR',self.variant_id)
self.__dict__.update(q)
print(self.save())
return self.__dict__['HOM_COUNT']
@property
def allele_num(self):
if 'allele_num' in self.__dict__: return self.__dict__['allele_num']
q=vcf.vcf_query(variant_str=self.variant_id)
if q is None: raise Exception('ERROR',self.variant_id)
self.__dict__.update(q)
print(self.save())
return self.__dict__['allele_num']
def get_flags_from_variant(self):
flags = []
if 'mnps' in variant:
flags.append('MNP')
#lof_annotations = [x for x in variant['vep_annotations'] if x['LoF'] != '']
lof_annotations = []
if not len(lof_annotations): return flags
if all([x['LoF'] == 'LC' for x in lof_annotations]):
flags.append('LC LoF')
if all([x['LoF_flags'] != '' for x in lof_annotations]):
flags.append('LoF flag')
return flags
@property
def HUGO(self):
if 'gene_name_upper' in self.__dict__: return self.__dict__['gene_name_upper']
if 'canonical_gene_name_upper' in self.__dict__: return self.__dict__['canonical_gene_name_upper'][0]
self.vep_annotations
#print(self.save())
return self.__dict__['gene_name_upper']
@property
def description(self):
if 'description' in self.__dict__: return self.__dict__['description']
g=Variant.db.genes.find_one({'gene_name_upper':self.HUGO})
self.__dict__['description']=g.get('full_gene_name','')
return self.__dict__['description']
@property
def OMIM(self):
if 'OMIM' in self.__dict__: return self.__dict__['OMIM']
#self.__dict__['OMIM']=self.vep_annotations[0]['SYMBOL']
#print(self.save())
#return self.__dict__['OMIM']
return ''
@property
def p_change(self):
if 'p_change' in self.__dict__: return self.__dict__['p_change']
if 'HGVSp' in self.__dict__: return self.__dict__['HGVSp']
#if 'canonical_hgvsp' in self__dict__: return self.__dict__['canonical_hgvsp']
self.__dict__['p_change']=dict()
#self.__dict__['p_change']=
#trans['hgvsp'].split(':')[1]
self.__dict__['p_change']['exon']=''
self.__dict__['p_change']['gene_id']=self.genes[0]
self.__dict__['p_change']['transcript_id']=self.canonical_transcript[0]
self.__dict__['p_change']['hgvs_c']=self.canonical_hgvsc[0]
self.__dict__['p_change']['hgvs_p']=self.canonical_hgvsp[0]
return self.__dict__['p_change']
# get db
def stuff():
if 'consequence' in self.__dict__ and len(self.__dict__['consequence']): return self.__dict__['consequence']
pp = pprint.PrettyPrinter(indent=10)
v['Consequence']=[transcript['consequence_terms'][0] for transcript in v['vep_annotations']['transcript_consequences']]
v['vep_annotations']['Consequence']=[csq for csq in v['Consequence']]
print ('CSQ')
print( v['vep_annotations']['Consequence'] )
worst_csq = worst_csq_with_vep(variant['vep_annotations'])
if worst_csq is None: return
variant['major_consequence'] = worst_csq['major_consequence']
variant['HGVSp'] = get_protein_hgvs(worst_csq)
variant['HGVSc'] = get_transcript_hgvs(worst_csq)
variant['HGVS'] = get_proper_hgvs(worst_csq)
variant['CANONICAL'] = worst_csq['CANONICAL']
variant['flags'] = get_flags_from_variant(variant)
if csq_order_dict[variant['major_consequence']] <= csq_order_dict["frameshift_variant"]:
variant['category'] = 'lof_variant'
elif csq_order_dict[variant['major_consequence']] <= csq_order_dict["missense_variant"]:
# Should be noted that this grabs inframe deletion, etc.
variant['category'] = 'missense_variant'
elif csq_order_dict[variant['major_consequence']] <= csq_order_dict["synonymous_variant"]:
variant['category'] = 'synonymous_variant'
else:
variant['category'] = 'other_variant'
def worst_csq_with_vep(self, annotation_list):
"""
Takes list of VEP annotations [{'Consequence': 'frameshift', Feature: 'ENST'}, ...]
Returns most severe annotation (as full VEP annotation [{'Consequence': 'frameshift', Feature: 'ENST'}])
Also tacks on worst consequence for that annotation (i.e. worst_csq_from_csq)
:param annotation_list:
:return worst_annotation:
"""
if len(annotation_list) == 0: return None
worst = annotation_list[0]
for annotation in annotation_list:
if compare_two_consequences(annotation['Consequence'], worst['Consequence']) < 0:
worst = annotation
elif compare_two_consequences(annotation['Consequence'], worst['Consequence']) == 0 and annotation['CANONICAL'] == 'YES':
worst = annotation
worst['major_consequence'] = worst_csq_from_csq(worst['Consequence'])
return worst
def test():
    # NOTE(review): scratch/dead code.  References many names that are not
    # defined or imported in this module as far as visible here (pymongo, os,
    # sys, csv, re, mygene, Counter, filename, auth, hpo, lookups,
    # PhenotipsClient, find_item); it will raise NameError if called.
    # Kept for reference only -- consider moving to a standalone script.
    client = pymongo.MongoClient()
    hpo_db = client['hpo']
    db = client['uclex-old']
    patient_db = client['patients']
    patient_id=os.path.basename(filename.replace('.csv',''))
    parent_dir=os.path.basename(os.path.abspath(os.path.join(filename, os.pardir)))
    # Add patient to phenotips if it does not already exist
    pheno=PhenotipsClient()
    patient={u'features':[], 'clinicalStatus': {u'clinicalStatus': u'affected'}, u'ethnicity': {u'maternal_ethnicity': [], u'paternal_ethnicity': []}, u'family_history': {}, u'disorders': [], u'life_status': u'alive', u'reporter': u'', u'genes': [], u'prenatal_perinatal_phenotype': {u'prenatal_phenotype': [], u'negative_prenatal_phenotype': []}, u'prenatal_perinatal_history': {u'twinNumber': u''}, u'sex': u'U', u'solved': {u'status': u'unsolved'}}
    eid=patient_id
    p=pheno.get_patient(auth=auth,eid=eid)
    print p
    if p is None:
        print 'MISSING', eid
        patient['features']=[ {'id':h,'type':'phenotype','observed':'yes'} for h in hpo.strip().split(',')]
        patient['external_id']=eid
        print 'CREATING', eid
        print pheno.create_patient(auth,patient)
    if not patient_db.patients.find_one({'external_id':eid}):
        # update database
        p=pheno.get_patient(eid=eid,auth=auth)
        print 'UPDATE'
        print patient_db.patients.update({'external_id':eid},{'$set':p},w=0,upsert=True)
    patient_hpo_terms=lookups.get_patient_hpo(hpo_db, patient_db, patient_id, ancestors=False)
    patient_hpo_terms = dict([(hpo['id'][0],{'id':hpo['id'][0],'name':hpo['name'][0], 'is_a':hpo.get('is_a',[])}) for hpo in patient_hpo_terms])
    patient_hpo_ids=patient_hpo_terms.keys()
    # get hpo terms from patient
    print 'processing rare variants of %s' % patient_id
    print 'patient hpo terms', patient_hpo_terms
    variants_reader=csv.DictReader(open(filename))
    #for var in ['homozygous_variants', 'compound_hets', 'rare_variants']:
    VARIANTS=[]
    for var in variants_reader:
        # look up variant on myvariant
        chrom, pos, ref, alt, = var['signature'].split('_')
        #for k in var.keys(): print k, ':', var[k]
        #break
        variant=lookups.vcf_query(chrom, pos, ref, alt, individual=patient_id, limit=100)
        if variant is None:
            sys.stderr.write( '\033[01;31m' + var['signature'] + ' not found!' + '\033[m' + '\n' )
            with open("notfound.txt", "a") as myfile: myfile.write(var['signature'])
            continue
        print var['signature'], '==>', variant['POS'], variant['REF'], variant['ALT']
        #variant['site_quality'] = variant['QUAL']
        #variant['filter'] = variant['FILTER']
        #pprint(variant)
        #variant['vep']=vep_anno(str(chrom), str(pos), ref, alt,)
        #variant['my_variant']=mv.getvariant(variant['hgvs'],fields='all')
        #variant['rvs']=rvs_anno(chrom,pos,ref,alt)
        #print(variant['exac'])
        for k in var: variant[k]=var[k]
        #print vep_anno(chrom, pos, ref, alt)
        VAR=dict()
        # classify by this patient's genotype
        if patient_id in variant['hom_samples']: VAR['variant_type']='rare_homozygous'
        elif patient_id in variant['het_samples']: VAR['variant_type']='rare_het'
        else:
            print variant['het_samples']
            print variant['hom_samples']
            print patient_id, 'not in hom or het samples'
            VAR['variant_type']='rare_het'
            #raise 'hell'
        VAR['variant_id']=variant['variant_id']
        VAR['allele_freq']=[ variant['allele_freq'], str(variant['allele_count'])+'/'+str(variant['allele_num']), variant['MISS_COUNT']]
        print(VAR['allele_freq'])
        #rvs=[impact for impact in variant['rvs']['impact'] if impact['alt']==alt]
        #if len(rvs)==1:
        VAR['HUGO']=re.sub('\(.*\)','',variant['HUGO'])
        VAR['HUGO']=re.sub(',.*','',VAR['HUGO'])
        VAR['ExAC_freq']=variant['exac']
        VAR['Gene']=re.sub('\(.*\)','',variant['Gene'])
        # try to recover a gene symbol when the annotation has none
        if VAR['HUGO']=='NA':
            gene_id=VAR['Gene'].split(',')[0]
            g=db.genes.find_one({'gene_id':gene_id})
            if not g and 'vep_annotations' in variant['exac']:
                VAR['HUGO']=variant['exac']['vep_annotations'][0]['SYMBOL']
            else:
                #g=mg.query(gene_id, scopes='symbol', fields='ensembl.gene', species='human')
                g=rest.ensembl_xrefs(gene_id)
                if 'error' in g:
                    # unnamed gene
                    VAR['HUGO']=''
                else:
                    print gene_id, g
                    VAR['HUGO']=find_item(g,'display_id')
        # get annotation from CSV file
        if variant['splicing']=='FALSE':
            if not variant['AAChange']: variant['AAChange']=re.compile('.*\((.*)\)').search(variant['Gene']).group(1)
            VAR['p_change']=dict(zip(['gene_id','transcript_id','exon','hgvs_c','hgvs_p'],variant['AAChange'].split(':')))
            if 'hgvs_p' in VAR['p_change']: VAR['p_change']['hgvs_p']=re.sub(',.*','',VAR['p_change']['hgvs_p'])
        else:
            VAR['p_change']={}
        VAR['consequence']=variant['ExonicFunc']
        VAR['filter']=variant['FILTER']
        VAR['OMIM']=variant.get('Omim','').split(';')[0]
        VAR['lof']=bool(variant['lof'])
        VAR['description']=variant['Description']
        # record loss-of-function variants per gene in the 'lof' collection
        if VAR['lof']:
            print 'lof'
            print VAR['HUGO']
            g=db.genes.find_one({'gene_name_upper':VAR['HUGO'].upper()})
            if g:
                gene_id=g['gene_id']
                print gene_id
            else:
                mg=mygene.MyGeneInfo()
                g=mg.query(VAR['HUGO'], scopes='symbol', fields='ensembl.gene', species='human')
                if g and 'hits' in g and 'ensembl' in g['hits'][0]:
                    print g
                    # {u'hits': [{u'_id': u'643669', u'ensembl': [{u'gene': u'ENSG00000262484'}, {u'gene': u'ENSG00000283099'}]}], u'total': 1, u'max_score': 443.8707, u'took': 2}
                    gene_id=find_item(g,'gene')
                    #gene_id=[x for _, x, in g['hits'][0]['ensembl'][0].iteritems()]
                    print gene_id
                    #raise 'hell'
                else:
                    e=rest.ensembl_region('{}:{}-{}'.format(chrom,pos,pos))
                    gene_id=e[0]['gene_id']
                    print gene_id
            lof=db.lof.find_one({'gene_id':gene_id})
            if lof:
                lof['patient_ids'][patient_id]=list(set(lof['patient_ids'].get(patient_id,[])+[VAR['variant_id']]))
                print db.lof.update({'gene_id':gene_id}, {'$set':{'patient_ids':lof['patient_ids']}})
            else:
                print db.lof.insert({'gene_id':gene_id,'patient_ids':{patient_id:[VAR['variant_id']]}})
        #hpo_terms=hpo_db.gene_hpo.find_one({'gene_name':VAR['HUGO']},{'hpo_terms':1,'_id':1})
        #gene_hpo_ids=hpo_db.gene_hpo.find_one({'gene_name':'ABCA4'},{'hpo_terms':1,'_id':0}).get('hpo_terms',[])
        #VAR['HUGO']='ABCA4'
        gene_hpo_terms=lookups.get_gene_hpo(hpo_db,VAR['HUGO'],False)
        gene_hpo_terms = dict([(hpo['id'][0],{'id':hpo['id'][0],'name':hpo['name'][0], 'is_a':hpo.get('is_a',[])}) for hpo in gene_hpo_terms])
        gene_hpo_ids=gene_hpo_terms.keys()
        #lookups.get_gene_hpo(hpo_db,gene_name,dot=False)
        #print 'gene', gene_hpo_ids
        #print 'patient', patient_hpo_ids
        common_hpo_ids=list(set(gene_hpo_ids) & set(patient_hpo_ids))
        # simplify hpo terms
        common_hpo_ids=lookups.hpo_minimum_set(hpo_db, common_hpo_ids)
        common_hpo_ids=[{'hpo_id':k,'hpo_term':patient_hpo_terms[k]['name']} for k in common_hpo_ids]
        print VAR['HUGO'],common_hpo_ids
        VAR['HPO']=common_hpo_ids
        VARIANTS.append(VAR)
    # determine count per gene
    gene_counter=Counter([var['HUGO'] for var in VARIANTS])
    for var in VARIANTS: var['gene_count']=gene_counter[var['HUGO']]
    print('gene_counter', gene_counter)
    print('rare_variants',len(VARIANTS))
    print(db.patients.update({'external_id':patient_id}, {'$set':{'rare_variants':VARIANTS}}, upsert=True))
    print(db.patients.update({'external_id':patient_id}, {'$set':{'rare_variants_count':len(VARIANTS)}}, upsert=True))
    COMPOUND_HETS=[var for var in VARIANTS if var['gene_count']>1]
    print('compound_hets',len(COMPOUND_HETS))
    print(db.patients.update({'external_id':patient_id}, {'$set':{'compound_hets':COMPOUND_HETS}}, upsert=True))
    print(db.patients.update({'external_id':patient_id}, {'$set':{'compound_hets_count':len(COMPOUND_HETS)}}, upsert=True))
    HOMOZYGOUS_VARIANTS=[var for var in VARIANTS if var['variant_type']=='rare_homozygous']
    print('rare_homozygous',len(HOMOZYGOUS_VARIANTS))
    print(db.patients.update({'external_id':patient_id}, {'$set':{'homozygous_variants':HOMOZYGOUS_VARIANTS}}, upsert=True))
    print(db.patients.update({'external_id':patient_id}, {'$set':{'homozygous_variants_count':len(HOMOZYGOUS_VARIANTS)}}, upsert=True))
| StarcoderdataPython |
188298 | <filename>snaps/models.py
from django.db import models
import datetime as dt
# Create your models here.
class Location(models.Model):
    """A place a photo was taken; referenced by Image via a foreign key."""
    # Human-readable location name (e.g. a city or venue).
    location = models.CharField(max_length=50)
    def __str__(self):
        return self.location
    class Meta:
        # Default queryset ordering: alphabetical by location name.
        ordering = ['location']
class Category(models.Model):
    """A photo category; referenced by Image via a foreign key."""
    # Human-readable category name.
    category = models.CharField(max_length=50)
    def __str__(self):
        return self.category
class Image(models.Model):
    """A photo and its metadata, linked to a Location and a Category."""
    image = models.ImageField(upload_to='media/')
    image_name = models.CharField(max_length=50)
    image_description = models.TextField()
    # NOTE(review): positional ForeignKey without on_delete implies Django < 2.0;
    # confirm the project's Django version before adding on_delete here.
    location = models.ForeignKey(Location)
    category = models.ForeignKey(Category)

    @classmethod
    def all_snaps(cls):
        """Return images published today.

        NOTE(review): the model declares no ``pub_date`` field, so this
        filter (``pub_date_date``) raises FieldError at runtime.  Presumably
        a ``pub_date = DateTimeField(auto_now_add=True)`` field is missing
        and the lookup should be ``pub_date__date`` — confirm intent before
        relying on this method.
        """
        today = dt.date.today()
        snaps = cls.objects.filter(pub_date_date=today)
        return snaps

    def save_image(self):
        """Persist this image record to the database."""
        self.save()

    def delete_image(self):
        """Remove this image record from the database."""
        self.delete()

    def update_image(self, **kwargs):
        """Update this record's fields (``field=value`` keyword args).

        Fixed: model instances have no ``update()`` method (it exists only
        on querysets), so the original ``self.update()`` always raised
        AttributeError.  Calling with no kwargs is a harmless no-op, which
        keeps the original zero-argument call signature working.
        """
        type(self).objects.filter(pk=self.pk).update(**kwargs)

    @classmethod
    def search_by_image(cls, search_term):
        """Return images whose category name contains ``search_term``."""
        snaps = cls.objects.filter(category__category__icontains=search_term)
        return snaps

    @classmethod
    def filter_by_location(cls, filter_term):
        """Return images whose location name contains ``filter_term``.

        Fixed: the ``@classmethod`` decorator was missing even though the
        method takes ``cls`` (mirroring ``search_by_image``), so calling it
        on the class bound the search term to ``cls`` and crashed.
        """
        snaps = cls.objects.filter(location__location__icontains=filter_term)
        return snaps
| StarcoderdataPython |
3356992 | """ This is an adaptation of <NAME> implementation of the poisson learning algorithm licensed
under the MIT licence. For the original source code see
`https://github.com/jwcalder/GraphLearning/blob/master/graphlearning/ssl.py`.
"""
import sys
import logging
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import cg as spcg
import graphlearning as gl
from . import numerics
# Module-level logger for the Poisson-learning solver ("pl" = poisson learning).
logger = logging.getLogger("pl.poisson")
class Poisson(gl.ssl.ssl):
    def __init__(
        self,
        W=None,
        rhs=None,
        scale=None,
        eps_scale=None,
        class_priors=None,
        solver="conjugate_gradient",
        p=1,
        normalization="combinatorial",
        use_cuda=False,
        min_iter=50,
        max_iter=1000,
        tol=1e-3,
        spectral_cutoff=10,
        homotopy_steps=None,
        homotopy_start=None,
    ):
        """Poisson Learning
        ===================
        Semi-supervised learning via the solution of the Poisson equation
        \\[L^p u = \\sum_{j=1}^m \\delta_j(y_j - \\overline{y})^T,\\]
        where \\(L=D-W\\) is the combinatorial graph Laplacian,
        \\(y_j\\) are the label vectors, \\(\\overline{y} = \\frac{1}{m}\\sum_{i=1}^m y_j\\)
        is the average label vector, \\(m\\) is the number of training points, and
        \\(\\delta_j\\) are standard basis vectors. See the reference for more details.
        Implements the solvers spectral, gradient_descent, conjugate_gradient and
        variational (the latter is selected automatically whenever p != 1).
        GPU acceleration is available for gradient descent. See [1] for details.
        Parameters
        ----------
        W : numpy array, scipy sparse matrix, or graphlearning graph object (optional), default=None
            Weight matrix representing the graph.
        rhs : numpy array (optional), default=None
            Precomputed right-hand side (source term) for the Poisson equation.
            When None, the source is built from the training labels
            (one-hot encoding minus the label mean).
        scale : float (optional), default=None
            When given, the solution is multiplied by ``scale**(1/p)`` before
            being returned.
        eps_scale : float (optional), default=None
            Stored on the instance but not used inside this class;
            presumably read by callers — confirm before removing.
        class_priors : numpy array (optional), default=None
            Class priors (fraction of data belonging to each class). If provided, the predict function
            will attempt to automatic balance the label predictions to predict the correct number of
            nodes in each class.
        solver : {'spectral', 'conjugate_gradient', 'gradient_descent', 'variational'} (optional), default='conjugate_gradient'
            Choice of solver for Poisson learning.
        p : int (optional), default=1
            Power for Laplacian, can be any positive real number. Solver will default to 'variational' if p!=1.
        normalization : str (optional), default="combinatorial"
            Normalization of the graph Laplacian.
        use_cuda : bool (optional), default=False
            Whether to use GPU acceleration for gradient descent solver.
        min_iter : int (optional), default=50
            Minimum number of iterations of gradient descent before checking stopping condition.
        max_iter : int (optional), default=1000
            Maximum number of iterations of gradient descent.
        tol : float (optional), default=1e-3
            Tolerance for conjugate gradient solver.
        spectral_cutoff : int (optional), default=10
            Number of eigenvectors to use for spectral solver.
        homotopy_steps : sequence of float (optional), default=None
            Explicit sequence of p-values for the variational homotopy; when
            None a schedule is derived from ``p`` in ``_fit``.
        homotopy_start : numpy array (optional), default=None
            Initial guess for the variational homotopy; when None the p=2
            conjugate-gradient solution is used.
        Examples
        --------
        Poisson learning works on directed (i.e., nonsymmetric) graphs with the gradient descent solver: [poisson_directed.py](https://github.com/jwcalder/GraphLearning/blob/master/examples/poisson_directed.py).
        ```py
        import numpy as np
        import graphlearning as gl
        import matplotlib.pyplot as plt
        import sklearn.datasets as datasets
        X,labels = datasets.make_moons(n_samples=500,noise=0.1)
        W = gl.weightmatrix.knn(X,10,symmetrize=False)
        train_ind = gl.trainsets.generate(labels, rate=5)
        train_labels = labels[train_ind]
        model = gl.ssl.poisson(W, solver='gradient_descent')
        pred_labels = model.fit_predict(train_ind, train_labels)
        accuracy = gl.ssl.ssl_accuracy(pred_labels, labels, len(train_ind))
        print("Accuracy: %.2f%%"%accuracy)
        plt.scatter(X[:,0],X[:,1], c=pred_labels)
        plt.scatter(X[train_ind,0],X[train_ind,1], c='r')
        plt.show()
        ```
        Reference
        ---------
        [1] <NAME>, <NAME>, <NAME>, <NAME>. [Poisson Learning: Graph Based Semi-Supervised
        Learning at Very Low Label Rates.](http://proceedings.mlr.press/v119/calder20a.html),
        Proceedings of the 37th International Conference on Machine Learning, PMLR 119:1306-1316, 2020.
        """
        super().__init__(W, class_priors)
        self.rhs = rhs
        self.scale = scale
        # NOTE(review): eps_scale is stored but never used in this class.
        self.eps_scale = eps_scale
        if solver not in [
            "conjugate_gradient",
            "spectral",
            "gradient_descent",
            "variational",
        ]:
            sys.exit("Invalid Poisson solver")
        self.solver = solver
        self.p = p
        # p != 1 is only supported by the variational solver, so override.
        if p != 1:
            self.solver = "variational"
        self.normalization = normalization
        self.use_cuda = use_cuda
        self.min_iter = min_iter
        self.max_iter = max_iter
        self.tol = tol
        self.spectral_cutoff = spectral_cutoff
        self.homotopy_steps = homotopy_steps
        self.homotopy_start = homotopy_start
        # Setup accuracy filename
        fname = "_poisson"
        if self.p != 1:
            fname += "_p%.2f" % p
        if self.solver == "spectral":
            fname += "_N%d" % self.spectral_cutoff
            # NOTE(review): attribute name looks like a typo for
            # "requires_eig"; kept as-is since the base class may read it.
            self.requries_eig = True
        self.accuracy_filename = fname
        # Setup Algorithm name
        self.name = "Poisson Learning"
    def _fit(self, train_ind, train_labels, all_labels=None):
        """Solve the Poisson problem and return the node/class score matrix u.

        ``train_ind``/``train_labels`` define the point sources.  When the
        'variational' solver is active the return value is a tuple
        ``(u, {p_value: intermediate solution})``; otherwise just ``u``.
        When ``all_labels`` is given, the gradient-descent solver prints a
        running accuracy every iteration.
        """
        n = self.graph.num_nodes
        unique_labels = np.unique(train_labels)
        k = len(unique_labels)
        # Zero out diagonal for faster convergence
        W = self.graph.weight_matrix
        W = W - sparse.spdiags(W.diagonal(), 0, n, n)
        G = gl.graph(W)
        additional_output = None
        # Poisson source term: one-hot training labels centered by their mean,
        # placed at the training nodes (zero elsewhere), unless a custom rhs
        # was supplied at construction time.
        if self.rhs is None:
            onehot = gl.utils.labels_to_onehot(train_labels)
            source = np.zeros((n, onehot.shape[1]))
            source[train_ind] = onehot - np.mean(onehot, axis=0)
        else:
            source = self.rhs
        if self.solver == "conjugate_gradient":  # Conjugate gradient solver
            u = self._fit_cg(G, source)
        elif self.solver == "gradient_descent":
            # Setup matrices
            D = G.degree_matrix(p=-1)
            P = D * W.transpose()
            Db = D * source
            # Invariant distribution: used to decide when the random walk
            # started from the training points has mixed.
            v = np.zeros(n)
            v[train_ind] = 1
            v = v / np.sum(v)
            deg = G.degree_vector()
            vinf = deg / np.sum(deg)
            RW = W.transpose() * D
            u = np.zeros((n, k))
            # Number of iterations
            T = 0
            if self.use_cuda:
                import torch
                Pt = gl.utils.torch_sparse(P).cuda()
                ut = torch.from_numpy(u).float().cuda()
                Dbt = torch.from_numpy(Db).float().cuda()
                while (T < self.min_iter or np.max(np.absolute(v - vinf)) > 1 / n) and (
                    T < self.max_iter
                ):
                    ut = torch.sparse.addmm(Dbt, Pt, ut)
                    v = RW * v
                    T = T + 1
                # Transfer to CPU and convert to numpy
                u = ut.cpu().numpy()
            else:  # Use CPU
                while (T < self.min_iter or np.max(np.absolute(v - vinf)) > 1 / n) and (
                    T < self.max_iter
                ):
                    u = Db + P * u
                    v = RW * v
                    T = T + 1
            # Compute accuracy if all labels are provided
            if all_labels is not None:
                self.prob = u
                labels = self.predict()
                acc = gl.ssl.ssl_accuracy(labels, all_labels, len(train_ind))
                print("%d,Accuracy = %.2f" % (T, acc))
        # Use spectral solver
        elif self.solver == "spectral":
            vals, vecs = G.eigen_decomp(
                normalization=self.normalization, k=self.spectral_cutoff + 1
            )
            # Drop the first (constant) eigenvector / zero eigenvalue.
            V = vecs[:, 1:]
            vals = vals[1:]
            if self.p != 1:
                vals = vals ** self.p
            L = sparse.spdiags(1 / vals, 0, self.spectral_cutoff, self.spectral_cutoff)
            u = V @ (L @ (V.T @ source))
        elif self.solver == "variational":
            if self.homotopy_start is None:
                u = self._fit_cg(G, source)[:, 0]  # Initialize with solution for p=2
            else:
                u = self.homotopy_start.copy()
            homotopy_steps = self.homotopy_steps
            if homotopy_steps is None:
                if self.p > 1.5:
                    homotopy_steps = np.linspace(
                        2.5, self.p + 1, np.floor((self.p - 1.5) * 1.5).astype(int)
                    )
                else:
                    homotopy_steps = [self.p + 1]
            # Record the intermediate homotopy solutions keyed by p-value.
            additional_output = {2: u}
            for p_homotopy in homotopy_steps:
                u = self._fit_variational(u, source[:, 0], W, p_homotopy)
                additional_output[p_homotopy] = u.copy()
            u = u[:, np.newaxis]
        else:
            sys.exit("Invalid Poisson solver " + self.solver)
        # Normalize for zero weighted mean
        D = G.degree_vector()
        shift = np.dot(D, u) / np.sum(D)
        u = u - shift
        # Scale solution
        if self.scale is not None:
            u = self.scale ** (1 / self.p) * u
        if additional_output is None:
            return u
        else:
            return u, additional_output
    def _fit_cg(self, G, source):
        """Solve L u = source (the p=2 case) with a conjugate-gradient method.

        With 'normalized' normalization, the system is symmetrized via
        D^{-1/2} before and after the solve.
        """
        L = G.laplacian(normalization=self.normalization).tocsr()
        if self.normalization == "combinatorial":
            u = numerics.conjgrad(
                L, source, tol=self.tol, max_iter=self.max_iter, preconditioner="ilu",
            )
            # u = gl.utils.conjgrad(L, source, tol=self.tol, max_iter=self.max_iter)
            # u = np.empty_like(source, dtype="float64")
            # D = G.degree_matrix()
            # for i in range(u.shape[1]):
            #     u[:, i], _ = spcg(
            #         L, source[:, i], tol=self.tol, maxiter=self.max_iter, M=D
            #     )
        elif self.normalization == "normalized":
            D = G.degree_matrix(p=-0.5)
            u = gl.utils.conjgrad(L, D * source, tol=self.tol, max_iter=self.max_iter)
            u = D * u
        else:
            raise ValueError(
                f"Normalization `{self.normalization}` not supported with \
solver `conjugate_gradient`."
            )
        return u
    def _fit_variational(self, u0, source, W, p):
        """For algorithm details see
        <NAME>, <NAME>, and <NAME>. "Analysis and algorithms for Lp-based semi-supervised learning on graphs. "
        Applied and Computational Harmonic Analysis, 60:77-122, 2022."""
        logger.info(f"Variational - Homotopy step with p={p}")
        n = u0.shape[0]
        # self.scale = None
        # u = n * np.linspace(-1, 1, n + 1)
        u = u0.copy()  # np.concatenate((u0.copy(), [0]))
        W = sparse.csc_matrix(W)
        # Reweight edges by |u_i - u_j|^(p-2) to linearize the p-Laplacian.
        nonzero = W.nonzero()
        values = np.abs(u[nonzero[0]] - u[nonzero[1]]) ** (p - 2)
        a = sparse.csc_matrix((values, nonzero), shape=(n, n))
        A = W.multiply(a)
        D = sparse.spdiags(A.sum(axis=1).A1, diags=0, m=n, n=n, format="csc")
        L = D - A
        res = np.max(np.abs(L @ u - source))
        it = 0
        logger.info(f"Variational - It: {it}; Res: {res}; Amax: {L.max()}")
        while it < self.max_iter and res > self.tol:
            # Rebuild the reweighted Laplacian from the current iterate.
            nonzero = W.nonzero()
            values = np.abs(u[nonzero[0]] - u[nonzero[1]]) ** (p - 2)
            a = sparse.csc_matrix((values, nonzero), shape=(n, n))
            A = W.multiply(a)
            D = sparse.spdiags(A.sum(axis=1).A1, diags=0, m=n, n=n, format="csc")
            L = D - A
            Lf = numerics.conjgrad(
                L, source, preconditioner="ilu", tol=1e-8, max_iter=self.max_iter,
            )
            # Damped fixed-point update (see the reference above).
            u = 1 / (p - 1) * ((p - 2) * u + Lf)
            res = np.max(np.abs(L @ u - source))
            # res = max(np.abs(jac(u[:-1])).max(), np.sum(u[:-1]))
            it += 1
            logger.info(f"Variational - It: {it}; Res: {res}; Amax: {L.max()}")
        # u = u[:-1]
        return u
| StarcoderdataPython |
284438 | <reponame>tabulon-ext/dedupsqlfs
# -*- coding: utf8 -*-
"""
Special action to collect all garbage and remove
"""
__author__ = 'sergey'
from time import time
from math import floor
from dedupsqlfs.my_formats import format_timespan
from dedupsqlfs.lib import constants
from dedupsqlfs.fuse.subvolume import Subvolume
def __collect_garbage(app):
    """
    Run every garbage collector in order and report overall statistics.
    The collectors are ordered from filesystem metadata down to data blocks
    (names -> inodes -> xattrs/links/indexes -> blocks), so later passes can
    rely on earlier ones having removed dangling references.
    @param app:
    @type app: dedupsqlfs.fuse.dedupfs.DedupFS
    @return: None
    """
    # Nothing to do on a read-only mount.
    if app.isReadonly():
        return
    start_time = time()
    app.getLogger().info("Performing garbage collection (this might take a while) ..")
    clean_stats = False
    gc_funcs = [
        __collect_strings,
        __collect_inodes_all,
        __collect_xattrs,
        __collect_links,
        __collect_indexes,
        __collect_blocks
    ]
    cnt_sum = 0
    for method in gc_funcs:
        sub_start_time = time()
        # Each collector returns (removed row count, log message template).
        cnt, msg = method(app)
        if cnt:
            clean_stats = True
        elapsed_time = time() - sub_start_time
        if not app.getOption("parsable"):
            app.getLogger().info(msg, format_timespan(elapsed_time))
        cnt_sum += cnt
    # Something was removed: invalidate cached subvolume statistics.
    if clean_stats:
        subv = Subvolume(app.operations)
        subv.clean_stats(app.operations.mounted_subvolume_name)
        if app.operations.mounted_subvolume_name == constants.ROOT_SUBVOLUME_NAME:
            subv.clean_non_root_subvol_diff_stats()
    elapsed_time = time() - start_time
    if app.getOption("parsable"):
        app.getLogger().info("Count: %s", cnt_sum)
        app.getLogger().info("Time: %s", format_timespan(elapsed_time))
    else:
        app.getLogger().info("Finished garbage collection in %s.", format_timespan(elapsed_time))
    return
def __collect_strings(app):
    """
    Collect all file names and check fs tree
    And cleanup removed
    @param app:
    @type app: dedupsqlfs.fuse.dedupfs.DedupFS
    @return: tuple of (number of removed path segments, log message template)
    """
    tableName = app.operations.getTable("name")
    subv = Subvolume(app.operations)
    # IDs of name rows still referenced by any subvolume tree.
    treeNameIds = subv.prepareTreeNameIds()
    app.getLogger().debug("Clean unused path segments...")
    countNames = tableName.get_count()
    app.getLogger().debug(" path segments: %d", countNames)
    count = 0
    current = 0
    proc = ""
    maxCnt = 10000
    curBlock = 0
    # Scan the name table in fixed-size ID ranges until all rows were seen.
    while True:
        if current == countNames:
            break
        nameIds = tableName.get_name_ids(curBlock, curBlock + maxCnt)
        current += len(nameIds)
        curBlock += maxCnt
        if not nameIds:
            continue
        # SET magick: names not referenced from any tree are garbage.
        to_delete = nameIds - treeNameIds
        id_str = ",".join((str(_id) for _id in to_delete))
        count += tableName.remove_by_ids(id_str)
        p = "%6.2f%%" % (100.0 * current / countNames)
        if p != proc:
            proc = p
            app.getLogger().debug("%s (count=%d)", proc, count)
    msg = ""
    if count > 0:
        tableName.commit()
        msg = "Cleaned up %i unused path segment%s in %%s." % (count, count != 1 and 's' or '')
    return count, msg
def __collect_inodes_all(app):
    """
    Drop inodes that are no longer referenced by any tree node.
    @param app:
    @type app: dedupsqlfs.fuse.dedupfs.DedupFS
    @return: tuple of (number of removed inodes, log message template)
    """
    inode_table = app.operations.getTable("inode")
    tree_table = app.operations.getTable("tree")
    app.getLogger().debug("Clean unused inodes (all)...")
    total = inode_table.get_count()
    app.getLogger().debug(" inodes: %d", total)
    removed = 0
    scanned = 0
    last_progress = ""
    offset = 0
    batch_size = 10000
    # Walk the inode table in fixed-size ID ranges until every row was seen.
    while scanned != total:
        batch = inode_table.get_inode_ids(offset, offset + batch_size)
        scanned += len(batch)
        offset += batch_size
        if not len(batch):
            continue
        referenced = tree_table.get_inodes_by_inodes(batch)
        # Set difference: inodes without a tree entry are orphans.
        orphans = batch - referenced
        removed += inode_table.remove_by_ids(orphans)
        progress = "%6.2f%%" % (100.0 * scanned / total)
        if progress != last_progress:
            last_progress = progress
            app.getLogger().debug("%s (count=%d)", progress, removed)
    message = ""
    if removed > 0:
        inode_table.commit()
        message = "Cleaned up %i unused inode%s in %%s." % (removed, removed != 1 and 's' or '')
    return removed, message
def __collect_xattrs(app):
    """
    Remove extended-attribute rows whose owning inode no longer exists.
    @param app:
    @type app: dedupsqlfs.fuse.dedupfs.DedupFS
    @return: tuple of (number of removed xattr rows, log message template)
    """
    xattr_table = app.operations.getTable("xattr")
    inode_table = app.operations.getTable("inode")
    app.getLogger().debug("Clean unused xattrs...")
    total_xattrs = xattr_table.get_count()
    app.getLogger().debug(" xattrs: %d", total_xattrs)
    removed = 0
    seen = 0
    shown = ""
    start = 0
    step = 10000
    # Scan the xattr table in fixed-size ID ranges.
    while seen != total_xattrs:
        chunk = xattr_table.get_inode_ids(start, start + step)
        seen += len(chunk)
        start += step
        if not chunk:
            continue
        alive = inode_table.get_inodes_by_inodes(chunk)
        # Anything in the chunk without a live inode is stale.
        stale = chunk - alive
        removed += xattr_table.remove_by_ids(stale)
        percent = "%6.2f%%" % (100.0 * seen / total_xattrs)
        if percent != shown:
            shown = percent
            app.getLogger().debug("%s (count=%d)", percent, removed)
    note = ""
    if removed > 0:
        xattr_table.commit()
        note = "Cleaned up %i unused xattr%s in %%s." % (removed, removed != 1 and 's' or '')
    return removed, note
def __collect_links(app):
    """
    Collect all links not linked to inodes
    And remove them
    @param app:
    @type app: dedupsqlfs.fuse.dedupfs.DedupFS
    @return: tuple of (number of removed link rows, log message template)
    """
    tableLink = app.operations.getTable("link")
    tableInode = app.operations.getTable("inode")
    app.getLogger().debug("Clean unused links...")
    countLinks = tableLink.get_count()
    app.getLogger().debug(" links: %d", countLinks)
    count = 0
    current = 0
    proc = ""
    curBlock = 0
    maxCnt = 10000
    # Scan the link table in fixed-size ID ranges until all rows were seen.
    while True:
        if current == countLinks:
            break
        inodeIds = tableLink.get_inode_ids(curBlock, curBlock + maxCnt)
        current += len(inodeIds)
        curBlock += maxCnt
        if not inodeIds:
            continue
        linkInodeIds = tableInode.get_inodes_by_inodes(inodeIds)
        # SET magick: link rows whose inode no longer exists are garbage.
        to_delete = inodeIds - linkInodeIds
        count += tableLink.remove_by_ids(to_delete)
        p = "%6.2f%%" % (100.0 * current / countLinks)
        if p != proc:
            proc = p
            app.getLogger().debug("%s (count=%d)", proc, count)
    msg = ""
    if count > 0:
        tableLink.commit()
        msg = "Cleaned up %i unused link%s in %%s." % (count, count != 1 and 's' or '')
    return count, msg
def __collect_indexes(app):
    """
    Collect all inode-blocks not linked to inodes
    And remove them.  For inodes that still exist, additionally truncate
    block-index entries lying beyond the inode's current size.
    @param app:
    @type app: dedupsqlfs.fuse.dedupfs.DedupFS
    @return: tuple of (number of removed/truncated index entries, log message template)
    """
    tableIndex = app.operations.getTable("inode_hash_block")
    tableInode = app.operations.getTable("inode")
    app.getLogger().debug("Clean unused block indexes...")
    countInodes = tableIndex.get_count_uniq_inodes()
    app.getLogger().debug(" block inodes: %d", countInodes)
    count = 0
    countTrunc = 0
    current = 0
    proc = ""
    curBlock = 0
    maxCnt = 10000
    # Scan the distinct inode IDs of the index table in fixed-size ranges.
    while True:
        if current == countInodes:
            break
        inodeIds = tableIndex.get_inode_ids(curBlock, curBlock + maxCnt)
        current += len(inodeIds)
        curBlock += maxCnt
        if not len(inodeIds):
            continue
        indexInodeIds = tableInode.get_inodes_by_inodes(inodeIds)
        # SET magick: split into dead inodes (delete all their index rows)
        # and live inodes (truncate trailing rows past the file size).
        to_delete = inodeIds - indexInodeIds
        to_trunc = inodeIds - to_delete
        count += tableIndex.remove_by_inodes(to_delete)
        # Slow?
        inodeSizes = tableInode.get_sizes_by_id(to_trunc)
        for inode_id in to_trunc:
            size = inodeSizes.get(inode_id, -1)
            if size < 0:
                continue
            # Last block actually covered by the inode's size.
            inblock_offset = size % app.operations.block_size
            max_block_number = int(floor(1.0 * (size - inblock_offset) / app.operations.block_size))
            trunced = tableIndex.delete_by_inode_number_more(inode_id, max_block_number)
            countTrunc += len(trunced)
        p = "%6.2f%%" % (100.0 * current / countInodes)
        if p != proc:
            proc = p
            app.getLogger().debug("%s (count=%d, trunced=%d)", proc, count, countTrunc)
    count += countTrunc
    msg = ""
    if count > 0:
        tableIndex.commit()
        msg = "Cleaned up %i unused index entr%s in %%s." % (count, count != 1 and 'ies' or 'y')
    return count, msg
def __collect_blocks(app):
    """
    Collect all hashes not linked to inode-blocks
    Across all subvolumes
    And remove them together with the matching rows in the block,
    compression-type and size tables.
    @param app:
    @type app: dedupsqlfs.fuse.dedupfs.DedupFS
    @return: tuple of (number of removed hashes/blocks, log message template)
    """
    tableHash = app.operations.getTable("hash")
    tableBlock = app.operations.getTable("block")
    tableHCT = app.operations.getTable("hash_compression_type")
    tableHSZ = app.operations.getTable("hash_sizes")
    subv = Subvolume(app.operations)
    # Hash IDs still referenced by any subvolume's block index.
    indexHashIds = subv.prepareIndexHashIds()
    app.getLogger().debug("Clean unused data blocks and hashes...")
    countHashes = tableHash.get_count()
    app.getLogger().debug(" hashes: %d", countHashes)
    count = 0
    current = 0
    proc = ""
    _curBlock = 0
    maxCnt = 10000
    # Scan the hash table in fixed-size ID ranges until all rows were seen.
    while True:
        if current == countHashes:
            break
        hashIds = tableHash.get_hash_ids(_curBlock, _curBlock + maxCnt)
        current += len(hashIds)
        _curBlock += maxCnt
        if not hashIds:
            continue
        # SET magick: hashes no block index points to are garbage; remove
        # them and the dependent rows keyed by the same hash IDs.
        to_delete = hashIds - indexHashIds
        id_str = ",".join((str(_id) for _id in to_delete))
        count += tableHash.remove_by_ids(id_str)
        tableBlock.remove_by_ids(id_str)
        tableHCT.remove_by_ids(id_str)
        tableHSZ.remove_by_ids(id_str)
        p = "%6.2f%%" % (100.0 * current / countHashes)
        if p != proc:
            proc = p
            app.getLogger().debug("%s (count=%d)", proc, count)
    msg = ""
    if count > 0:
        tableHash.commit()
        tableBlock.commit()
        tableHCT.commit()
        tableHSZ.commit()
        msg = "Cleaned up %i unused data block%s and hashes in %%s." % (
            count, count != 1 and 's' or '',
        )
    return count, msg
def do_defragment(options, _fuse):
    """
    Entry point: run a full garbage-collection pass on the mounted filesystem.
    NOTE(review): despite the name, this performs garbage collection only;
    no actual defragmentation happens here.  ``options`` is accepted for the
    command interface but unused.
    @param options: Commandline options
    @type options: object
    @param _fuse: FUSE wrapper
    @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS
    @return: exit code (always 0)
    """
    __collect_garbage(_fuse)
    return 0
| StarcoderdataPython |
1623304 | <reponame>ottomattas/INFOMAIGT-AGENTS
#! /usr/bin/env -S python -u
from game import Game
from random_agent import RandomAgent
from bandit_agent import BanditAgent
from neural_network_agent import NNAgent
import argparse, time, cProfile
import numpy as np
import multiprocessing as mp
from collections import Counter
from itertools import starmap
import tensorflow as tf
def main(args):
    """Optionally train the neural network from a recorded-games file
    (``args.input``), then play ``args.games`` games between a BanditAgent
    and the NNAgent (alternating who moves first) and print total scores.
    NOTE(review): the training encoder below hard-codes a 5x5 board
    (75 = 5*5 cells x 3 one-hot channels), while ``--size`` defaults to 10;
    confirm recorded games are 5x5 before training.
    """
    if args.input:
        # Load dataset
        data = read_games(args.input)
        #print("Game 1 array is \n", data[0])
        # Count the board states
        board_state_count = 0
        # For each _ element, and game element
        for _, game in data:
            # For each _ element as only the number of elements is relevant
            for _, _, _ in game:
                board_state_count += 1
        #print("Board state count is ",board_state_count)
        # Create array for the input layer
        # (Columns: 75 = 5*5 board cells x 3 one-hot channels
        #  (empty / opponent / current player);
        # Rows: one per recorded board state)
        x_train = np.zeros((board_state_count,75),dtype=int)
        #print("X train is \n",x_train)
        # Create array for the output layer with the proper shape
        # (For each board state, save the outcome from the mover's view)
        y_train = np.zeros(board_state_count,dtype=int)
        #y_train = tf.keras.utils.to_categorical(np.zeros(board_state_count,dtype=int),3)
        #print("Y train is \n",y_train)
        # Create indexes for game and board
        game_index = 0
        board_index = 0
        # Loop over all games and boards
        for winner, game in data:
            game_index += 1
            for player, move, board in game:
                #print("Player is ", player)
                #print("Move is ", move)
                #print("Board is\n", board)
                #print("Winner is ", winner)
                ##########################
                # Create the input layer #
                ##########################
                # Each board state is encoded from the perspective of the
                # player to move: own stones -> 0 0 1, opponent -> 0 1 0,
                # empty -> 1 0 0.
                # Define a list for appending the one-hot encoded players
                lst = []
                # If player 1 move
                if player == 1:
                    for x in range(5):
                        for y in range(5):
                            # When position value is 1 (player 1 move)
                            if board[x, y] == 1:
                                # Append 0 0 1
                                lst.append(0)
                                lst.append(0)
                                lst.append(1)
                            # When position value is 2 (player 2 move)
                            elif board[x, y] == 2:
                                # Append 0 1 0
                                lst.append(0)
                                lst.append(1)
                                lst.append(0)
                            # When position value is 0 (no player move yet)
                            else:
                                # Append 1 0 0
                                lst.append(1)
                                lst.append(0)
                                lst.append(0)
                    # Save the one-hot encoded list in the x_train array
                    # at position board_index
                    x_train[board_index] = np.array(lst)
                    #print("After player 1 move, encoded board is now \n", x_train[board_index])
                    #print("After player 1 move, x_train is now \n", x_train)
                # If player 2 move (same encoding, roles swapped)
                else:
                    for x in range(5):
                        for y in range(5):
                            # When position value is 2 (player 2 move)
                            if board[x, y] == 2:
                                # Append 0 0 1
                                lst.append(0)
                                lst.append(0)
                                lst.append(1)
                            # When position value is 1 (player 1 move)
                            elif board[x, y] == 1:
                                # Append 0 1 0
                                lst.append(0)
                                lst.append(1)
                                lst.append(0)
                            # When position value is 0 (no player move yet)
                            else:
                                # Append 1 0 0
                                lst.append(1)
                                lst.append(0)
                                lst.append(0)
                    # Save the one-hot encoded list in the x_train array
                    # at position board_index
                    x_train[board_index] = np.array(lst)
                    #print("After player 2 move, encoded board is now \n", x_train[board_index])
                    #print("After player 2 move, x_train is now \n", x_train)
                ###########################
                # Create the output layer #
                ###########################
                # If draw
                if winner == 0:
                    y_train[board_index] = 0
                # If the player to move (this row's perspective) wins
                elif winner == player:
                    y_train[board_index] = 1
                # If the opponent wins
                else:
                    y_train[board_index] = 2
                #print("y_train is", y_train)
                board_index += 1
            #print("This is game nr: ", game_index)
            #print("This is board nr: ", board_index)
        ############
        # Training #
        ############
        # Create the tf.keras.Sequential model by stacking layers.
        # Choose an optimizer and loss function for training.
        model = tf.keras.models.Sequential([
            tf.keras.layers.InputLayer(input_shape=(75)), # array with 75 objects
            tf.keras.layers.Dense(75, activation='relu'),
            tf.keras.layers.Dropout(0.1),
            tf.keras.layers.Dense(50),
            tf.keras.layers.Dropout(0.1),
            tf.keras.layers.Dense(25),
            tf.keras.layers.Dropout(0.1),
            tf.keras.layers.Dense(3, activation='softmax') # win/loss/draw, so 3
            ])
        # # # Compile the model
        model.compile(optimizer='adam',
                    loss='sparse_categorical_crossentropy',
                    metrics=['accuracy'])
        # # # Adjust the model parameters to minimize the loss
        model.fit(x_train, y_train, batch_size=200, epochs=5)
        # Checks the models performance
        #model.evaluate(x_test, y_test, verbose=2)
        # Save the model (overwrite=False: fails if "nn2_model" already exists)
        model.save("nn2_model", overwrite=False)
    work = []
    for i in range(args.games):
        # swap order every game
        if i % 2 == 0:
            players = [BanditAgent(args.time,1), NNAgent(2)]
        else:
            players = [NNAgent(2), BanditAgent(args.time,1)]
        work.append((args.size,
            read_objectives(args.objectives),
            players,
            args.output,
            args.print_board))
    start = time.perf_counter()
    # the tests can be run in parallel, or sequentially
    # it is recommended to only use the parallel version for large-scale testing
    # of your agent, as it is harder to debug your program when enabled
    if args.parallel == None or args.output != None:
        results = starmap(play_game, work)
    else:
        # you probably shouldn't set args.parallel to a value larger than the
        # number of cores on your CPU, as otherwise agents running in parallel
        # may compete for the time available during their turn
        with mp.Pool(args.parallel) as pool:
            results = pool.starmap(play_game, work)
    stats = Counter(results)
    end = time.perf_counter()
    print(f'Total score {stats[1]}/{stats[2]}/{stats[0]}.')
    print(f'Total time {end - start} seconds.')
def play_game(boardsize, objectives, players, output, print_board = None):
    """Run a single game and return the winner's player id (0 for a draw).
    When ``output`` is given, the board size, every move and the final
    winner are appended to that file as training data.
    """
    show_each_turn = print_board == 'all'
    game = Game.new(boardsize, objectives, players, show_each_turn)
    if output:
        # Record the game while it is being played.
        with open(output, 'a') as outfile:
            print(boardsize, file=outfile)
            winner = game.play(outfile)
            winner_id = winner.id if winner else 0
            print(f'winner={winner_id}', file=outfile)
    else:
        winner = game.play()
    if print_board == 'final':
        game.print_result(winner)
    if winner == None:
        return 0
    return winner.id
def read_objectives(filename):
    """Read objective shapes from *filename*.
    The file contains rectangles of ``x`` (occupied) and ``.`` characters,
    with blank lines separating consecutive shapes.  Each shape is returned
    as a transposed boolean numpy array (indexed ``[x, y]``).
    """
    with open(filename) as file:
        rows = [line.strip() for line in file]
    shapes = []
    pos = 0
    total = len(rows)
    while pos < total:
        block = []
        # Gather one rectangle until a blank line (or end of file).
        while pos < total and rows[pos] != '':
            block.append([symbol == 'x' for symbol in rows[pos]])
            pos += 1
        shapes.append(np.array(block).T)
        pos += 1  # skip the separating blank line
    return shapes
def read_games(filename):
    """Parse a recorded-games log written by ``play_game``.
    Returns a list of ``(winner_id, moves)`` tuples, where ``moves`` is a
    list of ``(player, [x, y], board)`` entries and ``board`` is an integer
    numpy array indexed ``[x, y]``.
    """
    with open(filename) as handle:
        raw = list(handle)
    games = []
    pos = 0
    while pos < len(raw):
        size = int(raw[pos])
        pos += 1
        moves = []
        # Each record is: player line, move line, then `size` board rows.
        while not raw[pos].startswith('winner'):
            player = int(raw[pos])
            move = [int(part) for part in raw[pos + 1].split(',')]
            grid = np.zeros((size, size), dtype = int)
            for y in range(size):
                cells = raw[pos + 2 + y].split(',')
                for x in range(size):
                    grid[(x, y)] = int(cells[x])
            moves.append((player, move, grid))
            pos += 2 + size
        winner = int(raw[pos].split('=')[1])
        games.append((winner, moves))
        pos += 1
    return games
if __name__ == '__main__':
    # Command-line interface; see individual help strings for details.
    parser = argparse.ArgumentParser()
    parser.add_argument('--size', type = int, default = 10,
        help = 'The size of the board.')
    parser.add_argument('--games', type = int, default = 1,
        help = 'The number of games to play.')
    parser.add_argument('--time', type = int, default = 10,
        help = 'The allowed time per move, in milliseconds.')
    parser.add_argument('--print-board', choices = ['all', 'final'],
        help = 'Show the board state, either every turn or only at the end.')
    parser.add_argument('--parallel', type = int,
        help = 'Run multiple games in parallel. Only use for large-scale '
        'testing.')
    parser.add_argument('--output',
        help = 'Write training data to the given file.')
    parser.add_argument('--input',
        help = 'Read training data from the given file.')
    parser.add_argument('objectives',
        help = 'The name of a file containing the objective shapes. The file '
        'should contain a rectangle with x on positions that should be '
        'occupied, and dots on other positions. Separate objective shapes '
        'should be separated by a blank line.')
    args = parser.parse_args()
    # Uncomment to profile a run instead of executing it directly.
    #cProfile.run('main(args)')
    main(args)
| StarcoderdataPython |
73644 | # =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from .chat_invite_link_count import ChatInviteLinkCount
from ..base_object import BaseObject
class ChatInviteLinkCounts(BaseObject):
    """
    Contains a list of chat invite link counts
    :param invite_link_counts: List of invite link counts
    :type invite_link_counts: :class:`list[ChatInviteLinkCount]`
    """
    ID: str = Field("chatInviteLinkCounts", alias="@type")
    invite_link_counts: list[ChatInviteLinkCount]
    @staticmethod
    def read(q: dict) -> ChatInviteLinkCounts:
        """Build an instance from a raw API dict (pydantic ``construct``,
        i.e. without field validation)."""
        return ChatInviteLinkCounts.construct(**q)
| StarcoderdataPython |
3351443 | #!/usr/bin/env python
from numpy import *
import sys,os,math,random
# Perturbation half-widths.  The commented-out values were for randomising
# the cell vectors/angles as well; currently only atom positions are jittered.
# NOTE(review): this is Python 2 code (print statements).
#cell_vec = 0.05/2.0
#cell_ang = 0.25/2.0
atom_pos = 0.01/2.0
# Input is read from stdin; the commented lines below are the legacy
# file-based variant.
iFile = sys.stdin
#iFile = "ideal.in"
#try: iFile = open(iFile, 'r')
#except: print "Problem opening ",iFile; sys.exit(1)
random.seed()
# Echo the first 7 header lines unchanged (presumably a POSCAR-style
# structure header — confirm against the input format).
for i in range(7):
    line = iFile.readline()
    if not line: break
    print line,
#line = iFile.readline()
#if not line: print "No cell info!"; sys.exit(1)
#pair = line.split()
#print random.uniform( float(pair[0])-cell_vec,float(pair[0])+cell_vec ), \
#      random.uniform( float(pair[1])-cell_vec,float(pair[1])+cell_vec ), \
#      random.uniform( float(pair[2])-cell_vec,float(pair[2])+cell_vec ), \
#      random.uniform( float(pair[3])-cell_ang,float(pair[3])+cell_ang ), \
#      random.uniform( float(pair[4])-cell_ang,float(pair[4])+cell_ang ), \
#      random.uniform( float(pair[5])-cell_ang,float(pair[5])+cell_ang )
# NOTE(review): atomList is never used; presumably left over from an
# earlier version of the script.
atomList = ["Pb","S","Te"]
# Jitter the first three columns (x, y, z) of every remaining line by
# +/- atom_pos; any extra columns are passed through unchanged.
while 1:
    line = iFile.readline()
    if not line: break
    pair = line.split()
    for i in range(3):
        pair[i] = random.uniform(float(pair[i])-atom_pos,float(pair[i])+atom_pos)
    line = ""
    for i in range(len(pair)):
        line = line+str(pair[i])+" "
    print line
iFile.close()
| StarcoderdataPython |
9771860 | <reponame>Software-Engineering-Bachelor-Project/mycroft
import pytz
from django.conf import settings
from django.test import TestCase
from unittest.mock import patch
# Import module
from backend.video_manager import *
class GetClipInfoTest(TestCase):
    """Tests for the ``get_clip_info`` endpoint of the video manager."""
    @patch('backend.database_wrapper.create_hash_sum')
    def setUp(self, mock_create_hash_sum) -> None:
        # Hashing is mocked so no real file needs to exist on disk.
        mock_create_hash_sum.return_value = '1234'
        self.cm_name = 'Test camera name'
        self.fid = create_root_folder(path='home/user/', name='test_folder')
        self.cid = create_clip(clip_name='test_clip', fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),
                               longitude=Decimal('0.0'),
                               start_time=timezone.datetime(2020, 1, 17, tzinfo=pytz.timezone(settings.TIME_ZONE)),
                               end_time=timezone.datetime(2020, 1, 18, tzinfo=pytz.timezone(settings.TIME_ZONE)),
                               width=256, height=240, frame_rate=42, camera_name=self.cm_name)
    @patch('backend.video_manager.os_aware', side_effect=lambda x: x)
    def test_basic(self, mock_os_aware):
        """
        Makes a simple call and checks the full serialized clip payload.
        """
        code, res = get_clip_info(data={CLIP_ID: self.cid})
        self.assertEqual(code, 200)
        self.assertEqual(res, {'id': 1, 'name': 'test_clip', 'video_format': 'tvf',
                               'start_time': '2020-01-17T00:00:00+01:00',
                               'end_time': '2020-01-18T00:00:00+01:00', 'resolution': 1,
                               'frame_rate': 42.0, 'folder': 1, 'camera': 1,
                               'file_path': 'home/user/test_folder/test_clip.tvf', 'duplicates': [],
                               'overlap': [], 'playable': False, 'objectdetection_set': []})
class GetSequentialClipTest(TestCase):
    """Tests for get_sequential_clip (finding the clip that follows another)."""

    @patch('backend.database_wrapper.create_hash_sum')
    def setUp(self, mock_create_hash_sum) -> None:
        # Hashing is mocked so no clip file has to exist on disk.
        mock_create_hash_sum.return_value = '1234'
        self.cm_name = 'Test camera name'
        self.fid = create_root_folder(path='home/user/', name='test_folder')
        self.cid = create_clip(clip_name='test_clip', fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),
                               longitude=Decimal('0.0'), start_time=timezone.now() - timezone.timedelta(hours=1),
                               end_time=timezone.now(), width=256, height=240, frame_rate=42, camera_name=self.cm_name)
        self.clip = get_clip_by_id(cid=self.cid)

    def test_basic(self):
        """
        Makes a simple call.
        """
        # With only one clip in the database there is nothing sequential.
        code, res = get_sequential_clip(data={CLIP_ID: self.cid})
        self.assertEqual(code, 200)
        self.assertEqual(res, {CLIP_ID: None})

    @patch('backend.database_wrapper.create_hash_sum')
    def test_sequential_clip(self, mock_create_hash_sum):
        """
        Tests a sequential clip.
        """
        # Distinct hash so the new clip is not treated as a duplicate.
        mock_create_hash_sum.return_value = '1234567'
        cid2 = create_clip(clip_name='test_clip2', fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),
                           longitude=Decimal('0.0'), start_time=self.clip.end_time,
                           end_time=timezone.now() + timezone.timedelta(hours=1),
                           width=256, height=240, frame_rate=42, camera_name=self.cm_name)
        code, res = get_sequential_clip(data={CLIP_ID: self.cid})
        self.assertEqual(code, 200)
        self.assertEqual(res, {CLIP_ID: cid2})

    @patch('backend.database_wrapper.create_hash_sum')
    def test_almost_sequential_clip(self, mock_create_hash_sum):
        """
        Tests a clip that has a start time 5 seconds after the first clip.
        """
        # A 5 second gap still counts as sequential (6 seconds does not —
        # see test_not_sequential_clip below).
        mock_create_hash_sum.return_value = '1234567'
        cid2 = create_clip(clip_name='test_clip2', fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),
                           longitude=Decimal('0.0'),
                           start_time=self.clip.end_time + timezone.timedelta(seconds=5),
                           end_time=timezone.now() + timezone.timedelta(hours=1),
                           width=256, height=240, frame_rate=42, camera_name=self.cm_name)
        code, res = get_sequential_clip(data={CLIP_ID: self.cid})
        self.assertEqual(code, 200)
        self.assertEqual(res, {CLIP_ID: cid2})

    @patch('backend.database_wrapper.create_hash_sum')
    def test_not_sequential_clip(self, mock_create_hash_sum):
        """
        Tests a clip that is not sequential.
        """
        # 6 seconds is just past the allowed gap, so no follow-up is found.
        mock_create_hash_sum.return_value = '1234567'
        create_clip(clip_name='test_clip2', fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),
                    longitude=Decimal('0.0'),
                    start_time=self.clip.end_time + timezone.timedelta(seconds=6),
                    end_time=timezone.now() + timezone.timedelta(hours=1),
                    width=256, height=240, frame_rate=42, camera_name=self.cm_name)
        code, res = get_sequential_clip(data={CLIP_ID: self.cid})
        self.assertEqual(code, 200)
        self.assertEqual(res, {CLIP_ID: None})

    @patch('backend.database_wrapper.create_hash_sum')
    def test_sequential_clip_different_camera(self, mock_create_hash_sum):
        """
        Tests a clip that is sequential but belongs to a different camera.
        """
        # Clips from a different camera must never be reported, even when
        # their start time lines up exactly.
        cm_name = 'Another test camera name'
        mock_create_hash_sum.return_value = '1234567'
        cid2 = create_clip(clip_name='test_clip2', fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),
                           longitude=Decimal('0.0'), start_time=self.clip.end_time,
                           end_time=timezone.now() + timezone.timedelta(hours=1),
                           width=256, height=240, frame_rate=42, camera_name=cm_name)
        code, res = get_sequential_clip(data={CLIP_ID: self.cid})
        self.assertEqual(code, 200)
        self.assertEqual(res, {CLIP_ID: None})
class GetCamerasTest(TestCase):
    """Tests for get_cameras (cameras and their clips within a project)."""

    @patch('backend.database_wrapper.create_hash_sum')
    def setUp(self, mock_create_hash_sum) -> None:
        """
        Create cameras and a project.
        """
        self.cm_name1 = 'Test camera name'
        self.cm_name2 = 'Test another camera name'
        self.pid = create_project(name="test_project")
        self.lat = Decimal(value="13.37")
        self.lon = Decimal(value="0.42")
        self.st = timezone.datetime(2020, 1, 17, tzinfo=pytz.timezone(settings.TIME_ZONE))
        self.et = timezone.datetime(2020, 1, 18, tzinfo=pytz.timezone(settings.TIME_ZONE))
        self.rid = create_root_folder(path="/home/user/", name="test_folder")
        self.sid = create_subfolder(parent_fid=self.rid, name="test_subfolder")
        # Each clip gets a distinct mocked hash so none is treated as a
        # duplicate of another.
        mock_create_hash_sum.return_value = '1234'
        create_clip(fid=self.rid, clip_name="test_clip1", video_format="tvf", start_time=self.st,
                    end_time=self.et, latitude=self.lat, longitude=self.lon, width=256, height=240,
                    frame_rate=42.0, camera_name=self.cm_name1)
        mock_create_hash_sum.return_value = '12345'
        create_clip(fid=self.sid, clip_name="test_clip2", video_format="tvf", start_time=self.st,
                    end_time=self.et, latitude=self.lat, longitude=self.lon, width=256, height=240,
                    frame_rate=42.0, camera_name=self.cm_name1)
        mock_create_hash_sum.return_value = '123456'
        # NOTE(review): lat/lon are swapped for this clip — presumably to
        # give the second camera a distinct position; confirm intent.
        create_clip(fid=self.sid, clip_name="test_clip3", video_format="tvf", start_time=self.st,
                    end_time=self.et, latitude=self.lon, longitude=self.lat, width=256, height=240,
                    frame_rate=42.0, camera_name=self.cm_name2)
        add_folder_to_project(fid=self.rid, pid=self.pid)

    def test_basic(self):
        """
        Makes a simple call.
        """
        # Two distinct cameras were created in setUp.
        code, res = get_cameras(data={PROJECT_ID: self.pid})
        self.assertEqual(code, 200)
        self.assertEqual(len(res[CAMERAS]), 2)

    @patch('backend.database_wrapper.create_hash_sum')
    def test_camera_has_only_clips_in_project(self, mock_create_hash_sum):
        """
        Tests that camera only contains clip that is in current project.
        """
        # Build a second project with its own folder and clips; each
        # project must only see its own clips per camera.
        pid2 = create_project(name="test_project2")
        rid2 = create_root_folder(path="/home/user/", name="test_folder2")
        mock_create_hash_sum.return_value = '1234567'
        create_clip(fid=rid2, clip_name="test_clip4", video_format="tvf", start_time=self.st,
                    end_time=self.et, latitude=self.lat, longitude=self.lon, width=256, height=240,
                    frame_rate=42.0, camera_name=self.cm_name1)
        mock_create_hash_sum.return_value = '12345678'
        create_clip(fid=rid2, clip_name="test_clip5", video_format="tvf", start_time=self.st,
                    end_time=self.et, latitude=self.lat, longitude=self.lon, width=256, height=240,
                    frame_rate=42.0, camera_name=self.cm_name2)
        mock_create_hash_sum.return_value = '123456789'
        create_clip(fid=rid2, clip_name="test_clip6", video_format="tvf", start_time=self.st,
                    end_time=self.et, latitude=self.lat, longitude=self.lon, width=256, height=240,
                    frame_rate=42.0, camera_name=self.cm_name2)
        add_folder_to_project(fid=rid2, pid=pid2)
        # Second project: 1 clip for camera one, 2 clips for camera two.
        code, res = get_cameras(data={PROJECT_ID: pid2})
        self.assertEqual(len(res[CAMERAS]), 2)
        self.assertEqual(len(res[CAMERAS][0]['clip_set']), 1)
        self.assertEqual(len(res[CAMERAS][1]['clip_set']), 2)
        # Original project is unchanged: 2 clips and 1 clip respectively.
        code, res = get_cameras(data={PROJECT_ID: self.pid})
        self.assertEqual(len(res[CAMERAS]), 2)
        self.assertEqual(len(res[CAMERAS][0]['clip_set']), 2)
        self.assertEqual(len(res[CAMERAS][1]['clip_set']), 1)

    def test_non_existing_project(self):
        """
        Test with a project id that doesn't exist.
        """
        # Unknown project ids yield 204 (No Content) and an empty payload.
        code, res = get_cameras(data={PROJECT_ID: 42})
        self.assertEqual(code, 204)
        self.assertEqual(res, {})
class GetVideoStreamTest(TestCase):
    """Placeholder suite for the video streaming endpoint."""

    def test_simple_call(self):
        # Intentionally empty: streaming needs an HTTP/response harness
        # that this suite does not provide yet.
        pass
        # TODO: I have no idea how to test this one
| StarcoderdataPython |
25686 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Extract void-branch records ("R..." lines) from the input netlist and
# write them as: layer_id tree_id branch_id x_um y_um value
# Improvements over the original: files are opened with context managers
# (so they are closed even on an exception) and the unused line counter
# was removed.
with open('data/u_Lvoid_20.txt', encoding='utf-8') as infile, \
        open('temp2/void.txt', 'w', encoding='utf-8') as outfile:
    for line in infile:
        if line[0] != 'R':
            continue
        fields = line.split()
        # fields[0] looks like "R41-1-2": drop the leading 'R' and split
        # into [layer_id, tree_id, branch_id].
        ids = fields[0][1:].split('-')
        for i in range(3):
            outfile.write(str(ids[i]))
            outfile.write(' ')
        # fields[1] looks like "n1_<x>_<y>" with coordinates in nm;
        # convert the two coordinates to micrometres.
        coords = fields[1].split('_')
        for i in range(2):
            outfile.write(str(int(coords[i + 1]) / 1000))
            outfile.write(' ')
        # fields[3] looks like "<value>um": strip the unit suffix.
        value = float(fields[3].split('um')[0])
        outfile.write(str(value))
        outfile.write('\n')
| StarcoderdataPython |
9627995 | <reponame>ZFhuang/DiveIntoDLSketches
# coding=utf-8
# 导入自己的函数包d2lzh_pytorch,注意要先将目标包的父路径添加到系统路径中
import sys
sys.path.append(r".")
from d2lzh_pytorch import layers
from d2lzh_pytorch import data_process
from d2lzh_pytorch import train
import torch
import time
from torch import nn,optim
"""
这一节介绍了串联多个网络的"网络中的网络NiN"
"""
# 加上这个限定才能支持多线程读取
if __name__ == "__main__":
# 设置计算设备,让计算在GPU上进行
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# 前几节的网络都是多个卷积层和池化组成的块然后串联几个全连接层组成的块形成的结构
# NiN的特点在于其串联了多个"卷积+全连接"的子网络,由于全连接层需要打平数据
# 所以这里全连接层用前面说到的1*1卷积来代替,从而让空间信息能自然传递到后面的层
# 下面就是"卷积+全连接"的子网络NiN块的生成函数,网络中类似AlexNet来使用它
def NiN_block(in_channels, out_channels, kernel_size, stride, padding):
block = nn.Sequential(
# 一个和设定参数有关的核心卷积层
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
nn.ReLU(),
# 用两个kernel_size=1的卷积来代替全连接层
nn.Conv2d(out_channels, out_channels, kernel_size=1),
nn.ReLU(),
nn.Conv2d(out_channels, out_channels, kernel_size=1),
nn.ReLU()
)
return block
# 实例化这个网络
net = nn.Sequential(
# 一开始利用NiN块进行逐步缩小的卷积,提取各种尺度的特征
NiN_block(1, 96, kernel_size=11, stride=4, padding=0),
nn.MaxPool2d(kernel_size=3, stride=2),
NiN_block(96, 256, kernel_size=5, stride=1, padding=2),
nn.MaxPool2d(kernel_size=3, stride=2),
NiN_block(256, 384, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Dropout(0.5),
# 结尾时进行最后一次卷积,大幅减少通道数
NiN_block(384, 10, kernel_size=3, stride=1, padding=1),
# 然后利用全局池化大幅减少参数数量,直接合并整个面,将4维打为实际上的2维
layers.GlobalAvgPool2d(),
# 最后用一个扁平层作为输出
layers.FlattenLayer()
)
# 和之前一样,测试一下网络结构
X = torch.rand(1, 1, 224, 224)
for name, blk in net.named_children():
X = blk(X)
print(name, 'output shape:', X.shape)
print('————————————————————————————')
# 训练的处理和AlexNet部分相同,但是这里选用更大的学习率,看一个epoch即可
# 1 epoch = 291.7sec
# 由于网络很大这里不能再用太大的batch了,小显卡的显存顶不住
batch_size = 128
train_iter, test_iter = data_process.load_data_fashion_mnist(
batch_size, resize=224)
lr, num_epochs = 0.002, 5
optim = torch.optim.Adam(net.parameters(), lr=lr)
train.train_ch5(net, train_iter, test_iter,
batch_size, optim, device, num_epochs)
| StarcoderdataPython |
1909524 | <reponame>VEINHORN/pgdocs
import argparse
import meta
import os
from command import enrich
from profile import profile
from command import backup
from command import show
from command import create
def _add_connection_args(parser):
    """Attach the database connection options shared by every subcommand."""
    parser.add_argument("-h", "--host", help="Database host")
    parser.add_argument("-p", "--port", help="Database port")
    parser.add_argument("-d", "--database", help="Database name")


def main():
    """Parse the command line and dispatch to the selected subcommand.

    Improvement: the host/port/database options were duplicated verbatim
    for all four subparsers; they are now added by a single helper.
    """
    # add_help=False everywhere because -h is reused for --host.
    parser = argparse.ArgumentParser(add_help=False)
    subparsers = parser.add_subparsers(help="commands", dest="command")

    # "create": generate documentation from a live database.
    create_parser = subparsers.add_parser(
        "create", help="Create PostgreSQL documentation", add_help=False)
    _add_connection_args(create_parser)
    create_parser.add_argument(
        "-f", "--format", help="Format of output docs (Markdown)")
    create_parser.add_argument(
        "-o", "--output", help="Output folder for generated docs")

    # "backup": dump the database metadata to a file.
    meta_parser = subparsers.add_parser(
        "backup", help="Fetch PostgreSQL metadata", add_help=False)
    _add_connection_args(meta_parser)
    meta_parser.add_argument(
        "-f", "--format", help="Output format: YAML or JSON")
    meta_parser.add_argument(
        "-o", "--output", help="Output path for metadata file")

    # "enrich": attach descriptions to schema objects.
    enrich_parser = subparsers.add_parser(
        "enrich", help="Used to enrich/update current db metadata",
        add_help=False)
    _add_connection_args(enrich_parser)
    enrich_parser.add_argument("-s", "--schema", help="Database schema name")
    enrich_parser.add_argument("-t", "--table", help="Table name")
    enrich_parser.add_argument("-c", "--column", help="Table column name")
    enrich_parser.add_argument(
        "-k", "--key", help="Parameter to describe schema/table/column")
    enrich_parser.add_argument(
        "--description", help="table/column/etc description")

    # "show": print the stored description of a database object.
    show_parser = subparsers.add_parser(
        "show", help="Used to show db object description", add_help=False)
    _add_connection_args(show_parser)
    show_parser.add_argument("-s", "--schema", help="Database schema name")
    show_parser.add_argument("-t", "--table", help="Database table name")

    args = parser.parse_args()
    if args.command == "create":
        create.execute(args.host, args.port, args.database,
                       args.format, args.output)
    elif args.command == "backup":
        backup.execute(args.host, args.port, args.database,
                       args.output, args.format)
    elif args.command == "enrich":
        enrich.execute(args.host, args.port, args.database,
                       args.schema, args.table, args.column, args.key,
                       args.description)
    elif args.command == "show":
        show.execute(args.host, args.port, args.database,
                     args.schema, args.table)
    else:
        print("You entered unsupported command...")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
178367 | # Author: <NAME>
# Date: 26/06/2018
# Project: TdaToolbox
try: from filtration.imports import *
except: from imports import *
# Time delay embedded procedure
# val refers to a 1D time-serie
# step corresponds to the time-delay
# dimension is the dimension of the time-delay embedding
# point_size refers to the dimensionnial plot
# graph is a boolean, whether we want to display the result
def vectorize(val, step, dimension=3, point_size=1, graph=False):
# Working on matrix
m_i = np.arange(dimension)*(step+1)
m_j = np.arange(np.max(val.shape[0]-(dimension-1)*(step+1), 0))
val = val[m_i + m_j.reshape(-1,1)]
# Memory efficiency
del m_i, m_j
if graph:
# Display
if dimension == 2:
plt.figure(figsize=(18,4))
plt.title('Vectorized Signal')
plt.scatter(val[:,0], val[:,1], c='grey', marker='x')
plt.grid()
plt.show()
elif dimension == 3:
lay = go.Layout(margin=dict(l=0, r=0, b=0, t=0))
img = go.Scatter3d(x=val[:,0], y=val[:,1], z=val[:,2], mode='markers',
marker=dict(size=point_size), opacity=0.5)
fig = tools.make_subplots(rows=1, cols=1)
fig['data'].append(img)
pyo.iplot(fig)
return val
| StarcoderdataPython |
3486047 | from flask import Blueprint, render_template, session,request,jsonify
from flask_login import login_required, current_user
from . import logger
import json
from .click import Click
from os import environ
# Connection settings come from the environment; CLICKHOUSE_NODES is a
# JSON-encoded list of node addresses.
CLICKHOUSE_NODES = json.loads(environ.get('CLICKHOUSE_NODES'))
CLICKHOUSE_USER = environ.get('CLICKHOUSE_USER')
CLICKHOUSE_PASS = environ.get('CLICKHOUSE_PASSWORD')
# Blueprint holding all routes defined in this module.
main = Blueprint('main', __name__)
@main.route('/static/<path:path>', methods=['GET'])
def get_static(path):
    """Serve a file from the local ``static`` directory.

    Bug fix: ``send_from_directory`` was never imported (the module's
    flask import only brings in Blueprint/render_template/session/
    request/jsonify), so every request raised NameError.  Imported
    locally to keep this fix self-contained.
    """
    from flask import send_from_directory
    return send_from_directory('static', path)
@main.route('/profile')
@login_required
def profile():
    """Render the profile page for the logged-in user."""
    return render_template('profile.html', name=current_user.login)
@main.route('/', methods=['GET'])
@login_required
def index():
    """Render the main page listing all ClickHouse databases."""
    click = Click(CLICKHOUSE_NODES,CLICKHOUSE_USER,CLICKHOUSE_PASS)
    databases = click.get_databases()
    return render_template('index.html',databases=databases)
@main.route('/api/<database>/tables',methods=['GET'])
@login_required
def get_databases(database):
    """Return the tables of ``database`` as JSON.

    NOTE(review): the name says databases but the endpoint lists tables.
    """
    click = Click(CLICKHOUSE_NODES,CLICKHOUSE_USER,CLICKHOUSE_PASS)
    tables = click.get_tables(database)
    return jsonify(tables)
@main.route('/api/<database>/<table>/detail', methods=['GET'])
@login_required
def get_table_info(database,table):
    """Return details for one table as JSON.

    The target node is read from the ``?node=...`` query parameter
    (``None`` when absent).
    """
    node = request.args.get('node')
    click = Click(CLICKHOUSE_NODES,CLICKHOUSE_USER,CLICKHOUSE_PASS)
    info = click.get_table_info(node,database,table)
    return jsonify(info)
| StarcoderdataPython |
32906 | <gh_stars>0
from typing import Literal, TypedDict
class ForvoAPIItem(TypedDict):
    """One pronunciation entry as returned by the Forvo API."""
    id: int
    word: str
    original: str
    addtime: str
    hits: int
    username: str
    sex: str
    country: str
    code: str
    langname: str
    pathmp3: str  # URL of the MP3 rendition
    pathogg: str  # URL of the OGG rendition
    rate: int
    num_votes: int
    num_positive_votes: int
class ForvoAPIResponse(TypedDict):
    """Top-level Forvo API payload: totals plus the pronunciation list."""
    attributes: dict[Literal["total"], int]  # e.g. {"total": <item count>}
    items: list[ForvoAPIItem]
| StarcoderdataPython |
9648756 | from setuptools import setup, find_packages
# Packaging configuration for karmabot, a Slack bot that tracks karma
# points.  The 'dev' extra adds the flake8 linter.
setup(
    name='karmabot',
    version='1.0.1',
    description='A Slack bot to track Karma points',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/target/karmabot',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'flask',
        'urlfetch',
        'pymongo',
        'influxdb',
        'flask-executor',
        'hvac'
    ],
    extras_require={
        'dev': [
            'flake8'
        ]
    }
)
| StarcoderdataPython |
1753 | <filename>PID/PDControl.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import random
import numpy as np
import matplotlib.pyplot as plt
class Robot(object):
    """Bicycle-model robot used to demonstrate PD steering control."""

    def __init__(self, length=20.0):
        """Create a robot at the origin heading along +x."""
        self.x = 0.0
        self.y = 0.0
        self.orientation = 0.0
        self.length = length            # wheel-base length
        self.steering_noise = 0.0
        self.distance_noise = 0.0
        self.steering_drift = 0.0       # systematic steering offset

    def set(self, x, y, orientation):
        """Place the robot at (x, y) with the given heading (radians)."""
        self.x = x
        self.y = y
        self.orientation = orientation % (2.0 * np.pi)

    def set_noise(self, steering_noise, distance_noise):
        """Set the Gaussian noise applied to steering and driven distance.

        Changing noise parameters on the fly is often useful, e.g. for
        particle filters.
        """
        self.steering_noise = steering_noise
        self.distance_noise = distance_noise

    def set_steering_drift(self, drift):
        """Set the systematic steering drift parameter."""
        self.steering_drift = drift

    def move(self, steering, distance, tolerance=0.001,
             max_steering_angle=np.pi / 4.0):
        """Advance the robot.

        steering: front wheel angle, clamped to +/- max_steering_angle.
        distance: total distance driven, clamped to be non-negative.
        """
        steering = min(max(steering, -max_steering_angle), max_steering_angle)
        distance = max(distance, 0.0)

        # Apply actuator noise, then the systematic drift.
        noisy_steering = random.gauss(steering, self.steering_noise)
        noisy_distance = random.gauss(distance, self.distance_noise)
        noisy_steering += self.steering_drift

        # Heading change implied by the bicycle model.
        turn = np.tan(noisy_steering) * noisy_distance / self.length

        if abs(turn) < tolerance:
            # Nearly straight: approximate with straight-line motion.
            self.x += noisy_distance * np.cos(self.orientation)
            self.y += noisy_distance * np.sin(self.orientation)
            self.orientation = (self.orientation + turn) % (2.0 * np.pi)
        else:
            # Rotate about the instantaneous centre of the turn.
            radius = noisy_distance / turn
            centre_x = self.x - np.sin(self.orientation) * radius
            centre_y = self.y + np.cos(self.orientation) * radius
            self.orientation = (self.orientation + turn) % (2.0 * np.pi)
            self.x = centre_x + np.sin(self.orientation) * radius
            self.y = centre_y - np.cos(self.orientation) * radius

    def __repr__(self):
        return '[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y,
                                                self.orientation)
def run_p(robot, tau, n=100, speed=1.0):
    """Drive `robot` for n steps under a pure proportional (P) controller.

    The cross-track error is the robot's y coordinate, so the steering
    command is simply -tau * y.  Returns the x and y trajectories.
    """
    xs, ys = [], []
    for _ in range(n):
        robot.move(-tau * robot.y, speed)
        xs.append(robot.x)
        ys.append(robot.y)
    return xs, ys
# Module-level demo: a robot starting at (0, 1) with actuator noise.
robot = Robot()
robot.set(0, 1, 0)
robot.set_noise(0.1,0.05)
def run(robot, tau_p, tau_d, n=100, speed=1.0):
    """Drive `robot` for n steps under a PD controller.

    steering = -tau_p * CTE - tau_d * diff_CTE, where CTE is the
    cross-track error measured perpendicular to the robot's initial
    straight-line path.  Returns the x and y trajectories.
    """
    x_trajectory = []
    y_trajectory = []
    #steering =-tau_p * CTE - tau_d * diff_CTE
    # crosstrack_error[i] is the CTE before step i; seeded with 0.0.
    crosstrack_error= []
    crosstrack_error.append(0.0)
    diff_CTE = 0.0
    # Remember the starting pose: the reference path is the straight line
    # through (startX, startY) at startOrientation.
    startX = robot.x
    startY = robot.y
    startOrientation= robot.orientation
    distance = 0.0
    for i in range(n):
        steering =-tau_p * crosstrack_error[i] - tau_d * diff_CTE
        distance =speed
        robot.move(steering, distance)
        x_trajectory.append(robot.x)
        y_trajectory.append(robot.y)
        # Project onto the original path: at x = robot.x, compute the
        # reference y, then measure the perpendicular offset.
        x1 = robot.x
        y1 = startY +(x1 - startX) * np.tan(startOrientation)
        crosstrack =(robot.y - y1) * np.cos(startOrientation)
        crosstrack_error.append(crosstrack)
        # Derivative term: change in CTE over this step.
        diff_CTE =crosstrack_error[i+1] - crosstrack_error[i]
        print("{} [{}, {}] {}, {}".format(i,robot.x, robot.y,steering, crosstrack))
    return x_trajectory, y_trajectory
# Run the PD demo and plot the trajectory against the reference y = 0.
x_trajectory, y_trajectory = run(robot, 0.1, 1.0)
n = len(x_trajectory)
fig, ax1 = plt.subplots(1, 1, figsize=(8, 8))
ax1.plot(x_trajectory, y_trajectory, 'g', label='PDcontroller')
ax1.plot(x_trajectory, np.zeros(n), 'r', label='reference')
plt.show()
| StarcoderdataPython |
8056960 | <reponame>PKUfudawei/cmssw
import FWCore.ParameterSet.Config as cms
from RecoTracker.TransientTrackingRecHit.tkTransientTrackingRecHitBuilderESProducer_cfi import tkTransientTrackingRecHitBuilderESProducer
# Transient tracking rec-hit builder configured for use without refitting:
# all cluster parameter estimators (CPEs) and the matcher use their 'Fake'
# implementations.
ttrhbwor = tkTransientTrackingRecHitBuilderESProducer.clone(StripCPE = 'Fake',
                                                            Phase2StripCPE = '',
                                                            ComponentName = 'WithoutRefit',
                                                            PixelCPE = 'Fake',
                                                            Matcher = 'Fake',
                                                            ComputeCoarseLocalPositionFromDisk = False)

from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
# The Phase-2 tracking era swaps in the Phase-2 strip CPE.
trackingPhase2PU140.toModify(ttrhbwor,
                             Phase2StripCPE = 'Phase2StripCPE',
                             StripCPE = 'FakeStripCPE')
| StarcoderdataPython |
11369185 | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as nf
from .GCN import sum_aggregation
class APPNP(nn.Module):
    '''
    APPNP: ICLR 2019
    Predict then Propagate: Graph Neural Networks Meet Personalized Pagerank
    https://arxiv.org/pdf/1810.05997.pdf
    '''
    def __init__(self, feat_len, num_class, hidden=[64,32], dropout=[0], alpha=0.1):
        super().__init__()
        # NOTE(review): self.hidden is assigned num_class, not the hidden
        # sizes — looks like a misnomer; neither attribute is used below.
        self.feat_len, self.hidden = feat_len, num_class
        # Two linear feature transforms and two propagation stages.
        self.tran1 = nn.Linear(feat_len, hidden[0])
        self.tran2 = nn.Linear(hidden[0], hidden[1])
        self.app1 = GraphAppnp(alpha)
        self.app2 = GraphAppnp(alpha)
        self.acvt1 = nn.Sequential(nn.BatchNorm1d(1), nn.ReLU(), nn.Dropout(dropout[0]))
        self.acvt2 = nn.Sequential(nn.BatchNorm1d(1), nn.ReLU(), nn.Dropout(dropout[0]))
        self.classifier = nn.Linear(hidden[1], num_class)

    def forward(self, x, neighbor):
        # x, neighbor = nf.normalize(x), [nf.normalize(n) for n in neighbor]
        # Stage 1: transform, then propagate twice with teleport back to h.
        h, neighbor = self.tran1(x), [self.tran1(n) for n in neighbor]
        x, neighbor_agg = self.app1(h, neighbor, h, neighbor)
        x, neighbor_agg = self.acvt1(x), [self.acvt1(n) for n in neighbor_agg]
        x, neighbor = self.app2(x, neighbor_agg, h, neighbor)
        # Stage 2.  NOTE(review): app1/acvt1 are reused here instead of
        # app2/acvt2 (acvt2 is never used) — confirm whether intentional.
        h, neighbor = self.tran2(x), [self.tran2(n) for n in neighbor]
        x, neighbor_agg = self.app1(h, neighbor, h, neighbor)
        x, neighbor_agg = self.acvt1(x), [self.acvt1(n) for n in neighbor_agg]
        x, _ = self.app2(x, neighbor_agg, h, neighbor)
        # Zero out NaNs before classification (presumably from empty
        # neighbour aggregations — TODO confirm).
        x[torch.isnan(x)] = 0
        return self.classifier(x).squeeze(1)
class APP(nn.Module):
    '''
    APPNP: ICLR 2019
    Predict then Propagate: Graph Neural Networks Meet Personalized Pagerank
    https://arxiv.org/pdf/1810.05997.pdf
    A modified version for the graph lifelong learning
    '''
    def __init__(self, feat_len, num_class, hidden=[64,32], dropout=[0], alpha=0.1):
        super().__init__()
        # NOTE(review): self.hidden is assigned num_class — see APPNP above.
        self.feat_len, self.hidden = feat_len, num_class
        self.tran1 = nn.Linear(feat_len, hidden[0])
        self.tran2 = nn.Linear(hidden[0], hidden[1])
        self.app1 = GraphApp(alpha)
        self.app2 = GraphApp(alpha)
        self.acvt = nn.Sequential(nn.ReLU(), nn.Dropout(dropout[0]))
        self.classifier = nn.Linear(hidden[1], num_class)

    def forward(self, x, neighbor):
        # Normalise features, then run two transform + propagate stages.
        x, neighbor = nf.normalize(x), [nf.normalize(n) for n in neighbor]
        h, neighbor = self.tran1(x), [self.tran1(n) for n in neighbor]
        x, neighbor_agg = self.app1(h, neighbor, h, neighbor)
        x, neighbor_agg = self.acvt(x), [self.acvt(n) for n in neighbor_agg]
        x, neighbor = self.app2(x, neighbor_agg, h, neighbor)
        h, neighbor = self.tran2(x), [self.tran2(n) for n in neighbor]
        x, neighbor_agg = self.app1(h, neighbor, h, neighbor)
        x, neighbor_agg = self.acvt(x), [self.acvt(n) for n in neighbor_agg]
        x, _ = self.app2(x, neighbor_agg, h, neighbor)
        return self.classifier(x).squeeze(1)
class GraphApp(nn.Module):
    """One personalized-PageRank propagation step (APPNP style)."""

    def __init__(self, alpha):
        super().__init__()
        self.alpha = alpha  # teleport (restart) probability

    def forward(self, x, neighbor_agg, h, neighbor):
        # Aggregate neighbour features (adj @ x restricted to the subgraph),
        # then mix the result with the initial features h.
        x, neighbor_agg = sum_aggregation(x, neighbor_agg)
        x = self.alpha * h + (1 - self.alpha) * x
        return x, neighbor
class GraphAppnp(nn.Module):
    """APPNP propagation step that also updates the neighbour buffers."""

    def __init__(self, alpha):
        super().__init__()
        self.alpha = alpha  # teleport (restart) probability

    def forward(self, x, neighbor_agg, h, neighbor):
        # Aggregate neighbour features (adj @ x restricted to the subgraph).
        x, neighbor_agg = sum_aggregation(x, neighbor_agg)
        # Mix aggregated features with the initial features h, both for the
        # node itself and for each neighbour buffer.
        x = self.alpha * h + (1 - self.alpha) * x
        neighbor_agg = [self.alpha * n + (1 - self.alpha) * agg
                        for agg, n in zip(neighbor_agg, neighbor)]
        return x, neighbor_agg
# Placeholder module constant; the value 42 is arbitrary.
Anything = 42
| StarcoderdataPython |
9746398 | <filename>Programmers/Lv.1/budget.py
def solution(d, budget):
    """Return how many departments can be fully funded within `budget`.

    Requests are granted cheapest-first; counting stops as soon as the
    remaining budget would go negative.
    """
    funded = 0
    remaining = budget
    for request in sorted(d):
        remaining -= request
        if remaining < 0:
            break
        funded += 1
    return funded
| StarcoderdataPython |
1735253 | # -*- coding: utf-8 -*-
import numpy as np
def check_images(fusioned, original):
    """Validate an image pair and normalise shapes to 3-D (H, W, bands).

    Returns (mode, fusioned, original) where mode is 'mtom' (band-to-band
    comparison, equal rank) or 'mtop' (multiband fused vs. a lower-rank,
    presumably panchromatic, original).

    NOTE(review): the first assert compares only len(), i.e. the first
    axis — column counts are not checked.
    """
    assert len(fusioned) == len(original), "Supplied images have different sizes " + \
        str(fusioned.shape) + " and " + str(original.shape)
    if(len(fusioned.shape) == len(original.shape)):
        estado = 'mtom'
        if(len(fusioned.shape) == 2):
            # Promote single-band images to 3-D with one band.
            fusioned = fusioned[:,:,np.newaxis]
            original = original[:,:,np.newaxis]
        else:
            assert fusioned.shape[2] == original.shape[2], "Supplied images have different number of bands "
    else:
        # Ranks differ: the original stays as-is (2-D) in 'mtop' mode.
        estado = 'mtop'
    return estado, fusioned, original
def mse(fusioned, original):
    """Per-band mean squared error between the fused and reference images.

    Returns a numpy array with one MSE value per band of `fusioned`.
    In 'mtop' mode every fused band is compared against the same 2-D
    original.
    """
    mode, fusioned, original = check_images(fusioned, original)
    band_errors = []
    for band in range(fusioned.shape[2]):
        reference = original[:, :, band] if mode == 'mtom' else original
        diff = (fusioned[:, :, band].astype(np.float64)
                - reference.astype(np.float64))
        band_errors.append(np.mean(diff ** 2))
    return np.array(band_errors)
def rmse(fusioned, original):
    """Per-band root mean squared error (element-wise sqrt of `mse`)."""
    return np.sqrt(mse(fusioned, original))
def bias(fusioned, original):
    """Per-band relative bias: 1 - mean(fused_band) / mean(reference).

    Returns a Python list with one value per band of `fusioned`.
    """
    mode, fusioned, original = check_images(fusioned, original)
    band_bias = []
    for band in range(fusioned.shape[2]):
        reference = original[:, :, band] if mode == 'mtom' else original
        ratio = (np.mean(fusioned[:, :, band].astype(np.float64))
                 / np.mean(reference.astype(np.float64)))
        band_bias.append(1 - ratio)
    return band_bias
def correlation_coeff(fusioned, original):
    """Per-band Pearson correlation between the fused and reference images.

    Returns a Python list with one coefficient per band of `fusioned`.
    """
    mode, fusioned, original = check_images(fusioned, original)
    coeffs = []
    for band in range(fusioned.shape[2]):
        reference = original[:, :, band] if mode == 'mtom' else original
        matrix = np.corrcoef(fusioned[:, :, band].astype(np.float64).flat,
                             reference.astype(np.float64).flat)
        coeffs.append(matrix[0][1])
    return coeffs
| StarcoderdataPython |
5130509 | <filename>kea/utils/test_rising_edge_detector.py
from ._rising_edge_detector import rising_edge_detector
from kea.test_utils.base_test import (
KeaTestCase, KeaVivadoVHDLTestCase, KeaVivadoVerilogTestCase)
import random
from myhdl import *
class TestRisingEdgeDetectorSimulation(KeaTestCase):
    """Cosimulation tests for rising_edge_detector (myhdl)."""

    def setUp(self):
        # DUT signals: single-bit clock, reset, trigger and output.
        self.clock = Signal(False)
        self.reset = Signal(False)
        self.trigger = Signal(False)
        self.edge_detected_output = Signal(False)

        self.args = {
            'clock': self.clock,
            'reset': self.reset,
            'trigger': self.trigger,
            'output': self.edge_detected_output,
            'buffer_trigger': False,
        }

        self.arg_types = {
            'clock': 'clock',
            'reset': 'custom',
            'trigger': 'custom',
            'output': 'output',
            'buffer_trigger': 'non-signal',
        }

    @block
    def stimulator(self, clock, reset, trigger):
        ''' A block to drive the reset and trigger inputs.
        '''

        @always(clock.posedge)
        def stim():

            # Randomly drive the reset
            if not reset:
                if random.random() < 0.05:
                    reset.next = True

            else:
                if random.random() < 0.2:
                    reset.next = False

            # Drive the trigger signal with pulses of random lengths
            if not trigger:
                if random.random() < 0.05:
                    trigger.next = True

            else:
                if random.random() < 0.2:
                    trigger.next = False

        return stim

    def test_rising_edge_detection(self):
        ''' On a rising edge of the trigger input the system should output a
        single cycle pulse.

        A reset should set the output low and any edges that had been received
        but not yet signalled on the output should never be output.

        The above is encapsulated in the following timing diagram
        (defined in Wavedrom):

        { "signal": [
            { "name": "clock",
             "wave": "p.....|....|.|......|....|..." },

            { "name": "reset",
             "wave": "0.....|....|.|......|1..0|.10" },

            { "name": "trigger",
             "wave": "0.10..|1...|0|1010..|.10.|10.",},

            { "name": "output pulse",
             "wave": "0..10.|.10.|.|.1010.|....|...",},
        ]}
        '''

        cycles = 4000

        @block
        def stimulate_and_check(
            clock, reset, trigger, output, buffer_trigger):

            return_objects = []

            return_objects.append(self.stimulator(clock, reset, trigger))

            # One-cycle history of trigger, used to spot rising edges.
            trigger_d0 = Signal(False)
            rising_edge_detected = Signal(False)

            @always(clock.posedge)
            def check():

                trigger_d0.next = trigger

                if trigger and not trigger_d0:
                    # Detect rising edges on trigger
                    rising_edge_detected.next = True

                else:
                    rising_edge_detected.next = False

                if reset:
                    # Reset the test signals.  trigger_d0 is forced high so
                    # a trigger held high through reset does not count as a
                    # fresh edge afterwards.
                    trigger_d0.next = True
                    rising_edge_detected.next = False

                    # On reset output should be set low
                    self.assertFalse(output)

                elif rising_edge_detected:
                    # Check the output is as expected
                    self.assertTrue(output)

                else:
                    # At all other times the output should be set low
                    self.assertFalse(output)

            return_objects.append(check)

            return return_objects

        dut_outputs, ref_outputs = self.cosimulate(
            cycles, rising_edge_detector, rising_edge_detector, self.args,
            self.arg_types,
            custom_sources=[(stimulate_and_check, (), self.args)])

        self.assertEqual(dut_outputs, ref_outputs)

    def test_buffered_rising_edge_detection(self):
        ''' On a rising edge of the trigger input the system should output a
        single cycle pulse.

        When the `buffer_trigger` argument is set true, this block should
        buffer the input. This amounts to a double buffer before the logic to
        protect against meta stability. It also allows the tools to place the
        trigger input close to the pin.

        A reset should set the output low and any edges that had been received
        but not yet signalled on the output should never be output.

        The above is encapsulated in the following timing diagram
        (defined in Wavedrom):

        { "signal": [
            { "name": "clock",
             "wave": "p.....|....|.|......|....|..." },

            { "name": "reset",
             "wave": "0.....|....|.|......|1..0|.10" },

            { "name": "trigger",
             "wave": "0.10..|1...|0|1010..|.10.|10.",},

            { "name": "output pulse",
             "wave": "0...10|..10|.|..1010|....|...",},
        ]}
        '''

        cycles = 4000

        self.args['buffer_trigger'] = True

        @block
        def stimulate_and_check(
            clock, reset, trigger, output, buffer_trigger):

            return_objects = []

            return_objects.append(self.stimulator(clock, reset, trigger))

            # Two-cycle history of trigger: the expected behaviour now
            # includes the extra input buffer stage.
            trigger_d0 = Signal(False)
            trigger_d1 = Signal(False)
            rising_edge_detected = Signal(False)

            @always(clock.posedge)
            def check():

                trigger_d0.next = trigger
                trigger_d1.next = trigger_d0

                if trigger_d0 and not trigger_d1:
                    # Detect rising edges on trigger
                    rising_edge_detected.next = True

                else:
                    rising_edge_detected.next = False

                if reset:
                    # Reset the test signals
                    trigger_d1.next = True
                    rising_edge_detected.next = False

                    # On reset output should be set low
                    self.assertFalse(output)

                elif rising_edge_detected:
                    # Check the output is as expected
                    self.assertTrue(output)

                else:
                    # At all other times the output should be set low
                    self.assertFalse(output)

            return_objects.append(check)

            return return_objects

        dut_outputs, ref_outputs = self.cosimulate(
            cycles, rising_edge_detector, rising_edge_detector, self.args,
            self.arg_types,
            custom_sources=[(stimulate_and_check, (), self.args)])

        self.assertEqual(dut_outputs, ref_outputs)
class TestRisingEdgeDetectorVivadoVhdlSimulation(
    KeaVivadoVHDLTestCase, TestRisingEdgeDetectorSimulation):
    # Re-runs the full suite through Vivado VHDL conversion/simulation.
    pass
class TestRisingEdgeDetectorVivadoVerilogSimulation(
    KeaVivadoVerilogTestCase, TestRisingEdgeDetectorSimulation):
    """Repeat every rising-edge-detector test case through the Verilog flow.

    All test methods are inherited unchanged from
    TestRisingEdgeDetectorSimulation; the KeaVivadoVerilogTestCase mixin
    presumably redirects cosimulation through Vivado-converted Verilog --
    confirm against the mixin's definition.
    """
    pass
| StarcoderdataPython |
3409229 | from TwisterControlSurface import TwisterControlSurface
def create_instance(c_instance):
    """Factory entry point called by the host application.

    The host hands us its controller handle (``c_instance``) and expects
    a fully constructed control-surface object back.
    """
    surface = TwisterControlSurface(c_instance)
    return surface
| StarcoderdataPython |
358984 | <reponame>huaweicloud/huaweicloud-sdk-python-v3
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class VirtualSpace:
    """Model describing one virtual disk-space partition.

    Generated-SDK style model: ``openapi_types`` maps attribute names to
    their declared types and ``attribute_map`` maps attribute names to
    the JSON keys used on the wire.
    """

    sensitive_list = []

    openapi_types = {
        'name': 'str',
        'size': 'str',
        'lvm_config': 'LVMConfig',
        'runtime_config': 'RuntimeConfig'
    }

    attribute_map = {
        'name': 'name',
        'size': 'size',
        'lvm_config': 'lvmConfig',
        'runtime_config': 'runtimeConfig'
    }

    def __init__(self, name=None, size=None, lvm_config=None, runtime_config=None):
        """VirtualSpace - a model defined in huaweicloud sdk.

        The optional config objects are only assigned when provided, so a
        freshly built instance keeps them as ``None`` otherwise.
        """
        self._name = None
        self._size = None
        self._lvm_config = None
        self._runtime_config = None
        self.discriminator = None
        self.name = name
        self.size = size
        if lvm_config is not None:
            self.lvm_config = lvm_config
        if runtime_config is not None:
            self.runtime_config = runtime_config

    @property
    def name(self):
        """Name of this virtual space.

        Only three values are supported: ``kubernetes`` (requires
        lvmConfig), ``runtime`` (requires runtimeConfig) and ``user``
        (requires lvmConfig).
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of this virtual space."""
        self._name = name

    @property
    def size(self):
        """Size of this virtual space as an integer percentage, e.g. ``90%``.

        Note: the sizes of all virtual spaces in one group must not add
        up to more than 100%.
        """
        return self._size

    @size.setter
    def size(self, size):
        """Set the size of this virtual space (integer percentage string)."""
        self._size = size

    @property
    def lvm_config(self):
        """LVM configuration (LVMConfig) of this virtual space."""
        return self._lvm_config

    @lvm_config.setter
    def lvm_config(self, lvm_config):
        """Set the LVM configuration of this virtual space."""
        self._lvm_config = lvm_config

    @property
    def runtime_config(self):
        """Runtime configuration (RuntimeConfig) of this virtual space."""
        return self._runtime_config

    @runtime_config.setter
    def runtime_config(self, runtime_config):
        """Set the runtime configuration of this virtual space."""
        self._runtime_config = runtime_config

    def to_dict(self):
        """Return the model's properties as a dict, recursing into sub-models."""
        result = {}
        for attr_name, _unused in six.iteritems(self.openapi_types):
            attr_value = getattr(self, attr_name)
            if isinstance(attr_value, list):
                result[attr_name] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    attr_value
                ))
            elif hasattr(attr_value, "to_dict"):
                result[attr_name] = attr_value.to_dict()
            elif isinstance(attr_value, dict):
                result[attr_name] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    attr_value.items()
                ))
            elif attr_name in self.sensitive_list:
                # Mask attributes flagged as sensitive.
                result[attr_name] = "****"
            else:
                result[attr_name] = attr_value
        return result

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`."""
        return self.to_str()

    def __eq__(self, other):
        """Two VirtualSpace instances are equal when all attributes match."""
        if not isinstance(other, VirtualSpace):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| StarcoderdataPython |
5130899 | from typing import Callable, Any
from amino import Either, List, Map, _
from amino.lazy import lazy
from amino.func import dispatch
from tubbs.formatter.breaker.cond import BreakCond, BreakCondOr, BreakCondAnd, BreakCondSet
from tubbs.tatsu.breaker_dsl import (Parser, Expr, OrCond, AndCond, NotCond, Prio, Name, Cond, LambdaExpr, Top, Side,
PrioCond, CondStrict)
from tubbs.formatter.breaker.conds import inv
class Builder:
    """Walks a parsed break-DSL AST and builds ``BreakCond`` objects.

    ``conds`` maps condition names, as they appear in the DSL source, to
    condition factories/values (see ``tubbs.formatter.breaker.conds``).
    """

    def __init__(self, conds: Map[str, Any]) -> None:
        self.conds = conds

    @lazy
    def build(self) -> Callable[[Expr], Any]:
        # Lazily construct a dispatcher that routes each AST node type to
        # the identically (snake-case) named method on this builder.
        types = List(Name, Cond, OrCond, AndCond, NotCond, PrioCond, LambdaExpr, Top, Side, Prio, CondStrict)
        return dispatch(self, types, '')

    def name(self, expr: Name) -> str:
        # A bare name node evaluates to its string payload.
        return expr.data

    def lambda_expr(self, expr: LambdaExpr) -> Callable[[Any], Callable[..., bool]]:
        # Fold the attribute-name chain onto amino's ``_`` placeholder,
        # producing a lens-like accessor (e.g. ``_.a.b.c``).
        return expr.method_names.fold_left(_)(lambda z, a: getattr(z, a))

    def cond_strict(self, expr: CondStrict) -> BreakCond:
        # Argument-less condition: look it up directly in the registry.
        name = expr.cond
        return self.conds.lift(name).get_or_fail(f'invalid condition: {name}')

    def cond(self, expr: Cond) -> BreakCond:
        # Condition with arguments: build each argument, then apply the
        # registered factory to them.
        name = expr.cond
        args = expr.arguments.map(self.build)
        f = self.conds.lift(name).get_or_fail(f'invalid condition: {name}')
        return f(*args)

    def or_cond(self, expr: OrCond) -> BreakCond:
        return BreakCondOr(self.build(expr.left), self.build(expr.right))

    def and_cond(self, expr: AndCond) -> BreakCond:
        return BreakCondAnd(self.build(expr.left), self.build(expr.right))

    def not_cond(self, expr: NotCond) -> BreakCond:
        raise Exception('condition negation is not implemented yet')

    def prio_cond(self, expr: PrioCond) -> BreakCond:
        # Attach an explicit priority to the wrapped condition.
        return self.build(expr.expr).prio(expr.prio.value)

    def prio(self, expr: Prio) -> BreakCond:
        return inv(expr.value)

    def side(self, expr: Side) -> BreakCond:
        # Restrict a condition to apply before or after the node.
        cond = self.build(expr.expr)
        return cond.before if expr.side == 'before' else cond.after

    def top(self, expr: Top) -> BreakCond:
        # The root node: the set of all top-level conditions.
        return BreakCondSet(expr.conds / self.build)
def parse_break_expr(parser: Parser, expr: str, conds: Map[str, Any]) -> Either[str, BreakCond]:
    """Parse a break-DSL expression and convert its AST into a BreakCond."""
    parsed = parser.parse(expr, 'top')
    builder = Builder(conds)
    return builder.build(parsed.value)
__all__ = ('parse_break_expr',)
| StarcoderdataPython |
9437 | <reponame>Dridi/blockdiag<filename>src/blockdiag/utils/rst/nodes.py
# -*- coding: utf-8 -*-
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from hashlib import sha1
from docutils import nodes
import blockdiag.parser
import blockdiag.builder
import blockdiag.drawer
class blockdiag(nodes.General, nodes.Element):
    """Docutils node carrying a blockdiag diagram definition.

    The diagram source lives in ``self['code']`` and the rendering
    options in ``self['options']``.
    """
    name = 'blockdiag'
    # Note: at class-body execution time ``blockdiag`` still refers to the
    # imported package, not this class.
    processor = blockdiag

    def to_diagram(self):
        """Parse ``self['code']`` and return the built diagram tree.

        The source is first parsed as-is; if that fails it is retried
        wrapped in an implicit ``blockdiag { ... }`` block, and on success
        ``self['code']`` is replaced with the wrapped form.
        """
        try:
            tree = self.processor.parser.parse_string(self['code'])
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed by the retry path.
            code = '%s { %s }' % (self.name, self['code'])
            tree = self.processor.parser.parse_string(code)
            self['code'] = code  # replace if succeeded
        return self.processor.builder.ScreenNodeBuilder.build(tree)

    def to_drawer(self, image_format, filename, fontmap, **kwargs):
        """Build a DiagramDraw for this node's diagram.

        :param image_format: output format name (e.g. ``'PNG'``, ``'SVG'``)
        :param filename: path the drawer will render into
        :param fontmap: font configuration forwarded to the drawer
        """
        diagram = self.to_diagram()
        return self.processor.drawer.DiagramDraw(image_format, diagram,
                                                 filename, fontmap=fontmap,
                                                 **kwargs)

    def get_path(self, **options):
        """Return the output file path for this diagram.

        The filename embeds a SHA-1 of the diagram source plus the
        effective options, so identical diagrams map to the same file.
        Keyword ``options`` are overridden by ``self['options']``.
        """
        options.update(self['options'])
        hashseed = (self['code'] + str(options)).encode('utf-8')
        hashed = sha1(hashseed).hexdigest()
        filename = "%s-%s.%s" % (self.name, hashed, options['format'].lower())
        outputdir = options.get('outputdir')
        if outputdir:
            filename = os.path.join(outputdir, filename)
        return filename
| StarcoderdataPython |
class 붕어빵틀:
    """A bungeoppang (fish-bread) mold; ``앙꼬`` is the filling flavor."""

    def __init__(self, 앙꼬):
        # Remember which filling this bread was made with.
        self.앙꼬 = 앙꼬
# Demo: make one bread per filling and print each bread's filling.
breads = [붕어빵틀(filling) for filling in ("초코맛", "딸기맛")]
for bread in breads:
    print(bread.앙꼬)
| StarcoderdataPython |
390413 | <filename>notebooks/py_scripts/04-automate-optional.py
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)'
# name: python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Run workflow in an automatic way
#
# In the previous notebook [03-process](03-process.ipynb), we ran through the workflow in detailed steps. For daily running routines, the current notebook provides a more succinct and automatic approach to run through the pipeline using some utility functions in the workflow.
# -
import os
# The notebook lives in notebooks/; work from the repository root so the
# workflow's relative paths resolve.
os.chdir('..')
import numpy as np

from workflow_calcium_imaging.pipeline import lab, subject, session, scan, imaging

# ## Ingestion of subjects, sessions, scans
#
# + Fill subject and session information in files `/user_data/subjects.csv` and `/user_data/sessions.csv`
#
# + Run automatic scripts prepared in `workflow_calcium_imaging.ingest` for ingestion:
#
#     + `ingest_subjects` - ingests data into subject.Subject
#
#     + `ingest_sessions` - ingests data into Equipment, session.Session, session.SessionDirectory, scan.Scan

# +
from workflow_calcium_imaging.ingest import ingest_subjects, ingest_sessions

# Reads the two CSV files above and inserts their rows into the pipeline.
ingest_subjects()
ingest_sessions()
# -

# ## (Optional) Insert new ProcessingParamSet for Suite2p or CaImAn
#
# + This is not needed if you are using an existing ProcessingParamSet.

# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Default Suite2p option set; stored verbatim in the ProcessingParamSet table.
params_suite2p = {'look_one_level_down': 0.0,
                  'fast_disk': [],
                  'delete_bin': False,
                  'mesoscan': False,
                  'h5py': [],
                  'h5py_key': 'data',
                  'save_path0': [],
                  'subfolders': [],
                  'nplanes': 1,
                  'nchannels': 1,
                  'functional_chan': 1,
                  'tau': 1.0,
                  'fs': 10.0,
                  'force_sktiff': False,
                  'preclassify': 0.0,
                  'save_mat': False,
                  'combined': True,
                  'aspect': 1.0,
                  'do_bidiphase': False,
                  'bidiphase': 0.0,
                  'do_registration': True,
                  'keep_movie_raw': False,
                  'nimg_init': 300,
                  'batch_size': 500,
                  'maxregshift': 0.1,
                  'align_by_chan': 1,
                  'reg_tif': False,
                  'reg_tif_chan2': False,
                  'subpixel': 10,
                  'smooth_sigma': 1.15,
                  'th_badframes': 1.0,
                  'pad_fft': False,
                  'nonrigid': True,
                  'block_size': [128, 128],
                  'snr_thresh': 1.2,
                  'maxregshiftNR': 5.0,
                  '1Preg': False,
                  'spatial_hp': 50.0,
                  'pre_smooth': 2.0,
                  'spatial_taper': 50.0,
                  'roidetect': True,
                  'sparse_mode': False,
                  'diameter': 12,
                  'spatial_scale': 0,
                  'connected': True,
                  'nbinned': 5000,
                  'max_iterations': 20,
                  'threshold_scaling': 1.0,
                  'max_overlap': 0.75,
                  'high_pass': 100.0,
                  'inner_neuropil_radius': 2,
                  'min_neuropil_pixels': 350,
                  'allow_overlap': False,
                  'chan2_thres': 0.65,
                  'baseline': 'maximin',
                  'win_baseline': 60.0,
                  'sig_baseline': 10.0,
                  'prctile_baseline': 8.0,
                  'neucoeff': 0.7,
                  'xrange': np.array([0, 0]),
                  'yrange': np.array([0, 0])}
# -

imaging.ProcessingParamSet.insert_new_params(
    processing_method='suite2p',
    paramset_idx=0,
    params=params_suite2p,
    paramset_desc='Calcium imaging analysis with Suite2p using default Suite2p parameters')

# ## Trigger autoprocessing of the remaining calcium imaging workflow

from workflow_calcium_imaging import process

# + The `process.run()` function in the workflow populates every auto-processing table in the workflow. If a table is dependent on a manual table upstream, it will not get populated until the manual table is inserted.
#
# + At this stage, process script populates through the table upstream of `ProcessingTask` (i.e. scan.ScanInfo)
#
process.run()

# ## Insert new ProcessingTask to trigger ingestion of processing results
#
# To populate the rest of the tables in the workflow, an entry in the `ProcessingTask` needs to be added to trigger the ingestion of the processing results, with the two pieces of information specified:
# + `paramset_idx` used for the processing job
# + output directory storing the processing results

# +
session_key = session.Session.fetch1('KEY')
imaging.ProcessingTask.insert1(dict(session_key,
                                    scan_id=0,
                                    paramset_idx=0,
                                    processing_output_dir='subject3/210107_run00_orientation_8dir/suite2p'), skip_duplicates=True)
# -

# ## Run populate for table `imaging.Processing`
process.run()

# ## Insert new Curation to trigger ingestion of curated results
# A Curation entry points at the ProcessingTask whose outputs should be loaded.
key = (imaging.ProcessingTask & session_key).fetch1('KEY')
imaging.Curation().create1_from_processing_task(key)

# ## Run populate for the rest of the tables in the workflow (takes a while)
process.run()

# ## Summary and next step
#
# + This notebook runs through the workflow in an automatic manner.
#
# + In the next notebook [05-explore](05-explore.ipynb), we will introduce how to query, fetch and visualize the contents we ingested into the tables.
6653703 | import gym
import unittest
import numpy as np
from connect_four.hashing import TicTacToeHasher
from connect_four.transposition import simple_transposition_table
class TestSimpleTranspositionTable(unittest.TestCase):
    """Save/retrieve behaviour of SimpleTranspositionTable."""

    @staticmethod
    def _blank_state():
        # Empty tic-tac-toe board: one 3x3 plane per player, all zeros.
        return np.array([
            [
                [0, 0, 0, ],
                [0, 0, 0, ],
                [0, 0, 0, ],
            ],
            [
                [0, 0, 0, ],
                [0, 0, 0, ],
                [0, 0, 0, ],
            ],
        ])

    def setUp(self) -> None:
        self.env = gym.make('tic_tac_toe-v0')

    def _initial_transposition(self):
        # Put the env on the empty board and hash that position.
        self.env.state = self._blank_state()
        return TicTacToeHasher(self.env).hash()

    def test_save_and_retrieve_initial_state_1_and_1(self):
        transposition = self._initial_transposition()
        table = simple_transposition_table.SimpleTranspositionTable()

        want_phi, want_delta = 1, 1
        table.save(transposition=transposition, phi=want_phi, delta=want_delta)

        got_phi, got_delta = table.retrieve(transposition=transposition)
        self.assertEqual(want_phi, got_phi)
        self.assertEqual(want_delta, got_delta)

    def test_overwrite_save(self):
        transposition = self._initial_transposition()
        table = simple_transposition_table.SimpleTranspositionTable()

        # Saving again for the same transposition must overwrite.
        table.save(transposition=transposition, phi=1, delta=1)
        want_phi, want_delta = 2, 2
        table.save(transposition=transposition, phi=want_phi, delta=want_delta)

        got_phi, got_delta = table.retrieve(transposition=transposition)
        self.assertEqual(want_phi, got_phi)
        self.assertEqual(want_delta, got_delta)
# Allow the tests to be run directly via ``python <this file>``.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3304849 | <filename>leetcode/easy/LoggerRateLimiter.py
# Design a logger system that receive stream of messages along with its timestamps, each message should be
# printed if and only if it is not printed in the last 10 seconds.
# Given a message and a timestamp (in seconds granularity), return true if the message should be printed in
# the given timestamp, otherwise returns false.
# It is possible that several messages arrive roughly at the same time.
# Example:
# Logger logger = new Logger();
# // logging string "foo" at timestamp 1
# logger.shouldPrintMessage(1, "foo"); returns true;
# // logging string "bar" at timestamp 2
# logger.shouldPrintMessage(2,"bar"); returns true;
# // logging string "foo" at timestamp 3
# logger.shouldPrintMessage(3,"foo"); returns false;
# // logging string "bar" at timestamp 8
# logger.shouldPrintMessage(8,"bar"); returns false;
# // logging string "foo" at timestamp 10
# logger.shouldPrintMessage(10,"foo"); returns false;
# // logging string "foo" at timestamp 11
# logger.shouldPrintMessage(11,"foo"); returns true;
class Logger:
    """Rate limiter: a given message may be printed at most once per 10 s."""

    def __init__(self):
        """Initialize your data structure here."""
        # message -> timestamp (seconds) at which it was last printed
        self._last_printed = {}

    def shouldPrintMessage(self, timestamp: int, message: str) -> bool:
        """Return True (and record the print) if *message* may print now.

        A message may be printed when it has never been printed before,
        or when at least 10 seconds have passed since its last print.
        The timestamp is in seconds granularity.
        """
        last = self._last_printed.get(message)
        if last is not None and timestamp - last < 10:
            return False
        self._last_printed[message] = timestamp
        return True
# Your Logger object will be instantiated and called as such:
# obj = Logger()
# param_1 = obj.shouldPrintMessage(timestamp,message)
| StarcoderdataPython |
5040121 | from arduino_tweaks import Uno
| StarcoderdataPython |
5083689 | <gh_stars>1-10
#Modificatins by Sur_vivor
import html
import json
import os
import psutil
import random
import time
import datetime
from typing import Optional, List
import re
import requests
from telegram.error import BadRequest
from telegram import Message, Chat, Update, Bot, MessageEntity
from telegram import ParseMode
from telegram.ext import CommandHandler, run_async, Filters
from telegram.utils.helpers import escape_markdown, mention_html
from cinderella.modules.helper_funcs.chat_status import user_admin, sudo_plus, is_user_admin
from cinderella import dispatcher, OWNER_ID, SUDO_USERS, SUPPORT_USERS, DEV_USERS, WHITELIST_USERS
from cinderella.__main__ import STATS, USER_INFO, TOKEN
from cinderella.modules.disable import DisableAbleCommandHandler, DisableAbleRegexHandler
from cinderella.modules.helper_funcs.extraction import extract_user
from cinderella.modules.helper_funcs.filters import CustomFilters
import cinderella.modules.sql.users_sql as sql
import cinderella.modules.helper_funcs.cas_api as cas
@run_async
def info(bot: Bot, update: Update, args: List[str]):
    """Handler for /info (and /whois): reply with a profile summary.

    The target user is taken from the command arguments, a reply, or
    defaults to the sender. The summary is sent as the caption of the
    user's profile photo when one exists, otherwise as plain text.
    """
    message = update.effective_message
    chat = update.effective_chat
    user_id = extract_user(update.effective_message, args)
    if user_id:
        user = bot.get_chat(user_id)
    elif not message.reply_to_message and not args:
        # No target given: describe the sender.
        user = message.from_user
    elif not message.reply_to_message and (not args or (
            len(args) >= 1 and not args[0].startswith("@") and not args[0].isdigit() and not message.parse_entities(
                [MessageEntity.TEXT_MENTION]))):
        # Argument is neither an @username, a numeric id nor a mention.
        message.reply_text("I can't extract a user from this.")
        return
    else:
        return

    # Build the HTML summary piece by piece.
    text = (f"<b>User Information:</b>\n"
            f"🆔: <code>{user.id}</code>\n"
            f"👤Name: {html.escape(user.first_name)}")
    if user.last_name:
        text += f"\n🚹Last Name: {html.escape(user.last_name)}"
    if user.username:
        text += f"\n♻️Username: @{html.escape(user.username)}"
    text += f"\n☣️Permanent user link: {mention_html(user.id, 'link🚪')}"
    num_chats = sql.get_user_num_chats(user.id)
    text += f"\n🌐Chat count: <code>{num_chats}</code>"
    text += "\n🎭Number of profile pics: {}".format(bot.get_user_profile_photos(user.id).total_count)
    try:
        user_member = chat.get_member(user.id)
        if user_member.status == 'administrator':
            # Fetch the raw member record to read the admin's custom title
            # (not exposed on this library version's ChatMember object).
            result = requests.post(f"https://api.telegram.org/bot{TOKEN}/getChatMember?chat_id={chat.id}&user_id={user.id}")
            result = result.json()["result"]
            if "custom_title" in result.keys():
                custom_title = result['custom_title']
                text += f"\n🛡This user holds the title⚜️ <b>{custom_title}</b> here."
    except BadRequest:
        # User not in this chat (or inaccessible): just skip the title line.
        pass

    # Append the user's standing relative to the bot hierarchy.
    if user.id == OWNER_ID:
        text += "\n🚶🏻♂️Uff,This person is my Owner🤴\nI would never do anything against him!."
    elif user.id in DEV_USERS:
        text += "\n🚴♂️Pling,This person is my dev🤷♂️\nI would never do anything against him!."
    elif user.id == 1118936839:
        # Hard-coded developer account id.
        text += "\n🚴♂️Pling,This person is my Creator/developer🤷♂️\nI would never do anything against him!."
    elif user.id in SUDO_USERS:
        text += "\n🚴♂️Pling,This person is one of my sudo users! " \
                "Nearly as powerful as my owner🕊so watch it.."
    elif user.id in SUPPORT_USERS:
        text += "\n🚴♂️Pling,This person is one of my support users! " \
                "Not quite a sudo user, but can still gban you off the map."
    elif user.id in WHITELIST_USERS:
        text += "\n🚴♂️Pling,This person has been whitelisted! " \
                "That means I'm not allowed to ban/kick them."
    elif user.id == bot.id:
        text += "\n💃Lol🧞♂️It's Me😉"
    text +="\n"
    text += "\nCAS banned: "
    result = cas.banchecker(user.id)
    text += str(result)

    # Let every loaded module append its own per-user info section.
    for mod in USER_INFO:
        if mod.__mod_name__ == "Users":
            continue
        try:
            mod_info = mod.__user_info__(user.id)
        except TypeError:
            # Some modules take (user_id, chat_id) instead.
            mod_info = mod.__user_info__(user.id, chat.id)
        if mod_info:
            text += "\n" + mod_info

    try:
        # Prefer sending the summary as the caption of the newest photo.
        profile = bot.get_user_profile_photos(user.id).photos[0][-1]
        bot.sendChatAction(chat.id, "upload_photo")
        bot.send_photo(chat.id, photo=profile, caption=(text), parse_mode=ParseMode.HTML, disable_web_page_preview=True)
    except IndexError:
        # No profile photo: fall back to a plain text reply.
        update.effective_message.reply_text(text, parse_mode=ParseMode.HTML, disable_web_page_preview=True)
# Register /info and /whois as disableable commands on the dispatcher.
INFO_HANDLER = DisableAbleCommandHandler(["info", "whois"], info, pass_args=True)
dispatcher.add_handler(INFO_HANDLER)
| StarcoderdataPython |
3509833 | <reponame>pveentjer/scylla-stress-orchestrator
import os
import selectors
import subprocess
import time
from scyllaso.util import run_parallel, log_machine, LogLevel, WorkerThread
# Parallel SSH
class PSSH:
    """Fans a single SSH/SCP operation out over many hosts.

    Each public method creates a fresh per-host :class:`SSH` helper and
    delegates the per-host work to it via ``run_parallel``.
    """

    def __init__(self,
                 ip_list,
                 user,
                 ssh_options,
                 use_control_socket=True,
                 silent_seconds=30,
                 log_ssh=False):
        # Connection settings, forwarded to every per-host SSH instance.
        self.ip_list = ip_list
        self.user = user
        self.ssh_options = ssh_options
        self.use_control_socket = use_control_socket
        self.silent_seconds = silent_seconds
        self.log_ssh = log_ssh

    def __new_ssh(self, ip):
        # Single-host SSH helper carrying this object's settings.
        return SSH(ip,
                   self.user,
                   self.ssh_options,
                   use_control_socket=self.use_control_socket,
                   silent_seconds=self.silent_seconds,
                   log_ssh=self.log_ssh)

    def __exec(self, ip, cmd):
        self.__new_ssh(ip).exec(cmd)

    def exec(self, cmd):
        """Run ``cmd`` on every host in parallel."""
        run_parallel(self.__exec, [(ip, cmd) for ip in self.ip_list])

    def async_exec(self, command):
        """Start :meth:`exec` in a worker thread; return its future."""
        # NOTE(review): ``(command)`` is just ``command`` in parentheses,
        # not a 1-tuple -- confirm WorkerThread expects a bare argument here.
        thread = WorkerThread(self.exec, (command))
        thread.start()
        return thread.future

    def __update(self, ip):
        self.__new_ssh(ip).update()

    def update(self):
        """Refresh the package index on every host."""
        # todo: needs to be fixed; should be parallel, now sequential
        for ip in self.ip_list:
            ssh = SSH(ip, self.user, self.ssh_options)
            ssh.update()
        # can't get this to run correctly.
        # run_parallel(self.__update, [ip for ip in self.ip_list])

    def __install_one(self, ip, *packages):
        self.__new_ssh(ip).install_one(*packages)

    def install_one(self, *packages):
        """On each host, install the first available package from ``packages``."""
        run_parallel(self.__install_one, [(ip, *packages) for ip in self.ip_list])

    def __try_install(self, ip, *packages):
        self.__new_ssh(ip).try_install(*packages)

    def try_install(self, *packages):
        """Install ``packages`` on every host, ignoring failures."""
        run_parallel(self.__try_install, [(ip, *packages) for ip in self.ip_list])

    def __install(self, ip, *packages):
        self.__new_ssh(ip).install(*packages)

    def install(self, *packages):
        """Install ``packages`` on every host; a failure raises."""
        run_parallel(self.__install, [(ip, *packages) for ip in self.ip_list])

    def __scp_from_remote(self, src, dst_dir, ip):
        # Each host's files land in a per-IP subdirectory of dst_dir.
        self.__new_ssh(ip).scp_from_remote(src, os.path.join(dst_dir, ip))

    def scp_from_remote(self, src, dst_dir):
        """Copy ``src`` from every host into ``dst_dir/<ip>/``."""
        run_parallel(self.__scp_from_remote, [(src, dst_dir, ip) for ip in self.ip_list])

    def __scp_to_remote(self, src, dst, ip):
        self.__new_ssh(ip).scp_to_remote(src, dst)

    def scp_to_remote(self, src, dst):
        """Copy local ``src`` to ``dst`` on every host."""
        run_parallel(self.__scp_to_remote, [(src, dst, ip) for ip in self.ip_list])

    def __set_governor(self, ip, governor):
        self.__new_ssh(ip).set_governor(governor)

    def set_governor(self, governor):
        """Set the CPU frequency governor on every host."""
        run_parallel(self.__set_governor, [(ip, governor) for ip in self.ip_list])
class SSH:
    """Run commands and copy files on a single host over ssh/scp.

    Optionally multiplexes connections through an OpenSSH control socket
    so repeated commands reuse one TCP/auth handshake.
    """

    def __init__(self,
                 ip,
                 user,
                 ssh_options,
                 silent_seconds=30,
                 use_control_socket=True,
                 log_ssh=False):
        self.ip = ip
        self.user = user
        self.ssh_options = ssh_options
        # Number of initial connect attempts made without logging.
        self.silent_seconds = silent_seconds
        self.log_ssh = log_ssh
        if use_control_socket:
            self.control_socket_file = f"/tmp/{self.user}@{self.ip}.socket"
        else:
            self.control_socket_file = None

    def __wait_for_connect(self):
        # Poll (1 s apart) until an ssh handshake succeeds; sets up the
        # control master on the first successful connection.
        args = f"-o ConnectTimeout=1 -o ConnectionAttempts=1 {self.ssh_options}"
        if self.control_socket_file:
            if os.path.exists(self.control_socket_file):
                # Control master already established; nothing to wait for.
                return
            args = f"{args} -M -S {self.control_socket_file} -o ControlPersist=5m"
        cmd = f'ssh {args} {self.user}@{self.ip} exit'
        exitcode = None
        max_attempts = 300
        for attempt in range(1, max_attempts):
            if attempt > self.silent_seconds:
                # After the silent grace period, log every attempt and
                # surface the ssh output.
                log_machine(self.ip, f'Trying to connect, attempt [{attempt}/{max_attempts}], command [{cmd}]')
                result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
                if result.stdout:
                    lines = result.stdout.splitlines()
                    for line in lines:
                        log_machine(self.ip, line, log_level=LogLevel.info)
                if result.stderr:
                    lines = result.stderr.splitlines()
                    for line in lines:
                        log_machine(self.ip, line, log_level=LogLevel.warning)
                exitcode = result.returncode
            else:
                # Quiet attempt: discard all output.
                exitcode = subprocess.call(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            if exitcode == 0 or exitcode == 1:  # todo: we need to deal better with exit code
                self.wait_for_connect = False
                return
            time.sleep(1)
        raise Exception(f"Failed to connect to {self.ip}, exitcode={exitcode}")

    def __is_connected(self):
        # True when the control-master socket exists and can be reused.
        return self.control_socket_file and os.path.exists(self.control_socket_file)

    def scp_from_remote(self, src, dst_dir):
        """Recursively copy remote ``src`` into local ``dst_dir`` (created if absent)."""
        os.makedirs(dst_dir, exist_ok=True)
        cmd = f'scp {self.ssh_options} -r -q {self.user}@{self.ip}:{src} {dst_dir}'
        self.__scp(cmd)

    def scp_to_remote(self, src, dst):
        """Recursively copy local ``src`` to remote ``dst``."""
        cmd = f'scp {self.ssh_options} -r -q {src} {self.user}@{self.ip}:{dst}'
        self.__scp(cmd)

    def __scp(self, cmd):
        # NOTE(review): the scp exit code is captured but never checked;
        # failures currently pass silently -- confirm whether intended.
        self.__wait_for_connect()
        exitcode = subprocess.call(cmd, shell=True)
        # raise Exception(f"Failed to execute {cmd} after {self.max_attempts} attempts")

    def exec(self, command, ignore_errors=False):
        """Run ``command`` on the host, streaming stdout/stderr to the log.

        Raises on a non-zero exit code unless ``ignore_errors`` is True.
        """
        self.__wait_for_connect()
        cmd_list = ["ssh"]
        if self.__is_connected():
            # Reuse the established control-master connection.
            cmd_list.append("-S")
            cmd_list.append(f"{self.control_socket_file}")
        cmd_list.extend(self.ssh_options.split())
        cmd_list.append(f"{self.user}@{self.ip}")
        cmd_list.append(command)
        if self.log_ssh:
            log_machine(self.ip, cmd_list)
        process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Multiplex stdout/stderr so both streams are logged as they arrive.
        sel = selectors.DefaultSelector()
        sel.register(process.stdout, selectors.EVENT_READ)
        sel.register(process.stderr, selectors.EVENT_READ)
        while True:
            for key, _ in sel.select():
                data = key.fileobj.read1().decode()
                if not data:
                    # EOF on one stream: treat as command completion.
                    exitcode = process.poll()
                    if exitcode == 0 or ignore_errors:
                        return
                    else:
                        raise Exception(f"Failed to execute [{cmd_list}], exitcode={exitcode}")
                lines = data.splitlines()
                log_level = LogLevel.info if key.fileobj is process.stdout else LogLevel.warning
                for line in lines:
                    log_machine(self.ip, line, log_level)

    def async_exec(self, command):
        """Start :meth:`exec` in a worker thread; return its future."""
        # NOTE(review): ``(command)`` is not a 1-tuple -- confirm
        # WorkerThread expects a bare argument here.
        thread = WorkerThread(self.exec, (command))
        thread.start()
        return thread.future

    def update(self):
        """Refresh the package index once per host (guarded by a marker file)."""
        log_machine(self.ip, f'Update: started')
        self.exec(
            f"""
            set -e
            if [ -f /tmp/update.called ] ; then
                # echo "Skipping update"
                exit 0
            fi
            if hash apt-get 2>/dev/null; then
                sudo apt-get -y -qq update
            elif hash yum 2>/dev/null; then
                sudo yum -y -q update
            else
                echo "Cannot update: yum/apt not found"
                exit 1
            fi
            touch /tmp/update.called
            """)
        log_machine(self.ip, f'Update: done')

    def install_one(self, *packages):
        """Install the first package from ``packages`` that the host's
        package manager knows about; fail if none match."""
        self.exec(
            f"""
            set -e
            for package in {" ".join(packages)}
            do
                echo Trying package [$package]
                if hash apt-get 2>/dev/null ; then
                    if sudo apt show $package >/dev/null 2>&1; then
                        echo [{self.ip}] Installing $package
                        sudo apt-get install -y -qq $package
                        exit 0
                    fi
                elif hash yum 2>/dev/null; then
                    if sudo yum info $package >/dev/null 2>&1; then
                        echo [{self.ip}] Installing $package
                        sudo yum -y -q install $package
                        exit 0
                    fi
                else
                    echo " [{self.ip}] Cannot install $package: yum/apt not found"
                    exit 1
                fi
                echo Not found $package
            done
            echo "Could not find any of the packages from {packages}"
            exit 1
            """)

    def try_install(self, *packages):
        """Install ``packages``, ignoring any installation failures."""
        self.install(*packages, ignore_errors=True)

    def install(self, *packages, ignore_errors=False):
        """Install each package via apt-get or yum, whichever is present."""
        for package in packages:
            log_machine(self.ip, f'Install: {package}')
            self.exec(
                f"""
                set -e
                if hash apt-get 2>/dev/null; then
                    sudo apt-get install -y -qq {package}
                elif hash yum 2>/dev/null; then
                    sudo yum -y -q install {package}
                else
                    echo "Cannot install {package}: yum/apt not found"
                    exit 1
                fi
                """, ignore_errors=ignore_errors)

    def set_yaml_property(self, file_path, property, value):
        """Set ``property: value`` in a remote YAML file, replacing an
        existing top-level entry or appending a new one."""
        self.exec(f"""
            set -e
            sudo touch {file_path}
            if grep -q -E "^\\s*{property}\\s*:.*" {file_path}; then
                #echo "Property {property} was found"
                sudo sed -i "s/^\\s*{property}\\s*:.*/{property}: {value}/g" {file_path}
            else
                #echo "Property {property} was not found"
                echo '{property}: {value}' | sudo tee -a {file_path}
            fi
            """)

    def set_governor(self, governor):
        """Set the CPU frequency governor, installing cpupower if needed.

        Silently skips hosts whose CPU does not support ``governor``.
        """
        log_machine(self.ip, f'Set governor [{governor}]')
        self.exec(f"""
            set -e
            if ! hash cpupower 2>/dev/null; then
                echo "Installing cpupower"
                if hash apt-get 2>/dev/null; then
                    sudo rm /var/lib/dpkg/lock-frontend
                    sudo apt-get -y -qq update
                    version=$(uname -r)
                    sudo apt-get -y -qq install linux-tools-$version
                    echo "apt-get found"
                elif hash yum 2>/dev/null; then
                    sudo yum -y -q install kernel-tools
                else
                    echo "Cannot install {governor}: yum/apt not found"
                    exit 1
                fi
            fi
            echo "=========== frequency info before change ==========="
            sudo cpupower frequency-info
            echo "===================================================="
            frequencyinfo=$(sudo cpupower frequency-info)
            if [[ $frequencyinfo =~ "{governor}" ]]; then
                sudo cpupower frequency-set -g {governor}
                echo "=========== frequency info after change ==========="
                sudo cpupower frequency-info
                echo "===================================================="
            else
                echo "Skipping set governor, [{governor}] is not supported"
            fi
            """)
| StarcoderdataPython |
1620595 | <filename>user-config.example.py
# -*- coding: utf-8 -*-
# This is a sample file. You should use generate_user_files.py
# to create your user-config.py file.
# Default site: the 'commons' wiki of the 'commons' family.
mylang = 'commons'
family = 'commons'
# Bot account on that site (``usernames`` is predefined by pywikibot).
usernames['commons']['commons'] = 'ExampleUser'
# File holding the bot password(s); the value here is a placeholder.
password_file = "<PASSWORD>"
| StarcoderdataPython |
6632251 | from boto.s3.key import Key
from boto.s3.connection import S3Connection,OrdinaryCallingFormat
import re,os,pyDate,Utils;
import multiprocessing
# S3 bucket names, one per resource category (several share one bucket).
WL_SP3_BUCKET = 'edu.mbevis.osu.data' ;
WL_NAV_BUCKET = 'edu.mbevis.osu.data' ;
WL_RES_BUCKET = 'edu.mbevis.osu.resources' ;
WL_SOLN_BUCKET= 'edu.mbevis.osu.solutions' ;
WL_RNX_BUCKET = 'edu.mbevis.osu.data'
WL_STN_BUCKET = 'edu.mbevis.osu.data'
WL_APR_BUCKET = 'edu.mbevis.osu.data'

# local dir relative work_dir for resources
WL_RESOURCES_LOCAL = 'resources' ;
class ResourceException(Exception):
    """Raised when a requested resource is missing or ambiguous in S3."""

    def __init__(self, value):
        # Forward to Exception so ``args`` is populated and the exception
        # behaves correctly under pickling/logging (the original skipped this).
        super(ResourceException, self).__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
def get_sp3(year,doy,org,outdir=None):
    """Download the sp3 precise-orbit file for (year, doy) from S3.

    ``org`` is the analysis-center prefix of the file name. Returns the
    local path of the downloaded file; raises ResourceException when no
    unique match exists in the bucket.
    """
    year = Utils.get_norm_year_str(year);
    doy  = Utils.get_norm_doy_str (doy );

    # initialize a date object
    date = pyDate.Date(year=year, doy=doy);

    # create string version of the gps week
    gps_week_str = str(date.gpsWeek);

    # NOTE(review): despite the original "5 characters" comment, this pads
    # only a single zero (weeks 100-999 -> 4 chars); confirm weeks < 100
    # cannot occur here.
    if date.gpsWeek < 1000: gps_week_str = '0'+gps_week_str;

    # create the file name of the sp3
    sp3_file_name_base = org+gps_week_str+str(date.gpsWeekDay)+'.sp3';

    # set outdir to current directory if not set
    if outdir is None: outdir = '.';

    # init s3 connection to the metadata bucket
    conn      = S3Connection(calling_format=OrdinaryCallingFormat()) ;
    bucket    = conn.get_bucket(WL_SP3_BUCKET) ;
    bucketKey = Key(bucket) ;

    # collect every key that starts with the expected file name
    file_list = [];
    for f in bucket.list(prefix=sp3_file_name_base) : file_list.append(f.key);

    # check if the sp3 file listing was empty
    if len(file_list) == 0:
        raise ResourceException('sp3 resource: '+sp3_file_name_base+' could not be located');

    # make sure no more than a single match occurred
    if len(file_list) > 1:
        raise ResourceException('sp3 resource: '+sp3_file_name_base+' matches multiple files');

    # just be explicit about it
    sp3_file_name = file_list[0];

    # create the full path to file on local system
    sp3_file_path = os.path.join(outdir,sp3_file_name);

    # create the s3 object
    bucketKey.key = sp3_file_name;

    # pull the file
    bucketKey.get_contents_to_filename(sp3_file_path);

    # that's all
    return sp3_file_path;
def get_nav(year,doy,org,outdir=None):
    """Download the compressed RINEX navigation file for (year, doy).

    ``org`` is the 4-char site/center prefix of the file name. Returns
    the local path; raises ResourceException when the key is absent.
    """
    year = Utils.get_norm_year_str(year);
    doy  = Utils.get_norm_doy_str (doy );

    # create the file name of the nav, e.g. <org><doy>0.<yy>n.Z
    nav_file_name = org+doy+'0.'+year[2:]+'n.Z';

    # set outdir to current directory if not set
    if outdir is None: outdir = '.';

    # create the sp3 file path
    nav_file_path = os.path.join(outdir,nav_file_name);

    # init s3 connection to the metadata bucket
    conn      = S3Connection(calling_format=OrdinaryCallingFormat()) ;
    bucket    = conn.get_bucket(WL_NAV_BUCKET) ;
    bucketKey = bucket.get_key(nav_file_name) ;

    if bucketKey is None:
        raise ResourceException('nav resource: '+nav_file_name+' could not be located');

    # create the s3 object
    bucketKey.key = nav_file_name;

    # pull the file
    bucketKey.get_contents_to_filename(nav_file_path);

    # that's all
    return nav_file_path;
def get_rnx(year,doy,stn_list,outdir=None):
    """Download Hatanaka-compressed RINEX files for each station in stn_list.

    Tries the session-0 file (<code><doy>0.<yy>d.Z) first and falls back to
    session 1; stations with neither file are reported on stderr and
    skipped. Returns the list of local file paths written to outdir
    (default: current directory).
    """
    year = Utils.get_norm_year_str(year);
    doy = Utils.get_norm_doy_str (doy );
    # init
    rnx_file_list = list();
    # init s3 connection to the metadata bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat());
    bucket = conn.get_bucket(WL_RNX_BUCKET);
    for stnId in stn_list:
        # parse the station id and extract the 4-char station code
        #(ns,code) = Utils.parse_stnId(stnId);
        # no more namespaces
        code = stnId;
        # create the file name of the sp3
        rnx_file_name = code+doy+'0.'+year[2:]+'d.Z';
        # set outdir to current directory if not set
        if outdir is None: outdir = '.';
        # create the sp3 file path
        rnx_file_path = os.path.join(outdir,rnx_file_name);
        # create key path to file in rnx
        #rnx_key_path = '/'.join([ns,year,doy,rnx_file_name]);
        rnx_key_path = rnx_file_name;
        bucketKey = bucket.get_key(rnx_key_path) ;
        if bucketKey is None:
            # session 0 not found: fall back to the session-1 file name
            # create the file name of the rnx with session 1
            rnx_file_name = code+str(doy)+'1.'+str(year)[2:]+'d.Z';
            # create key path to file in s3
            #rnx_key_path = '/'.join([ns,str(year),str(doy),rnx_file_name]);
            rnx_key_path = rnx_file_name;
            # check for session 1 file
            bucketKey = bucket.get_key(rnx_key_path);
            if bucketKey is None:
                # neither session exists: warn and move on to the next station
                os.sys.stderr.write('rnx resource: '+stnId+' could not be located for '+year+' '+doy+'\n');
                continue;
        # create the s3 object
        bucketKey.key = rnx_key_path;
        # pull the file
        bucketKey.get_contents_to_filename(rnx_file_path);
        # add the rinex file path to the file list
        rnx_file_list.append(rnx_file_path);
    return rnx_file_list;
def action(params):
    """Pool worker: params is a (bucket_key, local_path) tuple to download."""
    bucket_key, local_path = params
    bucket_key.get_contents_to_filename(local_path)
def get_rnx_parallel(year, doy, stn_list, outdir=None):
    """Download RINEX files for all stations in stn_list in parallel.

    Same session-0-then-session-1 lookup logic as get_rnx, but the actual
    downloads are fanned out to a multiprocessing pool (capped at 16
    workers) via action(). Returns the list of local file paths queued for
    download, or None when stn_list is empty.
    """
    if len(stn_list) ==0: return
    year = Utils.get_norm_year_str(year);
    doy = Utils.get_norm_doy_str(doy);
    # init
    rnx_file_list = list();
    # init s3 connection to the metadata bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat());
    bucket = conn.get_bucket(WL_RNX_BUCKET);
    list_of_bucket_keys = list()
    for stnId in stn_list:
        # parse the station id and extract the 4-char station code
        #(ns, code) = Utils.parse_stnId(stnId);
        code = stnId;
        # create the file name of the sp3
        rnx_file_name = code + doy + '0.' + year[2:] + 'd.Z';
        # set outdir to current directory if not set
        if outdir is None: outdir = '.';
        # create the sp3 file path
        rnx_file_path = os.path.join(outdir, rnx_file_name);
        # create key path to file in rnx
        #rnx_key_path = '/'.join([ns, year, doy, rnx_file_name]);
        rnx_key_path = rnx_file_name;
        bucketKey = bucket.get_key(rnx_key_path);
        if bucketKey is None:
            # session 0 missing: fall back to the session-1 file name
            # create the file name of the rnx with session 1
            rnx_file_name = code + str(doy) + '1.' + str(year)[2:] + 'd.Z';
            # create key path to file in s3
            #rnx_key_path = '/'.join([ns, str(year), str(doy), rnx_file_name]);
            rnx_key_path = rnx_file_name
            # check for session 1 file
            bucketKey = bucket.get_key(rnx_key_path);
            if bucketKey is None:
                os.sys.stderr.write('rnx resource: ' + stnId + ' could not be located for ' + year + ' ' + doy + '\n');
                continue;
        # create the s3 object
        bucketKey.key = rnx_key_path;
        # enqueue bucket key for download
        list_of_bucket_keys.append((bucketKey,rnx_file_path));
        # update list of rinex files processed
        rnx_file_list.append(rnx_file_path);
    # at least one worker, at most 16
    poolsz = max(1,min(16,len(rnx_file_list)))
    pool = multiprocessing.Pool(poolsz);
    pool.map(action, list_of_bucket_keys)
    pool.close()
    pool.join()
    # pull the file
    #bucketKey.get_contents_to_filename(rnx_file_path);
    # add the rinex file path to the file list
    return rnx_file_list;
def get_stn_info(year,doy,stn_list,outdir=None):
    """Download the day's station-info file (<yyyy>-<ddd>.info) from S3.

    Returns the local path written under outdir (default: current
    directory). Note: stn_list is accepted for interface compatibility but
    is not used by this implementation.
    """
    year = Utils.get_norm_year_str(year)
    doy = Utils.get_norm_doy_str(doy)
    # connect to the station metadata bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat())
    bucket = conn.get_bucket(WL_STN_BUCKET)
    if outdir is None:
        outdir = '.'
    stn_info_file_name = year + '-' + doy + '.info'
    bucketKey = bucket.get_key(stn_info_file_name)
    bucketKey.key = stn_info_file_name
    # destination path mirrors the S3 object name
    out_file_path = os.path.join(outdir, stn_info_file_name)
    bucketKey.get_contents_to_filename(out_file_path)
    return out_file_path
# def get_stn_info_info(year,doy,stn_list,outdir=None):
#
# if len(stn_list) == 0: return
#
# # init
# file_list = list();
#
# # init s3 connection to the metadata bucket
# conn = S3Connection(calling_format=OrdinaryCallingFormat());
# bucket = conn.get_bucket(WL_STN_BUCKET);
#
# list_of_bucket_keys = list()
#
# for stnId in stn_list:
#
# # parse the station id and extract the 4-char station code
# (ns,code) = Utils.parse_stnId(stnId);
#
# # set outdir to current directory if not set
# if outdir is None: outdir = '.';
#
# # set the file name for the station info
# stn_info_file_name = '.'.join((ns,code,'station','info'));
#
# # next, create the path for the station info file
# stn_info_file_path = os.path.join(outdir,stn_info_file_name);
#
# bucketKey = bucket.get_key(stn_info_file_name) ;
#
# # let the user know that the file does not exist and continue
# if bucketKey is None:
# os.sys.stderr.write('station info resource: '+stnId+' could not be located\n');
# continue;
#
# # create the s3 object
# bucketKey.key = stn_info_file_name;
#
# # enqueue
# list_of_bucket_keys.append((bucketKey,stn_info_file_path))
#
# # add to list of files
# file_list.append(stn_info_file_path);
#
# # pull the file
# bucketKey.get_contents_to_filename(stn_info_file_path);
#
# poolsz = min(16, len(file_list))
# pool = multiprocessing.Pool(poolsz);
# pool.map(action, list_of_bucket_keys)
# pool.close()
# pool.join()
#
# return file_list;
def get_apr(year,doy,stn_list,outdir=None):
    """Download the day's a-priori coordinates file (<yyyy>-<ddd>.apr) from S3.

    Returns the local path written under outdir (default: current
    directory). Note: stn_list is accepted for interface compatibility but
    is not used by this implementation.
    """
    year = Utils.get_norm_year_str(year)
    doy = Utils.get_norm_doy_str(doy)
    # connect to the station metadata bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat())
    bucket = conn.get_bucket(WL_STN_BUCKET)
    if outdir is None:
        outdir = '.'
    file_name = year + '-' + doy + '.apr'
    bucketKey = bucket.get_key(file_name)
    bucketKey.key = file_name
    # destination path mirrors the S3 object name
    out_file_path = os.path.join(outdir, file_name)
    bucketKey.get_contents_to_filename(out_file_path)
    return out_file_path
# def get_apr(year,doy,dns,outdir=None):
#
# year = Utils.get_norm_year_str(year);
# doy = Utils.get_norm_doy_str (doy );
#
# # set outdir to current directory if not set
# if outdir is None: outdir = '.';
#
# # set the file name for the station info
# apr_file_name = '.'.join((dns,year,doy,'apr'));
#
# # next, create the path for the station info file
# apr_file_path = os.path.join(outdir,apr_file_name);
#
# # init s3 connection to the metadata bucket
# conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
# bucket = conn.get_bucket(WL_APR_BUCKET) ;
# bucketKey = bucket.get_key(apr_file_name) ;
#
# # make sure we're on track here
# if bucketKey is None:
# raise ResourceException('could not locate resource: '+apr_file_name);
#
# # create the s3 object
# bucketKey.key = apr_file_name;
#
# # pull the file
# bucketKey.get_contents_to_filename(apr_file_path);
#
# # thats a wrap
# return apr_file_path;
def get_bin(program, outdir=None):
    """Fetch the platform-specific binary tarball for *program* from S3.

    The resource id is "<program><delim><platform>.tar.gz"; the tarball is
    written into outdir (default: current directory) and its local path is
    returned. Raises ResourceException for a missing/empty program name or
    a missing S3 object.
    """
    # reject a bogus program name up front
    if program is None or program == "":
        raise ResourceException('invalid program name')
    # platform id distinguishes per-OS builds
    pid = Utils.get_platform_id()
    rid = Utils.get_resource_delimiter().join((program, pid))
    rid = '.'.join((rid, 'tar', 'gz'))
    if outdir is None:
        outdir = '.'
    bin_file_path = os.path.join(outdir, rid)
    # locate the tarball in the resources bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat())
    bucket = conn.get_bucket(WL_RES_BUCKET)
    bucketKey = bucket.get_key(rid)
    if bucketKey is None:
        raise ResourceException('binary resource: '+rid+' could not be located')
    bucketKey.key = rid
    bucketKey.get_contents_to_filename(bin_file_path)
    return bin_file_path
def get_tables(program,outdir=None):
    """Fetch the "<program><delim>tables.tar.gz" resource tarball from S3.

    Writes it into outdir (default: current directory) and returns the
    local path. Raises ResourceException for a missing/empty program name
    or a missing S3 object.
    """
    # make sure program specified is not bogus
    if program is None or program == "":
        raise ResourceException('invalid program name');
    # set outdir to current directory if not set
    if outdir is None: outdir = '.';
    # compute the resource id
    rid = Utils.get_resource_delimiter().join((program,'tables'));
    # add the file suffix and the compression suffix
    rid = '.'.join((rid,'tar','gz'));
    # compute the full file path for tables resource
    tables_file_path = os.path.join(outdir,rid);
    # init s3 connection to the resources bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
    bucket = conn.get_bucket(WL_RES_BUCKET) ;
    bucketKey = bucket.get_key(rid) ;
    if bucketKey is None:
        raise ResourceException('tables resource: '+rid+' could not be located');
    # set the key to download
    bucketKey.key = rid;
    # pull the resource
    bucketKey.get_contents_to_filename(tables_file_path);
    # yup yup
    return tables_file_path
def pushSNX(key_path,file_path):
    """Upload a SINEX file to the solutions bucket as key_path/<basename>."""
    # parse the name of the file
    file_name = os.path.basename(file_path);
    # create the file key path into S3
    file_key_path = "/".join((key_path,file_name));
    # init s3 connection to the metadata bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
    bucket = conn.get_bucket(WL_SOLN_BUCKET) ;
    bucketKey = Key(bucket) ;
    print "pushing snx file",file_path,"-->", file_key_path
    # create the s3 object and upload the local file contents to it
    bucketKey.key = file_key_path; bucketKey.set_contents_from_filename(file_path);
def pushSP3(file_path):
    """Upload an sp3 orbit file to the sp3 bucket under its basename."""
    # the S3 key is simply the file's basename (no directory prefix)
    file_name = os.path.basename(file_path)
    file_key_path = file_name
    # connect to the sp3 bucket and upload
    conn = S3Connection(calling_format=OrdinaryCallingFormat())
    bucket = conn.get_bucket(WL_SP3_BUCKET)
    bucketKey = Key(bucket)
    bucketKey.key = file_key_path
    bucketKey.set_contents_from_filename(file_path)
def pushOUT(key_path,file_path):
    """Upload an output file to the solutions bucket as key_path/<basename>."""
    # parse the name of the file
    file_name = os.path.basename(file_path);
    # create the file key path into S3
    file_key_path = "/".join((key_path,file_name));
    # init s3 connection to the metadata bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
    bucket = conn.get_bucket(WL_SOLN_BUCKET) ;
    bucketKey = Key(bucket) ;
    print "pushing out file",file_path,"-->", file_key_path
    # create the s3 object and upload the local file contents to it
    bucketKey.key = file_key_path; bucketKey.set_contents_from_filename(file_path);
def get_snx(key_path,outdir=None):
    """Download every SINEX file found under *key_path* in the solutions bucket.

    Keys whose names contain ".snx." are downloaded under outdir (default:
    current directory), preserving their key paths (with the GPS week
    fixed up by Utils.fix_gps_week). Directory-creation failures are
    reported on stderr and the offending file skipped. Returns the list of
    local file paths written.
    """
    # init list of files copied
    snx_file_list = list();
    # set outdir to current directory if not set
    if outdir is None: outdir = '.';
    # make sure to expand any user symbols
    outdir = os.path.expanduser(outdir);
    # match names containing ".snx." anywhere (e.g. orgWWWWD.snx.gz);
    # raw string avoids the invalid-escape-sequence deprecation
    pattern = re.compile(r'.*\.snx\..*');
    # init s3 connection to the solutions bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
    bucket = conn.get_bucket(WL_SOLN_BUCKET) ;
    bucketKey = Key(bucket) ;
    # list all keys below the requested prefix
    file_keys = bucket.list(prefix=key_path);
    for fk in file_keys:
        # make sure it's a sinex file
        if not pattern.match(fk.key): continue;
        # fix the file name for unpadded gps week
        file_name = Utils.fix_gps_week(fk.key);
        # create file path w.r.t. outdir
        file_path = os.path.join(outdir,file_name);
        # make the enclosing directory if it does not exist
        file_root = os.path.split(file_path)[0];
        try:
            if not os.path.isdir(file_root): os.makedirs(file_root);
        except Exception as e:
            os.sys.stderr.write(str(e)+'\n');
            continue;
        # BUGFIX: assign the key *name*; the original assigned the boto Key
        # object itself (bucketKey.key = fk), not its string name
        bucketKey.key = fk.key;
        # get the snx resource
        bucketKey.get_contents_to_filename(file_path);
        # add the file to the file list
        snx_file_list.append(file_path);
    return snx_file_list;
def get_resources(key_path,ext=None,outdir=None):
    """Download all files under *key_path* in the solutions bucket that match *ext*.

    ext is a literal filename suffix such as '.mat.gz' (escaped before being
    compiled into a regex); when None, every key under the prefix matches.
    Files are written under outdir (default: current directory) preserving
    key paths, with the GPS week fixed by Utils.fix_gps_week. Returns the
    list of local file paths written.
    """
    # init list of files copied
    res_file_list = list();
    # BUGFIX: the old default ext='*' produced the regex '.**', which makes
    # re.compile raise "multiple repeat". An empty suffix gives the intended
    # match-everything pattern '.*'.
    if ext is None: ext = '';
    # set outdir to current directory if not set
    if outdir is None: outdir = '.';
    # make sure to expand any user symbols
    outdir = os.path.expanduser(outdir);
    # escape the suffix so '.gz' matches a literal dot (the old code only
    # escaped '.', leaving other metacharacters live)
    pattern = re.compile('.*'+re.escape(ext));
    # init s3 connection to the solutions bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
    bucket = conn.get_bucket(WL_SOLN_BUCKET) ;
    bucketKey = Key(bucket) ;
    # list all keys below the requested prefix
    file_keys = bucket.list(prefix=key_path);
    for fk in file_keys:
        # skip anything that does not match the requested suffix
        if not pattern.match(fk.key): continue;
        # fix the file name for unpadded gps week
        file_name = Utils.fix_gps_week(fk.key);
        # create file path w.r.t. outdir
        file_path = os.path.join(outdir,file_name);
        # make the enclosing directory if it does not exist
        file_root = os.path.split(file_path)[0];
        try:
            if not os.path.isdir(file_root): os.makedirs(file_root);
        except Exception as e:
            os.sys.stderr.write(str(e)+'\n');
            continue;
        # BUGFIX: assign the key *name*; the original assigned the boto Key
        # object itself (bucketKey.key = fk), not its string name
        bucketKey.key = fk.key;
        # pull the resource
        bucketKey.get_contents_to_filename(file_path);
        # add the file to the file list
        res_file_list.append(file_path);
    return res_file_list;
def list_resources(key_path,ext=None,outdir=None):
    """List (without downloading) the local paths get_resources would create.

    Same matching rules as get_resources: ext is a literal suffix such as
    '.mat.gz' (None matches everything), keys are listed under *key_path*
    in the solutions bucket, and each matching key is mapped to
    outdir/<fixed key path>. Returns that list of paths.
    """
    # init list of matching local paths
    res_file_list = list();
    # BUGFIX: the old default ext='*' produced the regex '.**', which makes
    # re.compile raise "multiple repeat". An empty suffix gives the intended
    # match-everything pattern '.*'.
    if ext is None: ext = '';
    # set outdir to current directory if not set
    if outdir is None: outdir = '.';
    # make sure to expand any user symbols
    outdir = os.path.expanduser(outdir);
    # escape the suffix so '.mat.gz' matches literally
    pattern = re.compile('.*'+re.escape(ext));
    # init s3 connection to the solutions bucket
    conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
    bucket = conn.get_bucket(WL_SOLN_BUCKET) ;
    # list all keys below the requested prefix
    file_keys = bucket.list(prefix=key_path);
    for fk in file_keys:
        # skip anything that does not match the requested suffix
        if not pattern.match(fk.key): continue;
        # fix the file name for unpadded gps week
        file_name = Utils.fix_gps_week(fk.key);
        # record the local path this key would be copied to
        res_file_list.append(os.path.join(outdir,file_name));
    return res_file_list;
def soln_exists(date,expt,org,net='n0'):
    """Check whether the final solution SINEX for a day already exists in S3.

    Returns (exists, fullFilePath) where fullFilePath is the bucket-relative
    key yyyy/ddd/<expt>/<org>/<net>/<org><wwwwd>.snx.gz.
    """
    # init s3 connection
    conn = S3Connection(calling_format=OrdinaryCallingFormat());
    # create a bucket object into s3
    bucket = conn.get_bucket(WL_SOLN_BUCKET);
    # construct the relative path to where the file should be
    relPath = date.yyyy()+"/"+date.ddd()+"/"+expt+"/"+org+"/"+net
    # construct the name of the sinex file
    fileName = org+date.wwwwd()+".snx.gz"
    # full file path
    fullFilePath = relPath + "/" + fileName
    # create a file in to the bucket (key object only; no data transferred)
    key = Key(bucket,fullFilePath)
    return key.exists(),fullFilePath
if __name__ == '__main__':
    # Ad-hoc smoke test: check whether a known solution SINEX exists in S3.
    #files = get_snx('2009/123/odot/g06','~/tmp');
    #for f in files: print f;
    #files = list_resources('2009/123/odot/g06/n1','.mat.gz');
    #for f in files: print f;
    date = pyDate.Date(year=2016,doy=101)
    expt = 'glbf'
    org = 'n08'
    net = 'n0'
    exists = soln_exists(date,expt,org,net)
    print("file: "+exists[1]+", "+str(exists[0]))
3351712 | # Copyright 2012 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
import datetime
import getpass
import hashlib
import optparse
import os
import subprocess
import sys
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(ROOT_DIR, '..', 'third_party'))
import colorama
CHROMIUM_SWARMING_OSES = {
'darwin': 'Mac',
'cygwin': 'Windows',
'linux2': 'Ubuntu',
'win32': 'Windows',
}
def parse_args(use_isolate_server, use_swarming):
  """Process arguments for the example scripts.

  use_isolate_server adds the -I/--isolate-server option (required when
  enabled); use_swarming adds the Swarming-related options (-S required
  when enabled) and translates --os from a sys.platform value into the
  Swarming OS dimension (options.swarming_os). Returns the parsed options.
  """
  os.chdir(ROOT_DIR)
  colorama.init()
  # the calling script's module docstring doubles as the --help description
  parser = optparse.OptionParser(description=sys.modules['__main__'].__doc__)
  if use_isolate_server:
    parser.add_option(
        '-I', '--isolate-server',
        metavar='URL', default=os.environ.get('ISOLATE_SERVER', ''),
        help='Isolate server to use')
  if use_swarming:
    # default task name is unique per user and per launch time
    task_name = '%s-%s-hello_world' % (
        getpass.getuser(),
        datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
    parser.add_option(
        '--idempotent', action='store_true',
        help='Tells Swarming to reused previous task result if possible')
    parser.add_option(
        '-S', '--swarming',
        metavar='URL', default=os.environ.get('SWARMING_SERVER', ''),
        help='Swarming server to use')
    parser.add_option(
        '-o', '--os', default=sys.platform,
        help='Swarming slave OS to request. Should be one of the valid '
             'sys.platform values like darwin, linux2 or win32 default: '
             '%default.')
    parser.add_option(
        '-t', '--task-name', default=task_name,
        help='Swarming task name, default is based on time: %default')
  parser.add_option('-v', '--verbose', action='count', default=0)
  parser.add_option(
      '--priority', metavar='INT', type='int', help='Priority to use')
  options, args = parser.parse_args()
  if args:
    parser.error('Unsupported argument %s' % args)
  if use_isolate_server and not options.isolate_server:
    parser.error('--isolate-server is required.')
  if use_swarming:
    if not options.swarming:
      parser.error('--swarming is required.')
    # replace the raw sys.platform value with the Swarming OS dimension
    options.swarming_os = CHROMIUM_SWARMING_OSES[options.os]
    del options.os
  return options
def note(text):
  """Prints a formatted note."""
  # highlight in bright yellow, prefixed with "-> ", then reset the color
  highlighted = '%s%s\n-> %s%s' % (
      colorama.Fore.YELLOW, colorama.Style.BRIGHT, text, colorama.Fore.RESET)
  print(highlighted)
def run(cmd, verbose):
  """Prints the command it runs then run it.

  cmd[0] is resolved relative to the parent directory and executed with the
  current interpreter; '--verbose' is appended once per verbosity level.
  On POSIX the command is wrapped in `time -p`. Raises
  subprocess.CalledProcessError on a non-zero exit.
  """
  # copy so the caller's list is not mutated by the flags added below
  cmd = cmd[:]
  cmd.extend(['--verbose'] * verbose)
  print(
      'Running: %s%s%s' %
      (colorama.Fore.GREEN, ' '.join(cmd), colorama.Fore.RESET))
  cmd = [sys.executable, os.path.join('..', cmd[0])] + cmd[1:]
  if sys.platform != 'win32':
    cmd = ['time', '-p'] + cmd
  subprocess.check_call(cmd)
def capture(cmd):
  """Prints the command it runs then return stdout.

  Like run() but without the verbosity flags or `time` wrapper; the child's
  stdout is captured and returned. Raises subprocess.CalledProcessError on
  a non-zero exit.
  """
  print(
      'Running: %s%s%s' %
      (colorama.Fore.GREEN, ' '.join(cmd), colorama.Fore.RESET))
  cmd = [sys.executable, os.path.join('..', cmd[0])] + cmd[1:]
  return subprocess.check_output(cmd)
def isolate(tempdir, isolate_server, swarming_os, verbose):
  """Archives the payload.

  Runs isolate.py against payload/hello_world.isolate, writing the
  .isolated file into tempdir, and returns (isolated_path, sha1_hex) where
  sha1_hex is the digest of the .isolated file's contents.
  """
  # All the files are put in a temporary directory. This is optional and
  # simply done so the current directory doesn't have the following files
  # created:
  # - hello_world.isolated
  # - hello_world.isolated.state
  isolated = os.path.join(tempdir, 'hello_world.isolated')
  note('Archiving to %s' % isolate_server)
  run(
      [
        'isolate.py',
        'archive',
        '--isolate', os.path.join('payload', 'hello_world.isolate'),
        '--isolated', isolated,
        '--isolate-server', isolate_server,
        '--config-variable', 'OS', swarming_os,
      ], verbose)
  # the sha1 of the .isolated file is the handle used to trigger tasks
  with open(isolated, 'rb') as f:
    hashval = hashlib.sha1(f.read()).hexdigest()
  return isolated, hashval
| StarcoderdataPython |
1783218 | """Compute (all) LCS between two strings with brute force."""
import re
import itertools
def get_all_subsequences_generator(s):
    """Lazily yield every non-empty subsequence of *s* as a tuple of chars."""
    for size in range(1, len(s) + 1):
        for combo in itertools.combinations(s, size):
            yield combo
def get_all_subsequences(s):
    """Return a list of every non-empty subsequence of *s* (tuples of chars)."""
    subs = []
    for size in range(1, len(s) + 1):
        subs += itertools.combinations(s, size)
    return subs
def is_subsequence(subs, s):
    """Return True if the characters of *subs* appear in order within *s*.

    The check is done by matching the regex ".*c1.*c2.*...*". BUGFIX: each
    character is passed through re.escape, so regex metacharacters such as
    '.' or '*' in *subs* are matched literally (the original built the
    pattern unescaped and treated them as regex operators).
    """
    pattern = ".*" + ".*".join(re.escape(c) for c in subs) + ".*"
    if re.match(pattern, s):
        return True
    return False
def lcs_brute_force(s1, s2):
    """Return a generator over all longest common subsequences of s1 and s2.

    Enumerates every subsequence of the shorter string, keeps those that
    are subsequences of the longer one, and yields only those of maximal
    length (as tuples of characters).
    """
    # enumerate subsequences of the shorter string against the longer one
    shorter, longer = (s1, s2) if len(s1) < len(s2) else (s2, s1)
    common = [sub for sub in get_all_subsequences_generator(shorter)
              if is_subsequence(sub, longer)]
    best = len(max(common, key=len))
    return (seq for seq in common if len(seq) == best)
def main():
    """Demo driver: print all LCSs for a couple of hard-coded string pairs."""
    examples = [("ABCBDAB", "BDCABA"), ("ABDEDA", "ADEBADDA")]
    for s, t in examples:
        lcs = lcs_brute_force(s, t)
        # collapse the character tuples back into plain strings for display
        lcs = ["".join(i) for i in lcs]
        print(f"LCS between {s} and {t}: {lcs}")
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1859021 | <reponame>koshian2/TPU-Benchmark
import pickle, os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# Load the pickled CIFAR-100 train/test batches. encoding="latin-1" is
# required because the archives were pickled under Python 2.
with open("cifar-100-python/train", "rb") as fp:
    train = pickle.load(fp, encoding="latin-1")
with open("cifar-100-python/test", "rb") as fp:
    test = pickle.load(fp, encoding="latin-1")
def parse_pickle(rawdata, rootdir):
    """Write every image of a CIFAR-100 batch to rootdir/<label>/<filename>.

    rawdata: unpickled CIFAR-100 batch dict with 'filenames', 'fine_labels'
    and 'data' entries. rootdir: output directory; one two-digit
    subdirectory is created per fine label (00..99).
    """
    # one directory per fine label
    for label in range(100):
        label_dir = os.path.join(rootdir, f"{label:02d}")
        if not os.path.exists(label_dir):
            os.mkdir(label_dir)
    m = len(rawdata["filenames"])
    for i in range(m):
        if i % 100 == 0:
            print(i)  # coarse progress indicator
        filename = rawdata["filenames"][i]
        label = rawdata["fine_labels"][i]
        data = rawdata["data"][i]
        # stored flat as channel-first (3, 32, 32); convert to HWC (32, 32, 3)
        data = data.reshape(3, 32, 32)
        data = np.swapaxes(data, 0, 2)
        data = np.swapaxes(data, 0, 1)
        with Image.fromarray(data) as img:
            # BUGFIX: save under the record's own filename; the original
            # computed `filename` but wrote every image to a single literal
            # placeholder path, overwriting itself
            img.save(os.path.join(rootdir, f"{label:02d}", filename))
# Dump both splits into per-class folders under cifar100-raw/
parse_pickle(train, "cifar100-raw/train")
parse_pickle(test, "cifar100-raw/test")
6600822 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 24 10:43:31 2016
@author: <NAME>
"""
def maximum_b_extent():
    """Return per-plane maximum B-field extent dicts, keyed by z position (m).

    Each value maps 'x_min'/'x_max'/'y_min'/'y_max'/'z_min'/'z_max' to the
    plane's bounds; z_min and z_max both equal the plane's z coordinate.
    """
    # (x_min, x_max, y_min, y_max) per measurement plane z
    bounds = {
        0.249: (-0.028, 0.025, -0.043, 0.039),
        0.302: (-0.022, 0.021, -0.038, 0.04),
        0.357: (-0.041, 0.030, -0.019, 0.0255),
        0.416: (-0.044, 0.031, -0.022, 0.027),
    }
    extent_dicts = {}
    for z, (x_lo, x_hi, y_lo, y_hi) in bounds.items():
        extent_dicts[z] = {'x_min': x_lo, 'x_max': x_hi,
                           'y_min': y_lo, 'y_max': y_hi,
                           'z_min': z, 'z_max': z}
    return extent_dicts
def maximum_all_probe_extent(plane):
    """Placeholder: intended to return the maximum extent over all probes
    for the given measurement plane. Not implemented yet (returns None)."""
    pass
| StarcoderdataPython |
# Prefer setuptools; fall back to distutils on minimal installations.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# The PyPI long description is taken verbatim from the README.
with open("README.rst", "r") as f:
    long_description = f.read()
setup(
    name="piece_table",
    version="0.0.3",
    description="A Python implementation of the piece table data structure",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/saiguy3/piece_table",
    packages=["piece_table"],
    license="MIT",
    python_requires=">=3",
    classifiers=[
        "Development Status :: 1 - Planning",
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Utilities",
    ],
    keywords=["piecetable", "table"],
)
| StarcoderdataPython |
11355391 | <reponame>MorrellLAB/Crossing_Over<filename>scripts/data_handling/Plink2Rqtl2.py<gh_stars>1-10
#!/usr/bin/env python3
"""This script takes in Plink 1.9 PED and MAP files and an AB genotype lookup
table. It reformats data to R/qtl2 required input formats. Script currently
outputs in your current working directory.
Usage: ./PED_parental_fill-in.py [ped] [map] [lookup_table] [out_fp]
Where:
1) [ped] is a Plink PED file
2) [map] is a Plink MAP file with physical positions in bp
Note: this is important because script converts physical positions from
bp to Mbp as required by R/qtl2
3) [lookup_table] is a file used to convert genotypes into AB genotypes
4) [out_fp] is the full path to the output file directory plus the output file prefix
Note: Currently converts intercrosses only. Doesn't generate phenotype CSV
files yet, but feature coming soon. Script assumes user is running Python
v3.7 or later due to the assumption that dictionaries maintain insertion order.
"""
import sys
import os
import re
import pandas as pd
def parse_ped(pedfile):
    """Read a Plink PED file into a dict keyed by the sample identifier.

    Returns {sample_id: [all whitespace-split fields of that PED row]}.
    """
    with open(pedfile, 'r') as handle:
        rows = [raw.strip().split() for raw in handle]
    # column 1 of a PED row is the individual (sample) identifier
    return {fields[1]: fields for fields in rows}
def parse_map(mapfile):
    """Read a Plink MAP file into {snp_id: [chr, snp_id, cM, bp]} (all strings)."""
    with open(mapfile, 'r') as handle:
        rows = [raw.strip().split() for raw in handle]
    # column 1 of a MAP row is the SNP identifier
    return {fields[1]: fields for fields in rows}
def parse_lookup_table(lookupfile):
    """Read the AB-genotype lookup CSV into {snp_id: [snp_id, A_allele, B_allele]}."""
    with open(lookupfile, 'r') as handle:
        rows = [raw.strip().split(',') for raw in handle]
    # the first CSV column is the SNP identifier
    return {fields[0]: fields for fields in rows}
def _pair_alleles(fields):
    """Collapse a flat allele list [a1, b1, a2, b2, ...] into two-char genotypes."""
    return [a + b for a, b in zip(fields[0::2], fields[1::2])]

def make_geno(ped_dat):
    """Split PED records into founder and progeny genotype dictionaries.

    ped_dat maps sample_id -> PED fields; column 4 is the Plink sex code,
    and in this pipeline a sex code of '1' or '2' marks a founder. Allele
    columns start at index 6 and are paired into genotype strings.

    Returns (founder_dict, progeny_dict), each {sample_id: [genotypes]}.
    """
    founder_dict = {}
    progeny_dict = {}
    for sample_id, row in ped_dat.items():
        # identical pairing logic for founders and progeny, factored into
        # _pair_alleles (the original duplicated it in both branches)
        genotypes = _pair_alleles(row[6:])
        if row[4] in ('1', '2'):
            founder_dict[sample_id] = genotypes
        else:
            progeny_dict[sample_id] = genotypes
    return (founder_dict, progeny_dict)
def make_dataframe(geno_dat, map_dat):
    """Build the R/qtl2 genotype frame: rows are sample IDs, columns are markers.

    map_dat is only consulted for its (insertion-ordered) marker names.
    """
    marker_names = list(map_dat.keys())
    return pd.DataFrame.from_dict(geno_dat, orient='index', columns=marker_names)
def convert_to_abgeno(geno_df, lookup_table):
    """Recode raw genotypes to A/B representation using the lookup table.

    lookup_table maps snp -> [snp, A_allele, B_allele]. Columns without a
    lookup entry are left untouched. The dataframe is modified in place and
    also returned (matching the original behavior).
    """
    abgeno_df = geno_df
    for snp_col in abgeno_df.columns:
        if snp_col not in lookup_table:
            continue
        a_allele = lookup_table[snp_col][1]
        b_allele = lookup_table[snp_col][2]
        # BUGFIX: substitute both alleles in a single pass. The original
        # chained two re.sub calls, so the first substitution's output could
        # be re-substituted by the second: with alleles T->'A' and A->'B',
        # genotype "TA" became "AA" then "BB" instead of "AB". re.escape
        # additionally guards any regex-special characters in the alleles.
        mapping = {a_allele: 'A', b_allele: 'B'}
        pattern = re.compile('|'.join(re.escape(al) for al in (a_allele, b_allele)))
        abgeno_df[snp_col] = [pattern.sub(lambda m: mapping[m.group(0)], g)
                              for g in abgeno_df[snp_col]]
    return abgeno_df
def recode_missing(geno_df):
    """Recode Plink's '00' missing genotypes as 'NA', in place.

    R/qtl2's reader treats a literal 0 as boolean, so missing calls must be
    spelled 'NA'. The dataframe is mutated and also returned.
    """
    for label in geno_df.columns:
        geno_df[label] = ['NA' if g == '00' else g for g in geno_df[label]]
    return geno_df
def make_pos_map(map_dat):
    """Create genetic-map (cM) and physical-map (Mbp) dataframes from MAP data.

    Returns (gmap_df, pmap_df), each with columns ['marker', 'chr', 'pos']
    and sorted by chromosome then position, as R/qtl2 expects. Physical
    positions are converted from bp to Mbp.
    """
    gmap_dict = {}
    pmap_dict = {}
    for key in list(map_dat.keys()):
        chrom = map_dat[key][0]
        # Normalize chromosome labels such as "chr5H" -> "5" or "chr2" -> "2".
        # BUGFIX (clarity): the original condition `"chr" and "H" in chrom`
        # reduced to just `"H" in chrom` by operator precedence; that
        # effective behavior is kept here but written out explicitly.
        if "H" in chrom:
            tmp_chr = chrom.strip("chr").strip("H")
        elif "chr" in chrom:
            tmp_chr = chrom.strip("chr")
        else:
            tmp_chr = chrom
        # Physical positions need to be in Mbp
        # as specified by R/qtl2's documentation
        tmp_phys = int(map_dat[key][3])/1000000
        # Reorder to: marker, chr, pos
        gmap_dict[key] = [map_dat[key][1], tmp_chr, map_dat[key][2]]
        pmap_dict[key] = [map_dat[key][1], tmp_chr, tmp_phys]
    gmap_df = pd.DataFrame.from_dict(gmap_dict, orient='index',
                                     columns=['marker', 'chr', 'pos'])
    pmap_df = pd.DataFrame.from_dict(pmap_dict, orient='index',
                                     columns=['marker', 'chr', 'pos'])
    # Sort by chromosome then position (note: gmap positions are strings,
    # so the genetic map orders lexicographically within a chromosome)
    gmap_df_sorted = gmap_df.sort_values(by=['chr', 'pos'])
    pmap_df_sorted = pmap_df.sort_values(by=['chr', 'pos'])
    return(gmap_df_sorted, pmap_df_sorted)
def write_to_csv(df, outfile_name):
    """Write pandas dataframe to output CSV file. CSV file format
    is the required input file format for R/qtl2."""
    # NOTE(review): `line_terminator` was renamed `lineterminator` in
    # pandas 1.5 and removed in 2.0 -- this call assumes pandas < 2.0.
    df.to_csv(outfile_name, sep=',', header=True, na_rep='NA',
              line_terminator=os.linesep)
    return
def write_map_to_csv(df, outfile_name):
    """Write genetic/physical map pandas dataframe to output CSV file,
    exclude rownames from being written."""
    # NOTE(review): `line_terminator` was renamed `lineterminator` in
    # pandas 1.5 and removed in 2.0 -- this call assumes pandas < 2.0.
    df.to_csv(outfile_name, sep=',', header=True, na_rep='NA',
              index=False, line_terminator=os.linesep)
    return
def main(pedfile, mapfile, lookupfile, out_fp):
    """Driver function.

    Reads the Plink PED/MAP files and the AB lookup table, then writes the
    four R/qtl2 input CSVs using *out_fp* as a path prefix: founder and
    progeny AB genotype files plus the genetic (gmap) and physical (pmap)
    map files.
    """
    # Read in data
    ped_data = parse_ped(os.path.expanduser(pedfile))
    map_data = parse_map(os.path.expanduser(mapfile))
    lookup_table = parse_lookup_table(os.path.expanduser(lookupfile))
    # 1) Create required geno.csv files for founders and progeny
    # Convert Plink PED data format into genotypes
    founder_data, progeny_data = make_geno(ped_data)
    # Use lookup table to convert genotypes into AB genotypes
    # Founder conversion to dataframe
    founder_df = make_dataframe(founder_data, map_data)
    # Progeny conversion to dataframe
    progeny_df = make_dataframe(progeny_data, map_data)
    # Translate genotypes to AB genotypes
    founder_ab = convert_to_abgeno(founder_df, lookup_table)
    progeny_ab = convert_to_abgeno(progeny_df, lookup_table)
    # Recode missing genotypes from '0' to 'NA'
    founder_abr = recode_missing(founder_ab)
    progeny_abr = recode_missing(progeny_ab)
    # Save dataframe to output CSV file
    write_to_csv(founder_abr, os.path.expanduser(out_fp) + "_founder_AB_geno.csv")
    write_to_csv(progeny_abr, os.path.expanduser(out_fp) + "_progeny_AB_geno.csv")
    # 2) Create gmap.csv file with genetic map cM positions
    # 3) Create pmap.csv file with physical positions
    gmap, pmap = make_pos_map(map_data)
    # Save gmap and pmap to output CSV file
    write_map_to_csv(gmap, os.path.expanduser(out_fp) + "_gmap.csv")
    write_map_to_csv(pmap, os.path.expanduser(out_fp) + "_pmap.csv")
    return
# Print usage message if we don't have enough input arguments.
# BUGFIX: the script consumes four positional arguments (ped, map,
# lookup_table, out_fp), so anything below len(sys.argv) == 5 would raise
# IndexError inside main(); the old check only required one argument.
if len(sys.argv) < 5:
    print(__doc__)
    exit(1)
else:
    # Run the program
    main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
| StarcoderdataPython |
6490788 | #std packages
from collections import OrderedDict
#third-party packages
import scipy.constants as sc
from PyQt5.QtGui import QIcon, QFont, QDoubleValidator
from PyQt5.QtWidgets import (QMainWindow, QWidget, QApplication, QPushButton, QLabel, QAction, QComboBox, QStackedWidget,
QDoubleSpinBox, QFormLayout, QCheckBox, QVBoxLayout, QMessageBox, QSplitter, QGridLayout,
QHBoxLayout, QFileDialog, QDialog, QLineEdit, QListWidget, QListWidgetItem, QTabWidget,
QScrollArea, QStatusBar)
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.qt_compat import QtCore, QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT as NavigationToolbar
#set constants
kB = sc.Boltzmann
class MagMessage(QMessageBox):
    """A QMessageBox preconfigured with a window title and body text."""
    def __init__(self, title, message):
        super(MagMessage, self).__init__()
        self.setWindowTitle(title)
        self.setText(message)
class PlottingWindow(QWidget):
    """Widget bundling a Matplotlib figure, canvas and navigation toolbar.

    Parameters
    ----------
    make_ax : bool or str
        Axes layout selector:
          * "cax" -- 2D axes with a slim horizontal colorbar axis below.
          * "z"   -- 3D axes plus a colorbar axis labelled "Temperature (K)".
          * anything else -- one plain 2D axes.
    """
    def __init__(self, make_ax = False):
        super(PlottingWindow, self).__init__()
        self.layout = QVBoxLayout()
        self.fig = Figure()
        self.canvas = FigureCanvas(self.fig)
        self.tools = NavigationToolbar(self.canvas, self)
        if make_ax == "cax":
            # Main axes occupy rows 0-16; the bottom row holds the colorbar axis.
            self.grid = plt.GridSpec(20,1)
            self.ax = self.fig.add_subplot(self.grid[:17,0])
            self.cax = self.fig.add_subplot(self.grid[-1,0])
            self.cax.set_yticklabels([])
        elif make_ax == "z":
            self.grid = plt.GridSpec(2,1, height_ratios=[13,1])
            self.ax = self.fig.add_subplot(self.grid[0], projection = '3d')
            self.cax = self.fig.add_subplot(self.grid[1])
            self.cax.set_yticklabels([])
            self.cax.get_yaxis().labelpad = 15
            self.cax.set_ylabel("Temperature (K)")
        else:
            self.ax = self.fig.add_subplot(111)
        self.fig.subplots_adjust(left=0.1, bottom=0.05, right=0.95, top=0.95)
        self.layout.addWidget(self.canvas)
        self.tool_lo = QHBoxLayout()
        self.tool_lo.addWidget(self.tools)
        self.tool_lo.addStretch()
        if make_ax != "z":
            # The 'Reset axes' button does not work for the 3D plot and is
            # therefore not shown in that case.
            self.reset_axes_btn = QPushButton('Reset axes')
            self.reset_axes_btn.clicked.connect(self.reset_axes)
            self.tool_lo.addWidget(self.reset_axes_btn)
        self.layout.addLayout(self.tool_lo)
        self.setLayout(self.layout)

    def clear_canvas(self):
        """Remove everything from the main axes and repaint the canvas."""
        self.ax.clear()
        self.canvas.draw()

    def reset_axes(self):
        """Autoscale x/y limits to enclose all visible data with a 5% margin.

        Fixes over the original implementation: removed an unused counter and
        a leftover debug print, and an empty selection (no visible, non-empty
        lines) now returns early instead of raising IndexError.
        """
        # Only visible lines that actually contain data take part in the rescale.
        lines_to_manage = [line for line in self.ax.lines
                           if len(line.get_xdata()) >= 1 and line._visible]
        if not lines_to_manage:
            return
        # Seed the limits with the first qualifying line, then widen with the rest.
        x = lines_to_manage[0].get_xdata()
        y = lines_to_manage[0].get_ydata()
        new_x = [x.min(), x.max()]
        new_y = [y.min(), y.max()]
        for line in lines_to_manage:
            x = line.get_xdata()
            y = line.get_ydata()
            if len(x)>1 and len(y)>1:
                if x.min()<new_x[0]: new_x[0] = x.min()
                if x.max()>new_x[1]: new_x[1] = x.max()
                if y.min()<new_y[0]: new_y[0] = y.min()
                if y.max()>new_y[1]: new_y[1] = y.max()
        # Pad degenerate (zero-width) ranges so set_xlim/set_ylim always
        # receive a proper interval.
        if new_x[0] == new_x[1]:
            new_x[0] -= 0.5
            new_x[1] += 0.5
        if new_y[0] == new_y[1]:
            new_y[0] -= 0.5
            new_y[1] += 0.5
        self.ax.set_xlim(new_x[0]-0.05*(new_x[1]-new_x[0]),new_x[1]+0.05*(new_x[1]-new_x[0]))
        self.ax.set_ylim(new_y[0]-0.05*(new_y[1]-new_y[0]),new_y[1]+0.05*(new_y[1]-new_y[0]))
        self.canvas.draw()
class GuessDialog(QDialog):
    """Dialog for editing the starting guess of a relaxation fit.

    A combo box shows the fit history (choosing an entry copies its
    parameters into the input fields); one scientific-notation line edit is
    shown per parameter (tQT, Cr, n, t0, Ueff).  Ueff is displayed divided
    by kB and converted back when the dialog is accepted; the edited values
    are exposed as ``self.return_guess``.
    """
    def __init__(self,
                 parent=None,
                 guess=None,
                 fit_history=None):
        super(GuessDialog, self).__init__()
        self.layout = QVBoxLayout()
        self.setWindowTitle('Guess parameters')
        # Validator shared by all line edits; accepts scientific notation.
        self.validator = QDoubleValidator()
        self.validator.setNotation(QDoubleValidator.ScientificNotation)
        self.fit_history = fit_history
        self.init_guess = guess
        self.values = []  # NOTE(review): appears unused within this class -- possibly read by callers; verify
        # Insertion order fixes the display order of the parameter fields.
        self.parameter_inputs = OrderedDict()
        self.parameter_inputs['tQT']=None
        self.parameter_inputs['Cr']=None
        self.parameter_inputs['n']=None
        self.parameter_inputs['t0']=None
        self.parameter_inputs['Ueff']=None
        self.fit_history_lbl = QLabel('Fit history (latest first)')
        self.layout.addWidget(self.fit_history_lbl)
        self.fit_history_combo = QComboBox()
        for e in self.fit_history:
            rep = self.fit_history_element_repr(e)
            self.fit_history_combo.addItem(rep)
        self.layout.addWidget(self.fit_history_combo)
        self.fit_history_combo.activated.connect(self.fit_take_control)
        self.sim_vals_layout = QFormLayout()
        for key in self.parameter_inputs.keys():
            self.parameter_inputs[key] = QLineEdit()
            self.parameter_inputs[key].setValidator(self.validator)
            if key=='Ueff':
                # Ueff is stored in Joules; display it in units of kB (Kelvin).
                self.parameter_inputs[key].setText(str(self.init_guess[key]/kB))
            else:
                self.parameter_inputs[key].setText(str(self.init_guess[key]))
            self.sim_vals_layout.addRow(key, self.parameter_inputs[key])
        self.layout.addLayout(self.sim_vals_layout)
        accept_btn = QPushButton('Fit')
        accept_btn.clicked.connect(self.on_close)
        self.layout.addWidget(accept_btn)
        self.setLayout(self.layout)
        #self.show()
    def fit_history_element_repr(self, e):
        """Return a multi-line label: 'Fit type: X' plus one 'param: value'
        line per parameter (Ueff shown divided by kB; missing params as None)."""
        fit_type = e[0]
        fit_dict = e[1]
        params = fit_dict['params']
        quants = fit_dict['quantities']
        rep = []
        for key in self.parameter_inputs.keys():
            if key in quants:
                idx = quants.index(key)
                param_val = params[idx]
                if key=='Ueff':
                    param_val /= kB
                rep.append(f'{key}: {param_val:.2e}')
            else:
                rep.append(f'{key}: None')
        rep = f'Fit type: {fit_type}'+'\n'+'\n'.join(rep)
        return rep
    def fit_take_control(self):
        """Copy the parameters of the selected history entry into the inputs."""
        idx = self.fit_history_combo.currentIndex()
        fit = self.fit_history[idx]
        fit_dict = fit[1]
        params = fit_dict['params']
        quants = fit_dict['quantities']
        for key, val in self.parameter_inputs.items():
            if key in quants:
                key_idx = quants.index(key)
                new_val = params[key_idx]
                if key == 'Ueff':
                    new_val /= kB
                val.setText(str(new_val))
    def on_close(self):
        """Collect the edited values into self.return_guess (Ueff converted
        back to Joules) and accept the dialog."""
        self.return_guess = {key: float(val.text()) for key, val in self.parameter_inputs.items()}
        self.return_guess['Ueff'] = self.return_guess['Ueff']*kB
        self.accept()
class SimulationDialog(QDialog):
    """Dialog for adding a simulated relaxation curve.

    Lets the user pick a temperature interval, which relaxation processes to
    include (QT / Raman / Orbach), and the parameter values -- either typed
    in directly or copied from an entry of the fit history.  The edited
    state is written back into the mutable arguments (plot_type_list,
    plot_parameters, min_and_max_temps), which the caller reads afterwards.

    NOTE(review): the mutable default arguments ([], {...}) are shared
    across calls that rely on the defaults -- confirm callers always pass
    their own objects.
    """
    def __init__(self,
                 parent=None,
                 fit_history=[],
                 plot_type_list=[],
                 plot_parameters={'tQT': 0.1, 'Cr': 0.1, 'n': 0.1, 't0': 0.1, 'Ueff': 0.1},
                 min_and_max_temps=[0]*2):
        super(SimulationDialog, self).__init__()
        self.setWindowTitle('Add simulation')
        self.headline_font = QFont()
        self.headline_font.setBold(True)
        # Storing input values
        self.plot_type_list = plot_type_list
        self.plot_parameters = plot_parameters
        self.min_and_max_temps = min_and_max_temps
        self.fit_history = fit_history
        # Abstracting the validator for the QLineEdits
        self.validator = QDoubleValidator()
        self.validator.setNotation(QDoubleValidator.ScientificNotation)
        # Containers for objects
        self.parameter_inputs = OrderedDict()
        self.parameter_inputs['tQT']=None
        self.parameter_inputs['Cr']=None
        self.parameter_inputs['n']=None
        self.parameter_inputs['t0']=None
        self.parameter_inputs['Ueff']=None
        # Maps shorthand -> [full name, checkbox widget (filled in below)].
        self.possible_functions = OrderedDict()
        self.possible_functions['QT']=['QT',None]
        self.possible_functions['R']=['Raman',None]
        self.possible_functions['O']=['Orbach',None]
        self.layout = QVBoxLayout()
        self.fit_history_lbl = QLabel('Fit history (latest first)')
        self.fit_history_lbl.setFont(self.headline_font)
        self.layout.addWidget(self.fit_history_lbl)
        self.fit_history_combo = QComboBox()
        for fit in self.fit_history:
            rep = self.fit_history_element_repr(fit)
            self.fit_history_combo.addItem(rep)
        self.fit_history_combo.activated.connect(self.fit_take_control)
        self.layout.addWidget(self.fit_history_combo)
        # Controls to play with temperature
        self.temp_headline = QLabel('Temperature')
        self.temp_headline.setFont(self.headline_font)
        self.layout.addWidget(self.temp_headline)
        self.temp_hbl = QHBoxLayout()
        self.temp_min = QDoubleSpinBox()
        self.temp_min.setValue(min_and_max_temps[0])
        self.temp_min.editingFinished.connect(self.temp_interval_changed)
        self.temp_hbl.addWidget(self.temp_min)
        self.temp_max = QDoubleSpinBox()
        self.temp_max.setValue(min_and_max_temps[1])
        self.temp_max.editingFinished.connect(self.temp_interval_changed)
        self.temp_hbl.addWidget(self.temp_max)
        self.temp_hbl.addStretch()
        self.layout.addLayout(self.temp_hbl)
        # Controls for which type of plot to consider
        self.plot_headline = QLabel('Plot type to make')
        self.plot_headline.setFont(self.headline_font)
        self.layout.addWidget(self.plot_headline)
        self.plot_type_hbl = QHBoxLayout()
        # NOTE(review): reversed() on a dict items view requires Python >= 3.8.
        for key, val in reversed(self.possible_functions.items()):
            shorthand = key  # NOTE(review): unused local
            fullname = val[0]
            val[1] = QCheckBox(fullname)
            val[1].clicked.connect(self.plot_type_changed)
            if key in self.plot_type_list: val[1].setChecked(True)
            self.plot_type_hbl.addWidget(val[1])
        self.plot_type_hbl.addStretch()
        self.layout.addLayout(self.plot_type_hbl)
        # Values to use
        self.sim_vals_layout = QFormLayout()
        for key in self.parameter_inputs.keys():
            self.parameter_inputs[key] = QLineEdit()
            self.parameter_inputs[key].setValidator(self.validator)
            self.parameter_inputs[key].setText(str(self.plot_parameters[key]))
            self.sim_vals_layout.addRow(key, self.parameter_inputs[key])
        self.layout.addLayout(self.sim_vals_layout)
        # Making control buttons at the end
        self.button_layout = QHBoxLayout()
        self.cancel_btn = QPushButton('Cancel')
        self.cancel_btn.setAutoDefault(False)
        self.cancel_btn.clicked.connect(self.reject)
        self.button_layout.addWidget(self.cancel_btn)
        self.accept_btn = QPushButton('Ok')
        self.accept_btn.setAutoDefault(True)
        self.accept_btn.clicked.connect(self.replace_and_accept)
        self.button_layout.addWidget(self.accept_btn)
        self.layout.addLayout(self.button_layout)
        self.setLayout(self.layout)
        #self.show()
    def fit_history_element_repr(self, e):
        """Return a multi-line label: 'Fit type: X' plus one 'param: value'
        line per parameter (Ueff shown divided by kB; missing params as None)."""
        fit_type = e[0]
        fit_dict = e[1]
        params = fit_dict['params']
        quants = fit_dict['quantities']
        rep = []
        for key in self.parameter_inputs.keys():
            if key in quants:
                idx = quants.index(key)
                param_val = params[idx]
                if key=='Ueff':
                    param_val /= kB
                rep.append(f'{key}: {param_val:.2e}')
            else:
                rep.append(f'{key}: None')
        rep = f'Fit type: {fit_type}'+'\n'+'\n'.join(rep)
        return rep
    def fit_take_control(self):
        """Copy the parameters of the selected history entry into the inputs."""
        idx = self.fit_history_combo.currentIndex()
        fit = self.fit_history[idx]
        fit_dict = fit[1]
        params = fit_dict['params']
        quants = fit_dict['quantities']
        for key, val in self.parameter_inputs.items():
            if key in quants:
                key_idx = quants.index(key)
                new_val = params[key_idx]
                if key == 'Ueff':
                    new_val /= kB
                val.setText(str(new_val))
    def param_values_changed(self):
        """Write the text-field values back into the shared plot_parameters dict."""
        for key, val in self.parameter_inputs.items():
            self.plot_parameters[key] = float(val.text())
    def plot_type_changed(self):
        """Rebuild plot_type_list from the currently checked process boxes."""
        self.plot_type_list = []
        for key in self.possible_functions.keys():
            val = self.possible_functions[key]
            if val[1].isChecked(): self.plot_type_list.append(key)
    def temp_interval_changed(self):
        """Store the spin-box values as the new [min, max] temperature interval.

        NOTE(review): the values are assigned *before* the assert, so an
        inverted interval (min > max) is still stored -- the except clause
        only swallows the AssertionError.  Confirm whether this is intended.
        """
        try:
            self.min_and_max_temps[0] = self.temp_min.value()
            self.min_and_max_temps[1] = self.temp_max.value()
            assert self.min_and_max_temps[0]<=self.min_and_max_temps[1]
        except AssertionError:
            pass
    def replace_and_accept(self):
        """Commit the edited parameter values and accept the dialog."""
        self.param_values_changed()
        self.accept()
class AboutDialog(QDialog):
    """Small 'About' dialog showing the author and two external web links.

    info must provide the keys 'author', 'webpage' and 'personal'.
    """

    def __init__(self, info):
        super(AboutDialog, self).__init__()
        self.setWindowTitle('About')
        self.layout = QVBoxLayout()
        # Author credit line.
        self.author_lbl = QLabel('Written by {}'.format(info['author']))
        # Links open in the system browser via setOpenExternalLinks.
        self.web_lbl = QLabel('<a href={}>Molecular magnetism at AU</a>'.format(info['webpage']))
        self.web_lbl.setOpenExternalLinks(True)
        self.pers_lbl = QLabel('Personal <a href={}>webpage</a>'.format(info['personal']))
        self.pers_lbl.setOpenExternalLinks(True)
        for widget in (self.author_lbl, self.web_lbl, self.pers_lbl):
            self.layout.addWidget(widget)
        self.setLayout(self.layout)
class ParamDialog(QDialog):
    """Read-only dialog displaying fitted parameter values with uncertainties.

    A combo box selects an entry of the fit history; one label per parameter
    (tQT, Cr, n, t0, Ueff) shows 'key = value +- sigma' (Ueff and its sigma
    divided by kB) or 'key = None' when the parameter was not fitted.
    """
    def __init__(self,
                 fit_history,
                 parent=None):
        super(ParamDialog, self).__init__()
        self.setWindowTitle('Fitted parameters')
        self.layout = QVBoxLayout()
        self.fit_history = fit_history
        # Insertion order fixes the display order of the parameter labels.
        self.parameter_labels = OrderedDict()
        self.parameter_labels['tQT']=QLabel()
        self.parameter_labels['Cr']=QLabel()
        self.parameter_labels['n']=QLabel()
        self.parameter_labels['t0']=QLabel()
        self.parameter_labels['Ueff']=QLabel()
        self.fit_history_combo = QComboBox()
        for e in self.fit_history:
            rep = self.fit_history_element_repr(e)
            self.fit_history_combo.addItem(rep)
        self.fit_history_combo.activated.connect(self.show_fit)
        self.layout.addWidget(self.fit_history_combo)
        for key, val in self.parameter_labels.items():
            self.layout.addWidget(val)
        # Show the newest (first) fit right away.
        self.fit_history_combo.setCurrentIndex(0)
        self.show_fit()
        self.setLayout(self.layout)
        #self.show()
    def show_fit(self):
        """Fill the parameter labels from the currently selected history entry."""
        fit_idx = self.fit_history_combo.currentIndex()
        fit = self.fit_history[fit_idx]
        quants = fit[1]['quantities']
        params = fit[1]['params']
        sigmas = fit[1]['sigmas']
        for key, val in self.parameter_labels.items():
            if key in quants:
                key_idx = quants.index(key)
                key_param = params[key_idx]
                key_sigma = sigmas[key_idx]
                if key=='Ueff':
                    # Ueff is stored in Joules; display in units of kB (Kelvin).
                    key_param /= kB
                    key_sigma /= kB
                val.setText(f'{key} = {key_param:.6e} +- {key_sigma:.6e}')
            else:
                val.setText(f'{key} = None')
    def fit_history_element_repr(self, e):
        """Return a multi-line label: 'Fit type: X' plus one 'param: value'
        line per parameter (Ueff shown divided by kB; missing params as None)."""
        fit_type = e[0]
        fit_dict = e[1]
        params = fit_dict['params']
        quants = fit_dict['quantities']
        rep = []
        for key in self.parameter_labels.keys():
            if key in quants:
                idx = quants.index(key)
                param_val = params[idx]
                if key=='Ueff':
                    param_val /= kB
                rep.append(f'{key}: {param_val:.2e}')
            else:
                rep.append(f'{key}: None')
        rep = f'Fit type: {fit_type}'+'\n'+'\n'.join(rep)
        return rep
class FitResultPlotStatus(QDialog):
    """Dialog for choosing which temperature subsets (raw data points and/or
    fitted lines) are shown in the plotting window.

    list_input is a QListWidget whose items carry a dict in data slot 32
    with keys 'temp' (float), 'raw' (bool) and 'fit' (bool).  After the
    dialog is accepted the caller reads ``self.checked_items``, a list of
    [raw_checkbox, fit_checkbox] pairs in item order.
    """
    def __init__(self, list_input=None):
        super(FitResultPlotStatus, self).__init__()
        self.layout = QVBoxLayout()
        self.setWindowTitle("Pick which temperature subsets to be shown in plotting window")
        # Scrollable area holding one row per temperature.
        self.scroll = QScrollArea(self)
        self.scroll.setWidgetResizable(True)
        self.layout.addWidget(self.scroll)
        self.content = QWidget(self.scroll)
        self.cont_lo = QVBoxLayout(self.content)
        self.content.setLayout(self.cont_lo)
        self.scroll.setWidget(self.content)
        # Parallel list of [raw_checkbox, fit_checkbox] pairs, one per item.
        self.checked_items = []
        num_of_temps = list_input.count()
        for idx in range(num_of_temps):
            item = list_input.item(idx)
            item_lo = QHBoxLayout()
            item_data = item.data(32)
            raw_checked = QCheckBox('Raw data points')
            fit_checked = QCheckBox('Fitted line')
            temp = QLabel('{:5.2f}K'.format(item_data['temp']))
            item_lo.addWidget(temp)
            item_lo.addWidget(raw_checked)
            item_lo.addWidget(fit_checked)
            self.checked_items.append([raw_checked, fit_checked])
            # Initialise the checkboxes from the stored visibility state.
            raw_checked.setChecked(item_data['raw'])
            fit_checked.setChecked(item_data['fit'])
            self.cont_lo.addLayout(item_lo)
        # Check-all / uncheck-all convenience buttons.
        self.state_btn_lo = QHBoxLayout()
        self.check_all_btn = QPushButton('Check all')
        self.check_all_btn.clicked.connect(self.check_all_function)
        self.uncheck_all_btn = QPushButton('Uncheck all')
        self.uncheck_all_btn.clicked.connect(self.uncheck_all_function)
        self.state_btn_lo.addWidget(self.uncheck_all_btn)
        self.state_btn_lo.addWidget(self.check_all_btn)
        self.layout.addLayout(self.state_btn_lo)
        # Cancel / Ok buttons.
        self.judge_btn_lo = QHBoxLayout()
        self.states_reject_btn = QPushButton('Cancel')
        self.states_reject_btn.clicked.connect(self.reject)
        self.judge_btn_lo.addWidget(self.states_reject_btn)
        self.states_accept_btn = QPushButton('Ok')
        self.states_accept_btn.clicked.connect(self.accept)
        self.judge_btn_lo.addWidget(self.states_accept_btn)
        self.layout.addLayout(self.judge_btn_lo)
        self.resize(500,700)
        self.setLayout(self.layout)

    def check_all_function(self):
        """Tick every raw-data and fitted-line checkbox."""
        for sublist in self.checked_items:
            sublist[0].setChecked(True)
            sublist[1].setChecked(True)

    def uncheck_all_function(self):
        """Untick every raw-data and fitted-line checkbox."""
        for sublist in self.checked_items:
            sublist[0].setChecked(False)
            sublist[1].setChecked(False)
5109204 | <gh_stars>0
from c0101_retrieve_ref import retrieve_ref
from c0102_timestamp import timestamp_source
from c0104_plot_timestamp import plot_timestamp
from c0105_find_records import find_records
from c0106_record_to_summary import record_to_summary
from c0108_save_meta import save_meta
from c0109_retrieve_meta import retrieve_meta
import glob
import os
import pandas as pd
def trim_record_to_max():
    """Clamp each record's length to the configured maximum and save it.

    For every study in the 'study_list' reference, read each source record's
    timestamped TEMP sensor CSV, take the final time in minutes capped at
    the 'max_record_time' reference value (rounded to 4 decimals), store it
    in the study meta table under 'recordLength', and save the meta table.

    (The original docstring described a different function; it also fetched
    an unused 'sensor_list' reference, which has been removed.)
    """
    print("finding the end of the record")
    study_list = retrieve_ref('study_list')
    max_record_time = retrieve_ref('max_record_time')
    # TEMP is used as the reference sensor for the record length.
    sensor = 'TEMP'
    for study in study_list:
        df_meta = retrieve_meta(study)
        source_path = list(df_meta['source_path'])
        df_meta['recordLength'] = [None] * len(source_path)
        for record in source_path:
            timestamped_file = os.path.join(study, 'formatted', 'source', record, 'All', sensor + ".csv")
            df_timestamped = pd.read_csv(timestamped_file)
            # Record length = last timestamp, clamped to the configured maximum.
            record_length = round(min(df_timestamped['timeMinutes'].max(), max_record_time), 4)
            i = df_meta[df_meta['source_path'] == record].index.values[0]
            df_meta.loc[i, 'recordLength'] = record_length
        # Persist the updated meta table for this study.
        save_meta(study, df_meta)
| StarcoderdataPython |
3394829 | import pygame as pg
from os import path
# Open a 512x512 window and draw a single sprite at its centre.
screen = pg.display.set_mode((512, 512))
# path.join accepts multiple components, so the original nested join was unnecessary.
img = pg.image.load(path.join(path.dirname(__file__), 'img', 'earth001.png'))
screen.blit(img, (256, 256))
pg.display.flip()
# Minimal event loop: keep the window alive until the user closes it.
running = True
while running:
    for e in pg.event.get():
        if e.type == pg.QUIT:
            running = False
# Shut pygame down cleanly so the window is destroyed on exit
# (the original script never called pg.quit()).
pg.quit()
| StarcoderdataPython |
6670713 | <reponame>softwarefactory-project/sf-conf
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Load history data using:
# for i in $(seq 14 -1 0); do
# python status-page-update.py --output /var/www/status/index.html \
# --lib /var/lib/software-factory/backup/status_history.yaml \
# --now $(python -c "import datetime; print((datetime.datetime.utcnow() -
# datetime.timedelta(days=$i)).strftime('%Y-%m-%d'))");
# done
import argparse
import datetime
import json
import yaml
import pymysql
HTML_DOM = """<!DOCTYPE html>
<html>
<head>
<title>Status page</title>
<meta charset='UTF-8'>
<link rel='stylesheet' type='text/css'
href='/static/patternfly/css/patternfly.min.css' />
<link rel='stylesheet' type='text/css'
href='/static/patternfly/css/patternfly-additions.min.css' />
<style>
.body {top: 52px;}
</style>
</head>
<body>
<div class="container" style='width: 100%'>
<div class="list-group list-view-pf list-view-pf-view">
BODY
</div>
</div>
<script src='/static/js/jquery.min.js'></script>
<script src='/static/bootstrap/js/bootstrap.min.js'></script>
<script src='/static/patternfly/js/patternfly.min.js'></script>
<script>
$(".list-group-item-header").click(function(event){
if(!$(event.target).is("button, a, input, .fa-ellipsis-v")){
$(this).find(".fa-angle-right").toggleClass("fa-angle-down")
.end().parent().toggleClass("list-view-pf-expand-active")
.find(".list-group-item-container").toggleClass("hidden");
}
})
$(".list-group-item-container .close").on("click", function (){
$(this).parent().addClass("hidden")
.parent().removeClass("list-view-pf-expand-active")
.find(".fa-angle-right").removeClass("fa-angle-down");
})
</script>
</body>
</html>
"""
def table(dom, columns, rows):
    """Append an HTML table to *dom* (a list of markup fragments).

    columns: header cell texts; when falsy, no <thead> is emitted.
    rows: sequences of cell values.  A row longer than *columns* carries an
    extra trailing element that becomes the <tr> id attribute (and is not
    rendered as a cell).

    The original implementation popped that id off the caller's row list,
    mutating the caller's data; this version leaves *rows* untouched.
    """
    dom.append(
        "<table style='white-space: nowrap; margin: 0px' "
        "class='table table-condensed table-responsive table-bordered'>"
    )
    if columns:
        dom.append("<thead><tr>")
        for col in columns:
            dom.append("<th>%s</th>" % col)
        dom.append("</tr></thead>")
    dom.append("<tbody>")
    for row in rows:
        if columns and len(row) > len(columns):
            # Last element is the row id, not a cell.
            dom.append("<tr id='%s'>" % row[-1])
            cells = row[:-1]
        else:
            dom.append("<tr>")
            cells = row
        for col in cells:
            dom.append("<td>%s</td>" % col)
        dom.append("</tr>")
    dom.append("</tbody></table><br />")
class StatusPage:
    """Builds the daily Zuul job status page.

    update() queries the Zuul MySQL database for the jobs of the last 24
    hours; renderStatus()/render() turn the collected status dicts into the
    PatternFly HTML inserted into HTML_DOM.
    """
    # Columns selected from the joined zuul_build/zuul_buildset tables.
    columns = [
        "zuul_build.uuid",
        "zuul_buildset.event_id",
        "zuul_buildset.pipeline",
        "zuul_build.job_name",
        "zuul_buildset.project",
        "zuul_build.result",
        "zuul_build.log_url",
        "zuul_build.end_time",
        "zuul_buildset.tenant",
    ]
    def connect(self):
        """Return a pymysql connection to the 'zuul' DB using the SF secrets file."""
        secrets = yaml.safe_load(open(
            "/var/lib/software-factory/ansible/group_vars/all.yaml"))
        return pymysql.connect(
            host=secrets["zuul_mysql_host"],
            user=secrets["zuul_mysql_user"],
            password=secrets["<PASSWORD>"],
            db='zuul',
            cursorclass=pymysql.cursors.DictCursor)
    def update(self, now):
        """Query builds started in the 24h before *now* and return a status dict.

        The dict holds 'date', per-tenant job counts under 'jobs' (plus a
        global 'count'), and, under 'tenants', the anomalous builds
        (NODE_FAILURE, or non-SUCCESS outside check/openstack pipelines).
        """
        status = {
            'date': now,
            'jobs': {}
        }
        yesterday = now - datetime.timedelta(days=1)
        with self.connect().cursor() as cursor:
            # The column list is %-formatted in; the literal "%s" arguments
            # re-insert the placeholders consumed by that formatting so the
            # date bounds are still bound as query parameters.
            cursor.execute(
                "SELECT %s FROM zuul_build "
                "INNER JOIN zuul_buildset "
                "ON zuul_build.buildset_id=zuul_buildset.id "
                "WHERE zuul_build.start_time > %s "
                "AND zuul_build.start_time <= %s" %
                (",".join(self.columns), "%s", "%s"), (yesterday, now))
            status["jobs"]["count"] = cursor.rowcount
            for build in cursor.fetchall():
                status["jobs"].setdefault(build["tenant"], 0)
                status["jobs"][build["tenant"]] += 1
                if build["result"] == "SUCCESS":
                    continue
                if build["result"] == "NODE_FAILURE" or \
                        (build["pipeline"] != "check" and
                         "openstack" not in build["pipeline"]):
                    status.setdefault("tenants", {}).setdefault(
                        build["tenant"], []).append(build)
        return status
    def renderStatus(self, status, expanded):
        """Render one day's status dict as a PatternFly list-group item.

        *expanded* controls whether the item starts open (details visible).
        """
        expand = " hidden"
        list_expand = ""
        angle = ""
        if expanded:
            expand = ""
            angle = " fa-angle-down"
            list_expand = " list-view-pf-expand-active"
        statusInfo = []
        # jobs has global count and tenant's counts,
        # remove 1 to get tenant number
        # NOTE(review): this value is immediately overwritten by the
        # recomputation below -- dead assignment.
        active_tenant = len(status["jobs"]) - 1
        # Global summary
        columns = ["tenant", "job executed", "job anomaly"]
        active_tenant = 0
        rows = []
        for tenant, jobs in sorted(status["jobs"].items()):
            if tenant == "count":
                continue
            active_tenant += 1
            failures = 0
            if tenant in status.get("tenants", {}):
                failures = len(status["tenants"][tenant])
            rows.append((tenant, jobs, failures))
        if rows:
            table(statusInfo, columns, rows)
        bugs = 0
        # One detail table per tenant with anomalous builds.
        for tenant, failures in sorted(status.get("tenants", {}).items()):
            bugs += len(failures)
            statusInfo.append("<h3>%s anomalous jobs</h3>" % tenant)
            columns = list(filter(
                lambda x: x != "tenant",
                map(lambda x: x.split('.')[-1], self.columns)))
            rows = []
            for failure in failures:
                row = []
                for column in columns:
                    value = failure.get(column, "")
                    if column == "log_url" and value is not None:
                        value = "<a href='%s'>logs</a>" % (value)
                    elif column == "uuid":
                        try:
                            value = "<a href='/zuul/t/%s/build/%s'>%s</a>" % (
                                tenant, value, value[:7])
                        except TypeError:
                            # It can be a situation, that there will be no
                            # build status because of NODE_FAILURE
                            pass
                    if value is None:
                        value = '-'
                    row.append(value)
                rows.append(row)
            table(statusInfo, columns, rows)
        return ("""
<div class="list-group-item{list_expand}">
  <div class="list-group-item-header">
    <div class="list-view-pf-expand">
      <span class="fa fa-angle-right{angle}"></span>
    </div>
    <div class="list-view-pf-main-info">
      <div class="list-view-pf-body">
        <span class="pficon pficon-info list-view-pf-icon-sm"></span>
        <div class="list-view-pf-description">
          <div class="list-group-item-heading">
            {date}
          </div>
        </div>
        <div class="list-view-pf-additional-info">
          <div class="list-view-pf-additional-info-item">
            <span class="fa fa-bug"></span>
            <strong>{bugs}</strong> bugs
          </div>
          <div class="list-view-pf-additional-info-item">
            <span class="pficon pficon-registry"></span>
            <strong>{jobs}</strong> jobs
          </div>
          <div class="list-view-pf-additional-info-item">
            <span class="pficon pficon-users"></span>
            <strong>{tenant}</strong> tenants
          </div>
        </div>
      </div>
    </div>
  </div>
  <div class="list-group-item-container container-fluid{expand}">
    <div class="close"><span class="pficon pficon-close"></span></div>
    <div>
      {status}
    </div>
  </div>
</div>""".format(
            expand=expand,
            angle=angle,
            list_expand=list_expand,
            date=status["date"].strftime("%Y-%m-%d (%a)"),
            jobs=status["jobs"].get("count", 0),
            tenant=active_tenant,
            bugs=bugs,
            status="\n".join(statusInfo)))
    def render(self, history):
        """Render the whole history into the final HTML page.

        Only the first entry and entries with jobs are shown; the first
        entry that actually has jobs starts expanded.
        """
        expanded = True
        first = True
        body = []
        for status in history:
            jobs = status["jobs"].get("count", 0)
            if first or jobs:
                body.append(self.renderStatus(status, expanded and jobs))
            if jobs:
                expanded = False
            first = False
        return HTML_DOM.replace('BODY', "\n".join(body))
def main():
    """Command-line entry point: update the status history and render outputs.

    Reads/writes the YAML history (--lib), renders the HTML page (--output,
    otherwise prints the newest entry), and optionally dumps the history as
    JSON (--json).  --dry skips querying the database; --now overrides the
    reference date (YYYY-MM-DD).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--now", default=datetime.datetime.utcnow())
    parser.add_argument("--dry", action="store_true",
                        help="Do not update history")
    parser.add_argument("--lib", help="a yaml file to store history")
    parser.add_argument("--json", help="a json file to write status content")
    parser.add_argument("--output", help="a html file to write status page")
    args = parser.parse_args()
    history = []
    if args.lib:
        try:
            with open(args.lib) as fileobj:
                # An empty/blank YAML file loads as None; keep a list either
                # way so prepending the new status below cannot fail.
                history = yaml.safe_load(fileobj) or []
        except IOError:
            # Missing history file is fine on the first run.
            pass
    if not isinstance(args.now, datetime.datetime):
        args.now = datetime.datetime.strptime(args.now, "%Y-%m-%d")
    status = StatusPage()
    if not args.dry:
        history = [status.update(args.now)] + history
    if args.output:
        # Context manager ensures the page is flushed and the handle closed
        # (the original leaked the file object from a bare open().write()).
        with open(args.output, "w") as fileobj:
            fileobj.write(status.render(history))
    else:
        print(history[0])
    if args.lib:
        with open(args.lib, "w") as fileobj:
            yaml.safe_dump(
                history[:60], fileobj, default_flow_style=False)
    if args.json:
        with open(args.json, "w") as fileobj:
            json.dump(history[:60], fileobj, default=str)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3444557 | <reponame>in2p3-dp0/qserv-tests<filename>rootfs/scaletests/python/test_dr6-wfd.py
#!/usr/bin/env python
"""
Utility script to benchmark the dr6-wfd database in qserv
Author: <NAME> - LAPP
"""
import os
import time
import mysql
from mysql.connector import Error
import sqlparse
from optparse import OptionParser
import pandas as pd
def qservInit(host, user, port):
    """Open a qserv connection and return (connection, dict-row cursor)."""
    connection = mysql.connector.connect(host=host, user=user, port=port)
    # dictionary=True makes fetchall() return dict rows keyed by column name.
    return connection, connection.cursor(dictionary=True, buffered=True)
def listDB(conn, cursor):
    """Print the number and names of the databases visible on the cluster."""
    cursor.execute("SHOW DATABASES;")
    res = cursor.fetchall()
    names = [item['Database'] for item in res]
    print(f'found {len(res)} databases on the qserv cluster')
    print(names)
def listTables(conn, cursor, db):
    """Print and return the tables of *db* as a list of one-element lists."""
    cursor.execute(f"SHOW TABLES in {db};")
    res = cursor.fetchall()
    print(f"found {len(res)} tables in {db}")
    tables = [list(row.values()) for row in res]
    print(tables)
    return tables
def countObjects(conn, cursor, db, tables):
    """Print the row count of every table in *tables* (list of [name] lists)."""
    for tbl in tables:
        name = tbl[0]
        cursor.execute(f"SELECT COUNT(*) FROM {db}.{name};")
        res = cursor.fetchall()
        print(f"{name} - {res[0]['COUNT(*)']} entries found - should be 147088445" )
def fullScan_1(conn):
    """Benchmark a qserv full scan with a heavy 3-table join.

    Builds a galaxy-selection query over dp01_dc2_catalogs (reference,
    position and forced_photometry joined on objectId) inside an ra/dec
    bounding box, strips the SQL comments with sqlparse, runs it through
    pandas.read_sql_query and prints the query, the row count and the
    elapsed wall-clock time.
    """
    # Simple query to trigger a full scan
    ra_min = 20
    ra_max = 95
    dec_min = -70
    dec_max = 0
    #build a query equivalent to the "good" and "clean" flags in GCRCatalogs
    query_good = f"""
    AND dref.base_PixelFlags_flag_edge = 0
    AND dref.base_PixelFlags_flag_interpolatedCenter = 0
    AND dref.base_PixelFlags_flag_saturatedCenter = 0
    AND dref.base_PixelFlags_flag_crCenter = 0
    AND dref.base_PixelFlags_flag_bad = 0
    AND dref.base_PixelFlags_flag_suspectCenter = 0
    AND dref.base_PixelFlags_flag_clipped = 0
    """
    query_clean = query_good + 'AND dref.deblend_skipped = 0 '
    # Main query
    query = f"""
    -- This is the part of the query where we specify which columns we want to extract from the tables
    SELECT
    dref.coord_ra as ra,
    dref.coord_dec as dec,
    dref.ext_shapeHSM_HsmShapeRegauss_e1,
    dref.ext_shapeHSM_HsmShapeRegauss_e2,
    dref.objectId as id,
    dfrc.i_modelfit_CModel_instFlux
    -- Here we indicate which tables will be used and at the same time we define a short alias for each table
    FROM
    dp01_dc2_catalogs.reference as dref,
    dp01_dc2_catalogs.position as dpos,
    dp01_dc2_catalogs.forced_photometry as dfrc
    -- Here we specify the ra, dec bounding box and we use the special SQL function: scisql_s2PtInBox
    -- It is mandatory to put this constraint right after the WHERE statement
    -- Using the special function is mandatory to use the special qserv optimization mechanism
    WHERE
    scisql_s2PtInBox(coord_ra, coord_dec, {ra_min}, {dec_min}, {ra_max}, {dec_max}) = 1
    -- The following is a join between the 3 tables on the objectId. It gurantees that each line extracted
    -- from the 3 tables corresponds to the same object
    AND dref.objectId = dpos.objectId
    AND dfrc.objectId = dpos.objectId
    -- We add all the other selection cuts
    AND dpos.detect_isPrimary = 1
    AND dfrc.i_modelfit_CModel_flag = 0
    AND dfrc.i_modelfit_CModel_instFlux > 0
    AND dref.base_SdssCentroid_flag = 0
    -- We can have SQL math funcions in the query
    AND dref.base_Blendedness_abs < POWER(10, -0.375)
    AND dref.base_Blendedness_abs_instFlux IS NULL
    AND dref.base_ClassificationExtendedness_flag = 0
    AND dref. base_ClassificationExtendedness_value > 0
    AND ext_shapeHSM_HsmShapeRegauss_flag = 0
    AND dfrc.i_modelfit_CModel_flag = 0 AND dfrc.i_modelfit_CModel_instFlux > 0
    -- We put a crazy cut on the flux S/N for demonstration purpose to limit the number of returned lines
    -- A more reasonable cut would be ~30 as in the commented line
    AND dfrc.i_modelfit_CModel_instFlux/dfrc.i_modelfit_CModel_instFluxErr > 1000
    AND dref.ext_shapeHSM_HsmShapeRegauss_resolution >= 0.3
    AND dref.ext_shapeHSM_HsmShapeRegauss_sigma <= 0.4
    -- Another example of SQL math
    AND SQRT(POWER(dref.ext_shapeHSM_HsmShapeRegauss_e1, 2)+POWER(dref.ext_shapeHSM_HsmShapeRegauss_e2, 2)) < 2
    """
    # Finally we add the query_clean that we have defined in the previous cell
    query += query_clean
    # And the final semi-column
    query += ";"
    # As the SQL python API doesn't accept SQL comments we need to filter out our
    # nicely formatted query
    query = sqlparse.format(query, strip_comments=True, reindent=True).strip()
    print(query)
    print("This query should run in ~2 minutes if the cache is empty and in ~35 seconds if the table is loaded in the cache")
    startTime = time.time()
    tab = pd.read_sql_query(query,conn)
    endTime = time.time()
    print(f"{len(tab)} galaxy clusters found (should be 260787)")
    print("query ran in {:.1f} seconds".format(endTime - startTime))
def main():
    """Parse command-line options, connect to qserv and run the benchmark."""
    parser = OptionParser(usage="usage: %prog [options] input",
                          version="%prog 1.0")
    parser.add_option("-H", "--host",
                      type="string",
                      default="ccqserv201",
                      help="qserv host [%default]")
    parser.add_option("-u", "--user",
                      type="string",
                      default="qsmaster",
                      help="qserv user [%default]")
    parser.add_option("-p", "--port",
                      type="int",
                      default=30040,
                      help="qserv server port [%default]")
    parser.add_option("-D", "--database",
                      type="string",
                      default="dp01_dc2_catalogs",
                      help="qserv database [%default]")
    (opts, args) = parser.parse_args()
    if len(args) != 0:
        # Bug fix: this used to call the undefined name `parse`, raising
        # NameError instead of the intended usage error.
        parser.error("Wrong number of arguments")
    database = opts.database
    # (An unused screen-clearing lambda was removed here.)
    conn, cursor = qservInit(opts.host, opts.user, opts.port)
    print("\n \n \n")
    print(f"Checking database {database}")
    listDB(conn, cursor)
    tables = listTables(conn, cursor, database)
    countObjects(conn, cursor, database, tables)
    fullScan_1(conn)
if __name__ == '__main__':
    main()
8134553 | import os
from typing import Optional
def drop_newline(s: str) -> str:
    """Return *s* without a single trailing newline, if one is present.

    The parameter was renamed from ``str``, which shadowed the builtin;
    every call site in this file passes it positionally.
    """
    # str.removesuffix (3.9+, already required by this file's list[str]
    # annotations) strips at most one occurrence, matching the original
    # "drop the last character if it is a newline" behaviour.
    return s.removesuffix('\n')
def load_file(number : int, drop_newlines : Optional[bool]) -> list[str]:
    """Read ``days/_<number>_input.txt`` (relative to this file's parent
    directory) and return its lines, optionally without trailing newlines."""
    root = os.path.dirname(os.path.dirname(__file__))
    input_path = os.path.join(root, 'days', f'_{number}_input.txt')
    with open(input_path, 'r') as handle:
        lines = handle.readlines()
        if drop_newlines:
            return [drop_newline(line) for line in lines]
        return lines
| StarcoderdataPython |
11253707 | <reponame>scottwittenburg/vcs<filename>tests/test_vcs_template_ratio.py
import unittest
import vcs
import numpy
class VCSTestRatio(unittest.TestCase):
    """Regression tests for vcs template ratio/scale geometry values."""
    def assertClose(self, my, good):
        # numpy.ma.allclose returns truthy when all elements match within
        # tolerance; compare against 1 to assert closeness.
        self.assertEqual(numpy.ma.allclose(my, good), 1)
    def testRatioOne(self):
        # ratio(1) must square the data area within the canvas.
        t = vcs.createtemplate()
        t.ratio(1)
        self.assertClose(t.data.x1, 0.276658462196)
        self.assertClose(t.data.y1, 0.259999990463)
        self.assertClose(t.data.x2, 0.723341526628)
        self.assertClose(t.data.y2, 0.860000014305)
    def testScaleX(self):
        # Scaling only the x axis must move x2 and leave y untouched.
        t = vcs.createtemplate()
        t.scale(.5, axis='x')
        self.assertClose(t.data.x2, 0.499999994412)
    def testScaleY(self):
        # Scaling only the y axis must move y2 and leave x untouched.
        t = vcs.createtemplate()
        t.scale(.5, axis='y')
        self.assertClose(t.data.y2, 0.560000002384)
    def testScaleXY(self):
        # Scaling both axes must match the individual x and y results.
        t = vcs.createtemplate()
        t.scale(.5, axis='xy')
        self.assertClose(t.data.x2, 0.499999994412)
        self.assertClose(t.data.y2, 0.560000002384)
    def testResetX(self):
        # reset() must map the current x interval onto [0.2, 0.8].
        t = vcs.createtemplate()
        t.reset('x', .2, .8, t.data.x1, t.data.x2)
        self.assertClose(t.data.x1, 0.2)
        self.assertClose(t.data.x2, 0.8)
| StarcoderdataPython |
12860737 | <reponame>baba-hashimoto/BAT.py<filename>BAT/BAT.py
#!/usr/bin/env python2
import glob as glob
import os as os
import re
import shutil as shutil
import signal as signal
import subprocess as sp
import sys as sys
from lib import build
from lib import scripts
from lib import setup
from lib import analysis
# Accumulators filled while parsing the input file below
ion_def = []
poses_list = []
poses_def = []
release_eq = []
translate_apr = []
attach_rest = []
lambdas = []
weights = []
components = []
# Poses whose ligand anchor atoms could not be found (reported per stage)
aa1_poses = []
aa2_poses = []
# Read arguments that define input file and stage
if len(sys.argv) < 5:
    scripts.help_message()
    sys.exit(0)
for i in [1, 3]:
    if '-i' == sys.argv[i].lower():
        input_file = sys.argv[i + 1]
    elif '-s' == sys.argv[i].lower():
        stage = sys.argv[i + 1]
    else:
        scripts.help_message()
        sys.exit(1)
# Open input file
with open(input_file) as f_in:
    # Remove spaces and tabs
    lines = (line.strip(' \t\n\r') for line in f_in)
    lines = list(line for line in lines if line) # Non-blank lines in a list
    for i in range(0, len(lines)):
        # split line using the equal sign, and remove text after #
        if not lines[i][0] == '#':
            lines[i] = lines[i].split('#')[0].split('=')
# Read parameters from input file
# Each non-comment entry was split into [keyword, value] above; validated
# values are bound to module-level variables used by the stage code below.
for i in range(0, len(lines)):
    if not lines[i][0] == '#':
        # Normalize: lowercase keyword, strip whitespace around the value
        lines[i][0] = lines[i][0].strip().lower()
        lines[i][1] = lines[i][1].strip()
        if lines[i][0] == 'pull_ligand':
            if lines[i][1].lower() == 'yes':
                pull_ligand = 'yes'
            elif lines[i][1].lower() == 'no':
                pull_ligand = 'no'
            else:
                print('Wrong input! Please use yes or no to indicate whether to pull out the ligand or not.')
                sys.exit(1)
        elif lines[i][0] == 'temperature':
            temperature = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'eq_steps1':
            eq_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'eq_steps2':
            eq_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'prep_steps1':
            prep_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'prep_steps2':
            prep_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'a_steps1':
            a_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'a_steps2':
            a_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l_steps1':
            l_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l_steps2':
            l_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 't_steps1':
            t_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 't_steps2':
            t_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'u_steps1':
            u_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'u_steps2':
            u_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'c_steps1':
            c_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'c_steps2':
            c_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'r_steps1':
            r_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'r_steps2':
            r_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'e_steps1':
            e_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'e_steps2':
            e_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'v_steps1':
            v_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'v_steps2':
            v_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'w_steps1':
            w_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'w_steps2':
            w_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'f_steps1':
            f_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'f_steps2':
            f_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'pull_spacing':
            pull_spacing = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'poses_list':
            newline = lines[i][1].strip('\'\"-,.:;#()][').split(',')
            for j in range(0, len(newline)):
                poses_list.append(scripts.check_input('int', newline[j], input_file, lines[i][0]))
        elif lines[i][0] == 'calc_type':
            calc_type = lines[i][1].lower()
        elif lines[i][0] == 'celpp_receptor':
            celp_st = lines[i][1]
        elif lines[i][0] == 'p1':
            H1 = lines[i][1]
        elif lines[i][0] == 'p2':
            H2 = lines[i][1]
        elif lines[i][0] == 'p3':
            H3 = lines[i][1]
        elif lines[i][0] == 'ligand_name':
            mol = lines[i][1]
        elif lines[i][0] == 'fe_type':
            if lines[i][1].lower() == 'rest':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'dd':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'pmf':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'all':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'pmf-rest':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'dd-rest':
                fe_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'custom':
                fe_type = lines[i][1].lower()
            else:
                print('Free energy type not recognized, please choose all, rest (restraints), dd (double decoupling) or pmf (umbrella sampling), pmf-rest, dd-rest, or custom')
                sys.exit(1)
        elif lines[i][0] == 'dd_type':
            if lines[i][1].lower() == 'mbar':
                dd_type = lines[i][1].lower()
            elif lines[i][1].lower() == 'ti':
                dd_type = lines[i][1].lower()
            else:
                print('Double decoupling type not recognized, please choose ti or mbar')
                sys.exit(1)
        elif lines[i][0] == 'blocks':
            blocks = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'hmr':
            if lines[i][1].lower() == 'yes':
                hmr = 'yes'
            elif lines[i][1].lower() == 'no':
                hmr = 'no'
            else:
                print('Wrong input! Please use yes or no to indicate whether hydrogen mass repartitioning '
                      'will be used.')
                sys.exit(1)
        elif lines[i][0] == 'water_model':
            if lines[i][1].lower() == 'tip3p':
                water_model = lines[i][1].upper()
            elif lines[i][1].lower() == 'tip4pew':
                water_model = lines[i][1].upper()
            elif lines[i][1].lower() == 'spce':
                water_model = lines[i][1].upper()
            else:
                print('Water model not supported. Please choose TIP3P, TIP4PEW or SPCE')
                sys.exit(1)
        elif lines[i][0] == 'num_waters':
            num_waters = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'neutralize_only':
            if lines[i][1].lower() == 'yes':
                neut = 'yes'
            elif lines[i][1].lower() == 'no':
                neut = 'no'
            else:
                print('Wrong input! Please choose neutralization only or add extra ions')
                sys.exit(1)
        elif lines[i][0] == 'cation':
            cation = lines[i][1]
        elif lines[i][0] == 'anion':
            anion = lines[i][1]
        elif lines[i][0] == 'num_cations':
            num_cations = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'num_cat_ligbox':
            num_cat_ligbox = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'buffer_x':
            buffer_x = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'buffer_y':
            buffer_y = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'lig_buffer':
            lig_buffer = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'rec_distance_force':
            rec_distance_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'rec_angle_force':
            rec_angle_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'rec_dihcf_force':
            rec_dihcf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'rec_discf_force':
            rec_discf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'lig_distance_force':
            lig_distance_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'lig_angle_force':
            lig_angle_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'lig_dihcf_force':
            lig_dihcf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'lig_discf_force':
            lig_discf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l1_x':
            l1_x = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l1_y':
            l1_y = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l1_z':
            l1_z = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l1_zm':
            l1_zm = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'l1_range':
            l1_range = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'min_adis':
            min_adis = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'max_adis':
            max_adis = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'rec_bb':
            if lines[i][1].lower() == 'yes':
                rec_bb = 'yes'
            elif lines[i][1].lower() == 'no':
                rec_bb = 'no'
            else:
                print('Wrong input! Please use yes or no to indicate whether protein backbone restraints'
                      'will be used.')
                sys.exit(1)
        elif lines[i][0] == 'bb_start':
            bb_start = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'bb_end':
            bb_end = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
        elif lines[i][0] == 'bb_equil':
            if lines[i][1].lower() == 'yes':
                bb_equil = lines[i][1].lower()
            else:
                bb_equil = 'no'
        elif lines[i][0] == 'release_eq':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                release_eq.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
        elif lines[i][0] == 'translate_apr':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                translate_apr.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
        elif lines[i][0] == 'attach_rest':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                attach_rest.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
        elif lines[i][0] == 'lambdas':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                lambdas.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
        elif lines[i][0] == 'weights':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                weights.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
        elif lines[i][0] == 'components':
            strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
            for j in range(0, len(strip_line)):
                components.append(strip_line[j])
        elif lines[i][0] == 'ntpr':
            ntpr = lines[i][1]
        elif lines[i][0] == 'ntwr':
            ntwr = lines[i][1]
        elif lines[i][0] == 'ntwe':
            ntwe = lines[i][1]
        elif lines[i][0] == 'ntwx':
            ntwx = lines[i][1]
        elif lines[i][0] == 'cut':
            cut = lines[i][1]
        elif lines[i][0] == 'gamma_ln':
            gamma_ln = lines[i][1]
        elif lines[i][0] == 'barostat':
            barostat = lines[i][1]
        elif lines[i][0] == 'receptor_ff':
            receptor_ff = lines[i][1]
        elif lines[i][0] == 'ligand_ff':
            if lines[i][1].lower() == 'gaff':
                ligand_ff = 'gaff'
            elif lines[i][1].lower() == 'gaff2':
                ligand_ff = 'gaff2'
            else:
                print('Wrong input! Available options for ligand force-field are gaff and gaff2')
                sys.exit(1)
        elif lines[i][0] == 'dt':
            dt = lines[i][1]
# Number of simulations, 1 equilibrium and 1 production
apr_sim = 2
# Define free energy components
# Component letters: c/a/l/t/r = restraint attach/release legs,
# u = umbrella (pulling), e/v (site) and f/w (bulk) = charge/vdW decoupling.
# 'custom' keeps whatever the input file listed under 'components'.
if fe_type == 'rest':
    components = ['c', 'a', 'l', 't', 'r']
elif fe_type == 'dd':
    components = ['e', 'v', 'f', 'w']
elif fe_type == 'pmf':
    components = ['u']
elif fe_type == 'all':
    components = ['c', 'a', 'l', 't', 'r', 'u', 'v', 'w', 'e', 'f']
elif fe_type == 'pmf-rest':
    components = ['c', 'a', 'l', 't', 'r', 'u']
elif fe_type == 'dd-rest':
    components = ['c', 'a', 'l', 't', 'r', 'e', 'v', 'w', 'f']
# Pull ligand out or not
if pull_ligand == 'no':
    translate_apr = [ 0.00 ]
    pull_spacing = 1.0
    prep_steps2 = 0
# Do not apply protein backbone restraints
if rec_bb == 'no':
    bb_start = 1
    bb_end = 0
    bb_equil = 'no'
# Create poses definitions
if calc_type == 'dock':
    for i in range(0, len(poses_list)):
        poses_def.append('pose'+str(poses_list[i]))
elif calc_type == 'crystal':
    poses_def = [celp_st]
# Total distance
# NOTE(review): assumes translate_apr is non-empty when pull_ligand == 'yes'
apr_distance = translate_apr[-1]
rng = 0
# Create restraint definitions
rest = [rec_distance_force, rec_angle_force, rec_dihcf_force, rec_discf_force, lig_distance_force, lig_angle_force, lig_dihcf_force, lig_discf_force]
# Create ion definitions
ion_def = [cation, anion, num_cations]
ion_lig = [cation, anion, num_cat_ligbox]
# Define number of steps for all stages
# (per-component minimization/production step counts, keyed by component letter)
dic_steps1 = {}
dic_steps2 = {}
dic_steps1['a'] = a_steps1
dic_steps2['a'] = a_steps2
dic_steps1['l'] = l_steps1
dic_steps2['l'] = l_steps2
dic_steps1['t'] = t_steps1
dic_steps2['t'] = t_steps2
dic_steps1['c'] = c_steps1
dic_steps2['c'] = c_steps2
dic_steps1['r'] = r_steps1
dic_steps2['r'] = r_steps2
if stage == 'equil':
    # Equilibration stage: build, solvate and restrain each pose.
    comp = 'q'
    win = 0
    trans_dist = 0
    # Create equilibrium systems for all poses listed in the input file
    for i in range(0, len(poses_def)):
        rng = len(release_eq) - 1
        pose = poses_def[i]
        if not os.path.exists('./all-poses/'+pose+'.pdb'):
            continue
        print('Setting up '+str(poses_def[i]))
        # Get number of simulations
        num_sim = len(release_eq)
        # Create aligned initial complex
        anch = build.build_equil(pose, celp_st, mol, H1, H2, H3, calc_type, l1_x, l1_y, l1_z, l1_zm, l1_range, min_adis, max_adis, ligand_ff)
        if anch == 'anch1':
            aa1_poses.append(pose)
            os.chdir('../')
            continue
        if anch == 'anch2':
            aa2_poses.append(pose)
            os.chdir('../')
            continue
        # Solvate system with ions
        print('Creating box...')
        build.create_box(hmr, pose, mol, num_waters, water_model, ion_def, neut, buffer_x, buffer_y, stage, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
        # Apply restraints and prepare simulation files
        print('Equil release weights:')
        # NOTE(review): this inner loop reuses the name 'i'; harmless here
        # because the outer for re-binds i from its own range iterator.
        for i in range(0, len(release_eq)):
            weight = release_eq[i]
            print('%s' %str(weight))
            setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
            shutil.copy('./'+pose+'/disang.rest', './'+pose+'/disang%02d.rest' %int(i))
        shutil.copy('./'+pose+'/disang%02d.rest' %int(0), './'+pose+'/disang.rest')
        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, eq_steps1, eq_steps2, rng)
        os.chdir('../')
    if len(aa1_poses) != 0:
        print('\n')
        print 'WARNING: Could not find the ligand first anchor L1 for', aa1_poses
        print 'The ligand is most likely not in the defined binding site in these systems.'
    if len(aa2_poses) != 0:
        print('\n')
        print 'WARNING: Could not find the ligand L2 or L3 anchors for', aa2_poses
        print 'Try reducing the min_adis parameter in the input file.'
elif stage == 'prep':
    # Preparation stage: generate the pulling windows after equilibration.
    win = 0
    weight = 100.0
    comp = 's'
    # Prepare systems after equilibration for poses listed in the input file
    for i in range(0, len(poses_def)):
        pose = poses_def[i]
        if not os.path.exists('./equil/'+pose):
            continue
        print('Setting up '+str(poses_def[i]))
        # Get number of simulations
        num_sim = int(apr_distance/pull_spacing)+1
        rng = num_sim - 1
        # Create aligned initial complex
        fwin = len(release_eq) - 1
        anch = build.build_prep(pose, mol, fwin, l1_x, l1_y, l1_z, l1_zm, l1_range, min_adis, max_adis)
        if anch == 'anch1':
            aa1_poses.append(pose)
            os.chdir('../')
            continue
        if anch == 'anch2':
            aa2_poses.append(pose)
            os.chdir('../')
            continue
        # Solvate system with ions
        print('Creating box...')
        build.create_box(hmr, pose, mol, num_waters, water_model, ion_def, neut, buffer_x, buffer_y, stage, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
        # Apply restraints and prepare simulation files
        print('Pulling distance interval: %s' %pull_spacing)
        print('Total pulling distance: %s' %apr_distance)
        print('Creating pulling steps...')
        # NOTE(review): the inner loop reuses the name 'i' (see equil stage).
        for i in range(0, num_sim):
            trans_dist = float(i*pull_spacing)
            setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
            shutil.copy('./'+pose+'/disang.rest', './'+pose+'/disang%03d.rest' %int(i))
        shutil.copy('./'+pose+'/disang%03d.rest' %int(0), './'+pose+'/disang.rest')
        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, prep_steps1, prep_steps2, rng)
        os.chdir('../')
    if len(aa1_poses) != 0:
        print('\n')
        print 'WARNING: Could not find the ligand first anchor L1 for', aa1_poses
        print 'The ligand most likely left the binding site during equilibration.'
    if len(aa2_poses) != 0:
        print('\n')
        print 'WARNING: Could not find the ligand L2 or L3 anchors for', aa2_poses
        print 'Try reducing the min_adis parameter in the input file.'
elif stage == 'fe':
    # Free-energy stage: build every window of every requested component.
    # Create systems for all poses after preparation
    num_sim = apr_sim
    # Create and move to apr directory
    if not os.path.exists('fe'):
        os.makedirs('fe')
    os.chdir('fe')
    for i in range(0, len(poses_def)):
        pose = poses_def[i]
        if not os.path.exists('../prep/'+pose):
            continue
        print('Setting up '+str(poses_def[i]))
        # Create and move to pose directory
        if not os.path.exists(pose):
            os.makedirs(pose)
        os.chdir(pose)
        # Generate folder and restraints for all components and windows
        for j in range(0, len(components)):
            comp = components[j]
            # Translation (umbrella)
            if (comp == 'u'):
                if not os.path.exists('pmf'):
                    os.makedirs('pmf')
                os.chdir('pmf')
                weight = 100.0
                for k in range(0, len(translate_apr)):
                    trans_dist = translate_apr[k]
                    win = k
                    print('window: %s%02d distance: %s' %(comp, int(win), str(trans_dist)))
                    build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                    setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, u_steps1, u_steps2, rng)
                os.chdir('../')
            # Ligand conformational release in a small box
            elif (comp == 'c'):
                if not os.path.exists('rest'):
                    os.makedirs('rest')
                os.chdir('rest')
                trans_dist = 0
                for k in range(0, len(attach_rest)):
                    weight = attach_rest[k]
                    win = k
                    # First window also creates the ligand-only box
                    if int(win) == 0:
                        print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        print('Creating box for ligand only...')
                        build.ligand_box(mol, lig_buffer, water_model, neut, ion_lig, comp, ligand_ff)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, c_steps1, c_steps2, rng)
                    else:
                        print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, c_steps1, c_steps2, rng)
                os.chdir('../')
            # Receptor conformational release in a separate box
            elif (comp == 'r'):
                if not os.path.exists('rest'):
                    os.makedirs('rest')
                os.chdir('rest')
                trans_dist = translate_apr[-1]
                for k in range(0, len(attach_rest)):
                    weight = attach_rest[k]
                    win = k
                    # First window also creates the apo-protein box
                    if int(win) == 0:
                        print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        print('Creating box for apo protein...')
                        build.create_box(hmr, pose, mol, num_waters, water_model, ion_def, neut, buffer_x, buffer_y, stage, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, r_steps1, r_steps2, rng)
                    else:
                        print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, r_steps1, r_steps2, rng)
                os.chdir('../')
            # Van der Waals decoupling
            # site
            elif (comp == 'v'):
                if not os.path.exists('dd'):
                    os.makedirs('dd')
                os.chdir('dd')
                trans_dist = 0
                if not os.path.exists('site'):
                    os.makedirs('site')
                os.chdir('site')
                for k in range(0, len(lambdas)):
                    weight = lambdas[k]
                    win = k
                    print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                    build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                    setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, v_steps1, v_steps2, weight, lambdas)
                os.chdir('../../')
            # bulk
            elif (comp == 'w'):
                if not os.path.exists('dd'):
                    os.makedirs('dd')
                os.chdir('dd')
                trans_dist = 0
                if not os.path.exists('bulk'):
                    os.makedirs('bulk')
                os.chdir('bulk')
                for k in range(0, len(lambdas)):
                    weight = lambdas[k]
                    win = k
                    if int(win) == 0:
                        print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        print('Creating box for ligand only...')
                        build.ligand_box(mol, lig_buffer, water_model, neut, ion_lig, comp, ligand_ff)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, w_steps1, w_steps2, weight, lambdas)
                    else:
                        print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                        build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, w_steps1, w_steps2, weight, lambdas)
                os.chdir('../../')
            # Charge decoupling
            # site
            elif (comp == 'e'):
                if not os.path.exists('dd'):
                    os.makedirs('dd')
                os.chdir('dd')
                trans_dist = 0
                if not os.path.exists('site'):
                    os.makedirs('site')
                os.chdir('site')
                for k in range(0, len(lambdas)):
                    weight = lambdas[k]
                    win = k
                    print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                    build.build_dec(hmr, mol, pose, comp, win, water_model, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                    setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, e_steps1, e_steps2, weight, lambdas)
                os.chdir('../../')
            # bulk
            elif (comp == 'f'):
                if not os.path.exists('dd'):
                    os.makedirs('dd')
                os.chdir('dd')
                trans_dist = 0
                if not os.path.exists('bulk'):
                    os.makedirs('bulk')
                os.chdir('bulk')
                for k in range(0, len(lambdas)):
                    weight = lambdas[k]
                    win = k
                    if int(win) == 0:
                        print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                        build.build_dec(hmr, mol, pose, comp, win, water_model, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        print('Creating box for ligand decharging in bulk...')
                        build.ligand_box(mol, lig_buffer, water_model, neut, ion_lig, comp, ligand_ff)
                        setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                        setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, f_steps1, f_steps2, weight, lambdas)
                    else:
                        print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
                        build.build_dec(hmr, mol, pose, comp, win, water_model, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                        setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, f_steps1, f_steps2, weight, lambdas)
                os.chdir('../../')
            # Attachments in the bound system
            else:
                if not os.path.exists('rest'):
                    os.makedirs('rest')
                os.chdir('rest')
                trans_dist = 0
                for k in range(0, len(attach_rest)):
                    weight = attach_rest[k]
                    win = k
                    print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
                    build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
                    setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
                    steps1 = dic_steps1[comp]
                    steps2 = dic_steps2[comp]
                    setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, steps1, steps2, rng)
                os.chdir('../')
        os.chdir('../')
elif stage == 'analysis':
    # Free energies MBAR/TI and analytical calculations
    for i in range(0, len(poses_def)):
        pose = poses_def[i]
        analysis.fe_values(blocks, components, temperature, pose, attach_rest, translate_apr, lambdas, weights, dd_type, rest)
        # NOTE(review): assumes fe_values() changed into fe/<pose>; this
        # chdir steps back out before the next pose — confirm against fe_values.
        os.chdir('../../')
| StarcoderdataPython |
3530352 | <reponame>Chhekur/codechef-solutions<filename>COOK100B/TRUEDARE.py
# CodeChef TRUEDARE: Ram wins a game iff every truth task and every dare
# task Shyam can assign is one that Ram is able to perform.
# Input per test case: four (count, list) pairs — Ram's truths, Ram's dares,
# Shyam's truths, Shyam's dares. Output "yes" or "no" per test case.
for _ in range(int(input())):
    # The counts preceding each list are redundant (the list defines its own
    # length), but the lines must still be consumed.
    int(input())
    ram_truth = {int(x) for x in input().split()}
    int(input())
    ram_dare = {int(x) for x in input().split()}
    int(input())
    shyam_truth = {int(x) for x in input().split()}
    int(input())
    shyam_dare = {int(x) for x in input().split()}
    # Set subset tests replace the original O(n*m) list-membership scans.
    ok = shyam_truth <= ram_truth and shyam_dare <= ram_dare
    print("yes" if ok else "no")
8140356 | <reponame>GenkiOtera/VHS-Extractor<gh_stars>0
from logging import getLogger
import os
import sys
import time
import subprocess as sp
from PIL.ImageOps import grayscale
import pyautogui as gui
from .dic import dic
class service():
    """GUI-automation wrapper around the I-O DATA LightCapture application.

    Buttons and dialogs are located on screen by image matching (pyautogui),
    using the reference images registered in the ``dic`` lookup model.
    """
    def __init__(self) -> None:
        self.logger = getLogger(__name__)
        # Instantiate the model that maps operation names to reference images
        self.dic = dic()
        # Default installation path of the application
        self.app_path = "C:\\Program Files (x86)\\I-O DATA\\LightCapture\\LightCapture.exe"
        # Default recording output folder created by LightCapture
        self.default_download_path = os.environ['USERPROFILE']+"\\Documents\\Light Capture 録画フォルダ"
    # Launch the application
    def stard_light_capture(self):
        """Start LightCapture.exe and wait 3 s for it to come up.

        NOTE(review): exits the whole process (status 0) on launch failure —
        presumably intentional for the calling script; confirm.
        """
        self.logger.info("LightCaptureを起動")
        try:
            sp.Popen(self.app_path)
            self.logger.debug("wait 3 sec")
            time.sleep(3)
        except Exception as e:
            self.logger.info(e)
            sys.exit(0)
    # Confirm the application has started
    def check_run(self) -> bool:
        """Return True when the settings button is visible on screen."""
        self.logger.info("check startup")
        result = False
        # Judge startup success by whether the settings button is on screen
        try:
            self.logger.info(self.dic.get("setting"))
            x,y = gui.locateCenterOnScreen(self.dic.get("setting"))
            self.logger.info("startup succeeded")
            result = True
        except:
            self.logger.error("startup failed")
        return result
    # Start recording
    def start_rec(self):
        self.__click_button("rec")
        # Wait 3 s because the screen lags before entering the recording state
        time.sleep(3)
    # Stop recording
    def stop_rec(self):
        # Best-effort: ignore failures (e.g. recording already stopped)
        try:
            self.__click_button("stop")
        except:
            pass
    # Change the save destination
    def change_destination(self, destination):
        """Type *destination* into the save-folder field of the settings dialog."""
        self.__click_button("destination",add_x=200, add_y=0)
        gui.hotkey('ctrl', 'a')
        gui.write(destination)
        self.__click_button("cancel")
    # Close the application
    def exit(self):
        self.__click_button("exit")
    # Watch for the end of recording
    def check_end_rec(self, try_count:int = 3600) -> bool:
        """Poll every 10 s, up to *try_count* attempts, for the 'finished' image.

        Returns True when the end-of-recording image is detected, False on timeout.
        """
        self.logger.info("checking the end of recording [" + str(try_count) + "]times")
        for tryCount in range(try_count):
            tryCount += 1
            try:
                # print(tryCount)
                x,y = gui.locateCenterOnScreen(self.dic.get("finished"))
                # x,y = gui.locateCenterOnScreen(self.dic.get("setting"))
                self.logger.info("detected the end of recording")
                # self.__click_button("stop")
                break
            except Exception as e:
                if(tryCount < try_count):
                    self.logger.info("recording["+str(tryCount)+"]...")
                    time.sleep(10)
                    continue
                else:
                    self.logger.info(e)
                    return False
        return True
    # Initial settings (switch to S-terminal recording and always-on-top display)
    def change_initial_settings(self):
        self.__open_settings()
        self.__change_s_terminal()
        self.__change_dis_front()
        self.__click_button("ok")
    # Open the settings dialog
    def __open_settings(self):
        self.__click_button("setting")
        # Wait 1 s for the dialog to open
        time.sleep(1)
    # Switch the recording input to the S-terminal
    def __change_s_terminal(self):
        self.__click_button("mov_format_category")
        # If S-terminal is not already selected, move from composite to it
        try:
            x,y = gui.locateCenterOnScreen(self.dic.get("selected_s_term"))
        except Exception:
            self.__click_button("selected_composite")
            gui.press('down')
            gui.press('enter')
    # Switch to always-on-top display
    def __change_dis_front(self):
        self.__click_button("other_category")
        # If the always-on-top checkbox image is absent, click next to its label
        try:
            x,y = gui.locateCenterOnScreen(self.dic.get("disp_front"))
        except Exception:
            self.__click_button("text_disp_front", add_x=-10)
    # Click the button identified by *key*, retrying up to *try_count* times
    def __click_button(self, key:str, try_count:int = 3, add_x:int = 0, add_y:int = 0):
        for tryCount in range(try_count):
            tryCount += 1
            self.logger.info("try " + key + " button [" + str(tryCount) + "]")
            try:
                self.logger.info(self.dic.get(key))
                x,y = gui.locateCenterOnScreen(self.dic.get(key))
                self.logger.info("click "+key+" button")
                gui.click(x+add_x, y+add_y)
                break
            except Exception as e:
                if(tryCount < try_count):
                    self.logger.debug("button is not found. wait a sec")
                    time.sleep(1)
                    continue
                else:
                    self.__find_nothing()
    # Called when the target button could not be found after all retries
    def __find_nothing(self):
        self.logger.info("タイムアウト:対象が見つかりませんでした")
| StarcoderdataPython |
106440 | <filename>devtools/qcexport/qcexport.py
'''Import/Export of QCArchive data
'''
from dataclasses import dataclass
import typing
from qcexport_extra import extra_children_map
from sqlalchemy.orm import make_transient, Load
from sqlalchemy import inspect
from qcfractal.storage_sockets.models import (
AccessLogORM,
BaseResultORM,
CollectionORM,
DatasetORM,
GridOptimizationProcedureORM,
MoleculeORM,
KeywordsORM,
KVStoreORM,
OptimizationProcedureORM,
QueueManagerLogORM,
QueueManagerORM,
ResultORM,
ServerStatsLogORM,
ServiceQueueORM,
QueueManagerORM,
TaskQueueORM,
TorsionDriveProcedureORM,
Trajectory,
VersionsORM,
WavefunctionStoreORM,
)
from qcfractal.storage_sockets.models.collections_models import DatasetEntryORM
from qcfractal.storage_sockets.models.results_models import GridOptimizationAssociation, TorsionInitMol
# Every ORM model class known to the exporter; the table-name lookup map is
# derived from this list below.
# NOTE(review): QueueManagerORM appears twice in this list — harmless for the
# dict comprehension (deduplicated by key), but worth cleaning up upstream.
_all_orm = [
    AccessLogORM,
    BaseResultORM,
    CollectionORM,
    DatasetORM,
    DatasetEntryORM,
    GridOptimizationProcedureORM,
    GridOptimizationAssociation,
    MoleculeORM,
    KeywordsORM,
    KVStoreORM,
    OptimizationProcedureORM,
    QueueManagerLogORM,
    QueueManagerORM,
    ResultORM,
    ServerStatsLogORM,
    ServiceQueueORM,
    QueueManagerORM,
    TaskQueueORM,
    TorsionDriveProcedureORM,
    TorsionInitMol,
    Trajectory,
    VersionsORM,
    WavefunctionStoreORM,
]
# Maps table names to sqlalchemy ORM objects
_table_orm_map = {orm.__tablename__: orm for orm in _all_orm}
class RowKeyValues:
'''Generates and stores information about primary and foreign keys of a table
'''
@dataclass(order=True)
class PKInfo:
'''Holds information about a row's primary key.
Holds the column names and the values of the primary key columns.
These are lists in order to handle composite primary keys
'''
table: str
columns: list
values: list
@dataclass(order=True)
class FKInfo:
'''Holds information about a row's foreign key.
For a single foreign key, holds the source and destination/foreign table names and columns. Also
holds the value in the source row.
'''
src_table: str
src_column: str
dest_table: str
dest_column: str
value: 'typing.Any'
def __init__(self, orm_obj):
'''Generates primary and foreign key info given an ORM object'''
self.orm_type = type(orm_obj)
insp = inspect(self.orm_type)
###########################################################
# First, get which columns are primary and foreign keys
###########################################################
# Handle if this is a derived class (polymorphic?)
# This seems poorly documented. But get the table name of the
# base class (if there is one)
base_class = insp.inherits.entity if insp.inherits else None
base_table = base_class.__tablename__ if base_class else None
# Get the columns comprising the primary key
primary_key_columns = [x.name for x in insp.primary_key]
# Now foreign keys. Loop over all the columns.
# Each column has a set() (which may be empty) stored in foreign_keys
foreign_key_info = []
for col in insp.columns:
for fk in sorted(list(col.foreign_keys)):
# Remove foreign keys to base class
# The purpose of this function is to get foreign keys that we need to
# load. But if it is part of the base class, then no need to do that
if not (base_table and fk.column.table.name == base_table):
new_fk = self.FKInfo(col.table.name, col.name, fk.column.table.name, fk.column.name, None)
foreign_key_info.append(new_fk)
# Not sure if order is always preserved, but sort just in case
# so that things are always consistent
primary_key_columns = sorted(primary_key_columns)
foreign_key_info = sorted(foreign_key_info)
# Now store in this class
self.primary_key = self.PKInfo(self.orm_type.__tablename__, primary_key_columns, None)
self.foreign_keys = foreign_key_info
#######################################################
# Obtain values for the primary and foreign key columns
#######################################################
self.primary_key.values = [getattr(orm_obj, column) for column in self.primary_key.columns]
for fk in self.foreign_keys:
fk.value = getattr(orm_obj, fk.src_column)
def is_composite_primary(self):
    '''True when the primary key spans more than one column.'''
    column_count = len(self.primary_key.columns)
    return column_count > 1
def as_lookup_key(self):
    '''Return a unique string representing the primary key

    This is used as a key to a dictionary to store already-copied data.
    '''
    return '{}{}'.format(repr(self.orm_type), repr(self.primary_key))
def remove_primary_key(self, orm_obj):
    '''Remove primary key values that are integers and not part of
    a composite primary key

    Sets the integer primary-key attributes on `orm_obj` to None so the
    destination database generates fresh ids on insert. Composite primary
    keys are left untouched. Raises RuntimeError when `orm_obj` is not of
    the ORM type this key info was built from.
    '''
    if type(orm_obj) != self.orm_type:
        # BUGFIX: this message was a plain string containing a stray "f"
        # ("...type f{type(orm_obj)}..."), so the placeholders were never
        # interpolated. It is now a real f-string.
        raise RuntimeError(f"Removing primary keys of type {type(orm_obj)} but I can only handle {self.orm_type}")

    # Don't touch composite primary
    if self.is_composite_primary():
        return

    for pk, old_value in zip(self.primary_key.columns, self.primary_key.values):
        if isinstance(old_value, int):
            setattr(orm_obj, pk, None)
def _add_children(orm_obj, session_dest, session_src, new_pk_map, options, row_key_info, indent=''):
    '''Copies the rows that `orm_obj` references through its foreign keys.

    Each non-null foreign key is resolved by copying the referenced row
    into the destination db (via _general_copy), after which the key
    column on `orm_obj` is rewritten to point at the newly-inserted row.

    Parameters
    ----------
    orm_obj
        An ORM object to add the children of
    session_dest
        SQLAlchemy session to write data to
    session_src
        SQLAlchemy session to read data from
    new_pk_map : dict
        Where to store the mapping of old to new data
    options : dict
        Various options to be passed into the internal functions
    row_key_info : RowKeyValues
        Information about the row's primary and foreign keys
    indent : str
        Prefix to add to all printed output lines
    '''
    for fk_info in row_key_info.foreign_keys:
        # A null foreign-key column has nothing to copy
        if fk_info.value is None:
            continue

        print(indent + "+ Handling child: ")
        print(
            indent +
            f" - {fk_info.src_table}.{fk_info.src_column}:{fk_info.value} -> {fk_info.dest_table}.{fk_info.dest_column}"
        )

        # Look the referenced row up in the source db by the referenced
        # column/value. This must return exactly one record.
        # NOTE: a lookup in new_pk_map could short-circuit this, but the
        # hit rate would generally be low and it might be error prone.
        referenced_row_query = {fk_info.dest_column: fk_info.value}
        copied_row = _general_copy(table_name=fk_info.dest_table,
                                   session_dest=session_dest,
                                   session_src=session_src,
                                   new_pk_map=new_pk_map,
                                   options=options,
                                   filter_by=referenced_row_query,
                                   single=True,
                                   indent=indent + ' ')

        # Repoint the foreign key at the freshly-inserted row
        setattr(orm_obj, fk_info.src_column, copied_row[fk_info.dest_column])
def _add_tasks_and_services(base_result_id, session_dest, session_src, new_pk_map, options, indent):
    '''Adds entries in the task_queue and service_queue given something deriving from base_result

    Should only be called after adding the result or procedure.

    Parameters
    ----------
    base_result_id
        ID of the base_result (result, procedure, ...)
    session_dest
        SQLAlchemy session to write data to
    session_src
        SQLAlchemy session to read data from
    new_pk_map : dict
        Where to store the mapping of old to new data
    options : dict
        Various options to be passed into the internal functions
    indent : str
        Prefix to add to all printed output lines
    '''
    print(indent + f"$ Adding task & service queue entries for base_result_id = {base_result_id}")

    # The two queues reference the base result through different columns.
    # If the calculation is completed, the task has been deleted and the
    # copy is simply a no-op.
    queue_lookups = (
        ('task_queue', {'base_result_id': base_result_id}),
        ('service_queue', {'procedure_id': base_result_id}),
    )

    for queue_table, queue_filter in queue_lookups:
        _general_copy(table_name=queue_table,
                      session_dest=session_dest,
                      session_src=session_src,
                      new_pk_map=new_pk_map,
                      options=options,
                      filter_by=queue_filter,
                      indent=indent + ' ')
def _general_copy(table_name,
                  session_dest,
                  session_src,
                  new_pk_map,
                  options,
                  filter_by=None,
                  filter_in=None,
                  order_by=None,
                  limit=None,
                  single=False,
                  indent=''):
    '''
    Given queries, copies all results of the query from session_src to session_dest

    Adds data to session_dest, keeping a map of newly-added info and fixing foreign keys
    to match newly-inserted data.

    Called recursively to add dependent data through foreign keys.

    Parameters
    ----------
    table_name : str
        Name of the table to copy data from/to
    session_dest
        SQLAlchemy session to write data to
    session_src
        SQLAlchemy session to read data from
    new_pk_map : dict
        Where to store the mapping of old to new data
    options : dict
        Various options to be passed into the internal functions
    filter_by : dict
        Filters (column: value) to add to the query. ie, {'id': 123}
    filter_in : dict
        Filters (column: list(values)) to add to the query using 'in'. ie, {'id': [123,456]}
    order_by: dict
        How to order the results of the query. ie {'id': 'desc'}
    limit : int
        Limit the number of records returned
    single : bool
        If true, expect only one returned record. If not, raise an exception
    indent : str
        Prefix to add to all printed output lines

    Returns
    -------
    dict or list of dict
        The destination row as a dict when ``single`` is True, otherwise a
        list of such dicts (one per copied or previously-copied row).
    '''
    orm_type = _table_orm_map[table_name]

    # Build the query based on filtering, etc
    query = session_src.query(orm_type)
    if filter_by is not None:
        query = query.filter_by(**filter_by)
    if filter_in is not None:
        for key, values in filter_in.items():
            query = query.filter(getattr(orm_type, key).in_(values))
    if order_by:
        for column, order in order_by.items():
            # Gets, for example, Trajectory.opt_id.desc
            # opt_id = column, desc = bound function
            o = getattr(orm_type, column)
            o = getattr(o, order)
            query = query.order_by(o())
    if limit is not None:
        if single and limit != 1:
            raise RuntimeError(f'Limit = {limit} but single return is specified')
        query = query.limit(limit)
    elif single:
        # NOTE(review): this sets `limit` but never applies it to the query;
        # the single-record guarantee is enforced by the length check at the
        # end instead — confirm this is intentional.
        limit = 1

    # Disable all relationship loading
    query = query.options(Load(orm_type).noload('*'))

    data = query.all()
    return_info = []

    # We have to expunge and make transient everything first
    # If not, sqlalchemy tries to be smart. After you add the entries found
    # through foreign keys, the rest of the objects in the data list may change.
    # But then you will have parts of objects in session_src and parts in session_dest
    for d in data:
        session_src.expunge(d)
        make_transient(d)

    for d in data:
        # Obtain primary/foreign key columns and values
        src_rck = RowKeyValues(d)

        # The type of the object may not be the same as we queried (due to polymorphic types)
        real_orm_type = type(d)
        real_table_name = real_orm_type.__tablename__

        # real_orm_type should never be BaseResultORM
        assert real_orm_type != BaseResultORM

        print(indent +
              f'* Copying {table_name} {str(src_rck.primary_key.columns)} = {str(src_rck.primary_key.values)}')

        if real_orm_type != orm_type:
            print(indent + f'& But actually using table {real_table_name}')

        ###########################################################
        # TODO - If working with an existing db, do lookups here
        #        (this is for future capability of importing
        #        into an existing db)
        ###########################################################
        # Skip rows that were already copied in an earlier (possibly
        # recursive) call; just return their previously-stored info.
        src_lookup_key = src_rck.as_lookup_key()
        if src_lookup_key in new_pk_map:
            print(indent + f' - Already previously done')
            return_info.append(new_pk_map[src_lookup_key])
            continue

        # Save src information for laters. When adding extra children, old ids and stuff may be needed
        src_info = d.to_dict()

        # Loop through foreign keys and recursively add those
        _add_children(d, session_dest, session_src, new_pk_map, options, src_rck, indent + ' ')

        # Remove the primary key. We will generate a new one on adding
        src_rck.remove_primary_key(d)

        # Truncate KV store entries by default
        # (but can be overridden)
        if table_name == 'kv_store':
            truncate_kv_store = options.get('truncate_kv_store', True)
            if truncate_kv_store:
                d.value = str(d.value)[:2000]

        # Now add it to the session
        # and obtain the key info
        session_dest.add(d)
        session_dest.commit()

        dest_rck = RowKeyValues(d)
        print(indent + f'! adding {real_table_name} {str(src_rck.primary_key.values)} = {str(dest_rck.primary_key.values)}')

        # Store the info for the entire row
        # (exception: kvstore)
        dest_info = d.to_dict()

        # Don't store kvstore data in the dictionary (not needed)
        if table_name == 'kv_store':
            dest_info.pop('value')

        # We can't just use primary key, since foreign keys may
        # reference non-primary-keys of other tables (as long as they are unique)
        new_pk_map[src_lookup_key] = dest_info
        return_info.append(dest_info)

        ########################################################################
        # Now handle children that are not specified by foreign keys
        # This includes decoupled data like datasets, as well as when foreign
        # keys are specified in json
        #
        # We do that here after adding. Some of these have foreign keys
        # to this object, so we need the new id (retrieved through new_pk_map)
        ########################################################################
        if real_orm_type in extra_children_map:
            # The function called in extra_children_map may modify the object.
            # We let the called function do that, then merge it back into the db
            extra_children_map[real_orm_type](d, src_info, session_dest, session_src, new_pk_map, options, indent + ' ')
            session_dest.commit()

        ########################################################################
        # Now add tasks/services if this is a result/procedure
        ########################################################################
        if issubclass(real_orm_type, BaseResultORM):
            _add_tasks_and_services(src_info['id'], session_dest, session_src, new_pk_map, options, indent + ' ')

    # If the caller specified single=True, should only be one record
    if single:
        if len(return_info) != 1:
            raise RuntimeError(f'Wanted single record but got {len(return_info)} instead')
        return return_info[0]
    else:
        return return_info
def general_copy(table_name,
                 storage_dest,
                 storage_src,
                 new_pk_map=None,
                 options=None,
                 filter_by=None,
                 order_by=None,
                 limit=None,
                 indent=''):
    ''' Copies data from the source db to the destination db

    Given queries, copies all results of the query from session_src to session_dest
    Handles copying of data required by foreign keys as well.

    Parameters
    ----------
    table_name : str
        Name of the table to copy data from/to
    storage_dest
        Storage object to write data to
    storage_src
        Storage object to read data from
    new_pk_map : dict
        Where to store the mapping of old to new data
    options : dict
        Various options to be passed into the internal functions
    filter_by : dict
        Filters (column: value) to add to the query. ie, {'id': 123}
    order_by: dict
        How to order the results of the query. ie {'id': 'desc'}
    limit : int
        Limit the number of records returned
    indent : str
        Prefix to add to all printed output lines
    '''
    # BUGFIX: `options` and `filter_by` previously used mutable default
    # arguments ({}), which are shared across calls. Use None sentinels
    # instead; passing nothing behaves exactly as before.
    if new_pk_map is None:
        new_pk_map = dict()
    if options is None:
        options = {}
    if filter_by is None:
        filter_by = {}

    with storage_src.session_scope() as session_src:
        with storage_dest.session_scope() as session_dest:
            _general_copy(table_name,
                          session_dest,
                          session_src,
                          new_pk_map=new_pk_map,
                          options=options,
                          filter_by=filter_by,
                          order_by=order_by,
                          limit=limit,
                          indent=indent)
| StarcoderdataPython |
1620695 | # Copyright (C) 2019 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymedphys._imports import numpy as np
from numpy import cos, radians, sin
from numpy.linalg import norm
def rotate_about_vector(coords_to_rotate, vector, theta, active=False):
    r"""Rotates a 3 x n vector of the form np.array((x, y, z)) about the axis
    specified by `vector`. Transforms can be active (alibi) or passive
    (alias). Default is passive.
    """
    axis = vector / norm(vector)
    u_x, u_y, u_z = axis[0], axis[1], axis[2]

    s = sin(radians(theta))
    c = cos(radians(theta))
    one_c = 1 - c

    # Rodrigues rotation matrix, written here in its *active* form.
    rotation_matrix = np.array(
        [
            [c + u_x * u_x * one_c,
             u_x * u_y * one_c - u_z * s,
             u_x * u_z * one_c + u_y * s],
            [u_y * u_x * one_c + u_z * s,
             c + u_y * u_y * one_c,
             u_y * u_z * one_c - u_x * s],
            [u_z * u_x * one_c - u_y * s,
             u_z * u_y * one_c + u_x * s,
             c + u_z * u_z * one_c],
        ]
    )

    # Transpose to obtain the (default) passive convention.
    if not active:
        rotation_matrix = rotation_matrix.T

    return rotation_matrix @ coords_to_rotate
def rotate_about_x(coords_to_rotate, psi, active=False):
    r"""Rotates a 3 x n vector of the form np.array((x, y, z)) about the
    x-axis. Transforms can be active (alibi) or passive (alias); passive by
    default.
    """
    s, c = sin(radians(psi)), cos(radians(psi))
    # Passive-convention rotation matrix about x.
    matrix = np.array([[1, 0, 0], [0, c, s], [0, -s, c]])
    if active:
        matrix = matrix.transpose()
    return matrix @ coords_to_rotate
def rotate_about_y(coords_to_rotate, phi, active=False):
    r"""Rotates a 3 x n vector of the form np.array((x, y, z)) about the
    y-axis. Transforms can be active (alibi) or passive (alias); passive by
    default.
    """
    s, c = sin(radians(phi)), cos(radians(phi))
    # Passive-convention rotation matrix about y.
    matrix = np.array([[c, 0, -s], [0, 1, 0], [s, 0, c]])
    if active:
        matrix = matrix.transpose()
    return matrix @ coords_to_rotate
def rotate_about_z(coords_to_rotate, theta, active=False):
    r"""Rotates a 3 x n vector of the form np.array((x, y, z)) about the
    z-axis. Transforms can be active (alibi) or passive (alias); passive by
    default.
    """
    s, c = sin(radians(theta)), cos(radians(theta))
    # Passive-convention rotation matrix about z.
    matrix = np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])
    if active:
        matrix = matrix.transpose()
    return matrix @ coords_to_rotate
def translate(coords_to_translate, translation_vector, active=False):
    r"""Translates a 3 x Y array of the form np.array((x, y, z)) by a given
    displacement vector of the same form. Transforms can be active (alibi)
    or passive (alias), but are passive by default.
    """
    # Append singleton axes so the displacement broadcasts over any
    # trailing dimensions of the coordinate array.
    n_trailing_dims = len(np.shape(coords_to_translate)) - 1
    for _ in range(n_trailing_dims):
        translation_vector = np.expand_dims(translation_vector, axis=-1)

    # Passive: subtract the displacement; active: add it.
    if active:
        return coords_to_translate + translation_vector
    return coords_to_translate - translation_vector
| StarcoderdataPython |
4953909 | <reponame>shaun95/google-research
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The classes shared by all template definitions."""
import dataclasses
from typing import Any, Dict, List
from ipagnn.datasets.control_flow_programs.program_generators import top_down_refinement
@dataclasses.dataclass
class TemplateData:
    """Bundle describing how to generate a program by template refinement."""

    # Candidate templates, each paired with a sampling weight.
    weighted_templates: List[top_down_refinement.WeightedTemplate]
    # The top-level object whose holes will be refined.
    root_object: top_down_refinement.ThingWithHoles
    # Relative weight for each hole type when choosing which hole to fill.
    hole_type_weights: Dict[Any, int]
    # Whether generation should begin with an initialization step.
    start_with_initialization: bool = False
class ConfigurableTemplate(top_down_refinement.HoleFillerTemplate):
    """A hole filler template that accepts a config object.

    Stores the given config on the instance before running the base
    template initialization, so subclasses can consult `self.config`.
    """
    precedence = 1

    def __init__(self, config):
        self.config = config
        # Modernized from the legacy two-argument form; this file is
        # Python 3 only (it uses dataclasses), so zero-argument super()
        # is equivalent and idiomatic.
        super().__init__()
| StarcoderdataPython |
1662153 | <filename>MXNet2Caffe/find_caffe.py
# Make `caffe` importable: try the normal import first, then fall back to
# appending a hard-coded system install location to sys.path.
try:
    import caffe
except ImportError:
    import os, sys
    # NOTE(review): curr_path is computed but never used — presumably it was
    # meant to be appended to sys.path as well; confirm before removing.
    curr_path = os.path.abspath(os.path.dirname(__file__))
    # Hard-coded default Caffe install location; adjust for other systems.
    sys.path.append("/usr/local/caffe/python")
    import caffe
1863401 | import functools
import logging
from django.core.urlresolvers import reverse
from preserialize.serialize import serialize
from restlib2.http import codes
from restlib2.params import Parametizer, StrParam, BoolParam, IntParam
from avocado.conf import OPTIONAL_DEPS
from avocado.models import DataField
from avocado.events import usage
from ..base import ThrottledResource
from .. import templates
def can_change_field(u):
    """Return True if `u` holds the avocado permission to edit DataFields."""
    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    return u.has_perm('avocado.change_datafield')


def stats_capable(x):
    """Return True if stats should be exposed for the field.

    Searchable (free-text) fields are excluded from the stats link.
    """
    return not x.searchable


log = logging.getLogger(__name__)
def is_field_orphaned(instance):
    """Return True (logging an error) when the field has lost its backing
    model or column; False when both are present."""
    if instance.model is not None and instance.field is not None:
        return False
    log.error("Field is an orphan.", extra={'field': instance.pk})
    return True
def field_posthook(instance, data, request):
    """Field serialization post-hook for augmenting per-instance data.

    The only two arguments the post-hook takes is instance and data. The
    remaining arguments must be partially applied using `functools.partial`
    during the request/response cycle.
    """
    uri = request.build_absolute_uri

    links = {
        'self': {
            'href': uri(reverse('serrano:field', args=[instance.pk])),
        }
    }
    data['_links'] = links

    # Orphaned fields get flagged instead of linked to sub-resources.
    if is_field_orphaned(instance):
        data['orphaned'] = True
        return data

    links['values'] = {
        'href': uri(reverse('serrano:field-values', args=[instance.pk])),
    }
    links['distribution'] = {
        'href': uri(reverse('serrano:field-distribution', args=[instance.pk])),
    }
    if stats_capable(instance):
        links['stats'] = {
            'href': uri(reverse('serrano:field-stats', args=[instance.pk])),
        }

    return data
class FieldParametizer(Parametizer):
    "Supported params and their defaults for Field endpoints."
    sort = StrParam()  # column to sort on; only 'name' is handled downstream
    order = StrParam('asc')  # sort direction: 'asc' or 'desc'
    unpublished = BoolParam(False)  # include unpublished fields (privileged users only)
    brief = BoolParam(False)  # use the abbreviated serialization template
    query = StrParam()  # free-text search term (requires Haystack)
    limit = IntParam()  # maximum number of results to return

    # Not implemented
    offset = IntParam()
    page = IntParam()
class FieldBase(ThrottledResource):
    """Shared plumbing for Field endpoints: queryset scoping, per-request
    instance caching and serialization."""

    model = DataField
    parametizer = FieldParametizer
    template = templates.Field

    def get_queryset(self, request):
        # Privileged users see everything; everyone else only published fields.
        queryset = self.model.objects.all()
        if can_change_field(request.user):
            return queryset
        return queryset.published()

    def get_object(self, request, **kwargs):
        # Memoize the lookup on the request so repeated calls are free.
        if not hasattr(request, 'instance'):
            try:
                request.instance = self.get_queryset(request).get(**kwargs)
            except self.model.DoesNotExist:
                request.instance = None
        return request.instance

    def prepare(self, request, instance, template=None, brief=False, **params):
        # Pick the brief template unless an explicit one was supplied.
        if template is None:
            template = templates.BriefField if brief else self.template
        posthook = functools.partial(field_posthook, request=request)
        return serialize(instance, posthook=posthook, **template)

    def is_not_found(self, request, response, pk, *args, **kwargs):
        return self.get_object(request, pk=pk) is None
class FieldResource(FieldBase):
    "Resource for interacting with Field instances."
    def get(self, request, pk):
        instance = self.get_object(request, pk=pk)

        usage.log('read', instance=instance, request=request)

        # If the field is an orphan then log an error before returning an error
        # NOTE(review): `checks_for_orphans` is not defined in this module —
        # presumably provided by ThrottledResource or configuration; confirm.
        if self.checks_for_orphans and is_field_orphaned(instance):
            data = {
                'message': 'Orphaned field',
            }
            return self.render(request, data,
                               status=codes.internal_server_error)
        return self.prepare(request, instance)
class FieldsResource(FieldResource):
    "Field Collection Resource"

    def is_not_found(self, request, response, *args, **kwargs):
        # The collection endpoint itself always exists.
        return False

    def get(self, request):
        params = self.get_params(request)
        queryset = self.get_queryset(request)

        # Only privileged users that explicitly ask for unpublished fields
        # get to see them; everyone else is limited to published objects.
        if not (can_change_field(request.user) and params['unpublished']):
            queryset = queryset.published()

        if params['query'] and OPTIONAL_DEPS['haystack']:
            # Full-text search path (requires Haystack).
            usage.log('search', model=self.model, request=request, data={
                'query': params['query'],
            })
            results = self.model.objects.search(
                params['query'], queryset=queryset,
                max_results=params['limit'], partial=True)
            objects = (hit.object for hit in results)
        else:
            if params['sort'] == 'name':
                direction = '-name' if params['order'] == 'desc' else 'name'
                queryset = queryset.order_by(direction)
            if params['limit']:
                queryset = queryset[:params['limit']]
            objects = queryset

        if self.checks_for_orphans:
            # Re-query with orphans filtered out so a clean queryset is
            # handed to the serializer.
            pks = [obj.pk for obj in objects if not is_field_orphaned(obj)]
            objects = self.model.objects.filter(pk__in=pks)

        return self.prepare(request, objects, **params)
| StarcoderdataPython |
4805463 | <reponame>pkestene/COSMA
#!/usr/bin/env python3
import argparse
import os
import sys
import tempfile
import subprocess
# Parse the single required argument: where the dependencies get installed.
parser = argparse.ArgumentParser()
parser.add_argument(
    'prefix',
    type=str,
    help='Installation prefix for dependencies'
)
args = parser.parse_args()

if not os.path.isdir(args.prefix):
    print("The argument is not a directory.")
    # BUGFIX: sys.exit() with no argument exits with status 0, signalling
    # success to the caller; exit non-zero on invalid input.
    sys.exit(1)
def install_lib(tmppath, prefix, libname):
    """Clone, configure, build and install one github.com/kabicm dependency.

    Parameters
    ----------
    tmppath : str
        Scratch directory used for the clone and the CMake build tree.
    prefix : str
        Installation prefix; the library lands in `<prefix>/<libname>-master`.
    libname : str
        Repository name under github.com/kabicm.

    Returns
    -------
    str
        The directory the library was installed into.
    """
    url = f'https://github.com/kabicm/{libname}.git'
    clone_dir = os.path.join(tmppath, libname)
    build_dir = os.path.join(tmppath, f'build_{libname}')
    install_dir = f'{prefix}/{libname}-master'

    # Use argument lists (no shell) throughout instead of the previous mix
    # of os.system and shell=True strings, so paths with special characters
    # are never interpreted by a shell. Return codes are ignored, as before.
    subprocess.call(['git', 'clone', '--recursive', url, clone_dir])

    os.makedirs(build_dir, exist_ok=True)
    subprocess.call(['cmake', f'../{libname}',
                     '-DCMAKE_BUILD_TYPE=Release',
                     f'-DCMAKE_INSTALL_PREFIX={install_dir}'],
                    cwd=build_dir)
    subprocess.call(['cmake', '--build', '.', '--target', 'install'],
                    cwd=build_dir)

    return install_dir
# Build each dependency in a throwaway directory, collecting the install
# locations so they can be handed to CMake via CMAKE_PREFIX_PATH.
with tempfile.TemporaryDirectory() as tmppath:
    install_dirs = ''
    for libname in ['options', 'semiprof', 'grid2grid']:
        install_dirs += '{};'.format(install_lib(tmppath, args.prefix, libname))
    print('\nUse the following CMake parameter: -DCMAKE_PREFIX_PATH="{}"'.format(install_dirs))
| StarcoderdataPython |
3385950 | <reponame>django-doctor/lite-api
from django.http.response import JsonResponse
from rest_framework import status, permissions
from rest_framework.decorators import permission_classes
from rest_framework.parsers import JSONParser
from rest_framework.views import APIView
from api.audit_trail import service as audit_trail_service
from api.audit_trail.enums import AuditType
from api.audit_trail.serializers import AuditSerializer
from api.core.authentication import GovAuthentication
from api.core.constants import GovPermissions
from api.core.custom_views import OptionalPaginationView
from api.core.helpers import str_to_bool
from api.core.permissions import assert_user_has_permission
from lite_content.lite_api import strings
from api.picklists.enums import PickListStatus
from api.picklists.helpers import get_picklist_item
from api.picklists.models import PicklistItem
from api.picklists.serializers import (
PicklistUpdateCreateSerializer,
PicklistListSerializer,
TinyPicklistSerializer,
)
@permission_classes((permissions.AllowAny,))
class PickListsView(OptionalPaginationView):
    """Lists/searches the caller's team picklist items and creates new ones."""

    authentication_classes = (GovAuthentication,)
    serializer_class = PicklistListSerializer

    def get_serializer_class(self):
        # A slimmer serializer is used when pagination is switched off.
        disable_pagination = str_to_bool(self.request.GET.get("disable_pagination"))
        return TinyPicklistSerializer if disable_pagination else PicklistListSerializer

    def get_queryset(self):
        """
        Returns a list of all picklist items, filtered by type and by show_deactivated
        """
        query_params = self.request.GET
        items = PicklistItem.objects.filter(team=self.request.user.govuser.team)

        item_type = query_params.get("type")
        if item_type:
            items = items.filter(type=item_type)

        name = query_params.get("name")
        if name:
            items = items.filter(name__icontains=name)

        if not str_to_bool(query_params.get("show_deactivated")):
            items = items.filter(status=PickListStatus.ACTIVE)

        ids = query_params.get("ids")
        if ids:
            items = items.filter(id__in=ids.split(","))

        return items.order_by("-updated_at")

    def post(self, request):
        """
        Add a new picklist item
        """
        assert_user_has_permission(self.request.user.govuser, GovPermissions.MANAGE_PICKLISTS)

        payload = JSONParser().parse(request)
        payload["team"] = request.user.govuser.team.id

        serializer = PicklistUpdateCreateSerializer(data=payload, partial=True)
        if not serializer.is_valid():
            return JsonResponse(data={"errors": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)

        serializer.save()
        audit_trail_service.create(
            actor=request.user, verb=AuditType.CREATED_PICKLIST, target=serializer.instance,
        )
        return JsonResponse(data={"picklist_item": serializer.data}, status=status.HTTP_201_CREATED)
@permission_classes((permissions.AllowAny,))
class PicklistItemDetail(APIView):
    """Retrieve or update a single picklist item, auditing every change."""
    authentication_classes = (GovAuthentication,)

    def get(self, request, pk):
        """
        Gets details of a specific picklist item
        """
        picklist_item = get_picklist_item(pk)
        data = PicklistListSerializer(picklist_item).data
        # Attach the item's audit history for the requesting user.
        audit_qs = audit_trail_service.get_activity_for_user_and_model(request.user, picklist_item)
        data["activity"] = AuditSerializer(audit_qs, many=True).data
        return JsonResponse(data={"picklist_item": data})

    def put(self, request, pk):
        """
        Edit status of a new picklist item
        """
        assert_user_has_permission(self.request.user.govuser, GovPermissions.MANAGE_PICKLISTS)
        picklist_item = get_picklist_item(pk)
        # Only the owning team may edit its picklist items.
        if request.user.govuser.team != picklist_item.team:
            return JsonResponse(data={"errors": strings.Picklists.FORBIDDEN}, status=status.HTTP_403_FORBIDDEN,)
        serializer = PicklistUpdateCreateSerializer(instance=picklist_item, data=request.data, partial=True)
        if serializer.is_valid():
            # Audit entries are written BEFORE serializer.save() so the
            # "old" values can still be read from the unsaved instance.
            if serializer.validated_data.get("text"):
                if picklist_item.text != serializer.validated_data["text"]:
                    audit_trail_service.create(
                        actor=request.user,
                        verb=AuditType.UPDATED_PICKLIST_TEXT,
                        target=serializer.instance,
                        payload={"old_text": picklist_item.text, "new_text": serializer.validated_data["text"],},
                    )
            if serializer.validated_data.get("name"):
                if picklist_item.name != serializer.validated_data["name"]:
                    audit_trail_service.create(
                        actor=request.user,
                        verb=AuditType.UPDATED_PICKLIST_NAME,
                        target=serializer.instance,
                        payload={"old_name": picklist_item.name, "new_name": serializer.validated_data["name"],},
                    )
            if serializer.validated_data.get("status"):
                picklist_status = serializer.validated_data["status"]
                if picklist_item.status != picklist_status:
                    # A status change is logged as either a deactivation or
                    # a reactivation event.
                    if picklist_status == PickListStatus.DEACTIVATED:
                        audit_trail_service.create(
                            actor=request.user, verb=AuditType.DEACTIVATE_PICKLIST, target=serializer.instance,
                        )
                    else:
                        audit_trail_service.create(
                            actor=request.user, verb=AuditType.REACTIVATE_PICKLIST, target=serializer.instance,
                        )
            serializer.save()
            return JsonResponse(data={"picklist_item": serializer.data})
        return JsonResponse(data={"errors": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
11390161 | # -*- coding: utf-8 -*-
"""Classes for managing data to be modelled."""
__authors__ = '<NAME>'
__license__ = 'MIT'
import numpy as np
import theano as th
import logging
import utils
logger = logging.getLogger(__name__)
class Dataset(object):
    """ Basic dataset class. """

    def __init__(self, data, n_valid, corruptor=None, prng=None):
        """
        Parameters
        ----------
        data : numpy array
            Data matrix array with rows corresponding to data vectors.
        n_valid : integer
            Number of data vectors to use as validation set.
        corruptor : function(Array, RandomState) or None
            Optional function which applies random 'corruption' / augmentation
            to data, for example dequantising pixel values, adding noise,
            applying random affine transformation to image. Applied on
            initialisation and at end of each training epoch.
        prng : RandomState or None
            Seeded pseudo-random number generator - used to shuffle data
            and for corruptor if specified.
        """
        self.data = data
        self.n_valid = n_valid
        self.n_train = data.shape[0] - n_valid
        self.corruptor = corruptor
        self.prng = np.random.RandomState() if prng is None else prng

        # Shuffle, then carve off the validation portion.
        shuffled_data, self.perm = utils.shuffle(self.data, self.prng)
        self.data_valid, self.data_train = utils.split(shuffled_data, n_valid)

        # Apply the (optional) corruption once up front; note the valid set
        # is corrupted before the train set so the prng draw order is fixed.
        if corruptor is None:
            initial_valid = self.data_valid
            initial_train = self.data_train
        else:
            initial_valid = self.corruptor(self.data_valid, self.prng)
            initial_train = self.corruptor(self.data_train, self.prng)
        self.x_valid = th.shared(initial_valid.astype(th.config.floatX), 'x_valid')
        self.x_train = th.shared(initial_train.astype(th.config.floatX), 'x_train')

    def get_train_batch(self, batch_index, batch_size):
        # Slice of the shared training tensor for the given minibatch.
        start = batch_index * batch_size
        return self.x_train[start:start + batch_size]

    def end_of_epoch_callback(self):
        # Re-corrupt the training data between epochs (data augmentation).
        if self.corruptor is None:
            return
        refreshed = self.corruptor(self.data_train, self.prng)
        self.x_train.set_value(refreshed.astype(th.config.floatX))
class AuxilliaryVariableDataset(Dataset):
    """ Dataset class for models with inputs formed of data vector plus
    independent auxillary random vector."""

    def __init__(self, data, n_valid, auxilliary_sampler,
                 data_corruptor=None, prng=None):

        def corruptor(batch, rng):
            # Optionally corrupt the batch, then append a freshly sampled
            # auxilliary vector to every row.
            if data_corruptor is not None:
                batch = data_corruptor(batch, rng)
            aux_vars = auxilliary_sampler(batch.shape[0], rng)
            return np.hstack((batch, aux_vars))

        super(AuxilliaryVariableDataset, self).__init__(
            data, n_valid, corruptor, prng)
| StarcoderdataPython |
11241279 | from django.shortcuts import render
def crossbee_display_summation(request, input_dict, output_dict, widget):
    """Render the integer-summation check page, reporting whether the given
    sum matches the actual total of the integer list."""
    totals_match = sum(input_dict['intList']) == input_dict['sum']
    check = 'The calculation appears correct.' if totals_match else 'The calculation appears incorrect!'
    context = {'widget': widget, 'input_dict': input_dict, 'output_dict': output_dict, 'check': check}
    return render(request, 'visualizations/crossbee_display_integers.html', context)
def open_data_in_crossbee(request,input_dict,output_dict,widget):
    # Renders the CrossBee hand-off page for the widget.
    # NOTE(review): the commented-out block below previously wrote the
    # widget's string input to a per-user media file and exposed its name in
    # output_dict — kept for reference; confirm before deleting.
    #from mothra.settings import MEDIA_ROOT
    #from workflows.helpers import ensure_dir
    #destination = MEDIA_ROOT+'/'+str(request.user.id)+'/'+str(widget.id)+'.txt'
    #ensure_dir(destination)
    #f = open(destination,'w')
    #f.write(str(input_dict['string']))
    #f.close()
    #filename = str(request.user.id)+'/'+str(widget.id)+'.txt'
    #output_dict['filename'] = filename
    return render(request, 'visualizations/open_data_in_crossbee.html',{'widget':widget}) #,'input_dict':input_dict,'output_dict':output_dict})
| StarcoderdataPython |
4985623 | # Generated by Django 3.1.5 on 2021-05-25 19:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.5: creates the `Parameter` table storing
    # tunable settings (PCA dimensions, detector scaleFactor/minNeighbors,
    # frame size and rectangle styling).
    # NOTE(review): field semantics inferred from names only — confirm
    # against the application code that reads these values.

    dependencies = [
        ('accounts', '0002_auto_20210406_2305'),
    ]

    operations = [
        migrations.CreateModel(
            name='Parameter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('width_pca', models.IntegerField()),
                ('height_pca', models.IntegerField()),
                ('scaleFactor', models.IntegerField()),
                ('minNeighbors', models.IntegerField()),
                ('width_frame', models.IntegerField()),
                ('height_frame', models.IntegerField()),
                ('rec_color', models.CharField(max_length=10)),
                ('rec_stroke', models.IntegerField()),
            ],
            options={
                'verbose_name': 'Parameter',
                'verbose_name_plural': 'Parameters',
            },
        ),
    ]
| StarcoderdataPython |
3537503 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A tool to transfer flickr photos to Wikimedia Commons.
-group_id: specify group ID of the pool
-photoset_id: specify a photoset id
-user_id: give the user id of the flickrriper user
-start_id: the photo id to start with
-end_id: the photo id to end with
-tags: a tag to filter photo items (only one is supported)
-flickerreview add a flickr review template to the description
-reviewer: specify the reviewer
-override: override text for licence
-addcategory: specify a category
-removecategories remove all categories
-autonomous run bot in autonomous mode
"""
#
# (C) Multichill, 2009
# (C) Pywikibot team, 2009-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import base64
import hashlib
import io
import re
import pywikibot
from pywikibot import config, textlib
from pywikibot.comms.http import fetch
from pywikibot.specialbots import UploadRobot
from pywikibot.tools import PY2
try:
from pywikibot.userinterfaces.gui import Tkdialog
except ImportError as _tk_error:
Tkdialog = _tk_error
if not PY2:
from urllib.parse import urlencode
else:
from urllib import urlencode
try:
import flickrapi # see: http://stuvel.eu/projects/flickrapi
except ImportError as e:
flickrapi = e
flickr_allowed_license = {
0: False, # All Rights Reserved
1: False, # Creative Commons Attribution-NonCommercial-ShareAlike License
2: False, # Creative Commons Attribution-NonCommercial License
3: False, # Creative Commons Attribution-NonCommercial-NoDerivs License
4: True, # Creative Commons Attribution License
5: True, # Creative Commons Attribution-ShareAlike License
6: False, # Creative Commons Attribution-NoDerivs License
7: True, # No known copyright restrictions
8: True, # United States Government Work
}
def getPhoto(flickr, photo_id):
    """Fetch the info and size records for a single Flickr photo.

    Retries forever, sleeping 30 seconds after every Flickr API error.
    """
    while True:
        try:
            info = flickr.photos_getInfo(photo_id=photo_id)
            sizes = flickr.photos_getSizes(photo_id=photo_id)
        except flickrapi.exceptions.FlickrError:
            pywikibot.output('Flickr api problem, sleeping')
            pywikibot.sleep(30)
        else:
            return info, sizes
def isAllowedLicense(photoInfo):
    """Return True when the photo's Flickr license permits transfer.

    Looks the numeric license id up in the module-level
    flickr_allowed_license table.
    """
    license_id = int(photoInfo.find('photo').attrib['license'])
    return flickr_allowed_license[license_id]
def getPhotoUrl(photoSizes):
    """Return the source url of the jpg with the highest resolution.

    Flickr lists sizes in ascending order, so the last entry is the
    largest; an empty string is returned when no sizes are present.
    """
    entries = photoSizes.find('sizes').findall('size')
    return entries[-1].attrib['source'] if entries else ''
def downloadPhoto(photoUrl):
    """Download the photo and return its bytes wrapped in an io.BytesIO."""
    return io.BytesIO(fetch(photoUrl).raw)
def findDuplicateImages(photo, site=None):
    """Return files on *site* whose SHA1 hash matches *photo*.

    @param photo: photo data to hash
    @type photo: io.BytesIO
    @param site: site to search for duplicates;
        defaults to Wikimedia Commons when not supplied
    @type site: APISite or None
    """
    if not site:
        site = pywikibot.Site('commons', 'commons')
    digest = hashlib.sha1(photo.getvalue()).digest()
    return site.getFilesFromAnHash(base64.b16encode(digest))
def getTags(photoInfo):
    """Return the lowercased text of every tag on the photo."""
    tag_nodes = photoInfo.find('photo').find('tags').findall('tag')
    return [node.text.lower() for node in tag_nodes]
def getFlinfoDescription(photo_id):
    """Fetch the generated description text for a photo from flinfo.

    TODO: Add exception handling, try a couple of times
    """
    query = urlencode({'id': photo_id, 'raw': 'on'})
    url = 'http://wikipedia.ramselehof.de/flinfo.php?' + query
    return fetch(url).text
def getFilename(photoInfo, site=None, project='Flickr'):
    """Build a good filename for the upload based on the username and title.

    Prevents naming collisions.

    @param photoInfo: flickr.photos.getInfo result element
    @param site: target site; defaults to Wikimedia Commons
    @param project: label embedded into the generated filename
    @return: filename of the form "<title> - <project> - <user>.jpg",
        with " (<n>)" appended if that page already exists on the site
    """
    if not site:
        site = pywikibot.Site('commons', 'commons')
    username = photoInfo.find('photo').find('owner').attrib['username']
    title = photoInfo.find('photo').find('title').text
    if title:
        title = cleanUpTitle(title)
    if not title:
        # No usable title: fall back to a (possibly truncated) slice of the
        # description so the resulting page title stays within the limit.
        # find the max length for a mw title
        maxBytes = 240 - len(project.encode('utf-8')) \
            - len(username.encode('utf-8'))
        description = photoInfo.find('photo').find('description').text
        if description:
            descBytes = len(description.encode('utf-8'))
            if descBytes > maxBytes:
                # maybe we cut more than needed, anyway we do it
                items = max(min(len(description), maxBytes // 4),
                            len(description) - descBytes + maxBytes)
                description = description[:items]
            title = cleanUpTitle(description)
        else:
            title = ''
            # Should probably have the id of the photo as last resort.
    if pywikibot.Page(site, 'File:{} - {} - {}.jpg'
                      .format(title, project, username)).exists():
        # Name collision: probe increasing suffix numbers until free.
        i = 1
        while True:
            name = '{} - {} - {} ({}).jpg'.format(title, project, username, i)
            if pywikibot.Page(site, 'File:' + name).exists():
                i += 1
            else:
                return name
    else:
        return '{} - {} - {}.jpg'.format(title, project, username)
def cleanUpTitle(title):
    """Clean up the title of a potential MediaWiki page.

    Replaces characters that are forbidden or awkward in page titles
    (brackets, punctuation, slashes, whitespace runs) so the title is
    accepted by the MediaWiki software.

    @param title: raw title text
    @return: sanitised title with words joined by underscores
    """
    title = title.strip()
    # Angle/curly/square brackets are not allowed; map them to parens.
    title = re.sub(r'[<{\[]', '(', title)
    title = re.sub(r'[>}\]]', ')', title)
    title = re.sub(r'[ _]?\(!\)', '', title)
    title = re.sub(',:[ _]', ', ', title)
    title = re.sub('[;:][ _]', ', ', title)
    # Collapse whitespace runs to single spaces.  (A third, redundant
    # newline substitution was removed here: after these two passes no
    # '\n' can remain, so it was dead code.)
    title = re.sub(r'[\t\n ]+', ' ', title)
    title = re.sub(r'[\r\n ]+', ' ', title)
    # Drop trailing '?'/'!' before a period/quote or end of string.
    title = re.sub('[?!]([.\"]|$)', r'\1', title)
    title = re.sub('[&#%?!]', '^', title)
    title = re.sub('[;]', ',', title)
    # Slashes, plus, backslash and colon break titles; use dashes.
    title = re.sub(r'[/+\\:]', '-', title)
    title = re.sub('--+', '-', title)
    title = re.sub(',,+', ',', title)
    title = re.sub('[-,^]([.]|$)', r'\1', title)
    title = title.replace(' ', '_')
    title = title.strip('_')
    return title
def buildDescription(flinfoDescription='', flickrreview=False, reviewer='',
override='', addCategory='', removeCategories=False):
"""Build the final description for the image.
The description is based on the info from flickrinfo and improved.
"""
description = '== {{int:filedesc}} ==\n{}'.format(flinfoDescription)
if removeCategories:
description = textlib.removeCategoryLinks(description,
pywikibot.Site(
'commons', 'commons'))
if override:
description = description.replace('{{cc-by-sa-2.0}}\n', '')
description = description.replace('{{cc-by-2.0}}\n', '')
description = description.replace('{{flickrreview}}\n', '')
description = description.replace(
'{{copyvio|Flickr, licensed as "All Rights Reserved" which is not '
'a free license --~~~~}}\n',
'')
description = description.replace('=={{int:license}}==',
'=={{int:license}}==\n' + override)
elif flickrreview:
if reviewer:
description = description.replace(
'{{flickrreview}}',
'{{flickrreview|%s|'
'{{subst:CURRENTYEAR}}-{{subst:CURRENTMONTH}}-'
'{{subst:CURRENTDAY2}}}}' % reviewer)
if addCategory:
description = description.replace('{{subst:unc}}\n', '')
description = description + '\n[[Category:' + addCategory + ']]\n'
description = description.replace('\r\n', '\n')
return description
def processPhoto(flickr, photo_id='', flickrreview=False, reviewer='',
                 override='', addCategory='', removeCategories=False,
                 autonomous=False):
    """Process a single Flickr photo.

    For each image:
    * Check the license
    * Check if it isn't already on Commons
    * Build suggested filename
    * Check for name collision and maybe alter it
    * Pull description from Flinfo
    * Show image and description to user
    * Add a nice hotcat lookalike for the adding of categories
    * Filter the categories
    * Upload the image

    @return: 1 when the photo was uploaded, 0 otherwise
    """
    if photo_id:
        pywikibot.output(str(photo_id))
    (photoInfo, photoSizes) = getPhoto(flickr, photo_id)
    if isAllowedLicense(photoInfo) or override:
        # Get the url of the largest photo
        photoUrl = getPhotoUrl(photoSizes)
        # Should download the photo only once
        photo = downloadPhoto(photoUrl)
        # Don't upload duplicate images, should add override option
        duplicates = findDuplicateImages(photo)
        if duplicates:
            pywikibot.output('Found duplicate image at {}'
                             .format(duplicates.pop()))
        else:
            filename = getFilename(photoInfo)
            flinfoDescription = getFlinfoDescription(photo_id)
            photoDescription = buildDescription(flinfoDescription,
                                                flickrreview, reviewer,
                                                override, addCategory,
                                                removeCategories)
            # pywikibot.output(photoDescription)
            # Show a GUI review dialog when available; fall back to
            # autonomous mode when Tk is missing or the dialog fails.
            if not isinstance(Tkdialog, ImportError) and not autonomous:
                try:
                    (newPhotoDescription, newFilename, skip) = Tkdialog(
                        photoDescription, photo, filename).show_dialog()
                except ImportError as e:
                    pywikibot.warning(e)
                    pywikibot.warning('Switching to autonomous mode.')
                    autonomous = True
            elif not autonomous:
                pywikibot.warning('Switching to autonomous mode because GUI '
                                  'interface cannot be used')
                pywikibot.warning(Tkdialog)
                autonomous = True
            if autonomous:
                newPhotoDescription = photoDescription
                newFilename = filename
                skip = False
            # Do the actual upload
            # Would be nice to check before I upload if the file is already at
            # Commons. Not that important for this program, but maybe for
            # derived programs
            if not skip:
                bot = UploadRobot(photoUrl,
                                  description=newPhotoDescription,
                                  useFilename=newFilename,
                                  keepFilename=True,
                                  verifyDescription=False)
                bot.upload_image(debug=False)
                return 1
    else:
        pywikibot.output('Invalid license')
    return 0
def getPhotos(flickr, user_id='', group_id='', photoset_id='',
              start_id='', end_id='', tags=''):
    """Loop over a set of Flickr photos.

    Generator yielding photo ids from a group pool, a photoset or a
    user's public stream, optionally limited to the [start_id, end_id)
    range of ids.

    Get a set to work on (start with just a username).
    * Make it possible to delimit the set (from/to)

    NOTE(review): when none of group_id/photoset_id/user_id is given,
    ``pages`` and ``gen`` are never bound and the loop below raises
    NameError -- main() guards against calling it that way.
    """
    found_start_id = not start_id
    # https://www.flickr.com/services/api/flickr.groups.pools.getPhotos.html
    # Get the photos in a group
    if group_id:
        # First get the total number of photo's in the group
        photos = flickr.groups_pools_getPhotos(group_id=group_id,
                                               user_id=user_id, tags=tags,
                                               per_page='100', page='1')
        pages = photos.find('photos').attrib['pages']
        gen = lambda i: flickr.groups_pools_getPhotos(  # noqa: E731
            group_id=group_id, user_id=user_id, tags=tags,
            per_page='100', page=i
        ).find('photos').getchildren()
    # https://www.flickr.com/services/api/flickr.photosets.getPhotos.html
    # Get the photos in a photoset
    elif photoset_id:
        photos = flickr.photosets_getPhotos(photoset_id=photoset_id,
                                            per_page='100', page='1')
        pages = photos.find('photoset').attrib['pages']
        gen = lambda i: flickr.photosets_getPhotos(  # noqa: E731
            photoset_id=photoset_id, per_page='100', page=i
        ).find('photoset').getchildren()
    # https://www.flickr.com/services/api/flickr.people.getPublicPhotos.html
    # Get the (public) photos uploaded by a user
    elif user_id:
        photos = flickr.people_getPublicPhotos(user_id=user_id,
                                               per_page='100', page='1')
        pages = photos.find('photos').attrib['pages']
        gen = lambda i: flickr.people_getPublicPhotos(  # noqa: E731
            user_id=user_id, per_page='100', page=i
        ).find('photos').getchildren()
    for i in range(1, int(pages) + 1):
        # Retry each page until the API call succeeds at least once.
        gotPhotos = False
        while not gotPhotos:
            try:
                for photo in gen(i):
                    gotPhotos = True
                    if photo.attrib['id'] == start_id:
                        found_start_id = True
                    if found_start_id:
                        if photo.attrib['id'] == end_id:
                            pywikibot.output('Found end_id')
                            return
                        else:
                            yield photo.attrib['id']
            except flickrapi.exceptions.FlickrError:
                gotPhotos = False
                pywikibot.output('Flickr api problem, sleeping')
                pywikibot.sleep(30)
    return
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: unicode
    """
    local_args = pywikibot.handle_args(args)
    group_id = ''
    photoset_id = ''
    user_id = ''
    start_id = ''
    end_id = ''
    tags = ''
    addCategory = ''
    removeCategories = False
    autonomous = False
    totalPhotos = 0
    uploadedPhotos = 0
    # Do we mark the images as reviewed right away?
    if config.flickr['review']:
        flickrreview = config.flickr['review']
    else:
        flickrreview = False
    # Set the Flickr reviewer: explicit config wins, then the configured
    # sysop/user name for Commons, else empty.
    if config.flickr['reviewer']:
        reviewer = config.flickr['reviewer']
    elif 'commons' in config.sysopnames['commons']:
        pywikibot.output(config.sysopnames['commons'])
        reviewer = config.sysopnames['commons']['commons']
    elif 'commons' in config.usernames['commons']:
        reviewer = config.usernames['commons']['commons']
    else:
        reviewer = ''
    # Should be renamed to overrideLicense or something like that
    override = ''
    # Options either carry their value inline after a ':' separator
    # (e.g. "-group_id:123", sliced off by arg[N:]) or, when given bare,
    # the value is prompted for interactively.
    for arg in local_args:
        if arg.startswith('-group_id'):
            if len(arg) == 9:
                group_id = pywikibot.input('What is the group_id of the pool?')
            else:
                group_id = arg[10:]
        elif arg.startswith('-photoset_id'):
            if len(arg) == 12:
                photoset_id = pywikibot.input('What is the photoset_id?')
            else:
                photoset_id = arg[13:]
        elif arg.startswith('-user_id'):
            if len(arg) == 8:
                user_id = pywikibot.input(
                    'What is the user_id of the flickr user?')
            else:
                user_id = arg[9:]
        elif arg.startswith('-start_id'):
            if len(arg) == 9:
                start_id = pywikibot.input(
                    'What is the id of the photo you want to start at?')
            else:
                start_id = arg[10:]
        elif arg.startswith('-end_id'):
            if len(arg) == 7:
                end_id = pywikibot.input(
                    'What is the id of the photo you want to end at?')
            else:
                end_id = arg[8:]
        elif arg.startswith('-tags'):
            if len(arg) == 5:
                tags = pywikibot.input(
                    'What is the tag you want to filter out (currently only '
                    'one supported)?')
            else:
                tags = arg[6:]
        elif arg == '-flickrreview':
            flickrreview = True
        elif arg.startswith('-reviewer'):
            if len(arg) == 9:
                reviewer = pywikibot.input('Who is the reviewer?')
            else:
                reviewer = arg[10:]
        elif arg.startswith('-override'):
            if len(arg) == 9:
                override = pywikibot.input('What is the override text?')
            else:
                override = arg[10:]
        elif arg.startswith('-addcategory'):
            if len(arg) == 12:
                addCategory = pywikibot.input(
                    'What category do you want to add?')
            else:
                addCategory = arg[13:]
        elif arg == '-removecategories':
            removeCategories = True
        elif arg == '-autonomous':
            autonomous = True
    # Bail out early when the flickrapi import failed or no API key is
    # configured; otherwise require at least one photo-set selector.
    if isinstance(flickrapi, Exception):
        pywikibot.error('This script requires the python flickrapi module. \n'
                        'See: http://stuvel.eu/projects/flickrapi')
    elif not config.flickr['api_key']:
        pywikibot.warning('Flickr api key not found! Get yourself an api key\n'
                          'Any flickr user can get a key at\n'
                          'https://www.flickr.com/services/api/keys/apply/')
    elif user_id or group_id or photoset_id:
        if 'api_secret' in config.flickr and config.flickr['api_secret']:
            flickr = flickrapi.FlickrAPI(config.flickr['api_key'],
                                         config.flickr['api_secret'])
        else:
            pywikibot.output('Accessing public content only')
            flickr = flickrapi.FlickrAPI(config.flickr['api_key'])
        for photo_id in getPhotos(flickr, user_id, group_id, photoset_id,
                                  start_id, end_id, tags):
            uploadedPhotos += processPhoto(flickr, photo_id, flickrreview,
                                           reviewer, override, addCategory,
                                           removeCategories, autonomous)
            totalPhotos += 1
        pywikibot.output('Finished running')
        pywikibot.output('Total photos: ' + str(totalPhotos))
        pywikibot.output('Uploaded photos: ' + str(uploadedPhotos))
if __name__ == '__main__':
main()
| StarcoderdataPython |
4936781 | <reponame>lukecq1231/nli
import cPickle as pkl
import os
from data_iterator import TextIterator
from main import (
build_model,
pred_probs,
prepare_data,
pred_acc,
load_params,
init_params,
init_tparams,
)
# MUST MATCH the ids in `dic` in preprocess.py
id2label = ["entailment", "neutral", "contradiction"]
def main():
    """Evaluate a trained NLI model on the SNLI dev and test splits.

    Loads model options and parameters from "<dirname>.npz[.pkl]"
    (the model is named after the directory this script lives in),
    prints dev/test accuracy and writes per-example predictions to
    predict_gold_samples_{valid,test}.txt as
    "<pred>\t<gold>\t<premise>\t<hypothesis>" lines.
    """
    model_name = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
    model = "{}.npz".format(model_name)
    datasets = [
        "../../data/word_sequence/premise_snli_1.0_train.txt",
        "../../data/word_sequence/hypothesis_snli_1.0_train.txt",
        "../../data/word_sequence/label_snli_1.0_train.txt",
    ]
    valid_datasets = [
        "../../data/word_sequence/premise_snli_1.0_dev.txt",
        "../../data/word_sequence/hypothesis_snli_1.0_dev.txt",
        "../../data/word_sequence/label_snli_1.0_dev.txt",
    ]
    test_datasets = [
        "../../data/word_sequence/premise_snli_1.0_test.txt",
        "../../data/word_sequence/hypothesis_snli_1.0_test.txt",
        "../../data/word_sequence/label_snli_1.0_test.txt",
    ]
    dictionary = "../../data/word_sequence/vocab_cased.pkl"
    # load model model_options
    with open("%s.pkl" % model, "rb") as f:
        options = pkl.load(f)
    print(options)
    # load dictionary and invert
    # NOTE(review): the dictionary is only loaded here, no inversion is
    # actually performed in this function.
    with open(dictionary, "rb") as f:
        word_dict = pkl.load(f)
    n_words = options["n_words"]
    valid_batch_size = options["valid_batch_size"]
    valid = TextIterator(
        valid_datasets[0],
        valid_datasets[1],
        valid_datasets[2],
        dictionary,
        n_words=n_words,
        batch_size=valid_batch_size,
        shuffle=False,
    )
    test = TextIterator(
        test_datasets[0],
        test_datasets[1],
        test_datasets[2],
        dictionary,
        n_words=n_words,
        batch_size=valid_batch_size,
        shuffle=False,
    )
    # allocate model parameters
    params = init_params(options, word_dict)
    # load model parameters and set theano shared variables
    params = load_params(model, params)
    tparams = init_tparams(params)
    trng, use_noise, x1, x1_mask, x2, x2_mask, y, opt_ret, cost, f_pred = build_model(
        tparams, options
    )
    # Disable dropout/noise for deterministic evaluation.
    use_noise.set_value(0.0)
    valid_acc = pred_acc(f_pred, prepare_data, options, valid)
    test_acc = pred_acc(f_pred, prepare_data, options, test)
    print("valid accuracy", valid_acc)
    print("test accuracy", test_acc)
    predict_labels_valid = pred_label(f_pred, prepare_data, valid)
    predict_labels_test = pred_label(f_pred, prepare_data, test)
    # Join predictions with gold labels, premises and hypotheses line by
    # line (the iterators above do not shuffle, so order matches).
    with open("predict_gold_samples_valid.txt", "w") as fw:
        with open(valid_datasets[0], "r") as f1:
            with open(valid_datasets[1], "r") as f2:
                with open(valid_datasets[-1], "r") as f3:
                    for a, b, c, d in zip(predict_labels_valid, f3, f1, f2):
                        fw.write(
                            str(a)
                            + "\t"
                            + b.rstrip()
                            + "\t"
                            + c.rstrip()
                            + "\t"
                            + d.rstrip()
                            + "\n"
                        )
    with open("predict_gold_samples_test.txt", "w") as fw:
        with open(test_datasets[0], "r") as f1:
            with open(test_datasets[1], "r") as f2:
                with open(test_datasets[-1], "r") as f3:
                    for a, b, c, d in zip(predict_labels_test, f3, f1, f2):
                        fw.write(
                            str(a)
                            + "\t"
                            + b.rstrip()
                            + "\t"
                            + c.rstrip()
                            + "\t"
                            + d.rstrip()
                            + "\n"
                        )
    print("Done")
def pred_label(f_pred, prepare_data, iterator):
    """Run the prediction function over every batch of a dataset iterator.

    Args:
        f_pred: compiled prediction function taking
            (x1, x1_mask, x2, x2_mask) and returning an array of labels.
        prepare_data: batch-padding helper returning
            (x1, x1_mask, x2, x2_mask, y).
        iterator: yields (x1, x2, y) batches.

    Returns:
        A flat list of predicted labels over all batches, in order.
    """
    labels = []
    for x1, x2, y in iterator:
        x1, x1_mask, x2, x2_mask, y = prepare_data(x1, x2, y)
        preds = f_pred(x1, x1_mask, x2, x2_mask)
        # extend() instead of the old ``labels = labels + preds.tolist()``,
        # which copied the whole accumulated list on every batch
        # (quadratic in the number of batches).
        labels.extend(preds.tolist())
    return labels
if __name__ == "__main__":
main()
| StarcoderdataPython |
11385073 | <gh_stars>0
import logging
import struct
import unittest
from unittest.case import SkipTest
from parameterized import parameterized
from stages import Decode, Execute, Fetch, Memory, Writeback
from stages import ForwardingUnit, Ram
# import mock
# Base of the simulated address space (0x80000000 -- presumably the
# RISC-V reset vector; TODO confirm against the stages module).
BASE_ADDR = 0x80000000
FORMAT = '%(message)s'
# trace.log is truncated on every run; stages log one line per tick.
logging.basicConfig(filename='trace.log', format=FORMAT, filemode='w', level=logging.INFO)
# Column header for the trace: stage name, then program counter.
logging.info("%s", f"{f'Stage':10}-- ({f'PC':8})")
logging.info("%s", "-" * 23)
class TestForwarding(unittest.TestCase):
    """Unit tests for ForwardingUnit register-value forwarding.

    Each parameterized case is: a list of (rd, wdat) writeback inserts,
    the (rs1, rs2) source registers to resolve, and the expected
    forwarded value pair.

    NOTE(review): another class with this same name appears later in this
    module; while both share the name, the later definition shadows this
    one and these cases are never collected by unittest.
    """
    @parameterized.expand([
        [[(1, 1), (1, 2)], 1, 2, (2, None)],
        [[(1, 1), (1, 2), (2, 3), (2, 4)], 1, 2, (2, 4)],
        [[(1, 1), (1, 2), (2, 3), (2, 4), (1, 3)], 1, 2, (3, 4)]
    ])
    def test_two(self, op_list, rs1, rs2, expected):
        fwd = ForwardingUnit()
        # Replay the writeback stream, then check both source registers
        # resolve to the most recent value written to them.
        for l in op_list:
            rd, wdat = l
            fwd.insert(rd, wdat)
        self.assertEqual(fwd.forward(rs1, rs2), expected)
class TestBranch(unittest.TestCase):
    """Drive the Fetch/Decode stages against the branch-test binary."""
    def setUp(self):
        FILENAME = "assembly_unit_tests/branch.o"
        ram = Ram()
        self.s2 = Decode()
        self.s1 = Fetch(ram)
        ram.load(FILENAME)
    def step(self):
        # One pipeline tick: Decode consumes Fetch's output, then Fetch
        # advances (it is handed Decode, presumably for branch
        # redirection -- TODO confirm in stages.Fetch).
        self.s2.tick(self.s1)
        self.s1.tick(decode=self.s2)
        logging.info("-"*20)
    def test_beq(self):
        # Smoke test only: steps the pipeline 7 times with no explicit
        # assertions (failures surface as exceptions / via trace.log).
        for i in range(7):
            self.step()
class TestRam(unittest.TestCase):
    """Basic Ram write/readback and binary-loading checks."""
    def test_ram(self):
        # A 4-byte write at the base address must land at offset 0 of the
        # backing store.  Uses assertEqual instead of a bare assert: bare
        # asserts are stripped under ``python -O`` and give no diff on
        # failure.
        ram = Ram()
        ram[0x80000000] = struct.pack("I", 1234)
        self.assertEqual(struct.pack("I", 1234), ram.memory[0:4])
    def test_load(self):
        # Loading an object file should simply not raise.
        FILENAME = "asm/branch.o"
        ram = Ram()
        ram.load(FILENAME)
class TestForwardingPipeline(unittest.TestCase):
    """Forwarding across the full pipeline (Fetch->Decode->Execute->Memory).

    Renamed from ``TestForwarding``: this module already defines a class
    of that name earlier, and with duplicate names the later definition
    shadows the earlier one, silently skipping its tests.
    """
    def setUp(self) -> None:
        self.ram = Ram()
        self.s4 = Memory()
        self.s3 = Execute()
        self.s2 = Decode()
        self.s1 = Fetch(self.ram)
    def tick(self):
        # One full pipeline tick, oldest stage first, collecting each
        # stage's destination register into a fresh forwarding unit.
        fwd = ForwardingUnit()
        # Memory <- Execute
        self.s4.tick(self.s3, ram=self.ram)
        fwd.insert(rd=self.s4.ins.rd, wdat=self.s4.ins.wdat)
        # Execute <- Decode
        self.s3.tick(self.s2)
        fwd.insert(rd=self.s3.ins.rd, wdat=self.s3.ins.wdat)
        # Decode <- Fetch
        self.s2.tick(self.s1)
        # Fetch
        self.s1.tick(decode=self.s2)
    def test_de_ex_fwd(self):
        # NOTE(review): this only loads the binary and never calls tick()
        # or asserts anything -- the test looks unfinished.
        self.ram.load(filename="asm/forward.o")
if __name__ == '__main__':
unittest.main(verbosity=2)
| StarcoderdataPython |
3372663 | <gh_stars>0
import click
from .scrape import scrape
from .transform import transform
@click.group()
@click.option('--yaml-output', type=click.File('a+'), default='euas.yaml')
@click.pass_context
def cli(ctx, yaml_output):
    """Root command group.

    Stashes the shared YAML output file handle (opened in append mode)
    on the click context so subcommands can write to it.
    """
    ctx.obj = {'yaml_output': yaml_output}
# Register the subcommands imported above.
cli.add_command(scrape)
cli.add_command(transform)
| StarcoderdataPython |
11275218 | <filename>mpi_extrapolation/mpi.py
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for learning to predict multiplane images (MPI).
For CVPR 2019 paper:
Pushing the Boundaries of View Extrapolation with Multiplane Images
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Modified from code written by <NAME>
(https://github.com/google/stereo-magnification).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
import mpi_extrapolation.geometry.projector as pj
from mpi_extrapolation.nets import build_vgg19
from mpi_extrapolation.nets import ed_3d_net
from mpi_extrapolation.nets import refine_net
class MPI(object):
"""Class definition for MPI learning module.
"""
  def __init__(self):
    # Stateless module: every method takes its inputs explicitly.
    pass
  def infer_mpi(self, raw_src_images, raw_ref_image, ref_pose, src_poses,
                intrinsics, num_mpi_planes, mpi_planes,
                run_patched=False,
                patch_ind=np.array([0, 0]),
                patchsize=np.array([256, 256]),
                outsize=np.array([128, 128])):
    """Construct the MPI inference graph.

    Two-stage prediction: a 3D encoder-decoder predicts an initial RGBA
    MPI from the plane sweep volume, then a refinement network predicts
    per-plane flow (to gather occluded content) and refined alphas.

    Args:
      raw_src_images: stack of source images [batch, height, width, 3*#source]
      raw_ref_image: reference image [batch, height, width, 3]
      ref_pose: reference frame pose (world to camera) [batch, 4, 4]
      src_poses: source frame poses (world to camera) [batch, #source, 4, 4]
      intrinsics: camera intrinsics [batch, 3, 3]
      num_mpi_planes: number of mpi planes to predict
      mpi_planes: list of plane depths
      run_patched: whether to only infer MPI for patches of PSV (inference only)
      patch_ind: patch index for infer MPI inference
      patchsize: spatial patch size for MPI inference
      outsize: size of central portion to keep for patched inference
        (the numpy-array defaults are shared across calls but only read,
        never mutated, so this is safe)

    Returns:
      outputs: a collection of output tensors (also added to the
        "outputs" graph collection).
    """
    with tf.name_scope("preprocessing"):
      src_images = self.preprocess_image(raw_src_images)
      ref_image = self.preprocess_image(raw_ref_image)

    with tf.name_scope("format_network_input"):
      # WARNING: we assume the first src image/pose is the reference
      net_input = self.format_network_input(ref_image, src_images[:, :, :, 3:],
                                            ref_pose, src_poses[:, 1:],
                                            mpi_planes, intrinsics)

    with tf.name_scope("layer_prediction"):
      # The network directly outputs the color image at each MPI plane.
      chout = 4  # Number of output channels, RGBA
      if run_patched:
        # Patch the PSV spatially, with buffer, and generate MPI patch
        # Only for inference (not implemented for training)
        buffersize = (patchsize - outsize) // 2
        padding = [[0, 0], [buffersize[0], buffersize[0]],
                   [buffersize[1], buffersize[1]], [0, 0], [0, 0]]
        net_input_pad = tf.pad(net_input, padding)
        patch_start = patch_ind * outsize
        patch_end = patch_start + patchsize
        net_input_patch = net_input_pad[:, patch_start[0]:patch_end[0],
                                        patch_start[1]:patch_end[1], :, :]
        rgba_layers, _ = ed_3d_net(net_input_patch, chout)
      else:
        # Generate entire MPI (training and inference, but takes more memory)
        print("first step MPI prediction")
        rgba_layers, _ = ed_3d_net(net_input, chout)
      color_layers = rgba_layers[:, :, :, :, :-1]
      alpha_layers = rgba_layers[:, :, :, :, -1:]
      # Rescale alphas to (0, 1) (network output is tanh-range [-1, 1]
      # -- presumably; TODO confirm against ed_3d_net's final activation).
      alpha_layers = (alpha_layers + 1.)/2.
      rgba_layers = tf.concat([color_layers, alpha_layers], axis=4)

      print("refining MPI")
      # Weight the initial MPI by per-plane transmittance; gradients are
      # stopped so the refinement stage trains independently.
      transmittance = self.compute_transmittance(alpha_layers)
      refine_input_colors = color_layers * transmittance
      refine_input_alpha = alpha_layers * transmittance
      stuff_behind = tf.cumsum(refine_input_colors, axis=3)
      concat_trans = True  # Concatenate transmittance to second input
      if concat_trans:
        refine_input = tf.concat([tf.stop_gradient(refine_input_colors),
                                  tf.stop_gradient(stuff_behind),
                                  tf.stop_gradient(refine_input_alpha),
                                  tf.stop_gradient(transmittance)], axis=4)
      # Append a normalized plane-index channel so the refine net knows
      # each plane's (inverse-depth) position in the stack.
      normalized_disp_inds = tf.reshape(tf.linspace(0.0, 1.0, num_mpi_planes),
                                        [1, 1, 1, num_mpi_planes, 1])
      sh = tf.shape(refine_input)
      normalized_disp_inds_stack = tf.tile(normalized_disp_inds,
                                           [1, sh[1], sh[2], 1, 1])
      refine_input = tf.concat([refine_input, normalized_disp_inds_stack],
                               axis=4)
      print("refine input size:", refine_input.shape)
      rgba_layers_refine = refine_net(refine_input)

      print("predicting flow for occlusions")
      # Refined colors are gathered from the cumulative render via the
      # predicted 2-channel flow field.
      flow_source = tf.stop_gradient(stuff_behind)
      flow_vecs = rgba_layers_refine[:, :, :, :, :2]
      color_layers = pj.flow_gather(flow_source, flow_vecs)
      alpha_layers = rgba_layers_refine[:, :, :, :, -1:]
      # Rescale alphas to (0, 1)
      alpha_layers = (alpha_layers + 1.)/2.
      rgba_layers_refine = tf.concat([color_layers, alpha_layers], axis=4)

    # Collect output tensors
    pred = {}
    pred["rgba_layers"] = rgba_layers
    pred["rgba_layers_refine"] = rgba_layers_refine
    pred["refine_input_mpi"] = tf.concat([refine_input_colors,
                                          refine_input_alpha], axis=-1)
    pred["stuff_behind"] = stuff_behind
    pred["flow_vecs"] = flow_vecs
    pred["psv"] = net_input[:, :, :, :, 0:3]

    # Add pred tensors to outputs collection
    print("adding outputs to collection")
    for i in pred:
      tf.add_to_collection("outputs", pred[i])

    return pred
  def mpi_render_view(self, input_mpi, tgt_pose, planes, intrinsics):
    """Render a target view from MPI representation.

    Args:
      input_mpi: input MPI [batch, height, width, #planes, 4]
      tgt_pose: target pose (relative) to render from [batch, 4, 4]
      planes: list of depth for each plane
      intrinsics: camera intrinsics [batch, 3, 3]
    Returns:
      (output_image, proj_images): the composited rendered view
      [batch, height, width, 3] and the per-plane warped RGBA images
      [batch, height, width, #planes, 4].
    """
    batch_size, _, _ = tgt_pose.get_shape().as_list()
    rgba_layers = input_mpi
    # Format for homography code: per-plane depths tiled over the batch,
    # and planes moved to the leading axis.
    depths = tf.tile(planes[:, tf.newaxis], [1, batch_size])
    rgba_layers = tf.transpose(rgba_layers, [3, 0, 1, 2, 4])
    # Render target viewpoint: homography-warp each plane, then alpha
    # composite the warped stack.
    proj_images = pj.projective_forward_homography(
        rgba_layers, intrinsics, tgt_pose, depths)
    proj_images = tf.transpose(proj_images, [1, 2, 3, 0, 4])
    output_image = pj.over_composite(proj_images)
    output_image.set_shape([None, None, None, 3])
    return output_image, proj_images
  def build_train_graph(self,
                        inputs,
                        min_depth,
                        max_depth,
                        num_mpi_planes,
                        learning_rate=0.0002,
                        beta1=0.9,
                        vgg_model_file=None,
                        global_step=0):
    """Construct the training computation graph.

    Args:
      inputs: dictionary of tensors (see 'input_data' below) needed for training
      min_depth: minimum depth for the PSV and MPI planes
      max_depth: maximum depth for the PSV and MPI planes
      num_mpi_planes: number of MPI planes to infer (overridden below by
        the per-step size randomization)
      learning_rate: learning rate
      beta1: hyperparameter for Adam
      vgg_model_file: path to vgg weights (needed when vgg loss is used)
      global_step: current optimization step
    Returns:
      A train_op to be used for training.
    """
    print("starting to build graph")
    with tf.name_scope("input_size_randomization"):
      # Randomly pick a (downsample factor, #planes) pair each step so
      # the network trains across several resolutions and plane counts.
      dim_choices = tf.constant([[1, 16], [2, 32], [4, 32], [4, 64], [4, 128],
                                 [8, 32], [8, 64], [8, 128]],
                                dtype=tf.int32)
      rand_dim = tf.random_shuffle(dim_choices)[0, :]
      height_div = rand_dim[0]
      width_div = rand_dim[0]
      num_mpi_planes = rand_dim[1]
      tf.summary.scalar("num_mpi_planes", num_mpi_planes)

    with tf.name_scope("setup"):
      mpi_planes = self.inv_depths(min_depth, max_depth, num_mpi_planes)

    with tf.name_scope("input_data"):
      raw_tgt_image = inputs["tgt_image"]
      raw_ref_image = inputs["ref_image"]
      raw_src_images = inputs["src_images"]
      _, img_height, img_width, _ = raw_src_images.get_shape().as_list(
      )
      img_height = img_height // height_div
      img_width = img_width // width_div
      raw_tgt_image = tf.image.convert_image_dtype(
          raw_tgt_image, dtype=tf.float32)
      raw_ref_image = tf.image.convert_image_dtype(
          raw_ref_image, dtype=tf.float32)
      raw_src_images = tf.image.convert_image_dtype(
          raw_src_images, dtype=tf.float32)
      raw_tgt_image = tf.image.resize_area(raw_tgt_image,
                                           [img_height, img_width])
      raw_ref_image = tf.image.resize_area(raw_ref_image,
                                           [img_height, img_width])
      raw_src_images = tf.image.resize_area(raw_src_images,
                                            [img_height, img_width])
      tgt_pose = inputs["tgt_pose"]
      ref_pose = inputs["ref_pose"]
      src_poses = inputs["src_poses"]
      intrinsics = inputs["intrinsics"]
      # Scale intrinsics based on size randomization
      intrinsics = tf.concat([
          intrinsics[:, 0:1, :] / tf.to_float(width_div),
          intrinsics[:, 1:2, :] / tf.to_float(height_div), intrinsics[:, 2:3, :]
      ],
                             axis=1)
      inputs["intrinsics"] = intrinsics
      _, num_source, _, _ = src_poses.get_shape().as_list()

    with tf.name_scope("inference"):
      print("setting up MPI inference")
      num_mpi_planes = tf.shape(mpi_planes)[0]
      pred = self.infer_mpi(raw_src_images, raw_ref_image, ref_pose, src_poses,
                            intrinsics, num_mpi_planes,
                            mpi_planes)
      rgba_layers = pred["rgba_layers"]
      rgba_layers_refine = pred["rgba_layers_refine"]
      stuff_behind = pred["stuff_behind"]
      refine_input_mpi = pred["refine_input_mpi"]
      psv = pred["psv"]

    with tf.name_scope("synthesis"):
      print("setting up rendering")
      # Render the target view from both the initial and refined MPIs.
      rel_pose = tf.matmul(tgt_pose, tf.matrix_inverse(ref_pose))
      output_image, output_layers = self.mpi_render_view(
          rgba_layers, rel_pose, mpi_planes, intrinsics)
      output_alpha = output_layers[Ellipsis, -1]
      output_image_refine, _ = self.mpi_render_view(
          rgba_layers_refine, rel_pose, mpi_planes, intrinsics)

    with tf.name_scope("loss"):
      print("computing losses")
      # Mask loss for pixels outside reference frustum
      loss_mask = tf.where(
          tf.equal(
              tf.reduce_min(
                  tf.abs(tf.reduce_sum(output_layers, axis=-1)),
                  axis=3,
                  keep_dims=True), 0.0),
          tf.zeros_like(output_alpha[:, :, :, 0:1]),
          tf.ones_like(output_alpha[:, :, :, 0:1]))
      loss_mask = tf.stop_gradient(loss_mask)
      tf.summary.image("loss_mask", loss_mask)
      # Helper functions for loss
      def compute_error(real, fake, mask):
        return tf.reduce_mean(mask * tf.abs(fake - real))

      # Normalized VGG loss (from
      # https://github.com/CQFIO/PhotographicImageSynthesis)
      downsample = lambda tensor, ds: tf.nn.avg_pool(tensor, [1, ds, ds, 1],
                                                     [1, ds, ds, 1], "SAME")

      def vgg_loss(raw_tgt_image, output_image, loss_mask):
        """Compute VGG loss."""
        # Masked, per-layer-weighted L1 over VGG19 features; the mask is
        # average-pooled to match each feature map's resolution.
        vgg_real = build_vgg19(raw_tgt_image * 255.0, vgg_model_file)
        rescaled_output_image = (output_image + 1.)/2. * 255.0
        vgg_fake = build_vgg19(
            rescaled_output_image, vgg_model_file, reuse=True)
        p0 = compute_error(vgg_real["input"], vgg_fake["input"], loss_mask)
        p1 = compute_error(vgg_real["conv1_2"],
                           vgg_fake["conv1_2"],
                           loss_mask)/2.6
        p2 = compute_error(vgg_real["conv2_2"],
                           vgg_fake["conv2_2"],
                           downsample(loss_mask, 2))/4.8
        p3 = compute_error(vgg_real["conv3_2"],
                           vgg_fake["conv3_2"],
                           downsample(loss_mask, 4))/3.7
        p4 = compute_error(vgg_real["conv4_2"],
                           vgg_fake["conv4_2"],
                           downsample(loss_mask, 8))/5.6
        p5 = compute_error(vgg_real["conv5_2"],
                           vgg_fake["conv5_2"],
                           downsample(loss_mask, 16))*10/1.5
        total_loss = p0+p1+p2+p3+p4+p5
        return total_loss, vgg_real, vgg_fake

      # Total loss = VGG loss on the initial render + VGG loss on the
      # refined render.
      vgg_loss_initial, _, _ = vgg_loss(raw_tgt_image, output_image, loss_mask)
      tf.summary.scalar("vgg_loss_initial", vgg_loss_initial)
      total_loss = vgg_loss_initial
      vgg_loss_refine, _, _ = vgg_loss(raw_tgt_image, output_image_refine,
                                       loss_mask)
      tf.summary.scalar("vgg_loss_refine", vgg_loss_refine)
      total_loss += vgg_loss_refine

    with tf.name_scope("train_op"):
      print("setting up train op")
      train_vars = [var for var in tf.trainable_variables()]
      optim = tf.train.AdamOptimizer(learning_rate, beta1)
      grads_and_vars = optim.compute_gradients(total_loss, var_list=train_vars)
      train_op = [optim.apply_gradients(grads_and_vars)]

    # Summaries
    tf.summary.scalar("total_loss", total_loss)
    # Source images
    for i in range(num_source):
      src_image = raw_src_images[:, :, :, i*3:(i+1)*3]
      tf.summary.image("src_image_%d" % i, src_image)
    # Output image
    tf.summary.image("output_image", self.deprocess_image(output_image))
    # Refined output image
    tf.summary.image("output_image_refine",
                     self.deprocess_image(output_image_refine))
    # Target image
    tf.summary.image("tgt_image", raw_tgt_image)
    # Ref image
    tf.summary.image("ref_image", raw_ref_image)
    # Predicted color and alpha layers, and PSV
    num_summ = 16  # Number of plane summaries to show in tensorboard
    for i in range(num_summ):
      ind = tf.to_int32(i * num_mpi_planes/num_summ)
      rgb = rgba_layers[:, :, :, ind, :3]
      alpha = rgba_layers[:, :, :, ind, -1:]
      ref_plane = psv[:, :, :, ind, 3:6]
      source_plane = psv[:, :, :, ind, :3]
      output_rgb = output_layers[:, :, :, ind, :3]
      tf.summary.image("rgb_layer_%d" % i, self.deprocess_image(rgb))
      tf.summary.image("alpha_layer_%d" % i, alpha)
      tf.summary.image("rgba_layer_%d" % i, self.deprocess_image(rgb * alpha))
      tf.summary.image("psv_avg_%d" % i,
                       (self.deprocess_image(0.5*ref_plane + 0.5*source_plane)))
      tf.summary.image("output_rgb_%d" % i,
                       self.deprocess_image(output_rgb))
      tf.summary.image("psv_ref_%d" % i, self.deprocess_image(ref_plane))
      tf.summary.image("psv_source_%d" % i, self.deprocess_image(source_plane))
    # Cumulative rendered images and refined MPI
    for i in range(num_summ):
      ind = tf.to_int32(i * num_mpi_planes/num_summ)
      rgb = rgba_layers_refine[:, :, :, ind, :3]
      alpha = rgba_layers_refine[:, :, :, ind, 3:]
      render = stuff_behind[:, :, :, ind, :3]
      input_colors = refine_input_mpi[:, :, :, ind, :3]
      tf.summary.image("rgb_layer_refine_%d" % i, self.deprocess_image(rgb))
      tf.summary.image("alpha_layer_refine_%d" % i, alpha)
      tf.summary.image("rgba_layer_refine_%d" % i,
                       self.deprocess_image(rgb * alpha))
      tf.summary.image("cumulative_render_%d" % i, self.deprocess_image(render))
      tf.summary.image("input_colors_refine_%d" % i,
                       self.deprocess_image(input_colors))

    return train_op
def train(self, train_op, load_dir, checkpoint_dir, summary_dir,
          continue_train, summary_freq, save_latest_freq, max_steps,
          global_step):
  """Runs the training procedure.

  Args:
    train_op: op for training the network
    load_dir: where to load pretrained model
    checkpoint_dir: where to save the model checkpoints
    summary_dir: where to save the tensorboard summaries
    continue_train: whether to restore training from previous checkpoint
    summary_freq: summary frequency (in steps)
    save_latest_freq: Frequency of model saving (in steps)
    max_steps: maximum training steps
    global_step: tf Variable for current optimization step
  """
  # parameter_count = tf.reduce_sum(
  #     [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])
  # Fetched together with train_op each iteration so the step counter
  # advances exactly once per optimization step.
  incr_global_step = tf.assign(global_step, global_step + 1)
  # global_step is saved alongside the trainable variables so that a
  # restored run resumes from the correct iteration count.
  saver = tf.train.Saver([var for var in tf.trainable_variables()] +
                         [global_step],
                         max_to_keep=None)
  # saver=None / save_summaries_secs=0: checkpointing and summary writes
  # are driven manually below instead of by the Supervisor's timers.
  sv = tf.train.Supervisor(logdir=summary_dir, save_summaries_secs=0,
                           saver=None)
  config = tf.ConfigProto()
  # Allocate GPU memory on demand rather than grabbing it all up front.
  config.gpu_options.allow_growth = True
  with sv.managed_session("local", config=config) as sess:
    if continue_train:
      checkpoint = tf.train.latest_checkpoint(load_dir)
      if checkpoint is not None:
        print("Resume training from previous checkpoint:", checkpoint)
        saver.restore(sess, checkpoint)
    print("starting training iters")
    for step in range(1, max_steps):
      start_time = time.time()
      fetches = {
          "train": train_op,
          "global_step": global_step,
          "incr_global_step": incr_global_step,
      }
      if step % summary_freq == 0:
        fetches["summary"] = sv.summary_op
      results = sess.run(fetches)
      gs = results["global_step"]
      if step % summary_freq == 0:
        sv.summary_writer.add_summary(results["summary"], gs)
        print("[Step %.8d] time: %4.4f/it" % (gs, time.time() - start_time))
      if step % save_latest_freq == 0:
        print(" [*] Saving checkpoint to %s..." % checkpoint_dir)
        saver.save(
            sess, os.path.join(checkpoint_dir, "model.ckpt"), global_step=gs)
def format_network_input(self, ref_image, psv_src_images, ref_pose,
                         psv_src_poses, planes, intrinsics):
  """Format the network input.

  Args:
    ref_image: reference source image [batch, height, width, 3]
    psv_src_images: stack of source images (excluding the ref image)
        [batch, height, width, 3*(num_source -1)]
    ref_pose: reference world-to-camera pose (where PSV is constructed)
        [batch, 4, 4]
    psv_src_poses: input poses (world to camera) [batch, num_source-1, 4, 4]
    planes: list of scalar depth values for each plane
    intrinsics: camera intrinsics [batch, 3, 3]
  Returns:
    net_input: [batch, height, width, #planes, num_source*3 + 1] -- the
        plane-sweep volumes of all sources, the tiled reference image, and a
        final channel holding the normalized plane index.
  """
  _, num_psv_source, _, _ = psv_src_poses.get_shape().as_list()
  num_planes = tf.shape(planes)[0]
  net_input = []
  for i in range(num_psv_source):
    # Relative pose of source camera i with respect to the reference camera.
    curr_pose = tf.matmul(psv_src_poses[:, i], tf.matrix_inverse(ref_pose))
    curr_image = psv_src_images[:, :, :, i*3:(i+1)*3]
    # Sweep the source image over the reference camera's depth planes.
    curr_psv = pj.plane_sweep(curr_image, planes, curr_pose, intrinsics)
    net_input.append(curr_psv)
  net_input = tf.concat(net_input, axis=4)
  # The reference image is already in the reference frame, so it is simply
  # replicated at every depth plane.
  ref_img_stack = tf.tile(
      tf.expand_dims(ref_image, 3), [1, 1, 1, num_planes, 1])
  net_input = tf.concat([net_input, ref_img_stack], axis=4)
  # Append normalized plane indices (0.0 at the first plane, 1.0 at the last)
  # so the network knows which depth slice each voxel belongs to.
  normalized_disp_inds = tf.reshape(tf.linspace(0.0, 1.0, num_planes),
                                    [1, 1, 1, num_planes, 1])
  sh = tf.shape(net_input)
  normalized_disp_inds_stack = tf.tile(normalized_disp_inds,
                                       [1, sh[1], sh[2], 1, 1])
  net_input = tf.concat([net_input, normalized_disp_inds_stack], axis=4)
  return net_input
def preprocess_image(self, image):
  """Preprocess the image for CNN input.

  Args:
    image: the input image in either float [0, 1] or uint8 [0, 255]
  Returns:
    A new image converted to float with range [-1, 1]
  """
  # convert_image_dtype rescales uint8 inputs to [0, 1] automatically.
  as_float = tf.image.convert_image_dtype(image, dtype=tf.float32)
  return 2.0 * as_float - 1.0
def deprocess_image(self, image):
  """Undo the preprocessing.

  Args:
    image: the input image in float with range [-1, 1]
  Returns:
    A new image converted to uint8 [0, 255]
  """
  # Map [-1, 1] back to [0, 1]; convert_image_dtype then scales to [0, 255].
  rescaled = 0.5 * (image + 1.0)
  return tf.image.convert_image_dtype(rescaled, dtype=tf.uint8)
def inv_depths(self, start_depth, end_depth, num_depths):
  """Returns reversed, sorted inverse interpolated depths.

  Args:
    start_depth: The first depth.
    end_depth: The last depth.
    num_depths: The total number of depths to create. start_depth and
      end_depth are always included; the remaining depths are interpolated
      between them in inverse-depth (disparity) space.
  Returns:
    The depths sorted in descending order (so furthest first). This order is
    useful for back to front compositing.
  """
  # Sampling uniformly in 1/depth gives finer spacing near the camera.
  inv_samples = tf.linspace(1.0 / end_depth, 1.0 / start_depth, num_depths)
  return 1.0 / inv_samples
def compute_transmittance(self, alpha):
  """Returns transmittance of MPI voxels in reference frame.

  Args:
    alpha: MPI alpha values
  Returns:
    Transmittance of each MPI voxel in reference frame.
  """
  # For each plane, multiply the (1 - alpha) of every plane in front of it
  # (exclusive, back-to-front via reverse=True); the epsilon keeps the
  # product's gradient finite when alpha hits exactly 1.
  light_remaining = tf.cumprod(
      1.0 - alpha + 1.0e-8, axis=3, exclusive=True, reverse=True)
  return light_remaining * alpha
def compute_occ_map(self, mpi_planes, rgba_layers, output_alpha,
                    intrinsics, rel_pose):
  """Computes an occlusion map, indicating which pixels are occluded/disoccluded.

  Args:
    mpi_planes: MPI plane depths
    rgba_layers: an MPI
    output_alpha: alphas from MPI that has been warped into target frame
    intrinsics: camera intrinsics [batch, 3, 3]
    rel_pose: relative pose to target camera pose
  Returns:
    One-sided occlusion map (positive diff in transmittance of target vs. ref)
  """
  # compute occlusion map, indicating which pixels are occluded/disoccluded
  # when rendering a novel view
  batch_size = tf.shape(rgba_layers)[0]
  img_height = tf.shape(rgba_layers)[1]
  img_width = tf.shape(rgba_layers)[2]
  num_mpi_planes = tf.shape(rgba_layers)[3]
  # One depth value per plane, replicated across the batch for the warp below.
  depths = tf.tile(mpi_planes[:, tf.newaxis], [1, batch_size])
  # Compute transmittance from reference viewpoint, then warp to tgt viewpoint.
  # stop_gradient: the occlusion map is used as a signal only and should not
  # backpropagate into the MPI alphas.
  trans_ref = self.compute_transmittance(
      tf.stop_gradient(rgba_layers[Ellipsis, -1]))
  # Move the plane axis to the front so each plane is warped as an "image".
  trans_ref = tf.transpose(trans_ref, [3, 0, 1, 2])
  trans_ref = tf.expand_dims(trans_ref, -1)
  trans_ref_reproj = pj.projective_forward_homography(trans_ref, intrinsics,
                                                      rel_pose, depths)
  # Restore [batch, height, width, planes, 1] layout after the warp.
  trans_ref_reproj = tf.reshape(
      trans_ref_reproj,
      [batch_size, num_mpi_planes, img_height, img_width, 1])
  trans_ref_reproj = tf.transpose(trans_ref_reproj, [0, 2, 3, 1, 4])
  # Compute transmittance of alphas that have been warped to tgt viewpoint
  trans_target = self.compute_transmittance(tf.stop_gradient(output_alpha))
  trans_target = tf.expand_dims(trans_target, -1)
  # One-sided occlusion map (positive diff in transmittance of target vs. ref)
  occ_map = tf.reduce_max(tf.nn.relu(trans_target - trans_ref_reproj), axis=3)
  return occ_map
| StarcoderdataPython |
4915038 | from __future__ import print_function
import pandas as pd
import random
import math
from scipy.linalg import toeplitz
import statsmodels.api as sm
from statsmodels.formula.api import ols
from datetime import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt)
# Load the raw movie table (pipe-delimited) and assign its 41 column names.
df= pd.read_csv('C:\\Users\\merre\\Desktop\\jmmerrell.github.io\\movie_random_forest\\movie_data.txt', sep="|", header=0)
df.columns = ["date","movie","studio","genre","basedon","actionanim","factfict","budget","thrcount","threngag","fisrtweeknd","domgross","infdomgross","intgross","totgross","direct","compose","act1","act2","act3","act4","act5","act6","act7","act8","act9","act10","act11","act12","act13","act14","act15","act16","act17","act18","act19","act20","rating","franch"]
df['date'] = pd.to_datetime(df['date'])
# Columns 7-14 (budget through totgross) arrive as strings; coerce them to
# numeric, turning unparseable entries into NaN.
cols = df.columns[list(range(7,15))]
df[cols] = df[cols].apply(pd.to_numeric, errors='coerce', axis=1)
###order data by date
df = df.sort_values(by=['date'], ascending=True)
###Drop all movies with no inflation adjusted gross and without actors
df= df.loc[(df['infdomgross'] > 0) & (df['act1'].isnull() == False)]
###Create inflation adjusted budget
# Scale the nominal budget by the same inflation factor implied by the
# ratio of inflation-adjusted to nominal domestic gross.
df['infbudget'] = df['infdomgross']/df['domgross']*df['budget']
####Create a new dataframe with only new movies
df2 = df.loc[df['date']>='2000-01-01']
print(df2)
# NOTE(review): everything below is commented-out leftover code from a
# separate power-bill GLS analysis (unrelated to the movie data above);
# kept verbatim for reference.
# df['Date'] = pd.to_datetime(df['Date'])
# df['month'] = df['Date'].dt.month
# df['winter'] = np.where(df['month'].isin([12,1,2,3]) , 1, 0)
# df['summer']= np.where(df['month'].isin([7,8,9,10]), 1, 0)
# plt.plot(df.Date, df.PowerBill)
# plt.show()
# temps = np.array([32,31,36,44.5,52,61,69.5,77.5,76,66.5,53.5,41.5])
# temps = abs(temps-65)
# temps = [temps]*4
# temps = np.concatenate(temps)
# temps = temps.tolist()
# temps2 = temps[:3]
# temps = temps+temps2
# df['temp'] = temps
# df['solar_winter'] = np.where((df['Solar']=='Y')&(df['winter']==1) , 1, 0)
# df['solar_summer'] = np.where((df['Solar']=='Y')&(df['summer']==1) , 1, 0)
# df['summer_temp'] = df['temp']*df['summer']
# df['winter_temp'] = df['temp']*df['winter']
# nsims=100
# out = [0.0]*(nsims*53)
# out = np.reshape(out,(nsims,53))
#
# for i in range(0,nsims):
#     rowz = np.random.choice(df.shape[0], 5, replace=False)
#     train = df.ix[set(range(1, df.shape[0])).difference(rowz)]
#     test = df.ix[rowz]
#     ols_resid = sm.OLS.from_formula('PowerBill ~ C(Solar) + C(solar_winter) + C(solar_summer) + summer_temp + winter_temp', data=df).fit().resid
#     resid_fit = sm.OLS(endog=list(ols_resid[1:]), exog=sm.add_constant(ols_resid[:-1])).fit()
#     rho = resid_fit.params[1]
#     toeplitz(range(5))
#     order = toeplitz(range(train.shape[0]))
#     sigma = rho**order
#     gls_model = sm.GLS.from_formula('PowerBill ~ C(Solar) + C(solar_winter) + C(solar_summer) + summer_temp + winter_temp', data=train, sigma=sigma)
#     gls_results = gls_model.fit()
#     preds=gls_results.predict(test)
#     out[i][0]=np.mean(test['PowerBill']-preds)
#     out[i][1]=math.sqrt(np.mean((test['PowerBill']-preds)**2))
#     out[i][(rowz+1)]=preds
#
# def column(matrix, i):
#     return [row[i] for row in matrix]
# print(np.mean(column(out,0)))
# print(np.mean(column(out,1)))
| StarcoderdataPython |
3225841 | import pygame
import random
from entity import Bullet
class Enemy:
    """A single enemy sprite: position, size, type-specific artwork and firing."""

    def __init__(self):
        # Movement step, in pixels, along each axis.
        self.speed_x = 40
        self.speed_y = 40

    def bindBoard(self, board):
        # Remember the game board so fired bullets can be registered on it.
        self.board = board

    def fireBullet(self):
        # Spawn a bullet horizontally centred on this enemy, at its top edge.
        shot = Bullet.Bullet('enemy')
        half_width = self.getSize()[0] / 2
        shot.setX((self.x + half_width) - shot.getSize()[0] / 2)
        shot.setY(self.y)
        self.board.bullets.append(shot)

    def setType(self, enemy_type):
        self.type = enemy_type
        # Each enemy type ships with two animation frames, suffixed _0 and _1.
        prefix = "assets/sprites/enemy/enemy" + str(enemy_type)
        self.spriteImage = prefix + "_0.png"
        self.spriteImageAlt = prefix + "_1.png"
        # Load both frames up front so drawing never touches the disk.
        self.sprite = pygame.image.load(self.spriteImage)
        self.spriteAlt = pygame.image.load(self.spriteImageAlt)

    def getType(self):
        return self.type

    def getSprite(self, isAltSprite):
        # Pick the animation frame for the requested phase.
        return self.spriteAlt if isAltSprite else self.sprite

    def setX(self, x):
        self.x = x

    def getX(self):
        return self.x

    def setY(self, y):
        self.y = y

    def getY(self):
        return self.y

    def setSize(self, size):
        self.size = size

    def getSize(self):
        return self.size

    def getLocation(self):
        return (self.x, self.y)

    def getRect(self):
        return pygame.Rect(self.getLocation(), self.getSize())

    def considerFireBullet(self):
        """Roll the per-frame firing chance; fire and report True on success.

        This is run once per frame.
        """
        fire_probability = 0.15 / 100  # 0.15% chance each frame
        if random.random() < fire_probability:
            self.fireBullet()
            return True
        return False
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.