id | text | dataset_id
|---|---|---|
1713117 | <gh_stars>0
import streamlit as st
from pyecharts import options as opts
from pyecharts.charts import Liquid
from pyecharts.charts import WordCloud
from streamlit_echarts import JsCode
from streamlit_echarts import st_echarts
from streamlit_echarts import st_pyecharts
def main():
PAGES = {"Wordcloud": render_wordcloud, "Liquidfill": render_liquid}
st.title("Hello ECharts extensions!")
st.sidebar.header("Configuration")
page = st.sidebar.selectbox("Choose an example", options=list(PAGES.keys()))
PAGES[page]()
def render_liquid():
with st.echo("below"):
options = {
"series": [
{
"type": "liquidFill",
"data": [0.5, 0.4, 0.3],
"color": ["red", "#0f0", "rgb(0, 0, 255)"],
"itemStyle": {"opacity": 0.6},
"emphasis": {"itemStyle": {"opacity": 0.9}},
}
]
}
st_echarts(options)
c = (
Liquid()
.add("lq", [0.6, 0.7])
.set_global_opts(title_opts=opts.TitleOpts(title="Liquid-基本示例"))
)
st_pyecharts(c)
def render_wordcloud():
with st.echo("below"):
options = {
"tooltip": {},
"series": [
{
"type": "wordCloud",
"gridSize": 2,
"sizeRange": [12, 50],
"rotationRange": [-90, 90],
"shape": "pentagon",
"width": 600,
"height": 400,
"drawOutOfBound": True,
"emphasis": {
"textStyle": {"shadowBlur": 10, "shadowColor": "#333"}
},
"data": [
{
"name": "<NAME>",
"value": 10000,
"textStyle": {"color": "black"},
"emphasis": {"textStyle": {"color": "red"}},
},
{"name": "Macys", "value": 6181},
{"name": "<NAME>", "value": 4386},
{"name": "<NAME>", "value": 4055},
{"name": "<NAME>", "value": 2467},
{"name": "<NAME>", "value": 2244},
{"name": "Planet Fitness", "value": 1898},
{"name": "<NAME>", "value": 1484},
{"name": "Express", "value": 1112},
{"name": "Home", "value": 965},
{"name": "<NAME>", "value": 847},
{"name": "<NAME>", "value": 582},
{"name": "<NAME>", "value": 555},
{"name": "KXAN", "value": 550},
{"name": "<NAME>", "value": 462},
{"name": "<NAME>", "value": 366},
{"name": "<NAME>", "value": 360},
{"name": "<NAME>", "value": 282},
{"name": "NCAA baseball tournament", "value": 273},
{"name": "Point Break", "value": 265},
],
}
],
}
st_echarts(options)
data = [
("生活资源", "999"),
("供热管理", "888"),
("供气质量", "777"),
("生活用水管理", "688"),
("一次供水问题", "588"),
("交通运输", "516"),
("城市交通", "515"),
("环境保护", "483"),
("房地产管理", "462"),
("城乡建设", "449"),
("社会保障与福利", "429"),
("社会保障", "407"),
("文体与教育管理", "406"),
("公共安全", "406"),
("公交运输管理", "386"),
("出租车运营管理", "385"),
("供热管理", "375"),
("市容环卫", "355"),
("自然资源管理", "355"),
("粉尘污染", "335"),
("噪声污染", "324"),
("土地资源管理", "304"),
("物业服务与管理", "304"),
("医疗卫生", "284"),
("粉煤灰污染", "284"),
("占道", "284"),
("供热发展", "254"),
("农村土地规划管理", "254"),
("生活噪音", "253"),
("供热单位影响", "253"),
("城市供电", "223"),
("房屋质量与安全", "223"),
("大气污染", "223"),
("房屋安全", "223"),
("文化活动", "223"),
("拆迁管理", "223"),
("公共设施", "223"),
("供气质量", "223"),
("供电管理", "223"),
("燃气管理", "152"),
("教育管理", "152"),
("医疗纠纷", "152"),
("执法监督", "152"),
("设备安全", "152"),
("政务建设", "152"),
("县区、开发区", "152"),
("宏观经济", "152"),
("教育管理", "112"),
("社会保障", "112"),
("生活用水管理", "112"),
("物业服务与管理", "112"),
("分类列表", "112"),
("农业生产", "112"),
("二次供水问题", "112"),
("城市公共设施", "92"),
("拆迁政策咨询", "92"),
("物业服务", "92"),
("物业管理", "92"),
("社会保障保险管理", "92"),
("低保管理", "92"),
("文娱市场管理", "72"),
("城市交通秩序管理", "72"),
("执法争议", "72"),
("商业烟尘污染", "72"),
("占道堆放", "71"),
("地上设施", "71"),
("水质", "71"),
("无水", "71"),
("供热单位影响", "71"),
("人行道管理", "71"),
("主网原因", "71"),
("集中供热", "71"),
("客运管理", "71"),
("国有公交(大巴)管理", "71"),
("工业粉尘污染", "71"),
("治安案件", "71"),
("压力容器安全", "71"),
("身份证管理", "71"),
("群众健身", "41"),
("工业排放污染", "41"),
("破坏森林资源", "41"),
("市场收费", "41"),
("生产资金", "41"),
("生产噪声", "41"),
("农村低保", "41"),
("劳动争议", "41"),
("劳动合同争议", "41"),
("劳动报酬与福利", "41"),
("医疗事故", "21"),
("停供", "21"),
("基础教育", "21"),
("职业教育", "21"),
("物业资质管理", "21"),
("拆迁补偿", "21"),
("设施维护", "21"),
("市场外溢", "11"),
("占道经营", "11"),
("树木管理", "11"),
("农村基础设施", "11"),
("无水", "11"),
("供气质量", "11"),
("停气", "11"),
("市政府工作部门(含部门管理机构、直属单位)", "11"),
("燃气管理", "11"),
("市容环卫", "11"),
("新闻传媒", "11"),
("人才招聘", "11"),
("市场环境", "11"),
("行政事业收费", "11"),
("食品安全与卫生", "11"),
("城市交通", "11"),
("房地产开发", "11"),
("房屋配套问题", "11"),
("物业服务", "11"),
("物业管理", "11"),
("占道", "11"),
("园林绿化", "11"),
("户籍管理及身份证", "11"),
("公交运输管理", "11"),
("公路(水路)交通", "11"),
("房屋与图纸不符", "11"),
("有线电视", "11"),
("社会治安", "11"),
("林业资源", "11"),
("其他行政事业收费", "11"),
("经营性收费", "11"),
("食品安全与卫生", "11"),
("体育活动", "11"),
("有线电视安装及调试维护", "11"),
("低保管理", "11"),
("劳动争议", "11"),
("社会福利及事务", "11"),
("一次供水问题", "11"),
]
c = (
WordCloud()
.add(series_name="热点分析", data_pair=data, word_size_range=[6, 66])
.set_global_opts(
title_opts=opts.TitleOpts(
title="热点分析", title_textstyle_opts=opts.TextStyleOpts(font_size=23)
),
tooltip_opts=opts.TooltipOpts(is_show=True),
)
)
st_pyecharts(c)
if __name__ == "__main__":
main()
| StarcoderdataPython |
1799882 | import pandas as pd
from pgmpy.estimators import BayesianEstimator
from pgmpy.models import BayesianModel
from pomegranate.BayesianNetwork import BayesianNetwork
from pomegranate.base import State
from pomegranate.distributions.ConditionalProbabilityTable import ConditionalProbabilityTable
from pomegranate.distributions.DiscreteDistribution import DiscreteDistribution
MOBILE = 'LEN_mobile'
LEFT_ARM = 'LEN_motor_left_arm'
RIGHT_ARM = 'LEN_motor_right_arm'
LEFT_FOOT = 'LEN_motor_left_foot'
RIGHT_FOOT = 'LEN_motor_right_foot'
DESIRE = 'LEN_desire'
BOREDOM = 'RON_BOREDOM'
MOTOR_HYPO = 'RON_MOVEMENT'
def baby_model():
d1 = DiscreteDistribution({'0': 0.6, '1': 0.4})
d2 = DiscreteDistribution({'0': 0.6, '1': 0.4})
d3 = ConditionalProbabilityTable(
[['1', '1', 0.1],
['1', '0', 0.9],
['0', '1', 0.9],
['0', '0', 0.1]], [d1])
d4 = ConditionalProbabilityTable(
[['1', '1', '1', 0.1],
['1', '1', '0', 0.9],
['1', '0', '1', 0.1],
['1', '0', '0', 0.9],
['0', '1', '1', 0.9],
['0', '1', '0', 0.1],
['0', '0', '1', 0.9],
['0', '0', '0', 0.1]], [d1, d2])
d5 = ConditionalProbabilityTable(
[['1', '1', 0.1],
['1', '0', 0.9],
['0', '1', 0.9],
['0', '0', 0.1]], [d2])
s1 = State(d1, name=BOREDOM)
s2 = State(d2, name=MOTOR_HYPO)
s3 = State(d3, name=DESIRE)
s4 = State(d4, name=MOBILE)
s5 = State(d5, name=LEFT_ARM)
model = BayesianNetwork()
model.add_states(s1, s2, s3, s4, s5)
model.add_edge(s1, s3)
model.add_edge(s1, s4)
model.add_edge(s2, s4)
model.add_edge(s2, s5)
model.bake()
return model
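# Usage sketch (a hedged illustration, not part of the original file): the
# baked pomegranate network supports inference keyed by the state names above,
# e.g.:
#   model = baby_model()
#   beliefs = model.predict_proba({BOREDOM: '1'})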
TRAINING_DATA = pd.DataFrame(data={
BOREDOM: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
DESIRE: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
MOBILE: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
MOTOR_HYPO: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
LEFT_ARM: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0]
})
def fully_connected_model(nodes=None):
if not nodes:
nodes = [BOREDOM, DESIRE, MOBILE, MOTOR_HYPO, LEFT_ARM]
network = BayesianModel()
network.add_nodes_from(nodes)
for hypo in nodes:
if 'hypo' in hypo:
for obs in nodes:
if 'obs' in obs or 'motor' in obs:
network.add_edge(u=hypo, v=obs)
network.fit(TRAINING_DATA, estimator=BayesianEstimator, prior_type="BDeu")
return network
| StarcoderdataPython |
1683112 | <gh_stars>0
def get_count(n, k):
counts = 0
arr = [i for i in range(1, 13)]
length = len(arr)
for i in range(1 << length):
res = []
for j in range(length):
if i & (1 << j):
res.append(arr[j])
if len(res) == n and sum(res, start=0) == k:
counts += 1
return counts
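# Illustration of the bitmask enumeration above: bit j of i selects arr[j], so
# each i in range(1 << length) is one subset of {1, ..., 12}.  For example,
# get_count(2, 5) counts {1, 4} and {2, 3} and returns 2.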
t = int(input())
for i in range(t):
n, k = map(int, input().split())
counts = get_count(n, k)
print(f'#{i + 1} {counts}')
# print(get_count(5, 15))
| StarcoderdataPython |
69492 | from icevision.all import *
def test_voc_annotation_parser(samples_source, voc_class_map):
annotation_parser = parsers.voc(
annotations_dir=samples_source / "voc/Annotations",
images_dir=samples_source / "voc/JPEGImages",
class_map=voc_class_map,
)
records = annotation_parser.parse(data_splitter=SingleSplitSplitter())[0]
assert len(records) == 2
record = records[0]
expected = {
"imageid": 0,
"filepath": samples_source / "voc/JPEGImages/2007_000063.jpg",
"width": 500,
"height": 375,
"labels": [voc_class_map.get_name(k) for k in ["dog", "chair"]],
"bboxes": [BBox.from_xyxy(123, 115, 379, 275), BBox.from_xyxy(75, 1, 428, 375)],
}
assert record == expected
record = records[1]
expected = {
"imageid": 1,
"filepath": samples_source / "voc/JPEGImages/2011_003353.jpg",
"height": 500,
"width": 375,
"labels": [voc_class_map.get_name("person")],
"bboxes": [BBox.from_xyxy(130, 45, 375, 470)],
}
assert record == expected
def test_voc_mask_parser(samples_source):
mask_parser = parsers.VocMaskParser(
masks_dir=samples_source / "voc/SegmentationClass"
)
records = mask_parser.parse(data_splitter=SingleSplitSplitter())[0]
record = records[0]
expected = {
"imageid": 0,
"masks": [
VocMaskFile(samples_source / "voc/SegmentationClass/2007_000063.png"),
],
}
assert record == expected
def test_voc_combined_parser(samples_source, voc_class_map):
annotation_parser = parsers.VocXmlParser(
annotations_dir=samples_source / "voc/Annotations",
images_dir=samples_source / "voc/JPEGImages",
class_map=voc_class_map,
)
mask_parser = parsers.VocMaskParser(
masks_dir=samples_source / "voc/SegmentationClass"
)
combined_parser = parsers.CombinedParser(annotation_parser, mask_parser)
records = combined_parser.parse(data_splitter=SingleSplitSplitter())[0]
assert len(records) == 1
record = records[0]
expected = {
"imageid": 0,
"filepath": samples_source / "voc/JPEGImages/2007_000063.jpg",
"width": 500,
"height": 375,
"labels": [voc_class_map.get_name(k) for k in ["dog", "chair"]],
"bboxes": [BBox.from_xyxy(123, 115, 379, 275), BBox.from_xyxy(75, 1, 428, 375)],
"masks": [
VocMaskFile(samples_source / "voc/SegmentationClass/2007_000063.png")
],
}
assert record == expected
| StarcoderdataPython |
3243522 | <filename>apache_downloader/downloader.py
import hashlib
import logging
import os
import sys
from math import ceil
from os.path import basename, dirname, isdir, expanduser
from urllib.parse import urlunparse, urlencode
import humanize
import requests
from progress.bar import FillingCirclesBar
from progress.spinner import Spinner
DOWNLOAD_CHUNK_SIZE = 8192
def get_mirror_url(path, site="www"):
"""
Formats the download URL for the Apache project file path
:param path: the download file path, e.g. /nifi/nifi-registry/nifi-registry-0.5.0/nifi-registry-0.5.0-bin.tar.gz
:param site: "www" if the main site is used, "archive" if the archive site is used
:return: the direct download URL
"""
return {
'www': urlunparse(("https", "www.apache.org", "/dyn/mirrors/mirrors.cgi", "", urlencode({
"action": "download",
"filename": path
}), "")),
'archive': urlunparse(("https", "archive.apache.org", "/dist/%s" % path.lstrip("/"), "", "", ""))
}[site]
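# Illustrative outputs (the file path below is hypothetical):
#   get_mirror_url("/nifi/nifi-1.0.0-bin.tar.gz")
#     -> "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=%2Fnifi%2Fnifi-1.0.0-bin.tar.gz"
#   get_mirror_url("/nifi/nifi-1.0.0-bin.tar.gz", site="archive")
#     -> "https://archive.apache.org/dist/nifi/nifi-1.0.0-bin.tar.gz"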
def get_hash(path, site="www"):
"""
Get the hash value from the official apache.org website
:param path: the download file path, e.g. /nifi/nifi-registry/nifi-registry-0.5.0/nifi-registry-0.5.0-bin.tar.gz
:param site: "www" if the main site is used, "archive" if the archive site is used
:return: the sha512 hash
"""
url = urlunparse(("https", site + ".apache.org", "/dist/%s.sha512" % path.lstrip("/"), "", "", ""))
logging.debug("fetch url {url}".format(url=url))
req = requests.get(url)
req.raise_for_status()
dl_hash = "".join(req.text.split()).lower().strip() # sometimes the hash is multi-line and chunked
dl_hash = dl_hash.split(":")[-1] # sometimes the hash begins with the file name + ":"
logging.debug("Expected hash is {hash}".format(hash=dl_hash))
return dl_hash
def download_and_verify(path, destination=None):
"""
Downloads the Apache file and verifies its hash
:param path: the download file path, e.g. /nifi/nifi-registry/nifi-registry-0.5.0/nifi-registry-0.5.0-bin.tar.gz
:param destination: the location to save the downloaded file or file object
"""
destination = destination or "."
if isinstance(destination, str):
download_dir = dirname(destination)
download_file = basename(destination) or basename(path)
        if isdir(expanduser(destination)):
            download_dir = destination
            download_file = basename(path)
download_path = expanduser(os.path.join(download_dir, download_file))
logging.info("Downloading Apache project {path} to destination {dest}".format(path=path, dest=download_path))
assert not os.path.exists(download_path), "File already exists"
else:
download_path = destination
logging.info("Downloading Apache project {path}".format(path=path))
site = "www"
try:
expected_hash = get_hash(path, site)
except requests.exceptions.HTTPError:
logging.debug("Not found, try from archive")
site = "archive"
expected_hash = get_hash(path, site)
logging.info("Downloading from archive")
with requests.get(get_mirror_url(path, site), stream=True) as r:
r.raise_for_status()
file_length = r.headers.get("content-length")
if file_length:
file_length = int(file_length)
logging.info("File size: {size}".format(size=humanize.naturalsize(file_length)))
progress_bar = FillingCirclesBar("Downloading", max=ceil(file_length / DOWNLOAD_CHUNK_SIZE))
else:
progress_bar = Spinner("Downloading")
def save_to_file(_f):
m = hashlib.sha512()
for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):
if chunk:
_f.write(chunk)
m.update(chunk)
progress_bar.next()
actual_hash = m.hexdigest()
assert actual_hash in expected_hash,\
"Hash of downloaded file is invalid, expected {expected_hash} but got {actual_hash}.".format(
expected_hash=expected_hash,
actual_hash=actual_hash
)
if hasattr(download_path, "write"):
save_to_file(download_path)
else:
with open(download_path, "wb") as f:
save_to_file(f)
assert os.path.exists(download_path), "File could not be downloaded."
print(" Done.", file=sys.stderr)
| StarcoderdataPython |
4821821 | <gh_stars>0
from BusConsumer import BusConsumer
from ElasticConnector import ElasticConnector
from src.Config import es_url, topic_name, consumer_config
def get_es_connection_status():
return 'Success' if es_client.test_connection() else 'Failure'
es_client = ElasticConnector(es_url)
es_client.create_es_connector()
print(f'ES connection status : {get_es_connection_status()}')
consumer = BusConsumer(es_client.es_connector, topic_name, consumer_config)
consumer.create_kafka_client()
consumer.consume_messages()
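# src.Config is not shown in this file; it is assumed to provide values such
# as (illustrative only):
#   es_url = "http://localhost:9200"
#   topic_name = "events"
#   consumer_config = {"bootstrap.servers": "localhost:9092", "group.id": "es-sink"}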
| StarcoderdataPython |
88655 | <gh_stars>0
#!/usr/bin/env python
u"""
test_time.py (08/2020)
Verify time conversion functions
"""
import pytest
import warnings
import numpy as np
import icesat2_toolkit.time
#-- parameterize calendar dates
@pytest.mark.parametrize("YEAR", np.random.randint(1992,2020,size=2))
@pytest.mark.parametrize("MONTH", np.random.randint(1,13,size=2))
#-- PURPOSE: verify forward and backwards time conversions
def test_julian(YEAR,MONTH):
#-- days per month in a leap and a standard year
#-- only difference is February (29 vs. 28)
dpm_leap = np.array([31,29,31,30,31,30,31,31,30,31,30,31])
dpm_stnd = np.array([31,28,31,30,31,30,31,31,30,31,30,31])
DPM = dpm_stnd if np.mod(YEAR,4) else dpm_leap
#-- calculate Modified Julian Day (MJD) from calendar date
DAY = np.random.randint(1,DPM[MONTH-1]+1)
HOUR = np.random.randint(0,23+1)
MINUTE = np.random.randint(0,59+1)
SECOND = 60.0*np.random.random_sample(1)
MJD = icesat2_toolkit.time.convert_calendar_dates(YEAR, MONTH, DAY,
hour=HOUR, minute=MINUTE, second=SECOND,
epoch=(1858,11,17,0,0,0))
#-- convert MJD to calendar date
JD = np.squeeze(MJD) + 2400000.5
YY,MM,DD,HH,MN,SS = icesat2_toolkit.time.convert_julian(JD,
        FORMAT='tuple', ASTYPE=np.float64)
#-- assert dates
eps = np.finfo(np.float16).eps
assert (YY == YEAR)
assert (MM == MONTH)
assert (DD == DAY)
assert (HH == HOUR)
assert (MN == MINUTE)
assert (np.abs(SS - SECOND) < eps)
#-- parameterize calendar dates
@pytest.mark.parametrize("YEAR", np.random.randint(1992,2020,size=2))
@pytest.mark.parametrize("MONTH", np.random.randint(1,13,size=2))
#-- PURPOSE: verify forward and backwards time conversions
def test_decimal_dates(YEAR,MONTH):
#-- days per month in a leap and a standard year
#-- only difference is February (29 vs. 28)
dpm_leap = np.array([31,29,31,30,31,30,31,31,30,31,30,31])
dpm_stnd = np.array([31,28,31,30,31,30,31,31,30,31,30,31])
DPM = dpm_stnd if np.mod(YEAR,4) else dpm_leap
#-- calculate Modified Julian Day (MJD) from calendar date
DAY = np.random.randint(1,DPM[MONTH-1]+1)
HOUR = np.random.randint(0,23+1)
MINUTE = np.random.randint(0,59+1)
SECOND = 60.0*np.random.random_sample(1)
#-- calculate year-decimal time
tdec = icesat2_toolkit.time.convert_calendar_decimal(YEAR, MONTH,
day=DAY, hour=HOUR, minute=MINUTE, second=SECOND)
#-- day of the year 1 = Jan 1, 365 = Dec 31 (std)
day_temp = np.mod(tdec, 1)*np.sum(DPM)
DofY = np.floor(day_temp) + 1
#-- cumulative sum of the calendar dates
day_cumulative = np.cumsum(np.concatenate(([0],DPM))) + 1
#-- finding which month date is in
i = np.nonzero((DofY >= day_cumulative[0:-1]) & (DofY < day_cumulative[1:]))
month_range = np.arange(1,13)
month = month_range[i]
#-- finding day of the month
day = (DofY - day_cumulative[i]) + 1
#-- convert residuals into time (hour, minute and second)
hour_temp = np.mod(day_temp,1)*24.0
minute_temp = np.mod(hour_temp,1)*60.0
second = np.mod(minute_temp,1)*60.0
#-- assert dates
eps = np.finfo(np.float16).eps
assert (np.floor(tdec) == YEAR)
assert (month == MONTH)
assert (day == DAY)
assert (np.floor(hour_temp) == HOUR)
assert (np.floor(minute_temp) == MINUTE)
assert (np.abs(second - SECOND) < eps)
#-- PURPOSE: verify forward and backwards delta time conversions
@pytest.mark.parametrize("delta_time", np.random.randint(1,31536000,size=4))
def test_delta_time(delta_time, gps_epoch=1198800018.0):
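    #-- note: the default gps_epoch of 1198800018.0 is 2018-01-01 expressed in
    #-- GPS seconds: 13875 days * 86400 s since the GPS epoch (1980-01-06)
    #-- plus the 18 leap seconds accumulated by 2018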
#-- convert to array if single value
delta_time = np.atleast_1d(delta_time)
#-- calculate gps time from delta_time
gps_seconds=gps_epoch + delta_time
time_leaps=icesat2_toolkit.time.count_leap_seconds(gps_seconds)
#-- compare output delta times with original values
output_time=icesat2_toolkit.time.convert_delta_time(gps_seconds-time_leaps,
epoch1=(1980,1,6,0,0,0),epoch2=(2018,1,1,0,0,0),scale=1.0)
assert (delta_time == output_time)
| StarcoderdataPython |
1679404 | # dataset/table/_SUCCESS
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""unit tests for gcs_ocn_bq_ingest"""
import re
import time
from typing import Dict, Optional
from unittest.mock import Mock
import pytest
from google.cloud import storage
import gcs_ocn_bq_ingest.common.constants
import gcs_ocn_bq_ingest.common.utils
import gcs_ocn_bq_ingest.main
COMPILED_DEFAULT_DESTINATION_REGEX = re.compile(
    gcs_ocn_bq_ingest.common.constants.DEFAULT_DESTINATION_REGEX)
@pytest.mark.parametrize(
"test_input,expected",
[
(
"dataset/table/_SUCCESS", # flat
{
"dataset": "dataset",
"table": "table",
"partition": None,
"yyyy": None,
"mm": None,
"dd": None,
"hh": None,
"batch": None
}),
(
"dataset/table/$20201030/_SUCCESS", # partitioned
{
"dataset": "dataset",
"table": "table",
"partition": "$20201030",
"yyyy": None,
"mm": None,
"dd": None,
"hh": None,
"batch": None
}),
(
"dataset/table/$20201030/batch_id/_SUCCESS", # partitioned, batched
{
"dataset": "dataset",
"table": "table",
"partition": "$20201030",
"yyyy": None,
"mm": None,
"dd": None,
"hh": None,
"batch": "batch_id"
}),
(
"dataset/table/batch_id/_SUCCESS", # batched (no partitioning)
{
"dataset": "dataset",
"table": "table",
"partition": None,
"yyyy": None,
"mm": None,
"dd": None,
"hh": None,
"batch": "batch_id"
}),
("dataset/table/2020/01/02/03/batch_id/_SUCCESS", {
"dataset": "dataset",
"table": "table",
"partition": None,
"yyyy": "2020",
"mm": "01",
"dd": "02",
"hh": "03",
"batch": "batch_id"
}),
("project.dataset/table/2020/01/02/03/batch_id/_SUCCESS", {
"dataset": "project.dataset",
"table": "table",
"partition": None,
"yyyy": "2020",
"mm": "01",
"dd": "02",
"hh": "03",
"batch": "batch_id"
}),
("project.dataset/table/historical/2020/01/02/03/batch_id/_SUCCESS", {
"dataset": "project.dataset",
"table": "table",
"partition": None,
"yyyy": "2020",
"mm": "01",
"dd": "02",
"hh": "03",
"batch": "batch_id"
}),
("project.dataset/table/incremental/2020/01/02/04/batch_id/_SUCCESS", {
"dataset": "project.dataset",
"table": "table",
"partition": None,
"yyyy": "2020",
"mm": "01",
"dd": "02",
"hh": "04",
"batch": "batch_id"
}),
])
def test_default_destination_regex(test_input: str,
expected: Dict[str, Optional[str]]):
"""ensure our default regex handles each scenarios we document.
this test is to support improving this regex in the future w/o regressing
for existing use cases.
"""
    match = COMPILED_DEFAULT_DESTINATION_REGEX.match(test_input)
if match:
assert match.groupdict() == expected
else:
raise AssertionError(f"{COMPILED_DEFAULT_DENTINATION_REGEX}"
f" did not match test case {test_input}.")
@pytest.mark.parametrize("test_input,expected", [
([], []),
([[]], []),
([["foo"], ["bar", "baz"]], ["foo", "bar", "baz"]),
([["foo"], []], ["foo"]),
([["foo"], [], ["bar", "baz"]], ["foo", "bar", "baz"]),
])
def test_flattend2dlist(test_input, expected):
assert gcs_ocn_bq_ingest.common.utils.flatten2dlist(test_input) == expected
@pytest.mark.parametrize(
"original, update, expected",
[
# yapf: disable
( # empty original
{}, {
"a": 1
}, {
"a": 1
}),
( # empty update
{
"a": 1
}, {}, {
"a": 1
}),
( # basic update of top-level key
{
"a": 1
}, {
"a": 2
}, {
"a": 2
}),
( # update of list
{
"a": [1]
}, {
"a": [2]
}, {
"a": [2]
}),
( # update of nested key
{
"a": {
"b": 1
}
}, {
"a": {
"b": 2
}
}, {
"a": {
"b": 2
}
}),
( # don't drop keys that only appear in original
{
"a": {
"b": 1,
"c": 2
},
"d": 3
}, {
"a": {
"b": 4
},
}, {
"a": {
"b": 4,
"c": 2
},
"d": 3
}),
# yapf: enable
])
def test_recursive_update(original, update, expected):
assert gcs_ocn_bq_ingest.common.utils.recursive_update(original,
update) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
(
"dataset/table/_SUCCESS", # flat
"dataset/table"),
(
"dataset/table/$20201030/_SUCCESS", # partitioned
"dataset/table"),
(
"dataset/table/$20201030/batch_id/_SUCCESS", # partitioned, batched
"dataset/table"),
(
"dataset/table/batch_id/_SUCCESS", # batched (no partitioning)
"dataset/table"),
("dataset/table/2020/01/02/03/batch_id/_SUCCESS", "dataset/table"),
("project.dataset/table/2020/01/02/03/batch_id/_SUCCESS",
"project.dataset/table"),
("dataset/table/_BACKFILL", "dataset/table"),
("dataset/table/_bqlock", "dataset/table"),
("dataset/table/_backlog/2020/01/02/03/_SUCCESS", "dataset/table"),
])
def test_get_table_prefix(test_input, expected):
assert gcs_ocn_bq_ingest.common.utils.get_table_prefix(
test_input) == expected
def test_triage_event(mock_env, mocker):
test_event_blob: storage.Blob = storage.Blob.from_string(
"gs://foo/bar/baz/00/_SUCCESS")
apply_mock = mocker.patch('gcs_ocn_bq_ingest.common.utils.apply')
bq_mock = Mock()
bq_mock.project = "foo"
gcs_ocn_bq_ingest.main.triage_event(None, bq_mock, test_event_blob,
time.monotonic())
apply_mock.assert_called_once()
def test_triage_event_ordered(ordered_mock_env, mocker):
enforce_ordering = True
test_event_blob: storage.Blob = storage.Blob.from_string(
"gs://foo/bar/baz/00/_SUCCESS")
apply_mock = mocker.patch('gcs_ocn_bq_ingest.common.utils.apply')
publisher_mock = mocker.patch(
'gcs_ocn_bq_ingest.common.ordering.backlog_publisher')
bq_mock = Mock()
bq_mock.project = "foo"
gcs_ocn_bq_ingest.main.triage_event(None,
bq_mock,
test_event_blob,
time.monotonic(),
enforce_ordering=enforce_ordering)
publisher_mock.assert_called_once()
test_event_blob: storage.Blob = storage.Blob.from_string(
"gs://foo/bar/baz/_BACKFILL")
subscriber_mock = mocker.patch(
'gcs_ocn_bq_ingest.common.ordering.backlog_subscriber')
gcs_ocn_bq_ingest.main.triage_event(None,
None,
test_event_blob,
time.monotonic(),
enforce_ordering=enforce_ordering)
subscriber_mock.assert_called_once()
test_event_blob: storage.Blob = storage.Blob.from_string(
"gs://foo/bar/baz/_backlog/00/_SUCCESS")
monitor_mock = mocker.patch(
'gcs_ocn_bq_ingest.common.ordering.subscriber_monitor')
gcs_ocn_bq_ingest.main.triage_event(None,
None,
test_event_blob,
time.monotonic(),
enforce_ordering=enforce_ordering)
monitor_mock.assert_called_once()
apply_mock.assert_not_called()
| StarcoderdataPython |
4825715 | <reponame>max-brambach/neural_tube_patterning_paper<filename>3d/testround_difftest_set.py
# -*- coding: utf-8 -*-
from scipy.integrate import solve_ivp
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import inv
from copy import deepcopy
from matplotlib import colors as mcolors
import matplotlib.animation as animation
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
import paras_dorsoventral as dors
import paras_rostrocaudal as ros
xlen =4
ylen =20
zlen =50
seeds =3
spheresize=30
class stencil:
def __init__(self,xdim,ydim,zdim, Name, coordlist):
self.grid = np.zeros((xdim,ydim,zdim))
self.name=Name
self.xlen=xdim
self.ylen=ydim
self.zlen=zdim
for coord in coordlist:
self.grid[coord[0]][coord[1]][coord[2]] = 1
class secrStencil:
def __init__(self,stencil, Name, coordlist,Base):
self.grid = np.zeros_like(stencil.grid)
self.grid_within_bounds = np.zeros_like(stencil.grid)
self.name=Name
self.xlen=stencil.xlen
self.ylen=stencil.ylen
self.zlen=stencil.zlen
self.stencil=stencil
self.base = Base
for coord in coordlist:
self.grid[coord[0]][coord[1]][coord[2]] = coord[3]
if stencil.grid[coord[0]][coord[1]][coord[2]] ==1:
self.grid_within_bounds[coord[0]][coord[1]][coord[2]] = coord[3]
self.secretion_levels, self.secretion_coords, = grid_to_vector(self.grid_within_bounds, justgrid = True)
self.secretion_levels = self.secretion_levels/255.0
def plotstenc(stenc,ax,r=0.47,g=0.0,b=1.0,color='red',alpha=0.8):
tubeindices = np.where(stenc.grid)
#restindices = np.where((np.ones_like(stenc.grid)-stenc.grid))
ax.scatter(tubeindices[0],tubeindices[1],tubeindices[2],marker = 'o',c=color,linewidth=0,vmin=0,vmax=1,depthshade=False,s=spheresize,alpha=alpha)
#if np.any(restindices) ==True:
#ax.scatter(restindices[0],restindices[1],restindices[2],marker = 'o',c='blue',linewidth=0,vmin=0,vmax=1,depthshade=False,s=spheresize )
def plotSecrStenc(secrStenc,ax):
gridvector, coordvector = grid_to_vector(secrStenc)
plotstenc(secrStenc.stencil,ax,color='blue',alpha=0.01)
for i in range(len(gridvector)):
x,y,z=coordvector[i][0],coordvector[i][1],coordvector[i][2]
colour = np.asarray([gridvector[i],0,0])
ax.scatter(x,y,z,marker = 'o',c=colour/255.0,linewidth=0,depthshade=False,s=spheresize,alpha=0.7)
def tubecoords(x,y,z,borders=True,bordersize=3):
grid= np.ones((x,y,z))
grid[0,:,: ]=0
if borders == True:
for i in range(bordersize):
print(i)
grid[:,:,i] =0
grid[:,i,:] =0
grid[:,:,-i-1] =0
grid[:,-i-1,:] =0
#for i in range(x):
#for j in np.arange(borders,int(y/2)+1):
#for k in np.arange(borders,int(z/2)+1):
#if j**(1.6) +borders >= k:
#print(j,k)
#grid[i][int(y/2)+1-(j+borders)][k]=0
#grid[i][int(y/2)+1-(j+borders)][-(k-1)]=0
#grid[i][-(int(y/2)+1-(j+borders))-1][k]=0
#grid[i][-(int(y/2)+1-(j+borders))-1][-(k-1)]=0
#for j in np.arange(borders,int(y/2)+1):
#for k in np.arange(borders,int(z/2)+1):
#if j**(3.5) +borders +15 <= k:
#print(j,k)
#grid[i][int(y/2)+1-(j+borders)][k]=0
#grid[i][int(y/2)+1-(j+borders)][-(k-1)]=0
#grid[i][-(int(y/2)+1-(j+borders))-1][k]=0
#grid[i][-(int(y/2)+1-(j+borders))-1][-(k-1)]=0
# grid[i][int(y/2)][int(z/2)]=0
# grid[i][int(y/2)+1][int(z/2)]=0
# grid[i][int(y/2)][int(z/2)+1]=0
# grid[i][int(y/2)-1][int(z/2)]=0
# grid[i][int(y/2)][int(z/2)-1]=0
return np.transpose(np.where(grid))
def grid_to_vector(gridname, justgrid =False):
if justgrid ==False:
grid = gridname.grid
else:
grid = gridname
coordvector = np.transpose(np.nonzero(grid))
gridvector = grid[grid!=0]
#print(coordvector,len(gridvector))
for i in range(len(coordvector)):
c = coordvector[i]
if grid[c[0]][c[1]][c[2]] != gridvector[i]:
print("False!")
#print("grid_to_vector:",gridvector)
return gridvector,coordvector
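# Illustration: for a grid that is all zeros except grid[1][2][3] = 5.0,
# grid_to_vector returns gridvector = [5.0] and coordvector = [[1, 2, 3]].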
def vector_to_grid(u,gridname,coords):
newgrid = np.zeros_like(gridname.grid)
#print(len(gridname.grid),len(gridname.grid[0]),len(gridname.grid[0][0]))
c1,c2,c3 = np.transpose(coords)
newgrid[c1,c2,c3] = u
return newgrid
def Amatrix(stencil):
xdim = stencil.xlen
ydim = stencil.ylen
zdim = stencil.zlen
u,coord = grid_to_vector(stencil)
coorddict = {}
for i in range(len(coord)):
c = coord[i]
coorddict[str(c)] = i
grid = stencil.grid
dim=len(u)
print("dim:",dim)
print("xdimydimzdim",xdim*ydim*zdim)
A = []
for i in range(dim):
if i % 10000 ==0:
print(i)
x = coord[i][0]
y = coord[i][1]
z = coord[i][2]
frontcoords = [x,y+1,z]
backcoords = [x,y-1,z]
leftcoords = [x-1,y,z]
rightcoords = [x+1,y,z]
upcoords = [x,y,z+1]
downcoords = [x,y,z-1]
#nblist = [0,0,0,0,0,0]#[up,down,left,right,front,back] - zero if next to inner part, 1 if next to boundary
nbcoords = np.asarray([upcoords,downcoords,leftcoords,rightcoords,frontcoords,backcoords])
nb = 0
for c in range(len(nbcoords)):
coordStr = str(nbcoords[c])
j = coorddict.get(coordStr)
if j != None:
# nblist[c] = 0
A.append([i,j,1])
else:
nb += 1
#nblist[c] = 1
A.append([i,i, -(6-nb)])
return np.asarray(A)
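# The [i, j, value] triplets returned by Amatrix describe a discrete Laplacian
# in COO form; a sketch of assembling it (assumes scipy is available):
#   entries = Amatrix(stenc)
#   rows, cols, vals = entries[:, 0], entries[:, 1], entries[:, 2]
#   laplacian = scipy.sparse.coo_matrix((vals, (rows, cols)))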
def Amatrix_bs(stencil, secrStencil):
#stencil
xdim = stencil.xlen
ydim = stencil.ylen
zdim = stencil.zlen
u,coord = grid_to_vector(stencil)
coorddict = {}
for i in range(len(coord)):
c = coord[i]
coorddict[str(c)] = i
grid = stencil.grid
dim=len(u)
print("dim:",dim)
print("xdimydimzdim",xdim*ydim*zdim)
#secretion stencil
usecr,coordsecr = grid_to_vector(secrStencil)
secrcoorddict = {}
for i in range(len(coordsecr)):
c = coordsecr[i]
secrcoorddict[str(c)] = i
A = []
b=np.zeros(len(u))
for i in range(dim):
if i % 10000 ==0:
print(i)
x = coord[i][0]
y = coord[i][1]
z = coord[i][2]
frontcoords = [x,y+1,z]
backcoords = [x,y-1,z]
leftcoords = [x-1,y,z]
rightcoords = [x+1,y,z]
upcoords = [x,y,z+1]
downcoords = [x,y,z-1]
#nblist = [0,0,0,0,0,0]#[up,down,left,right,front,back] - zero if next to inner part, 1 if next to boundary
nbcoords = np.asarray([upcoords,downcoords,leftcoords,rightcoords,frontcoords,backcoords])
nb = 0
for c in range(len(nbcoords)):
coordStr = str(nbcoords[c])
j = coorddict.get(coordStr)
if j != None:
# nblist[c] = 0
A.append([i,j,1])
else:
k = secrcoorddict.get(coordStr)
if k == None:
nb += 1
else:
print("coords",coordStr,"k",usecr[k])
b[i] += (usecr[k]/255.0) *secrStencil.base
#nblist[c] = 1
A.append([i,i, -(6-nb)])
return np.asarray(A),b
| StarcoderdataPython |
1695548 | """pytest fixtures for simplified testing."""
from __future__ import absolute_import
import pytest
pytest_plugins = ['aiida.manage.tests.pytest_fixtures']
@pytest.fixture(scope='function', autouse=True)
def clear_database_auto(clear_database): # pylint: disable=unused-argument
"""Automatically clear database in between tests."""
@pytest.fixture(scope='function')
def adamant_code(aiida_local_code_factory):
"""Get a adamant code.
"""
executable = '/home/fmoitzi/CLionProjects/mEMTO/cmake-build-release' \
'-gcc-8/kgrn/source_lsf/kgrn'
adamant_code = aiida_local_code_factory(executable=executable,
entry_point='adamant')
return adamant_code
| StarcoderdataPython |
197446 | import itertools
def raster(input_size):
return itertools.product(*[range(dim_size) for dim_size in input_size])
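# For example, list(raster((2, 2))) == [(0, 0), (0, 1), (1, 0), (1, 1)].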
| StarcoderdataPython |
3343799 | from setuptools import setup
setup()
| StarcoderdataPython |
30925 | <gh_stars>10-100
import logging
import os
from logging import FileHandler, Formatter
from logging.handlers import TimedRotatingFileHandler
from pathlib import Path
from rich.logging import RichHandler
def my_namer(default_name):
# This will be called when doing the log rotation
# default_name is the default filename that would be assigned, e.g. Rotate_Test.txt.YYYY-MM-DD
# Do any manipulations to that name here, for example this changes the name to Rotate_Test.YYYY-MM-DD.txt
base_filename, _, ext, date = default_name.split(".")
return f"{base_filename}.{date}.{ext}"
def setup_logging():
"""
sets up custom logging into self.log variable
set format to
[2019-09-29 18:51:04] [INFO ] core.logger: Begining backup
"""
logging.getLogger('discord').setLevel(logging.WARNING)
logging.getLogger('discord.http').setLevel(logging.WARNING)
logging.getLogger("asyncio").setLevel(logging.INFO)
log = logging.getLogger()
shell_handler = RichHandler()
filename = Path("logs", __import__("datetime").datetime.now().strftime('bot.%Y-%m-%d.log'))
os.makedirs(os.path.dirname(filename), exist_ok=True)
all_file_handler = TimedRotatingFileHandler(filename, when='midnight')
all_file_handler.namer = my_namer
filename = Path("logs", "warn.log")
os.makedirs(os.path.dirname(filename), exist_ok=True)
warn_file_handler = FileHandler(filename, mode='a')
log.setLevel(logging.DEBUG)
shell_handler.setLevel(logging.INFO)
all_file_handler.setLevel(logging.DEBUG)
warn_file_handler.setLevel(logging.WARNING)
fmt_date = '%Y-%m-%d %H:%M:%S'
fmt_shell = '{message}'
fmt_file = '{asctime} | {levelname:<7} | {filename:>20}:{lineno:<4} | {message}'
shell_handler.setFormatter(Formatter(fmt_shell, fmt_date, style='{'))
all_file_handler.setFormatter(Formatter(fmt_file, fmt_date, style='{'))
warn_file_handler.setFormatter(Formatter(fmt_file, fmt_date, style='{'))
log.addHandler(shell_handler)
log.addHandler(all_file_handler)
log.addHandler(warn_file_handler)
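# Minimal usage sketch (the logger name is illustrative):
#   setup_logging()
#   logging.getLogger("PB.example").info("logging configured")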
| StarcoderdataPython |
1698639 | # Copyright 2022 The Nine Turn Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pytorch based sequential decoder. Designed specially for dynamic graph learning."""
from typing import List, Tuple, Union
import tensorflow as tf
from tensorflow import Tensor
from tensorflow.keras.layers import MultiHeadAttention
from nineturn.dtdg.models.decoder.tf.sequentialDecoder.baseModel import SlidingWindowFamily
from nineturn.core.layers.time2Vec import Time2Vec
from nineturn.core.layers.tsa import TSA
from nineturn.dtdg.models.decoder.tf.simpleDecoder import SimpleDecoder
from nineturn.core.types import nt_layers_list
def FTSA(num_heads:int, input_d:int,embed_dims:List[int], n_nodes:int, window_size: int, time_kernel:int,
time_agg:str, simple_decoder: SimpleDecoder, **kwargs):
model = None
if time_agg == 'concate':
model = FTSAConcate(num_heads,input_d,embed_dims,n_nodes, window_size, time_kernel, simple_decoder, **kwargs)
return model
class FTSAConcate(SlidingWindowFamily):
def __init__(self,num_heads:int, input_d:int,embed_dims:List[int], n_nodes:int, window_size: int, time_kernel:int,
simple_decoder: SimpleDecoder, **kwargs):
"""Create a sequential decoder.
Args:
            num_heads: int, number of attention heads
            input_d: int, dimension of the input node embeddings
            embed_dims: list of int, output dimension of each TSA layer
            n_nodes: int, number of nodes in the graph
            window_size: int, the length of the sliding window
            time_kernel: int, kernel size of the time2vec embedding
            simple_decoder: SimpleDecoder, the outputting simple decoder.
"""
super().__init__(input_d, n_nodes, window_size, simple_decoder)
self.nn_layers = nt_layers_list()
for emb in embed_dims:
self.nn_layers.append(TSA(out_dim=emb, num_heads=num_heads, **kwargs))
self.time2vec = Time2Vec(time_kernel, **kwargs)
self.time_dimention = tf.range(window_size, dtype=tf.float32)
self.time_kernel = time_kernel
self.input_d = input_d
def build(self, input_shape):
self.wt = self.add_weight(
shape=(self.input_d + self.time_kernel , self.input_d),
initializer='uniform',
trainable=True
)
super().build(input_shape)
def call(self, in_state: Tuple[Tensor, List[int]]):
"""Forward function."""
# node_embs: [|V|, |hidden_dim|]
# sequence length = 1
# the sequential model processes each node at each step
# each snapshot the input number of nodes will change, but should only be increasing for V invariant DTDG.
node_embs, ids = in_state
ids = ids.numpy()
node_embs = tf.gather(node_embs, ids)
self.memory.update_window(node_embs.numpy(),ids)
input_windows = tf.convert_to_tensor(self.memory.get_memory(ids),dtype=tf.float32) #N, W, D N serve as batch size in this case
time_encoding = self.time2vec(self.time_dimention)
#input_with_time dimension: [N, W, D+T]
input_with_time = tf.concat([input_windows, [time_encoding for i in
range(node_embs.shape[0])]], -1)
input_with_time = tf.matmul(input_with_time, self.wt) #[N, W, D]
for layer in self.nn_layers:
input_with_time = layer(input_with_time,input_with_time)
last_sequence = tf.slice(input_with_time,
[0,self.window_size-1, 0],
[input_with_time.shape[0], 1, input_with_time.shape[2]])
out = self.simple_decoder((tf.reshape(last_sequence, [-1, input_with_time.shape[2]]), ids))
return out
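# Construction sketch (dimensions are illustrative; the simple_decoder instance
# is assumed to come from nineturn's simple decoder module):
#   decoder = FTSA(num_heads=4, input_d=32, embed_dims=[32, 32], n_nodes=100,
#                  window_size=5, time_kernel=8, time_agg='concate',
#                  simple_decoder=my_simple_decoder)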
class FTSASum(SlidingWindowFamily):
def __init__(self,num_heads:int, input_d:int,embed_dims:List[int], n_nodes:int, window_size: int, time_kernel:int,
simple_decoder: SimpleDecoder, **kwargs):
"""Create a sequential decoder.
Args:
num_heads: int, number of attention heads
key_dim: int, dimension of input key
hidden_d: int, the hidden state's dimension.
window_size: int, the length of the sliding window
time_kernel: int, kernel size of the time2vec embedding
simple_decoder: SimpleDecoder, the outputing simple decoder.
"""
super().__init__(input_d, n_nodes, window_size, simple_decoder)
self.nn_layers = nt_layers_list()
for emb in embed_dims:
self.nn_layers.append(TSA(out_dim=emb, num_heads=num_heads, **kwargs))
        self.time2vec = Time2Vec(time_kernel, **kwargs)
        self.time_dimention = tf.range(window_size, dtype=tf.float32)
        self.time_kernel = time_kernel
def call(self, in_state: Tuple[Tensor, List[int]]):
"""Forward function."""
# node_embs: [|V|, |hidden_dim|]
# sequence length = 1
# the sequential model processes each node at each step
# each snapshot the input number of nodes will change, but should only be increasing for V invariant DTDG.
node_embs, ids = in_state
ids = ids.numpy()
node_embs = tf.gather(node_embs, ids)
self.memory.update_window(node_embs.numpy(),ids)
input_windows = tf.convert_to_tensor(self.memory.get_memory(ids),dtype=tf.float32) #N, W, D N serve as batch size in this case
time_encoding = self.time2vec(self.time_dimention)
input_with_time = tf.concat([input_windows, [time_encoding for i in
range(node_embs.shape[0])]], -1)
for layer in self.nn_layers:
input_with_time = layer(input_with_time,input_with_time)
last_sequence = tf.slice(input_with_time,
[0,self.window_size-1, 0],
[input_with_time.shape[0], 1, input_with_time.shape[2]])
out = self.simple_decoder((tf.reshape(last_sequence, [-1, input_with_time.shape[2]]), ids))
return out
| StarcoderdataPython |
3244350 | <gh_stars>1-10
import tkinter.messagebox
from tkinter import *
import tkinter.font as TkFont
from tkinter import ttk
window = Tk()
myFont = TkFont.Font(window, family="Helvetica", size=12)
def smallestnum():
if len(Entry1.get()) == 0 or len(Entry2.get()) == 0 or len(Entry3.get()) == 0:
tkinter.messagebox.showerror("Error!", "Missing input! Please try again and make sure each textbox has an input")
reset()
return None
# Find the least number among three numbers
Entry4.delete(0,"end")
L = []
L.append(int(Entry1.get()))
L.append(int(Entry2.get()))
L.append(int(Entry3.get()))
Largenum.set(int(min(L)))
def reset():
Entry1.delete(0, "end")
Entry2.delete(0, "end")
Entry3.delete(0, "end")
lbl1 = Label(window, text = "The Program that Finds the smallest Number", bg='#A5BFDA', font=(myFont))
lbl1.grid(row=0, column=0, columnspan=2)
lbl2 = Label(window,text = "Enter the first number:", bg='#A5BFDA', font=(myFont))
lbl2.grid(row=1, column=0, sticky=W,pady=25)
Entry1 = Entry(bd=10)
Entry1.grid(row=1, column=1)
lbl3 = Label(window,text = "Enter the second number:", bg='#A5BFDA', font=(myFont))
lbl3.grid(row=2, column=0,sticky=W)
Entry2 = Entry(bd=10)
Entry2.grid(row=2, column=1)
lbl4 = Label(window, text= "Enter the third number:", bg='#A5BFDA', font=(myFont))
lbl4.grid(row=3, column=0,sticky=W,pady=25)
Entry3 = Entry(bd=10)
Entry3.grid(row=3, column=1)
Bt1 = Button(window, text="Find the Smallest number", command=smallestnum,bd=5)
Bt1.grid(row=4, column=1)
lbl5 = Label(window, text="The Smallest variable is: ", bg='#A5BFDA', font=(myFont))
lbl5.grid(row=5, column=0, sticky=W,pady=25)
Largenum = StringVar(window, "Smallest number will appear here")
Entry4 = Entry(bd=10, textvariable=Largenum,justify="center")
Entry4.grid(row=5, column=1,ipadx=50,padx=75)
window.title("Find the smallest number")
window.geometry("525x300+20+10")
window.configure(background='#A5BFDA')
mainloop()
| StarcoderdataPython |
195372 | # Interview Question #5
# The problem is that we want to find duplicates in a one-dimensional array of integers in O(N) running time
# where the integer values are smaller than the length of the array!
# For example: if we have a list [1, 2, 3, 1, 5] then the algorithm can detect that there is a duplicate with value 1.
# Note: the array can not contain items smaller than 0 and items with values greater than the size of the list.
# This is how we can achieve O(N) linear running time complexity!
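# Note: of the three variants below, only duplicate_finder_3 meets the O(N)
# target -- finder_1 is O(N^2) (list.count inside a loop) and finder_2 is
# O(N log N) (sorting).  The in-place negation trick in finder_3 also cannot
# flag a duplicated 0, since 0 * -1 is still 0.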
def duplicate_finder_1(nums):
duplicates = []
for n in set(nums):
if nums.count(n) > 1:
duplicates.append(n)
return duplicates if duplicates else 'The array does not contain any duplicates!'
def duplicate_finder_2(nums):
duplicates = set()
    sorted_nums = sorted(nums)
    for i, n in enumerate(sorted_nums):
        if i + 1 < len(nums) and n == sorted_nums[i + 1]:
duplicates.add(n)
return list(duplicates) if duplicates else 'The array does not contain any duplicates!'
# Course implementation
def duplicate_finder_3(nums):
duplicates = set()
for n in nums:
if nums[abs(n)] >= 0:
nums[abs(n)] *= -1
else:
duplicates.add(n)
return list({-n if n < 0 else n for n in duplicates}) if duplicates \
else 'The array does not contain any duplicates!'
# return list(map(lambda n: n * -1 if n < 0 else n,
# duplicates)) if duplicates else 'The array does not contain any duplicates!'
# My solutions allow the use of numbers greater than the length of the array
print(duplicate_finder_1([1, 2, 3, 4]))
print(duplicate_finder_1([1, 1, 2, 3, 3, 4]))
print(duplicate_finder_2([5, 6, 7, 8]))
print(duplicate_finder_2([5, 6, 6, 6, 7, 7, 9]))
print(duplicate_finder_3([2, 3, 1, 2, 4, 3, 3]))
print(duplicate_finder_3([0, 1, 2]))
| StarcoderdataPython |
3381446 | import random
import time
from augmentation.operations import OperationPipeline
from image_grabber.grab_settings import DEBUG_MODE
from utils.utils import FileUtil, ProgressBarUtil, NoImageFoundException, ExceptionUtil
class DatasetGenerator(OperationPipeline):
folder_path = None
num_files = None
save_to_disk = True
folder_destination = "result"
def __init__(self,
folder_path: str,
num_files: int = 50,
save_to_disk=True,
folder_destination="result") -> None:
super().__init__()
self.folder_path = folder_path
self.num_files = num_files
self.save_to_disk = save_to_disk
self.folder_destination = folder_destination
def preview(self):
"""
        It prints a preview of:
- dataset current size
- operations list
- dataset augmented size
"""
pass
def execute(self):
"""
Execute the pipeline operation as configured
"""
start_time = time.time()
images_in_folder = FileUtil.get_images_file_path_array(self.folder_path)
if not images_in_folder:
raise (NoImageFoundException("No images found in %s folder" % self.folder_path))
images_to_transform = []
        # pick 'num_files' random files that will be used for data augmentation
while len(images_to_transform) < self.num_files:
images_to_transform.append(random.choice(images_in_folder))
i = 0
for file_path in images_to_transform:
try:
augmented_image = FileUtil.open(file_path)
for operation in self.operations:
random_num = random.uniform(0, 1)
do_operation = random_num <= operation.probability
if do_operation:
augmented_image = operation.execute(augmented_image)
if self.save_to_disk:
FileUtil.save_file(augmented_image, self.folder_destination, "aug")
except Exception as e:
ExceptionUtil.print(e)
pass
finally:
i = i + 1
ProgressBarUtil.update(i, self.num_files)
end_time = time.time()
print('\n\n %s images generated in the folder %s in %s sec' % (self.num_files, self.folder_destination, round(end_time - start_time, 2)))
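# Usage sketch (folder names are illustrative; assumes operations were added
# through the inherited OperationPipeline interface beforehand):
#   generator = DatasetGenerator("images/", num_files=10, folder_destination="augmented")
#   generator.execute()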
| StarcoderdataPython |
146128 | <filename>{{cookiecutter.project_slug}}/app/__init__.py
# -*- coding: utf-8 -*-
# @Author : Aquish
# @Organization : NTT
| StarcoderdataPython |
47679 | from typing import Any
from .ast_expression import Expression
from .ast_statement import Statement
class Assertion(Statement):
def __init__(self, actual: Expression, expected: Expression) -> None:
self._actual = actual
self._expected = expected
@property
def actual(self) -> Expression:
return self._actual
@property
def expected(self) -> Expression:
return self._expected
def accept(self, visitor: Any):
        visitor.visit_assertion(self)
| StarcoderdataPython |
1741002 | <filename>ejercicios/f20211116/ejercicio_05.py
def myfunction(array_numbers):
    # Remove duplicates
score_mod = []
for item in array_numbers:
if item not in score_mod:
score_mod.append(item)
    # Sort in descending order
ordered_list = sorted(score_mod, reverse = True)
    # Return the second element of the sorted list
return ordered_list[1]
| StarcoderdataPython |
1665493 | from lib.input import read_lines, blocks
input = read_lines(6)
def answered():
for block in blocks(input):
answers = set()
for line in block:
answers.update(line)
yield answers
def answered_by_all():
for block in blocks(input):
answers = [set(line) for line in block]
intersection = set.intersection(*answers)
yield intersection
def sum_len(collection):
return sum(len(e) for e in collection)
solve_1 = lambda: sum_len(answered())
solve_2 = lambda: sum_len(answered_by_all())
| StarcoderdataPython |
1602406 | from . import factorials
from . import numbers
| StarcoderdataPython |
1760292 | #!/usr/bin/python
#Original Author: <NAME>
#Original Date: Mar 6 2016
#Last Modified By: <NAME>
#Last Modified On: Mar 23 2016
import smbus
import sys
import logging
import i2cutil
#import sensor abstract
import abstractsensor
class VoltageSensor(abstractsensor.Sensor):
def __init__(self, bus=1, addr=0x40):
self.logger = logging.getLogger("PB.drivers.voltagesensor.local")
self.logger.info("Starting with parameters: bus=%i, addr=%x",bus,addr)
self.addr = addr
self.bus = smbus.SMBus(bus)
def getReading(self):
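        # Reading word register 0x02, dropping the low 3 bits and scaling by
        # 4 mV matches an INA219-style bus-voltage register -- an inference
        # from the arithmetic, not documented in this driver.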
return (i2cutil.reverse_word(self.bus.read_word_data(self.addr, 0x02))>>3)*0.004
if __name__ == "__main__":
if len(sys.argv) == 1:
v = VoltageSensor()
elif len(sys.argv) == 2:
v = VoltageSensor(addr=int(sys.argv[1]))
elif len(sys.argv) == 3:
v = VoltageSensor(bus=int(sys.argv[1]), addr=int(sys.argv[2],16))
else:
print "Invalid arguments"
exit(1)
print "Current Voltage: ",v.getReading()
| StarcoderdataPython |
4829731 | <reponame>nickamon/grr
#!/usr/bin/env python
"""This file contains common grr jobs."""
import gc
import logging
import pdb
import time
import traceback
import psutil
from grr import config
from grr_response_client import client_utils
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.lib.rdfvalues import flows as rdf_flows
# Our first response in the session is this:
INITIAL_RESPONSE_ID = 1
class Error(Exception):
pass
class CPUExceededError(Error):
pass
class NetworkBytesExceededError(Error):
"""Exceeded the maximum number of bytes allowed to be sent for this action."""
class ActionPlugin(object):
"""Baseclass for plugins.
An action is a plugin abstraction which receives an rdfvalue and
sends another rdfvalue in response.
The code is specified in the Run() method, while the data is
specified in the in_rdfvalue and out_rdfvalues classes.
Windows and OS X client actions cannot be imported on the linux server since
they require OS-specific libraries. If you are adding a client action that
doesn't have a linux implementation, you will need to register it in
libs/server_stubs.py
Windows and OS X implementations of client actions with the same name (e.g.
EnumerateInterfaces) as linux actions must accept and return the same rdfvalue
types as their linux counterparts.
"""
# The rdfvalue used to encode this message.
in_rdfvalue = None
# TODO(user): The RDFValue instance for the output protobufs. This is
# required temporarily until the client sends RDFValue instances instead of
# protobufs.
out_rdfvalues = [None]
# Authentication Required for this Action:
_authentication_required = True
__metaclass__ = registry.MetaclassRegistry
__abstract = True # pylint: disable=invalid-name
priority = rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY
require_fastpoll = True
last_progress_time = 0
def __init__(self, grr_worker=None):
"""Initializes the action plugin.
Args:
grr_worker: The grr client worker object which may be used to
e.g. send new actions on.
"""
self.grr_worker = grr_worker
self.response_id = INITIAL_RESPONSE_ID
self.cpu_used = None
self.nanny_controller = None
self.status = rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.OK)
self._last_gc_run = rdfvalue.RDFDatetime.Now()
self._gc_frequency = config.CONFIG["Client.gc_frequency"]
self.proc = psutil.Process()
self.cpu_start = self.proc.cpu_times()
self.cpu_limit = rdf_flows.GrrMessage().cpu_limit
def Execute(self, message):
"""This function parses the RDFValue from the server.
The Run method will be called with the specified RDFValue.
Args:
message: The GrrMessage that we are called to process.
Returns:
Upon return a callback will be called on the server to register
the end of the function and pass back exceptions.
Raises:
RuntimeError: The arguments from the server do not match the expected
rdf type.
"""
self.message = message
if message:
self.priority = message.priority
self.require_fastpoll = message.require_fastpoll
args = None
try:
if self.message.args_rdf_name:
if not self.in_rdfvalue:
raise RuntimeError(
"Did not expect arguments, got %s." % self.message.args_rdf_name)
if self.in_rdfvalue.__name__ != self.message.args_rdf_name:
raise RuntimeError("Unexpected arg type %s != %s." %
(self.message.args_rdf_name,
self.in_rdfvalue.__name__))
args = self.message.payload
# Only allow authenticated messages in the client
if self._authentication_required and (
self.message.auth_state !=
rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
raise RuntimeError(
"Message for %s was not Authenticated." % self.message.name)
self.cpu_start = self.proc.cpu_times()
self.cpu_limit = self.message.cpu_limit
if getattr(flags.FLAGS, "debug_client_actions", False):
pdb.set_trace()
try:
self.Run(args)
# Ensure we always add CPU usage even if an exception occurred.
finally:
used = self.proc.cpu_times()
self.cpu_used = (used.user - self.cpu_start.user,
used.system - self.cpu_start.system)
except NetworkBytesExceededError as e:
self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED,
"%r: %s" % (e, e), traceback.format_exc())
# We want to report back all errors and map Python exceptions to
# Grr Errors.
except Exception as e: # pylint: disable=broad-except
self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR,
"%r: %s" % (e, e), traceback.format_exc())
if flags.FLAGS.debug:
self.DisableNanny()
pdb.post_mortem()
if self.status.status != rdf_flows.GrrStatus.ReturnedStatus.OK:
logging.info("Job Error (%s): %s", self.__class__.__name__,
self.status.error_message)
if self.status.backtrace:
logging.debug(self.status.backtrace)
if self.cpu_used:
self.status.cpu_time_used.user_cpu_time = self.cpu_used[0]
self.status.cpu_time_used.system_cpu_time = self.cpu_used[1]
# This returns the error status of the Actions to the flow.
self.SendReply(self.status, message_type=rdf_flows.GrrMessage.Type.STATUS)
self._RunGC()
def _RunGC(self):
# After each action we can run the garbage collection to reduce our memory
# footprint a bit. We don't do it too frequently though since this is
# a bit expensive.
now = rdfvalue.RDFDatetime.Now()
if now - self._last_gc_run > self._gc_frequency:
gc.collect()
self._last_gc_run = now
def ForceGC(self):
self._last_gc_run = rdfvalue.RDFDatetime(0)
self._RunGC()
def Run(self, unused_args):
"""Main plugin entry point.
This function will always be overridden by real plugins.
Args:
unused_args: An already initialised protobuf object.
Raises:
KeyError: if not implemented.
"""
raise KeyError(
"Action %s not available on this platform." % self.message.name)
def SetStatus(self, status, message="", backtrace=None):
"""Set a status to report back to the server."""
self.status.status = status
self.status.error_message = utils.SmartUnicode(message)
if backtrace:
self.status.backtrace = utils.SmartUnicode(backtrace)
def SendReply(self,
rdf_value=None,
message_type=rdf_flows.GrrMessage.Type.MESSAGE):
"""Send response back to the server."""
self.grr_worker.SendReply(
rdf_value,
# This is not strictly necessary but adds context
# to this response.
name=self.__class__.__name__,
session_id=self.message.session_id,
response_id=self.response_id,
request_id=self.message.request_id,
message_type=message_type,
task_id=self.message.task_id,
priority=self.priority,
require_fastpoll=self.require_fastpoll)
self.response_id += 1
def Progress(self):
"""Indicate progress of the client action.
This function should be called periodically during client actions that do
not finish instantly. It will notify the nanny that the action is not stuck
and avoid the timeout and it will also check if the action has reached its
cpu limit.
Raises:
CPUExceededError: CPU limit exceeded.
"""
now = time.time()
if now - self.last_progress_time <= 2:
return
self.last_progress_time = now
# Prevent the machine from sleeping while the action is running.
client_utils.KeepAlive()
if self.nanny_controller is None:
self.nanny_controller = client_utils.NannyController()
self.nanny_controller.Heartbeat()
user_start = self.cpu_start.user
system_start = self.cpu_start.system
cpu_times = self.proc.cpu_times()
user_end = cpu_times.user
system_end = cpu_times.system
used_cpu = user_end - user_start + system_end - system_start
if used_cpu > self.cpu_limit:
self.grr_worker.SendClientAlert("Cpu limit exceeded.")
raise CPUExceededError("Action exceeded cpu limit.")
def SyncTransactionLog(self):
"""This flushes the transaction log.
This function should be called by the client before performing
potential dangerous actions so the server can get notified in case
the whole machine crashes.
"""
if self.nanny_controller is None:
self.nanny_controller = client_utils.NannyController()
self.nanny_controller.SyncTransactionLog()
def ChargeBytesToSession(self, length):
self.grr_worker.ChargeBytesToSession(
self.message.session_id, length, limit=self.network_bytes_limit)
def DisableNanny(self):
try:
self.nanny_controller.nanny.Stop()
except AttributeError:
logging.info("Can't disable Nanny on this OS.")
@property
def session_id(self):
try:
return self.message.session_id
except AttributeError:
return None
@property
def network_bytes_limit(self):
try:
return self.message.network_bytes_limit
except AttributeError:
return None
class IteratedAction(ActionPlugin):
"""An action which can restore its state from an iterator.
Implement iterating actions by extending this class and overriding the
Iterate() method.
"""
__abstract = True # pylint: disable=invalid-name
def Run(self, request):
"""Munge the iterator to the server and abstract it away."""
# Pass the client_state as a dict to the action. This is often more
# efficient than manipulating a protobuf.
client_state = request.iterator.client_state.ToDict()
# Derived classes should implement this.
self.Iterate(request, client_state)
# Update the iterator client_state from the dict.
request.iterator.client_state = client_state
# Return the iterator
self.SendReply(
request.iterator, message_type=rdf_flows.GrrMessage.Type.ITERATOR)
def Iterate(self, request, client_state):
"""Actions should override this."""
| StarcoderdataPython |
1691399 | <gh_stars>0
# -*- coding: UTF-8 -*-
""" API access object """
__author__ = "d01"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (C) 2015, <NAME>"
__license__ = "MIT"
__version__ = "0.1.2a0"
__date__ = "2015-08-21"
# Created: 2015-07-30 04:44
import logging
import requests
logger = logging.getLogger(__name__)
api_url = "https://api.airgramapp.com/1/"
class AirgramException(Exception):
""" Custom airgram exception """
pass
class Airgram(object):
""" Wrapper class to make calls to http://www.airgramapp.com/api """
_logger = logging.getLogger(__name__ + ".Airgram")
def __init__(self, key=None, secret=None, verify_certs=True):
"""
Initialize object
:param key: Service key
:type key: None | str
:param secret: Service secret
:type secret: None | str
:param verify_certs: Verify the certificates when making a request
:type verify_certs: bool
:rtype: None
"""
self.session = requests.Session()
if key is not None:
self.session.auth = (key, secret)
self.verify_certs = verify_certs
def _request(self, method, email=None, msg=None, url=None):
"""
Make request to airgram api
:param method: Which method to invoke on the api
:type method: None | str
:param email: email parameter
:type email: None | str
:param msg: msg parameter
:type msg: None | str
:param url: url parameter
:type url: None | str
:return: Response from server
:rtype: dict
:raises AirgramException: On failure
"""
parameter = {}
if email:
parameter['email'] = email
if msg:
parameter['msg'] = msg
if url:
parameter['url'] = url
try:
resp = self.session.get(
api_url + method, params=parameter, verify=self.verify_certs
)
resp.raise_for_status()
return resp.json()
except Exception as e:
raise AirgramException(e)
def send_as_guest(self, email, msg, url=None):
"""
Sends an Airgram message to a user who already has the Airgram app
:param email: Email address of the Airgram account you want to message
:type email: str
:param msg: Text of the message
:type msg: str
:param url: URL to open when the recipient opens the message
(default: None) if None -> no url
:type url: None | str
:return: Response from server
:rtype: dict
:raises AirgramException: On failure
"""
return self._request("send_as_guest", email, msg, url)
def subscribe(self, email):
"""
Subscribes an email address to the authenticated Airgram service
:param email: Email address to subscribe
:type email: str
:return: Response from server
:rtype: dict
:raises AirgramException: On failure
"""
return self._request("subscrube", email)
def send(self, email, msg, url=None):
"""
Sends an Airgram message to a subscriber of the
authenticated Airgram service
:param email: Email address of the subscriber you want to message
:type email: str
:param msg: Text of the message
:type msg: str
:param url: URL to open when the recipient opens the message
(default: None) if None -> no url
:type url: None | str
:return: Response from server
:rtype: dict
:raises AirgramException: On failure
"""
return self._request("send", email, msg, url)
def broadcast(self, msg, url=None):
"""
Sends an Airgram message to all subscribers of the
authenticated Airgram service
:param msg: Text of the message
:type msg: str
:param url: URL to open when the recipient opens the message
(default: None) if None -> no url
:type url: None | str
:return: Response from server
:rtype: dict
:raises AirgramException: On failure
"""
return self._request("send", None, msg, url)
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import time
from selenium.webdriver.chrome.options import Options
import pandas as pd
import datetime
def set_chromium():
options = Options()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
return options
def set_driver(options):
driver = webdriver.Chrome("/usr/lib/chromium/chromedriver", chrome_options=options)
driver.get("http://spojeni.dpp.cz/")
return driver
def get_times(when, departure, arrival, options):
""" This function set up Selenium and visits url and returns desired times"""
# Set up chromim and driver
driver = set_driver(options)
# Fill in the form
inputElement = driver.find_element_by_id("ctlFrom_txtObject")
inputElement.send_keys(departure)
inputElement = driver.find_element_by_id("ctlTo_txtObject")
inputElement.send_keys(arrival)
driver.find_element_by_id("txtTime").clear()
driver.find_element_by_id("txtTime").send_keys(when)
driver.find_element_by_xpath("//input[@id='optChangesDirect']").click()
# Send the form
inputElement.send_keys(Keys.ENTER)
    # Wait 2 seconds for the response page to load
time.sleep(2)
# Parse the response
soup = BeautifulSoup(driver.page_source, "html.parser")
# Search for time elements
spans = soup.find_all('span', {'class' : 'date'})
# Convert them to datetime
casy = [pd.to_datetime(span.get_text()) for span in spans]
# close the driver
driver.close()
# return times
    return casy[0] if casy else "No connection found!"
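
# Example invocation (stop names are illustrative; assumes chromium and the
# chromedriver path hard-coded in set_driver are available):
if __name__ == "__main__":
    opts = set_chromium()
    print(get_times("10:30", "Andel", "Muzeum", opts))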
# ckanext/validation/utils.py
import os
import logging
from ckan.lib.uploader import ResourceUpload
from ckantoolkit import config, asbool
log = logging.getLogger(__name__)
def get_update_mode_from_config():
if asbool(
config.get(u'ckanext.validation.run_on_update_sync', False)):
return u'sync'
elif asbool(
config.get(u'ckanext.validation.run_on_update_async', True)):
return u'async'
else:
return None
def get_create_mode_from_config():
if asbool(
config.get(u'ckanext.validation.run_on_create_sync', False)):
return u'sync'
elif asbool(
config.get(u'ckanext.validation.run_on_create_async', True)):
return u'async'
else:
return None
def get_local_upload_path(resource_id):
u'''
    Returns the local path to an uploaded file given an id
Note: it does not check if the resource or file actually exists
'''
upload = ResourceUpload({u'url': u'foo'})
return upload.get_path(resource_id)
def delete_local_uploaded_file(resource_id):
u'''
    Remove an uploaded file and its parent folders (if empty)
This assumes the default folder structure of:
{ckan.storage_path}/resources/{uuid[0:3]}/{uuid[3:6}/{uuid[6:]}
Note: some checks are performed in case a custom uploader class is used,
but is not guaranteed to work in all circumstances. Please test before!
'''
path = get_local_upload_path(resource_id)
try:
if os.path.exists(path):
os.remove(path)
first_directory = os.path.split(path)[0]
if first_directory.endswith(u'resources'):
return
if os.listdir(first_directory) == []:
os.rmdir(first_directory)
second_directory = os.path.split(first_directory)[0]
if second_directory.endswith(u'resources'):
return
if os.listdir(second_directory) == []:
os.rmdir(second_directory)
except OSError as e:
log.warning(u'Error deleting uploaded file: %s', e)
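
# Illustrative usage (the resource id below is a placeholder UUID):
# path = get_local_upload_path(u'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee')
# delete_local_uploaded_file(u'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee')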
# jansforte/Inteligencia-Artificial: Naive Bayes/SpellingCorrector/pruebas.py
import re
import pandas as pd
#numero = "111,245,954"
#numero = int(re.sub(",","",numero))
#print(numero-1)
#numero ="737799456456498797987979. él"
#numero = re.sub("[0-9]+.\s","",numero)
#print(numero)
#words = open('CREA_total.txt', encoding='utf-8',errors='ignore').read()
df = pd.read_csv('prueba.TXT', sep='\t', encoding='latin-1',low_memory=False)
df.columns = ['palabra','frecuenciaAbsoluta','frecuenciaNormalizada']
df = df.drop(['frecuenciaNormalizada'], axis=1)
df['palabra'] = df['palabra'].str.replace(' ','')
#df = df.set_index('palabra')
df['frecuenciaAbsoluta'] = df['frecuenciaAbsoluta'].str.replace(',','')
df['frecuenciaAbsoluta'] = pd.to_numeric(df['frecuenciaAbsoluta'])
d1 = dict(zip(df['palabra'],df['frecuenciaAbsoluta']))
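
# Quick sanity check (the word is illustrative; returns 0 if absent):
# print(d1.get('de', 0))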
# swm-master/swm-master/calc/mean_e_calc.py
## PRODUCE MEAN CALCULATIONS AND EXPORT AS .NPY
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
from netCDF4 import Dataset
# OPTIONS
runfolder = 15
print('Calculating subgrid-EKE means from run ' + str(runfolder))
## read data
runpath = path+'data/run%04i' % runfolder
skip = 5*365
e = np.load(runpath+'/e_sub.npy')[skip:,:,:]
print('run %i read.' % runfolder)
## create ouputfolder
try:
os.mkdir(runpath+'/analysis')
except OSError:
pass
## U,V,H mean
em = e.mean(axis=0)
print('e mean done.')
## STORING
dic = dict()
all_var2export = ['em']
for v in all_var2export:
exec('dic[v] ='+v)
np.save(runpath+'/analysis/mean_e.npy',dic)
print('Everything stored.')
# delta/utils/loss/loss_utils.py
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' loss implementation function '''
import tensorflow as tf
from delta import utils
#pylint: disable=too-many-arguments
def cross_entropy(logits,
labels,
input_length=None,
label_length=None,
smoothing=0.0,
reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
'''
  cross entropy function for classification and seq classification
  :param label_length: for seq tasks, the target seq length, e.g. "a b c </s>" -> 4
'''
del input_length
onehot_labels = tf.cond(
pred=tf.equal(tf.rank(logits) - tf.rank(labels), 1),
true_fn=lambda: tf.one_hot(labels, tf.shape(logits)[-1], dtype=tf.int32),
false_fn=lambda: labels)
if label_length is not None:
weights = utils.len_to_mask(label_length)
else:
weights = 1.0
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels,
logits=logits,
weights=weights,
label_smoothing=smoothing,
reduction=reduction)
return loss
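
# Hedged usage sketch (TF 1.x graph mode; shapes and values illustrative):
# builds a scalar loss tensor for a 2-sample, 3-class batch; evaluate it in
# a session to get the number.
def _example_cross_entropy_loss():
  logits = tf.constant([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])
  labels = tf.constant([0, 1])
  return cross_entropy(logits, labels)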
def ctc_lambda_loss(logits, labels, input_length, label_length, blank_index=0):
'''
ctc loss function
  param: logits, (B, T, D)
  param: input_length, (B, 1), input length of encoder
  param: labels, (B, T)
  param: label_length, (B, 1), label length used to convert the dense label to sparse
  returns: loss, scalar
'''
ilen = tf.cond(
pred=tf.equal(tf.rank(input_length), 1),
true_fn=lambda: input_length,
false_fn=lambda: tf.squeeze(input_length),
)
olen = tf.cond(
pred=tf.equal(tf.rank(label_length), 1),
true_fn=lambda: label_length,
false_fn=lambda: tf.squeeze(label_length))
deps = [
tf.assert_rank(labels, 2),
tf.assert_rank(logits, 3),
tf.assert_rank(ilen, 1), # input_length
tf.assert_rank(olen, 1), # output_length
]
with tf.control_dependencies(deps):
# (B, 1)
# blank index is consistent with Espnet, zero
batch_loss = tf.nn.ctc_loss_v2(
labels,
logits,
ilen,
olen,
logits_time_major=False,
blank_index=blank_index)
batch_loss.set_shape([None])
return batch_loss
def crf_log_likelihood(tags_scores, labels, input_length, transitions):
'''
:param tags_scores: [batch_size, max_seq_len, num_tags]
:param labels: [batch_size, max_seq_len]
:param input_length: [batch_size,]
:param transitions: [num_tags, num_tags]
:return: loss, transition_params
'''
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
inputs=tags_scores,
tag_indices=labels,
sequence_lengths=input_length,
transition_params=transitions)
loss = tf.reduce_mean(-log_likelihood)
return loss, transition_params
def mask_sequence_loss(logits,
labels,
input_length,
label_length,
smoothing=0.0):
'''
softmax cross entropy loss for sequence to sequence
:param logits: [batch_size, max_seq_len, vocab_size]
:param labels: [batch_size, max_seq_len]
:param input_length: [batch_size]
:param label_length: [batch_size]
:return: loss, scalar
'''
del smoothing
del input_length
if label_length is not None:
weights = tf.cast(utils.len_to_mask(label_length), tf.float32)
else:
weights = tf.ones_like(labels)
loss = tf.contrib.seq2seq.sequence_loss(logits, labels, weights)
return loss
from django.contrib.admin import TabularInline
from cinemanio.core.admin import GenreAdmin
from cinemanio.sites.kinopoisk.models import KinopoiskGenre
from cinemanio.sites.kinopoisk import signals # noqa
class KinopoiskGenreInline(TabularInline):
model = KinopoiskGenre
GenreAdmin.inlines = GenreAdmin.inlines + [KinopoiskGenreInline]
# 2019-11-14 10:01:24(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# from scipy.misc import comb # float
# import numpy as np
def main():
H, W = [int(x) for x in sys.stdin.readline().split()]
grid = ['#' * (W + 2)]
for _ in range(H):
# s = f'#{sys.stdin.readline().rstrip()}#'
s = '#' + sys.stdin.readline().rstrip() + '#'
grid.append(s)
grid.append('#' * (W + 2))
lighted_squares = []
for h in range(1, H+1):
for w in range(1, W+1):
if grid[h][w] == '#':
lighted_squares.append(0)
else:
count = 1
for j in range(w-1, -1, -1):
if grid[h][j] == '.':
count += 1
continue
else:
break
for j in range(w+1, W+2):
if grid[h][j] == '.':
count += 1
continue
else:
break
for i in range(h-1, -1, -1):
if grid[i][w] == '.':
count += 1
continue
else:
break
for i in range(h+1, H+2):
if grid[i][w] == '.':
count += 1
continue
else:
break
lighted_squares.append(count)
print(max(lighted_squares))
if __name__ == "__main__":
main()
# WebberHuang/DeformationLearningSolver
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__website__ = "http://riggingtd.com"
import os
try:
from PySide import QtGui, QtCore
from PySide.QtGui import *
from PySide.QtCore import *
except ImportError:
from PySide2 import QtGui, QtCore, QtWidgets
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from DLS.widget import utils
from DLS.widget import baseOptionWindow
#reload(baseOptionWindow)
uifile = os.path.join(utils.SCRIPT_DIRECTORY, "ui/axisWindow.ui")
cfgfile = os.path.join(utils.SCRIPT_DIRECTORY, "config.ini")
########################################################################
class AxisOptionWindow(baseOptionWindow.BaseOptionWindow):
""""""
_TITLE = 'Axis Options'
#----------------------------------------------------------------------
def __init__(self, parent=None):
"""Constructor"""
super(AxisOptionWindow, self).__init__(parent)
#utils.loadUi(uifile, self)
#self.initWidgets()
#----------------------------------------------------------------------
def initWidgets(self):
""""""
self.readSettings()
self.setWindowTitle(self._TITLE)
cp = QDesktopWidget().screenGeometry().center()
self.move(cp)
# Add actions
actionReset = self.findChild(QAction, "actionReset")
if actionReset != None:
actionReset.triggered.connect(self.resetSettings)
#----------------------------------------------------------------------
def isX(self):
""""""
x_chk = self.findChild(QCheckBox, "x_chk")
if x_chk != None:
return x_chk.isChecked()
return True
#----------------------------------------------------------------------
def setX(self, val):
""""""
x_chk = self.findChild(QCheckBox, "x_chk")
if x_chk != None:
x_chk.setChecked(val)
#----------------------------------------------------------------------
def isY(self):
""""""
y_chk = self.findChild(QCheckBox, "y_chk")
if y_chk != None:
return y_chk.isChecked()
return True
#----------------------------------------------------------------------
def setY(self, val):
""""""
y_chk = self.findChild(QCheckBox, "y_chk")
if y_chk != None:
y_chk.setChecked(val)
#----------------------------------------------------------------------
def isZ(self):
""""""
z_chk = self.findChild(QCheckBox, "z_chk")
if z_chk != None:
return z_chk.isChecked()
return True
#----------------------------------------------------------------------
def setZ(self, val):
""""""
z_chk = self.findChild(QCheckBox, "z_chk")
if z_chk != None:
z_chk.setChecked(val)
########################################################################
class TranslateAxisOptionWindow(AxisOptionWindow):
""""""
_TITLE = 'Translate Axis Options'
#----------------------------------------------------------------------
def __init__(self, parent=None):
"""Constructor"""
super(TranslateAxisOptionWindow, self).__init__(parent)
utils.loadUi(uifile, self)
self.initWidgets()
#----------------------------------------------------------------------
def resetSettings(self):
settings = QSettings(cfgfile, QSettings.IniFormat)
settings.beginGroup("Default")
self.setX(bool(int(settings.value("translateX"))))
self.setY(bool(int(settings.value("translateY"))))
self.setZ(bool(int(settings.value("translateZ"))))
settings.endGroup()
#----------------------------------------------------------------------
def readSettings(self):
settings = QSettings(cfgfile, QSettings.IniFormat)
settings.beginGroup("Custom")
self.setX(bool(int(settings.value("translateX"))))
self.setY(bool(int(settings.value("translateY"))))
self.setZ(bool(int(settings.value("translateZ"))))
settings.endGroup()
#----------------------------------------------------------------------
def writeSettings(self):
settings = QSettings(cfgfile, QSettings.IniFormat)
settings.beginGroup("Custom")
settings.setValue("translateX", int(self.isX()))
settings.setValue("translateY", int(self.isY()))
settings.setValue("translateZ", int(self.isZ()))
settings.endGroup()
########################################################################
class RotateAxisOptionWindow(AxisOptionWindow):
""""""
_TITLE = 'Rotate Axis Options'
#----------------------------------------------------------------------
def __init__(self, parent=None):
"""Constructor"""
super(RotateAxisOptionWindow, self).__init__(parent)
utils.loadUi(uifile, self)
self.initWidgets()
#----------------------------------------------------------------------
def resetSettings(self):
settings = QSettings(cfgfile, QSettings.IniFormat)
settings.beginGroup("Default")
self.setX(bool(int(settings.value("rotateX"))))
self.setY(bool(int(settings.value("rotateY"))))
self.setZ(bool(int(settings.value("rotateZ"))))
settings.endGroup()
#----------------------------------------------------------------------
def readSettings(self):
settings = QSettings(cfgfile, QSettings.IniFormat)
settings.beginGroup("Custom")
self.setX(bool(int(settings.value("rotateX"))))
self.setY(bool(int(settings.value("rotateY"))))
self.setZ(bool(int(settings.value("rotateZ"))))
settings.endGroup()
#----------------------------------------------------------------------
def writeSettings(self):
settings = QSettings(cfgfile, QSettings.IniFormat)
settings.beginGroup("Custom")
settings.setValue("rotateX", int(self.isX()))
settings.setValue("rotateY", int(self.isY()))
settings.setValue("rotateZ", int(self.isZ()))
settings.endGroup()
########################################################################
class ScaleAxisOptionWindow(AxisOptionWindow):
""""""
_TITLE = 'Scale Axis Options'
#----------------------------------------------------------------------
def __init__(self, parent=None):
"""Constructor"""
super(ScaleAxisOptionWindow, self).__init__(parent)
utils.loadUi(uifile, self)
self.initWidgets()
#----------------------------------------------------------------------
def resetSettings(self):
settings = QSettings(cfgfile, QSettings.IniFormat)
settings.beginGroup("Default")
self.setX(bool(int(settings.value("scaleX"))))
self.setY(bool(int(settings.value("scaleY"))))
self.setZ(bool(int(settings.value("scaleZ"))))
settings.endGroup()
#----------------------------------------------------------------------
def readSettings(self):
settings = QSettings(cfgfile, QSettings.IniFormat)
settings.beginGroup("Custom")
self.setX(bool(int(settings.value("scaleX"))))
self.setY(bool(int(settings.value("scaleY"))))
self.setZ(bool(int(settings.value("scaleZ"))))
settings.endGroup()
#----------------------------------------------------------------------
def writeSettings(self):
settings = QSettings(cfgfile, QSettings.IniFormat)
settings.beginGroup("Custom")
settings.setValue("scaleX", int(self.isX()))
settings.setValue("scaleY", int(self.isY()))
settings.setValue("scaleZ", int(self.isZ()))
settings.endGroup()
#----------------------------------------------------------------------
def main():
import sys
app = QApplication(sys.argv)
window = ScaleAxisOptionWindow()
window.show()
app.exec_()
if __name__ == "__main__":
    main()
import torch
import torch.nn as nn
from common.subsample import create_mask_for_mask_type
from models.neumann.operators import forward_adjoint_helper, gramian_helper
class NeumannNetwork(nn.Module):
def __init__(self, reg_network=None, hparams=None):
super(NeumannNetwork, self).__init__()
self.hparams = hparams
self.device = "cuda"
if hparams.gpus == 0:
self.device = "cpu"
self.mask_func = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
self.hparams.accelerations)
self.reg_network = reg_network
self.n_blocks = hparams.n_blocks
self.eta = nn.Parameter(torch.Tensor([0.1]), requires_grad=True)
self.preconditioned = False
def forward(self, kspace):
runner_img, runner_img_abs = forward_adjoint_helper(self.device, self.hparams, self.mask_func, kspace,
target=None)
runner_img_abs = runner_img_abs.unsqueeze(1)
runner_img_abs = self.eta * runner_img_abs
# unrolled gradient iterations
for i in range(self.n_blocks):
# print(f"\nNNeumann Iteration:{i}")
tmp = torch.rfft(runner_img_abs, 1, onesided=False).float().squeeze()
gramian_img, gramian_img_abs = gramian_helper(self.device, self.hparams, self.mask_func, tmp)
gramian_img_abs = gramian_img_abs.unsqueeze(1)
linear_component = runner_img_abs - self.eta * gramian_img_abs
learned_component = self.reg_network(runner_img_abs)
runner_img_abs = linear_component - learned_component
return runner_img_abs
def parameters(self):
return list([self.eta]) + list(self.reg_network.parameters())
# return list([self.eta, self.lambda_param]) + list(self.reg_network.parameters())
# Pro100Tema/ostap
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# @file ostap/plotting/makestyles.py
# Helper utilities to deal with ROOT styles
# =============================================================================
"""Helper utilities to deal with ROOT styles
"""
# =============================================================================
import ROOT
import ostap.plotting.color
__all__ = (
'StyleStore' , ## the storage/dictionary of created/known styles
'dump_style' , ## dump a style into dicitonary
'set_style' , ## configure style from the dictionary
'make_styles' , ## create the styles from the configuration
'make_ostap_style' , ## create Ostap-like style
)
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger( 'ostap.plotting.makestyles' )
else : logger = getLogger( __name__ )
# =============================================================================
## font
ostap_font = 132 ## Times-Roman
## line thickness
ostap_line_width = 1
## define style for text
ostap_label = ROOT.TText ( )
ostap_label . SetTextFont ( ostap_font )
ostap_label . SetTextColor ( 1 )
ostap_label . SetTextSize ( 0.04 )
ostap_label . SetTextAlign ( 12 )
## define style of latex text
ostap_latex = ROOT.TLatex ()
ostap_latex . SetTextFont ( ostap_font )
ostap_latex . SetTextColor ( 1 )
ostap_latex . SetTextSize ( 0.04 )
ostap_latex . SetTextAlign ( 12 )
# =============================================================================
## @class StyleStore
# Store for all created/configured styles
class StyleStore(object) :
    """Store for all created/configured styles
"""
__styles = {}
@classmethod
def styles ( kls ) :
return kls.__styles
# =============================================================================
## get the essential methods of class ROOT.TStyle
# @code
# getters, setters, special = style_methods()
# @endcode
def style_methods () :
"""Get the essential methods of class ROOT.TStyle
>>> getters, setters, special = style_methods()
"""
# The style getters
_getters = set ( [ i[3:] for i in dir ( ROOT.TStyle ) if i.startswith ( 'Get' ) ] )
_getters_ = set ( [ i[3:] for i in dir ( ROOT.TNamed ) if i.startswith ( 'Get' ) ] )
_getters = list ( _getters - _getters_ )
_getters . sort ( )
_getters = tuple ( _getters )
# The style setters
    _setters = set ( [ i[3:] for i in dir ( ROOT.TStyle ) if i.startswith ( 'Set' ) ] )
_setters_ = set ( [ i[3:] for i in dir ( ROOT.TNamed ) if i.startswith ( 'Set' ) ] )
_setters = list ( _setters - _setters_ )
_setters . sort ( )
_setters = tuple ( _setters )
#
_setters_int = set()
_setters_float = set()
_setters_str = set()
_style = ROOT.TStyle('tmp_style','Helper style')
for s in _setters :
fun = getattr ( _style , 'Set' + s , None )
if not fun : continue
try :
fun ( 0.0 )
_setters_float.add ( s )
continue
except :
pass
try :
fun ( 1 )
_setters_int.add ( s )
continue
except :
pass
try :
fun ( '' )
_setters_str.add ( s )
continue
except :
pass
del _style
# special methods
_special = (
## very special
'LineStyleString' ,
'AttDate' ,
'PaperSize' ,
'ColorPalette' ,
## not so special
'AxisColor' ,
'TickLength' ,
'Ndivisions' ,
'LabelColor' , 'LabelFont' , 'LabelOffset' , 'LabelSize' ,
'TitleColor' , 'TitleFont' , 'TitleOffset' , 'TitleSize' )
#
return _getters , ( _setters_float , _setters_int , _setters_str ) , _special
# =============================================================================
## th especial methods
style_getters , style_setters, style_special = style_methods ()
# =============================================================================
## dump the style to the dictionary
# @code
# style = ...
# conf = dump_style ( style )
# conf = style.dump () ## ditto
# conf = style.get () ## ditto
# @endcode
def dump_style ( style ) :
"""Dump the style to the dictionary
>>> style = ...
>>> conf = dump_style ( style )
>>> conf = style.dump () ## ditto
>>> conf = style.get () ## ditto
"""
config = {}
## regular attributes
for g in style_getters :
if g in style_special : continue
fun = getattr ( style , 'Get' + g , None )
if not fun : return
config [ g ] = fun ()
## half-special attributes
for attr in ( 'AxisColor' ,
'TickLength' ,
'Ndivisions' ,
'LabelColor' , 'LabelFont' , 'LabelOffset' , 'LabelSize' ,
'TitleColor' , 'TitleFont' , 'TitleOffset' , 'TitleSize' ) :
for axis in ( 'X' , 'Y' , 'Z' ) :
fun = getattr ( style , 'Get' + attr )
config [ '%s_%s' % ( attr , axis ) ] = fun ( axis )
## very special attribute
import array
x = array.array('f',[0] )
y = array.array('f',[0] )
style.GetPaperSize ( x , y )
config ['PaperSize_X' ] = x[0]
config ['PaperSize_Y' ] = y[0]
## very special attribute
for i in range(31) :
l = style.GetLineStyleString(i)
l = l.strip()
if l : config [ 'LineStyleString_%s' % i ] = l
return config
# =============================================================================
## Set the style from the configuration dictionary
# @code
# config = ...
# style = ...
# set_style ( style , config )
# style.set ( config ) ## ditto
# @endcode
def set_style ( style , config ) :
"""Set the style from the configurtaion dictionary
>>> config = ...
>>> style = ...
>>> set_style ( style , config )
>>> style.set ( config ) ## ditto
"""
for attr in style_setters [0] :
if not attr in config : continue
try :
value = float ( config [ attr ] )
setter = getattr ( style , 'Set' + attr )
setter ( value )
logger.debug ("Set (float) attribute %s/%s/%s " % ( attr , config[attr] , value ) )
except :
logger.warning("Can't set (float) attribute %s/%s, skip " % ( attr , config[attr] ) )
pass
for attr in style_setters [1] :
if not attr in config : continue
try :
value = int ( config [ attr ] )
setter = getattr ( style , 'Set' + attr )
setter ( value )
logger.debug ("Set (int) attribute %s/%s/%s " % ( attr , config[attr] , value ) )
except:
logger.warning("Can't set (int) attribute %s/%s, skip " % ( attr , config[attr] ) )
pass
for attr in style_setters [2] :
if not attr in config : continue
try :
value = config [ attr ]
setter = getattr ( style , 'Set' + attr )
setter ( value )
logger.debug ("Set (str) attribute %s/%s/%s " % ( attr , config[attr] , value ) )
except :
logger.warning("Can't set (str) attribute %s/%s, skip " % ( attr , config[attr] ) )
pass
## special attributes
for axis in ( 'X' , 'Y' , 'Z' ) :
key = 'AxisColor_%s' % axis
try :
if key in config : style.SetAxisColor ( int ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'TickLength_%s' % axis
try :
if key in config : style.SetTickLength ( float ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'Ndivisions_%s' % axis
try :
if key in config : style.SetNdivisions ( int ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'LabelColor_%s' % axis
try :
if key in config : style.SetLabelColor ( int ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'LabelFont_%s' % axis
try :
if key in config : style.SetLabelFont ( int ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'LabelOffset_%s' % axis
try :
if key in config : style.SetLabelOffset ( float ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'LabelSize_%s' % axis
try :
if key in config : style.SetLabelSize ( float ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'TitleColor_%s' % axis
try :
if key in config : style.SetTitleColor ( int ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'TitleFont_%s' % axis
try :
if key in config : style.SetTitleFont ( int ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'TitleOffset_%s' % axis
try :
if key in config : style.SetTitleOffset ( float ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
key = 'TitleSize_%s' % axis
try :
if key in config : style.SetTitleSize ( float ( config [ key ] ) , axis )
except :
logger.warning ( "Can't set attribute %s" % key )
## very special attribute
if 'PaperSize_X' in config and 'PaperSize_Y' in config :
key = 'PaperSize/1'
try :
style.SetPaperSize ( float ( config ['PaperSize_X'] ) ,
float ( config ['PaperSize_Y'] ) )
except :
logger.warning ( "Can't set attribute %s" % key )
elif 'PaperSize' in config :
key = 'PaperSize/2'
try :
style.SetPaperSize ( int ( config ['PaperSize'] ) )
except :
logger.warning ( "Can't set attribute %s" % key )
## one more very special attribute
for i in range ( 31 ) :
k = 'LineStyleString_%s' % i
if k in config :
style.SetLineStyleString ( i , config[k].strip() )
return style
# ============================================================================
ROOT.TStyle.dump = dump_style
ROOT.TStyle.get = dump_style
ROOT.TStyle.set = set_style
# =============================================================================
## Parse the configuration and create
# all the styles according to configuration
def make_styles ( config = None ) :
"""Parse the configuration and create
all the styles according to configuration
"""
if config is None :
import ostap.core.config as _CONFIG
config = _CONFIG.config
for key in config :
if not key.upper().startswith('STYLE') : continue
section = config [ key ]
s , c , n = key.partition (':')
if not c : continue
## the style name
name = n.strip ( )
description = section.get ( 'description' , fallback = 'The style %s' % name )
ok = section.getboolean ( 'ostaplike' , fallback = False )
## create ostap-like style
if ok : make_ostap_style ( name , description , section )
else :
## generic style
logger.info ( 'Create Generic style %s/%s' % ( name , description ) )
style = ROOT.TStyle ( name , description )
set_style ( style , section )
if name in StyleStore.styles() :
logger.warning ( "The configuration %s replaced" % name )
StyleStore.styles().update ( { name : style } )
# ==============================================================================
def get_float ( config , name , default ) :
try :
if hasattr ( config , 'getfloat' ) :
value = config.getfloat ( name , fallback = default )
else : value = config.get ( name , default )
return float ( value )
except :
return default
# =============================================================================
def get_int ( config , name , default ) :
try :
if hasattr ( config , 'getint') :
value = config.getint ( name , fallback = default )
else : value = config.get ( name , default )
return int ( value )
except :
return default
# =============================================================================
def get_str ( config , name , default ) :
try :
if hasattr ( config , 'getint') :
value = config.get ( name , fallback = default )
else : value = config.get ( name , default )
return str ( value )
except :
return default
# ============================================================================
## make Ostap-like style
def make_ostap_style ( name ,
description = 'The Style' ,
config = {} ,
colz = False ,
scale = 1.0 ,
font = ostap_font ,
line_width = ostap_line_width ) :
description = config.get ( 'description' , 'The Style' )
conf = {}
conf.update ( config )
conf [ 'FrameBorderMode' ] = get_int ( config , 'FrameBorderMode' , 0 )
conf [ 'CanvasBorderMode' ] = get_int ( config , 'CanvasBorderMode' , 0 )
conf [ 'PadBorderMode' ] = get_int ( config , 'PadBorderMode' , 0 )
conf [ 'PadColor' ] = get_int ( config , 'PadColor' , 0 )
conf [ 'CanvasColor' ] = get_int ( config , 'CanvasColor' , 0 )
conf [ 'StatColor' ] = get_int ( config , 'StatColor' , 0 )
if 'PaperSize_X' in config or 'PaperSize_Y' in config :
conf ['PaperSize_X' ] = get_float ( config , 'PaperSize_X' , 20 )
conf ['PaperSize_Y' ] = get_float ( config , 'PaperSize_Y' , 26 )
else :
a = str ( config.get ( 'PaperSize' ) ).upper()
if 'A4' in a : conf [ 'PaperSize' ] = ROOT.TStyle.kA4
elif 'US' in a : conf [ 'PaperSize' ] = ROOT.TStyle.kUSletter
elif 'LETTER' in a : conf [ 'PaperSize' ] = ROOT.TStyle.kUSletter
else : conf ['PaperSize' ] = get_int ( config , 'PaperSize' , ROOT.TStyle.kA4 )
conf [ 'PadTopMargin' ] = get_float ( config , 'PadTopMargin' , 0.05 )
conf [ 'PadRightMargin' ] = get_float ( config , 'PadRightMargin' , 0.14 if colz else 0.05 )
conf [ 'PadLeftMargin' ] = get_float ( config , 'PadLeftMargin' , 0.10 )
conf [ 'PadBottomMargin' ] = get_float ( config , 'PadBottomMargin' , 0.10 )
conf [ 'TextFont' ] = get_int ( config , 'TextFont' , font )
conf [ 'TextSize' ] = get_float ( config , 'FontSize' , 0.08 * scale )
conf [ 'LabelFont_X' ] = get_int ( config , 'LabelFont_X' , font )
conf [ 'LabelFont_Y' ] = get_int ( config , 'LabelFont_Y' , font )
conf [ 'LabelFont_Z' ] = get_int ( config , 'LabelFont_Z' , font )
conf [ 'LabelSize_X' ] = get_float ( config , 'LabelSize_X' , 0.05 * scale )
conf [ 'LabelSize_Y' ] = get_float ( config , 'LabelSize_Y' , 0.05 * scale )
conf [ 'LabelSize_Z' ] = get_float ( config , 'LabelSize_Z' , 0.05 * scale )
conf [ 'TitleFont_X' ] = get_int ( config , 'TitleFont_X' , font )
conf [ 'TitleFont_Y' ] = get_int ( config , 'TitleFont_Y' , font )
conf [ 'TitleFont_Z' ] = get_int ( config , 'TitleFont_Z' , font )
conf [ 'TitleSize_X' ] = get_float ( config , 'TitleSize_X' , -1 )
conf [ 'TitleSize_Y' ] = get_float ( config , 'TitleSize_Y' , 0.05 * scale )
conf [ 'TitleSize_Z' ] = get_float ( config , 'TitleSize_Z' , 0.05 * scale )
conf [ 'LineWidth' ] = get_int ( config , 'LineWidth' , line_width )
conf [ 'FrameWidth' ] = get_int ( config , 'FrameWidth' , line_width )
conf [ 'HistLineWidth' ] = get_int ( config , 'HistLineWidth' , line_width )
conf [ 'FuncWidth' ] = get_int ( config , 'FuncWidth' , line_width )
    conf [ 'GridWidth' ] = get_int ( config , 'GridWidth' , line_width )
conf [ 'MarkerStyle' ] = get_int ( config , 'MarkerStyle' , 20 )
conf [ 'MarkerSize' ] = get_float ( config , 'MarkerSize' , 1.2 )
conf [ 'LabelOffset' ] = get_float ( config , 'LabelOffset' , 0.015 )
conf [ 'StatFormat' ] = get_str ( config , 'StatFormat' , '6.3g')
conf [ 'OptTitle' ] = get_int ( config , 'OptTitle' , 0 )
conf [ 'OptFit' ] = get_int ( config , 'OptFit' , 0 )
conf [ 'OptStat' ] = get_int ( config , 'OptStat' , 0 )
conf [ 'LegendFont' ] = get_int ( config , 'LegendFont' , font )
## size of small lines at the end of error bars
conf [ 'EndErrorsSize' ] = get_float ( config , 'EndErrorsSize' , 3 )
## statistics box
conf [ 'StatBorderSize' ] = get_int ( config , 'StatBorderSize' , 0 )
conf [ 'StatFont' ] = get_int ( config , 'StatFont' , font )
conf [ 'StatFontSize' ] = get_float ( config , 'StatFontSize' , 0.05 * scale )
conf [ 'StatX' ] = get_float ( config , 'StatX' , 0.9 )
conf [ 'StatY' ] = get_float ( config , 'StatY' , 0.9 )
conf [ 'StatW' ] = get_float ( config , 'StatW' , 0.25 )
conf [ 'StatH' ] = get_float ( config , 'StatH' , 0.14 )
conf [ 'PadTickX' ] = get_int ( config , 'PadTickX' , 1 )
conf [ 'PadTickY' ] = get_int ( config , 'PadTickY' , 1 )
conf [ 'Ndivisions_X' ] = get_int ( config , 'Ndivisions_X' , 505 )
conf [ 'Ndivisions_Y' ] = get_int ( config , 'Ndivisions_Y' , 510 )
conf [ 'Ndivisions_Z' ] = get_int ( config , 'Ndivisions_Z' , 510 )
    ## dark-body radiator palette
    conf [ 'Palette' ] = get_int ( config , 'Palette' , ROOT.kDarkBodyRadiator )
conf [ 'NumberContours' ] = get_int ( config , 'NumberContours' , 255 )
conf [ 'LineStyleString_2' ] = "12 12"
conf [ 'LineStyleString_11' ] = "76 24"
conf [ 'LineStyleString_12' ] = "60 16 8 16"
conf [ 'LineStyleString_13' ] = "168 32"
conf [ 'LineStyleString_14' ] = "32 32"
## create the style
style = ROOT.TStyle ( name , description )
set_style ( style , conf )
logger.debug ('Create Ostap style %s' % style.GetName() )
if name in StyleStore.styles() :
logger.info ( "The configuration %s replaced" % name )
StyleStore.styles().update ( { name : style } )
if name.startswith('Style') :
nname = name[5:]
if nname in StyleStore.styles() :
logger.info ( "The configuration %s replaced" % nname )
StyleStore.styles().update ( { nname : style } )
return style
# =============================================================================
## read the configuration files and create the styles
make_styles ()
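# =============================================================================
## Example usage (the style name below is illustrative):
#  @code
#  style = make_ostap_style ( 'MyStyle' , 'Demo style' )
#  style.cd ()  ## make it the current ROOT style
#  @endcode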
# =============================================================================
_decorated_classes_ = (
ROOT.TStyle ,
)
_new_methods_ = (
#
ROOT.TStyle.dump ,
ROOT.TStyle.get ,
ROOT.TStyle.set ,
)
# =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
# =============================================================================
# The END
# =============================================================================
# Violet26/usaspending-api: usaspending_api/etl/transaction_loaders/fpds_loader.py
import logging
from psycopg2.extras import DictCursor
from psycopg2 import Error
from django.db import connections, connection
from usaspending_api.etl.transaction_loaders.field_mappings_fpds import (
transaction_fpds_nonboolean_columns,
transaction_normalized_nonboolean_columns,
transaction_normalized_functions,
award_nonboolean_columns,
award_functions,
transaction_fpds_boolean_columns,
transaction_fpds_functions,
all_broker_columns,
)
from usaspending_api.etl.transaction_loaders.data_load_helpers import (
capitalize_if_string,
false_if_null,
get_deleted_fpds_data_from_s3,
)
from usaspending_api.etl.transaction_loaders.generic_loaders import (
update_transaction_fpds,
update_transaction_normalized,
insert_transaction_normalized,
insert_transaction_fpds,
insert_award,
)
from usaspending_api.common.helpers.timing_helpers import Timer
logger = logging.getLogger("console")
failed_ids = []
def delete_stale_fpds(date):
"""
    Remove transaction_fpds and transaction_normalized records matching any id in
    the provided detached_award_procurement_id list.
Returns list of awards touched
"""
detached_award_procurement_ids = get_deleted_fpds_data_from_s3(date)
if not detached_award_procurement_ids:
return []
ids_to_delete = ",".join([str(id) for id in detached_award_procurement_ids])
logger.debug(f"Obtained these delete record IDs: [{ids_to_delete}]")
with connection.cursor() as cursor:
cursor.execute(
f"select transaction_id from transaction_fpds where detached_award_procurement_id in ({ids_to_delete})"
)
# assumes that this won't be too many IDs and lead to degraded performance or require too much memory
transaction_normalized_ids = [str(row[0]) for row in cursor.fetchall()]
if not transaction_normalized_ids:
return []
txn_id_str = ",".join(transaction_normalized_ids)
cursor.execute(f"select distinct award_id from transaction_normalized where id in ({txn_id_str})")
awards_touched = cursor.fetchall()
# Set backreferences from Awards to Transaction Normalized to null. These FKs will be updated later
cursor.execute(
"update awards set latest_transaction_id = null, earliest_transaction_id = null "
"where latest_transaction_id in ({ids}) or earliest_transaction_id in ({ids}) "
"returning id".format(ids=txn_id_str)
)
deleted_awards = cursor.fetchall()
logger.info(f"{len(deleted_awards)} awards were unlinked from transactions due to pending deletes")
cursor.execute(f"delete from transaction_fpds where transaction_id in ({txn_id_str}) returning transaction_id")
deleted_fpds = set(cursor.fetchall())
cursor.execute(f"delete from transaction_normalized where id in ({txn_id_str}) returning id")
deleted_transactions = set(cursor.fetchall())
if deleted_transactions != deleted_fpds:
msg = "Delete Mismatch! Counts of transaction_normalized ({}) and transaction_fpds ({}) deletes"
raise RuntimeError(msg.format(len(deleted_transactions), len(deleted_fpds)))
return awards_touched
def load_fpds_transactions(chunk):
"""
Run transaction load for the provided ids. This will create any new rows in other tables to support the transaction
data, but does NOT update "secondary" award values like total obligations or C -> D linkages. If transactions are
being reloaded, this will also leave behind rows in supporting tables that won't be removed unless destroy_orphans
is called.
returns ids for each award touched
"""
with Timer() as timer:
retval = []
if chunk:
broker_transactions = _extract_broker_objects(chunk)
if broker_transactions:
load_objects = _transform_objects(broker_transactions)
retval = _load_transactions(load_objects)
logger.info("batch completed in {}".format(timer.as_string(timer.elapsed)))
return retval
def _extract_broker_objects(id_list):
broker_conn = connections["data_broker"]
broker_conn.ensure_connection()
with broker_conn.connection.cursor(cursor_factory=DictCursor) as cursor:
sql = "SELECT {} from detached_award_procurement where detached_award_procurement_id in %s".format(
",".join(all_broker_columns())
)
cursor.execute(sql, (tuple(id_list),))
results = cursor.fetchall()
return results
def _create_load_object(broker_object, non_boolean_column_map, boolean_column_map, function_map):
retval = {}
if non_boolean_column_map:
retval.update(
{non_boolean_column_map[key]: capitalize_if_string(broker_object[key]) for key in non_boolean_column_map}
)
if boolean_column_map:
retval.update({boolean_column_map[key]: false_if_null(broker_object[key]) for key in boolean_column_map})
if function_map:
retval.update({key: func(broker_object) for key, func in function_map.items()})
return retval
def _transform_objects(broker_objects):
retval = []
for broker_object in broker_objects:
connected_objects = {
# award. NOT used if a matching award is found later
"award": _create_load_object(broker_object, award_nonboolean_columns, None, award_functions),
"transaction_normalized": _create_load_object(
broker_object, transaction_normalized_nonboolean_columns, None, transaction_normalized_functions
),
"transaction_fpds": _create_load_object(
broker_object,
transaction_fpds_nonboolean_columns,
transaction_fpds_boolean_columns,
transaction_fpds_functions,
),
}
retval.append(connected_objects)
return retval
def _load_transactions(load_objects):
"""returns ids for each award touched"""
ids_of_awards_created_or_updated = set()
connection.ensure_connection()
with connection.connection.cursor(cursor_factory=DictCursor) as cursor:
# Handle transaction-to-award relationship for each transaction to be loaded
for load_object in load_objects:
try:
# AWARD GET OR CREATE
award_id = _matching_award(cursor, load_object)
if not award_id:
# If there is no award, we need to create one
award_id = insert_award(cursor, load_object)
load_object["transaction_normalized"]["award_id"] = award_id
ids_of_awards_created_or_updated.add(award_id)
# TRANSACTION UPSERT
transaction_id = _lookup_existing_transaction(cursor, load_object)
if transaction_id:
# Inject the Primary Key of transaction_normalized+transaction_fpds that was found, so that the
# following updates can find it to update
load_object["transaction_fpds"]["transaction_id"] = transaction_id
_update_fpds_transaction(cursor, load_object, transaction_id)
else:
# If there is no transaction we create a new one.
transaction_id = _insert_fpds_transaction(cursor, load_object)
load_object["transaction_fpds"]["transaction_id"] = transaction_id
load_object["award"]["latest_transaction_id"] = transaction_id
except Error as e:
logger.error(
f"load failed for Broker ids {load_object['transaction_fpds']['detached_award_procurement_id']}!"
f"\nDetails: {e.pgerror}"
)
failed_ids.append(load_object["transaction_fpds"]["detached_award_procurement_id"])
return list(ids_of_awards_created_or_updated)
def _matching_award(cursor, load_object):
""" Try to find an award for this transaction to belong to by unique_award_key"""
find_matching_award_sql = "select id from awards where generated_unique_award_id = '{}'".format(
load_object["transaction_fpds"]["unique_award_key"]
)
cursor.execute(find_matching_award_sql)
results = cursor.fetchall()
return results[0][0] if results else None
def _lookup_existing_transaction(cursor, load_object):
"""find existing fpds transaction, if any"""
find_matching_transaction_sql = (
"select transaction_id from transaction_fpds "
"where detached_award_proc_unique = '{}'".format(load_object["transaction_fpds"]["detached_award_proc_unique"])
)
cursor.execute(find_matching_transaction_sql)
results = cursor.fetchall()
return results[0][0] if results else None
def _update_fpds_transaction(cursor, load_object, transaction_id):
update_transaction_fpds(cursor, load_object)
update_transaction_normalized(cursor, load_object)
logger.debug("updated fpds transaction {}".format(transaction_id))
def _insert_fpds_transaction(cursor, load_object):
# transaction_normalized and transaction_fpds should be one-to-one
transaction_normalized_id = insert_transaction_normalized(cursor, load_object)
# Inject the Primary Key of transaction_normalized row that this record is mapped to in the one-to-one relationship
load_object["transaction_fpds"]["transaction_id"] = transaction_normalized_id
transaction_fpds_id = insert_transaction_fpds(cursor, load_object)
logger.debug("created fpds transaction {}".format(transaction_fpds_id))
return transaction_fpds_id
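
# Illustrative driver (the IDs are placeholder detached_award_procurement_id
# values; production runs batch them from the Broker database):
# awards_touched = load_fpds_transactions([101, 102, 103])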
# d-wortmann/judft_tutorials
from aiida.orm import Dict, load_node
from aiida.engine import submit
from aiida import load_profile
# import the FleurinpgenCalculation
# load inpgen Code
# create a StructureData
structures = [Fe_structure, Ni_structure, Co_structure]
# create a parameters Dict
# options
# assemble inputs in a single dictionary
# submit
for structure in structures:
    # DO NOT FORGET TO PRINT OUT THE PK OF EACH SUBMISSION
    pass
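
# One possible completion (hedged sketch: the input keys and calculation
# class follow the names referenced in the comments above and may differ
# in your installation):
# for structure in structures:
#     inputs = {
#         'code': inpgen_code,
#         'structure': structure,
#         'parameters': parameters,
#         'metadata': {'options': options},
#     }
#     node = submit(FleurinpgenCalculation, **inputs)
#     print('Submitted inpgen calculation with PK', node.pk)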
import asyncio
from ray import workflow
from ray.tests.conftest import * # noqa
from ray.workflow import workflow_storage
from ray.workflow.storage import get_global_storage
import pytest
def get_metadata(paths, is_json=True):
store = get_global_storage()
key = store.make_key(*paths)
return asyncio.get_event_loop().run_until_complete(store.get(key, is_json))
def test_step_user_metadata(workflow_start_regular):
metadata = {"k1": "v1"}
step_name = "simple_step"
workflow_id = "simple"
@workflow.step(name=step_name, metadata=metadata)
def simple():
return 0
simple.step().run(workflow_id)
checkpointed_metadata = get_metadata(
[workflow_id, "steps", step_name, workflow_storage.STEP_USER_METADATA])
assert metadata == checkpointed_metadata
def test_step_runtime_metadata(workflow_start_regular):
step_name = "simple_step"
workflow_id = "simple"
@workflow.step(name=step_name)
def simple():
return 0
simple.step().run(workflow_id)
prerun_meta = get_metadata([
workflow_id, "steps", step_name, workflow_storage.STEP_PRERUN_METADATA
])
postrun_meta = get_metadata([
workflow_id, "steps", step_name, workflow_storage.STEP_POSTRUN_METADATA
])
assert "start_time" in prerun_meta
assert "end_time" in postrun_meta
def test_workflow_user_metadata(workflow_start_regular):
metadata = {"k1": "v1"}
workflow_id = "simple"
@workflow.step
def simple():
return 0
simple.step().run(workflow_id, metadata=metadata)
checkpointed_metadata = get_metadata(
[workflow_id, workflow_storage.WORKFLOW_USER_METADATA])
assert metadata == checkpointed_metadata
def test_workflow_runtime_metadata(workflow_start_regular):
workflow_id = "simple"
@workflow.step
def simple():
return 0
simple.step().run(workflow_id)
prerun_meta = get_metadata(
[workflow_id, workflow_storage.WORKFLOW_PRERUN_METADATA])
postrun_meta = get_metadata(
[workflow_id, workflow_storage.WORKFLOW_POSTRUN_METADATA])
assert "start_time" in prerun_meta
assert "end_time" in postrun_meta
def test_no_user_metadata(workflow_start_regular):
workflow_id = "simple"
step_name = "simple_step"
@workflow.step(name=step_name)
def simple():
return 0
simple.step().run(workflow_id)
checkpointed_user_step_metadata = get_metadata(
[workflow_id, "steps", step_name, workflow_storage.STEP_USER_METADATA])
checkpointed_user_run_metadata = get_metadata(
[workflow_id, workflow_storage.WORKFLOW_USER_METADATA])
assert {} == checkpointed_user_step_metadata
assert {} == checkpointed_user_run_metadata
def test_all_metadata(workflow_start_regular):
user_step_metadata = {"k1": "v1"}
user_run_metadata = {"k2": "v2"}
step_name = "simple_step"
workflow_id = "simple"
@workflow.step
def simple():
return 0
simple.options(
name=step_name, metadata=user_step_metadata).step().run(
workflow_id, metadata=user_run_metadata)
checkpointed_user_step_metadata = get_metadata(
[workflow_id, "steps", step_name, workflow_storage.STEP_USER_METADATA])
checkpointed_user_run_metadata = get_metadata(
[workflow_id, workflow_storage.WORKFLOW_USER_METADATA])
checkpointed_pre_step_meta = get_metadata([
workflow_id, "steps", step_name, workflow_storage.STEP_PRERUN_METADATA
])
checkpointed_post_step_meta = get_metadata([
workflow_id, "steps", step_name, workflow_storage.STEP_POSTRUN_METADATA
])
checkpointed_pre_run_meta = get_metadata(
[workflow_id, workflow_storage.WORKFLOW_PRERUN_METADATA])
checkpointed_post_run_meta = get_metadata(
[workflow_id, workflow_storage.WORKFLOW_POSTRUN_METADATA])
assert user_step_metadata == checkpointed_user_step_metadata
assert user_run_metadata == checkpointed_user_run_metadata
assert "start_time" in checkpointed_pre_step_meta
assert "start_time" in checkpointed_pre_run_meta
assert "end_time" in checkpointed_post_step_meta
assert "end_time" in checkpointed_post_run_meta
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
acl_rule_ip = """
<config>
<{{address_type}}-acl
xmlns="urn:brocade.com:mgmt:brocade-{{address_type}}-access-list">
<{{address_type}}>
<access-list>
<{{acl_type}}>
<name>{{acl_name}}</name>
{% if address_type == "ip" %}
{% if acl_type == "extended" %}
<hide-{{address_type}}-acl-ext>
{% else %}
<hide-{{address_type}}-acl-std>
{% endif %}
{% endif %}
<seq>
<seq-id>{{seq_id}}</seq-id>
<action>{{action}}</action>
<src-host-any-sip>{{source.host_any}}</src-host-any-sip>
{% if source.host_any != "any" %}
{% if source.host_any == "host" %}
<src-host-ip>{{source.host_ip}}</src-host-ip>
{% elif source.mask is not none %}
<src-mask>{{source.mask}}</src-mask>
{% endif %}
{% endif %}
{% if acl_type == "extended" %}
<protocol-type>{{protocol_type}}</protocol-type>
{% if source.xport is not none %}
<sport>{{source.xport.op}}</sport>
{% if source.xport.op == "eq" or source.xport.op == "neq" %}
<sport-number-eq-neq-{{protocol_type}}>
{{source.xport.val[0]}}
</sport-number-eq-neq-{{protocol_type}}>
{% elif source.xport.op == "range" %}
<sport-number-range-lower-{{protocol_type}}>
{{source.xport.val[0]}}
</sport-number-range-lower-{{protocol_type}}>
<sport-number-range-higher-{{protocol_type}}>
{{source.xport.val[1]}}
</sport-number-range-higher-{{protocol_type}}>
{% else %}
<sport-number-{{source.xport.op}}-{{protocol_type}}>
{{source.xport.val[0]}}
</sport-number-{{source.xport.op}}-{{protocol_type}}>
{% endif %}
{% endif %}
<dst-host-any-dip>{{destination.host_any}}</dst-host-any-dip>
{% if destination.host_any != "any" %}
{% if destination.host_any == "host" %}
<dst-host-ip>{{destination.host_ip}}</dst-host-ip>
{% elif destination.mask is not none %}
<dst-mask>{{destination.mask}}</dst-mask>
{% endif %}
{% endif %}
{% if destination.xport is not none %}
<dport>{{destination.xport.op}}</dport>
{% if destination.xport.op == "eq" or
destination.xport.op == "neq" %}
<dport-number-eq-neq-{{protocol_type}}>
{{destination.xport.val[0]}}
</dport-number-eq-neq-{{protocol_type}}>
{% elif destination.xport.op == "range" %}
<dport-number-range-lower-{{protocol_type}}>
{{destination.xport.val[0]}}
</dport-number-range-lower-{{protocol_type}}>
<dport-number-range-higher-{{protocol_type}}>
{{destination.xport.val[1]}}
</dport-number-range-higher-{{protocol_type}}>
{% else %}
<dport-number-{{destination.xport.op}}-{{protocol_type}}>
{{destination.xport.val[0]}}
</dport-number-{{destination.xport.op}}-{{protocol_type}}>
{% endif %}
{% endif %}
{% if dscp is not none %} <dscp>{{dscp}}</dscp> {% endif %}
{% if dscp_force is not none %}
<dscp-force>{{dscp_force}}</dscp-force>
{% endif %}
{% if drop_precedence_force is not none %}
<drop-precedence-force>
{{drop_precedence_force}}
</drop-precedence-force>
{% endif %}
{% if vlan_id is not none %}
<vlan>{{vlan_id}}</vlan>
{% endif %}
{% if urg is not none %} <urg></urg> {% endif %}
{% if ack is not none %} <ack></ack> {% endif %}
{% if push is not none %} <push></push> {% endif %}
{% if fin is not none %} <fin></fin> {% endif %}
{% if rst is not none %} <rst></rst> {% endif %}
{% if sync is not none %} <sync></sync> {% endif %}
{% if mirror is not none %} <mirror></mirror> {% endif %}
{% endif %}
{% if count is not none %} <count></count> {% endif %}
{% if log is not none %}<log></log> {% endif %}
{% if copy_sflow is not none %}
<copy-sflow></copy-sflow>
{% endif %}
</seq>
{% if address_type == "ip" %}
{% if acl_type == "extended" %}
</hide-{{address_type}}-acl-ext>
{% else %}
</hide-{{address_type}}-acl-std>
{% endif %}
{% endif %}
</{{acl_type}}>
</access-list>
</{{address_type}}>
</{{address_type}}-acl>
</config>
"""
acl_rule_mac = """
<config>
<mac xmlns="urn:brocade.com:mgmt:brocade-mac-access-list">
<access-list>
<{{acl_type}}>
<name>{{acl_name}}</name>
{% if acl_type == "extended" %}
<hide-mac-acl-ext>
{% else %}
<hide-mac-acl-std>
{% endif %}
<seq>
<seq-id>{{seq_id}}</seq-id>
<action>{{action}}</action>
<source>{{source.source}}</source>
{% if source.source != "any" %}
{% if source.source == "host" %}
<srchost>{{source.srchost}}</srchost>
{% else %}
<src-mac-addr-mask>{{source.mask}}</src-mac-addr-mask>
{% endif %}
{% endif %}
{% if acl_type == "extended" %}
<dst>{{dst.dst}}</dst>
{% if dst.dst != "any" %}
{% if dst.dst == "host" %}
<dsthost>{{dst.dsthost}}</dsthost>
{% else %}
<dst-mac-addr-mask>{{dst.mask}}</dst-mac-addr-mask>
{% endif %}
{% endif %}
{% if ethertype is not none %}
<ethertype>{{ethertype}}</ethertype>
{% endif %}
{% if vlan_tag_format is not none %}
<vlan-tag-format>{{vlan_tag_format}}</vlan-tag-format>
{% endif %}
{% if vlan is not none %}
{% if vlan_tag_format is none %}
<vlan>{{vlan.vlan_id}}</vlan>
{% elif vlan_tag_format == "untagged" %}
<vlan>{{vlan.vlan_id}}</vlan>
{% if vlan.mask is not none %}
<vlan-id-mask>{{vlan.mask}}</vlan-id-mask>
{% endif %}
{% elif vlan_tag_format == "single-tagged" %}
<vlan>{{vlan.vlan_id}}</vlan>
{% if vlan.mask is not none %}
<vlan-id-mask>{{vlan.mask}}</vlan-id-mask>
{% endif %}
{% elif vlan_tag_format == "double-tagged" %}
<outer-vlan>{{vlan.outervlan}}</outer-vlan>
{% if vlan.outermask is not none %}
<outer-vlan-id-mask>{{vlan.outermask}}</outer-vlan-id-mask>
{% endif %}
<inner-vlan>{{vlan.innervlan}}</inner-vlan>
{% if vlan.innermask is not none %}
<inner-vlan-id-mask>{{vlan.innermask}}</inner-vlan-id-mask>
{% endif %}
{% endif %}
{% endif %}
{% if arp_guard is not none %}
<arp-guard></arp-guard>
{% endif %}
{% if pcp is not none %} <pcp>{{pcp}}</pcp> {% endif %}
{% if pcp_force is not none %}
<pcp-force>{{pcp_force}}</pcp-force>
{% endif %}
{% if drop_precedence_force is not none %}
<drop-precedence-force>
{{drop_precedence_force}}
</drop-precedence-force>
{% endif %}
{% if mirror is not none %} <mirror></mirror> {% endif %}
{% endif %}
{% if count is not none %} <count></count> {% endif %}
{% if log is not none %}<log></log> {% endif %}
{% if copy_sflow is not none %}
<copy-sflow></copy-sflow>
{% endif %}
</seq>
{% if acl_type == "extended" %}
</hide-mac-acl-ext>
{% else %}
</hide-mac-acl-std>
{% endif %}
</{{acl_type}}>
</access-list>
</mac>
</config>
"""
acl_apply_mac = """
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<{intf_type}>
<name>{intf}</name>
<mac xmlns="urn:brocade.com:mgmt:brocade-mac-access-list">
<access-group {delete}>
<mac-access-list>{acl_name}</mac-access-list>
<mac-direction>{acl_direction}</mac-direction>
{traffic_type}
</access-group>
</mac>
</{intf_type}>
</interface>
"""
acl_apply_ipv4 = """
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<{intf_type}>
<name>{intf}</name>
<ip-acl-interface xmlns="urn:brocade.com:mgmt:brocade-ip-access-list">
<ip>
<access-group {delete}>
<ip-access-list>{acl_name}</ip-access-list>
<ip-direction>{acl_direction}</ip-direction>
{traffic_type}
</access-group>
</ip>
</ip-acl-interface>
</{intf_type}>
</interface>
"""
acl_apply_ipv6 = """
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
<{intf_type}>
<name>{intf}</name>
<ipv6>
<access-group xmlns="urn:brocade.com:mgmt:brocade-ipv6-access-list" {delete}>
<ipv6-access-list>{acl_name}</ipv6-access-list>
<ip-direction>{acl_direction}</ip-direction>
{traffic_type}
</access-group>
</ipv6>
</{intf_type}>
</interface>
"""
rbridge_acl_apply = """
<config>
<rbridge-id xmlns="urn:brocade.com:mgmt:brocade-rbridge">
<rbridge-id>{rbridge_id}</rbridge-id>
{acl_apply}
</rbridge-id>
</config>
"""
acl_rule_ip_bulk = """
<config>
<{{address_type}}-acl
xmlns="urn:brocade.com:mgmt:brocade-{{address_type}}-access-list">
<{{address_type}}>
<access-list>
<{{acl_type}}>
<name>{{acl_name}}</name>
{% if address_type == "ip" %}
{% if acl_type == "extended" %}
<hide-{{address_type}}-acl-ext>
{% else %}
<hide-{{address_type}}-acl-std>
{% endif %}
{% endif %}
{% for ud in user_data_list %}
<seq>
<seq-id>{{ud.seq_id}}</seq-id>
<action>{{ud.action}}</action>
<src-host-any-sip>{{ud.source.host_any}}</src-host-any-sip>
{% if ud.source.host_any != "any" %}
{% if ud.source.host_any == "host" %}
<src-host-ip>{{ud.source.host_ip}}</src-host-ip>
{% elif ud.source.mask is not none %}
<src-mask>{{ud.source.mask}}</src-mask>
{% endif %}
{% endif %}
{% if acl_type == "extended" %}
<protocol-type>{{ud.protocol_type}}</protocol-type>
{% if ud.source.xport is not none %}
<sport>{{ud.source.xport.op}}</sport>
{% if ud.source.xport.op == "eq" or ud.source.xport.op == "neq" %}
<sport-number-eq-neq-{{ud.protocol_type}}>
{{ud.source.xport.val[0]}}
</sport-number-eq-neq-{{ud.protocol_type}}>
{% elif ud.source.xport.op == "range" %}
<sport-number-range-lower-{{ud.protocol_type}}>
{{ud.source.xport.val[0]}}
</sport-number-range-lower-{{ud.protocol_type}}>
<sport-number-range-higher-{{ud.protocol_type}}>
{{ud.source.xport.val[1]}}
</sport-number-range-higher-{{ud.protocol_type}}>
{% else %}
<sport-number-{{ud.source.xport.op}}-{{ud.protocol_type}}>
{{ud.source.xport.val[0]}}
</sport-number-{{ud.source.xport.op}}-{{ud.protocol_type}}>
{% endif %}
{% endif %}
<dst-host-any-dip>{{ud.destination.host_any}}</dst-host-any-dip>
{% if ud.destination.host_any != "any" %}
{% if ud.destination.host_any == "host" %}
<dst-host-ip>{{ud.destination.host_ip}}</dst-host-ip>
{% elif ud.destination.mask is not none %}
<dst-mask>{{ud.destination.mask}}</dst-mask>
{% endif %}
{% endif %}
{% if ud.destination.xport is not none %}
<dport>{{ud.destination.xport.op}}</dport>
{% if ud.destination.xport.op == "eq" or
ud.destination.xport.op == "neq" %}
<dport-number-eq-neq-{{ud.protocol_type}}>
{{ud.destination.xport.val[0]}}
</dport-number-eq-neq-{{ud.protocol_type}}>
{% elif ud.destination.xport.op == "range" %}
<dport-number-range-lower-{{ud.protocol_type}}>
{{ud.destination.xport.val[0]}}
</dport-number-range-lower-{{ud.protocol_type}}>
<dport-number-range-higher-{{ud.protocol_type}}>
{{ud.destination.xport.val[1]}}
</dport-number-range-higher-{{ud.protocol_type}}>
{% else %}
<dport-number-{{ud.destination.xport.op}}-{{ud.protocol_type}}>
{{ud.destination.xport.val[0]}}
</dport-number-{{ud.destination.xport.op}}-{{ud.protocol_type}}>
{% endif %}
{% endif %}
{% if ud.dscp is not none %} <dscp>{{ud.dscp}}</dscp> {% endif %}
{% if ud.drop_precedence_force is not none %}
<drop-precedence-force>
{{ud.drop_precedence_force}}
</drop-precedence-force>
{% endif %}
{% if ud.vlan_id is not none %}
<vlan>{{ud.vlan_id}}</vlan>
{% endif %}
{% if ud.urg is not none %} <urg></urg> {% endif %}
{% if ud.ack is not none %} <ack></ack> {% endif %}
{% if ud.push is not none %} <push></push> {% endif %}
{% if ud.fin is not none %} <fin></fin> {% endif %}
{% if ud.rst is not none %} <rst></rst> {% endif %}
{% if ud.sync is not none %} <sync></sync> {% endif %}
{% if ud.mirror is not none %} <mirror></mirror> {% endif %}
{% endif %}
{% if ud.count is not none %} <count></count> {% endif %}
{% if ud.log is not none %}<log></log> {% endif %}
{% if ud.copy_sflow is not none %}
<copy-sflow></copy-sflow>
{% endif %}
</seq>
{% endfor %}
{% if address_type == "ip" %}
{% if acl_type == "extended" %}
</hide-{{address_type}}-acl-ext>
{% else %}
</hide-{{address_type}}-acl-std>
{% endif %}
{% endif %}
</{{acl_type}}>
</access-list>
</{{address_type}}>
</{{address_type}}-acl>
</config>
"""
acl_apply = """
<config>
{% if intf_type == 've' %}
<routing-system xmlns="urn:brocade.com:mgmt:brocade-common-def">
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
{% elif intf_type == 'vlan' %}
<interface-vlan xmlns="urn:brocade.com:mgmt:brocade-interface">
{% else %}
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
{% endif %}
<{{intf_type}}>
<name>{{intf}}</name>
{% if intf_type == 'management' %}
<{{address_type}}>
<access-group
xmlns="urn:brocade.com:mgmt:brocade-{{address_type}}-access-list">
<mgmt-{{address_type}}-access-list>{{acl_name}}</mgmt-{{address_type}}-access-list>
<mgmt-ip-direction>{{acl_direction}}</mgmt-ip-direction>
</access-group>
</{{address_type}}>
{% elif address_type == 'mac' %}
<mac xmlns="urn:brocade.com:mgmt:brocade-mac-access-list">
<access-group>
<mac-access-list>{{acl_name}}</mac-access-list>
<mac-direction>{{acl_direction}}</mac-direction>
{% if traffic_type is not none %}
<traffic-type>{{traffic_type}}</traffic-type>
{% endif %}
</access-group>
</mac>
{% elif address_type == 'ip' %}
<ip-acl-interface
xmlns="urn:brocade.com:mgmt:brocade-ip-access-list">
<ip>
<access-group>
<ip-access-list>{{acl_name}}</ip-access-list>
<ip-direction>{{acl_direction}}</ip-direction>
{% if traffic_type is not none %}
<traffic-type>{{traffic_type}}</traffic-type>
{% endif %}
</access-group>
</ip>
</ip-acl-interface>
{% elif address_type == 'ipv6' %}
{% if intf_type == 've' %}
<ipv6 xmlns="urn:brocade.com:mgmt:brocade-ipv6-config">
{% else %}
<ipv6>
{% endif %}
<access-group
xmlns="urn:brocade.com:mgmt:brocade-ipv6-access-list">
<ipv6-access-list>{{acl_name}}</ipv6-access-list>
<ip-direction>{{acl_direction}}</ip-direction>
{% if traffic_type is not none %}
<traffic-type>{{traffic_type}}</traffic-type>
{% endif %}
</access-group>
</ipv6>
{% endif %}
</{{intf_type}}>
{% if intf_type == 've' %}
</interface>
</routing-system>
{% elif intf_type == 'vlan' %}
</interface-vlan>
{% else %}
</interface>
{% endif %}
</config>
"""
acl_remove = """
<config>
{% if intf_type == 've' %}
<routing-system xmlns="urn:brocade.com:mgmt:brocade-common-def">
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
{% elif intf_type == 'vlan' %}
<interface-vlan xmlns="urn:brocade.com:mgmt:brocade-interface">
{% else %}
<interface xmlns="urn:brocade.com:mgmt:brocade-interface">
{% endif %}
<{{intf_type}}>
<name>{{intf}}</name>
{% if intf_type == 'management' %}
<{{address_type}}>
<access-group
xmlns="urn:brocade.com:mgmt:brocade-{{address_type}}-access-list"
operation="delete">
<mgmt-{{address_type}}-access-list>{{acl_name}}</mgmt-{{address_type}}-access-list>
<mgmt-ip-direction>{{acl_direction}}</mgmt-ip-direction>
</access-group>
</{{address_type}}>
{% elif address_type == 'mac' %}
<mac xmlns="urn:brocade.com:mgmt:brocade-mac-access-list">
<access-group operation="delete">
<mac-access-list>{{acl_name}}</mac-access-list>
<mac-direction>{{acl_direction}}</mac-direction>
{% if traffic_type is not none %}
<traffic-type>{{traffic_type}}</traffic-type>
{% endif %}
</access-group>
</mac>
{% elif address_type == 'ip' %}
<ip-acl-interface
xmlns="urn:brocade.com:mgmt:brocade-ip-access-list">
<ip>
<access-group operation="delete">
<ip-access-list>{{acl_name}}</ip-access-list>
<ip-direction>{{acl_direction}}</ip-direction>
{% if traffic_type is not none %}
<traffic-type>{{traffic_type}}</traffic-type>
{% endif %}
</access-group>
</ip>
</ip-acl-interface>
{% elif address_type == 'ipv6' %}
{% if intf_type == 've' %}
<ipv6 xmlns="urn:brocade.com:mgmt:brocade-ipv6-config">
{% else %}
<ipv6>
{% endif %}
<access-group
xmlns="urn:brocade.com:mgmt:brocade-ipv6-access-list"
operation="delete">
<ipv6-access-list>{{acl_name}}</ipv6-access-list>
<ip-direction>{{acl_direction}}</ip-direction>
{% if traffic_type is not none %}
<traffic-type>{{traffic_type}}</traffic-type>
{% endif %}
</access-group>
</ipv6>
{% endif %}
</{{intf_type}}>
{% if intf_type == 've' %}
</interface>
</routing-system>
{% elif intf_type == 'vlan' %}
</interface-vlan>
{% else %}
</interface>
{% endif %}
</config>
"""
get_interface_by_name = """
<get-config>
<source> <running/> </source>
{% if intf_type == "ve" %}
<nc:filter type="xpath" select="/routing-system/interface/\
{{intf_type}}/name[text()=\'{{intf}}\']"></nc:filter>
{% elif intf_type == "vlan" %}
<nc:filter type="xpath" select="/interface-vlan/\
{{intf_type}}/name[text()=\'{{intf}}\']"></nc:filter>
{% else %}
<nc:filter type="xpath" select="/interface/{{intf_type}}/\
name[text()=\'{{intf}}\']"></nc:filter>
{% endif %}
</get-config>
"""
acl_rule_mac_bulk = """
<config>
<mac xmlns="urn:brocade.com:mgmt:brocade-mac-access-list">
<access-list>
<{{acl_type}}>
<name>{{acl_name}}</name>
{% if acl_type == "extended" %}
<hide-mac-acl-ext>
{% else %}
<hide-mac-acl-std>
{% endif %}
{% for ud in user_data_list %}
<seq>
<seq-id>{{ud.seq_id}}</seq-id>
<action>{{ud.action}}</action>
<source>{{ud.source.source}}</source>
{% if ud.source.source != "any" %}
{% if ud.source.source == "host" %}
<srchost>{{ud.source.srchost}}</srchost>
{% else %}
<src-mac-addr-mask>{{ud.source.mask}}</src-mac-addr-mask>
{% endif %}
{% endif %}
{% if acl_type == "extended" %}
<dst>{{ud.dst.dst}}</dst>
{% if ud.dst.dst != "any" %}
{% if ud.dst.dst == "host" %}
<dsthost>{{ud.dst.dsthost}}</dsthost>
{% else %}
<dst-mac-addr-mask>{{ud.dst.mask}}</dst-mac-addr-mask>
{% endif %}
{% endif %}
{% if ud.ethertype is not none %}
<ethertype>{{ud.ethertype}}</ethertype>
{% endif %}
{% if ud.vlan_tag_format is not none %}
<vlan-tag-format>{{ud.vlan_tag_format}}</vlan-tag-format>
{% endif %}
{% if ud.vlan is not none %}
{% if ud.vlan_tag_format is none %}
<vlan>{{ud.vlan.vlan_id}}</vlan>
{% elif ud.vlan_tag_format == "untagged" %}
<vlan>{{ud.vlan.vlan_id}}</vlan>
{% elif ud.vlan_tag_format == "single-tagged" %}
<vlan>{{ud.vlan.vlan_id}}</vlan>
{% if ud.vlan.mask is not none %}
<vlan-id-mask>{{ud.vlan.mask}}</vlan-id-mask>
{% endif %}
{% elif ud.vlan_tag_format == "double-tagged" %}
<outer-vlan>{{ud.vlan.outervlan}}</outer-vlan>
{% if ud.vlan.outermask is not none %}
<outer-vlan-id-mask>{{ud.vlan.outermask}}</outer-vlan-id-mask>
{% endif %}
<inner-vlan>{{ud.vlan.innervlan}}</inner-vlan>
{% if ud.vlan.innermask is not none %}
<inner-vlan-id-mask>{{ud.vlan.innermask}}</inner-vlan-id-mask>
{% endif %}
{% endif %}
{% endif %}
{% if ud.arp_guard is not none %}
<arp-guard></arp-guard>
{% endif %}
{% if ud.pcp is not none %} <pcp>{{ud.pcp}}</pcp> {% endif %}
{% if ud.pcp_force is not none %}
<pcp-force>{{ud.pcp_force}}</pcp-force>
{% endif %}
{% if ud.drop_precedence_force is not none %}
<drop-precedence-force>
{{ud.drop_precedence_force}}
</drop-precedence-force>
{% endif %}
{% if ud.mirror is not none %} <mirror></mirror> {% endif %}
{% endif %}
{% if ud.count is not none %} <count></count> {% endif %}
{% if ud.log is not none %}<log></log> {% endif %}
{% if ud.copy_sflow is not none %}
<copy-sflow></copy-sflow>
{% endif %}
</seq>
{% endfor %}
{% if acl_type == "extended" %}
</hide-mac-acl-ext>
{% else %}
</hide-mac-acl-std>
{% endif %}
</{{acl_type}}>
</access-list>
</mac>
</config>
"""
| StarcoderdataPython |
90628 | # AUTOGENERATED! DO NOT EDIT! File to edit: 04_device.ipynb (unless otherwise specified).
__all__ = ['versions']
# Cell
import torch
import fastai
# Cell
def versions():
    "Checks whether a GPU is enabled and, if so, displays the device details along with the CUDA, cuDNN, PyTorch and fastai versions"
    print("GPU: ", torch.cuda.is_available())
    if torch.cuda.is_available():
        print("Device = ", torch.device(torch.cuda.current_device()))
        print("Cuda version - ", torch.version.cuda)
        print("cuDNN version - ", torch.backends.cudnn.version())
        print("PyTorch version - ", torch.__version__)
        print("fastai version - ", fastai.__version__)
| StarcoderdataPython
105434 | """
Implementation of the Deep Embedded Self-Organizing Map model
SOM layer
@author <NAME>
@version 1.0
"""
import tensorflow as tf
from tensorflow import keras # using Tensorflow's Keras API
from tensorflow.keras.layers import Layer, InputSpec
class SOMLayer(Layer):
"""
Self-Organizing Map layer class with rectangular topology
# Example
```
model.add(SOMLayer(map_size=(10,10)))
```
# Arguments
map_size: Tuple representing the size of the rectangular map. Number of prototypes is map_size[0]*map_size[1].
    prototypes: Numpy array with shape `(n_prototypes, latent_dim)` which represents the initial cluster centers
# Input shape
2D tensor with shape: `(n_samples, latent_dim)`
# Output shape
2D tensor with shape: `(n_samples, n_prototypes)`
"""
def __init__(self, map_size, prototypes=None, **kwargs):
if 'input_shape' not in kwargs and 'latent_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('latent_dim'),)
super(SOMLayer, self).__init__(**kwargs)
self.map_size = map_size
self.n_prototypes = map_size[0]*map_size[1]
self.initial_prototypes = prototypes
self.input_spec = InputSpec(ndim=2)
def build(self, input_shape):
assert(len(input_shape) == 2)
input_dim = input_shape[1]
self.input_spec = InputSpec(dtype=tf.float32, shape=(None, input_dim))
self.prototypes = self.add_weight(shape=(self.n_prototypes, input_dim), initializer='glorot_uniform', name='prototypes')
if self.initial_prototypes is not None:
self.set_weights(self.initial_prototypes)
del self.initial_prototypes
self.built = True
def call(self, inputs, **kwargs):
"""
Calculate pairwise squared euclidean distances between inputs and prototype vectors
Arguments:
inputs: the variable containing data, Tensor with shape `(n_samples, latent_dim)`
Return:
d: distances between inputs and prototypes, Tensor with shape `(n_samples, n_prototypes)`
"""
# Note: (tf.expand_dims(inputs, axis=1) - self.prototypes) has shape (n_samples, n_prototypes, latent_dim)
d = tf.reduce_sum(tf.square(tf.expand_dims(inputs, axis=1) - self.prototypes), axis=2)
return d
def compute_output_shape(self, input_shape):
assert(input_shape and len(input_shape) == 2)
return input_shape[0], self.n_prototypes
def get_config(self):
config = {'map_size': self.map_size}
base_config = super(SOMLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
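# Illustrative usage sketch (layer sizes are placeholders): the layer maps a
# batch of latent vectors to their squared distances from each of the
# map_size[0]*map_size[1] prototypes.
#   inputs = keras.Input(shape=(32,))
#   z = keras.layers.Dense(16)(inputs)
#   distances = SOMLayer(map_size=(10, 10))(z)  # shape: (batch, 100)
#   model = keras.Model(inputs, distances)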
| StarcoderdataPython |
133039 | <filename>junebug/tests/test_channel.py
import logging
import json
from twisted.internet.defer import inlineCallbacks
from vumi.message import TransportUserMessage, TransportStatus
from vumi.transports.telnet import TelnetServerTransport
from junebug.utils import api_from_message, api_from_status, conjoin
from junebug.workers import ChannelStatusWorker, MessageForwardingWorker
from junebug.channel import (
Channel, ChannelNotFound, InvalidChannelType, MessageNotFound)
from junebug.logging_service import JunebugLoggerService
from junebug.tests.helpers import JunebugTestBase, FakeJunebugPlugin
class TestChannel(JunebugTestBase):
@inlineCallbacks
def setUp(self):
self.patch_logger()
yield self.start_server()
@inlineCallbacks
def test_save_channel(self):
properties = self.create_channel_properties()
channel = yield self.create_channel(
self.service, self.redis)
props = yield self.redis.get('%s:properties' % channel.id)
self.assertEqual(json.loads(props), conjoin(properties, {
'config': conjoin(
properties['config'], {'transport_name': channel.id})
}))
channel_list = yield self.redis.get('channels')
self.assertEqual(channel_list, set([channel.id]))
@inlineCallbacks
def test_delete_channel(self):
properties = self.create_channel_properties()
channel = yield self.create_channel(
self.service, self.redis)
props = yield self.redis.get('%s:properties' % channel.id)
self.assertEqual(json.loads(props), conjoin(properties, {
'config': conjoin(
properties['config'], {'transport_name': channel.id})
}))
channel_list = yield self.redis.get('channels')
self.assertEqual(channel_list, set([channel.id]))
yield channel.delete()
properties = yield self.redis.get('%s:properties' % channel.id)
self.assertEqual(properties, None)
channel_list = yield self.redis.get('channels')
self.assertEqual(channel_list, set())
@inlineCallbacks
def test_start_channel_transport(self):
'''Starting the channel should start the transport, as well as the
logging service for that transport.'''
channel = yield self.create_channel(
self.service, self.redis)
worker = self.service.getServiceNamed(channel.id)
self.assertEqual(worker, channel.transport_worker)
self.assertTrue(isinstance(worker, TelnetServerTransport))
logging_worker = worker.getServiceNamed('Junebug Worker Logger')
self.assertTrue(
isinstance(logging_worker, channel.JUNEBUG_LOGGING_SERVICE_CLS))
@inlineCallbacks
def test_start_channel_logging(self):
'''When the channel is started, the logging worker should be started
along with it.'''
channel = yield self.create_channel(
self.service, self.redis,
'junebug.tests.helpers.LoggingTestTransport')
worker_logger = channel.transport_worker.getServiceNamed(
'Junebug Worker Logger')
self.assertTrue(isinstance(worker_logger, JunebugLoggerService))
@inlineCallbacks
def test_channel_logging_single_channel(self):
'''All logs from a single channel should go to the logging worker.'''
channel = yield self.create_channel(
self.service, self.redis,
'junebug.tests.helpers.LoggingTestTransport')
worker_logger = channel.transport_worker.getServiceNamed(
'Junebug Worker Logger')
worker_logger.startService()
channel.transport_worker.test_log('Test message1')
channel.transport_worker.test_log('Test message2')
[log1, log2] = worker_logger.logfile.logs
self.assertEqual(json.loads(log1)['message'], 'Test message1')
self.assertEqual(json.loads(log2)['message'], 'Test message2')
@inlineCallbacks
def test_channel_logging_multiple_channels(self):
        '''Logs from each channel should go only to that channel's own logging worker.'''
channel1 = yield self.create_channel(
self.service, self.redis,
'junebug.tests.helpers.LoggingTestTransport')
worker_logger1 = channel1.transport_worker.getServiceNamed(
'Junebug Worker Logger')
channel2 = yield self.create_channel(
self.service, self.redis,
'junebug.tests.helpers.LoggingTestTransport')
worker_logger2 = channel2.transport_worker.getServiceNamed(
'Junebug Worker Logger')
worker_logger1.startService()
worker_logger2.startService()
channel1.transport_worker.test_log('Test message1')
channel1.transport_worker.test_log('Test message2')
[log1, log2] = worker_logger1.logfile.logs
self.assertEqual(json.loads(log1)['message'], 'Test message1')
self.assertEqual(json.loads(log2)['message'], 'Test message2')
channel2.transport_worker.test_log('Test message3')
self.assertEqual(len(worker_logger1.logfile.logs), 2)
[log3] = worker_logger2.logfile.logs
self.assertEqual(json.loads(log3)['message'], 'Test message3')
@inlineCallbacks
def test_transport_class_name_default(self):
config = yield self.create_channel_config(channels={})
properties = self.create_channel_properties(type='telnet')
channel = Channel(self.redis, config, properties)
self.assertEqual(
channel._transport_cls_name,
'vumi.transports.telnet.TelnetServerTransport')
@inlineCallbacks
def test_transport_class_name_specified(self):
config = yield self.create_channel_config(channels={'foo': 'bar.baz'})
properties = self.create_channel_properties(type='foo')
channel = Channel(self.redis, config, properties)
self.assertEqual(
channel._transport_cls_name,
'bar.baz')
@inlineCallbacks
def test_transport_class_name_overridden(self):
config = yield self.create_channel_config(
channels={'foo': 'bar.baz'}, replace_channels=True)
properties = self.create_channel_properties(type='telnet')
channel = Channel(self.redis, config, properties)
err = self.assertRaises(
InvalidChannelType, getattr, channel, '_transport_cls_name')
self.assertTrue(all(cls in err.message for cls in ['telnet', 'foo']))
@inlineCallbacks
def test_start_channel_application(self):
properties = self.create_channel_properties(mo_url='http://foo.org')
channel = yield self.create_channel(
self.service, self.redis, properties=properties)
worker = channel.application_worker
id = channel.application_id
self.assertTrue(isinstance(worker, MessageForwardingWorker))
self.assertEqual(self.service.namedServices[id], worker)
self.assertEqual(worker.config, {
'transport_name': channel.id,
'mo_message_url': 'http://foo.org',
'mo_message_url_auth_token': None,
'message_queue': None,
'redis_manager': channel.config.redis,
'inbound_ttl': channel.config.inbound_message_ttl,
'outbound_ttl': channel.config.outbound_message_ttl,
'metric_window': channel.config.metric_window,
})
@inlineCallbacks
def test_start_channel_status_application(self):
properties = self.create_channel_properties()
channel = yield self.create_channel(
self.service, self.redis, properties=properties)
worker = channel.status_application_worker
id = channel.status_application_id
self.assertTrue(isinstance(worker, ChannelStatusWorker))
self.assertEqual(self.service.namedServices[id], worker)
self.assertEqual(worker.config, {
'redis_manager': channel.config.redis,
'channel_id': channel.id,
'status_url': None,
})
@inlineCallbacks
def test_start_channel_status_application_status_url(self):
properties = self.create_channel_properties(status_url='example.org')
channel = yield self.create_channel(
self.service, self.redis, properties=properties)
worker = channel.status_application_worker
self.assertEqual(worker.config['status_url'], 'example.org')
@inlineCallbacks
def test_channel_character_limit(self):
'''`character_limit` parameter should return the character limit, or
`None` if no character limit was specified'''
properties_limit = self.create_channel_properties(character_limit=100)
properties_no_limit = self.create_channel_properties()
channel_limit = yield self.create_channel(
self.service, self.redis, properties=properties_limit)
channel_no_limit = yield self.create_channel(
self.service, self.redis, properties=properties_no_limit)
self.assertEqual(channel_limit.character_limit, 100)
self.assertEqual(channel_no_limit.character_limit, None)
@inlineCallbacks
def test_create_channel_invalid_type(self):
channel = yield self.create_channel(
self.service, self.redis)
channel._properties['type'] = 'foo'
err = yield self.assertFailure(channel.start(None), InvalidChannelType)
self.assertTrue(all(
s in err.message for s in ('xmpp', 'telnet', 'foo')))
@inlineCallbacks
def test_start_channel_plugins_called(self):
'''Starting a channel should call `channel_started` on all plugins'''
plugin = FakeJunebugPlugin()
plugin.calls = []
channel = yield self.create_channel(
self.service, self.redis, plugins=[plugin])
[(name, [plugin_channel])] = plugin.calls
self.assertEqual(name, 'channel_started')
self.assertEqual(plugin_channel, channel)
@inlineCallbacks
def test_stop_channel_plugins_called(self):
'''Stopping a channel should call `channel_stopped` on all plugins'''
plugin = FakeJunebugPlugin()
plugin.calls = []
channel = yield self.create_channel(
self.service, self.redis, plugins=[plugin])
plugin.calls = []
yield channel.stop()
[(name, [plugin_channel])] = plugin.calls
self.assertEqual(name, 'channel_stopped')
self.assertEqual(plugin_channel, channel)
@inlineCallbacks
def test_update_channel_config(self):
properties = self.create_channel_properties()
channel = yield self.create_channel(
self.service, self.redis)
update = yield channel.update({'foo': 'bar'})
self.assertEqual(update, conjoin(properties, {
'foo': 'bar',
'status': self.generate_status(),
'id': channel.id,
'config': conjoin(properties['config'], {
'transport_name': channel.id
})
}))
@inlineCallbacks
def test_update_channel_restart_transport_on_config_change(self):
channel = yield self.create_channel(
self.service, self.redis)
worker1 = channel.transport_worker
self.assertEqual(self.service.namedServices[channel.id], worker1)
yield channel.update({'foo': 'bar'})
self.assertEqual(self.service.namedServices[channel.id], worker1)
properties = self.create_channel_properties()
properties['config']['foo'] = ['bar']
yield channel.update(properties)
worker2 = channel.transport_worker
self.assertEqual(self.service.namedServices[channel.id], worker2)
self.assertTrue(worker1 not in self.service.services)
@inlineCallbacks
def test_update_channel_restart_application_on_config_change(self):
channel = yield self.create_channel(
self.service, self.redis)
worker1 = channel.application_worker
id = channel.application_id
self.assertEqual(self.service.namedServices[id], worker1)
yield channel.update({'foo': 'bar'})
self.assertEqual(self.service.namedServices[id], worker1)
properties = self.create_channel_properties(mo_url='http://baz.org')
yield channel.update(properties)
worker2 = channel.application_worker
self.assertEqual(self.service.namedServices[id], worker2)
self.assertTrue(worker1 not in self.service.services)
@inlineCallbacks
def test_stop_channel(self):
channel = yield self.create_channel(
self.service, self.redis)
self.assertEqual(
self.service.namedServices[channel.id], channel.transport_worker)
yield channel.stop()
self.assertEqual(self.service.namedServices.get(channel.id), None)
application_id = channel.application_id
self.assertEqual(self.service.namedServices.get(application_id), None)
status_application_id = channel.status_application_id
self.assertEqual(
self.service.namedServices.get(status_application_id), None)
@inlineCallbacks
def test_create_channel_from_id(self):
channel1 = yield self.create_channel(
self.service, self.redis)
channel2 = yield self.create_channel_from_id(
self.redis, {}, channel1.id, self.service)
self.assertEqual((yield channel1.status()), (yield channel2.status()))
self.assertEqual(
channel1.transport_worker,
channel2.transport_worker)
self.assertEqual(
channel1.application_worker,
channel2.application_worker)
self.assertEqual(
channel1.status_application_worker,
channel2.status_application_worker)
@inlineCallbacks
def test_create_channel_from_unknown_id(self):
yield self.assertFailure(
self.create_channel_from_id(
self.redis, {}, 'unknown-id', self.service),
ChannelNotFound)
@inlineCallbacks
def test_channel_status_empty(self):
properties = self.create_channel_properties()
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
self.assertEqual((yield channel.status()), conjoin(properties, {
'status': self.generate_status(),
'id': 'channel-id',
'config': conjoin(properties['config'], {
'transport_name': channel.id
})
}))
@inlineCallbacks
def test_channel_status_single_status(self):
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
status = TransportStatus(
status='ok',
component='foo',
type='bar',
message='Bar')
yield channel.sstore.store_status('channel-id', status)
self.assert_status((yield channel.status())['status'], components={
'foo': api_from_status('channel-id', status),
}, level='ok')
@inlineCallbacks
def test_channel_multiple_statuses_ok(self):
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
components = {}
for i in range(5):
status = TransportStatus(
status='ok',
component=i,
type='bar',
message='Bar')
yield channel.sstore.store_status('channel-id', status)
components[str(i)] = api_from_status('channel-id', status)
self.assert_status(
(yield channel.status())['status'], level='ok',
components=components)
@inlineCallbacks
def test_channel_multiple_statuses_degraded(self):
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
components = {}
for i in range(5):
status = TransportStatus(
status='ok',
component=i,
type='bar',
message='Bar')
yield channel.sstore.store_status('channel-id', status)
components[str(i)] = api_from_status('channel-id', status)
status = TransportStatus(
status='degraded',
component=5,
type='bar',
message='Bar')
yield channel.sstore.store_status('channel-id', status)
components['5'] = api_from_status('channel-id', status)
self.assert_status(
(yield channel.status())['status'], level='degraded',
components=components)
@inlineCallbacks
def test_channel_multiple_statuses_down(self):
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
components = {}
for i in range(5):
status = TransportStatus(
status='ok',
component=i,
type='bar',
message='Bar')
yield channel.sstore.store_status('channel-id', status)
components[str(i)] = api_from_status('channel-id', status)
status = TransportStatus(
status='degraded',
component=5,
type='bar',
message='Bar')
yield channel.sstore.store_status('channel-id', status)
components['5'] = api_from_status('channel-id', status)
status = TransportStatus(
status='down',
component=6,
type='bar',
message='Bar')
yield channel.sstore.store_status('channel-id', status)
components['6'] = api_from_status('channel-id', status)
self.assert_status(
(yield channel.status())['status'], level='down',
components=components)
@inlineCallbacks
def test_get_all(self):
channels = yield Channel.get_all(self.redis)
self.assertEqual(channels, set())
channel1 = yield self.create_channel(
self.service, self.redis)
channels = yield Channel.get_all(self.redis)
self.assertEqual(channels, set([channel1.id]))
channel2 = yield self.create_channel(
self.service, self.redis)
channels = yield Channel.get_all(self.redis)
self.assertEqual(channels, set([channel1.id, channel2.id]))
@inlineCallbacks
def test_start_all_channels(self):
yield Channel.start_all_channels(
self.redis, self.config, self.service)
channel1 = yield self.create_channel(
self.service, self.redis)
self.assertTrue(channel1.id in self.service.namedServices)
yield channel1.stop()
self.assertFalse(channel1.id in self.service.namedServices)
yield Channel.start_all_channels(
self.redis, self.config, self.service)
self.assertTrue(channel1.id in self.service.namedServices)
channel2 = yield self.create_channel(
self.service, self.redis)
self.assertTrue(channel2.id in self.service.namedServices)
yield channel2.stop()
self.assertFalse(channel2.id in self.service.namedServices)
yield Channel.start_all_channels(
self.redis, self.config, self.service)
self.assertTrue(channel1.id in self.service.namedServices)
self.assertTrue(channel2.id in self.service.namedServices)
@inlineCallbacks
def test_send_message(self):
'''The send_message function should place the message on the correct
queue'''
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
msg = yield channel.send_message(
self.message_sender, self.outbounds, {
'from': '+1234',
'content': 'testcontent',
})
self.assertEqual(msg['channel_id'], 'channel-id')
self.assertEqual(msg['from'], '+1234')
self.assertEqual(msg['content'], 'testcontent')
[dispatched_message] = self.get_dispatched_messages(
'channel-id.outbound')
self.assertEqual(msg['message_id'], dispatched_message['message_id'])
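    # As exercised above, outbound messages for a channel are dispatched on
    # the '<channel_id>.outbound' queue; the reply tests below rely on the
    # same routing convention.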
@inlineCallbacks
def test_send_message_event_url(self):
'''Sending a message with a specified event url should store the event
url for sending events in the future'''
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
msg = yield channel.send_message(
self.message_sender, self.outbounds, {
'from': '+1234',
'content': 'testcontent',
'event_url': 'http://test.org'
})
event_url = yield self.outbounds.load_event_url(
'channel-id', msg['message_id'])
self.assertEqual(event_url, 'http://test.org')
@inlineCallbacks
def test_send_message_with_event_auth_token(self):
'''Sending a message with a specified event_auth_token should store the
token for sending events in the future'''
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
msg = yield channel.send_message(
self.message_sender, self.outbounds, {
'from': '+1234',
'content': 'testcontent',
'event_url': 'http://test.org',
'event_auth_token': "<PASSWORD>-auth-token",
})
event_auth_token = yield self.outbounds.load_event_auth_token(
'channel-id', msg['message_id'])
self.assertEqual(event_auth_token, "the-auth-token")
@inlineCallbacks
def test_send_reply_message(self):
'''send_reply_message should place the correct reply message on the
correct queue'''
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
in_msg = TransportUserMessage(
from_addr='+2789',
to_addr='+1234',
transport_name='channel-id',
transport_type='_',
transport_metadata={'foo': 'bar'})
yield self.api.inbounds.store_vumi_message('channel-id', in_msg)
msg = yield channel.send_reply_message(
self.message_sender, self.outbounds, self.inbounds, {
'reply_to': in_msg['message_id'],
'content': 'testcontent',
})
expected = in_msg.reply(content='testcontent')
expected = conjoin(api_from_message(expected), {
'timestamp': msg['timestamp'],
'message_id': msg['message_id']
})
self.assertEqual(msg, expected)
[dispatched] = self.get_dispatched_messages('channel-id.outbound')
self.assertEqual(msg['message_id'], dispatched['message_id'])
self.assertEqual(api_from_message(dispatched), expected)
@inlineCallbacks
def test_send_reply_message_inbound_not_found(self):
'''send_reply_message should raise an error if the inbound message is
not found'''
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
self.assertFailure(channel.send_reply_message(
self.message_sender, self.outbounds, self.inbounds, {
'reply_to': 'i-do-not-exist',
'content': 'testcontent',
}), MessageNotFound)
@inlineCallbacks
def test_send_reply_message_event_url(self):
'''Sending a message with a specified event url should store the event
url for sending events in the future'''
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
in_msg = TransportUserMessage(
from_addr='+2789',
to_addr='+1234',
transport_name='channel-id',
transport_type='_',
transport_metadata={'foo': 'bar'})
yield self.api.inbounds.store_vumi_message('channel-id', in_msg)
msg = yield channel.send_reply_message(
self.message_sender, self.outbounds, self.inbounds, {
'reply_to': in_msg['message_id'],
'content': 'testcontent',
'event_url': 'http://test.org',
})
event_url = yield self.outbounds.load_event_url(
'channel-id', msg['message_id'])
self.assertEqual(event_url, 'http://test.org')
@inlineCallbacks
def test_send_reply_message_with_event_auth_token(self):
'''Sending a message with a specified event auth token should store the
token for sending events in the future'''
channel = yield self.create_channel(
self.service, self.redis, id='channel-id')
in_msg = TransportUserMessage(
from_addr='+2789',
to_addr='+1234',
transport_name='channel-id',
transport_type='_',
transport_metadata={'foo': 'bar'})
yield self.api.inbounds.store_vumi_message('channel-id', in_msg)
msg = yield channel.send_reply_message(
self.message_sender, self.outbounds, self.inbounds, {
'reply_to': in_msg['message_id'],
'content': 'testcontent',
'event_url': 'http://test.org',
'event_auth_token': "<PASSWORD>-auth-<PASSWORD>",
})
event_auth_token = yield self.outbounds.load_event_auth_token(
'channel-id', msg['message_id'])
self.assertEqual(event_auth_token, "the-auth-token")
@inlineCallbacks
def test_channel_status_inbound_message_rates(self):
        '''When inbound messages are being received, it should affect the
        inbound message rate reported by the status'''
clock = self.patch_message_rate_clock()
channel = yield self.create_channel(
self.service, self.redis, id=u'channel-id')
yield self.api.message_rate.increment(
channel.id, 'inbound', channel.config.metric_window)
clock.advance(channel.config.metric_window)
self.assert_status(
(yield channel.status())['status'],
inbound_message_rate=1.0/channel.config.metric_window)
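    # The expected rates in this and the following tests fall out of the
    # metric definition: rate = increments within the window / metric_window
    # seconds, so a single increment yields 1.0 / metric_window.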
@inlineCallbacks
def test_channel_status_outbound_message_rates(self):
'''When outbound messages are being sent, it should affect the
outbound message rate reported by the status'''
clock = self.patch_message_rate_clock()
channel = yield self.create_channel(
self.service, self.redis, id=u'channel-id')
yield self.api.message_rate.increment(
channel.id, 'outbound', channel.config.metric_window)
clock.advance(channel.config.metric_window)
self.assert_status(
(yield channel.status())['status'],
outbound_message_rate=1.0/channel.config.metric_window)
@inlineCallbacks
def test_channel_status_submitted_event_rate(self):
'''When submitted events are being received, it should affect the
submitted event rate reported by the status'''
clock = self.patch_message_rate_clock()
channel = yield self.create_channel(
self.service, self.redis, id=u'channel-id')
yield self.api.message_rate.increment(
channel.id, 'submitted', channel.config.metric_window)
clock.advance(channel.config.metric_window)
self.assert_status(
(yield channel.status())['status'],
submitted_event_rate=1.0/channel.config.metric_window)
@inlineCallbacks
def test_channel_status_rejected_event_rate(self):
'''When rejected events are being received, it should affect the
rejected event rate reported by the status'''
clock = self.patch_message_rate_clock()
channel = yield self.create_channel(
self.service, self.redis, id=u'channel-id')
yield self.api.message_rate.increment(
channel.id, 'rejected', channel.config.metric_window)
clock.advance(channel.config.metric_window)
self.assert_status(
(yield channel.status())['status'],
rejected_event_rate=1.0/channel.config.metric_window)
@inlineCallbacks
def test_channel_status_delivery_succeeded_rate(self):
'''When delivery_succeeded events are being received, it should affect
the delivery succeeded event rate reported by the status'''
clock = self.patch_message_rate_clock()
channel = yield self.create_channel(
self.service, self.redis, id=u'channel-id')
yield self.api.message_rate.increment(
channel.id, 'delivery_succeeded', channel.config.metric_window)
clock.advance(channel.config.metric_window)
self.assert_status(
(yield channel.status())['status'],
delivery_succeeded_rate=1.0/channel.config.metric_window)
@inlineCallbacks
def test_channel_status_delivery_failed_rate(self):
'''When delivery_failed events are being received, it should affect
the delivery failed event rate reported by the status'''
clock = self.patch_message_rate_clock()
channel = yield self.create_channel(
self.service, self.redis, id=u'channel-id')
yield self.api.message_rate.increment(
channel.id, 'delivery_failed', channel.config.metric_window)
clock.advance(channel.config.metric_window)
self.assert_status(
(yield channel.status())['status'],
delivery_failed_rate=1.0/channel.config.metric_window)
@inlineCallbacks
def test_channel_status_delivery_pending_rate(self):
'''When delivery_pending events are being received, it should affect
the delivery pending event rate reported by the status'''
clock = self.patch_message_rate_clock()
channel = yield self.create_channel(
self.service, self.redis, id=u'channel-id')
yield self.api.message_rate.increment(
channel.id, 'delivery_pending', channel.config.metric_window)
clock.advance(channel.config.metric_window)
self.assert_status(
(yield channel.status())['status'],
delivery_pending_rate=1.0/channel.config.metric_window)
@inlineCallbacks
def test_get_logs_more_than_available(self):
'''If the amount of available logs is less than what is requested,
all the logs will be returned.'''
channel = yield self.create_channel(
self.service, self.redis,
'junebug.tests.helpers.LoggingTestTransport')
worker_logger = channel.transport_worker.getServiceNamed(
'Junebug Worker Logger')
worker_logger.startService()
channel.transport_worker.test_log('Test message1')
[log] = channel.get_logs(2)
self.assert_log(log, {
'logger': channel.id, 'message': 'Test message1',
'level': logging.INFO})
@inlineCallbacks
def test_get_logs_less_than_available(self):
'''If the amount of available logs is more than what is requested,
only the requested amount will be returned.'''
channel = yield self.create_channel(
self.service, self.redis,
'junebug.tests.helpers.LoggingTestTransport')
worker_logger = channel.transport_worker.getServiceNamed(
'Junebug Worker Logger')
worker_logger.startService()
channel.transport_worker.test_log('Test message1')
channel.transport_worker.test_log('Test message2')
channel.transport_worker.test_log('Test message3')
[log1, log2] = channel.get_logs(2)
self.assert_log(log1, {
'logger': channel.id, 'message': 'Test message3',
'level': logging.INFO})
self.assert_log(log2, {
'logger': channel.id, 'message': 'Test message2',
'level': logging.INFO})
@inlineCallbacks
def test_get_logs_more_than_configured(self):
'''If the amount of logs requested is more than the configured
maximum, then only the configured maximum amount is returned.'''
logpath = self.mktemp()
config = yield self.create_channel_config(
max_logs=2,
channels={
'logging': 'junebug.tests.helpers.LoggingTestTransport',
},
logging_path=logpath
)
properties = yield self.create_channel_properties(type='logging')
channel = yield self.create_channel(
self.service, self.redis, config=config, properties=properties)
worker_logger = channel.transport_worker.getServiceNamed(
'Junebug Worker Logger')
worker_logger.startService()
channel.transport_worker.test_log('Test message1')
channel.transport_worker.test_log('Test message2')
channel.transport_worker.test_log('Test message3')
[log1, log2] = channel.get_logs(3)
self.assert_log(log1, {
'logger': channel.id, 'message': 'Test message3',
'level': logging.INFO})
self.assert_log(log2, {
'logger': channel.id, 'message': 'Test message2',
'level': logging.INFO})
@inlineCallbacks
def test_get_logs_n_is_none(self):
'''If no value for n is supplied, then the configured maximum number
of logs should be returned.'''
logpath = self.mktemp()
config = yield self.create_channel_config(
max_logs=2,
channels={
'logging': 'junebug.tests.helpers.LoggingTestTransport',
},
logging_path=logpath
)
properties = yield self.create_channel_properties(type='logging')
channel = yield self.create_channel(
self.service, self.redis, config=config, properties=properties)
worker_logger = channel.transport_worker.getServiceNamed(
'Junebug Worker Logger')
worker_logger.startService()
channel.transport_worker.test_log('Test message1')
channel.transport_worker.test_log('Test message2')
channel.transport_worker.test_log('Test message3')
[log1, log2] = channel.get_logs(None)
self.assert_log(log1, {
'logger': channel.id, 'message': 'Test message3',
'level': logging.INFO})
self.assert_log(log2, {
'logger': channel.id, 'message': 'Test message2',
'level': logging.INFO})
| StarcoderdataPython |
4825930 | """Test some FileHandler internals."""
import re
from pathlib import Path
import pytest
from file_groups.file_handler import FileHandler
from .conftest import same_content_files
# pylint: disable=protected-access
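# The tests below cover the dry_run x protected-regex combinations for
# FileHandler._no_symlink_check_registered_delete: a matching protected regex
# must raise and leave the file in place, while an unmatched or absent regex
# deletes the file unless dry_run is set.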
@same_content_files('Hi', 'y')
def test_no_symlink_check_registered_delete_ok(duplicates_dir, capsys):
fh = FileHandler([], '.', None, dry_run=False, protected_regexes=[])
y_abs = str(Path('y').absolute())
fh._no_symlink_check_registered_delete(y_abs)
out, _ = capsys.readouterr()
assert f"deleting: {y_abs}" in out
assert not Path('y').exists()
@same_content_files('Hi', 'y')
def test_no_symlink_check_registered_delete_ok_dry(duplicates_dir, capsys):
fh = FileHandler([], '.', None, dry_run=True, protected_regexes=[])
y_abs = str(Path('y').absolute())
fh._no_symlink_check_registered_delete(y_abs)
out, _ = capsys.readouterr()
print(fh.moved_from)
assert f"deleting: {y_abs}" in out
assert Path('y').exists()
@same_content_files('Hi', 'ya')
def test_no_symlink_check_registered_delete_ok_protected_matched(duplicates_dir, capsys):
fh = FileHandler([], '.', None, dry_run=False, protected_regexes=[re.compile(r'.*a$')], debug=True)
ya_abs = str(Path('ya').absolute())
with pytest.raises(AssertionError) as exinfo:
fh._no_symlink_check_registered_delete(ya_abs)
assert f"Oops, trying to delete protected file '{str(ya_abs)}'." in str(exinfo.value)
out, _ = capsys.readouterr()
assert f"find may_work_on - '{duplicates_dir}/ya' is protected by regex re.compile('.*a$'), assigning to group must_protect instead." in out
assert Path(ya_abs).exists()
@same_content_files('Hi', 'ya')
def test_no_symlink_check_registered_delete_ok_dry_protected_matched(duplicates_dir, capsys):
fh = FileHandler([], '.', None, dry_run=True, protected_regexes=[re.compile(r'.*a$')])
ya_abs = str(Path('ya').absolute())
with pytest.raises(AssertionError) as exinfo:
fh._no_symlink_check_registered_delete(ya_abs)
assert f"Oops, trying to delete protected file '{str(ya_abs)}'." in str(exinfo.value)
assert Path(ya_abs).exists()
@same_content_files('Hi', 'ya')
def test_no_symlink_check_registered_delete_ok_protected_un_matched(duplicates_dir, capsys):
fh = FileHandler([], '.', None, dry_run=False, protected_regexes=[re.compile(r'.*b$')])
ya_abs = str(Path('ya').absolute())
fh._no_symlink_check_registered_delete(ya_abs)
out, _ = capsys.readouterr()
assert f"deleting: {ya_abs}" in out
assert not Path('ya').exists()
@same_content_files('Hi', 'ya')
def test_no_symlink_check_registered_delete_ok_dry_protected_un_matched(duplicates_dir, capsys):
fh = FileHandler([], '.', None, dry_run=True, protected_regexes=[re.compile(r'.*b$')])
ya_abs = str(Path('ya').absolute())
fh._no_symlink_check_registered_delete(ya_abs)
out, _ = capsys.readouterr()
assert f"deleting: {ya_abs}" in out
assert Path('ya').exists()
| StarcoderdataPython |
133580 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sqlalchemy import (
Column,
Integer,
Text
)
from aria import (
application_model_storage,
modeling
)
from aria.storage import (
ModelStorage,
exceptions,
sql_mapi,
)
from tests import (
mock,
storage as tests_storage,
modeling as tests_modeling
)
@pytest.fixture
def storage():
base_storage = ModelStorage(sql_mapi.SQLAlchemyModelAPI,
initiator=tests_storage.init_inmemory_model_storage)
base_storage.register(tests_modeling.MockModel)
yield base_storage
tests_storage.release_sqlite_storage(base_storage)
@pytest.fixture(scope='module', autouse=True)
def module_cleanup():
modeling.models.aria_declarative_base.metadata.remove(tests_modeling.MockModel.__table__) # pylint: disable=no-member
def test_storage_base(storage):
with pytest.raises(AttributeError):
storage.non_existent_attribute()
def test_model_storage(storage):
mock_model = tests_modeling.MockModel(value=0, name='model_name')
storage.mock_model.put(mock_model)
assert storage.mock_model.get_by_name('model_name') == mock_model
assert [mm_from_storage for mm_from_storage in storage.mock_model.iter()] == [mock_model]
assert [mm_from_storage for mm_from_storage in storage.mock_model] == [mock_model]
storage.mock_model.delete(mock_model)
with pytest.raises(exceptions.StorageError):
storage.mock_model.get(mock_model.id)
def test_application_storage_factory():
storage = application_model_storage(sql_mapi.SQLAlchemyModelAPI,
initiator=tests_storage.init_inmemory_model_storage)
assert storage.service_template
assert storage.node_template
assert storage.group_template
assert storage.policy_template
assert storage.substitution_template
assert storage.substitution_template_mapping
assert storage.requirement_template
assert storage.relationship_template
assert storage.capability_template
assert storage.interface_template
assert storage.operation_template
assert storage.artifact_template
assert storage.service
assert storage.node
assert storage.group
assert storage.policy
assert storage.substitution
assert storage.substitution_mapping
assert storage.relationship
assert storage.capability
assert storage.interface
assert storage.operation
assert storage.artifact
assert storage.execution
assert storage.service_update
assert storage.service_update_step
assert storage.service_modification
assert storage.plugin
assert storage.task
assert storage.input
assert storage.output
assert storage.property
assert storage.attribute
assert storage.type
assert storage.metadata
tests_storage.release_sqlite_storage(storage)
def test_cascade_deletion(context):
service = context.model.service.list()[0]
assert len(context.model.service_template.list()) == 1
assert len(service.nodes) == len(context.model.node.list()) == 2
context.model.service.delete(service)
assert len(context.model.service_template.list()) == 1
assert len(context.model.service.list()) == 0
assert len(context.model.node.list()) == 0
@pytest.fixture
def context(tmpdir):
result = mock.context.simple(str(tmpdir))
yield result
tests_storage.release_sqlite_storage(result.model)
def test_mapi_include(context):
service1 = context.model.service.list()[0]
service1.name = 'service1'
service1.service_template.name = 'service_template1'
context.model.service.update(service1)
service_template2 = mock.models.create_service_template('service_template2')
service2 = mock.models.create_service(service_template2, 'service2')
context.model.service.put(service2)
assert service1 != service2
assert service1.service_template != service2.service_template
def assert_include(service):
st_name = context.model.service.get(service.id, include=('service_template_name',))
st_name_list = context.model.service.list(filters={'id': service.id},
include=('service_template_name', ))
assert len(st_name) == len(st_name_list) == 1
assert st_name[0] == st_name_list[0][0] == service.service_template.name
assert_include(service1)
assert_include(service2)
class MockModel(modeling.models.aria_declarative_base, modeling.mixins.ModelMixin): # pylint: disable=abstract-method
__tablename__ = 'op_mock_model'
name = Column(Text)
value = Column(Integer)
class TestFilterOperands(object):
@pytest.fixture()
def storage(self):
model_storage = application_model_storage(
sql_mapi.SQLAlchemyModelAPI, initiator=tests_storage.init_inmemory_model_storage)
model_storage.register(MockModel)
for value in (1, 2, 3, 4):
model_storage.op_mock_model.put(MockModel(value=value))
yield model_storage
tests_storage.release_sqlite_storage(model_storage)
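    # Each test below exercises one comparison operand of the SQL MAPI filter
    # syntax: filters={'<column>': {'<op>': value}} with op in
    # gt / ge / lt / le / eq / ne, including combined operands.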
def test_gt(self, storage):
assert len(storage.op_mock_model.list(filters=dict(value=dict(gt=3)))) == 1
assert len(storage.op_mock_model.list(filters=dict(value=dict(gt=4)))) == 0
def test_ge(self, storage):
assert len(storage.op_mock_model.list(filters=dict(value=dict(ge=3)))) == 2
assert len(storage.op_mock_model.list(filters=dict(value=dict(ge=5)))) == 0
def test_lt(self, storage):
assert len(storage.op_mock_model.list(filters=dict(value=dict(lt=2)))) == 1
assert len(storage.op_mock_model.list(filters=dict(value=dict(lt=1)))) == 0
def test_le(self, storage):
assert len(storage.op_mock_model.list(filters=dict(value=dict(le=2)))) == 2
assert len(storage.op_mock_model.list(filters=dict(value=dict(le=0)))) == 0
def test_eq(self, storage):
assert len(storage.op_mock_model.list(filters=dict(value=dict(eq=2)))) == 1
assert len(storage.op_mock_model.list(filters=dict(value=dict(eq=0)))) == 0
def test_neq(self, storage):
assert len(storage.op_mock_model.list(filters=dict(value=dict(ne=2)))) == 3
def test_gt_and_lt(self, storage):
assert len(storage.op_mock_model.list(filters=dict(value=dict(gt=1, lt=3)))) == 1
assert len(storage.op_mock_model.list(filters=dict(value=dict(gt=2, lt=2)))) == 0
def test_eq_and_ne(self, storage):
assert len(storage.op_mock_model.list(filters=dict(value=dict(eq=1, ne=3)))) == 1
assert len(storage.op_mock_model.list(filters=dict(value=dict(eq=1, ne=1)))) == 0
| StarcoderdataPython |
157855 | import math
import numpy as np
import scipy.optimize as opt
import matplotlib.pyplot as plt
import matplotlib.gridspec as gdsc
class concreteSection:
def __init__(self,sct,units='mm'):
'''
Imports section.
Parameters
----------
sct : Section Object
Object defining the section to be analysed.
units : string, optional
The units in which the dimensions are given. Options are 'mm','m'. The default is 'mm'.
Raises
------
Exception
If units given aren't mm or m raise an Exception.
Returns
-------
None.
'''
he,w = sct.getXHeights()
self.sct = sct
allowableUnits = ['mm','m']
if units not in allowableUnits:
raise Exception("Only mm or m currently allowed as section size units")
elif units == 'm':
self.concreteWidths = np.array(w*1000)
self.concreteHeights = np.array(he*1000)
self.steelHeights = np.array(sct.steel.steelCo[:,0]*1000)
self.barDiameters = np.array(sct.steel.steelDia*1000)
self.steelAreas = 0.25*(np.array(sct.steel.steelDia)*1000)**2*math.pi
self.step = (he[1]-he[0])*1000
self.h = (he[-1]-he[0])*1000
else:
self.concreteWidths = np.array(w)
self.concreteHeights = np.array(he)
self.steelHeights = np.array(sct.steel.steelCo[:,0])
self.barDiameters = np.array(sct.steel.steelDia)
self.steelAreas = 0.25*(np.array(sct.steel.steelDia))**2*math.pi
self.step = (he[1]-he[0])
self.h = (he[-1]-he[0])
def getConcreteForces(self,strains,b,concreteMaterial):
'''
Returns the stress and force in the concrete when given the strains, and the widths. If calling explicitly, make sure units are consistent. Recommended to use SI.
Parameters
----------
strains : array
Array of the strains down the section.
b : array
Array of the widths down the section.
concreteMaterial : material Object
Object holding the concrete material properties.
Returns
-------
stress : array
Returns the stress at each height.
force : array
Returns the forces at each height.
'''
stress = np.where(strains>0,0,np.where(strains<-concreteMaterial.ec3,-concreteMaterial.fcd,concreteMaterial.Ec*strains))
#print(stress)
force = b*stress*self.step
return (stress,force);
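    # The stress law above is a bilinear design curve: zero stress in tension
    # (strain > 0), linear-elastic sigma = Ec*strain down to -ec3, then a
    # plastic plateau at -fcd for strains beyond -ec3.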
def resultantForce(self,topStrain,bottomStrain,concreteMaterial,steelMaterial):
'''
Calculates the force and moment in the section given the strain at the top and bottom. If calling explicitly, make sure units are consistent. Recommended to use SI.
If the axial force is assumed to act at the centroid, section can be relocated using the relevant method in the section object.
Parameters
----------
topStrain : number
Strain at the top of the section
bottomStrain : number
Strain at the bottom of the section.
concreteMaterial : material object
Object holding the concrete material properties.
steelMaterial : material object
Object holding the steel material properties.
Returns
-------
N : float
Axial Force in the section. The axial force acts at height zero.
M : float
Moment in the section. The moment is calculated about height zero.
'''
steelStrains = np.interp(self.steelHeights,[self.concreteHeights[0],self.concreteHeights[-1]],[bottomStrain,topStrain])
#print(steelStrains)
concStrains = np.interp(self.concreteHeights,[self.concreteHeights[0],self.concreteHeights[-1]],[bottomStrain,topStrain])
#print(concStrains)
concForces = self.getConcreteForces(concStrains,self.concreteWidths,concreteMaterial)[1]
#print(concForces)
steelForces = np.where(np.absolute(steelStrains)<steelMaterial.ey,steelStrains*steelMaterial.Es*self.steelAreas,self.steelAreas*(steelMaterial.fyd + ((np.absolute(steelStrains)-steelMaterial.ey)/(steelMaterial.euk-steelMaterial.ey))*(steelMaterial.k-1)*steelMaterial.fyd)*np.sign(steelStrains))
#print(steelForces)
N = np.sum(steelForces,axis=0) + np.sum(concForces,axis=0) #N
M = -np.sum(steelForces*(self.steelHeights)*0.001) + -np.sum(concForces*(self.concreteHeights)*0.001) #Nm
return (N,M);
def strainFinder(self,x,concreteMaterial,steelMaterial,N,M):
'''
Used by the strainSolver routine to solve the strains for a given input N and M.
Parameters
----------
x : list
List holding the initial guess for top and bottom strain. Varied by solver routine to find solution.
concreteMaterial : Material Object
An object holding the concrete material properties
steelMaterial : Material Object
An object holding the steel material properties
N : float
Axial force to solve for.
M : float
Moment to solve for.
Returns
-------
eqN : float
Difference between target axial force and calculated axial force. Aiming for zero with solver.
eqM : float
Difference between target moment and calculated moment. Aiming for zero with solver.
'''
topStrain,bottomStrain = x
if bottomStrain<-0.0035 or topStrain<-0.0035 or bottomStrain>1 or topStrain>1:
eqN=100000000
eqM=100000000
else:
eqN = N - self.resultantForce(topStrain,bottomStrain,concreteMaterial,steelMaterial)[0]
eqM = M - self.resultantForce(topStrain,bottomStrain,concreteMaterial,steelMaterial)[1]
return [eqN,eqM];
def strainSolver(self,N,M,concreteMaterial,steelMaterial,units='Nm'):
'''
Calculates the strain situation for a given axial force and bending moment.
Parameters
----------
N : Number
Input axial force.
M : Number
Input moment.
concreteMaterial : Material Object
An object holding the concrete material properties
steelMaterial : Material Object
An object holding the steel material properties
units : string, optional
The units in which the moment and axial force are given. Options are 'Nm','kNm','MNm'. The default is 'Nm'.
Raises
------
Exception
Exception is raised if a unit which isn't allowed is input.
Returns
-------
        (topStrain,bottomStrain) : tuple of floats
Returns both the strains at the extreme top and bottom of the section.
'''
allowableUnits = ['Nm','kNm','MNm']
if units not in allowableUnits:
raise Exception("Allowable units are 'Nm','kNm','MNm'")
elif (units == 'kNm'):
N = N*10**3
M = M*10**3
elif (units == 'MNm'):
N = N*10**6
M = M*10**6
topStrain,bottomStrain = opt.root(self.strainFinder,[0,0],args=(concreteMaterial,steelMaterial,N,M)).x
return (topStrain,bottomStrain);
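    # Illustrative usage (instance and material names are hypothetical):
    #   top, bottom = solver.strainSolver(1000, 250, concrete, steel, units='kNm')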
def concreteLimitedMomentCapacity(self,M,N,concreteMaterial,steelMaterial):
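        '''
        Residual used to find the concrete-governed capacity: solves the strain
        state for the given N and M, then returns the difference between the
        extreme compressive strain and a gradient-dependent concrete strain
        limit. Zero when the concrete limit is exactly reached, so a
        root-finding routine can drive it to zero.
        '''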
topStrain,bottomStrain = self.strainSolver(N,M,concreteMaterial,steelMaterial)
if abs(topStrain-bottomStrain) < concreteMaterial.ecu3:
concLimit = -concreteMaterial.ecu2*(1+abs(topStrain-bottomStrain)/concreteMaterial.ecu3)
else:
concLimit = -concreteMaterial.ecu3
concEq = min(topStrain,bottomStrain) - concLimit
return concEq;
def steelLimitedMomentCapacity(self,M,N,concreteMaterial,steelMaterial):
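        '''
        Residual used to find the steel-governed capacity: solves the strain
        state for the given N and M, then returns the difference between the
        largest tensile steel strain and the design ultimate strain eud. Zero
        when the steel limit is exactly reached.
        '''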
topStrain,bottomStrain = self.strainSolver(N,M,concreteMaterial,steelMaterial)
steelStrains = np.interp(self.steelHeights,[self.concreteHeights[0],self.concreteHeights[-1]],[bottomStrain,topStrain])
steelEq = np.amax(steelStrains) - steelMaterial.eud
return steelEq;
def CapacitySolver(self,concreteMaterial,steelMaterial,Anum=50,Bnum=200,Cnum=50,returnStrains=False,units="Nm"):
'''
        This function iterates through the limiting strain states, and calculates the axial force and moment at each of these states.
Parameters
----------
concreteMaterial : Material Object
An object holding the concrete material properties
steelMaterial : Material Object
An object holding the steel material properties
Anum : integer, optional
The number of steps in the stage between uniform compression, and first extreme tension/limiting compression. The default is 50.
Bnum : integer, optional
The number of steps in the stage between first extreme tension and limiting tension (with limiting compression). Divided exponentially. The default is 200.
Cnum : integer, optional
The number of steps in the stage between extreme bending and uniform tension. The default is 50.
returnStrains : boolean, optional
Whether to return the strains or not. The default is False.
units : string, optional
The units which the axial force and moment should be returned in. Current options are 'Nm','kNm','MNm'. The default is "Nm".
Raises
------
Exception
If an invalid input for units is tried, an exception is raised.
Returns
-------
Lists
Default is to return a list of the axial force and corresponding moment capacity.
If returnStrains is true, the corresponding top and bottom strains are also returned.
'''
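        # Strain states are swept in six stages (A-C, then D-F mirrored for the
        # opposite bending direction): uniform compression, bending at limiting
        # compression, and increasing tension through to the steel limit.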
tenStrainLimit = (self.h/np.amax(self.steelHeights))*(concreteMaterial.ecu3+steelMaterial.eud) - concreteMaterial.ecu3
topStrainsA = np.linspace(-concreteMaterial.ecu2,-concreteMaterial.ecu3,num=Anum)
topStrainsB = np.tile([-concreteMaterial.ecu3],Bnum)
topStrainsC = np.linspace(-concreteMaterial.ecu3,steelMaterial.eud,Cnum)
topStrainsD = np.linspace(steelMaterial.eud,tenStrainLimit,Cnum)
topStrainsE = np.geomspace(tenStrainLimit,1e-10,Bnum)
topStrainsF = np.linspace(0,-concreteMaterial.ecu2,Anum)
botStrainsA = np.linspace(-concreteMaterial.ecu2,0,num=Anum)
botStrainsB = np.geomspace(1e-10,tenStrainLimit,num=Bnum)
botStrainsC = np.linspace(tenStrainLimit,steelMaterial.eud,Cnum)
botStrainsD = np.linspace(steelMaterial.eud,-concreteMaterial.ecu3,Cnum)
botStrainsE = np.tile([-concreteMaterial.ecu3],Bnum)
botStrainsF = np.linspace(-concreteMaterial.ecu3,-concreteMaterial.ecu2,Anum)
topStrains = np.concatenate([topStrainsA,topStrainsB,topStrainsC,topStrainsD,topStrainsE,topStrainsF])
botStrains = np.concatenate([botStrainsA,botStrainsB,botStrainsC,botStrainsD,botStrainsE,botStrainsF])
N = np.asarray([])
M = np.asarray([])
for a in range(0,2*(Anum+Bnum+Cnum)):
force,moment = self.resultantForce(topStrains[a],botStrains[a],concreteMaterial,steelMaterial)
N = np.append(N,force)
M = np.append(M,moment)
allowableUnits = ['Nm','kNm','MNm']
if units not in allowableUnits:
raise Exception("Allowable units are 'Nm','kNm','MNm'")
if (units == 'kNm'):
N = N/10**3
M = M/10**3
elif (units == 'MNm'):
N = N/10**6
M = M/10**6
if returnStrains:
return N,M,topStrains,botStrains;
else:
return N,M;
def formatSectionPlot(self,ax,xlabel,grid=True):
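        '''
        Apply shared styling to a section plot: pin the left/bottom spines at
        zero, hide the right/top spines, set the y-range and ticks from the
        section heights, and optionally draw major/minor grids.
        '''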
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.set_ylim(self.concreteHeights[0],self.concreteHeights[-1])
ax.set_yticks(np.arange(self.concreteHeights[0], self.concreteHeights[-1]+self.h/5, step=self.h/5))
if(ax.get_xlim()[1]<0):
ax.set_xlim(right=0)
ax.minorticks_on()
if grid:
ax.grid(grid,'major')
ax.grid(grid,'minor','both',linestyle=':')
ax.set_xlabel(xlabel,fontsize=8,fontweight='bold')
ax.xaxis.set_label_coords(0.5, -0.025)
ax.tick_params(labelsize=8)
return;
def plotStrainStressState(self,N,M,concreteMaterial,steelMaterial,units='Nm'):
'''
Plot the strain and stress blocks in the section for a given axial force and moment.
Parameters
----------
        N : Number
            Input axial force.
        M : Number
            Input moment.
        concreteMaterial : Material Object
            An object holding the concrete material properties
        steelMaterial : Material Object
            An object holding the steel material properties
        units : string, optional
            The units in which the moment and axial force are given. Options are 'Nm','kNm','MNm'. The default is 'Nm'.
        Raises
        ------
        Exception
            Exception is raised if a unit which isn't allowed is input.
Returns
-------
None.
'''
allowableUnits = ['Nm','kNm','MNm']
if units not in allowableUnits:
raise Exception("Allowable units are 'Nm','kNm','MNm'")
elif (units == 'kNm'):
N = N*10**3
M = M*10**3
elif (units == 'MNm'):
N = N*10**6
M = M*10**6
#Produce a figure to hold the plots
fig = plt.figure(figsize=(10,6))
spec = fig.add_gridspec(ncols=4,nrows=1,height_ratios = [1])
#Plot the section as a graph
ax0 = fig.add_subplot(spec[0,0],adjustable='box',aspect='equal')
self.sct.rotateSection(90).plotSection(ax=ax0)
self.formatSectionPlot(ax0,"",grid=False)
# Plot the strain
topStrain,bottomStrain = self.strainSolver(N,M,concreteMaterial,steelMaterial,units='Nm')
concStrains = np.interp(self.concreteHeights,[self.concreteHeights[0],self.concreteHeights[-1]],[bottomStrain,topStrain])
ax1 = fig.add_subplot(spec[0,1])
plt.plot(concStrains,self.concreteHeights)
self.formatSectionPlot(ax1,"Strain")
# Plot the stresses in the concrete
ax2 = fig.add_subplot(spec[0,2])
concStresses = self.getConcreteForces(concStrains,self.concreteWidths,concreteMaterial)[0]
plt.plot(concStresses,self.concreteHeights)
self.formatSectionPlot(ax2,"Stress (MPa)")
#Calculate the resultant forces from the stresses
steelStrains = np.interp(self.steelHeights,[self.concreteHeights[0],self.concreteHeights[-1]],[bottomStrain,topStrain])
steelForces = np.where(np.absolute(steelStrains)<steelMaterial.ey,steelStrains*steelMaterial.Es*self.steelAreas,self.steelAreas*(steelMaterial.fyd + ((np.absolute(steelStrains)-steelMaterial.ey)/(steelMaterial.euk-steelMaterial.ey))*(steelMaterial.k-1)*steelMaterial.fyd)*np.sign(steelStrains))
concForces = self.getConcreteForces(concStrains,self.concreteWidths,concreteMaterial)[1]
singleConcForce = np.sum(concForces,axis=0)
concCentroid = np.sum(concForces*self.concreteHeights,axis=0)/np.sum(concForces,axis=0)
steelTensionSum = np.sum(np.where(steelForces>0,steelForces,0))
steelCompressionSum = np.sum(np.where(steelForces<0,steelForces,0))
steelCompressionCentroid = np.sum(np.where(steelForces<0,steelForces*self.steelHeights,0))/steelCompressionSum
#Plot the resultant forces
ax3 = fig.add_subplot(spec[0,3])
if(steelTensionSum>0):
steelTensionCentroid = np.sum(np.where(steelForces>0,steelForces*self.steelHeights,0))/steelTensionSum
plt.plot(steelTensionSum*10**-6,steelTensionCentroid,'r.',label = "Steel Tensile Forces",markersize=8)
plt.plot(steelCompressionSum*10**-6,steelCompressionCentroid,'m.',label="Steel Compression Forces",markersize=8)
plt.plot(singleConcForce*10**-6,concCentroid,'gx',label="Concrete Force",markersize=8)
plt.legend(loc='upper left',fontsize=8)
self.formatSectionPlot(ax3,"Forces (MN)")
#Show the plots
plt.show()
return; | StarcoderdataPython |
69005 | <reponame>HACFelipe/PSaaS
from typing import List
from entity.building_block import BuildingBlock
from entity.project_task import ProjectTask
class Code(BuildingBlock):
"""Model for Code"""
def __init__(self, description : str, linked_project_task : ProjectTask, source_code : str, testable : bool = False, expected_results : List[object] = None):
super().__init__(
description=description,
linked_project_task = linked_project_task,
testable = testable)
self.expected_results = expected_results
self.source_code = source_code
def print_building_block(self):
print("║ %-4s ║ %-8s ║ %-30s ║ %-20s ║ %-9s ║ %-30s ║ %-30s ║" % (self.id, type(self).__name__, self.description, self.linked_project_task.id, self.testable, str(self.expected_results), self.source_code)) | StarcoderdataPython |
1645308 | <reponame>aveetron/cafe3_resturant_management
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.contrib import messages
from .forms import *
from .models import *
# Create your views here.
def home(request):
allItem = Item.objects.all().order_by('-pk')
context = {
'allItem': allItem
}
return render(request, 'item.html', context)
def addItem(request):
    if request.method == 'POST':
        form = ItemForm(request.POST)
if form.is_valid():
form.save()
message = 'Item added successfully'
messages.success(request, message)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
else:
message = 'Item cannot be added'
messages.warning(request, message)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
else:
message = 'Item cannot be added'
messages.warning(request, message)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def deleteItem(request, id):
try:
getItem = Item.objects.get(id = id)
getItem.delete()
message = 'Item deleted successfully'
messages.success(request, message)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    except Item.DoesNotExist:
message = 'Item cannot be Deleted'
messages.warning(request, message)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def indexLoadTest(request):
return render(request, 'index.html') | StarcoderdataPython |
1788153 | <gh_stars>1-10
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------#
# author: <NAME> #
# email: <EMAIL> #
#--------------------------------------------#
import argparse
parser = argparse.ArgumentParser(description='rnn attention')
parser.add_argument('--mode', type=str, default="train", help='train | test | valid')
parser.add_argument('--use_attention', action='store_true', default=True, help='whether to use attention')
parser.add_argument('--batch_size', type=int, default=16, help='batch size')
parser.add_argument('--epoch', type=int, default=50, help='number of training epochs')
parser.add_argument('--log_step', type=int, default=2, help='logging interval in steps')
parser.add_argument('--rnn_type', type=str, default='gru', help='lstm or gru')
parser.add_argument('--bi_rnn', action='store_true', default=True, help='bidirectional')
parser.add_argument('--num_layers', type=int, default=1, help='num layers of rnn')
parser.add_argument('--embed_size', type=int, default=300, help='embed size')
parser.add_argument('--hidden_size', type=int, default=128, help='hidden size of rnn cell')
parser.add_argument('--attn_size', type=int, default=512, help='size of attention')
parser.add_argument('--dropout', type=float, default=0.5, help='keep prob of dropout')
parser.add_argument('--optim_type', type=str, default='sgd', help='adam | adagrad | sgd | rmsprop')
parser.add_argument('--learning_rate', type=float, default=0.01, help='learning rate')
parser.add_argument('--lr_decay', type=float, default=0.9, help='learning rate decay')
parser.add_argument('--l2_reg', type=float, default=0.001, help='l2 regularization')
parser.add_argument('--max_grad_norm', type=float, default=10.0, help='max_grad_norm')
parser.add_argument('--runtime_dir', type=str, default='runtime', help='dir of runtime')
parser.add_argument('--ckpt_dir', type=str, default='checkpoint', help='dir of checkpoint')
parser.add_argument('--resume', action='store_true', default=False, help='resume')
parser.add_argument('--valid', action='store_true', default=True, help='is valid')
args = parser.parse_args()
import os
from dataset import load_data, Corpus, Dataset
if args.mode == 'train':
from trainer import Trainer
datas, labels = load_data('./corpus/TREC.train')
corpus = Corpus(datas, labels)
valid_datas, valid_labels = load_data('./corpus/TREC.test')
dataset = {
'train': Dataset(corpus, datas, labels),
'valid': Dataset(corpus, valid_datas, valid_labels)
}
args.vocab_size = corpus.vocab_size
args.label_size = corpus.label_size
trainer = Trainer(args)
trainer.train(dataset) | StarcoderdataPython |
3313370 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-28 05:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_markdown.models
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0011_devhubquestionanswermodel'),
]
operations = [
migrations.CreateModel(
name='DevhubProjectModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('project_heading', models.CharField(max_length=200)),
('project_description', django_markdown.models.MarkdownField()),
('project_link', models.URLField()),
('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.UserProfileModel')),
],
),
]
| StarcoderdataPython |
3225847 | # -*- coding: utf-8 -*-
from six.moves.urllib.parse import urlsplit
from lxml import html
from lxml.html import clean
# XXX move to iktomi.cms?
class Cleaner(clean.Cleaner):
safe_attrs_only = True
remove_unknown_tags = None
drop_empty_tags = frozenset()
dom_callbacks = []
allow_external_src = False
allowed_protocols = frozenset(['http', 'https', 'mailto'])
# None to allow all classes
allow_classes = {}
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc']
a_without_href = True
# False : no tags wrapping;
# None : try to wrap tags on top in 'p' if 'p' is allowed or 'div'
# True : try to wrap tags on top in 'p' if 'p' is allowed or 'div',
# and raise error if no get_wrapper_tag was found
# if div allowed;
# 'div'/'p' : wrap tags in 'div' or 'p' respectively
# lambda : wrap tags in tag from lambda
wrap_inline_tags = None
# Tags to wrap in paragraphs on top
tags_to_wrap = ['b', 'big', 'i', 'small', 'tt',
'abbr', 'acronym', 'cite', 'code',
'dfn', 'em', 'kbd', 'strong', 'samp',
'var', 'a', 'bdo', 'br', 'map', 'object',
'q', 'span', 'sub', 'sup']
split_paragraphs_by_br = True
def __init__(self, *args, **kwargs):
clean.Cleaner.__init__(self, *args, **kwargs)
if self.wrap_inline_tags is True:
if self.get_wrapper_tag() is None:
raise ValueError('Cannot find top element')
def __call__(self, doc):
clean.Cleaner.__call__(self, doc)
if hasattr(doc, 'getroot'):
# ElementTree instance, instead of an element
doc = doc.getroot()
self.extra_clean(doc)
# retrieve tag to wrap around inline tags
def get_wrapper_tag(self):
if self.allow_tags is None:
return
if self.wrap_inline_tags in (None, True):
if 'p' in self.allow_tags:
return html.Element('p')
elif 'div' in self.allow_tags:
return html.Element('div')
elif self.wrap_inline_tags in ('p', 'div'):
if 'p' in self.allow_tags or 'div' in self.allow_tags:
return html.Element(self.wrap_inline_tags)
elif callable(self.wrap_inline_tags):
element = self.wrap_inline_tags()
if element.tag in self.allow_tags:
return element
def clean_top(self, doc):
par = None
first_par = False
if self.get_wrapper_tag() is None:
return
# create paragraph if there text in the beginning of top
if (doc.text or "").strip():
par = self.get_wrapper_tag()
doc.insert(0, par)
par.text = doc.text
doc.text = None
# remember if first paragraph created from text
first_par = True
for child in doc.getchildren():
i = doc.index(child)
if child.tag == 'br' and 'br' in self.tags_to_wrap:
if (child.tail or "").strip():
par = self.get_wrapper_tag()
doc.insert(i, par)
par.text = child.tail
doc.remove(child)
continue
if child.tag not in self.tags_to_wrap and \
(child.tail or "").strip():
par = self.get_wrapper_tag()
par.text = child.tail
child.tail = None
doc.insert(i+1, par)
continue
if child.tag in self.tags_to_wrap:
if par is None:
par = self.get_wrapper_tag()
doc.insert(i, par)
par.append(child)
else:
if first_par and i == 0:
continue
par = None
def _tail_is_empty(self, el):
return not (el.tail and el.tail.strip(u' \t\r\n\v\f\u00a0'))
def is_element_empty(self, el):
if el.tag == 'br':
return True
if el.tag not in self.drop_empty_tags:
return False
children = el.getchildren()
empty_children = all(
[self.is_element_empty(child) and self._tail_is_empty(child)
for child in children]
)
text = el.text and el.text.strip(u' \t\r\n\v\f\u00a0')
return not text and empty_children
def remove_brs_from_pars(self, doc):
def split_by_br(par):
br = par.find('.//br')
if br is not None:
next_par = html.Element('p')
par.addnext(next_par)
if br.tail:
next_par.text = br.tail
br.tail = None
nxt = br.getnext()
while nxt is not None:
next_par.append(nxt)
nxt = br.getnext()
br.drop_tag()
split_by_br(next_par)
for p in doc.findall('.//p'):
split_by_br(p)
def extra_clean(self, doc):
for el in doc.xpath('//*[@href]'):
scheme, netloc, path, query, fragment = urlsplit(el.attrib['href'])
if scheme and scheme not in self.allowed_protocols:
el.drop_tag()
for attr in self.attr_val_is_uri:
if attr == 'href':
continue
for el in doc.xpath('//*[@'+attr+']'):
scheme, netloc, path, query, fragment = urlsplit(el.attrib[attr])
scheme_fail = scheme and scheme not in self.allowed_protocols
netloc_fail = not self.allow_external_src and netloc
if scheme_fail or netloc_fail:
if attr == 'src':
el.drop_tag()
else:
el.attrib.pop(attr)
if self.a_without_href:
for link in doc.xpath('//a[not(@href)]'):
link.drop_tag()
if self.allow_classes is not None:
for el in doc.xpath('//*[@class]'):
                # materialize as lists so the truthiness check and join below
                # behave the same under Python 2 and 3 (filter() is lazy in py3)
                classes = [cls for cls in el.attrib['class'].split() if cls]
                if el.tag in self.allow_classes:
                    allowed = self.allow_classes[el.tag]
                    condition = allowed if callable(allowed) else \
                            (lambda cls: cls in allowed)
                    classes = [cls for cls in classes if condition(cls)]
                else:
                    classes = []
if classes:
el.attrib['class'] = ' '.join(classes)
else:
el.attrib.pop('class')
for callback in self.dom_callbacks:
callback(doc)
if self.wrap_inline_tags is not False and self.tags_to_wrap:
self.clean_top(doc)
if self.split_paragraphs_by_br:
self.remove_brs_from_pars(doc)
for tag in self.drop_empty_tags:
for el in doc.xpath('//'+tag):
if not el.attrib and self.is_element_empty(el):
el.drop_tree()
def sanitize(value, **kwargs):
doc = html.fragment_fromstring(value, create_parent=True)
Cleaner(**kwargs)(doc)
clean = html.tostring(doc, encoding='utf-8').decode('utf-8')
return clean.split('>', 1)[1].rsplit('<', 1)[0]
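# Illustrative usage (keyword arguments are forwarded to the Cleaner subclass,
# e.g. allow_tags from lxml's Cleaner):
#   cleaned = sanitize('<p class="x" onclick="evil()">hi</p>', allow_tags=['p', 'a'])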
| StarcoderdataPython |
1753180 | # Copyright 2017 National Renewable Energy Laboratory. This software
# is released under the license detailed in the file, LICENSE, which
# is located in the top-level directory structure.
# ========================================================================
#
# Imports
#
# ========================================================================
import os
import pygame
import windcraft.colors as colors
# ========================================================================
#
# Class definitions
#
# ========================================================================
class Turbine(pygame.sprite.Sprite):
"""This represents the turbine."""
def __init__(self, farm):
""" Constructor for Turbine.
:param farm: wind farm
:type farm: :class:`Farm`
"""
# Call the parent's constructor
super().__init__()
self.colors = colors.Colors()
self.size = farm.turbine_size
self.margin = farm.turbine_size * 0.4
self.rotation_counter = 0
self.speed = 0.0
self.alpha = 0.5
self.radius = 0.0
# Turbine images
art_dir = os.path.join(os.path.dirname(__file__),
'art',)
self.filenames = [os.path.join(art_dir, fname)
for fname in ["turbine_24x24_00.png",
"turbine_24x24_01.png",
"turbine_24x24_02.png",
"turbine_24x24_03.png"]]
self.images = [pygame.image.load(fname).convert()
for fname in self.filenames]
self.images = [pygame.transform.scale(image, (self.size, self.size))
for image in self.images]
for image in self.images:
image.set_colorkey(self.colors.black)
self.image = self.images[0]
self.rect = self.image.get_rect()
def rotate(self):
"""Pick another image to rotate the turbine"""
self.rotation_counter = (
self.rotation_counter + 1) % len(self.filenames)
loc = self.rect.center
self.image = self.images[self.rotation_counter]
self.image.get_rect().center = loc
self.rect = self.image.get_rect()
self.rect.center = loc
def update(self, u):
"""Use local velocity to update the turbine speed.
:param u: velocity at the turbine
:type u: float
"""
self.speed += self.alpha * (u - self.speed)
def place_turbine(self, pos, turbines, farm):
"""Place turbine in the wind farm.
:param pos: position to place turbine
:type pos: list
:param turbines: current turbines in the farm
:type turbines: list
:param farm: wind farm
:type farm: :class:`Farm`
"""
mouse = pygame.Surface([1, 1])
mouse = mouse.get_rect()
mouse.centerx = pos[0]
mouse.centery = pos[1]
if not mouse.colliderect(farm.inner_rect):
return False
else:
            # Check for turbine-turbine collision; the margin inflates each
            # existing turbine's rect to enforce minimum spacing
            turbine = self.images[0].get_rect()
            turbine.centerx = mouse.centerx
            turbine.centery = mouse.centery
            for other_turbine in turbines:
                if turbine.colliderect(
                        other_turbine.rect.inflate(
                            self.margin,
                            self.margin)):
                    return False
            # No collision: place the turbine and record its relative position
            self.rect.centerx = mouse.centerx
            self.rect.centery = mouse.centery
            self.relative_pos = [
                (self.rect.centerx - farm.rect.x) / farm.width,
                (self.rect.centery - farm.rect.y) / farm.height]
            self.radius = farm.turbine_ratio * 0.2
            return True
| StarcoderdataPython |
77340 | <reponame>Eleveil/python-algorithm
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-05-18 23:29:05
# @Author : <NAME> (<EMAIL>)
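# Merge two sorted lists: walk arr1 with `ind` and insert each arr2 element into
# a copy of arr1; `ind + i` offsets for the i elements already inserted. Elements
# of arr2 larger than everything in arr1 are appended by the for-else clause.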
arr1 = [1, 3, 4, 6, 10]
arr2 = [2, 5, 8, 11]
ind = 0
ans = arr1.copy()
for i in range(len(arr2)):
while ind < len(arr1):
if arr2[i] <= arr1[ind]:
ans.insert(ind+i, arr2[i])
break
else:
ind += 1
else:
ans = ans + arr2[i:]
print(ans) | StarcoderdataPython |
1620585 | # coding=utf-8
from setuptools import setup
from cms_support.utils.constants import Constants
# long_description=open('README.md').read(),
# https://betterscientificsoftware.github.io/python-for-hpc/tutorials/python-pypi-packaging/
setup(
name=Constants.PACKAGE_NAME,
version=Constants.VERSION,
author=Constants.AUTHOR,
author_email=Constants.EMAIL,
packages=[Constants.PACKAGE_NAME, Constants.PACKAGE_NAME+'.sites', Constants.PACKAGE_NAME+'.transfers'],
scripts=[],
url=Constants.URL_PROJECT,
license='LICENSE',
description='Tools to accelerate monitoring in the CMS computing grid',
install_requires=open('requirements.txt').read().split("\n"),
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| StarcoderdataPython |
1751132 | <gh_stars>0
from dataclasses import dataclass
'''
@property defines the getter method
getter use: instance.attribute
@attribute.setter defines the setter method
setter use: instance.attribute = value
dataclass(frozen=True) makes instances immutable
'''
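# Example (illustrative values):
#   s = SettingsModel('postgresql', 'user', 'secret', 'localhost', '5432', 'mydb')
#   s.host              # getter -> 'localhost'
#   s.host = '10.0.0.1' # setter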
@dataclass
class SettingsModel:
'''
Data class for settings objects
'''
_dialect: str
_username: str
_password: str
_host: str
_port: str
_db_name: str
@property
def dialect(self):
return self._dialect
@dialect.setter
def dialect(self, dialect):
self._dialect = dialect
@property
def username(self):
return self._username
@username.setter
def username(self, username):
self._username = username
@property
def password(self) -> str:
return self._password
@password.setter
def password(self, password):
self._password = password
@property
def host(self) -> str:
return self._host
@host.setter
def host(self, host):
self._host = host
@property
def port(self) -> str:
return self._port
@port.setter
def port(self, port):
self._port = port
@property
def db_name(self) -> str:
return self._db_name
@db_name.setter
def db_name(self, db_name):
self._db_name = db_name
@dataclass(frozen=True)
class StudentModel:
'''
Data class for student objects
'''
# Slots improves memory usage
# https://docs.python.org/reference/datamodel.html#slots
__slots__ = ['student_id', 'fname', 'lname']
student_id: int
fname: str
lname: str
@dataclass(frozen=True)
class ProfessorModel:
__slots__ = ['prof_id', 'prof_fname', 'prof_lname', 'prof_email']
prof_id: int
prof_fname: str
prof_lname: str
prof_email: str
@dataclass
class CourseModel:
course_id: int
name: str
start_date: str
end_date: str
@dataclass
class StudentToCourseModel:
student_takes_id: int
student_id: int
course_id: int
@dataclass
class TeacherToCourse:
prof_id: int
course_id: int
@dataclass
class Assignments:
student_takes_id: int
test_1: int
homework_1: int
'''
stu_model = StudentModel(1, 'Jay', 'White')
print(stu_model.fname)
# StudentModel is frozen (and uses __slots__), so attribute assignment raises
# dataclasses.FrozenInstanceError:
# stu_model.fname = 'John'
'''
| StarcoderdataPython |
134967 | CORRECT_PIN = "1234"
MAX_TRIES = 3
tries_left = MAX_TRIES
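# Prompt repeatedly until the PIN matches or the attempts run out.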
pin = input(f"Insert your pni ({tries_left} tries left): ")
while tries_left > 1 and pin != CORRECT_PIN:
tries_left -= 1
print("Your PIN is incorrect.")
pin = input(f"Insert your pni ({tries_left} tries left): ")
if pin == CORRECT_PIN:
print("Your PIN is correct")
else:
print("Your bank card is blocked") | StarcoderdataPython |
1757575 | <reponame>zeqianli/douban-listing-helper
import pandas as pd
import os, re, requests, argparse
from bs4 import BeautifulSoup
def main(f_url_list='url_list.txt', dir_out=None):
if dir_out is None:
import time
dir_out=f'metadata_{int(time.time())}'
try:
os.mkdir(dir_out)
except FileExistsError:
pass
f_meta_out=os.path.join(dir_out, 'metadata.txt')
f_meta=open(f_meta_out,'w+')
# douban format:
fields=['album', 'barcode', 'album', 'album-alt', 'artist', 'genre', 'release-type', 'media', 'date', 'label', 'number_of_disc', 'isrc', 'tracks', 'description']
sep='='*50 + '\n'
f_meta.write(f"# {','.join(fields)} \n")
f_meta.write(sep)
with open(f_url_list,'r') as f_url:
for url in f_url:
print(f'Processing {url}')
try:
meta=get_metadata(url.strip())
except Exception as e:
print(f"Get metadata failed.")
continue
# Save metadata line by line
f_meta.write(url+'\n')
for field in fields:
try:
content=meta[field]
if field=='tracks':
content='\n'.join([f'{i+1}. {track}' for i, track in enumerate(content)])
content='*** TRACKLIST ***\n'+content
elif field=='description':
content='*** DESCRIPTION ***\n'+content
except KeyError:
content=f"*{field} MISSING*"
f_meta.write(content+'\n')
f_meta.write(sep)
# Save cover image
try:
f_cover=os.path.join(dir_out, f"{meta['album']} - {meta['artist']}.{meta['img_url'].split('.')[-1]}")
save_image(meta['img_url'],f_cover)
except:
print(f"Download cover failed")
f_meta.close()
def save_image(url, fout):
# Save image
with open(fout,'wb') as f:
f.write(requests.get(url).content)
def get_metadata(url):
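    '''Dispatch to the site-specific scraper based on the domain in the URL.'''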
site=re.findall(r'\.(.+)\.com',url)[0]
if site=='bandcamp':
return get_bandcamp_metadata(url)
elif site=='discogs':
return get_discogs_metadata(url)
elif site=='apple':
return get_apple_metadata(url)
elif site=='amazon':
        return get_amazon_metadata(url)
def get_bandcamp_metadata(url):
req=requests.get(url)
soup=BeautifulSoup(req.text,'html.parser')
out={}
out['album']=soup.find(id='name-section').find('h2').text.strip()
out['artist']=soup.find(id='name-section').find('h3').text[2:].strip()
out['img_url']=soup.find(id='tralbumArt').find('img').attrs['src']
out['tracks']=pd.read_html(str(soup.find(id='track_table')))[0][2].to_list()
out['description']=soup.find(class_='tralbumData tralbum-about').text.strip()
out['date']=soup.find(class_='tralbumData tralbum-credits').text.strip().split('\n')[0].replace('released ','') # TODO: format this
# spl=re.split(r'[, ]',out['_date'])
# spl.remove('')
# out['month'],out['day'],out['year']=spl
out['label']='Self-Released' # bandcamp album page generally don't have label info
return out
def get_discogs_metadata(url):
url_types=['master','release']
ps=[r'/master/(\d+)',r'/release/(\d+)']
api_urls=['https://api.discogs.com/masters/%s', 'https://api.discogs.com/releases/%s']
for url_type,p, api_url in zip(url_types,ps, api_urls):
_id=re.findall(p,url)
if len(_id)!=0:
js=requests.get(api_url % _id[0]).json()
if url_type=='master':
js=requests.get(js['main_release_url']).json()
break
req=requests.get(url)
soup=BeautifulSoup(req.text,'html.parser') # Get image link from this
out={}
out['album']=js['title']
out['artist']=js['artists'][0]['name'] # TODO: multiple artists
out['img_url']= soup.find(class_='thumbnail_center').find('img').attrs['src'] # TODO: this is a shrinked thumbnail image
out['tracks']=[tk['title']+' '+tk['duration'] for tk in js['tracklist']] # TODO: custum track # (A1, A2...)
try:
out['description']=js['notes']
except KeyError:
out['description']=''
out['date']=js['released']
# out['month'],out['day'],out['year']=out['_date'].split('-')
out['label']=js['labels'][0]['name']
return out
def get_apple_metadata(url):
    # TODO: Apple Music scraping not implemented yet
    pass

def get_amazon_metadata(url):
    # TODO: Amazon scraping not implemented yet (stub avoids a NameError in get_metadata)
    pass
if __name__=='__main__':
parser=argparse.ArgumentParser(description="Fetching album metadata")
parser.add_argument("-i","--input", action='store',dest='f_url_list',default='url_list.txt',help="Input file path")
parser.add_argument('-o','--output',action='store',dest='dir_out',default=None )
result=parser.parse_args()
main(result.f_url_list, result.dir_out)
| StarcoderdataPython |
188984 | """
MetaWIBELE: config module
Configuration settings
Copyright (c) 2019 Harvard School of Public Health
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import sys
import argparse
import re
# try to import the python2 ConfigParser
# if unable to import, then try to import the python3 configparser
try:
import ConfigParser as configparser
except ImportError:
import configparser
import logging
# ---------------------------------------------------------------
# Description and arguments
# ---------------------------------------------------------------
description = """
Config metawibele
"""
def get_args ():
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-c', "--config",
help = 'input global config file',
required = False,
default = "none")
values = parser.parse_args()
return values
# get_args
def log_settings():
"""
Write to the log file the config settings for the run
"""
lines = []
lines.append("DATABASE SETTINGS")
lines.append("nucleotide database folder = " + nucleotide_database)
lines.append("protein database folder = " + protein_database)
if pathways_database_part1:
lines.append("pathways database file 1 = " + pathways_database_part1)
lines.append("pathways database file 2 = " + pathways_database_part2)
else:
lines.append("pathways database file = " + pathways_database_part2)
lines.append("utility mapping database folder = " + utility_mapping_database)
lines.append("")
lines.append("RUN MODES")
lines.append("resume = " + str(resume))
lines.append("verbose = " + str(verbose))
lines.append("bypass prescreen = " + str(bypass_prescreen))
lines.append("bypass nucleotide index = " + str(bypass_nucleotide_index))
lines.append("bypass nucleotide search = " + str(bypass_nucleotide_search))
lines.append("bypass translated search = " + str(bypass_translated_search))
lines.append("translated search = " + translated_alignment_selected)
lines.append("pick frames = " + pick_frames_toggle)
lines.append("threads = " + str(threads))
lines.append("")
lines.append("SEARCH MODE")
lines.append("search mode = " + search_mode)
lines.append("identity threshold = " + str(identity_threshold))
lines.append("")
lines.append("ALIGNMENT SETTINGS")
lines.append("evalue threshold = " + str(evalue_threshold))
lines.append("prescreen threshold = " + str(prescreen_threshold))
lines.append("translated subject coverage threshold = " + str(translated_subject_coverage_threshold))
lines.append("translated query coverage threshold = " + str(translated_query_coverage_threshold))
lines.append("")
lines.append("PATHWAYS SETTINGS")
lines.append("minpath = " + minpath_toggle)
lines.append("xipe = " + xipe_toggle)
lines.append("gap fill = " + gap_fill_toggle)
lines.append("")
lines.append("INPUT AND OUTPUT FORMATS")
lines.append("input file format = " + input_format)
lines.append("output file format = " + output_format)
lines.append("output max decimals = " + str(output_max_decimals))
lines.append("remove stratified output = " + str(remove_stratified_output))
lines.append("remove column description output = " + str(remove_column_description_output))
lines.append("log level = " + log_level)
lines.append("")
logger.info("\nRun config settings: \n\n" + "\n".join(lines))
def update_user_edit_config_file_single_item(section, name, value):
"""
Update the settings to the user editable config file for one item
"""
new_config_items = {section: {name: value}}
update_user_edit_config_file(new_config_items)
print("MetaWIBELE configuration file updated: " + section + " : " + name + " = " + str(value))
def update_user_edit_config_file(new_config_items):
"""
Update the settings to the user editable config file
"""
config = configparser.RawConfigParser()
# start with the current config settings
config_items = read_user_edit_config_file(full_path_user_edit_config_file)
# update with the new config items
for section in new_config_items:
for name, value in new_config_items[section].items():
if section in config_items:
if name in config_items[section]:
config_items[section][name] = value
else:
sys.exit("ERROR: Unable to add new name ( " + name +
" ) to existing section ( " + section + " ) to " +
" config file: " + full_path_user_edit_config_file)
else:
sys.exit("ERROR: Unable to add new section ( " + section +
" ) to config file: " + full_path_user_edit_config_file)
for section in config_items:
config.add_section(section)
for name, value in config_items[section].items():
value = str(value)
if "file" in section or "folder" in section:
# convert to absolute path if needed
if not os.path.isabs(value):
value = os.path.abspath(value)
config.set(section, name, value)
try:
file_handle = open(full_path_user_edit_config_file, "wt")
config.write(file_handle)
file_handle.close()
except EnvironmentError:
sys.exit("Unable to write to the MetaWIBELE config file.")
def read_user_edit_config_file(full_path_user_edit_config_file):
"""
Read the settings from the config file
"""
config = configparser.ConfigParser()
try:
config.read(full_path_user_edit_config_file)
except EnvironmentError:
sys.exit("Unable to read from the config file: " + full_path_user_edit_config_file)
# read through all of the sections
config_items = {}
for section in config.sections():
config_list = config.items(section)
config_items[section] = {}
for name, value in config_list:
if "file" in section or "folder" in section:
# if not absolute path, then return absolute path relative to this folder
if not os.path.isabs(value):
value = os.path.abspath(os.path.join(os.path.dirname(full_path_user_edit_config_file), value))
config_items[section][name] = value
return config_items
def get_item(config_items, section, name, type=None):
"""
Get the item from the dictionary of section/names from the user edit config file
"""
# try to obtain the value from the config dictionary
try:
value = config_items[section][name]
except KeyError:
sys.exit("CRITICAL ERROR: Unable to load value from " + full_path_user_edit_config_file +
" . \nItem not found. \nItem should be in section (" + section + ") with name (" + name + ").")
# if present, try to change the value type
if type:
try:
if type == "string":
value = str(value)
elif type == "int":
value = int(value)
elif type == "float":
value = float(value)
elif type == "bool":
if value in ["False", "false", "F", "f"]:
value = False
elif value in ["True", "true", "T", "t"]:
value = True
else:
raise ValueError
except ValueError:
sys.exit("CRITICAL ERROR: Unable to load value from " + full_path_user_edit_config_file +
" . \nItem found in section (" + section + ") with name (" + name + "). " +
"\nItem is not of type (" + type + ").")
return value
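# Example usage (as elsewhere in this module):
#   threads = get_item(config_items, "computation", "threads", "int")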
## default option for MetaWIBELE ##
version = '0.4.4'
log_level_choices = ["DEBUG","INFO","WARNING","ERROR","CRITICAL"]
log_level = log_level_choices[1]
verbose = 'DEBUG'
# name global logging instance
logger = logging.getLogger(__name__)
#logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s',
# level=getattr(logging, "INFO"), datefmt='%m-%d-%Y %I:%M:%S %p')
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s: %(message)s',
level = getattr(logging, log_level), filemode='w', datefmt='%m/%d/%Y %I:%M:%S %p')
## constant values ##
PROTEIN_FAMILY_ID = "familyID"
PROTEIN_ID = "seqID"
c_metedata_delim = "." # nested metadata, e.g. CD.dysbiosis
c_strat_delim = "|" # strantified item, e.g. Cluster_1000010|Bacteroides dorei
c_taxon_delim = "|" # taxonomic lineage, e.g. g__Faecalibacterium|s__Faecalibacterium_prausnitzii|t__Faecalibacterium_prausnitzii_A2-165
c_multiname_delim = ";" # multiple ietms, e.g. PF00482;PF01841
c_msp_unknown = "msp_unknown"
# diamond options
diamond_database_extension = ".dmnd"
diamond_cmmd_protein_search = "blastp"
diamond_cmmd_nucleotide_search = "blastx"
diamond_identity = 0.9 # identity threshold for uniref90 strong homologies
diamond_query_coverage = 0.8 # query and mutual coverage threshold of uniref90 strong homologies
diamond_mutual_coverage = 0.8
diamond_version={
"flag" : "--version",
"major" : 0,
"minor" : 8,
"second minor" : 22,
"line" : 0,
"column" : 2}
# CD-hit
cd_hit_prot_opts = "-d 100 -c 0.9 -aL 0.8 -aS 0.8 -G 0 -M 0 -B 0" # clustering protein families
cd_hit_gene_opts = "-d 100 -c 0.95 -aS 0.8 -G 0 -M 0 -B 0"
featureCounts_opts = " -F SAF "
## User config file ##
metawibele_install_directory = os.path.dirname(os.path.abspath(__file__))
config_directory = os.path.join(metawibele_install_directory, "configs")
user_edit_config_file = os.path.join(os.getcwd(), "metawibele.cfg")
if not os.path.exists (user_edit_config_file):
#user_edit_config_file = metawibele_install_directory + "/metawibele.cfg"
user_edit_config_file = os.path.join(config_directory, "metawibele.cfg")
full_path_user_edit_config_file = user_edit_config_file
# get the base settings from the user edit config file
config_items = read_user_edit_config_file(full_path_user_edit_config_file)
## Databases ##
# installed databases
database_directory = os.path.join(metawibele_install_directory, "data")
uniref_directory = os.path.join(database_directory, "uniref")
prefix_taxa = "uniprot_taxonomy"
prefix_mic_taxa = "uniprot_taxaID_microbiome"
prefix_mam_taxa = "uniprot_taxaID_mammalia"
prefix_human_pfam = "uniprot_human_pfam"
prefix_map = "map_"
taxonomy_database = ""
microbiome_taxa = ""
mammalia_taxa = ""
human_pfam_database = ""
uniref_dmnd = ""
uniref_database = []
map_file_names = []
files = os.listdir(uniref_directory)
for myname in files:
    i = os.path.join(uniref_directory, myname)
if myname.endswith(diamond_database_extension):
uniref_dmnd = i
if myname.startswith(prefix_taxa):
taxonomy_database = i
if myname.startswith(prefix_mic_taxa):
microbiome_taxa = i
if myname.startswith(prefix_mam_taxa):
mammalia_taxa = i
if myname.startswith(prefix_human_pfam):
human_pfam_database = i
if myname.startswith(prefix_map):
map_file_names.append(myname)
uniref_database.append(i)
domain_directory = os.path.join(database_directory, "domain")
pdb_database_choices = ["pdb_chain_taxonomy.tsv.gz","pdb_chain_pfam.tsv.gz"]
pdb_taxonomy = os.path.join(domain_directory, pdb_database_choices[0])
pdb_pfam = os.path.join(domain_directory, pdb_database_choices[1])
pfam_database_choices = ["pfam_descriptions.txt.gz"]
pfam_database = os.path.join(domain_directory, pfam_database_choices[0])
pfam2go_database_choices = ['gene_ontology.txt.gz']
pfam2go_database = os.path.join(domain_directory, pfam2go_database_choices[0])
interaction_database_choices = ['INTERACTION.txt.gz']
interaction_database = os.path.join(domain_directory, interaction_database_choices[0])
Expression_Atlas_database = [os.path.join(domain_directory, "32_Uhlen_Lab_colon_rectum.txt.gz"),
os.path.join(domain_directory, "Encode_sigmoid_colon.txt.gz"),
os.path.join(domain_directory, "FANTOM5_colon_rectum.txt.gz"),
os.path.join(domain_directory, "GTEx_sigmoid_transverse_colon.txt.gz"),
os.path.join(domain_directory,"Human_protein_Atlas_colon_rectum.txt.gz"),
os.path.join(domain_directory, "Human_proteome_map_colon_rectum.txt.gz"),
os.path.join(domain_directory, "Illumina_Body_Map_colon.txt.gz")]
misc_directory = os.path.join(database_directory, "misc")
vignettes_database = os.path.join(misc_directory, "vignette_function.tsv")
# update databases by using user provided
# uniref database
uniref_database_dir = ""
try:
env_uniref_db = os.path.abspath(os.environ['UNIREF_LOCATION'])
if os.path.exists (env_uniref_db):
uniref_database_dir = env_uniref_db
except:
pass
user_uniref_db = os.path.join(os.getcwd(), "uniref_database")
if os.path.exists (user_uniref_db):
uniref_database_dir = user_uniref_db
config_uniref_db = get_item (config_items, "database", "uniref_db", "string")
if os.path.exists (config_uniref_db) and config_uniref_db != "" and config_uniref_db != "none":
config_uniref_db = os.path.abspath(config_uniref_db)
if os.path.abspath(uniref_directory) != config_uniref_db:
uniref_database_dir = config_uniref_db
if uniref_database_dir.lower() != "none" and uniref_database_dir != "":
    files = os.listdir(uniref_database_dir)
    for myname in files:
if myname.startswith(prefix_taxa):
taxonomy_database = os.path.join(uniref_database_dir, myname)
if myname.startswith(prefix_mic_taxa):
microbiome_taxa = os.path.join(uniref_database_dir, myname)
if myname.startswith(prefix_mam_taxa):
mammalia_taxa = os.path.join(uniref_database_dir, myname)
if myname.startswith(prefix_human_pfam):
human_pfam_database = os.path.join(uniref_database_dir, myname)
if myname.endswith(diamond_database_extension):
uniref_dmnd = os.path.join(uniref_database_dir, myname)
if myname.startswith(prefix_map):
i = os.path.join(uniref_database_dir, myname)
if myname in map_file_names:
j = os.path.join(uniref_directory, myname)
uniref_database.remove(j)
uniref_database.append(i)
else:
uniref_database.append(i)
uniref_database.sort(reverse=True)
else:
logger.info ("\n WARNING!! MetaWIBELE does't find valid uniref databse and will use the demo databse by default.\n" +
"\tPlease provide the correct location of the required uniref database by any one of the following options:\n" +
"\t1) set the location with the environment variable $UNIREF_LOCATION\n" +
"\t2) include the database (named as \"uniref_database\") in the current working directory\n" +
"\t3) set the location in the global config file (metawibele.cfg) which is in the current working directory")
# domain database
domain_database_dir = get_item (config_items, "database", "domain_db", "string")
if not domain_database_dir.lower() == "none" and not domain_database_dir == "":
files = [os.path.abspath(x) for x in os.listdir(domain_database_dir)]
for i in files:
myname = os.path.basename(i)
if myname == "pdb_chain_taxonomy.tsv.gz":
pdb_taxonomy = os.path.join(domain_database_dir, myname)
if myname == "pdb_chain_pfam.tsv.gz":
pdb_pfam = os.path.join(domain_database_dir, myname)
if myname == "pfam_descriptions.txt.gz":
pfam_database = os.path.join(domain_database_dir, myname)
if myname == "gene_ontology.txt.gz":
pfam2go_database = os.path.join(domain_database_dir, myname)
if myname == "INTERACTION.txt.gz":
interaction_database = os.path.join(domain_database_dir, myname)
## Computing resources ##
threads = get_item (config_items, "computation", "threads", "int")
memory = get_item(config_items, "computation", "memory", "int")
time = get_item(config_items, "computation", "time", "int")
cd_hit_memory = memory
## Input and output ##
# input folder and files
basename = get_item(config_items, "basic", "basename", "string")
#working_dir = get_item(config_items, "output", "output_dir", "string")
working_dir = ""
if working_dir.lower() == "none" or working_dir.lower() == "":
working_dir = os.getcwd()
working_dir = os.path.abspath (working_dir)
annotation_dir = os.path.join(working_dir, "characterization")
cluster_dir = os.path.join(annotation_dir, "clustering")
global_homology_dir = os.path.join(annotation_dir, "global_homology_annotation")
domain_motif_dir = os.path.join(annotation_dir, "domain_motif_annotation")
abundance_dir = os.path.join(annotation_dir, "abundance_annotation")
priority_dir = os.path.join(working_dir, "prioritization")
study = get_item(config_items, "basic", "study", "string")
#metadata = get_item(config_items, "input", "metadata", "string")
#metadata = os.path.abspath(metadata)
#gene_catalog_prot = get_item(config_items, "input", "gene_catalog_prot", "string")
#gene_catalog_prot = os.path.abspath(gene_catalog_prot)
#gene_catalog_count = get_item(config_items, "input", "gene_catalog_count", "string")
#gene_catalog_count = os.path.abspath(gene_catalog_count)
metadata = ""
gene_catalog_prot = ""
gene_catalog_count = ""
protein_family = os.path.join(annotation_dir, basename + "_proteinfamilies.clstr")
protein_family_prot_seq = os.path.join(annotation_dir, basename + "_proteinfamilies.centroid.faa")
protein_family_nuc_seq = os.path.join(annotation_dir, basename + "_proteinfamilies.centroid.fna")
protein_family_relab = os.path.join(annotation_dir, basename + "_proteinfamilies_nrm.tsv")
protein_family_ann = os.path.join(annotation_dir, basename + "_proteinfamilies_annotation.tsv")
protein_family_attr = os.path.join(annotation_dir, basename + "_proteinfamilies_annotation.attribute.tsv")
unsupervised_rank = os.path.join(priority_dir, basename + "_unsupervised_prioritization.rank.table.tsv")
supervised_rank = os.path.join(priority_dir, basename + "_supervised_prioritization.rank.table.tsv")
## characterization ##
tshld_consistency = 0.75 # the minimum annotation consistency in one protein family
# protein family
tshld_identity = 0.25 # the minimum identity of homology
tshld_coverage = 0.25 # the minimum coverage of homology
taxa_source = "Rep" # the source of taxa for one protein family, representatives vs. LCA
# abundance
normalize = get_item(config_items, "abundance", "normalize", "string") # the method for abundance normalization
abundance_detection_level = get_item(config_items, "abundance", "abundance_detection_level", "float")
# MSP
tshld_classified = get_item(config_items, "msp", "tshld_classified", "float") # the minimum percentage of classified genes in each MSP
tshld_diff = get_item(config_items, "msp", "tshld_diff", "float") # the minimum percentage difference between most and second dominant taxa in the MSP
tshld_lca = 0.80 # the minimum consistency cutoff for LCA calculattion
taxa_final = "Rep" # the source of taxa for one protein family, representatives vs. LCA
mspminer = os.path.join(config_directory, "MSPminer_setting.cfg")
# interporscan
interproscan_cmmd = get_item(config_items, "interproscan", "interproscan_cmmd", "string")
interproscan_cmmd = re.sub("\"", "", interproscan_cmmd)
interproscan_appl = get_item(config_items, "interproscan", "interproscan_appl", "string")
interproscan_appl = re.sub("\"", "", interproscan_appl)
interproscan_appl_item = interproscan_appl
if interproscan_appl.lower() == "none" or interproscan_appl.lower() == "" or interproscan_appl.lower() == "all":
interproscan_appl_item = "CDD,COILS,Gene3D,HAMAP,MobiDBLite,PANTHER,Pfam,PIRSF,PRINTS,PROSITEPATTERNS,PROSITEPROFILES,SFLD,SMART,SUPERFAMILY,TIGRFAM,Phobius,SignalP,TMHMM"
interproscan_appl = "all"
interproscan_appl = re.sub("\"", "", interproscan_appl)
split_number = get_item(config_items, "interproscan", "split_number", "int")
interproscan_type = []
interproscan_appl_item = re.sub("\"", "", interproscan_appl_item)
tmp = interproscan_appl_item.split(",")
for item in tmp:
if item == "Phobius":
item = "phobius.signaling"
item = "interpro." + item + ".tsv"
interproscan_type.append(item)
item = "phobius.transmembrane"
item = "interpro." + item + ".tsv"
interproscan_type.append(item)
continue
if item == "Pfam":
item = "PfamDomain"
if item == "COILS":
item = "Coils"
if item == "SignalP":
item = "signalp.signaling"
if item == "TMHMM":
item = "tmhmm.transmembrane"
item = "interpro." + item + ".tsv"
interproscan_type.append(item)
# DDI
#human_microbiome_ddi = get_item(config_items, "DDI", "human_microbiome_ddi", "string")
# maaslin2
maaslin2_dir = os.path.join(abundance_dir, "DA", "maaslin2_output/")
phenotype = get_item(config_items, "maaslin2", "phenotype", "string")
phenotype = re.sub("\"", "", phenotype)
phenotype = phenotype.split(";")
if phenotype == "":
phenotype = "none"
reference = get_item(config_items, "maaslin2", "reference", "string")
reference = re.sub("\"", "", reference)
tmp = reference.split(";")
contrast_status = {}
ref_status_tmp = {}
for item in tmp:
tmp1 = item.split(",")
if len(tmp1) > 1:
contrast_status[tmp1[0]] = tmp1[1]
if not tmp1[0] in ref_status_tmp:
ref_status_tmp[tmp1[0]] = {}
tmp2 = tmp1[1].split(",")
for i in tmp2:
i = i.split("|")[-1]
ref_status_tmp[tmp1[0]][i] = ""
ref_status = {}
for i in ref_status_tmp.keys():
if len(ref_status_tmp[i].keys()) == 1:
for j in ref_status_tmp[i].keys():
ref_status[i] = j
tshld_prevalence = get_item(config_items, "maaslin2", "tshld_prevalence", "float")
tshld_qvalue = get_item(config_items, "maaslin2", "tshld_qvalue", "float")
effect_size = get_item(config_items, "maaslin2", "effect_size", "string")
if effect_size == "log(fc)":
effect_size = "log(FC)"
nested_effects = "none"
maaslin2_cmmd = get_item(config_items, "maaslin2", "maaslin2_cmmd", "string")
maaslin2_cmmd = re.sub("\"", "", maaslin2_cmmd)
maaslin2_utils = os.path.join(metawibele_install_directory, "Rscripts", "maaslin2_utils.R")
pcl_utils = os.path.join(metawibele_install_directory, "Rscripts", "pcl_utils.R")
transpose_cmmd = "metawibele_transpose"
min_abundance = get_item(config_items, "maaslin2", "min_abundance", "float")
min_prevalence = get_item(config_items, "maaslin2", "min_prevalence", "float")
min_variance = get_item(config_items, "maaslin2", "min_variance", "float")
max_significance = get_item(config_items, "maaslin2", "max_significance", "float")
normalization = get_item(config_items, "maaslin2", "normalization", "string")
transform = get_item(config_items, "maaslin2", "transform", "string")
analysis_method = get_item(config_items, "maaslin2", "analysis_method", "string")
fixed_effects = get_item(config_items, "maaslin2", "fixed_effects", "string")
random_effects = get_item(config_items, "maaslin2", "random_effects", "string")
correction = get_item(config_items, "maaslin2", "correction", "string")
standardize = get_item(config_items, "maaslin2", "standardize", "string")
plot_heatmap = get_item(config_items, "maaslin2", "plot_heatmap", "string")
heatmap_first_n = get_item(config_items, "maaslin2", "heatmap_first_n", "string")
plot_scatter = get_item(config_items, "maaslin2", "plot_scatter", "string")
maaslin2_cores = get_item(config_items, "maaslin2", "maaslin2_cores", "int")
if fixed_effects == "all":
maaslin2_cmmd_opts = ["--min_abundance", min_abundance, "--min_prevalence", min_prevalence, "--min_variance", min_variance, "--max_significance", max_significance, "--normalization", normalization, "--transform", transform, "--analysis_method", analysis_method, "--cores", maaslin2_cores, "--random_effects", random_effects, "--correction", correction, "--standardize", standardize, "--plot_heatmap", plot_heatmap, "--heatmap_first_n", heatmap_first_n, "--plot_scatter", plot_scatter, "--reference", reference]
else:
maaslin2_cmmd_opts = ["--min_abundance", min_abundance, "--min_prevalence", min_prevalence, "--min_variance", min_variance, "--max_significance", max_significance, "--normalization", normalization, "--transform", transform, "--analysis_method", analysis_method, "--cores", maaslin2_cores, "--fixed_effects", fixed_effects, "--random_effects", random_effects, "--correction", correction, "--standardize", standardize, "--plot_heatmap", plot_heatmap, "--heatmap_first_n", heatmap_first_n, "--plot_scatter", plot_scatter, "--reference", reference]
if __name__=='__main__':
pass
| StarcoderdataPython |
3203645 | # encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
"""
Tests for distributed IO.
Many of these tests require a 4-engine cluster running and will write out (and
afterwards remove) temporary files. These tests assume that all engines have
access to the same filesystem but do not assume the client has access to that
same filesystem.
"""
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from distarray.externals.six.moves import range
from distarray.testing import import_or_skip, DefaultContextTestCase
from distarray.globalapi.distarray import DistArray
from distarray.globalapi.maps import Distribution
def cleanup_file(filepath):
import os
if os.path.exists(filepath):
os.remove(filepath)
def engine_temp_path(extension=''):
from distarray.testing import temp_filepath
return temp_filepath(extension)
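# Note: cleanup_file and engine_temp_path run on the engines via Context.apply,
# so the paths they handle live on the engines' filesystems, not the client's.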
class TestDnpyFileIO(DefaultContextTestCase):
@classmethod
def setUpClass(cls):
super(TestDnpyFileIO, cls).setUpClass()
cls.distribution = Distribution(cls.context, (100,), dist={0: 'b'})
cls.da = cls.context.empty(cls.distribution)
cls.output_paths = cls.context.apply(engine_temp_path)
def test_save_load_with_filenames(self):
try:
self.context.save_dnpy(self.output_paths, self.da)
db = self.context.load_dnpy(self.output_paths)
self.assertTrue(isinstance(db, DistArray))
self.assertEqual(self.da, db)
finally:
for filepath, target in zip(self.output_paths, self.context.targets):
self.context.apply(cleanup_file, (filepath,), targets=(target,))
def test_save_load_with_prefix(self):
output_path = self.output_paths[0]
try:
self.context.save_dnpy(output_path, self.da)
db = self.context.load_dnpy(output_path)
self.assertTrue(isinstance(db, DistArray))
self.assertEqual(self.da, db)
finally:
for rank in self.context.targets:
filepath = output_path + "_" + str(rank) + ".dnpy"
self.context.apply(cleanup_file, (filepath,), targets=(rank,))
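# dim_data fixtures: each tuple below holds one dict per array dimension for
# a single rank; 'b' = block, 'c' = cyclic, 'n' = not distributed,
# 'u' = unstructured, with per-rank start/stop bounds, grid position, or
# explicit indices.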
bn_test_data = [
({'size': 2,
'dist_type': 'b',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'start': 0,
'stop': 1,
},
{'size': 10,
'dist_type': 'n',
}),
({'size': 2,
'dist_type': 'b',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'start': 1,
'stop': 2,
},
{'size': 10,
'dist_type': 'n',
})
]
nc_test_data = [
({'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'c',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'start': 0,
},),
({'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'c',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'start': 1,
},)
]
nu_test_data = [
# Note: unstructured indices must be in increasing order
# (restriction of h5py / HDF5)
(
{'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'u',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'indices': [0, 3, 4, 6, 8],
},
),
(
{'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'u',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'indices': [1, 2, 5, 7, 9],
},
)
]
class TestNpyFileLoad(DefaultContextTestCase):
"""Try loading a .npy file on the engines.
This test assumes that all engines have access to the same file system.
"""
ntargets = 2
@classmethod
def setUpClass(cls):
super(TestNpyFileLoad, cls).setUpClass()
cls.expected = np.arange(20).reshape(2, 10)
def save_test_file(data):
import numpy
from distarray.testing import temp_filepath
output_path = temp_filepath('.npy')
numpy.save(output_path, data)
return output_path
cls.output_path = cls.context.apply(save_test_file, (cls.expected,),
targets=[cls.context.targets[0]])[0] # noqa
@classmethod
def tearDownClass(cls):
cls.context.apply(cleanup_file, (cls.output_path,),
targets=[cls.context.targets[0]])
super(TestNpyFileLoad, cls).tearDownClass()
def test_load_bn(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
bn_test_data)
da = self.context.load_npy(self.output_path, distribution)
for i in range(da.shape[0]):
for j in range(da.shape[1]):
self.assertEqual(da[i, j], self.expected[i, j])
def test_load_nc(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nc_test_data)
da = self.context.load_npy(self.output_path, distribution)
for i in range(da.shape[0]):
for j in range(da.shape[1]):
self.assertEqual(da[i, j], self.expected[i, j])
def test_load_nu(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nu_test_data)
da = self.context.load_npy(self.output_path, distribution)
for i in range(da.shape[0]):
for j in range(da.shape[1]):
self.assertEqual(da[i, j], self.expected[i, j])
def check_hdf5_file(output_path, expected, dataset="buffer"):
import h5py
import numpy
with h5py.File(output_path, 'r') as fp:
if dataset not in fp:
return False
if not numpy.array_equal(expected, fp[dataset]):
return False
return True
class TestHdf5FileSave(DefaultContextTestCase):
def setUp(self):
super(TestHdf5FileSave, self).setUp()
self.h5py = import_or_skip('h5py')
self.output_path = self.context.apply(engine_temp_path, ('.hdf5',),
targets=[self.context.targets[0]])[0]
def tearDown(self):
self.context.apply(cleanup_file, (self.output_path,),
targets=[self.context.targets[0]])
def test_save_block(self):
datalen = 33
expected = np.arange(datalen)
da = self.context.fromarray(expected)
self.context.save_hdf5(self.output_path, da, mode='w')
file_check = self.context.apply(check_hdf5_file,
(self.output_path, expected),
targets=[self.context.targets[0]])[0]
self.assertTrue(file_check)
def test_save_3d(self):
shape = (4, 5, 3)
expected = np.random.random(shape)
dist = {0: 'b', 1: 'c', 2: 'n'}
distribution = Distribution(self.context, shape, dist=dist)
da = self.context.fromarray(expected, distribution=distribution)
self.context.save_hdf5(self.output_path, da, mode='w')
file_check = self.context.apply(check_hdf5_file,
(self.output_path, expected),
targets=[self.context.targets[0]])[0]
self.assertTrue(file_check)
def test_save_two_datasets(self):
datalen = 33
foo = np.arange(datalen)
bar = np.random.random(datalen)
da_foo = self.context.fromarray(foo)
da_bar = self.context.fromarray(bar)
# save 'foo' to a file
self.context.save_hdf5(self.output_path, da_foo, key='foo', mode='w')
# save 'bar' to a different dataset in the same file
self.context.save_hdf5(self.output_path, da_bar, key='bar', mode='a')
foo_checks = self.context.apply(check_hdf5_file,
(self.output_path, foo),
{'dataset': 'foo'},
targets=[self.context.targets[0]])[0]
self.assertTrue(foo_checks)
bar_checks = self.context.apply(check_hdf5_file,
(self.output_path, bar),
{'dataset': 'bar'},
targets=[self.context.targets[0]])[0]
self.assertTrue(bar_checks)
class TestHdf5FileLoad(DefaultContextTestCase):
ntargets = 2
@classmethod
def setUpClass(cls):
cls.h5py = import_or_skip('h5py')
super(TestHdf5FileLoad, cls).setUpClass()
cls.output_path = cls.context.apply(engine_temp_path, ('.hdf5',),
targets=[cls.context.targets[0]])[0]
cls.expected = np.arange(20).reshape(2, 10)
def make_test_file(output_path, arr):
import h5py
with h5py.File(output_path, 'w') as fp:
fp["test"] = arr
cls.context.apply(make_test_file, (cls.output_path, cls.expected),
targets=[cls.context.targets[0]])
@classmethod
def tearDownClass(cls):
cls.context.apply(cleanup_file, (cls.output_path,),
targets=[cls.context.targets[0]])
super(TestHdf5FileLoad, cls).tearDownClass()
def test_load_bn(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
bn_test_data)
da = self.context.load_hdf5(self.output_path, distribution, key="test")
assert_array_equal(self.expected, da)
def test_load_nc(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nc_test_data)
da = self.context.load_hdf5(self.output_path, distribution, key="test")
assert_array_equal(self.expected, da)
def test_load_nu(self):
distribution = Distribution.from_dim_data_per_rank(self.context,
nu_test_data)
da = self.context.load_hdf5(self.output_path, distribution, key="test")
assert_array_equal(self.expected, da)
if __name__ == '__main__':
unittest.main(verbosity=2)
| StarcoderdataPython |
3219612 | import os
indir="/afs/cern.ch/work/e/edreyer/public/madgraph5atlasval/source/MCVal/events_DM/"
outdir="/afs/cern.ch/work/e/edreyer/public/madgraph5atlasval/source/MCVal/events_DM/"
channels=["mumu","ee"]
masses=["500","1000","2000"]
couplings=["0p02"]
#variables=["n_l1","pdgid_l1","e_l1","px_l1","py_l1","pz_l1","pt_l1","eta_l1","phi_l1","n_l2","pdgid_l2","e_l2","px_l2","py_l2","pz_l2","pt_l2","eta_l2","phi_l2","m_ll"]
variables=["pt_l1","pt_l2","eta_l1","eta_l2","phi_l1","phi_l2","m_ll"]
massString={
"500": "0p5",
"1000": "1p0",
"2000": "2p0",
}
titlex={
"m_ll": "m_{ll} [GeV]",
"pt_l1": "p_{T} (l1) [GeV]",
"pt_l2": "p_{T} (l2) [GeV]",
"phi_l1": "#phi (l1)",
"phi_l2": "#phi (l2)",
"eta_l1": "#eta (l1)",
"eta_l2": "#eta (l2)",
}
minx={
"eta_l1": "-4",
"eta_l2": "-4",
"phi_l1": "-3.5",
"phi_l2": "-3.5",
}
maxx={
"eta_l1": "4",
"eta_l2": "4",
"phi_l1": "3.5",
"phi_l2": "3.5",
}
channellabel={
"mumu": "#mu#mu",
"ee": "ee",
}
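# NB: masses above include "2000" but linecolor has a "3000" key, so the
# 2000 GeV sample falls back to the default colour ("2") in the loop below.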
linecolor={
"500": 8,
"1000": 9,
"3000": 2,
}
for channel in channels:
for mass in masses:
for coupling in couplings:
for var in variables:
card="card_%s_%s_%s_%s.dat" % (channel, mass, coupling, var)
f=open(outdir + card,"w+")
f.write("file: %sevents_%s_%s_%s_DM.root\n" % (indir,channel,massString[mass],coupling))
#f.write("name: can_%s\n" % var)
f.write("name: events\n")
f.write("var: %s\n" % var)
f.write("cond: n_l1>0 && n_l2>0\n")
f.write("logy: 1\n")
f.write("title: %s GeV\n" % (mass))
f.write("titley: Events\n")
f.write("titlex: %s\n" % (titlex[var] if var in titlex else var))
f.write("minx: %s\n" % (minx[var] if var in minx else "0"))
f.write("maxx: %s\n" % (maxx[var] if var in maxx else "4000"))
f.write("miny: 0.5\n")
f.write("maxy: 50000\n")
f.write("linecolor: %s\n" % (linecolor[mass] if mass in linecolor else "2"))
f.write("atlas: Simulation\n")
f.write("atlasx: 0.6\n")
f.write("latex: Z'_{DM} \\rightarrow %s\n" % (channellabel[channel]))
f.write("latexx: 0.73\n")
f.write("latexy: 0.77\n")
f.write("sublatex: 10k events\n")
f.write("sublatexx: 0.73\n")
f.write("sublatexy: 0.69\n")
f.close()
| StarcoderdataPython |
78662 | <reponame>owitplat/aws-cloudwatch-log-minder<filename>src/aws_cloudwatch_log_minder/delete_empty_log_streams.py
import json
from datetime import datetime, timedelta
from typing import List
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
from .logger import log
cw_logs = None
def ms_to_datetime(ms: int) -> datetime:
return datetime(1970, 1, 1) + timedelta(milliseconds=ms)
def _delete_empty_log_streams(
group: dict, purge_non_empty: bool = False, dry_run: bool = False
):
now = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
log_group_name = group["logGroupName"]
retention_in_days = group.get("retentionInDays", 0)
if not retention_in_days:
log.info(
"skipping log group %s as it has no retention period set", log_group_name
)
return
log.info(
"%s deleting streams from log group %s older than the retention period of %s days",
("dry run" if dry_run else ""),
log_group_name,
retention_in_days,
)
kwargs = {
"logGroupName": log_group_name,
"orderBy": "LastEventTime",
"descending": False,
"PaginationConfig": {"PageSize": 50},
}
for response in cw_logs.get_paginator("describe_log_streams").paginate(**kwargs):
for stream in response["logStreams"]:
log_stream_name = stream["logStreamName"]
last_event = ms_to_datetime(
stream.get("lastEventTimestamp", stream.get("creationTime"))
)
if (
last_event > (now - timedelta(days=retention_in_days))
and "lastEventTimestamp" not in stream
):
log.info(
"keeping group %s, empty log stream %s, created on %s",
log_group_name,
log_stream_name,
last_event,
)
continue
elif last_event > (now - timedelta(days=retention_in_days)):
log.info(
"there are no log streams from group %s older than the retention period of %s days",
log_group_name,
retention_in_days,
)
return
if not purge_non_empty:
try:
response = cw_logs.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
startFromHead=False,
limit=2,
)
if response["events"]:
                        log.warning(
"keeping group %s, log stream %s, as it is non empty. Last event stored on %s",
log_group_name,
log_stream_name,
last_event,
)
continue
except ClientError as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
log.info(
"log stream %s from group %s no longer present in cloudwatch",
log_stream_name,
log_group_name,
)
log.info(
"%s deleting from group %s, log stream %s, with %s bytes last event stored on %s",
("dry run" if dry_run else ""),
log_group_name,
log_stream_name,
stream["storedBytes"],
last_event,
)
if dry_run:
continue
try:
cw_logs.delete_log_stream(
logGroupName=log_group_name, logStreamName=log_stream_name
)
except ClientError as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
log.info(
"log stream %s from group %s already deleted",
log_stream_name,
log_group_name,
)
else:
log.error(
"failed to delete log stream %s from group %s, %s",
log_stream_name,
log_group_name,
e,
)
def delete_empty_log_streams(
log_group_name_prefix: str = None,
purge_non_empty: bool = False,
dry_run: bool = False,
region: str = None,
profile: str = None,
):
global cw_logs
boto_session = boto3.Session(region_name=region, profile_name=profile)
cw_logs = boto_session.client("logs", config=Config(retries=dict(max_attempts=10)))
kwargs = {"PaginationConfig": {"PageSize": 50}}
if log_group_name_prefix:
kwargs["logGroupNamePrefix"] = log_group_name_prefix
log.info("finding log groups with prefix %r", log_group_name_prefix)
for response in cw_logs.get_paginator("describe_log_groups").paginate(**kwargs):
for group in response["logGroups"]:
_delete_empty_log_streams(group, purge_non_empty, dry_run)
def get_all_log_group_names() -> List[str]:
result: List[str] = []
cw_logs = boto3.client("logs", config=Config(retries=dict(max_attempts=10)))
for response in cw_logs.get_paginator("describe_log_groups").paginate(
PaginationConfig={"PageSize": 50}
):
result.extend(list(map(lambda g: g["logGroupName"], response["logGroups"])))
return result
def fan_out(
function_arn: str, log_group_names: List[str], purge_non_empty: bool, dry_run: bool
):
awslambda = boto3.client("lambda")
log.info(
"recursively invoking %s to delete empty log streams from %d log groups",
function_arn,
len(log_group_names),
)
for log_group_name in log_group_names:
payload = json.dumps(
{
"log_group_name_prefix": log_group_name,
"purge_non_empty": purge_non_empty,
"dry_run": dry_run,
}
)
awslambda.invoke(
FunctionName=function_arn, InvocationType="Event", Payload=payload
)
def handle(request, context):
global cw_logs
cw_logs = boto3.client("logs", config=Config(retries=dict(max_attempts=10)))
dry_run = request.get("dry_run", False)
if "dry_run" in request and not isinstance(dry_run, bool):
raise ValueError(f"'dry_run' is not a boolean value, {request}")
purge_non_empty = request.get("purge_non_empty", False)
if "purge_non_empty" in request and not isinstance(dry_run, bool):
raise ValueError(f"'purge_non_empty' is not a boolean value, {request}")
log_group_name_prefix = request.get("log_group_name_prefix")
if log_group_name_prefix:
delete_empty_log_streams(log_group_name_prefix, purge_non_empty, dry_run)
else:
fan_out(
context.invoked_function_arn,
get_all_log_group_names(),
purge_non_empty,
dry_run,
)
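# Hypothetical local smoke test (not part of the Lambda entry points; assumes
# AWS credentials are configured):
# if __name__ == "__main__":
#     delete_empty_log_streams(log_group_name_prefix="/aws/lambda/", dry_run=True)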
| StarcoderdataPython |
import math
from math import sqrt, atan2, degrees
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
from skimage import io
import matplotlib.pyplot as plt
from scipy import stats
from scipy import spatial
import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import watershed
from skimage.feature import peak_local_max
# ref : https://scikit-image.org/docs/dev/auto_examples/features_detection/plot_blob.html
#image = data.hubble_deep_field()[0:500, 0:500]
#image_gray = rgb2gray(image)
neighbor_search_dist = 100
im_path = r'F:\entropy_veg\lidar\las_products\USGS_LPC_TN_27County_blk2_2015_2276581SE_LAS_2017\USGS_LPC_TN_27County_blk2_2015_2276581SE_LAS_2017_dhm.tif'
image_gray = io.imread(im_path)
image_gray[image_gray > 500] = 0
image_gray[image_gray < 3] = 0
image_gray = image_gray[2500:, 500:2000]
#image_gray = image_gray[500:2000, 4500:6000]
#image_gray = image_gray[3100:3500, 1100:1500]
def distance(p0, p1):
return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)
def angle(p1, p2):
xDiff = p2[0] - p1[0]
yDiff = p2[1] - p1[1]
#return degrees(atan2(yDiff, xDiff))
return atan2(yDiff, xDiff)
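# Example: angle((0, 0), (1, 1)) returns atan2(1, 1) ~= 0.785 rad (45 deg).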
io.imshow(image_gray)
io.show()
# blobs
print('Computing laplace of gaussian')
#blobs_log = blob_log(image_gray, max_sigma=35, min_sigma=3, num_sigma=10, threshold=2, overlap=.01)
blobs_log = blob_log(image_gray, max_sigma=35, min_sigma=6, num_sigma=10, threshold=2, overlap=.01)
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
print('Computed')
fig, ax = plt.subplots(1, 1)
# ax.set_title('Laplacian of Gaussian')
ax.imshow(image_gray)
print('Drawing')
for blob in blobs_log:
y, x, r = blob
#c = plt.Circle((x, y), 3, color='red', linewidth=1, fill=False)
c = plt.Circle((x, y), r, color='red', linewidth=1, fill=False)
ax.add_patch(c)
ax.set_axis_off()
plt.tight_layout()
plt.show()
y, x = blobs_log[:,0], blobs_log[:,1]
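# Flip y into plot coordinates; 1500 is the height of the crop taken above.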
y = 1500-y
# Define the borders
deltaX = (max(x) - min(x))/10
deltaY = (max(y) - min(y))/10
xmin = min(x) - deltaX
xmax = max(x) + deltaX
ymin = min(y) - deltaY
ymax = max(y) + deltaY
print(xmin, xmax, ymin, ymax)
# Create meshgrid
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)*10e9
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
cfset = ax.contourf(xx, yy, f, cmap='coolwarm')
ax.imshow(np.rot90(f), cmap='coolwarm', extent=[xmin, xmax, ymin, ymax])
cset = ax.contour(xx, yy, f, colors='k')
ax.clabel(cset, inline=1, fontsize=10)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.title('2D Gaussian Kernel density estimation')
for blob in blobs_log:
ya, xa, r = blob
#c = plt.Circle((x, y), 3, color='red', linewidth=1, fill=False)
c = plt.Circle((xa, 1500-ya), 5, color='red', linewidth=1, fill=True)
ax.add_patch(c)
plt.show()
pt_coords = blobs_log[:,0:2]
tree = spatial.cKDTree(pt_coords,
leafsize=16,
compact_nodes=True,
copy_data=False,
balanced_tree=True)
print('Finding neighbors')
neighbor_list = [tree.query_ball_point([y, x], neighbor_search_dist) for y, x in pt_coords]  # tree and pt_coords both use (y, x) order
for i,l in enumerate(neighbor_list):
if i in l:
l.remove(i)
distances_list = []
angles_list = []
print('Computing angles and distances')
for (y,x),group in zip(pt_coords,neighbor_list):
distance_group = []
angles_group = []
for neighbor in group:
nx = pt_coords[neighbor][1]
ny = pt_coords[neighbor][0]
d = distance([x,y],[nx,ny])
a = angle([x,y],[nx,ny])
distance_group.append(d)
angles_group.append(a)
distances_list.append(distance_group)
angles_list.append(angles_group)
pt_data = {i:{'neighbors':neis, 'distances':dists, 'angles':angs}
for i,(neis,dists,angs) in
enumerate(zip(neighbor_list,distances_list,angles_list))}
print('Done')
| StarcoderdataPython |
3296564 | <gh_stars>1-10
"""
18/12/18
Convert the XML annotations to csv
"""
from pathlib import Path
import pandas as pd
from vpv.annotations.impc_xml import load_xml
import yaml
from collections import defaultdict
xml_dir = Path('/home/neil/Desktop/xml_to_csv')
outfile = '/home/neil/Desktop/181218_xml_annotations_to_csv.csv'
term_mapping_file = '/vpv_viewer/annotations/options/e15_5/ucd_terms.yaml'
with open(term_mapping_file) as fh:
terms = yaml.load(fh)
map_ = {}
for _, p in terms['parameters'].items():
param_id = p['impc_id']
name = p['name']
map_[param_id] = name
all_spec = []
for xml_file in xml_dir.iterdir():
r = load_xml(str(xml_file))
print(r)
spec_records = {}
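    # load_xml is assumed to return a tuple whose 6th element (index 5) is
    # the specimen id and whose 8th (index 7) maps parameter ids to option
    # dicts (positions inferred from the usage below).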
spec_id = r[5]
for param_id, options in r[7].items():
option = options['option']
param_name = map_[param_id]
spec_records[param_name] = option
df = pd.DataFrame.from_dict(spec_records, orient='index', columns=['option'])
df.rename(columns={'option': spec_id}, inplace=True)
all_spec.append(df)
o = pd.concat(all_spec, axis=1)
o.to_csv(outfile) | StarcoderdataPython |
69059 | # -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
import itertools
import logging
from builtins import range, zip
from shot_detector.filters import (
DelayFilter,
MinStdMeanSWFilter,
NikitinSWFilter,
AlphaBetaSWFilter,
BsplineSWFilter,
DetrendSWFilter,
SavitzkyGolaySWFilter,
WienerSWFilter,
MedianSWFilter,
ExtremaSWFilter,
PearsonCorrelationSWFilter,
ShiftSWFilter,
LevelSWFilter,
MeanSWFilter,
NormFilter,
DeviationSWFilter,
StdSWFilter,
DecisionTreeRegressorSWFilter,
ModulusFilter,
DCTFilter,
DixonRangeSWFilter,
DHTFilter,
LogFilter,
ExpFilter,
MaxSWFilter,
MinSWFilter,
ZScoreSWFilter,
DCTRegressorSWFilter,
ScaleSWFilter,
StdErrorSWFilter,
DCTCoefSWFilter,
KurtosisSWFilter,
SkewnessSWFilter,
NormalTestSWFilter,
FFMpegLikeThresholdSWFilter,
StatTestSWFilter,
MadSWFilter,
MinStdRegressionSWFilter,
MinStdOtsuSWFilter,
ColourFilter,
SignAngleDiff1DFilter
)
# from shot_detector.filters import (
# mean_cascade
# )
from shot_detector.handlers import BaseEventHandler, BasePlotHandler
from shot_detector.utils.collections import SmartDict
sgn_changes = SignAngleDiff1DFilter(
)
norm = NormFilter(
)
fabs = ModulusFilter()
dct = DCTFilter()
dht = DHTFilter()
log = LogFilter()
exp = ExpFilter()
colour = ColourFilter()
extrema = ExtremaSWFilter(
strict_windows=True,
overlap_size=0,
cs=False,
)
delay = DelayFilter()
original = delay(0)
savgol = SavitzkyGolaySWFilter(
window_size=50,
strict_windows=True,
overlap_size=0,
# cs=False
)
wiener = WienerSWFilter(
window_size=50,
strict_windows=True,
overlap_size=0,
)
alpha_beta = AlphaBetaSWFilter(
window_size=50,
strict_windows=True,
overlap_size=0,
)
corr = PearsonCorrelationSWFilter(
window_size=10,
strict_windows=True,
# overlap_size=0,
# repeat_windows=True,
)
fmax = MaxSWFilter(
window_size=25,
strict_windows=True,
)
fmin = MinSWFilter(
window_size=25,
strict_windows=True,
)
zscore = ZScoreSWFilter(
window_size=25,
sigma_num=3,
cs=False,
)
deviation2 = DeviationSWFilter(
window_size=25,
std_coef=2.5,
)
shift = ShiftSWFilter(
window_size=2,
strict_windows=False,
cs=False,
)
level = LevelSWFilter(
level_number=10000,
global_max=1.0,
global_min=0.0,
)
adaptive_level = LevelSWFilter(
level_number=100,
window_size=50,
)
median = MedianSWFilter(
window_size=5,
strict_windows=True,
)
mean = MeanSWFilter(
window_size=25,
# strict_windows=True,
cs=False
)
ewma = MeanSWFilter(
window_size=50,
# strict_windows=True,
mean_name='EWMA',
cs=False
)
std = StdSWFilter(
window_size=25,
strict_windows=True,
)
std_error = StdErrorSWFilter(
window_size=25,
strict_windows=True,
)
dtr = DecisionTreeRegressorSWFilter(
window_size=100,
strict_windows=True,
overlap_size=0,
cs=False,
)
sad = original - shift
deviation = original - mean
dct_re = DCTRegressorSWFilter(
window_size=25,
strict_windows=True,
overlap_size=0,
)
dct_coef = DCTCoefSWFilter(
window_size=25,
strict_windows=True,
)
scale = ScaleSWFilter(
s=25 * 20,
strict_windows=True,
overlap_size=0,
)
bspline = BsplineSWFilter(
window_size=4,
strict_windows=True,
overlap_size=0,
)
smooth = dtr(s=25 * 32, d=5) | savgol(s=25 * 32)
nikitin_1 = NikitinSWFilter(
window_size=256,
depth=5,
strict_windows=True,
overlap_size=0,
cs=False,
)
detrend = DetrendSWFilter(
window_size=25 * 8,
strict_windows=True,
overlap_size=0,
)
msm = MinStdMeanSWFilter(
window_size=25,
min_size=2
)
kurtosis = KurtosisSWFilter(
window_size=25,
strict_windows=True,
)
skewness = SkewnessSWFilter(
window_size=25,
strict_windows=True,
)
normal_test = NormalTestSWFilter(
window_size=20,
overlap_size=0,
repeat_windows=True,
strict_windows=True,
)
# frange = (fmax - fmin) / mean
stat_test = StatTestSWFilter(
window_size=25,
strict_windows=True,
# overlap_size=0,
# repeat_windows=True
# cs=False,
)
mad = MadSWFilter(
window_size=25,
overlap_size=0,
repeat_windows=True
# cs=False,
)
dixon_r = DixonRangeSWFilter(
window_size=5,
strict_windows=True,
cs=False,
)
# mean | sad | sad | fabs is change-point ("disorder") detection by definition.
# nikitin = median | mean | nikitin_1 * 10
# nikitin = (sad | fabs | deviation) < (sad | fabs)
#
# nikitin = dct_re(last=10) # nikitin_1(use_first = True)
# nikitin = std / mean # — very cool
# nikitin = mean | skewness(s=25) / 10
# nikitin = norm(l=2) | (normal_test < 0.1) —— cool as periods of
# annormal distribution.
#
# Very cool way to get outlier
# nikitin = norm(l=1) | sad | original - median(s=25) | fabs
#
# nikitin = norm(l=1) | original - median(s=25) | fabs
# Use with extrema(s=100, x=1.1, order=50),
#
#
# Strange
# nikitin = norm(l=1) | (dixon_r > 0.9)
#
##
# Very cool
# nikitin = norm(l=1) | original - savgol(s=25) | fabs | mean(s=10)
#
##
# Very-very cool but slow
#
#
# def multi_savgol(begin=9, end=61):
# res = 0
# cnt = 0
# for size in range(begin, end, 2):
# res += (original - savgol(s=size))
# cnt += 1
# return (res/cnt)
#
#
# nikitin = norm(l=1) | multi_savgol() | fabs | zscore
#
# Normal, a bit strange. ~Marchuk-style (pp 10)
#
#
# def multi_savgol_with_bills(begin=9, end=25, esp=6):
# res = 0
# cnt = 0
# for size in range(begin, end, 2):
# delta = original - savgol(s=size) | abs
# bill = delta | (original > (esp * std(s=end))) | int
# res += bill
# cnt += 1
# res_mean = res | mean(s=100)
# res = (res > res_mean) | int
# return (res)
#
#
# nikitin = norm(l=1) | multi_savgol_with_bills()
# def multi_mean(begin=9, end=61):
# res = 0
# cnt = 0
# for size in range(begin, end, 2):
# print()
# res += (original - mean(s=size))
# cnt += 1
# return (res/cnt)
#
#
# nikitin = norm(l=1) | multi_mean()
# | original - median | abs
#
#
# nikitin9 = norm(l=1) | original - mean(s=9)
# | original - median | abs
#
# nikitin61 = norm(l=1) | original - mean(s=61)
# | original - median | abs
# import sys
# sys.setrecursionlimit(100000)
mstd = MinStdMeanSWFilter(
window_size=100,
strict_windows=True,
overlap_size=0,
repeat_windows=True,
cs=False,
)
mstdotsu = MinStdOtsuSWFilter(
window_size=100,
strict_windows=True,
overlap_size=0,
cs=False,
)
msr = MinStdRegressionSWFilter(
window_size=100,
strict_windows=True,
overlap_size=0,
cs=False,
)
# fdelta = norm(l=1) | min_std_cascade.multi_dtr() | abs
#
# nikitin = fdelta | (original > 6*std(s=25)) | int
#
# sigma3 = original > (mean(s=50) + 3*std(s=50))
#
# nikitin9 = norm(l=1) | mean(s=10) - mean(s=20) | abs | sigma3 | int
# diff = original - shift
# sigma3 = original > (mean(s=50) + 3*std(s=50))
# nikitin = norm(l=1) | diff | abs | sigma3 | int
#
#
# nikitin9 = (norm(l=1)
# | original - mole_filter()
# | abs
# | sigma3
# | int) | original * 0.9
# std_x = dct_re(last=2) # nikitin_1(use_first = True) | std
#
# std_x = norm(l=1) | sad
ffmpeg_like = FFMpegLikeThresholdSWFilter()
def sigma3(c=3.0, **kwargs):
"""
:param c:
:param kwargs:
:return:
"""
# noinspection PyTypeChecker
return (
original
> (
mean(**kwargs)
+ c * std(**kwargs)
)
) | int
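# Example: sigma3(c=3.0, s=50) yields 1 where the signal exceeds its
# 50-sample sliding mean by three sliding standard deviations, else 0.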
# nikitin = norm(l=1) | mean_cascade.multi_mean()
nikitin = norm(l=1)
# noinspection PyTypeChecker
nikitin_s = nikitin | abs | sigma3() | int
#
# mean_cascade.multi_mean()
seq_filters = [
# SmartDict(
# name='windows',
# plot_options=SmartDict(
# linestyle=':',
# color='gray',
# linewidth=0.5,
# ),
# filter=DebugGridSWFilter(
# s=100,
# strict_windows=True,
# cs=False,
# ),
# ),
SmartDict(
name='$F_{L_1} = |F_{t}|_{L_1}$',
plot_options=SmartDict(
linestyle='-',
color='lightgray',
marker='x',
linewidth=3.0,
),
filter=norm(l=1),
),
SmartDict(
name='$DTR_{300,2}$',
plot_options=SmartDict(
linestyle='-',
color='red',
# marker='x',
linewidth=2.0,
),
filter=norm(l=1) | dtr(s=300, d=2)
),
#
# SmartDict(
# name='$DTR2$',
# plot_options=SmartDict(
# linestyle='-',
# color='green',
# #marker='x',
# linewidth=2.0,
# ),
# filter=norm(l=1) | delay(200) | dtr(s=300, d=1)
# ),
#
#
# SmartDict(
# name='$DTR3$',
# plot_options=SmartDict(
# linestyle='-',
# color='blue',
# #marker='x',
# linewidth=2.0,
# ),
# filter=norm(l=1) | delay(100) | dtr(s=300, d=2)
# ),
#
#
#
# SmartDict(
# name='$\sum DTR$',
# plot_options=SmartDict(
# linestyle='-',
# color='black',
# #marker='x',
# linewidth=2.0,
# ),
# filter=norm(l=1) | (
# dtr(s=301, d=2) + dtr(s=201, d=2) + dtr(s=100, d=2)
# # + (delay(200) | dtr(s=300, d=2))
# ) / 3
# ),
SmartDict(
name='$S_{DTR} = \\frac{1}{k}\sum_{j=1}^{k} '
'DTR_{i \cdot 25, 2} $',
plot_options=SmartDict(
linestyle='-',
color='magenta',
# marker='x',
linewidth=2.0,
),
filter=norm(l=1) | sum(
[dtr(s=25 * i + 1) for i in range(1, 9)]
) / 8 | (sad | abs)
),
SmartDict(
name='$\\frac{1}{k}\sum_{j=1}^{k} Bills S_{DTR=}$',
plot_options=SmartDict(
linestyle=':',
color='blue',
# marker='x',
linewidth=2.0,
),
filter=norm(l=1) | sum(
[dtr(s=25 * i + 1) for i in range(1, 9)]
) / 8 | (sad | abs) | sum(
[sigma3(s=25 * i) for i in range(1, 9)]
) / 8
),
# SmartDict(
# name='$DTR$',
# plot_options=SmartDict(
# linestyle='-',
# color='orange',
# #marker='x',
# linewidth=2.0,
# ),
# filter=norm(l=1) | min_std_cascade.multi_dtr()
# ),
# SmartDict(
# name='$M_{25} = |\hat{\mu}_{25}(F_{L_1})|$',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# #marker='x',
# linewidth=2.0,
# ),
# filter=norm(l=1) | mean(s=25, cs=True)
# ),
# SmartDict(
# name='$M_{50} = |\hat{\mu}_{50}(F_{L_1})|$',
# plot_options=SmartDict(
# linestyle='-',
# color='orange',
# #marker='x',
# linewidth=2.0,
# ),
# filter=norm(l=1) | mean(s=50, cs=True)
# ),
#
# SmartDict(
# name='$M_{100} = |\hat{\mu}_{100}(F_{L_1})|$',
# plot_options=SmartDict(
# linestyle='-',
# #marker='x',
# color='red',
# linewidth=2.0,
# ),
# filter=norm(l=1) | mean(s=100, cs=True)
# ),
#
# SmartDict(
# name='$M_{200} = |\hat{\mu}_{200}(F_{L_1})|$',
# plot_options=SmartDict(
# linestyle='-',
# color='blue',
# linewidth=2.0,
# ),
# filter=norm(l=1) | mean(s=200, cs=True)
# ),
#
#
#
#
# SmartDict(
# name='$|M_{100} - M_{50}| \\to_{\pm} 0$',
# plot_options=SmartDict(
# linestyle=':',
# color='purple',
# linewidth=1.1,
# ),
# filter=norm(l=1) | median(s=25)
# | (mean(s=100, cs=True) - mean(s=50, cs=True))
# | sgn_changes | fabs * 1
# ),
#
#
# SmartDict(
# name='$|M_{200} - M_{50}| \\to_{\pm} 0$',
# plot_options=SmartDict(
# linestyle='--',
# color='blue',
# linewidth=1.2,
# ),
# filter=norm(l=1) | median(s=25)
# | (mean(s=200, cs=True) - mean(s=50, cs=True))
# | sgn_changes | fabs * 0.9
# ),
#
#
# SmartDict(
# name='$|M_{200} - M_{100}| \\to_{\pm} 0$',
# plot_options=SmartDict(
# linestyle='-',
# marker='x',
# color='green',
# linewidth=1.3,
# ),
# filter=norm(l=1) | median(s=25)
# | (mean(s=200, cs=True) - mean(s=100, cs=True))
# | sgn_changes | fabs * 0.8
# ),
# SmartDict(
# name='$D_{t} = |F_{t} - F_{t-1}|_{L_1}$',
# plot_options=SmartDict(
# linestyle='-',
# color='black',
# linewidth=2.0,
# ),
# filter=sad | abs | norm(l=1)
# ),
#
# SmartDict(
# name='$D^{ffmpeg}_{t} = \min(D_t, D_t-D_{t-1})$',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=2.0,
# ),
# filter=ffmpeg_like
# ),
# SmartDict(
# name='$T_{const} = 0.08 \in (0; 1)$',
# plot_options=SmartDict(
# linestyle='-',
# color='black',
# linewidth=2.0,
# ),
# filter=norm(l=1) | 0.08 ,
# ),
# SmartDict(
# name='$nikitin$',
# plot_options=SmartDict(
# linestyle='-',
# color='green',
# linewidth=3.0,
# ),
# filter= norm(l=1) | mean,
# ),
#
#
# SmartDict(
# name='$nikitin_s$',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=1.0,
# ),
# filter= nikitin_s,
# ),
# SmartDict(
# name='$nikitin_e$',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=1.0,
# ),
# filter= norm(l=1) | nikitin | extrema(s=99, x=1.1, order=50),
# ),
#
# SmartDict(
# name='nikitin9',
# plot_options=SmartDict(
# linestyle='-',
# color='blue',
# linewidth=1.0,
# ),
# filter= nikitin9,
# ),
#
#
# SmartDict(
# name='nikitin61',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=1.0,
# ),
# filter= nikitin61,
# ),
#
# SmartDict(
# name='std_e',
# plot_options=SmartDict(
# linestyle='-',
# color='orange',
# linewidth=1.0,
# ),
# filter= norm(l=1) | std_x | extrema(s=100, x=0.9),
# ),
#
# SmartDict(
# name='$mean$',
# plot_options=SmartDict(
# linestyle='--',
# color='red',
# linewidth=1.0,
# ),
# filter=norm(l=1) | mean ,
# ),
#
#
# SmartDict(
# name='smooth',
# plot_options=SmartDict(
# linestyle='-',
# color='black',
# linewidth=1.0,
# ),
# filter=norm(l=1) | smooth ,
# ),
#
#
# SmartDict(
# name='$scale$',
# plot_options=SmartDict(
# linestyle=':',
# color='green',
# linewidth=1.0,
# ),
# filter=norm(l=1) | smooth | extrema(s=100,x=1),
# ),
#
# SmartDict(
# name='$scale min$',
# plot_options=SmartDict(
# linestyle=':',
# color='blue',
# linewidth=1.0,
# ),
# filter=norm(l=1) | smooth | extrema(s=100,x=1.1,case=min),
# ),
#
#
# SmartDict(
# name='$scale + d$',
# plot_options=SmartDict(
# linestyle=':',
# color='red',
# linewidth=1.0,
# ),
# filter=delay(50) | norm(l=1) | smooth | extrema(s=100,x=0.5),
# ),
#
# SmartDict(
# name='$scale+d min$',
# plot_options=SmartDict(
# linestyle=':',
# color='orange',
# linewidth=1.0,
# ),
# filter=delay(50) | norm(l=1) | smooth | extrema(s=100,x=0.6,
# case=min),
# ),
# SmartDict(
# name='$scale+d$',
# plot_options=SmartDict(
# linestyle='-',
# color='blue',
# linewidth=1.0,
# ),
# filter=delay(50) | norm(l=1) | mean | extrema(s=100)
# ),
#
# SmartDict(
# name='$corr$',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=1.0,
# ),
# filter= mean(s=40) | norm(l=1),
# ),
#
# SmartDict(
# name='222',
# plot_options=SmartDict(
# linestyle='-',
# color='green',
# linewidth=1.0,
# ),
# filter= mean(s=40) | norm(l=1) | corr(s=10),
# ),
# SmartDict(
# name='max',
# plot_options=SmartDict(
# linestyle='-',
# color='green',
# linewidth=1.0,
# ),
# filter=norm(l=1) | fmax,
# ),
#
#
# SmartDict(
# name='min',
# plot_options=SmartDict(
# linestyle='-',
# color='blue',
# linewidth=1.0,
# ),
# filter=norm(l=1) | fmin,
# ),
#
# SmartDict(
# name='mean',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=1.0,
# ),
# filter=norm(l=1) | (mean(s=100) / std(s=100)) * 0.1,
# ),
# SmartDict(
# name='++',
# plot_options=SmartDict(
# linestyle='-',
# color='blue',
# linewidth=1.0,
# ),
# filter=norm(l=1) | alpha_beta(
# alpha=0.1,
# beta=0.05,
# return_velocity = True,
# ),
# ),
# SmartDict(
# name='dct_re',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=1.0,
# ),
# filter=norm(l=1) | dct_re(s=25),
# ),
#
# SmartDict(
# name='zscore',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=1.0,
# ),
# filter=norm(l=1) | zscore,
# ),
#
# SmartDict(
# name='(original - mean) / std',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=1.0,
# ),
# filter=norm(l=1) | (original - mean(s=50)) | fabs / std(s=40),
# ),
#
# SmartDict(
# name='$R_{61} = DTR_{61,2}(F_i)$',
# plot_options=SmartDict(
# linestyle='-',
# color='blue',
# linewidth=1.0,
# ),
# filter=norm | dtr(s=25, d=1) | sad ,
# ),
#
# SmartDict(
# name='$R_{47} = DTR_{47,1}(F_i)$',
# #offset=-1,
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=1.0,
# ),
# filter=norm | dtr(s=25, d=1, window_delay=5,) | sad,
# ),
#
#
# SmartDict(
# name='$3 R_{47} = DTR_{47,1}(F_i)$',
# #offset=-1,
# plot_options=SmartDict(
# linestyle='-',
# color='green',
# linewidth=1.0,
# ),
# filter=norm | dtr(s=25, d=1, window_delay=10) | sad,
# ),
#
# SmartDict(
# name='$4 R_{47} = DTR_{47,1}(F_i)$',
# #offset=-1,
# plot_options=SmartDict(
# linestyle='-',
# color='violet',
# linewidth=1.0,
# ),
# filter=norm | dtr(s=25, d=1, window_delay=15) | sad,
# ),
#
# SmartDict(
# name='$5 R_{47} = DTR_{47,1}(F_i)$',
# #offset=-1,
# plot_options=SmartDict(
# linestyle='-',
# color='orange',
# linewidth=1.0,
# ),
# filter=norm | dtr(s=25, d=1, window_delay=20) | sad,
# ),
#
# SmartDict(
# name='sad',
# plot_options=SmartDict(
# linestyle='-',
# color='red',
# linewidth=1.0,
# ),
# filter= (original - shift) | norm | fabs * 2,
# ),
#
#
# SmartDict(
# name='(original - mean(s=10)) | norm | fabs',
# plot_options=SmartDict(
# linestyle='-',
# color='blue',
# linewidth=1.0,
# ),
# filter=(original - mean(s=50)) | norm | fabs * 2,
# ),
#
# SmartDict(
# name='std',
# plot_options=SmartDict(
# linestyle='-',
# color='green',
# linewidth=1.0,
# ),
# filter=std(s=50) | norm | fabs * 2,
# ),
#
#
# SmartDict(
# name='dtr + | sad',
# plot_options=SmartDict(
# linestyle='-',
# color='green',
# linewidth=1.0,
# ),
# filter=norm
# | (dtr(s=47, d=1) | sad).i(dtr(s=61, d=2) | sad)
# | fabs | level(n=50)
# #| #adaptive_level(n=50, cm=1.1),
# ),
]
class BaseEventSelector(BaseEventHandler):
"""
...
"""
__logger = logging.getLogger(__name__)
cumsum = 0
chart = BasePlotHandler()
def plot(self, aevent_seq, chart, filter_seq):
"""
:param aevent_seq:
:param chart:
:param filter_seq:
"""
f_count = len(filter_seq)
event_seq_tuple = itertools.tee(aevent_seq, f_count + 1)
for filter_desc, event_seq in zip(
filter_seq,
event_seq_tuple[1:]
):
offset = filter_desc.get('offset', 0)
new_event_seq = filter_desc \
.get('filter') \
.filter_objects(event_seq)
for event in new_event_seq:
#
# print (
# filter_desc.get('name'),
# event,
# event.time,
# event.feature
# )
filtered = event.feature
time = event.time if event.time else 0
chart.add_data(
filter_desc.get('name'),
1.0 * (time - offset),
1.0 * filtered,
filter_desc.get('plot_style', ''),
**filter_desc.get('plot_options', {})
)
self.__logger.debug('chart.plot_data() enter')
chart.plot_data()
self.__logger.debug('chart.plot_data() exit')
return event_seq_tuple[0]
def filter_events(self, event_seq, **kwargs):
"""
Should be implemented
:param event_seq:
"""
event_seq = self.limit_seq(event_seq, 0.0, 1.5)
self.__logger.debug('plot enter')
event_seq = self.plot(event_seq, self.chart, seq_filters)
self.__logger.debug('plot exit')
#
# filter = sad | fabs | norm | level(n=10)
#
# # event_seq = self.log_seq(event_seq, 'before')
#
# event_seq = filter.filter_objects(event_seq)
#
# event_seq =
# itertools.ifilter(
# lambda item: item.feature > 0.0,
# event_seq
# )
#
# event_seq =
# self.log_seq(event_seq, '-> {item} {item.feature}')
#
#
# event_seq = self.log_seq(event_seq)
#
#
# event_seq = itertools.ifilter(lambda x: x>0,
# event_seq)
return event_seq
| StarcoderdataPython |
1770476 | <reponame>rlauer6/makala<filename>makala/__init__.py<gh_stars>0
from .lambda_config import LambdaConfig
from .makala_config import MakalaConfig
| StarcoderdataPython |
1731308 | <reponame>climbingdaily/spvnas
"""Visualization code for point clouds and 3D bounding boxes with mayavi.
Modified by <NAME>
Date: September 2017
"""
import argparse
import os
# import mayavi.mlab as mlab
import numpy as np
import torch
from torchsparse import SparseTensor
from torchsparse.utils.quantize import sparse_quantize
from torchpack.utils.config import configs
# from model_zoo import minkunet, spvcnn, spvnas_specialized
import open3d as o3d
from core.datasets.semantic_poss import SEM_COLOR
from tool_func import *
def process_point_cloud(input_point_cloud, input_labels=None, voxel_size=0.05):
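    # NB: the next assignment is a no-op, presumably a leftover hook for
    # rescaling the intensity channel.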
input_point_cloud[:, 3] = input_point_cloud[:, 3]
pc_ = np.round(input_point_cloud[:, :3] / voxel_size)
pc_ -= pc_.min(0, keepdims=1)
label_map = create_label_map_poss()
if input_labels is not None:
labels_ = label_map[input_labels & 0xFFFF].astype(
np.int64) # semantic labels
else:
labels_ = np.zeros(pc_.shape[0], dtype=np.int64)
feat_ = input_point_cloud
# if input_labels is not None:
# out_pc = input_point_cloud[labels_ != labels_.max(), :3]
# pc_ = pc_[labels_ != labels_.max()]
# feat_ = feat_[labels_ != labels_.max()]
# labels_ = labels_[labels_ != labels_.max()]
# else:
# out_pc = input_point_cloud
# pc_ = pc_
out_pc = input_point_cloud
pc_ = pc_
_, inds, inverse_map = sparse_quantize(pc_,
return_index=True,
return_inverse=True)
# inds, labels, inverse_map = sparse_quantize(pc_,
# feat_,
# labels_,
# return_index=True,
# return_inverse=True)
pc = np.zeros((inds.shape[0], 4))
pc[:, :3] = pc_[inds]
feat = feat_[inds]
labels = labels_[inds]
lidar = SparseTensor(
torch.from_numpy(feat).float(),
torch.from_numpy(pc).int())
# labels = SparseTensor(labels, pc)
# labels_ = SparseTensor(labels_, pc_)
# inverse_map = SparseTensor(inverse_map, pc_)
return {
'pc': out_pc,
'lidar': lidar,
'targets': labels,
'targets_mapped': labels_,
'inverse_map': inverse_map
}
# mlab.options.offscreen = True
def draw_lidar(pc,
color=None,
fig=None,
bgcolor=(1, 1, 1),
pts_scale=0.06,
pts_mode='2dcircle',
pts_color=None):
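    # NB: requires mayavi (the `mlab` import is commented out above) and a
    # global `cmap` that is never defined in this script, so this helper is
    # effectively dead code here.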
if fig is None:
fig = mlab.figure(figure=None,
bgcolor=bgcolor,
fgcolor=None,
engine=None,
size=(800, 500))
if color is None:
color = pc[:, 2]
pts = mlab.points3d(pc[:, 0],
pc[:, 1],
pc[:, 2],
color,
mode=pts_mode,
scale_factor=pts_scale,
figure=fig)
pts.glyph.scale_mode = 'scale_by_vector'
pts.glyph.color_mode = 'color_by_scalar' # Color by scalar
pts.module_manager.scalar_lut_manager.lut.table = cmap
pts.module_manager.scalar_lut_manager.lut.number_of_colors = cmap.shape[0]
mlab.view(azimuth=180,
elevation=70,
focalpoint=[12.0909996, -1.04700089, -2.03249991],
distance=62,
figure=fig)
return fig
def inference(pc, model, label_file_name=None):
if label_file_name and os.path.exists(label_file_name):
label = np.fromfile(label_file_name, dtype=np.int32)
else:
label = None
feed_dict = process_point_cloud(pc, label)
inputs = feed_dict['lidar'].to(device)
with torch.no_grad():
outputs = model(inputs)
predictions = outputs.argmax(1).cpu().numpy()
predictions = predictions[feed_dict['inverse_map']]
return feed_dict, predictions
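# Example (hypothetical arrays):
#     feed_dict, preds = inference(points_Nx4, model)
# `preds` is a per-point semantic label array aligned with feed_dict['pc'].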
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config', metavar='FILE', help='config file')
parser.add_argument('--run-dir', metavar='DIR', help='run directory')
# parser.add_argument('--velodyne-dir', type=str, default='/hdd/dyd/SemanticPOSS/sequences/05/velodyne')
parser.add_argument('--velodyne-dir', type=str, default='/hdd/dyd/lidarcap/velodyne/6')
parser.add_argument('--model', type=str, default='SemanticKITTI_val_SPVCNN@65GMACs')
# args = parser.parse_args()
args, opts = parser.parse_known_args()
configs.load(args.config, recursive=True)
configs.update(opts)
output_dir = args.velodyne_dir.replace('velodyne', 'human_semantic')
os.makedirs(output_dir, exist_ok=True)
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = 'cpu'
from core import builder
from torchpack import distributed as dist
model = builder.make_model().to(device)
# if 'MinkUNet' in args.model:
# model = minkunet(args.model, pretrained=True)
# elif 'SPVCNN' in args.model:
# model = spvcnn(args.model, pretrained=True)
# elif 'SPVNAS' in args.model:
# model = spvnas_specialized(args.model, pretrained=True)
# else:
# raise NotImplementedError
model = model.to(device)
init = torch.load(os.path.join(args.run_dir, 'checkpoints', 'max-test-iou.pt'),
map_location='cuda:%d' % dist.local_rank()
if torch.cuda.is_available() else 'cpu')['model']
model.load_state_dict(init)
input_point_clouds = sorted(os.listdir(args.velodyne_dir))
model.eval()
files_num = len(input_point_clouds)
for i, point_cloud_name in enumerate(input_point_clouds):
point_cloud_file = f'{args.velodyne_dir}/{point_cloud_name}'
if point_cloud_name.endswith('.bin'):
# semantickitti
pc = np.fromfile(point_cloud_file,
dtype=np.float32).reshape(-1, 4) # 读取点云
label_file_name = os.path.join(args.velodyne_dir.replace(
'velodyne', 'labels'), point_cloud_name.replace('.bin', '.label'))
out_file_name = os.path.join(
output_dir, point_cloud_name.replace('.bin', '.pcd'))
elif point_cloud_name.endswith('.pcd'):
# lidarcap
pc = read_pcd(point_cloud_file)
            make_horizon = read_json_file(
                args.velodyne_dir.split('velodyne')[0] + '/make_horizon.json')
key = os.path.basename(args.velodyne_dir)
if key in make_horizon.keys():
pc[:, :3] = pc[:, :3] @ np.asarray(make_horizon[key]).T
label_file_name = None
out_file_name = os.path.join(output_dir, point_cloud_name)
else:
continue
# ground, non_ground = filter_ground(pc[:, :3])
feed_dict, predictions = inference(
pc, model, label_file_name=label_file_name)
output = o3d.geometry.PointCloud()
output.points = o3d.utility.Vector3dVector(feed_dict['pc'][:,:3])
output.colors = o3d.utility.Vector3dVector(SEM_COLOR[predictions]/255)
o3d.io.write_point_cloud(out_file_name, output)
print(f'\rFile save in {out_file_name} ({i:d}/{files_num})', end='\r',flush=True)
| StarcoderdataPython |
1603494 |
from .MultivariateGaussianGenerator import MultivariateGaussianGenerator
from .InverseWishartGenerator import InverseWishartGenerator
from .ExponentialDecayGenerator import ExponentialDecayGenerator
from weakref import ReferenceType
class MatrixGeneratorAdapter:
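    """Fills a weakly-referenced matrix object with synthetic correlated data
    from one of several generators, caching the generator's last true
    covariance (C) and autocorrelation (A) matrices."""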
def __init__(self, matrix_reference: ReferenceType):
self.matrix_reference = matrix_reference
self.__last_C = None
self.__last_A = None
@property
def last_C(self):
return self.__last_C
@property
def last_A(self):
return self.__last_A
def multivariate_gaussian(self, C, A, number_of_iteratons: int, verbose=False):
generator = MultivariateGaussianGenerator(C, A, number_of_iteratons)
self.matrix_reference().array = generator.generate(verbose)
self.__last_A = generator.last_A
self.__last_C = generator.last_C
def inverse_wishart(self, number_of_assets, number_of_samples, kappa, number_of_iterations: int, normalize_covariance=True, verbose=False):
generator = InverseWishartGenerator(number_of_assets, number_of_samples, kappa, number_of_iterations, normalize_covariance)
self.matrix_reference().array = generator.generate(verbose)
self.__last_A = generator.last_A
self.__last_C = generator.last_C
def exponential_decay(self, number_of_assets, number_of_samples, autocorrelation_time, number_of_iterations: int, verbose=False):
generator = ExponentialDecayGenerator(number_of_assets, number_of_samples, autocorrelation_time, number_of_iterations)
self.matrix_reference().array = generator.generate(verbose)
self.__last_A = generator.last_A
self.__last_C = generator.last_C | StarcoderdataPython |
100566 | import numpy as np
from itertools import product
from deep_rlsp.envs.gridworlds.env import Env, Direction, get_grid_representation
class BasicRoomEnv(Env):
"""
Basic empty room with stochastic transitions. Used for debugging.
"""
def __init__(self, prob, use_pixels_as_observations=True):
self.height = 3
self.width = 3
self.init_state = (1, 1)
self.prob = prob
self.nS = self.height * self.width
self.nA = 5
super().__init__(1, use_pixels_as_observations=use_pixels_as_observations)
self.num_features = 2
self.default_action = Direction.get_number_from_direction(Direction.STAY)
self.num_features = len(self.s_to_f(self.init_state))
self.reset()
states = self.enumerate_states()
self.make_transition_matrices(states, range(self.nA), self.nS, self.nA)
self.make_f_matrix(self.nS, self.num_features)
def enumerate_states(self):
return product(range(self.width), range(self.height))
def get_num_from_state(self, state):
return np.ravel_multi_index(state, (self.width, self.height))
def get_state_from_num(self, num):
return np.unravel_index(num, (self.width, self.height))
def s_to_f(self, s):
return s
def _obs_to_f(self, obs):
return np.unravel_index(obs[0].argmax(), obs[0].shape)
def _s_to_obs(self, s):
layers = [[s]]
obs = get_grid_representation(self.width, self.height, layers)
return np.array(obs, dtype=np.float32)
# render_width = 64
# render_height = 64
# x, y = s
# obs = np.zeros((3, render_height, render_width), dtype=np.float32)
# obs[
# :,
# y * render_height : (y + 1) * render_height,
# x * render_width : (x + 1) * render_width,
# ] = 1
# return obs
def get_next_states(self, state, action):
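        # Slip model: the intended move succeeds with probability `prob`;
        # otherwise the agent stays in place.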
# next_states = []
# for a in range(self.nA):
# next_s = self.get_next_state(state, a)
# p = 1 - self.prob if a == action else self.prob / (self.nA - 1)
# next_states.append((p, next_s, 0))
next_s = self.get_next_state(state, action)
next_states = [(self.prob, next_s, 0), (1 - self.prob, state, 0)]
return next_states
def get_next_state(self, state, action):
"""Returns the next state given a state and an action."""
action = int(action)
if action == Direction.get_number_from_direction(Direction.STAY):
pass
elif action < len(Direction.ALL_DIRECTIONS):
move_x, move_y = Direction.move_in_direction_number(state, action)
# New position is legal
if 0 <= move_x < self.width and 0 <= move_y < self.height:
state = move_x, move_y
else:
# Move only changes orientation, which we already handled
pass
else:
raise ValueError("Invalid action {}".format(action))
return state
def s_to_ansi(self, state):
return str(self.s_to_obs(state))
if __name__ == "__main__":
from gym.utils.play import play
env = BasicRoomEnv(1)
play(env, fps=5)
| StarcoderdataPython |
1751391 | import pulumi
import pulumi_aws as aws
db_cluster = aws.rds.Cluster("dbCluster", master_password=pulumi.secret("<PASSWORD>"))
| StarcoderdataPython |
1715058 | '''
Utilities to convert metrics to inclusive.
'''
import calltree as ct
import pandas as pd
import pandas as pd
def convert_series_to_inclusive(series, call_tree):
'''
Converts a series having Cnode IDs as index from exclusive to inclusive.
Takes as input a CubeTreeNode object (hopefully the root).
*Notice: The results may be nonsensical unless the metric acted upon is
"INCLUSIVE convertible"*
Parameters
----------
series : Series
A series representing exclusive measurements
call_tree : CubeTreeNode
A recursive representation of the call tree.
Returns
-------
res : Series
A series having the same structure as the input, but with data summed
over following the hierarchy given by the call_tree object.
'''
if type(series.index) == pd.MultiIndex and len(series.index.levels) > 1:
raise NotImplementedError("MultiIndex not supported for series")
assert series.index.name == "Cnode ID", "MultiIndex not supported for series"
# LRU cache does not work because of
# TypeError: unhashable type: 'list'
# from functools import lru_cache
# @lru_cache
def aggregate(root):
value = series.loc[root.cnode_id]
for child in root.children:
value += aggregate(child)
return value
return (
pd.DataFrame(
data=[
(node.cnode_id, aggregate(node))
for node in ct.iterate_on_call_tree(call_tree)
],
columns=["Cnode ID", "metric"],
)
.set_index("Cnode ID")
.metric
)
def select_metrics(df, selected_metrics):
""" Selects `selected_metrics` out of a DataFrame.
This function solves some problems:
- Finding the ``metric`` level in ``df.columns``;
- Selecting, out of ``selected_metrics`` only the ones that are also in the
Data Frame;
- Dealing with both the cases when ``df.columns`` is a
``pandas.MultiIndex`` or a ``pandas.Index``.
Parameters
----------
df: DataFrame
A dataframe containing the metrics to be selected as columns.
The dataframe columns are a `MultiIndex`
selected_metrics: iterable
Contains the names of the metrics that need need to be selected
Returns
-------
res : DataFrame
a DataFrame contaning only the selected metrics.
"""
# finding the level in the columns with the metrics
if type(df.columns) == pd.MultiIndex:
metric_level = df.columns.names.index("metric")
nlevels = len(df.columns.names)
df_metrics = df.columns.levels[metric_level]
elif type(df.columns) == pd.Index:
assert df.columns.name == "metric"
metric_level = 0
nlevels = 1
df_metrics = df.columns
# choosing the metrics
possible_metrics = set(selected_metrics).intersection(set(df_metrics))
if type(df.columns) == pd.MultiIndex:
metric_indexer = [slice(None)] * nlevels
metric_indexer[metric_level] = list(possible_metrics)
return df.loc[:, tuple(metric_indexer)]
elif type(df.columns) == pd.Index:
return df.loc[:, list(possible_metrics)]
def convert_df_to_inclusive(df_convertible, call_tree):
"""
Converts a DataFrame from exclusive to inclusive. A level named
``Cnode ID``, ``Full Callpath`` or ``Short Callpath`` must be in the index.
Parameters
----------
df_convertible : pandas.DataFrame
A DataFrame containing only metrics that can be converted safely from
exclusive to inclusive.
call_tree: CubeTreeNode
A recursive representation of the call tree.
Returns
-------
res : DataFrame
A DataFrame
"""
old_index_name = ic.find_index_col(df_convertible)
# dfcr = df_convertible_reindexed
tree_df = ct.calltree_to_df(call_tree)
dfcr = ic.convert_index(df_convertible, tree_df, target="Cnode ID")
    levels_to_unstack = [
        name for name in dfcr.index.names if name != "Cnode ID"
    ]
    # Unstack the Cnode ID-indexed frame (dfcr) rather than the original, so
    # aggregate() below can index rows by cnode_id whatever the caller's
    # original index column was.
    df_transposed = dfcr.unstack(levels_to_unstack)
def aggregate(root):
value = df_transposed.loc[root.cnode_id, :]
for child in root.children:
value += aggregate(child)
return value
names = df_transposed.columns.names
return (
pd.concat(
objs=[aggregate(n) for n in ct.iterate_on_call_tree(call_tree)],
keys=[n.cnode_id for n in ct.iterate_on_call_tree(call_tree)],
)
.rename_axis(mapper=["Cnode ID"] + names, axis="index")
.unstack(names)
.pipe(ic.convert_index, tree_df, old_index_name)
.stack(levels_to_unstack)
)
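# Sketch (hypothetical names): given `df` with a 'Cnode ID' index level and a
# call tree `root`,
#     convert_df_to_inclusive(select_metrics(df, ['time']), root)
# returns 'time' summed over each call-tree subtree (inclusive metrics).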
| StarcoderdataPython |
1630682 | <filename>osrefl/model/calculations.py
# Copyright (C) 2008 University of Maryland
# All rights reserved.
# See LICENSE.txt for details.
# Author: <NAME>
#Starting Date:6/5/2009
from numpy import greater, less, greater_equal, less_equal, min, max
from numpy import array, size, shape, hstack, vstack, linalg, cross
from numpy import zeros, empty, sum, sort, searchsorted
from numpy import cos,sin, tan, arctan, pi, abs, Inf, degrees
def sphere_point_test(center,r,x,y,z):
'''
Overview:
Determines whether a given point is in a sphere given the point being
tested and a Sphere object.
This module is much simpler than the calculation done for the k3d
module.
Parameters:
center:(float,[3]|angstroms) = The coordinates of the center of the sphere.
This parameter is in the form (x center,y center, z center)
r: (float|angstroms) = The radius of the sphere.
x,y,z:(float|angstroms) = coordinates for the point being tested.
Note:
-The API is left intentionally independent of the class structures used in
sample_prep.py to allow for code resuabilitiy.
'''
test_results = (((x - center[0])**2 + (y - center[1])**2 +
(z - center[2])**2)) <=(r**2 )
return test_results
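# Example: with broadcastable grids x[nx,1,1], y[1,ny,1], z[1,1,nz],
# sphere_point_test((0., 0., 0.), 5., x, y, z) marks the grid points that
# fall inside a radius-5 sphere at the origin.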
def cylinder_point_test(pt1, pt2, r, x, y, z):
'''
Overview:
Determines whether a given point is in a cylinder given the point
being tested and the relevant parameters.
Parameters:
pt1:(float,[3]|angstroms) = The coordinates of the originized axis point.
This parameter is in the form (x, y, z)
pt2:(float,[3]|angstroms) = The coordinates of the other axis point.
x,y,z:(float|angstroms) = coordinates for the point being tested.
Note:
-The API is left intentionally independent of the class structures used in
sample_prep.py to allow for code resuabilitiy.
'''
    dx = pt2[0] - pt1[0]
    dy = pt2[1] - pt1[1]
    dz = pt2[2] - pt1[2]
    # handle the degenerate case of z1 == z2 with an approximation
    if dz == 0:
        dz = .00000001
    # squared length of the full cylinder axis (dx, dy, dz)
    lengthSquared = dx * dx + dy * dy + dz * dz
    # fd <=> final delta
    fdx = x - pt1[0]
    fdy = y - pt1[1]
    fdz = z - pt1[2]
    dotProduct = dx * fdx + dy * fdy + dz * fdz
    if dotProduct < 0.0 or dotProduct > lengthSquared:
        return False
    else:
        # Point is between the two endpoints; check it is close enough to
        # the axis. distanceFromAxis is in units^2.
        distanceFromAxis = ((fdx * fdx + fdy * fdy + fdz * fdz) -
                            dotProduct * dotProduct / lengthSquared)
        if distanceFromAxis > r * r:
            return False
        else:
            return True
def parallel_point_test(center,dim,x,y,z):
'''
Overview:
Determines whether a given point is in a parallelapiped given the point
being tested and the relevant parameters.
Parameters:
center:(float,[3]|angstroms) = The coordinates of the center of the
parallelapiped. This parameter is in the form (x center,y center, z center)
dim:(float,[3]|angstroms) = The x, y and z dimensions of the parallelapiped
object.
x,y,z:(float|angstroms) = coordinates for the point being tested.
Note:
-The API is left intentionally independent of the class structures used in
sample_prep.py to allow for code resuabilitiy.
'''
low_lim = (array(center) - (array(dim)/2.0))
high_lim = (array(center) +(array(dim)/2.0))
height_lim = greater_equal (z,low_lim[2])*less_equal (z,high_lim[2])
length_lim = greater_equal (y,low_lim[1])*less_equal (y,high_lim[1])
width_lim = greater_equal (x,low_lim[0])*less_equal (x,high_lim[0])
test_results = height_lim * length_lim * width_lim
return test_results
def ellipse_point_test(center,dim,x,y,z):
'''
Overview:
Determines whether a given point is in an ellipse given the point being
tested and the relevant parameters.
Parameters:
center:(float,[3]|angstroms) = The coordinates of the center of the
ellipse. This parameter is in the form (x center,y center, z center)
dim:(float,[3]|angstroms) = The 'a' component, 'b' component and thickness
of the Ellipse respectively. 'a' is the radius of the Ellipse in the x
direction and 'b' is the radius of the ellipsoid in the y direction.
x,y,z:(float|angstroms) = coordinates for the point being tested.
Notes:
-To solve this equation more efficiently, the program takes in an array of
x,y and z so that x[size(x),1,1], y[1,size(y),1], z[1,1,size(z)]. This
module then solves each part of the test individually and takes the product.
Only the points where all of the inquires are True will be left as true in
the test_results array
-The API is left intentionally independent of the class structures used in
sample_prep.py to allow for code resuabilitiy.
'''
low_height_lim = greater_equal (z,(center[2] - dim[2]/2 ))
up_height_lim = less_equal (z,(center[2] + dim[2]/2))
xy_test = (((x-center[0])**2)/(dim[0]**2))+(((y-center[1])**2)
/(dim[1]**2))
in_plane_low_lim = less_equal (0.0,xy_test)
in_plane_high_lim = greater_equal (1.0,xy_test)
test_results = (low_height_lim * up_height_lim * in_plane_low_lim *
in_plane_high_lim)
return test_results
def cone_point_test(center,dim,stub,x,y,z):
'''
Overview:
Determines whether a given point is in a cone given the point being
tested and the relevant parameters.
Parameters:
center:(float,[3]|angstroms) = The x, y, and z component of the central
point of the cone. In the case that the center is set to
[None,None,None] the shape will be put in the bottom corner of the unit cell
(the bounding box will start at (0,0,0)).
dim:(float,[3]|angstroms) = The x component, y component and thickness
of the cone respectively. x is the radius of the cone base in the x
direction and y is the radius of the cone base in the y direction.
stub:(float|angstroms) = provides a hard cut-off for the thickness of the
cone. This allows for the creation of a truncated cone object whose side slope
can be altered by using different z component values while keeping the stub
parameter fixed.
x,y,z:(float|angstroms) = coordinates for the point being tested.
Notes:
-To solve this equation more efficiently, the program takes in arrays of
x, y and z shaped so that x[size(x),1,1], y[1,size(y),1], z[1,1,size(z)]. This
module then solves each part of the test individually and takes the product.
Only the points where all of the inquiries are True will be left as True in
the test_results array.
-The API is left intentionally independent of the class structures used in
sample_prep.py to allow for code reusability.
'''
a_angle = arctan(dim[2]/dim[0])
b_angle = arctan(dim[2]/dim[1])
low_height_lim = greater_equal (z,(center[2] - dim[2]/2))
if stub is None:
up_height_lim = less_equal (z,(center[2] + dim[2]/2))
else:
up_height_lim = less_equal (z,(center[2] + stub/2))
xy_test = ((((x-center[0])**2)/((((center[2] +
dim[2]/2)-z)/tan(a_angle))**2))+(((y-center[1])**2)/((((center[2] +
dim[2]/2)-z)/tan(b_angle))**2)))
in_plane_low_lim = less_equal (0.0,xy_test)
in_plane_high_lim = greater_equal (1.0,xy_test)
test_results = (low_height_lim * up_height_lim * in_plane_low_lim *
in_plane_high_lim)
return test_results
def pyrimid_point_test(center,dim,stub,x,y,z):
'''
Overview:
Determines whether a given point is in a pyramid given the point being
tested and the relevant parameters.
Parameters:
center:(float,[3]|angstroms) = The x, y, and z component of the central
point of the pyramid. In the case that the center is set to
[None,None,None] the shape will be put in the bottom corner of the unit cell
(the bounding box will start at (0,0,0)).
dim:(float,[3]|angstroms) = The x component, y component and thickness
of the pyramid respectively. x is the length of the Pyramid base and y is the
width of the Pyramid base.
stub:(float|angstroms) = provides a hard cut-off for the thickness of the
Pyramid. This allows for the creation of a trapezoidal object whose side slope
can be altered by using different z component values while keeping the stub
parameter fixed.
x,y,z:(float|angstroms) = coordinates for the point being tested.
Notes:
-To solve this equation more efficiently, the program takes in arrays of
x, y and z shaped so that x[size(x),1,1], y[1,size(y),1], z[1,1,size(z)]. This
module then solves each part of the test individually and takes the product.
Only the points where all of the inquiries are True will be left as True in
the test_results array.
-The API is left intentionally independent of the class structures used in
sample_prep.py to allow for code reusability.
'''
a_angle = arctan(dim[2]/dim[0])
b_angle = arctan(dim[2]/dim[1])
low_height_lim = greater_equal(z,(center[2] - dim[2]/2))
if stub is None:
up_height_lim = less_equal(z,(center[2] + dim[2]/2))
else:
up_height_lim = less_equal(z,(center[2] + stub/2))
test_results = (
less_equal((center[0] -((center[2] + dim[2]/2)-z)/tan(a_angle)),x) *
less_equal(x,(center[0] + ((center[2] + dim[2]/2)-z)/tan(a_angle)))*
less_equal((center[1] - ((center[2] + dim[2]/2)-z)/tan(b_angle)/2.0),y)*
less_equal(y,(center[1] + ((center[2] + dim[2]/2)-z)/tan(b_angle)/2.0)))
return test_results
def triangularprism_point_test(center,dim,x,y,z):
'''
Overview:
Determines whether a given point is in a triangular prism given the point
being tested and the relevant parameters.
Parameters:
center:(float,[3]|angstroms) = The x, y, and z component of the central
point of the prism. In the case that the center is set to
[None,None,None] the shape will be put in the bottom corner of the unit cell
(the bounding box will start at (0,0,0)).
dim:(float,[3]|angstroms) = The x component, y component and thickness
of the prism respectively. x sets the extent of the prism along the x
direction and y the depth of the triangular cross-section.
x,y,z:(float|angstroms) = coordinates for the point being tested.
Notes:
-To solve this equation more efficiently, the program takes in arrays of
x, y and z shaped so that x[size(x),1,1], y[1,size(y),1], z[1,1,size(z)]. This
module then solves each part of the test individually and takes the product.
Only the points where all of the inquiries are True will be left as True in
the test_results array.
-The API is left intentionally independent of the class structures used in
sample_prep.py to allow for code reusability.
'''
theta = arctan((dim[2]/2.0)/dim[1])
test_results = (
less_equal((center[0] - dim[0]),x) *
less_equal(x,(center[0] + dim[0])) *
less_equal((center[1] - ((center[2] + dim[2]/2)-z)/tan(theta)/2.0),y)*
less_equal(y,(center[1] + ((center[2] + dim[2]/2)-z)/tan(theta)/2.0)))
return test_results
def layer_point_test(thickness,start_point,z):
'''
Overview:
Creates an object that extends the length and width of the unit cell but
is parameterized in the thickness direction. This is useful for over-layers
or embed-layers.
Parameters:
thickness:(float|angstroms) = The thickness of the layer.
start_point:(float|angstroms) = The starting position of layer in the z
direction. This allows the layer to start at a height anywhere in the unit
cell. This is useful for over-layers or passivation layers.
z:(float|angstroms) = coordinate for the point being tested in the z
direction. Unlike the other tests, this only depends on the z coordinate.
Notes:
-The API is left intentionally independent of the class structures used in
sample_prep.py to allow for code reusability.
'''
low_height_lim = greater_equal (z,start_point)
up_height_lim = less_equal (z,start_point + thickness)
test_results = low_height_lim * up_height_lim
return test_results
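# Hedged example: a 20-angstrom layer starting at z = 50 accepts exactly the
# z samples in [50, 70].
# layer_point_test(20.0, 50.0, 60.0)   # truthy (inside the layer)
# layer_point_test(20.0, 50.0, 75.0)   # falsy  (above the layer)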
def ellipsoid_point_test(center, a, b, c, x, y, z):
'''
Overview:
Uses the generic formula for an Ellipsoid feature to test point inclusion.
This test can represent a sphere by setting a = b = c.
Parameters:
a,b,c:(float|angstroms) = 'a' is the radius of the Ellipsoid
in the x direction, 'b' is the radius of the Ellipsoid in the y direction,
and 'c' is the radius of the Ellipsoid in the z direction.
center:(float,[3]|angstroms) = The x, y, and z component of the central
point of the ellipsoid. In the case that the center is set to
[None,None,None] the shape will be put in the bottom corner of the unit cell
(the bounding box will start at (0,0,0)).
x,y,z:(float|angstroms) = coordinates for the point being tested.
'''
test_one = less_equal(0,
(((x - center[0])**2) / (a**2)) +
(((y - center[1])**2) / (b**2)) +
(((z - center[2])**2) / (c**2))
)
test_two = greater_equal(1,
(((x - center[0])**2) / (a**2)) +
(((y - center[1])**2) / (b**2)) +
(((z - center[2])**2) / (c**2)))
test = test_one * test_two
return test
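# Hedged example: setting a = b = c reduces the ellipsoid test to a sphere
# test, e.g. a radius-5 sphere centered at (10, 10, 10):
# ellipsoid_point_test([10, 10, 10], 5.0, 5.0, 5.0, 10, 10, 12)   # truthy
# ellipsoid_point_test([10, 10, 10], 5.0, 5.0, 5.0, 10, 10, 16)   # falsy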
def K3D_point_test(self,x,y,z):
'''
This module builds the matrix (voxel) representation of the K3D shape list.
cell_count = the number of cells in a specific dimension
step_size = the real space size of each voxel
'''
on_line_check = False
#************Calculates the points used in the matrix representation*******
unit_cell = zeros([size(x),size(y),size(z)])
mag_cell = zeros([size(x),size(y),size(z)])
x_neg = -2*z[-1]
x_far = 2*z[-1]
x_vector = array(x)
for l in range(size(self.k3d_shapelist)):
print('VOXELIZING SHAPE.....', str(l + 1))
#**************Loop is done for each point in my data******************
for k in range(size(z)):
for j in range(size(y)):
point_of_inter = zeros([3,self.k3d_shapelist[l].numpoly])
l1,l2 = line_chooser([x[0],y[0],
z[0]],y[j],z[k],x[-1],
self.k3d_shapelist[l].edges,
self.k3d_shapelist[l].vertices,
self.k3d_shapelist[l].numpoly)
y[j] = l1[1]
z[k] = l1[2]
inclusion = 0
line_end_neg = [x_neg,y[j],z[k]]
line_end_far = [x_far,y[j],z[k]]
crosses_raw = zeros(self.k3d_shapelist[l].numpoly)
for ii in range(self.k3d_shapelist[l].numpoly):
crosses_raw[ii],point_of_inter[:,ii] = (plane_test(
self.k3d_shapelist[l].vertices,
self.k3d_shapelist[l].edges[ii,:],
line_end_neg,line_end_far))
point_of_inter_array = array(point_of_inter)
crosses = point_of_inter_array[:,crosses_raw==True]
if (sum(crosses))>0:
new_vector=point_inclusion_test(crosses,x_vector,
self.SLD_list[l])
current_vector = unit_cell[:,j,k]
unit_cell[current_vector==0,j,k]= (new_vector
[current_vector==0])
#does not handle magnetic K3D feature. This mag_cell is a placeholder
return unit_cell, mag_cell
#******************Start of Line Test Functions*******************************
def plane_test(vertices,edges,line_end_neg,line_end_far):
p1 = vertices[edges[0],:]
p2 = vertices[edges[1],:]
p3 = vertices[edges[2],:]
p4 = vertices[edges[3],:]
[crosses_raw,point_of_inter] = line_through_plane(p1,p2,p3,p4,
line_end_neg,line_end_far)
return crosses_raw, point_of_inter
def approx(a,b,tol=1e-5):
'''
Return True if a and b are equal within the given tolerance.
'''
return abs(a-b) < tol
def approx_between(a,b,c,tol=1e-5):
'''
Returns True if b is approximately greater than or equal to a and
approximately less than or equal to c.
'''
if b >= (a - tol) and b <= (c + tol):
return True
else:
return False
def vector_calc(point_one,point_two):
ans = point_one - point_two
return ans
def colinear(p1,p2,p3,p4):
'''
This module determines whether 4 points are collinear
p1,p2,p3,p4 = points to be tested for collinearity
'''
check = empty(2)
p21 = p2 - p1
p31 = p3 - p1
p41 = p4 - p1
num = sum(abs(cross((p21),(p31))))
denom = sum(abs((p21)))
if (approx(num,0)) and (approx(denom,0)):
d = 0.0
else:
d = num/denom
if (approx(d,0) == True):
check[0] = True
else:
check[0] = False
num = float(sum(abs(cross((p21),(p41)))))
denom = float(sum(abs((p21))))
if (approx(num,0)) and (approx(denom,0)):
d = 0.0
else:
d = num/denom
if (approx(d,0) == True):
check[1] = True
else:
check[1] = False
if (approx(check[0],1)) and (approx(check[1],1)):
colinear = True
else:
colinear = False
return colinear
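# Hedged example of the cross-product collinearity check above (array comes
# from numpy, assumed imported at module scope):
# colinear(array([0,0,0]), array([1,0,0]), array([2,0,0]), array([3,0,0]))   # True
# colinear(array([0,0,0]), array([1,0,0]), array([2,1,0]), array([3,0,0]))   # False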
def line_inter(p1,p2,p3,p4):
'''
This module determines whether a ray and a segment intersect
p1,p2 defines a line segment from either a polyhedron face or a polygon side
p3,p4 defines the ray from the test point (p3) to an outer sphere point (p4)
'''
A = empty((2,2))
b = empty((2,1))
x = empty ((2,1))
p1_test = array([p1[0],p1[1]])
p2_test = array([p2[0],p2[1]])
p3_test = array([p3[0],p3[1]])
p4_test = array([p4[0],p4[1]])
# Note: these extra 2D projections are brittle, but they support the plane check below
if size(p1)==3:
p1_test_2 = array([p1[1],p1[2]])
p2_test_2 = array([p2[1],p2[2]])
p3_test_2 = array([p3[1],p3[2]])
p4_test_2 = array([p4[1],p4[2]])
if (any(p1_test != p2_test)) and (any(p3_test != p4_test)):
#This is the case if there are only two dimensions being tested or if the
#two lines are in the xy-plane
for f in range (2):
A[f,0] = -(p4[f] - p3[f])
A[f,1] = p2[f]- p1[f]
b[f] = p3[f]-p1[f]
try:
x = linalg.solve(A,b)
#x[0] is the position on the ray of the intersection
#x[1] is the position on the line segment
crossing = (0 <= x[0]) and (0 <= x[1] <= 1)
if crossing and len(p1) == 3:
# assumed fix: the out-of-plane z consistency check uses the same
# difference form as the yz-plane branch below
crossing = approx((p3[2] - p1[2]),((p2[2]-p1[2])*x[1] -
(p4[2]-p3[2])*x[0]))
except linalg.LinAlgError:
crossing = False
elif(any(p1_test_2 != p2_test_2)) and (any(p3_test_2 != p4_test_2)):
#This is the case if the two lines are in the yz plane
for f in range (2):
A[f,0] = -(p4[f+1] - p3[f+1])
A[f,1] = p2[f+1]- p1[f+1]
b[f] = p3[f+1]-p1[f+1]
try:
x = linalg.solve(A,b)
#x[0] is the position on the ray of the intersection
#x[1] is the position on the line segment
crossing = (0<= x[0]) and (0<= x[1] <= 1)
if crossing and len(p1) == 3:
crossing = approx((p3[0] - p1[0]),((p2[0]-p1[0])*x[1]-
(p4[0]-p3[0])*x[0]))
except linalg.LinAlgError:
crossing = False
else:
#This is the case if the two lines are in the xz plane
# assumed fix: project onto the in-plane x (index 0) and z (index 2)
# components, then verify consistency on the out-of-plane y component
for f in range(2):
A[f,0] = -(p4[2*f] - p3[2*f])
A[f,1] = p2[2*f] - p1[2*f]
b[f] = p3[2*f] - p1[2*f]
try:
x = linalg.solve(A,b)
#x[0] is the position on the ray of the intersection
#x[1] is the position on the line segment
crossing = (-1e-14 <= x[0]) and (-1e-14 <= x[1] <= 1+1e-14)
if crossing and len(p1) == 3:
crossing = approx((p3[1] - p1[1]),((p2[1]-p1[1])*x[1] -
(p4[1]-p3[1])*x[0]))
except linalg.LinAlgError:
crossing = False
return crossing
def point_on_line(p1,p2,point):
'''
This module determines whether a point falls on a line segment.
p1, p2 = points which define the line being tested.
point = the point which will be determined whether or not falls on the
line.
'''
high = empty(3)
low = empty(3)
p21 = p2- p1
p1_point = p1- point
for i in range (3):
if p21[i] > 0:
high[i] = p2[i]
low [i] = p1[i]
else:
high[i] = p1[i]
low [i] = p2[i]
num = float(sum(abs(cross((p21),(p1_point)))))
denom = float(sum(abs((p21))))
if approx(num,0) and approx(denom,0):
d = 0
else:
d = num/denom
if ((approx(d,0) == True) and
(approx_between(low[0],point[0],high[0])) and
approx_between(low[1],point[1],high[1]) and
approx_between(low[2],point[2],high[2])):
on_line = True
else:
on_line = False
return on_line
def point_on_poly(p1,p2,p3,p4,point):
'''
This module determines if a point falls on a plane
p1,p2,p3,p4 = four points which make up a plane
V = a vector normal to the plane
point = point being tested to determine if it lies on the plane
'''
v = (cross(vector_calc(p1,p2),vector_calc(p3,p2)))
D = -(p4[0]*v[0]) - (p4[1]*v[1]) - (p4[2]*v[2])
angle = array([pi,pi/2,pi/4])
ang = 0
check = empty(4)
circle_point = empty(3)
point_test = True
plane_eq = hstack([v,D])
distance = (plane_eq[0]*point[0] + plane_eq[1]*point[1] +
plane_eq[2]*point[2] + plane_eq[3])**2/(plane_eq[0]**2 +
plane_eq[1]**2 +
plane_eq[2]**2)
if approx(distance,0):
if not colinear(array([p1[0],p1[1],0]), array([p2[0],p2[1],0]),
array([p3[0],p3[1],0]) , array([p4[0],p4[1],0])):
proj_axis_one = 0
proj_axis_two = 1
zero_axis = 2
elif not colinear(array([p1[0],0,p1[2]]), array([p2[0],0,p2[2]]),
array([p3[0],0,p3[2]]) , array([p4[0],0,p4[2]])):
proj_axis_one = 0
proj_axis_two = 2
zero_axis = 1
elif not colinear( array([0,p1[1],p1[2]]), array([0,p2[1],p2[2]]),
array([0,p3[1],p3[2]]) , array([0,p4[1],p4[2]])):
proj_axis_one = 1
proj_axis_two = 2
zero_axis = 0
poly = vstack([[p1],[p2],[p3],[p4]])
if (max(poly[:,proj_axis_one]) > point[proj_axis_one] >
min(poly[:,proj_axis_one])):
if (max(poly[:,proj_axis_two]) > point[proj_axis_two] >
min(poly[:,proj_axis_two])):
r = max(poly) + 1
while point_test == True:
circle_point[proj_axis_one] =r*cos(angle[ang])
circle_point[proj_axis_two] = r*sin(angle[ang])
circle_point[zero_axis] = 0
# Make a private copy for projection onto the zero_axis
point,p1,p2,p3,p4 = [array(m) for m in (point,p1,p2,p3,p4)]
point[zero_axis] = 0
p1[zero_axis] = 0
p2[zero_axis] = 0
p3[zero_axis] = 0
p4[zero_axis] = 0
x = array([point[proj_axis_one],point[proj_axis_two],0])
check[0] = point_on_line(point,circle_point,p1)
check[1] = point_on_line(point,circle_point,p2)
check[2] = point_on_line(point,circle_point,p3)
check[3] = point_on_line(point,circle_point,p4)
if check[0] == check[1] == check[2]== check[3] == False:
point_test = False
else:
ang += 1
check[0] = line_inter([p1[proj_axis_one],p1[proj_axis_two]],
[p2[proj_axis_one],p2[proj_axis_two]],
[point[proj_axis_one],
point[proj_axis_two]],
[circle_point[proj_axis_one],
circle_point[proj_axis_two]])
check[1] = line_inter([p2[proj_axis_one],p2[proj_axis_two]],
[p3[proj_axis_one],p3[proj_axis_two]],
[point[proj_axis_one],
point[proj_axis_two]],
[circle_point[proj_axis_one],
circle_point[proj_axis_two]])
check[2] = line_inter([p3[proj_axis_one],p3[proj_axis_two]],
[p4[proj_axis_one],p4[proj_axis_two]],
[point[proj_axis_one],
point[proj_axis_two]],
[circle_point[proj_axis_one],
circle_point[proj_axis_two]])
check[3] = line_inter([p4[proj_axis_one],p4[proj_axis_two]],
[p1[proj_axis_one],p1[proj_axis_two]],
[point[proj_axis_one],
point[proj_axis_two]],
[circle_point[proj_axis_one],
circle_point[proj_axis_two]])
total_lines = sum(check)
if total_lines == 0 or total_lines == 2:
on_plane = False
else:
on_plane = True
else:
on_plane = False
else:
on_plane = False
else:
on_plane = False
return on_plane
def line_through_plane(p1,p2,p3,p4,l1,l2):
'''
This module determines whether a line, which is known not to cross a
3D shape face at a line or a point and does not lie
along a line on the face, crosses the face
p1, p2, p3, p4 = points which define the shape face in three dimensions
l1,l2 = the line being tested for crossing
'''
f = 0
point_inter = empty((3))
A = empty((3,3))
b = empty((3,1))
x = empty ((3,1))
for f in range (3):
b[f] = l1[f] - p1[f]
A[f,0] = l1[f] - l2[f]
A[f,1] = p2[f] - p1[f]
A[f,2] = p3[f] - p1[f]
try:
x = linalg.solve(A,b)
#x[0] is the position on the ray of the intersection
ray_check = (-1e-5<= x[0])
if ray_check:
for f in range (3):
point_inter[f] = l1[f]+(l2[f]-l1[f]) * x[0]
crossing = point_on_poly(p1,p2,p3,p4,point_inter)
else:
crossing = False
except linalg.LinAlgError:
crossing = False
return crossing,point_inter
def circle_test(x,y,z,sphere_r):
'''
This module is the final module which can determine whether a point falls
inside a sphere which encompasses a feature
x,y,z = the values of the point being tested
sphere_r = a point which, from zero, creates a sphere that encompasses
the whole feature
'''
half_point = [(sphere_r[0]/2),(sphere_r[1]/2),(sphere_r[2]/2)]
if (((x - half_point[0])**2 +
(y - half_point[1])**2 +
(z - half_point[2])**2) >
(half_point[0]**2 ) + (half_point[1]**2) + (half_point[2]**2)):
inside = False
else:
inside = True
return inside
def line_chooser(step_list,y_value,z_value,Dx,poly_array,
point_array,num_polygons):
'''
This module shortens computation time by calculating a single line that can
be utilized for a full row of voxels
x_value,y_value,z_value = points which define the shape face in three
dimensions
'''
fraction_tracker = 1
y_start = y_value
z_start = z_value
x_neg = -2*Dx
x_far = 2*Dx
inner_point = array([x_neg,y_start,z_start])
outer_point = array([x_far,y_start,z_start])
point_determine = 0
for ii in range(num_polygons): #LOOP FOR EACH PLANE
for iii in range (4): #LOOP FOR EACH LINE THAT MAKES UP EACH PLANE
poly_point_one = point_array[poly_array[ii,iii],:]
if ((iii+1) < (4)):
poly_point_two = point_array[poly_array[ii,iii+1],:]
else:
poly_point_two = point_array[poly_array[ii,0]]
# test vector crosses a line or a node: chooses new vector
intersect_check = line_inter(poly_point_one,poly_point_two,
inner_point,outer_point)
if (intersect_check == True):
fraction_tracker += 1
y_start = y_value - (step_list[1]/fraction_tracker)
z_start = z_value - (step_list[2]/fraction_tracker)
inner_point = array([x_neg,y_start,z_start])
outer_point = array([x_far,y_start,z_start])
intersect_check = False
ii = 0
iii = 4
return inner_point,outer_point
def shape_builder(x_value,y_value,z_value,poly_array,point_array,
num_polygons,far_point):
'''
This module is the final module which can determine whether a point
falls inside a shape
x_value,y_value,z_value = points which define the shape face in three
dimensions
poly_array = array of numbers that indicate the four points in point_array
that make up a face
point_array = array of real points
'''
sphere_point = zeros(3)
poly_point_one = zeros(3)
poly_point_two = zeros(3)
check = False
on_line_check = False
intersect_check = False
test_result = False
face_count = 0
d = 0
e = 0
#**Tests possible complications caused by the point falling on a specific feature
test_point = [x_value,y_value,z_value]
for ii in range(num_polygons): #LOOP FOR EACH PLANE
#Test to see if the point falls on the polygon face
if point_on_poly(point_array[poly_array[ii,0],:],
point_array[poly_array[ii,1],:],
point_array[poly_array[ii,2],:],
point_array[poly_array[ii,3],:],
test_point) == True:
on_line_check = True
break
#Tests to see if the point falls on any of the lines that make up the polygon
elif point_on_line(point_array[poly_array[ii,0],:],
point_array[poly_array[ii,1],:],
test_point):
on_line_check = True
break
elif point_on_line(point_array[poly_array[ii,1],:],
point_array[poly_array[ii,2],:],
test_point):
on_line_check = True
break
elif point_on_line(point_array[poly_array[ii,2],:],
point_array[poly_array[ii,3],:],
test_point):
on_line_check = True
break
elif point_on_line(point_array[poly_array[ii,3],:],
point_array[poly_array[ii,0],:],
test_point):
on_line_check = True
break
else:
on_line_check = False
if (on_line_check == False):
for ii in range(num_polygons): #LOOP FOR EACH PLANE
if line_through_plane(point_array[poly_array[ii,0],:],
point_array[poly_array[ii,1],:],
point_array[poly_array[ii,2],:],
point_array[poly_array[ii,3],:],
test_point,far_point) == True:
face_count = face_count + 1
if face_count % 2 == 1: # an odd number of face crossings means the point is inside
test_result = True
else:
test_result = False
else:
test_result = True
return test_result
def point_inclusion_test(poly_crossed,vector,SLD):
'''
This module determines if a point falls in a feature by a less
expensive method involving the calculation of the line/poly
intersection points and determining whether the point falls between them,
inside or outside of the polyhedron.
poly_crossed = is a list of which polygons were and were not crossed.
point_of_inter = the point at which the line crosses the polygon for those
that do pass through it.
test_point = the point on which the inclusion test is being done.
'''
sorted_poly = sort(poly_crossed[0,:])
locator = sorted_poly.searchsorted(vector)%2
SLD_profile = zeros(size(locator))
SLD_profile[locator==1] = SLD
return SLD_profile
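# Hedged sketch of the parity trick above: sorting the ray/polygon crossing
# x-coordinates and taking searchsorted(...) % 2 marks samples between an
# entry/exit pair as inside.
# _crossings = sort(array([2.0, 8.0]))      # entry and exit along the ray
# _xs = array([1.0, 5.0, 9.0])
# _crossings.searchsorted(_xs) % 2          # -> [0, 1, 0]: only x = 5.0 is inside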
def test():
'''
This test contains an array of assertion statements to ensure the shapes
are being properly treated.
'''
from sample_prep import Parallelapiped, Sphere, Ellipse, Cone, Cylinder
first_cone = Cone(SLD = 9.4e-5,dim = [5.0,5.0,5.0],stub = None)
assert cone_point_test(first_cone.center, first_cone.dim, first_cone.stub,2.5,2.5,2.0) == True, 'Cone calculation broke'
test_sphere = Sphere(9.5e-4,10.0)
assert sphere_point_test(test_sphere.center,test_sphere.r,10,10,10) == True, 'Sphere: point in sphere'
assert sphere_point_test(test_sphere.center,test_sphere.r,1,1,1) == False, 'Sphere: point out of sphere'
test_parallel = Parallelapiped(9.87e-6,[10,10,10])
assert parallel_point_test(test_parallel.center, test_parallel.dim,5,5,5) == True, 'parallel: Inside box'
assert parallel_point_test(test_parallel.center, test_parallel.dim,11,5,5) == False, 'parallel: Outside Box'
assert parallel_point_test(test_parallel.center, test_parallel.dim,11,11,11) == False, 'parallel: all axes Outside Box'
test_ellipse = Ellipse(9.8e-6, [6.0,8.0,10.0])
assert ellipse_point_test(test_ellipse.center,test_ellipse.dim,3,8,5) == True, 'ellipse: yaxis limit '
assert ellipse_point_test(test_ellipse.center,test_ellipse.dim,3,8,11) == False, 'ellipse: z test '
assert ellipse_point_test(test_ellipse.center,test_ellipse.dim,0,4,5)== True, 'ellipse: xaxis limit'
assert ellipse_point_test(test_ellipse.center,test_ellipse.dim,5,4,5)== True, 'ellipse: random'
test_cylinder = Cylinder(9.8e-6, [6.0,8.0,10.0])
# Add Assertions later
# assert ellipse_point_test(test_ellipse.center,test_ellipse.dim,3,8,5) == True, 'ellipse: yaxis limit '
# assert ellipse_point_test(test_ellipse.center,test_ellipse.dim,3,8,11) == False, 'ellipse: z test '
# assert ellipse_point_test(test_ellipse.center,test_ellipse.dim,0,4,5)== True, 'ellipse: xaxis limit'
# assert ellipse_point_test(test_ellipse.center,test_ellipse.dim,5,4,5)== True, 'ellipse: random'
if __name__ == "__main__":
test()
| StarcoderdataPython |
23732 | <reponame>noahnisbet/human-rights-first-asylum-ds-noahnisbet<gh_stars>1-10
import os
os.environ["OMP_NUM_THREADS"]= '1'
os.environ["OMP_THREAD_LIMIT"] = '1'
os.environ["MKL_NUM_THREADS"] = '1'
os.environ["NUMEXPR_NUM_THREADS"] = '1'
os.environ["OMP_NUM_THREADS"] = '1'
os.environ["PAPERLESS_AVX2_AVAILABLE"]="false"
os.environ["OCR_THREADS"] = '1'
import poppler
import pytesseract
from pdf2image import convert_from_bytes
from fastapi import APIRouter, File
import sqlalchemy
from dotenv import load_dotenv, find_dotenv
from sqlalchemy import create_engine
from app.BIA_Scraper import BIACase
import requests
import pandas as pd
import numpy as np
from PIL import Image
router = APIRouter()
load_dotenv(find_dotenv())
database_url = os.getenv('DATABASE_URL')
engine = sqlalchemy.create_engine(database_url)
@router.post('/get_text')
async def get_text_from_case_file(file: bytes = File(...)):
'''
This function OCR-converts an uploaded PDF and returns the extracted text
'''
text = []
### Converts the bytes object received from fastapi
pages = convert_from_bytes(file,200,fmt='png',thread_count=2)
### Uses pytesseract to convert each page of pdf to txt
### (image_to_string expects a single image, so loop as in get_fields below)
for page in pages:
text.append(pytesseract.image_to_string(page))
### Joins the list to an output string
string_to_return = " ".join(text)
return {'Text': string_to_return}
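### Hedged client-side sketch (names are illustrative): post a local PDF to
### this route with the requests library, assuming the app is served locally.
# with open('some_case.pdf', 'rb') as pdf:
#     resp = requests.post('http://localhost:8000/get_text', files={'file': pdf})
# resp.json()['Text'] then holds the OCR output.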
@router.post('/get_fields')
async def get_fields_from_case_file(file: bytes = File(...)):
text = []
### Converts the bytes object received from fastapi
pages = convert_from_bytes(file,200,fmt='png',thread_count=2)
### Uses pytesseract to convert each page of pdf to txt
for item in pages:
text.append(pytesseract.image_to_string(item))
### Joins the list to an output string
string = " ".join(text)
### Using the BIACase Class to populate fields
case = BIACase(string)
### Json object / dictionary to be returned
case_data = {}
### Application field
app = case.get_application()
app = [ap for ap, b in app.items() if b]
case_data['application'] = '; '.join(app) if app else None
### Date field
case_data['date'] = case.get_date()
### Country of origin
case_data['country_of_origin'] = case.get_country_of_origin()
### Getting Panel members
panel = case.get_panel()
case_data['panel_members'] = '; '.join(panel) if panel else None
### Getting case outcome
case_data['outcome'] = case.get_outcome()
### Getting protected grounds
pgs = case.get_protected_grounds()
case_data['protected_grounds'] = '; '.join(pgs) if pgs else None
### Getting the violence type on the asylum seeker
based_violence = case.get_based_violence()
violence = '; '.join([k for k, v in based_violence.items() if v]) \
if based_violence \
else None
### Getting keywords
keywords = '; '.join(['; '.join(v) for v in based_violence.values()]) \
if based_violence \
else None
case_data['based_violence'] = violence
case_data['keywords'] = keywords
### Getting references / sex of applicant
references = [
'Matter of AB, 27 I&N Dec. 316 (A.G. 2018)'
if case.references_AB27_216() else None,
'Matter of L-E-A-, 27 I&N Dec. 581 (A.G. 2019)'
if case.references_LEA27_581() else None
]
case_data['references'] = '; '.join([r for r in references if r])
case_data['sex_of_applicant'] = case.get_seeker_sex()
return case_data
| StarcoderdataPython |
1699621 | <reponame>SamirMitha/Denoising<gh_stars>0
import glob, os
import time
import tensorflow as tf
import numpy as np
import scipy.io as sio
import pickle
from models import FFDNet
from losses import mse
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, TerminateOnNaN
from data_generator import DataGenerator
import matplotlib.pyplot as plt
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
gpus = tf.config.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
# Defaults
n_channels = 3
batch_size = 128
epochs = 80
learning_rate = 1e-5
model_loss = 'mse'
monitor = 'val_loss'
train_split = 0.8
validation_split = 0.1
test_split = 0.1
checkpoint = 00
# Directories
noisy_train_path = '/media/samir/Secondary/Denoising/dncnn/test/SIDD/train/noisy/'
gt_train_path = '/media/samir/Secondary/Denoising/dncnn/test/SIDD/train/gt/'
noisy_val_path = '/media/samir/Secondary/Denoising/dncnn/test/SIDD/val/noisy/'
gt_val_path = '/media/samir/Secondary/Denoising/dncnn/test/SIDD/val/gt/'
model_path = '/media/samir/Secondary/Denoising/ffdnet/models/'
model_name = 'FFDNet_Default_SIDD_'
# Create output directories
if(not os.path.isdir(model_path) or not os.listdir(model_path)):
os.makedirs(model_path + '/logs')
os.makedirs(model_path + '/models')
os.makedirs(model_path + '/history')
os.makedirs(model_path + '/figures')
os.makedirs(model_path + '/params')
os.makedirs(model_path + '/checkpoints')
# Create train list
train_names = glob.glob(noisy_train_path + '/*.png')
num_imgs = len(train_names)
idx = np.arange(num_imgs)
train_ids = idx
# Create validation list
val_names = glob.glob(noisy_val_path + '/*.png')
v_num_imgs = len(val_names)
idx = np.arange(v_num_imgs)
val_ids = idx
# Create generators
train_gen = DataGenerator(noisy_path=noisy_train_path, gt_path=gt_train_path, batch_size=batch_size)
val_gen = DataGenerator(noisy_path=noisy_val_path, gt_path=gt_val_path, batch_size=batch_size)
# Model Parameters
params = dict()
params['Number of channels'] = n_channels
params['Batch Size'] = batch_size
params['Epochs'] = epochs
params['Learning rate'] = learning_rate
params['Training split'] = train_split
params['Validation split'] = validation_split
params['Testing split'] = test_split
print(['Model Parameters'])
print('------------')
for key in params.keys():
print(key + ':', params[key])
# Create Model
FFDNet = FFDNet()
model = FFDNet.get_model()
# Model Summary
print(model.summary())
# Compile Model
model.compile(optimizer = Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999, decay=0.0001),
loss='mean_squared_error')
callbacks = []
# CSV Logger
callbacks.append(CSVLogger(model_path + '/logs/' + model_name + '.csv'))
# Model Checkpoints
callbacks.append(ModelCheckpoint(model_path + '/checkpoints/' + 'epoch-{epoch:02d}/' + model_name + '.h5', monitor=monitor, save_freq=100))
# Stop on NaN
callbacks.append(TerminateOnNaN())
# Fit model
start_time = time.time()
print("Starting Training...")
model_history = model.fit(train_gen,
steps_per_epoch=len(train_ids)//batch_size,
validation_data=val_gen,
validation_steps=len(val_ids)//batch_size,
verbose=1, epochs=epochs, callbacks=callbacks)
print("...Finished Training")
elapsed_time = time.time() - start_time
# Save history
with open(model_path + '/history/' + model_name, 'wb') as fp:
pickle.dump(model_history.history, fp)
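# Hedged sketch: the pickled history can be restored later, e.g. to re-plot
# the loss curves without retraining.
# with open(model_path + '/history/' + model_name, 'rb') as fp:
#     restored = pickle.load(fp)
# restored['loss'] and restored['val_loss'] are per-epoch lists.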
# Save parameters
params['Training Times'] = elapsed_time
f = open(model_path + '/params/' + model_name + '.txt', 'w')
f.write('[Model Parameters]' + '\n')
f.write('------------' + '\n')
for k, v in params.items():
f.write(str(k) + ': '+ str(v) + '\n')
f.close()
timestr = time.strftime('%Y%m%d-%H%M%S.h5')
model.save(model_path + '/models/' + model_name + timestr)
print('Model saved successfully.')
# Display loss curves
fig, ax = plt.subplots(1, 1)
ax.plot(model_history.history['loss'], color='blue', label='Training Loss')
ax.plot(model_history.history['val_loss'], color='orange', label='Validation Loss')
ax.set_title('Loss Curves')
ax.set_ylabel(model_loss)
ax.set_xlabel('Epochs')
plt.legend()
# Save figure
plt.savefig(model_path + '/figures/' + model_name + '.png')
print('Loss figure saved successfully.') | StarcoderdataPython |
125176 | <reponame>gddcx/pytorch-ssd
# -*- coding: utf-8 -*-
# Author: <NAME>
# @Time: 2021/10/8 11:27
import os
import cv2 as cv
import random
import numpy as np
import torch
from torch.utils.data import Dataset
class VOCDataset(Dataset):
def __init__(self, data, image_root="", transform=None, train=True):
super().__init__()
self.data = data
self.transform = transform
self.image_root = image_root
self.train = train
self.feature_map_size = [37, 18, 9, 5, 3, 1]
self.default_boxes_num = [4, 6, 6, 6, 4, 4]
self.Sk = [0.2, 0.34, 0.48, 0.62, 0.76, 0.9, 1]
def __len__(self):
return len(self.data)
def __getitem__(self, index):
image_dict = self.data[index]
folder = image_dict["folder"]
name = image_dict["filename"]
image_path = os.path.join(self.image_root, *folder, "JPEGImages", name)
img = cv.imread(image_path)
category = image_dict["category"]
bbox = image_dict["bndbox"] # shape: nbox, 4
if self.train: # 数据增强
hsv_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
hsv_img = self.hue(hsv_img)
hsv_img = self.saturation(hsv_img)
hsv_img = self.value(hsv_img)
img = cv.cvtColor(hsv_img, cv.COLOR_HSV2BGR)
img = self.average_blur(img)
img, bbox = self.horizontal_flip(img, bbox)
img, bbox, category = self.crop(img, bbox, category)
img, bbox = self.scale(img, bbox)
img, bbox, category = self.translation(img, bbox, category)
# Caused by data augmentation, such as crop, translation and so on.
if len(bbox) == 0 or len(category) == 0:
return [], []
target = self.encoder(img, bbox, category)
img = cv.resize(img, (300, 300))
img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
if self.transform:
img = self.transform(img)
return img, target
def hue(self, hsv_img):
if random.random() > 0.5:
factor = random.uniform(0.5, 1.5)
enhanced_hue = hsv_img[:, :, 0] * factor
enhanced_hue = np.clip(enhanced_hue, 0, 180).astype(hsv_img.dtype) # OpenCV hue range is 0-180 (i.e., 360/2)
hsv_img[:, :, 0] = enhanced_hue
return hsv_img
def saturation(self, hsv_img):
if random.random() > 0.5:
factor = random.uniform(0.5, 1.5)
enhanced_saturation = hsv_img[:, :, 1] * factor
enhanced_saturation = np.clip(enhanced_saturation, 0, 255).astype(hsv_img.dtype)
hsv_img[:, :, 1] = enhanced_saturation
return hsv_img
def value(self, hsv_img):
if random.random() > 0.5:
factor = random.uniform(0.5, 1.5)
enhanced_value = hsv_img[:, :, 2] * factor
enhanced_value = np.clip(enhanced_value, 0, 255).astype(hsv_img.dtype)
hsv_img[:, :, 2] = enhanced_value
return hsv_img
def average_blur(self, img):
if random.random()>0.5:
img = cv.blur(img, (3, 3))
return img
def horizontal_flip(self, img, bbox):
if random.random() > 0.5:
img = cv.flip(img, 1)
h, w, _ = img.shape
temp = w - bbox[:, 0]
bbox[:, 0] = w - bbox[:, 2]
bbox[:, 2] = temp
return img, bbox
def crop(self, img, bbox, category):
if random.random() > 0.5:
factor_horizontal = random.uniform(0, 0.2)
factor_vertical = random.uniform(0, 0.2)
h, w, _ = img.shape
start_horizontal = int(w * factor_horizontal)
end_horizontal = start_horizontal + int(0.8 * w)
start_vertical = int(h * factor_vertical)
end_vertical = start_vertical + int(0.8 * h)
img = img[start_vertical: end_vertical, start_horizontal:end_horizontal, :]
center_x = (bbox[:, 0] + bbox[:, 2]) / 2
center_y = (bbox[:, 1] + bbox[:, 3]) / 2
inImage = (center_x > start_horizontal) & (center_x < end_horizontal) \
& (center_y > start_vertical) & (center_y < end_vertical)
bbox = bbox[inImage, :]
bbox[:, [0, 2]] = bbox[:, [0, 2]] - start_horizontal
bbox[:, [1, 3]] = bbox[:, [1, 3]] - start_vertical
bbox[:, [0, 2]] = np.clip(bbox[:, [0, 2]], 0, int(0.8 * w))
bbox[:, [1, 3]] = np.clip(bbox[:, [1, 3]], 0, int(0.8 * h))
category = category[inImage]
return img, bbox, category
def scale(self, img, bbox):
probility = random.random()
if probility > 0.7:
factor = random.uniform(0.5, 1.5)
h, w, _ = img.shape
h = int(h * factor)
img = cv.resize(img, (w, h)) # cv.resize takes the size in (w, h) order
bbox[:, [1, 3]] = bbox[:, [1, 3]] * factor
elif probility < 0.3:
factor = random.uniform(0.5, 1.5)
h, w, _ = img.shape
w = int(w * factor)
img = cv.resize(img, (w, h))
bbox[:, [0, 2]] = bbox[:, [0, 2]] * factor
bbox = bbox.astype(np.int64) # np.int is removed in recent numpy; use a concrete dtype
return img, bbox
def translation(self, img, bbox, category):
if random.random() > 0.5:
factor_horizontal = random.uniform(-0.2, 0.2)
factor_vertical = random.uniform(-0.2, 0.2)
h, w, _ = img.shape
w_tran = int(w * factor_horizontal)
h_tran = int(h * factor_vertical)
canvas = np.zeros_like(img)
if w_tran < 0 and h_tran < 0: # shift toward the bottom-right
canvas[-h_tran:, -w_tran:, :] = img[:h + h_tran, :w + w_tran, :]
elif w_tran < 0 and h_tran >= 0: # shift toward the top-right
canvas[:h - h_tran, -w_tran:, :] = img[h_tran:, :w + w_tran, :]
elif w_tran >= 0 and h_tran < 0: # shift toward the bottom-left
canvas[-h_tran:, :w - w_tran, :] = img[:h + h_tran, w_tran:, :]
elif w_tran >= 0 and h_tran >= 0: # shift toward the top-left
canvas[:h - h_tran, :w - w_tran, :] = img[h_tran:, w_tran:, :]
bbox[:, [0, 2]] = bbox[:, [0, 2]] - w_tran
bbox[:, [1, 3]] = bbox[:, [1, 3]] - h_tran
# keep only boxes whose centers stay inside the image, since the cell containing the center is responsible for the prediction
center_x = (bbox[:, 0] + bbox[:, 2]) / 2 # shape: nbox
center_y = (bbox[:, 1] + bbox[:, 3]) / 2 # shape: nbox
inImage = ((center_x > 0) & (center_x < w)) & ((center_y > 0) & (center_y < h))
bbox = bbox[inImage, :]
bbox[:, [0, 2]] = np.clip(bbox[:, [0, 2]], 0, w) # the center may remain inside the image while the box edges exceed it, so clip to bounds
bbox[:, [1, 3]] = np.clip(bbox[:, [1, 3]], 0, h)
category = category[inImage]
return canvas, bbox, category
return img, bbox, category
def encoder(self, img, bbox, category):
bbox = torch.from_numpy(bbox)
category = torch.from_numpy(category)
h, w, _ = img.shape
w_scale_factor = 300 / w
h_scale_factor = 300 / h
bbox[:, [0, 2]] = (bbox[:, [0, 2]] * w_scale_factor).long()
bbox[:, [1, 3]] = (bbox[:, [1, 3]] * h_scale_factor).long()
# 1. Ensure every ground-truth box matches at least one default box (handles multiple gt matching the same default box)
# 2. Allow multiple default boxes to be assigned to the same ground truth
iou, default_box = self.calculate_iou(bbox, self.feature_map_size, self.default_boxes_num, self.Sk) # n, 8096
_, selected_default_box_idx = torch.max(iou, dim=1, keepdim=True) # guarantee every gt gets a corresponding default box
selected_gt_iou, selected_gt_idx = torch.max(iou, dim=0, keepdim=True) # each default box picks one gt, so several default boxes can share a gt
selected_default_box_idx = selected_default_box_idx.squeeze(-1)
selected_gt_iou = selected_gt_iou.squeeze(0)
selected_gt_idx = selected_gt_idx.squeeze(0)
selected_gt_iou[selected_default_box_idx] = 2 # even if a gt's matched default box has iou < 0.5, this keeps it from being labeled background below
selected_gt_idx[selected_default_box_idx] = torch.arange(0, len(selected_default_box_idx))
label = category[selected_gt_idx] + 1 # label for each default box
mask = selected_gt_iou < 0.5
label[mask] = 0 # background class
label = label.unsqueeze(-1)
coordinate = bbox[selected_gt_idx, :] # 8096, 4
center_x = (coordinate[:, 0] + coordinate[:, 2]) // 2
center_y = (coordinate[:, 1] + coordinate[:, 3]) // 2
width = coordinate[:, 2] - coordinate[:, 0]
height = coordinate[:, 3] - coordinate[:, 1]
default_center_x = default_box[0]
default_center_y = default_box[1]
default_width = default_box[2]
default_height = default_box[3]
tx = (center_x - default_center_x)/default_width # 8096,
tx = tx.unsqueeze(-1)
ty = (center_y - default_center_y)/default_height
ty = ty.unsqueeze(-1)
tw = torch.log(width / default_width)
tw = tw.unsqueeze(-1)
th = torch.log(height / default_height)
th = th.unsqueeze(-1)
return torch.cat([tx, ty, tw, th, label], dim=-1) # # 8096, 5
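# Hedged worked example of the (tx, ty, tw, th) offsets computed above: a
# ground-truth box with center (150, 150) and size 60x60 matched to a default
# box with center (148, 152) and size 50x50 encodes to
# tx = (150-148)/50 = 0.04, ty = (150-152)/50 = -0.04,
# tw = log(60/50) ~= 0.182, th = log(60/50) ~= 0.182.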
def calculate_iou(self, bbox, feature_map_size, default_box_num, Sk):
# compute level by level over the feature maps
iou_list = []
center_x_list = []
center_y_list = []
width_list = []
height_list = []
gt_num = bbox.shape[0]
for idx, (fms, dbn, sk) in enumerate(zip(feature_map_size, default_box_num, Sk)):
center = torch.linspace(0.5, fms-0.5, fms, dtype=torch.float) / fms * 300 # fms,
center_x = center.unsqueeze(0).unsqueeze(0).unsqueeze(-1) # 1, 1, fms, 1
center_x = center_x.repeat(1, fms, 1, dbn) # 1, fms, fms, dbn
center_y = center.unsqueeze(-1).unsqueeze(-1).unsqueeze(0) # 1, fms, 1, 1
center_y = center_y.repeat(1, 1, fms, dbn) # 1, fms, fms, dbn
ratio = torch.Tensor([1, 2, 1 / 2, 3, 1 / 3])[:dbn-1]
width_prime = height_prime = torch.Tensor([np.sqrt(sk * Sk[idx + 1]) * 300])
width = sk * 300 / ratio
width = torch.cat([width, width_prime], dim=0) # dbn,
width = width.unsqueeze(0).unsqueeze(0).unsqueeze(0) # 1, 1, 1, dbn
width = width.repeat(1, fms, fms, 1) # 1, fms, fms, dbn
height = sk * 300 * ratio
height = torch.cat([height, height_prime], dim=0) # dbn,
height = height.unsqueeze(0).unsqueeze(0).unsqueeze(0) # 1, 1, 1, dbn
height = height.repeat(1, fms, fms, 1) # 1, fms, fms, dbn
x1 = center_x - width / 2 # 1, fms, fms, dbn
y1 = center_y - height / 2
x2 = center_x + width / 2
y2 = center_y + height / 2
bbox_copy = bbox.unsqueeze(1).unsqueeze(1).unsqueeze(1).float() # n, 1, 1, 1, 4
left = torch.max(bbox_copy[:, :, :, :, 0], x1)
top = torch.max(bbox_copy[:, :, :, :, 1], y1)
right = torch.min(bbox_copy[:, :, :, :, 2], x2)
bottom = torch.min(bbox_copy[:, :, :, :, 3], y2)
w = torch.max(right - left, torch.Tensor([1e-6]))
h = torch.max(bottom - top, torch.Tensor([1e-6]))
intersection = h * w
union = (bbox_copy[:, :, :, :, 2] - bbox_copy[:, :, :, :, 0]) * (bbox_copy[:, :, :, :, 3] - bbox_copy[:, :, :, :, 1]) \
+ (x2 - x1) * (y2 - y1) - intersection
iou = intersection / union # n, fms, fms, dbn
iou_list.append(iou.reshape(gt_num, -1))
center_x_list.append(center_x.reshape(-1))
center_y_list.append(center_y.reshape(-1))
width_list.append(width.reshape(-1))
height_list.append(height.reshape(-1))
return torch.cat(iou_list, dim=-1), (torch.cat(center_x_list, dim=0), torch.cat(center_y_list, dim=0),
torch.cat(width_list, dim=0), torch.cat(height_list, dim=0))
# #
# if __name__ == "__main__":
# import glob
# import xml.dom.minidom as xdm
# def load_data(data_path):
# voc2007_trainval_annotations = os.path.join(data_path, "VOC2007", "trainval", "Annotations", "*xml")
# annotation_path = glob.glob(voc2007_trainval_annotations)
#
# all_category = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
# "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa",
# "train", "tvmonitor"]
#
# random.shuffle(annotation_path)
# res = []
# for i, path in enumerate(annotation_path):
# path_split = os.path.dirname(path).split('\\')
# folder = [path_split[2], path_split[3]] # [VOC2007/VOC2012, trainval/test]
# DOMTree = xdm.parse(path)
# collection = DOMTree.documentElement
# filename = collection.getElementsByTagName("filename")[0].childNodes[0].data
# category_list = []
# bndbox_list = []
# object_ = collection.getElementsByTagName("object")
# for obj in object_:
# category = obj.getElementsByTagName("name")[0].childNodes[0].data
# bndbox = obj.getElementsByTagName("bndbox")[0]
# xmin = int(bndbox.getElementsByTagName("xmin")[0].childNodes[0].data)
# ymin = int(bndbox.getElementsByTagName("ymin")[0].childNodes[0].data)
# xmax = int(bndbox.getElementsByTagName("xmax")[0].childNodes[0].data)
# ymax = int(bndbox.getElementsByTagName("ymax")[0].childNodes[0].data)
# category_list.append(all_category.index(category))
# bndbox_list.append([xmin, ymin, xmax, ymax])
# res.append({"folder": folder, "filename": filename, "category": np.array(category_list),
# "bndbox": np.array(bndbox_list)})
# return res
#
# DATA_PATH = "D:\\dataset"
# res = load_data(DATA_PATH)
# eval_res = res[:100]
# eval_set = VOCDataset(data=eval_res, image_root=DATA_PATH, transform=None, train=False)
# for data in eval_set:
# print(data) | StarcoderdataPython |
3332692 | <reponame>SJISTIC-LTD/Create-and-Read-QR-code
# pip install qrcode  (run in a shell, not in Python)
#Import Library
import qrcode
#Generate QR Code
img=qrcode.make('Hello World')
img.save('hello.png')
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10,
border=4,
)
qr.add_data("https://abhijithchandradas.medium.com/")
qr.make(fit=True)
img = qr.make_image(fill_color="red", back_color="black")
img.save("medium.png")
# pip install opencv-python  (run in a shell; imported below as cv2)
import cv2
img=cv2.imread("medium.png")
det=cv2.QRCodeDetector()
val, pts, st_code=det.detectAndDecode(img)
print(val)
| StarcoderdataPython |
1791361 | #!/usr/bin/env python
# This file is part of the Mad Girlfriend software
# See the LICENSE file for copyright information
from rules import Rules
from alertgenerator import Alert, Alerter
from packetparser import Packet
import signal, sys, os, socket, time, traceback
def getMemoryUsage():
data = open('/proc/meminfo', 'r').read(2048).split('\n')
memFree = int(data[1].split(':')[1].strip().split(' ')[0]) # kb
buffers = int(data[3].split(':')[1].strip().split(' ')[0]) # kb
cached = int(data[4].split(':')[1].strip().split(' ')[0]) # kb
# Available memory is what is free (completely unoccupied) plus what can
# can be emptied on demand (i.e. buffers and cache). The number returned
# by this function is how many KBs more python can use before OOM.
totalUsableMemory = memFree + buffers + cached
return totalUsableMemory
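# Hedged note: the fixed line indices above assume a /proc/meminfo layout of
# MemTotal, MemFree, MemAvailable, Buffers, Cached, which varies by kernel.
# A keyed parse is more robust, e.g.:
# fields = dict(line.split(':', 1) for line in data if ':' in line)
# memFree = int(fields['MemFree'].strip().split(' ')[0])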
def canary(packet, alerter):
global lastPacketsHandled, lastBytesHandled
# The canary chirps its status every now and then
nowandthen = 15 # seconds
if 'lastalert' not in alerter.state:
alerter.state['lastalert'] = 0
elapsedSinceLastCanary = time.time() - alerter.state['lastalert']
if elapsedSinceLastCanary > nowandthen:
alerter.state['lastalert'] = time.time()
ph = ['packetsHandled', 'count', lastPacketsHandled / elapsedSinceLastCanary]
tph = ['totalPacketsHandled', 'count', packetsHandled]
bh = ['bytesHandled', 'count', lastBytesHandled / elapsedSinceLastCanary]
tbh = ['totalBytesHandled', 'count', bytesHandled]
memusage = ['memusage', 'count', getMemoryUsage()]
loadavg = ['loadavg', 'count', os.getloadavg()[0]]
extravalues = [tph, tbh, memusage, loadavg, ph, bh]
alerter.log(Alert.INFO, None, extravalues)
lastPacketsHandled = 0 # since last canary
lastBytesHandled = 0 # since last canary
# The rules array contains all rules we apply to each packet.
# The canary function, defined above, is always present.
rules = [(canary, Alerter('canary'))]
for methodName in Rules.__dict__:
if methodName[0] != '_':
if methodName == 'canary':
print("Error: you cannot have a rule named 'canary'. This is a reserved name.")
sys.exit(2)
rules.append((Rules.__dict__[methodName], Alerter(methodName)))
else:
if methodName not in ['__module__', '__doc__']:
print("Ignoring method '" + methodName + "' because it starts with an underscore.")
try:
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
except:
print('Error creating socket.')
sys.exit(1)
print("Mad Girlfriend initialized.")
packetsHandled = 0
bytesHandled = 0
lastPacketsHandled = 0 # since last canary
lastBytesHandled = 0 # since last canary
try:
while True:
data = s.recvfrom(65565)[0]
for rule, alerter in rules:
try:
rule(Packet(data), alerter)
except:
if sys.exc_info()[0] is KeyboardInterrupt:
raise
else:
sys.stderr.write("Error in rule {}: {}: {}\n{}".format(alerter.name, \
sys.exc_info()[0], sys.exc_info()[1], traceback.print_tb(sys.exc_info()[2])))
packetsHandled += 1
lastPacketsHandled += 1
lastBytesHandled += len(data)
bytesHandled += len(data)
except KeyboardInterrupt:
print("Received SIGINT")
for rule, alerter in rules:
print("Closing " + alerter.name + ".log")
alerter.close()
print("Done! Have a nice day :)")
sys.exit(0)
sys.exit(3)
| StarcoderdataPython |
3391727 | """
DoMiniPigRegistrationPhase1.py
==================================
Description:
Author:
Usage:
"""
import errno
import os
import sys
if len(sys.argv) != 1:
print(
(
"""ERROR: Improper invocation
{PROGRAM_NAME} <Experiment.json>
* The experiment json contains the parameters needed to
* dynamically scale the images (use slicer to determine approprate ranges)
* and to define the output image space to use.
* For Example
== Pig1.json
{
"Atlas" : {
"NOTE_1" : "The atlas to be used as a reference",
"IntensityImage": "/Shared/johnsonhj/HDNI/20150416_MiniPigData/TEST/ghost.nii",
"IntensityWindowMin": 8000,
"IntensityWindowMax": 17000,
"LabelMapImage" : "/Shared/johnsonhj/HDNI/20150416_MiniPigData/TEST/atlas.nii",
"LabelMapLUT" : "/Shared/johnsonhj/HDNI/20150416_MiniPigData/TEST/PigBrainBW.txt",
"TEMP_CACHE" : "/Shared/johnsonhj/HDNI/20150416_MiniPigData/TEST/CACHE",
"LOG_DIR" : "/Shared/johnsonhj/HDNI/20150416_MiniPigData/TEST/CACHE"
},
"Subject" : {
"Raw_T1":"/Shared/johnsonhj/HDNI/20150416_MiniPigData/TEST/M268P100_20130606_3DT1TFEhrs.nii",
"T1WindowMin": 300,
"T1WindowMax": 1500,
"NOTE_Cropped_T1" : "You need to crop the T1 manually to get an approximate region that only includes the brain",
"Cropped_T1":"/Shared/johnsonhj/HDNI/20150416_MiniPigData/TEST/CroppedT1.nii.gz",
"Raw_T2":"/Shared/johnsonhj/HDNI/20150416_MiniPigData/TEST/M268P100_20130606_T2TSE2mm.nii",
"T2WindowMin": 200,
"T2WindowMax": 2300,
"Raw_BM":"/Shared/johnsonhj/HDNI/20150416_MiniPigData/TEST/M268P100_20130606_whole_brain_mask.nii",
"ResultDir":"/Shared/johnsonhj/HDNI/20150416_MiniPigData/TEST/M268P100_Atlas"
}
}
""".format(
PROGRAM_NAME=sys.argv[0]
)
)
)
def add_to_sys_path(index, path):
"""
This function...
:param index:
:param path:
:return: None
"""
if path not in sys.path:
sys.path.insert(index, path)
# Modify the PATH for python modules
add_to_sys_path(0, "/scratch/johnsonhj/src/NEP-11/NIPYPE")
add_to_sys_path(0, "/scratch/johnsonhj/src/NEP-11/BRAINSTools/AutoWorkup/semtools")
add_to_sys_path(1, "/scratch/johnsonhj/src/NEP-11/BRAINSTools/AutoWorkup")
add_to_sys_path(1, "/scratch/johnsonhj/src/NEP-11/BRAINSTools")
# Modify the PATH for executibles used
temp_paths = os.environ["PATH"].split(os.pathsep)
temp_paths.insert(0, os.path.join("/scratch/johnsonhj/src/NEP-11", "bin"))
os.environ["PATH"] = os.pathsep.join(temp_paths)
print((sys.path))
import SimpleITK as sitk
import matplotlib as mp
import nipype
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
TraitedSpec,
File,
Directory,
)
from nipype.interfaces.base import traits, isdefined, BaseInterface
from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
import nipype.interfaces.io as nio # Data i/o
import nipype.pipeline.engine as pe # pypeline engine
from nipype.interfaces.ants import (
Registration,
ApplyTransforms,
AverageImages,
MultiplyImages,
AverageAffineTransform,
)
from nipype.interfaces.semtools import *
import yaml
## Using yaml.safe_load to parse the experiment file (YAML is a superset of JSON)
with open(sys.argv[1], "r") as paramFptr:
ExperimentInfo = yaml.safe_load(paramFptr)
print(ExperimentInfo)
# del WDIR
def mkdir_p(path):
"""
Create the directory path (like mkdir -p), ignoring the error if it already exists.
:param path:
:return: None
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
minipigWF = pe.Workflow(name="MINIPIG")
minipigWF.base_dir = ExperimentInfo["Atlas"]["TEMP_CACHE"]
mkdir_p(ExperimentInfo["Atlas"]["TEMP_CACHE"])
minipigWF.config["execution"] = {
"plugin": "Linear",
# 'stop_on_first_crash':'true',
# 'stop_on_first_rerun': 'true',
"stop_on_first_crash": "false",
"stop_on_first_rerun": "false",
# This stops at first attempt to rerun, before running, and before deleting previous results.
"hash_method": "timestamp",
"single_thread_matlab": "true", # Multi-core 2011a multi-core for matrix multip lication.
"remove_unnecessary_outputs": "true", # remove any interface outputs not needed by the workflow
"use_relative_paths": "false", # relative paths should be on, require hash updat e when changed.
"remove_node_directories": "false", # Experimental
"local_hash_check": "true",
"job_finished_timeout": 45,
}
minipigWF.config["logging"] = {
"workflow_level": "DEBUG",
"filemanip_level": "DEBUG",
"interface_level": "DEBUG",
"log_directory": ExperimentInfo["Atlas"]["LOG_DIR"],
}
input_spec = pe.Node(
interface=IdentityInterface(
fields=[
"Raw_Atlas",
"Raw_T1",
"Cropped_T1",
"Raw_T2",
"Raw_BM",
"DomesticLUT",
"Domestic_LabelMap",
]
),
run_without_submitting=True,
name="inputspec",
)
input_spec.inputs.Raw_T1 = ExperimentInfo["Subject"]["Raw_T1"]
input_spec.inputs.Raw_T2 = ExperimentInfo["Subject"]["Raw_T2"]
input_spec.inputs.Raw_BM = ExperimentInfo["Subject"]["Raw_BM"]
input_spec.inputs.Cropped_T1 = ExperimentInfo["Subject"]["Cropped_T1"]
input_spec.inputs.Raw_Atlas = ExperimentInfo["Atlas"]["IntensityImage"]
input_spec.inputs.DomesticLUT = ExperimentInfo["Atlas"]["LabelMapLUT"]
input_spec.inputs.Domestic_LabelMap = ExperimentInfo["Atlas"]["LabelMapImage"]
def change_dynamic_range_of_image(inFN, outFN, winMin, winMax, outMin, outMax):
"""
Linearly rescale intensities from the window [winMin, winMax] into
[outMin, outMax] and write the result to outFN.
:param inFN: input image filename
:param outFN: output image filename
:param winMin: lower bound of the input intensity window
:param winMax: upper bound of the input intensity window
:param outMin: minimum output intensity
:param outMax: maximum output intensity
"""
import SimpleITK as sitk
import os
at = sitk.ReadImage(inFN)
out_at = sitk.IntensityWindowing(
at,
windowMinimum=winMin,
windowMaximum=winMax,
outputMinimum=outMin,
outputMaximum=outMax,
)
out_at = sitk.Cast(out_at, sitk.sitkUInt16)
sitk.WriteImage(out_at, outFN)
return os.path.realpath(outFN)
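# Hedged usage sketch: rescale an atlas so intensities in [8000, 17000] map
# linearly onto [0, 4096], mirroring how the node below is wired.
# change_dynamic_range_of_image('ghost.nii', 'ghost_rescaled.nii.gz',
#                               8000, 17000, 0, 4096)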
fixAtlas = pe.Node(
Function(
function=change_dynamic_range_of_image,
input_names=["inFN", "outFN", "winMin", "winMax", "outMin", "outMax"],
output_names=["outFN"],
),
run_without_submitting=True,
name="FixAtlas_DynFix",
)
fixAtlas.inputs.outFN = "ghost_fixed_dynamic_range.nii.gz"
fixAtlas.inputs.winMin = ExperimentInfo["Atlas"]["IntensityWindowMin"] # 8000
fixAtlas.inputs.winMax = ExperimentInfo["Atlas"]["IntensityWindowMax"] # 17000
fixAtlas.inputs.outMin = 0
fixAtlas.inputs.outMax = 4096
minipigWF.connect(input_spec, "Raw_Atlas", fixAtlas, "inFN")
T1DynFix = pe.Node(
Function(
function=change_dynamic_range_of_image,
input_names=["inFN", "outFN", "winMin", "winMax", "outMin", "outMax"],
output_names=["outFN"],
),
run_without_submitting=True,
name="T1DynFix",
)
T1DynFix.inputs.outFN = "Cropped_T1_DynamicRange.nii.gz"
T1DynFix.inputs.winMin = ExperimentInfo["Subject"]["T1WindowMin"] # 300
T1DynFix.inputs.winMax = ExperimentInfo["Subject"]["T1WindowMax"] # 1500
T1DynFix.inputs.outMin = 0
T1DynFix.inputs.outMax = 4096
minipigWF.connect(input_spec, "Cropped_T1", T1DynFix, "inFN")
T2DynFix = pe.Node(
Function(
function=change_dynamic_range_of_image,
input_names=["inFN", "outFN", "winMin", "winMax", "outMin", "outMax"],
output_names=["outFN"],
),
run_without_submitting=True,
name="T2DynFix",
)
T2DynFix.inputs.outFN = "T2_DynamicRange.nii.gz"
T2DynFix.inputs.winMin = ExperimentInfo["Subject"]["T2WindowMin"] # 200
T2DynFix.inputs.winMax = ExperimentInfo["Subject"]["T2WindowMax"] # 2300
T2DynFix.inputs.outMin = 0
T2DynFix.inputs.outMax = 4096
minipigWF.connect(input_spec, "Raw_T2", T2DynFix, "inFN")
ResampleBrainMask = pe.Node(BRAINSResample(), name="ResampleBrainMask")
ResampleBrainMask.inputs.pixelType = "binary"
ResampleBrainMask.inputs.outputVolume = "ResampledBrainMask.nii.gz"
minipigWF.connect(input_spec, "Raw_BM", ResampleBrainMask, "inputVolume")
minipigWF.connect(T1DynFix, "outFN", ResampleBrainMask, "referenceVolume")
def smooth_brain_mask(inFN, outFN):
"""
Binarize the brain mask, then erode/dilate/erode to smooth rough brain edges.
:param inFN: input brain-mask filename
:param outFN: output smoothed-mask filename
:return: os.path.realpath(outFN)
"""
import SimpleITK as sitk
import os
FIXED_BM = inFN
fbm = sitk.ReadImage(FIXED_BM) > 0 # Make binary
## A smoothing operation to get rid of rough brain edges
fbm = sitk.BinaryErode(fbm, 1)
fbm = sitk.BinaryDilate(fbm, 2)
fbm = sitk.BinaryErode(fbm, 1)
sitk.WriteImage(sitk.Cast(fbm, sitk.sitkUInt16), outFN)
return os.path.realpath(outFN)
smoothBrainMask = pe.Node(
Function(
function=smooth_brain_mask,
input_names=["inFN", "outFN"],
output_names=["outFN"],
),
run_without_submitting=True,
name="smoothBrainMask",
)
smoothBrainMask.inputs.outFN = "smoothedBrainMask.nii.gz"
minipigWF.connect(ResampleBrainMask, "outputVolume", smoothBrainMask, "inFN")
######===========================
T2_to_T1_Fit = pe.Node(BRAINSFit(), name="T2_to_T1_Fit")
T2_to_T1_Fit.inputs.samplingPercentage = 0.05
T2_to_T1_Fit.inputs.outputTransform = "T2_to_T1.h5"
T2_to_T1_Fit.inputs.transformType = "Rigid"
T2_to_T1_Fit.inputs.costMetric = "MMI"
T2_to_T1_Fit.inputs.numberOfMatchPoints = 20
T2_to_T1_Fit.inputs.numberOfHistogramBins = 50
T2_to_T1_Fit.inputs.minimumStepLength = 0.0001
T2_to_T1_Fit.inputs.outputVolume = "T2inT1.nii.gz"
T2_to_T1_Fit.inputs.outputVolumePixelType = "int"
T2_to_T1_Fit.inputs.interpolationMode = "BSpline"
T2_to_T1_Fit.inputs.initializeTransformMode = "Off"
minipigWF.connect(T1DynFix, "outFN", T2_to_T1_Fit, "fixedVolume")
minipigWF.connect(T2DynFix, "outFN", T2_to_T1_Fit, "movingVolume")
## No masking needed, these should be topologically equivalent in all spaces
## T2_to_T1_Fit.inputs.maskProcessingMode="ROI"
## minipigWF.connect( smoothBrainMask, 'outFN',T2_to_T1_Fit,'fixedBinaryVolume')
# T2_to_T1_Fit.inputs.initializeTransformMode useMomentsAlign
# T2_to_T1_Fit.inputs.interpolationMode ResampleInPlace \
# T2_to_T1_Fit.inputs.fixedBinaryVolume ${SMOOTHEDBM} \
# T2_to_T1_Fit.inputs.maskProcessingMode ROI \
######===========================
def chop_image(inFN, inMaskFN, outFN):
"""A function to apply mask to zero out all non-interesting pixels.
ideally this should not be needed, but in an attempt to figure out
why registration is acting difficult, this is a reasonable solution
:param inFN:
:param inMaskFN:
:param outFN:
:return: os.path.realpath(outFN)
"""
import SimpleITK as sitk
import os
fbm = sitk.ReadImage(inMaskFN) > 0
int1 = sitk.ReadImage(inFN)
int1_mask = sitk.Cast(int1 > 0, sitk.sitkUInt16)
outt1 = (
sitk.Cast(int1, sitk.sitkUInt16) * sitk.Cast(fbm, sitk.sitkUInt16) * int1_mask
)
sitk.WriteImage(outt1, outFN)
return os.path.realpath(outFN)
chopT1 = pe.Node(
Function(
function=chop_image,
input_names=["inFN", "inMaskFN", "outFN"],
output_names=["outFN"],
),
run_without_submitting=True,
name="chopT1",
)
chopT1.inputs.outFN = "T1_CHOPPED.nii.gz"
minipigWF.connect(T1DynFix, "outFN", chopT1, "inFN")
minipigWF.connect(smoothBrainMask, "outFN", chopT1, "inMaskFN")
chopT2 = pe.Node(
Function(
function=chop_image,
input_names=["inFN", "inMaskFN", "outFN"],
output_names=["outFN"],
),
run_without_submitting=True,
name="chopT2",
)
chopT2.inputs.outFN = "T2_CHOPPED.nii.gz"
minipigWF.connect(T2_to_T1_Fit, "outputVolume", chopT2, "inFN")
minipigWF.connect(smoothBrainMask, "outFN", chopT2, "inMaskFN")
######===========================
AT_to_T1_Fit = pe.Node(BRAINSFit(), name="AT_to_T1_Fit")
AT_to_T1_Fit.inputs.samplingPercentage = 0.15
AT_to_T1_Fit.inputs.outputTransform = "AT_to_T1.h5"
AT_to_T1_Fit.inputs.useRigid = True
AT_to_T1_Fit.inputs.useScaleVersor3D = True
AT_to_T1_Fit.inputs.useAffine = True
AT_to_T1_Fit.inputs.costMetric = "MMI"
AT_to_T1_Fit.inputs.numberOfMatchPoints = 20
AT_to_T1_Fit.inputs.numberOfHistogramBins = 50
AT_to_T1_Fit.inputs.minimumStepLength = [0.001, 0.0001, 0.0001]
AT_to_T1_Fit.inputs.outputVolume = "AT_to_T1.nii.gz"
AT_to_T1_Fit.inputs.outputVolumePixelType = "int"
### AT_to_T1_Fit.inputs.interpolationMode='BSpline'
AT_to_T1_Fit.inputs.initializeTransformMode = "useMomentsAlign" # 'useGeometryAlign'
### AT_to_T1_Fit.inputs.maskProcessingMode="ROIAUTO" ## Images are chopped already, so ROIAUTO should work
### AT_to_T1_Fit.inputs.ROIAutoClosingSize=2 ## Mini pig brains are much smaller than human brains
### AT_to_T1_Fit.inputs.ROIAutoDilateSize=.5 ## Auto dilate a very small amount
minipigWF.connect(chopT1, "outFN", AT_to_T1_Fit, "fixedVolume")
minipigWF.connect(fixAtlas, "outFN", AT_to_T1_Fit, "movingVolume")
######===========================
BeginANTS = pe.Node(interface=Registration(), name="antsA2S")
##many_cpu_sge_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE,8,8,24), 'overwrite': True}
##ComputeAtlasToSubjectTransform.plugin_args = many_cpu_sge_options_dictionary
BeginANTS.inputs.dimension = 3
""" This is the recommended set of parameters from the ANTS developers """
BeginANTS.inputs.output_transform_prefix = "A2S_output_tfm"
BeginANTS.inputs.transforms = ["Affine", "SyN", "SyN", "SyN"]
BeginANTS.inputs.transform_parameters = [
[0.1],
[0.1, 3.0, 0.0],
[0.1, 3.0, 0.0],
[0.1, 3.0, 0.0],
]
BeginANTS.inputs.metric = ["MI", "CC", "CC", "CC"]
BeginANTS.inputs.sampling_strategy = ["Regular", None, None, None]
BeginANTS.inputs.sampling_percentage = [0.27, 1.0, 1.0, 1.0]
BeginANTS.inputs.metric_weight = [1.0, 1.0, 1.0, 1.0]
BeginANTS.inputs.radius_or_number_of_bins = [32, 3, 3, 3]
BeginANTS.inputs.number_of_iterations = [
[1000, 1000, 1000, 1000],
[1000, 250],
[140],
[25],
]
BeginANTS.inputs.convergence_threshold = [5e-7, 5e-7, 5e-6, 5e-5]
BeginANTS.inputs.convergence_window_size = [10, 10, 10, 10]
BeginANTS.inputs.use_histogram_matching = [True, True, True, True]
BeginANTS.inputs.shrink_factors = [[8, 4, 2, 1], [8, 4], [2], [1]]
BeginANTS.inputs.smoothing_sigmas = [[3, 2, 1, 0], [3, 2], [1], [0]]
BeginANTS.inputs.sigma_units = ["vox", "vox", "vox", "vox"]
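# Note: the parameter lists above are parallel across the four registration
# stages (Affine, SyN, SyN, SyN); e.g. the first stage uses MI with 32 bins
# over shrink factors [8, 4, 2, 1], while the last SyN stage is a short
# full-resolution refinement ([1] shrink, [0] smoothing, 25 iterations).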
BeginANTS.inputs.use_estimate_learning_rate_once = [False, False, False, False]
BeginANTS.inputs.write_composite_transform = True
BeginANTS.inputs.collapse_output_transforms = False
BeginANTS.inputs.initialize_transforms_per_stage = True
BeginANTS.inputs.winsorize_lower_quantile = 0.01
BeginANTS.inputs.winsorize_upper_quantile = 0.99
BeginANTS.inputs.output_warped_image = "atlas2subject.nii.gz"
BeginANTS.inputs.output_inverse_warped_image = "subject2atlas.nii.gz"
BeginANTS.inputs.save_state = "SavedBeginANTSSyNState.h5"
BeginANTS.inputs.float = True
BeginANTS.inputs.num_threads = (
-1
)  # Tell nipype to respect the qsub environmental variable NSLOTS
BeginANTS.inputs.args = "--verbose"
BeginANTS.inputs.invert_initial_moving_transform = False
minipigWF.connect(chopT2, "outFN", BeginANTS, "fixed_image")
minipigWF.connect(fixAtlas, "outFN", BeginANTS, "moving_image")
minipigWF.connect(
AT_to_T1_Fit, "outputTransform", BeginANTS, "initial_moving_transform"
)
######===========================
def make_vector(inFN1, inFN2):
"""
This function...
:param inFN1:
:param inFN2:
:return: [inFN1, inFN2]
"""
return [inFN1, inFN2]
SubjectMakeVector = pe.Node(
Function(
function=make_vector, input_names=["inFN1", "inFN2"], output_names=["outFNs"]
),
run_without_submitting=True,
name="SubjectMakeVector",
)
minipigWF.connect(chopT1, "outFN", SubjectMakeVector, "inFN1")
minipigWF.connect(chopT2, "outFN", SubjectMakeVector, "inFN2")
AtlasMakeVector = pe.Node(
Function(
function=make_vector, input_names=["inFN1", "inFN2"], output_names=["outFNs"]
),
run_without_submitting=True,
name="AtlasMakeVector",
)
minipigWF.connect(fixAtlas, "outFN", AtlasMakeVector, "inFN1")
minipigWF.connect(fixAtlas, "outFN", AtlasMakeVector, "inFN2")
######===========================
BeginANTS2 = pe.Node(interface=Registration(), name="antsA2SMultiModal")
##many_cpu_sge_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE,8,8,24), 'overwrite': True}
##ComputeAtlasToSubjectTransform.plugin_args = many_cpu_sge_options_dictionary
BeginANTS2.inputs.dimension = 3
""" This is the recommended set of parameters from the ANTS developers """
BeginANTS2.inputs.output_transform_prefix = "A2S_output_tfm"
BeginANTS2.inputs.transforms = ["SyN"]
BeginANTS2.inputs.transform_parameters = [[0.1, 3.0, 0.0]]
BeginANTS2.inputs.metric = ["CC"]
BeginANTS2.inputs.sampling_strategy = [None]
BeginANTS2.inputs.sampling_percentage = [1.0]
BeginANTS2.inputs.metric_weight = [1.0]
BeginANTS2.inputs.radius_or_number_of_bins = [3]
BeginANTS2.inputs.number_of_iterations = [[25]]
BeginANTS2.inputs.convergence_threshold = [5e-5]
BeginANTS2.inputs.convergence_window_size = [10]
BeginANTS2.inputs.use_histogram_matching = [True]
BeginANTS2.inputs.shrink_factors = [[1]]
BeginANTS2.inputs.smoothing_sigmas = [[0]]
BeginANTS2.inputs.sigma_units = ["vox"]
BeginANTS2.inputs.use_estimate_learning_rate_once = [False]
BeginANTS2.inputs.write_composite_transform = True
BeginANTS2.inputs.collapse_output_transforms = False
BeginANTS2.inputs.initialize_transforms_per_stage = True
BeginANTS2.inputs.winsorize_lower_quantile = 0.01
BeginANTS2.inputs.winsorize_upper_quantile = 0.99
BeginANTS2.inputs.output_warped_image = "atlas2subjectMultiModal.nii.gz"
BeginANTS2.inputs.output_inverse_warped_image = "subject2atlasMultiModal.nii.gz"
BeginANTS2.inputs.save_state = "SavedBeginANTSSyNState.h5"
BeginANTS2.inputs.float = True
BeginANTS2.inputs.num_threads = (
-1
)  # Tell nipype to respect the qsub environmental variable NSLOTS
BeginANTS2.inputs.args = "--verbose"
minipigWF.connect(SubjectMakeVector, "outFNs", BeginANTS2, "fixed_image")
minipigWF.connect(AtlasMakeVector, "outFNs", BeginANTS2, "moving_image")
minipigWF.connect(BeginANTS, "save_state", BeginANTS2, "restore_state")
######===========================
def get_list_index(imageList, index):
"""
This function...
:param imageList:
:param index:
:return: imageList[indexs]
"""
return imageList[index]
ResampleLabelMap = pe.Node(BRAINSResample(), name="ResampleLabelMap")
ResampleLabelMap.inputs.pixelType = "ushort"
ResampleLabelMap.inputs.interpolationMode = "NearestNeighbor"
ResampleLabelMap.inputs.outputVolume = "ResampleLabelMap.nii.gz"
minipigWF.connect(
[(BeginANTS2, ResampleLabelMap, [("composite_transform", "warpTransform")])]
)
minipigWF.connect(input_spec, "Domestic_LabelMap", ResampleLabelMap, "inputVolume")
minipigWF.connect(T1DynFix, "outFN", ResampleLabelMap, "referenceVolume")
datasink = pe.Node(nio.DataSink(), name="sinker")
datasink.inputs.base_directory = ExperimentInfo["Subject"]["ResultDir"]
minipigWF.connect(
ResampleLabelMap, "outputVolume", datasink, "@outputAtlasLabelMapWarped"
)
# minipigWF.connect(BeginANTS2, 'output_warped_image', datasink, '@outputAtlasWarped')
minipigWF.connect(T1DynFix, "outFN", datasink, "@T1Fixed")
minipigWF.connect(T2DynFix, "outFN", datasink, "@T2Fixed")
minipigWF.write_graph(
dotfilename="graph.dot", graph2use="flat", format="svg", simple_form=True
)
minipigWF.run()
| StarcoderdataPython |
1766256 | from nltk.stem.wordnet import WordNetLemmatizer
Lem = WordNetLemmatizer()
import pandas as pd
from generic_operations import print_to_file
import global_variables as v
def lemmatisation():
# open preprocessed tokens
wo_data = pd.read_excel(v.input_file_path_lemmatisation, sheet_name=v.input_file_sheet_name)
selected_wo_data = pd.DataFrame(wo_data, columns=v.input_file_columns)
transformed_token_list = list(selected_wo_data[v.input_file_column])
# create list of tokens
token_list = []
for sentence in transformed_token_list:
tokens = sentence.split(' ')
for token in tokens:
token_list.append(token)
token_set = list(set(token_list))
final_sentences = []
for sentence in transformed_token_list:
tokens = sentence.split(' ')
final_tokens = []
for w in tokens:
final_word = w
lemmatized_word = Lem.lemmatize(w)
if len(w) > 3 and lemmatized_word != w:
if lemmatized_word in tokens:
final_word = lemmatized_word
elif len(w) > 4 and w[-1] == 's' and w[:-1] in token_set:
final_word = lemmatized_word
final_tokens.append(final_word)
final_sentences.append(' '.join(final_tokens))
print_to_file(v.transformed_text_path_stage_3, final_sentences, v.transformed_text_heading)
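# Worked example of the rule above: Lem.lemmatize('books') -> 'book', so 'books'
# is rewritten when 'book' already occurs in the same sentence, or via the
# plural-'s' fallback when 'book' appears anywhere in the global token set.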
def main():
print("Starting preprocessor: lemmatisation")
lemmatisation()
print("preprocessor: lemmatisation is complete")
print("output: " + v.transformed_text_path_stage_3)
print('Now run "abbreviation-correction.py" file')
if __name__ == "__main__":
main() | StarcoderdataPython |
14458 | <filename>get_variances.py
from itertools import combinations_with_replacement
import time
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
#my own variance function runs much faster than numpy or the Python 3 ported statistics module
def variance(data,u):
return sum([(i-u)**2 for i in data])/len(data)
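# e.g. variance([1, 2, 3], u=2) == ((1-2)**2 + (2-2)**2 + (3-2)**2) / 3 == 2/3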
##rounding the means and variances helps to collapse them
precision_ave=16
precision_var=12
def run(n,r):
all_deviations={}
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
for i in combinations_with_replacement(range(n), r):
if n-1 in i:
u=round(sum(i)/float(len(i)),precision_ave)
var=round(variance(i,u),precision_var)
if var not in all_deviations:
all_deviations[var]={u:''}
else:
all_deviations[var][u]=''
    end = time.perf_counter()
duration=end-start
data=sorted(all_deviations.keys())
f=open(os.path.join(BASE_DIR,'raw_variances',str(r)+'.txt'),'w')
#write a header line that includes time to complete
f.write(str(n)+' '+str(duration))
f.write('\n')
for i in data:
f.write(str(i))
f.write('\t')
f.write(str(sorted(all_deviations[i].keys())))
f.write('\n')
f.close()
##perform runs
#n can probably just be set to 7 or even lower
#code will take a while, you should run copies of this script in parallel
for r in range(5,100):
n=30-r
if n<=7:
n=7
run(n,r)
| StarcoderdataPython |
166182 | <reponame>levelupresearch/sparclur
import shutil
from typing import List, Dict, Any, Union
import os
import re
import locale
import shlex
import tempfile
import subprocess
from subprocess import DEVNULL, TimeoutExpired
import yaml
from sparclur._tracer import Tracer
from sparclur._parser import VALID, VALID_WARNINGS, REJECTED, REJECTED_AMBIG, TRACER, TIMED_OUT
from sparclur.utils import hash_file
from sparclur.utils._config import _get_config_param, _load_config
class PDFCPU(Tracer):
"""Wrapper for PDFCPU (https://pdfcpu.io/)"""
def __init__(self, doc: Union[str, bytes],
skip_check: Union[bool, None] = None,
hash_exclude: Union[str, List[str], None] = None,
binary_path: Union[str, None] = None,
temp_folders_dir: Union[str, None] = None,
timeout: Union[int, None] = None
):
"""
Parameters
----------
binary_path : str
If the mutool binary is not in the system PATH, add the path to the binary here. Can also be used to trace
specific versions of the binary.
"""
config = _load_config()
skip_check = _get_config_param(PDFCPU, config, 'skip_check', skip_check, False)
hash_exclude = _get_config_param(PDFCPU, config, 'hash_exclude', hash_exclude, None)
binary_path = _get_config_param(PDFCPU, config, 'binary_path', binary_path, None)
temp_folders_dir = _get_config_param(PDFCPU, config, 'temp_folders_dir', temp_folders_dir, None)
timeout = _get_config_param(PDFCPU, config, 'timeout', timeout, None)
super().__init__(doc=doc,
temp_folders_dir=temp_folders_dir,
skip_check=skip_check,
hash_exclude=hash_exclude,
timeout=timeout)
self._pdfcpu_path = 'pdfcpu' if binary_path is None else os.path.join(binary_path, 'pdfcpu')
self._trace_exit_code = None
self._decoder = locale.getpreferredencoding()
def _check_for_tracer(self) -> bool:
if self._can_trace is None:
try:
subprocess.check_output(shlex.split(self._pdfcpu_path + " version"), shell=False)
pc_present = True
except Exception as e:
pc_present = False
self._can_trace = pc_present
return self._can_trace
@property
def validate_tracer(self) -> Dict[str, Any]:
if TRACER not in self._validity:
validity_results = dict()
if self._cleaned is None:
self._scrub_messages()
observed_messages = list(self._cleaned.keys())
if self._file_timed_out[TRACER]:
validity_results['valid'] = False
validity_results['status'] = TIMED_OUT
validity_results['info'] = 'Timed Out: %i' % self._timeout
elif self._trace_exit_code > 1:
validity_results['valid'] = False
validity_results['status'] = REJECTED
validity_results['info'] = 'Exit code: %i' % self._trace_exit_code
elif observed_messages == ['No warnings']:
validity_results['valid'] = True
validity_results['status'] = VALID
elif len([message for message in observed_messages if 'Error' in message]) > 0:
validity_results['valid'] = False
validity_results['status'] = REJECTED
validity_results['info'] = 'Errors returned'
elif len([message for message in observed_messages if 'Warning' in message]) == len(observed_messages):
validity_results['valid'] = True
validity_results['status'] = VALID_WARNINGS
validity_results['info'] = 'Warnings only'
else:
validity_results['valid'] = False
validity_results['status'] = REJECTED_AMBIG
validity_results['info'] = 'Unknown message type returned'
self._validity[TRACER] = validity_results
return self._validity[TRACER]
@staticmethod
def get_name():
return 'PDFCPU'
def _get_num_pages(self):
if not self._skip_check:
assert self._check_for_tracer(), "%s not found" % self.get_name()
with tempfile.TemporaryDirectory(dir=self._temp_folders_dir) as temp_path:
if isinstance(self._doc, bytes):
file_hash = hash_file(self._doc)
doc_path = os.path.join(temp_path, file_hash)
with open(doc_path+'.pdf', 'wb') as doc_out:
doc_out.write(self._doc)
# elif not self._doc.endswith('.pdf'):
# doc_path = os.path.join(temp_path, 'infile.pdf')
# shutil.copy2(self._doc, doc_path)
else:
doc_path = self._doc
try:
sp = subprocess.Popen([self._pdfcpu_path, 'info', doc_path], stdout=subprocess.PIPE, stderr=DEVNULL,
shell=False)
(stdout, _) = sp.communicate()
stdout = stdout.decode(self._decoder)
self._num_pages = [int(line.split(':')[1].strip())
for line in stdout.split('\n') if 'Page count:' in line][0]
except:
self._num_pages = 0
def _parse_document(self):
with tempfile.TemporaryDirectory(dir=self._temp_folders_dir) as temp_path:
if isinstance(self._doc, bytes):
file_hash = hash_file(self._doc)
doc_path = os.path.join(temp_path, file_hash + '.pdf')
with open(doc_path, 'wb') as doc_out:
doc_out.write(self._doc)
elif not self._doc.endswith('.pdf'):
doc_path = os.path.join(temp_path, 'infile.pdf')
shutil.copy2(self._doc, doc_path)
else:
doc_path = self._doc
try:
strict_cmd = '%s validate -m strict %s' % (self._pdfcpu_path, doc_path)
relaxed_cmd = '%s validate -m relaxed %s' % (self._pdfcpu_path, doc_path)
# if self._verbose:
# cmd = cmd + ' -v'
strict_sp = subprocess.Popen(
shlex.split(strict_cmd),
stderr=subprocess.PIPE, stdout=DEVNULL, shell=False)
relaxed_sp = subprocess.Popen(
shlex.split(relaxed_cmd),
stderr=subprocess.PIPE, stdout=DEVNULL, shell=False)
(_, strict_err) = strict_sp.communicate(timeout=self._timeout or 600)
strict_err = strict_err.decode(self._decoder, errors='ignore').strip()
strict_err = re.sub(r" \(try -mode=relaxed\)", '', strict_err)
(_, relaxed_err) = relaxed_sp.communicate(timeout=self._timeout or 600)
relaxed_err = relaxed_err.decode(self._decoder, errors='ignore').strip()
error_arr = [relaxed_err, strict_err] if relaxed_err not in strict_err else [relaxed_err]
# error_arr = [message for message in err.split('\n') if len(message) > 0]
self._trace_exit_code = max(strict_sp.returncode, relaxed_sp.returncode)
self._file_timed_out[TRACER] = False
except TimeoutExpired:
strict_sp.kill()
relaxed_sp.kill()
(_, strict_err) = strict_sp.communicate()
(_, relaxed_err) = relaxed_sp.communicate()
strict_err = strict_err.decode(self._decoder, errors='ignore').strip()
strict_err = re.sub(r" \(try -mode=relaxed\)", '', strict_err)
relaxed_err = relaxed_err.decode(self._decoder, errors='ignore').strip()
error_arr = [relaxed_err, strict_err] if relaxed_err not in strict_err else [relaxed_err]
error_arr.insert(0, 'Error: Subprocess timed out: %i' % (self._timeout or 600))
self._trace_exit_code = 0
self._file_timed_out[TRACER] = True
except Exception as e:
strict_sp.kill()
relaxed_sp.kill()
(_, strict_err) = strict_sp.communicate()
(_, relaxed_err) = relaxed_sp.communicate()
strict_err = strict_err.decode(self._decoder, errors='ignore').strip()
strict_err = re.sub(r" \(try -mode=relaxed\)", '', strict_err)
relaxed_err = relaxed_err.decode(self._decoder, errors='ignore').strip()
error_arr = str(e).split('\n')
pdfcpu_errs = [relaxed_err, strict_err] if relaxed_err not in strict_err else [relaxed_err]
error_arr.extend(pdfcpu_errs)
self._trace_exit_code = 0
self._file_timed_out[TRACER] = False
error_arr = [err for err in error_arr if len(err) > 0]
self._messages = ['No warnings'] if len(error_arr) == 0 else error_arr
def _scrub_messages(self):
if self._messages is None:
self._parse_document()
scrubbed_messages = [self._clean_message(err) for err in self._messages]
error_dict: Dict[str, int] = dict()
for error in scrubbed_messages:
error_dict[error] = error_dict.get(error, 0) + 1
self._cleaned = error_dict
def _clean_message(self, err):
message = err.strip()
cleaned = 'runtime error' if message.startswith('runtime:') else message
cleaned = re.sub(r"\<\<", "[", cleaned)
cleaned = re.sub(r"\>\>", "]", cleaned)
cleaned = re.sub(r"\[[^]]+\]", "<<x>>", cleaned)
cleaned = re.sub(r"\(obj\#:\d+\)", "(obj#:<x>)", cleaned)
cleaned = re.sub(r"\(obj\#\d+\)", "(obj#<x>)", cleaned)
cleaned = re.sub(r"line = \<[^>]+\>", "line = <x>", cleaned)
cleaned = re.sub(r"parsing \"[^\"]+\":", "parsing <x>:", cleaned)
cleaned = re.sub(r"problem dereferencing object \d+", "problem dereferencing object <x>", cleaned)
cleaned = re.sub(r"problem dereferencing stream \d+", "problem dereferencing stream <x>", cleaned)
cleaned = re.sub(r"unknown PDF Header Version: .+", "unknown PDF Header Version: <x>", cleaned)
cleaned = re.sub(r"\nThis file could be PDF/A compliant but pdfcpu only supports versions <= PDF V1.7", '',
cleaned)
cleaned = re.sub(r"validateDateEntry: .+ invalid date", "validateDateEntry: <x> invalid date", cleaned)
cleaned = re.sub(r"validateDateObject: .+ invalid date", "validateDateObject: <x> invalid date", cleaned)
cleaned = re.sub(r"leaf node corrupted .+", "leaf node corrupted", cleaned)
return cleaned
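# A hypothetical usage sketch (the file path is a placeholder):
#   tracer = PDFCPU('/tmp/example.pdf')
#   print(tracer.validate_tracer)  # e.g. {'valid': True, 'status': VALID, ...}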
| StarcoderdataPython |
3201793 | <filename>lambdata_jbanks/mod.py
def enlarge(n):
return n*100
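# e.g. enlarge(3) -> 300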
x = int(input("Enter an integer: "))
print(enlarge(x)) | StarcoderdataPython |
39260 | # terrascript/data/logicmonitor.py
import terrascript
class logicmonitor_collectors(terrascript.Data):
pass
class logicmonitor_dashboard(terrascript.Data):
pass
class logicmonitor_dashboard_group(terrascript.Data):
pass
class logicmonitor_device_group(terrascript.Data):
pass
__all__ = [
"logicmonitor_collectors",
"logicmonitor_dashboard",
"logicmonitor_dashboard_group",
"logicmonitor_device_group",
]
| StarcoderdataPython |
54697 | _base_ = './cascade_rcnn_r101_fpn_1x.py'
model = dict(
pretrained='open-mmlab://msra/hrnetv2_w40',
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(40, 80)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(40, 80, 160)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(40, 80, 160, 320)))),
neck=dict(
_delete_=True,
type='HRFPN',
in_channels=[40, 80, 160, 320],
out_channels=256))
load_from = ('/home/chenhansheng/src/mmdetection-tjiiv/checkpoints/'
'cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112-75e47b04.pth')
| StarcoderdataPython |
3259888 | <filename>cnn_architectures.py
class Architectures():
    '''Helper class whose methods return Python dictionaries mapping CNN layer
    names to weight shapes for several architectures.
    '''
def xsmall(kernel_size, n_classes):
return {'conv1' : [kernel_size, kernel_size, 1, 32],
'conv2' : [kernel_size, kernel_size, 32, 64],
'conv3' : [kernel_size, kernel_size, 64, 128],
'conv4' : [kernel_size, kernel_size, 128, 128],
'conv5' : [kernel_size, kernel_size, 128, 128],
'conv5_1' : [kernel_size, kernel_size, 128, 256],
'conv5_2' : [kernel_size, kernel_size, 256, 256],
'fc1' : [0, 2048],
'fc2' : [2048, n_classes],
}
def fat_shallow(kernel_size, n_classes):
return {'conv1' : [kernel_size, kernel_size, 1, 64],
'conv2' : [kernel_size, kernel_size, 64, 128],
'conv3' : [kernel_size, kernel_size, 128, 256],
'conv4' : [kernel_size, kernel_size, 256, 512],
'conv4_1' : [kernel_size, kernel_size, 256, 512],
'fc1' : [0, 2048],
'fc1_1' : [2048, 2048],
'fc2' : [2048, n_classes],
}
def slim_deep(kernel_size, n_classes):
return {'conv1' : [kernel_size, kernel_size, 1, 32],
'conv2' : [kernel_size, kernel_size, 32, 64],
'conv3' : [kernel_size, kernel_size, 64, 64],
'conv4' : [kernel_size, kernel_size, 64, 64],
'conv5' : [kernel_size, kernel_size, 64, 64],
'conv6' : [kernel_size, kernel_size, 64, 64],
'conv7' : [kernel_size, kernel_size, 64, 64],
'conv8' : [kernel_size, kernel_size, 64, 64],
'conv9' : [kernel_size, kernel_size, 64, 64],
'fc1' : [0, 2048],
'fc2' : [2048, n_classes],
}
def vgg16_skipped(kernel_size, n_classes):
return {'conv1_1' : [kernel_size, kernel_size, 1, 64],
'conv2_1' : [kernel_size, kernel_size, 64, 128],
'conv3_1' : [kernel_size, kernel_size, 128, 256],
'conv4_1' : [kernel_size, kernel_size, 256, 512],
'conv5_1' : [kernel_size, kernel_size, 512, 512],
'fc1' : [kernel_size*kernel_size*512, 4096],
'fc2' : [4096, n_classes]}
    def vgg16(kernel_size, n_classes):  # was `def vgg16(self):`, which left kernel_size/n_classes undefined
return {'conv1_1' : [kernel_size, kernel_size, 1, 64],
'conv1_2' : [kernel_size, kernel_size, 64, 64],
'conv2_1' : [kernel_size, kernel_size, 64, 128],
'conv2_2' : [kernel_size, kernel_size, 128, 128],
'conv3_1' : [kernel_size, kernel_size, 128, 256],
'conv3_2' : [kernel_size, kernel_size, 256, 256],
'conv3_3' : [kernel_size, kernel_size, 256, 256],
'conv4_1' : [kernel_size, kernel_size, 256, 512],
'conv4_2' : [kernel_size, kernel_size, 512, 512],
'conv4_3' : [kernel_size, kernel_size, 512, 512],
'conv5_1' : [kernel_size, kernel_size, 512, 512],
'conv5_2' : [kernel_size, kernel_size, 512, 512],
'conv5_3' : [kernel_size, kernel_size, 512, 512],
'fc1' : [kernel_size*kernel_size*512, 4096],
'fc2' : [4096, 4096],
'fc3' : [4096, n_classes]}
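# A minimal usage sketch (kernel size and class count are illustrative):
#   shapes = Architectures.xsmall(kernel_size=3, n_classes=10)
#   shapes['conv1'] -> [3, 3, 1, 32], read here as [k_h, k_w, in_channels, out_channels]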
| StarcoderdataPython |
3281414 | <gh_stars>0
# MIT License
#
# Copyright (c) 2019 <NAME> (g4 <at> novadsp <dot> com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from browser import alert, document, window, websocket
import json, javascript
#----------------------------------------------------------
# get global
def getGlobal():
return __BRYTHON__
#----------------------------------------------------------
# convert a generic JS object into a Python string
def toString(argname,jsObj):
    window.js_dump(argname, jsObj)
    # NOTE: debugging short-circuit -- the conversion code below is unreachable
    return ""
if jsObj is javascript.UNDEFINED:
return "null"
if jsObj is None:
return "null"
props = {}
for k,v in enumerate(jsObj):
if k is javascript.UNDEFINED or v is javascript.UNDEFINED:
continue
if jsObj[v] is javascript.UNDEFINED:
continue
props[v] = jsObj[v]
return str(props)
#----------------------------------------------------------
# check a JS object to see if it has valid key
def exists(jsObj,key):
for k,v in enumerate(jsObj):
if v == key:
return True
return False
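# e.g. exists(jsObj, 'length') is True when iterating jsObj yields a 'length'
# key -- a hedged illustration; behavior depends on how Brython iterates JS objects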
#----------------------------------------------------------
# track the actual states. websocket close can be delayed.
eDisconnected = 1
eConnecting = 2
eConnected = 3
eDisconnecting = 4
#----------------------------------------------------------
#
class Application:
#
def __init__(self,url,ui,grid,tree):
print("__init__ => " + url)
self.url = url
self.ui = ui
self.state = eDisconnected
self.webSocket = None
self.grid = grid
self.tree = tree
#
# attach some event handlers for the toolbar buttons
btn = self.ui['layout_top_toolbar'].get('btnConnect')
btn.onClick = self.onClickConnect
btn = self.ui['layout_top_toolbar'].get('btnDisconnect')
self.ui['layout_top_toolbar'].disable('btnDisconnect')
btn.onClick = self.onClickDisconnect
btn = self.ui['layout_top_toolbar'].get('btnClear')
self.ui['layout_top_toolbar'].disable('btnClear')
btn.onClick = self.onClickClear
# for grid and tree
self.tree.on('changed.jstree',self.onClickTree)
self.grid.on('click',self.onClickGrid)
# web socket handlers
def on_open(self,evt):
self.state = eConnected
self.ui['layout_top_toolbar'].disable('btnConnect')
self.ui['layout_top_toolbar'].enable('btnDisconnect')
def on_message(self,evt):
#
getGlobal().js_dump(evt)
#
self.grid.add({ 'recid': self.grid.total, 'directory' : evt.data })
if self.grid.total > 0:
self.ui['layout_top_toolbar'].enable('btnClear')
def on_close(self,evt):
self.state = eDisconnected
self.ui['layout_top_toolbar'].enable('btnConnect')
self.ui['layout_top_toolbar'].disable('btnDisconnect')
def on_wsError(self,evt):
pass
# handle opening and binding of the socket
def openSocket(self):
if not websocket.supported:
alert("WebSocket is not supported by your browser")
return
if self.state == eDisconnected:
self.state = eConnecting
# open a web socket
self.webSocket = websocket.WebSocket(self.url)
# bind functions to web socket events
self.webSocket.bind('open',self.on_open)
self.webSocket.bind('close',self.on_close)
self.webSocket.bind('message',self.on_message)
self.webSocket.bind('error',self.on_wsError)
# set socket to close
def closeSocket(self):
if self.state == eConnected or self.state == eConnecting:
self.webSocket.close()
# event handlers
def onClickConnect(self,evt):
self.openSocket()
def onClickDisconnect(self,evt):
self.closeSocket()
def onClickGrid(self,evt):
pass
# sel = self.grid.getSelection()
def onClickClear(self,evt):
self.grid.clear()
def onClickTree(self,evt,data):
# getGlobal().js_dump(evt)
# getGlobal().js_dump(data)
if self.state == eConnected:
self.webSocket.send(data.node.text)
#----------------------------------------------------------
# the main w2ui layout
w2layoutDefinition = {
'name': 'layout',
'padding': 8,
'panels': [
# { type: 'left', size: '16px', style: "background-color: rgb(255,255,255);" },
{ 'type': 'top', 'size': 24, 'style': 'border: none, background-color: rgb(255,255,255);',
'toolbar':
{
'name': 'toolbar',
# hook up button event handler
# 'onClick': window.app.onClickToolbar,
'items':
[
{ 'type': 'button', 'id': 'btnConnect', 'caption': 'Connect', 'icon': 'w2ui-icon-check' },
{ 'type': 'button', 'id': 'btnDisconnect', 'caption': 'Disconnect', 'icon': 'w2ui-icon-cross', 'disabled': False },
{ 'type': 'button', 'id': 'btnClear', 'caption': 'Clear', 'icon': 'w2ui-icon-cross' },
{ 'type': 'spacer' },
],
},
},
{
'type': 'left', 'resizable': True, 'size': '25%', 'content': '<div style="border:none, background-color: rgb(255,255,255); border:2px;"; id="jstree_container"></div>', 'style': "border: 1px; background-color: rgb(255,255,255);",
},
{
'type': 'main', 'style': "border:none, background-color: rgb(255,255,255);", 'minSize': 100, 'content': 'content'
},
# { 'type': 'preview', 'size': '50%', 'resizable': True, 'content': '<div style="height: 100%; width: 100%, background-color: rgb(255,255,255);" id="canvas-holder"><canvas id="bar-chart"></canvas></div>', style: "background-color: rgb(255,255,255);" },
{ 'type': 'right', 'size': '16px', 'style': "background-color: rgb(255,255,255);" },
]
}
# the tree
treeDefinition = { 'core' :
{
'themes' : { 'multiple': False, 'dots': False },
'data' :
[
{ 'text': 'Root node', 'state': { 'opened' : True },
'children': [
{ 'text' : 'Child node 1', 'children': [
{ 'text' : "1.1" }
]
},
{ 'text' : 'Child node 2' }
]
}
]
}
}
# the grid layout
gridDefinition = {
'name': 'grid',
# 'header': 'Brython',
'show': {
'header': False,
'footer': True,
'toolbar': False,
'toolbarDelete': False
},
'columns': [
# { field: 'recid', caption: 'ID', size: '50px', sortable: false, attr: 'align=left' },
{ 'field': 'c1', 'caption': 'I', 'size': '8%', 'w2ui' : { 'style': "1 : background-color: #00007F;"}, 'resizable': False },
{ 'field': 'c2', 'caption': 'II', 'size': '8%', 'sortable': False, 'resizable': False },
{ 'field': 'c3', 'caption': 'III', 'size': '8%', 'sortable': False, 'resizable': False },
{ 'field': 'c4', 'caption': 'IV', 'size': '8%', 'sortable': False, 'resizable': False },
{ 'field': 'c5', 'caption': 'V', 'size': '8%', 'sortable': False, 'resizable': False },
{ 'field': 'c6', 'caption': 'VI', 'size': '8%', 'sortable': False, 'resizable': False },
{ 'field': 'c7', 'caption': 'VII', 'size': '8%', 'sortable': False, 'resizable': False },
{ 'field': 'c8', 'caption': 'VIII', 'size': '8%', 'sortable': False, 'resizable': False },
{ 'field': 'c9', 'caption': 'IX', 'size': '8%', 'sortable': False, 'resizable': False },
{ 'field': 'c10', 'caption': 'X', 'size': '8%', 'sortable': False, 'resizable': False },
{ 'field': 'c11', 'caption': 'XI', 'size': '8%', 'sortable': False, 'resizable': False },
{ 'field': 'c12', 'caption': 'XII', 'size': '8%', 'sortable': False, 'resizable': False },
],
'records': [
# it's possible to add literals as shown below
{ 'recid': 1, 'c1': 'CMaj', 'c2': 'C#Maj', 'c3': 'DMaj', 'c4': 'EbMaj', 'c5': 'EMaj', 'c6': 'FMaj', 'c7': 'F#Maj', 'c8': 'GMaj', 'c9': 'AbMaj', 'c10': 'AMaj', 'c11': 'BbMaj', 'c12': 'BMaj' },
{ 'recid': 4, 'c1': 'CMaj7', },
{ 'recid': 5, 'c1': 'CMaj7', },
{ 'recid': 6, 'c1': 'CMin', },
{ 'recid': 7, 'c1': 'CMin7', },
{ 'recid': 8, 'c1': 'CSus4', },
{ 'w2ui': { 'style': { 1: 'background-color:red;', 2: 'background-color:green;'} }, 'recid': 9, 'c1': 'C', 'c2': 'C#', },
#{ 'recid': 3, 'c1': 'CMaj7', },
#{ 'recid': 4, 'c1': 'CSus4', },
#{ 'recid': 5, 'c1': 'C', },
]
}
# attach the layout to the HTML div
window.jQuery('#main').w2layout(w2layoutDefinition)
# jstree_container is defined in the content string of the left-hand panel
tree = window.jQuery('#jstree_container').jstree(treeDefinition)
# create the grid
grid = window.jQuery().w2grid(gridDefinition)
# and insert the grid into the w2ui layout
window.w2ui.layout.content('main', grid)
# set up the global application instance
document.pyapp = Application("wss://echo.websocket.org",window.w2ui,grid,tree)
# ensure grid and friends are refreshed
grid.refresh()
#
r5 = grid.get(5)
#
getGlobal().js_dump(r5)
| StarcoderdataPython |
1709526 | <gh_stars>0
#!/usr/bin/env python3
import abc
from typing import Dict, Type
import torch
from ml.rl.core.registry_meta import RegistryMeta
from ml.rl.models.base import ModelBase
from ml.rl.parameters import NormalizationParameters
from ml.rl.prediction.predictor_wrapper import ParametricDqnWithPreprocessor
from ml.rl.preprocessing.preprocessor import Preprocessor
try:
from ml.rl.fb.prediction.fb_predictor_wrapper import (
FbParametricDqnPredictorWrapper as ParametricDqnPredictorWrapper,
)
except ImportError:
from ml.rl.prediction.predictor_wrapper import ( # type: ignore
ParametricDqnPredictorWrapper,
)
class ParametricDQNNetBuilder(metaclass=RegistryMeta):
"""
Base class for parametric DQN net builder.
"""
@classmethod
@abc.abstractmethod
def config_type(cls) -> Type:
"""
        Return the config type. Must conform to the Flow python 3 type API
"""
pass
@abc.abstractmethod
def build_q_network(
self,
state_normalization_parameters: Dict[int, NormalizationParameters],
action_normalization_parameters: Dict[int, NormalizationParameters],
output_dim: int = 1,
) -> ModelBase:
pass
def build_serving_module(
self,
q_network: ModelBase,
state_normalization_parameters: Dict[int, NormalizationParameters],
action_normalization_parameters: Dict[int, NormalizationParameters],
) -> torch.nn.Module:
"""
Returns a TorchScript predictor module
"""
state_preprocessor = Preprocessor(state_normalization_parameters, False)
action_preprocessor = Preprocessor(action_normalization_parameters, False)
dqn_with_preprocessor = ParametricDqnWithPreprocessor(
q_network.cpu_model().eval(), state_preprocessor, action_preprocessor
)
return ParametricDqnPredictorWrapper(
dqn_with_preprocessor=dqn_with_preprocessor
)
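# A hypothetical sketch of a concrete builder (all names are illustrative only):
#   class FullyConnected(ParametricDQNNetBuilder):
#       @classmethod
#       def config_type(cls) -> Type:
#           return FullyConnectedParams  # assumed config dataclass
#       def build_q_network(self, state_norm, action_norm, output_dim=1) -> ModelBase:
#           ...  # e.g. an MLP over concatenated state/action features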
| StarcoderdataPython |
129843 | <reponame>LuccaBiasoli/python-cola
# if and else
# if/else example
tempo = int(input('How old is your car (in years)? '))
if tempo <= 3:
    print('new car')
else:
    print('old car')
print('--END--')
# another if/else example
n1 = float(input('What was your first grade? '))
n2 = float(input('What was your second grade? '))
media = (n1 + n2) / 2
if media >= 5:
    print("Congrats, you're getting the hang of it")
else:
    print('You bombed it, man')
# another if example
velocidade = float(input('What is the current speed of the car? '))
if velocidade > 80:
    print('FINED! You exceeded the allowed limit of 80 km/h ')
    multa = (velocidade - 80) * 7
    print(f'You must pay a fine of R${multa:.2f}!')
print('Have a nice day and drive safely! ')
# if example for odd or even numbers
número = int(input('Tell me any number: '))
resultado = número % 2
if resultado == 0:
    print(f'The number {número} is even ')
else:
    print(f'The number {número} is odd ')
# if example with and/or
ano = int(input('Which year do you want to check? '))
if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:  # != means "not equal"
    print(f'The year {ano} is a leap year ')
else:
    print(f'The year {ano} is NOT a leap year ')
# if example with lower
nome = str(input('What is your name? '))
nome = nome.lower()
if nome == 'lucca':
    print('What a beautiful name you have! ')
else:
    print('Your name is so ordinary')
print(f'Good morning, {nome} ')
# if example with random
import random
computador = random.randint(0, 5)
print('I will think of a number from 0 to 5 ')
jogador = int(input('Which number did I think of? '))
if jogador == computador:
    print('You got me! ')
else:
    print('Not this time! ')
# NOT
# not example (fixed: the test must be on `a`, the empty string, to match the message)
a = ''
b = 2
if not a:
    print('Please fill in the value of A.')
# IN
# in example
nome = '<NAME>'
if 'u' in nome:
    print('The letter U exists in your name ')
# password example with if and
usuario = input('username: ')
senha = input('enter your password: ')
usuario_bd = 'lucca'  # fixed: was misspelled `usario_bd`, which broke the comparison below
senha_bd = '<PASSWORD>'
if usuario_bd == usuario and senha_bd == senha:
    print('you are logged into the system ')
else:
    print('Wrong password, try again ')
| StarcoderdataPython |
3315350 | # Link --> https://www.hackerrank.com/challenges/30-hello-world/problem
# Code:
input_string = input()
print('Hello, World.')
print(input_string)
| StarcoderdataPython |
4831912 | <reponame>nile0316/propnet<filename>propnet/ext/tests/utils.py
from monty.serialization import dumpfn
from propnet.ext.aflow import AflowAPIQuery
from propnet.dbtools.aflow_ingester import AflowIngester
import os
import json
TEST_DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'test_data')
def create_aflow_test_docs():
auids = [
'aflow:0132ab6b9cddd429', # Has an elastic tensor file
'aflow:0136cbe39e59c471', # An average joe material
'aflow:d0c93a9396dc599e'
]
query = AflowAPIQuery.from_pymongo({'auid': {'$in': auids}}, AflowIngester._available_kws, 50,
property_reduction=True)
if query.N != len(auids):
auids_retrieved = [material['auid'] for page in query.responses.values()
for material in page.values()]
auids_not_retrieved = set(auids) - set(auids_retrieved)
raise ValueError("Not all materials retrieved. Perhaps they have been deprecated? "
"Unavailabie auids:\n{}".format(auids_not_retrieved))
data = []
for item in query:
raw_data = item.raw
try:
contcar_data = item.files['CONTCAR.relax.vasp']()
except Exception:
contcar_data = None
try:
elastic_tensor_data = item.files['AEL_elastic_tensor.json']()
elastic_tensor_data = json.loads(elastic_tensor_data)
except Exception:
elastic_tensor_data = None
raw_data['CONTCAR_relax_vasp'] = contcar_data
raw_data['AEL_elastic_tensor_json'] = elastic_tensor_data
data.append(raw_data)
dumpfn(data, os.path.join(TEST_DATA_DIR, 'aflow_store.json'))
| StarcoderdataPython |
4809914 | from unittest import TestCase
import proverb
class TestSaying(TestCase):
def test_is_string(self):
s = proverb.saying()
self.assertTrue(isinstance(s, str))
| StarcoderdataPython |
3200009 | from sys import argv
from random import randint
from functools import reduce
k = int(argv[1])
shares = map(int, argv[2:])
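# e.g. with argv == ['prog', '5', '3', '4'], the shares sum to 7 and 7 % 5 == 2;
# this reads like additive secret-sharing reconstruction mod k (an assumption
# about intent -- the script itself simply prints the modular sum)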
print(sum(shares) % k) | StarcoderdataPython |
3381797 | colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
n1 = float(input("Enter in meters the height of your wall: "))
n2 = float(input("Enter in meters the the width of your wall: "))
area = n1 * n2
paint = area / 2
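# e.g. a 3 m x 4 m wall has an area of 12 m² and needs 6 liters
# (the formula above assumes one liter of paint covers 2 m²)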
print("Your wall are is {}{}{}m²"
"\nand you need {}{}{} liters of paint for paint all the wall"
.format(colors["blue"], area, colors["clean"],
colors["yellow"], paint, colors["clean"]))
| StarcoderdataPython |
3318278 | from string import punctuation
from filemanip import normalize_str, compare_normalized
def clean_text(text):
normalized = normalize_str(text)
trans_tbl = str.maketrans('','', punctuation)
return normalized.translate(trans_tbl)
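# e.g. clean_text('Hello, World!') -> 'hello world', assuming normalize_str
# lowercases the text (its exact behavior lives in the external filemanip module)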
if __name__ == '__main__':
    # NOTE: get_data, get_raw_verses, make_references and fname are assumed to be
    # provided elsewhere in this project; they are not defined or imported here.
    raw = get_data(fname)
data = get_raw_verses(raw)
xdata = data[:10]
references = make_references(xdata)
print(references)
for ref in references:
print(ref.text)
| StarcoderdataPython |
3384721 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
import json
import time
# from data_augmentation_back import data_augmentationc
# CLASSES = ('__background__', 'upbody', 'downbody', 'fullbody')
CLASSES = ('__background__', 'handbags', 'shoes', 'up_body', 'down_body', \
'all_body', 'boots', 'bra', 'underwear', 'skirt', 'dress', 'makeup')
## up: 1, down: 2, full: 3
class_to_categoryid = {'up_body':1, 'down_body':2, 'all_body':3, 'bra':1, 'underwear':2, 'skirt':2, 'dress':2}
global_index = 1
## absolute path input for image names
def bag_demo(annotations_list, net, image_name, image_id):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im = cv2.imread(image_name)
# Detect all object classes and regress object bounds
scores, boxes = im_detect(net, im)
# Visualize detections for each class
CONF_THRESH = 0.9
NMS_THRESH = 0.3
bbox = []
area = 0
iscrowd = 0
has_bbox = False
global global_index
for cls_ind, cls in enumerate(CLASSES[1:]):
if cls not in class_to_categoryid.keys():
continue
cls_ind += 1 # because we skipped background
# category_id = cls_ind
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
if len(inds) != 0:
## only get clothing with the largest score
ind = inds[0]
bbox = [int(dets[ind, 0]), int(dets[ind, 1]), int(dets[ind, 2])-int(dets[ind, 0])+1, \
int(dets[ind, 3])-int(dets[ind, 1])+1]
area = bbox[2] * bbox[3]
annotations_list.append({'id': global_index, 'image_id': image_id, 'bbox': bbox, 'area': area, \
'category_id': class_to_categoryid[cls], 'iscrowd': iscrowd})
global_index += 1
has_bbox = True
if not has_bbox:
annotations_list.append({'id': global_index, 'image_id': image_id, 'bbox': [], 'area': area, \
'category_id': 0, 'iscrowd': iscrowd})
global_index += 1
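# Example of a record appended by bag_demo (all values are illustrative):
#   {'id': 7, 'image_id': 42, 'bbox': [x0, y0, w, h], 'area': w * h,
#    'category_id': 2, 'iscrowd': 0}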
# ## absolute path input for image names
# def bag_demo(annotations_list, net, image_name, image_id):
# """Detect object classes in an image using pre-computed object proposals."""
# # Load the demo image
# im = cv2.imread(image_name)
# # Detect all object classes and regress object bounds
# scores, boxes = im_detect(net, im)
# # Visualize detections for each class
# CONF_THRESH = 0.9
# NMS_THRESH = 0.3
# bbox = []
# area = 0
# iscrowd = 0
# global global_index
# for cls_ind, cls in enumerate(CLASSES[1:]):
# cls_ind += 1 # because we skipped background
# category_id = cls_ind
# cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
# cls_scores = scores[:, cls_ind]
# dets = np.hstack((cls_boxes,
# cls_scores[:, np.newaxis])).astype(np.float32)
# keep = nms(dets, NMS_THRESH)
# dets = dets[keep, :]
# inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
# if len(inds) != 0:
# ## only get clothing with the largest score
# ind = inds[0]
# bbox = [int(dets[ind, 0]), int(dets[ind, 1]), int(dets[ind, 2])-int(dets[ind, 0])+1, \
# int(dets[ind, 3])-int(dets[ind, 1])+1]
# area = bbox[2] * bbox[3]
# annotations_list.append({'id': global_index, 'image_id': image_id, 'bbox': bbox, 'area': area, \
# 'category_id': category_id, 'iscrowd': iscrowd})
# global_index += 1
# cv2.imshow('ori', im)
# cv2.waitKey (0)
# cv2.destroyAllWindows()
# array = dets[ind, :-1]
# ret_img = data_augmentation(image=im, bbox=array[np.newaxis, :], scales=1)
# cv2.imshow('test', ret_img)
# cv2.waitKey (0)
# cv2.destroyAllWindows()
# ret_img = data_augmentation(image=im, bbox=array[np.newaxis, :], scales=1.3)
# cv2.imshow('test1', ret_img)
# cv2.waitKey (0)
# cv2.destroyAllWindows()
# return {'id': index, 'image_id': image_id, 'bbox': bbox, 'area': area, \
# 'category_id': category_id, 'iscrowd': iscrowd}
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
# parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
# choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
def load_json(json_file):
assert os.path.exists(json_file), \
'json file not found at: {}'.format(json_file)
with open(json_file, 'rb') as f:
data = json.load(f)
return data
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
cfg.TEST.RPN_POST_NMS_TOP_N = 100
args = parse_args()
prototxt = 'models/all_categories_0307/VGG16/faster_rcnn_end2end/test.prototxt'
caffemodel = 'output/faster_rcnn_end2end/2007_trainval_all_categories_0307/VGG16_all_categories_0307_frcnn_iter_300000.caffemodel'
input_json_file = '/data/home/liuhuawei/input/clothing_to_detect.json'
# output_json_file = '/data/home/liuhuawei/output/clothing_13w_output.json'
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
annotations_list = []
images_list = []
data = load_json(input_json_file)
name_index = 1
for index, item in enumerate(data["images"]):
index += 1
print "Processing %d/%d!!!" % (index, len(data["images"]))
images_list.append(item)
image_name = item['file_name']
image_id = item['id']
s_t = time.time()
bag_demo(annotations_list, net, image_name, image_id)
# annotations_list.append()
# output_dict = {"images": images_list, "type": data["type"], "annotations": annotations_list,\
# "categories": data["categories"]}
print 'Detection time is: %f' % (time.time()-s_t)
if index % 10000 == 0 or index == len(data["images"]):
output_dict = {"images": images_list, "type": data["type"], "annotations": annotations_list,\
"categories": data["categories"]}
with open('/data/home/liuhuawei/output/clothing_13w_%dw_output.json' % (name_index), 'wt') as f:
f.write(json.dumps(output_dict))
name_index += 1
annotations_list = []
images_list = []
# with open('/data/home/liuhuawei/output/test_15w_clothing/clothing_test.json', 'wt') as f:
# f.write(json.dumps(output_dict))
| StarcoderdataPython |
3242861 | <gh_stars>0
__author__ = "<NAME> (nam4dev)"
__since__ = "11/25/2019"
__copyright__ = """MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
:summary: This module implements `.. releases: <path_or_url>` custom Sphinx Directive
"""
import json
import jinja2
import requests
from docutils import nodes
from docutils import statemachine
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class ReleasesDirective(Directive):
"""Directive to insert release markups into a table.
It handles whatever it is needed to be rendered
The releases directive takes as positional & required argument a file path to a JSON file
It could also be a valid HTTP uri producing expected JSON schema.
The expected JSON schema could be as needed, there's no constraints!
For example,
.. code-block:: json
{
"program": "<program name>",
"releases": [
{
"version": "1.0.0.0",
"date": "01-01-2019",
"mac": "https://site.com/mac-release.run",
"linux": "https://site.com/linux-release.run",
"windows": "https://site.com/windows-release.exe"
}
]
}
An optional argument `:format:` indicates in which format the directive's content is written.
Supported formats:
- html
- rest (rst)
- any format the `.. raw::` directive takes
Examples::
HTML example
.. releases:: ./release_list.json
:format: html
            {% for release in releases %}
<h1>{{ release['name'] }}</h1>
<table class="table">
<thead>
<tr>
<td>Linux</td>
<td>Windows</td>
</tr>
</thead>
<tbody>
<tr>
<td>{{ release['linux'] }}</td>
<td>{{ release['windows'] }}</td>
</tr>
</tbody>
</table>
{% endfor %}
RST example
Generating REST format will allow one to take advantage of REST parser.
For example, any title will be included into the TOC (represented into the right menu of the document)
.. releases:: https://wwww.site.com/release_list.json
:format: rest
{% macro raw_tag() %}
.. raw:: html
{% endmacro %}
{% macro to_title(release) %}
{%- set title=release['name'] -%}
{{ title }}
{{ '#' * title|length }}
{% endmacro %}
            {% for release in releases %}
{{ to_title(release) }}
{{ raw_tag() }}
<table class="table">
<thead>
<tr>
<td>Linux</td>
<td>Windows</td>
</tr>
</thead>
<tbody>
<tr>
<td>{{ release['linux'] }}</td>
<td>{{ release['windows'] }}</td>
</tr>
</tbody>
</table>
{% endfor %}
"""
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {
'format': directives.unchanged
}
@property
def _template2output(self):
"""
        Property that renders the Jinja template over the release data and returns the converted output
:return: The converted output
:rtype: str
"""
releases_info = self._get_releases_info(self.arguments[0])
template = self._jinja2_env.from_string('\n'.join(self.content))
try:
return template.render(**releases_info)
except Exception as error:
raise self.error('%s' % error)
def __init__(self, name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""
Override
Constructor
Instantiate the Jinja environment instance
"""
super().__init__(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine)
self._jinja2_env = jinja2.Environment(loader=jinja2.BaseLoader)
def _get_releases_info(self, uri):
"""
Get the releases info from an URI (HTTP or from a path)
:param uri: The HTTP URL or the local path where JSON data lies
:type uri: str
:return: The releases data
:rtype: dict
"""
releases_info = {}
try:
with open(uri) as fd:
try:
releases_info = json.load(fd)
except (TypeError, ValueError, Exception) as e:
raise self.error('{} is not a valid JSON file ({})'.format(uri, e))
except (FileNotFoundError, OSError):
try:
response = requests.get(uri, verify=False)
releases_info = response.json()
except requests.exceptions.RequestException as error:
raise self.error('Could not reach URI %s (%s)' % (uri, str(error)))
return releases_info
def _process_rst(self, output):
"""
Process RST output
:param output: The output to process
:type output: str
:return: The processed node(s)
:rtype: list
"""
try:
node = nodes.paragraph()
node.source, node.line = self.state_machine.get_source_and_line(self.lineno)
converted = statemachine.StringList(
initlist=output.split('\n'),
source=self.content.source,
parent=self.content.parent,
parent_offset=self.content.parent_offset,
)
self.state.nested_parse(converted, self.content_offset, node, match_titles=True)
except Exception as error:
raise self.error('%s' % error)
return node.children
def _process_raw(self, output):
"""
Process RAW output
:param output: The output to process
:type output: str
:return: The processed node(s)
:rtype: list
"""
attributes = {'format': self.options.get('format', 'html')}
raw_node = nodes.raw('', output, **attributes)
raw_node.source, raw_node.line = self.state_machine.get_source_and_line(self.lineno)
return [raw_node]
def run(self):
"""
Entry point
:return: The processed node(s)
:rtype: list
"""
self.assert_has_content()
output = self._template2output
selected_format = self.options.get('format', 'rst')
if selected_format in ('rst', 'rest',):
node_list = self._process_rst(output)
else:
node_list = self._process_raw(output)
return node_list
def setup(app, **options):
"""
Override
:param app: The Sphinx application reference
:param options: Any extra keyword options
"""
app.add_directive('releases', ReleasesDirective)
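# Hypothetical wiring in a Sphinx conf.py (the module path is illustrative):
#   extensions = [..., 'myext.releases']  # any module exposing the setup() above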
| StarcoderdataPython |
1702568 | from typing import Optional, Union
from typing_extensions import Literal
from anndata import AnnData
import numpy as np
import pandas as pd
from pandas import DataFrame
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse.csgraph import shortest_path
import igraph
from tqdm import tqdm
import sys
import warnings
import itertools
import math
from scipy import sparse
import simpleppt
from ..plot.trajectory import graph as plot_graph
from .. import logging as logg
from .. import settings
from .utils import get_X
def curve(
adata: AnnData,
Nodes: int = None,
use_rep: str = None,
ndims_rep: Optional[int] = None,
init: Optional[DataFrame] = None,
epg_lambda: Optional[Union[float, int]] = 0.01,
epg_mu: Optional[Union[float, int]] = 0.1,
    epg_trimmingradius: Optional[float] = np.inf,
epg_initnodes: Optional[int] = 2,
epg_verbose: bool = False,
device: Literal["cpu", "gpu"] = "cpu",
plot: bool = False,
basis: Optional[str] = "umap",
seed: Optional[int] = None,
copy: bool = False,
):
"""\
Generate a principal curve.
Learn a curved representation on any space, composed of nodes, approximating the
position of the cells on a given space such as gene expression, pca, diffusion maps, ...
Uses ElpiGraph algorithm.
Parameters
----------
adata
Annotated data matrix.
Nodes
        Number of nodes composing the principal curve; use a range of 10 to 100
        for the ElPiGraph approach.
use_rep
Choose the space to be learned by the principal tree.
ndims_rep
Number of dimensions to use for the inference.
init
Initialise the point positions.
epg_lambda
Parameter for ElPiGraph, coefficient of ‘stretching’ elasticity [Albergante20]_.
epg_mu
Parameter for ElPiGraph, coefficient of ‘bending’ elasticity [Albergante20]_.
epg_trimmingradius
Parameter for ElPiGraph, trimming radius for MSE-based data approximation term [Albergante20]_.
epg_initnodes
        number of nodes used in the initial step of the ElPiGraph algorithm
epg_verbose
show verbose output of epg algorithm
device
Run method on either `cpu` or on `gpu`
plot
Plot the resulting tree.
basis
Basis onto which the resulting tree should be projected.
seed
A numpy random seed.
copy
Return a copy instead of writing to adata.
Returns
-------
adata : anndata.AnnData
if `copy=True` it returns or else add fields to `adata`:
`.uns['epg']`
            dictionary containing information from the elastic principal curve
`.obsm['X_R']`
hard assignment of cells to principal points
`.uns['graph']['B']`
adjacency matrix of the principal points
`.uns['graph']['F']`
coordinates of principal points in representation space
"""
logg.info(
"inferring a principal curve",
reset=True,
end=" " if settings.verbosity > 2 else "\n",
)
adata = adata.copy() if copy else adata
if Nodes is None:
if adata.shape[0] * 2 > 100:
Nodes = 100
else:
Nodes = int(adata.shape[0] / 2)
logg.hint(
"parameters used \n"
" "
+ str(Nodes)
+ " principal points, mu = "
+ str(epg_mu)
+ ", lambda = "
+ str(epg_lambda)
)
curve_epg(
adata,
Nodes,
use_rep,
ndims_rep,
init,
epg_lambda,
epg_mu,
epg_trimmingradius,
epg_initnodes,
device,
seed,
epg_verbose,
)
if plot:
plot_graph(adata, basis)
return adata if copy else None
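# A hypothetical usage sketch (dataset and parameter values are illustrative):
#   import scanpy as sc
#   adata = sc.datasets.pbmc3k_processed()
#   curve(adata, Nodes=30, use_rep="X_pca", plot=True, basis="umap")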
def tree(
adata: AnnData,
Nodes: int = None,
use_rep: str = None,
ndims_rep: Optional[int] = None,
weight_rep: str = None,
method: Literal["ppt", "epg"] = "ppt",
init: Optional[DataFrame] = None,
ppt_sigma: Optional[Union[float, int]] = 0.1,
ppt_lambda: Optional[Union[float, int]] = 1,
ppt_metric: str = "euclidean",
ppt_nsteps: int = 50,
ppt_err_cut: float = 5e-3,
ppt_gpu_tpb: int = 16,
epg_lambda: Optional[Union[float, int]] = 0.01,
epg_mu: Optional[Union[float, int]] = 0.1,
    epg_trimmingradius: Optional[float] = np.inf,
epg_initnodes: Optional[int] = 2,
epg_verbose: bool = False,
device: Literal["cpu", "gpu"] = "cpu",
plot: bool = False,
basis: Optional[str] = "umap",
seed: Optional[int] = None,
copy: bool = False,
):
"""\
Generate a principal tree.
    Learn a simplified representation on any space, composed of nodes, approximating the
position of the cells on a given space such as gene expression, pca, diffusion maps, ...
If `method=='ppt'`, uses simpleppt implementation from [Soldatov19]_.
If `method=='epg'`, uses Elastic Principal Graph approach from [Albergante20]_.
Parameters
----------
adata
Annotated data matrix.
Nodes
        Number of nodes composing the principal tree, use a range of 10 to 100 for
        the ElPiGraph approach and 100 to 2000 for the PPT approach.
use_rep
Choose the space to be learned by the principal tree.
ndims_rep
Number of dimensions to use for the inference.
weight_rep
If `ppt`, use a weight matrix for learning the tree.
method
If `ppt`, uses simpleppt approach, `ppt_lambda` and `ppt_sigma` are the
parameters controlling the algorithm. If `epg`, uses ComputeElasticPrincipalTree
function from elpigraph python package, `epg_lambda` `epg_mu` and `epg_trimmingradius`
are the parameters controlling the algorithm.
init
Initialise the point positions.
ppt_sigma
Regularization parameter for simpleppt [Mao15]_.
ppt_lambda
Parameter for simpleppt, penalty for the tree length [Mao15]_.
ppt_metric
The metric to use to compute distances in high dimensional space.
For compatible metrics, check the documentation of
sklearn.metrics.pairwise_distances if using cpu or
cuml.metrics.pairwise_distances if using gpu.
ppt_nsteps
Number of steps for the optimisation process of simpleppt.
ppt_err_cut
        Stop the simpleppt algorithm if the proximity of principal points between
        iterations is less than the defined value.
ppt_gpu_tpb
Threads per block parameter for cuda computations.
epg_lambda
Parameter for ElPiGraph, coefficient of ‘stretching’ elasticity [Albergante20]_.
epg_mu
Parameter for ElPiGraph, coefficient of ‘bending’ elasticity [Albergante20]_.
epg_trimmingradius
Parameter for ElPiGraph, trimming radius for MSE-based data approximation term [Albergante20]_.
    epg_initnodes
        Number of nodes used in the initial step of the ElPiGraph algorithm.
    epg_verbose
        Show verbose output of the ElPiGraph algorithm.
    device
        Run the method on either `cpu` or `gpu`.
plot
Plot the resulting tree.
basis
Basis onto which the resulting tree should be projected.
seed
A numpy random seed.
copy
Return a copy instead of writing to adata.
Returns
-------
adata : anndata.AnnData
        if `copy=True` it returns the AnnData, else it adds fields to `adata`:
        `.uns['ppt']`
            dictionary containing information from the simpleppt tree if `method='ppt'`
        `.uns['epg']`
            dictionary containing information from the elastic principal tree if `method='epg'`
        `.obsm['X_R']`
            soft (ppt) or hard (epg) assignment of cells to principal points
`.uns['graph']['B']`
adjacency matrix of the principal points
`.uns['graph']['F']`
coordinates of principal points in representation space
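    Examples
    --------
    A minimal sketch (assuming this function is exposed as ``scFates.tl.tree``,
    as in the scFates package, and that the AnnData has a PCA basis):

    >>> import scanpy as sc
    >>> import scFates as scf
    >>> adata = sc.datasets.pbmc3k_processed()
    >>> scf.tl.tree(adata, Nodes=100, use_rep="X_pca", method="ppt")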
"""
logg.info(
"inferring a principal tree",
reset=True,
end=" " if settings.verbosity > 2 else "\n",
)
adata = adata.copy() if copy else adata
X, use_rep = get_data(adata, use_rep, ndims_rep)
W = get_data(adata, weight_rep, ndims_rep)[0] if weight_rep is not None else None
if Nodes is None:
if adata.shape[0] * 2 > 2000:
Nodes = 2000
else:
Nodes = int(adata.shape[0] / 2)
if method == "ppt":
simpleppt.settings.verbosity = settings.verbosity
ppt = simpleppt.ppt(
X,
W,
Nodes=Nodes,
init=init,
sigma=ppt_sigma,
lam=ppt_lambda,
metric=ppt_metric,
nsteps=ppt_nsteps,
err_cut=ppt_err_cut,
device=device,
gpu_tbp=ppt_gpu_tpb,
seed=seed,
progress=settings.verbosity > 1,
)
ppt = vars(ppt)
graph = {
"B": ppt["B"],
"F": ppt["F"],
"tips": ppt["tips"],
"forks": ppt["forks"],
"metrics": ppt["metric"],
"use_rep": use_rep,
"ndims_rep": ndims_rep,
}
adata.uns["graph"] = graph
adata.uns["ppt"] = ppt
adata.obsm["X_R"] = ppt["R"]
elif method == "epg":
graph, epg = tree_epg(
X,
Nodes,
init,
epg_lambda,
epg_mu,
epg_trimmingradius,
epg_initnodes,
device,
seed,
epg_verbose,
)
graph["use_rep"] = use_rep
graph["ndims_rep"] = ndims_rep
adata.obsm["X_R"] = graph["R"]
del graph["R"]
adata.uns["graph"] = graph
adata.uns["epg"] = epg
if plot:
plot_graph(adata, basis)
logg.hint(
"added \n"
" .uns['" + method + "'], dictionnary containing inferred tree.\n"
" .obsm['X_R'] soft assignment of cells to principal points.\n"
" .uns['graph']['B'] adjacency matrix of the principal points.\n"
" .uns['graph']['F'] coordinates of principal points in representation space."
)
return adata if copy else None
def circle(
adata: AnnData,
Nodes: int = None,
use_rep: str = None,
ndims_rep: Optional[int] = None,
init: Optional[DataFrame] = None,
epg_lambda: Optional[Union[float, int]] = 0.01,
epg_mu: Optional[Union[float, int]] = 0.1,
    epg_trimmingradius: Optional[float] = np.inf,
epg_initnodes: Optional[int] = 3,
epg_verbose: bool = False,
device: Literal["cpu", "gpu"] = "cpu",
plot: bool = False,
basis: Optional[str] = "umap",
seed: Optional[int] = None,
copy: bool = False,
):
"""\
Generate a principal circle.
    Learn a circular representation, composed of nodes, approximating the position
    of the cells in a given space such as gene expression, PCA, diffusion maps, ...
    Uses the ElPiGraph algorithm.
Parameters
----------
adata
Annotated data matrix.
Nodes
        Number of nodes composing the principal circle, use a range of 10 to 100
        for the ElPiGraph approach.
use_rep
Choose the space to be learned by the principal tree.
ndims_rep
Number of dimensions to use for the inference.
init
Initialise the point positions.
epg_lambda
Parameter for ElPiGraph, coefficient of ‘stretching’ elasticity [Albergante20]_.
epg_mu
Parameter for ElPiGraph, coefficient of ‘bending’ elasticity [Albergante20]_.
epg_trimmingradius
Parameter for ElPiGraph, trimming radius for MSE-based data approximation term [Albergante20]_.
    epg_initnodes
        Number of nodes used in the initial step of the ElPiGraph algorithm.
    epg_verbose
        Show verbose output of the ElPiGraph algorithm.
    device
        Run the method on either `cpu` or `gpu`.
    plot
        Plot the resulting circle.
    basis
        Basis onto which the resulting circle should be projected.
seed
A numpy random seed.
copy
Return a copy instead of writing to adata.
Returns
-------
adata : anndata.AnnData
        if `copy=True` it returns the AnnData, else it adds fields to `adata`:
        `.uns['epg']`
            dictionary containing information from the elastic principal circle
        `.obsm['X_R']`
            hard assignment of cells to principal points
`.uns['graph']['B']`
adjacency matrix of the principal points
`.uns['graph']['F']`
coordinates of principal points in representation space
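    Examples
    --------
    A minimal sketch (assuming the ``scFates.tl.circle`` entry point and an
    AnnData with a PCA basis; useful for cyclic processes such as the cell cycle):

    >>> scf.tl.circle(adata, Nodes=30, use_rep="X_pca")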
"""
logg.info(
"inferring a principal circle",
reset=True,
end=" " if settings.verbosity > 2 else "\n",
)
adata = adata.copy() if copy else adata
if Nodes is None:
if adata.shape[0] * 2 > 100:
Nodes = 100
else:
Nodes = int(adata.shape[0] / 2)
logg.hint(
"parameters used \n"
" "
+ str(Nodes)
+ " principal points, mu = "
+ str(epg_mu)
+ ", lambda = "
+ str(epg_lambda)
)
circle_epg(
adata,
Nodes,
use_rep,
ndims_rep,
init,
epg_lambda,
epg_mu,
epg_trimmingradius,
epg_initnodes,
device,
seed,
epg_verbose,
)
if plot:
plot_graph(adata, basis)
return adata if copy else None
def tree_epg(
X,
Nodes: int = None,
init: Optional[DataFrame] = None,
lam: Optional[Union[float, int]] = 0.01,
mu: Optional[Union[float, int]] = 0.1,
    trimmingradius: Optional[float] = np.inf,
initnodes: int = None,
device: str = "cpu",
seed: Optional[int] = None,
verbose: bool = True,
):
try:
import elpigraph
    except ImportError:
        warnings.warn(
            "ElPiGraph package is not installed.\n"
            'Please use "pip install git+https://github.com/j-bac/elpigraph-python.git" to install it'
        )
logg.hint(
"parameters used \n"
" "
+ str(Nodes)
+ " principal points, mu = "
+ str(mu)
+ ", lambda = "
+ str(lam)
)
if seed is not None:
np.random.seed(seed)
if device == "gpu":
import cupy as cp
from cuml.metrics import pairwise_distances
from .utils import cor_mat_gpu
Tree = elpigraph.computeElasticPrincipalTree(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
GPU=True,
verbose=verbose,
)
R = pairwise_distances(
cp.asarray(X.values), cp.asarray(Tree[0]["NodePositions"])
)
R = cp.asnumpy(R)
        # Hard assignment: one-hot matrix mapping each cell to its closest node
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
else:
from .utils import cor_mat_cpu
from sklearn.metrics import pairwise_distances
Tree = elpigraph.computeElasticPrincipalTree(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
verbose=verbose,
)
R = pairwise_distances(X.values, Tree[0]["NodePositions"])
        # Hard assignment: one-hot matrix mapping each cell to its closest node
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
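    # Build an undirected node graph from the ElPiGraph edge list.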
g = igraph.Graph(directed=False)
g.add_vertices(np.unique(Tree[0]["Edges"][0].flatten().astype(int)))
g.add_edges(
pd.DataFrame(Tree[0]["Edges"][0]).astype(int).apply(tuple, axis=1).values
)
B = np.asarray(g.get_adjacency().data)
emptynodes = np.argwhere(R.max(axis=0) == 0).ravel()
sel = ~np.isin(np.arange(R.shape[1]), emptynodes)
B = B[sel, :][:, sel]
R = R[:, sel]
F = Tree[0]["NodePositions"].T[:, sel]
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
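    # If pruning unassigned nodes disconnected the graph, reconnect the
    # components by repeatedly linking the two closest remaining tips.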
def reconnect():
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
distmat = np.triu(pairwise_distances(F[:, tips].T))
distmat = pd.DataFrame(distmat, columns=tips, index=tips)
distmat[distmat == 0] = np.inf
row, col = np.unravel_index(np.argmin(distmat.values), distmat.shape)
i, j = distmat.index[row], distmat.columns[col]
B[i, j] = 1
B[j, i] = 1
return B
if len(emptynodes) > 0:
logg.info(" removed %d non assigned nodes" % (len(emptynodes)))
recon = len(np.unique(np.array(g.clusters().membership))) > 1
while recon:
B = reconnect()
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
recon = len(np.unique(np.array(g.clusters().membership))) > 1
forks = np.argwhere(np.array(g.degree()) > 2).flatten()
graph = {
"B": B,
"R": R,
"F": Tree[0]["NodePositions"].T,
"tips": tips,
"forks": forks,
"cells_fitted": X.index.tolist(),
"metrics": "euclidean",
}
Tree[0]["Edges"] = list(Tree[0]["Edges"])[0]
return graph, Tree[0]
def curve_epg(
adata: AnnData,
Nodes: int = None,
use_rep: str = None,
ndims_rep: Optional[int] = None,
init: Optional[DataFrame] = None,
lam: Optional[Union[float, int]] = 0.01,
mu: Optional[Union[float, int]] = 0.1,
    trimmingradius: Optional[float] = np.inf,
initnodes: int = None,
device: str = "cpu",
seed: Optional[int] = None,
verbose: bool = True,
):
try:
import elpigraph
    except ImportError:
        warnings.warn(
            "ElPiGraph package is not installed.\n"
            'Please use "pip install git+https://github.com/j-bac/elpigraph-python.git" to install it'
        )
X, use_rep = get_data(adata, use_rep, ndims_rep)
if seed is not None:
np.random.seed(seed)
if device == "gpu":
import cupy as cp
from .utils import cor_mat_gpu
from cuml.metrics import pairwise_distances
Curve = elpigraph.computeElasticPrincipalCurve(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
GPU=True,
verbose=verbose,
)
R = pairwise_distances(
cp.asarray(X.values), cp.asarray(Curve[0]["NodePositions"])
)
R = cp.asnumpy(R)
        # Hard assignment: one-hot matrix mapping each cell to its closest node
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
else:
from .utils import cor_mat_cpu
from sklearn.metrics import pairwise_distances
Curve = elpigraph.computeElasticPrincipalCurve(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
verbose=verbose,
)
R = pairwise_distances(X.values, Curve[0]["NodePositions"])
        # Hard assignment: one-hot matrix mapping each cell to its closest node
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
g = igraph.Graph(directed=False)
g.add_vertices(np.unique(Curve[0]["Edges"][0].flatten().astype(int)))
g.add_edges(
pd.DataFrame(Curve[0]["Edges"][0]).astype(int).apply(tuple, axis=1).values
)
B = np.asarray(g.get_adjacency().data)
emptynodes = np.argwhere(R.max(axis=0) == 0).ravel()
sel = ~np.isin(np.arange(R.shape[1]), emptynodes)
B = B[sel, :][:, sel]
R = R[:, sel]
F = Curve[0]["NodePositions"].T[:, sel]
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
def reconnect():
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
distmat = np.triu(pairwise_distances(F[:, tips].T))
distmat = pd.DataFrame(distmat, columns=tips, index=tips)
distmat[distmat == 0] = np.inf
row, col = np.unravel_index(np.argmin(distmat.values), distmat.shape)
i, j = distmat.index[row], distmat.columns[col]
B[i, j] = 1
B[j, i] = 1
return B
if len(emptynodes) > 0:
logg.info(" removed %d non assigned nodes" % (len(emptynodes)))
recon = len(np.unique(np.array(g.clusters().membership))) > 1
while recon:
B = reconnect()
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
recon = len(np.unique(np.array(g.clusters().membership))) > 1
forks = np.argwhere(np.array(g.degree()) > 2).flatten()
graph = {
"B": B,
"F": Curve[0]["NodePositions"].T,
"tips": tips,
"forks": forks,
"metrics": "euclidean",
"use_rep": use_rep,
"ndims_rep": ndims_rep,
}
Curve[0]["Edges"] = list(Curve[0]["Edges"])[0]
adata.uns["graph"] = graph
adata.uns["epg"] = Curve[0]
adata.obsm["X_R"] = R
logg.info(" finished", time=True, end=" " if settings.verbosity > 2 else "\n")
logg.hint(
"added \n"
" .uns['epg'] dictionnary containing inferred elastic curve generated from elpigraph.\n"
" .obsm['X_R'] hard assignment of cells to principal points.\n"
" .uns['graph']['B'] adjacency matrix of the principal points.\n"
" .uns['graph']['F'], coordinates of principal points in representation space."
)
return adata
def circle_epg(
adata: AnnData,
Nodes: int = None,
use_rep: str = None,
ndims_rep: Optional[int] = None,
init: Optional[DataFrame] = None,
lam: Optional[Union[float, int]] = 0.01,
mu: Optional[Union[float, int]] = 0.1,
    trimmingradius: Optional[float] = np.inf,
initnodes: int = None,
device: str = "cpu",
seed: Optional[int] = None,
verbose: bool = True,
):
try:
import elpigraph
    except ImportError:
        warnings.warn(
            "ElPiGraph package is not installed.\n"
            'Please use "pip install git+https://github.com/j-bac/elpigraph-python.git" to install it'
        )
X, use_rep = get_data(adata, use_rep, ndims_rep)
if seed is not None:
np.random.seed(seed)
if device == "gpu":
import cupy as cp
from .utils import cor_mat_gpu
from cuml.metrics import pairwise_distances
Curve = elpigraph.computeElasticPrincipalCircle(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
GPU=True,
verbose=verbose,
)
R = pairwise_distances(
cp.asarray(X.values), cp.asarray(Curve[0]["NodePositions"])
)
R = cp.asnumpy(R)
        # Hard assignment: one-hot matrix mapping each cell to its closest node
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
else:
from .utils import cor_mat_cpu
from sklearn.metrics import pairwise_distances
Curve = elpigraph.computeElasticPrincipalCircle(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
verbose=verbose,
)
R = pairwise_distances(X.values, Curve[0]["NodePositions"])
        # Hard assignment: one-hot matrix mapping each cell to its closest node
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
g = igraph.Graph(directed=False)
g.add_vertices(np.unique(Curve[0]["Edges"][0].flatten().astype(int)))
g.add_edges(
pd.DataFrame(Curve[0]["Edges"][0]).astype(int).apply(tuple, axis=1).values
)
B = np.asarray(g.get_adjacency().data)
emptynodes = np.argwhere(R.max(axis=0) == 0).ravel()
sel = ~np.isin(np.arange(R.shape[1]), emptynodes)
B = B[sel, :][:, sel]
R = R[:, sel]
F = Curve[0]["NodePositions"].T[:, sel]
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
def reconnect():
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
distmat = np.triu(pairwise_distances(F[:, tips].T))
distmat = pd.DataFrame(distmat, columns=tips, index=tips)
distmat[distmat == 0] = np.inf
row, col = np.unravel_index(np.argmin(distmat.values), distmat.shape)
i, j = distmat.index[row], distmat.columns[col]
B[i, j] = 1
B[j, i] = 1
return B
if len(emptynodes) > 0:
logg.info(" removed %d non assigned nodes" % (len(emptynodes)))
recon = len(tips) > 0
while recon:
B = reconnect()
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
recon = len(tips) > 0
forks = np.argwhere(np.array(g.degree()) > 2).flatten()
graph = {
"B": B,
"F": F,
"tips": tips,
"forks": forks,
"metrics": "euclidean",
"use_rep": use_rep,
"ndims_rep": ndims_rep,
}
Curve[0]["Edges"] = list(Curve[0]["Edges"])[0]
adata.uns["graph"] = graph
adata.uns["epg"] = Curve[0]
adata.obsm["X_R"] = R
logg.info(" finished", time=True, end=" " if settings.verbosity > 2 else "\n")
logg.hint(
"added \n"
" .uns['epg'] dictionnary containing inferred elastic circle generated from elpigraph.\n"
" .obsm['X_R'] hard assignment of cells to principal points.\n"
" .uns['graph']['B'] adjacency matrix of the principal points.\n"
" .uns['graph']['F'], coordinates of principal points in representation space."
)
return adata
def get_data(adata, use_rep, ndims_rep):
if use_rep not in adata.obsm.keys() and f"X_{use_rep}" in adata.obsm.keys():
use_rep = f"X_{use_rep}"
    if (
        (use_rep not in adata.layers.keys())
        and (use_rep not in adata.obsm.keys())
        and (use_rep != "X")
    ):
        # Fall back to a default representation; ndims_rep plays the role of scanpy's n_pcs.
        use_rep = "X" if adata.n_vars < 50 or ndims_rep == 0 else "X_pca"
        ndims_rep = None if use_rep == "X" else ndims_rep
if use_rep == "X":
ndims_rep = None
if sparse.issparse(adata.X):
X = DataFrame(adata.X.A, index=adata.obs_names)
else:
X = DataFrame(adata.X, index=adata.obs_names)
elif use_rep in adata.layers.keys():
if sparse.issparse(adata.layers[use_rep]):
X = DataFrame(adata.layers[use_rep].A, index=adata.obs_names)
else:
X = DataFrame(adata.layers[use_rep], index=adata.obs_names)
elif use_rep in adata.obsm.keys():
X = DataFrame(adata.obsm[use_rep], index=adata.obs_names)
if ndims_rep is not None:
X = X.iloc[:, :ndims_rep]
return X, use_rep
| StarcoderdataPython |
1657506 | <filename>webdjango/models/Core.py<gh_stars>1-10
import sys
from distutils.version import LooseVersion
from dirtyfields import DirtyFieldsMixin
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import validate_slug
from django.db import connection, models
from django.db.utils import OperationalError, ProgrammingError
from django_mysql.models import JSONField
from webdjango.models.AbstractModels import BaseModel
from webdjango.utils.DynamicLoader import DynamicLoader
class WebsiteProtocols:
HTTP = 'http'
HTTPS = 'https'
CHOICES = [
(HTTP, 'http'),
(HTTPS, 'https'),
]
class Website(BaseModel):
"""
Configuration for Future MultiSite
"""
domain = models.CharField(max_length=64, unique=True)
code = models.SlugField(validators=[validate_slug], unique=True)
protocol = models.CharField(max_length=12,
choices=WebsiteProtocols.CHOICES,
default=WebsiteProtocols.HTTP)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
@staticmethod
def get_current_website(request=None):
if request and request.website:
return request.website
try:
return Website.objects.first()
except:
print("Unexpected error: {0}".format(sys.exc_info()[0]))
return None
        # TODO: Logic to get the current website based on the route or domain;
        # for now we return the first one we find.
class Meta:
ordering = ['-created']
db_table = 'core_website'
def __str__(self):
return self.domain
class CoreConfig(BaseModel):
"""
    CoreConfig holds configuration values needed at the beginning of the application.
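
    Values can be addressed with a dotted slug: ``read('group.key')`` returns
    ``value['key']`` of the config whose slug is ``group``. A minimal usage
    sketch (the slug names here are purely illustrative):

        CoreConfig.write('mail.host', 'smtp.example.com')
        CoreConfig.read('mail.host')  # -> 'smtp.example.com'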
"""
slug = models.SlugField(max_length=200, validators=[
validate_slug], unique=True)
value = JSONField(max_length=500, null=True)
secure = models.BooleanField(default=False)
website = models.ForeignKey(
Website, on_delete=models.CASCADE, null=False, related_name="Configs", default=1)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
@staticmethod
def read(slug, website=None):
try:
# TODO: Make This Recursive?!
slug_path = slug.split('.')
if not website:
website = Website.get_current_website()
config = CoreConfig.objects.filter(
slug=slug_path[0], website=website).first()
if config:
if len(slug_path) > 1:
return config.value[slug_path[1]]
return config.value
else:
return None
except:
print("Unexpected error: {0}".format(sys.exc_info()[0]))
return None
@staticmethod
def write(slug, value, website=None):
# TODO: Make This Recursive?!
slug_path = slug.split('.')
if not website:
website = Website.get_current_website()
config = CoreConfig.objects.filter(
slug=slug_path[0], website=website).first()
if config:
from .CoreConfig import CoreConfigGroup
group = CoreConfigGroup.get(config.slug)
            config.slug = slug_path[0]
if len(slug_path) > 1:
val_list = config.value
val_list[slug_path[1]] = value
value = val_list
config.value = value
config.website = website
config.secure = group.secure
config.save()
else:
            # If the value should be nested inside a JSON object
            if len(slug_path) > 1:
                value = {slug_path[1]: value}
config = CoreConfig.objects.create(
slug=slug_path[0], value=value, website=website)
return config
class Meta:
ordering = ['-created']
db_table = 'core_config'
def __str__(self):
return self.slug
class Author(BaseModel):
"""
    Core Author, this model is used to show the author information on the app and theme activation pages
"""
name = models.CharField(max_length=200)
email = models.EmailField()
website = models.URLField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-created']
db_table = 'core_author'
def __str__(self):
return self.name
class Plugin(DirtyFieldsMixin, BaseModel):
"""
    Core Plugin, this model is used to track the installed plugins and check which ones are active
"""
slug = models.SlugField(max_length=100, validators=[
validate_slug], unique=True)
name = models.CharField(max_length=100)
author = models.ForeignKey(
Author, on_delete=models.CASCADE, related_name='plugins')
current_version = models.CharField(max_length=50, null=True)
version = models.CharField(max_length=50)
    active = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
@staticmethod
def update_list():
"""
Function to get all the new installed modules based on the configuration files of each plugin
"""
from webdjango.serializers.CoreSerializer import PluginSerializer, AuthorSerializer
plugins_config = DynamicLoader.get_plugins_config()
for config in plugins_config:
# Creating Author
if config['author']:
            author, created = Author.objects.get_or_create(
                **config['author'])
config['plugin']['author'] = author.id
plugin = Plugin.objects.filter(
slug=config['plugin']['slug']).first()
created = False
if not plugin:
serializer = PluginSerializer(data=config['plugin'])
serializer.is_valid(raise_exception=True)
plugin = serializer.save()
plugin.save()
created = True
        if not created:
            # Item created before; check for a version difference
            if plugin.current_version and LooseVersion(
                config['plugin']['version']
            ) > LooseVersion(plugin.current_version):
                # Run update script (probably a migration + npm install with new
                # requirements), then update the current version to the actual version
                print("New Version of the Plugin")
            else:
                print("DO Nothing for now")
class Meta:
ordering = ['-created']
db_table = 'core_plugin'
def __str__(self):
return self.name
class Theme(DirtyFieldsMixin, BaseModel):
"""
Core Themes, this model is used to check the installed Themes and check the activated one
"""
slug = models.SlugField(max_length=100, validators=[
validate_slug], unique=True)
name = models.CharField(max_length=100)
angular_module = models.CharField(max_length=100, null=False)
author = models.ForeignKey(
Author, on_delete=models.CASCADE, related_name='themes')
parent_theme = models.ForeignKey(
'Theme', on_delete=models.SET_NULL, related_name='children', null=True
)
version = models.CharField(max_length=50)
current_version = models.CharField(max_length=50, null=True)
    active = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
@staticmethod
def get_active():
return Theme.objects.filter(active=1).first()
@staticmethod
def update_list():
"""
Function to get all the new installed modules based on the configuration files of each theme
"""
from webdjango.serializers.CoreSerializer import ThemeSerializer, AuthorSerializer
active_theme = Theme.get_active()
themes_config = DynamicLoader.get_themes_config()
for config in themes_config:
# Checking if Theme has a Parent Theme
if config['theme']['parent_theme']:
parent_theme = Theme.objects.filter(
slug=config['theme']['parent_theme']).first()
if parent_theme:
config['theme']['parent_theme'] = parent_theme
else:
config['theme']['parent_theme'] = None
else:
config['theme']['parent_theme'] = None
# Creating Author
if config['author']:
            author, created = Author.objects.get_or_create(
                **config['author'])
config['theme']['author'] = author.pk
# Creating Theme
theme = Theme.objects.filter(slug=config['theme']['slug']).first()
created = False
if not theme:
serializer = ThemeSerializer(data=config['theme'])
serializer.is_valid(raise_exception=True)
theme = serializer.save()
created = True
if not created:
# Item Created Before, let's check for the version difference
if theme.current_version and config['theme']['version']:
if LooseVersion(config['theme']['version']) > LooseVersion(theme.current_version):
                    # Run update script (probably a migration + npm install with new
                    # requirements), then update the current version to the actual version
print("New Version of the theme")
else:
if not active_theme:
print("No Theme is active, let's activate one")
theme.active = True
theme.save()
active_theme = theme
class Meta:
ordering = ['-created']
db_table = 'core_theme'
| StarcoderdataPython |
1702614 | from .base import BaseRecognizer
from .TSN2D import TSN2D
from .TSN3D import TSN3D
__all__ = [
'BaseRecognizer', 'TSN2D', 'TSN3D'
]
| StarcoderdataPython |
3295072 | # Generated by Django 3.1.1 on 2020-11-25 04:39
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('auth.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| StarcoderdataPython |
1656003 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def removeElements(self, head, val):
"""
:type head: ListNode
:type val: int
:rtype: ListNode
"""
        if head is None:
return head
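        # A dummy node before head simplifies removing matching nodes at the
        # front of the list; i trails behind as the last kept node.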
dummy = ListNode(-1)
i = dummy
i.next = head
j = head
        while j is not None:
            if j.val == val:
j = j.next
continue
i.next = j
i = i.next
j = j.next
i.next = None
return dummy.next
    # A more concise implementation (note: this definition shadows the one above)
def removeElements(self, head, val):
"""
:type head: ListNode
:type val: int
:rtype: ListNode
"""
p = ListNode(-1)
        p.next, h = head, p
while p.next:
if p.next.val==val:
p.next = p.next.next
continue
p = p.next
        return h.next
| StarcoderdataPython |
1728243 | <filename>btclib/address.py
import base64
from abc import ABC, abstractmethod
from hashlib import sha256, new as hashlib_new
from ecdsa import SigningKey, SECP256k1, VerifyingKey
from ecdsa.keys import BadSignatureError
from ecdsa.util import sigencode_der, sigencode_string, sigdecode_string
from base58check import b58encode, b58decode
from sympy import sqrt_mod
from btclib.const import PREFIXES, MAX_ORDER, SIGHASHES, P, DEFAULT_WITNESS_VERSION, DEFAULT_NETWORK
from btclib.utils import d_sha256, get_address_network, validate_address, \
get_address_type, get_magic_hash, int2bytes, bytes2int, pprint_class
from btclib.script import Script
from btclib.services import NetworkAPI, Unspent
from btclib import exceptions
from btclib import bech32
class PrivateKey:
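    """
    secp256k1 private key with WIF import/export, message signing and
    transaction signing.

    A minimal usage sketch (illustrative only; no network access required):

        pv = PrivateKey()                                # generate a new key
        wif = pv.to_wif('testnet')                       # export as WIF
        assert PrivateKey(wif).bytes == pv.bytes         # round-trip import
        address = pv.pub.get_address('P2WPKH', 'testnet')
    """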
def __init__(self, wif: str = None):
if hasattr(self, '_from_bytes'):
key = SigningKey.from_string(self._from_bytes, SECP256k1)
elif wif is None:
key = SigningKey.generate(SECP256k1)
else:
key = self._from_wif(wif)
self.key = key
self.pub = self._get_public_key()
self.bytes = self.key.to_string()
@staticmethod
def _from_wif(wif: str) -> SigningKey:
data = b58decode(wif.encode('utf-8'))
key = data[:-4]
checksum = data[-4:]
h = d_sha256(key)
if not checksum == h[0:4]:
raise exceptions.InvalidWIF(wif)
        key = key[1:]  # strip the network prefix byte
        key = key[:-1] if len(key) > 32 else key  # strip the compression flag if present
return SigningKey.from_string(key, SECP256k1)
@classmethod
def from_bytes(cls, pv_bytes: bytes):
ins = cls.__new__(cls)
ins._from_bytes = pv_bytes
ins.__init__()
del ins._from_bytes
return ins
def to_wif(self, network: str = DEFAULT_NETWORK, *, compressed: bool = True) -> str:
data = PREFIXES['wif'][network] + self.key.to_string() + (b'\x01' if compressed else b'')
h = d_sha256(data)
checksum = h[0:4]
wif = b58encode(data + checksum)
return wif.decode('utf-8')
def to_bytes(self) -> bytes:
return self.key.to_string()
def _get_public_key(self) -> 'PublicKey':
return PublicKey('04' + self.key.get_verifying_key().to_string().hex())
def sign_message(self, message: str, *, compressed: bool = True) -> str:
digest = get_magic_hash(message)
sig = self.key.sign_digest_deterministic(digest, sha256, sigencode_string)
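        # Recovery header byte: 27-30 flags an uncompressed key, 31-34 a
        # compressed one; the low bits select which recovered candidate
        # key matches our public key.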
rec_id = 31 if compressed else 27
keys = VerifyingKey.from_public_key_recovery_with_digest(sig, digest, SECP256k1)
for i, key in enumerate(keys):
if key.to_string() == self.pub.bytes:
rec_id += i
break
return base64.b64encode(int2bytes(rec_id) + sig).decode()
def sign_tx(self, tx_hash: bytes, sighash: int = SIGHASHES['all']) -> str:
sig = self.key.sign_digest_deterministic(tx_hash, sha256, sigencode_der)
pref = sig[0]
full_len = sig[1]
der_type = sig[2]
r_len = sig[3]
r = sig[4:4 + r_len]
s_len = sig[5 + r_len]
s = sig[6 + r_len:]
s_bigint = int(s.hex(), 16)
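        # Enforce a low-S signature (BIP 62 malleability rule): if s lies in the
        # upper half of the curve order, substitute order - s.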
half_order = MAX_ORDER // 2
if s_bigint > half_order:
assert s_len == 0x21
new_s_bigint = MAX_ORDER - s_bigint
new_s = bytes.fromhex(format(new_s_bigint, 'x').zfill(64))
assert len(new_s) == 0x20
s_len -= 1
full_len -= 1
else:
new_s = s
new_sig = bytes([pref, full_len, der_type, r_len]) + r + bytes([der_type, s_len]) + new_s + bytes([sighash])
return new_sig.hex()
class PublicKey:
def __init__(self, pb_hex: str):
pb_bytes = bytes.fromhex(pb_hex)
fb, pb_bytes = pb_bytes[0], pb_bytes[1:]
if len(pb_bytes) <= 33: # compressed check
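            # Recover y from x via the curve equation y^2 = x^3 + 7 (mod p);
            # the 0x02/0x03 prefix selects the even/odd root.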
x_coord = bytes2int(pb_bytes)
y_values = sqrt_mod((x_coord ** 3 + 7) % P, P, True)
even, odd = sorted(y_values, key=lambda x: x % 2 != 0)
y_coord = even if fb == 0x02 else odd if fb == 0x03 else None
if y_coord is None:
raise exceptions.InvalidCompressionFormat(pb_hex)
uncompressed_hex = '%0.64X%0.64X' % (x_coord, y_coord)
pb_bytes = bytes.fromhex(uncompressed_hex)
self.key = VerifyingKey.from_string(pb_bytes, SECP256k1)
self.bytes = self.key.to_string()
@classmethod
def from_signed_message(cls, sig_b64: str, message: str):
sig = base64.b64decode(sig_b64.encode())
if len(sig) != 65:
raise exceptions.InvalidSignatureLength(len(sig))
digest = get_magic_hash(message)
rec_id, sig = sig[0], sig[1:]
if 27 <= rec_id <= 30:
rec_id -= 27
elif 31 <= rec_id <= 34:
rec_id -= 31
else:
raise exceptions.InvalidRecoveryID(rec_id)
keys = VerifyingKey.from_public_key_recovery_with_digest(sig, digest, SECP256k1)
return cls('04' + keys[rec_id].to_string().hex())
@classmethod
def verify_message_for_address(cls, sig_b64: str, message: str, address: str) -> bool:
"""
        WARNING! Bitcoin Core's default message verification supports only P2PKH addresses. That works
        because one PublicKey maps to exactly one P2PKH address.
        With segwit and P2SH addresses it gets harder, since one PublicKey does not map to a single
        P2SH/P2WPKH/P2WSH address.
        verify_message_for_address nevertheless supports all address types: it checks whether the
        P2SH/P2WPKH/P2WSH address was generated with the PublicKey.get_address algorithm.
        This means an address obtained from the same public key by a different method will make
        verify_message_for_address return False; keep this in mind (in that situation you can use
        PublicKey.from_signed_message() and check yourself whether the needed address can be derived
        from the obtained public key). More details: https://github.com/bitcoin/bitcoin/issues/10542
:param sig_b64: String signature in base64 encoding.
:param message: Message for signature.
:param address: Address for check
"""
pub = cls.from_signed_message(sig_b64, message)
network = get_address_network(address)
address_type = 'P2SH-P2WPKH' if (address_type := get_address_type(address)) == 'P2SH' else address_type
if pub.get_address(address_type, network).string == address:
return True
return False
def verify_message(self, sig_b64: str, message: str):
magic_hash = get_magic_hash(message)
try:
return self.key.verify_digest(
base64.b64decode(sig_b64.encode())[1:],
magic_hash,
sigdecode=sigdecode_string
)
except BadSignatureError:
return False
def get_hash160(self, *, compressed: bool = True) -> str:
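        # HASH160 = RIPEMD-160(SHA-256(public key)), the digest used in
        # P2PKH and P2WPKH addresses.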
h = sha256(bytes.fromhex(self.to_hex(compressed=compressed))).digest()
ripemd160 = hashlib_new('ripemd160')
ripemd160.update(h)
return ripemd160.digest().hex()
def to_hex(self, *, compressed: bool = True) -> str:
key_hex = self.key.to_string().hex()
key_hex = (('02' if int(key_hex[-2:], 16) % 2 == 0 else '03') + key_hex[:64]) if compressed else '04' + key_hex
return key_hex
def get_address(self, address_type: str, network: str = DEFAULT_NETWORK) -> 'AbstractBitcoinAddress':
if address_type in ('P2PKH', 'P2WPKH'):
cls = {'P2PKH': P2PKH, 'P2WPKH': P2WPKH}.get(address_type)
hash_ = self.get_hash160()
elif address_type == 'P2SH-P2WPKH':
cls = P2SH
ripemd160 = hashlib_new('ripemd160')
ripemd160.update(sha256(Script('OP_0', self.get_hash160()).to_bytes()).digest())
hash_ = ripemd160.digest().hex()
elif address_type == 'P2WSH':
cls = P2WSH
witness_script = Script('OP_1', self.to_hex(), 'OP_1', 'OP_CHECKMULTISIG').to_bytes()
hash_ = sha256(witness_script).digest().hex()
else:
raise exceptions.UnsupportedAddressType(address_type)
return cls.from_hash(hash_, network)
class AbstractBitcoinAddress(ABC):
@property
@abstractmethod
def type(self) -> str:
...
def __init__(self, address: str):
self.string = address
self.network: str = get_address_network(address)
if self.network is None or not validate_address(self.string, self.type, self.network):
raise exceptions.InvalidAddress(self.string, self.type, self.network)
self.hash = self._get_hash()
self.script_pub_key: Script = self._get_script_pub_key()
def __str__(self):
return self.string
def __repr__(self):
return pprint_class(self, [self.__str__().__repr__()])
@abstractmethod
def from_hash(self, hash_: str, network: str, **kwargs) -> 'AbstractBitcoinAddress':
...
def change_network(self, network: str = None) -> 'AbstractBitcoinAddress':
if network == self.network:
return self
cls = type(self)
network = network if network is not None else ('mainnet' if self.network == 'testnet' else 'testnet')
return cls.from_hash(self.hash, network)
def get_info(self) -> dict:
return getattr(NetworkAPI, 'get_address_info' + ('_testnet' if self.network == 'testnet' else ''))(self.string)
def get_unspents(self) -> list[Unspent]:
return getattr(NetworkAPI, 'get_unspent' + ('_testnet' if self.network == 'testnet' else ''))(self.string)
@abstractmethod
def _get_hash(self) -> str:
...
@abstractmethod
def _get_script_pub_key(self) -> Script:
...
class DefaultAddress(AbstractBitcoinAddress, ABC):
@classmethod
def from_hash(cls, hash_: str, network: str = DEFAULT_NETWORK) -> 'DefaultAddress':
return cls(cls._b58encode(bytes.fromhex(hash_), network))
@classmethod
def _get_prefix(cls, network: str):
return PREFIXES[cls.type][network]
def _get_hash(self) -> str:
return self._b58decode(self.string)
@classmethod
def _b58encode(cls, data: bytes, network: str) -> str:
raw_address_bytes = cls._get_prefix(network) + data
raw_address_hash = d_sha256(raw_address_bytes)
address = b58encode(raw_address_bytes + raw_address_hash[0:4]).decode('utf-8')
return address
@staticmethod
def _b58decode(address: str) -> str:
hash160_bytes = b58decode(address.encode())[1:-4]
return hash160_bytes.hex()
class P2PKH(DefaultAddress):
type = 'P2PKH'
def _get_script_pub_key(self) -> Script:
return Script('OP_DUP', 'OP_HASH160', self.hash, 'OP_EQUALVERIFY', 'OP_CHECKSIG')
class P2SH(DefaultAddress):
type = 'P2SH'
def _get_script_pub_key(self) -> Script:
return Script('OP_HASH160', self.hash, 'OP_EQUAL')
class SegwitAddress(AbstractBitcoinAddress, ABC):
def __init__(self, address: str):
super().__init__(address)
self.version: int = self._bech32decode(address, self.network)[0]
@classmethod
def from_hash(cls, hash_: str, network: str = DEFAULT_NETWORK, *,
version: int = DEFAULT_WITNESS_VERSION) -> 'SegwitAddress':
return cls(cls._bech32encode(bytes.fromhex(hash_), network, version=version))
def _get_hash(self) -> str:
_, int_list = self._bech32decode(self.string, self.network)
return bytes(int_list).hex()
def _get_script_pub_key(self) -> Script:
return Script('OP_0', self.hash)
@staticmethod
def _bech32encode(data: bytes, network: str, *, version: int) -> str:
return bech32.encode(PREFIXES['bech32'][network], version, list(data))
@staticmethod
def _bech32decode(address: str, network: str) -> tuple:
return bech32.decode(PREFIXES['bech32'][network], address)
class P2WPKH(SegwitAddress):
type = 'P2WPKH'
class P2WSH(SegwitAddress):
type = 'P2WSH'
class Address:
def __new__(cls, address: str) -> AbstractBitcoinAddress:
"""
        Returns a P2PKH/P2SH/P2WPKH/P2WSH instance. An instance of this class itself cannot be obtained.
"""
addr_type = get_address_type(address)
addr_cls = {
'P2PKH': P2PKH,
'P2SH': P2SH,
'P2WPKH': P2WPKH,
'P2WSH': P2WSH
}.get(addr_type)
if addr_cls is None:
raise exceptions.InvalidAddress(address)
return addr_cls(address)
@staticmethod
def from_script_pub_key(data: Script | str, network: str = DEFAULT_NETWORK) -> AbstractBitcoinAddress:
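        """
        Infer the address type from a scriptPubKey by matching it against the
        standard P2PKH/P2SH/P2WPKH/P2WSH output templates, then rebuild the
        address from the embedded hash.
        """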
script = data if isinstance(data, Script) else Script.from_raw(data)
script_len = len(script)
p2pkh = {
0: 'OP_DUP',
1: 'OP_HASH160',
3: 'OP_EQUALVERIFY',
4: 'OP_CHECKSIG'
}
p2sh = {
0: 'OP_HASH160',
-1: 'OP_EQUAL'
}
segwit = {
0: 'OP_0'
}
default_script_lens = {
5: (p2pkh, P2PKH, 2),
3: (p2sh, P2SH, 1),
}
segwit_script_lens = {
40: P2WPKH,
64: P2WSH
}
check = lambda dict_: all([script.script[index] == value for index, value in dict_.items()])
if default_script_lens.get(script_len) is not None: # if p2pkh/p2sh address
to_check, cls, hash_index = default_script_lens[script_len]
if check(to_check):
return cls.from_hash(script.script[hash_index], network)
elif script_len == 2 and check(segwit): # if segwit address
hs = script.script[1]
return segwit_script_lens[len(hs)].from_hash(hs)
raise exceptions.InvalidScriptPubKey(data)
| StarcoderdataPython |