id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8197014 | #!/usr/bin/env python
# Designed for use with boofuzz v0.0.9
from boofuzz import *
def main():
    """Fuzz the VNC/RFB protocol-version handshake against localhost:5900."""
    # Session wires the fuzzer to the target over plain TCP.
    session = Session(
        target=Target(
            connection=SocketConnection("127.0.0.1", 5900, proto='tcp')
        ),
    )

    # Request definition mirroring the RFB greeting "RFB 003.008\n";
    # each primitive is a separately mutated field.
    s_initialize(name="Handshake")
    with s_block("ProtocolVersion"):
        s_string("RFB", name='RFB')
        s_delim(" ", name='space-1')
        s_string("003", name='Version1')
        s_delim(".", name='space-2')
        s_string('008', name='Version2')
        s_delim("\n", name="end")

    # Attach the request to the session graph and start fuzzing.
    session.connect(s_get("Handshake"))
    session.fuzz()

if __name__ == "__main__":
    main()
| StarcoderdataPython |
1634198 | from collections import namedtuple, defaultdict
# A single entry in a menu: which section it lives in, its sort order,
# the visible label, and the link target.
MenuItem = namedtuple("MenuItem", ["section", "order", "label", "url"])


def menu_order(item):
    """Sort key for menu items: their explicit ``order`` value."""
    return item.order
class MenuRegistry(object):
    """Collects menu-item callbacks and groups their results by section."""

    def __init__(self):
        # Callables taking a request and returning a MenuItem (or None).
        self.callbacks = []

    def register(self, func):
        """Register *func* to be polled when menu items are gathered."""
        self.callbacks.append(func)

    def get_menu_items(self, request):
        """Return {section: [MenuItem, ...]} sorted by order, or None.

        Returns None (not an empty mapping) when *request* is None.
        Callbacks returning None are skipped.
        """
        if request is None:
            return
        sections = defaultdict(list)
        for produced in (cb(request) for cb in self.callbacks):
            if produced is not None:
                sections[produced.section].append(produced)
        for key in sections:
            sections[key].sort(key=menu_order)
        return sections


# Module-level singleton used by the application.
menu_registry = MenuRegistry()
| StarcoderdataPython |
355665 | # -*- coding:utf-8 -*-
import requests
import json
from bs4 import BeautifulSoup
from gevent import monkey
monkey.patch_all()
import gevent
import time
import csv
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class TencentSpider(object):
    """Scrape job postings from hr.tencent.com and export them to CSV.

    Python 2 script (uses raw_input / reload(sys) at module level).
    Flow per page: fetch -> parse with BeautifulSoup -> accumulate into
    self.job_list -> serialize to JSON -> convert JSON to tencent.csv.
    """

    def __init__(self):
        # Listing endpoint; pagination is driven by the "start" query param.
        self.base_url = "https://hr.tencent.com/position.php?"
        self.headers = {"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"}
        # Page range is read interactively (prompts are in Chinese:
        # "enter start page" / "enter end page").
        self.begin_page = int(raw_input("请输入起始页:"))
        self.end_page = int(raw_input("请输入结束页:"))
        # Accumulates one dict per job row across all fetched pages.
        self.job_list = []

    def send_request(self, params={}):
        # Return the response object itself; callers take .content as needed.
        # NOTE(review): mutable default `params={}` is a smell, though it is
        # never mutated here.
        response = requests.get(self.base_url, params=params, headers=self.headers)
        return response

    def html_page(self, html):
        """Parse one listing page and append job dicts to self.job_list."""
        # bs4 CSS selectors
        soup = BeautifulSoup(html, 'lxml')
        # Rows alternate the "odd"/"even" classes; each row holds the job
        # title (with detail link), category, headcount, location, date.
        tr_list = soup.find_all("tr", {"class": ["odd", "even"]})
        for tr in tr_list:
            job = {}
            # First 5 chars / chars from index 6 of the anchor text -- assumes
            # a fixed "NNNNN-title" layout; TODO confirm against live markup.
            job["job_no"] = tr.select("td a")[0].get_text()[:5]
            job["job_name"] = tr.select("td a")[0].get_text()[6:]
            # Absolute link to the job detail page
            job["job_link"] = "https://hr.tencent.com/" + tr.select("td a")[0].get("href")
            job["job_type"] = tr.select("td")[1].get_text()
            job["job_num"] = tr.select("td")[2].get_text()
            job["job_position"] = tr.select("td")[3].get_text()
            job["update_time"] = tr.select("td")[4].get_text()
            self.job_list.append(job)

    def write_page(self, job_list):
        """Serialize the accumulated job list to a JSON string."""
        # Alternative: dump straight to a file
        # json_file = json.dump(job_list, open('./tencentjob.json', 'w'))
        json_file = json.dumps(job_list)  # convert to a JSON string
        return json_file

    def json_csv(self, json_file):
        """Convert the JSON string of jobs into tencent.csv (overwritten)."""
        # json_file = open(json_file, "r")
        #
        csv_file = open("tencent.csv", "w")
        # Parse the JSON string back into Python objects (loads, not load).
        item_list = json.loads(json_file)
        # CSV writer bound to the output file.
        csv_writer = csv.writer(csv_file)
        # Header row: field names taken from the first record (Python 2
        # dicts -- ordering is arbitrary but consistent within one run).
        sheet_head = item_list[0].keys()
        # Data rows: one list of values per record.
        sheet_data = [item.values() for item in item_list]
        # Write header first, then all data rows.
        csv_writer.writerow(sheet_head)
        csv_writer.writerows(sheet_data)
        # Close the file to flush and persist the data.
        csv_file.close()
        # json_file.close()

    def main(self):
        """Fetch the configured page range and write the CSV output."""
        for page in range(self.begin_page, self.end_page + 1):
            # 10 listings per page
            pn = (page - 1)*10
            # Pagination parameter, e.g.
            # https://hr.tencent.com/position.php?start=10
            params = {"start": pn}
            # 1. Fetch the raw page bytes
            page_html = self.send_request(params).content
            # 2. Parse the page
            self.html_page(page_html)
            # 3. Serialize everything collected so far to JSON
            json_file = self.write_page(self.job_list)
            # print "in work %s" % gevent.getcurrent()
            # 4. Rewrite the CSV from the JSON snapshot
            self.json_csv(json_file)
if __name__ == '__main__':
spider = TencentSpider()
spider.main()
# TencentSpider().main()
start = time.time()
g1 = gevent.spawn(spider.main())
g2 = gevent.spawn(spider.main())
g3 = gevent.spawn(spider.main())
gevent.joinall([g1, g2, g3])
end = time.time()
print end - start
| StarcoderdataPython |
9667755 | """Methods for working with i2c devices."""
__author__ = '<NAME>'
import time
try:
import smbus
except ImportError:
import smbus2 as smbus
class I2CDevice:
    """Base class wrapping an smbus connection to one I2C peripheral."""

    # Pause inserted after every write so slow peripherals can settle.
    _WRITE_DELAY = 0.0001

    def __init__(self, addr, port=1):
        # type: (int, int) -> None
        """Open the bus and remember the device address.

        :param addr: address of i2c device
        :param port: i2c bus: 0 - original Pi, 1 - Rev 2 Pi
        """
        self.addr = addr
        self.bus = smbus.SMBus(port)

    def write_byte(self, cmd):
        # type: (int) -> None
        """Send a single command byte."""
        self.bus.write_byte(self.addr, cmd)
        time.sleep(self._WRITE_DELAY)

    def write_byte_data(self, cmd, data):
        # type: (int, int) -> None
        """Send a command byte followed by one argument byte."""
        self.bus.write_byte_data(self.addr, cmd, data)
        time.sleep(self._WRITE_DELAY)

    def write_block_data(self, cmd, data):
        # type: (int, int) -> None
        """Send a command byte followed by a block of data."""
        self.bus.write_block_data(self.addr, cmd, data)
        time.sleep(self._WRITE_DELAY)

    def read_byte(self):
        # type: () -> int
        """Receive a single byte from the device."""
        return self.bus.read_byte(self.addr)

    def read_byte_data(self, cmd):
        # type: (int) -> int
        """Receive one byte in response to a command byte."""
        return self.bus.read_byte_data(self.addr, cmd)

    def read_block_data(self, cmd):
        # type: (int) -> int
        """Receive a block of data in response to a command byte."""
        return self.bus.read_block_data(self.addr, cmd)
| StarcoderdataPython |
6638858 | <reponame>terror/Solutions
# For each test case: read n (consumed but otherwise unused), read the list,
# and print twice the spread (max - min).
for _ in range(int(input())):
    n = int(input())
    p = list(map(int, input().split()))
    # Hoist the duplicated work: the original evaluated max(p) and min(p)
    # twice each; compute the spread once.
    spread = max(p) - min(p)
    print(2 * spread)
| StarcoderdataPython |
6518023 | """
Project: Visual Odometry
Name : Heru-05 | M09158023
Date : 10/06/2021
"""
import sys
import math
from enum import Enum
import numpy as np
import cv2
# https://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf
# adapated from https://www.pyimagesearch.com/2015/04/13/implementing-rootsift-in-python-and-opencv/
class RootSIFTFeature2D:
    """SIFT wrapper that applies the Hellinger-kernel transform (RootSIFT).

    See Arandjelovic & Zisserman 2012; adapted from the pyimagesearch
    RootSIFT implementation.
    """

    def __init__(self, feature):
        # Underlying OpenCV SIFT detector/extractor.
        self.feature = feature

    def detect(self, frame, mask=None):
        """Detect keypoints only (no descriptors)."""
        return self.feature.detect(frame, mask)

    def transform_descriptors(self, des, eps=1e-7):
        """Hellinger kernel: L1-normalize each row (eps-stabilized), then
        take the element-wise square root. Mutates *des* in place for the
        normalization step and returns the transformed array."""
        des /= (des.sum(axis=1, keepdims=True) + eps)
        return np.sqrt(des)

    def compute(self, frame, kps, eps=1e-7):
        """Compute RootSIFT descriptors for the supplied keypoints.

        Returns ([], None) when no keypoints/descriptors are produced,
        otherwise (keypoints, transformed descriptors).
        """
        (kps, des) = self.feature.compute(frame, kps)
        if len(kps) == 0:
            return ([], None)
        return (kps, self.transform_descriptors(des))

    def detectAndCompute(self, frame, mask=None):
        """Detect keypoints and compute their RootSIFT descriptors.

        Returns ([], None) when nothing is detected.
        """
        (kps, des) = self.feature.detectAndCompute(frame, mask)
        if len(kps) == 0:
            return ([], None)
        return (kps, self.transform_descriptors(des))
| StarcoderdataPython |
9758661 | <filename>subdomain_takeover_tools/extract_domain_names.py
import re
import sys
import tldextract
def main():
    """Read subdomains from stdin, one per line, and print each one's
    registered domain."""
    for raw_line in sys.stdin:
        print(extract_domain_name(raw_line))
def extract_domain_name(subdomain):
    """Return the registered domain(s) for *subdomain*.

    Lines containing a "(" are treated as alternation patterns like
    "x.(a|b)" and expanded; everything else is extracted directly.
    """
    if "(" not in subdomain:
        return _extract_single_domain_name(subdomain)
    return _handle_pattern(subdomain)
def _handle_pattern(subdomain):
    """Expand "prefix.(a|b)" alternation notation into one registered
    domain per line."""
    match = re.search(r'^(.*\.)\(([a-z.|]+)\)', subdomain)
    if match is None:
        # False positive: "(" present but not a real alternation pattern.
        return _extract_single_domain_name(subdomain)
    prefix = match.group(1)
    alternatives = match.group(2).split('|')
    expanded = [_extract_single_domain_name(prefix + alt)
                for alt in alternatives]
    return '\n'.join(expanded).strip()
def _extract_single_domain_name(subdomain):
    """Strip any parentheses and return the registered domain
    ("domain.suffix") via tldextract."""
    cleaned = subdomain.translate(str.maketrans('', '', '()'))
    parts = tldextract.extract(cleaned)
    # Last two components of the ExtractResult are (domain, suffix).
    return '.'.join(parts[-2:])


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1639904 |
'''
demo for single image
'''
import numpy as np
import cv2
import face_recognition
from face import Face
from utils import putText
from utils import preprocess_input
# Load the age/gender/race model in inference mode and its trained weights.
model = Face(train=False)
model.load_weights('./face_weights/face_weights.26-val_loss-3.85-val_age_loss-3.08-val_gender_loss-0.22-val_race_loss-0.55.utk.h5')

# Label sets matching the model's three output heads.
gender_labels = ['Male', 'Female']
race_labels = ['Whites', 'Blacks', 'Asian', 'Indian', 'Others']
#https://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Rothe_DEX_Deep_EXpectation_ICCV_2015_paper.pdf
# Ages 1..93 as a column vector; expected age = age-probabilities . age_labels (DEX).
age_labels = np.reshape(np.arange(1, 94), (93,1))

demo_image = cv2.imread('./demo_images/how-old-demo5.jpg')
image_h, image_w = demo_image.shape[0], demo_image.shape[1]
# Extra crop margin around each detected face, as a fraction of image size.
margin = 0.01

face_locations = face_recognition.face_locations(demo_image, model='hog')

if len(face_locations) > 0:
    # One 200x200x3 slot per detected face.
    face_batch = np.empty((len(face_locations), 200, 200, 3))

    # add face images into batch
    for i,rect in enumerate(face_locations):
        # crop with a margin; face_recognition rects are (top, right, bottom, left)
        top, bottom, left, right = rect[0], rect[2], rect[3], rect[1]
        top = max(int(top - image_h * margin), 0)
        left = max(int(left - image_w * margin), 0)
        bottom = min(int(bottom + image_h * margin), image_h - 1)
        right = min(int(right + image_w * margin), image_w - 1)

        face_img = demo_image[top:bottom, left:right, :]
        face_img = cv2.resize(face_img, (200, 200))
        face_batch[i, :, :, :] = face_img

    # Normalize the batch and run one prediction for all faces.
    face_batch = preprocess_input(face_batch)
    preds = model.predict(face_batch)
    preds_ages = preds[0]
    preds_genders = preds[1]
    preds_races = preds[2]

    # display on screen: box each face and annotate gender/race/age above it
    for rect, age, gender, race in zip(face_locations, preds_ages, preds_genders, preds_races):
        cv2.rectangle(demo_image, (rect[3], rect[0]), (rect[1], rect[2]), (255, 0, 0), 2)
        age = np.expand_dims(age, 0)
        # https://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Rothe_DEX_Deep_EXpectation_ICCV_2015_paper.pdf
        # Expected value of the age distribution (DEX), rounded down via int().
        age_data = int(age.dot(age_labels).flatten())
        gender_index = np.argmax(gender)
        race_index = np.argmax(race)
        demo_image = putText(demo_image, 'gender: {0}'.format(gender_labels[gender_index]), (255, 0, 0), (rect[3], rect[0]-16), size=15)
        demo_image = putText(demo_image, 'race: {0}'.format(race_labels[race_index]), (255, 0, 0), (rect[3], rect[0]-32), size=15)
        demo_image = putText(demo_image, 'age: {0}'.format(age_data), (255, 0, 0), (rect[3], rect[0]-48), size=15)

# Show the (possibly annotated) image until 'q' is pressed.
cv2.imshow('image', demo_image)
if cv2.waitKey(0) & 0xff == ord("q"):
    cv2.destroyAllWindows()
| StarcoderdataPython |
17723 | # vim: set encoding=utf-8
import re
from lxml import etree
import logging
from regparser import content
from regparser.tree.depth import heuristics, rules, markers as mtypes
from regparser.tree.depth.derive import derive_depths
from regparser.tree.struct import Node
from regparser.tree.paragraph import p_level_of
from regparser.tree.xml_parser.appendices import build_non_reg_text
from regparser.tree import reg_text
from regparser.tree.xml_parser import tree_utils
from settings import PARAGRAPH_HIERARCHY
def get_reg_part(reg_doc):
    """
    Depending on source, the CFR part number exists in different places. Fetch
    it, wherever it is.
    """
    candidates = []
    # FR notice
    candidates.extend(node.attrib['PART']
                      for node in reg_doc.xpath('//REGTEXT'))
    # e-CFR XML, under PART/EAR
    candidates.extend(node.text.replace('Pt.', '').strip()
                      for node in reg_doc.xpath('//PART/EAR')
                      if 'Pt.' in node.text)
    # e-CFR XML, under FDSYS/HEADING
    candidates.extend(node.text.replace('PART', '').strip()
                      for node in reg_doc.xpath('//FDSYS/HEADING')
                      if 'PART' in node.text)
    # e-CFR XML, under FDSYS/GRANULENUM
    candidates.extend(node.text.strip()
                      for node in reg_doc.xpath('//FDSYS/GRANULENUM'))
    # Drop whitespace-only hits; first remaining candidate wins.
    candidates = [c for c in candidates if c.strip()]
    if candidates:
        return candidates[0]
def get_title(reg_doc):
    """ Extract the title of the regulation. """
    # The first PART/HD heading holds the regulation title.
    return reg_doc.xpath('//PART/HD')[0].text
def preprocess_xml(xml):
    """This transforms the read XML through macros. Each macro consists of
    an xpath and a replacement xml string"""
    for path, replacement in content.Macros():
        # Wrap in a ROOT element so the replacement string may contain
        # multiple sibling elements and still parse as one document.
        replacement = etree.fromstring('<ROOT>' + replacement + '</ROOT>')
        for node in xml.xpath(path):
            parent = node.getparent()
            idx = parent.index(node)
            parent.remove(node)
            # Splice the replacement's children in at the removed node's
            # position, preserving their relative order.
            for repl in replacement:
                parent.insert(idx, repl)
                idx += 1
def build_tree(reg_xml):
    """Build the full regulation Node tree from an XML string or element.

    The root Node is labeled with the CFR part; children are either the
    parsed SUBPARTs or a synthetic "empty part" holding bare SECTIONs,
    followed by any non-regulation text (appendices etc.).
    """
    # Accept either raw XML text (str/unicode -- this file is Python 2)
    # or an already-parsed element.
    if isinstance(reg_xml, str) or isinstance(reg_xml, unicode):
        doc = etree.fromstring(reg_xml)
    else:
        doc = reg_xml
    preprocess_xml(doc)

    reg_part = get_reg_part(doc)
    title = get_title(doc)

    tree = Node("", [], [reg_part], title)

    part = doc.xpath('//PART')[0]

    subpart_xmls = [c for c in part.getchildren() if c.tag == 'SUBPART']
    if len(subpart_xmls) > 0:
        subparts = [build_subpart(reg_part, s) for s in subpart_xmls]
        tree.children = subparts
    else:
        # No subparts: collect the sections directly under a placeholder
        # "empty part" so the tree shape stays uniform.
        section_xmls = [c for c in part.getchildren() if c.tag == 'SECTION']
        sections = []
        for section_xml in section_xmls:
            sections.extend(build_from_section(reg_part, section_xml))
        empty_part = reg_text.build_empty_part(reg_part)
        empty_part.children = sections
        tree.children = [empty_part]

    non_reg_sections = build_non_reg_text(doc, reg_part)
    tree.children += non_reg_sections

    return tree
def get_subpart_title(subpart_xml):
    """Return the text of the subpart's first HD or RESERVED child."""
    headings = subpart_xml.xpath('./HD|./RESERVED')
    return headings[0].text
def build_subpart(reg_part, subpart_xml):
    """Build a subpart Node and populate it with its SECTION children."""
    subpart = reg_text.build_subpart(get_subpart_title(subpart_xml), reg_part)
    # Each SECTION element may expand to several section Nodes.
    subpart.children = [
        section
        for child in subpart_xml.getchildren()
        if child.tag == 'SECTION'
        for section in build_from_section(reg_part, child)
    ]
    return subpart
# @profile
def get_markers(text):
    """ Extract all the paragraph markers from text. Do some checks on the
    collapsed markers."""
    markers = tree_utils.get_paragraph_markers(text)
    collapsed_markers = tree_utils.get_collapsed_markers(text)

    # Check that the collapsed markers make sense (i.e. are at least one
    # level below the initial marker)
    if markers and collapsed_markers:
        initial_marker_levels = p_level_of(markers[-1])
        final_collapsed_markers = []
        for collapsed_marker in collapsed_markers:
            collapsed_marker_levels = p_level_of(collapsed_marker)
            # Keep only collapsed markers that can nest strictly beneath
            # some level of the last explicit marker.
            if any(c > f for f in initial_marker_levels
                   for c in collapsed_marker_levels):
                final_collapsed_markers.append(collapsed_marker)
        collapsed_markers = final_collapsed_markers
    # Explicit markers first, then the surviving collapsed ones.
    markers_list = [m for m in markers] + [m for m in collapsed_markers]
    return markers_list
def get_markers_and_text(node, markers_list):
    """Pair each marker with the (plain, tagged) text slice it introduces.

    Returns a zip of (marker, (plain_text, tagged_text)). With a single
    marker the whole node text is used; with none, empty strings.
    """
    node_text = tree_utils.get_node_text(node, add_spaces=True)
    text_with_tags = tree_utils.get_node_text_tags_preserved(node)

    if len(markers_list) > 1:
        # Split the text at each "(m)" marker; the tagged variant keeps
        # emphasis tags, so strip them to build the plain-text markers.
        actual_markers = ['(%s)' % m for m in markers_list]
        plain_markers = [m.replace('<E T="03">', '').replace('</E>', '')
                         for m in actual_markers]
        node_texts = tree_utils.split_text(node_text, plain_markers)
        tagged_texts = tree_utils.split_text(text_with_tags, actual_markers)
        node_text_list = zip(node_texts, tagged_texts)
    elif markers_list:
        node_text_list = [(node_text, text_with_tags)]
    else:
        node_text_list = [('', '')]

    return zip(markers_list, node_text_list)
def next_marker(xml_node, remaining_markers):
    """Try to determine the marker following the current xml_node. Remaining
    markers is a list of other marks *within* the xml_node. May return
    None"""
    # More markers in this xml node
    if remaining_markers:
        return remaining_markers[0][0]

    # Check the next xml node; skip over stars/page breaks
    sibling = xml_node.getnext()
    while sibling is not None and sibling.tag in ('STARS', 'PRTPAGE'):
        sibling = sibling.getnext()
    if sibling is None:
        return None

    sibling_markers = get_markers(tree_utils.get_node_text(sibling))
    if sibling_markers:
        return sibling_markers[0]
def build_from_section(reg_part, section_xml):
    """Construct the Node tree(s) for a single SECTION element.

    Returns a list of section Nodes -- usually one, but several when the
    SECTNO covers a span (e.g. "§§ 1005.1-1005.3"). Paragraph depth is
    derived by constraint solving unless a manual hierarchy is configured
    in PARAGRAPH_HIERARCHY or supplied via per-paragraph "depth" XML
    attributes.
    """
    section_texts = []
    nodes = []
    section_no = section_xml.xpath('SECTNO')[0].text
    section_no_without_marker = re.search('[0-9]+\.[0-9]+',
                                          section_no).group(0)
    subject_xml = section_xml.xpath('SUBJECT')
    if not subject_xml:
        subject_xml = section_xml.xpath('RESERVED')
    subject_text = subject_xml[0].text

    manual_hierarchy = []
    if (reg_part in PARAGRAPH_HIERARCHY
            and section_no_without_marker in PARAGRAPH_HIERARCHY[reg_part]):
        manual_hierarchy = PARAGRAPH_HIERARCHY[reg_part][
            section_no_without_marker]

    # Collect paragraph markers and section text (intro text for the
    # section)
    i = 0
    children = [ch for ch in section_xml.getchildren()
                if ch.tag in ['P', 'STARS']]
    for ch in children:
        text = tree_utils.get_node_text(ch, add_spaces=True)
        tagged_text = tree_utils.get_node_text_tags_preserved(ch)
        markers_list = get_markers(tagged_text.strip())

        # If the child has a 'DEPTH' attribute, we're in manual
        # hierarchy mode, just constructed from the XML instead of
        # specified in configuration.
        # This presumes that every child in the section has DEPTH
        # specified, if not, things will break in and around
        # derive_depths below.
        if ch.get("depth") is not None:
            manual_hierarchy.append(int(ch.get("depth")))

        if ch.tag == 'STARS':
            nodes.append(Node(label=[mtypes.STARS_TAG]))
        elif not markers_list and manual_hierarchy:
            # is this a bunch of definitions that don't have numbers next to
            # them?
            if len(nodes) > 0:
                # NOTE(review): the second operand below calls .find()
                # without comparing to -1, so it is truthy unless the phrase
                # occurs at index 0 -- looks like a latent bug; confirm.
                if (subject_text.find('Definitions.') > -1
                        or nodes[-1].text.find(
                            'For the purposes of this section')):
                    # TODO: create a grammar for definitions
                    # Synthesize a CamelCase marker from the defined term.
                    if text.find('means') > -1:
                        def_marker = text.split('means')[0].strip().split()
                        def_marker = ''.join([word[0].upper() + word[1:]
                                              for word in def_marker])
                    elif text.find('shall have the same meaning') > -1:
                        def_marker = text.split('shall')[0].strip().split()
                        def_marker = ''.join([word[0].upper() + word[1:]
                                              for word in def_marker])
                    else:
                        def_marker = 'def{0}'.format(i)
                        i += 1
                    n = Node(text, label=[def_marker], source_xml=ch)
                    n.tagged_text = tagged_text
                    nodes.append(n)
                else:
                    section_texts.append((text, tagged_text))
            else:
                if len(children) > 1:
                    def_marker = 'def{0}'.format(i)
                    n = Node(text, [], [def_marker], source_xml=ch)
                    n.tagged_text = tagged_text
                    i += 1
                    nodes.append(n)
                else:
                    # this is the only node around
                    section_texts.append((text, tagged_text))
        elif not markers_list and not manual_hierarchy:
            # No manual heirarchy specified, append to the section.
            section_texts.append((text, tagged_text))
        else:
            # One Node per marker found within this paragraph element.
            for m, node_text in get_markers_and_text(ch, markers_list):
                n = Node(node_text[0], [], [m], source_xml=ch)
                n.tagged_text = unicode(node_text[1])
                nodes.append(n)
            # Trailing "* * *" marks elided content; record a placeholder.
            if node_text[0].endswith('* * *'):
                nodes.append(Node(label=[mtypes.INLINE_STARS]))

    # Trailing stars don't matter; slightly more efficient to ignore them
    while nodes and nodes[-1].label[0] in mtypes.stars:
        nodes = nodes[:-1]

    m_stack = tree_utils.NodeStack()

    # Use constraint programming to figure out possible depth assignments
    if not manual_hierarchy:
        depths = derive_depths(
            [node.label[0] for node in nodes],
            [rules.depth_type_order([mtypes.lower, mtypes.ints, mtypes.roman,
                                     mtypes.upper, mtypes.em_ints,
                                     mtypes.em_roman])])

    if not manual_hierarchy and depths:
        # Find the assignment which violates the least of our heuristics
        depths = heuristics.prefer_multiple_children(depths, 0.5)
        depths = sorted(depths, key=lambda d: d.weight, reverse=True)
        depths = depths[0]
        for node, par in zip(nodes, depths):
            if par.typ != mtypes.stars:
                last = m_stack.peek()
                # Strip emphasis tags from labels before stacking.
                node.label = [l.replace('<E T="03">', '').replace('</E>', '')
                              for l in node.label]
                if len(last) == 0:
                    m_stack.push_last((1 + par.depth, node))
                else:
                    m_stack.add(1 + par.depth, node)
    elif nodes and manual_hierarchy:
        logging.warning('Using manual depth hierarchy.')
        depths = manual_hierarchy
        if len(nodes) == len(depths):
            for node, spec in zip(nodes, depths):
                # Each spec is a bare depth int or a (depth, marker) pair.
                if isinstance(spec, int):
                    depth = spec
                elif isinstance(spec, tuple):
                    depth, marker = spec
                    node.marker = marker
                last = m_stack.peek()
                node.label = [l.replace('<E T="03">', '').replace('</E>', '')
                              for l in node.label]
                if len(last) == 0:
                    m_stack.push_last((1 + depth, node))
                else:
                    m_stack.add(1 + depth, node)
        else:
            logging.error('Manual hierarchy length does not match node '
                          'list length! ({0} nodes but {1} provided, '
                          '{2})'.format(
                              len(nodes),
                              len(depths),
                              [x.label[0] for x in nodes]))
    elif nodes and not manual_hierarchy:
        # No consistent depth assignment found; fall back to a flat layout
        # at depth 3 so the content is at least preserved.
        logging.warning(
            'Could not determine depth when parsing {0}:\n{1}'.format(
                section_no_without_marker, [node.label[0] for node in nodes]))
        for node in nodes:
            last = m_stack.peek()
            node.label = [l.replace('<E T="03">', '').replace('</E>', '')
                          for l in node.label]
            if len(last) == 0:
                m_stack.push_last((3, node))
            else:
                m_stack.add(3, node)

    nodes = []
    section_nums = []
    for match in re.finditer(r'%s\.(\d+)' % reg_part, section_no):
        section_nums.append(int(match.group(1)))

    # Span of section numbers
    if u'§§' == section_no[:2] and '-' in section_no:
        first, last = section_nums
        section_nums = []
        for i in range(first, last + 1):
            section_nums.append(i)

    for section_number in section_nums:
        section_number = str(section_number)
        plain_sect_texts = [s[0] for s in section_texts]
        tagged_sect_texts = [s[1] for s in section_texts]

        section_title = u"§ " + reg_part + "." + section_number
        if subject_text:
            section_title += " " + subject_text

        section_text = ' '.join([section_xml.text] + plain_sect_texts)
        tagged_section_text = ' '.join([section_xml.text] + tagged_sect_texts)

        sect_node = Node(section_text, label=[reg_part, section_number],
                         title=section_title)
        sect_node.tagged_text = tagged_section_text

        # Collapse the paragraph stack under this section node.
        m_stack.add_to_bottom((1, sect_node))
        while m_stack.size() > 1:
            m_stack.unwind()
        nodes.append(m_stack.pop()[0][1])

    return nodes
| StarcoderdataPython |
11319760 | <gh_stars>10-100
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to manage metaparameters."""
from copy import deepcopy
from mtl.util import calc
import numpy as np
import torch
import torch.nn
class ParamManager(torch.nn.Parameter):
    """A tensor of metaparameter values with sampling/mutation helpers.

    Subclasses torch.nn.Parameter with requires_grad=False: values are
    sampled or mutated rather than learned by gradient descent.
    """

    def __new__(cls, shape=None):
        # Parameter storage must be allocated in __new__ (Parameter is a
        # Tensor subclass); contents are uninitialized until set_to_*().
        data = None if shape is None else torch.Tensor(*shape)
        return super().__new__(cls, data=data, requires_grad=False)

    def __init__(self):
        super().__init__()
        # True for real-valued parameters; False triggers discretization
        # in random_sample().
        self.is_continuous = True
        # (low, high) bounds consumed by random_sample(); left as None here,
        # so a subclass/caller must set it before sampling.
        self.valid_range = None

    def get_default(self):
        # Base returns None; presumably subclasses override. Note that
        # set_to_default() would then assign None into the data -- verify.
        return None

    def random_sample(self):
        # Sample within defined range with appropriate scaling
        vals = torch.rand(self.shape)
        vals = calc.map_val(vals, *self.valid_range)
        # Discretize if necessary.
        # NOTE(review): .astype is a NumPy method; if calc.map_val returns a
        # torch.Tensor this raises AttributeError -- confirm map_val's
        # return type.
        if not self.is_continuous:
            vals = vals.astype(int)
        return vals

    def set_to_default(self):
        """Overwrite all entries in place with the subclass default."""
        self.data[:] = self.get_default()

    def set_to_random(self):
        """Overwrite all entries in place with a fresh random sample."""
        self.data[:] = self.random_sample()

    def mutate(self, delta=.1):
        # Base class: no mutation; `delta` is the intended magnitude for
        # overriding subclasses.
        return None

    def copy(self):
        """Return a deep copy of this parameter manager."""
        return deepcopy(self)
class Metaparam(torch.nn.Module):
    """Container module for a group of metaparameters.

    Registered ParamManager attributes become this Module's parameters so
    they can be flattened to / restored from a single vector, and optionally
    driven by a small model (see parameterize()).
    """

    def __init__(self):
        super().__init__()
        # Whether this metaparam is driven by an external command signal.
        self.is_command = False
        # Keys currently produced by self.model, or None when not
        # parameterized.
        self.is_parameterized = None
        # Optional torch module whose output overwrites the parameters.
        self.model = None

    def parameters(self, keys=None):
        """Yield registered parameters, optionally filtered by top-level name.

        NOTE(review): overrides nn.Module.parameters with a different
        signature (`keys` instead of `recurse`).
        """
        for name, param in self.named_parameters():
            # Compare against the attribute name before any dotted suffix.
            tmp_name = name.split('.')[0]
            if keys is None or tmp_name in keys:
                yield param

    def get_params(self, keys):
        """Return the selected parameters flattened into one detached vector."""
        return torch.nn.utils.parameters_to_vector(self.parameters(keys)).detach()

    def update_params(self, keys, data):
        """Write a flat vector back into the selected parameters in place."""
        torch.nn.utils.vector_to_parameters(data, self.parameters(keys))

    def copy_from(self, src, keys):
        """Copy the selected parameter values from another Metaparam."""
        self.update_params(keys, src.get_params(keys))

    def parameterize(self, model, search, inp_size):
        """Attach a model whose output drives the parameters named in *search*."""
        # Loop through all keys, get data shape size
        data_ref = [self._parameters[k].data for k in search]
        dim_ref = [d.shape for d in data_ref]
        # Determine output size (flattened/concatted data)
        out_size = int(sum([np.prod(d) for d in dim_ref]))
        # Initialize model
        self.model = model(inp_size, out_size)
        self.is_parameterized = search
        self.is_command = False

    def reparameterize(self, x):
        """Run the attached model on *x* and write its output into the
        parameters selected by parameterize()."""
        # Update metaparameters after forward call of model
        new_params = self.model(x)
        self.update_params(self.is_parameterized, new_params)
9797187 | from model.contact import Contact
from model.group import Group
import random
def test_delete_contact_from_group(app, db, orm):
    """Removing a random contact from a random non-empty group should make it
    disappear from that group's ORM contact list.

    Fixtures: app (UI driver), db (raw DB access), orm (ORM access).
    """
    # Precondition: at least one contact exists.
    if len(db.get_contact_list()) == 0:
        app.contact.create_new_contact(
            Contact(firstname="test", middlename="middlename"))
    # Precondition: at least one group exists.
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="Было нечего", header="удалять", footer="создали тебя"))
    # Precondition: at least one group actually contains a contact.
    if len(db.get_groups_with_contacts())==0:
        contact_id = random.choice(db.get_contact_list()).id
        group_id = random.choice(db.get_group_list()).id
        app.contact.add_contact_to_group(contact_id, group_id)
    # Pick a populated group and one of its contacts, then delete via the UI.
    group_id = random.choice(db.get_groups_with_contacts()).id
    contact_id = random.choice(orm.get_contacts_in_group(Group(id=group_id))).id
    app.contact.delete_contact_from_group(group_id)
    # NOTE(review): delete_contact_from_group presumably removes the contact
    # currently selected in the UI -- confirm it matches contact_id above.
    assert db.get_contact_by_id(contact_id) not in orm.get_contacts_in_group(Group(id=group_id))
| StarcoderdataPython |
8067441 | # MIT License
#
# Copyright (c) 2020, <NAME> AG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datalayer.clib
from datalayer.provider_node import ProviderNodeCallbacks, NodeCallback
from datalayer.variant import Result, Variant
class MyProviderNode:
    """ctrlX Data Layer provider node exposing a single writable string."""

    # Class-level default; __on_write shadows it with an instance attribute
    # on first write.
    dataString: str = "Hello from Python Provider"

    def __init__(self):
        # Bundle the callbacks the Data Layer invokes for this node.
        self.cbs = ProviderNodeCallbacks(
            self.__on_create,
            self.__on_remove,
            self.__on_browse,
            self.__on_read,
            self.__on_write,
            self.__on_metadata
        )

    def __on_create(self, userdata: datalayer.clib.userData_c_void_p, address: str, data: Variant, cb: NodeCallback):
        """Accept create requests; nothing to allocate for this node."""
        # Removed a no-op bare expression (`self.dataString`) that evaluated
        # the attribute and discarded the result.
        print("__on_create")
        cb(Result(Result.OK), None)

    def __on_remove(self, userdata: datalayer.clib.userData_c_void_p, address: str, cb: NodeCallback):
        # Not implemented because no wildcard is registered
        print("__on_remove")
        cb(Result(Result.UNSUPPORTED), None)

    def __on_browse(self, userdata: datalayer.clib.userData_c_void_p, address: str, cb: NodeCallback):
        """Report no children: respond with an empty string array."""
        print("__on_browse")
        new_data = Variant()
        new_data.set_array_string([])
        cb(Result(Result.OK), new_data)

    def __on_read(self, userdata: datalayer.clib.userData_c_void_p, address: str, data: Variant, cb: NodeCallback):
        """Return the current string value."""
        print("__on_read", userdata)
        new_data = Variant()
        new_data.set_string(self.dataString)
        cb(Result(Result.OK), new_data)

    def __on_write(self, userdata: datalayer.clib.userData_c_void_p, address: str, data: Variant, cb: NodeCallback):
        """Replace the stored string with the written value."""
        print("__on_write")
        self.dataString = data.get_string()
        cb(Result(Result.OK), None)

    def __on_metadata(self, userdata: datalayer.clib.userData_c_void_p, address: str, cb: NodeCallback):
        """No metadata is provided for this node."""
        print("__on_metadata")
        cb(Result(Result.OK), None)
12814261 | # -*- encoding:utf-8 -*-
"""
Author: Yijie.Wu
Email: <EMAIL>
Date: 2020/5/14 13:43
"""
| StarcoderdataPython |
3530158 | import adsk.core, adsk.fusion, traceback
def run(context):
    """Fusion 360 script entry point.

    Creates a new design document, sketches a spline and a rectangle,
    extrudes the rectangle profile, derives construction geometry (axis,
    point, plane) from the result, and finally intersects a set of entities
    with a second sketch's plane.
    """
    ui = None
    try:
        app = adsk.core.Application.get()
        ui = app.userInterface

        # Create a document.
        doc = app.documents.add(adsk.core.DocumentTypes.FusionDesignDocumentType)

        product = app.activeProduct
        design = adsk.fusion.Design.cast(product)

        # Get the root component of the active design
        rootComp = design.rootComponent

        # Create a sketch
        sketches = rootComp.sketches
        sketch1 = sketches.add(rootComp.yZConstructionPlane)

        # Create an object collection for the points.
        points = adsk.core.ObjectCollection.create()

        # Define the points the spline with fit through.
        points.add(adsk.core.Point3D.create(-5, 0, 0))
        points.add(adsk.core.Point3D.create(5, 1, 0))
        points.add(adsk.core.Point3D.create(6, 4, 3))
        points.add(adsk.core.Point3D.create(7, 6, 6))
        points.add(adsk.core.Point3D.create(2, 3, 0))
        points.add(adsk.core.Point3D.create(0, 1, 0))

        # Create the spline.
        spline = sketch1.sketchCurves.sketchFittedSplines.add(points)

        # Get sketch lines
        sketchLines = sketch1.sketchCurves.sketchLines

        # Create sketch rectangle
        startPoint = adsk.core.Point3D.create(0, 0, 0)
        endPoint = adsk.core.Point3D.create(5.0, 5.0, 0)
        sketchLines.addTwoPointRectangle(startPoint, endPoint)

        # Get two sketch lines
        sketchLineOne = sketchLines.item(0)
        sketchLineTwo = sketchLines.item(1)

        # Get the profile
        prof = sketch1.profiles.item(0)

        # Create an extrusion input
        extrudes = rootComp.features.extrudeFeatures
        extInput = extrudes.createInput(prof, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)

        # Define that the extent is a distance extent of 5 cm
        distance = adsk.core.ValueInput.createByReal(5.0)
        # Set the distance extent
        extInput.setDistanceExtent(False, distance)
        # Set the extrude type to be solid
        extInput.isSolid = True

        # Create the extrusion
        ext = extrudes.add(extInput)

        # Get the body with the extrude
        body = ext.bodies.item(0)

        # Get a vertex of the body
        vertex = body.vertices.item(5)

        # Get a face of the vertex
        face = vertex.faces.item(0)

        # Create perpendicular construction axis
        axes = rootComp.constructionAxes
        axisInput = axes.createInput()
        axisInput.setByPerpendicularAtPoint(face, vertex)
        axis = axes.add(axisInput)

        # Create construction point
        points = rootComp.constructionPoints
        pointInput = points.createInput()
        pointInput.setByTwoEdges(sketchLineOne, sketchLineTwo)
        point = points.add(pointInput)

        # Create construction plane
        planes = rootComp.constructionPlanes
        planeInput = planes.createInput()
        offsetValue = adsk.core.ValueInput.createByReal(3.0)
        planeInput.setByOffset(prof, offsetValue)
        plane = planes.add(planeInput)

        # Create another sketch
        sketch2 = sketches.add(rootComp.xZConstructionPlane)

        # Entities of several kinds to intersect with sketch2's plane.
        entities = []
        entities.append(body)           # body
        entities.append(face)           # face
        entities.append(sketchLineOne)  # edge
        entities.append(vertex)         # vertex
        entities.append(spline)         # sketch curve
        entities.append(axis)           # construction axis
        entities.append(point)          # construction point
        entities.append(plane)          # construction plane
        sketchEntities = sketch2.intersectWithSketchPlane(entities)
    except:
        # Bare except is the standard Fusion 360 script boilerplate: surface
        # any failure in a message box when the UI is available.
        if ui:
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
| StarcoderdataPython |
1971011 | <reponame>Nisenco/react_fastapi
from app.config.config import settings
__all__ = ['settings']
| StarcoderdataPython |
3589249 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
# Public API of this generated module: the Pulumi input-argument classes for
# the mongodbatlas provider. This list mirrors the class definitions emitted
# by tfgen below; do not edit it independently of the generator output.
__all__ = [
    'AlertConfigurationMatcherArgs',
    'AlertConfigurationMetricThresholdConfigArgs',
    'AlertConfigurationNotificationArgs',
    'AlertConfigurationThresholdConfigArgs',
    'CloudBackupSchedulePolicyItemDailyArgs',
    'CloudBackupSchedulePolicyItemHourlyArgs',
    'CloudBackupSchedulePolicyItemMonthlyArgs',
    'CloudBackupSchedulePolicyItemWeeklyArgs',
    'CloudProviderAccessAuthorizationAwsArgs',
    'CloudProviderAccessAuthorizationFeatureUsageArgs',
    'CloudProviderAccessFeatureUsageArgs',
    'CloudProviderAccessSetupAwsConfigArgs',
    'CloudProviderSnapshotBackupPolicyPolicyArgs',
    'CloudProviderSnapshotBackupPolicyPolicyPolicyItemArgs',
    'CloudProviderSnapshotRestoreJobDeliveryTypeConfigArgs',
    'ClusterAdvancedConfigurationArgs',
    'ClusterBiConnectorConfigArgs',
    'ClusterConnectionStringArgs',
    'ClusterConnectionStringPrivateEndpointArgs',
    'ClusterConnectionStringPrivateEndpointEndpointArgs',
    'ClusterLabelArgs',
    'ClusterReplicationSpecArgs',
    'ClusterReplicationSpecRegionsConfigArgs',
    'ClusterSnapshotBackupPolicyArgs',
    'ClusterSnapshotBackupPolicyPolicyArgs',
    'ClusterSnapshotBackupPolicyPolicyPolicyItemArgs',
    'CustomDbRoleActionArgs',
    'CustomDbRoleActionResourceArgs',
    'CustomDbRoleInheritedRoleArgs',
    'DataLakeAwsArgs',
    'DataLakeDataProcessRegionArgs',
    'DataLakeStorageDatabaseArgs',
    'DataLakeStorageDatabaseCollectionArgs',
    'DataLakeStorageDatabaseCollectionDataSourceArgs',
    'DataLakeStorageDatabaseViewArgs',
    'DataLakeStorageStoreArgs',
    'DatabaseUserLabelArgs',
    'DatabaseUserRoleArgs',
    'DatabaseUserScopeArgs',
    'EncryptionAtRestAwsKmsConfigArgs',
    'EncryptionAtRestAzureKeyVaultConfigArgs',
    'EncryptionAtRestGoogleCloudKmsConfigArgs',
    'EventTriggerEventProcessorsArgs',
    'EventTriggerEventProcessorsAwsEventbridgeArgs',
    'GlobalClusterConfigCustomZoneMappingArgs',
    'GlobalClusterConfigManagedNamespaceArgs',
    'LdapConfigurationUserToDnMappingArgs',
    'LdapVerifyLinkArgs',
    'LdapVerifyValidationArgs',
    'OnlineArchiveCriteriaArgs',
    'OnlineArchivePartitionFieldArgs',
    'ProjectTeamArgs',
    'X509AuthenticationDatabaseUserCertificateArgs',
    'GetCustomDbRoleInheritedRoleArgs',
    'GetGlobalClusterConfigManagedNamespaceArgs',
]
@pulumi.input_type
class AlertConfigurationMatcherArgs:
    """Matcher rule selecting which targets an Atlas alert configuration applies to.

    Generated code: ``@pulumi.input_type`` introspects this class's structure
    (``__init__`` signature and decorated properties), so only comments and
    docstrings should be edited by hand.
    """
    def __init__(__self__, *,
                 field_name: Optional[pulumi.Input[str]] = None,
                 operator: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] field_name: Name of the field in the target object to match on.
        :param pulumi.Input[str] operator: Operator to apply when checking the current metric value against the threshold value.
               Accepted values are:
               - `GREATER_THAN`
               - `LESS_THAN`
        :param pulumi.Input[str] value: Value to test with the specified operator. If `field_name` is set to TYPE_NAME, you can match on the following values:
               - `PRIMARY`
               - `SECONDARY`
               - `STANDALONE`
               - `CONFIG`
               - `MONGOS`
        """
        # Record only explicitly supplied values; fields left as None are
        # omitted from the input object entirely rather than being set to None.
        if field_name is not None:
            pulumi.set(__self__, "field_name", field_name)
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter(name="fieldName")
    def field_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the field in the target object to match on.
        """
        return pulumi.get(self, "field_name")
    @field_name.setter
    def field_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "field_name", value)
    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """
        Operator to apply when checking the current metric value against the threshold value.
        Accepted values are:
        - `GREATER_THAN`
        - `LESS_THAN`
        """
        return pulumi.get(self, "operator")
    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        Value to test with the specified operator. If `field_name` is set to TYPE_NAME, you can match on the following values:
        - `PRIMARY`
        - `SECONDARY`
        - `STANDALONE`
        - `CONFIG`
        - `MONGOS`
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class AlertConfigurationMetricThresholdConfigArgs:
    """Metric-threshold settings for an Atlas alert configuration.

    Generated code: ``@pulumi.input_type`` introspects this class's structure,
    so only comments and docstrings should be edited by hand.
    """
    def __init__(__self__, *,
                 metric_name: Optional[pulumi.Input[str]] = None,
                 mode: Optional[pulumi.Input[str]] = None,
                 operator: Optional[pulumi.Input[str]] = None,
                 threshold: Optional[pulumi.Input[float]] = None,
                 units: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] metric_name: Name of the metric to check. The full list of current options is available [here](https://docs.atlas.mongodb.com/reference/alert-host-metrics/#measurement-types)
        :param pulumi.Input[str] mode: This must be set to AVERAGE. Atlas computes the current metric value as an average.
        :param pulumi.Input[str] operator: Operator to apply when checking the current metric value against the threshold value.
               Accepted values are:
               - `GREATER_THAN`
               - `LESS_THAN`
        :param pulumi.Input[float] threshold: Threshold value outside of which an alert will be triggered.
        :param pulumi.Input[str] units: The units for the threshold value. Depends on the type of metric.
               Accepted values are:
               - `RAW`
               - `BITS`
               - `BYTES`
               - `KILOBITS`
               - `KILOBYTES`
               - `MEGABITS`
               - `MEGABYTES`
               - `GIGABITS`
               - `GIGABYTES`
               - `TERABYTES`
               - `PETABYTES`
               - `MILLISECONDS`
               - `SECONDS`
               - `MINUTES`
               - `HOURS`
               - `DAYS`
        """
        # Record only explicitly supplied values; unset fields are omitted
        # from the input object entirely.
        if metric_name is not None:
            pulumi.set(__self__, "metric_name", metric_name)
        if mode is not None:
            pulumi.set(__self__, "mode", mode)
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if threshold is not None:
            pulumi.set(__self__, "threshold", threshold)
        if units is not None:
            pulumi.set(__self__, "units", units)
    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the metric to check. The full list of current options is available [here](https://docs.atlas.mongodb.com/reference/alert-host-metrics/#measurement-types)
        """
        return pulumi.get(self, "metric_name")
    @metric_name.setter
    def metric_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "metric_name", value)
    @property
    @pulumi.getter
    def mode(self) -> Optional[pulumi.Input[str]]:
        """
        This must be set to AVERAGE. Atlas computes the current metric value as an average.
        """
        return pulumi.get(self, "mode")
    @mode.setter
    def mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mode", value)
    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """
        Operator to apply when checking the current metric value against the threshold value.
        Accepted values are:
        - `GREATER_THAN`
        - `LESS_THAN`
        """
        return pulumi.get(self, "operator")
    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)
    @property
    @pulumi.getter
    def threshold(self) -> Optional[pulumi.Input[float]]:
        """
        Threshold value outside of which an alert will be triggered.
        """
        return pulumi.get(self, "threshold")
    @threshold.setter
    def threshold(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "threshold", value)
    @property
    @pulumi.getter
    def units(self) -> Optional[pulumi.Input[str]]:
        """
        The units for the threshold value. Depends on the type of metric.
        Accepted values are:
        - `RAW`
        - `BITS`
        - `BYTES`
        - `KILOBITS`
        - `KILOBYTES`
        - `MEGABITS`
        - `MEGABYTES`
        - `GIGABITS`
        - `GIGABYTES`
        - `TERABYTES`
        - `PETABYTES`
        - `MILLISECONDS`
        - `SECONDS`
        - `MINUTES`
        - `HOURS`
        - `DAYS`
        """
        return pulumi.get(self, "units")
    @units.setter
    def units(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "units", value)
@pulumi.input_type
class AlertConfigurationNotificationArgs:
    """Notification target (Slack, email, PagerDuty, Datadog, ...) for an Atlas alert configuration.

    Exactly which fields are required depends on `type_name`; see the
    per-parameter docs below. Generated code: ``@pulumi.input_type``
    introspects this class's structure, so only comments and docstrings
    should be edited by hand.
    """
    def __init__(__self__, *,
                 api_token: Optional[pulumi.Input[str]] = None,
                 channel_name: Optional[pulumi.Input[str]] = None,
                 datadog_api_key: Optional[pulumi.Input[str]] = None,
                 datadog_region: Optional[pulumi.Input[str]] = None,
                 delay_min: Optional[pulumi.Input[int]] = None,
                 email_address: Optional[pulumi.Input[str]] = None,
                 email_enabled: Optional[pulumi.Input[bool]] = None,
                 flow_name: Optional[pulumi.Input[str]] = None,
                 flowdock_api_token: Optional[pulumi.Input[str]] = None,
                 interval_min: Optional[pulumi.Input[int]] = None,
                 mobile_number: Optional[pulumi.Input[str]] = None,
                 ops_genie_api_key: Optional[pulumi.Input[str]] = None,
                 ops_genie_region: Optional[pulumi.Input[str]] = None,
                 org_name: Optional[pulumi.Input[str]] = None,
                 roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 service_key: Optional[pulumi.Input[str]] = None,
                 sms_enabled: Optional[pulumi.Input[bool]] = None,
                 team_id: Optional[pulumi.Input[str]] = None,
                 type_name: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 victor_ops_api_key: Optional[pulumi.Input[str]] = None,
                 victor_ops_routing_key: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] api_token: Slack API token. Required for the SLACK notifications type. If the token later becomes invalid, Atlas sends an email to the project owner and eventually removes the token.
        :param pulumi.Input[str] channel_name: Slack channel name. Required for the SLACK notifications type.
        :param pulumi.Input[str] datadog_api_key: Datadog API Key. Found in the Datadog dashboard. Required for the DATADOG notifications type.
        :param pulumi.Input[str] datadog_region: Region that indicates which API URL to use. Accepted regions are: `US`, `EU`. The default Datadog region is US.
        :param pulumi.Input[int] delay_min: Number of minutes to wait after an alert condition is detected before sending out the first notification.
        :param pulumi.Input[str] email_address: Email address to which alert notifications are sent. Required for the EMAIL notifications type.
        :param pulumi.Input[bool] email_enabled: Flag indicating if email notifications should be sent. Configurable for `ORG`, `GROUP`, and `USER` notifications types.
        :param pulumi.Input[str] flow_name: Flowdock flow name in lower-case letters. Required for the `FLOWDOCK` notifications type
        :param pulumi.Input[str] flowdock_api_token: The Flowdock personal API token. Required for the `FLOWDOCK` notifications type. If the token later becomes invalid, Atlas sends an email to the project owner and eventually removes the token.
        :param pulumi.Input[int] interval_min: Number of minutes to wait between successive notifications for unacknowledged alerts that are not resolved. The minimum value is 5. **CONDITIONAL** PAGER_DUTY manages the interval value, please do not set it in case of PAGER_DUTY
        :param pulumi.Input[str] mobile_number: Mobile number to which alert notifications are sent. Required for the SMS notifications type.
        :param pulumi.Input[str] ops_genie_api_key: Opsgenie API Key. Required for the `OPS_GENIE` notifications type. If the key later becomes invalid, Atlas sends an email to the project owner and eventually removes the token.
        :param pulumi.Input[str] ops_genie_region: Region that indicates which API URL to use. Accepted regions are: `US` ,`EU`. The default Opsgenie region is US.
        :param pulumi.Input[str] org_name: Flowdock organization name in lower-case letters. This is the name that appears after www.flowdock.com/app/ in the URL string. Required for the FLOWDOCK notifications type.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] roles: Atlas roles that should receive this notification. Not documented in the generated source; presumably applies to `ORG`/`GROUP` notification types — confirm against the Atlas Alerts API docs.
        :param pulumi.Input[str] service_key: PagerDuty service key. Required for the PAGER_DUTY notifications type. If the key later becomes invalid, Atlas sends an email to the project owner and eventually removes the key.
        :param pulumi.Input[bool] sms_enabled: Flag indicating if text message notifications should be sent. Configurable for `ORG`, `GROUP`, and `USER` notifications types.
        :param pulumi.Input[str] team_id: Unique identifier of a team.
        :param pulumi.Input[str] type_name: Type of alert notification.
               Accepted values are:
               - `DATADOG`
               - `EMAIL`
               - `FLOWDOCK`
        :param pulumi.Input[str] username: Name of the Atlas user to which to send notifications. Only a user in the project that owns the alert configuration is allowed here. Required for the `USER` notifications type.
        :param pulumi.Input[str] victor_ops_api_key: VictorOps API key. Required for the `VICTOR_OPS` notifications type. If the key later becomes invalid, Atlas sends an email to the project owner and eventually removes the key.
        :param pulumi.Input[str] victor_ops_routing_key: VictorOps routing key. Optional for the `VICTOR_OPS` notifications type. If the key later becomes invalid, Atlas sends an email to the project owner and eventually removes the key.
        """
        # Record only explicitly supplied values; unset fields are omitted
        # from the input object entirely.
        if api_token is not None:
            pulumi.set(__self__, "api_token", api_token)
        if channel_name is not None:
            pulumi.set(__self__, "channel_name", channel_name)
        if datadog_api_key is not None:
            pulumi.set(__self__, "datadog_api_key", datadog_api_key)
        if datadog_region is not None:
            pulumi.set(__self__, "datadog_region", datadog_region)
        if delay_min is not None:
            pulumi.set(__self__, "delay_min", delay_min)
        if email_address is not None:
            pulumi.set(__self__, "email_address", email_address)
        if email_enabled is not None:
            pulumi.set(__self__, "email_enabled", email_enabled)
        if flow_name is not None:
            pulumi.set(__self__, "flow_name", flow_name)
        if flowdock_api_token is not None:
            pulumi.set(__self__, "flowdock_api_token", flowdock_api_token)
        if interval_min is not None:
            pulumi.set(__self__, "interval_min", interval_min)
        if mobile_number is not None:
            pulumi.set(__self__, "mobile_number", mobile_number)
        if ops_genie_api_key is not None:
            pulumi.set(__self__, "ops_genie_api_key", ops_genie_api_key)
        if ops_genie_region is not None:
            pulumi.set(__self__, "ops_genie_region", ops_genie_region)
        if org_name is not None:
            pulumi.set(__self__, "org_name", org_name)
        if roles is not None:
            pulumi.set(__self__, "roles", roles)
        if service_key is not None:
            pulumi.set(__self__, "service_key", service_key)
        if sms_enabled is not None:
            pulumi.set(__self__, "sms_enabled", sms_enabled)
        if team_id is not None:
            pulumi.set(__self__, "team_id", team_id)
        if type_name is not None:
            pulumi.set(__self__, "type_name", type_name)
        if username is not None:
            pulumi.set(__self__, "username", username)
        if victor_ops_api_key is not None:
            pulumi.set(__self__, "victor_ops_api_key", victor_ops_api_key)
        if victor_ops_routing_key is not None:
            pulumi.set(__self__, "victor_ops_routing_key", victor_ops_routing_key)
    @property
    @pulumi.getter(name="apiToken")
    def api_token(self) -> Optional[pulumi.Input[str]]:
        """
        Slack API token. Required for the SLACK notifications type. If the token later becomes invalid, Atlas sends an email to the project owner and eventually removes the token.
        """
        return pulumi.get(self, "api_token")
    @api_token.setter
    def api_token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_token", value)
    @property
    @pulumi.getter(name="channelName")
    def channel_name(self) -> Optional[pulumi.Input[str]]:
        """
        Slack channel name. Required for the SLACK notifications type.
        """
        return pulumi.get(self, "channel_name")
    @channel_name.setter
    def channel_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "channel_name", value)
    @property
    @pulumi.getter(name="datadogApiKey")
    def datadog_api_key(self) -> Optional[pulumi.Input[str]]:
        """
        Datadog API Key. Found in the Datadog dashboard. Required for the DATADOG notifications type.
        """
        return pulumi.get(self, "datadog_api_key")
    @datadog_api_key.setter
    def datadog_api_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datadog_api_key", value)
    @property
    @pulumi.getter(name="datadogRegion")
    def datadog_region(self) -> Optional[pulumi.Input[str]]:
        """
        Region that indicates which API URL to use. Accepted regions are: `US`, `EU`. The default Datadog region is US.
        """
        return pulumi.get(self, "datadog_region")
    @datadog_region.setter
    def datadog_region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datadog_region", value)
    @property
    @pulumi.getter(name="delayMin")
    def delay_min(self) -> Optional[pulumi.Input[int]]:
        """
        Number of minutes to wait after an alert condition is detected before sending out the first notification.
        """
        return pulumi.get(self, "delay_min")
    @delay_min.setter
    def delay_min(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "delay_min", value)
    @property
    @pulumi.getter(name="emailAddress")
    def email_address(self) -> Optional[pulumi.Input[str]]:
        """
        Email address to which alert notifications are sent. Required for the EMAIL notifications type.
        """
        return pulumi.get(self, "email_address")
    @email_address.setter
    def email_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "email_address", value)
    @property
    @pulumi.getter(name="emailEnabled")
    def email_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag indicating if email notifications should be sent. Configurable for `ORG`, `GROUP`, and `USER` notifications types.
        """
        return pulumi.get(self, "email_enabled")
    @email_enabled.setter
    def email_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "email_enabled", value)
    @property
    @pulumi.getter(name="flowName")
    def flow_name(self) -> Optional[pulumi.Input[str]]:
        """
        Flowdock flow name in lower-case letters. Required for the `FLOWDOCK` notifications type
        """
        return pulumi.get(self, "flow_name")
    @flow_name.setter
    def flow_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "flow_name", value)
    @property
    @pulumi.getter(name="flowdockApiToken")
    def flowdock_api_token(self) -> Optional[pulumi.Input[str]]:
        """
        The Flowdock personal API token. Required for the `FLOWDOCK` notifications type. If the token later becomes invalid, Atlas sends an email to the project owner and eventually removes the token.
        """
        return pulumi.get(self, "flowdock_api_token")
    @flowdock_api_token.setter
    def flowdock_api_token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "flowdock_api_token", value)
    @property
    @pulumi.getter(name="intervalMin")
    def interval_min(self) -> Optional[pulumi.Input[int]]:
        """
        Number of minutes to wait between successive notifications for unacknowledged alerts that are not resolved. The minimum value is 5. **CONDITIONAL** PAGER_DUTY manages the interval value, please do not set it in case of PAGER_DUTY
        """
        return pulumi.get(self, "interval_min")
    @interval_min.setter
    def interval_min(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "interval_min", value)
    @property
    @pulumi.getter(name="mobileNumber")
    def mobile_number(self) -> Optional[pulumi.Input[str]]:
        """
        Mobile number to which alert notifications are sent. Required for the SMS notifications type.
        """
        return pulumi.get(self, "mobile_number")
    @mobile_number.setter
    def mobile_number(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mobile_number", value)
    @property
    @pulumi.getter(name="opsGenieApiKey")
    def ops_genie_api_key(self) -> Optional[pulumi.Input[str]]:
        """
        Opsgenie API Key. Required for the `OPS_GENIE` notifications type. If the key later becomes invalid, Atlas sends an email to the project owner and eventually removes the token.
        """
        return pulumi.get(self, "ops_genie_api_key")
    @ops_genie_api_key.setter
    def ops_genie_api_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ops_genie_api_key", value)
    @property
    @pulumi.getter(name="opsGenieRegion")
    def ops_genie_region(self) -> Optional[pulumi.Input[str]]:
        """
        Region that indicates which API URL to use. Accepted regions are: `US` ,`EU`. The default Opsgenie region is US.
        """
        return pulumi.get(self, "ops_genie_region")
    @ops_genie_region.setter
    def ops_genie_region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ops_genie_region", value)
    @property
    @pulumi.getter(name="orgName")
    def org_name(self) -> Optional[pulumi.Input[str]]:
        """
        Flowdock organization name in lower-case letters. This is the name that appears after www.flowdock.com/app/ in the URL string. Required for the FLOWDOCK notifications type.
        """
        return pulumi.get(self, "org_name")
    @org_name.setter
    def org_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "org_name", value)
    @property
    @pulumi.getter
    def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Atlas roles that should receive this notification. Not documented in the
        generated source — confirm semantics against the Atlas Alerts API docs.
        """
        return pulumi.get(self, "roles")
    @roles.setter
    def roles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "roles", value)
    @property
    @pulumi.getter(name="serviceKey")
    def service_key(self) -> Optional[pulumi.Input[str]]:
        """
        PagerDuty service key. Required for the PAGER_DUTY notifications type. If the key later becomes invalid, Atlas sends an email to the project owner and eventually removes the key.
        """
        return pulumi.get(self, "service_key")
    @service_key.setter
    def service_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_key", value)
    @property
    @pulumi.getter(name="smsEnabled")
    def sms_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag indicating if text message notifications should be sent. Configurable for `ORG`, `GROUP`, and `USER` notifications types.
        """
        return pulumi.get(self, "sms_enabled")
    @sms_enabled.setter
    def sms_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sms_enabled", value)
    @property
    @pulumi.getter(name="teamId")
    def team_id(self) -> Optional[pulumi.Input[str]]:
        """
        Unique identifier of a team.
        """
        return pulumi.get(self, "team_id")
    @team_id.setter
    def team_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "team_id", value)
    @property
    @pulumi.getter(name="typeName")
    def type_name(self) -> Optional[pulumi.Input[str]]:
        """
        Type of alert notification.
        Accepted values are:
        - `DATADOG`
        - `EMAIL`
        - `FLOWDOCK`
        """
        return pulumi.get(self, "type_name")
    @type_name.setter
    def type_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type_name", value)
    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the Atlas user to which to send notifications. Only a user in the project that owns the alert configuration is allowed here. Required for the `USER` notifications type.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)
    @property
    @pulumi.getter(name="victorOpsApiKey")
    def victor_ops_api_key(self) -> Optional[pulumi.Input[str]]:
        """
        VictorOps API key. Required for the `VICTOR_OPS` notifications type. If the key later becomes invalid, Atlas sends an email to the project owner and eventually removes the key.
        """
        return pulumi.get(self, "victor_ops_api_key")
    @victor_ops_api_key.setter
    def victor_ops_api_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "victor_ops_api_key", value)
    @property
    @pulumi.getter(name="victorOpsRoutingKey")
    def victor_ops_routing_key(self) -> Optional[pulumi.Input[str]]:
        """
        VictorOps routing key. Optional for the `VICTOR_OPS` notifications type. If the key later becomes invalid, Atlas sends an email to the project owner and eventually removes the key.
        """
        return pulumi.get(self, "victor_ops_routing_key")
    @victor_ops_routing_key.setter
    def victor_ops_routing_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "victor_ops_routing_key", value)
@pulumi.input_type
class AlertConfigurationThresholdConfigArgs:
    """Plain (non-metric) threshold settings for an Atlas alert configuration.

    Generated code: ``@pulumi.input_type`` introspects this class's structure,
    so only comments and docstrings should be edited by hand.
    """
    def __init__(__self__, *,
                 operator: Optional[pulumi.Input[str]] = None,
                 threshold: Optional[pulumi.Input[float]] = None,
                 units: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] operator: Operator to apply when checking the current metric value against the threshold value.
               Accepted values are:
               - `GREATER_THAN`
               - `LESS_THAN`
        :param pulumi.Input[float] threshold: Threshold value outside of which an alert will be triggered.
        :param pulumi.Input[str] units: The units for the threshold value. Depends on the type of metric.
               Accepted values are:
               - `RAW`
               - `BITS`
               - `BYTES`
               - `KILOBITS`
               - `KILOBYTES`
               - `MEGABITS`
               - `MEGABYTES`
               - `GIGABITS`
               - `GIGABYTES`
               - `TERABYTES`
               - `PETABYTES`
               - `MILLISECONDS`
               - `SECONDS`
               - `MINUTES`
               - `HOURS`
               - `DAYS`
        """
        # Record only explicitly supplied values; unset fields are omitted
        # from the input object entirely.
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if threshold is not None:
            pulumi.set(__self__, "threshold", threshold)
        if units is not None:
            pulumi.set(__self__, "units", units)
    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """
        Operator to apply when checking the current metric value against the threshold value.
        Accepted values are:
        - `GREATER_THAN`
        - `LESS_THAN`
        """
        return pulumi.get(self, "operator")
    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)
    @property
    @pulumi.getter
    def threshold(self) -> Optional[pulumi.Input[float]]:
        """
        Threshold value outside of which an alert will be triggered.
        """
        return pulumi.get(self, "threshold")
    @threshold.setter
    def threshold(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "threshold", value)
    @property
    @pulumi.getter
    def units(self) -> Optional[pulumi.Input[str]]:
        """
        The units for the threshold value. Depends on the type of metric.
        Accepted values are:
        - `RAW`
        - `BITS`
        - `BYTES`
        - `KILOBITS`
        - `KILOBYTES`
        - `MEGABITS`
        - `MEGABYTES`
        - `GIGABITS`
        - `GIGABYTES`
        - `TERABYTES`
        - `PETABYTES`
        - `MILLISECONDS`
        - `SECONDS`
        - `MINUTES`
        - `HOURS`
        - `DAYS`
        """
        return pulumi.get(self, "units")
    @units.setter
    def units(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "units", value)
@pulumi.input_type
class CloudBackupSchedulePolicyItemDailyArgs:
    """Daily policy item of an Atlas cloud-backup schedule.

    Generated code: ``@pulumi.input_type`` introspects this class's structure,
    so only comments and docstrings should be edited by hand.
    """
    def __init__(__self__, *,
                 frequency_interval: pulumi.Input[int],
                 retention_unit: pulumi.Input[str],
                 retention_value: pulumi.Input[int],
                 frequency_type: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] frequency_interval: Desired frequency of the new backup policy item specified by `frequency_type`.
        :param pulumi.Input[str] retention_unit: Scope of the backup policy item: days, weeks, or months.
        :param pulumi.Input[int] retention_value: Value to associate with `retention_unit`.
        """
        # Required fields are always set; the optional ones are only recorded
        # when explicitly supplied, so unset fields stay absent from the input.
        pulumi.set(__self__, "frequency_interval", frequency_interval)
        pulumi.set(__self__, "retention_unit", retention_unit)
        pulumi.set(__self__, "retention_value", retention_value)
        if frequency_type is not None:
            pulumi.set(__self__, "frequency_type", frequency_type)
        if id is not None:
            pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter(name="frequencyInterval")
    def frequency_interval(self) -> pulumi.Input[int]:
        """
        Desired frequency of the new backup policy item specified by `frequency_type`.
        """
        return pulumi.get(self, "frequency_interval")
    @frequency_interval.setter
    def frequency_interval(self, value: pulumi.Input[int]):
        pulumi.set(self, "frequency_interval", value)
    @property
    @pulumi.getter(name="retentionUnit")
    def retention_unit(self) -> pulumi.Input[str]:
        """
        Scope of the backup policy item: days, weeks, or months.
        """
        return pulumi.get(self, "retention_unit")
    @retention_unit.setter
    def retention_unit(self, value: pulumi.Input[str]):
        pulumi.set(self, "retention_unit", value)
    @property
    @pulumi.getter(name="retentionValue")
    def retention_value(self) -> pulumi.Input[int]:
        """
        Value to associate with `retention_unit`.
        """
        return pulumi.get(self, "retention_value")
    @retention_value.setter
    def retention_value(self, value: pulumi.Input[int]):
        pulumi.set(self, "retention_value", value)
    @property
    @pulumi.getter(name="frequencyType")
    def frequency_type(self) -> Optional[pulumi.Input[str]]:
        """
        Frequency type of this policy item. Undocumented in the generated
        source; presumably `daily` for this class — confirm against the Atlas
        Cloud Backup Schedule API docs.
        """
        return pulumi.get(self, "frequency_type")
    @frequency_type.setter
    def frequency_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "frequency_type", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Unique identifier of the policy item. Undocumented in the generated
        source; presumably assigned by Atlas — confirm before relying on it.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class CloudBackupSchedulePolicyItemHourlyArgs:
    """Hourly policy item of an Atlas cloud-backup schedule.

    Generated code: ``@pulumi.input_type`` introspects this class's structure,
    so only comments and docstrings should be edited by hand.
    """
    def __init__(__self__, *,
                 frequency_interval: pulumi.Input[int],
                 retention_unit: pulumi.Input[str],
                 retention_value: pulumi.Input[int],
                 frequency_type: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] frequency_interval: Desired frequency of the new backup policy item specified by `frequency_type`.
        :param pulumi.Input[str] retention_unit: Scope of the backup policy item: days, weeks, or months.
        :param pulumi.Input[int] retention_value: Value to associate with `retention_unit`.
        """
        # Required fields are always set; the optional ones are only recorded
        # when explicitly supplied, so unset fields stay absent from the input.
        pulumi.set(__self__, "frequency_interval", frequency_interval)
        pulumi.set(__self__, "retention_unit", retention_unit)
        pulumi.set(__self__, "retention_value", retention_value)
        if frequency_type is not None:
            pulumi.set(__self__, "frequency_type", frequency_type)
        if id is not None:
            pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter(name="frequencyInterval")
    def frequency_interval(self) -> pulumi.Input[int]:
        """
        Desired frequency of the new backup policy item specified by `frequency_type`.
        """
        return pulumi.get(self, "frequency_interval")
    @frequency_interval.setter
    def frequency_interval(self, value: pulumi.Input[int]):
        pulumi.set(self, "frequency_interval", value)
    @property
    @pulumi.getter(name="retentionUnit")
    def retention_unit(self) -> pulumi.Input[str]:
        """
        Scope of the backup policy item: days, weeks, or months.
        """
        return pulumi.get(self, "retention_unit")
    @retention_unit.setter
    def retention_unit(self, value: pulumi.Input[str]):
        pulumi.set(self, "retention_unit", value)
    @property
    @pulumi.getter(name="retentionValue")
    def retention_value(self) -> pulumi.Input[int]:
        """
        Value to associate with `retention_unit`.
        """
        return pulumi.get(self, "retention_value")
    @retention_value.setter
    def retention_value(self, value: pulumi.Input[int]):
        pulumi.set(self, "retention_value", value)
    @property
    @pulumi.getter(name="frequencyType")
    def frequency_type(self) -> Optional[pulumi.Input[str]]:
        """
        Frequency type of this policy item. Undocumented in the generated
        source; presumably `hourly` for this class — confirm against the Atlas
        Cloud Backup Schedule API docs.
        """
        return pulumi.get(self, "frequency_type")
    @frequency_type.setter
    def frequency_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "frequency_type", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Unique identifier of the policy item. Undocumented in the generated
        source; presumably assigned by Atlas — confirm before relying on it.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class CloudBackupSchedulePolicyItemMonthlyArgs:
    """Monthly policy item of an Atlas cloud-backup schedule.

    Generated code: ``@pulumi.input_type`` introspects this class's structure,
    so only comments and docstrings should be edited by hand.
    """
    def __init__(__self__, *,
                 frequency_interval: pulumi.Input[int],
                 retention_unit: pulumi.Input[str],
                 retention_value: pulumi.Input[int],
                 frequency_type: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] frequency_interval: Desired frequency of the new backup policy item specified by `frequency_type`.
        :param pulumi.Input[str] retention_unit: Scope of the backup policy item: days, weeks, or months.
        :param pulumi.Input[int] retention_value: Value to associate with `retention_unit`.
        """
        # Required fields are always set; the optional ones are only recorded
        # when explicitly supplied, so unset fields stay absent from the input.
        pulumi.set(__self__, "frequency_interval", frequency_interval)
        pulumi.set(__self__, "retention_unit", retention_unit)
        pulumi.set(__self__, "retention_value", retention_value)
        if frequency_type is not None:
            pulumi.set(__self__, "frequency_type", frequency_type)
        if id is not None:
            pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter(name="frequencyInterval")
    def frequency_interval(self) -> pulumi.Input[int]:
        """
        Desired frequency of the new backup policy item specified by `frequency_type`.
        """
        return pulumi.get(self, "frequency_interval")
    @frequency_interval.setter
    def frequency_interval(self, value: pulumi.Input[int]):
        pulumi.set(self, "frequency_interval", value)
    @property
    @pulumi.getter(name="retentionUnit")
    def retention_unit(self) -> pulumi.Input[str]:
        """
        Scope of the backup policy item: days, weeks, or months.
        """
        return pulumi.get(self, "retention_unit")
    @retention_unit.setter
    def retention_unit(self, value: pulumi.Input[str]):
        pulumi.set(self, "retention_unit", value)
    @property
    @pulumi.getter(name="retentionValue")
    def retention_value(self) -> pulumi.Input[int]:
        """
        Value to associate with `retention_unit`.
        """
        return pulumi.get(self, "retention_value")
    @retention_value.setter
    def retention_value(self, value: pulumi.Input[int]):
        pulumi.set(self, "retention_value", value)
    @property
    @pulumi.getter(name="frequencyType")
    def frequency_type(self) -> Optional[pulumi.Input[str]]:
        """
        Frequency type of this policy item. Undocumented in the generated
        source; presumably `monthly` for this class — confirm against the
        Atlas Cloud Backup Schedule API docs.
        """
        return pulumi.get(self, "frequency_type")
    @frequency_type.setter
    def frequency_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "frequency_type", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Unique identifier of the policy item. Undocumented in the generated
        source; presumably assigned by Atlas — confirm before relying on it.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class CloudBackupSchedulePolicyItemWeeklyArgs:
    def __init__(__self__, *,
                 frequency_interval: pulumi.Input[int],
                 retention_unit: pulumi.Input[str],
                 retention_value: pulumi.Input[int],
                 frequency_type: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None):
        """
        Weekly policy item of a cloud backup schedule.

        :param pulumi.Input[int] frequency_interval: Desired frequency of the new backup policy item specified by `frequency_type`.
        :param pulumi.Input[str] retention_unit: Scope of the backup policy item: days, weeks, or months.
        :param pulumi.Input[int] retention_value: Value to associate with `retention_unit`.
        """
        # Required fields are always recorded on the input type.
        for attr_name, attr_value in (
                ("frequency_interval", frequency_interval),
                ("retention_unit", retention_unit),
                ("retention_value", retention_value)):
            pulumi.set(__self__, attr_name, attr_value)
        # Optional fields are recorded only when the caller supplied them.
        for attr_name, attr_value in {"frequency_type": frequency_type, "id": id}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="frequencyInterval")
    def frequency_interval(self) -> pulumi.Input[int]:
        """
        Desired frequency of the new backup policy item specified by `frequency_type`.
        """
        return pulumi.get(self, "frequency_interval")

    @frequency_interval.setter
    def frequency_interval(self, value: pulumi.Input[int]):
        pulumi.set(self, "frequency_interval", value)

    @property
    @pulumi.getter(name="retentionUnit")
    def retention_unit(self) -> pulumi.Input[str]:
        """
        Scope of the backup policy item: days, weeks, or months.
        """
        return pulumi.get(self, "retention_unit")

    @retention_unit.setter
    def retention_unit(self, value: pulumi.Input[str]):
        pulumi.set(self, "retention_unit", value)

    @property
    @pulumi.getter(name="retentionValue")
    def retention_value(self) -> pulumi.Input[int]:
        """
        Value to associate with `retention_unit`.
        """
        return pulumi.get(self, "retention_value")

    @retention_value.setter
    def retention_value(self, value: pulumi.Input[int]):
        pulumi.set(self, "retention_value", value)

    @property
    @pulumi.getter(name="frequencyType")
    def frequency_type(self) -> Optional[pulumi.Input[str]]:
        """Frequency type of this policy item, if set."""
        return pulumi.get(self, "frequency_type")

    @frequency_type.setter
    def frequency_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "frequency_type", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """Identifier of this policy item, if set."""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class CloudProviderAccessAuthorizationAwsArgs:
    def __init__(__self__, *,
                 iam_assumed_role_arn: pulumi.Input[str]):
        """
        AWS settings for a cloud-provider-access authorization.

        :param pulumi.Input[str] iam_assumed_role_arn: ARN string — presumably the IAM role Atlas assumes; confirm against provider docs.
        """
        pulumi.set(__self__, "iam_assumed_role_arn", iam_assumed_role_arn)

    @property
    @pulumi.getter(name="iamAssumedRoleArn")
    def iam_assumed_role_arn(self) -> pulumi.Input[str]:
        """The configured IAM assumed-role ARN."""
        return pulumi.get(self, "iam_assumed_role_arn")

    @iam_assumed_role_arn.setter
    def iam_assumed_role_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "iam_assumed_role_arn", value)
@pulumi.input_type
class CloudProviderAccessAuthorizationFeatureUsageArgs:
    def __init__(__self__, *,
                 feature_id: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 feature_type: Optional[pulumi.Input[str]] = None):
        """
        Feature-usage entry of a cloud-provider-access authorization.
        Both fields are optional and are stored only when provided.
        """
        for attr_name, attr_value in {"feature_id": feature_id, "feature_type": feature_type}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="featureId")
    def feature_id(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """Mapping identifying the feature, if set."""
        return pulumi.get(self, "feature_id")

    @feature_id.setter
    def feature_id(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "feature_id", value)

    @property
    @pulumi.getter(name="featureType")
    def feature_type(self) -> Optional[pulumi.Input[str]]:
        """Type of the feature, if set."""
        return pulumi.get(self, "feature_type")

    @feature_type.setter
    def feature_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "feature_type", value)
@pulumi.input_type
class CloudProviderAccessFeatureUsageArgs:
    def __init__(__self__, *,
                 feature_id: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 feature_type: Optional[pulumi.Input[str]] = None):
        """
        Feature-usage entry of a cloud-provider-access resource.
        Both fields are optional and are stored only when provided.
        """
        for attr_name, attr_value in {"feature_id": feature_id, "feature_type": feature_type}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="featureId")
    def feature_id(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """Mapping identifying the feature, if set."""
        return pulumi.get(self, "feature_id")

    @feature_id.setter
    def feature_id(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "feature_id", value)

    @property
    @pulumi.getter(name="featureType")
    def feature_type(self) -> Optional[pulumi.Input[str]]:
        """Type of the feature, if set."""
        return pulumi.get(self, "feature_type")

    @feature_type.setter
    def feature_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "feature_type", value)
@pulumi.input_type
class CloudProviderAccessSetupAwsConfigArgs:
    def __init__(__self__, *,
                 atlas_assumed_role_external_id: Optional[pulumi.Input[str]] = None,
                 atlas_aws_account_arn: Optional[pulumi.Input[str]] = None):
        """
        AWS configuration of a cloud-provider-access setup.
        Both fields are optional and are stored only when provided.
        """
        for attr_name, attr_value in {
                "atlas_assumed_role_external_id": atlas_assumed_role_external_id,
                "atlas_aws_account_arn": atlas_aws_account_arn}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="atlasAssumedRoleExternalId")
    def atlas_assumed_role_external_id(self) -> Optional[pulumi.Input[str]]:
        """External ID of the Atlas assumed role, if set."""
        return pulumi.get(self, "atlas_assumed_role_external_id")

    @atlas_assumed_role_external_id.setter
    def atlas_assumed_role_external_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "atlas_assumed_role_external_id", value)

    @property
    @pulumi.getter(name="atlasAwsAccountArn")
    def atlas_aws_account_arn(self) -> Optional[pulumi.Input[str]]:
        """ARN of the Atlas AWS account, if set."""
        return pulumi.get(self, "atlas_aws_account_arn")

    @atlas_aws_account_arn.setter
    def atlas_aws_account_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "atlas_aws_account_arn", value)
@pulumi.input_type
class CloudProviderSnapshotBackupPolicyPolicyArgs:
    def __init__(__self__, *,
                 id: pulumi.Input[str],
                 policy_items: pulumi.Input[Sequence[pulumi.Input['CloudProviderSnapshotBackupPolicyPolicyPolicyItemArgs']]]):
        """
        Policy of a cloud-provider snapshot backup policy.
        Both fields are required.
        """
        for attr_name, attr_value in (("id", id), ("policy_items", policy_items)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[str]:
        """Identifier of this policy."""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: pulumi.Input[str]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="policyItems")
    def policy_items(self) -> pulumi.Input[Sequence[pulumi.Input['CloudProviderSnapshotBackupPolicyPolicyPolicyItemArgs']]]:
        """Sequence of policy items belonging to this policy."""
        return pulumi.get(self, "policy_items")

    @policy_items.setter
    def policy_items(self, value: pulumi.Input[Sequence[pulumi.Input['CloudProviderSnapshotBackupPolicyPolicyPolicyItemArgs']]]):
        pulumi.set(self, "policy_items", value)
@pulumi.input_type
class CloudProviderSnapshotBackupPolicyPolicyPolicyItemArgs:
    def __init__(__self__, *,
                 frequency_interval: pulumi.Input[int],
                 frequency_type: pulumi.Input[str],
                 id: pulumi.Input[str],
                 retention_unit: pulumi.Input[str],
                 retention_value: pulumi.Input[int]):
        """
        Policy item of a cloud-provider snapshot backup policy.
        All five fields are required.
        """
        for attr_name, attr_value in (
                ("frequency_interval", frequency_interval),
                ("frequency_type", frequency_type),
                ("id", id),
                ("retention_unit", retention_unit),
                ("retention_value", retention_value)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="frequencyInterval")
    def frequency_interval(self) -> pulumi.Input[int]:
        """Frequency interval of this policy item."""
        return pulumi.get(self, "frequency_interval")

    @frequency_interval.setter
    def frequency_interval(self, value: pulumi.Input[int]):
        pulumi.set(self, "frequency_interval", value)

    @property
    @pulumi.getter(name="frequencyType")
    def frequency_type(self) -> pulumi.Input[str]:
        """Frequency type of this policy item."""
        return pulumi.get(self, "frequency_type")

    @frequency_type.setter
    def frequency_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "frequency_type", value)

    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[str]:
        """Identifier of this policy item."""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: pulumi.Input[str]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="retentionUnit")
    def retention_unit(self) -> pulumi.Input[str]:
        """Retention unit of this policy item."""
        return pulumi.get(self, "retention_unit")

    @retention_unit.setter
    def retention_unit(self, value: pulumi.Input[str]):
        pulumi.set(self, "retention_unit", value)

    @property
    @pulumi.getter(name="retentionValue")
    def retention_value(self) -> pulumi.Input[int]:
        """Retention value paired with `retention_unit`."""
        return pulumi.get(self, "retention_value")

    @retention_value.setter
    def retention_value(self, value: pulumi.Input[int]):
        pulumi.set(self, "retention_value", value)
@pulumi.input_type
class CloudProviderSnapshotRestoreJobDeliveryTypeConfigArgs:
    def __init__(__self__, *,
                 automated: Optional[pulumi.Input[bool]] = None,
                 download: Optional[pulumi.Input[bool]] = None,
                 oplog_inc: Optional[pulumi.Input[int]] = None,
                 oplog_ts: Optional[pulumi.Input[int]] = None,
                 point_in_time: Optional[pulumi.Input[bool]] = None,
                 point_in_time_utc_seconds: Optional[pulumi.Input[int]] = None,
                 target_cluster_name: Optional[pulumi.Input[str]] = None,
                 target_project_id: Optional[pulumi.Input[str]] = None):
        """
        Delivery-type configuration of a cloud-provider snapshot restore job.
        Every field is optional and is stored only when provided.

        :param pulumi.Input[str] target_cluster_name: Name of the target Atlas cluster to which the restore job restores the snapshot. Only required if deliveryType is automated.
        """
        for attr_name, attr_value in {
                "automated": automated,
                "download": download,
                "oplog_inc": oplog_inc,
                "oplog_ts": oplog_ts,
                "point_in_time": point_in_time,
                "point_in_time_utc_seconds": point_in_time_utc_seconds,
                "target_cluster_name": target_cluster_name,
                "target_project_id": target_project_id}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def automated(self) -> Optional[pulumi.Input[bool]]:
        """Automated-delivery flag, if set."""
        return pulumi.get(self, "automated")

    @automated.setter
    def automated(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "automated", value)

    @property
    @pulumi.getter
    def download(self) -> Optional[pulumi.Input[bool]]:
        """Download-delivery flag, if set."""
        return pulumi.get(self, "download")

    @download.setter
    def download(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "download", value)

    @property
    @pulumi.getter(name="oplogInc")
    def oplog_inc(self) -> Optional[pulumi.Input[int]]:
        """Oplog increment, if set."""
        return pulumi.get(self, "oplog_inc")

    @oplog_inc.setter
    def oplog_inc(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "oplog_inc", value)

    @property
    @pulumi.getter(name="oplogTs")
    def oplog_ts(self) -> Optional[pulumi.Input[int]]:
        """Oplog timestamp, if set."""
        return pulumi.get(self, "oplog_ts")

    @oplog_ts.setter
    def oplog_ts(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "oplog_ts", value)

    @property
    @pulumi.getter(name="pointInTime")
    def point_in_time(self) -> Optional[pulumi.Input[bool]]:
        """Point-in-time restore flag, if set."""
        return pulumi.get(self, "point_in_time")

    @point_in_time.setter
    def point_in_time(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "point_in_time", value)

    @property
    @pulumi.getter(name="pointInTimeUtcSeconds")
    def point_in_time_utc_seconds(self) -> Optional[pulumi.Input[int]]:
        """Point-in-time moment in UTC seconds, if set."""
        return pulumi.get(self, "point_in_time_utc_seconds")

    @point_in_time_utc_seconds.setter
    def point_in_time_utc_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "point_in_time_utc_seconds", value)

    @property
    @pulumi.getter(name="targetClusterName")
    def target_cluster_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the target Atlas cluster to which the restore job restores the snapshot. Only required if deliveryType is automated.
        """
        return pulumi.get(self, "target_cluster_name")

    @target_cluster_name.setter
    def target_cluster_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_cluster_name", value)

    @property
    @pulumi.getter(name="targetProjectId")
    def target_project_id(self) -> Optional[pulumi.Input[str]]:
        """Identifier of the target project, if set."""
        return pulumi.get(self, "target_project_id")

    @target_project_id.setter
    def target_project_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_project_id", value)
@pulumi.input_type
class ClusterAdvancedConfigurationArgs:
    def __init__(__self__, *,
                 fail_index_key_too_long: Optional[pulumi.Input[bool]] = None,
                 javascript_enabled: Optional[pulumi.Input[bool]] = None,
                 minimum_enabled_tls_protocol: Optional[pulumi.Input[str]] = None,
                 no_table_scan: Optional[pulumi.Input[bool]] = None,
                 oplog_size_mb: Optional[pulumi.Input[int]] = None,
                 sample_refresh_interval_bi_connector: Optional[pulumi.Input[int]] = None,
                 sample_size_bi_connector: Optional[pulumi.Input[int]] = None):
        """
        Advanced configuration options for a cluster. Every field is
        optional and is stored only when provided.

        :param pulumi.Input[bool] fail_index_key_too_long: When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them.
        :param pulumi.Input[bool] javascript_enabled: When true, the cluster allows execution of operations that perform server-side executions of JavaScript. When false, the cluster disables execution of those operations.
        :param pulumi.Input[str] minimum_enabled_tls_protocol: Sets the minimum Transport Layer Security (TLS) version the cluster accepts for incoming connections.
        :param pulumi.Input[bool] no_table_scan: When true, the cluster disables the execution of any query that requires a collection scan to return results. When false, the cluster allows the execution of those operations.
        :param pulumi.Input[int] oplog_size_mb: The custom oplog size of the cluster. When absent, the cluster uses the default oplog size calculated by Atlas.
        :param pulumi.Input[int] sample_refresh_interval_bi_connector: Interval in seconds at which the mongosqld process re-samples data to create its relational schema. The default value is 300. Must be a positive integer. Available only for Atlas deployments in which BI Connector for Atlas is enabled.
        :param pulumi.Input[int] sample_size_bi_connector: Number of documents per database to sample when gathering schema information. Defaults to 100. Available only for Atlas deployments in which BI Connector for Atlas is enabled.
        """
        for attr_name, attr_value in {
                "fail_index_key_too_long": fail_index_key_too_long,
                "javascript_enabled": javascript_enabled,
                "minimum_enabled_tls_protocol": minimum_enabled_tls_protocol,
                "no_table_scan": no_table_scan,
                "oplog_size_mb": oplog_size_mb,
                "sample_refresh_interval_bi_connector": sample_refresh_interval_bi_connector,
                "sample_size_bi_connector": sample_size_bi_connector}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="failIndexKeyTooLong")
    def fail_index_key_too_long(self) -> Optional[pulumi.Input[bool]]:
        """
        When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them.
        """
        return pulumi.get(self, "fail_index_key_too_long")

    @fail_index_key_too_long.setter
    def fail_index_key_too_long(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "fail_index_key_too_long", value)

    @property
    @pulumi.getter(name="javascriptEnabled")
    def javascript_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        When true, the cluster allows execution of operations that perform server-side executions of JavaScript. When false, the cluster disables execution of those operations.
        """
        return pulumi.get(self, "javascript_enabled")

    @javascript_enabled.setter
    def javascript_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "javascript_enabled", value)

    @property
    @pulumi.getter(name="minimumEnabledTlsProtocol")
    def minimum_enabled_tls_protocol(self) -> Optional[pulumi.Input[str]]:
        """
        Sets the minimum Transport Layer Security (TLS) version the cluster accepts for incoming connections.
        """
        return pulumi.get(self, "minimum_enabled_tls_protocol")

    @minimum_enabled_tls_protocol.setter
    def minimum_enabled_tls_protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "minimum_enabled_tls_protocol", value)

    @property
    @pulumi.getter(name="noTableScan")
    def no_table_scan(self) -> Optional[pulumi.Input[bool]]:
        """
        When true, the cluster disables the execution of any query that requires a collection scan to return results. When false, the cluster allows the execution of those operations.
        """
        return pulumi.get(self, "no_table_scan")

    @no_table_scan.setter
    def no_table_scan(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "no_table_scan", value)

    @property
    @pulumi.getter(name="oplogSizeMb")
    def oplog_size_mb(self) -> Optional[pulumi.Input[int]]:
        """
        The custom oplog size of the cluster. When absent, the cluster uses the default oplog size calculated by Atlas.
        """
        return pulumi.get(self, "oplog_size_mb")

    @oplog_size_mb.setter
    def oplog_size_mb(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "oplog_size_mb", value)

    @property
    @pulumi.getter(name="sampleRefreshIntervalBiConnector")
    def sample_refresh_interval_bi_connector(self) -> Optional[pulumi.Input[int]]:
        """
        Interval in seconds at which the mongosqld process re-samples data to create its relational schema. The default value is 300. Must be a positive integer. Available only for Atlas deployments in which BI Connector for Atlas is enabled.
        """
        return pulumi.get(self, "sample_refresh_interval_bi_connector")

    @sample_refresh_interval_bi_connector.setter
    def sample_refresh_interval_bi_connector(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "sample_refresh_interval_bi_connector", value)

    @property
    @pulumi.getter(name="sampleSizeBiConnector")
    def sample_size_bi_connector(self) -> Optional[pulumi.Input[int]]:
        """
        Number of documents per database to sample when gathering schema information. Defaults to 100. Available only for Atlas deployments in which BI Connector for Atlas is enabled.
        """
        return pulumi.get(self, "sample_size_bi_connector")

    @sample_size_bi_connector.setter
    def sample_size_bi_connector(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "sample_size_bi_connector", value)
@pulumi.input_type
class ClusterBiConnectorConfigArgs:
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 read_preference: Optional[pulumi.Input[str]] = None):
        """
        BI Connector configuration of a cluster. Both fields are optional
        and are stored only when provided.

        :param pulumi.Input[bool] enabled: Specifies whether or not BI Connector for Atlas is enabled on the cluster.
               - Set to `true` to enable BI Connector for Atlas.
               - Set to `false` to disable BI Connector for Atlas.
        :param pulumi.Input[str] read_preference: Specifies the read preference to be used by BI Connector for Atlas on the cluster. Each BI Connector for Atlas read preference contains a distinct combination of [readPreference](https://docs.mongodb.com/manual/core/read-preference/) and [readPreferenceTags](https://docs.mongodb.com/manual/core/read-preference/#tag-sets) options. For details on BI Connector for Atlas read preferences, refer to the [BI Connector Read Preferences Table](https://docs.atlas.mongodb.com/tutorial/create-global-writes-cluster/#bic-read-preferences).
        """
        for attr_name, attr_value in {"enabled": enabled, "read_preference": read_preference}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether or not BI Connector for Atlas is enabled on the cluster.
        - Set to `true` to enable BI Connector for Atlas.
        - Set to `false` to disable BI Connector for Atlas.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="readPreference")
    def read_preference(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the read preference to be used by BI Connector for Atlas on the cluster. Each BI Connector for Atlas read preference contains a distinct combination of [readPreference](https://docs.mongodb.com/manual/core/read-preference/) and [readPreferenceTags](https://docs.mongodb.com/manual/core/read-preference/#tag-sets) options. For details on BI Connector for Atlas read preferences, refer to the [BI Connector Read Preferences Table](https://docs.atlas.mongodb.com/tutorial/create-global-writes-cluster/#bic-read-preferences).
        """
        return pulumi.get(self, "read_preference")

    @read_preference.setter
    def read_preference(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "read_preference", value)
@pulumi.input_type
class ClusterConnectionStringArgs:
    def __init__(__self__, *,
                 aws_private_link: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 aws_private_link_srv: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 private: Optional[pulumi.Input[str]] = None,
                 private_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConnectionStringPrivateEndpointArgs']]]] = None,
                 private_srv: Optional[pulumi.Input[str]] = None,
                 standard: Optional[pulumi.Input[str]] = None,
                 standard_srv: Optional[pulumi.Input[str]] = None):
        """
        Connection strings of a cluster. Every field is optional and is
        stored only when provided.

        ``aws_private_link`` and ``aws_private_link_srv`` are deprecated in
        favor of ``connection_strings.private_endpoint[n].connection_string``
        and ``connection_strings.private_endpoint[n].srv_connection_string``;
        passing them emits a DeprecationWarning and a pulumi log warning.
        """
        # Each deprecated argument was previously None-checked twice (once to
        # warn, once to set); a single guard performs both steps.
        if aws_private_link is not None:
            warnings.warn("""This field is deprecated. Use connection_strings.private_endpoint[n].connection_string instead""", DeprecationWarning)
            pulumi.log.warn("""aws_private_link is deprecated: This field is deprecated. Use connection_strings.private_endpoint[n].connection_string instead""")
            pulumi.set(__self__, "aws_private_link", aws_private_link)
        if aws_private_link_srv is not None:
            warnings.warn("""This field is deprecated. Use connection_strings.private_endpoint[n].srv_connection_string instead""", DeprecationWarning)
            pulumi.log.warn("""aws_private_link_srv is deprecated: This field is deprecated. Use connection_strings.private_endpoint[n].srv_connection_string instead""")
            pulumi.set(__self__, "aws_private_link_srv", aws_private_link_srv)
        if private is not None:
            pulumi.set(__self__, "private", private)
        if private_endpoints is not None:
            pulumi.set(__self__, "private_endpoints", private_endpoints)
        if private_srv is not None:
            pulumi.set(__self__, "private_srv", private_srv)
        if standard is not None:
            pulumi.set(__self__, "standard", standard)
        if standard_srv is not None:
            pulumi.set(__self__, "standard_srv", standard_srv)

    @property
    @pulumi.getter(name="awsPrivateLink")
    def aws_private_link(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """Deprecated AWS PrivateLink connection-string mapping, if set."""
        return pulumi.get(self, "aws_private_link")

    @aws_private_link.setter
    def aws_private_link(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "aws_private_link", value)

    @property
    @pulumi.getter(name="awsPrivateLinkSrv")
    def aws_private_link_srv(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """Deprecated AWS PrivateLink SRV connection-string mapping, if set."""
        return pulumi.get(self, "aws_private_link_srv")

    @aws_private_link_srv.setter
    def aws_private_link_srv(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "aws_private_link_srv", value)

    @property
    @pulumi.getter
    def private(self) -> Optional[pulumi.Input[str]]:
        """Private connection string, if set."""
        return pulumi.get(self, "private")

    @private.setter
    def private(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private", value)

    @property
    @pulumi.getter(name="privateEndpoints")
    def private_endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConnectionStringPrivateEndpointArgs']]]]:
        """Private-endpoint entries, if set."""
        return pulumi.get(self, "private_endpoints")

    @private_endpoints.setter
    def private_endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConnectionStringPrivateEndpointArgs']]]]):
        pulumi.set(self, "private_endpoints", value)

    @property
    @pulumi.getter(name="privateSrv")
    def private_srv(self) -> Optional[pulumi.Input[str]]:
        """Private SRV connection string, if set."""
        return pulumi.get(self, "private_srv")

    @private_srv.setter
    def private_srv(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_srv", value)

    @property
    @pulumi.getter
    def standard(self) -> Optional[pulumi.Input[str]]:
        """Standard connection string, if set."""
        return pulumi.get(self, "standard")

    @standard.setter
    def standard(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "standard", value)

    @property
    @pulumi.getter(name="standardSrv")
    def standard_srv(self) -> Optional[pulumi.Input[str]]:
        """Standard SRV connection string, if set."""
        return pulumi.get(self, "standard_srv")

    @standard_srv.setter
    def standard_srv(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "standard_srv", value)
@pulumi.input_type
class ClusterConnectionStringPrivateEndpointArgs:
    def __init__(__self__, *,
                 connection_string: Optional[pulumi.Input[str]] = None,
                 endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConnectionStringPrivateEndpointEndpointArgs']]]] = None,
                 srv_connection_string: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        Private-endpoint entry of a cluster connection string. Every field
        is optional and is stored only when provided.
        """
        for attr_name, attr_value in {
                "connection_string": connection_string,
                "endpoints": endpoints,
                "srv_connection_string": srv_connection_string,
                "type": type}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="connectionString")
    def connection_string(self) -> Optional[pulumi.Input[str]]:
        """Connection string of this private endpoint, if set."""
        return pulumi.get(self, "connection_string")

    @connection_string.setter
    def connection_string(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_string", value)

    @property
    @pulumi.getter
    def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConnectionStringPrivateEndpointEndpointArgs']]]]:
        """Endpoint entries belonging to this private endpoint, if set."""
        return pulumi.get(self, "endpoints")

    @endpoints.setter
    def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConnectionStringPrivateEndpointEndpointArgs']]]]):
        pulumi.set(self, "endpoints", value)

    @property
    @pulumi.getter(name="srvConnectionString")
    def srv_connection_string(self) -> Optional[pulumi.Input[str]]:
        """SRV connection string of this private endpoint, if set."""
        return pulumi.get(self, "srv_connection_string")

    @srv_connection_string.setter
    def srv_connection_string(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "srv_connection_string", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """Type of this private endpoint, if set."""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ClusterConnectionStringPrivateEndpointEndpointArgs:
    def __init__(__self__, *,
                 endpoint_id: Optional[pulumi.Input[str]] = None,
                 provider_name: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None):
        """
        Endpoint entry of a private endpoint. Every field is optional and
        is stored only when provided.

        :param pulumi.Input[str] provider_name: Cloud service provider on which the servers are provisioned.
        """
        for attr_name, attr_value in {
                "endpoint_id": endpoint_id,
                "provider_name": provider_name,
                "region": region}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="endpointId")
    def endpoint_id(self) -> Optional[pulumi.Input[str]]:
        """Identifier of this endpoint, if set."""
        return pulumi.get(self, "endpoint_id")

    @endpoint_id.setter
    def endpoint_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "endpoint_id", value)

    @property
    @pulumi.getter(name="providerName")
    def provider_name(self) -> Optional[pulumi.Input[str]]:
        """
        Cloud service provider on which the servers are provisioned.
        """
        return pulumi.get(self, "provider_name")

    @provider_name.setter
    def provider_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provider_name", value)

    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """Region of this endpoint, if set."""
        return pulumi.get(self, "region")

    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
@pulumi.input_type
class ClusterLabelArgs:
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        Key/value label attached to a cluster. Both fields are optional
        and are stored only when provided.

        :param pulumi.Input[str] key: The key that you want to write.
        :param pulumi.Input[str] value: The value that you want to write.
        """
        for attr_name, attr_value in {"key": key, "value": value}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        The key that you want to write.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The value that you want to write.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ClusterReplicationSpecArgs:
    def __init__(__self__, *,
                 num_shards: pulumi.Input[int],
                 id: Optional[pulumi.Input[str]] = None,
                 regions_configs: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterReplicationSpecRegionsConfigArgs']]]] = None,
                 zone_name: Optional[pulumi.Input[str]] = None):
        """
        Replication specification of a cluster.

        :param pulumi.Input[int] num_shards: Number of shards to deploy in the specified zone, minimum 1.
        :param pulumi.Input[str] id: Unique identifer of the replication document for a zone in a Global Cluster.
        :param pulumi.Input[Sequence[pulumi.Input['ClusterReplicationSpecRegionsConfigArgs']]] regions_configs: Physical location of the region. Each regionsConfig document describes the region’s priority in elections and the number and type of MongoDB nodes Atlas deploys to the region. You must order each regionsConfigs document by regionsConfig.priority, descending. See Region Config below for more details.
        :param pulumi.Input[str] zone_name: Name for the zone in a Global Cluster.
        """
        # num_shards is the only required field; the rest are recorded
        # only when the caller supplied them.
        pulumi.set(__self__, "num_shards", num_shards)
        for attr_name, attr_value in {
                "id": id,
                "regions_configs": regions_configs,
                "zone_name": zone_name}.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="numShards")
    def num_shards(self) -> pulumi.Input[int]:
        """
        Number of shards to deploy in the specified zone, minimum 1.
        """
        return pulumi.get(self, "num_shards")

    @num_shards.setter
    def num_shards(self, value: pulumi.Input[int]):
        pulumi.set(self, "num_shards", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Unique identifer of the replication document for a zone in a Global Cluster.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="regionsConfigs")
    def regions_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterReplicationSpecRegionsConfigArgs']]]]:
        """
        Physical location of the region. Each regionsConfig document describes the region’s priority in elections and the number and type of MongoDB nodes Atlas deploys to the region. You must order each regionsConfigs document by regionsConfig.priority, descending. See Region Config below for more details.
        """
        return pulumi.get(self, "regions_configs")

    @regions_configs.setter
    def regions_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterReplicationSpecRegionsConfigArgs']]]]):
        pulumi.set(self, "regions_configs", value)

    @property
    @pulumi.getter(name="zoneName")
    def zone_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name for the zone in a Global Cluster.
        """
        return pulumi.get(self, "zone_name")

    @zone_name.setter
    def zone_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "zone_name", value)
@pulumi.input_type
class ClusterReplicationSpecRegionsConfigArgs:
    """
    Per-region topology for one zone of a cluster replication spec: the Atlas
    region to deploy into, how many electable / read-only / analytics nodes to
    place there, and the region's election priority.
    """
    def __init__(__self__, *,
                 region_name: pulumi.Input[str],
                 analytics_nodes: Optional[pulumi.Input[int]] = None,
                 electable_nodes: Optional[pulumi.Input[int]] = None,
                 priority: Optional[pulumi.Input[int]] = None,
                 read_only_nodes: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] region_name: Physical location of your MongoDB cluster. The region you choose can affect network latency for clients accessing your databases. Requires the **Atlas region name**, see the reference list for [AWS](https://docs.atlas.mongodb.com/reference/amazon-aws/), [GCP](https://docs.atlas.mongodb.com/reference/google-gcp/), [Azure](https://docs.atlas.mongodb.com/reference/microsoft-azure/).
        :param pulumi.Input[int] analytics_nodes: The number of analytics nodes for Atlas to deploy to the region. Analytics nodes are useful for handling analytic data such as reporting queries from BI Connector for Atlas. Analytics nodes are read-only, and can never become the primary. If you do not specify this option, no analytics nodes are deployed to the region.
        :param pulumi.Input[int] electable_nodes: Number of electable nodes for Atlas to deploy to the region. Electable nodes can become the primary and can facilitate local reads.
               * The total number of electableNodes across all replication spec regions must total 3, 5, or 7.
               * Specify 0 if you do not want any electable nodes in the region.
               * You cannot create electable nodes in a region if `priority` is 0.
        :param pulumi.Input[int] priority: Election priority of the region. For regions with only read-only nodes, set this value to 0.
               * For regions where `electable_nodes` is at least 1, each region must have a priority of exactly one (1) less than the previous region. The first region must have a priority of 7. The lowest possible priority is 1.
               * The priority 7 region identifies the Preferred Region of the cluster. Atlas places the primary node in the Preferred Region. Priorities 1 through 7 are exclusive - no more than one region per cluster can be assigned a given priority.
               * Example: If you have three regions, their priorities would be 7, 6, and 5 respectively. If you added two more regions for supporting electable nodes, the priorities of those regions would be 4 and 3 respectively.
        :param pulumi.Input[int] read_only_nodes: Number of read-only nodes for Atlas to deploy to the region. Read-only nodes can never become the primary, but can facilitate local-reads. Specify 0 if you do not want any read-only nodes in the region.
        """
        # Only region_name is mandatory; optional node counts are stored only
        # when the caller supplied them, so Atlas server-side defaults apply.
        pulumi.set(__self__, "region_name", region_name)
        if analytics_nodes is not None:
            pulumi.set(__self__, "analytics_nodes", analytics_nodes)
        if electable_nodes is not None:
            pulumi.set(__self__, "electable_nodes", electable_nodes)
        if priority is not None:
            pulumi.set(__self__, "priority", priority)
        if read_only_nodes is not None:
            pulumi.set(__self__, "read_only_nodes", read_only_nodes)
    @property
    @pulumi.getter(name="regionName")
    def region_name(self) -> pulumi.Input[str]:
        """
        Physical location of your MongoDB cluster. The region you choose can affect network latency for clients accessing your databases. Requires the **Atlas region name**, see the reference list for [AWS](https://docs.atlas.mongodb.com/reference/amazon-aws/), [GCP](https://docs.atlas.mongodb.com/reference/google-gcp/), [Azure](https://docs.atlas.mongodb.com/reference/microsoft-azure/).
        """
        return pulumi.get(self, "region_name")
    @region_name.setter
    def region_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "region_name", value)
    @property
    @pulumi.getter(name="analyticsNodes")
    def analytics_nodes(self) -> Optional[pulumi.Input[int]]:
        """
        The number of analytics nodes for Atlas to deploy to the region. Analytics nodes are useful for handling analytic data such as reporting queries from BI Connector for Atlas. Analytics nodes are read-only, and can never become the primary. If you do not specify this option, no analytics nodes are deployed to the region.
        """
        return pulumi.get(self, "analytics_nodes")
    @analytics_nodes.setter
    def analytics_nodes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "analytics_nodes", value)
    @property
    @pulumi.getter(name="electableNodes")
    def electable_nodes(self) -> Optional[pulumi.Input[int]]:
        """
        Number of electable nodes for Atlas to deploy to the region. Electable nodes can become the primary and can facilitate local reads.
        * The total number of electableNodes across all replication spec regions must total 3, 5, or 7.
        * Specify 0 if you do not want any electable nodes in the region.
        * You cannot create electable nodes in a region if `priority` is 0.
        """
        return pulumi.get(self, "electable_nodes")
    @electable_nodes.setter
    def electable_nodes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "electable_nodes", value)
    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[int]]:
        """
        Election priority of the region. For regions with only read-only nodes, set this value to 0.
        * For regions where `electable_nodes` is at least 1, each region must have a priority of exactly one (1) less than the previous region. The first region must have a priority of 7. The lowest possible priority is 1.
        * The priority 7 region identifies the Preferred Region of the cluster. Atlas places the primary node in the Preferred Region. Priorities 1 through 7 are exclusive - no more than one region per cluster can be assigned a given priority.
        * Example: If you have three regions, their priorities would be 7, 6, and 5 respectively. If you added two more regions for supporting electable nodes, the priorities of those regions would be 4 and 3 respectively.
        """
        return pulumi.get(self, "priority")
    @priority.setter
    def priority(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "priority", value)
    @property
    @pulumi.getter(name="readOnlyNodes")
    def read_only_nodes(self) -> Optional[pulumi.Input[int]]:
        """
        Number of read-only nodes for Atlas to deploy to the region. Read-only nodes can never become the primary, but can facilitate local-reads. Specify 0 if you do not want any read-only nodes in the region.
        """
        return pulumi.get(self, "read_only_nodes")
    @read_only_nodes.setter
    def read_only_nodes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "read_only_nodes", value)
@pulumi.input_type
class ClusterSnapshotBackupPolicyArgs:
    """
    Snapshot backup policy details attached to a cluster.
    NOTE(review): every field here is optional and mostly undocumented by the
    generator; these values appear to be computed/returned by Atlas for
    clusters with Cloud Backup enabled rather than user-supplied — confirm
    against the provider schema before documenting them as inputs.
    """
    def __init__(__self__, *,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 cluster_name: Optional[pulumi.Input[str]] = None,
                 next_snapshot: Optional[pulumi.Input[str]] = None,
                 policies: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterSnapshotBackupPolicyPolicyArgs']]]] = None,
                 reference_hour_of_day: Optional[pulumi.Input[int]] = None,
                 reference_minute_of_hour: Optional[pulumi.Input[int]] = None,
                 restore_window_days: Optional[pulumi.Input[int]] = None,
                 update_snapshots: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] cluster_id: The cluster ID.
        """
        # Every field is optional; only supplied values are recorded.
        if cluster_id is not None:
            pulumi.set(__self__, "cluster_id", cluster_id)
        if cluster_name is not None:
            pulumi.set(__self__, "cluster_name", cluster_name)
        if next_snapshot is not None:
            pulumi.set(__self__, "next_snapshot", next_snapshot)
        if policies is not None:
            pulumi.set(__self__, "policies", policies)
        if reference_hour_of_day is not None:
            pulumi.set(__self__, "reference_hour_of_day", reference_hour_of_day)
        if reference_minute_of_hour is not None:
            pulumi.set(__self__, "reference_minute_of_hour", reference_minute_of_hour)
        if restore_window_days is not None:
            pulumi.set(__self__, "restore_window_days", restore_window_days)
        if update_snapshots is not None:
            pulumi.set(__self__, "update_snapshots", update_snapshots)
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> Optional[pulumi.Input[str]]:
        """
        The cluster ID.
        """
        return pulumi.get(self, "cluster_id")
    @cluster_id.setter
    def cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_id", value)
    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cluster_name")
    @cluster_name.setter
    def cluster_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_name", value)
    @property
    @pulumi.getter(name="nextSnapshot")
    def next_snapshot(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): presumably a timestamp of the next scheduled snapshot — confirm.
        return pulumi.get(self, "next_snapshot")
    @next_snapshot.setter
    def next_snapshot(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "next_snapshot", value)
    @property
    @pulumi.getter
    def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterSnapshotBackupPolicyPolicyArgs']]]]:
        return pulumi.get(self, "policies")
    @policies.setter
    def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterSnapshotBackupPolicyPolicyArgs']]]]):
        pulumi.set(self, "policies", value)
    @property
    @pulumi.getter(name="referenceHourOfDay")
    def reference_hour_of_day(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "reference_hour_of_day")
    @reference_hour_of_day.setter
    def reference_hour_of_day(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "reference_hour_of_day", value)
    @property
    @pulumi.getter(name="referenceMinuteOfHour")
    def reference_minute_of_hour(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "reference_minute_of_hour")
    @reference_minute_of_hour.setter
    def reference_minute_of_hour(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "reference_minute_of_hour", value)
    @property
    @pulumi.getter(name="restoreWindowDays")
    def restore_window_days(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "restore_window_days")
    @restore_window_days.setter
    def restore_window_days(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "restore_window_days", value)
    @property
    @pulumi.getter(name="updateSnapshots")
    def update_snapshots(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "update_snapshots")
    @update_snapshots.setter
    def update_snapshots(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "update_snapshots", value)
@pulumi.input_type
class ClusterSnapshotBackupPolicyPolicyArgs:
    """One policy document inside a cluster's snapshot backup policy."""
    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 policy_items: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterSnapshotBackupPolicyPolicyPolicyItemArgs']]]] = None):
        """
        :param pulumi.Input[str] id: Unique identifier of the replication document for a zone in a Global Cluster. (NOTE(review): this description looks copy-pasted from the replication-spec `id`; for a backup policy it is presumably the policy document id — confirm against the Atlas API.)
        """
        if id is not None:
            pulumi.set(__self__, "id", id)
        if policy_items is not None:
            pulumi.set(__self__, "policy_items", policy_items)
    # NOTE: property name shadows the builtin `id` to match the provider schema.
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Unique identifier of the replication document for a zone in a Global Cluster. (NOTE(review): description likely mis-copied by the generator; see __init__.)
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="policyItems")
    def policy_items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterSnapshotBackupPolicyPolicyPolicyItemArgs']]]]:
        return pulumi.get(self, "policy_items")
    @policy_items.setter
    def policy_items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterSnapshotBackupPolicyPolicyPolicyItemArgs']]]]):
        pulumi.set(self, "policy_items", value)
@pulumi.input_type
class ClusterSnapshotBackupPolicyPolicyPolicyItemArgs:
    """
    One item of a snapshot backup policy: how often snapshots are taken
    (frequency interval/type) and how long they are kept (retention unit/value).
    NOTE(review): field semantics inferred from names — confirm against the
    Atlas Cloud Backup schedule API.
    """
    def __init__(__self__, *,
                 frequency_interval: Optional[pulumi.Input[int]] = None,
                 frequency_type: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 retention_unit: Optional[pulumi.Input[str]] = None,
                 retention_value: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] id: Unique identifier of the replication document for a zone in a Global Cluster. (NOTE(review): description looks mis-copied by the generator; presumably the policy item id — confirm.)
        """
        if frequency_interval is not None:
            pulumi.set(__self__, "frequency_interval", frequency_interval)
        if frequency_type is not None:
            pulumi.set(__self__, "frequency_type", frequency_type)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if retention_unit is not None:
            pulumi.set(__self__, "retention_unit", retention_unit)
        if retention_value is not None:
            pulumi.set(__self__, "retention_value", retention_value)
    @property
    @pulumi.getter(name="frequencyInterval")
    def frequency_interval(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "frequency_interval")
    @frequency_interval.setter
    def frequency_interval(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "frequency_interval", value)
    @property
    @pulumi.getter(name="frequencyType")
    def frequency_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "frequency_type")
    @frequency_type.setter
    def frequency_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "frequency_type", value)
    # NOTE: property name shadows the builtin `id` to match the provider schema.
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Unique identifier of the replication document for a zone in a Global Cluster. (NOTE(review): description likely mis-copied by the generator; see __init__.)
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="retentionUnit")
    def retention_unit(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "retention_unit")
    @retention_unit.setter
    def retention_unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "retention_unit", value)
    @property
    @pulumi.getter(name="retentionValue")
    def retention_value(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "retention_value")
    @retention_value.setter
    def retention_value(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retention_value", value)
@pulumi.input_type
class CustomDbRoleActionArgs:
    """
    A single privilege action granted by a custom database role, together with
    the resources (database/collection pairs, or the whole cluster) it applies to.
    """
    def __init__(__self__, *,
                 action: pulumi.Input[str],
                 resources: pulumi.Input[Sequence[pulumi.Input['CustomDbRoleActionResourceArgs']]]):
        """
        :param pulumi.Input[str] action: Name of the privilege action. For a complete list of actions available in the Atlas API, see [Custom Role Actions](https://docs.atlas.mongodb.com/reference/api/custom-role-actions)
               > **Note**: The privilege actions available to the Custom Roles API resource represent a subset of the privilege actions available in the Atlas Custom Roles UI.
        :param pulumi.Input[Sequence[pulumi.Input['CustomDbRoleActionResourceArgs']]] resources: Contains information on where the action is granted. Each object in the array either indicates a database and collection on which the action is granted, or indicates that the action is granted on the cluster resource.
        """
        # Both fields are mandatory; store them in a single pass.
        for attr_name, attr_value in (("action", action), ("resources", resources)):
            pulumi.set(__self__, attr_name, attr_value)
    @property
    @pulumi.getter
    def action(self) -> pulumi.Input[str]:
        """
        Name of the privilege action; see the Atlas [Custom Role Actions](https://docs.atlas.mongodb.com/reference/api/custom-role-actions) list for valid values (a subset of what the Atlas UI offers).
        """
        return pulumi.get(self, "action")
    @action.setter
    def action(self, new_action: pulumi.Input[str]):
        pulumi.set(self, "action", new_action)
    @property
    @pulumi.getter
    def resources(self) -> pulumi.Input[Sequence[pulumi.Input['CustomDbRoleActionResourceArgs']]]:
        """
        Where the action is granted: each entry names a database and collection, or marks the grant as cluster-wide.
        """
        return pulumi.get(self, "resources")
    @resources.setter
    def resources(self, new_resources: pulumi.Input[Sequence[pulumi.Input['CustomDbRoleActionResourceArgs']]]):
        pulumi.set(self, "resources", new_resources)
@pulumi.input_type
class CustomDbRoleActionResourceArgs:
    """
    One resource a custom-role action applies to: a database/collection pair,
    or the whole cluster when `cluster` is set. (NOTE(review): mutual
    exclusivity of `cluster` vs. the name fields is presumably enforced by the
    Atlas API, not by this class — confirm.)
    """
    def __init__(__self__, *,
                 cluster: Optional[pulumi.Input[bool]] = None,
                 collection_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] database_name: Database on which the inherited role is granted.
        """
        if cluster is not None:
            pulumi.set(__self__, "cluster", cluster)
        if collection_name is not None:
            pulumi.set(__self__, "collection_name", collection_name)
        if database_name is not None:
            pulumi.set(__self__, "database_name", database_name)
    @property
    @pulumi.getter
    def cluster(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "cluster")
    @cluster.setter
    def cluster(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cluster", value)
    @property
    @pulumi.getter(name="collectionName")
    def collection_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "collection_name")
    @collection_name.setter
    def collection_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "collection_name", value)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> Optional[pulumi.Input[str]]:
        """
        Database on which the inherited role is granted.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database_name", value)
@pulumi.input_type
class CustomDbRoleInheritedRoleArgs:
    """A (database, role) pair a custom database role inherits privileges from."""
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 role_name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] database_name: Database on which the inherited role is granted.
        :param pulumi.Input[str] role_name: Name of the inherited role. This can either be another custom role or a built-in role.
        """
        # Both fields are required; persist them together.
        for attr_name, attr_value in (("database_name", database_name), ("role_name", role_name)):
            pulumi.set(__self__, attr_name, attr_value)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        Database on which the inherited role is granted.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, new_name: pulumi.Input[str]):
        pulumi.set(self, "database_name", new_name)
    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> pulumi.Input[str]:
        """
        Name of the inherited role — either another custom role or a built-in role.
        """
        return pulumi.get(self, "role_name")
    @role_name.setter
    def role_name(self, new_name: pulumi.Input[str]):
        pulumi.set(self, "role_name", new_name)
@pulumi.input_type
class DataLakeAwsArgs:
    """
    AWS configuration for an Atlas Data Lake. `role_id` and `test_s3_bucket`
    are required; the remaining fields are optional. (NOTE(review): field
    semantics inferred from names — `role_id` presumably refers to an Atlas
    cloud-provider-access role and `external_id`/`iam_*_arn` look like
    computed outputs; confirm against the provider schema.)
    """
    def __init__(__self__, *,
                 role_id: pulumi.Input[str],
                 test_s3_bucket: pulumi.Input[str],
                 external_id: Optional[pulumi.Input[str]] = None,
                 iam_assumed_role_arn: Optional[pulumi.Input[str]] = None,
                 iam_user_arn: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "role_id", role_id)
        pulumi.set(__self__, "test_s3_bucket", test_s3_bucket)
        if external_id is not None:
            pulumi.set(__self__, "external_id", external_id)
        if iam_assumed_role_arn is not None:
            pulumi.set(__self__, "iam_assumed_role_arn", iam_assumed_role_arn)
        if iam_user_arn is not None:
            pulumi.set(__self__, "iam_user_arn", iam_user_arn)
    @property
    @pulumi.getter(name="roleId")
    def role_id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "role_id")
    @role_id.setter
    def role_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "role_id", value)
    @property
    @pulumi.getter(name="testS3Bucket")
    def test_s3_bucket(self) -> pulumi.Input[str]:
        return pulumi.get(self, "test_s3_bucket")
    @test_s3_bucket.setter
    def test_s3_bucket(self, value: pulumi.Input[str]):
        pulumi.set(self, "test_s3_bucket", value)
    @property
    @pulumi.getter(name="externalId")
    def external_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "external_id")
    @external_id.setter
    def external_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_id", value)
    @property
    @pulumi.getter(name="iamAssumedRoleArn")
    def iam_assumed_role_arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "iam_assumed_role_arn")
    @iam_assumed_role_arn.setter
    def iam_assumed_role_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "iam_assumed_role_arn", value)
    @property
    @pulumi.getter(name="iamUserArn")
    def iam_user_arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "iam_user_arn")
    @iam_user_arn.setter
    def iam_user_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "iam_user_arn", value)
@pulumi.input_type
class DataLakeDataProcessRegionArgs:
    """Cloud provider and region pair for an Atlas Data Lake data-process region."""
    def __init__(__self__, *,
                 cloud_provider: pulumi.Input[str],
                 region: pulumi.Input[str]):
        # Both values are required; write them in a single pass.
        for attr_name, attr_value in (("cloud_provider", cloud_provider), ("region", region)):
            pulumi.set(__self__, attr_name, attr_value)
    @property
    @pulumi.getter(name="cloudProvider")
    def cloud_provider(self) -> pulumi.Input[str]:
        # NOTE(review): valid provider values are not visible here — confirm against the Atlas API.
        return pulumi.get(self, "cloud_provider")
    @cloud_provider.setter
    def cloud_provider(self, new_provider: pulumi.Input[str]):
        pulumi.set(self, "cloud_provider", new_provider)
    @property
    @pulumi.getter
    def region(self) -> pulumi.Input[str]:
        return pulumi.get(self, "region")
    @region.setter
    def region(self, new_region: pulumi.Input[str]):
        pulumi.set(self, "region", new_region)
@pulumi.input_type
class DataLakeStorageDatabaseArgs:
    """
    A virtual database in an Atlas Data Lake storage configuration: its
    collections, views, and wildcard-collection limit.
    """
    def __init__(__self__, *,
                 collections: Optional[pulumi.Input[Sequence[pulumi.Input['DataLakeStorageDatabaseCollectionArgs']]]] = None,
                 max_wildcard_collections: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 views: Optional[pulumi.Input[Sequence[pulumi.Input['DataLakeStorageDatabaseViewArgs']]]] = None):
        """
        :param pulumi.Input[str] name: Name of the Atlas Data Lake. (NOTE(review): in this nested type this is presumably the *database* name; the description looks copied from the parent resource — confirm.)
        """
        if collections is not None:
            pulumi.set(__self__, "collections", collections)
        if max_wildcard_collections is not None:
            pulumi.set(__self__, "max_wildcard_collections", max_wildcard_collections)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if views is not None:
            pulumi.set(__self__, "views", views)
    @property
    @pulumi.getter
    def collections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataLakeStorageDatabaseCollectionArgs']]]]:
        return pulumi.get(self, "collections")
    @collections.setter
    def collections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DataLakeStorageDatabaseCollectionArgs']]]]):
        pulumi.set(self, "collections", value)
    @property
    @pulumi.getter(name="maxWildcardCollections")
    def max_wildcard_collections(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max_wildcard_collections")
    @max_wildcard_collections.setter
    def max_wildcard_collections(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_wildcard_collections", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the Atlas Data Lake. (NOTE(review): likely the database name — see __init__.)
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def views(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataLakeStorageDatabaseViewArgs']]]]:
        return pulumi.get(self, "views")
    @views.setter
    def views(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DataLakeStorageDatabaseViewArgs']]]]):
        pulumi.set(self, "views", value)
@pulumi.input_type
class DataLakeStorageDatabaseCollectionArgs:
    """
    A virtual collection in a Data Lake storage database, mapping a collection
    name to one or more backing data sources.
    """
    def __init__(__self__, *,
                 data_sources: Optional[pulumi.Input[Sequence[pulumi.Input['DataLakeStorageDatabaseCollectionDataSourceArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: Name of the Atlas Data Lake. (NOTE(review): presumably the *collection* name here; description looks copied from the parent resource — confirm.)
        """
        if data_sources is not None:
            pulumi.set(__self__, "data_sources", data_sources)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="dataSources")
    def data_sources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataLakeStorageDatabaseCollectionDataSourceArgs']]]]:
        return pulumi.get(self, "data_sources")
    @data_sources.setter
    def data_sources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DataLakeStorageDatabaseCollectionDataSourceArgs']]]]):
        pulumi.set(self, "data_sources", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the Atlas Data Lake. (NOTE(review): likely the collection name — see __init__.)
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class DataLakeStorageDatabaseCollectionDataSourceArgs:
    """
    One data source backing a Data Lake virtual collection: a store name, an
    optional path within the store, and an optional default file format.
    (NOTE(review): field semantics inferred from names — confirm against the
    Atlas Data Lake storage configuration docs.)
    """
    def __init__(__self__, *,
                 default_format: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 store_name: Optional[pulumi.Input[str]] = None):
        if default_format is not None:
            pulumi.set(__self__, "default_format", default_format)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if store_name is not None:
            pulumi.set(__self__, "store_name", store_name)
    @property
    @pulumi.getter(name="defaultFormat")
    def default_format(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "default_format")
    @default_format.setter
    def default_format(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_format", value)
    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "path")
    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)
    @property
    @pulumi.getter(name="storeName")
    def store_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "store_name")
    @store_name.setter
    def store_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "store_name", value)
@pulumi.input_type
class DataLakeStorageDatabaseViewArgs:
    """
    A view in a Data Lake storage database: a name, a source, and a pipeline.
    (NOTE(review): `pipeline` is presumably an aggregation pipeline applied to
    `source` — confirm against the Atlas Data Lake docs.)
    """
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 pipeline: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: Name of the Atlas Data Lake. (NOTE(review): presumably the *view* name here; description looks copied from the parent resource — confirm.)
        """
        if name is not None:
            pulumi.set(__self__, "name", name)
        if pipeline is not None:
            pulumi.set(__self__, "pipeline", pipeline)
        if source is not None:
            pulumi.set(__self__, "source", source)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the Atlas Data Lake. (NOTE(review): likely the view name — see __init__.)
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def pipeline(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "pipeline")
    @pipeline.setter
    def pipeline(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pipeline", value)
    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "source")
    @source.setter
    def source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source", value)
@pulumi.input_type
class DataLakeStorageStoreArgs:
    """
    A backing store for an Atlas Data Lake: provider, region, bucket, and
    object-selection options (prefix, delimiter, storage classes, tags).
    (NOTE(review): field semantics inferred from names; the bucket/prefix
    fields suggest an S3-style object store — confirm against the Atlas Data
    Lake storage configuration docs.)
    """
    def __init__(__self__, *,
                 additional_storage_classes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 bucket: Optional[pulumi.Input[str]] = None,
                 delimiter: Optional[pulumi.Input[str]] = None,
                 include_tags: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 prefix: Optional[pulumi.Input[str]] = None,
                 provider: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: Name of the Atlas Data Lake. (NOTE(review): presumably the *store* name here; description looks copied from the parent resource — confirm.)
        """
        if additional_storage_classes is not None:
            pulumi.set(__self__, "additional_storage_classes", additional_storage_classes)
        if bucket is not None:
            pulumi.set(__self__, "bucket", bucket)
        if delimiter is not None:
            pulumi.set(__self__, "delimiter", delimiter)
        if include_tags is not None:
            pulumi.set(__self__, "include_tags", include_tags)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if prefix is not None:
            pulumi.set(__self__, "prefix", prefix)
        if provider is not None:
            pulumi.set(__self__, "provider", provider)
        if region is not None:
            pulumi.set(__self__, "region", region)
    @property
    @pulumi.getter(name="additionalStorageClasses")
    def additional_storage_classes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "additional_storage_classes")
    @additional_storage_classes.setter
    def additional_storage_classes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "additional_storage_classes", value)
    @property
    @pulumi.getter
    def bucket(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "bucket")
    @bucket.setter
    def bucket(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bucket", value)
    @property
    @pulumi.getter
    def delimiter(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "delimiter")
    @delimiter.setter
    def delimiter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "delimiter", value)
    @property
    @pulumi.getter(name="includeTags")
    def include_tags(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "include_tags")
    @include_tags.setter
    def include_tags(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "include_tags", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the Atlas Data Lake. (NOTE(review): likely the store name — see __init__.)
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def prefix(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "prefix")
    @prefix.setter
    def prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "prefix", value)
    @property
    @pulumi.getter
    def provider(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "provider")
    @provider.setter
    def provider(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provider", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
@pulumi.input_type
class DatabaseUserLabelArgs:
    """A single key/value tag attached to a database user."""
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] key: The key that you want to write.
        :param pulumi.Input[str] value: The value that you want to write.
        """
        # Record only the fields the caller actually supplied.
        for attr_name, attr_value in (("key", key), ("value", value)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        The key that you want to write.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, new_key: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", new_key)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The value that you want to write.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", new_value)
@pulumi.input_type
class DatabaseUserRoleArgs:
    """A role granted to a database user: role name, target database, and optional collection."""
    def __init__(__self__, *,
                 collection_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 role_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] collection_name: Collection for which the role applies. You can specify a collection for the `read` and `readWrite` roles. If you do not specify a collection for `read` and `readWrite`, the role applies to all collections in the database (excluding some collections in the `system`. database).
        :param pulumi.Input[str] database_name: Database on which the user has the specified role. A role on the `admin` database can include privileges that apply to the other databases.
        :param pulumi.Input[str] role_name: Name of the role to grant. See [Create a Database User](https://docs.atlas.mongodb.com/reference/api/database-users-create-a-user/) `roles.roleName` for valid values and restrictions.
        """
        if collection_name is not None:
            pulumi.set(__self__, "collection_name", collection_name)
        if database_name is not None:
            pulumi.set(__self__, "database_name", database_name)
        if role_name is not None:
            pulumi.set(__self__, "role_name", role_name)
    @property
    @pulumi.getter(name="collectionName")
    def collection_name(self) -> Optional[pulumi.Input[str]]:
        """
        Collection for which the role applies. You can specify a collection for the `read` and `readWrite` roles. If you do not specify a collection for `read` and `readWrite`, the role applies to all collections in the database (excluding some collections in the `system`. database).
        """
        return pulumi.get(self, "collection_name")
    @collection_name.setter
    def collection_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "collection_name", value)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> Optional[pulumi.Input[str]]:
        """
        Database on which the user has the specified role. A role on the `admin` database can include privileges that apply to the other databases.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the role to grant. See [Create a Database User](https://docs.atlas.mongodb.com/reference/api/database-users-create-a-user/) `roles.roleName` for valid values and restrictions.
        """
        return pulumi.get(self, "role_name")
    @role_name.setter
    def role_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "role_name", value)
@pulumi.input_type
class DatabaseUserScopeArgs:
    """Scope restricting a database user to a specific cluster or Atlas Data Lake."""
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: Name of the cluster or Atlas Data Lake that the user has access to.
        :param pulumi.Input[str] type: Type of resource that the user has access to. Valid values are: `CLUSTER` and `DATA_LAKE`
        """
        # Record only the arguments the caller actually supplied, in
        # declaration order, so unset fields remain absent.
        for attr, supplied in (("name", name), ("type", type)):
            if supplied is not None:
                pulumi.set(__self__, attr, supplied)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the cluster or Atlas Data Lake that the user has access to.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of resource that the user has access to. Valid values are: `CLUSTER` and `DATA_LAKE`
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", new_value)
@pulumi.input_type
class EncryptionAtRestAwsKmsConfigArgs:
    """Encryption-at-Rest configuration backed by an AWS KMS customer master key."""
    def __init__(__self__, *,
                 access_key_id: Optional[pulumi.Input[str]] = None,
                 customer_master_key_id: Optional[pulumi.Input[str]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 role_id: Optional[pulumi.Input[str]] = None,
                 secret_access_key: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] customer_master_key_id: The AWS customer master key used to encrypt and decrypt the MongoDB master keys.
        :param pulumi.Input[bool] enabled: Specifies whether Encryption at Rest is enabled for an Atlas project. To disable Encryption at Rest, pass only this parameter with a value of false. When you disable Encryption at Rest, Atlas also removes the configuration details.
        :param pulumi.Input[str] region: The AWS region in which the AWS customer master key exists: CA_CENTRAL_1, US_EAST_1, US_EAST_2, US_WEST_1, US_WEST_2, SA_EAST_1
        :param pulumi.Input[str] role_id: ID of an AWS IAM role authorized to manage an AWS customer master key. To find the ID for an existing IAM role check the `role_id` attribute of the `CloudProviderAccess` resource.
        """
        # Only explicitly supplied arguments are recorded; omitted ones stay unset.
        if access_key_id is not None:
            pulumi.set(__self__, "access_key_id", access_key_id)
        if customer_master_key_id is not None:
            pulumi.set(__self__, "customer_master_key_id", customer_master_key_id)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if role_id is not None:
            pulumi.set(__self__, "role_id", role_id)
        if secret_access_key is not None:
            pulumi.set(__self__, "secret_access_key", secret_access_key)
    @property
    @pulumi.getter(name="accessKeyId")
    def access_key_id(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented upstream — presumably the AWS access key
        # ID of a credential pair (alternative to role_id); confirm.
        return pulumi.get(self, "access_key_id")
    @access_key_id.setter
    def access_key_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "access_key_id", value)
    @property
    @pulumi.getter(name="customerMasterKeyId")
    def customer_master_key_id(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS customer master key used to encrypt and decrypt the MongoDB master keys.
        """
        return pulumi.get(self, "customer_master_key_id")
    @customer_master_key_id.setter
    def customer_master_key_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "customer_master_key_id", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether Encryption at Rest is enabled for an Atlas project. To disable Encryption at Rest, pass only this parameter with a value of false. When you disable Encryption at Rest, Atlas also removes the configuration details.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS region in which the AWS customer master key exists: CA_CENTRAL_1, US_EAST_1, US_EAST_2, US_WEST_1, US_WEST_2, SA_EAST_1
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "region", value)
    @property
    @pulumi.getter(name="roleId")
    def role_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of an AWS IAM role authorized to manage an AWS customer master key. To find the ID for an existing IAM role check the `role_id` attribute of the `CloudProviderAccess` resource.
        """
        return pulumi.get(self, "role_id")
    @role_id.setter
    def role_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "role_id", value)
    @property
    @pulumi.getter(name="secretAccessKey")
    def secret_access_key(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented upstream — presumably the secret half of
        # the AWS credential pair (sensitive); confirm.
        return pulumi.get(self, "secret_access_key")
    @secret_access_key.setter
    def secret_access_key(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "secret_access_key", value)
@pulumi.input_type
class EncryptionAtRestAzureKeyVaultConfigArgs:
    """Encryption-at-Rest configuration backed by an Azure Key Vault key. `enabled` is the only required field."""
    def __init__(__self__, *,
                 enabled: pulumi.Input[bool],
                 azure_environment: Optional[pulumi.Input[str]] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 key_identifier: Optional[pulumi.Input[str]] = None,
                 key_vault_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 secret: Optional[pulumi.Input[str]] = None,
                 subscription_id: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[bool] enabled: Specifies whether Encryption at Rest is enabled for an Atlas project. To disable Encryption at Rest, pass only this parameter with a value of false. When you disable Encryption at Rest, Atlas also removes the configuration details.
        :param pulumi.Input[str] azure_environment: The Azure environment where the Azure account credentials reside. Valid values are the following: AZURE, AZURE_CHINA, AZURE_GERMANY
        :param pulumi.Input[str] client_id: The client ID, also known as the application ID, for an Azure application associated with the Azure AD tenant.
        :param pulumi.Input[str] key_identifier: The unique identifier of a key in an Azure Key Vault.
        :param pulumi.Input[str] key_vault_name: The name of an Azure Key Vault containing your key.
        :param pulumi.Input[str] resource_group_name: The name of the Azure Resource group that contains an Azure Key Vault.
        :param pulumi.Input[str] secret: The secret associated with the Azure Key Vault specified by azureKeyVault.tenantID.
        :param pulumi.Input[str] subscription_id: The unique identifier associated with an Azure subscription.
        :param pulumi.Input[str] tenant_id: The unique identifier for an Azure AD tenant within an Azure subscription.
        """
        # `enabled` is required and stored unconditionally; every other
        # argument is stored only when explicitly supplied.
        pulumi.set(__self__, "enabled", enabled)
        if azure_environment is not None:
            pulumi.set(__self__, "azure_environment", azure_environment)
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if key_identifier is not None:
            pulumi.set(__self__, "key_identifier", key_identifier)
        if key_vault_name is not None:
            pulumi.set(__self__, "key_vault_name", key_vault_name)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if secret is not None:
            pulumi.set(__self__, "secret", secret)
        if subscription_id is not None:
            pulumi.set(__self__, "subscription_id", subscription_id)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)
    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[bool]:
        """
        Specifies whether Encryption at Rest is enabled for an Atlas project. To disable Encryption at Rest, pass only this parameter with a value of false. When you disable Encryption at Rest, Atlas also removes the configuration details.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: pulumi.Input[bool]) -> None:
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="azureEnvironment")
    def azure_environment(self) -> Optional[pulumi.Input[str]]:
        """
        The Azure environment where the Azure account credentials reside. Valid values are the following: AZURE, AZURE_CHINA, AZURE_GERMANY
        """
        return pulumi.get(self, "azure_environment")
    @azure_environment.setter
    def azure_environment(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "azure_environment", value)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        """
        The client ID, also known as the application ID, for an Azure application associated with the Azure AD tenant.
        """
        return pulumi.get(self, "client_id")
    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "client_id", value)
    @property
    @pulumi.getter(name="keyIdentifier")
    def key_identifier(self) -> Optional[pulumi.Input[str]]:
        """
        The unique identifier of a key in an Azure Key Vault.
        """
        return pulumi.get(self, "key_identifier")
    @key_identifier.setter
    def key_identifier(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "key_identifier", value)
    @property
    @pulumi.getter(name="keyVaultName")
    def key_vault_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of an Azure Key Vault containing your key.
        """
        return pulumi.get(self, "key_vault_name")
    @key_vault_name.setter
    def key_vault_name(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "key_vault_name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Azure Resource group that contains an Azure Key Vault.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def secret(self) -> Optional[pulumi.Input[str]]:
        """
        The secret associated with the Azure Key Vault specified by azureKeyVault.tenantID.
        """
        return pulumi.get(self, "secret")
    @secret.setter
    def secret(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "secret", value)
    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[pulumi.Input[str]]:
        """
        The unique identifier associated with an Azure subscription.
        """
        return pulumi.get(self, "subscription_id")
    @subscription_id.setter
    def subscription_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "subscription_id", value)
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        The unique identifier for an Azure AD tenant within an Azure subscription.
        """
        return pulumi.get(self, "tenant_id")
    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class EncryptionAtRestGoogleCloudKmsConfigArgs:
    """Encryption-at-Rest configuration backed by Google Cloud KMS."""
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 key_version_resource_id: Optional[pulumi.Input[str]] = None,
                 service_account_key: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[bool] enabled: Specifies whether Encryption at Rest is enabled for an Atlas project. To disable Encryption at Rest, pass only this parameter with a value of false. When you disable Encryption at Rest, Atlas also removes the configuration details.
        :param pulumi.Input[str] key_version_resource_id: The Key Version Resource ID from your GCP account.
        :param pulumi.Input[str] service_account_key: String-formatted JSON object containing GCP KMS credentials from your GCP account.
        """
        # Only explicitly supplied arguments are recorded; omitted ones stay unset.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if key_version_resource_id is not None:
            pulumi.set(__self__, "key_version_resource_id", key_version_resource_id)
        if service_account_key is not None:
            pulumi.set(__self__, "service_account_key", service_account_key)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether Encryption at Rest is enabled for an Atlas project. To disable Encryption at Rest, pass only this parameter with a value of false. When you disable Encryption at Rest, Atlas also removes the configuration details.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="keyVersionResourceId")
    def key_version_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Key Version Resource ID from your GCP account.
        """
        return pulumi.get(self, "key_version_resource_id")
    @key_version_resource_id.setter
    def key_version_resource_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "key_version_resource_id", value)
    @property
    @pulumi.getter(name="serviceAccountKey")
    def service_account_key(self) -> Optional[pulumi.Input[str]]:
        """
        String-formatted JSON object containing GCP KMS credentials from your GCP account.
        """
        return pulumi.get(self, "service_account_key")
    @service_account_key.setter
    def service_account_key(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "service_account_key", value)
@pulumi.input_type
class EventTriggerEventProcessorsArgs:
    """Event processors attached to an event trigger; only an AWS EventBridge processor is represented here."""
    def __init__(__self__, *,
                 aws_eventbridge: Optional[pulumi.Input['EventTriggerEventProcessorsAwsEventbridgeArgs']] = None):
        # Stored only when supplied so an unset processor stays absent.
        if aws_eventbridge is not None:
            pulumi.set(__self__, "aws_eventbridge", aws_eventbridge)
    @property
    @pulumi.getter(name="awsEventbridge")
    def aws_eventbridge(self) -> Optional[pulumi.Input['EventTriggerEventProcessorsAwsEventbridgeArgs']]:
        return pulumi.get(self, "aws_eventbridge")
    @aws_eventbridge.setter
    def aws_eventbridge(self, value: Optional[pulumi.Input['EventTriggerEventProcessorsAwsEventbridgeArgs']]) -> None:
        pulumi.set(self, "aws_eventbridge", value)
@pulumi.input_type
class EventTriggerEventProcessorsAwsEventbridgeArgs:
    """AWS EventBridge processor settings for an event trigger.

    NOTE(review): fields are undocumented upstream — per their names,
    presumably the AWS account ID and region of the EventBridge target;
    confirm against the Atlas Triggers documentation.
    """
    def __init__(__self__, *,
                 config_account_id: Optional[pulumi.Input[str]] = None,
                 config_region: Optional[pulumi.Input[str]] = None):
        # Only explicitly supplied arguments are recorded.
        if config_account_id is not None:
            pulumi.set(__self__, "config_account_id", config_account_id)
        if config_region is not None:
            pulumi.set(__self__, "config_region", config_region)
    @property
    @pulumi.getter(name="configAccountId")
    def config_account_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "config_account_id")
    @config_account_id.setter
    def config_account_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "config_account_id", value)
    @property
    @pulumi.getter(name="configRegion")
    def config_region(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "config_region")
    @config_region.setter
    def config_region(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "config_region", value)
@pulumi.input_type
class GlobalClusterConfigCustomZoneMappingArgs:
    """Maps an ISO location code to a zone of a Global Cluster."""
    def __init__(__self__, *,
                 location: Optional[pulumi.Input[str]] = None,
                 zone: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] location: The ISO location code to which you want to map a zone in your Global Cluster. You can find a list of all supported location codes [here](https://cloud.mongodb.com/static/atlas/country_iso_codes.txt).
        :param pulumi.Input[str] zone: The name of the zone in your Global Cluster that you want to map to location.
        """
        # Only explicitly supplied arguments are recorded.
        if location is not None:
            pulumi.set(__self__, "location", location)
        if zone is not None:
            pulumi.set(__self__, "zone", zone)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The ISO location code to which you want to map a zone in your Global Cluster. You can find a list of all supported location codes [here](https://cloud.mongodb.com/static/atlas/country_iso_codes.txt).
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def zone(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the zone in your Global Cluster that you want to map to location.
        """
        return pulumi.get(self, "zone")
    @zone.setter
    def zone(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "zone", value)
@pulumi.input_type
class GlobalClusterConfigManagedNamespaceArgs:
    """A managed namespace (db.collection plus custom shard key) of a Global Cluster. All fields are required."""
    def __init__(__self__, *,
                 collection: pulumi.Input[str],
                 custom_shard_key: pulumi.Input[str],
                 db: pulumi.Input[str]):
        """
        :param pulumi.Input[str] collection: The name of the collection associated with the managed namespace.
        :param pulumi.Input[str] custom_shard_key: The custom shard key for the collection. Global Clusters require a compound shard key consisting of a location field and a user-selected second key, the custom shard key.
        :param pulumi.Input[str] db: The name of the database containing the collection.
        """
        # All three fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "collection", collection)
        pulumi.set(__self__, "custom_shard_key", custom_shard_key)
        pulumi.set(__self__, "db", db)
    @property
    @pulumi.getter
    def collection(self) -> pulumi.Input[str]:
        """
        The name of the collection associated with the managed namespace.
        """
        return pulumi.get(self, "collection")
    @collection.setter
    def collection(self, value: pulumi.Input[str]) -> None:
        pulumi.set(self, "collection", value)
    @property
    @pulumi.getter(name="customShardKey")
    def custom_shard_key(self) -> pulumi.Input[str]:
        """
        The custom shard key for the collection. Global Clusters require a compound shard key consisting of a location field and a user-selected second key, the custom shard key.
        """
        return pulumi.get(self, "custom_shard_key")
    @custom_shard_key.setter
    def custom_shard_key(self, value: pulumi.Input[str]) -> None:
        pulumi.set(self, "custom_shard_key", value)
    @property
    @pulumi.getter
    def db(self) -> pulumi.Input[str]:
        """
        The name of the database containing the collection.
        """
        return pulumi.get(self, "db")
    @db.setter
    def db(self, value: pulumi.Input[str]) -> None:
        pulumi.set(self, "db", value)
@pulumi.input_type
class LdapConfigurationUserToDnMappingArgs:
    """A user-to-DN mapping rule for LDAP authentication.

    NOTE(review): fields are undocumented upstream — per their names,
    presumably a `match` pattern combined with either an LDAP query or a
    substitution template; confirm against the Atlas LDAP documentation.
    """
    def __init__(__self__, *,
                 ldap_query: Optional[pulumi.Input[str]] = None,
                 match: Optional[pulumi.Input[str]] = None,
                 substitution: Optional[pulumi.Input[str]] = None):
        # Only explicitly supplied arguments are recorded.
        if ldap_query is not None:
            pulumi.set(__self__, "ldap_query", ldap_query)
        if match is not None:
            pulumi.set(__self__, "match", match)
        if substitution is not None:
            pulumi.set(__self__, "substitution", substitution)
    @property
    @pulumi.getter(name="ldapQuery")
    def ldap_query(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ldap_query")
    @ldap_query.setter
    def ldap_query(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "ldap_query", value)
    @property
    @pulumi.getter
    def match(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "match")
    @match.setter
    def match(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "match", value)
    @property
    @pulumi.getter
    def substitution(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "substitution")
    @substitution.setter
    def substitution(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "substitution", value)
@pulumi.input_type
class LdapVerifyLinkArgs:
    """A link element (href/rel pair) attached to an LDAP verification result."""
    def __init__(__self__, *,
                 href: Optional[pulumi.Input[str]] = None,
                 rel: Optional[pulumi.Input[str]] = None):
        # Only explicitly supplied arguments are recorded.
        if href is not None:
            pulumi.set(__self__, "href", href)
        if rel is not None:
            pulumi.set(__self__, "rel", rel)
    @property
    @pulumi.getter
    def href(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "href")
    @href.setter
    def href(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "href", value)
    @property
    @pulumi.getter
    def rel(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "rel")
    @rel.setter
    def rel(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "rel", value)
@pulumi.input_type
class LdapVerifyValidationArgs:
    """A single validation result of an LDAP over TLS/SSL configuration check."""
    def __init__(__self__, *,
                 status: Optional[pulumi.Input[str]] = None,
                 validation_type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] status: The current status of the LDAP over TLS/SSL configuration. One of the following values: `PENDING`, `SUCCESS`, and `FAILED`.
        """
        # Only explicitly supplied arguments are recorded.
        if status is not None:
            pulumi.set(__self__, "status", status)
        if validation_type is not None:
            pulumi.set(__self__, "validation_type", validation_type)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The current status of the LDAP over TLS/SSL configuration. One of the following values: `PENDING`, `SUCCESS`, and `FAILED`.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter(name="validationType")
    def validation_type(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented upstream — presumably the kind of check
        # performed (e.g. connectivity vs. authentication); confirm.
        return pulumi.get(self, "validation_type")
    @validation_type.setter
    def validation_type(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "validation_type", value)
@pulumi.input_type
class OnlineArchiveCriteriaArgs:
    """Criteria selecting documents for an online archive.

    NOTE(review): fields are undocumented upstream — per their names,
    `type` presumably chooses between a date-based rule (date_field,
    date_format, expire_after_days) and a custom `query`; confirm against
    the Atlas Online Archive documentation.
    """
    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 date_field: Optional[pulumi.Input[str]] = None,
                 date_format: Optional[pulumi.Input[str]] = None,
                 expire_after_days: Optional[pulumi.Input[int]] = None,
                 query: Optional[pulumi.Input[str]] = None):
        # `type` is required and stored unconditionally; the remaining
        # arguments are stored only when explicitly supplied.
        pulumi.set(__self__, "type", type)
        if date_field is not None:
            pulumi.set(__self__, "date_field", date_field)
        if date_format is not None:
            pulumi.set(__self__, "date_format", date_format)
        if expire_after_days is not None:
            pulumi.set(__self__, "expire_after_days", expire_after_days)
        if query is not None:
            pulumi.set(__self__, "query", query)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]) -> None:
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="dateField")
    def date_field(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "date_field")
    @date_field.setter
    def date_field(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "date_field", value)
    @property
    @pulumi.getter(name="dateFormat")
    def date_format(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "date_format")
    @date_format.setter
    def date_format(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "date_format", value)
    @property
    @pulumi.getter(name="expireAfterDays")
    def expire_after_days(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "expire_after_days")
    @expire_after_days.setter
    def expire_after_days(self, value: Optional[pulumi.Input[int]]) -> None:
        pulumi.set(self, "expire_after_days", value)
    @property
    @pulumi.getter
    def query(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "query")
    @query.setter
    def query(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "query", value)
@pulumi.input_type
class OnlineArchivePartitionFieldArgs:
    """A partition field of an online archive: a field name plus its position (`order`); `field_type` is optional."""
    def __init__(__self__, *,
                 field_name: pulumi.Input[str],
                 order: pulumi.Input[int],
                 field_type: Optional[pulumi.Input[str]] = None):
        # field_name and order are required; field_type is stored only when supplied.
        pulumi.set(__self__, "field_name", field_name)
        pulumi.set(__self__, "order", order)
        if field_type is not None:
            pulumi.set(__self__, "field_type", field_type)
    @property
    @pulumi.getter(name="fieldName")
    def field_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "field_name")
    @field_name.setter
    def field_name(self, value: pulumi.Input[str]) -> None:
        pulumi.set(self, "field_name", value)
    @property
    @pulumi.getter
    def order(self) -> pulumi.Input[int]:
        return pulumi.get(self, "order")
    @order.setter
    def order(self, value: pulumi.Input[int]) -> None:
        pulumi.set(self, "order", value)
    @property
    @pulumi.getter(name="fieldType")
    def field_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "field_type")
    @field_type.setter
    def field_type(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "field_type", value)
@pulumi.input_type
class ProjectTeamArgs:
    """Associates a team with a project, granting every team member the listed project roles. Both fields are required."""
    def __init__(__self__, *,
                 role_names: pulumi.Input[Sequence[pulumi.Input[str]]],
                 team_id: pulumi.Input[str]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[str]]] role_names: Each string in the array represents a project role you want to assign to the team. Every user associated with the team inherits these roles. You must specify an array even if you are only associating a single role with the team.
        The following are valid roles:
        * `GROUP_OWNER`
        * `GROUP_READ_ONLY`
        * `GROUP_DATA_ACCESS_ADMIN`
        * `GROUP_DATA_ACCESS_READ_WRITE`
        * `GROUP_DATA_ACCESS_READ_ONLY`
        * `GROUP_CLUSTER_MANAGER`
        :param pulumi.Input[str] team_id: The unique identifier of the team you want to associate with the project. The team and project must share the same parent organization.
        """
        # Both fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "role_names", role_names)
        pulumi.set(__self__, "team_id", team_id)
    @property
    @pulumi.getter(name="roleNames")
    def role_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Each string in the array represents a project role you want to assign to the team. Every user associated with the team inherits these roles. You must specify an array even if you are only associating a single role with the team.
        The following are valid roles:
        * `GROUP_OWNER`
        * `GROUP_READ_ONLY`
        * `GROUP_DATA_ACCESS_ADMIN`
        * `GROUP_DATA_ACCESS_READ_WRITE`
        * `GROUP_DATA_ACCESS_READ_ONLY`
        * `GROUP_CLUSTER_MANAGER`
        """
        return pulumi.get(self, "role_names")
    @role_names.setter
    def role_names(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]) -> None:
        pulumi.set(self, "role_names", value)
    @property
    @pulumi.getter(name="teamId")
    def team_id(self) -> pulumi.Input[str]:
        """
        The unique identifier of the team you want to associate with the project. The team and project must share the same parent organization.
        """
        return pulumi.get(self, "team_id")
    @team_id.setter
    def team_id(self, value: pulumi.Input[str]) -> None:
        pulumi.set(self, "team_id", value)
@pulumi.input_type
class X509AuthenticationDatabaseUserCertificateArgs:
    """Metadata of an X.509 certificate issued to a database user.

    NOTE(review): fields are undocumented upstream — per their names,
    presumably creation timestamp, owning project (group) ID, numeric
    certificate ID, expiry (`not_after`) and certificate subject; confirm
    against the Atlas X.509 API documentation.
    """
    def __init__(__self__, *,
                 created_at: Optional[pulumi.Input[str]] = None,
                 group_id: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[int]] = None,
                 not_after: Optional[pulumi.Input[str]] = None,
                 subject: Optional[pulumi.Input[str]] = None):
        # Only explicitly supplied arguments are recorded.
        if created_at is not None:
            pulumi.set(__self__, "created_at", created_at)
        if group_id is not None:
            pulumi.set(__self__, "group_id", group_id)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if not_after is not None:
            pulumi.set(__self__, "not_after", not_after)
        if subject is not None:
            pulumi.set(__self__, "subject", subject)
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "created_at")
    @created_at.setter
    def created_at(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "created_at", value)
    @property
    @pulumi.getter(name="groupId")
    def group_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "group_id")
    @group_id.setter
    def group_id(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "group_id", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[int]]) -> None:
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="notAfter")
    def not_after(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "not_after")
    @not_after.setter
    def not_after(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "not_after", value)
    @property
    @pulumi.getter
    def subject(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "subject")
    @subject.setter
    def subject(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "subject", value)
@pulumi.input_type
class GetCustomDbRoleInheritedRoleArgs:
    """Identifies a role (by database and name) inherited by a custom DB role."""
    def __init__(__self__, *,
                 database_name: str,
                 role_name: str):
        """
        :param str role_name: Name of the custom role.
        """
        # Both fields are required, so they are stored unconditionally,
        # in declaration order.
        for attr, supplied in (("database_name", database_name),
                               ("role_name", role_name)):
            pulumi.set(__self__, attr, supplied)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> str:
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, new_value: str):
        pulumi.set(self, "database_name", new_value)
    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> str:
        """
        Name of the custom role.
        """
        return pulumi.get(self, "role_name")
    @role_name.setter
    def role_name(self, new_value: str):
        pulumi.set(self, "role_name", new_value)
@pulumi.input_type
class GetGlobalClusterConfigManagedNamespaceArgs:
    """Managed namespace (db.collection plus custom shard key) of a Global Cluster, as used by the data source. All fields are required."""
    def __init__(__self__, *,
                 collection: str,
                 custom_shard_key: str,
                 db: str):
        """
        :param str collection: (Required) The name of the collection associated with the managed namespace.
        :param str custom_shard_key: (Required) The custom shard key for the collection. Global Clusters require a compound shard key consisting of a location field and a user-selected second key, the custom shard key.
        :param str db: (Required) The name of the database containing the collection.
        """
        # All three fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "collection", collection)
        pulumi.set(__self__, "custom_shard_key", custom_shard_key)
        pulumi.set(__self__, "db", db)
    @property
    @pulumi.getter
    def collection(self) -> str:
        """
        (Required) The name of the collection associated with the managed namespace.
        """
        return pulumi.get(self, "collection")
    @collection.setter
    def collection(self, value: str) -> None:
        pulumi.set(self, "collection", value)
    @property
    @pulumi.getter(name="customShardKey")
    def custom_shard_key(self) -> str:
        """
        (Required) The custom shard key for the collection. Global Clusters require a compound shard key consisting of a location field and a user-selected second key, the custom shard key.
        """
        return pulumi.get(self, "custom_shard_key")
    @custom_shard_key.setter
    def custom_shard_key(self, value: str) -> None:
        pulumi.set(self, "custom_shard_key", value)
    @property
    @pulumi.getter
    def db(self) -> str:
        """
        (Required) The name of the database containing the collection.
        """
        return pulumi.get(self, "db")
    @db.setter
    def db(self, value: str) -> None:
        pulumi.set(self, "db", value)
| StarcoderdataPython |
class Solution:
    def XXX(self, head: ListNode, n: int) -> ListNode:
        """Remove the n-th node from the end of a singly linked list and return the head."""
        # Collect every node so the target can be addressed by index.
        nodes = []
        node = head
        while node is not None:
            nodes.append(node)
            node = node.next
        # Removing the very first node: the new head is simply its successor.
        if n == len(nodes):
            return head.next
        # Otherwise splice the target out by relinking its predecessor.
        nodes[-n - 1].next = nodes[-n - 1].next.next
        return head
| StarcoderdataPython |
5061808 | <reponame>jonfisik/ScriptsPython
'''Exercício Python 111: Crie um pacote chamado utilidadesCeV que tenha dois módulos internos chamados moeda e dado. Transfira todas as funções utilizadas nos desafios 107, 108 e 109 para o primeiro pacote e mantenha tudo funcionando.'''
import moeda
#from moeda import metade, dobro, aumentar
# NOTE(review): `p` starts as the empty string and is interpolated through
# moeda.moeda() inside the prompt; if moeda.moeda() applies a numeric format
# to its argument this first call will raise — confirm that moeda.moeda('')
# is handled by the utilidadesCeV module.
p = ''
p = float(input(f'Digite o preço - {moeda.moeda(p)}'.center(30)))
# Print the price summary; 10/2 and 16/2 are presumably the discount and
# raise percentages expected by moeda.resumo() — confirm against exercise 110.
moeda.resumo(p, 10/2, 16/2)
336883 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import datetime
from django.test.testcases import TestCase
from ..models import News
from .factories import NewsFactory
from freezegun import freeze_time
# Reference "now" for all fixtures; freezegun pins the clock to this date.
today = datetime.date(2019, 3, 23)
@freeze_time(today)
class NewsManagerTestCase(TestCase):
    """Tests for the custom ``News`` manager's publication-window queries,
    with time frozen at 2019-03-23 so all date comparisons are deterministic.
    """
    def setUp(self):
        # Fixtures expected to be published: active status, with start dates
        # on/before today and end dates absent or on/after today.
        # published
        self.active = NewsFactory.create_batch(3, title='active')
        self.active_and_published = NewsFactory.create_batch(3, title='active_and_published', start=datetime.date(2019, 3, 22), end=None)
        self.active_and_published_with_end = NewsFactory.create_batch(3, title='active_and_published_with_end', end=datetime.date(2019, 3, 23))
        self.active_and_published_start_is_today = NewsFactory.create_batch(3, title='active_and_published_start_is_today', start=datetime.date(2019, 3, 23), end=None)
        self.active_and_published_end_is_today = NewsFactory.create_batch(3, title='active_and_published_end_is_today', start=datetime.date(2019, 3, 22), end=datetime.date(2019, 3, 23))
        # Fixtures expected NOT to be published: inactive, future start, or past end.
        # not published
        self.inactive = NewsFactory.create_batch(2, title='inactive', status=News.INACTIVE)
        self.active_but_not_yet_published = NewsFactory.create_batch(2, title='active_but_not_yet_published', start=datetime.date(2019, 3, 24), end=datetime.date(2019, 3, 27))
        self.active_but_not_yet_published_no_end = NewsFactory.create_batch(2, title='active_but_not_yet_published_no_end', start=datetime.date(2019, 3, 24), end=None)
        # NOTE(review): this fixture is created with start=2019-03-24 and
        # end=None, which duplicates 'active_but_not_yet_published_no_end'
        # above rather than matching its name ("outdated, no start") —
        # confirm whether start=None with a past end date was intended.
        self.active_but_outdated_no_start= NewsFactory.create_batch(2, title='active_but_outdated_no_start', start=datetime.date(2019, 3, 24), end=None)
        self.active_and_outdated = NewsFactory.create_batch(2, title='active_and_outdated', start=datetime.date(2019, 3, 21), end=datetime.date(2019, 3, 22))
    def test_active(self):
        # active() should return exactly the "published" fixtures above.
        all_active = self.active + self.active_and_published_with_end + self.active_and_published + self.active_and_published_start_is_today + self.active_and_published_end_is_today
        self.assertCountEqual(all_active, list(News.objects.active()))
    def test_published_fields_empty(self):
        # Items with neither start nor end set, regardless of status.
        expected_result = self.active + self.inactive
        self.assertCountEqual(expected_result, list(News.objects.published_fields_empty()))
    def test_published_with_startdate_either_and_no_enddate(self):
        # Items whose start is on/before today and whose end is unset.
        expected_result = self.active_and_published + self.active_and_published_start_is_today
        self.assertCountEqual(expected_result, list(News.objects.published_with_startdate(start=today)))
    def test_published_with_enddate_and_no_startdate(self):
        # Items whose end is on/after today and whose start is unset.
        expected_result = self.active_and_published_with_end
        self.assertCountEqual(expected_result, list(News.objects.published_with_enddate(end=today)))
| StarcoderdataPython |
1836512 | import csv
import functools
import hashlib
import logging
import sys
import warnings
from collections import Counter
from os.path import isfile as isfile
import click
import cloudpickle
import loglizer
import mlflow
import mlflow.sklearn
import numpy as np
import pandas as pd
import sklearn
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
from loglizer import preprocessing
# import sys
# sys.path.append("dependencies/loglizer")
from loglizer.models import IsolationForest, LogClustering
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.svm import OneClassSVM
ES_URL = "http://192.168.122.3:9200"  # Elasticsearch endpoint to pull log events from
ES_INDEX = "logs-endpoint-winevent-sysmon-*"  # index pattern (Windows Sysmon events)
COLUMNS = ["process_path", "event_id"]  # document fields fetched per hit
DROP_NA_COLUMNS = COLUMNS  # rows missing any of these fields are dropped
MODEL = LogClustering  # default model class; meant to be overridden by --model (see set_model_config)
MODEL_PARAMS = {"max_dist": 0.3, "anomaly_threshold": 0.05}  # defaults for LogClustering
LIMIT_DATA = -1  # NOTE(review): unused -- the --limit_data CLI option is used instead
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
conda_env = "conda_running.yaml"  # conda env spec attached to the logged mlflow model
def get_data(elast_url, index, columns):
    """Fetch documents from Elasticsearch into a DataFrame, cached as CSV.

    The cache file name is a SHA-1 digest of (url, index, columns); when the
    file already exists the Elasticsearch query is skipped entirely.

    Args:
        elast_url: Elasticsearch base URL.
        index: index (pattern) to query.
        columns: document fields to extract (dotted paths supported).

    Returns:
        pandas.DataFrame with one column per requested field, rows missing
        any of DROP_NA_COLUMNS removed.
    """
    def save_to_csv(elast_url, index, columns, file_name):
        """Scan the index and dump the requested fields to file_name."""
        logger.warning("saving to csv as file did not exist")
        es = Elasticsearch(elast_url, timeout=600)
        s = Search(using=es, index=index).query().source(fields=columns)

        # Handles nested objects in the response caused by multilevel keys
        # (e.g. agent.hostname) by walking each dotted segment.
        def rgetattr(obj, attr):
            def _getattr(obj, attr):
                try:
                    return getattr(obj, attr)
                except Exception:  # any missing segment in the path -> None
                    return None
            return functools.reduce(_getattr, [obj] + attr.split("."))

        with open(file_name, mode="w") as es_fd:
            writer = csv.DictWriter(es_fd, fieldnames=columns)
            writer.writeheader()
            for hit in s.scan():
                hit_dict = {column: rgetattr(hit, column) for column in columns}
                writer.writerow(hit_dict)

    def read_from_csv(csv_file):
        return pd.read_csv(csv_file)

    # Cache key. BUG FIX: the original format string had five placeholders for
    # six arguments, silently dropping ".".join(columns) -- two different
    # column sets of equal length collided on the same cache file.
    file_name_clear = "{}{}{}{}{}{}".format(
        len(elast_url), elast_url, len(index), index, len(columns),
        ".".join(columns)
    )
    file_name = (
        str(hashlib.sha1(file_name_clear.encode("UTF-8")).hexdigest()[:10]) + ".csv"
    )
    logger.warning("filename: {}".format(file_name))
    if not isfile(file_name):
        save_to_csv(elast_url, index, columns, file_name)
    data_frame = read_from_csv(file_name)
    if len(DROP_NA_COLUMNS) > 0:
        # BUG FIX: DataFrame.dropna returns a new frame; the original call
        # discarded the result, so no rows were ever dropped.
        data_frame = data_frame.dropna(subset=DROP_NA_COLUMNS, how="any")
    logger.warning("finished reading data")
    return data_frame
def build_pipeline(data):
    """Assemble the anomaly-detection pipeline.

    Steps: DataFrame -> ndarray conversion, loglizer feature extraction,
    then the currently selected MODEL built from MODEL_PARAMS.
    (`data` is not used here; kept for interface symmetry with callers.)
    """
    def _frame_to_array(frame):
        return frame.to_numpy()

    steps = [
        ("numpy_transformer", FunctionTransformer(_frame_to_array)),
        ("feature_extractor", preprocessing.FeatureExtractor()),
        ("model", MODEL(**MODEL_PARAMS)),
    ]
    pipeline = Pipeline(steps=steps)
    logger.warning("finished pipeline creation")
    return pipeline
def log_output(pipe, data):
    """Persist the fitted pipeline and run metadata to mlflow.

    Logs the sklearn model artifact, the model parameters, and one metric
    per predicted label with the number of rows assigned to it.
    """
    mlflow.sklearn.log_model(pipe, "model", conda_env=conda_env)
    logger.warning("finished model logging")
    mlflow.log_param("model_param", MODEL_PARAMS)
    label_counts = Counter(pipe.predict(data))
    for label, count in label_counts.items():
        mlflow.log_metric("pred_{}".format(label), count)
    logger.warning("finished output logging")
def set_model_config(model, model_config_json):
    """Select the model class and merge JSON config into MODEL_PARAMS.

    Side effects: rebinds the module-level MODEL and mutates MODEL_PARAMS.

    Args:
        model: model key, "cluster" (LogClustering) or "iforest"
            (IsolationForest); anything else falls back to LogClustering.
        model_config_json: optional JSON object string of model parameters.
            Exits the process with -1 when it is present but not valid JSON.
    """
    # BUG FIX: without `global`, the assignment below bound a *local* MODEL
    # and the --model option never had any effect on build_pipeline().
    global MODEL
    models = {"cluster": LogClustering, "iforest": IsolationForest}
    MODEL = models.get(model, LogClustering)
    # BUG FIX: the option is optional; json.loads(None) used to raise and
    # exit(-1) on every run that omitted --model_config_json.
    if not model_config_json:
        return
    try:
        import json
        model_config = json.loads(model_config_json)
        # NOTE(review): defaults in MODEL_PARAMS are LogClustering params;
        # when "iforest" is selected, pass matching params or construction
        # of IsolationForest(**MODEL_PARAMS) may fail -- confirm.
        MODEL_PARAMS.update(model_config)
    except (TypeError, ValueError):  # ValueError covers json.JSONDecodeError
        logger.error(
            "cannot convert model_config: {} to dict".format(model_config_json)
        )
        exit(-1)
@click.command()
@click.option("--limit_data", type=int)
@click.option("--model")
@click.option("--model_config_json")
def train(limit_data, model, model_config_json):
    """CLI entry point: fetch log data, fit the pipeline, log to mlflow.

    limit_data: optional cap on the number of rows used for fitting.
    model: model key ("cluster" or "iforest") resolved by set_model_config.
    model_config_json: JSON string merged into MODEL_PARAMS.
    """
    # setup logging
    logger.warning("started training")
    warnings.filterwarnings("ignore")
    np.random.seed(40)  # fixed seed for reproducible runs
    elast_url = ES_URL
    index = ES_INDEX
    data = get_data(elast_url, index, columns=COLUMNS)
    with mlflow.start_run():
        set_model_config(model, model_config_json)
        pipe = build_pipeline(data)
        if limit_data:
            pipe.fit(data[:limit_data])
        else:
            pipe.fit(data)
        # NOTE(review): logging always slices with limit_data; when the option
        # is omitted this is data[:None] == the full frame -- confirm intended.
        log_output(pipe, data[:limit_data])
    return pipe
if __name__ == "__main__":
    # click parses sys.argv and invokes the command.
    train()
| StarcoderdataPython |
9720159 | #!/usr/bin/env python2.7
# -*- coding:UTF-8 -*-2
u"""battle.py
Copyright (c) 2019 <NAME>
This software is released under BSD license.
戦闘関連ソーサリーモジュール。
"""
import random as _random
import sorcery as _sorcery
# ---- Break ----
class Break(_sorcery.Sorcery):
    u"""Sorcery that breaks (destroys) the target player's equipment.
    """
    __slots__ = "__target",
    def __init__(self, string, cost, target, catalyst=None):
        u"""Constructor.

        string: definition string passed through to the base class.
        cost: activation cost.
        target: (is_agrsv, slot_bitmask) pair; the bitmask selects which
            equipment slots this sorcery may break.
        catalyst: optional catalyst, forwarded to the base class.
        """
        is_agrsv, _target = target
        super(Break, self).__init__(string, cost, is_agrsv, catalyst)
        self.__target = int(_target)
    def activate(self, systems, is_reverse):
        u"""Activate: break every equipped item whose slot bit is set in
        the target mask, then refresh the target's display if any broke.
        """
        target, _ = self._get_target(systems, is_reverse)
        is_broken = False
        for i, item in enumerate(target.battle.player.equip):
            # 1 << i is the slot's bit; item.break_() reports success.
            if 1 << i & self.__target and item.break_():
                is_broken = True
        if is_broken:
            target.update()
    def is_available(self, params):
        u"""Usability check: True while at least one targeted slot is not
        already broken.
        """
        target, _ = self._get_target(params, params[0].has_reverse_sorcery)
        # XOR then AND isolates targeted bits that are still un-broken.
        is_fulfill = bool(
            (target.equip_broken_state ^ self.__target) & self.__target)
        return super(Break, self).is_available(params) and is_fulfill
class Delete(_sorcery.Sorcery):
    u"""Sorcery that deletes every card in the target's hand.
    """
    __slots__ = "__power",
    def __init__(
        self, string, cost, is_agrsv,
        power=0, catalyst=None
    ):
        u"""Constructor.

        power: per-card power released when the hand is deleted.
        """
        super(Delete, self).__init__(string, cost, is_agrsv, catalyst)
        self.__power = power
    def activate(self, systems, is_reverse):
        u"""Activate: empty the target's hand and release power
        proportional to the number of deleted cards.
        """
        target, _ = self._get_target(systems, is_reverse)
        hand = target.battle.hand
        power = len(hand)*self.__power
        # Iterate over a copy (hand[:]) because the list is mutated.
        for card in hand[:]:
            hand.remove(card)
        target.resource.disappear(power)
    def is_available(self, params):
        u"""Usability check: hand must hold at least `_rank` cards and
        contain no jokers.
        """
        target, _ = self._get_target(params, params[0].has_reverse_sorcery)
        return (
            super(Delete, self).is_available(params) and
            self._rank <= len(target.hand) and not target.jokers)
# ---- Life ----
class Recovery(_sorcery.Sorcery):
    u"""Healing sorcery: restores life to one unit or a whole group.
    """
    __slots__ = "__is_all", "__rate",
    def __init__(self, string, cost, target, rate=1.0, catalyst=None):
        u"""Constructor.

        target: (is_agrsv, is_all) pair; is_all selects group-wide healing.
        rate: fraction of max life restored, clamped to at most 1.0.
        """
        is_agrsv, is_all = target
        self.__is_all = bool(is_all)
        super(Recovery, self).__init__(string, cost, is_agrsv, catalyst)
        self.__rate = float(rate if rate < 1 else 1)
    def activate(self, systems, is_reverse):
        u"""Activate: heal all units, or only the most injured living unit.
        """
        target, _ = self._get_target(systems, is_reverse)
        group = target.battle.group
        if group:
            if self.__is_all:
                for unit in group:
                    unit.life_with_effect += int(unit.max_life*self.__rate)
            else:
                # Pick the most injured among the living units.
                unit = group.get_injured(group.get_livings(group))
                if unit:
                    unit.life_with_effect += int(unit.max_life*self.__rate)
    def is_available(self, params):
        u"""Usability check: only when the target side has taken damage.
        """
        target, _ = self._get_target(params, params[0].has_reverse_sorcery)
        return (
            super(Recovery, self).is_available(params) and target.has_damaged)
class Critical(_sorcery.Sorcery):
    u"""Instant-death sorcery: tries to kill one unit or a whole group.
    """
    __slots__ = "__hit_rate", "__is_all", "__is_force"
    def __init__(
        self, string, cost, target,
        hit_rate=1.0, is_force=False, catalyst=None
    ):
        u"""Constructor.

        target: (is_agrsv, is_all) pair; is_all targets the whole group.
        hit_rate: per-unit success probability, clamped to at most 1.0.
        is_force: forwarded to unit.die(); forces death when set.
        """
        is_agrsv, is_all = target
        self.__is_all = bool(is_all)
        super(Critical, self).__init__(string, cost, is_agrsv, catalyst)
        self.__hit_rate = float(hit_rate if hit_rate < 1 else 1)
        self.__is_force = bool(is_force)
    def activate(self, systems, is_reverse):
        u"""Activate: roll per unit; on a miss (or when die() fails) the
        unit only flashes. Afterwards clean up the group and refresh.
        """
        target, _ = self._get_target(systems, is_reverse)
        group = target.battle.group
        if group:
            if self.__is_all:
                for unit in group:
                    # Miss when hit_rate <= roll; die() may also refuse.
                    if (
                        self.__hit_rate <= _random.random() or
                        not unit.die(self.__is_force)
                    ):
                        unit.flash("damage")
            else:
                # Target the healthiest living unit.
                unit = group.get_healthy(group.get_livings(group))
                if unit and (
                    self.__hit_rate <= _random.random() or
                    not unit.die(self.__is_force)
                ):
                    unit.flash("damage")
            group.destroy()
            target.update()
    def is_available(self, params):
        u"""Usability check: target side must have a normal unit.
        """
        target, _ = self._get_target(params, params[0].has_reverse_sorcery)
        return super(Critical, self).is_available(params) and target.has_normal
# ---- Unit ----
class _Clone(_sorcery.Sorcery):
    u"""Base class for cloning sorceries (Double, Attract).
    """
    __slots__ = ()
    def is_available(self, params):
        u"""Usability check: the source group must exist and the receiving
        group must have room.
        """
        target, other = self._get_target(
            params, params[0].has_reverse_sorcery)
        return (
            super(_Clone, self).is_available(params) and
            target.is_group_exsit and not other.is_full_group)
class Double(_Clone):
    u"""Clone sorcery: copies one of the target's units to the other side.
    """
    __slots__ = ()
    def __init__(self, string, cost, is_agrsv, catalyst=None):
        u"""Constructor. Re-joins the three-part definition string before
        delegating to the base class.
        """
        type_, name, description = string.split("###")
        super(Double, self).__init__(
            type_+"###"+name+"###"+description, cost, is_agrsv, catalyst)
    def activate(self, systems, is_reverse):
        u"""Activate: summon a copy of a random unit from the target group
        into the other group (when it has room), including its parameters.
        """
        target, other = self._get_target(systems, is_reverse)
        target_group = target.battle.group
        other_group = other.battle.group
        if target_group and not other_group.is_full:
            unit = _random.choice(target_group)
            if other_group.summon(unit.data):
                # The fresh summon is appended last; copy live parameters.
                other_group[-1].copy_parameter(unit)
                other.update()
class Attract(_Clone):
    u"""Poaching sorcery: moves a unit from the target group to the other
    side, killing the original on success.
    """
    __slots__ = "__hit_rate",
    def __init__(
        self, string, cost, is_agrsv,
        hit_rate=1.0, catalyst=None
    ):
        u"""Constructor.

        hit_rate: success probability, clamped to at most 1.0.
        """
        type_, name, description = string.split("###")
        super(Attract, self).__init__(
            type_+"###"+name+"###"+description, cost, is_agrsv, catalyst)
        self.__hit_rate = float(hit_rate if hit_rate < 1 else 1)
    def activate(self, systems, is_reverse):
        u"""Activate: on a successful roll, copy the unit to the other
        group, force-kill the original and clean up; otherwise just flash.
        """
        target, other = self._get_target(systems, is_reverse)
        target_group = target.battle.group
        other_group = other.battle.group
        if target_group and not other_group.is_full:
            unit = _random.choice(target_group)
            if (
                _random.random() < self.__hit_rate and
                other_group.summon(unit.data)
            ):
                other_group[-1].copy_parameter(unit)
                other.update()
                unit.die(True)
                target_group.destroy()
                target.update()
            else:
                unit.flash("damage")
class Spawn(_sorcery.Sorcery):
    u"""Summoning sorcery: creates units by name, or a special "Joker
    Dragon" whose strength scales with the opponent's total stars.
    """
    __slots__ = "__name", "__is_whole"
    def __init__(
        self, string, cost, is_agrsv,
        is_whole=False, catalyst=None
    ):
        u"""Constructor.

        string: four-part "###"-separated definition; the fourth part is
            the name of the collectible to spawn ("" selects the dragon).
        is_whole: when True, fill every empty slot instead of one.
        """
        type_, name, description, self.__name = string.split("###")
        self.__is_whole = bool(is_whole)
        super(Spawn, self).__init__(
            type_+"###"+name+"###"+description, cost, is_agrsv, catalyst)
    def activate(self, systems, is_reverse):
        u"""Activate: summon the named collectible (one per empty slot when
        is_whole), or the star-scaled Joker Dragon when no name is set.
        """
        # Local imports avoid module-level import cycles.
        import armament.collectible as __collectible
        import armament.units.data as __data
        import utils.const as __const
        import utils.general as __general
        POWER = 300
        target, other = self._get_target(systems, is_reverse)
        group = target.battle.group
        if not group.is_full:
            if not self.__name:
                # Dragon stats scale with the other side's total stars.
                total = other.resource.total
                # The u"..." text below is in-game card data (Japanese);
                # it is runtime data, not a comment -- do not translate.
                summon = __data.Summon(
                    u"##dragon_11##"+__const.DRAGON_TRIBE +
                    u"##ジョーカードラゴン##死神竜#"
                    u"自身の合計スターによって強さが変化",
                    (10+(total << 1), total), (4, -1), POWER+(total << 2),
                    ability=__data.Ability(
                        __const.ADDITION_ABILITY+"###" +
                        __general.get_skill_names(
                            __const.COMPLETE_ASSIST_SKILL)))
                if group.summon(summon):
                    target.update()
            else:
                number, = __collectible.Collectible.get_by_name(self.__name)
                for _ in range(group.empty if self.__is_whole else 1):
                    if group.summon(__collectible.get(number)):
                        target.update()
    def is_available(self, params):
        u"""Usability check: the target group must have an empty slot.
        """
        target, _ = self._get_target(params, params[0].has_reverse_sorcery)
        return (
            super(Spawn, self).is_available(params) and
            not target.is_full_group)
| StarcoderdataPython |
6625439 | import os
from threading import Thread
from execo.action import TaktukPut
from execo.log import style
from execo_engine import logger
from div_p2p.wrapper import DivP2PWrapper
class TestThread(Thread):
    """This class manages the consumption and execution of combinations.

    Each thread pulls parameter combinations from the shared sweeper,
    prepares the dataset once per dataset group, and runs every experiment
    that shares that dataset before moving on.
    """
    def __init__(self, host, comb_manager, stats_manager):
        """Bind the thread to one host and the shared managers.

        host: target host handed to the DivP2P wrapper.
        comb_manager: provides the parameter sweeper and comb/ds helpers.
        stats_manager: collects per-dataset and per-experiment results.
        """
        super(TestThread, self).__init__()
        self.div_p2p = DivP2PWrapper(host)
        self.comb_manager = comb_manager
        self.stats_manager = stats_manager
        self.comb = None
        self.ds_id = -1
        self.comb_id = -1

    def _th_prefix(self):
        """Return a styled log prefix identifying this thread."""
        return style.user1("[" + self.name + "] ")

    def run(self):
        """Consume combinations until the sweeper reports none remaining."""
        while len(self.comb_manager.sweeper.get_remaining()) > 0:
            # Getting the next combination (which uses a new dataset).
            # NOTE(review): if get_next() returns None while work remains,
            # this loop spins without sleeping -- confirm sweeper semantics.
            comb = self.comb_manager.sweeper.get_next()
            if comb:
                self.comb = comb
                self.comb_id = self.comb_manager.get_comb_id(comb)
                self.ds_id = self.comb_manager.get_ds_id(comb)
                ds_comb = self.prepare_dataset(comb)
                self.xp(comb, ds_comb)
                # Subloop over the combinations that use the same dataset.
                while True:
                    comb_in_ds = self.comb_manager.sweeper.get_next(
                        lambda r: filter(self._uses_same_ds, r))
                    if comb_in_ds:
                        # BUG FIX: track the combination actually being run.
                        # The original kept the stale outer `comb`, so
                        # stats_manager.add_xp() was labelled with the wrong
                        # comb_id for every experiment after the first.
                        self.comb = comb_in_ds
                        self.comb_id = self.comb_manager.get_comb_id(comb_in_ds)
                        try:
                            self.xp(comb_in_ds, ds_comb)
                        except Exception:
                            # Narrowed from a bare `except:` so that
                            # KeyboardInterrupt still propagates.
                            break
                    else:
                        break

    def _uses_same_ds(self, candidate_comb):
        """Determine if the candidate combination uses the same dataset as the
        current one.

        Args:
            candidate_comb (dict): The combination candidate to be selected as
                the new combination.
        """
        return self.comb_manager.uses_same_ds(self.comb, candidate_comb)

    def prepare_dataset(self, comb):
        """Prepare the dataset to be used in the next set of experiments.

        Copies the dataset file to the remote host and registers it with the
        stats manager.

        Args:
            comb (dict): The combination containing the dataset's parameters.

        Returns:
            dict: The dataset parameters (remote path and class name).
        """
        # Create ds_comb
        (ds_class_name, ds_params) = self.comb_manager.get_ds_class_params(comb)
        local_path = ds_params["local_path"]
        remote_path = os.path.join(self.div_p2p.remote_dir,
                                   os.path.basename(local_path))
        ds_comb = {"ds.class.path": remote_path, "ds.class": ds_class_name}
        # Copy dataset to host
        logger.info(self._th_prefix() + "Prepare dataset with combination " +
                    str(self.comb_manager.get_ds_parameters(comb)))
        copy_code = TaktukPut([self.div_p2p.host], [local_path], remote_path)
        copy_code.run()
        # Notify stats manager
        self.stats_manager.add_ds(self.ds_id, comb)
        return ds_comb

    def xp(self, comb, ds_comb):
        """Perform the experiment corresponding to the given combination.

        Marks the combination done on success, cancelled on failure, so the
        sweeper can hand it to another thread.

        Args:
            comb (dict): The combination with the experiment's parameters.
            ds_comb (dict): The dataset parameters.
        """
        comb_ok = False
        try:
            logger.info(self._th_prefix() +
                        "Execute experiment with combination " +
                        str(self.comb_manager.get_xp_parameters(comb)))
            num_reps = self.comb_manager.get_num_repetitions()
            for nr in range(0, num_reps):
                if num_reps > 1:
                    logger.info(self._th_prefix() + "Repetition " + str(nr + 1))
                # Change configuration: dataset params override on key clash.
                params = {}
                for key in comb:
                    params[key] = comb[key]
                for key in ds_comb:
                    params[key] = ds_comb[key]
                self.div_p2p.change_conf(params)
                # Execute job
                stats_file = self.div_p2p.execute()
                # Notify stats manager
                self.stats_manager.add_xp(self.comb_id, comb, stats_file)
            comb_ok = True
        finally:
            if comb_ok:
                self.comb_manager.sweeper.done(comb)
            else:
                self.comb_manager.sweeper.cancel(comb)
            logger.info('%s Remaining',
                        len(self.comb_manager.sweeper.get_remaining()))
1690327 | # Copyright 2021 University of Adelaide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import MarkerList
class MarkerListFO(MarkerList.MarkerList):
    """Set-backed MarkerList implementation with O(1) membership lookup."""

    def __init__(self, markerset):
        """Store the markers as a set (copies the input iterable)."""
        self.markerset = set(markerset)

    def find(self, marker: str) -> int:
        """Return 1 if *marker* is present, -1 otherwise."""
        # Conditional expression replaces the original if/else block.
        return 1 if marker in self.markerset else -1

    def intersection(self, markers):
        """Return the set of markers common to this list and *markers*."""
        return self.markerset.intersection(markers)

    def makeset(self):
        """Return the underlying set of markers (not a copy)."""
        return self.markerset

    def isempty(self):
        """Return True when no markers are stored."""
        # Truthiness check replaces len(...) == 0.
        return not self.markerset

    def __str__(self):
        return str(self.markerset)
| StarcoderdataPython |
3229549 | <reponame>angstwad/set-cover-iam-roles<filename>find_solving_roles.py
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script intends to brute force combinations of GCP IAM roles, the
permissions of which, upon computing their set union, equal every possible IAM
permission available. The purpose is to determine the fewest possible IAM
roles which provides every possible IAM permissions.
There are two steps. The first step is running the producer, which creates
data files, one per core on the device, of combinations up to the '--num'
command line argument. As the production of data is slower than the
processing of the data (and is not easily parallelizable), this runs in a
single process. The next step is where the work is split in order to process
the combinations in the most efficient manner. A process per core is started,
the core is assigned a file to read, and the combinations are processed until
EOF. If any combination is found to be a "winner", and comprises all possible
IAM roles, it is immediately written to the --output-dir location as a
<timestamp>_winner.json file. When all process are complete, a winner
summary is written to the file `winners.json` in the path specified by
--output-dir.
"""
import argparse
import collections
import itertools
import json
import logging
import math
import multiprocessing
import multiprocessing.synchronize
import pathlib
import sys
import threading
import time
import typing
_LOG_FORMAT = "%(levelname)s:%(asctime)s:%(name)s:%(message)s"
logging.basicConfig(stream=sys.stdout, format=_LOG_FORMAT)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# TODO: fix hardcoded values; calculate total number of combinations dynamically
# Each term is C(39, i): the number of i-role combinations drawn from the 39
# deduplicated roles. The sum over i = 1..15 must stay in sync with the
# producer's generation range (the --num default).
_n = []
for i in range(1, 16):
    _n.append((math.factorial(39) // math.factorial(39 - i)) //
              math.factorial(i))
TOTAL_COMBINATIONS = sum(_n)
class LazyFileType(argparse.FileType):
    """Subclasses `argparse.FileType` to open files lazily.

    The argument is validated at parse time (existence check for read
    modes), but the file is only opened when `open()` is called.

    Usage:
        parser = argparse.ArgumentParser()
        parser.add_argument('-f', type=LazyFileType('w'))
        args = parser.parse_args()
        with args.f.open() as f:
            for line in f:
                ...

    Alternate constructor `default` for use with the `default` kwarg to
    `ArgumentParser.add_argument`:
        parser.add_argument('-f', type=LazyFileType('w'),
                            default=LazyFileType.default('some_file.txt'))
    """

    def __call__(self, string: str) -> 'LazyFileType':
        """Record the filename; for 'r'/'x' modes verify the file exists.

        Returns self so argparse stores this instance as the attribute
        value. (BUG FIX: was annotated `-> None` despite returning self.)

        Raises:
            argparse.ArgumentTypeError: a file to be read does not exist.
        """
        self.filename = string
        if 'r' in self._mode or 'x' in self._mode:
            if not pathlib.Path(self.filename).exists():
                m = (f"can't open {self.filename}: No such file or directory: "
                     f"'{self.filename}'")
                raise argparse.ArgumentTypeError(m)
        return self

    def open(self) -> typing.IO:
        """Open and return the file, using FileType's stored mode/encoding."""
        # _mode/_bufsize/_encoding/_errors are argparse.FileType internals.
        return open(self.filename, self._mode, self._bufsize, self._encoding,
                    self._errors)

    @classmethod
    def default(cls, string: str, **kwargs) -> 'LazyFileType':
        """Build an instance pre-bound to *string* for use as a default.

        (BUG FIX: was annotated `-> None` despite returning the instance.)
        """
        inst = cls(**kwargs)
        inst.filename = string
        return inst
class DirType:
    """argparse `type`/`default` helper representing an existing directory.

    Has a single useful attribute, `path`: a `pathlib.Path` for the given
    argument, defaulting to the current working directory.
    """
    # Class-level default: cwd captured at import time, used when no
    # path argument is supplied.
    path: pathlib.Path = pathlib.Path.cwd()

    def __init__(self, path: typing.Optional[str] = None) -> None:
        """Validate that *path* (when given) names an existing directory.

        Raises:
            argparse.ArgumentTypeError: *path* is not a directory.
        """
        if path is not None:
            self.path = pathlib.Path(path)
            if not self.path.is_dir():
                raise argparse.ArgumentTypeError(
                    f"'{path}' is not a directory")
def process_data(file: LazyFileType,
                 roles_to_perms: typing.DefaultDict[str, set],
                 perms_counts: typing.DefaultDict[str, int],
                 unique_perms: typing.Set[str]) -> None:
    """ With a roles JSON file produced by `fetch_role_data.py`, takes the
    contents of the file and sets up a few mappings, then drops every role
    whose permission set is contained in another role's. Mutates all the
    mutable arguments to the function; returns nothing.

    Args:
        file: JSON raw roles data produced by fetch_role_data.py
        roles_to_perms: mapping of roles to their permissions; populated
            (and then pruned of subset roles) by this func
        perms_counts: mapping of role to the number of permissions it
            contains; populated and pruned in lockstep with roles_to_perms
        unique_perms: set which will be populated with all unique permissions
    """
    logger.info(f'Reading roles file "{file.filename}".')
    with file.open() as f:
        raw_role_data = json.load(f)
    # Read over each role, inspect the permissions, count the number of
    # permissions of each, and create a set of all unique roles. This will
    # be used to generate combinations and by the workers as they process
    # combinations.
    logger.info(f'Processing roles.')
    for role_name, role_data in raw_role_data.items():
        for perm in role_data.get('includedPermissions', []):
            roles_to_perms[role_name].add(perm)
            perms_counts[role_name] += 1
            unique_perms.add(perm)
    logger.info(f'Unique roles: {len(roles_to_perms)}')
    logger.info(f'Unique permissions: {len(unique_perms)}')
    logger.info('Sorting roles.')
    roles_sorted_by_perms_asc = sorted(perms_counts.items(), key=lambda x: x[1])
    # Eliminate roles which are subsets of another, drastically reducing the
    # number of combinations
    subset_roles = set()
    for this_role, _ in roles_sorted_by_perms_asc:
        this_perms = roles_to_perms[this_role]  # hoisted out of inner loop
        for other_role, other_perms in roles_to_perms.items():
            if this_role == other_role:
                continue
            # BUG FIX: two roles with *identical* permission sets are mutual
            # subsets; without this guard BOTH were marked and deleted,
            # silently losing those permissions from the kept roles. Never
            # drop a role in favor of one that is itself being dropped.
            if other_role in subset_roles:
                continue
            if this_perms.issubset(other_perms):
                subset_roles.add(this_role)
                break  # one superset is enough; no need to keep scanning
    # Remove subset roles from common structures
    for role in subset_roles:
        del roles_to_perms[role]
        del perms_counts[role]
    logger.info(f'Deduped roles: {len(roles_to_perms)}')
def producer(data_dir: DirType,
             num_combinations: int,
             roles_to_perms: typing.Dict[str, set]) -> None:
    """ Evaluates roles_to_perms to create combinations of all the roles
    contained therein. Creates a file per core which contains role
    combinations, a comma-separated list of combinations (using the legend
    to optimize space) per line. Iterates over the combinations, placing one
    combination per file, moving onto the next file continuously until no
    combinations remain. Combinations are calculated starting from
    `num_combinations`, working to 1 (i.e. 39C15, then 39C14, etc). Uses
    `multiprocessing.cpu_count()` to determine how many files to create (and
    therefore how many workers to start when checking the combinations) so
    the intent is for this to be run on the machine where the workers run.

    Args:
        data_dir: directory where the data files are located
        num_combinations: max number of combinations to try from a set
        roles_to_perms: mapping of roles to the permissions they contain
    """
    count = 0
    start = time.time()
    fps: typing.List[typing.IO] = []
    cpu_count = multiprocessing.cpu_count()
    # Rather than dump full role names in our combinations to the file,
    # instead assign a much smaller int value to the role, and log this
    # mapping of int value to role name as the "legend" to a file
    legend = {str(i): r for i, r in enumerate(roles_to_perms)}
    legend_fname = data_dir.path.joinpath('legend.json')
    with open(legend_fname, 'w') as f:
        json.dump(legend, f, indent=2)
    # Create one file per CPU on the system; each file will be assigned on
    # one spawned worker in the worker pool.
    for i in range(cpu_count):
        comb_fname = data_dir.path.joinpath(f'{i}.combination')
        fps.append(open(comb_fname, 'w'))
    logger.info(f'Writing {cpu_count} files.')
    # BUG FIX: close the handles even if writing raises; the original only
    # closed them on the success path and leaked them on any exception.
    try:
        # Start writing across the files, splitting the combinations one at a
        # time across each file, spreading the combos evenly
        for i in range(num_combinations, 0, -1):
            for combination in itertools.combinations(legend, i):
                line = f'{",".join(combination)}\n'
                fps[count % cpu_count].write(line)
                count += 1
                if time.time() - start > 60:
                    logger.info(f'Produced {count:,} combinations.')
                    start = time.time()
        logger.info('Done.')
    finally:
        for fp in fps:
            fp.close()
def worker(file: str,
           winner_path: DirType,
           winners: typing.List[typing.Tuple[str]],
           roles_to_perms: typing.Dict[str, set],
           unique_perms: typing.Set[str],
           legend: typing.Dict[str, str],
           counter: multiprocessing.Value,
           # BUG FIX: the lock type was written as a *default value*
           # (`lock=multiprocessing.synchronize.RLock`, the class itself,
           # which is unusable as a lock) where an annotation was intended.
           # Every caller passes a real lock, so requiring it is safe.
           lock: multiprocessing.synchronize.RLock) -> None:
    """ The worker process to be started by the worker `multiprocessing.Pool`.
    Reads a file of combinations created by the `producer` function and
    evaluates whether the union of all the permissions for each role equals
    the universe `unique_perms`. Keeps track of how many combinations it has
    tried since starting, and manages an interval counter, tracking how many
    combinations it has tried every 30 seconds, updating a shared global
    counter passed in as `counter`. If a winning combination is found (one
    that satisfies the universe), it is immediately written to a file in
    `winner_path` and appended to the `winners` list.

    Args:
        file: file of combos (must have been written by `producer` func)
        winner_path: path where winners are written
        winners: list where winning combos are appended
        roles_to_perms: mapping of roles to the permission they contain
        unique_perms: set of unique permissions
        legend: mapping of data file values to role names
        counter: global counter shared by all workers
        lock: lock to be used over the counter (and the winners list)
    """
    current = multiprocessing.current_process()
    logger = logging.getLogger(f'Worker{current.name}')
    logger.setLevel(logging.INFO)
    logger.info('Worker starting.')
    last = time.time()
    g_count, int_count = 0, 0
    logger.info(f'Opening {file}.')
    try:
        with open(file) as f:
            for line in f:
                # be sure to strip line endings or we'll never match
                combination = line.strip().split(',')
                # will contain perms of roles in combo
                perms_union = set()
                for role in combination:
                    # roles in the combination are stored using the legend,
                    # so look them up, get perms, add to perms_union
                    perms_union.update(roles_to_perms[legend[role]])
                # We have a winner
                if perms_union == unique_perms:
                    logger.warning(f'Solution found: '
                                   f'{", ".join(combination)}')
                    t = round(time.time() * 1000)
                    fname = winner_path.path.joinpath(f'{t}_winner.json')
                    pretty_combo = [legend[role] for role in combination]
                    # dump immediately so the result is captured even if
                    # process encounters an issue. (Renamed the handle from
                    # `f` to `wf`: it shadowed the combos-file handle above.)
                    with open(fname, 'w') as wf:
                        json.dump({'combo': pretty_combo}, wf, indent=2)
                    with lock:
                        winners.append(pretty_combo)
                g_count += 1
                int_count += 1
                # Update the global (all process) counter every 30 secs
                if time.time() - last > 30:
                    last = time.time()
                    with lock:
                        counter.value += int_count
                    int_count = 0
    except KeyboardInterrupt:
        logger.warning('Got KeyboardInterrupt.')
    except Exception as e:
        # Be sure to log in the worker process immediately, otherwise the
        # exception is swallowed up (as we are not using error callbacks).
        logger.exception(e, exc_info=True)
        raise
    # Update global counter one last time for accuracy
    with lock:
        counter.value += int_count
    logger.info(f'Terminating. Processed {g_count:,} combinations.')
def process_winners(winner_path: DirType,
                    winners: typing.List[typing.Tuple[str]]) -> None:
    """ Write a single JSON summary of all winning combinations, keyed by
    combination size, into `winner_path`. Logs and writes nothing when no
    winners were found.

    Args:
        winner_path: path to write the winner summary file
        winners: list of winning combinations to summarize
    """
    logger.info('Processing winners.')
    grouped = {}
    for combo in winners:
        grouped.setdefault(len(combo), []).append(combo)
    if not grouped:
        logger.info('No winners.')
        return
    fpath = winner_path.path.joinpath(f'winner.json')
    logger.info(f'Writing winners to {fpath.absolute()}.')
    with open(fpath, 'w') as f:
        json.dump(grouped, f, indent=2)
def log_counter(counter: multiprocessing.Value) -> None:
    """ Intended to be run as a daemon thread from the parent process. Reads
    the global counter and writes a log entry every 60 seconds. Never
    returns; the thread dies when the main program exits.

    Args:
        counter: global worker combinations counter
    """
    while True:
        logger.info(f'Processed {counter.value:,} combinations globally – '
                    f'{(counter.value / TOTAL_COMBINATIONS) * 100:.4f}% '
                    f'complete.')
        time.sleep(60)
def main(args: argparse.Namespace) -> None:
    """ The magic happens here. Evaluates arguments. Initializes the shared
    role data mappings. Invokes `process_data` to process the raw role data
    file. Invokes `producer` and spins up the worker `multiprocessing.Pool`
    as specified by the command line arguments. Starts the `log_counter`
    thread. Waits for the workers to complete. Invokes `process_winners`
    when workers complete or `KeyboardInterrupt` is caught.

    Args:
        args: command line arguments namespace
    """
    roles_to_perms: typing.DefaultDict[str, set] = collections.defaultdict(set)
    perms_counts: typing.DefaultDict[str, int] = collections.defaultdict(int)
    unique_perms: typing.Set[str] = set()
    process_data(args.file, roles_to_perms, perms_counts, unique_perms)
    if not args.no_producer:
        producer(args.data_dir, args.num, roles_to_perms)
    if not args.no_worker:
        # Manager-backed proxies so list/lock/counter are shared across procs.
        manager = multiprocessing.Manager()
        pool = multiprocessing.Pool()
        winners = manager.list()
        lock = manager.RLock()
        counter = manager.Value('i', 0)
        with open(args.data_dir.path.joinpath('legend.json')) as f:
            legend: typing.Dict[str, str] = json.load(f)
        # Start one worker for every CPU
        work = []
        for i in range(multiprocessing.cpu_count()):
            fname = args.data_dir.path.joinpath(f'{i}.combination')
            args_ = (fname, args.output_dir, winners, roles_to_perms,
                     unique_perms, legend, counter, lock)
            result = pool.apply_async(worker, args=args_)
            work.append(result)
        # We won't start any more processes
        pool.close()
        # Start a daemon thread (will be killed when the main program exits) to
        # give regular updates about how many combinations have been tried
        t = threading.Thread(target=log_counter, args=(counter,), daemon=True)
        t.start()
        # Wait for the workers to complete and handle KeyboardInterrupts so
        # that the results still end up written to a file
        try:
            pool.join()
        except KeyboardInterrupt:
            try:
                logger.error('Got KeyboardInterrupt; shutting down workers.')
                logger.error('Press Control-C again to stop immediately.')
                process_winners(args.output_dir, winners)
                pool.terminate()  # Sends KeyboardInterrupt to child procs
            except KeyboardInterrupt:
                pool.terminate()
                raise SystemExit('Terminating on KeyboardInterrupt')
        else:
            process_winners(args.output_dir, winners)
        finally:
            logger.info(f'Processed {counter.value:,} combinations globally – '
                        f'{(counter.value / TOTAL_COMBINATIONS) * 100:.4f}% '
                        f'complete.')
def parse_args() -> argparse.Namespace:
    """ Sets up an `argparse.ArgumentParser`, parses the args, returns them.

    Returns: parsed arguments namespace
    """
    parser = argparse.ArgumentParser()
    # LazyFileType validates existence at parse time but opens the file
    # lazily via .open() inside process_data().
    parser.add_argument('-f', '--file',
                        type=LazyFileType(),
                        default=LazyFileType.default('roles.json'),
                        help='JSON file to read role data from. Default: '
                             'roles.json')
    parser.add_argument('-n', '--num',
                        type=int,
                        default=15,
                        help='Max number of combinations to try. Default: 15')
    parser.add_argument('-o', '--output-dir',
                        type=DirType,
                        default=DirType(),
                        help=f'Directory to write winning combinations to. '
                             f'Default: current working directory')
    parser.add_argument('-d', '--data-dir',
                        type=DirType,
                        default=DirType(),
                        help=f'Path to read/write producer data files. '
                             f'Default: current working directory')
    # Producing and consuming can be disabled independently, but not both
    # at once (that would leave nothing to do).
    mutex = parser.add_mutually_exclusive_group()
    mutex.add_argument('--no-producer',
                       action='store_true',
                       help='Do not run the producer (uses existing data)')
    mutex.add_argument('--no-worker',
                       action='store_true',
                       help="Do not run the workers (produce only)")
    return parser.parse_args()
if __name__ == '__main__':
    # Script entry point: parse CLI options, then run producer/workers.
    args = parse_args()
    main(args)
| StarcoderdataPython |
3232562 | """Created on Wed Sep 08 2016 13:11.
@author: <NAME>
"""
import unittest
import numpy as np
from ..lyapunov_element_steering import LyapunovElementSteering
from ..perturb_zero import PerturbZero
from ..model_mee import ModelMEE
from ..reference_coe import ReferenceCOE
from ...orbital_mech.orbit import Orbit
from ...orbital_mech.element_sets.orb_coe import OrbCOE
from ...orbital_mech.element_sets.orb_mee import OrbMEE
from ...mcpi.mcpi import MCPI
from ...mcpi.mcpi_approx import MCPIapprox
class TestLyapunovElementSteering(unittest.TestCase):
    """Unit tests for the LyapunovElementSteering control law."""

    def setUp(self):
        """Build a controller from a simple reference orbit fixture."""
        grav_param = 1.
        weights = np.diag([1., 1., 1., 1., 1., 0.])
        thrust_accel = 1e-6
        initial_state = np.array([[2., .5, 1., .1, .1, 0.]])
        reference = ReferenceCOE(initial_state, grav_param)
        self.controller = LyapunovElementSteering(grav_param, weights,
                                                  thrust_accel, reference)

    def test_instantiation(self):
        """The fixture produces a LyapunovElementSteering instance."""
        self.assertIsInstance(self.controller, LyapunovElementSteering)

    def test_getattr(self):
        """Attribute reads return the value given at construction."""
        self.assertEqual(self.controller.mu, 1)

    def test_setattr(self):
        """Attribute writes are reflected on subsequent reads."""
        self.controller.mu = 2.
        self.assertEqual(self.controller.mu, 2)

    def test_control(self):
        """Calling the controller yields one control per state row."""
        states = np.array([[2., .5, 1., .1, .1, 0.],
                           [4., .5, 1., .1, .1, 0.],
                           [8., .5, 1., .1, .1, 0.]])
        times = np.array([[0.], [1.], [2.]])
        controls = self.controller(times, states)
        self.assertEqual(len(controls), 3)
| StarcoderdataPython |
3338020 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME>, <<EMAIL>>, 2019
"""
performance test to insert contents.
"""
import cx_Oracle
from idds.common.config import config_get
# from idds.core.contents import add_content
def get_subfinished_requests(db_pool):
    """Fetch ids of collections with unprocessed files.

    Borrows a connection from *db_pool*, runs the query, prints the count
    and the ids, and returns the id list.

    Fixes: the function previously built ``req_ids`` but never returned it,
    and the first ``sql`` assignment was immediately overwritten (dead code,
    now kept only as a comment).

    :param db_pool: session pool providing ``acquire``/``release``.
    :returns: list of request ids.
    """
    connection = db_pool.acquire()
    req_ids = []
    # Earlier query variants, kept for reference:
    # sql = """select request_id from atlas_IDDS.requests where status in (4,5) and scope!='hpo'"""
    # sql = """select request_id from atlas_IDDS.requests where scope!='hpo' and ( status in (4,5) or request_id in (select request_id from atlas_idds.transforms where status in (4, 5) and transform_type=2)) order by request_id"""
    sql = """select request_id from atlas_idds.collections where status=4 and total_files > processed_files order by request_id asc"""
    cursor = connection.cursor()
    cursor.execute(sql)
    rows = cursor.fetchall()
    for row in rows:
        req_ids.append(row[0])
    cursor.close()
    connection.commit()
    db_pool.release(connection)
    print(len(req_ids))
    print(req_ids)
    return req_ids
def get_session_pool():
    """Build a cx_Oracle session pool from the idds 'database.default' DSN."""
    dsn = config_get('database', 'default').replace("oracle://", "")
    credentials, tns = dsn.split('@')
    user, passwd = credentials.split(':')
    return cx_Oracle.SessionPool(user, passwd, tns, min=12, max=20, increment=1)
def test():
    """Exercise get_subfinished_requests against the configured database."""
    get_subfinished_requests(get_session_pool())
# Manual entry point: run the query check when executed directly.
if __name__ == '__main__':
    test()
| StarcoderdataPython |
1723383 | for i in range(2):
for j in range(3):
print j
print i
| StarcoderdataPython |
11222274 | """
98 / 98 test cases passed.
Runtime: 36 ms
Memory Usage: 14.9 MB
"""
class Solution:
    def checkPerfectNumber(self, num: int) -> bool:
        """True iff *num* is one of the perfect numbers up to 33550336."""
        return num in {6, 28, 496, 8128, 33550336}
"""
98 / 98 test cases passed.
Runtime: 40 ms
Memory Usage: 15.1 MB
"""
class Solution2:
    def checkPerfectNumber(self, num: int) -> bool:
        """Return True iff *num* equals the sum of its proper divisors.

        Trial-divides up to sqrt(num); every divisor i also contributes its
        cofactor num // i (skipped when i*i == num to avoid double counting).

        Fixes: uses integer division (``//``) instead of ``/`` so the total
        stays exact for large inputs, and rejects num <= 1 up front (the old
        ``num == 1`` guard let negative inputs reach ``num ** 0.5``, which
        yields a complex number and raises).
        """
        if num <= 1:
            return False
        total = 1  # 1 divides every num > 1
        for i in range(2, int(num ** 0.5) + 1):
            if num % i == 0:
                total += i
                if i * i != num:
                    total += num // i
        return total == num
| StarcoderdataPython |
6599221 | <reponame>ahonnecke/jolly-brancher
"""Jira stuff."""
import logging
from enum import Enum
from jira import JIRA
_logger = logging.getLogger(__name__)
class IssueType(Enum):
    """Jira issue types handled by this module (value = Jira type name)."""
    EPIC = "EPIC"
    STORY = "STORY"
    ENHANCEMENT = "ENHANCEMENT"
    BUG = "BUG"
    TASK = "TASK"
    SUBTASK = "SUB-TASK"
def get_all_issues(jira_client, project_name=None):
    """Collect every matching issue, paging through results 100 at a time.

    The JQL restricts results to the current user's issues in an active
    status; *project_name*, when given, further limits the search to one
    project.
    """
    page_size = 100
    clauses = [
        "assignee = currentUser()",
        "status in ('In Dev', 'In Progress', 'To Do')",
    ]
    if project_name:
        clauses.append(f"project = '{project_name}'")
    jql = " and ".join(clauses)
    collected = []
    offset = 0
    while True:
        page = jira_client.search_issues(
            jql,
            startAt=offset,
            maxResults=page_size,
        )
        collected.extend(page.iterable)
        offset += page_size
        if offset >= page.total:
            break
    return collected
class JiraClient:
    """Wrapper class for external jira library."""

    def __init__(self, url, email, token):
        """Authenticate against *url* using basic auth (email + API token)."""
        self._JIRA = JIRA(url, basic_auth=(email, token))

    def get_all_issues(self, project_name=None):
        """Return the current user's active issues, optionally for one project.

        Bug fix: the project filter was previously dropped because a
        hard-coded ``project_name=None`` was forwarded to the helper.
        """
        return get_all_issues(self._JIRA, project_name=project_name)

    def issue(self, ticket):
        """Fetch a single issue by ticket key."""
        return self._JIRA.issue(ticket)

    def add_comment(self, myissue, comment):
        """Post *comment* on *myissue*."""
        self._JIRA.add_comment(myissue, comment)

    def add_comment_table(self, issue, body: dict):
        """Post *body* as a Jira wiki-markup table. Not implemented yet."""
        # @TODO finish this
        # ||heading 1||heading 2||heading 3||
        # |col A1|col A2|col A3|
        # |col B1|col B2|col B3|

    def add_comment_panel(self, issue, title, body):
        """Post *body* wrapped in a styled Jira {panel} block titled *title*."""
        head = (
            "{"
            + "|".join(
                [
                    f"panel:title={title}",
                    "borderStyle=solid",
                    "borderColor=#ccc",
                    "titleBGColor=#F7D6C1",
                    "bgColor=#FFFFCE",
                ]
            )
            + "}"
        )
        foot = "{panel}"
        pr_comment = "\n".join([head, body, foot])
        self.add_comment(issue, pr_comment)
| StarcoderdataPython |
4883700 | import asyncio
from time import perf_counter
import pytest
# all test coroutines will be treated as marked
# (module-level pytest marker: pytest-asyncio runs every test here)
pytestmark = pytest.mark.asyncio
async def test_connected(cw):
    """The ``cw`` device fixture should already be connected at test start."""
    assert cw.connected
async def test_get_info(cw):
    """get_info() returns newline-separated key=value pairs with the
    expected set of keys."""
    raw = await cw.get_info()
    pairs = (line.split("=", maxsplit=1) for line in raw.strip().split("\n"))
    info = dict(pairs)
    expected_keys = {
        "dev_id",
        "fpga_id",
        "channel_count",
        "range_count",
        "max_sample_rate",
        "sw_version",
        "fpga_version",
    }
    assert set(info) == expected_keys
@pytest.mark.parametrize("decimation", (1, 2, 5, 10, 50, 100))
@pytest.mark.parametrize("channel", (1, 2))
async def test_acq_decimation(cw, channel, decimation, duration_acq):
    """Streaming wall time must track the decimated sample rate.

    With samplerate = MAX_SAMPLERATE / decimation, reading
    ``block_count_total`` blocks of ``block_size`` samples should take
    close to ``duration_acq`` seconds (2.5% relative tolerance).
    """
    block_size = 10_000
    samplerate = cw.MAX_SAMPLERATE / decimation
    # Number of blocks corresponding to duration_acq seconds of data.
    block_count_total = int(duration_acq * samplerate / block_size)
    await cw.set_decimation(channel, decimation)
    await cw.start_acquisition()
    block_count = 0
    time_start = perf_counter()
    async for _ in cw.stream(channel, block_size):
        block_count += 1
        if block_count >= block_count_total:
            break
    time_stop = perf_counter()
    time_elapsed = time_stop - time_start
    await cw.stop_acquisition()
    # Wall-clock time should match the requested acquisition duration.
    assert time_elapsed == pytest.approx(duration_acq, rel=0.025)
async def test_acq_status(cw):
    """Temperature and buffer-size status appear once acquisition runs.

    Status starts unset (None temperature, empty buffer); after starting
    acquisition and waiting past one status interval both must be populated.
    """
    # Fix: compare against None with ``is``, not ``==`` (PEP 8 idiom).
    assert cw.get_temperature() is None
    assert cw.get_buffersize() == 0
    await cw.set_decimation(0, 10)  # prevent buffer overflow, we don't read the data
    await cw.start_acquisition()
    async for _ in cw.stream(1, 10_000):
        await asyncio.sleep(2.5)  # wait for acq status, sent every 2 seconds
        assert cw.get_temperature() != 0
        assert cw.get_buffersize() > 0
        break
    await cw.stop_acquisition()
| StarcoderdataPython |
3222269 | <reponame>Eerie6560/PonjoPyWrapper
"""
Copyright 2022 <NAME>. All rights reserved.
Project licensed under the MIT License: https://www.mit.edu/~amini/LICENSE.md
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
All portions of this software are available for public use, provided that
credit is given to the original author(s).
"""
import sys
from src.utils.StartupUtil import StartupUtil
from src.managers.EndpointManager import EndpointManager
class PonjoPyWrapper:
    """Entry point of the wrapper; validates the API key at construction."""

    def __init__(self, key: str):
        self.key: str = key
        self.endpointManager: EndpointManager = EndpointManager(self.key)
        key_is_valid = StartupUtil(self.key).isApiKeyValid()
        if not key_is_valid:
            # Invalid key: report and bail out of the whole process.
            print("An invalid API key was provided. Please obtain one by emailing <EMAIL>.")
            sys.exit(0)

    def getEndpointManager(self) -> EndpointManager:
        """Return the endpoint manager bound to this key."""
        return self.endpointManager
| StarcoderdataPython |
12832617 | # Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import enum
import functools
import http
import logging
import uuid

# pylint: disable=wrong-import-order
import flask
from google.protobuf import symbol_database
# pylint: enable=wrong-import-order
# Referenced from https://grpc.github.io/grpc/core/md_doc_statuscodes.html
class RPCCanonicalErrorCode(enum.Enum):
  """Canonical RPC error codes paired with the HTTP status that carries them.

  Member values are ``(rpc_canonical_code, http_status)`` tuples; the view
  function splits them into the RPC-Canonical-Code header and the HTTP
  response status.
  """
  PERMISSION_DENIED = (7, http.HTTPStatus.FORBIDDEN)
  INTERNAL = (13, http.HTTPStatus.INTERNAL_SERVER_ERROR)
  NOT_FOUND = (5, http.HTTPStatus.NOT_FOUND)
  FAILED_PRECONDITION = (9, http.HTTPStatus.BAD_REQUEST)
  ABORTED = (10, http.HTTPStatus.CONFLICT)
  UNIMPLEMENTED = (12, http.HTTPStatus.NOT_IMPLEMENTED)
class ProtoRPCException(Exception):
  """RPC exception carrying extra info to set error status/code in stubby
  requests.

  Attributes:
    code: error classification; unpacked as ``(rpc_code, http_code)`` by the
        view function, so it is expected to be an RPCCanonicalErrorCode.
    detail: optional human-readable detail string, or None.
  """

  def __init__(self, code, detail=None):
    # Modernized: zero-argument super() instead of the legacy
    # super(ProtoRPCException, self) form (this is a Python-3-only file).
    super().__init__()
    self.code = code
    self.detail = detail
class _ProtoRPCServiceBaseMeta(type):
  """Metaclass for ProtoRPC classes.

  This metaclass customizes class creation flow to parse and convert the
  service descriptor object into a friendly data structure for information
  looking up in runtime.
  """

  # pylint: disable=return-in-init
  def __init__(cls, name, bases, attrs, **kwargs):
    service_descriptor = attrs.get('SERVICE_DESCRIPTOR')
    if service_descriptor:
      sym_db = symbol_database.Default()
      for method_desc in service_descriptor.methods:
        # Only methods decorated with @ProtoRPCServiceMethod carry an
        # rpc_method_spec; fill in the message types it left as None.
        method = getattr(cls, method_desc.name, None)
        rpc_method_spec = getattr(method, 'rpc_method_spec', None)
        if rpc_method_spec:
          # Resolve the generated message classes from the protobuf symbol
          # database by their fully qualified names.
          rpc_method_spec.request_type = sym_db.GetSymbol(
              method_desc.input_type.full_name)
          rpc_method_spec.response_type = sym_db.GetSymbol(
              method_desc.output_type.full_name)
    return super().__init__(name, bases, attrs, **kwargs)
class ProtoRPCServiceBase(metaclass=_ProtoRPCServiceBaseMeta):
  """Base class of a ProtoRPC Service.

  Sub-class must override `SERVICE_DESCRIPTOR` to the correct descriptor
  instance. To implement the service's methods, author should define class
  methods with the same names and decorate them with `ProtoRPCServiceMethod`.
  The method will be called with only one argument in type of the request
  message defined in the protobuf file, and the return value should be in
  type of the response message defined in the protobuf file as well.
  """

  # Overridden by subclasses; the metaclass reads this to resolve each RPC
  # method's request/response message types.
  SERVICE_DESCRIPTOR = None
class _ProtoRPCServiceMethodSpec:
"""Placeholder for spec of a ProtoRPC method."""
def __init__(self, request_type, response_type):
self.request_type = request_type
self.response_type = response_type
def ProtoRPCServiceMethod(method):
  """Decorator for ProtoRPC methods.

  It wraps the target method with type-checking assertions as well as
  attaching an additional spec information placeholder.

  Fix: apply ``functools.wraps`` so the wrapper keeps the wrapped method's
  name/docstring for logging and introspection.
  """
  @functools.wraps(method)
  def wrapper(self, request):
    assert isinstance(request, wrapper.rpc_method_spec.request_type)
    logging.info("Request:\n%s", request)
    response = method(self, request)
    assert isinstance(response, wrapper.rpc_method_spec.response_type)
    logging.info("Response:\n%s", response)
    return response
  # Since the service's descriptor will be parsed when the class is created,
  # which is later than the invocation time of this decorator, here it just
  # places the placeholder with dummy contents.
  wrapper.rpc_method_spec = _ProtoRPCServiceMethodSpec(None, None)
  return wrapper
class _ProtoRPCServiceFlaskAppViewFunc:
  """A helper class to handle ProtoRPC POST requests on flask apps."""

  def __init__(self, service_inst):
    self._service_inst = service_inst

  def __call__(self, method_name):
    # Resolve the RPC method by name; anything without an rpc_method_spec
    # (i.e. not decorated with @ProtoRPCServiceMethod) is answered with 404.
    method = getattr(self._service_inst, method_name, None)
    rpc_method_spec = getattr(method, 'rpc_method_spec', None)
    if not rpc_method_spec:
      return flask.Response(status=404)
    try:
      # Deserialize the POST body into the request message, invoke the
      # service method, and serialize its response message.
      request_msg = rpc_method_spec.request_type.FromString(
          flask.request.get_data())
      response_msg = method(request_msg)
      response_raw_body = response_msg.SerializeToString()
    except ProtoRPCException as ex:
      # Domain errors map onto an HTTP status plus gRPC-style headers.
      rpc_code, http_code = ex.code.value
      resp = flask.Response(status=http_code)
      resp.headers['RPC-Canonical-Code'] = rpc_code
      if ex.detail:
        resp.headers['RPC-Error-Detail'] = ex.detail
      return resp
    except Exception:
      # Anything unexpected is logged and surfaced as a 500.
      logging.exception('Caught exception from RPC method %r.', method_name)
      return flask.Response(status=http.HTTPStatus.INTERNAL_SERVER_ERROR)
    response = flask.Response(response=response_raw_body)
    response.headers['Content-type'] = 'application/octet-stream'
    return response
def RegisterProtoRPCServiceToFlaskApp(app_inst, path, service_inst,
                                      service_name=None):
  """Register the given ProtoRPC service to the given flask app.

  Args:
    app_inst: Instance of `flask.Flask`.
    path: Root URL of the service.
    service_inst: The ProtoRPC service to register, must be a subclass of
        `ProtoRPCServiceBase`.
    service_name: Specify the name of the service. Default to
        `service_inst.SERVICE_DESCRIPTOR.name`.
  """
  service_name = service_name or service_inst.SERVICE_DESCRIPTOR.name
  # Unique endpoint name so the same service class can be registered twice.
  endpoint_name = f'__protorpc_service_view_func_{uuid.uuid1()}'
  view_func = _ProtoRPCServiceFlaskAppViewFunc(service_inst)
  app_inst.add_url_rule(f'{path}/{service_name}.<method_name>',
                        endpoint=endpoint_name, view_func=view_func,
                        methods=['POST'])
| StarcoderdataPython |
1965828 | try:
from matplotlib import pyplot as plt
import matplotlib
except:
import matplotlib
matplotlib.rcParams['backend'] = 'TkAgg'
from matplotlib import pyplot as plt
import numpy as np
import pdb
def cornertex(s, ax, offset=(0, 0), fontsize=14):
    """Place label *s* near the top-left corner of *ax* (axes coordinates)."""
    x = 0.02 + offset[0]
    y = 0.95 + offset[1]
    plt.text(x, y, s, transform=ax.transAxes, color='k', va='top',
             ha='left', fontsize=fontsize, zorder=100)
class DataPlt():
    '''
    Dynamic plot context, intended for displaying geometries.
    like removing axes, equal axis, dynamically tune your figure and save it.

    Args:
        figsize (tuple, default=(6,4)): figure size.
        filename (filename, str): filename to store generated figure, if None, it will not save a figure.

    Attributes:
        figsize (tuple, default=(6,4)): figure size.
        filename (filename, str): filename to store generated figure, if None, it will not save a figure.
        ax (Axes): matplotlib Axes instance.

    Examples:
        with DataPlt() as ds:
            c = Circle([2, 2], radius=1.0)
            ds.ax.add_patch(c)
    '''
    def __init__(self, figsize=(6, 4), filename=None, dpi=300):
        # NOTE(review): ``dpi`` is accepted but never stored; __exit__
        # hard-codes dpi=300 in savefig -- confirm whether it should be used.
        self.figsize = figsize
        self.filename = filename
        self.ax = None
        self.fig = None

    def __enter__(self):
        _setup_mpl()
        plt.ion()  # interactive mode so the figure stays visible under pdb
        self.fig = plt.figure(figsize=self.figsize)
        self.ax = plt.gca()
        return self

    def __exit__(self, *args):
        # Drop into pdb so the user can tweak the figure interactively;
        # `c` continues and (when a filename was given) saves the figure.
        if self.filename is not None:
            print('Press `c` to save figure to "%s", `Ctrl+d` to break >>' %
                  self.filename)
            pdb.set_trace()
            plt.savefig(self.filename, dpi=300)
        else:
            pdb.set_trace()
class NoBoxPlt():
    '''
    Dynamic plot context, intended for displaying geometries.
    like removing axes, equal axis, dynamically tune your figure and save it.

    Args:
        figsize (tuple, default=(6,4)): figure size.
        filename (filename, str): filename to store generated figure, if None, it will not save a figure.

    Attributes:
        figsize (tuple, default=(6,4)): figure size.
        graph_layout (tuple|None): number of graphs, None for single graph.
        filename (filename, str): filename to store generated figure, if None, it will not save a figure.
        ax (Axes): matplotlib Axes instance.

    Examples:
        with NoBoxPlt() as ds:
            c = Circle([2, 2], radius=1.0)
            ds.ax.add_patch(c)
    '''
    def __init__(self, figsize=(6, 4), graph_layout=None, filename=None, dpi=300):
        # NOTE(review): ``dpi`` is accepted but never stored; __exit__
        # hard-codes dpi=300 in savefig.
        self.figsize = figsize
        self.filename = filename
        self.ax = None
        self.graph_layout = graph_layout

    def __enter__(self):
        _setup_mpl()
        plt.ion()
        self.fig = plt.figure(figsize=self.figsize)
        if self.graph_layout is None:
            self.ax = plt.subplot(111)
        else:
            # One axis per grid cell, appended in row-major order.
            self.ax = []
            self.gs = plt.GridSpec(*self.graph_layout)
            for i in range(self.graph_layout[0]):
                for j in range(self.graph_layout[1]):
                    self.ax.append(plt.subplot(self.gs[i, j]))
        return self

    def __exit__(self, *args):
        # Hide the frame: equal aspect and no axes on every subplot.
        axes = [self.ax] if self.graph_layout is None else self.ax
        for ax in axes:
            ax.axis('equal')
            ax.axis('off')
        plt.tight_layout()
        if self.filename is not None:
            print('Press `c` to save figure to "%s", `Ctrl+d` to break >>' %
                  self.filename)
            pdb.set_trace()
            plt.savefig(self.filename, dpi=300)
        else:
            pdb.set_trace()
def _setup_mpl():
    """Apply the project's global matplotlib style (fonts and sizes)."""
    plt.rcParams.update({
        'lines.linewidth': 2,
        'axes.labelsize': 16,
        'axes.titlesize': 18,
        'font.family': 'serif',
        'font.serif': 'Ubuntu',
        'mathtext.fontset': 'dejavuserif',
        'font.monospace': 'Ubuntu Mono',
        'axes.labelweight': 'bold',
        'xtick.labelsize': 16,
        'ytick.labelsize': 16,
        'legend.fontsize': 14,
        'figure.titlesize': 18,
    })
def _setup_font():
    """Return a WenQuanYi Micro Hei FontProperties and enable TrueType pdf fonts."""
    font = matplotlib.font_manager.FontProperties(
        family='wqy', fname='/usr/share/fonts/truetype/wqy/wqy-microhei.ttc')
    matplotlib.rcParams["pdf.fonttype"] = 42
    return font
def visualize_tree(pairs, geometry):
    # NOTE(review): this definition is dead code -- it is shadowed by the
    # second ``visualize_tree`` defined immediately below. Kept as-is;
    # consider deleting or renaming it.
    if len(geometry)==2:
        # Grid layout: one node per (x, y) lattice point.
        xs, ys = np.meshgrid(np.arange(geometry[0]), np.arange(geometry[1]), indexing='ij')
    else:
        # Ring layout: num_bit nodes evenly spaced on the unit circle.
        num_bit = geometry[0]
        t = np.linspace(0,2*np.pi*(num_bit-1)/num_bit,num_bit)
        xs, ys = np.cos(t), np.sin(t)
    locs = np.concatenate([xs[...,None], ys[...,None]], axis=-1).reshape([-1,2])
    plt.scatter(locs[:,0], locs[:,1], s=80, zorder=101)
    for i, loc in enumerate(locs):
        plt.text(loc[0], loc[1]-0.2, '%d'%i, fontsize=18, va='center', ha='center')
    # Normalize edge weights before mapping them to colors.
    wl = np.array([p[2] for p in pairs])
    w_interval = wl.max()-wl.min()
    wl/=w_interval*1.2
    wl-=wl.min()-0.01
    print(wl)
    for (i, j, _), w in zip(pairs, wl):
        start, end = locs[i], locs[j]
        cmap = plt.get_cmap('jet')
        plt.plot([start[0], end[0]], [start[1], end[1]],color=cmap(w*10))
def visualize_tree(pairs, geometry, engine='viznet', offsets=None):
    """Draw the weighted graph described by *pairs* with a chosen back end.

    Args:
        pairs: iterable of (i, j, weight) edges; weights are log-scaled
            before being mapped to edge colors.
        geometry: (nx, ny) -> grid layout; (num_bit,) -> circular layout.
        engine: 'networkx', 'matplotlib' or 'viznet' (default) renderer.
        offsets: optional per-node (dx, dy) shifts, used by the viznet
            engine only.
    """
    if len(geometry)==2:
        xs, ys = np.meshgrid(np.arange(geometry[0]), np.arange(geometry[1]), indexing='ij')
        num_bit = np.prod(geometry)
    else:
        num_bit = geometry[0]
        t = np.linspace(0,2*np.pi*(num_bit-1)/num_bit,num_bit)
        xs, ys = np.sqrt(num_bit)/2.5*np.cos(t), np.sqrt(num_bit)/2.5*np.sin(t)
    locs = np.concatenate([xs[...,None], ys[...,None]], axis=-1).reshape([-1,2])
    # Log-scale edge weights for color mapping.
    wl = np.log(np.array([p[2] for p in pairs]))
    if offsets is None:
        offsets = [(0,0)]*num_bit
    if engine == 'networkx':
        import networkx as nx
        G = nx.Graph()
        for i, loc in enumerate(locs):
            plt.text(loc[0], loc[1]-0.2, '%d'%i, fontsize=18, va='center', ha='center')
            G.add_node(i, loc=loc)
        G.add_edges_from([p[:2] for p in pairs])
        vmin = wl.min()-0.3
        vmax = wl.max()+0.3
        print(vmin, vmax)
        # NOTE(review): edge_color applies np.log to wl although wl is
        # already log-scaled above -- confirm the double log is intended
        # (vmin/vmax are computed from the singly-logged wl).
        nx.draw(G, pos=locs, node_color='#A0CBE2', edge_color=np.log(wl), edge_vmin=vmin, edge_vmax=vmax,
                width=4, edge_cmap=plt.cm.Blues, with_labels=False, alpha=1)
    elif engine == 'matplotlib':
        for i, loc in enumerate(locs):
            plt.text(loc[0], loc[1]-0.2, '%d'%i, fontsize=18, va='center', ha='center')
        plt.scatter(xs, ys, s=200, zorder=101)
        # Rescale weights into roughly (0.01, 0.85] for the colormap.
        w_interval = wl.max()-wl.min()
        wl/=w_interval*1.2
        wl-=wl.min()-0.01
        for (i, j, _), w in zip(pairs, wl):
            start, end = locs[i], locs[j]
            cmap = plt.get_cmap('hot')
            plt.plot([start[0], end[0]], [start[1], end[1]],color=cmap(w*10))
    else:
        import viznet
        cmap = plt.get_cmap('binary')
        # Rescale weights into [0.1, ~1.1) for the binary colormap.
        w_interval = wl.max()-wl.min()
        wl=(wl-wl.min())/(w_interval+0.1)
        print(wl)
        wl+=0.1
        viznet.setting.node_setting['lw']=0
        node = viznet.NodeBrush('basic', size='small', color='#6699CC')
        nl = []
        for i, pos in enumerate(locs):
            ni = node >> np.array(pos)+offsets[i]
            ni.text(i)
            nl.append(ni)
        for (i,j,_), w in zip(pairs, wl):
            edge = viznet.EdgeBrush('-', color=cmap(w), lw=2)
            edge >> (nl[i], nl[j])
| StarcoderdataPython |
8034501 | <reponame>jontlu/ECE303-Comm-Nets
import datetime
import logging
class Logger(object):
    """Thin wrapper over ``logging`` that writes to a timestamped file."""

    def __init__(self, name, debug_level):
        """Configure the root logger to log into ``<name>_<timestamp>.log``."""
        stamp = datetime.datetime.strftime(datetime.datetime.now(),
                                           "%Y_%m_%dT%H%M%S")
        logging.basicConfig(filename='{}_{}.log'.format(name, stamp),
                            level=debug_level)

    @staticmethod
    def info(message):
        """Log *message* at INFO level."""
        logging.info(message)

    @staticmethod
    def debug(message):
        """Log *message* at DEBUG level."""
        logging.debug(message)
| StarcoderdataPython |
8198021 | import pandas as pd
import warnings
def add_bins_col_to_rank_df(df_feature,
                            n_bins,
                            bin_no_col='bin_no',
                            item_rank_col='equity_rank',
                            max_rank_col='max_rank'
                            ):
    """Assign each row to one of *n_bins* bins computed from its rank.

    Bin 1 holds the best (lowest-numbered) ranks; the mapping is
    ``1 + n_bins * (rank - 1) // max_rank``, written into *bin_no_col*.

    :param df_feature: pandas dataframe with rank values per row.
    :param n_bins: number of bins to split the equities into.
    :param bin_no_col: name of the output bin-number column.
    :param item_rank_col: name of the per-item rank column.
    :param max_rank_col: name of the maximum-possible-rank column.
    :return: the same dataframe with the bin column added.
    """
    ranks = df_feature[item_rank_col]
    max_ranks = df_feature[max_rank_col]
    df_feature[bin_no_col] = 1 + (n_bins * (ranks - 1) // max_ranks)
    return df_feature
def get_ranks(df_feature,
              date_col_name,
              feature):
    """Rank feature values within each date (rank 1 = highest value).

    Adds an ``equity_rank`` column (min-method, descending) and a
    ``max_rank`` column holding each date's largest rank, which is later
    used to map ranks onto bins.

    :param df_feature: long pandas dataframe with one feature value per row.
    :param date_col_name: name of the date column to group by.
    :param feature: name of the feature column to rank.
    :return: the same dataframe with both rank columns added.
    """
    by_date = df_feature.groupby(date_col_name)
    df_feature['equity_rank'] = by_date[feature].rank(method='min',
                                                      ascending=False)
    df_feature['max_rank'] = (df_feature.groupby(date_col_name)['equity_rank']
                              .transform('max'))
    return df_feature
def compute_date_level_metrics(df_detail,
                               bin_labels,
                               date_col_name,
                               return_col_name,
                               feature,
                               corr_method):
    """
    Description: This function computes the back-testing metrics, at the date level (freq of the input dataframe)
    :param df_detail:Type pandas dataframe. Detail dataframe with bins and rank information.
    :param bin_labels:Type list. list of bin labels. It is assumed that the labels are in descending order.
                      eg: ['Q1','Q2','Q3'] implies Q1 is the highest portfolio and Q3 is the lowest.
    :param date_col_name:Type str. Name of the date column.
    :param return_col_name:Type str. Name of the return column.
    :param feature:Type string. Name of feature.
    :param corr_method:Type string. correlation method being used.
    :return: Type pandas dataframe. feature and period level aggregate metrics.
    """
    df_agg = pd.DataFrame()
    # Per-date mean/std of returns for each bin, e.g. columns Q1_avg, Q1_std.
    for index, bin_lbl in enumerate(bin_labels):
        bin_no = index + 1
        bin_avg_lbl = bin_lbl + '_avg'
        bin_std_lbl = bin_lbl + '_std'
        df_agg[bin_avg_lbl] = df_detail[df_detail['bin_no'] == bin_no].groupby(date_col_name)[return_col_name].mean()
        df_agg[bin_std_lbl] = df_detail[df_detail['bin_no'] == bin_no].groupby(date_col_name)[return_col_name].std()
    # Spread = top-portfolio mean minus bottom-portfolio mean, in percent.
    highest_bin_avg_col = bin_labels[0] + '_avg'
    lowest_bin_idx = len(bin_labels) - 1
    lowest_bin_avg_col = bin_labels[lowest_bin_idx] + '_avg'
    df_agg['spread'] = (df_agg[highest_bin_avg_col] - df_agg[lowest_bin_avg_col])*100
    # Cross-sectional information coefficient: per-date correlation between
    # the feature values and the returns.
    corr_object = df_detail.groupby(date_col_name)[[return_col_name, feature]].corr(method=corr_method)
    # The groupby-corr output is a 2x2 matrix per date; keep only the
    # return-vs-feature entries by dropping rows equal to 1.
    # NOTE(review): this would also drop a genuine cross-correlation of
    # exactly 1 -- confirm that is acceptable.
    corr_series = corr_object.reset_index(1, drop=True)[return_col_name]
    corr_series = corr_series[~corr_series.eq(1)]
    df_agg['ic_cs'] = corr_series
    return df_agg
def get_dates_deviating_from_threshold(df_in,
                                       date_col_name,
                                       equity_identifier,
                                       items_per_bin_deviation_threshold,
                                       expected_no_of_items_per_bin):
    """Return the dates whose per-bin item count strays beyond the threshold.

    Counts items per date, measures the absolute deviation from the expected
    per-bin item count, and reports the offending dates as 'YYYY-MM-DD'
    strings.

    :param df_in: pandas dataframe already subset to one bin number.
    :param date_col_name: name of the date column (datetime-like values).
    :param equity_identifier: name of the equity identifier column.
    :param items_per_bin_deviation_threshold: permissible absolute deviation.
    :param expected_no_of_items_per_bin: correct number of items per bin.
    :return: list of offending dates formatted as 'YYYY-MM-DD'.
    """
    counts = (df_in.groupby(date_col_name)[equity_identifier]
              .count()
              .to_frame('items_count'))
    counts['deviation'] = (counts['items_count']
                           - expected_no_of_items_per_bin).abs()
    offenders = counts[counts['deviation'] > items_per_bin_deviation_threshold]
    return [ts.date().strftime("%Y-%m-%d") for ts in offenders.index.tolist()]
def custom_formatwarning(message, category, filename, lineno, line=''):
    """Warning formatter that shows only the message text.

    Overrides the default ``warnings.formatwarning`` so emitted warnings
    omit the file/line details and print just the message plus a newline.

    :param message: the warning message object.
    :return: the message rendered as a string followed by a newline.
    """
    return f'{message}\n'
def get_detail_backtest_results(input_df,
                                features,
                                return_col_name='returns',
                                equity_identifier='Equity Parent',
                                date_col_name='date',
                                n_bins=5,
                                bin_labels=None,
                                corr_method='spearman',
                                items_per_bin_deviation_threshold=1,
                                drop_months_outside_of_threshold=False):
    """
    Description: This function generates the back testing results for a list of features.
                 This procedure does not handle subsetting for a specified date range.
                 Subset to a specified date range needs to be done prior to passing the input dataframe.
                 The procedure works on the assumption that the bin_labels are specified in descending order.
                 eg: ['Q1','Q2','Q3','Q4'] implies Q1 is the highest portfolio and Q4 is the lowest.
                 The generation of bins work ideally with a sufficient number of unique values in a feature.
                 The items_per_bin_deviation_threshold parameter can be used to decide how strict we want to be with the effect of non-unique values.
                 items_per_bin_deviation_threshold acts on the difference between the expected number of items in a bin
                 vs the actual number of items.
                 drop_months_outside_of_threshold can be set to True, if the months deviating from the
                 above threshold should be excluded from back testing.
    :param input_df: Type pandas dataframe. long format dataframe.
    :param features: Type list. list of features for which backtesting needs to be performed. These should correspond
                     to the names of the columns in the df_long dataframe.
    :param return_col_name: Type str. Name of the return column.
    :param equity_identifier : Type str. Name of the equity identifier column.
    :param date_col_name:Type str. Name of the date column.
    :param n_bins:Type int. number of bins to split the equities into.
    :param bin_labels:Type list. list of bin labels. It is assumed that the labels are in descending order.
                      eg: ['Q1','Q2','Q3'] implies Q1 is the highest portfolio and Q3 is the lowest.
    :param corr_method:Type string. correlation method being used.
    :param items_per_bin_deviation_threshold:Type int. Permissible deviation from the expected number of items per bin.
    :param drop_months_outside_of_threshold:Type boolean. Decision to drop months that deviate beyond the acceptable
                                            items_per_bin_deviation_threshold.
    :return:Type pandas dataframe. detail backtesting results for each period
    """
    # Default labels Q1..Qn (descending convention: Q1 = top portfolio).
    if bin_labels is None:
        bin_labels = ['Q' + str(i + 1) for i in range(n_bins)]
    df_long = input_df.copy()
    long_cols = list(df_long.columns)
    # If dates live in the index, promote them to a regular 'date' column.
    if date_col_name not in long_cols:
        df_long = df_long.reset_index()
        df_long.rename(columns={'index': 'date'}, inplace=True)
    # NOTE(review): this mutates the caller's ``features`` list in place.
    if return_col_name in features:
        features.remove(return_col_name)
    detail_results = []
    features = sorted(features)
    feature_cnt = 0
    total_features = len(features)
    print('Total features for processing: ' + str(total_features))
    # Show only the message text for the warnings emitted below.
    warnings.formatwarning = custom_formatwarning
    for feature in features:
        category = feature.split('_bshift')[0]
        feature_cols = [equity_identifier, date_col_name, return_col_name, feature]
        df_feature_detail = df_long[feature_cols].copy()
        # Rank within each date, then map the ranks onto n_bins portfolios.
        df_feature_detail = get_ranks(df_feature_detail,
                                      date_col_name,
                                      feature)
        df_feature_detail = add_bins_col_to_rank_df(df_feature_detail,
                                                    n_bins)
        # Months where ties/few unique values prevented all n_bins from
        # forming are warned about and excluded from the computation.
        df_bin_check = pd.DataFrame(df_feature_detail.groupby(date_col_name)['bin_no'].max())
        bin_check_mask = df_bin_check['bin_no'] != n_bins
        insufficient_bins_dates = [item.date().strftime("%Y-%m-%d") for item in df_bin_check[bin_check_mask].index.tolist()]
        if len(insufficient_bins_dates) > 0:
            warnings.warn('\nInsufficient bins warning:\nFeature: ' + feature+'\n'+'\n' +
                          'Months with insufficient bins:' + str(insufficient_bins_dates)+ '\n' + '\n' +
                          'These months are excluded from the back testing computation')
            df_feature_detail = df_feature_detail[~df_feature_detail[date_col_name].isin(insufficient_bins_dates)]
            print(df_feature_detail.shape)
        total_no_of_items = df_feature_detail[equity_identifier].unique().shape[0]
        expected_no_of_items_per_bin = total_no_of_items/n_bins
        # NOTE(review): 'lowest'/'highest' below refer to bin_no (1 vs
        # n_bins). Per the bin_labels convention bin 1 is the TOP portfolio,
        # yet the warning below labels bin n_bins as 'Top Portfolio' --
        # verify whether those labels are swapped.
        mask_bin_lowest = df_feature_detail['bin_no'] == 1
        mask_bin_highest = df_feature_detail['bin_no'] == n_bins
        df_bin_lowest = df_feature_detail[mask_bin_lowest].copy()
        df_bin_highest = df_feature_detail[mask_bin_highest].copy()
        bin_lowest_bad_dates = get_dates_deviating_from_threshold(df_bin_lowest,
                                                                  date_col_name,
                                                                  equity_identifier,
                                                                  items_per_bin_deviation_threshold,
                                                                  expected_no_of_items_per_bin)
        bin_highest_bad_dates = get_dates_deviating_from_threshold(df_bin_highest,
                                                                   date_col_name,
                                                                   equity_identifier,
                                                                   items_per_bin_deviation_threshold,
                                                                   expected_no_of_items_per_bin)
        if len(bin_lowest_bad_dates) > 0 or len(bin_highest_bad_dates) > 0:
            warnings.warn('\nDeviation from threshold warning:\nFeature: ' + feature+'\n'+'\n' +
                          'Top Portfolio - Months which deviate from threshold: '+str(bin_highest_bad_dates)+'\n'+'\n' +
                          'Bottom Portfolio - Months which deviate from threshold: '+str(bin_lowest_bad_dates))
            if drop_months_outside_of_threshold:
                months_to_drop = bin_lowest_bad_dates + bin_highest_bad_dates
                warnings.warn('\nMonths dropped warning:\nFeature: ' + feature + '\n'+'\n' +
                              'Months: '+str(months_to_drop) +' will be dropped from computation')
                df_feature_detail = df_feature_detail[~df_feature_detail[date_col_name].isin(months_to_drop)]
        # Per-date portfolio averages, spread and information coefficient.
        df_feature_detail_agg = compute_date_level_metrics(df_feature_detail,
                                                           bin_labels,
                                                           date_col_name,
                                                           return_col_name,
                                                           feature,
                                                           corr_method)
        df_feature_detail_agg['feature'] = feature
        df_feature_detail_agg['category'] = category
        detail_results.append(df_feature_detail_agg)
        feature_cnt += 1
        if feature_cnt % 100 == 0:
            print(str(feature_cnt) + ' features completed')
    detail_results_df = pd.concat(detail_results)
    return detail_results_df
def perform_aggregation_across_time(detail_results):
    """
    Description: This function takes as input the monthly level back testing results, and
                 returns a data frame with the results aggregated across time.
    :param detail_results: Type pandas dataframe. detail results that need to be aggregated.
    :return:Type pandas dataframe. Aggregated dataframe.
    """
    out_df = pd.DataFrame()
    all_cols = list(detail_results.columns)
    # Columns that get both a mean and a standard deviation across time.
    # NOTE(review): 'Qe' and 'mc_return' are not produced by
    # compute_date_level_metrics in this file -- confirm they exist in
    # ``detail_results``; a missing column raises KeyError below.
    cols_for_std = ['spread', 'ic_cs', 'Qe', 'mc_return']
    key_cols = ['feature', 'category']
    # Everything else (e.g. the Q*_avg / Q*_std columns) is simply averaged.
    cols_for_avg = sorted(list(set(all_cols) - set(cols_for_std) - set(key_cols)))
    for col in cols_for_avg:
        out_df[col] = detail_results.groupby(key_cols)[col].mean()
    for col in cols_for_std:
        # Output naming: 'spread' -> 'spread_avg'/'spread_std'; a column
        # already containing 'avg' keeps its own name for the mean and uses
        # its first underscore-delimited token for the std column.
        if 'avg' not in col:
            avg_col = col + '_avg'
            std_col = col + '_std'
        else:
            avg_col = col
            std_col = col.split('_')[0] + '_std'
        out_df[avg_col] = detail_results.groupby(key_cols)[col].mean()
        out_df[std_col] = detail_results.groupby(key_cols)[col].std()
    return out_df
| StarcoderdataPython |
151244 | <gh_stars>0
import setuptools
# No-argument setup(): project metadata presumably comes from setup.cfg or
# pyproject.toml -- confirm in the repository root.
setuptools.setup()  # still required for editable installs.
| StarcoderdataPython |
348887 | <filename>CA117/Lab_8/swapletters_51.py<gh_stars>1-10
import sys


def swap_adjacent(text):
    """Return *text* with each adjacent pair of characters swapped.

    An odd-length input keeps its final character in place, e.g.
    'abcde' -> 'badce'. Equivalent to the original golfed one-liner,
    rewritten for readability.
    """
    chars = list(text)
    swapped = [chars[i + 1] + chars[i] for i in range(0, len(chars) - 1, 2)]
    tail = chars[-1] if len(chars) % 2 else ''
    return ''.join(swapped) + tail


if __name__ == '__main__':
    # Same CLI contract as before: swap the characters of argv[1] and print.
    print(swap_adjacent(sys.argv[1]))
| StarcoderdataPython |
3485370 | '''Simple script that outputs dataset information.'''
from src.datasets.catalog import DATASET_DICT, PRETRAINING_DATASETS, TRANSFER_DATASETS
def bold(string):
    """Wrap *string* in ANSI bold escape codes."""
    return '\033[1m{}\033[0m'.format(string)
def main():
    """Print every supported dataset, then the pretraining/transfer subsets."""
    print(bold('All supported datasets'))
    width = max(len(key) for key in DATASET_DICT)
    for key in sorted(DATASET_DICT):
        print(f'{key.ljust(width)}: {DATASET_DICT[key].__name__}')
    print()
    for heading, dataset_names in (
        ('Datasets for pretraining', PRETRAINING_DATASETS),
        ('Datasets for transfer learning', TRANSFER_DATASETS),
    ):
        print(bold(heading))
        print(", ".join(sorted(dataset_names)))
        print()
    print('Pass the dataset name for training from the command line as the flag `dataset=name` (i.e. dataset=cifar10)')


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6548429 | import pymysql
# Database connection. NOTE(review): credentials and database name are
# hard-coded placeholders; replace before use.
connection = pymysql.connect(host="localhost", user="root", passwd="", database="databaseName")
try:
    cursor = connection.cursor()
    # Queries for inserting values.
    insert_statements = (
        "INSERT INTO Artists(NAME, TRACK) VALUES('Towang', 'Jazz' );",
        "INSERT INTO Artists(NAME, TRACK) VALUES('Sadduz', 'Rock' );",
        "INSERT INTO Artists(NAME, TRACK) VALUES('maek', 'zoko' );",
    )
    # Executing the queries.
    for statement in insert_statements:
        cursor.execute(statement)
    # Commit only after every insert succeeded.
    connection.commit()
finally:
    # Fix: the original leaked the connection when any execute() raised;
    # the connection is now closed on both success and failure.
    connection.close()
1820426 | import pytest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from src.map.DriverProfile import DriverProfile
def get_driver_profile_name(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms):
    """Construct a DriverProfile and return its profile name.

    Positional fields mirror the DriverProfile constructor: profile name,
    over-braking factor, following time, max/min comfortable acceleration,
    max comfortable speed, time to reach a desired speed, and the interval
    (ms) at which the driver checks its surroundings.
    """
    profile = DriverProfile(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms)
    return profile.get_driver_profile_name()


def test_get_driver_profile_name():
    """The getter must yield the name field and none of the numeric fields."""
    args = ('test', 1.2, 3.4, 25.2, 2.2, 75.0, 5.0, 3.0)
    assert get_driver_profile_name(*args) == 'test'
    for wrong in (1.2, 3.4, 25.2, 2.2, 75.0, 5.0, 3.0):
        assert get_driver_profile_name(*args) != wrong


def get_over_braking_factor(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms):
    """Construct a DriverProfile (fields as in get_driver_profile_name) and
    return its over-braking factor."""
    profile = DriverProfile(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms)
    return profile.get_over_braking_factor()


def test_get_over_braking_factor():
    """The getter must yield the over-braking factor and no other field."""
    args = ('test', 1.2, 3.4, 25.2, 2.2, 75.0, 5.0, 3.0)
    assert get_over_braking_factor(*args) == 1.2
    for wrong in (3.4, 25.2, 2.2, 75.0, 5.0, 3.0):
        assert get_over_braking_factor(*args) != wrong


def get_following_time(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms):
    """Construct a DriverProfile (fields as in get_driver_profile_name) and
    return its following time."""
    profile = DriverProfile(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms)
    return profile.get_following_time()


def test_get_following_time():
    """The getter must yield the following time and no other field."""
    args = ('test', 1.2, 3.4, 25.2, 2.2, 75.0, 5.0, 3.0)
    assert get_following_time(*args) == 3.4
    for wrong in (1.2, 25.2, 2.2, 75.0, 5.0, 3.0):
        assert get_following_time(*args) != wrong


def get_max_accel(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms):
    """Construct a DriverProfile (fields as in get_driver_profile_name) and
    return the max acceleration the driver is comfortable with."""
    profile = DriverProfile(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms)
    return profile.get_max_accel()


def test_get_max_accel():
    """The getter must yield the max acceleration and no other field."""
    args = ('test', 1.2, 3.4, 25.2, 2.2, 75.0, 5.0, 3.0)
    assert get_max_accel(*args) == 25.2
    for wrong in (1.2, 3.4, 2.2, 75.0, 5.0, 3.0):
        assert get_max_accel(*args) != wrong
def get_min_accel(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms):
    """Construct a DriverProfile (name, over-braking factor, following time,
    max/min acceleration, max speed, acceleration time, update interval ms)
    and return the min acceleration the driver is comfortable with."""
    profile = DriverProfile(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms)
    return profile.get_min_accel()


def test_get_min_accel():
    """The getter must yield the min acceleration and no other field."""
    args = ('test', 1.2, 3.4, 25.2, 2.2, 75.0, 5.0, 3.0)
    assert get_min_accel(*args) == 2.2
    for wrong in (1.2, 3.4, 25.2, 75.0, 5.0, 3.0):
        assert get_min_accel(*args) != wrong


def get_max_speed(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms):
    """Construct a DriverProfile (fields as in get_min_accel) and return the
    max speed the driver is comfortable with."""
    profile = DriverProfile(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms)
    return profile.get_max_speed()


def test_get_max_speed():
    """The getter must yield the max speed and no other field."""
    args = ('test', 1.2, 3.4, 25.2, 2.2, 75.0, 5.0, 3.0)
    assert get_max_speed(*args) == 75.0
    for wrong in (1.2, 3.4, 25.2, 2.2, 5.0, 3.0):
        assert get_max_speed(*args) != wrong


def get_accel_time(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms):
    """Construct a DriverProfile (fields as in get_min_accel) and return the
    time it takes the driver to accelerate to a desired speed."""
    profile = DriverProfile(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms)
    return profile.get_accel_time()


def test_get_accel_time():
    """The getter must yield the acceleration time and no other field."""
    args = ('test', 1.2, 3.4, 25.2, 2.2, 75.0, 5.4, 3.0)
    assert get_accel_time(*args) == 5.4
    for wrong in (1.2, 3.4, 25.2, 2.2, 75.0, 3.0):
        assert get_accel_time(*args) != wrong


def get_update_time_ms(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms):
    """Construct a DriverProfile (fields as in get_min_accel) and return the
    interval (ms) at which the driver checks its surroundings."""
    profile = DriverProfile(name, o_b_f, f_t, max_a, min_a, max_s, a_t, u_t_ms)
    return profile.get_update_time_ms()


def test_get_update_time_ms():
    """The getter must yield the update interval and no other field."""
    args = ('test', 1.2, 3.4, 25.2, 2.2, 75.0, 5.4, 7.77)
    assert get_update_time_ms(*args) == 7.77
    for wrong in (1.2, 3.4, 25.2, 2.2, 75.0, 5.4):
        assert get_update_time_ms(*args) != wrong
def update_over_braking_factor(driver, obf):
    """Set a new over-braking factor *obf* on *driver*.

    NOTE(review): the underlying DriverProfile method is spelled
    'update_over_breaking_factor' ("breaking"); the call must match that API.

    :type driver: DriverProfile
    :return: whatever the DriverProfile updater returns.
    """
    return driver.update_over_breaking_factor(obf)


def test_update_over_braking_factor():
    """The update must be observable through the matching getter."""
    profile = DriverProfile("Default", 8, 2, 2, 0, 30, 3, 1)
    assert profile.get_over_braking_factor() == 8
    update_over_braking_factor(profile, 15)
    assert profile.get_over_braking_factor() == 15


def update_following_time(driver, ft):
    """Set a new following time *ft* on *driver* (a DriverProfile)."""
    return driver.update_following_time(ft)


def test_update_following_time():
    """The update must be observable through the matching getter."""
    profile = DriverProfile("Default", 8, 2, 2, 0, 30, 3, 1)
    assert profile.get_following_time() == 2
    update_following_time(profile, 5)
    assert profile.get_following_time() == 5


def update_max_accel(driver, ma):
    """Set a new max acceleration *ma* on *driver* (a DriverProfile)."""
    return driver.update_max_accel(ma)


def test_update_max_accel():
    """The update must be observable through the matching getter."""
    profile = DriverProfile("Default", 8, 2, 2, 0, 30, 3, 1)
    assert profile.get_max_accel() == 2
    update_max_accel(profile, 7)
    assert profile.get_max_accel() == 7


def update_min_accel(driver, min_a):
    """Set a new min acceleration *min_a* on *driver* (a DriverProfile)."""
    return driver.update_min_accel(min_a)


def test_update_min_accel():
    """The update must be observable through the matching getter."""
    profile = DriverProfile("Default", 8, 2, 2, 0, 30, 3, 1)
    assert profile.get_min_accel() == 0
    update_min_accel(profile, 3)
    assert profile.get_min_accel() == 3
def update_max_speed(driver, speed):
    """Set a new max speed *speed* on *driver* (a DriverProfile)."""
    return driver.update_max_speed(speed)


def test_update_max_speed():
    """The update must be observable through the matching getter."""
    profile = DriverProfile("Default", 8, 2, 2, 0, 30, 3, 1)
    assert profile.get_max_speed() == 30
    update_max_speed(profile, 60)
    assert profile.get_max_speed() == 60


def update_accel_time(driver, at):
    """Set a new acceleration time *at* on *driver* (a DriverProfile)."""
    return driver.update_accel_time(at)


def test_update_accel_time():
    """The update must be observable through the matching getter."""
    profile = DriverProfile("Default", 8, 2, 2, 0, 30, 3, 1)
    assert profile.get_accel_time() == 3
    update_accel_time(profile, 9)
    assert profile.get_accel_time() == 9


def update_update_time_ms(driver, utm):
    """Set a new update interval (ms) *utm* on *driver* (a DriverProfile)."""
    return driver.update_update_time_ms(utm)


def test_update_update_time_ms():
    """The update must be observable through the matching getter."""
    profile = DriverProfile("Default", 8, 2, 2, 0, 30, 3, 1)
    assert profile.get_update_time_ms() == 1
    update_update_time_ms(profile, 4)
    assert profile.get_update_time_ms() == 4


def update_driver_profile_name(driver, name):
    """Set a new profile name *name* on *driver* (a DriverProfile)."""
    return driver.update_driver_profile_name(name)


def test_update_driver_profile_name():
    """The update must be observable through the matching getter."""
    profile = DriverProfile("Default", 8, 2, 2, 0, 30, 3, 1)
    assert profile.get_driver_profile_name() == "Default"
    update_driver_profile_name(profile, "updatedname")
    assert profile.get_driver_profile_name() == "updatedname"
3521189 | from pygame import mixer
import json
class AudioLoader:
    """Loads every sound listed in a folder's data.json manifest."""

    def __init__(self, folder_location: str):
        """Read <folder_location>/data.json and load each move's sound file.

        Populates:
          audio_by_id -- sound id (stringified) -> mixer.Sound
          audio       -- the same sounds, in manifest order

        :param folder_location: folder containing data.json and the audio files
        """
        self.audio_by_id = {}
        self.audio = []
        with open(f"{folder_location}/data.json", "r") as manifest:
            data = json.load(manifest)
        for entry in data["moves"]:
            loaded_sound = mixer.Sound(f"{folder_location}/{entry['file']}")
            self.audio_by_id[f"{entry['id']}"] = loaded_sound
            self.audio.append(loaded_sound)
| StarcoderdataPython |
3559184 | import logging
import os
import subprocess
import sys
def cpuStats():
    """Print the Python version, system CPU percent, virtual-memory stats,
    and this process's resident memory in GiB (requires psutil)."""
    import psutil
    print(sys.version)
    print(psutil.cpu_percent())
    print(psutil.virtual_memory())  # physical memory usage
    this_process = psutil.Process(os.getpid())
    gib_used = this_process.memory_info()[0] / 2. ** 30  # bytes -> GiB
    print('memory GB:', gib_used)
def get_gpu_memory_map():
    """Get the current gpu usage via nvidia-smi.

    Returns
    -------
    usage: dict
        Keys are device ids as integers.
        Values are memory usage as integers in MB.
    """
    raw = subprocess.check_output(
        ['nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader'],
        encoding='utf-8'
    )
    # nvidia-smi emits one integer per line, one line per GPU, in id order.
    per_gpu = [int(line) for line in raw.strip().split('\n')]
    return {device_id: used_mb for device_id, used_mb in enumerate(per_gpu)}
def setup_logger(name, log_file, level=logging.INFO):
    """Create (or retrieve) a named logger that appends to *log_file*.

    Fix: logging.getLogger returns the same object for the same name, so the
    original attached one more FileHandler on every call, duplicating every
    subsequent record. A handler for *log_file* is now attached only once.

    :param name: logger name (registry key for logging.getLogger).
    :param log_file: path of the file to write log records to.
    :param level: logging level for the logger (default logging.INFO).
    :return: the configured logging.Logger instance.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    target = os.path.abspath(log_file)
    already_attached = any(
        isinstance(h, logging.FileHandler) and h.baseFilename == target
        for h in logger.handlers
    )
    if not already_attached:
        logger.addHandler(logging.FileHandler(log_file))
    return logger
| StarcoderdataPython |
6582352 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models.emojis module."""
from __future__ import unicode_literals
from abc import ABCMeta
from future.utils import with_metaclass
from .base import Base
class Background(with_metaclass(ABCMeta, Base)):
    """Abstract base class for background objects.

    Concrete subclasses (e.g. LinearGradientBackground) overwrite ``type``
    with their background type string.
    """
    def __init__(self, **kwargs):
        """__init__ method.

        :param kwargs: forwarded unchanged to ``Base``.
        """
        super(Background, self).__init__(**kwargs)
        # Placeholder; set by concrete subclasses.
        self.type = None
class LinearGradientBackground(Background):
    """A background filled with a linear color gradient."""

    def __init__(self, angle, start_color, end_color,
                 center_color=None, center_position=None, **kwargs):
        """Create a ``linearGradient`` background.

        :param str angle: angle at which the gradient moves
        :param str start_color: color at the gradient's starting point
        :param str end_color: color at the gradient's ending point
        :param str center_color: optional color in the middle of the gradient
        :param str center_position: optional position of the intermediate stop
        :param kwargs: forwarded to Background
        """
        super(LinearGradientBackground, self).__init__(**kwargs)
        self.type = 'linearGradient'
        # Mandatory gradient geometry and end-point colors.
        self.angle = angle
        self.start_color = start_color
        self.end_color = end_color
        # Optional intermediate color stop.
        self.center_color = center_color
        self.center_position = center_position
5002674 | <reponame>MyrtoLimnios/covid19-biblio<filename>scripts/generate_readme/generate_readme.py
# -*- coding: UTF-8
import pandas as pd
# import sys
# sys.path.append('../utils/')
# Source spreadsheet of the review and its derived URLs.
GG_SPREADSHEET = "https://docs.google.com/spreadsheets/d/1WWIOWnuJuOKKNQA71qgxs7IVHtYL7ROKm7m7LwGY3gU"
# CSV export URL. NOTE(review): the literal 'KEY' placeholder is never
# substituted -- presumably ignored by the export endpoint; confirm.
GG_SPREADSHEET_NAME = GG_SPREADSHEET + "/export?format=csv&id=KEY&gid=0"
# Anchors of the sheet tabs describing the columns and the glossary.
GG_SPREADSHEET_COLUMNS = '/edit#gid=1061863733'
GG_SPREADSHEET_GLOSSARY = '/edit#gid=693948220'
# Public dashboard mirroring the table.
KIBANA = "http://covidreview.org/kibana"
def load_gsheet(path):
    """Download the spreadsheet at *path* as CSV; missing cells become 'null'."""
    return pd.read_csv(path).fillna('null')


def add_h2_title(fd, text):
    """Write *text* as a Markdown H2 heading (also echoed to stdout)."""
    print(text.encode('utf-8'))
    fd.write(u' '.join(('##', text, '\n')))


def add_h1_title(fd, text):
    """Write *text* as a Markdown H1 heading (also echoed to stdout)."""
    print(text.encode('utf-8'))
    fd.write(u' '.join(('#', text, '\n')))


def add_general_information(fd):
    """Write the 'General information' H3 section header."""
    fd.write(u'### General information\n')


def add_technical_information(fd):
    """Write the 'Technical information' H3 section header."""
    fd.write(u'### Technical information\n')


def add_model_num(fd, num):
    """Write an H3 header naming the *num*-th model (1-4; blank otherwise)."""
    ordinal = {1: 'First', 2: 'Second', 3: 'Third', 4: 'Fourth'}.get(num, '')
    fd.write(u'### ' + ordinal + ' model\n')


def add_model_information(fd):
    """Write the 'Model information' H4 section header."""
    fd.write(u'#### Model information\n')


def add_estimation_information(fd):
    """Write the 'Estimation information' H4 section header."""
    fd.write(u'#### Estimation information\n')


def add_parameters_information(fd):
    """Write the 'Model parameters information' H4 section header."""
    fd.write(u'#### Model parameters information\n')


def add_additional_information(fd):
    """Write the 'Additional information' H4 section header."""
    fd.write(u'#### Additional information\n')
def _write_bold_field(fd, label, text):
    """Write a single '**label** : value</br>' Markdown line to *fd*."""
    fd.write(u''.join(('**' + label + '** : ', text.encode('utf-8').decode('utf-8'), '</br>', '\n')))


def add_authors(fd, text):
    """Write the authors line."""
    _write_bold_field(fd, 'Authors', text)


def add_publication_date(fd, text):
    """Write the publication-date line."""
    _write_bold_field(fd, 'Publication date', text)


def add_paper_link(fd, text):
    """Write the paper link as a Markdown hyperlink line."""
    fd.write(u''.join(('**Paper** : Available [here](', text.encode('utf-8').decode('utf-8'), ')</br>', '\n')))


def add_code_available(fd, text):
    """Write the code-availability line."""
    _write_bold_field(fd, 'Code available', text)


def add_stochastic_deterministic(fd, text):
    """Write the deterministic/stochastic model line."""
    _write_bold_field(fd, 'Deterministic or stochastic model', text)


def add_category_of_model(fd, text):
    """Write the model-category line."""
    _write_bold_field(fd, 'Model category', text)
def _add_details_section(fd, label, text):
    """Write a collapsible HTML <details> section titled *label* holding *text*.

    DRY refactor: the fourteen writers below previously each rebuilt this
    identical markup inline.
    """
    form = ('<details><summary> <b>' + label + '</b> </summary>'
            + text.encode('utf-8').decode('utf-8') + '</details>')
    fd.write(u''.join((form, '\n', '\n')))


def add_sub_category_of_model(fd, text):
    """Write the 'Model sub-category' collapsible section."""
    _add_details_section(fd, 'Model sub-category', text)


def add_data_used_for_the_model(fd, text):
    """Write the 'Data used for the model' collapsible section."""
    _add_details_section(fd, 'Data used for the model', text)


def add_global_approach(fd, text):
    """Write the 'Global approach' collapsible section."""
    _add_details_section(fd, 'Global approach', text)


def add_details_approach(fd, text):
    """Write the 'Details of approach' collapsible section."""
    _add_details_section(fd, 'Details of approach', text)


def add_outputs(fd, text):
    """Write the 'Outputs' collapsible section."""
    _add_details_section(fd, 'Outputs', text)


def add_intervention_strategies(fd, text):
    """Write the 'How intervention strategies are modelled' section."""
    _add_details_section(fd, 'How intervention strategies are modelled', text)


def add_problem_formulation(fd, text):
    """Write the 'Problem Formulation' collapsible section."""
    _add_details_section(fd, 'Problem Formulation', text)


def add_solving_method(fd, text):
    """Write the 'Solving Method' collapsible section."""
    _add_details_section(fd, 'Solving Method', text)


def add_epidemiological_parameters(fd, text):
    """Write the 'Epidemiological parameters' collapsible section."""
    _add_details_section(fd, 'Epidemiological parameters', text)


def add_other_parameters(fd, text):
    """Write the 'Other parameters' collapsible section."""
    _add_details_section(fd, 'Other parameters', text)


def add_input_estimation(fd, text):
    """Write the 'How parameters are estimated' collapsible section."""
    _add_details_section(fd, 'How parameters are estimated', text)


def add_details_input_estimation(fd, text):
    """Write the 'Details on parameters estimation' collapsible section."""
    _add_details_section(fd, 'Details on parameters estimation', text)


def add_additional_assumptions(fd, text):
    """Write the 'Additional Assumptions' collapsible section."""
    _add_details_section(fd, 'Additional Assumptions', text)


def add_comments(fd, text):
    """Write the 'Comment/issues' collapsible section."""
    _add_details_section(fd, 'Comment/issues', text)
def add_space(fd):
    """Write an HTML line break followed by a blank line."""
    fd.write(u'</br>\n\n')


def get_href(title):
    """Turn a paper title into the GitHub-style '#anchor' link used in TOCs."""
    anchor = title.lower()
    # Strip punctuation GitHub drops from heading anchors, then hyphenate.
    for ch in "():,'":
        anchor = anchor.replace(ch, '')
    return '#' + anchor.replace(' ', '-')


def get_author(authors):
    """Return the first author, appending ' et al.' when there are several.

    Fix: the original tested len() of the first author's *name string*
    instead of the number of comma-separated authors, so every single author
    whose name was longer than one character received an ' et al.' suffix.

    :param authors: comma-separated author list, e.g. 'Smith, Jones'.
    """
    names = authors.split(',')
    if len(names) > 1:
        return names[0] + ' et al.'
    return names[0]


def add_table(fd, titles, authors):
    """Write the Markdown summary table linking each paper to its section.

    :param titles: sequence of paper titles.
    :param authors: sequence of author strings, parallel to *titles*.
    """
    fd.write(u''.join(('| Title | Authors | Description |', '\n')))
    fd.write(u'| --- | --- | --- |\n')
    for i in range(len(titles)):
        fd.write(u''.join(('| ' + titles[i] + ' | ' + get_author(authors[i])
                           + ' | [here](' + get_href(titles[i]) + ') |', '\n')))
if __name__ == '__main__':
    # Load data
    df = load_gsheet(GG_SPREADSHEET_NAME)
    # Open target file
    myfile = open("../../README.md", "w")
    # --- Fixed preamble sections of the README ---
    add_h1_title(myfile, 'Repository of a selection of papers related to COVID-19 outbreak operated by Centre Borelli (ENS Paris-Saclay, CNRS, Université de Paris, SSA)')
    myfile.write(u'The repository prioritizes papers presenting mathematical models with practical impact, use of empirical data, strategy of containment policy, open and reproducible implementation of the model.\n\n')
    myfile.write(u'The repository compiles the key elements of each paper such as: type of model, main assumptions, input parameters, output of the model, open source implementation, etc. The complete table can be found under three different formats:\n\n')
    myfile.write(u"* Interactive dashboard-like table under [Kibana](https://covidreview.org/kibana)\n" +
                 "* A [spreadsheet](" + GG_SPREADSHEET + ") --> Comments are allowed \n" +
                 "* List with clickable entries below.\n\n")
    add_h1_title(myfile, 'Additional information')
    myfile.write(u'List of characteristics is provided for each paper : see [characteristics](' + GG_SPREADSHEET + GG_SPREADSHEET_COLUMNS + ') description\n\n')
    myfile.write(u'A [glossary](' + GG_SPREADSHEET + GG_SPREADSHEET_GLOSSARY + ') of technical terms is available. \n')
    add_h1_title(myfile, 'Provided by Centre Borelli (ENS Paris-Saclay, CNRS, Université de Paris, SSA)')
    myfile.write(u'Authors: <NAME>, <NAME>, <NAME>, <NAME>\n\n')
    myfile.write(u'Contributors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>\n\n')
    myfile.write(u'Credits for technical support: <NAME>, <NAME>, <NAME>.\n\n')
    add_h1_title(myfile, 'Contribution')
    myfile.write(u'If you wish to suggest an article to be added to the review, please contact us via email at <A href="mailto:<EMAIL>"><EMAIL></A> and we will proceed with the new entry after an internal assessment.\n')
    add_h1_title(myfile, 'Contact us')
    myfile.write(u'Email: <A href="mailto:<EMAIL>"><EMAIL></A>\n')
    add_h1_title(myfile, 'Terms of Use')
    myfile.write(u'This GitHub repository and its contents herein, copyright 2020 ENS Paris-Scalay, all rights reserved, is provided to the public strictly for educational and academic research purposes. The Website relies upon publicly available data from multiple sources, that do not always agree. The ENS Paris-Saclay hereby disclaims any and all representations and warranties with respect to the Website, including accuracy, fitness for use, and merchantability. Reliance on the Website for medical guidance or use of the Website in commerce is strictly prohibited.\n')
    # Group the row indices of each paper: a paper with several models
    # occupies several rows sharing the same "Paper(s)" title.
    models_dic = {}
    for unique_title in df["Paper(s)"].unique():
        models_dic[unique_title] = sorted(df[(df["Paper(s)"]==unique_title)].index)
    # Keep one representative row per paper for the summary table.
    unique_index = [v[0] for k,v in models_dic.items()]
    df_unique = df.loc[unique_index].reset_index()
    add_h1_title(myfile, 'The review (%d articles in total)' %(df_unique.shape[0]))
    add_table(myfile, df_unique['Paper(s)'], df_unique['Authors'])
    # Emit one entry per model row; the paper-level header and general
    # information are written only for the first row of each paper.
    for index, row in df.iterrows():
        is_alone = (len(models_dic[row["Paper(s)"]])==1)
        is_first_of_serie = not is_alone and (index==models_dic[row["Paper(s)"]][0])
        is_last_of_serie = not is_alone and (index==models_dic[row["Paper(s)"]][-1])
        if is_alone or is_first_of_serie:
            if row['Paper(s)'].encode('utf-8') != b'null':
                add_h2_title(myfile, row['Paper(s)'])
            # General info
            add_general_information(myfile)
            add_authors(myfile, row['Authors'])
            add_publication_date(myfile, row['Date of publication'])
            add_paper_link(myfile, row['Link/source'])
            if is_first_of_serie:
                model_num = 1
                add_model_num(myfile, model_num)
        if not is_alone and not is_first_of_serie:
            # NOTE(review): model_num persists across iterations; this
            # assumes a paper's model rows are contiguous and in order.
            model_num+=1
            add_model_num(myfile, model_num)
        add_code_available(myfile, row['Code available'])
        # technical information
        add_technical_information(myfile)
        # Model information -- each field is written only when the sheet
        # cell is non-empty ('null' is the fillna placeholder).
        add_model_information(myfile)
        if row['Stochastic vs. Deterministic'] != 'null':
            add_stochastic_deterministic(myfile, row['Stochastic vs. Deterministic'])
        if row['Category of model'] != 'null':
            add_category_of_model(myfile, row['Category of model'])
        if row['Subcategory of model'] != 'null':
            add_sub_category_of_model(myfile, row['Subcategory of model'])
        if row['Data used for the model (e.g. historical or simulated)'] != 'null':
            add_data_used_for_the_model(myfile, row['Data used for the model (e.g. historical or simulated)'])
        if row['Global approach'] != 'null':
            add_global_approach(myfile, row['Global approach'])
        if row['Details of approach'] != 'null':
            add_details_approach(myfile, row['Details of approach'])
        if row['Outputs'] != 'null':
            add_outputs(myfile, row['Outputs'])
        if row['How intervention strategies are modelled'] != 'null':
            add_intervention_strategies(myfile, row['How intervention strategies are modelled'])
        if row['Additional Assumptions'] != 'null':
            add_additional_assumptions(myfile, row['Additional Assumptions'])
        if (row["Problem Formulation (eg numerical scheme, objective function, etc.)"] != 'null' and row["Problem Formulation (eg numerical scheme, objective function, etc.)"] != 'not explained' ):
            add_problem_formulation(myfile, row["Problem Formulation (eg numerical scheme, objective function, etc.)"])
        if (row["Solving Method"] != 'null' and row["Solving Method"] != 'not explained'):
            add_solving_method(myfile, row["Solving Method"])
        # Estimation
        add_parameters_information(myfile)
        if row['Epidemiological parameters (eg inherent of the virus: infection, recovery, death rates)'] != 'null':
            add_epidemiological_parameters(myfile, row['Epidemiological parameters (eg inherent of the virus: infection, recovery, death rates)'])
        if row['Other parameters'] != 'null':
            add_other_parameters(myfile, row['Other parameters'])
        if row['How input parameters are estimated (data-driven or from literature)'] != 'null':
            add_input_estimation(myfile, row['How input parameters are estimated (data-driven or from literature)'])
        if row['Details on parameters estimation'] != 'null':
            add_details_input_estimation(myfile, row['Details on parameters estimation'])
        # Additional
        if row['Comment/issues'] != 'null':
            add_additional_information(myfile)
        if row['Comment/issues'] != 'null':
            add_comments(myfile, row['Comment/issues'])
        # Separate papers with a spacer after the last model of each paper.
        if is_alone or is_last_of_serie:
            add_space(myfile)
    myfile.close()
| StarcoderdataPython |
4920131 | import os
import jinja2
import webapp2
import json
import logging
import re
import includes
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
urlfetch.set_default_fetch_deadline(60)
import Blocktrail_com
import Blockchain_info
import Insight
def validAddress(address):
    """Return True if *address* looks like a valid legacy (P2PKH/P2SH) Bitcoin address.

    Checks the Base58Check shape only: a leading '1' or '3' followed by 26-33
    Base58 characters. Base58 excludes the ambiguous characters 0, O, I and l,
    so '0' must not be accepted (the previous pattern's '0-9' wrongly allowed
    it). This is a cheap syntactic filter, not a checksum validation.
    """
    return re.match("^[13][a-km-zA-HJ-NP-Z1-9]{26,33}$", address) is not None
def validAddresses(addresses):
    """Return True only when every '|'-separated address in *addresses* is valid.

    An empty input string yields False, because splitting it produces a single
    empty token that fails the per-address check.
    """
    return all(validAddress(address) for address in addresses.split("|"))
def validTxid(txid):
    """Return True if *txid* is exactly 64 hexadecimal characters.

    The previous ``int(txid, 16)`` check also accepted strings that ``int``
    tolerates, such as '0x'-prefixed, signed ('+'/'-') or whitespace-padded
    values of the right length; an explicit character-class match rejects
    those while accepting all genuine transaction ids.
    """
    return re.match("^[0-9a-fA-F]{64}$", txid) is not None
def logProviderFailures(i):
    """Log that the *i*-th data provider attempt failed.

    Severity escalates with the number of failed providers; i == 0 (first
    provider index, nothing failed before it) logs nothing.
    """
    known = {
        1: (logging.info, 'Primary provider failed'),
        2: (logging.warning, 'Primary and Secondary provider failed'),
        3: (logging.warning, 'Primary and Secondary and Tertiary provider failed'),
    }
    if i in known:
        emit, message = known[i]
        emit(message)
    elif i > 3:
        logging.error('All providers failed')
# Jinja2 template environment rooted at this module's directory; the
# autoescape extension is loaded so templates can opt in to HTML escaping.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'])
#Default URLs for Bitcoin Spellbook modules
# These public appspot endpoints are only fallback defaults; per-deployment
# values are stored in the Parameters datastore entity defined below.
BLOCKCHAINDATA_URL = "https://blockchaindata.appspot.com"
SIMPLIFIEDINPUTSLIST_URL = "https://simplifiedinputslist.appspot.com"
BLOCKLINKER_URL = "https://blocklinker.appspot.com"
PROPORTIONALRANDOM_URL = "https://proportionalrandom.appspot.com"
BITVOTER_URL = "https://bitvoter.appspot.com"
HDFORWARDER_URL = "https://hdforwarder.appspot.com"
DISTRIBUTEBTC_URL = "https://distributebtc.appspot.com"
BITCOINWELLS_URL = "https://bitcoinwells.appspot.com"
def Spellbook(parameters):
    """Build the navigation entries for the companion Bitcoin Spellbook modules.

    :param parameters: Parameters entity carrying one URL attribute per module.
    :return: list of ``{'url': ..., 'name': ...}`` dicts in fixed display order.
    """
    modules = [
        (parameters.blockchaindataURL, 'Blockchaindata'),
        (parameters.simplifiedInputsListURL, 'Simplified Inputs List'),
        (parameters.blocklinkerURL, 'Blocklinker'),
        (parameters.bitvoterURL, 'Bitvoter'),
        (parameters.proportionalRandomURL, 'Proportional Random'),
        (parameters.hdforwarderURL, 'HDForwarder'),
        (parameters.distributeBTCURL, 'DistributeBTC'),
        (parameters.bitcoinWellsURL, 'BitcoinWells'),
    ]
    return [{'url': url, 'name': name} for url, name in modules]
class Parameters(ndb.Model):
    """Datastore model for per-deployment configuration.

    A single entity (key name 'DefaultConfig', see the handlers below) stores
    the analytics tracking id and the URLs of the companion Bitcoin Spellbook
    services shown in the site navigation.
    """
    #Model for parameters
    trackingID = ndb.StringProperty(indexed=False, default="")
    # Service endpoints; defaults point at the public appspot deployments.
    blockchaindataURL = ndb.StringProperty(indexed=False, default=BLOCKCHAINDATA_URL)
    simplifiedInputsListURL = ndb.StringProperty(indexed=False, default=SIMPLIFIEDINPUTSLIST_URL)
    blocklinkerURL = ndb.StringProperty(indexed=False, default=BLOCKLINKER_URL)
    proportionalRandomURL = ndb.StringProperty(indexed=False, default=PROPORTIONALRANDOM_URL)
    bitvoterURL = ndb.StringProperty(indexed=False, default=BITVOTER_URL)
    hdforwarderURL = ndb.StringProperty(indexed=False, default=HDFORWARDER_URL)
    distributeBTCURL = ndb.StringProperty(indexed=False, default=DISTRIBUTEBTC_URL)
    bitcoinWellsURL = ndb.StringProperty(indexed=False, default=BITCOINWELLS_URL)
class Providers(ndb.Model):
    """Datastore model for third-party blockchain data provider settings."""
    #Model for 3rd party data providers parameters
    # NOTE(review): a real-looking API key is hard-coded as the default here
    # and ends up in source control -- consider injecting it via configuration.
    blocktrail_key = ndb.StringProperty(indexed=True, default="a8a84ed2929da8313d75d16e04be2a26c4cc4ea4")
    insight_url = ndb.StringProperty(indexed=True, default="https://blockexplorer.com/api/")
# Load the singleton provider configuration and build one API client per
# third-party data source.
provider = Providers.get_or_insert('DefaultConfig')
blockchain = Blockchain_info.API()
blocktrail = Blocktrail_com.API(provider.blocktrail_key)
insight = Insight.API(provider.insight_url)
#list of 3rd party data providers in order of preference
# providerNames[i] must stay aligned with providerApis[i]; the request
# handlers index both lists with the same loop variable.
providerNames = ['Blocktrail.com', 'Blockchain.info', 'Insight']
providerApis = [blocktrail, blockchain, insight]
class UTXOs(webapp2.RequestHandler):
    """Serve the unspent outputs for a '|'-separated list of addresses as JSON."""

    def get(self):
        response = {'success': 0}
        self.addresses = ''
        requested = self.request.get('addresses')
        if requested != '':
            self.addresses = requested
        if validAddresses(self.addresses):
            # Query the providers in order of preference, stop on first success.
            for i, api in enumerate(providerApis):
                data = api.getUTXOs(self.addresses)
                if data.get('success') == 1:
                    response = data
                    response['provider'] = providerNames[i]
                    break
                logProviderFailures(i)
            if response['success'] == 0:
                response['error'] = 'All data providers failed'
        else:
            logging.error('Invalid address found')
            response['error'] = 'Invalid address found'
        self.response.write(json.dumps(response))
class Balances(webapp2.RequestHandler):
    """Serve balance information for a '|'-separated list of addresses as JSON."""

    def get(self):
        response = {'success': 0}
        self.addresses = ''
        requested = self.request.get('addresses')
        if requested != '':
            self.addresses = requested
        if validAddresses(self.addresses):
            # Query the providers in order of preference, stop on first success.
            for i, api in enumerate(providerApis):
                data = api.getBalance(self.addresses)
                if data.get('success') == 1:
                    response = data
                    response['provider'] = providerNames[i]
                    break
                logProviderFailures(i)
            if response['success'] == 0:
                response['error'] = 'All data providers failed'
        else:
            logging.error('Invalid address found')
            response['error'] = 'Invalid address found'
        self.response.write(json.dumps(response))
class Block(webapp2.RequestHandler):
    """Serve data about a specific block, identified by its (positive) height."""

    def get(self):
        response = {'success': 0}
        self.blockHeight = 0
        validBlock = False
        # The 'block' query parameter must parse as a strictly positive int.
        if self.request.get('block') != '':
            try:
                self.blockHeight = int(self.request.get('block'))
                if self.blockHeight > 0:
                    validBlock = True
                else:
                    logging.error('block must be a positive integer')
                    response['error'] = 'block must be a positive integer'
            except ValueError:
                logging.error('block must be a positive integer')
                response['error'] = 'block must be a positive integer'
        if validBlock:
            # Query providers in order of preference; stop at the first success.
            for i in range(0, len(providerApis)):
                data = providerApis[i].getBlock(self.blockHeight)
                if 'success' in data and data['success'] == 1:
                    response = data
                    response['provider'] = providerNames[i]
                    break
                logProviderFailures(i)
            if response['success'] == 0:
                response['error'] = 'All data providers failed'
        self.response.write(json.dumps(response))
class LatestBlock(webapp2.RequestHandler):
    """Serve information about the newest block, trying providers in order."""

    def get(self):
        response = {'success': 0}
        for i, api in enumerate(providerApis):
            data = api.getLatestBlock()
            if data.get('success') == 1:
                response = data
                response['provider'] = providerNames[i]
                break
            logProviderFailures(i)
        if response['success'] == 0:
            response['error'] = 'All data providers failed'
        self.response.write(json.dumps(response))
class PrimeInputAddress(webapp2.RequestHandler):
    """Serve the prime (first) input address of a transaction given its txid."""

    def get(self):
        response = {'success': 0}
        self.txid = ''
        if self.request.get('txid') != '':
            self.txid = self.request.get('txid')
        if validTxid(self.txid):
            # Query providers in order of preference; stop at the first success.
            for i in range(0, len(providerApis)):
                data = providerApis[i].getPrimeInputAddress(self.txid)
                if 'success' in data and data['success'] == 1:
                    response = data
                    response['provider'] = providerNames[i]
                    break
                logProviderFailures(i)
            if response['success'] == 0:
                response['error'] = 'All data providers failed'
        else:
            logging.error("Invalid txid: "+ self.txid)
            response['error'] = "Invalid txid: "+ self.txid
        self.response.write(json.dumps(response))
class Transactions(webapp2.RequestHandler):
    """Serve the transaction history of a single address as JSON.

    GET /transactions?address=<addr> queries each data provider in order of
    preference and returns the first successful result, tagged with the name
    of the provider that produced it.
    """

    def get(self):
        response = {'success': 0}
        self.address = ''
        if self.request.get('address') != '':
            self.address = self.request.get('address')
        if validAddress(self.address):
            for i in range(0, len(providerApis)):
                data = providerApis[i].getTXS(self.address)
                if 'success' in data and data['success'] == 1:
                    response['success'] = 1
                    response['TXS'] = self.TXS2JSON(data['TXS'], self.address)
                    response['provider'] = providerNames[i]
                    break
                logProviderFailures(i)
            if response['success'] == 0:
                response['error'] = 'All data providers failed'
        else:
            logging.error('Invalid address: ' + self.address)
            response['error'] = 'Invalid address: ' + self.address
        self.response.write(json.dumps(response))

    def TXS2JSON(self, TXS, address):
        """Convert provider transaction objects into JSON-serializable dicts.

        Each element of *TXS* is expected to expose a ``toDict(address)``
        method (presumably implemented by the provider API modules -- confirm
        there). Order of the input list is preserved.
        """
        return [tx.toDict(address) for tx in TXS]
class mainPage(webapp2.RequestHandler):
    """Render the landing page (index.html) with the shared layout fragments."""

    def get(self):
        # Singleton config entity; created with defaults on first access.
        parameters = Parameters.get_or_insert('DefaultConfig')
        template_values = {
            'Title': 'Blockchaindata',
            'cssHTML': includes.get_CssHTML(),
            'metaHTML': includes.get_MetaHTML(),
            'scriptsHTML': includes.get_ScriptsHTML(),
            'navigationHTML': includes.get_NavigationHTML(Spellbook(parameters)),
            'logoHTML': includes.get_LogoHTML(),
            'footerHTML': includes.get_FooterHTML(),
            'googleAnalyticsHTML': includes.get_GoogleAnalyticsHTML(parameters.trackingID),
        }
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render(template_values))
class Documentation(webapp2.RequestHandler):
    """Render the documentation page with the same shared layout as the index."""

    def get(self):
        # Singleton config entity; created with defaults on first access.
        parameters = Parameters.get_or_insert('DefaultConfig')
        template_values = {
            'Title': 'Blockchaindata documentation',
            'cssHTML': includes.get_CssHTML(),
            'metaHTML': includes.get_MetaHTML(),
            'scriptsHTML': includes.get_ScriptsHTML(),
            'navigationHTML': includes.get_NavigationHTML(Spellbook(parameters)),
            'logoHTML': includes.get_LogoHTML(),
            'footerHTML': includes.get_FooterHTML(),
            'googleAnalyticsHTML': includes.get_GoogleAnalyticsHTML(parameters.trackingID),
        }
        template = JINJA_ENVIRONMENT.get_template('documentation.html')
        self.response.write(template.render(template_values))
# URL routing table mapping each endpoint to its request handler.
# NOTE(review): debug=True exposes stack traces to clients -- confirm this is
# intended for production deployments.
app = webapp2.WSGIApplication([
    ('/', mainPage),
    ('/documentation', Documentation),
    ('/transactions', Transactions),
    ('/utxos', UTXOs),
    ('/primeInputAddress', PrimeInputAddress),
    ('/latestBlock', LatestBlock),
    ('/block', Block),
    ('/balances', Balances),
], debug=True)
| StarcoderdataPython |
1818950 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class C4JtumblrPipeline(object):
    """Rewrite remote image URLs in scraped post content to local /images/ paths."""

    def process_item(self, item, spider):
        # Replace each downloaded image's original URL with its local path.
        content = item['post_content']
        for image in item['images']:
            local_path = '/images/' + image['path']
            print('replace ' + image['url'] + ' to ' + local_path)
            content = content.replace(image['url'], local_path)
        item['post_content'] = content
        return item
82680 | <reponame>ChristopherBradley/rad-classify
from os.path import join, dirname
# Default stuff
# Folder layout: this file lives in the source folder directly under the
# project root; all data files live in <root>/data.
src_folder = dirname(__file__)
root_folder = dirname(src_folder)
data_folder = join(root_folder, 'data')
# WOS stuff
# Web of Science corpora: X.txt holds the documents, Y.txt the matching labels.
wos_folder = join(data_folder, "WebOfScience")
WOS5736_X = join(wos_folder, "WOS5736", "X.txt")
WOS5736_Y = join(wos_folder, "WOS5736", "Y.txt")
WOS11967_X = join(wos_folder, "WOS11967", "X.txt")
WOS11967_Y = join(wos_folder, "WOS11967", "Y.txt")
WOS46985_X = join(wos_folder, "WOS46985", "X.txt")
WOS46985_Y = join(wos_folder, "WOS46985", "Y.txt")
# Fasttext stuff
fasttext_train = join(data_folder, 'fasttext_train.csv')
# Pretrained fastText vectors -- presumably downloaded separately into the
# data folder; confirm in the project README.
pretrained_vectors = join(data_folder, "wiki-news-300d-1M.vec")
# Preprocessing
pickle_keywords = join(data_folder, 'keywords.pickle')
# Results
results_file = join(data_folder, "results.csv")
test_classifications = join(data_folder, "test_classifications.csv") | StarcoderdataPython |
4974017 | <reponame>wpfff/labcore
from typing import Any, Callable, Union
import inspect
def same_type(*args: Any, target_type: Union[type, None] = None) -> bool:
    """Check whether all given arguments have exactly the same type.

    :param args: Values to inspect (at least one is required).
    :param target_type: if not `None`, check if all elements are of that type
        (exact match, no subclass matching, since ``type(x) is T`` is used).
        if `None`, only check if all elements are of the same type as the
        first argument.
    :return: `True` if all types match, `False` otherwise.
    :raises ValueError: if called without any positional arguments.
    """
    if not args:
        raise ValueError('nothing to compare. supply at least one argument.')
    if target_type is None:
        target_type = type(args[0])
    return all(type(elt) is target_type for elt in args)
# FIXME: 'None' should never override a default!
def map_input_to_signature(func: Union[Callable, inspect.Signature],
                           *args: Any, **kwargs: Any):
    """Try to re-organize the positional arguments `args` and key word
    arguments `kwargs` such that `func` can be called with them.
    if `func` expects arguments that cannot be given, they will be given
    as ``None``.
    Surplus arguments are ignored if `func` does not accept variable positional
    and/or keyword arguments.
    Example::
        >>> def myfunc(x, y, z=1):
        ...     print(f"x={x}, y={y}, z={z}")
        ...
        ... args, kwargs = map_input_to_signature(myfunc, z=1, x=1, unused=4)
        ... myfunc(*args, **kwargs)
        x=1, y=None, z=1
    It is important to note that the position of positional arguments is not
    preserved, because input key words that match expected positional arguments
    are inserted as positional arguments at the right position. The order,
    however, is preserved. Example::
        >>> def myfunc(x, y, z):
        ...     print(f"x={x}, y={y}, z={z}")
        ...
        ... args, kwargs = map_input_to_signature(myfunc, 1, 2, x=5)
        ... myfunc(*args, **kwargs)
        x=5, y=1, z=2
    """
    # Work on a mutable copy; positionals are consumed left-to-right below.
    args = list(args)
    func_args = []
    func_kwargs = {}
    # Accept either a ready-made Signature or anything inspect can introspect.
    if isinstance(func, inspect.Signature):
        sig = func
    else:
        sig = inspect.signature(func)
    # Logic:
    # for each param the function expects, we need to check if have
    # received a fitting one
    for idx, p in enumerate(sig.parameters):
        p_ = sig.parameters[p]
        # we treat anything that can be given positionally as positional.
        # first prio to keyword-given values, second to positionally given,
        # finally default if given in signature.
        if p_.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD,
                       inspect.Parameter.POSITIONAL_ONLY]:
            if p in kwargs:
                # A keyword matching a positional slot wins and is consumed.
                func_args.insert(idx, kwargs.pop(p))
            else:
                if len(args) > 0:
                    func_args.insert(idx, args.pop(0))
                elif p_.default is inspect.Parameter.empty:
                    # Required parameter with nothing to fill it: substitute
                    # None (see FIXME above the def -- this can also mask a
                    # missing-argument error).
                    func_args.insert(idx, None)
                else:
                    func_args.insert(idx, p_.default)
        elif p_.kind is inspect.Parameter.KEYWORD_ONLY:
            if p in kwargs:
                func_kwargs[p] = kwargs.pop(p)
        elif p_.kind is inspect.Parameter.VAR_POSITIONAL:
            # *args in the target: forward all remaining positionals.
            for a in args:
                func_args.append(a)
        elif p_.kind is inspect.Parameter.VAR_KEYWORD:
            # **kwargs in the target: forward all remaining keywords.
            func_kwargs.update(kwargs)
    return func_args, func_kwargs
def indent_text(text: str, level: int = 0) -> str:
    """Prefix every line of ``text`` (including empty ones) with ``level`` spaces."""
    pad = " " * level
    return "\n".join(pad + line for line in text.split("\n"))
| StarcoderdataPython |
6650962 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Two Asset HANK Model [<cite data-cite="6202365/ECL3ZAR7"></cite>](https://cepr.org/active/publications/discussion_papers/dp.php?dpno=13071)
#
# [](https://mybinder.org/v2/gh/econ-ark/HARK/BayerLuetticke/notebooks?filepath=HARK%2FBayerLuetticke%2FTwoAsset.ipynb)
#
# - Adapted from original slides by <NAME> and <NAME> (Henceforth, 'BL')
# - Jupyter notebook originally by <NAME>
# - Further edits by <NAME>, <NAME>, <NAME>
# %% [markdown]
# ### Overview
#
# BL propose a method for solving Heterogeneous Agent DSGE models that uses fast tools originally employed for image and video compression to speed up a variant of the solution methods proposed by <NAME>. <cite data-cite="undefined"></cite>
#
# The Bayer-Luetticke method has the following broad features:
# * The model is formulated and solved in discrete time (in contrast with some other recent approaches <cite data-cite="6202365/WN76AW6Q"></cite>)
# * Solution begins by calculation of the steady-state equilibrium (StE) with no aggregate shocks
# * Both the representation of the consumer's problem and the description of the distribution are subjected to a form of "dimensionality reduction"
# * This means finding a way to represent them efficiently using fewer points
# * "Dimensionality reduction" of the consumer's decision problem is performed before any further analysis is done
# * This involves finding a representation of the policy functions using some class of basis functions
# * Dimensionality reduction of the joint distribution is accomplished using a "copula"
# * See the companion notebook for description of the copula
# * The method approximates the business-cycle-induced _deviations_ of the individual policy functions from those that characterize the riskless StE
# * This is done using the same basis functions originally optimized to match the StE individual policy function
# * The method of capturing dynamic deviations from a reference frame is akin to video compression
# %% [markdown]
# ### Setup
#
# #### The Recursive Dynamic Planning Problem
#
# BL describe their problem a generic way; here, we will illustrate the meaning of their derivations and notation using the familiar example of the Krusell-Smith model, henceforth KS. <cite data-cite="6202365/VPUXICUR"></cite>
#
# Consider a household problem in presence of aggregate and idiosyncratic risk
# * $S_t$ is an (exogenous) aggregate state (e.g., levels of productivity and unemployment)
# * $s_{it}$ is a partly endogenous idiosyncratic state (e.g., wealth)
# * $\mu_t$ is the distribution over $s$ at date $t$ (e.g., the wealth distribution)
# * $P_{t}$ is the pricing kernel
# * It captures the info about the aggregate state that the consumer needs to know in order to behave optimally
# * e.g., KS showed that for their problem, a good _approximation_ to $P_{t}$ could be constructed using only $S_{t}$ and the aggregate capital stock $K_{t}$
# * $\Gamma$ defines the budget set
# * This delimits the set of feasible choices $x$ that the agent can make
#
# The Bellman equation is:
#
# \begin{equation}
# v(s_{it},S_t,\mu_t) = \max\limits_{x \in \Gamma(s_{it},P_t)} u(s_{it},x) + \beta \mathbb{E}_{t} v(s_{it+1}(x,s_{it}),S_{t+1},\mu_{t+1})
# \end{equation}
#
# which, for many types of problems, implies an Euler equation: <!-- Question: Why isn't R a t+1 dated variable (and inside the expectations operator? -->
# \begin{equation}
# u^{\prime}\left(x(s_{it},S_t,\mu_t)\right) = \beta R(S_t,\mu_t) \mathbb{E}_{t} u^{\prime}\left(x(s_{it+1},S_{t+1},\mu_{t+1})\right)
# \end{equation}
#
# %% [markdown]
# #### Solving for the StE
#
# The steady-state equilibrium is the one that will come about if there are no aggregate risks (and consumers know this)
#
# The first step is to solve for the steady-state:
# * Discretize the state space
# * Representing the nodes of the discretization in a set of vectors
# * Such vectors will be represented by an overbar
# * e.g. $\bar{m}$ is the nodes of cash-on-hand $m$
# * The optimal policy $\newcommand{\policy}{c}\newcommand{\Policy}{C}\policy(s_{it};P)$ induces flow utility $u_{\policy}$ whose discretization is a vector $\bar{u}_{\bar{\policy}}$
# * Idiosyncratic dynamics are captured by a transition probability matrix $\Pi_{\bar{\policy}}$
# * $\Pi$ is like an expectations operator
# * It depends on the vectorization of the policy function $\bar{\policy}$
# * $P$ is constant because in StE aggregate prices are constant
# * e.g., in the KS problem, $P$ would contain the (constant) wage and interest rates
# * In StE, the discretized Bellman equation implies
# \begin{equation}
# \bar{v} = \bar{u} + \beta \Pi_{\bar{\policy}}\bar{v}
# \end{equation}
# holds for the optimal policy
# * A linear interpolator is used to represent the value function
# * For the distribution, which (by the definition of steady state) is constant:
#
# \begin{eqnarray}
# \bar{\mu} & = & \bar{\mu} \Pi_{\bar{\policy}} \\
# d\bar{\mu} & = & d\bar{\mu} \Pi_{\bar{\policy}}
# \end{eqnarray}
# where we differentiate in the second line because we will be representing the distribution as a histogram, which counts the _extra_ population obtained by moving up <!-- Is this right? $\mu$ vs $d \mu$ is a bit confusing. The d is wrt the state, not time, right? -->
#
# We will define an approximate equilibrium in which:
# * $\bar{\policy}$ is the vector that defines a linear interpolating policy function $\policy$ at the state nodes
# * given $P$ and $v$
# * $v$ is a linear interpolation of $\bar{v}$
# * $\bar{v}$ is value at the discretized nodes
# * $\bar{v}$ and $d\bar{\mu}$ solve the approximated Bellman equation
# * subject to the steady-state constraint
# * Markets clear ($\exists$ joint requirement on $\bar{\policy}$, $\mu$, and $P$; denoted as $\Phi(\bar{\policy}, \mu, P) = 0$) <!-- Question: Why is this not $\bar{\mu}$ -->
#
# This can be solved by:
# 1. Given $P$,
# 1. Finding $d\bar{\mu}$ as the unit-eigenvalue of $\Pi_{\bar{\policy}}$
# 2. Using standard solution techniques to solve the micro decision problem
# * Like wage and interest rate
# 2. Using a root-finder to solve for $P$
# * This basically iterates the other two steps until it finds values where they are consistent
# %% [markdown]
# #### Introducing aggregate risk
#
# With aggregate risk
# * Prices $P$ and the distribution $\mu$ change over time
#
# Yet, for the household:
# * Only prices and continuation values matter
# * The distribution does not influence decisions directly
# %% [markdown]
# #### Redefining equilibrium (Reiter, 2002)
# A sequential equilibrium with recursive individual planning <cite data-cite="6202365/UKUXJHCN"></cite> is:
# * A sequence of discretized Bellman equations, such that
# \begin{equation}
# v_t = \bar{u}_{P_t} + \beta \Pi_{\policy_t} v_{t+1}
# \end{equation}
# holds for policy $\policy_t$ which optimizes with respect to $v_{t+1}$ and $P_t$
# * and a sequence of "histograms" (discretized distributions), such that
# \begin{equation}
# d\mu_{t+1} = d\mu_t \Pi_{\policy_t}
# \end{equation}
# holds given the policy $h_{t}$, that is optimal given $P_t$, $v_{t+1}$
# * Prices, distribution, and policies lead to market clearing
# %% {"code_folding": [0, 6, 17]}
from __future__ import print_function
# This is a jupytext paired notebook that autogenerates a corresponding .py file
# which can be executed from a terminal command line via "ipython [name].py"
# But a terminal does not permit inline figures, so we need to test jupyter vs terminal
# Google "how can I check if code is executed in the ipython notebook"
def in_ipynb():
    """Return True when running inside a Jupyter notebook kernel, else False."""
    try:
        shell_type = str(type(get_ipython()))
        return shell_type == "<class 'ipykernel.zmqshell.ZMQInteractiveShell'>"
    except NameError:
        # get_ipython is undefined outside IPython -> plain terminal session.
        return False
# Determine whether to make the figures inline (for spyder or jupyter)
# vs whatever is the automatic setting that will apply if run from the terminal
if in_ipynb():
    # %matplotlib inline generates a syntax error when run from the shell
    # so do this instead
    get_ipython().run_line_magic('matplotlib', 'inline')
else:
    try:
        # Only valid when some IPython shell is present (e.g. Spyder or
        # terminal IPython).
        get_ipython().run_line_magic('matplotlib', 'auto')
    except NameError:
        # Plain "python file.py" run: get_ipython is undefined, which
        # previously crashed here despite the comment above promising
        # terminal support; matplotlib's default backend applies anyway.
        pass
# The tools for navigating the filesystem
import sys
import os
# Find pathname to this file:
my_file_path = os.path.dirname(os.path.abspath("TwoAsset.ipynb"))
# Relative directory for pickled code
code_dir = os.path.join(my_file_path, "../Assets/Two")
sys.path.insert(0, code_dir)
sys.path.insert(0, my_file_path)
# %% {"code_folding": [0]}
## Load Stationary equilibrium (StE) object EX3SS_20
import pickle
os.chdir(code_dir) # Go to the directory with pickled code
## EX3SS_20.p is the information in the stationary equilibrium (20: the number of illiquid and liquid wealth grids )
# Use a context manager so the pickle file handle is closed deterministically
# (the original pickle.load(open(...)) leaked the open file object).
with open("EX3SS_20.p", "rb") as _ex3ss_file:
    EX3SS = pickle.load(_ex3ss_file)
## WangTao: Find the code that generates this
# %% [markdown]
# #### Compact notation
#
# It will be convenient to rewrite the problem using a compact notation proposed by Schmidt-Grohe and Uribe (2004)
#
# The equilibrium conditions can be represented as a non-linear difference equation
# * Controls: $Y_t = [v_t \ P_t \ Z_t^Y]$ and States: $X_t=[\mu_t \ S_t \ Z_t^X]$
# * where $Z_t$ are purely aggregate states/controls
# * Define <!-- Q: What is $\epsilon$ here? Why is it not encompassed in S_{t+1}? -->
# \begin{align}
# F(d\mu_t, S_t, d\mu_{t+1}, S_{t+1}, v_t, P_t, v_{t+1}, P_{t+1}, \epsilon_{t+1})
# &= \begin{bmatrix}
# d\mu_{t+1} - d\mu_t\Pi_{\policy_t} \\
# v_t - (\bar{u}_{\policy_t} + \beta \Pi_{\policy_t}v_{t+1}) \\
# S_{t+1} - \Policy(S_t,d\mu_t,\epsilon_{t+1}) \\
# \Phi(\policy_t,d\mu_t,P_t,S_t) \\
# \epsilon_{t+1}
# \end{bmatrix}
# \end{align}
# s.t. <!-- Q: Why are S_{t+1} and \epsilon_{t+1} not arguments of v_{t+1} below? -->
# \begin{equation}
# \policy_t(s_{t}) = \arg \max\limits_{x \in \Gamma(s,P_t)} u(s,x) + \beta \mathop{\mathbb{E}_{t}} v_{t+1}(s_{t+1})
# \end{equation}
# * The solution is a function-valued difference equation:
# \begin{equation}
# \mathop{\mathbb{E}_{t}}F(X_t,X_{t+1},Y_t,Y_{t+1},\epsilon_{t+1}) = 0
# \end{equation}
# where $\mathop{\mathbb{E}}$ is the expectation over aggregate states
# * It becomes real-valued when we replace the functions by their discretized counterparts
# * Standard techniques can solve the discretized version
# %% [markdown]
# #### So, is all solved?
# The dimensionality of the system F is a big problem
# * With high dimensional idiosyncratic states, discretized value functions and distributions become large objects
# * For example:
# * 4 income states $\times$ 100 illiquid capital states $\times$ 100 liquid capital states $\rightarrow$ $\geq$ 40,000 values in $F$
# %% [markdown]
# ### Bayer-Luetticke method
# #### Idea:
# 1. Use compression techniques as in video encoding
# * Apply a discrete cosine transformation (DCT) to all value/policy functions
# * DCT is used because it is the default in the video encoding literature
# * Choice of cosine is unimportant; linear basis functions might work just as well
# * Represent fluctuations as differences from this reference frame
# * Assume all coefficients of the DCT from the StE that are close to zero do not change when there is an aggregate shock (small things stay small)
#
# 2. Assume no changes in the rank correlation structure of $\mu$
# * Calculate the Copula, $\bar{C}$ of $\mu$ in the StE
# * Perturb only the marginal distributions
# * This assumes that the rank correlations remain the same
# * See the companion notebook for more discussion of this
# * Use fixed Copula to calculate an approximate joint distribution from marginals
#
#
# The approach follows the insight of KS in that it uses the fact that some moments of the distribution do not matter for aggregate dynamics
# %% [markdown]
# #### Details
# 1) Compression techniques from video encoding
# * Let $\bar{\Theta} = dct(\bar{v})$ be the coefficients obtained from the DCT of the value function in StE
# * Define an index set $\mathop{I}$ that contains the x percent largest (i.e. most important) elements from $\bar{\Theta}$
# * Let $\theta$ be a sparse vector with non-zero entries only for elements $i \in \mathop{I}$
# * Define
# \begin{equation}
# \tilde{\Theta}(\theta_t)=\left\{
# \begin{array}{@{}ll@{}}
# \bar{\Theta}(i)+\theta_t(i), & i \in \mathop{I} \\
# \bar{\Theta}(i), & \text{else}
# \end{array}\right.
# \end{equation}
# * This assumes that the basis functions with least contribution to representation of the function in levels, make no contribution at all to its changes over time
# %% [markdown]
# 2) Decoding
# * Now we reconstruct $\tilde{v}(\theta_t)=dct^{-1}(\tilde{\Theta}(\theta_{t}))$
# * idct=$dct^{-1}$ is the inverse dct that goes from the $\theta$ vector to the corresponding values
#    * This means that in the StE the reduction step adds no additional approximation error:
# * Remember that $\tilde{v}(0)=\bar{v}$ by construction
# * But it allows us to reduce the number of derivatives that need to be calculated from the outset.
# * We only calculate derivatives for those basis functions that make an important contribution to the representation of the function
#
# 3) The histogram is recovered as follows
# * $\mu_t$ is approximated as $\bar{C}(\bar{\mu_t}^1,...,\bar{\mu_t}^n)$, where $n$ is the dimensionality of the idiosyncratic states <!-- Question: Why is there no time subscript on $\bar{C}$? I thought the copula was allowed to vary over time ... --> <!-- Question: is $\mu_{t}$ linearly interpolated between gridpoints? ... -->
# * $\mu_t^{i}$ are the marginal distributions <!-- Question: These are cumulatives, right? They are not in the same units as $\mu$ -->
# * The StE distribution is obtained when $\mu = \bar{C}(\bar{\mu}^1,...,\bar{\mu}^n)$
# * Typically prices are only influenced through the marginal distributions
# * The approach ensures that changes in the mass of one state (say, wealth) are distributed in a sensible way across the other dimensions
# * Where "sensible" means "like in StE" <!-- Question: Right? -->
# * The implied distributions look "similar" to the StE one (different in (Reiter, 2009))
#
# 4) The large system above is now transformed into a much smaller system:
# \begin{align}
# F(\{d\mu_t^1,...,d\mu_t^n\}, S_t, \{d\mu_{t+1}^1,...,d\mu_{t+1}^n\}, S_{t+1}, \theta_t, P_t, \theta_{t+1}, P_{t+1})
# &= \begin{bmatrix}
# d\bar{C}(\bar{\mu}_t^1,...,\bar{\mu}_t^n) - d\bar{C}(\bar{\mu}_t^1,...,\bar{\mu}_t^n)\Pi_{\policy_t} \\
# dct\left[idct\left(\tilde{\Theta}(\theta_t) - (\bar{u}_{\policy_t} + \beta \Pi_{\policy_t}idct(\tilde{\Theta}(\theta_{t+1}))\right)\right] \\
# S_{t+1} - \Policy(S_t,d\mu_t) \\
# \Phi(\policy_t,d\mu_t,P_t,S_t) \\
# \end{bmatrix}
# \end{align}
#
# %% [markdown]
# ### The two-asset HANK model
#
# We illustrate the algorithm in a two-asset HANK model described as below
#
#
# #### Households
# - Maximizing discounted felicity
# - Consumption $c$
# - CRRA coefficent: $\xi$
# - EOS of CES consumption bundle: $\eta$
# - Disutility from work in GHH form:
# - Frisch elasticity $\gamma$
# - Two assets:
# - Liquid nominal bonds $b$, greater than lower bound $\underline b$
# - Borrowing constraint due to a wedge between borrowing and saving rate: $R^b(b<0)=R^B(b>0)+\bar R$
# - Illiquid assets capital $k$ nonnegative
# - Trading of illiquid assets is subject to a friction governed by $v$, the fraction of agents who can trade
# - If nontrading, receive dividend $r$ and depreciates by $\tau$
# - Idiosyncratic labor productivity $h$:
# - $h = 0$ for entreprener, only receive profits $\Pi$
# - $h = 1$ for labor, evolves according to an autoregressive process,
# - $\rho_h$ persistence parameter
# - $\epsilon^h$: idiosyncratic risk
#
# #### Production
# - Intermediate good producer
# - CRS production with TFP $Z$
# - Wage $W$
# - Cost of capital $r+\delta$
# - Reseller
#     - Rotemberg price setting: quadratic adjustment cost scaled by $\frac{\eta}{2\kappa}$
# - Constant discount factor $\beta$
# - Investment subject to Tobin's q adjustment cost $\phi$
# - Aggregate risks $\Omega$ include
# - TFP $Z$, AR(1) process with persistence of $\rho^Z$ and shock $\epsilon^Z$
# - Uncertainty
# - Monetary policy
# - Central bank
# - Taylor rule on nominal saving rate $R^B$: reacts to deviation of inflation from target by $\theta_R$
# - $\rho_R$: policy innertia
# - $\epsilon^R$: monetary policy shocks
# - Government (fiscal rule)
# - Government spending $G$
# - Tax $T$
# - $\rho_G$: intensity of repaying government debt: $\rho_G=1$ implies roll-over
#
# #### Taking stock
#
# - Individual state variables: $\newcommand{\liquid}{m}\liquid$, $k$ and $h$, the joint distribution of individual states $\Theta$
# - Individual control variables: $c$, $n$, $\liquid'$, $k'$
# - Optimal policy for adjusters and nonadjusters are $c^*_a$, $n^*_a$ $k^*_a$ and $\liquid^*_a$ and $c^*_n$, $n^*_n$ and $\liquid^*_n$, respectively
#
# %%
import time
from FluctuationsTwoAsset import FluctuationsTwoAsset, SGU_solver, plot_IRF
start_time = time.perf_counter()
## Choose an aggregate shock to perturb(one of three shocks: MP, TFP, Uncertainty)
# EX3SS['par']['aggrshock'] = 'MP'
# EX3SS['par']['rhoS'] = 0.0 # Persistence of variance
# EX3SS['par']['sigmaS'] = 0.001 # STD of variance shocks
#EX3SS['par']['aggrshock'] = 'TFP'
#EX3SS['par']['rhoS'] = 0.95
#EX3SS['par']['sigmaS'] = 0.0075
# Active configuration: an uncertainty (variance) shock.
EX3SS['par']['aggrshock'] = 'Uncertainty'
EX3SS['par']['rhoS'] = 0.84 # Persistence of variance
EX3SS['par']['sigmaS'] = 0.54 # STD of variance shocks
## Choose an accuracy of approximation with DCT
### Determines number of basis functions chosen -- enough to match this accuracy
### EX3SS is precomputed steady-state pulled in above
EX3SS['par']['accuracy'] = 0.99999
## Implement state reduction and DCT
### Do state reduction on steady state
EX3SR = FluctuationsTwoAsset(**EX3SS)
SR = EX3SR.StateReduc()
print('SGU_solver')
# Solve the linearized system (Schmitt-Grohe/Uribe style) on the reduced state.
SGUresult = SGU_solver(SR['Xss'],SR['Yss'],SR['Gamma_state'],SR['indexMUdct'],SR['indexVKdct'],SR['par'],SR['mpar'],SR['grid'],SR['targets'],SR['Copula'],SR['P_H'],SR['aggrshock'])
print('plot_IRF')
# Plot impulse responses implied by the solved policy (gx) and law of motion (hx).
plot_IRF(SR['mpar'],SR['par'],SGUresult['gx'],SGUresult['hx'],SR['joint_distr'],
         SR['Gamma_state'],SR['grid'],SR['targets'],SR['Output'])
end_time = time.perf_counter()
print('Elapsed time is ', (end_time-start_time), ' seconds.')
# %%
| StarcoderdataPython |
82399 | # -*- coding: utf-8 -*-
# @File : session.py
# @Date : 2021/2/25
# @Desc :
from Lib.api import data_return
from Lib.configs import Session_MSG_ZH, CODE_MSG_ZH, RPC_SESSION_OPER_SHORT_REQ, CODE_MSG_EN, Session_MSG_EN
from Lib.log import logger
from Lib.method import Method
from Lib.notice import Notice
from Lib.rpcclient import RpcClient
from Lib.sessionlib import SessionLib
from Lib.xcache import Xcache
from Msgrpc.serializers import SessionLibSerializer
class Session(object):
    """Query, refresh and destroy msf session information via RPC."""

    @staticmethod
    def list(sessionid=None):
        """Return detailed info for one session.

        :param sessionid: positive integer session id; anything else yields 304.
        :return: standard API response dict built by ``data_return``.
        """
        if sessionid is None or sessionid <= 0:
            return data_return(304, {}, Session_MSG_ZH.get(304), Session_MSG_EN.get(304))
        session_interface = SessionLib(sessionid, rightinfo=True, uacinfo=True, pinfo=True)
        result = SessionLibSerializer(session_interface).data
        return data_return(200, result, CODE_MSG_ZH.get(200), CODE_MSG_EN.get(200))

    @staticmethod
    def update(sessionid=None):
        """Drop the cached session info and return a freshly built view."""
        if sessionid is None or sessionid <= 0:
            return data_return(304, {}, Session_MSG_ZH.get(304), Session_MSG_EN.get(304))
        # Invalidate the cache entry so SessionLib rebuilds from live data.
        Xcache.set_session_info(sessionid, None)
        session_lib = SessionLib(sessionid, rightinfo=True, uacinfo=True, pinfo=True)
        result = SessionLibSerializer(session_lib).data
        return data_return(203, result, Session_MSG_ZH.get(203), Session_MSG_EN.get(203))

    @staticmethod
    def destroy(sessionid=None):
        """Terminate a session through the RPC backend and notify the operator.

        A ``None`` RPC result means the call timed out; it is reported as
        "destroy in progress" (202) rather than a failure.
        """
        if sessionid is None or sessionid <= 0:
            return data_return(304, {}, Session_MSG_ZH.get(304), Session_MSG_EN.get(304))
        params = [sessionid]
        try:
            result = RpcClient.call(Method.SessionStop, params, timeout=RPC_SESSION_OPER_SHORT_REQ)
            if result is None:  # 删除超时
                Notice.send_success(f"{Session_MSG_ZH.get(202)} SID: {sessionid}",
                                    f"{Session_MSG_EN.get(202)} SID: {sessionid}")
                return data_return(202, {}, Session_MSG_ZH.get(202), Session_MSG_EN.get(202))
            if result.get('result') == 'success':
                Notice.send_success(f"{Session_MSG_ZH.get(201)} SID: {sessionid}",
                                    f"{Session_MSG_EN.get(201)} SID: {sessionid}")
                return data_return(201, {}, Session_MSG_ZH.get(201), Session_MSG_EN.get(201))
            return Session._destroy_failed(sessionid)
        except Exception as E:
            logger.error(E)
            return Session._destroy_failed(sessionid)

    @staticmethod
    def _destroy_failed(sessionid):
        """Send the failure notice and build the 301 response (shared by the
        RPC-error and RPC-exception paths, which previously duplicated it)."""
        Notice.send_warning(f"{Session_MSG_ZH.get(301)} SID: {sessionid}",
                            f"{Session_MSG_EN.get(301)} SID: {sessionid}")
        return data_return(301, {}, Session_MSG_ZH.get(301), Session_MSG_EN.get(301))
| StarcoderdataPython |
8010678 | <gh_stars>10-100
from twitter_analysis import get_tweets
import simplejson
# Fetch recent tweets for the account and dump them as JSON, one per line.
tweets = list(get_tweets('Urbandecay', tweets=100, retweets=False))
# ``with`` guarantees the file handle is closed even if serialization fails
# (the original opened the file manually and leaked it on any exception).
with open('tweets.txt', 'w') as thefile:
    for tweet in tweets:
        print(tweet['text'])
        simplejson.dump(tweet, thefile)
        thefile.write('\n')
| StarcoderdataPython |
133716 | from typing import Any, Mapping
from ghaudit.query.sub_query_common import SubQueryCommon
from ghaudit.query.utils import PageInfo
class OrgRepoQuery(SubQueryCommon):
    """Paginated sub-query listing the repositories of an organisation."""

    FRAGMENTS = ["frag_org_repo_fields.j2", "frag_org_repo.j2"]

    def __init__(self) -> None:
        query_params = {"organisation": "String!", "repositoriesMax": "Int!"}
        SubQueryCommon.__init__(self, self.FRAGMENTS, "repositories", query_params)

    def update_page_info(self, response: Mapping[str, Any]) -> None:
        """Record the GraphQL pagination cursor from *response*, if present."""
        root = response.get("root")
        if not root or "repositories" not in root:
            return
        if not self._page_info:
            # First page seen: declare the cursor variable for subsequent queries.
            self._params["repositoriesCursor"] = "String!"
        page_info = root["repositories"]["pageInfo"]  # PageInfo
        self._page_info = page_info
        self._values["repositoriesCursor"] = page_info["endCursor"]
        self._count += 1
| StarcoderdataPython |
11370408 | <gh_stars>0
import util.game_info as gi
import numpy as np
from time import time
from Networks.legacy.XInputReader import get_xbox_output as get_controller_output
from util.data_processor_v3 import xbox_to_rlbot_controls
from util.game_info import GameInfo
from util.vector_math import Vector3, angle
from rlbot.agents.base_agent import BaseAgent
from Agents.FlowBotv2.flow_bot import Renderer
from configparser import ConfigParser
BOT_ROOT = str(__file__).replace("tdc_agent.py", "")
INFO_INTERVAL = 2
class Agent (BaseAgent):
    """RLBot agent that forwards controls read from a physical Xbox controller
    and draws debug overlays (car basis vectors, velocities, relative ball box).
    Periodically prints the game state both raw and normalised."""

    def __init__(self, name, team, index):
        super().__init__(name, team, index)
        self.name = name
        self.team = team
        self.index = index
        # Timestamp of the last debug print (throttled by INFO_INTERVAL).
        self.prev_time = time()
        # Which state features to include, loaded from the bot's config file.
        self.state_composition = ConfigParser()
        self.state_composition.read(BOT_ROOT + "state_composition.cfg")
        # Deep-copy the config into a plain dict, then force normalisation on.
        self.sc_norm = {}
        for s in self.state_composition.keys():
            # print("section:", s)
            section = {}
            for key in self.state_composition[s]:
                # print("\tkey:", key)
                section[key] = self.state_composition[s][key]
            self.sc_norm[s] = section
        self.sc_norm["general"]["norm"] = "True"

    def get_output(self, game_tick_packet):
        """Called every tick: read the Xbox controller, render the overlay and
        occasionally print the (raw and normalised) game state."""
        controls = xbox_to_rlbot_controls(get_controller_output())
        # controls = nn_to_rlbot_controls(gi.get_random_action(self.bot_types[self.cur_bot], true_random=True))
        game_info = GameInfo(game_tick_packet)
        self.render(game_info)
        cur_time = time()
        if cur_time - self.prev_time >= INFO_INTERVAL:
            print(game_info.get_state(self.index, self.state_composition))
            print(game_info.get_state(self.index, self.sc_norm))
            self.prev_time = cur_time
        return controls

    def render(self, state):
        """Draw the debug overlay for the current game *state*."""
        r = self.renderer
        car = state.get_player(self.index)
        ball = state.ball_info
        r.begin_rendering()
        # some default colors
        red = r.create_color(255, 255, 0, 0)
        green = r.create_color(255, 0, 255, 0)
        blue = r.create_color(255, 0, 0, 255)
        text_color = r.create_color(255, 255, 255, 255)
        own_car = state.get_player(self.index)
        info = {
            "Max Boost Timer": str(max([b.timer for b in state.boosts])),
        }
        for i, key in enumerate(info):
            r.draw_string_2d(32, i*16 + 16, 1, 1, key + ": " + info[key], color=text_color)
        # basis of the relative coordinates (scaled to 100 units for visibility)
        basis_x, basis_y, basis_z = car.get_basis(as_v3=True)
        basis_x = basis_x.normalize().scalar_mul(100)
        basis_y = basis_y.normalize().scalar_mul(100)
        basis_z = basis_z.normalize().scalar_mul(100)
        pos = car.location.as_list()
        x_line_end = (car.location + basis_x).as_list()
        r.draw_line_3d(pos, x_line_end, color=red)
        y_line_end = (car.location + basis_y).as_list()
        r.draw_line_3d(pos, y_line_end, color=blue)
        z_line_end = (car.location + basis_z).as_list()
        r.draw_line_3d(pos, z_line_end, color=green)
        # velocities (one line per car plus the ball)
        vel_line_end = (car.location + car.velocity).as_list()
        r.draw_line_3d(pos, vel_line_end, color=red)
        vel_line_end = (ball.location + ball.velocity).as_list()
        r.draw_line_3d(ball.location.as_list(), vel_line_end, color=red)
        for p in state.get_all_players():
            if not p.player_id == self.index:
                vel_line_end = (p.location + p.velocity).as_list()
                r.draw_line_3d(p.location.as_list(), vel_line_end, color=red)
        # line to ball
        # r.draw_line_3d(pos, ball.location.as_list(), color=red)
        # ball box
        # r2 = Renderer(r)
        # box_anchor = ball.location - Vector3(gi.BALL_SIZE/2, gi.BALL_SIZE/2, gi.BALL_SIZE/2)
        # r2.draw_cube(box_anchor.as_list(), size=gi.BALL_SIZE, color=r2.red)
        # Transform the ball into the car's coordinate frame and back, then draw
        # a cube centred on the resulting position.
        r2 = Renderer(r)
        basis = np.transpose(car.get_basis())
        ball_pos = np.matmul(basis, ball.get_relative(basis, offset=car.location).location.as_list())
        ball_pos = Vector3.from_list(ball_pos) + car.location - Vector3(gi.BALL_SIZE/2, gi.BALL_SIZE/2, gi.BALL_SIZE/2)
        r2.draw_cube(ball_pos.as_list(), size=gi.BALL_SIZE, color=r2.green)
        r.end_rendering()

    def __str__(self):
        return "TDC(" + str(self.index) + ") " + ("blue" if self.team == 0 else "orange")
| StarcoderdataPython |
5043480 | <reponame>UKPLab/arxiv2018-xling-sentence-embeddings
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer, l2_regularizer
def weight_variable(name, shape, regularization=None):
    """Create (or fetch) a Xavier-initialised weight variable.

    When *regularization* is given, an L2 regularizer with that scale is
    attached to the variable.
    """
    regularizer = l2_regularizer(regularization) if regularization is not None else None
    return tf.get_variable(name, shape=shape,
                           initializer=xavier_initializer(),
                           regularizer=regularizer)
def bias_variable(name, shape, value=0.1, regularization=None):
    """Create (or fetch) a bias variable initialised to the constant *value*.

    When *regularization* is given, an L2 regularizer with that scale is
    attached to the variable.
    """
    regularizer = l2_regularizer(regularization) if regularization is not None else None
    return tf.get_variable(name, shape,
                           initializer=tf.constant_initializer(value),
                           regularizer=regularizer)
| StarcoderdataPython |
9665524 | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
"""
A basic dataset for pytorch.
"""
class Dataset:
    """Minimal map-style dataset pairing each sample with its label."""

    def __init__(self, data, label):
        self.data = data
        self.label = label

    def __len__(self):
        """Number of samples."""
        return len(self.data)

    def __getitem__(self, index):
        """Return the ``(sample, label)`` pair at *index*."""
        return self.data[index], self.label[index]
"""
The model that we use for our network.
"""
class Model_multiClass(nn.Module):
    """Two-hidden-layer feed-forward classifier with dropout before the output.

    Layer sizes: char_num -> hid1_size -> hid2_size -> num_options.
    """

    def __init__(self, char_num, hid1_size, hid2_size, num_options, activation_fun, dropout):
        super(Model_multiClass, self).__init__()
        self.layer_1 = nn.Linear(char_num, hid1_size)
        self.layer_2 = nn.Linear(hid1_size, hid2_size)
        self.layer_out = nn.Linear(hid2_size, num_options)
        self.activation = activation_fun
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, inputs):
        """Return raw class scores (logits) for a batch of *inputs*."""
        hidden = self.activation(self.layer_1(inputs))
        hidden = self.activation(self.layer_2(hidden))
        hidden = self.dropout(hidden)
        return self.layer_out(hidden)
"""
Load data to pytorch tensors
"""
def loading_data(train_data, train_tag, val_data, val_tag, batch_size):
    """Wrap pandas frames into DataLoaders: shuffled train, ordered validation."""
    train_set = Dataset(torch.FloatTensor(train_data.values),
                        torch.FloatTensor(train_tag.values))
    val_set = Dataset(torch.FloatTensor(val_data.values),
                      torch.FloatTensor(val_tag.values))
    return (DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True),
            DataLoader(dataset=val_set, batch_size=batch_size, shuffle=False))
"""
For weighted in loss function (in multi classification)
"""
def calculate_weighted_in_train(train_tag, options):
    """
    Count how often each class label occurs in *train_tag*.

    Used as per-class weights for the loss function in multi-class training.

    :param train_tag: iterable of class labels in ``range(options)``
    :param options: number of classes
    :return: ``torch.FloatTensor`` of shape ``(options,)`` with the raw counts
    """
    from collections import Counter  # local import: keeps the module's import block untouched
    counts = Counter(train_tag)
    # Single O(n) pass; the old ``list(train_tag).count(i)`` re-scanned the
    # whole tag list once per class, i.e. O(n * options).
    weighted = np.zeros(options)
    for i in range(options):
        weighted[i] = counts[i]
    return torch.FloatTensor(weighted)
| StarcoderdataPython |
6559978 | <reponame>OUCyf/SeisFlow
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 29 20:07:31 2021
Input
1. Generate tmp file: find $(pwd) -name \*.SAC > ../tmp.sac
2. Run this script
3. KILL: ps -aux|grep python|grep -v grep|gawk '{print $2}' |xargs kill -9
Output
1.DB_output format: [ network_name/year/network_name.station_name/yearday/sac ]
@author: yf
"""
import os
import sys
import glob
import obspy
import numpy as np
import time as time_pac
from obspy.core import UTCDateTime
from mpi4py import MPI
os.environ["OMP_NUM_THREADS"] = "1" # export OMP_NUM_THREADS=1
def CPU_split(Source_num, MPI_n):
    '''
    Split Source_num items as evenly as possible across MPI_n workers.

    Returns a list of [start, end] index pairs (1-based, inclusive), one per
    worker. When the division is not exact, the first ``Source_num % MPI_n``
    workers receive one extra item.
    '''
    base, remainder = divmod(Source_num, MPI_n)
    splits = []
    start = 1
    for worker in range(MPI_n):
        size = base + 1 if worker < remainder else base
        end = start + size - 1
        splits.append([start, end])
        start = end + 1
    return splits
#%% 1. input parameters
# tmp.mseed file path (list of SAC files to process, one path per line)
TMP_PATH = '/cluster/datapool2/yinf/2.Project/1.yunnan_vel_change/1.DATA/SAC_DATA/tmp_server.sac'
# mseed_ID: processing starts from this entry of TMP_PATH (entries before it are skipped)
BEGIN_sac_ID = '/cluster/datapool2/yinf/2.Project/1.yunnan_vel_change/1.DATA/SAC_DATA/G1/2016/G1.53034/2016104/G1.53034.01.SHN.D.2016.104.00.00.00.SAC'
# output root directory
OUT_PATH = '/cluster/datapool2/yinf/2.Project/1.yunnan_vel_change/1.DATA/SeisNoise_DATA'
# file of station latitudes/longitudes (whitespace-separated: name lon lat [elev])
STA_LAT_LON_PATH = "/cluster/datapool2/yinf/2.Project/1.yunnan_vel_change/1.DATA/sta.all"
# filter
freqmin = 0.5
freqmax = 8  # hz
sampling_rate = 20  # targeted sampling rate (hz) — only used by the (commented-out) resample step
# MPI_n: number of MPI ranks the work is split across
MPI_n = 10
#%% 2. MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# 2.1 read and split (rank 0 only, then broadcast)
if rank == 0:
    # Read TMP_FILE file
    print("rank: ",str(rank),flush=True)
    print(f"Now begin to calculate with {MPI_n} cores...",flush=True)
    TMP_FILE = []
    with open(TMP_PATH,'r') as f:
        for line in f:
            TMP_FILE.append(line.strip("\n"))
    BEGIN_ID = TMP_FILE.index(BEGIN_sac_ID)
    END_ID = len(TMP_FILE)
    sacfile_num = END_ID-BEGIN_ID
    CPU_splits = CPU_split(sacfile_num, MPI_n)
    STA_INFO={} # store a station corresponding to the source
    with open(STA_LAT_LON_PATH,'r') as f:
        for line in f:
            line = line.strip()
            line = line.strip('\t')
            line = line.strip('\n')
            line = line.split()
            sta_name = line[0]
            info = line[1:]
            STA_INFO.update({ sta_name: info})
else:
    TMP_FILE,CPU_splits,STA_INFO = [None for _ in range(3)]
# 2.2 broadcast the variables to every rank
TMP_FILE = comm.bcast(TMP_FILE,root=0)
CPU_splits = comm.bcast(CPU_splits,root=0)
STA_INFO = comm.bcast(STA_INFO ,root=0)
#%% 3. loop — each rank processes its own [begin_ID, end_ID) slice of the file list
# NOTE(review): CPU_splits covers sacfile_num items but is indexed directly into
# TMP_FILE without adding BEGIN_ID — confirm whether the skipped prefix is intended.
begin_ID = CPU_splits[rank][0]-1
end_ID = CPU_splits[rank][1]
tt0=time_pac.time()
for i in range(begin_ID,end_ID,1):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    try:
        # 3.1 read mseed
        t0=time_pac.time()
        tr = obspy.read(TMP_FILE[i])
        NET =tr[0].stats.network
        STA = tr[0].stats.station
        LOC = tr[0].stats.location
        CHN = tr[0].stats.channel
        tr[0].stats.sac["stlo"] = STA_INFO[STA][0]
        tr[0].stats.sac["stla"] = STA_INFO[STA][1]
        # tr[0].stats.sac["stel"] = STA_INFO[STA][2]
        # 3.2 filter and downsample
        tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax, corners=4, zerophase=True) # bandpass
        # tr.resample(sampling_rate,window='hanning',no_filter=True, strict_length=True) # resample
        # 3.3 write sac
        # NOTE: ``time`` here shadows the imported ``time`` module name; timing
        # uses the ``time_pac`` alias, so this is safe within this loop.
        time = obspy.UTCDateTime(tr[0].stats.starttime)
        year = "{:04d}".format(time.year)
        julday = "{:03d}".format(time.julday)
        hour = "{:02d}".format(time.hour)
        minute = "{:02d}".format(time.minute)
        second = "{:02d}".format(time.second)
        # Trim (zero-padded) to exactly one whole julian day.
        UTCtime_begin = UTCDateTime(year=time.year, julday=time.julday)
        UTCtime_end = UTCtime_begin + 60*60*24
        tr.trim(UTCtime_begin, UTCtime_end, pad=True,fill_value=0)
        sac_path = os.path.join(OUT_PATH,
                                year,
                                year+julday)
        if not os.path.exists(sac_path):
            try:
                os.makedirs(sac_path)
            except OSError as reason:
                print("Rank = "+str(rank)+": now ID "+str(i)+ " || "+ str(end_ID)+" error~")
                print(" mkdir sac_path error with file:"+TMP_FILE[i])
                print(' reason: ' + str(reason))
                sys.exit()
        sac_name = os.path.join(sac_path,
                                NET+'.'+
                                STA+'.'+
                                LOC+'.'+
                                CHN+'.'+
                                'D'+'.'+ # what's the D?
                                year+'.'+
                                julday+'.'+
                                hour+'.'+
                                minute+'.'+
                                second+'.'+
                                'SAC')
        tr.write(sac_name, format='SAC')
        tt1=time_pac.time()
        print("Rank = "+str(rank)+": now ID "+str(i)+ " || "+ str(end_ID) +" || "+'time takes '+str(tt1-tt0)+' s'+" ok~")
    except Exception:
        # Best-effort pipeline: any failure on a single file is logged and skipped.
        tt1=time_pac.time()
        print("Rank = "+str(rank)+": now ID "+str(i)+ " || "+ str(end_ID) +" || "+'time takes '+str(tt1-tt0)+' s'+" skip~")
        print(" skipping with file:"+TMP_FILE[i]);continue
#%% 4.end — wait for all ranks, then rank 0 reports success
comm.barrier()
if rank == 0:
    print("\n\n*****************************************************************",flush=True)
    print("Successful !\nAll of the sac files have been computed.\n\n",flush=True)
    sys.exit()
| StarcoderdataPython |
4809367 | <gh_stars>0
import argparse
import os
from datetime import datetime, timedelta, timezone
import logging
from radiko.recorder import record_time_free
def _get_args():
parser = argparse.ArgumentParser(description='record radiko')
parser.add_argument('station', type=str, help='radiko station')
parser.add_argument('start', type=int, help='start time')
parser.add_argument('end', type=int, help='end time')
parser.add_argument('program', type=str, help='radiko program name')
parser.add_argument('timeout',
type=float,
default=None,
nargs='?',
help='limit time of recording.(unit:miniutes)')
args = parser.parse_args()
return args.station, args.start, args.end, args.program, args.timeout
if __name__ == "__main__":
    # Log to a file; the path can be overridden via RADIKO_RECORDER_LOG_FILE.
    logging.basicConfig(filename=os.getenv('RADIKO_RECORDER_LOG_FILE',
                                           f'/var/log/record_radiko.log'),
                        level=logging.DEBUG)
    station, start, end, program, timeout = _get_args()
    # Timestamps are interpreted in Japan Standard Time (UTC+9).
    JST = timezone(timedelta(hours=+9), 'JST')
    current_time = datetime.now(tz=JST).strftime("%Y%m%d%H%M%S")
    logging.debug(f'current time: {current_time}, '
                  f'station: {station}, '
                  f'program name: {program}, '
                  f'start: {start}, '
                  f'end: {end}')
    out_file_name = f'./output/{start}_{station}_{program}.m4a'
    logging.debug(f'out file name:{out_file_name}')
    record_time_free(station, out_file_name, start, end, timeout)
| StarcoderdataPython |
1668372 | <filename>Crawler/src/database/scanner.py<gh_stars>1-10
"""
This script starts a scanner that scans through the database
to check expired links
"""
from datetime import datetime, timedelta
def db_scanner(collection):
    """
    Return the URLs of documents whose ``last_updated`` is at least
    ``frequency`` days old (i.e. links that need to be crawled again).

    :param collection: a MongoDB collection whose documents carry
        ``url``, ``frequency`` (days) and ``last_updated`` (datetime) fields
    :return: list of expired URLs (empty when the collection is empty)
    """
    item = collection.find_one()
    if not item:
        return []
    # NOTE(review): ``frequency`` is read from an arbitrary document; this
    # assumes it is uniform across the collection — confirm.
    cutoff = datetime.today() - timedelta(days=item['frequency'])
    return [doc['url'] for doc in collection.find({"last_updated": {"$lte": cutoff}})]
| StarcoderdataPython |
11216850 | from .bme280 import Bme280
from .bme280 import HO_SKIPPED, HO_1, HO_2, HO_4, HO_8, HO_16
from .bme280 import PO_SKIPPED, PO_1, PO_2, PO_4, PO_8, PO_16
from .bme280 import TO_SKIPPED, TO_1, TO_2, TO_4, TO_8, TO_16
from .bme280 import MODE_SLEEP, MODE_FORCED, MODE_NORMAL
from .bme280 import TSTANDBY_0_5, TSTANDBY_62_5, TSTANDBY_125, TSTANDBY_250, TSTANDBY_500, TSTANDBY_1000
from .bme280 import TSTANDBY_10, TSTANDBY_20
from .bme280 import FILTER_OFF, FILTER_2, FILTER_4, FILTER_8, FILTER_16
| StarcoderdataPython |
30373 | from django.conf import settings
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser, \
PermissionsMixin
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from issues.models import Proposal, ProposalVote, ProposalVoteValue, \
ProposalStatus
from meetings.models import MeetingParticipant
from users.default_roles import DefaultGroups
import datetime
import logging
import random
import string
CODE_LENGTH = 48
logger = logging.getLogger(__name__)
class OCUserManager(BaseUserManager):
    """Manager for OCUser: case-insensitive email lookup and user creation."""

    @classmethod
    def normalize_email(cls, email):
        # Lower-case the whole address so storage and lookup are consistent.
        return email.lower()

    def get_by_natural_key(self, username):
        return self.get(email__iexact=username)

    # FIX: the default for ``password`` was corrupted to an invalid placeholder
    # (``<PASSWORD>``); ``None`` matches Django's BaseUserManager convention
    # (set_password(None) marks the password as unusable).
    def create_user(self, email, display_name=None, password=None, **kwargs):
        """
        Creates and saves a User with the given email, display name and
        password.
        """
        if not email:
            raise ValueError('Users must have an email address')
        if not display_name:
            display_name = email
        user = self.model(
            email=OCUserManager.normalize_email(email),
            display_name=display_name,
            **kwargs
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, display_name, password):
        """
        Creates and saves a superuser with the given email, display name and
        password.
        """
        user = self.create_user(email,
                                password=password,
                                display_name=display_name
                                )
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
class OCUser(AbstractBaseUser, PermissionsMixin):
    """Custom user model keyed by email address instead of a username."""

    email = models.EmailField(_('email address'), max_length=255, unique=True,
                              db_index=True,
                              )
    display_name = models.CharField(_("Your name"), max_length=200)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
                                   help_text=_('Designates whether the user can log into this admin '
                                               'site.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    objects = OCUserManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['display_name']

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def __unicode__(self):
        return self.display_name

    def get_full_name(self):
        # The user is identified by their email address
        return self.display_name

    def get_short_name(self):
        # The user is identified by their email address
        return self.display_name

    def get_default_group(self, community):
        """Return the user's role name in *community*, or "" when not a member."""
        try:
            return self.memberships.get(community=community).default_group_name
        except Membership.DoesNotExist:
            return ""

    def email_user(self, subject, message, from_email=None):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email])
class MembershipManager(models.Manager):
    """Queryset helpers separating board members from regular members."""

    def board(self):
        # Board = any membership whose role differs from the plain MEMBER group.
        return self.get_query_set().exclude(
            default_group_name=DefaultGroups.MEMBER)

    def none_board(self):
        # Regular (non-board) members only.
        return self.get_query_set().filter(
            default_group_name=DefaultGroups.MEMBER)
class Membership(models.Model):
    """A user's membership in a community: role, tenure and activity stats."""

    community = models.ForeignKey('communities.Community', verbose_name=_("Community"),
                                  related_name='memberships')
    user = models.ForeignKey(OCUser, verbose_name=_("User"),
                             related_name='memberships')
    default_group_name = models.CharField(_('Group'), max_length=50,
                                          choices=DefaultGroups.CHOICES)
    created_at = models.DateTimeField(auto_now_add=True,
                                      verbose_name=_("Created at"))
    invited_by = models.ForeignKey(settings.AUTH_USER_MODEL,
                                   verbose_name=_("Invited by"),
                                   related_name="members_invited", null=True,
                                   blank=True)
    # FIX: pass the callable, not its result. ``datetime.date.today()`` was
    # evaluated once at class definition, freezing the default date at the
    # moment the server process started.
    in_position_since = models.DateField(default=datetime.date.today,
                                         verbose_name=_("In position since"))

    objects = MembershipManager()

    class Meta:
        unique_together = (("community", "user"),)
        verbose_name = _("Community Member")
        verbose_name_plural = _("Community Members")

    def __unicode__(self):
        return "%s: %s (%s)" % (self.community.name, self.user.display_name,
                                self.get_default_group_name_display())

    @models.permalink
    def get_absolute_url(self):
        return "member_profile", (self.community.id, self.id)

    def get_permissions(self):
        """Return the permission set attached to this member's role."""
        return DefaultGroups.permissions[self.default_group_name]

    def total_meetings(self):
        """ In the future we'll check since joined to community or rejoined """
        return self.community.meetings.filter(held_at__gte=self.in_position_since).count()

    def meetings_participation(self):
        """ In the future we'll check since joined to community or rejoined """
        return MeetingParticipant.objects.filter(user=self.user, is_absent=False,
                                                 meeting__community=self.community,
                                                 meeting__held_at__gte=self.in_position_since).count()

    def meetings_participation_percantage(self):
        """Percentage of meetings attended since taking the position.

        NOTE(review): raises ZeroDivisionError when ``total_meetings() == 0``
        — confirm callers guard against communities with no meetings.
        """
        return round((float(self.meetings_participation()) / float(self.total_meetings())) * 100.0)

    def member_open_tasks(self, user=None, community=None):
        """Accepted, uncompleted tasks assigned to this member, not yet overdue."""
        return Proposal.objects.object_access_control(
            user=user, community=community).filter(status=ProposalStatus.ACCEPTED, assigned_to_user=self.user, active=True, task_completed=False).exclude(due_by__lte=datetime.date.today())

    def member_close_tasks(self, user=None, community=None):
        """ Need to create a field to determine closed tasks """
        return Proposal.objects.object_access_control(
            user=user, community=community).filter(status=ProposalStatus.ACCEPTED, assigned_to_user=self.user, active=True, task_completed=True)

    def member_late_tasks(self, user=None, community=None):
        """Accepted, uncompleted tasks assigned to this member past their due date."""
        return Proposal.objects.object_access_control(
            user=user, community=community).filter(status=ProposalStatus.ACCEPTED, assigned_to_user=self.user, due_by__lte=datetime.date.today(), active=True, task_completed=False)

    def member_votes_dict(self):
        """Group this member's registered board votes by stance.

        Returns a dict with keys 'pro', 'neut', 'con' (each mapping issue ->
        list of proposals) plus 'pro_count', 'neut_count', 'con_count'.
        """
        res = {'pro': {}, 'neut': {}, 'con': {}}
        pro_count = 0
        con_count = 0
        neut_count = 0
        votes = self.user.board_votes.select_related('proposal') \
            .filter(proposal__issue__community_id=self.community_id,
                    proposal__register_board_votes=True,
                    proposal__active=True,
                    proposal__decided_at_meeting__held_at__gte=self.in_position_since) \
            .exclude(proposal__status=ProposalStatus.IN_DISCUSSION).order_by('-proposal__issue__created_at', 'proposal__id')
        for v in votes:
            if not v.proposal.register_board_votes:
                continue
            # NOTE(review): a vote value outside {PRO, NEUTRAL, CON} would reuse
            # the previous ``key`` (or NameError on the first vote) — assumed
            # impossible given the ProposalVoteValue choices.
            if v.value == ProposalVoteValue.NEUTRAL:
                key = 'neut'
                neut_count += 1
            elif v.value == ProposalVoteValue.PRO:
                key = 'pro'
                pro_count += 1
            elif v.value == ProposalVoteValue.CON:
                key = 'con'
                con_count += 1
            issue_key = v.proposal.issue
            p_list = res[key].setdefault(issue_key, [])
            p_list.append(v.proposal)
        res['pro_count'] = pro_count
        res['con_count'] = con_count
        res['neut_count'] = neut_count
        return res

    def _user_board_votes(self):
        """Base queryset of this member's registered board votes in this community."""
        return self.user.board_votes.select_related('proposal').filter(proposal__issue__community_id=self.community_id,
                                                                       proposal__active=True,
                                                                       proposal__register_board_votes=True,
                                                                       proposal__decided_at_meeting__held_at__gte=self.in_position_since)

    def member_proposal_pro_votes_accepted(self):
        return self._user_board_votes().filter(value=ProposalVoteValue.PRO,
                                               proposal__status=ProposalStatus.ACCEPTED)

    def member_proposal_con_votes_rejected(self):
        return self._user_board_votes().filter(value=ProposalVoteValue.CON,
                                               proposal__status=ProposalStatus.REJECTED)

    def member_proposal_nut_votes_accepted(self):
        return self._user_board_votes().filter(value=ProposalVoteValue.NEUTRAL,
                                               proposal__status=ProposalStatus.ACCEPTED)
CODE_CHARS = string.lowercase + string.digits
def create_code(length=CODE_LENGTH):
    """
    Creates a random code of lowercase letters and numbers
    """
    # NOTE(review): uses ``random``, not a cryptographic RNG — invitation codes
    # are security-sensitive; confirm whether predictability matters here.
    return "".join(random.choice(CODE_CHARS) for _x in xrange(length))
class EmailStatus(object):
    """Delivery-state constants (and Django choices) for invitation emails."""
    PENDING = 0
    SENT = 1
    FAILED = 2

    choices = (
        (PENDING, _('Pending')),
        (SENT, _('Sent')),
        (FAILED, _('Failed')),
    )
class Invitation(models.Model):
    """An email invitation to join a community, tracking delivery state."""

    community = models.ForeignKey('communities.Community',
                                  verbose_name=_("Community"),
                                  related_name='invitations')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name=_("Created at"))
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL,
                                   verbose_name=_("Created by"),
                                   related_name="invitations_created")
    name = models.CharField(_("Name"), max_length=200, null=True, blank=True)
    email = models.EmailField(_("Email"))
    message = models.TextField(_("Message"), null=True, blank=True)
    # Random token embedded in the acceptance URL.
    code = models.CharField(max_length=CODE_LENGTH, default=create_code)
    user = models.ForeignKey(OCUser, verbose_name=_("User"),
                             related_name='invitations', null=True, blank=True)
    default_group_name = models.CharField(_('Group'), max_length=50,
                                          choices=DefaultGroups.CHOICES)
    status = models.PositiveIntegerField(_("Status"),
                                         choices=EmailStatus.choices, default=EmailStatus.PENDING)
    times_sent = models.PositiveIntegerField(_("Times Sent"), default=0)
    error_count = models.PositiveIntegerField(_("Error count"), default=0)
    last_sent_at = models.DateTimeField(_("Sent at"), null=True, blank=True)

    class Meta:
        unique_together = (("community", "email"),)
        verbose_name = _("Invitation")
        verbose_name_plural = _("Invitations")

    DEFAULT_MESSAGE = _("The system will allow you to take part in the decision making process of %s. "
                        "Once you've joined, you'll be able to see the topics for the agenda in the upcoming meeting, decisions at previous meetings, and in the near future you'll be able to discuss and influence them.")

    def __unicode__(self):
        return "%s: %s (%s)" % (self.community.name, self.email,
                                self.get_default_group_name_display())

    @models.permalink
    def get_absolute_url(self):
        return "accept_invitation", (self.code,)

    def send(self, sender, recipient_name='', base_url=None):
        """Render and send the invitation email; update delivery counters.

        Returns True on success, False on failure (the failure is logged and
        recorded on the model).
        """
        if not base_url:
            base_url = settings.HOST_URL
        subject = _("Invitation to %s") % self.community.name
        d = {
            'base_url': base_url,
            'object': self,
            'recipient_name': recipient_name,
        }
        message = render_to_string("emails/invitation.txt", d)
        recipient_list = [self.email]
        from_email = "%s <%s>" % (self.community.name, settings.FROM_EMAIL)
        self.last_sent_at = timezone.now()
        try:
            send_mail(subject, message, from_email, recipient_list)
            self.times_sent += 1
            self.status = EmailStatus.SENT
            self.save()
            return True
        # FIX: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should be recorded as failures.
        except Exception:
            logger.error("Invitation email sending failed", exc_info=True)
            self.error_count += 1
            self.status = EmailStatus.FAILED
            self.save()
            return False
| StarcoderdataPython |
1794912 | <filename>segmenter/layers/NoisyOr.py
from tensorflow.keras.layers import Multiply
class NoisyOr(Multiply):
    """Merge layer computing a noisy-OR: ``1 - prod_i(1 - x_i)``."""

    def _merge_function(self, inputs):
        # Probability that no input "fires"; its complement is the noisy-OR.
        none_fired = 1. - inputs[0]
        for tensor in inputs[1:]:
            none_fired = none_fired * (1. - tensor)
        return 1. - none_fired
| StarcoderdataPython |
3416954 | <reponame>Ca2Patton/PythonStuff<gh_stars>0
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint, random
from kivy.graphics import Color
from kivy.core.window import Window
class PongPaddle(Widget):
    """A paddle: tracks its score and reflects the ball on contact."""

    score = NumericProperty(0)

    def bounce_ball(self, ball):
        """Reflect *ball* horizontally, speed it up 10%, deflect it vertically
        by where it hit the paddle, and flash the paddle a random colour."""
        if self.collide_widget(ball):
            vx, vy = ball.velocity
            # -1..1 depending on how far from the paddle centre the ball hit.
            offset = (ball.center_y - self.center_y) / (self.height /2)
            bounced = Vector(-1 * vx , vy)
            vel = bounced * 1.1
            ball.velocity = vel.x, vel.y + offset
            color = (random(), random(), random())
            with self.canvas.before:
                Color(*color)
class PongBall(Widget):
    """The ball: a widget with a 2D velocity moved once per frame."""

    # Velocity of the ball on the x and y axis.
    velocity_x = NumericProperty(0)
    velocity_y = NumericProperty(0)

    # Shorthand reference list so ``velocity`` reads/writes both components.
    velocity = ReferenceListProperty(velocity_x, velocity_y)

    # Used for ball movement: advance position by one velocity step.
    def move(self):
        self.pos = Vector(*self.velocity) + self.pos
class PongGame(Widget):
    """The playing field: keyboard/touch paddle control, physics and scoring."""

    # Wired up from the kv file.
    ball = ObjectProperty(None)
    player1 = ObjectProperty(None)
    player2 = ObjectProperty(None)

    def __init__(self, **kwargs):
        super(PongGame, self).__init__(**kwargs)
        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
        self._keyboard.bind(on_key_down=self._on_keyboard_down)

    def _keyboard_closed(self):
        """Release the keyboard when the system takes it back."""
        # FIX: was ``self.keyboard`` (no underscore) — that attribute does not
        # exist, so closing the keyboard raised AttributeError.
        self._keyboard.unbind(on_key_down=self._on_keyboard_down)
        self._keyboard = None

    def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
        """Move the paddles: w/s for player 1, k/l for player 2."""
        if keycode[1] == 'w':
            self.player1.center_y += 10
        elif keycode[1] == 's':
            self.player1.center_y -= 10
        elif keycode[1] == 'k':
            self.player2.center_y += 10
        elif keycode[1] == 'l':
            self.player2.center_y -= 10
        return True

    def serve_ball(self, vel=(4, 0)):
        """Place the ball at the centre and give it velocity *vel*."""
        self.ball.center = self.center
        self.ball.velocity = vel

    def update(self, dt):
        """Per-frame step: move the ball, bounce it, and handle scoring."""
        self.ball.move()
        self.player1.bounce_ball(self.ball)
        self.player2.bounce_ball(self.ball)
        # Bounce ball off top and bottom.
        if (self.ball.y < 0) or (self.ball.top > self.height):
            self.ball.velocity_y *= -1
        # Scoring: serve toward the player who just conceded.
        if self.ball.x < self.x:
            self.player2.score += 1
            self.serve_ball(vel=(4, 0))
        if self.ball.x > self.width:
            self.player1.score += 1
            self.serve_ball(vel=(-4, 0))

    def on_touch_move(self, touch):
        """Drag near either edge (outer third) to move that side's paddle."""
        if touch.x < self.width / 3:
            self.player1.center_y = touch.y
        if touch.x > self.width - self.width / 3:
            self.player2.center_y = touch.y
class PongApp(App):
    """Application entry point: builds the game and drives it at 60 FPS."""

    def build(self):
        game = PongGame()
        game.serve_ball()
        # Schedule the physics/score update 60 times per second.
        Clock.schedule_interval(game.update, 1.0/60.0)
        return game


if __name__ == '__main__':
    PongApp().run()
| StarcoderdataPython |
5120032 | """ Handler for the hook and rhook action tags. """
# pylint: disable=too-few-public-methods,too-many-arguments,protected-access,unused-argument
__author__ = "<NAME>"
__copyright__ = "Copyright 2016-2019"
__license__ = "Apache License 2.0"
from . import ActionHandler
from ..nodes import Node
from ..tokenizer import Token
from ..errors import ParserError
class HookNode(Node):
    """Template node that invokes a registered hook with evaluated arguments."""

    def __init__(self, template, line, hook, assigns, reverse):
        """Store the hook-name expression, its assignments and direction."""
        Node.__init__(self, template, line)
        self.hook = hook
        self.assigns = assigns
        self.reverse = reverse

    def render(self, state):
        """Evaluate the hook name and parameters, then dispatch the call."""
        hook_name = self.hook.eval(state)
        params = {name: expr.eval(state) for name, expr in self.assigns}
        state.line = self.line
        self.env.call_hook(hook_name, state, params, self.reverse)
class HookActionHandler(ActionHandler):
    """ Handle hook and rhook """

    def handle_action_hook(self, line, start, end):
        """ Handle hook (forward invocation order) """
        self._handle_action_hook(line, start, end, False)

    def handle_action_rhook(self, line, start, end):
        """ Handle rhook (reverse invocation order) """
        self._handle_action_hook(line, start, end, True)

    def _handle_action_hook(self, line, start, end, reverse):
        """ Handle the actual parsing: first segment is the hook-name
        expression, remaining segments must be ``with`` assignment lists. """
        hook = None
        assigns = []

        segments = self.parser.find_tag_segments(start, end)

        # First item should be expression
        if len(segments) > 0:
            (start, end) = segments[0]
            hook = self.parser.parse_expr(start, end)

        for segment in segments[1:]:
            (start, end) = segment

            # Only support "with"
            token = self.parser.get_expected_token(start, end, Token.TYPE_WORD, values="with")
            start += 1

            assigns = self.parser.parse_multi_assign(start, end)

        if hook is None:
            raise ParserError(
                "Hook expecting name expression",
                self.template.filename,
                line
            )

        node = HookNode(self.template, line, hook, assigns, reverse)
        self.parser.add_node(node)
ACTION_HANDLERS = {"hook": HookActionHandler, "rhook": HookActionHandler}
| StarcoderdataPython |
1955623 | <reponame>betagouv/ecosante<filename>migrations/versions/ca7c02fcb035_add_chauffage_animaux_connaissance.py
"""Add chauffage, animaux, connaissance
Revision ID: <KEY>
Revises: 723f9ab27edf
Create Date: 2021-03-10 15:05:29.180649
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '723f9ab27edf'
branch_labels = None
depends_on = None
def upgrade():
    """Add the three nullable string-array columns to ``inscription``."""
    for column_name in ('animaux_domestiques', 'chauffage', 'connaissance_produit'):
        op.add_column(
            'inscription',
            sa.Column(column_name, postgresql.ARRAY(sa.String()), nullable=True),
        )
def downgrade():
    """Remove the columns added in :func:`upgrade`, newest first."""
    for column_name in ('connaissance_produit', 'chauffage', 'animaux_domestiques'):
        op.drop_column('inscription', column_name)
| StarcoderdataPython |
9635643 | <reponame>NULLCT/LOMC
import heapq
# Read vertex count N and query count Q, then the N-1 edges of a tree.
N, Q = map(int, input().split())
INF = float('inf')  # NOTE(review): defined but never used; dijkstra marks unvisited nodes with -1.
G = [[] for _ in range(N)]
for i in range(N - 1):
    a, b = map(int, input().split())
    # Undirected unit-weight edges stored as [weight, neighbour], 0-indexed.
    G[a - 1].append([1, b - 1])
    G[b - 1].append([1, a - 1])
def dijkstra(N, start, goal, edge_list):
    """Compute shortest-path distances from *start* to every vertex.

    Parameters:
        N: number of vertices.
        goal: unused; kept for interface compatibility with existing callers.
        edge_list: adjacency list where edge_list[v] holds (weight, neighbour)
            pairs (lists or tuples).

    Returns a list whose entry v is the distance from *start* to v,
    or -1 if v is unreachable.
    """
    Q = []
    # Priority queue of (distance, vertex); the source starts at distance 0.
    heapq.heappush(Q, (0, start))
    dist = [-1] * N
    dist[start] = 0
    while len(Q) > 0:
        d, v = heapq.heappop(Q)
        if d > dist[v]:
            # Stale queue entry: v was already settled with a shorter
            # distance, so relaxing from it again would be wasted work.
            continue
        for nd, nv in edge_list[v]:
            if dist[nv] == -1 or dist[nv] > d + nd:
                dist[nv] = d + nd
                heapq.heappush(Q, (dist[nv], nv))
    return dist
# Distances from vertex 0 to every vertex of the tree.
cost = dijkstra(N, 0, N, G)
for i in range(Q):
    c, d = map(int, input().split())
    cost_c = cost[c - 1]
    cost_d = cost[d - 1]
    # In a tree the path length between c and d has the same parity as
    # depth(c) + depth(d), so an even difference means they meet on a vertex.
    if abs(cost_c - cost_d) % 2 == 0:
        print('Town')
    else:
        print('Road')
| StarcoderdataPython |
3417600 | #!/usr/bin/env python
# From Hendrik
import math, string, sys, os
import scipy
import scipy.integrate
def norm(k_vec):
    """Return the Euclidean norm of a 3-component vector."""
    x, y, z = k_vec[0], k_vec[1], k_vec[2]
    return math.sqrt(x**2 + y**2 + z**2)
def W_k(k_vec):
    """Fourier transform of the survey volume: a top-hat of half-widths
    l[0], l[1] in x and y, and a Gaussian of width l[2] along z.

    NOTE(review): relies on a module-level survey-size vector ``l`` being
    defined before the first call -- confirm it is set elsewhere.
    """
    a = k_vec[0]*l[0]/2
    b = k_vec[1]*l[1]/2
    c = k_vec[2]**2*l[2]**2/2
    # Bug fix: the original called the bare name ``exp`` (NameError);
    # this module only imports ``math``, so use math.exp.
    return math.exp(-c)*math.sin(a)/a*math.sin(b)/b
def f_k(k, R):
    """Fourier transform of a spherical top-hat window of radius *R*,
    evaluated at wavenumber *k* (tends to 1 as k*R -> 0)."""
    y = R * k
    return 3 / y**3 * (math.sin(y) - y * math.cos(y))
class Cosmology:
    """This class computes various cosmological quantities like comoving,
    angular diameter, luminosity distance, lookback time etc.. Distance
    definitions are from Hogg 1999, astro-ph/9905116.
    """
    def __init__(self, omega_m=0.27, omega_l=0.73, h=0.7, Gamma=0.2, n_s=1.0, sigma_8=0.81):
        # Density parameters; curvature follows from flatness constraint.
        self.omega_m = omega_m
        self.omega_l = omega_l
        self.omega_k = 1. - self.omega_m - self.omega_l
        self.h = h
        self.c = 2.99792458E8 # speed of light in m/s
        self.pc = 3.085678E16 # parsec in metres
        self.G = 6.673E-11 # Gravitational constant
        self.M_sun = 1.98892E30 # solar mass in kg
        self.H_0 = self.h * 100. * 1.E3 / 1.E6 / self.pc # Hubble constant in SI units
        self.dh = 3000./self.h # Hubble distance (Hogg eq. 4) in Mpc.
        self.th = 9.78e9/self.h # Hubble time in years
        self.th_sec = 3.09e17/self.h # Hubble time in seconds
        self.Gamma=Gamma # should be calculated by gamma=omega_m*h*exp(-omega_b*(1 + sqrt(2*h)/omega_m))
        self.n_s=n_s
        self.sigma_8=sigma_8
        # sigma_8 normalisation of the linear power spectrum (8 Mpc/h top-hat).
        # NOTE(review): ``scipy.Inf`` is removed in modern scipy; use math.inf.
        # Also relies on the module-level function f_k being defined.
        self.norm_int=1/(2*math.pi)**3 * 4*math.pi * scipy.integrate.quad(lambda k: k**2*self.P_L(k)*f_k(k,8.0)**2, 0, scipy.Inf)[0]
        self.A=self.sigma_8**2/self.norm_int
        self.ro_0=2.77786E11 # critical density in M_sun/Mpc**3
        # NOTE(review): this slope uses sigma_M, which currently returns the
        # radius R rather than sigma (see sigma_M below) -- verify intent.
        self.dlnsigma_dlnM=(math.log(self.sigma_M(10.**15))-math.log(self.sigma_M(10.**5)))/(math.log(15)-math.log(5))
        return
    def Ez(self, z):
        """E(z) function of Hogg's equation 14"""
        e = math.sqrt(self.omega_m*(1+z)**3 + self.omega_k*(1+z)**2 \
            + self.omega_l)
        return e
    def ooEz(self, z):
        """Returns 1/E(z), E(z) being Hogg's eq. 14."""
        return 1./self.Ez(z)
    def ooEzopz(self, z):
        """Returns 1/(E(z)*(1+z)), E(z) being Hogg's eq. 14."""
        return 1./(self.Ez(z)*(1+z))
    def dcom_los(self, z1, z2):
        """Returns the line of sight comoving distance between objects at
        redshifts z1 and z2, z2>z1. Value is in Mpc/h"""
        # NOTE(review): error path prints and returns -1 instead of raising;
        # callers such as dcom_tra do not check for it.
        if z1>=z2:
            print("z2 must be greater than z1")
            return -1
        dclos = self.dh * scipy.integrate.quad(self.ooEz, z1, z2)[0]
        return dclos
    def dcom_tra(self, z1, z2):
        """Returns the transverse comoving distance (proper motion distance)
        between objects at redshift z1 and z2."""
        dcl = self.dcom_los(z1, z2)
        # Branch on spatial curvature: flat, open (sinh), closed (sin).
        if self.omega_k == 0.0:
            dct = dcl
        elif self.omega_k > 0:
            dct = self.dh / math.sqrt(self.omega_k) \
                * math.sinh(math.sqrt(self.omega_k)*dcl/self.dh)
        else:
            dct = self.dh / math.sqrt(math.fabs(self.omega_k)) \
                * math.sin(math.sqrt(math.fabs(self.omega_k))*dcl/self.dh)
        return dct
    def dang(self, z1, z2):
        """Returns the angular diameter distance between objects at
        redshift z1 and z2."""
        dct = self.dcom_tra(z1, z2)
        return dct/(1+z2)
    def dlum(self, z1, z2):
        """Returns the luminosity distance between objects at
        redshift z1 and z2.
        WARNING! WARNING!
        This function is untested for z1>0!
        WARNING! WARNING!
        """
        dct = self.dcom_tra(z1, z2)
        return (1+z2)/(1+z1) * dct
    def covol(self, z):
        """Returns the comoving volume element d V_c in a solid angle
        d Omaga at redshift z."""
        da = self.dang(0, z)
        return self.dh * (1+z)**2 * da**2 / self.Ez(z)
    def tlook(self, z):
        """This function returns the lookback time in units of the
        Hubble time. The Hubble time can be accessed as the attributes
        th (in years) or th_sec (in seconds)."""
        tl = scipy.integrate.quad(self.ooEzopz, 0, z)[0]
        return tl
    def DM(self, z1, z2):
        """Returns the distance modulus between objects at
        redshift z1 and z2.
        """
        # 5*log10(d_L / 10 pc), with d_L in Mpc (1e-5 Mpc = 10 pc).
        x=self.dlum(z1,z2)
        return 5*math.log(x/1.e-5)/math.log(10)
    def rho_crit(self, z1):
        """Returns the critical density at z1 in SI units.
        """
        return 3*(self.Ez(z1)*self.H_0)**2/(8*math.pi*self.G)
    def Sigma_crit(self, z1, z2):
        """Returns the critical surface mass density for lenses at z1 and sources at z2 in SI units.
        """
        return self.c**2/(4*math.pi*self.G)*self.dang(0.,z2)/(self.dang(0.,z1)*self.dang(z1,z2))/(1.E6*self.pc)*self.h
    ########## Power spectrum and mass function #############
    def T_k(self, k): # the Transfer function
        q=k/self.Gamma
        T=math.log(1+2.34*q)/(2.34*q)*(1+3.89*q+(16.1*q)**2+(5.46*q)**3+(6.71*q)**4)**(-0.25)
        return T
    def H_sqd(self, a1): # the Hubble parameter
        # Squared Hubble parameter in (km/s/Mpc)**2 at scale factor a1.
        H=(100.*self.h)**2*(self.omega_m/(a1**3)+self.omega_l)
        return H
    def D_plus(self, a2): # the growth factor
        # Linear growth factor normalised to D(a=1)=1, via the integral form.
        def func(x):
            return 1/(self.omega_m/x+self.omega_l*x**2)**1.5
        integral=scipy.integrate.quad(func,0,a2)
        integral_0=scipy.integrate.quad(func,0,1)
        D_a=math.sqrt(self.H_sqd(a2))/100.*integral[0]
        D_0=math.sqrt(self.H_sqd(1))/100.*integral_0[0]
        return D_a/D_0
    def D_plus2(self, a2): # the growth factor
        # Carroll, Press & Turner style fitting-formula alternative to D_plus.
        om = self.omega_m/(a2+self.omega_m*(1.-a2)+self.omega_l*a2*(a2*a2-1.))
        ol = self.omega_l*a2*a2*a2/(a2+self.omega_m*(1.-a2)+self.omega_l*a2*(a2*a2-1.))
        g1 = 5./2.*self.omega_m/(self.omega_m**(4./7.)-self.omega_l+(1+self.omega_m/2.0)*(1.0+self.omega_l/70.0))
        g = 5./2.*om/(om**(4./7.)-ol+(1+om/2.0)*(1.0+ol/70.0))
        return a2*g/g1
    def P_L(self, k): # the linear CDM power spectrum
        P=self.T_k(k)**2*k**self.n_s
        return P
    def P_L_norm(self, k): # the normalised, linear CDM power spectrum
        P=self.A*self.T_k(k)**2*k**self.n_s
        return P
    def P_L_norm_z(self, k, z): # the normalised, linear CDM power spectrum
        P=self.A*self.T_k(k)**2*k**self.n_s*self.D_plus(1/(1+z))
        return P
    def d_ln_P_L_norm(self, k): # derivative of the normalised, linear CDM power spectrum
        # Two-sided finite difference of the logarithmic slope.
        P=(math.log(self.P_L_norm(k+k/1000.))-math.log(self.P_L_norm(k-k/1000.)))/(math.log(k+k/1000.)-math.log(k-k/1000.))
        return P
    def d_ln_P_L_norm_z(self, k,z): # derivative of the normalised, linear CDM power spectrum
        P=(math.log(self.P_L_norm_z(k+k/1000.,z))-math.log(self.P_L_norm_z(k-k/1000.,z)))/(math.log(k+k/1000.)-math.log(k-k/1000.))
        return P
    def Delta_sq_L_norm(self, k): # the normalised, linear, dimensionless CDM power spectrum
        P=self.A*self.T_k(k)**2*k**self.n_s*k**3/(2*math.pi**2)
        return P
    def Delta_sq_L_norm_z(self, k,z): # the normalised, linear, dimensionless CDM power spectrum
        P=self.A*self.T_k(k)**2*k**self.n_s*k**3/(2*math.pi**2)*self.D_plus(1/(1+z))
        return P
    def sigma_M(self, M):
        # rms mass fluctuation in a top-hat containing mass M.
        def func(k,R):
            return k**2*self.P_L_norm(k)*f_k(k,R)
        R=(M/self.ro_0*3/4/math.pi)**(1/3.)
        integrand=scipy.integrate.quad(func, 0, scipy.Inf, args=(R), limit=50000)[0]
        # NOTE(review): returns the top-hat radius R, not sigma(M); the
        # actual variance expression is commented out below and
        # ``integrand`` is computed but unused. Looks like an unfinished
        # change -- confirm which behaviour is intended.
        return R #1/(2*math.pi**2)*integrand
    def Jenkins(self, M):
        # Jenkins et al. mass function; depends on sigma_M (see NOTE above).
        return 0.315*self.ro_0/M**2*self.dlnsigma_dlnM*math.exp(-math.sqrt((0.61-math.log(self.sigma_M(M)))**2)**3.8)
    def f96(self, x, n_eff): # Peacock and Dodds 1996 fitting formula
        A_c=0.482*(1.+n_eff/3.)**(-0.947)
        B_c=0.226*(1.+n_eff/3.)**(-1.778)
        alpha_c=3.310*(1.+n_eff/3.)**(-0.244)
        beta_c=0.862*(1.+n_eff/3.)**(-0.287)
        V_c=11.55*(1.+n_eff/3.)**(-0.423)
        g=5./2.*self.omega_m*(self.omega_m**(4./7.)-self.omega_l+(1+self.omega_m/2)*(1+self.omega_l/70))**(-1)
        return x*((1+B_c*beta_c*x+(A_c*x)**(alpha_c*beta_c))/(1+((A_c*x)**alpha_c*g**3/(V_c*x**0.5))**beta_c))**(1/beta_c)
    def Delta_sq_NL_PD96_norm(self, k_L): # the normalised, non-linear, dimensionless CDM power spectrum from Peacock and Dodds 1996
        n_eff=self.d_ln_P_L_norm(k_L/2.)
        return self.f96(self.Delta_sq_L_norm(k_L), n_eff)
    def Delta_sq_NL_PD96_norm_z(self, k_L,z): # the normalised, non-linear, dimensionless CDM power spectrum from Peacock and Dodds 1996
        n_eff=self.d_ln_P_L_norm_z(k_L/2.,z)
        return self.f96(self.Delta_sq_L_norm_z(k_L,z), n_eff)
    def P_NL_PD96_norm(self, k): # the normalised, non-linear CDM power spectrum from Peacock and Dodds 1996
        return self.Delta_sq_NL_PD96_norm(k)*((k/self.k_L_over_k_NL_PD96(self.Delta_sq_NL_PD96_norm(k)))**3/(2*math.pi**2))**(-1)
    def P_NL_PD96_norm_z(self, k, z): # the normalised, non-linear CDM power spectrum from Peacock and Dodds 1996
        return self.Delta_sq_NL_PD96_norm_z(k,z)*((k/self.k_L_over_k_NL_PD96(self.Delta_sq_NL_PD96_norm_z(k,z)))**3/(2*math.pi**2))**(-1)
    def k_L_over_k_NL_PD96(self, Delta):
        # Ratio of linear to non-linear wavenumber for a given Delta^2.
        return (1+Delta)**(-1./3.)
| StarcoderdataPython |
3375935 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import prim_attr_register, PrimitiveWithInfer
# sum = input1 + input2 + const_bias
class CusAdd3(PrimitiveWithInfer):
    """Custom add3 definition: element-wise input1 + input2 + const_bias."""
    @prim_attr_register
    def __init__(self, const_bias=0.0):
        self.init_prim_io_names(inputs=['input1', 'input2'], outputs=['sum3'])
        # Imported for its side effect only: loading the module registers
        # the custom operator implementation with MindSpore.
        from add3_impl import CusAdd3Impl
    def infer_shape(self, input1, input2):
        # Output shape equals the first input's shape
        # (assumes both inputs share a shape -- no broadcasting here).
        return input1
    def infer_dtype(self, input1, input2):
        # Output dtype equals the first input's dtype.
        return input1
| StarcoderdataPython |
5093589 | <gh_stars>100-1000
"""
XX. Model inheritance
Model inheritance across apps can result in models with the same name resulting
in the need for an %(app_label)s format string. This app specifically tests
this feature by redefining the Copy model from model_inheritance/models.py
"""
from django.db import models
from modeltests.model_inheritance.models import NamedURL
#
# Abstract base classes with related models
#
class Copy(NamedURL):
    # Body text of the copy; NamedURL supplies the name/url fields.
    content = models.TextField()

    def __unicode__(self):
        # Python 2 style string representation (this is legacy Django code).
        return self.content
| StarcoderdataPython |
9661982 | <filename>fisher_score.py
from __future__ import division, print_function
import nibabel as nb
import numpy as np
import click
import os
import csv
import itertools
from tqdm import tqdm
from healthybrains.inputoutput import id_from_file_name
@click.command()
@click.option("--targets")
@click.argument("file_names", nargs=-1)
def main(targets, file_names):
    """For each of three boolean target features, split the NIfTI volumes
    into the 0/1 groups and save each group's voxel-wise median and
    standard deviation to data/*.npz."""
    # Target matrix: one row per subject, assumed indexed by file id - 1
    # (TODO confirm against how the CSV is produced).
    targets = np.genfromtxt(targets, delimiter=",")
    # NOTE(review): ``shape`` is computed but never used (it does load the
    # first volume as a side effect).
    shape = nb.load(file_names[0]).shape[:-1]
    file_names = np.array(file_names)
    file_ids = [id_from_file_name(file_name)
                for file_name in file_names]
    # NOTE(review): ``np.bool`` is removed in NumPy >= 1.24; use ``bool``.
    feature = np.array([
        targets[file_id - 1, :]
        for file_id in file_ids], dtype=np.bool)
    for f in range(3):
        print("feature", f)
        for i in range(2):
            # Files whose feature f equals i (0 or 1).
            feature_files = file_names[feature[:, f] == i]
            # Stack all volumes of the group along a new last axis.
            data = np.stack([
                np.squeeze(nb.load(file_name).get_data())
                for file_name in feature_files],
                axis=-1
            )
            median = np.median(data, axis=-1)
            sd = np.std(data, dtype=np.float32, axis=-1)
            np.savez(
                "data/median_{0}_{1}.npz".format(i, f),
                median
            )
            np.savez(
                "data/sd_{0}_{1}.npz".format(i, f),
                sd
            )


if __name__ == "__main__":
    main()
| StarcoderdataPython |
11290214 | <reponame>gf-atebbe/python-mandrel<filename>mandrel/test/integration_test.py
import unittest
import os
import yaml
from mandrel.test import utils
import mandrel
from mandrel import config
BOOTSTRAP = """
bootstrap.SEARCH_PATHS.insert(0, 'specific_config')
bootstrap.DISABLE_EXISTING_LOGGERS = False
"""
def logger_conf(name, logfile):
    """Build a ``logging.config.fileConfig``-style configuration string.

    Declares a root logger plus ``common<name>`` and ``app<name>`` loggers;
    the root logs to stderr while the two named loggers log to *logfile*.
    """
    sections = [
        f"[loggers]\nkeys=root,common{name},app{name}\n\n",
        f"[handlers]\nkeys=consoleHandler{name},fileHandler{name}\n\n",
        f"[formatters]\nkeys=simpleFormatter{name}\n\n",
        f"[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler{name}\n\n",
        f"[logger_app{name}]\nlevel=DEBUG\npropagate=0\nhandlers=fileHandler{name}\nqualname=app{name}\n\n",
        f"[logger_common{name}]\nlevel=DEBUG\npropagate=0\nhandlers=fileHandler{name}\nqualname=common{name}\n\n",
        f"[handler_consoleHandler{name}]\nclass=StreamHandler\nlevel=DEBUG\nformatter=simpleFormatter{name}\nargs=(sys.stderr,)\n\n",
        f"[handler_fileHandler{name}]\nclass=FileHandler\nlevel=DEBUG\nformatter=simpleFormatter{name}\nargs=('{logfile}',)\n\n",
        # The %(...)s placeholders below are literal fileConfig syntax.
        f"[formatter_simpleFormatter{name}]\n"
        "format=%(asctime)s - %(levelname)s - %(name)s - %(message)s\ndatefmt=\n\n",
    ]
    return "".join(sections)
def scenario(scenario_name, log=(), common=(), app=()):
    """Decorator factory that sandboxes a test in a bootstrap scenario.

    For each directory listed in *common*/*app* it writes a
    ``common<scenario_name>.yaml`` / ``app<scenario_name>.yaml`` config file,
    and for each directory in *log* a ``logging.cfg``; it then refreshes the
    bootstrapper so the test observes those files on its search path.
    """
    def decorator(wrapped):
        def func(*positionals, **keywords):
            with utils.bootstrap_scenario(text=BOOTSTRAP) as spec:
                # path/bootstrap_file are provided by the scenario context
                # but not used directly here.
                path, bootstrap_file = spec
                os.mkdir('specific_config')
                for name, levels in (('common', common), ('app', app)):
                    for level in levels:
                        p = os.path.join(os.path.realpath(level), '%s%s.yaml' % (name, scenario_name))
                        with open(p, 'w') as stream:
                            # Values encode their directory so tests can
                            # detect which search-path level won.
                            yaml.dump({'%s_a' % name: 'a_%s' % level, '%s_b' % name: 'b_%s' % level}, stream)
                for level in log:
                    p = os.path.join(os.path.realpath(level), 'logging.cfg')
                    with open(p, 'w') as stream:
                        stream.write(logger_conf(scenario_name, '%s.log' % level))
                utils.refresh_bootstrapper()
                return wrapped(*positionals, **keywords)
        return func
    return decorator
def get_common(name, *chain):
    """Resolve configuration for the ``common<name>`` component."""
    config_cls = type(
        'Common',
        (mandrel.config.Configuration,),
        {'NAME': 'common%s' % name},
    )
    return config_cls.get_configuration(*chain)
def get_app(name, *chain):
    """Resolve configuration for the ``app<name>`` component."""
    config_cls = type(
        'App',
        (mandrel.config.Configuration,),
        {'NAME': 'app%s' % name},
    )
    return config_cls.get_configuration(*chain)
class TestIntegratedScenarios(unittest.TestCase):
    """End-to-end checks that config files and logging configuration are
    picked up from the bootstrap search path."""

    @scenario('simple', log=('',), common=('',), app=('',))
    def testSimple(self):
        # In this case, all three configs are at the end of the search path.
        c = get_common('simple')
        a = get_app('simple')
        self.assertEqual('a_', c.common_a)
        self.assertEqual('b_', c.common_b)
        self.assertEqual('a_', a.app_a)
        self.assertEqual('b_', a.app_b)
        c.get_logger().info('common message')
        a.get_logger().info('app message')
        # logger_conf named the log file '<level>.log'; level here is ''.
        log = open('.log', 'r').read()
        self.assertTrue(' - commonsimple - common message' in log)
        self.assertTrue(' - appsimple - app message' in log)

    @scenario('specific', log=('','specific_config'), common=('', 'specific_config'), app=('', 'specific_config'))
    def testSpecific(self):
        # In this case, all three configs are in both levels of search path;
        # the specific_config one should win.
        c = get_common('specific')
        a = get_app('specific')
        self.assertEqual('a_specific_config', c.common_a)
        self.assertEqual('b_specific_config', c.common_b)
        self.assertEqual('a_specific_config', a.app_a)
        self.assertEqual('b_specific_config', a.app_b)
        c.get_logger().info('common message')
        a.get_logger().info('app message')
        log = open('specific_config.log', 'r').read()
        self.assertTrue(' - commonspecific - common message' in log)
        self.assertTrue(' - appspecific - app message' in log)
| StarcoderdataPython |
11253843 | <reponame>johntellsall/minibatch<filename>minibatch/window.py
import datetime
from minibatch import Buffer, Stream
from minibatch.models import Window
class WindowEmitter(object):
    """
    a window into a stream of buffered objects

    WindowEmitter.run() implements the generic emitter protocol as follows:

    1. determine if a window is ready to be processed
    2. retrieve the data from the buffer to create a Window
    3. process the data (i.e. mark the buffered data processed)
    4. run the emit function on the window

    Note that run() is blocking. Between running the protocol,
    it will sleep to conserve resources.

    Each time run() wakes up, it will call the following methods in turn:

        window_ready() - called to determine if the buffer contains enough
                         data for a window.
        query()        - return the Buffer objects to process
        process()      - process the data
        timestamp()    - timestamp the stream for the next processing
        commit()       - commit processed data back to the buffer. by
                         default this means removing the objects from the
                         buffer and deleting the window.
        sleep()        - sleep until the next round

    Use timestamp() to mark the stream (or the buffer data) for the next
    round. Use sleep() to set the amount of time to sleep. Depending on
    the emitter's semantics this may be a e.g. a fixed interval or some
    function of the data.

    WindowEmitter implements several defaults:

        process() - mark all data returned by query() as processed
        sleep()   - sleep self.interval / 2 seconds
        undo()    - called if the emit function raises an exception. marks
                    the data returned by query() as not processed and deletes
                    the window

    For examples of how to implement a custom emitter see TimeWindow,
    CountWindow and SampleFunctionWindow.

    Note there should only be one WindowEmitter per stream. This is a
    limitation of the Buffer's way of marking documents as processed (a
    boolean flag). This decision was made in favor of performance and
    simplicity. Supporting concurrent emitters would mean each Buffer object
    needs to keep track of which emitter has processed its data and make sure
    Window objects are processed by exactly one emitter.
    """
    def __init__(self, stream, interval=None, processfn=None, emitfn=None,
                 emit_empty=False):
        self.stream_name = stream
        self.interval = interval
        self.emit_empty = emit_empty
        self.emitfn = emitfn
        self.processfn = processfn
        self._stream = None
        self._window = None  # current window if any
        self._delete_on_commit = True

    def query(self, *args):
        """Return the Buffer objects to process; must be overridden."""
        # Bug fix: the original raised ``NotImplemented()`` -- that is the
        # NotImplemented singleton (not callable, not an exception), which
        # produced a TypeError instead of the intended abstract-method error.
        raise NotImplementedError()

    def window_ready(self):
        """ return a tuple of (ready, qargs) """
        raise NotImplementedError()

    def timestamp(self, query_args):
        # NOTE(review): subclasses override with ``timestamp(self, *args)``;
        # run() calls ``self.timestamp(*query_args)``, so this base signature
        # only works when query_args has exactly one element.
        self.stream.modify(query={}, last_read=datetime.datetime.now())

    @property
    def stream(self):
        """Lazily resolve and cache the Stream object for stream_name."""
        if self._stream:
            return self._stream
        self._stream = Stream.get_or_create(self.stream_name)
        return self._stream

    def process(self, qs):
        """Mark the queried buffer objects processed (or delegate to processfn)."""
        if self.processfn:
            return self.processfn(qs)
        data = []
        for obj in qs:
            obj.modify(processed=True)
            data.append(obj)
        return data

    def undo(self, qs):
        """Roll back process(): un-mark the objects and drop a created window."""
        for obj in qs:
            obj.modify(processed=False)
        if self._window:
            self._window.delete()
        return qs

    def persist(self, flag=True):
        """If *flag* is true, keep windows/buffers instead of deleting on commit."""
        self._delete_on_commit = not flag

    def commit(self, qs, window):
        """Finalize a successfully emitted window (delete or mark processed)."""
        if not self._delete_on_commit:
            window.modify(processed=True)
            return
        for obj in qs:
            obj.delete()
        window.delete()

    def emit(self, qs):
        """Create a Window document from the buffered data and run emitfn on it."""
        self._window = Window(stream=self.stream.name,
                              data=[obj.data for obj in qs]).save()
        if self.emitfn:
            # emitfn may return a replacement window or None (keep current).
            self._window = self.emitfn(self._window) or self._window
        return self._window

    def sleep(self):
        import time
        time.sleep((self.interval or self.stream.interval) / 2.0)

    def run(self):
        """Blocking loop implementing the emitter protocol described above."""
        while True:
            ready, query_args = self.window_ready()
            if ready:
                qs = self.query(*query_args)
                qs = self.process(qs)
                if qs or self.emit_empty:
                    try:
                        window = self.emit(qs)
                    except Exception as e:
                        self.undo(qs)
                        print(str(e))
                    else:
                        self.commit(qs, window)
                    finally:
                        # Always advance the stream marker, success or not.
                        self.timestamp(*query_args)
            self.sleep()
class FixedTimeWindow(WindowEmitter):
    """
    a fixed time-interval window

    Yields windows of all data retrieved in fixed intervals of n
    seconds. Note that windows are created in fixed-block sequences,
    i.e. in steps of n_seconds since the start of the stream. Empty
    windows are also emitted. This guarantees that any window
    contains only those documents received in that particular window.
    This is useful if you want to count e.g. the number of events
    per time-period.

    Usage:

        @stream(name, interval=n_seconds)
        def myproc(window):
            # ...
    """
    def __init__(self, *args, **kwargs):
        super(FixedTimeWindow, self).__init__(*args, **kwargs)
        # Fixed windows must be emitted even when empty so no interval is skipped.
        self.emit_empty = True
    def window_ready(self):
        # Ready once wall-clock time has passed the end of the current interval.
        stream = self.stream
        last_read = stream.last_read
        now = datetime.datetime.now()
        max_read = last_read + datetime.timedelta(seconds=self.interval)
        return now > max_read, (last_read, max_read)
    def query(self, *args):
        # Select exactly the documents created inside [last_read, max_read].
        last_read, max_read = args
        fltkwargs = dict(created__gte=last_read, created__lte=max_read)
        return Buffer.objects.no_cache().filter(**fltkwargs)
    def timestamp(self, *args):
        # Advance the stream marker only if nobody moved it past last_read
        # in the meantime (guarded by the last_read__gte query).
        last_read, max_read = args
        self.stream.modify(query=dict(last_read__gte=last_read), last_read=max_read)
        self.stream.reload()
    def sleep(self):
        import time
        # we have strict time windows, only sleep if we are up to date
        if self.stream.last_read > datetime.datetime.now() - datetime.timedelta(seconds=self.interval):
            # sleep slightly longer to make sure the interval is complete
            # and all data had a chance to accumulate. if we don't do
            # this we might get empty windows on accident, resulting in
            # lost data
            time.sleep(self.interval + 0.25)
class RelaxedTimeWindow(WindowEmitter):
    """
    a relaxed time-interval window

    Every interval n_seconds, yields windows of all data in the buffer
    since the last successful retrieval of data. This does _not_
    guarantee the data retrieved is in a specific time range. This is
    useful if you want to retrieve data every n_seconds but do not
    care when the data was inserted into the buffer.

    Usage:

        @stream(name, interval=n_seconds)
        def myproc(window):
            # ...
    """
    def window_ready(self):
        # Always ready; the window simply spans (last_read, now].
        stream = self.stream
        last_read = stream.last_read
        max_read = datetime.datetime.now()
        return True, (last_read, max_read)
    def query(self, *args):
        # Unlike FixedTimeWindow this also filters on processed=False,
        # so documents are never handed out twice.
        last_read, max_read = args
        fltkwargs = dict(created__gt=last_read, created__lte=max_read,
                         processed=False)
        return Buffer.objects.no_cache().filter(**fltkwargs)
    def timestamp(self, *args):
        # Advance the marker only if last_read is still what we observed.
        last_read, max_read = args
        self.stream.modify(query=dict(last_read=last_read), last_read=max_read)
        self.stream.reload()
class CountWindow(WindowEmitter):
    """A count-based window: emits once ``interval`` unprocessed documents
    have accumulated in the buffer."""
    def window_ready(self):
        # Here ``interval`` is a document count, not a number of seconds.
        qs = Buffer.objects.no_cache().filter(processed=False).limit(self.interval)
        # Cache the fetched batch so query() does not hit the database again.
        self._data = list(qs)
        return len(self._data) >= self.interval, ()
    def query(self, *args):
        return self._data
    def timestamp(self, *args):
        self.stream.modify(query={}, last_read=datetime.datetime.now())
    def sleep(self):
        import time
        # Poll frequently; readiness depends on data volume, not time.
        time.sleep(0.1)
| StarcoderdataPython |
11322450 | """Test to verify that we can load components."""
from unittest.mock import ANY, patch
import pytest
from homeassistant import core, loader
from homeassistant.components import http, hue
from homeassistant.components.hue import light as hue_light
from tests.common import MockModule, async_mock_service, mock_integration
async def test_component_dependencies(hass):
    """Test if we can get the proper load order of components."""
    mock_integration(hass, MockModule("mod1"))
    mock_integration(hass, MockModule("mod2", ["mod1"]))
    mod_3 = mock_integration(hass, MockModule("mod3", ["mod2"]))
    # The transitive dependency set includes the component itself.
    assert {"mod1", "mod2", "mod3"} == await loader._async_component_dependencies(
        hass, "mod_3", mod_3, set(), set()
    )
    # Create circular dependency
    mock_integration(hass, MockModule("mod1", ["mod3"]))
    with pytest.raises(loader.CircularDependency):
        print(
            await loader._async_component_dependencies(
                hass, "mod_3", mod_3, set(), set()
            )
        )
    # Depend on non-existing component
    mod_1 = mock_integration(hass, MockModule("mod1", ["nonexisting"]))
    with pytest.raises(loader.IntegrationNotFound):
        print(
            await loader._async_component_dependencies(
                hass, "mod_1", mod_1, set(), set()
            )
        )
    # Having an after dependency 2 deps down that is circular
    mod_1 = mock_integration(
        hass, MockModule("mod1", partial_manifest={"after_dependencies": ["mod_3"]})
    )
    with pytest.raises(loader.CircularDependency):
        print(
            await loader._async_component_dependencies(
                hass, "mod_3", mod_3, set(), set()
            )
        )


def test_component_loader(hass):
    """Test loading components via the Components accessor object."""
    components = loader.Components(hass)
    assert components.http.CONFIG_SCHEMA is http.CONFIG_SCHEMA
    assert hass.components.http.CONFIG_SCHEMA is http.CONFIG_SCHEMA


def test_component_loader_non_existing(hass):
    """Test that accessing an unknown component raises ImportError."""
    components = loader.Components(hass)
    with pytest.raises(ImportError):
        components.non_existing


async def test_component_wrapper(hass):
    """Test component wrapper."""
    calls = async_mock_service(hass, "persistent_notification", "create")
    components = loader.Components(hass)
    components.persistent_notification.async_create("message")
    await hass.async_block_till_done()
    assert len(calls) == 1


async def test_helpers_wrapper(hass):
    """Test helpers wrapper."""
    helpers = loader.Helpers(hass)
    result = []

    @core.callback
    def discovery_callback(service, discovered):
        """Handle discovery callback."""
        result.append(discovered)

    helpers.discovery.async_listen("service_name", discovery_callback)
    await helpers.discovery.async_discover("service_name", "hello", None, {})
    await hass.async_block_till_done()
    assert result == ["hello"]
async def test_custom_component_name(hass):
    """Test the name attribute of custom components."""
    # Standalone (single-file) custom component.
    integration = await loader.async_get_integration(hass, "test_standalone")
    int_comp = integration.get_component()
    assert int_comp.__name__ == "custom_components.test_standalone"
    assert int_comp.__package__ == "custom_components"
    comp = hass.components.test_standalone
    assert comp.__name__ == "custom_components.test_standalone"
    assert comp.__package__ == "custom_components"
    # Package-style custom component.
    integration = await loader.async_get_integration(hass, "test_package")
    int_comp = integration.get_component()
    assert int_comp.__name__ == "custom_components.test_package"
    assert int_comp.__package__ == "custom_components.test_package"
    comp = hass.components.test_package
    assert comp.__name__ == "custom_components.test_package"
    assert comp.__package__ == "custom_components.test_package"
    integration = await loader.async_get_integration(hass, "test")
    platform = integration.get_platform("light")
    assert platform.__name__ == "custom_components.test.light"
    assert platform.__package__ == "custom_components.test"
    # Test custom components is mounted
    from custom_components.test_package import TEST

    assert TEST == 5


async def test_log_warning_custom_component(hass, caplog):
    """Test that we log a warning when loading a custom component."""
    await loader.async_get_integration(hass, "test_standalone")
    assert "You are using a custom integration test_standalone" in caplog.text
    await loader.async_get_integration(hass, "test")
    assert "You are using a custom integration test " in caplog.text


async def test_custom_integration_missing_version(hass, caplog):
    """Test that we log a warning when custom integrations are missing a version."""
    test_integration_1 = loader.Integration(
        hass, "custom_components.test1", None, {"domain": "test1"}
    )
    test_integration_2 = loader.Integration(
        hass,
        "custom_components.test2",
        None,
        loader.manifest_from_legacy_module("test2", "custom_components.test2"),
    )
    with patch("homeassistant.loader.async_get_custom_components") as mock_get:
        mock_get.return_value = {
            "test1": test_integration_1,
            "test2": test_integration_2,
        }
        await loader.async_get_integration(hass, "test1")
        assert (
            "No 'version' key in the manifest file for custom integration 'test1'."
            in caplog.text
        )
        await loader.async_get_integration(hass, "test2")
        assert (
            "No 'version' key in the manifest file for custom integration 'test2'."
            in caplog.text
        )


async def test_no_version_warning_for_none_custom_integrations(hass, caplog):
    """Test that we do not log a warning when core integrations are missing a version."""
    await loader.async_get_integration(hass, "hue")
    assert (
        "No 'version' key in the manifest file for custom integration 'hue'."
        not in caplog.text
    )


async def test_custom_integration_version_not_valid(hass, caplog):
    """Test that we log a warning when custom integrations have a invalid version."""
    test_integration = loader.Integration(
        hass, "custom_components.test", None, {"domain": "test", "version": "test"}
    )
    with patch("homeassistant.loader.async_get_custom_components") as mock_get:
        mock_get.return_value = {"test": test_integration}
        await loader.async_get_integration(hass, "test")
        assert (
            "'test' is not a valid version for custom integration 'test'."
            in caplog.text
        )


async def test_get_integration(hass):
    """Test resolving integration."""
    integration = await loader.async_get_integration(hass, "hue")
    assert hue == integration.get_component()
    assert hue_light == integration.get_platform("light")


async def test_get_integration_legacy(hass):
    """Test resolving integration."""
    integration = await loader.async_get_integration(hass, "test_embedded")
    assert integration.get_component().DOMAIN == "test_embedded"
    assert integration.get_platform("switch") is not None


async def test_get_integration_custom_component(hass, enable_custom_integrations):
    """Test resolving integration."""
    integration = await loader.async_get_integration(hass, "test_package")
    assert integration.get_component().DOMAIN == "test_package"
    assert integration.name == "Test Package"
def test_integration_properties(hass):
    """Test integration properties."""
    # Case 1: built-in integration with a fully populated manifest.
    integration = loader.Integration(
        hass,
        "homeassistant.components.hue",
        None,
        {
            "name": "Philips Hue",
            "domain": "hue",
            "dependencies": ["test-dep"],
            "requirements": ["test-req==1.0.0"],
            "zeroconf": ["_hue._tcp.local."],
            "homekit": {"models": ["BSB002"]},
            "dhcp": [
                {"hostname": "tesla_*", "macaddress": "4CFCAA*"},
                {"hostname": "tesla_*", "macaddress": "044EAF*"},
                {"hostname": "tesla_*", "macaddress": "98ED5C*"},
            ],
            "ssdp": [
                {
                    "manufacturer": "Royal Philips Electronics",
                    "modelName": "Philips hue bridge 2012",
                },
                {
                    "manufacturer": "Royal Philips Electronics",
                    "modelName": "Philips hue bridge 2015",
                },
                {"manufacturer": "Signify", "modelName": "Philips hue bridge 2015"},
            ],
            "mqtt": ["hue/discovery"],
            "version": "1.0.0",
        },
    )
    assert integration.name == "Philips Hue"
    assert integration.domain == "hue"
    assert integration.homekit == {"models": ["BSB002"]}
    assert integration.zeroconf == ["_hue._tcp.local."]
    assert integration.dhcp == [
        {"hostname": "tesla_*", "macaddress": "4CFCAA*"},
        {"hostname": "tesla_*", "macaddress": "044EAF*"},
        {"hostname": "tesla_*", "macaddress": "98ED5C*"},
    ]
    assert integration.ssdp == [
        {
            "manufacturer": "Royal Philips Electronics",
            "modelName": "Philips hue bridge 2012",
        },
        {
            "manufacturer": "Royal Philips Electronics",
            "modelName": "Philips hue bridge 2015",
        },
        {"manufacturer": "Signify", "modelName": "Philips hue bridge 2015"},
    ]
    assert integration.mqtt == ["hue/discovery"]
    assert integration.dependencies == ["test-dep"]
    assert integration.requirements == ["test-req==1.0.0"]
    assert integration.is_built_in is True
    assert integration.version == "1.0.0"
    # Case 2: custom integration with a minimal manifest -- all optional
    # discovery properties default to None.
    integration = loader.Integration(
        hass,
        "custom_components.hue",
        None,
        {
            "name": "Philips Hue",
            "domain": "hue",
            "dependencies": ["test-dep"],
            "requirements": ["test-req==1.0.0"],
        },
    )
    assert integration.is_built_in is False
    assert integration.homekit is None
    assert integration.zeroconf is None
    assert integration.dhcp is None
    assert integration.ssdp is None
    assert integration.mqtt is None
    assert integration.version is None
    # Case 3: custom integration with a dict-style zeroconf matcher.
    integration = loader.Integration(
        hass,
        "custom_components.hue",
        None,
        {
            "name": "Philips Hue",
            "domain": "hue",
            "dependencies": ["test-dep"],
            "zeroconf": [{"type": "_hue._tcp.local.", "name": "hue*"}],
            "requirements": ["test-req==1.0.0"],
        },
    )
    assert integration.is_built_in is False
    assert integration.homekit is None
    assert integration.zeroconf == [{"type": "_hue._tcp.local.", "name": "hue*"}]
    assert integration.dhcp is None
    assert integration.ssdp is None
async def test_integrations_only_once(hass):
    """Ensure repeated lookups of the same integration share one load."""
    first = hass.async_create_task(loader.async_get_integration(hass, "hue"))
    second = hass.async_create_task(loader.async_get_integration(hass, "hue"))
    assert await first is await second
async def test_get_custom_components_internal(hass):
    """Check the internal custom-component discovery helper directly."""
    # pylint: disable=protected-access
    result = await loader._async_get_custom_components(hass)
    assert result == {"test": ANY, "test_package": ANY}
def _get_test_integration(hass, name, config_flow):
    """Build a throwaway Integration whose manifest is derived from *name*."""
    manifest = {
        "name": name,
        "domain": name,
        "config_flow": config_flow,
        "dependencies": [],
        "requirements": [],
        "zeroconf": [f"_{name}._tcp.local."],
        "homekit": {"models": [name]},
        "ssdp": [{"manufacturer": name, "modelName": name}],
        "mqtt": [f"{name}/discovery"],
    }
    return loader.Integration(
        hass, f"homeassistant.components.{name}", None, manifest)
def _get_test_integration_with_zeroconf_matcher(hass, name, config_flow):
    """Build a test Integration whose zeroconf entry is a dict matcher."""
    manifest = {
        "name": name,
        "domain": name,
        "config_flow": config_flow,
        "dependencies": [],
        "requirements": [],
        "zeroconf": [{"type": f"_{name}._tcp.local.", "name": f"{name}*"}],
        "homekit": {"models": [name]},
        "ssdp": [{"manufacturer": name, "modelName": name}],
    }
    return loader.Integration(
        hass, f"homeassistant.components.{name}", None, manifest)
def _get_test_integration_with_dhcp_matcher(hass, name, config_flow):
    """Build a test Integration carrying a fixed set of dhcp matchers."""
    manifest = {
        "name": name,
        "domain": name,
        "config_flow": config_flow,
        "dependencies": [],
        "requirements": [],
        "zeroconf": [],
        "dhcp": [
            {"hostname": "tesla_*", "macaddress": "4CFCAA*"},
            {"hostname": "tesla_*", "macaddress": "044EAF*"},
            {"hostname": "tesla_*", "macaddress": "98ED5C*"},
        ],
        "homekit": {"models": [name]},
        "ssdp": [{"manufacturer": name, "modelName": name}],
    }
    return loader.Integration(
        hass, f"homeassistant.components.{name}", None, manifest)
async def test_get_custom_components(hass, enable_custom_integrations):
    """Verify that custom components are cached."""
    fakes = {
        "test_1": _get_test_integration(hass, "test_1", False),
        "test_2": _get_test_integration(hass, "test_2", True),
    }
    name = "homeassistant.loader._async_get_custom_components"
    with patch(name) as mock_get:
        mock_get.return_value = fakes
        assert await loader.async_get_custom_components(hass) == fakes
        # Second call must be served from the cache.
        assert await loader.async_get_custom_components(hass) == fakes
        mock_get.assert_called_once_with(hass)
async def test_get_config_flows(hass):
    """Verify that custom components with config_flow are available."""
    with patch("homeassistant.loader.async_get_custom_components") as mock_get:
        mock_get.return_value = {
            "test_1": _get_test_integration(hass, "test_1", False),
            "test_2": _get_test_integration(hass, "test_2", True),
        }
        flows = await loader.async_get_config_flows(hass)
    assert "test_2" in flows
    assert "test_1" not in flows
async def test_get_zeroconf(hass):
    """Verify that custom components with zeroconf are found."""
    with patch("homeassistant.loader.async_get_custom_components") as mock_get:
        mock_get.return_value = {
            "test_1": _get_test_integration(hass, "test_1", True),
            "test_2": _get_test_integration_with_zeroconf_matcher(
                hass, "test_2", True
            ),
        }
        zeroconf = await loader.async_get_zeroconf(hass)
    assert zeroconf["_test_1._tcp.local."] == [{"domain": "test_1"}]
    assert zeroconf["_test_2._tcp.local."] == [
        {"domain": "test_2", "name": "test_2*"}
    ]
async def test_get_dhcp(hass):
    """Verify that custom components with dhcp are found."""
    with patch("homeassistant.loader.async_get_custom_components") as mock_get:
        mock_get.return_value = {
            "test_1": _get_test_integration_with_dhcp_matcher(hass, "test_1", True),
        }
        dhcp = await loader.async_get_dhcp(hass)
    matchers = [entry for entry in dhcp if entry["domain"] == "test_1"]
    assert matchers == [
        {"domain": "test_1", "hostname": "tesla_*", "macaddress": "4CFCAA*"},
        {"domain": "test_1", "hostname": "tesla_*", "macaddress": "044EAF*"},
        {"domain": "test_1", "hostname": "tesla_*", "macaddress": "98ED5C*"},
    ]
async def test_get_homekit(hass):
    """Verify that custom components with homekit are found."""
    with patch("homeassistant.loader.async_get_custom_components") as mock_get:
        mock_get.return_value = {
            name: _get_test_integration(hass, name, True)
            for name in ("test_1", "test_2")
        }
        homekit = await loader.async_get_homekit(hass)
    assert homekit["test_1"] == "test_1"
    assert homekit["test_2"] == "test_2"
async def test_get_ssdp(hass):
    """Verify that custom components with ssdp are found."""
    with patch("homeassistant.loader.async_get_custom_components") as mock_get:
        mock_get.return_value = {
            name: _get_test_integration(hass, name, True)
            for name in ("test_1", "test_2")
        }
        ssdp = await loader.async_get_ssdp(hass)
    for name in ("test_1", "test_2"):
        assert ssdp[name] == [{"manufacturer": name, "modelName": name}]
async def test_get_mqtt(hass):
    """Verify that custom components with MQTT are found."""
    with patch("homeassistant.loader.async_get_custom_components") as mock_get:
        mock_get.return_value = {
            name: _get_test_integration(hass, name, True)
            for name in ("test_1", "test_2")
        }
        mqtt = await loader.async_get_mqtt(hass)
    assert mqtt["test_1"] == ["test_1/discovery"]
    assert mqtt["test_2"] == ["test_2/discovery"]
async def test_get_custom_components_safe_mode(hass):
    """In safe mode no custom components may be exposed."""
    hass.config.safe_mode = True
    result = await loader.async_get_custom_components(hass)
    assert result == {}
| StarcoderdataPython |
1925509 | from wrappers.glove_obs_wrapper import GloveObsWrapper
from wrappers.tokenize_obs_wrapper import TokenizeObsWrapper
from wrappers.floor_obs_wrapper import FloorObsWrapper
from wrappers.restful_cartpole_v0_wrapper import RestfulCartPoleV0Wrapper
from wrappers.restful_acrobot_v1_wrapper_v1 import RestfulAcrobotV1WrapperV1
from wrappers.cartpole_v0_http_layer_v1 import HttpController, HttpClient, CartPoleV0HttpHandler, HttpMethods
from wrappers.acrobot_v1_http_layer_v1 import HttpController, HttpClient, AcrobotV1HttpHandlerV1, HttpMethods
from wrappers.acrobot_v1_reward_modified import Acrobot_v1_reward_modified | StarcoderdataPython |
6456410 | from django.shortcuts import render, redirect
from ..models import Assignment, AssignmentModule, Course
from .. import forms
from .utilities import alwaysContext
class ModuleWrapper():
    """Template-facing container pairing a module label/id with its assignments.

    ``id`` 0 is used by the views below as the pseudo-module that holds
    assignments belonging to no real module.
    """

    def __init__(self, name, id, assignments):
        self.name = name
        self.id = id
        self.assignments = assignments

    def __repr__(self):
        # Debug aid only; assignments may be a lazy queryset, so omit it.
        return f"ModuleWrapper(name={self.name!r}, id={self.id!r})"
def modules(request, course_id):
    """Render the module overview page; on POST, reassign checked assignments.

    POST fields:
      - ``assignment-checkbox-<id>``: "checked" for each assignment to move.
      - ``moduleassign``: target module id; 0 means "no module".
    """
    if not request.user.is_authenticated:
        return redirect('home')
    context = alwaysContext(request, course_id)
    if request.method == "POST":
        for assn in Assignment.objects.filter(course=context["selected_course"]):
            if request.POST.get(f"assignment-checkbox-{assn.id}") == "checked":
                moduleid = int(request.POST.get("moduleassign"))
                if moduleid == 0:
                    # Sentinel id 0 clears the module assignment.
                    assn.module = None
                else:
                    assn.module = AssignmentModule.objects.get(id=moduleid)
                assn.save()
    # "No module" bucket first, then real modules sorted by name.
    context["modules"] = [ModuleWrapper("No module", 0, Assignment.objects.filter(course=context["selected_course"], module=None))]
    for module in AssignmentModule.objects.filter(course=context["selected_course"]).order_by("name"):
        context["modules"].append(ModuleWrapper(module.name, module.id, Assignment.objects.filter(course=context["selected_course"], module=module).order_by("end_datetime")))
    return render(request, "modules.html", context)
def createmodule(request, course_id):
    """Show the module-creation form and create a module on POST (owner only)."""
    if not request.user.is_authenticated:
        return redirect('home')
    context = alwaysContext(request, course_id)
    if request.user != context["selected_course"].owner:
        return redirect('forum:modules')
    if request.method != 'POST':
        context["form"] = forms.CreateModuleForm()
        return render(request, "createmodule.html", context)
    form = forms.CreateModuleForm(request.POST)
    if form.is_valid():
        AssignmentModule.objects.create(
            name=form.cleaned_data['name'],
            course=context["selected_course"],
        )
    # Invalid submissions are dropped silently; either way return to the list.
    return redirect("forum:modules")
def deletemodule(request, course_id, module_id):
    """Delete *module_id* (owner only) and return to the module list."""
    if not request.user.is_authenticated:
        return redirect('home')
    context = alwaysContext(request, course_id)
    if request.user != context["selected_course"].owner:
        return redirect('forum:modules')
    target = AssignmentModule.objects.get(id=module_id)
    target.delete()
    return redirect("forum:modules")
3427776 | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAC agent."""
from acme.agents.jax.sac.agents import DistributedSAC
from acme.agents.jax.sac.agents import SAC
from acme.agents.jax.sac.builder import SACBuilder
from acme.agents.jax.sac.config import SACConfig
from acme.agents.jax.sac.config import target_entropy_from_env_spec
from acme.agents.jax.sac.learning import SACLearner
from acme.agents.jax.sac.networks import apply_policy_and_sample
from acme.agents.jax.sac.networks import default_models_to_snapshot
from acme.agents.jax.sac.networks import make_networks
from acme.agents.jax.sac.networks import SACNetworks
| StarcoderdataPython |
9616650 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import math
import torch
def unNormalizeData(normalized_data, data_mean, data_std, dimensions_to_use):
    """Undo z-score normalisation and re-embed used dimensions in full layout.

    Args:
        normalized_data: (T, len(dimensions_to_use)) normalised samples.
        data_mean: (D,) per-dimension means of the full representation.
        data_std: (D,) per-dimension standard deviations.
        dimensions_to_use: indices of the D dimensions that are populated.

    Returns:
        (T, D) array; unused dimensions receive only the mean offset.
    """
    T = normalized_data.shape[0]  # Batch size
    D = data_mean.shape[0]
    orig_data = np.zeros((T, D), dtype=np.float32)
    orig_data[:, dimensions_to_use] = normalized_data
    # Broadcasting over the batch axis replaces the previous explicit
    # np.repeat tiling of the mean/std rows.
    return orig_data * data_std.reshape(1, D) + data_mean.reshape(1, D)
def unNormalize2dData(normalized_data, data_mean, data_std):
    """Undo normalisation for 16-joint 2D poses (32 values per sample).

    *data_mean*/*data_std* hold stats for the full 32-joint skeleton laid
    out as (x, y) pairs; only the 16 joints in ``dimensions_to_use`` are
    kept, matching the network's reduced joint set.
    """
    dimensions_to_use = np.asarray([1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27])
    mean = data_mean.reshape(-1, 2)[dimensions_to_use, :].reshape(1, 32)
    std = data_std.reshape(-1, 2)[dimensions_to_use, :].reshape(1, 32)
    # Broadcasting over the batch axis replaces the previous np.repeat tiling.
    return np.asarray(normalized_data) * std + mean
def unNormalize3dData(normalized_data, data_mean, data_std):
    """Undo normalisation for 16-joint 3D poses (48 values per sample).

    *data_mean*/*data_std* hold stats for the full 32-joint skeleton laid
    out as (x, y, z) triples; only the 16 joints in ``dimensions_to_use``
    are kept.
    """
    dimensions_to_use = np.asarray([1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27])
    mean = data_mean.reshape(-1, 3)[dimensions_to_use, :].reshape(1, 48)
    std = data_std.reshape(-1, 3)[dimensions_to_use, :].reshape(1, 48)
    # Broadcasting over the batch axis replaces the previous np.repeat tiling.
    return np.asarray(normalized_data) * std + mean
def input_norm(inputs):
    """Scale each 2D pose by ten times its hip-to-head distance.

    Args:
        inputs: sequence of flat (32,) poses, 16 joints as (x, y) pairs,
            hip at joint 0 and head at joint 9.

    Returns:
        (normalised, dists): an (N, 32) array of scaled poses and the (N,)
        hip-to-head distances used as the scale reference.

    Note: a degenerate pose with coincident hip and head (dist == 0)
    divides by zero, exactly as the previous implementation did.
    """
    head_idx = 9
    hip_idx = 0
    c = 10
    scaled = []
    dist_set = []
    for sample in inputs:
        joints = np.asarray(sample).reshape(16, 2)
        dist = float(np.linalg.norm(joints[hip_idx] - joints[head_idx]))
        scaled.append(joints / (c * dist))
        dist_set.append(dist)
    input_set = np.asarray(scaled).reshape(len(dist_set), 32)
    return input_set, np.asarray(dist_set)
def output_norm(outputs):
    """Normalise 3D poses by hip-to-head distance after a fixed z-offset.

    Each flat (48,) pose (16 joints, (x, y, z) triples, hip at joint 0 and
    head at joint 9) is shifted by +10 on the z axis, then divided by the
    shifted hip-to-head distance.

    Returns:
        (normalised, dists): the (N, 48) scaled poses and the (N,)
        distances used as the scale reference.
    """
    head_idx = 9
    hip_idx = 0
    z_offset = np.array([0.0, 0.0, 10.0])
    scaled = []
    dist_set = []
    for sample in outputs:
        joints = np.asarray(sample).reshape(16, 3) + z_offset
        dist = float(np.linalg.norm(joints[hip_idx] - joints[head_idx]))
        scaled.append(joints / dist)
        dist_set.append(dist)
    output_set = np.asarray(scaled).reshape(len(dist_set), 48)
    return output_set, np.asarray(dist_set)
8495 | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
# Root URL configuration: delegate all paths to the users and apis apps, and
# serve static files through Django's static() helper (development use).
urlpatterns = [
    # Examples:
    # url(r'^$', 'evetool.views.home', name='home'),
    url(r'^', include('users.urls')),
    url(r'^', include('apis.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| StarcoderdataPython |
1974878 | '''
Created on 11 Oct 2016
@author: <NAME>
'''
# pylint: disable=missing-docstring
import argparse
import collections
import csv
import json
import logging
import os
import re
import shutil
import sys
import zipfile
import zlib
import jinja2
import msgpack
import numpy
import pandas
import yaml
import pubtransit
LOG = logging.getLogger(__name__)
OUT_STREAM = sys.stdout
TEMPLATE_MANAGER = jinja2.Environment(
loader=jinja2.PackageLoader(pubtransit.__name__, ''))
TARGET_METHODS = {}
DEFAULT_STOPS_PER_TILE = 128
def main():
    """CLI entry point: parse arguments and dispatch the selected target.

    Exits with status 1 on handled errors; a full traceback is logged only
    when running at DEBUG level.

    NOTE(review): --target defaults to the plain string 'all', so without an
    explicit --target, args.target[0] evaluates to 'a', which is not a
    registered target — confirm the intended default.
    """
    logging.basicConfig(
        level=logging.WARNING, format="%(asctime)-15s | %(message)s")
    parser = argparse.ArgumentParser(
        description='Departures transit feed compiler.')
    parser.add_argument(
        '--target', type=str, nargs=1, choices=TARGET_METHODS,
        default='all',
        help='One between: {}'.format(', '.join(TARGET_METHODS)))
    parser.add_argument(
        '--build-dir', type=str, default='build',
        help='Folder where to put produced data.')
    parser.add_argument(
        '--quiet', dest='logging_level', action='store_const',
        const=logging.ERROR, default=None, help='Show only error messages.')
    parser.add_argument(
        '--logging-level', dest='logging_level', default=logging.WARNING,
        type=int,
        help='Set logging level (from {min} to {max}.'.format(
            min=logging.DEBUG, max=logging.FATAL))
    parser.add_argument(
        '--max-stops', dest='max_stops', default=DEFAULT_STOPS_PER_TILE,
        type=int, help='Set maximum number of stops-per-tile.')
    parser.add_argument(
        '--verbose', dest='logging_level', action='store_const',
        const=logging.INFO, default=None, help='Show verbose messages.')
    parser.add_argument(
        '--debug', dest='logging_level', action='store_const',
        const=logging.DEBUG, default=None, help='Show debug messages.')
    parser.add_argument(
        'files', type=str, default=['site.yml'], nargs='*',
        help='Feed file to extract feed rules from.')
    parser.add_argument(
        '--dest', type=str, help='Destination feed file.')
    args = parser.parse_args()
    if args.logging_level:
        # Raise logging level
        logging.getLogger().setLevel(args.logging_level)
    # Dispatch: every registered target is called as method(args, input_file).
    method = TARGET_METHODS[args.target[0]]
    try:
        for inpute_file in args.files or [None]:
            method(args, inpute_file)
    except Exception as error:  # pylint: disable=broad-except
        # Terse message by default; full traceback only in debug mode.
        if args.logging_level is logging.DEBUG:
            LOG.fatal("Unhandled exception.", exc_info=1)
        else:
            LOG.fatal(str(error) or str(type(error)))
        exit(1)
    except BaseException:
        logging.warning('interrupted', exc_info=1)
        raise
    else:
        logging.debug('SUCCESS')
def target_method(name):
    """Decorator factory: register the wrapped function as target *name*."""
    def register(func):
        TARGET_METHODS[name] = func
        return func
    return register
# Bundle of per-target invocation data. NOTE(review): appears unused by the
# target functions in this module — confirm before removing.
MethodParameters = collections.namedtuple(
    'MethodParameters', ['site', 'feed', 'target_path'])
@target_method("version")
def print_version(args, input_file=None):
    """Print the pubtransit package version to OUT_STREAM.

    *input_file* is accepted (and ignored) because main() dispatches every
    target as ``method(args, input_file)``; the previous single-argument
    signature made the 'version' target raise TypeError when dispatched.
    """
    # pylint: disable=unused-argument
    OUT_STREAM.write(pubtransit.__version__ + '\n')
@target_method("makefile")
def make_makefiles(args, site_file=None):
    """Render one Makefile fragment per configured feed.

    Reads the site/feed hierarchy from *site_file* (default 'site.yml'),
    ensures each feed's build directory exists, writes a '<target>.mk'
    fragment and echoes the fragment paths to OUT_STREAM so a parent
    Makefile can include them.
    """
    feeds_conf = read_yaml_file(site_file or 'site.yml')
    for site in feeds_conf['feed']:
        for feed in site["feeds"]:
            target_path = os.path.join(
                args.build_dir, site["name"], feed["name"])
            if not os.path.isdir(target_path):
                os.makedirs(target_path)
            # Feed-level "url" wins; otherwise join the site URL and path.
            url = feed.get("url") or (site["url"] + '/' + feed["path"])
            # pylint: disable=unused-argument,no-member
            OUT_STREAM.write(target_path + ".mk ")
            target_template = TEMPLATE_MANAGER.get_template("feed_item.mk")
            target_make = target_template.render(
                install_dir=os.path.join('$(INSTALL_DIR)', 'feed'),
                build_dir=args.build_dir,
                target=os.path.join(site["name"], feed["name"]),
                url=url,
                make_flags="--logging-level " + str(args.logging_level),
                make_me='python -m pubtransit ' + ' '.join(
                    repr(arg) for arg in sys.argv[1:]),
                script_name="pubtransit")
            with open(target_path + ".mk", 'wt') as target_stream:
                target_stream.write(target_make)
@target_method("datastore")
def generate_datastores(args, feed_file):
    """Compile a GTFS zip into the tiled msgpack/zlib datastore layout.

    Rebuilds the destination directory from scratch, then extracts routes,
    trips, tiled stops and tiled stop_times, and finally stores the feed's
    overall bounding box under 'feed'.

    NOTE(review): falls back to ``args.source`` when --dest is absent, but
    the argument parser defines no ``source`` option — confirm intended.
    """
    dest_dir = os.path.splitext(args.dest or args.source)[0]
    if os.path.isdir(dest_dir):
        # Always rebuild from scratch.
        shutil.rmtree(dest_dir)
    os.makedirs(dest_dir)
    with zipfile.ZipFile(feed_file) as zip_file:
        routes = read_routes(zip_file)
        generate_routes(dest_dir=dest_dir, routes=routes)
        trips = read_trips(zip_file)
        generate_trips(dest_dir=dest_dir, trips=trips, route_id=routes.id)
        stops = read_stops(zip_file)
        tiles = generate_tiled_stops(
            dest_dir=dest_dir, stops=stops, max_rows=args.max_stops)
        stop_times = read_stop_times(zip_file)
        generate_tiled_stop_times(
            dest_dir=dest_dir, stop_times=stop_times, trip_id=trips.id,
            tiles=tiles)
    # Bounding box of the whole feed, aggregated later by the index target.
    feed_info = dict(
        west=stops.west, east=stops.east, south=stops.south, north=stops.north)
    store_object(feed_info, dest_dir, 'feed')
@target_method("index")
def make_index(args, feed_file=None):
    """Aggregate every feed's bounding box into a top-level 'index' table.

    NOTE(review): feed.gz holds zlib-compressed binary data but is opened
    in text mode here; this likely only works on Python 2 — confirm
    whether mode 'rb' is needed.
    """
    feeds_conf = read_yaml_file(feed_file or 'feeds.yaml')
    paths = []
    west = []
    east = []
    south = []
    north = []
    for site in feeds_conf['feed']:
        for feed in site["feeds"]:
            target_path = os.path.join(
                args.build_dir, site["name"], feed["name"])
            if not os.path.isdir(target_path):
                LOG.error('Target feed dir not found: %r', target_path)
                os.makedirs(target_path)
            with open(os.path.join(target_path, 'feed.gz')) as in_stream:
                zipped = in_stream.read()
            packed = zlib.decompress(zipped)
            feed_info = msgpack.unpackb(packed)
            paths.append(os.path.join(site["name"], feed["name"]))
            west.append(feed_info['west'])
            east.append(feed_info['east'])
            south.append(feed_info['south'])
            north.append(feed_info['north'])
    store_column(paths, args.build_dir, 'index', 'path')
    store_column(west, args.build_dir, 'index', 'west', float)
    store_column(east, args.build_dir, 'index', 'east', float)
    store_column(south, args.build_dir, 'index', 'south', float)
    store_column(north, args.build_dir, 'index', 'north', float)
def generate_routes(dest_dir, routes):
    """Persist the route name column under <dest_dir>/routes."""
    store_column(routes.name, dest_dir, table='routes', column='name')
def generate_trips(dest_dir, trips, route_id):
    """Persist trip names and each trip's row index into the route table."""
    store_column(trips.name, dest_dir, 'trips', 'name')
    # Remap GTFS route ids to row positions in the (sorted) route table.
    route_index = numpy.searchsorted(route_id, trips.route_id)
    store_column(route_index, dest_dir, 'trips', 'route_id', int)
def generate_tiled_stops(dest_dir, stops, max_rows=None):
    """Split stops into spatial tiles and persist one stops table per tile.

    Also writes the tile search tree and the per-tile bounding boxes under
    <dest_dir>/tiles.

    Returns:
        The list of per-tile stop tables (the tree leaves).
    """
    if not max_rows:
        max_rows = len(stops.id)
    # Never tile below 4 stops per tile.
    max_rows = max(max_rows, 4)
    tiles_tree, tiles = create_tree(
        stops, index_columns=['lon', 'lat'], max_rows=max_rows)
    tiles_num = len(tiles)
    tiles_shape = tiles_num,
    # Zero-padded fixed-width tile ids keep directory names sortable.
    tiles_id_format = '0' + str(len(str(tiles_num)))
    tiles_west = numpy.zeros(tiles_shape, dtype=float)
    tiles_east = numpy.zeros(tiles_shape, dtype=float)
    tiles_south = numpy.zeros(tiles_shape, dtype=float)
    tiles_north = numpy.zeros(tiles_shape, dtype=float)
    for i, tile in enumerate(tiles):
        tile_dir = os.path.join(dest_dir, format(i, tiles_id_format))
        store_column(tile.name, tile_dir, 'stops', 'name')
        store_column(tile.lon, tile_dir, 'stops', 'lon', float)
        store_column(tile.lat, tile_dir, 'stops', 'lat', float)
        tiles_west[i] = tile.west
        tiles_east[i] = tile.east
        tiles_south[i] = tile.south
        tiles_north[i] = tile.north
    store_object(tiles_tree, os.path.join(dest_dir, 'tiles'), 'tree')
    store_column(tiles_west, dest_dir, 'tiles', 'west')
    store_column(tiles_east, dest_dir, 'tiles', 'east')
    store_column(tiles_south, dest_dir, 'tiles', 'south')
    store_column(tiles_north, dest_dir, 'tiles', 'north')
    return tiles
def generate_tiled_stop_times(dest_dir, stop_times, trip_id, tiles):
    """For each tile, persist the stop_times rows whose stop lies in it.

    Stop and trip references are remapped from GTFS ids to row indexes into
    the per-tile stops table and the global trips table respectively. Also
    writes a per-stop 'empty' flag (1 = stop has no departures).
    """
    # pylint: disable=too-many-locals
    tiles_num = len(tiles)
    tiles_id_format = '0' + str(len(str(tiles_num)))
    stop_times = stop_times.sort_by('stop_id')
    trip_id_sorter = numpy.argsort(trip_id)
    for tile_id, stops in enumerate(tiles):
        stop_id = stops.id
        stop_id_sorter = numpy.argsort(stop_id)
        # A stop_times row belongs to this tile when its stop_id occurs in
        # the tile's stop table: left/right insertion points then differ.
        tile_start = numpy.searchsorted(
            stop_id, stop_times.stop_id, side='left', sorter=stop_id_sorter)
        tile_stop = numpy.searchsorted(
            stop_id, stop_times.stop_id, side='right', sorter=stop_id_sorter)
        tiled_stop_times = stop_times.select(tile_start != tile_stop)
        tiled_stop_times_stop_id = stop_id_sorter[numpy.searchsorted(
            stop_id, tiled_stop_times.stop_id, sorter=stop_id_sorter)]
        tiled_stop_times_trip_id = trip_id_sorter[numpy.searchsorted(
            trip_id, tiled_stop_times.trip_id,
            sorter=trip_id_sorter)]
        tile_dir = os.path.join(dest_dir, format(tile_id, tiles_id_format))
        store_column(
            tiled_stop_times_stop_id, tile_dir, 'stop_times', 'stop_id')
        store_column(
            tiled_stop_times_trip_id, tile_dir, 'stop_times', 'trip_id')
        store_column(
            tiled_stop_times.departure_minutes, tile_dir, 'stop_times',
            'departure_minutes')
        # Flag stops that never appear in this tile's stop_times.
        stop_empty = numpy.ones_like(stop_id, dtype=int)
        stop_empty[tiled_stop_times_stop_id] = 0
        store_column(stop_empty, tile_dir, 'stops', 'empty')
def timestamp_to_minutes(timestamp):
    """Convert 'HH:MM:SS' timestamps to integer minutes since midnight.

    Hours wrap modulo 24 (GTFS allows times >= 24:00:00 for after-midnight
    service) and minutes modulo 60.

    Args:
        timestamp: sequence of str/bytes timestamps, at most 8 characters;
            shorter values are left-padded with '0'.

    Returns:
        Integer ndarray of minutes in [0, 1440).
    """
    stamps = numpy.asarray(timestamp, dtype='S8')
    stamps = numpy.char.rjust(stamps, 8, '0')
    # Reinterpret the fixed-width S8 buffer as one byte per column so the
    # hour/minute digit pairs can be sliced safely; this replaces the
    # previous raw __array_interface__ pointer manipulation.
    chars = stamps.view('S1').reshape(-1, 8)
    hours = chars[:, 0:2].copy().view('S2').ravel().astype(int) % 24
    minutes = chars[:, 3:5].copy().view('S2').ravel().astype(int) % 60
    total = (hours * 60) + minutes
    assert total.max() < 24 * 60
    return total
def named_tuple(*fields):
    """Class decorator mixing a collections.namedtuple with *fields* into cls."""
    def decorator(cls):
        tuple_base = collections.namedtuple(cls.__name__, fields)
        return type(cls.__name__, (cls, tuple_base), {})
    return decorator
class BaseTable(tuple):
    """Column-oriented table: a tuple of parallel numpy arrays (one per field)."""

    def sort_by(self, index_name):
        """Return a new table with all columns sorted by column *index_name*."""
        # sort all columns by given index
        return self.sort_by_array(getattr(self, index_name))

    def sort_by_array(self, index_array, sort_index_array=False, sorter=None):
        """Return a new table sorted by *index_array* (or an explicit *sorter*)."""
        # sort all columns by given index
        if sorter is None:
            sorter = numpy.argsort(index_array)
        if sort_index_array:
            # NOTE: writes the sorted order back into index_array in place.
            index_array[:] = index_array[sorter]
        return self.select(sorter)

    def select(self, item):
        """Fancy-index every non-None column, returning a table of the same type."""
        values = [
            column[item] for column in self if column is not None]
        return type(self)(*values)
def create_tree(table, index_columns, max_rows=128):
    """Partition *table* into a kd-style tree of leaf tables.

    Splits alternately on the columns in *index_columns*, at the median,
    until each leaf holds at most *max_rows* rows.

    Returns:
        (tree, leaves): nested dict describing the splits (leaf nodes hold
        an index into *leaves*) and the list of leaf tables.
    """
    # pylint: disable=too-many-locals
    table_class = type(table)
    ndim = len(index_columns)
    tree = {}
    stack = [(tree, table, 0)]
    leaves = []
    while stack:
        node, table, level = stack.pop()
        # Cycle through the index columns: one dimension per tree level.
        column_id = index_columns[level % ndim]
        column = getattr(table, column_id)
        if len(column) <= max_rows:
            node['leaf'] = len(leaves)
            leaves.append(table)
        else:
            table = table.sort_by(column_id)
            column = getattr(table, column_id)  # this is another table!!
            mid = int(len(column) / 2)
            node['col'] = column_id
            node['min'] = column[0]
            node['mid'] = column[mid]
            node['max'] = column[-1]
            node['left'] = {}
            node['right'] = {}
            assert node['min'] <= node['max']
            assert node['min'] <= node['mid']
            assert node['mid'] <= node['max']
            left = table_class(*[col[:mid] for col in table])
            right = table_class(*[col[mid:] for col in table])
            stack.append((node['right'], right, level + 1))
            stack.append((node['left'], left, level + 1))
    LOG.debug('Tree generated:\n%s',
              json.dumps(tree, indent=4, sort_keys=True))
    return tree, leaves
@named_tuple('id', 'name')
class RouteTable(BaseTable):
    """Routes table: (id, short name), kept sorted by id."""

    @classmethod
    def from_zip_file(cls, zip_file):
        """Load routes.txt from a GTFS zip and sort rows by route id."""
        id_col, name_col = read_table(
            zip_file=zip_file, table_name='routes',
            columns=['route_id', 'route_short_name'])
        return cls(id_col, name_col).sort_by('id')


read_routes = RouteTable.from_zip_file  # pylint: disable=invalid-name
@named_tuple('id', 'route_id', 'name')
class TripTable(BaseTable):
    """Trips table: (id, parent route id, headsign), kept sorted by id."""

    @classmethod
    def from_zip_file(cls, zip_file):
        """Load trips.txt from a GTFS zip and sort rows by trip id."""
        id_col, route_col, name_col = read_table(
            zip_file=zip_file, table_name='trips',
            columns=['trip_id', 'route_id', 'trip_headsign'])
        return cls(id_col, route_col, name_col).sort_by('id')


read_trips = TripTable.from_zip_file  # pylint: disable=invalid-name
@named_tuple('lon', 'lat', 'id', 'name', 'indexes')
class StopTable(BaseTable):
    """Stops table with lazily-computed bounding-box properties."""
    # pylint: disable=no-member

    @classmethod
    def from_zip_file(cls, zip_file):
        """Load stops.txt, append a positional index column, sort by stop id."""
        columns = read_table(
            zip_file=zip_file, table_name='stops',
            columns=['stop_lon', 'stop_lat', 'stop_id', 'stop_name'],
            dtypes={'stop_lon': float, 'stop_lat': float})
        columns += (numpy.arange(len(columns[0])),)
        return cls(*columns).sort_by('id')

    # The bounding-box values below are cached on first access by writing
    # instance attributes (possible despite the tuple base because this
    # subclass declares no __slots__, so instances carry a __dict__).
    _west = None

    @property
    def west(self):
        # Westernmost (minimum) longitude.
        west = self._west
        if west is None:
            self._west = west = numpy.amin(self.lon)
        return west

    _east = None

    @property
    def east(self):
        # Easternmost (maximum) longitude.
        east = self._east
        if east is None:
            self._east = east = numpy.amax(self.lon)
        return east

    _south = None

    @property
    def south(self):
        # Southernmost (minimum) latitude.
        south = self._south
        if south is None:
            self._south = south = numpy.amin(self.lat)
        return south

    _north = None

    @property
    def north(self):
        # Northernmost (maximum) latitude.
        north = self._north
        if north is None:
            self._north = north = numpy.amax(self.lat)
        return north


read_stops = StopTable.from_zip_file  # pylint: disable=invalid-name
@named_tuple('stop_id', 'trip_id', 'departure_minutes')
class StopTimeTable(BaseTable):
    """Stop-times table with departures pre-converted to minutes."""

    @classmethod
    def from_zip_file(cls, zip_file):
        """Load stop_times.txt and convert departure_time to minutes."""
        stop_id, trip_id, departure_time = read_table(
            zip_file=zip_file, table_name='stop_times',
            columns=['stop_id', 'trip_id', 'departure_time'])
        return cls(
            stop_id=stop_id, trip_id=trip_id,
            departure_minutes=timestamp_to_minutes(departure_time))

    # NOTE(review): `_left` appears to be an unused leftover — confirm
    # before removing.
    _left = None


read_stop_times = StopTimeTable.from_zip_file  # pylint: disable=invalid-name
# Strips every non-word byte when sanitising CSV header cell names.
GET_COLUMN_NAME_REGEX = re.compile(b'[^\\w]')


def read_table(zip_file, table_name, columns, dtypes=None):
    """Read selected columns of <table_name>.txt from a GTFS zip.

    Args:
        zip_file: open zipfile.ZipFile containing the feed.
        table_name: base name of the CSV member (without '.txt').
        columns: column names to extract, in the order to return them.
        dtypes: optional {column: dtype} overrides.

    Returns:
        List of numpy arrays, one per requested column.
    """
    dtypes = dtypes or {}
    with zip_file.open(table_name + '.txt', 'r') as csv_stream:
        header = csv_stream.readline().strip()
        # Sanitize header cells: feeds may quote names or prefix a BOM.
        names = [
            GET_COLUMN_NAME_REGEX.sub(b'', name).decode('ascii')
            for name in header.split(b',')]
        frame = pandas.read_csv(
            csv_stream, names=names, quotechar='"', quoting=csv.QUOTE_ALL,
            usecols=list(columns))
    return [
        numpy.asarray(remove_nans(frame[column]), dtype=dtypes.get(column))
        for column in columns]
def remove_nans(series):
    """Return *series* with NaN placeholders replaced by '' in object columns."""
    if series.dtype != object:
        return series
    return series.fillna('')
def store_column(array, dest_dir, table, column, dtype=None):
    """Serialise one column as <dest_dir>/<table>/<column>.gz."""
    if dtype:
        array = numpy.asarray(array, dtype=dtype)
    payload = array if isinstance(array, list) else list(array)
    store_object(payload, os.path.join(dest_dir, table), column)
def store_object(obj, dest_dir, name):
    """msgpack-serialise and zlib-compress *obj*, writing <dest_dir>/<name>.gz."""
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    dest_file = os.path.join(dest_dir, name + '.gz')
    packed = msgpack.packb(obj)
    zipped = zlib.compress(packed, 9)
    with open(dest_file, 'wb') as dest_stream:
        dest_stream.write(zipped)
    LOG.info('Object stored:\n'
             '  path: %r\n'
             '  packet size: %d bytes\n'
             '  zipped size: %d bytes.\n',
             dest_file, len(packed), len(zipped))
def read_yaml_file(feed_file):
    """Parse a YAML configuration file and return its top-level object.

    NOTE(review): yaml.load without an explicit Loader can construct
    arbitrary Python objects; prefer yaml.safe_load if the file could ever
    be untrusted.
    """
    with open(feed_file, 'rt') as feed_file_stream:
        return yaml.load(feed_file_stream.read())
def array_from_data(data, shape, typestr, strides, offset=0):
    """Build a numpy array aliasing an existing buffer without copying.

    *data* is an (address, read_only) pair in ``__array_interface__``
    format; *offset* shifts the start address by that many bytes.
    """
    if offset:
        data = data[0] + offset, data[1],
    # pylint: disable=no-member
    return numpy.array(ArrayInterface(
        shape=shape, typestr=typestr, data=data, strides=strides))
@named_tuple('shape', 'typestr', 'data', 'strides')
class ArrayInterface(object):
    """Adapter exposing raw buffer metadata through __array_interface__."""
    # pylint: disable=too-few-public-methods,no-member

    @property
    def __array_interface__(self):
        # Field order of the underlying namedtuple matches the key order.
        interface = dict(zip(('shape', 'typestr', 'data', 'strides'), self))
        interface['version'] = 3
        return interface
| StarcoderdataPython |
3442935 | from typing import Any, Iterable
class CustomSet:
    """A set implementation backed by a list of unique elements.

    Membership uses ``==`` rather than hashing, so elements need not be
    hashable; in exchange most operations are O(n) or O(n*m).
    """

    def __init__(self, _elements: Iterable[Any] = ()):
        # Immutable tuple default avoids the shared-mutable-default pitfall.
        self._elements: list = []
        for element in _elements:
            self.add(element)

    def isempty(self) -> bool:
        """Return True if the set holds no elements."""
        return not self._elements

    def __contains__(self, element: Any) -> bool:
        return element in self._elements

    def issubset(self, other: "CustomSet") -> bool:
        """Return True if every element of self is also in *other*."""
        return all(element in other for element in self._elements)

    def isdisjoint(self, other: "CustomSet") -> bool:
        """Return True if self and *other* share no elements."""
        # Short-circuits without building an intermediate intersection set.
        return not any(element in other for element in self._elements)

    def __eq__(self, other: "CustomSet") -> bool:
        return (len(self._elements) == len(other._elements)
                and self.issubset(other))

    def add(self, element: Any) -> None:
        """Insert *element* unless an equal one is already present."""
        if element not in self._elements:
            self._elements.append(element)

    def intersection(self, other: "CustomSet") -> "CustomSet":
        """Return a new set holding the elements common to self and *other*."""
        return CustomSet(e for e in self._elements if e in other)

    def __sub__(self, other: "CustomSet") -> "CustomSet":
        return CustomSet(e for e in self._elements if e not in other)

    def __add__(self, other: "CustomSet") -> "CustomSet":
        return CustomSet(self._elements + other._elements)
11244050 | import argparse
from pathlib import Path
from chomskIE.dataset import (Loader,
Writer,
DummyLoader,
DummyWriter)
from chomskIE.utils import (retrieve_spacy_language,
filter_invalid_sents,
retrieve_wordnet)
from chomskIE.preprocess import *
from chomskIE.extract import *
from chomskIE.postprocess import (post_process_triples,
post_process_part_tuples)
# SpaCy model name used to build the English language pipeline.
LANGUAGE = 'en_core_web_sm'

# Preprocessing stages applied to every document, in this order.
PIPELINE = [
    SentenceRecognizer,
    ModelTransformer,
    WordTokenizer,
    Lemmatizer,
    PartOfSpeechTagger,
    NamedEntityRecognizer,
    DependencyParser,
]
def process_document(doc):
    """Finalize a document after the preprocessing pipeline has run.

    Marks the document as processed, drops the intermediate spaCy sentence
    attribute, and filters out invalid sentences.

    Arguments:
        doc (chomskIE.utils.Document): Document.

    Returns:
        chomskIE.utils.Document: Processed document.
    """
    doc.processed = True
    del doc.model_sents
    return filter_invalid_sents(doc)
def extract_relations(doc):
    """Run the template-extraction pipeline over a preprocessed document.

    Extracts PART-OF tuples, then BORN and ACQUIRE verb templates, applying
    the matching post-processing step after each extractor.

    Arguments:
        doc (chomskIE.utils.Document): Document.

    Returns:
        chomskIE.utils.Document: Document containing extracted relations.
    """
    part_extractor = PartTupleExtractor()
    verb_extractor = VerbTemplateExtractor()
    doc = process_document(doc)
    doc = part_extractor.extract(doc)
    doc = post_process_part_tuples(doc)
    for relation in ('born', 'acquire'):
        doc = verb_extractor.extract(doc, relation)
        doc = post_process_triples(relation, doc)
    return doc
def extract_born_relations(input_path, english_model, transform):
    """Extract BORN relation tuples from one document or a batch.

    Arguments:
        input_path (pathlib.Path):
            Path to a .txt file (``transform=True``) or a folder of
            .txt files (``transform=False``).
        english_model (spacy.lang):
            Trained SpaCy language pipeline.
        transform (bool):
            When True, process a single file; otherwise the whole folder.

    Returns:
        docs (list of chomskIE.utils.Document):
            Documents with extracted BORN tuples.
    """
    data_loader = DummyLoader()

    if not transform:
        docs, spacy_docs = data_loader.load_from_path(english_model,
                                                      input_path)
    else:
        doc, spacy_doc = data_loader.load(english_model, input_path)
        docs, spacy_docs = [doc], [spacy_doc]

    bte = BornTupleExtractor()
    docs = [bte.extract(doc, spacy_docs[index]) \
            for index, doc in enumerate(docs)]
    return docs
def fit_transform_batch(input_path, english_model):
    """Extract relation templates from batch of documents.

    Arguments:
        input_path (str):
            Path to folder containing .txt files.

        english_model (spacy.lang)
            Trained SpaCy language pipeline.

    Returns:
        docs (List of chomskIE.utils.Document objects)
            Documents containing extracted relations.
    """
    data_loader = Loader()
    docs = data_loader.load_from_path(input_path)

    # Each pipeline step is applied to the whole batch via __call__.
    for Pipe in PIPELINE:
        docs = Pipe(english_model)(docs)

    docs = [extract_relations(doc) for doc in docs]
    return docs
def fit_transform(input_path, english_model):
    """Extract relation templates from single document.

    Arguments:
        input_path (str):
            Path to folder containing .txt files.

        english_model (spacy.lang)
            Trained SpaCy language pipeline.

    Returns:
        docs (List of chomskIE.utils.Document objects)
            Documents containing extracted relations.
    """
    data_loader = Loader()
    doc = data_loader.load(input_path)

    # NOTE(review): single-document mode uses ``.transform`` while the
    # batch path uses ``__call__`` -- presumably equivalent per-pipe;
    # confirm in chomskIE.preprocess.
    for Pipe in PIPELINE:
        doc = Pipe(english_model).transform(doc)

    doc = extract_relations(doc)
    return doc
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CS6320 Final Project')

    parser.add_argument('--input_path', dest='input_path',
                        type=str, default='../assets/data/dev/',
                        help='Path to input data file/directory')

    parser.add_argument('--output_path', dest='output_path',
                        type=str, default='../assets/outputs/',
                        help='Path to output file location')

    parser.add_argument('--transform', dest='transform', action='store_true',
                        help='If passed, transforms sentences in single \
                             dev/test .txt file.')

    args = parser.parse_args()

    input_path, output_path = Path(args.input_path), Path(args.output_path)
    english_model = retrieve_spacy_language(lang=LANGUAGE)

    # Extracting templates for 'BORN' relation.
    # (Identical in both modes; previously duplicated in each branch.)
    born_docs = extract_born_relations(input_path, english_model, args.transform)
    born_writer = DummyWriter()
    born_writer.write(output_path, born_docs, ['born'])

    # Extracting templates for 'ACQUIRE' and 'PART-OF' relations.
    if not args.transform:
        docs = fit_transform_batch(input_path, english_model)
    else:
        docs = [fit_transform(input_path, english_model)]

    data_writer = Writer()
    data_writer.write(output_path, docs, ['acquire', 'part'])
| StarcoderdataPython |
11201255 | <filename>Algorithm/QTOpt/dis/PendulumFullState1.py
import gym
import tensorflow as tf
from RunClient import runClient
def createEnvironemnt(environment = "Pendulum-v0"):
    # Build the raw (un-time-limited) gym environment.
    # NOTE(review): the function name has a typo ("Environemnt") but is
    # referenced by RunClient, so it is kept unchanged.
    return gym.make(environment).env
# Instantiate the environment once at import time and print its
# observation/action space layout as a quick sanity check.
enviroment = createEnvironemnt()
#enviroment.render()
print('Number of states: {} High: {} Low {}'.format(enviroment.observation_space, enviroment.observation_space.high , enviroment.observation_space.low))
print('Number of actions: {} High: {} Low {}'.format(enviroment.action_space, enviroment.action_space.high, enviroment.action_space.low ))
print('States Shape:', enviroment.observation_space.shape)
print('Action Shape:', enviroment.action_space.shape)
#print( "Action:", enviroment.action_space.sample())
#print("State", enviroment.reset())
def policyFunction(action):
    # Identity policy: the raw action from the agent is used as-is.
    return action
def getState(state):
    # Identity state mapping: the full observation is used directly.
    return state
# Paths for model weights and the replay-buffer data collection.
modelSrcWeights= 'saved_model/Weights/putty/FullState'
dataCollectionPath = 'saved_model/buffer/putty/FullState/NumpyData'

# Pendulum-v0 exposes a 3-dimensional observation and a 1-dimensional action.
stateSize = 3
actionSize = 1
camerashape= (500,500,3)
loss = "mse"
optimizer = tf.keras.optimizers.SGD(learning_rate=0.0005, momentum=0.7, clipvalue=10)

# Worker counts for the distributed QT-Opt setup.
dataCollerctorNumber = 1
bellmannNumber = 2
trainingsWorkerNumber = 2
def getConfiguration():
    # Bundle the module-level hyperparameters for external consumers.
    return stateSize, actionSize, camerashape, optimizer, loss
def run():
    # Launch the distributed client with this module's environment,
    # policy and hyperparameters, then block until the user presses Enter.
    runClient(stateSize, actionSize, camerashape,
    policyFunction, getState, createEnvironemnt, optimizer, loss,
    modelSrcWeights, dataCollectionPath, dataCollerctorNumber, bellmannNumber, trainingsWorkerNumber)
    input("... \n")
| StarcoderdataPython |
1860566 | import os
from os.path import dirname, abspath, join
def _get_posts():
    """Collect blog-post metadata from the sibling ``posts`` directory.

    Filenames are expected to look like ``YYYY-MM-DD-some-title.ext``:
    the first 10 characters are the date and the remainder (minus the
    3-character extension suffix) is the slugged title.

    Returns:
        list[dict]: one dict per file with ``link``, ``title`` and
        ``date`` keys.
    """
    parent_dir = dirname(dirname(abspath(__file__)))
    posts = []
    for pf in os.listdir(join(parent_dir, 'posts')):
        # NOTE(review): pf[10:-3] keeps the date/title separator, which
        # yields a leading space in titles -- confirm filename convention.
        posts.append(
            {
                'link': pf[:-3],
                'title': pf[10:-3].replace('-', ' '),
                'date': pf[:10],
            }
        )
    # The trailing corrupt token ("| StarcoderdataPython |") fused onto the
    # original return line was removed; it made the module unparseable.
    return posts
1936690 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: <NAME>
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_FAVORITES_ADD_ENDPOINT,
API_FAVORITES_REMOVE_ENDPOINT,
API_FAVORITES_SEARCH_ENDPOINT,
)
from sonarqube.utils.common import POST, PAGE_GET
class SonarQubeFavorites(RestClient):
    """
    SonarQube favorites Operations

    Each method body is empty on purpose: the POST/PAGE_GET decorators
    perform the actual HTTP request against the configured endpoint,
    using the method's keyword arguments as request parameters.
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: passed straight through to RestClient.
        """
        super(SonarQubeFavorites, self).__init__(**kwargs)

    @PAGE_GET(API_FAVORITES_SEARCH_ENDPOINT, item="favorites")
    def search_favorites(self):
        """
        SINCE 6.3
        Search for the authenticated user favorites.
        Pagination is handled by the PAGE_GET decorator, which yields
        items from the 'favorites' field of each response page.

        :return:
        """

    @POST(API_FAVORITES_ADD_ENDPOINT)
    def add_component_to_favorites(self, component):
        """
        SINCE 6.3
        Add a component (project, file etc.) as favorite for the authenticated user.

        :param component: Component key. Only components with qualifiers TRK, VW, SVW, APP, FIL, UTS are supported
        :return:
        """

    @POST(API_FAVORITES_REMOVE_ENDPOINT)
    def remove_component_from_favorites(self, component):
        """
        SINCE 6.3
        Remove a component (project, directory, file etc.) as favorite for the authenticated user.

        :param component: Component key
        :return:
        """
| StarcoderdataPython |
151892 | <filename>setup.py
from setuptools import setup
# Read the long description up front with a context manager; the original
# ``open('README.md').read()`` inside the setup() call leaked the handle.
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='toshiservices',
    version='0.0.1',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['toshi'],
    url='http://github.com/IceExchange/ice-services-lib',
    description='',
    long_description=long_description,
    setup_requires=['pytest-runner'],
    install_requires=[
        'regex',
        'tornado==4.5.1',
        'ethereum==2.0.4',
        'secp256k1'
    ],
    dependency_links=[
    ],
    tests_require=[
        'pytest',
        'requests',
        'testing.common.database',
        'testing.postgresql',
        'testing.redis',
        'asyncpg',
        'mixpanel==4.3.2',
        'redis',
        'msgpack-python',
        'aioredis==0.3.2'
    ]
)
| StarcoderdataPython |
1790389 | <gh_stars>100-1000
"""LikeTopic
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2013-12-12 12:35:12.253544
"""
# revision identifiers, used by Alembic.
# ``revision`` is this migration's id; ``down_revision`` is its parent.
revision = '<KEY>'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``like_topic`` table; each account may like a topic once."""
    columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('account_id', sa.Integer(), nullable=False),
        sa.Column('topic_id', sa.Integer(), nullable=False),
        sa.Column('created', sa.DateTime(), nullable=True),
    ]
    constraints = [
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint(
            'account_id', 'topic_id', name='uc_account_like_topic'
        ),
    ]
    op.create_table('like_topic', *columns, *constraints)
def downgrade():
    # Revert the migration by dropping the table created in upgrade().
    op.drop_table('like_topic')
| StarcoderdataPython |
3554930 | <gh_stars>1-10
from dataclasses import dataclass
from somerandomapi import http
from somerandomapi.constants import ANIMALS
from somerandomapi.sync_async_handler import SyncAsyncHandler
@dataclass
class AnimalResponse:
    """
    One fact/image pair for an animal, as returned by the API.

    Attributes
    ----------
    - fact: `str` -- a random fact about the animal.
    - image: `str` -- URL of an image of the animal.
    """

    fact: str
    image: str
class Animal(type):
    """Metaclass that resolves attribute access (e.g. ``AnimalMeta.dog``)
    into a sync/async handler hitting the ``/animal/<name>`` endpoint."""

    def __getattr__(self, animal):
        # Only names present in the ANIMALS whitelist are valid endpoints.
        if animal.upper() in ANIMALS:
            return SyncAsyncHandler(self.get_animal, self.async_get_animal, animal)
        else:
            raise AttributeError("Unknown animal.") from None

    async def async_get_animal(self, animal: str):
        # Async variant; mirrors get_animal below.
        async with http.GET(("animal", animal.lower())) as response:
            get = response.json().get
            DC = AnimalResponse(fact=get("fact"), image=get("image"))
            return DC

    def get_animal(self, animal: str):
        # GET /animal/<name>; the body carries 'fact' and 'image' fields.
        with http.GET(("animal", animal.lower())) as response:
            get = response.json().get
            DC = AnimalResponse(fact=get("fact"), image=get("image"))
            return DC
class AnimalMeta(metaclass=Animal):
    """
    Docs: https://some-random-api.ml/docs/endpoints/animal

    Accessing any attribute below (e.g. ``AnimalMeta.dog``) triggers the
    Animal metaclass and returns an ``AnimalResponse``.

    Attributes
    ----------
    - dog: `AnimalResponse`
    - cat: `AnimalResponse`
    - panda: `AnimalResponse`
    - fox: `AnimalResponse`
    - red_panda: `AnimalResponse`
    - koala: `AnimalResponse`
    - bird: `AnimalResponse`
    - raccoon: `AnimalResponse`
    - kangaroo: `AnimalResponse`
    """

    dog: AnimalResponse
    cat: AnimalResponse
    panda: AnimalResponse
    fox: AnimalResponse
    red_panda: AnimalResponse
    koala: AnimalResponse
    bird: AnimalResponse
    raccoon: AnimalResponse
    kangaroo: AnimalResponse
    pass
| StarcoderdataPython |
8111651 | # Copyright 2014-2015 <NAME>.
# This file is part of SGGL. SGGL is licensed under the terms of the
# 2-clause BSD license. For more information, see LICENSE.txt.
import os
import xml.etree.ElementTree as etree
import collections
import io
import re
API_LIST = 'gl:core', 'gl:compatibility', 'gles1', 'gles2'
NOTICE = '/* This file is automatically generated. */\n'
COMMON_HEADER = '''\
#ifndef SGGL_COMMON_H
#define SGGL_COMMON_H
#if defined __cplusplus
# define SGGL_EXTC extern "C"
#else
# define SGGL_EXTC
#endif
#if defined _WIN32
# define SGGL_API __stdcall
# define SGGL_IMPORT SGGL_EXTC __declspec(dllimport)
#else
# define SGGL_API
# define SGGL_IMPORT SGGL_EXTC
#endif
#if defined __cplusplus
# define SGGL_INLINE inline
#else
# if defined __clang__ || defined __GNUC__
# define SGGL_INLINE static __inline__
# elif defined _MSC_VER
# define SGGL_INLINE static __inline
# endif
#endif
'''
COMMON_FOOTER = '''\
#ifdef __cplusplus
extern "C" {
#endif
/* The version of the active context, in BCD. */
extern int sggl_ver;
/* List of all OpenGL versions with emitted interfaces, in BCD. */
extern const unsigned char SGGL_VERSIONS[SGGL_VERSIONCOUNT];
/* Flags for which extensions are available. */
#if defined SGGL_HASEXTS
extern unsigned sggl_ext[(SGGL_EXTCOUNT + 31) / 32];
#endif
/* Pointers to OpenGL entry points. */
extern void *sggl_func[SGGL_ENTRYCOUNT];
/* Initialize OpenGL functions and variables. */
int
sggl_init(void);
#ifdef __cplusplus
}
#endif
#endif
'''
IFACE_HEADER = '''\
#ifndef {info.guard}_H
#define {info.guard}_H
#include "common.h"
'''
IFACE_FOOTER = '''\
#ifdef __cplusplus
}
#endif
#endif
'''
COMMAND_DIRECT = '''\
SGGL_IMPORT {type} SGGL_API gl{name}({params});
'''
COMMAND_INDIRECT = '''\
SGGL_INLINE {type} gl{name}({params}) {{ \
{retstmt}(({type} (SGGL_API*)({ptypes}))\
(sggl_func[{index}]))({pnames}); \
}}
'''
Type = collections.namedtuple('Type', 'name definition requires')
Function = collections.namedtuple('Function', 'proto params')
Parameter = collections.namedtuple('Parameter', 'name fulltype typename')
InterfaceName = collections.namedtuple(
'InterfaceName', 'category info profile')
def flat_text(node, tagmap=None):
    """Flatten XML tree, returning just the text.

    Parameters:
        node: ElementTree element to flatten.
        tagmap: optional mapping from tag name to replacement text; a
            mapped tag is emitted verbatim and its subtree is skipped.

    Returns:
        The concatenated text content as a single string.
    """
    # Avoid the mutable-default-argument anti-pattern of the original
    # signature (``tagmap={}``); behavior for callers is unchanged.
    if tagmap is None:
        tagmap = {}
    fp = io.StringIO()
    def visit(node):
        # Mapped tags are replaced wholesale; children are not visited.
        try:
            fp.write(tagmap[node.tag])
            return
        except KeyError:
            pass
        if node.text:
            fp.write(node.text)
        for child in node:
            visit(child)
            if child.tail:
                fp.write(child.tail)
    visit(node)
    return fp.getvalue()
def xml_info(node):
    """Print a lists of tags and attributes in an XML tree."""
    tags = set()
    attrib = set()
    stack = [node]
    # Iterative traversal; visit order is irrelevant since output is sorted.
    while stack:
        current = stack.pop()
        tags.add(current.tag)
        attrib.update(current.attrib)
        stack.extend(current)
    print('Tags:', ' '.join(sorted(tags)))
    print('Attributes:', ' '.join(sorted(attrib)))
def get_param(node):
    """Extract a Parameter (name, fulltype, typename) from a proto/param node."""
    children = list(node)
    if len(children) == 1:
        # Bare name: the whole type is the node's leading text.
        name_node = children[0]
        typename = None
        pieces = [node.text]
    elif len(children) == 2:
        # <ptype> then <name>: type spans text + ptype + tail.
        ptype_node, name_node = children
        assert ptype_node.tag == 'ptype'
        typename = ptype_node.text
        pieces = [node.text, typename, ptype_node.tail]
    else:
        assert False
    fulltype = ''.join(piece or '' for piece in pieces).strip()
    assert name_node.tag == 'name'
    return Parameter(name_node.text, fulltype, typename)
def cmd_name(name):
    """Strip the 'gl' prefix from an OpenGL command name."""
    prefix = 'gl'
    assert name.startswith(prefix)
    return name[len(prefix):]
def enum_name(name):
    """Strip the 'GL_' prefix from an OpenGL enumeration name."""
    prefix = 'GL_'
    assert name.startswith(prefix)
    return name[len(prefix):]
# Matches alternating runs of non-digits and digits; used to split names
# like "TEXTURE10" into comparable chunks. A raw string is used: '\D'/'\d'
# are not valid *string* escapes and raise warnings/errors on new Pythons.
DIGITCHUNK = re.compile(r'(\D*)(\d*)')

def sort_key(name):
    """Get the sort key for text containing numbers.

    Splits *name* into (text, number, text, number, ...) so numeric runs
    compare numerically rather than lexicographically.
    """
    ms = DIGITCHUNK.findall(name)
    parts = []
    for text, digits in ms:
        if not (text or digits):
            # findall yields a final empty match; stop there.
            break
        parts.append(text)
        if not digits:
            break
        parts.append(int(digits))
    return tuple(parts)
def profile_key(prof):
    """Sort key placing the 'core' profile ahead of all other profiles."""
    rank = 0 if prof == 'core' else 1
    return (rank, prof)
def ext_key(name):
    """Sort key placing ARB/KHR/OES extensions ahead of vendor extensions."""
    prefix, sep, _rest = name.partition('_')
    if sep and prefix in ('ARB', 'KHR', 'OES'):
        return (0, name)
    return (1, name)
def encode_version(v):
    """Encode an (major, minor) version pair as a BCD hex literal string."""
    major, minor = v
    assert 0 <= major and 0 <= minor <= 15
    return '0x{:x}'.format(major * 16 + minor)
class Registry(object):
    """OpenGL registry.

    Parsed view of gl.xml for one (api, profile) pair; produces the
    generated C headers and loader data via get_data().
    """
    __slots__ = ['types', 'groups', 'enum_groups', 'enums', 'commands',
                 'features', 'extensions', 'api', 'profile',
                 'dependencies']
    def __init__(self, api, profile):
        # api: e.g. 'gl', 'gles2'; profile: 'core' or 'compatibility'.
        self.types = []
        self.groups = {}
        self.enum_groups = {}
        self.enums = {}
        self.commands = {}
        self.features = {}
        self.extensions = {}
        self.api = api
        self.profile = profile
        # Files read while building this registry (for build-system deps).
        self.dependencies = [__file__]
    @classmethod
    def load(class_, path, api, profile):
        """Load the OpenGL registry file from XML."""
        if path is None:
            path = os.path.join(os.path.dirname(__file__), 'gl.xml')
        obj = class_(api, profile)
        obj.dependencies.append(path)
        tree = etree.parse(path)
        # Dispatch each top-level tag to the matching _read_* method.
        for e in tree.getroot():
            try:
                func = getattr(obj, '_read_' + e.tag)
            except AttributeError:
                raise ValueError('Unknown tag: {}'.format(e.tag))
            func(e)
        return obj
    def _read_comment(self, node):
        """Read a top-level comment tag."""
        pass
    def _read_types(self, node):
        """Read a top-level types tag."""
        # APIENTRY in type definitions becomes our SGGL_API macro.
        tagmap = {'apientry': 'SGGL_API'}
        for child in node:
            assert child.tag == 'type'
            if child.attrib.get('api', 'gl') != self.api:
                continue
            try:
                name = child.attrib['name']
            except KeyError:
                # Name may be given as a child element instead.
                for e in child:
                    if e.tag == 'name':
                        name = e.text
                        break
                else:
                    assert False
            self.types.append(Type(
                name,
                flat_text(child, tagmap),
                child.attrib.get('requires')))
    def _read_groups(self, node):
        """Read a top-level groups tag."""
        for child in node:
            assert child.tag == 'group'
            gname = child.attrib['name']
            assert gname not in self.groups
            group = []
            self.groups[gname] = group
            for child2 in child:
                group.append(enum_name(child2.attrib['name']))
    def _read_enums(self, node):
        """Read a top-level enums tag."""
        gname = (node.attrib['namespace'], node.attrib.get('group', '-'))
        try:
            gdata = self.enums[gname]
        except KeyError:
            gdata = {}
            self.enums[gname] = gdata
        for child in node:
            if child.tag != 'enum':
                assert child.tag == 'unused'
                continue
            value = int(child.attrib['value'], 0)
            gdata[enum_name(child.attrib['name'])] = value
        # Flatten the group into the top-level enum map as well.
        self.enums.update(gdata)
    def _read_commands(self, node):
        """Read a top-level commands tag."""
        for child in node:
            assert child.tag == 'command'
            assert len(child) > 0
            assert child[0].tag == 'proto'
            proto = get_param(child[0])
            params = []
            for child2 in child[1:]:
                if child2.tag == 'param':
                    params.append(get_param(child2))
                else:
                    assert child2.tag in ('glx', 'vecequiv', 'alias')
            name = cmd_name(proto.name)
            assert name not in self.commands
            self.commands[name] = Function(proto, params)
    def _read_feature(self, node):
        """Read a top-level feature tag."""
        if node.attrib['api'] != self.api:
            return
        # Store the feature node keyed by (major, minor) version.
        version = node.attrib['number']
        i = version.index('.')
        version = int(version[:i]), int(version[i+1:])
        self.features[version] = node
    def _read_extensions(self, node):
        """Read a top-level extensions tag."""
        for child in node:
            supported = child.attrib['supported'].split('|')
            if self.api not in supported:
                continue
            name = child.attrib['name']
            self.extensions[enum_name(name)] = child
    def get_platform_info(self, platform):
        """Read <platform>.txt and return (version, extensions, deps).

        The file starts with "api version" lines, then a blank line,
        then one extension name per line.
        """
        if not re.match(r'^\w+$', platform):
            raise ValueError('Invalid platform: {!r}'.format(platform))
        path = os.path.join(
            os.path.dirname(__file__), platform + '.txt')
        try:
            fp = open(path)
        except FileNotFoundError:
            raise ValueError('Unsupported platform: {}'.format(platform))
        def die(msg):
            raise Exception('{}:{}: {}'.format(path, lineno, msg))
        with fp:
            lines = enumerate(fp, 1)
            version = None
            myapi = self.api, self.profile
            for lineno, line in lines:
                fields = line.split()
                if not fields:
                    break
                if len(fields) != 2:
                    die('must have two fields')
                if fields[0] not in API_LIST:
                    die('invalid API')
                api = parse_api(fields[0])
                if myapi == api:
                    try:
                        version = parse_version(fields[1])
                    except ValueError:
                        die('invalid version number')
            if not version:
                raise ValueError(
                    'No data for this API and platform: {}, {!r}'
                    .format(platform, myapi))
            exts = []
            for lineno, line in lines:
                line = line.strip()
                if line:
                    exts.append(line)
        return version, exts, [path]
    def get_data(self, *, max_version, extensions=[], platform):
        """Get the C and C++ header data.

        Returns (deps, data), where deps is a list of files used to
        generate the data, and data is a dictionary mapping file names
        to file contents (bytes).
        """
        deps = list(self.dependencies)
        files = {}
        # decl_version/decl_extensions: what the platform links directly
        # (declared), vs. what must be loaded through sggl_func.
        decl_version, decl_extensions, decl_deps = \
            self.get_platform_info(platform)
        deps.extend(decl_deps)
        del decl_deps
        # Validate input
        if max_version not in self.features:
            raise ValueError('No such version: {}.{}'.format(*max_version))
        versions = [version for version in sorted(self.features)
                    if version <= max_version]
        extensions = sorted(extensions, key=ext_key)
        # Create list of interfaces
        ifaces = (
            [self._get_core(version, version <= decl_version)
             for version in versions] +
            [self._get_extension(extension, extension in decl_extensions)
             for extension in extensions])
        # Emit all interface headers, assign commands to numbers
        # (declared commands get index None and are linked directly).
        command_dict = {}
        command_list = []
        for iface in ifaces:
            if not iface.declare:
                continue
            for command in iface.commands:
                command_dict[command] = None
        for n, iface in enumerate(ifaces):
            for command in sorted(iface.commands):
                if command not in command_dict:
                    command_dict[command] = len(command_list)
                    command_list.append(command)
            file = io.StringIO()
            iface.dump(file, ifaces[:n], command_dict)
            files[iface.filename] = file.getvalue()
        file = io.StringIO()
        self._dump_common(file, versions, extensions,
                          command_list, command_dict.keys())
        files['common.h'] = file.getvalue()
        file = io.StringIO()
        self._dump_data(file, versions, extensions, command_list)
        files['opengl_data.c'] = file.getvalue()
        data = {name: value.encode('ASCII')
                for name, value in files.items()}
        return deps, data
    def _dump_common(self, file, versions, extensions, command_list,
                     all_commands):
        """Emit common.h: required typedefs, counts, version/ext macros."""
        file.write(NOTICE)
        file.write(COMMON_HEADER)
        print(file=file)
        # Write OpenGL types
        # Only emit typedefs actually referenced by emitted commands,
        # plus their transitive 'requires' dependencies.
        required = set()
        for cmd in all_commands:
            cmd = self.commands[cmd]
            required.add(cmd.proto.typename)
            required.update(p.typename for p in cmd.params)
        update = True
        require_map = {type.name: type.requires for type in self.types}
        required.discard(None)
        while update:
            update = False
            for type in list(required):
                requires = require_map[type]
                if requires is None or requires in required:
                    continue
                required.add(requires)
                update = True
        for type in self.types:
            if type.name in required:
                print(type.definition, file=file)
        print(file=file)
        file.write(
            'enum {{\n'
            '    SGGL_VERSIONCOUNT = {},\n'
            '    SGGL_EXTCOUNT = {},\n'
            '    SGGL_ENTRYCOUNT = {}\n'
            '}};\n'
            '\n'
            .format(len(versions), len(extensions), len(command_list)))
        for version in versions:
            print('#define SGGL_VERSION_{0[0]}_{0[1]} '
                  '(sggl_ver >= {1})'
                  .format(version, encode_version(version)),
                  file=file)
        if extensions:
            print(file=file)
            print('#define SGGL_HASEXTS 1', file=file)
            for n, extension in enumerate(extensions):
                # Each extension is one bit in the sggl_ext bitmask array.
                print('#define SGGL_{2} ((sggl_ext[{0}] & {1}u) != 0)'
                      .format(n // 32, 1 << (n & 31), extension), file=file)
        print(file=file)
        glext = ''
        file.write(COMMON_FOOTER)
    def _dump_data(self, file, versions, extensions, command_list):
        """Emit opengl_data.c: version table and NUL-separated name tables."""
        file.write(NOTICE)
        print(file=file)
        file.write(
            'const unsigned char SGGL_VERSIONS[{}] = {{\n'
            '    {}\n'
            '}};\n'
            '\n'
            .format(len(versions),
                    ',\n    '.join(
                        ', '.join(encode_version(v) for v in versions[i:i+8])
                        for i in range(0, len(versions), 8))))
        print('const char SGGL_ENTRYNAME[] =', file=file)
        for command in command_list:
            print('"{}\\0"'.format(command), file=file)
        print(';', file=file)
        if extensions:
            print(file=file)
            print('const char SGGL_EXTENSIONNAME[] =',
                  file=file)
            for extension in extensions:
                print('"{}\\0"'.format(extension), file=file)
            print(';', file=file)
    def _get_core(self, version, declare):
        """Build a CoreInterface by replaying require/remove up to version."""
        if version not in self.features:
            raise ValueError('No such version: {}.{}'.format(*version))
        obj = CoreInterface(self, declare, version, self.profile)
        for fversion, feature in sorted(self.features.items()):
            for child in feature:
                prof = child.attrib.get('profile')
                if prof is not None and prof != self.profile:
                    continue
                if child.tag == 'require':
                    if fversion <= version:
                        obj._add(child)
                elif child.tag == 'remove':
                    # Removals apply regardless of version (profile pruning).
                    obj._remove(child)
                else:
                    assert False
        return obj
    def _get_extension(self, name, declare):
        """Build an ExtensionInterface from the extension's require blocks."""
        if name not in self.extensions:
            raise ValueError('No such extension: {}'.format(name))
        obj = ExtensionInterface(self, declare, name)
        for child in self.extensions[name]:
            api = child.attrib.get('api')
            if api is not None and api != self.api:
                continue
            prof = child.attrib.get('profile')
            if prof is not None and prof != 'core':
                continue
            if child.tag == 'require':
                obj._add(child)
            else:
                assert False
        return obj
class Interface(object):
    """A set of enums and commands emitted as one generated C header.

    ``declare`` marks interfaces the platform links directly (emitted as
    imports) rather than loading through the sggl_func table.
    """
    __slots__ = ['registry', 'commands', 'enums',
                 'filename', 'guard', 'namespace', 'flag',
                 'declare']
    def __init__(self, registry, declare):
        self.registry = registry
        self.commands = set()
        self.enums = set()
        self.declare = declare
    def _add(self, node):
        # Collect names from a <require> block.
        for child in node:
            if child.tag == 'command':
                self.commands.add(cmd_name(child.attrib['name']))
            elif child.tag == 'enum':
                self.enums.add(enum_name(child.attrib['name']))
            else:
                assert child.tag == 'type'
    def _remove(self, node):
        # Drop names listed in a <remove> block.
        for child in node:
            if child.tag == 'command':
                self.commands.discard(cmd_name(child.attrib['name']))
            elif child.tag == 'enum':
                self.enums.discard(enum_name(child.attrib['name']))
            else:
                assert child.tag == 'type'
    def get_enums(self, previous):
        # Enums not already emitted by an earlier interface, with values,
        # sorted numerically-aware by name.
        enums = set(self.enums)
        for iface in previous:
            enums.difference_update(iface.enums)
        enums = [(enum, self.registry.enums[enum]) for enum in enums]
        enums.sort(key=lambda x: sort_key(x[0]))
        return enums
    def get_commands(self, previous):
        # Commands not already emitted by an earlier interface.
        cmds = set(self.commands)
        for iface in previous:
            cmds.difference_update(iface.commands)
        cmds = [(cmd, self.registry.commands[cmd]) for cmd in cmds]
        cmds.sort()
        return cmds
    def dump(self, file, previous, command_dict):
        """Write this interface's C header to *file*.

        ``previous`` interfaces are #included and their C++ namespaces
        re-exported; ``command_dict`` maps a command to its sggl_func
        index, or None if the platform links it directly.
        """
        file.write(NOTICE)
        file.write(IFACE_HEADER.format(info=self))
        for iface in previous:
            print('#include "{}"'.format(iface.filename), file=file)
        print('#ifdef __cplusplus', file=file)
        print('namespace {} {{'.format(self.namespace), file=file)
        for iface in previous:
            print('using namespace {};'.format(iface.namespace), file=file)
        print('#endif', file=file)
        enums = self.get_enums(previous)
        if enums:
            print(file=file)
            print('enum {', end='', file=file)
            tail = ''
            for name, value in enums:
                print(tail, file=file)
                print('    GL_{} = 0x{:04x}'.format(name, value),
                      end='', file=file)
                tail = ','
            print(file=file)
            print('};', file=file)
        commands = self.get_commands(previous)
        if commands:
            print(file=file)
            for name, command in commands:
                index = command_dict[name]
                # Direct import vs. inline trampoline through sggl_func.
                template = (COMMAND_DIRECT if index is None
                            else COMMAND_INDIRECT)
                file.write(template.format(
                    type=command.proto.fulltype,
                    name=name,
                    params=', '.join(
                        '{0.fulltype} {0.name}'.format(p)
                        for p in command.params) or 'void',
                    retstmt=
                    'return ' if command.proto.fulltype != 'void' else '',
                    ptypes=
                    ', '.join(p.fulltype for p in command.params) or 'void',
                    pnames=', '.join(p.name for p in command.params),
                    index=command_dict[name],
                ))
        print(file=file)
        file.write(IFACE_FOOTER)
class CoreInterface(Interface):
    """Interface for one core GL version, optionally profile-suffixed."""
    __slots__ = ['version']
    def __init__(self, registry, declare, version, profile):
        super(CoreInterface, self).__init__(registry, declare)
        self.version = version
        # Base tag like "3_2"; non-core profiles append their name.
        tag = '{0[0]}_{0[1]}'.format(version)
        if profile != 'core':
            tag = '{}_{}'.format(tag, profile)
        self.filename = '{}.h'.format(tag)
        self.guard = 'SGGL_{}'.format(tag.upper())
        self.namespace = 'gl_{}'.format(tag)
        self.flag = 'version_' + tag
class ExtensionInterface(Interface):
    """Interface for a single GL extension."""
    __slots__ = ['name']
    def __init__(self, registry, declare, name):
        super(ExtensionInterface, self).__init__(registry, declare)
        self.name = name
        self.filename = name + '.h'
        self.guard = 'SGGL_' + name
        self.namespace = 'gl_' + name
        self.flag = name
    def dump(self, file, previous, command_dict):
        # Extensions never fold earlier interfaces into their header.
        super(ExtensionInterface, self).dump(file, [], command_dict)
def parse_interface(name):
    """Parse an interface spec: 'ext:NAME[:profile]' or 'X.Y[:profile]'."""
    def invalid():
        raise ValueError('Invalid interface: {}'.format(name))
    parts = name.split(':')
    if parts[0] == 'ext':
        if len(parts) < 2:
            invalid()
        category = 'ext'
        info = parts[1]
        rest = parts[2:]
    else:
        pieces = parts[0].split('.')
        if len(pieces) != 2:
            invalid()
        try:
            info = (int(pieces[0]), int(pieces[1]))
        except ValueError:
            invalid()
        category = 'gl'
        rest = parts[1:]
    if not rest:
        profile = 'core'
    elif len(rest) == 1:
        profile = rest[0]
    else:
        invalid()
    if profile not in ('core', 'compatibility'):
        invalid()
    return InterfaceName(category, info, profile)
def parse_version(x):
    """Parse a 'major.minor' version string into an (int, int) tuple."""
    major, minor = x.split('.', 1)
    return int(major), int(minor)
def parse_api(s):
    """Split an 'api:profile' spec; profile is None when no colon is present."""
    api, sep, profile = s.partition(':')
    return (api, profile) if sep else (api, None)
| StarcoderdataPython |
348823 | <reponame>milkmiruku/diorite
#! /usr/bin/env python
# encoding: UTF-8
# Copyright 2009 <NAME>
# Copyright 2017 <NAME> <<EMAIL>>
"""
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from waflib import Task, Utils, Errors, Logs, Options, Node, Build
from waflib.TaskGen import feature, before_method
class valadoc(Task.Task):
    """Waf task that runs the ``valadoc`` binary over the collected inputs."""
    vars = ['VALADOC', 'VALADOCFLAGS']
    color = 'BLUE'
    after = ['cprogram', 'cstlib', 'cshlib']
    def runnable_status(self):
        # Honour the generator's 'skip' attribute set in process_valadoc2.
        if self.skip:
            return Task.SKIP_ME
        else:
            return super(valadoc, self).runnable_status()
    def run(self):
        # Command line = valadoc + accumulated flags + all input sources.
        cmd = self.env.VALADOC + self.env.VALADOCFLAGS
        cmd.extend([a.abspath() for a in self.inputs])
        return self.exec_command(cmd)
@before_method('process_source')
@feature('valadoc')
def process_valadoc2(self):
	"""
	Generate API documentation from Vala source code with valadoc

	doc = bld(
		features = 'valadoc',
		files = bld.path.ant_glob('src/**/*.vala'),
		output_dir = '../doc/html',
		package_name = 'vala-gtk-example',
		package_version = '1.0.0',
		packages = 'gtk+-2.0',
		vapi_dirs = '../vapi',
		force = True
	)
	"""
	try:
		# Don't process vala source files with valac
		self.meths.remove('process_source')
	except ValueError:
		pass

	valadoctask = self.valadoctask = self.create_task('valadoc')

	# Helpers to translate task-generator attributes into valadoc flags.
	def addflags(flags):
		self.env.append_value('VALADOCFLAGS', flags)

	def add_attr_to_flags(name, default=None, mandatory=False):
		value = getattr(self, name, default)
		setattr(self, name, value)
		if value:
			addflags('--%s=%s' % (name.replace("_", "-"), value))
			return True
		elif mandatory:
			self.bld.fatal('Missing attribute "%s".' % name)
		else:
			return False

	# Input files
	files = getattr(self, "files", None)
	if not files:
		self.bld.fatal('Missing input files')
	valadoctask.inputs.extend(files)

	# Output directory
	if hasattr(self, 'output_dir'):
		if isinstance(self.output_dir, str):
			valadoctask.output_dir = self.path.get_bld().make_node(self.output_dir)
			try:
				valadoctask.output_dir.mkdir()
			except OSError:
				raise self.bld.fatal('Cannot create the valadoc output dir %r' % valadoctask.output_dir)
		else:
			valadoctask.output_dir = self.output_dir
	else:
		raise self.bld.fatal('No valadoc output directory')
	valadoctask.outputs.append(valadoctask.output_dir)
	addflags('--directory=%s' % valadoctask.output_dir.abspath())

	# Attributes/flags
	valadoctask.skip = getattr(self, "skip", False)
	add_attr_to_flags("package_name", mandatory=True)
	add_attr_to_flags("package_version", mandatory=True)
	add_attr_to_flags("profile", 'gobject')
	add_attr_to_flags("doclet")
	add_attr_to_flags("gir")
	add_attr_to_flags("importdir")
	if not getattr(self, "protected", True):
		addflags("--no-protected")
	if getattr(self, "add_deps", False):
		addflags("--deps")
	# Boolean attributes that map 1:1 to valadoc switches.
	flags = (
		"internal", "private", "force", "verbose", "use_svg_images",
		"enable_experimental", "enable_experimental_non_null")
	for flag in flags:
		if getattr(self, flag, False):
			addflags("--%s" % flag.replace("_", "-"))

	# Lists
	self.packages = Utils.to_list(getattr(self, 'packages', []))
	self.use = Utils.to_list(getattr(self, 'use', []))
	self.import_packages = Utils.to_list(getattr(self, 'importpackages', []))
	self.vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', []))
	self.gir_dirs = Utils.to_list(getattr(self, 'gir_dirs', []))
	self.vala_defines = Utils.to_list(getattr(self, 'vala_defines', []))

	if self.profile == 'gobject':
		if not 'GOBJECT' in self.use:
			self.use.append('GOBJECT')

	self.vala_target_glib = getattr(self, 'vala_target_glib', getattr(Options.options, 'vala_target_glib', None))
	if self.vala_target_glib:
		addflags('--target-glib=%s' % self.vala_target_glib)

	if hasattr(self, 'use'):
		# Walk 'use' dependencies transitively, picking up generated .vapi
		# files as packages/vapidirs and ordering this task after them.
		local_packages = Utils.to_list(self.use)[:] # make sure to have a copy
		seen = []
		while len(local_packages) > 0:
			package = local_packages.pop()
			if package in seen:
				continue
			seen.append(package)

			# check if the package exists
			try:
				package_obj = self.bld.get_tgen_by_name(package)
			except Errors.WafError:
				continue

			# in practice the other task is already processed
			# but this makes it explicit
			package_obj.post()
			package_name = package_obj.target

			for task in package_obj.tasks:
				if isinstance(task, Build.inst):
					# TODO are we not expecting just valatask here?
					continue

				for output in task.outputs:
					if output.name == package_name + ".vapi":
						valadoctask.set_run_after(task)
						if package_name not in self.packages:
							self.packages.append(package_name)
						if output.parent not in self.vapi_dirs:
							self.vapi_dirs.append(output.parent)

			if hasattr(package_obj, 'use'):
				lst = self.to_list(package_obj.use)
				lst.reverse()
				local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages

	addflags(['--define=%s' % x for x in self.vala_defines])
	addflags(['--pkg=%s' % x for x in self.packages])
	addflags(['--import=%s' % x for x in self.import_packages])

	for vapi_dir in self.vapi_dirs:
		if isinstance(vapi_dir, Node.Node):
			node = vapi_dir
		else:
			node = self.path.find_dir(vapi_dir)
		if not node:
			Logs.warn('Unable to locate Vala API directory: %r', vapi_dir)
		else:
			addflags('--vapidir=%s' % node.abspath())

	for gir_dir in self.gir_dirs:
		if isinstance(gir_dir, Node.Node):
			node = gir_dir
		else:
			node = self.path.find_dir(gir_dir)
		if not node:
			Logs.warn('Unable to locate gir directory: %r', gir_dir)
		else:
			addflags('--girdir=%s' % node.abspath())
def configure(conf):
    """Detect the ``valadoc`` program needed to build the API documentation."""
    # Fixed grammar in the error message ("for generate" -> "to generate").
    conf.find_program('valadoc', errmsg='You must install valadoc <http://live.gnome.org/Valadoc> to generate the API documentation')
| StarcoderdataPython |
8151347 | <filename>syncmm/spotify.py
# According to old code-style
# Written by Sergievsky
# https://github.com/yepIwt
# 2021
import spotipy
from librespot.core import Session
class Library:
    """Read-only access to a Spotify user's library (liked tracks, saved albums).

    Authenticates either with a ready-made OAuth token or with a
    login/password pair (through librespot), then wraps a
    :class:`spotipy.Spotify` client.
    """

    # OAuth scope required to read the user's saved tracks/albums.
    __scope = "user-library-read"

    _access_token = None

    def __init__(self, login: str = None, password: str = None, token: str = None):
        """Create an authenticated client from `token` or from `login`/`password`."""
        # Pagination accumulator; kept per-instance (it used to be a shared
        # class attribute, so two Library objects could mix their results).
        self.__tracks = []
        if token:
            self._access_token = token
            self.__api = spotipy.Spotify(auth=token)
        else:
            session = Session.Builder().user_pass(login, password).create()
            self._access_token = session.tokens().get(self.__scope)
            self._session = session
            self.__api = spotipy.Spotify(auth=self._access_token)

    def __process_tracks_data(self, data: list):
        """Flatten raw saved-track items into plain dicts."""
        results = []
        for track_data in data:
            results.append(
                {
                    "title": track_data['track']['name'],
                    "album": track_data['track']['album']['name'],
                    "artists": [art['name'] for art in track_data['track']['album']['artists']],
                    "uri": track_data['track']['uri'],
                    "cover_url": track_data['track']['album']['images'][0]['url'],
                    "track_num": track_data['track']['track_number'],
                }
            )
        return results

    def __process_albums_data(self, data: list):
        """Flatten raw saved-album items (including their track listings)."""
        result = []
        for album_data in data:
            result.append(
                {
                    "album": album_data['album']['name'],
                    "artists": [art['name'] for art in album_data['album']['artists']],
                    "uri": album_data['album']['uri'],
                    "cover_url": album_data['album']['images'][0]['url'],
                    "tracks": [
                        {
                            "title": item['name'],
                            "artists": [art['name'] for art in item['artists']],
                            "uri": item['uri'],
                            "track_num": item['track_number'],
                        }
                        # Distinct loop variable: the original shadowed the
                        # outer `track_data` here, which worked but was
                        # confusing to read.
                        for item in album_data['album']['tracks']['items']
                    ],
                }
            )
        return result

    def __get_liked_tracks(self):
        """Recursively page through the user's saved tracks (50 per request)."""
        offset = len(self.__tracks)
        result = self.__api.current_user_saved_tracks(limit=50, offset=offset)['items']
        if result:
            self.__tracks.extend(result)
            self.__get_liked_tracks()
        return self.__process_tracks_data(self.__tracks)

    def __get_user_albums(self):
        """Recursively page through the user's saved albums (50 per request)."""
        offset = len(self.__tracks)
        result = self.__api.current_user_saved_albums(limit=50, offset=offset)['items']
        if result:
            self.__tracks.extend(result)
            self.__get_user_albums()
        return self.__process_albums_data(self.__tracks)

    def _get_a_track_by_uri(self, uri: str):
        """Fetch one track by Spotify URI and flatten it to a plain dict."""
        data = self.__api.track(uri)
        return {
            'title': data['name'],
            'album': data['album']['name'],
            'artists': [art['name'] for art in data['album']['artists']],
            'uri': data['uri'],
            'cover_url': data['album']['images'][0]['url'],
            'track_num': data['track_number'],
        }

    def __link_parse(self, link):
        """Turn an open.spotify.com track URL into a `spotify:track:` URI.

        Returns None when the link contains no track id.
        """
        start = link.find("track/")
        if start == -1:
            return None
        end = link.find("?si")
        if end == -1:
            # No "?si" tracking suffix: take the rest of the string.
            # (The original sliced with end == -1 and silently dropped the
            # last character of the track id.)
            end = len(link)
        return "spotify:track:{}".format(link[start + 6:end])

    def liked(self):
        """Return all of the user's liked (saved) tracks."""
        self.__tracks = []
        return self.__get_liked_tracks()

    def albums(self):
        """Return all of the user's saved albums with their tracks."""
        self.__tracks = []
        return self.__get_user_albums()

    def get_by_url(self, link: str):
        """Fetch a track given an open.spotify.com URL."""
        uri = self.__link_parse(link)
        return self.get_by_uri(uri)

    def get_by_uri(self, uri: str):
        """Fetch a track given a spotify:track: URI."""
        return self._get_a_track_by_uri(uri)
8071748 | import pickle
import csv
import numpy as np
import torch
from ml_logger import logger
# Template for the remote replay-buffer checkpoints, one per (env, seed) run.
path = 'gs://ge-data-improbable/checkpoints/model-free/model-free/rff_post_iclr/dmc/drq/4_layer/mlp/{env}/{seed}/checkpoint/replay_buffer.pkl'

envs = ['Acrobot-swingup', 'Quadruped-run', 'Quadruped-walk', 'Humanoid-run', 'Finger-turn_hard', 'Walker-run', 'Cheetah-run', 'Hopper-hop']
seeds = [100, 200, 300, 400, 500]

# Write one CSV row per environment with the per-dimension observation
# mean ("bias") and std ("scale"), pooled over every seed's replay buffer.
with open('/Users/aajay/Desktop/drq_obs_norm_mean_std.csv', 'w') as out_file:
    csv_out = csv.writer(out_file)
    csv_out.writerow(['env_name', 'bias', 'scale'])
    for env in envs:
        per_seed = [
            logger.load_torch(path.format(env=env, seed=seed)).obses
            for seed in seeds
        ]
        pooled = np.concatenate(per_seed, axis=0)
        csv_out.writerow([f'dmc:{env}-v1', pooled.mean(axis=0).tolist(), pooled.std(axis=0).tolist()])
| StarcoderdataPython |
1611714 | #!/usr/bin/env python
from __future__ import print_function
import sys
from Bio import SeqIO
def main():
    """Extract the 20 bp sgRNA spacer immediately 5' of the scaffold seed."""
    if len(sys.argv) < 2:
        # Without an input file there is nothing to do; the original printed
        # the usage and then fell through to an IndexError on sys.argv[1].
        print('Usage: {} sequence.fa'.format(sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    record = SeqIO.read(sys.argv[1], 'fasta')
    # First 10 bp of the sgRNA scaffold; the 20 bp spacer sits directly upstream.
    seed = 'GTTTTAGAGC'
    index = record.seq.find(seed)
    if index == -1:
        print('Not found!\n')
    else:
        extracted = record.seq[index - 20:index]
        print('{}: {}\n'.format(index, extracted))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
9726950 | <filename>tests/resources/transaction/test_dashboard.py
from twisted.internet.defer import inlineCallbacks
from hathor.transaction.resources import DashboardTransactionResource
from tests import unittest
from tests.resources.base_resource import StubSite, _BaseResourceTest
class BaseDashboardTest(_BaseResourceTest._ResourceTest):
    """Shared test cases for the /dashboard_tx resource.

    Concrete subclasses only select the sync protocol (sync-v1, sync-v2,
    sync-bridge) through the unittest parameter mixins.
    """

    # Not collected directly; only the parameterized subclasses run.
    __test__ = False

    def setUp(self):
        """Build a stub web site serving DashboardTransactionResource."""
        super().setUp()
        self.web = StubSite(DashboardTransactionResource(self.manager))

    @inlineCallbacks
    def test_get(self):
        """A valid request returns at most the requested number of txs/blocks."""
        tx_count = block_count = 6
        response = yield self.web.get("dashboard_tx", {
            b'block': str(block_count).encode(),
            b'tx': str(tx_count).encode()
        })
        data = response.json_value()
        self.assertLessEqual(len(data['transactions']), tx_count)
        self.assertLessEqual(len(data['blocks']), block_count)

    @inlineCallbacks
    def test_invalid_parameters(self):
        """Missing or non-numeric 'block'/'tx' parameters must yield success=False."""
        # wrong type block
        response = yield self.web.get("dashboard_tx", {b'block': b'a', b'tx': b'6'})
        data = response.json_value()
        self.assertFalse(data['success'])

        # missing block param
        response = yield self.web.get("dashboard_tx", {b'tx': b'6'})
        data = response.json_value()
        self.assertFalse(data['success'])

        # wrong type tx
        response = yield self.web.get("dashboard_tx", {b'block': b'6', b'tx': b'a'})
        data = response.json_value()
        self.assertFalse(data['success'])

        # missing tx param
        response = yield self.web.get("dashboard_tx", {b'block': b'6'})
        data = response.json_value()
        self.assertFalse(data['success'])
class SyncV1DashboardTest(unittest.SyncV1Params, BaseDashboardTest):
    """Run the dashboard tests under the sync-v1 protocol parameters."""
    __test__ = True
class SyncV2DashboardTest(unittest.SyncV2Params, BaseDashboardTest):
    """Run the dashboard tests under the sync-v2 protocol parameters."""
    __test__ = True
# sync-bridge should behave like sync-v2
class SyncBridgeDashboardTest(unittest.SyncBridgeParams, SyncV2DashboardTest):
    """Run the dashboard tests under sync-bridge (expected to match sync-v2)."""
    pass
| StarcoderdataPython |
146792 | <filename>setup.py
#!/usr/bin/env python
# coding: utf-8
from yudzuki import __version__
from setuptools import setup, find_packages
from codecs import open
from os import path
# Resolve paths relative to this setup.py so builds work from any cwd.
here = path.abspath(path.dirname(__file__))

# Long description comes from the README, normalising Windows line endings.
long_description = ""
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read().replace('\r\n', '\n')

# One requirement specifier per line; setuptools expects a list of strings,
# not one newline-joined blob. Read relative to `here` for consistency with
# the README above.
requirements = []
with open(path.join(here, "requirements.txt"), "r", encoding="utf-8") as f:
    requirements = f.read().splitlines()

setup(
    name='yudzuki.py',
    version=__version__,
    license='MIT',
    description='An API Wrapper for Yudzuki API',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='midorichaan',
    author_email='<EMAIL>',
    url='https://github.com/LunaProject-Discord/yudzuki.py',
    install_requires=requirements,
    packages=find_packages(),
    keywords='yudzuki.py',
)
| StarcoderdataPython |
4927739 | <filename>util/clavero/CNNTrainer.py
class CNNTrainer(AbstractTrainer):
    """Trainer for a simple class classification CNN."""

    def __init__(self, dataset_name, train_validation_split=.8, resume_checkpoint=None, batch_size=16, workers=4,
                 n_gpu=0, epochs=2, learning_rate=.01, momentum=.8):
        # Record the optimizer hyper-parameters before the base class wires
        # up the model, engines and data loaders.
        self.learning_rate = learning_rate
        self.momentum = momentum
        super().__init__(dataset_name, train_validation_split, resume_checkpoint, batch_size, workers, n_gpu, epochs)

    @property
    def initial_model(self):
        """Untrained network used when no checkpoint is resumed."""
        return ClassificationConvolutionalNetwork()

    @property
    def loss(self):
        """Criterion optimised during training."""
        return CrossEntropyLoss()

    @property
    def optimizer(self):
        """SGD over the current model parameters."""
        return SGD(self.model.parameters(), lr=self.learning_rate, momentum=self.momentum)

    @property
    def serialized_checkpoint(self):
        """Base checkpoint payload extended with this trainer's hyper-parameters."""
        checkpoint = dict(super().serialized_checkpoint)
        checkpoint['learning_rate'] = self.learning_rate
        checkpoint['momentum'] = self.momentum
        return checkpoint

    @property
    def trainer_id(self):
        """Stable identifier used for checkpoint naming."""
        return 'cnn_sk'

    def _create_evaluator_engine(self):
        metrics = {'accuracy': Accuracy(), 'loss': Loss(self.loss)}
        return create_supervised_evaluator(self.model, metrics=metrics, device=self.device)

    def _create_trainer_engine(self):
        return create_supervised_trainer(
            self.model, self.optimizer, self.loss,
            device=self.device, prepare_batch=prepare_batch)
if __name__ == '__main__':
    # NOTE(review): dataset_name, train_validation_split, resume_checkpoint,
    # batch_size, workers, n_gpu, epochs, learning_rate and momentum are not
    # defined anywhere in this module, so running it as a script raises
    # NameError -- presumably they were meant to come from CLI parsing.
    trainer = CNNTrainer(dataset_name, train_validation_split, resume_checkpoint, batch_size, workers, n_gpu, epochs,
                         learning_rate, momentum)
    trainer.run()
| StarcoderdataPython |
3234913 | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param root, a tree node
    # @param sum, an integer
    # @return a list of lists of integers
    def pathSum(self, root, sum):
        """Return every root-to-leaf path whose node values add up to `sum`."""
        if root is None:
            return []
        return self.solve(root, sum)

    def solve(self, root, sum):
        """Collect root-to-leaf paths from non-None `root` summing to `sum`."""
        remaining = sum - root.val
        if root.left is None and root.right is None:
            # Leaf: the path is valid only if this node consumes the target.
            return [[root.val]] if remaining == 0 else []
        paths = []
        # Left subtree first, then right, preserving the original ordering.
        for child in (root.left, root.right):
            if child is not None:
                paths.extend([root.val] + tail for tail in self.solve(child, remaining))
        return paths
| StarcoderdataPython |
11318380 | <filename>src/deprecated/revaluate.py<gh_stars>1-10
import os, sys, glob
import pickle
import numpy as np
from models import *
from agents import *
from utils.World import World
def evaluate(model, algorithm, graphics=False, robot=None, save_postfix=None):
    """Evaluate an algorithm's performance on a given model.

    Replays every recorded simulation in ``simulate_data`` with the requested
    robot/algorithm pair and accumulates the scores.

    Args:
        model (str): name of the robot model class to instantiate.
        algorithm (str): name of the control algorithm class to instantiate.
        graphics (bool): whether to replay the result in the 3D world viewer.
        robot (KinematicModel): optional pre-built agent with custom settings;
            useful when grid-searching parameters. Defaults to
            ``model(algorithm(), dT)``.
        save_postfix (str): sub-directory name for cached results
            (defaults to 'best').

    Returns:
        dict: the algorithm's average score per metric over all simulations.
    """
    if save_postfix is None:
        save_postfix = 'best'
    save_dir = os.path.join('eval_results', model, algorithm, save_postfix)
    dT = 0.02
    if robot is None:
        # Class names are resolved dynamically, e.g. "Ball2D(SafeSet(), dT)";
        # callers control these strings (not untrusted input).
        robot = eval(model + '(' + algorithm + '(), dT)')
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    data_dir = 'simulate_data'
    names = glob.glob1(data_dir, 'data_*')
    total_score = dict()
    n = len(names)
    for name in names:
        save_path = os.path.join(save_dir, name.replace('data', 'result'))
        # Cached results are reused, so delete them manually after updating an
        # algorithm to see the changes. Files are opened with `with` so the
        # handles are closed (the original leaked them).
        if os.path.exists(save_path):
            with open(save_path, 'rb') as f:
                record = pickle.load(f)
        else:
            with open(os.path.join(data_dir, name), 'rb') as f:
                record = pickle.load(f)
            record.robot_moves = np.matrix(np.zeros((np.shape(robot.x)[0], record.tot)))
            record.cnt = 0
        if robot.is_2D:
            human = HumanBall2D(MobileAgent(), dT)
        else:
            human = HumanBall3D(MobileAgent(), dT)
        # Make sure all the algorithms see the same goal sequence.
        human.reset(record.dT, record.human_goals)
        robot.reset(record.dT, record.robot_goals)
        record.model = model
        record.algorithm = algorithm
        for t in range(record.tot):
            human.update(robot)
            human.move(*record.human_moves[:, t])
            robot.update(human)
            robot.move()
            record.robot_moves[:, t] = robot.x
            record.robot_closest_P[:, t] = robot.m
            record.human_closest_P[:, t] = human.m
        if graphics:
            try:
                # Keep a reference to the world while the render loop runs.
                w = World(record.dT, human, robot, record)
                base.run()
            except SystemExit:
                pass
        save_data(save_dir, name.replace('data', 'result'), record)
        # Accumulate the running average over all recordings.
        for k in robot.score.keys():
            if k not in total_score:
                total_score[k] = robot.score[k] / n
            else:
                total_score[k] = total_score[k] + robot.score[k] / n
        print('score[efficiency]')
        print(robot.score['efficiency'])
        print('score[collision_cnt]')
        print(robot.score['collision_cnt'])
    print('total_score[efficiency]')
    print(total_score['efficiency'])
    print('total_score[collision_cnt]')
    print(total_score['collision_cnt'])
    save_data(save_dir, 'total_score', total_score)
    return total_score
def save_data(folder, name, record):
    """Pickle `record` to `folder/name`, creating `folder` if needed."""
    if not os.path.exists(folder):
        os.makedirs(folder)
    file_path = os.path.join(folder, name)
    # Context manager guarantees the handle is flushed and closed even on
    # error (the original left the file object open).
    with open(file_path, 'wb') as f:
        print(file_path)
        pickle.dump(record, f)
if __name__ == "__main__":
    # Usage: python revaluate.py <ModelClassName> <AlgorithmClassName>
    graphics = False
    evaluate(sys.argv[1], sys.argv[2], graphics=graphics)
6649899 | <reponame>mayi140611/mayiutils
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@contact:<EMAIL>
@file: scikit_surprise_wrapper.py
@time: 2019/3/13 12:22
https://pypi.org/project/scikit-surprise/#description
https://surprise.readthedocs.io/en/stable/getting_started.html
pip install scikit-surprise
scikit-surprise-1.0.6
"""
from surprise import SVD, KNNWithMeans, KNNBasic, NormalPredictor, evaluate
from surprise import Dataset, Reader
from surprise.model_selection import cross_validate
from surprise.model_selection import train_test_split
from surprise import accuracy
class SurpriseWrapper:
    """
    Commonly used algorithms in surprise: http://www.360doc.com/content/17/1218/21/40769523_714320532.shtml
    Commonly used data structures in surprise:
    1. surprise.dataset.Dataset: its main purpose is loading data sets.
    2. surprise.Trainset:
        https://surprise.readthedocs.io/en/stable/trainset.html#surprise.Trainset
        A trainset contains all useful data that constitutes a training set.
        Trainsets are different from Datasets. You can think of a Datasets as the raw data,
        and Trainsets as higher-level data where useful methods are defined.
        Also, a Datasets may be comprised of multiple Trainsets (e.g. when doing cross validation).
        A Trainset can be built from a Dataset via:
            Dataset.folds() -- deprecated
            DatasetAutoFolds.build_full_trainset()
            surprise.model_selection.train_test_split(Dataset)
        Common Trainset attributes:
            ur: The users ratings.
                This is a dictionary containing lists of tuples of the form (item_inner_id, rating).
                The keys are user inner ids.
            ir: The items ratings.
                This is a dictionary containing lists of tuples of the form (user_inner_id, rating).
                The keys are item inner ids.
            n_users
            n_items
            n_ratings
            rating_scale: the rating value range
            global_mean: the mean of all ratings
            all_items(): Generator function to iterate over all items.
            all_ratings()
    """
    def __init__(self):
        pass
if __name__ == '__main__':
    # Demo driver: `mode` / `submode` select which hard-coded example runs.
    mode = 4
    if mode == 4:
        """
        官方提供的get-started
        """
        # Examples adapted from the official surprise "Getting Started" guide.
        submode = 403
        if submode == 401:
            """
            Automatic cross-validation
            """
            data = Dataset.load_builtin('ml-100k')
            # Use the famous SVD algorithm.
            algo = SVD()
            # Run 5-fold cross-validation and print results.
            cross_validate(algo, data, measures=['RMSE', 'MAE', 'FCP'], cv=5, verbose=True)
            """
            Fold 1 Fold 2 Fold 3 Fold 4 Fold 5 Mean Std
            RMSE (testset) 0.9346 0.9331 0.9364 0.9324 0.9353 0.9344 0.0015
            MAE (testset) 0.7349 0.7371 0.7388 0.7318 0.7388 0.7363 0.0026
            FCP (testset) 0.7024 0.6981 0.7051 0.7001 0.7014 0.7014 0.0024
            Fit time 5.91 5.88 6.01 6.00 5.78 5.92 0.08
            Test time 0.21 0.19 0.20 0.20 0.18 0.20 0.01
            """
        elif submode == 402:
            """
            Train-test split and the fit() method
            """
            # Load the movielens-100k dataset (download it if needed),
            data = Dataset.load_builtin('ml-100k')
            # sample random trainset and testset
            # test set is made of 25% of the ratings.
            trainset, testset = train_test_split(data, test_size=.25)
            print(trainset.rating_scale)#(1, 5)
            # We'll use the famous SVD algorithm.
            algo = SVD()
            # Train the algorithm on the trainset, and predict ratings for the testset
            algo.fit(trainset)
            predictions = algo.test(testset)
            # Then compute RMSE
            accuracy.rmse(predictions)#RMSE: 0.9430
        elif submode == 403:
            """
            Train on a whole trainset and the predict() method
            """
            # Load the movielens-100k dataset
            data = Dataset.load_builtin('ml-100k')
            # Retrieve the trainset.
            trainset = data.build_full_trainset()
            # Build an algorithm, and train it.
            algo = KNNBasic()
            algo.fit(trainset)
            uid = str(196) # raw user id (as in the ratings file). They are **strings**!
            iid = str(302) # raw item id (as in the ratings file). They are **strings**!
            # get a prediction for specific users and items.
            # The predict() uses raw ids!
            pred = algo.predict(uid, iid, r_ui=4, verbose=True)
            """
            Computing the msd similarity matrix...
            Done computing similarity matrix.
            user: 196 item: 302 r_ui = 4.00 est = 4.06 {'actual_k': 40, 'was_impossible': False}
            """
        elif submode == 404:
            # Load a custom data set (translated from: 载入自己的数据集)
            reader = Reader(line_format='user item rating', sep=',')
            filepath = '../../../tmp/doc.csv'
            data = Dataset.load_from_file(filepath, reader)
            print(data.raw_ratings)
            trainset = data.build_full_trainset()
            print(type(trainset))#<class 'surprise.trainset.Trainset'>
            print(trainset.n_items)#404
            print(trainset.n_users)#404
            data.split(n_folds=5)
    if mode == 2:
        # NOTE(review): unreachable while mode == 4 above; it also relies on
        # `data`, which is only defined inside the mode == 4 branches, so this
        # branch would raise NameError if enabled on its own.
        # Use the famous KNNWithMeans algorithm.
        algo = KNNWithMeans()
        # Run 5-fold cross-validation and print results.
        cross_validate(algo, data, measures=['RMSE', 'MAE', 'FCP'], cv=5, verbose=True)
        """
        Computing the msd similarity matrix...
        Done computing similarity matrix.
        Computing the msd similarity matrix...
        Done computing similarity matrix.
        Computing the msd similarity matrix...
        Done computing similarity matrix.
        Computing the msd similarity matrix...
        Done computing similarity matrix.
        Computing the msd similarity matrix...
        Done computing similarity matrix.
        Evaluating RMSE, MAE, FCP of algorithm KNNWithMeans on 5 split(s).
        Fold 1 Fold 2 Fold 3 Fold 4 Fold 5 Mean Std
        RMSE (testset) 0.9514 0.9573 0.9517 0.9501 0.9455 0.9512 0.0038
        MAE (testset) 0.7510 0.7503 0.7502 0.7502 0.7453 0.7494 0.0021
        FCP (testset) 0.6973 0.7100 0.6979 0.7007 0.6996 0.7011 0.0046
        Fit time 0.67 0.72 0.72 0.73 0.67 0.70 0.03
        Test time 4.39 4.42 4.42 4.31 4.19 4.35 0.09
        """
11235996 | <reponame>anjalijain22/mity<filename>utils/chunky_regions.py<gh_stars>10-100
#!/usr/bin/env python3
# This code is adapted from fasta_generate_regions.py from https://github.com/ekg/freebayes
# usage: python3 chunky_regions.py --chunk_size INT --region [CHR:START-END] --bam_header_path [PATH]"
# or
# Usage: bam_header stdin | python3 chunky_regions.py --chunk_size INT --region [CHR[:START-END]]
# if no chunk_size provided, then a file with a single column of the chromsomes is produced
# to get the bam header:
# samtools view -H <bam_path>
import sys
import argparse
def print_chunks(chrom_name, start, end, chunk_size):
    """Print `chrom:chunk_start-chunk_end` region lines covering [start, end).

    Chunks are `chunk_size` long except possibly the last one, which is
    clipped to `end`.
    """
    chunk_start = start
    while chunk_start < end:
        chunk_end = min(chunk_start + chunk_size, end)
        print(chrom_name + ":" + str(chunk_start) + "-" + str(chunk_end))
        chunk_start = chunk_end


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Make regions from bam header')
    parser.add_argument('--chunk_size', action="store", dest='chunk_size')
    parser.add_argument('--region', action="store", dest='region')
    parser.add_argument('--bam_header_path', action="store", dest='bam_header_path')
    args = parser.parse_args()

    chunk_size = args.chunk_size
    region = args.region
    header_path = args.bam_header_path

    # A region cannot be chunked without a chunk size.
    if chunk_size is None and region is not None:
        print("Error: chunk_size is a required input if region is inputted")
        print("Usage: python3 chunky_regions.py --chunk_size INT --region [CHR:START-END] --bam_header_path [PATH]")
        print("or")
        sys.exit("Usage: bam_header stdin | python3 chunky_regions.py --chunk_size INT --region [CHR:START-END]")
    if chunk_size is not None:
        chunk_size = int(chunk_size)

    # The header comes either from --bam_header_path or from stdin.
    if header_path is None:
        if sys.stdin.isatty():
            print("Error: No bam header supplied to stdin")
            print("Usage: python3 chunky_regions.py --chunk_size INT --region [CHR:START-END] --bam_header_path [PATH]")
            print("or")
            sys.exit("Usage: bam_header stdin | python3 chunky_regions.py --chunk_size INT --region [CHR[:START-END]]")
        header_lines = [line.rstrip('\n') for line in sys.stdin]
    else:
        # Close the file once it is read (the original leaked the handle).
        with open(header_path) as bam_header:
            header_lines = [line.rstrip('\n') for line in bam_header]

    # Keep only @SQ lines; each carries SN:<name> and LN:<length> fields.
    sq_fields = [line.split("\t") for line in header_lines if line[0:3] == "@SQ"]
    bam_chrom = [fields[1].split("SN:")[1] for fields in sq_fields]
    length = [fields[2].split("LN:")[1] for fields in sq_fields]

    if region is None:
        sys.stderr.write("No region supplied - will create chunks over entrire bam header \n")
        if chunk_size is not None:
            # Chunk every chromosome in the header.
            for chrom_name, chrom_len in zip(bam_chrom, length):
                print_chunks(chrom_name, 0, int(chrom_len), chunk_size)
        else:
            sys.stderr.write("No chunk_size supplied - will just output chromosomes \n")
            for chrom in bam_chrom:
                print(chrom)
    else:
        region = region.split(":")
        if len(region) == 1:
            # Whole chromosome.
            chrom_name = region[0]
            chrom_length = int(length[bam_chrom.index(chrom_name)])
            print_chunks(chrom_name, 0, chrom_length, chunk_size)
        else:
            # CHR:START-END -- clip the end to the chromosome length.
            chrom_name = region[0]
            start_end = region[1].split("-")
            if len(start_end) == 2:
                region_start = int(start_end[0])
                region_end = int(start_end[1])
                chrom_length = int(length[bam_chrom.index(chrom_name)])
                print_chunks(chrom_name, region_start, min(chrom_length, region_end), chunk_size)
            else:
                sys.exit("Error: region must be CHR:START-END or just CHR")
5007733 | <filename>python_files/trainer.py
import cv2
import numpy as np
from PIL import Image
import os
from get_yml import *
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
path1 = '../Dataset'


def getLabels(path):
    """Collect detected face crops and their integer-id strings from `path`.

    `path` holds one sub-directory per person, named "<id>[.suffix]", each
    containing that person's images. Defined once at module level (the
    original re-defined this function inside the nested training loop).
    """
    faceslist = []
    ids = []
    for person_id in os.listdir(path):
        for image in os.listdir(os.path.join(path, person_id)):
            PIL_img = Image.open(os.path.join(path, person_id, image))
            img_numpy = np.array(PIL_img, 'uint8')
            faces = detector.detectMultiScale(img_numpy)
            for (x, y, w, h) in faces:
                faceslist.append(img_numpy[y:y + h, x:x + w])
                # Directory name is "<id>[.suffix]"; keep the id part only.
                ids.append(person_id.split('.')[0])
    return faceslist, ids


# Train one model per (class, division) directory and upload each .yml file.
for classes in os.listdir(path1):
    path2 = os.path.join(path1, classes)
    for division in os.listdir(path2):
        path = os.path.join(path2, division)
        print("Model training please wait...... \n")
        faces, ids = getLabels(path)
        # Recognizer labels must be integers.
        recognizer.train(faces, np.array([int(i) for i in ids]))
        recognizer.write('{}-{}.yml'.format(classes, division))
        # path_cloud = 'yml/'+classes+'/'+division+'/'+classes+'-'+division+'.yml'
        put_yml(classes, division)

print("\n\n --- Model Trained And Uploaded Success Fully ---")
| StarcoderdataPython |
5008033 | import os
import tempfile
import numpy as np
import xarray as xr
from fv3fit.emulation.data import io
def _get_ds():
    """Build a small two-variable dataset with dims (sample=10, z=3)."""
    temperature = xr.DataArray(data=np.arange(30).reshape(10, 3), dims=["sample", "z"])
    humidity = xr.DataArray(data=np.arange(30, 60).reshape(10, 3), dims=["sample", "z"])
    return xr.Dataset({"air_temperature": temperature, "specific_humidity": humidity})
def test_get_nc_files():
    """get_nc_files should return exactly the netCDF paths written to the dir."""
    dataset = _get_ds()
    with tempfile.TemporaryDirectory() as tmpdir:
        expected_paths = [os.path.join(tmpdir, f"file{i}.nc") for i in range(3)]
        for nc_path in expected_paths:
            dataset.to_netcdf(nc_path)

        found = io.get_nc_files(tmpdir)

        assert len(found) == len(expected_paths)
        assert all(p in expected_paths for p in found)
class MockGCSFilesystem:
    """Minimal stand-in for a gcsfs filesystem: fixed protocol and glob results."""

    protocol = ("gs", "gcs")

    def glob(self, *args):
        # Explicit `self` added; the original omitted it and only worked
        # because *args absorbed the instance.
        return [
            "fake-bucket/file1.nc",
            "fake-bucket/file2.nc",
        ]
def test_get_nc_files_remote_protocol_prepend():
    """Paths returned for a remote fs should keep the gs:// scheme prefix."""
    mock_fs = MockGCSFilesystem()
    found = io.get_nc_files("gs://fake-bucket", fs=mock_fs)
    assert len(found) == 2
    assert all(p.startswith("gs://") for p in found)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.